From ce2d7d4546d6df7afb2d42aaabb3ed089e27b05d Mon Sep 17 00:00:00 2001 From: Vincent Shen Date: Tue, 31 Mar 2026 01:35:46 -0700 Subject: [PATCH] chore(deps): update all Go dependencies --- go.mod | 336 +- go.sum | 832 +- internal/pkg/util/mock_client.go | 4 +- .../protovalidate/protocolbuffers/go/LICENSE | 201 + .../go/buf/validate/validate.pb.go | 17632 ++++++++++++++++ .../buf/validate/validate_protoopaque.pb.go | 15998 ++++++++++++++ vendor/cel.dev/expr/BUILD.bazel | 1 - vendor/cel.dev/expr/MODULE.bazel | 22 +- vendor/cel.dev/expr/checked.pb.go | 749 +- vendor/cel.dev/expr/eval.pb.go | 77 +- vendor/cel.dev/expr/explain.pb.go | 113 +- vendor/cel.dev/expr/syntax.pb.go | 879 +- vendor/cel.dev/expr/value.pb.go | 348 +- vendor/cloud.google.com/go/auth/CHANGES.md | 19 + .../go/auth/credentials/internal/gdch/gdch.go | 33 +- .../go/auth/httptransport/httptransport.go | 9 + .../go/auth/httptransport/transport.go | 83 +- .../go/auth/internal/jwt/jwt.go | 41 +- .../go/auth/internal/transport/transport.go | 30 + .../go/auth/internal/version.go | 4 +- vendor/connectrpc.com/connect/.gitignore | 5 + vendor/connectrpc.com/connect/.golangci.yml | 130 + vendor/connectrpc.com/connect/LICENSE | 201 + vendor/connectrpc.com/connect/MAINTAINERS.md | 12 + vendor/connectrpc.com/connect/Makefile | 121 + vendor/connectrpc.com/connect/README.md | 184 + vendor/connectrpc.com/connect/RELEASE.md | 44 + vendor/connectrpc.com/connect/SECURITY.md | 5 + vendor/connectrpc.com/connect/buf.gen.yaml | 19 + vendor/connectrpc.com/connect/buf.yaml | 14 + vendor/connectrpc.com/connect/buffer_pool.go | 54 + vendor/connectrpc.com/connect/client.go | 392 + .../connectrpc.com/connect/client_stream.go | 441 + vendor/connectrpc.com/connect/code.go | 226 + vendor/connectrpc.com/connect/codec.go | 259 + vendor/connectrpc.com/connect/compression.go | 224 + vendor/connectrpc.com/connect/connect.go | 482 + vendor/connectrpc.com/connect/context.go | 242 + .../connect/duplex_http_call.go | 471 + 
vendor/connectrpc.com/connect/envelope.go | 387 + vendor/connectrpc.com/connect/error.go | 458 + vendor/connectrpc.com/connect/error_writer.go | 179 + vendor/connectrpc.com/connect/handler.go | 423 + .../connectrpc.com/connect/handler_stream.go | 198 + vendor/connectrpc.com/connect/header.go | 128 + .../connect/idempotency_level.go | 68 + vendor/connectrpc.com/connect/interceptor.go | 138 + .../connectext/grpc/status/v1/status.pb.go | 165 + vendor/connectrpc.com/connect/option.go | 647 + .../connectrpc.com/connect/protobuf_util.go | 42 + vendor/connectrpc.com/connect/protocol.go | 424 + .../connect/protocol_connect.go | 1449 ++ .../connectrpc.com/connect/protocol_grpc.go | 1010 + vendor/connectrpc.com/connect/recover.go | 64 + vendor/cuelabs.dev/go/oci/ociregistry/func.go | 13 +- .../go/oci/ociregistry/interface.go | 7 +- vendor/cuelabs.dev/go/oci/ociregistry/iter.go | 19 +- .../go/oci/ociregistry/ociauth/authfile.go | 15 +- .../go/oci/ociregistry/ociclient/lister.go | 9 +- vendor/cuelang.org/go/cue/ast/ast.go | 357 +- .../cuelang.org/go/cue/ast/astutil/apply.go | 131 +- .../cuelang.org/go/cue/ast/astutil/resolve.go | 223 +- .../go/cue/ast/astutil/sanitize.go | 48 +- vendor/cuelang.org/go/cue/ast/astutil/util.go | 12 +- vendor/cuelang.org/go/cue/ast/ident.go | 29 +- vendor/cuelang.org/go/cue/ast/importpath.go | 12 +- vendor/cuelang.org/go/cue/ast/walk.go | 56 +- vendor/cuelang.org/go/cue/build/context.go | 6 + vendor/cuelang.org/go/cue/build/import.go | 2 +- vendor/cuelang.org/go/cue/build/instance.go | 138 +- vendor/cuelang.org/go/cue/context.go | 26 +- vendor/cuelang.org/go/cue/cue.go | 13 +- .../go/cue/cuecontext/cuecontext.go | 7 +- vendor/cuelang.org/go/cue/decode.go | 121 +- vendor/cuelang.org/go/cue/errors.go | 37 +- vendor/cuelang.org/go/cue/errors/errors.go | 147 +- vendor/cuelang.org/go/cue/format/import.go | 6 +- vendor/cuelang.org/go/cue/format/node.go | 155 +- vendor/cuelang.org/go/cue/format/printer.go | 4 +- 
vendor/cuelang.org/go/cue/format/simplify.go | 22 +- vendor/cuelang.org/go/cue/instance.go | 30 +- .../go/cue/interpreter/embed/embed.go | 326 +- vendor/cuelang.org/go/cue/literal/quote.go | 14 +- vendor/cuelang.org/go/cue/literal/string.go | 19 +- vendor/cuelang.org/go/cue/load/config.go | 27 +- vendor/cuelang.org/go/cue/load/errors.go | 2 + vendor/cuelang.org/go/cue/load/fs.go | 9 + vendor/cuelang.org/go/cue/load/import.go | 34 +- vendor/cuelang.org/go/cue/load/instances.go | 35 +- .../cuelang.org/go/cue/load/loader_common.go | 29 +- vendor/cuelang.org/go/cue/load/search.go | 31 +- vendor/cuelang.org/go/cue/load/tags.go | 22 +- vendor/cuelang.org/go/cue/marshal.go | 41 +- vendor/cuelang.org/go/cue/op.go | 2 + vendor/cuelang.org/go/cue/parser/interface.go | 25 - vendor/cuelang.org/go/cue/parser/parser.go | 503 +- vendor/cuelang.org/go/cue/path.go | 60 +- vendor/cuelang.org/go/cue/query.go | 12 + vendor/cuelang.org/go/cue/scanner/scanner.go | 52 +- vendor/cuelang.org/go/cue/stats/stats.go | 10 +- vendor/cuelang.org/go/cue/token/position.go | 233 +- .../cuelang.org/go/cue/token/relpos_string.go | 5 +- vendor/cuelang.org/go/cue/token/token.go | 14 +- .../cuelang.org/go/cue/token/token_string.go | 35 +- vendor/cuelang.org/go/cue/types.go | 438 +- vendor/cuelang.org/go/encoding/json/json.go | 120 +- .../cuelang.org/go/encoding/json/pointer.go | 98 + .../encoding/jsonschema/constraints_array.go | 2 +- .../encoding/jsonschema/constraints_object.go | 37 +- .../go/encoding/jsonschema/crd.cue | 5 +- .../cuelang.org/go/encoding/jsonschema/crd.go | 24 +- .../go/encoding/jsonschema/decode.go | 57 +- .../jsonschema/external_teststats.txt | 31 +- .../go/encoding/jsonschema/generate.go | 1253 ++ .../go/encoding/jsonschema/generate_items.go | 926 + .../go/encoding/jsonschema/pointer.go | 44 - .../cuelang.org/go/encoding/jsonschema/ref.go | 18 +- .../go/encoding/jsonschema/util.go | 25 +- .../go/encoding/jsonschema/version.go | 2 +- .../go/encoding/jsonschema/version_string.go | 5 
+- .../cuelang.org/go/encoding/openapi/build.go | 15 +- .../cuelang.org/go/encoding/openapi/cycle.go | 20 +- .../cuelang.org/go/encoding/openapi/decode.go | 8 +- .../cuelang.org/go/encoding/openapi/errors.go | 12 + .../go/encoding/openapi/openapi.go | 15 +- .../go/encoding/protobuf/jsonpb/decoder.go | 2 +- .../cuelang.org/go/encoding/protobuf/parse.go | 22 +- .../encoding/protobuf/pbinternal/attribute.go | 5 +- .../go/encoding/protobuf/textproto/decoder.go | 18 +- .../go/encoding/protobuf/textproto/encoder.go | 2 +- .../cuelang.org/go/encoding/protobuf/types.go | 32 +- .../cuelang.org/go/encoding/protobuf/util.go | 7 +- vendor/cuelang.org/go/encoding/toml/decode.go | 38 +- .../go/encoding/xml/koala/decode.go | 36 +- vendor/cuelang.org/go/encoding/yaml/yaml.go | 40 +- .../go/internal/anyunique/unique.go | 122 + .../go/internal/astinternal/debug.go | 111 +- vendor/cuelang.org/go/internal/attrs.go | 2 +- vendor/cuelang.org/go/internal/cli/cli.go | 17 +- .../cuelang.org/go/internal/core/adt/adt.go | 14 +- .../go/internal/core/adt/arctype_string.go | 28 + .../cuelang.org/go/internal/core/adt/binop.go | 58 +- .../cuelang.org/go/internal/core/adt/call.go | 32 +- .../go/internal/core/adt/closed.go | 467 +- .../go/internal/core/adt/closed2.go | 69 - .../go/internal/core/adt/composite.go | 549 +- .../go/internal/core/adt/comprehension.go | 162 +- .../go/internal/core/adt/conjunct.go | 342 +- .../go/internal/core/adt/constraints.go | 65 +- .../go/internal/core/adt/context.go | 619 +- .../cuelang.org/go/internal/core/adt/cycle.go | 635 +- .../cuelang.org/go/internal/core/adt/debug.go | 2 +- .../go/internal/core/adt/default.go | 79 +- .../go/internal/core/adt/defidtype_string.go | 27 + .../go/internal/core/adt/disjunct.go | 446 +- .../go/internal/core/adt/disjunct2.go | 108 +- .../go/internal/core/adt/equality.go | 42 +- .../go/internal/core/adt/errorcode_string.go | 5 +- .../go/internal/core/adt/errors.go | 155 +- .../cuelang.org/go/internal/core/adt/eval.go | 2151 +- 
.../cuelang.org/go/internal/core/adt/expr.go | 774 +- .../go/internal/core/adt/feature.go | 18 +- .../go/internal/core/adt/fields.go | 21 +- .../cuelang.org/go/internal/core/adt/flags.go | 41 + .../cuelang.org/go/internal/core/adt/kind.go | 64 +- .../cuelang.org/go/internal/core/adt/log.go | 2 +- .../cuelang.org/go/internal/core/adt/mem.go | 26 +- vendor/cuelang.org/go/internal/core/adt/op.go | 4 +- .../go/internal/core/adt/op_string.go | 10 +- .../go/internal/core/adt/optional.go | 154 - .../go/internal/core/adt/overlay.go | 166 +- .../go/internal/core/adt/runmode_string.go | 8 +- .../cuelang.org/go/internal/core/adt/sched.go | 42 +- .../cuelang.org/go/internal/core/adt/share.go | 17 +- .../go/internal/core/adt/simplify.go | 118 +- .../go/internal/core/adt/states.go | 20 +- .../cuelang.org/go/internal/core/adt/tasks.go | 72 +- .../go/internal/core/adt/typocheck.go | 277 +- .../cuelang.org/go/internal/core/adt/unify.go | 107 +- .../go/internal/core/adt/validate.go | 32 +- .../internal/core/adt/vertexstatus_string.go | 12 +- .../go/internal/core/adt/weakmap.go | 71 + .../go/internal/core/compile/builtin.go | 127 +- .../go/internal/core/compile/compile.go | 434 +- .../go/internal/core/compile/label.go | 9 +- .../go/internal/core/compile/predeclared.go | 8 + .../go/internal/core/compile/validator.go | 20 +- .../go/internal/core/convert/go.go | 763 +- .../go/internal/core/debug/compact.go | 57 +- .../go/internal/core/debug/debug.go | 160 +- .../cuelang.org/go/internal/core/dep/dep.go | 87 +- .../cuelang.org/go/internal/core/dep/mixed.go | 15 +- .../go/internal/core/export/adt.go | 84 +- .../go/internal/core/export/export.go | 79 +- .../go/internal/core/export/expr.go | 36 +- .../go/internal/core/export/extract.go | 41 +- .../go/internal/core/export/label.go | 12 +- .../go/internal/core/export/self.go | 60 +- .../go/internal/core/export/toposort.go | 18 +- .../go/internal/core/export/value.go | 36 +- .../go/internal/core/format/printer.go | 26 + .../walk.go => 
internal/core/layer/layer.go} | 27 +- .../go/internal/core/runtime/build.go | 31 +- .../go/internal/core/runtime/errors.go | 52 - .../go/internal/core/runtime/extern.go | 78 +- .../go/internal/core/runtime/imports.go | 156 +- .../go/internal/core/runtime/resolve.go | 161 - .../go/internal/core/runtime/runtime.go | 27 +- .../go/internal/core/subsume/structural.go | 2 +- .../go/internal/core/subsume/subsume.go | 5 +- .../go/internal/core/subsume/value.go | 30 +- .../go/internal/core/subsume/vertex.go | 219 +- .../go/internal/core/toposort/graph.go | 12 +- .../go/internal/core/toposort/scc.go | 14 +- .../go/internal/core/toposort/vertex.go | 70 +- .../cuelang.org/go/internal/core/walk/walk.go | 14 + .../go/internal/cueconfig/config.go | 18 +- .../go/internal/cueexperiment/exp.go | 99 +- .../go/internal/cueexperiment/file.go | 263 +- .../go/internal/cueexperiment/parse.go | 68 +- .../go/internal/cueversion/version.go | 41 +- .../go/internal/encoding/encoder.go | 75 +- .../go/internal/encoding/encoding.go | 42 +- .../go/internal/encoding/json/patch.go | 136 + .../go/internal/encoding/yaml/decode.go | 379 +- .../go/internal/encoding/yaml/encode.go | 37 +- .../cuelang.org/go/internal/envflag/flag.go | 12 +- .../go/internal/filetypes/fileinfo.dat | Bin 153648 -> 153648 bytes .../go/internal/filetypes/filetypes.go | 53 +- .../go/internal/filetypes/tagtype_string.go | 5 +- .../go/internal/filetypes/tofile.go | 2 +- .../go/internal/filetypes/tofile_bootstrap.go | 2 +- .../go/internal/filetypes/types.cue | 3 +- .../go/internal/filetypes/types.go | 2 +- .../go/internal/filetypes/types_gen.go | 157 +- .../go/internal/filetypes/types_gen.go.tmpl | 2 +- vendor/cuelang.org/go/internal/internal.go | 219 +- .../builtin.go => internal/iterutil/iter.go} | 22 +- .../go/internal/mod/modfiledata/modfile.go | 2 + .../go/internal/mod/modimports/modimports.go | 47 +- .../go/internal/mod/modpkgload/import.go | 11 +- .../go/internal/mod/modpkgload/pkgload.go | 35 + 
.../mod/modrequirements/requirements.go | 1 - .../go/internal/mod/modresolve/resolve.go | 4 +- .../go/internal/mod/semver/semver.go | 29 +- vendor/cuelang.org/go/internal/pkg/builtin.go | 22 +- vendor/cuelang.org/go/internal/pkg/context.go | 136 +- vendor/cuelang.org/go/internal/pkg/errors.go | 2 +- .../cuelang.org/go/internal/pkg/register.go | 9 +- vendor/cuelang.org/go/internal/pkg/types.go | 14 +- .../cuelang.org/go/internal/source/source.go | 73 +- vendor/cuelang.org/go/internal/task/task.go | 152 +- vendor/cuelang.org/go/internal/tools.mod | 10 +- vendor/cuelang.org/go/internal/tools.sum | 17 +- vendor/cuelang.org/go/internal/types/value.go | 10 +- vendor/cuelang.org/go/internal/value/value.go | 18 +- vendor/cuelang.org/go/mod/modfile/modfile.go | 2 +- .../cuelang.org/go/mod/modregistry/client.go | 73 +- vendor/cuelang.org/go/mod/module/dirfs.go | 15 +- vendor/cuelang.org/go/mod/module/module.go | 11 +- vendor/cuelang.org/go/mod/module/path.go | 18 +- vendor/cuelang.org/go/mod/module/versions.go | 3 - vendor/cuelang.org/go/mod/modzip/zip.go | 6 +- .../go/pkg/encoding/openapi/openapi.cue | 56 + .../go/pkg/encoding/openapi/openapi.go | 94 + .../go/pkg/encoding/openapi/pkg.go | 58 + .../go/pkg/encoding/yaml/manual.go | 4 +- vendor/cuelang.org/go/pkg/list/list.go | 9 +- vendor/cuelang.org/go/pkg/list/sort.go | 53 +- vendor/cuelang.org/go/pkg/math/bits/manual.go | 46 +- vendor/cuelang.org/go/pkg/net/ip.go | 146 +- vendor/cuelang.org/go/pkg/net/pkg.go | 64 + vendor/cuelang.org/go/pkg/net/url.go | 2 +- vendor/cuelang.org/go/pkg/path/os.go | 1 - vendor/cuelang.org/go/pkg/path/path.go | 2 +- vendor/cuelang.org/go/pkg/path/path_nix.go | 8 - vendor/cuelang.org/go/pkg/path/path_p9.go | 8 - vendor/cuelang.org/go/pkg/path/path_win.go | 11 - vendor/cuelang.org/go/pkg/regexp/manual.go | 8 +- vendor/cuelang.org/go/pkg/regexp/regexp.go | 1 - vendor/cuelang.org/go/pkg/register.go | 1 + vendor/cuelang.org/go/pkg/strconv/pkg.go | 12 + vendor/cuelang.org/go/pkg/strconv/strconv.go | 
155 +- vendor/cuelang.org/go/pkg/strings/manual.go | 5 +- vendor/cuelang.org/go/pkg/strings/strings.go | 4 +- vendor/cuelang.org/go/pkg/tool/exec/exec.go | 2 +- vendor/cuelang.org/go/pkg/tool/file/file.cue | 18 + vendor/cuelang.org/go/pkg/tool/file/file.go | 15 + vendor/cuelang.org/go/pkg/tool/file/pkg.go | 24 + vendor/cuelang.org/go/pkg/tool/http/http.cue | 163 +- vendor/cuelang.org/go/pkg/tool/http/http.go | 21 +- vendor/cuelang.org/go/pkg/tool/http/pkg.go | 192 +- vendor/cuelang.org/go/pkg/tool/http/serve.go | 256 + vendor/cuelang.org/go/tools/flow/cycle.go | 116 + vendor/cuelang.org/go/tools/flow/flow.go | 528 + vendor/cuelang.org/go/tools/flow/run.go | 259 + .../cuelang.org/go/tools/flow/state_string.go | 27 + vendor/cuelang.org/go/tools/flow/tasks.go | 336 + vendor/cyphar.com/go-pathrs/.golangci.yml | 2 +- vendor/cyphar.com/go-pathrs/doc.go | 2 +- vendor/cyphar.com/go-pathrs/handle_linux.go | 10 +- .../go-pathrs/internal/fdutils/fd_linux.go | 2 +- .../internal/libpathrs/error_unix.go | 2 +- .../internal/libpathrs/libpathrs_linux.go | 6 +- .../go-pathrs/procfs/procfs_linux.go | 15 +- vendor/cyphar.com/go-pathrs/root_linux.go | 30 +- vendor/cyphar.com/go-pathrs/utils_linux.go | 2 +- .../pkg/credentials/provider/aliyuncli.go | 338 + .../provider/aliyuncli_configuration.go | 96 + .../credentials/provider/aliyuncli_profile.go | 149 + .../credentials/provider/chain_provider.go | 55 +- .../pkg/credentials/provider/credentials.go | 25 + .../provider/ecsmetadata_provider.go | 144 +- .../pkg/credentials/provider/env.go | 1 + .../pkg/credentials/provider/env_provider.go | 91 +- .../pkg/credentials/provider/file_cache.go | 143 + .../credentials/provider/function_provider.go | 53 +- .../pkg/credentials/provider/http.go | 29 + .../pkg/credentials/provider/ini.go | 134 + .../pkg/credentials/provider/ini_provider.go | 80 + .../pkg/credentials/provider/log.go | 15 +- .../pkg/credentials/provider/oidc_provider.go | 35 +- .../credentials/provider/remote_provider.go | 77 + 
.../pkg/credentials/provider/req.go | 18 + .../credentials/provider/rolearn_provider.go | 15 +- .../pkg/credentials/provider/updater.go | 34 +- .../pkg/credentials/provider/v1sdk.go | 4 + .../pkg/credentials/provider/v2sdk.go | 42 +- .../ack-ram-tool/pkg/ecsmetadata}/LICENSE | 0 .../ack-ram-tool/pkg/ecsmetadata/base.go | 33 + .../ack-ram-tool/pkg/ecsmetadata/client.go | 296 + .../ack-ram-tool/pkg/ecsmetadata/disk.go | 24 + .../ack-ram-tool/pkg/ecsmetadata/dynamic.go | 50 + .../ack-ram-tool/pkg/ecsmetadata/errors.go | 72 + .../ack-ram-tool/pkg/ecsmetadata/instance.go | 35 + .../pkg/ecsmetadata/interfaces.go | 117 + .../ack-ram-tool/pkg/ecsmetadata/net.go | 75 + .../ack-ram-tool/pkg/ecsmetadata/ram.go | 65 + .../ack-ram-tool/pkg/ecsmetadata/retry.go | 78 + .../ack-ram-tool/pkg/ecsmetadata/test.go | 24 + .../ack-ram-tool/pkg/ecsmetadata/userdata.go | 11 + .../ack-ram-tool/pkg/ecsmetadata/utils.go | 28 + .../Azure/go-autorest/autorest/adal/README.md | 2 +- .../go-autorest/autorest/adal/devicetoken.go | 10 +- .../go-autorest/autorest/adal/persist.go | 3 +- .../Azure/go-autorest/autorest/adal/token.go | 9 +- .../autorest/authorization_storage.go | 4 +- .../Azure/go-autorest/autorest/azure/async.go | 14 +- .../go-autorest/autorest/azure/auth/auth.go | 10 +- .../Azure/go-autorest/autorest/azure/azure.go | 4 +- .../go-autorest/autorest/azure/cli/profile.go | 3 +- .../go-autorest/autorest/azure/cli/token.go | 2 +- .../autorest/azure/environments.go | 3 +- .../autorest/azure/metadata_environment.go | 4 +- .../Azure/go-autorest/autorest/client.go | 9 +- .../autorest/date/go_mod_tidy_hack.go | 1 + .../Azure/go-autorest/autorest/preparer.go | 17 +- .../Azure/go-autorest/autorest/responder.go | 13 +- .../go-autorest/autorest/retriablerequest.go | 5 +- .../autorest/retriablerequest_1.7.go | 6 +- .../autorest/retriablerequest_1.8.go | 5 +- .../Azure/go-autorest/autorest/utility.go | 3 +- .../go-autorest/logger/go_mod_tidy_hack.go | 1 + .../Azure/go-autorest/logger/logger.go | 11 +- 
.../go-autorest/tracing/go_mod_tidy_hack.go | 1 + .../alibabacloud-go/debug/debug/assert.go | 12 - .../alibabacloud-go/debug/debug/debug.go | 22 + .../openapi-util/service/service.go | 164 +- .../alibabacloud-go/tea-utils/v2/LICENSE | 201 + .../tea-utils/v2/service/service.go | 694 + .../alibabacloud-go/tea/tea/json_parser.go | 4 +- .../github.com/alibabacloud-go/tea/tea/tea.go | 103 +- .../alibabacloud-go/tea/utils/logger.go | 8 +- .../aliyun/credentials-go/.gitignore | 4 + .../aliyun/credentials-go/.scrutinizer.yml | 13 + .../aliyun/credentials-go/CONTRIBUTING.md | 14 + .../aliyun/credentials-go/README-CN.md | 492 + .../aliyun/credentials-go/README.md | 503 + .../credentials/access_key_credential.go | 50 - .../credentials/bearer_token_credential.go | 5 +- .../credentials-go/credentials/credential.go | 380 +- .../credentials/credential_model.go | 15 + .../aliyun/credentials-go/credentials/doc.go | 2 + ...o => ecs_ram_role_credentials_provider.go} | 137 +- .../credentials/env_provider.go | 20 +- .../credentials/instance_provider.go | 7 +- .../credentials/internal/http/http.go | 151 + .../credentials/internal/utils/path.go | 18 + .../credentials/internal/utils/runtime.go | 36 + .../credentials/internal/utils/utils.go | 204 + .../credentials/oidc_credential.go | 195 - .../credentials-go/credentials/oidc_token | 1 - .../credentials/profile_provider.go | 57 +- .../credentials-go/credentials/provider.go | 17 +- .../credentials/provider_chain.go | 2 +- .../credentials/providers/cli_profile.go | 506 + .../credentials/providers/cloud_sso.go | 216 + .../credentials/providers/credentials.go | 22 + .../credentials/providers/default.go | 113 + .../credentials/providers/ecs_ram_role.go | 283 + .../credentials/providers/env.go | 55 + .../credentials/providers/external.go | 253 + .../credentials/providers/hook.go | 7 + .../credentials/providers/lock_unix.go | 17 + .../credentials/providers/lock_windows.go | 59 + .../credentials/providers/oauth.go | 290 + 
.../credentials/providers/oidc.go | 278 + .../credentials/providers/profile.go | 169 + .../credentials/providers/ram_role_arn.go | 375 + .../credentials/providers/static_ak.go | 67 + .../credentials/providers/static_sts.go | 83 + .../credentials/providers/uri.go | 152 + ...o => ram_role_arn_credentials_provider.go} | 93 +- .../credentials-go/credentials/request/doc.go | 3 + .../credentials/response/doc.go | 3 + ...o => rsa_key_pair_credentials_provider.go} | 47 +- .../credentials/sts_credential.go | 53 - .../credentials/uri_credential.go | 70 +- .../credentials-go/credentials/utils/doc.go | 3 + .../credentials/utils/runtime.go | 3 + .../credentials-go/credentials/utils/utils.go | 6 + .../github.com/aliyun/credentials-go/doc.go | 4 + .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws/protocol/query/middleware.go | 4 +- .../aws/aws-sdk-go-v2/aws/retry/middleware.go | 24 +- .../aws/transport/http/client.go | 11 + .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 35 + .../aws/aws-sdk-go-v2/config/config.go | 6 +- .../config/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/config/provider.go | 2 +- .../aws/aws-sdk-go-v2/config/resolve.go | 10 +- .../config/resolve_credentials.go | 3 +- .../aws/aws-sdk-go-v2/config/shared_config.go | 3 +- .../aws-sdk-go-v2/credentials/CHANGELOG.md | 34 + .../credentials/go_module_metadata.go | 2 +- .../credentials/ssocreds/sso_cached_token.go | 3 +- .../stscreds/web_identity_provider.go | 4 +- .../feature/ec2/imds/CHANGELOG.md | 23 + .../feature/ec2/imds/api_client.go | 4 +- .../feature/ec2/imds/api_op_GetDynamicData.go | 4 +- .../feature/ec2/imds/api_op_GetIAMInfo.go | 4 +- .../api_op_GetInstanceIdentityDocument.go | 4 +- .../feature/ec2/imds/api_op_GetMetadata.go | 4 +- .../feature/ec2/imds/api_op_GetRegion.go | 2 +- .../feature/ec2/imds/api_op_GetToken.go | 4 +- .../feature/ec2/imds/api_op_GetUserData.go | 4 +- .../feature/ec2/imds/go_module_metadata.go | 2 +- .../feature/ec2/imds/request_middleware.go | 18 +- 
.../internal/configsources/CHANGELOG.md | 22 + .../internal/configsources/config.go | 6 +- .../internal/configsources/endpoints.go | 4 +- .../configsources/go_module_metadata.go | 2 +- .../endpoints/awsrulesfn/partitions.json | 2 +- .../internal/endpoints/v2/CHANGELOG.md | 22 + .../internal/endpoints/v2/endpoints.go | 6 +- .../endpoints/v2/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 9 + .../internal/ini/go_module_metadata.go | 2 +- .../internal/middleware/middleware.go | 42 - .../aws-sdk-go-v2/service/ecr/CHANGELOG.md | 164 + .../aws-sdk-go-v2/service/ecr/api_client.go | 222 +- .../ecr/api_op_BatchCheckLayerAvailability.go | 14 +- .../service/ecr/api_op_BatchDeleteImage.go | 14 +- .../service/ecr/api_op_BatchGetImage.go | 14 +- ...BatchGetRepositoryScanningConfiguration.go | 14 +- .../service/ecr/api_op_CompleteLayerUpload.go | 14 +- .../ecr/api_op_CreatePullThroughCacheRule.go | 14 +- .../service/ecr/api_op_CreateRepository.go | 22 +- ...api_op_CreateRepositoryCreationTemplate.go | 22 +- .../ecr/api_op_DeleteLifecyclePolicy.go | 14 +- .../ecr/api_op_DeletePullThroughCacheRule.go | 14 +- .../ecr/api_op_DeleteRegistryPolicy.go | 14 +- .../service/ecr/api_op_DeleteRepository.go | 14 +- ...api_op_DeleteRepositoryCreationTemplate.go | 14 +- .../ecr/api_op_DeleteRepositoryPolicy.go | 14 +- .../ecr/api_op_DeleteSigningConfiguration.go | 155 + ...pi_op_DeregisterPullTimeUpdateExclusion.go | 156 + .../api_op_DescribeImageReplicationStatus.go | 14 +- .../ecr/api_op_DescribeImageScanFindings.go | 14 +- .../ecr/api_op_DescribeImageSigningStatus.go | 180 + .../service/ecr/api_op_DescribeImages.go | 14 +- .../api_op_DescribePullThroughCacheRules.go | 14 +- .../service/ecr/api_op_DescribeRegistry.go | 14 +- .../ecr/api_op_DescribeRepositories.go | 14 +- ..._op_DescribeRepositoryCreationTemplates.go | 14 +- .../service/ecr/api_op_GetAccountSetting.go | 24 +- .../ecr/api_op_GetAuthorizationToken.go | 14 +- .../ecr/api_op_GetDownloadUrlForLayer.go | 
14 +- .../service/ecr/api_op_GetLifecyclePolicy.go | 14 +- .../ecr/api_op_GetLifecyclePolicyPreview.go | 14 +- .../service/ecr/api_op_GetRegistryPolicy.go | 14 +- ...api_op_GetRegistryScanningConfiguration.go | 14 +- .../service/ecr/api_op_GetRepositoryPolicy.go | 14 +- .../ecr/api_op_GetSigningConfiguration.go | 153 + .../service/ecr/api_op_InitiateLayerUpload.go | 14 +- .../service/ecr/api_op_ListImageReferrers.go | 197 + .../service/ecr/api_op_ListImages.go | 14 +- .../api_op_ListPullTimeUpdateExclusions.go | 173 + .../service/ecr/api_op_ListTagsForResource.go | 14 +- .../service/ecr/api_op_PutAccountSetting.go | 24 +- .../service/ecr/api_op_PutImage.go | 18 +- .../api_op_PutImageScanningConfiguration.go | 14 +- .../ecr/api_op_PutImageTagMutability.go | 22 +- .../service/ecr/api_op_PutLifecyclePolicy.go | 14 +- .../service/ecr/api_op_PutRegistryPolicy.go | 14 +- ...api_op_PutRegistryScanningConfiguration.go | 14 +- .../ecr/api_op_PutReplicationConfiguration.go | 14 +- .../ecr/api_op_PutSigningConfiguration.go | 163 + .../api_op_RegisterPullTimeUpdateExclusion.go | 161 + .../service/ecr/api_op_SetRepositoryPolicy.go | 14 +- .../service/ecr/api_op_StartImageScan.go | 14 +- .../ecr/api_op_StartLifecyclePolicyPreview.go | 14 +- .../service/ecr/api_op_TagResource.go | 14 +- .../service/ecr/api_op_UntagResource.go | 14 +- .../ecr/api_op_UpdateImageStorageClass.go | 181 + .../ecr/api_op_UpdatePullThroughCacheRule.go | 14 +- ...api_op_UpdateRepositoryCreationTemplate.go | 22 +- .../service/ecr/api_op_UploadLayerPart.go | 14 +- .../api_op_ValidatePullThroughCacheRule.go | 14 +- .../aws/aws-sdk-go-v2/service/ecr/auth.go | 44 +- .../service/ecr/deserializers.go | 5040 ++++- .../aws-sdk-go-v2/service/ecr/endpoints.go | 995 +- .../aws-sdk-go-v2/service/ecr/generated.json | 12 +- .../service/ecr/go_module_metadata.go | 2 +- .../ecr/internal/endpoints/endpoints.go | 25 + .../aws/aws-sdk-go-v2/service/ecr/options.go | 10 +- .../aws-sdk-go-v2/service/ecr/serializers.go | 1034 +- 
.../aws-sdk-go-v2/service/ecr/types/enums.go | 222 +- .../aws-sdk-go-v2/service/ecr/types/errors.go | 164 + .../aws-sdk-go-v2/service/ecr/types/types.go | 224 +- .../aws-sdk-go-v2/service/ecr/validators.go | 415 + .../service/ecrpublic/CHANGELOG.md | 125 + .../service/ecrpublic/api_client.go | 222 +- .../api_op_BatchCheckLayerAvailability.go | 14 +- .../ecrpublic/api_op_BatchDeleteImage.go | 14 +- .../ecrpublic/api_op_CompleteLayerUpload.go | 14 +- .../ecrpublic/api_op_CreateRepository.go | 14 +- .../ecrpublic/api_op_DeleteRepository.go | 14 +- .../api_op_DeleteRepositoryPolicy.go | 14 +- .../ecrpublic/api_op_DescribeImageTags.go | 14 +- .../ecrpublic/api_op_DescribeImages.go | 14 +- .../ecrpublic/api_op_DescribeRegistries.go | 14 +- .../ecrpublic/api_op_DescribeRepositories.go | 14 +- .../ecrpublic/api_op_GetAuthorizationToken.go | 14 +- .../api_op_GetRegistryCatalogData.go | 14 +- .../api_op_GetRepositoryCatalogData.go | 14 +- .../ecrpublic/api_op_GetRepositoryPolicy.go | 14 +- .../ecrpublic/api_op_InitiateLayerUpload.go | 14 +- .../ecrpublic/api_op_ListTagsForResource.go | 14 +- .../service/ecrpublic/api_op_PutImage.go | 14 +- .../api_op_PutRegistryCatalogData.go | 14 +- .../api_op_PutRepositoryCatalogData.go | 14 +- .../ecrpublic/api_op_SetRepositoryPolicy.go | 14 +- .../service/ecrpublic/api_op_TagResource.go | 14 +- .../service/ecrpublic/api_op_UntagResource.go | 14 +- .../ecrpublic/api_op_UploadLayerPart.go | 14 +- .../aws-sdk-go-v2/service/ecrpublic/auth.go | 44 +- .../service/ecrpublic/deserializers.go | 9 - .../service/ecrpublic/endpoints.go | 29 +- .../service/ecrpublic/generated.json | 3 +- .../service/ecrpublic/go_module_metadata.go | 2 +- .../ecrpublic/internal/endpoints/endpoints.go | 14 + .../service/ecrpublic/options.go | 10 +- .../internal/accept-encoding/CHANGELOG.md | 12 + .../accept-encoding/go_module_metadata.go | 2 +- .../internal/presigned-url/CHANGELOG.md | 22 + .../presigned-url/go_module_metadata.go | 2 +- 
.../internal/presigned-url/middleware.go | 12 +- .../aws-sdk-go-v2/service/signin/CHANGELOG.md | 22 + .../service/signin/api_client.go | 24 +- .../signin/api_op_CreateOAuth2Token.go | 5 +- .../service/signin/generated.json | 3 +- .../service/signin/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/signin/options.go | 3 +- .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 30 + .../aws-sdk-go-v2/service/sso/api_client.go | 24 +- .../service/sso/api_op_GetRoleCredentials.go | 5 +- .../service/sso/api_op_ListAccountRoles.go | 5 +- .../service/sso/api_op_ListAccounts.go | 5 +- .../service/sso/api_op_Logout.go | 5 +- .../aws-sdk-go-v2/service/sso/generated.json | 3 +- .../service/sso/go_module_metadata.go | 2 +- .../sso/internal/endpoints/endpoints.go | 6 + .../aws/aws-sdk-go-v2/service/sso/options.go | 3 +- .../service/ssooidc/CHANGELOG.md | 26 + .../service/ssooidc/api_client.go | 24 +- .../service/ssooidc/api_op_CreateToken.go | 5 +- .../ssooidc/api_op_CreateTokenWithIAM.go | 5 +- .../service/ssooidc/api_op_RegisterClient.go | 5 +- .../api_op_StartDeviceAuthorization.go | 5 +- .../service/ssooidc/generated.json | 3 +- .../service/ssooidc/go_module_metadata.go | 2 +- .../ssooidc/internal/endpoints/endpoints.go | 3 + .../aws-sdk-go-v2/service/ssooidc/options.go | 3 +- .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 22 + .../aws-sdk-go-v2/service/sts/api_client.go | 24 +- .../service/sts/api_op_AssumeRole.go | 5 +- .../service/sts/api_op_AssumeRoleWithSAML.go | 5 +- .../sts/api_op_AssumeRoleWithWebIdentity.go | 5 +- .../service/sts/api_op_AssumeRoot.go | 5 +- .../sts/api_op_DecodeAuthorizationMessage.go | 5 +- .../service/sts/api_op_GetAccessKeyInfo.go | 5 +- .../service/sts/api_op_GetCallerIdentity.go | 5 +- .../sts/api_op_GetDelegatedAccessToken.go | 5 +- .../service/sts/api_op_GetFederationToken.go | 5 +- .../service/sts/api_op_GetSessionToken.go | 5 +- .../service/sts/api_op_GetWebIdentityToken.go | 5 +- .../aws-sdk-go-v2/service/sts/generated.json | 3 +- 
.../service/sts/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/sts/options.go | 3 +- vendor/github.com/aws/smithy-go/CHANGELOG.md | 14 + vendor/github.com/aws/smithy-go/README.md | 4 +- .../aws/smithy-go/go_module_metadata.go | 2 +- .../middleware/eventstream_middleware.go | 21 + .../ecr-login/api/client.go | 95 +- .../ecr-login/api/factory.go | 4 +- .../ecr-login/cache/build.go | 54 +- .../ecr-login/cache/file.go | 65 +- .../ecr-login/config/log.go | 3 + .../ecr-login/config/url_redactor.go | 102 + .../ecr-login/ecr.go | 2 + .../buildkite/agent/v3/api/BUILD.bazel | 6 + .../buildkite/agent/v3/api/annotations.go | 12 +- .../buildkite/agent/v3/api/client.go | 7 +- .../agent/v3/api/github_code_access_token.go | 1 - .../github.com/buildkite/agent/v3/api/jobs.go | 62 +- .../github.com/buildkite/agent/v3/api/oidc.go | 3 + .../buildkite/agent/v3/api/pings_streaming.go | 72 + .../agent/v3/api/proto/gen/BUILD.bazel | 13 + .../agent/v3/api/proto/gen/agentedge.pb.go | 488 + .../proto/gen/agentedgev1connect/BUILD.bazel | 12 + .../agentedgev1connect/agentedge.connect.go | 109 + .../buildkite/agent/v3/api/retryable.go | 15 +- .../buildkite/agent/v3/api/token.go | 35 + .../agent/v3/internal/agenthttp/client.go | 12 +- .../buildkite/agent/v3/logger/buffer.go | 6 + .../buildkite/agent/v3/logger/init_windows.go | 1 - .../buildkite/agent/v3/logger/log.go | 12 +- .../buildkite/agent/v3/version/VERSION | 2 +- .../buildkite/go-pipeline/pipeline.go | 5 +- .../buildkite/go-pipeline/secret.go | 43 + .../buildkite/go-pipeline/secrets.go | 141 + .../buildkite/go-pipeline/step_command.go | 12 +- .../cert-manager/cert-manager/LICENSES | 8 +- .../cert-manager/pkg/apis/acme/v1/types.go | 18 + .../pkg/apis/acme/v1/types_challenge.go | 3 + .../pkg/apis/acme/v1/types_issuer.go | 37 +- .../pkg/apis/acme/v1/types_order.go | 3 + .../pkg/apis/certmanager/v1/generic_issuer.go | 4 + .../pkg/apis/certmanager/v1/types.go | 11 +- .../apis/certmanager/v1/types_certificate.go | 18 +- 
.../v1/types_certificaterequest.go | 3 + .../pkg/apis/certmanager/v1/types_issuer.go | 48 +- .../cert-manager/pkg/apis/meta/v1/types.go | 1 + .../clipperhouse/displaywidth/.gitignore | 2 + .../clipperhouse/displaywidth/AGENTS.md | 24 +- .../clipperhouse/displaywidth/CHANGELOG.md | 71 +- .../clipperhouse/displaywidth/README.md | 137 +- .../clipperhouse/displaywidth/graphemes.go | 23 +- .../clipperhouse/displaywidth/options.go | 30 + .../clipperhouse/displaywidth/tables.go | 91 - .../clipperhouse/displaywidth/trie.go | 537 +- .../clipperhouse/displaywidth/truncate.go | 149 + .../clipperhouse/displaywidth/width.go | 254 +- .../clipperhouse/stringish/.gitignore | 2 - .../github.com/clipperhouse/stringish/LICENSE | 21 - .../clipperhouse/stringish/README.md | 64 - .../clipperhouse/stringish/interface.go | 5 - .../clipperhouse/uax29/v2/graphemes/README.md | 74 +- .../clipperhouse/uax29/v2/graphemes/ansi.go | 138 + .../clipperhouse/uax29/v2/graphemes/ansi8.go | 79 + .../uax29/v2/graphemes/iterator.go | 145 +- .../uax29/v2/graphemes/splitfunc.go | 47 +- .../clipperhouse/uax29/v2/graphemes/trie.go | 2338 +- .../uax29/v2/internal/iterators/iterator.go | 100 - .../github.com/cockroachdb/apd/v3/bigint.go | 6 +- .../github.com/cockroachdb/apd/v3/decimal.go | 5 + .../cockroachdb/apd/v3/decomposer.go | 30 +- vendor/github.com/cockroachdb/apd/v3/table.go | 3 +- .../stargz-snapshotter/estargz/estargz.go | 2 +- .../coreos/go-systemd/v22/dbus/dbus.go | 4 +- .../coreos/go-systemd/v22/dbus/methods.go | 231 +- .../coreos/go-systemd/v22/dbus/set.go | 17 +- .../go-systemd/v22/dbus/subscription.go | 32 +- .../go-systemd/v22/dbus/subscription_set.go | 12 +- .../dcrec/secp256k1/v4/ellipticadaptor.go | 47 +- vendor/github.com/digitorus/pkcs7/.gitignore | 3 + .../github.com/digitorus/pkcs7/.golangci.yml | 66 + vendor/github.com/digitorus/pkcs7/Makefile | 17 +- vendor/github.com/digitorus/pkcs7/ber.go | 38 +- vendor/github.com/digitorus/pkcs7/encrypt.go | 7 +- 
vendor/github.com/digitorus/pkcs7/sign.go | 10 +- vendor/github.com/digitorus/pkcs7/verify.go | 29 +- .../digitorus/pkcs7/verify_test_dsa.go | 182 - .../docker/cli/cli/config/configfile/file.go | 8 +- vendor/github.com/emicklei/proto/CHANGES.md | 5 + vendor/github.com/emicklei/proto/literals.go | 35 +- vendor/github.com/emicklei/proto/parser.go | 6 + vendor/github.com/fatih/color/color.go | 112 +- .../github.com/fatih/color/color_windows.go | 3 + .../fxamacker/cbor/v2/.golangci.yml | 176 +- vendor/github.com/fxamacker/cbor/v2/README.md | 11 +- vendor/github.com/fxamacker/cbor/v2/cache.go | 257 +- vendor/github.com/fxamacker/cbor/v2/decode.go | 440 +- .../fxamacker/cbor/v2/decode_map_utils.go | 98 + .../github.com/fxamacker/cbor/v2/diagnose.go | 25 +- vendor/github.com/fxamacker/cbor/v2/encode.go | 55 +- .../fxamacker/cbor/v2/simplevalue.go | 2 +- vendor/github.com/fxamacker/cbor/v2/stream.go | 48 +- .../fxamacker/cbor/v2/structfields.go | 65 +- vendor/github.com/fxamacker/cbor/v2/valid.go | 20 +- .../go-chi/chi/v5/middleware/route_headers.go | 1 + vendor/github.com/go-ini/ini/.editorconfig | 12 - vendor/github.com/go-ini/ini/.gitignore | 7 - vendor/github.com/go-ini/ini/.golangci.yml | 27 - vendor/github.com/go-ini/ini/LICENSE | 191 - vendor/github.com/go-ini/ini/Makefile | 15 - vendor/github.com/go-ini/ini/README.md | 43 - vendor/github.com/go-ini/ini/codecov.yml | 16 - vendor/github.com/go-ini/ini/data_source.go | 76 - vendor/github.com/go-ini/ini/deprecated.go | 22 - vendor/github.com/go-ini/ini/error.go | 49 - vendor/github.com/go-ini/ini/file.go | 541 - vendor/github.com/go-ini/ini/helper.go | 24 - vendor/github.com/go-ini/ini/ini.go | 176 - vendor/github.com/go-ini/ini/key.go | 837 - vendor/github.com/go-ini/ini/parser.go | 520 - vendor/github.com/go-ini/ini/section.go | 256 - vendor/github.com/go-ini/ini/struct.go | 747 - .../go-openapi/analysis/.codecov.yml | 4 + .../go-openapi/analysis/.editorconfig | 26 + .../github.com/go-openapi/analysis/.gitignore | 6 
+- .../go-openapi/analysis/.golangci.yml | 34 +- .../go-openapi/analysis/CODE_OF_CONDUCT.md | 6 +- .../go-openapi/analysis/CONTRIBUTORS.md | 27 + .../github.com/go-openapi/analysis/README.md | 116 +- .../go-openapi/analysis/SECURITY.md | 37 + .../go-openapi/analysis/analyzer.go | 76 +- .../github.com/go-openapi/analysis/debug.go | 2 +- vendor/github.com/go-openapi/analysis/doc.go | 55 +- .../github.com/go-openapi/analysis/flatten.go | 57 +- .../go-openapi/analysis/flatten_name.go | 8 +- .../go-openapi/analysis/flatten_options.go | 6 +- vendor/github.com/go-openapi/analysis/go.work | 6 + .../go-openapi/analysis/go.work.sum | 47 + .../analysis/internal/debug/debug.go | 6 +- .../internal/flatten/normalize/normalize.go | 10 +- .../internal/flatten/operations/operations.go | 12 +- .../internal/flatten/replace/errors.go | 2 +- .../internal/flatten/replace/replace.go | 43 +- .../flatten/schutils/flatten_schema.go | 4 +- .../analysis/internal/flatten/sortref/keys.go | 44 +- .../internal/flatten/sortref/sort_ref.go | 22 +- .../github.com/go-openapi/analysis/mixin.go | 5 +- .../github.com/go-openapi/analysis/schema.go | 6 +- .../github.com/go-openapi/errors/.cliff.toml | 181 - .../github.com/go-openapi/errors/.gitignore | 6 +- .../go-openapi/errors/.golangci.yml | 5 + .../go-openapi/errors/CODE_OF_CONDUCT.md | 6 +- .../go-openapi/errors/CONTRIBUTORS.md | 27 +- vendor/github.com/go-openapi/errors/README.md | 13 +- .../github.com/go-openapi/errors/SECURITY.md | 28 +- vendor/github.com/go-openapi/errors/api.go | 2 +- vendor/github.com/go-openapi/errors/doc.go | 20 +- .../go-openapi/jsonpointer/.gitignore | 2 + .../go-openapi/jsonpointer/.golangci.yml | 1 + .../go-openapi/jsonpointer/CODE_OF_CONDUCT.md | 6 +- .../go-openapi/jsonpointer/CONTRIBUTORS.md | 26 +- .../go-openapi/jsonpointer/README.md | 19 +- .../go-openapi/jsonpointer/SECURITY.md | 28 +- .../go-openapi/jsonreference/.cliff.toml | 181 - .../go-openapi/jsonreference/.gitignore | 7 +- 
.../go-openapi/jsonreference/.golangci.yml | 1 + .../jsonreference/CODE_OF_CONDUCT.md | 6 +- .../go-openapi/jsonreference/CONTRIBUTORS.md | 4 +- .../go-openapi/jsonreference/NOTICE | 4 +- .../go-openapi/jsonreference/README.md | 36 +- .../go-openapi/jsonreference/SECURITY.md | 28 +- .../go-openapi/jsonreference/reference.go | 1 + vendor/github.com/go-openapi/loads/.gitignore | 9 +- .../github.com/go-openapi/loads/.golangci.yml | 28 +- .../github.com/go-openapi/loads/.travis.yml | 25 - .../go-openapi/loads/CODE_OF_CONDUCT.md | 6 +- .../go-openapi/loads/CONTRIBUTORS.md | 26 + vendor/github.com/go-openapi/loads/README.md | 96 +- .../github.com/go-openapi/loads/SECURITY.md | 37 + vendor/github.com/go-openapi/loads/doc.go | 2 + vendor/github.com/go-openapi/loads/errors.go | 4 +- vendor/github.com/go-openapi/loads/loaders.go | 37 +- vendor/github.com/go-openapi/loads/options.go | 6 +- vendor/github.com/go-openapi/loads/spec.go | 36 +- .../github.com/go-openapi/runtime/.gitignore | 8 +- .../go-openapi/runtime/CODE_OF_CONDUCT.md | 6 +- vendor/github.com/go-openapi/runtime/NOTICE | 45 + .../github.com/go-openapi/runtime/README.md | 110 +- .../github.com/go-openapi/runtime/SECURITY.md | 37 + .../go-openapi/runtime/bytestream.go | 22 +- .../go-openapi/runtime/client/auth_info.go | 8 +- .../go-openapi/runtime/client/keepalive.go | 2 +- .../runtime/client/opentelemetry.go | 6 +- .../go-openapi/runtime/client/request.go | 23 +- .../go-openapi/runtime/client/runtime.go | 44 +- .../go-openapi/runtime/client_auth_info.go | 6 +- .../go-openapi/runtime/client_operation.go | 4 +- .../go-openapi/runtime/client_request.go | 10 +- .../go-openapi/runtime/client_response.go | 26 +- .../go-openapi/runtime/constants.go | 26 +- vendor/github.com/go-openapi/runtime/csv.go | 32 +- .../go-openapi/runtime/csv_options.go | 6 +- vendor/github.com/go-openapi/runtime/doc.go | 6 + .../github.com/go-openapi/runtime/go.work.sum | 16 + .../github.com/go-openapi/runtime/headers.go | 2 +- 
.../go-openapi/runtime/interfaces.go | 34 +- vendor/github.com/go-openapi/runtime/json.go | 4 +- .../go-openapi/runtime/middleware/context.go | 76 +- .../runtime/middleware/denco/README.md | 2 +- .../runtime/middleware/denco/router.go | 12 +- .../runtime/middleware/denco/server.go | 16 +- .../runtime/middleware/denco/util.go | 2 + .../go-openapi/runtime/middleware/doc.go | 94 +- .../runtime/middleware/not_implemented.go | 4 +- .../runtime/middleware/operation.go | 2 +- .../runtime/middleware/parameter.go | 2 +- .../go-openapi/runtime/middleware/rapidoc.go | 6 +- .../go-openapi/runtime/middleware/redoc.go | 8 +- .../go-openapi/runtime/middleware/request.go | 4 +- .../go-openapi/runtime/middleware/router.go | 48 +- .../go-openapi/runtime/middleware/spec.go | 12 +- .../runtime/middleware/swaggerui.go | 8 +- .../runtime/middleware/ui_options.go | 16 +- .../runtime/middleware/untyped/api.go | 34 +- .../runtime/middleware/validation.go | 2 +- .../github.com/go-openapi/runtime/request.go | 18 +- .../runtime/security/authenticator.go | 36 +- .../go-openapi/runtime/security/authorizer.go | 4 +- .../github.com/go-openapi/runtime/statuses.go | 2 +- vendor/github.com/go-openapi/runtime/text.go | 4 +- .../github.com/go-openapi/runtime/values.go | 2 +- vendor/github.com/go-openapi/runtime/xml.go | 4 +- .../go-openapi/runtime/yamlpc/yaml.go | 4 +- vendor/github.com/go-openapi/spec/.cliff.toml | 181 - vendor/github.com/go-openapi/spec/.gitignore | 5 + .../github.com/go-openapi/spec/.golangci.yml | 1 + .../go-openapi/spec/CODE_OF_CONDUCT.md | 6 +- .../go-openapi/spec/CONTRIBUTORS.md | 78 +- vendor/github.com/go-openapi/spec/README.md | 12 +- vendor/github.com/go-openapi/spec/SECURITY.md | 28 +- vendor/github.com/go-openapi/spec/cache.go | 18 +- .../go-openapi/spec/contact_info.go | 6 +- vendor/github.com/go-openapi/spec/debug.go | 10 +- vendor/github.com/go-openapi/spec/errors.go | 12 +- vendor/github.com/go-openapi/spec/expander.go | 11 +- 
vendor/github.com/go-openapi/spec/header.go | 42 +- vendor/github.com/go-openapi/spec/info.go | 26 +- vendor/github.com/go-openapi/spec/items.go | 44 +- vendor/github.com/go-openapi/spec/license.go | 6 +- .../github.com/go-openapi/spec/normalizer.go | 2 +- .../github.com/go-openapi/spec/operation.go | 42 +- .../github.com/go-openapi/spec/parameter.go | 84 +- .../github.com/go-openapi/spec/path_item.go | 8 +- vendor/github.com/go-openapi/spec/paths.go | 6 +- .../github.com/go-openapi/spec/properties.go | 6 +- vendor/github.com/go-openapi/spec/ref.go | 24 +- vendor/github.com/go-openapi/spec/resolver.go | 18 +- vendor/github.com/go-openapi/spec/response.go | 22 +- .../github.com/go-openapi/spec/responses.go | 10 +- vendor/github.com/go-openapi/spec/schema.go | 148 +- .../go-openapi/spec/schema_loader.go | 23 +- .../go-openapi/spec/security_scheme.go | 22 +- vendor/github.com/go-openapi/spec/spec.go | 13 +- vendor/github.com/go-openapi/spec/swagger.go | 60 +- vendor/github.com/go-openapi/spec/tag.go | 10 +- .../github.com/go-openapi/spec/validations.go | 16 +- .../github.com/go-openapi/spec/xml_object.go | 14 +- .../github.com/go-openapi/strfmt/.codecov.yml | 9 + .../github.com/go-openapi/strfmt/.gitignore | 8 +- .../go-openapi/strfmt/.golangci.yml | 29 +- .../go-openapi/strfmt/CODE_OF_CONDUCT.md | 6 +- .../go-openapi/strfmt/CONTRIBUTORS.md | 52 + vendor/github.com/go-openapi/strfmt/README.md | 162 +- .../github.com/go-openapi/strfmt/SECURITY.md | 37 + vendor/github.com/go-openapi/strfmt/bson.go | 88 +- vendor/github.com/go-openapi/strfmt/date.go | 27 +- .../github.com/go-openapi/strfmt/default.go | 434 +- vendor/github.com/go-openapi/strfmt/doc.go | 4 +- .../github.com/go-openapi/strfmt/duration.go | 24 +- vendor/github.com/go-openapi/strfmt/errors.go | 2 +- vendor/github.com/go-openapi/strfmt/format.go | 174 +- vendor/github.com/go-openapi/strfmt/go.work | 7 + .../github.com/go-openapi/strfmt/go.work.sum | 16 + vendor/github.com/go-openapi/strfmt/ifaces.go | 14 +- 
.../strfmt/internal/bsonlite/codec.go | 71 + .../strfmt/internal/bsonlite/lite.go | 213 + vendor/github.com/go-openapi/strfmt/mongo.go | 701 +- vendor/github.com/go-openapi/strfmt/time.go | 82 +- vendor/github.com/go-openapi/strfmt/ulid.go | 56 +- vendor/github.com/go-openapi/swag/.gitignore | 2 + .../go-openapi/swag/CODE_OF_CONDUCT.md | 6 +- .../go-openapi/swag/CONTRIBUTORS.md | 36 + vendor/github.com/go-openapi/swag/README.md | 285 +- .../go-openapi/swag/jsonutils/README.md | 11 +- .../go-openapi/swag/mangling/BENCHMARK.md | 4 +- .../github.com/go-openapi/validate/.gitignore | 8 +- .../go-openapi/validate/.golangci.yml | 29 +- .../go-openapi/validate/BENCHMARK.md | 5 +- .../go-openapi/validate/CODE_OF_CONDUCT.md | 6 +- .../go-openapi/validate/CONTRIBUTORS.md | 43 + .../github.com/go-openapi/validate/README.md | 116 +- .../go-openapi/validate/SECURITY.md | 37 + .../github.com/go-openapi/validate/context.go | 8 +- .../github.com/go-openapi/validate/debug.go | 2 +- .../go-openapi/validate/default_validator.go | 13 +- vendor/github.com/go-openapi/validate/doc.go | 144 +- .../go-openapi/validate/example_validator.go | 15 +- .../github.com/go-openapi/validate/formats.go | 7 +- .../github.com/go-openapi/validate/helpers.go | 30 +- .../go-openapi/validate/object_validator.go | 13 +- .../github.com/go-openapi/validate/options.go | 2 +- .../github.com/go-openapi/validate/pools.go | 30 +- .../github.com/go-openapi/validate/result.go | 14 +- vendor/github.com/go-openapi/validate/rexp.go | 2 +- .../github.com/go-openapi/validate/schema.go | 14 +- .../go-openapi/validate/schema_messages.go | 27 +- .../go-openapi/validate/schema_option.go | 14 +- .../go-openapi/validate/schema_props.go | 5 +- .../go-openapi/validate/slice_validator.go | 3 +- vendor/github.com/go-openapi/validate/spec.go | 33 +- .../go-openapi/validate/spec_messages.go | 117 +- vendor/github.com/go-openapi/validate/type.go | 19 +- .../go-openapi/validate/update-fixtures.sh | 4 +- 
.../go-openapi/validate/validator.go | 35 +- .../github.com/go-openapi/validate/values.go | 67 +- vendor/github.com/go-piv/piv-go/v2/piv/key.go | 58 +- vendor/github.com/go-piv/piv-go/v2/piv/piv.go | 19 + .../go-viper/mapstructure/v2/.editorconfig | 3 + .../go-viper/mapstructure/v2/.envrc | 11 +- .../go-viper/mapstructure/v2/.gitignore | 10 +- .../go-viper/mapstructure/v2/devenv.lock | 103 + .../go-viper/mapstructure/v2/devenv.nix | 14 + .../go-viper/mapstructure/v2/devenv.yaml | 4 + .../go-viper/mapstructure/v2/flake.lock | 294 - .../go-viper/mapstructure/v2/flake.nix | 46 - .../go-viper/mapstructure/v2/mapstructure.go | 332 +- .../goccy/go-json/internal/decoder/compile.go | 18 +- .../internal/decoder/compile_norace.go | 1 + .../go-json/internal/decoder/compile_race.go | 1 + .../goccy/go-json/internal/encoder/code.go | 1 + .../go-json/internal/encoder/compiler.go | 16 +- .../internal/encoder/compiler_norace.go | 1 + .../go-json/internal/encoder/compiler_race.go | 1 + .../goccy/go-json/internal/encoder/encoder.go | 5 + .../goccy/go-json/internal/runtime/type.go | 108 +- vendor/github.com/godbus/dbus/v5/.cirrus.yml | 11 + .../github.com/godbus/dbus/v5/.golangci.yml | 13 + vendor/github.com/godbus/dbus/v5/README.md | 5 +- vendor/github.com/godbus/dbus/v5/SECURITY.md | 13 + vendor/github.com/godbus/dbus/v5/auth.go | 48 +- .../godbus/dbus/v5/auth_default_other.go | 7 + .../godbus/dbus/v5/auth_default_windows.go | 5 + .../v5/{auth_sha1.go => auth_sha1_windows.go} | 7 + vendor/github.com/godbus/dbus/v5/call.go | 9 +- vendor/github.com/godbus/dbus/v5/conn.go | 91 +- .../github.com/godbus/dbus/v5/conn_darwin.go | 1 - .../github.com/godbus/dbus/v5/conn_other.go | 23 +- vendor/github.com/godbus/dbus/v5/conn_unix.go | 25 +- .../github.com/godbus/dbus/v5/conn_windows.go | 2 - vendor/github.com/godbus/dbus/v5/dbus.go | 29 +- vendor/github.com/godbus/dbus/v5/decoder.go | 202 +- .../godbus/dbus/v5/default_handler.go | 32 +- vendor/github.com/godbus/dbus/v5/doc.go | 43 +- 
vendor/github.com/godbus/dbus/v5/encoder.go | 4 +- vendor/github.com/godbus/dbus/v5/export.go | 93 +- vendor/github.com/godbus/dbus/v5/homedir.go | 25 - vendor/github.com/godbus/dbus/v5/match.go | 8 +- vendor/github.com/godbus/dbus/v5/message.go | 21 +- vendor/github.com/godbus/dbus/v5/object.go | 39 +- .../godbus/dbus/v5/sequential_handler.go | 2 +- .../godbus/dbus/v5/server_interfaces.go | 12 +- vendor/github.com/godbus/dbus/v5/sig.go | 17 +- .../godbus/dbus/v5/transport_nonce_tcp.go | 8 +- .../godbus/dbus/v5/transport_unix.go | 147 +- .../dbus/v5/transport_unixcred_freebsd.go | 38 +- .../dbus/v5/transport_unixcred_linux.go | 2 +- vendor/github.com/godbus/dbus/v5/variant.go | 31 +- .../godbus/dbus/v5/variant_lexer.go | 2 +- .../godbus/dbus/v5/variant_parser.go | 46 +- vendor/github.com/golang/snappy/README | 7 +- .../github.com/golang/snappy/encode_arm64.s | 4 +- .../github.com/google/cel-go/cel/BUILD.bazel | 7 +- .../github.com/google/cel-go/cel/library.go | 1 - .../github.com/google/cel-go/cel/optimizer.go | 54 +- .../google/cel-go/checker/checker.go | 59 +- .../github.com/google/cel-go/checker/env.go | 112 +- .../google/cel-go/checker/scopes.go | 25 + .../google/cel-go/common/ast/ast.go | 55 + .../google/cel-go/common/debug/debug.go | 15 + .../google/cel-go/common/env/BUILD.bazel | 2 +- .../google/cel-go/common/env/env.go | 2 +- .../google/cel-go/common/types/BUILD.bazel | 1 - .../google/cel-go/common/types/bool.go | 2 +- .../google/cel-go/common/types/bytes.go | 2 +- .../google/cel-go/common/types/double.go | 2 +- .../google/cel-go/common/types/duration.go | 2 +- .../google/cel-go/common/types/int.go | 2 +- .../google/cel-go/common/types/json_value.go | 9 +- .../google/cel-go/common/types/list.go | 9 +- .../google/cel-go/common/types/map.go | 48 +- .../google/cel-go/common/types/null.go | 8 +- .../google/cel-go/common/types/object.go | 2 +- .../google/cel-go/common/types/string.go | 2 +- .../google/cel-go/common/types/timestamp.go | 2 +- 
.../google/cel-go/common/types/uint.go | 2 +- vendor/github.com/google/cel-go/ext/README.md | 78 +- .../github.com/google/cel-go/ext/bindings.go | 10 + .../google/cel-go/ext/comprehensions.go | 8 +- .../cel-go/ext/extension_option_factory.go | 37 +- .../google/cel-go/ext/formatting_v2.go | 8 +- vendor/github.com/google/cel-go/ext/native.go | 2 +- vendor/github.com/google/cel-go/ext/regex.go | 150 +- .../cel-go/interpreter/attribute_patterns.go | 12 +- .../google/cel-go/interpreter/attributes.go | 53 +- .../cel-go/interpreter/interpretable.go | 5 + .../google/cel-go/interpreter/interpreter.go | 16 + .../google/cel-go/interpreter/planner.go | 104 +- .../github.com/google/cel-go/parser/helper.go | 2 +- .../certificate-transparency-go/CHANGELOG.md | 9 + .../certificate-transparency-go/README.md | 45 +- .../client/configpb/multilog.pb.go | 114 +- .../loglist3/logfilter.go | 77 +- .../loglist3/loglist3.go | 137 +- .../loglist3/logstatus_string.go | 5 +- .../certificate-transparency-go/types.go | 3 +- .../gnostic-models/extensions/extension.proto | 2 +- .../gnostic-models/openapiv2/OpenAPIv2.proto | 2 +- .../gnostic-models/openapiv3/OpenAPIv3.proto | 2 +- .../openapiv3/annotations.proto | 2 +- .../pkg/authn/keychain.go | 8 +- .../go-containerregistry/pkg/v1/hash.go | 28 +- .../pkg/v1/layout/layoutpath.go | 2 +- .../pkg/v1/mutate/mutate.go | 25 +- .../pkg/v1/remote/transport/ping.go | 6 +- .../pkg/v1/remote/write.go | 2 +- .../pkg/v1/tarball/image.go | 11 +- .../pkg/v1/zz_deepcopy_generated.go | 1 - .../google/go-querystring/query/encode.go | 73 +- .../gax-go/v2/.release-please-manifest.json | 3 + .../googleapis/gax-go/v2/CHANGES.md | 222 + .../googleapis/gax-go/v2/apierror/apierror.go | 413 + .../v2/apierror/internal/proto/README.md | 30 + .../internal/proto/custom_error.pb.go | 226 + .../internal/proto/custom_error.proto | 50 + .../v2/apierror/internal/proto/error.pb.go | 232 + .../v2/apierror/internal/proto/error.proto | 46 + .../googleapis/gax-go/v2/call_option.go | 
288 + .../googleapis/gax-go/v2/callctx/callctx.go | 146 + .../googleapis/gax-go/v2/content_type.go | 112 + .../googleapis/gax-go/v2/feature.go | 75 + vendor/github.com/googleapis/gax-go/v2/gax.go | 41 + .../github.com/googleapis/gax-go/v2/header.go | 200 + .../googleapis/gax-go/v2/internal/version.go} | 22 +- .../github.com/googleapis/gax-go/v2/invoke.go | 140 + .../googleapis/gax-go/v2/proto_json_stream.go | 127 + .../gax-go/v2/release-please-config.json | 10 + .../googleapis/gax-go/v2/telemetry.go | 469 + .../grpc-gateway/v2/runtime/handler.go | 4 +- .../grpc-gateway/v2/runtime/mux.go | 11 + .../go/v1/resource_descriptor.pb.go | 2 +- .../in-toto/attestation/go/v1/statement.go | 14 +- .../in-toto/attestation/go/v1/statement.pb.go | 2 +- .../in-toto-golang/in_toto/attestations.go | 36 +- .../in-toto/in-toto-golang/in_toto/keylib.go | 216 +- .../in-toto/in-toto-golang/in_toto/model.go | 51 +- .../in-toto/in-toto-golang/in_toto/runlib.go | 92 +- .../in_toto/slsa_provenance/common/common.go | 2 +- .../slsa_provenance/v0.1/provenance.go | 4 +- .../slsa_provenance/v0.2/provenance.go | 4 +- .../in_toto/slsa_provenance/v1/provenance.go | 36 +- .../in-toto/in-toto-golang/in_toto/util.go | 4 +- .../in-toto-golang/in_toto/verifylib.go | 90 +- .../github.com/jedisct1/go-minisign/LICENSE | 2 +- .../klauspost/compress/.goreleaser.yml | 11 +- .../github.com/klauspost/compress/README.md | 45 +- .../compress/huff0/decompress_amd64.go | 1 - .../compress/huff0/decompress_generic.go | 1 - .../internal/cpuinfo/cpuinfo_amd64.go | 1 - .../klauspost/compress/zstd/blockenc.go | 1 + .../klauspost/compress/zstd/decoder.go | 28 +- .../compress/zstd/decoder_options.go | 60 +- .../klauspost/compress/zstd/enc_base.go | 2 +- .../klauspost/compress/zstd/enc_best.go | 14 +- .../klauspost/compress/zstd/enc_better.go | 14 +- .../klauspost/compress/zstd/enc_dfast.go | 6 +- .../klauspost/compress/zstd/enc_fast.go | 6 +- .../klauspost/compress/zstd/encoder.go | 29 + .../compress/zstd/encoder_options.go 
| 45 +- .../compress/zstd/fse_decoder_amd64.go | 1 - .../compress/zstd/fse_decoder_generic.go | 1 - .../zstd/internal/xxhash/xxhash_other.go | 1 - .../klauspost/compress/zstd/matchlen_amd64.go | 1 - .../compress/zstd/matchlen_generic.go | 1 - .../klauspost/compress/zstd/seqdec_amd64.go | 1 - .../klauspost/compress/zstd/seqdec_generic.go | 1 - .../lestrrat-go/dsig-secp256k1/.gitignore | 32 + .../lestrrat-go/dsig-secp256k1/Changes | 5 + .../{option => dsig-secp256k1}/LICENSE | 2 +- .../lestrrat-go/dsig-secp256k1/secp256k1.go | 29 + vendor/github.com/lestrrat-go/dsig/.gitignore | 32 + vendor/github.com/lestrrat-go/dsig/Changes | 5 + .../conc => lestrrat-go/dsig}/LICENSE | 2 +- vendor/github.com/lestrrat-go/dsig/README.md | 163 + .../github.com/lestrrat-go/dsig/algorithms.go | 37 + .../lestrrat-go/dsig/crypto_signer.go | 45 + vendor/github.com/lestrrat-go/dsig/dsig.go | 224 + vendor/github.com/lestrrat-go/dsig/ecdsa.go | 200 + vendor/github.com/lestrrat-go/dsig/eddsa.go | 44 + vendor/github.com/lestrrat-go/dsig/hmac.go | 45 + .../dsig/internal/ecutil/ecutil.go | 76 + vendor/github.com/lestrrat-go/dsig/rsa.go | 63 + vendor/github.com/lestrrat-go/dsig/sign.go | 100 + .../github.com/lestrrat-go/dsig/validation.go | 66 + vendor/github.com/lestrrat-go/dsig/verify.go | 134 + .../lestrrat-go/httprc/v3/.golangci.yml | 2 + .../github.com/lestrrat-go/httprc/v3/Changes | 25 +- .../lestrrat-go/httprc/v3/backend.go | 91 +- .../lestrrat-go/httprc/v3/client.go | 14 +- .../lestrrat-go/httprc/v3/controller.go | 16 +- .../lestrrat-go/httprc/v3/errors.go | 48 + .../lestrrat-go/httprc/v3/options.go | 2 +- .../httprc/v3/proxysink/proxysink.go | 1 + .../lestrrat-go/httprc/v3/resource.go | 19 +- .../lestrrat-go/httprc/v3/worker.go | 4 +- .../lestrrat-go/jwx/v3/.golangci.yml | 3 + vendor/github.com/lestrrat-go/jwx/v3/Changes | 64 + .../lestrrat-go/jwx/v3/MODULE.bazel | 6 +- .../jwx/v3/formatkind_string_gen.go | 5 +- .../lestrrat-go/jwx/v3/internal/json/goccy.go | 1 - 
.../jwx/v3/internal/json/stdlib.go | 2 +- .../jwx/v3/internal/keyconv/keyconv.go | 89 + .../lestrrat-go/jwx/v3/jwa/secp2561k.go | 1 - .../lestrrat-go/jwx/v3/jwe/encrypt.go | 41 +- .../jwx/v3/jwe/internal/aescbc/aescbc.go | 4 +- .../github.com/lestrrat-go/jwx/v3/jwe/jwe.go | 138 +- .../lestrrat-go/jwx/v3/jwe/message.go | 40 +- .../lestrrat-go/jwx/v3/jwe/options.go | 5 +- .../lestrrat-go/jwx/v3/jwe/options.yaml | 40 +- .../lestrrat-go/jwx/v3/jwe/options_gen.go | 42 + .../lestrrat-go/jwx/v3/jwk/cache.go | 6 +- .../lestrrat-go/jwx/v3/jwk/ecdsa.go | 4 +- .../lestrrat-go/jwx/v3/jwk/es256k.go | 1 - .../lestrrat-go/jwx/v3/jwk/fetch.go | 4 +- .../lestrrat-go/jwx/v3/jwk/interface.go | 7 +- .../github.com/lestrrat-go/jwx/v3/jwk/jwk.go | 13 +- .../github.com/lestrrat-go/jwx/v3/jwk/okp.go | 4 +- .../github.com/lestrrat-go/jwx/v3/jwk/rsa.go | 4 +- .../github.com/lestrrat-go/jwx/v3/jwk/set.go | 14 +- .../lestrrat-go/jwx/v3/jwk/symmetric.go | 2 +- .../github.com/lestrrat-go/jwx/v3/jwk/x509.go | 2 +- .../lestrrat-go/jwx/v3/jws/es256k.go | 2 +- .../github.com/lestrrat-go/jwx/v3/jws/jws.go | 21 +- .../lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel | 1 + .../lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go | 87 +- .../lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go | 44 +- .../lestrrat-go/jwx/v3/jws/jwsbb/es256k.go | 14 + .../lestrrat-go/jwx/v3/jws/jwsbb/header.go | 2 +- .../lestrrat-go/jwx/v3/jws/jwsbb/hmac.go | 71 +- .../lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go | 65 + .../lestrrat-go/jwx/v3/jws/jwsbb/rsa.go | 95 +- .../lestrrat-go/jwx/v3/jws/jwsbb/sign.go | 119 +- .../lestrrat-go/jwx/v3/jws/jwsbb/verify.go | 142 +- .../lestrrat-go/jwx/v3/jws/legacy.go | 5 +- .../lestrrat-go/jwx/v3/jws/legacy/legacy.go | 2 +- .../lestrrat-go/jwx/v3/jws/options.go | 4 +- .../lestrrat-go/jwx/v3/jws/options.yaml | 6 +- .../lestrrat-go/jwx/v3/jws/options_gen.go | 6 +- .../lestrrat-go/jwx/v3/jws/signer.go | 57 +- .../jwx/v3/jwt/internal/errors/errors.go | 2 + .../github.com/lestrrat-go/jwx/v3/jwt/jwt.go | 9 +- 
.../lestrrat-go/jwx/v3/jwt/options.go | 10 + .../lestrrat-go/jwx/v3/jwt/token_options.go | 2 +- .../jwx/v3/jwt/token_options_gen.go | 8 +- .../lestrrat-go/jwx/v3/jwt/validate.go | 9 +- .../github.com/lestrrat-go/option/.gitignore | 15 - .../github.com/lestrrat-go/option/README.md | 245 - .../github.com/lestrrat-go/option/option.go | 38 - .../letsencrypt/boulder/core/challenges.go | 20 +- .../letsencrypt/boulder/core/objects.go | 55 +- .../letsencrypt/boulder/core/util.go | 21 + .../letsencrypt/boulder/probs/probs.go | 13 - .../mattn/go-runewidth/runewidth.go | 5 + .../mattn/go-runewidth/runewidth_table.go | 650 +- vendor/github.com/miekg/pkcs11/params.go | 33 +- vendor/github.com/miekg/pkcs11/pkcs11.go | 38 +- vendor/github.com/miekg/pkcs11/release.go | 2 +- vendor/github.com/miekg/pkcs11/types.go | 17 +- vendor/github.com/miekg/pkcs11/vendor.go | 14 +- vendor/github.com/miekg/pkcs11/zconst.go | 1444 +- vendor/github.com/oklog/ulid/.travis.yml | 16 - vendor/github.com/oklog/ulid/Gopkg.lock | 15 - vendor/github.com/oklog/ulid/Gopkg.toml | 26 - .../github.com/oklog/ulid/{ => v2}/.gitignore | 0 .../github.com/oklog/ulid/{ => v2}/AUTHORS.md | 0 .../oklog/ulid/{ => v2}/CHANGELOG.md | 0 .../oklog/ulid/{ => v2}/CONTRIBUTING.md | 0 .../oklog/ulid/v2}/LICENSE | 0 .../github.com/oklog/ulid/{ => v2}/README.md | 116 +- vendor/github.com/oklog/ulid/{ => v2}/ulid.go | 207 +- vendor/github.com/olekukonko/errors/chain.go | 52 +- vendor/github.com/olekukonko/errors/errors.go | 66 +- vendor/github.com/olekukonko/errors/helper.go | 5 + .../github.com/olekukonko/ll/.goreleaser.yaml | 37 + vendor/github.com/olekukonko/ll/Makefile | 99 + vendor/github.com/olekukonko/ll/README.md | 581 +- vendor/github.com/olekukonko/ll/comb.hcl | 12 + .../github.com/olekukonko/ll/conditional.go | 443 +- vendor/github.com/olekukonko/ll/dbg.go | 282 + vendor/github.com/olekukonko/ll/field.go | 267 +- vendor/github.com/olekukonko/ll/global.go | 104 +- vendor/github.com/olekukonko/ll/inspector.go | 8 +- 
.../github.com/olekukonko/ll/lh/buffered.go | 258 +- .../github.com/olekukonko/ll/lh/colorized.go | 1009 +- .../olekukonko/ll/lh/colorized_unix.go | 10 + .../olekukonko/ll/lh/colorized_windows.go | 47 + vendor/github.com/olekukonko/ll/lh/dedup.go | 248 + vendor/github.com/olekukonko/ll/lh/json.go | 167 +- vendor/github.com/olekukonko/ll/lh/lh.go | 75 + vendor/github.com/olekukonko/ll/lh/memory.go | 5 +- vendor/github.com/olekukonko/ll/lh/multi.go | 31 +- vendor/github.com/olekukonko/ll/lh/pipe.go | 76 + vendor/github.com/olekukonko/ll/lh/rotate.go | 235 + vendor/github.com/olekukonko/ll/lh/slog.go | 20 +- vendor/github.com/olekukonko/ll/lh/text.go | 161 +- vendor/github.com/olekukonko/ll/ll.go | 724 +- vendor/github.com/olekukonko/ll/lx/field.go | 140 + .../github.com/olekukonko/ll/lx/interface.go | 72 + vendor/github.com/olekukonko/ll/lx/lx.go | 175 +- .../olekukonko/ll/lx/{ns.go => namespace.go} | 37 +- vendor/github.com/olekukonko/ll/lx/types.go | 145 + vendor/github.com/olekukonko/ll/middleware.go | 6 +- vendor/github.com/olekukonko/ll/options.go | 69 + vendor/github.com/olekukonko/ll/since.go | 388 + vendor/github.com/olekukonko/ll/writer.go | 53 + .../olekukonko/tablewriter/MIGRATION.md | 2 +- .../olekukonko/tablewriter/README.md | 6 +- .../olekukonko/tablewriter/benchstat.txt | 194 - .../olekukonko/tablewriter/comb.hcl | 10 + .../olekukonko/tablewriter/config.go | 13 + .../github.com/olekukonko/tablewriter/new.txt | 248 - .../github.com/olekukonko/tablewriter/old.txt | 248 - .../olekukonko/tablewriter/option.go | 13 + .../olekukonko/tablewriter/pkg/twwarp/wrap.go | 23 +- .../tablewriter/pkg/twwidth/cache.go | 26 + .../olekukonko/tablewriter/pkg/twwidth/ea.go | 424 + .../olekukonko/tablewriter/pkg/twwidth/tab.go | 288 + .../tablewriter/pkg/twwidth/width.go | 236 +- .../tablewriter/renderer/blueprint.go | 42 +- .../tablewriter/renderer/colorized.go | 65 +- .../olekukonko/tablewriter/renderer/html.go | 2 +- .../tablewriter/renderer/markdown.go | 2 +- 
.../olekukonko/tablewriter/renderer/ocean.go | 2 +- .../olekukonko/tablewriter/renderer/svg.go | 2 +- .../olekukonko/tablewriter/renderer/tint.go | 25 + .../olekukonko/tablewriter/tablewriter.go | 79 +- .../olekukonko/tablewriter/tw/types.go | 3 +- .../github.com/olekukonko/tablewriter/zoo.go | 22 +- .../opa/capabilities/capabilities.go | 3 - .../opa/capabilities/v1.10.0.json | 4867 +++++ .../opa/capabilities/v1.11.0.json | 4878 +++++ .../opa/capabilities/v1.11.1.json | 4878 +++++ .../opa/capabilities/v1.12.0.json | 4896 +++++ .../opa/capabilities/v1.12.1.json | 4896 +++++ .../opa/capabilities/v1.12.2.json | 4896 +++++ .../opa/capabilities/v1.12.3.json | 4896 +++++ .../opa/capabilities/v1.13.0.json | 4916 +++++ .../opa/capabilities/v1.13.1.json | 4916 +++++ .../opa/capabilities/v1.13.2.json | 4916 +++++ .../opa/capabilities/v1.14.0.json | 4916 +++++ .../opa/capabilities/v1.14.1.json | 4916 +++++ .../opa/capabilities/v1.15.0.json | 4916 +++++ .../opa/capabilities/v1.15.1.json | 4916 +++++ .../opa/capabilities/v1.9.0.json | 4867 +++++ .../opa/internal/bundle/utils.go | 2 +- .../internal/compiler/wasm/opa/callgraph.csv | 1223 +- .../opa/internal/compiler/wasm/opa/opa.wasm | Bin 436729 -> 431796 bytes .../internal/compiler/wasm/optimizations.go | 2 +- .../opa/internal/compiler/wasm/wasm.go | 428 +- .../opa/internal/config/config.go | 176 - .../internal/edittree/bitvector/bitvector.go | 25 +- .../opa/internal/edittree/edittree.go | 345 +- .../opa/internal/file/archive/tarball.go | 71 +- .../opa/internal/gojsonschema/utils.go | 2 +- .../opa/internal/planner/planner.go | 4 +- .../opa/internal/providers/aws/crypto/ecc.go | 59 +- .../opa/internal/providers/aws/signing_v4.go | 2 + .../opa/internal/providers/aws/signing_v4a.go | 14 +- .../open-policy-agent/opa/internal/ref/ref.go | 4 +- .../opa/internal/report/report.go | 218 - .../opa/internal/runtime/init/init.go | 261 - .../opa/internal/semver/semver.go | 292 +- .../opa/internal/strvals/doc.go | 33 - 
.../opa/internal/strvals/parser.go | 429 - .../opa/internal/uuid/uuid.go | 2 +- .../opa/internal/wasm/encoding/reader.go | 2 +- .../opa/v1/ast/annotations.go | 60 +- .../open-policy-agent/opa/v1/ast/builtins.go | 508 +- .../opa/v1/ast/capabilities.go | 19 +- .../open-policy-agent/opa/v1/ast/check.go | 180 +- .../open-policy-agent/opa/v1/ast/compare.go | 228 +- .../open-policy-agent/opa/v1/ast/compile.go | 1426 +- .../opa/v1/ast/compilehelper.go | 3 +- .../open-policy-agent/opa/v1/ast/conflicts.go | 4 +- .../open-policy-agent/opa/v1/ast/env.go | 70 +- .../open-policy-agent/opa/v1/ast/errors.go | 21 +- .../open-policy-agent/opa/v1/ast/index.go | 503 +- .../opa/v1/ast/index_debug.go | 219 + .../opa/v1/ast/internal/scanner/scanner.go | 151 +- .../opa/v1/ast/internal/tokens/tokens.go | 108 +- .../open-policy-agent/opa/v1/ast/interning.go | 1736 +- .../opa/v1/ast/location/location.go | 41 +- .../open-policy-agent/opa/v1/ast/parser.go | 622 +- .../opa/v1/ast/parser_ext.go | 18 +- .../opa/v1/ast/performance.go | 99 + .../open-policy-agent/opa/v1/ast/policy.go | 279 +- .../opa/v1/ast/policy_appenders.go | 342 + .../open-policy-agent/opa/v1/ast/rego_v1.go | 27 +- .../open-policy-agent/opa/v1/ast/slices.go | 15 + .../opa/v1/ast/string_length.go | 351 + .../open-policy-agent/opa/v1/ast/strings.go | 2 + .../open-policy-agent/opa/v1/ast/syncpools.go | 88 +- .../open-policy-agent/opa/v1/ast/term.go | 962 +- .../opa/v1/ast/term_appenders.go | 266 + .../open-policy-agent/opa/v1/ast/transform.go | 45 +- .../open-policy-agent/opa/v1/ast/unify.go | 11 +- .../open-policy-agent/opa/v1/ast/varset.go | 14 +- .../opa/v1/ast/version_index.json | 849 +- .../open-policy-agent/opa/v1/ast/visit.go | 464 +- .../open-policy-agent/opa/v1/bundle/bundle.go | 316 +- .../open-policy-agent/opa/v1/bundle/file.go | 15 +- .../open-policy-agent/opa/v1/bundle/filefs.go | 3 - .../open-policy-agent/opa/v1/bundle/hash.go | 3 +- .../open-policy-agent/opa/v1/bundle/store.go | 16 +- 
.../open-policy-agent/opa/v1/bundle/verify.go | 4 - .../opa/v1/capabilities/capabilities.go | 5 +- .../open-policy-agent/opa/v1/config/config.go | 393 - .../open-policy-agent/opa/v1/format/format.go | 190 +- .../open-policy-agent/opa/v1/hooks/hooks.go | 97 - .../open-policy-agent/opa/v1/loader/loader.go | 6 +- .../opa/v1/logging/buffered_logger.go | 182 + .../opa/v1/logging/logging.go | 196 + .../opa/v1/metrics/metrics.go | 36 +- .../opa/v1/plugins/plugins.go | 1195 -- .../opa/v1/plugins/rest/auth.go | 1211 -- .../opa/v1/plugins/rest/aws.go | 1088 - .../opa/v1/plugins/rest/azure.go | 287 - .../opa/v1/plugins/rest/gcp.go | 173 - .../opa/v1/plugins/rest/rest.go | 366 - .../open-policy-agent/opa/v1/rego/plugins.go | 6 + .../open-policy-agent/opa/v1/rego/rego.go | 51 +- .../opa/v1/rego/resultset.go | 18 +- .../opa/v1/storage/inmem/ast.go | 17 +- .../opa/v1/storage/inmem/inmem.go | 103 +- .../opa/v1/storage/inmem/txn.go | 198 +- .../opa/v1/storage/interface.go | 12 + .../opa/v1/storage/internal/errors/errors.go | 31 +- .../opa/v1/storage/internal/ptr/ptr.go | 51 +- .../open-policy-agent/opa/v1/storage/path.go | 54 +- .../opa/v1/topdown/aggregates.go | 14 +- .../open-policy-agent/opa/v1/topdown/array.go | 55 +- .../opa/v1/topdown/bindings.go | 106 +- .../opa/v1/topdown/builtins/builtins.go | 27 +- .../opa/v1/topdown/cache/cache.go | 25 +- .../open-policy-agent/opa/v1/topdown/cidr.go | 2 +- .../copypropagation/copypropagation.go | 17 +- .../opa/v1/topdown/crypto.go | 68 +- .../opa/v1/topdown/encoding.go | 72 +- .../opa/v1/topdown/errors.go | 26 +- .../open-policy-agent/opa/v1/topdown/eval.go | 418 +- .../opa/v1/topdown/graphql.go | 16 +- .../open-policy-agent/opa/v1/topdown/http.go | 140 +- .../opa/v1/topdown/http_fixup.go | 3 +- .../opa/v1/topdown/http_fixup_darwin.go | 3 - .../open-policy-agent/opa/v1/topdown/json.go | 219 +- .../opa/v1/topdown/jsonschema.go | 4 +- .../open-policy-agent/opa/v1/topdown/net.go | 2 +- .../opa/v1/topdown/numbers.go | 17 +- 
.../opa/v1/topdown/object.go | 31 +- .../open-policy-agent/opa/v1/topdown/print.go | 27 +- .../opa/v1/topdown/providers.go | 47 +- .../open-policy-agent/opa/v1/topdown/query.go | 7 +- .../open-policy-agent/opa/v1/topdown/regex.go | 86 +- .../opa/v1/topdown/resolver.go | 2 +- .../opa/v1/topdown/semver.go | 21 +- .../open-policy-agent/opa/v1/topdown/sets.go | 24 +- .../open-policy-agent/opa/v1/topdown/sink.go | 73 + .../opa/v1/topdown/strings.go | 90 +- .../opa/v1/topdown/template.go | 6 +- .../opa/v1/topdown/template_string.go | 45 + .../opa/v1/topdown/tokens.go | 41 +- .../open-policy-agent/opa/v1/topdown/trace.go | 8 +- .../open-policy-agent/opa/v1/topdown/walk.go | 25 +- .../open-policy-agent/opa/v1/types/types.go | 3 +- .../open-policy-agent/opa/v1/util/graph.go | 7 +- .../open-policy-agent/opa/v1/util/json.go | 78 +- .../opa/v1/util/performance.go | 125 +- .../opa/v1/util/read_gzip_body.go | 83 +- .../open-policy-agent/opa/v1/util/strings.go | 13 + .../opa/v1/version/version.go | 2 +- .../runc/internal/linux/linux.go | 26 +- .../runc/libcontainer/configs/memorypolicy.go | 28 +- .../internal/userns/userns_maps_linux.go | 2 +- .../runc/libcontainer/seccomp/config.go | 1 + .../runc/libcontainer/specconv/spec_linux.go | 40 +- .../openshift/api/config/v1/register.go | 6 + .../openshift/api/config/v1/types.go | 60 +- .../api/config/v1/types_apiserver.go | 103 +- .../api/config/v1/types_authentication.go | 558 +- .../openshift/api/config/v1/types_build.go | 31 +- .../config/v1/types_cluster_image_policy.go | 87 + .../api/config/v1/types_cluster_operator.go | 34 +- .../api/config/v1/types_cluster_version.go | 167 +- .../openshift/api/config/v1/types_console.go | 2 +- .../openshift/api/config/v1/types_dns.go | 12 +- .../openshift/api/config/v1/types_feature.go | 15 +- .../openshift/api/config/v1/types_image.go | 3 +- .../config/v1/types_image_content_policy.go | 2 - .../v1/types_image_digest_mirror_set.go | 2 - .../api/config/v1/types_image_policy.go | 322 + 
.../config/v1/types_image_tag_mirror_set.go | 2 - .../api/config/v1/types_infrastructure.go | 443 +- .../openshift/api/config/v1/types_ingress.go | 13 +- .../openshift/api/config/v1/types_insights.go | 231 + .../api/config/v1/types_kmsencryption.go | 55 + .../openshift/api/config/v1/types_network.go | 34 +- .../openshift/api/config/v1/types_node.go | 14 +- .../openshift/api/config/v1/types_oauth.go | 1 - .../api/config/v1/types_operatorhub.go | 1 + .../openshift/api/config/v1/types_project.go | 1 - .../openshift/api/config/v1/types_proxy.go | 3 +- .../api/config/v1/types_scheduling.go | 7 +- .../api/config/v1/types_testreporting.go | 9 +- .../api/config/v1/types_tlssecurityprofile.go | 181 +- .../api/config/v1/zz_generated.deepcopy.go | 843 +- ..._generated.featuregated-crd-manifests.yaml | 102 +- .../v1/zz_generated.swagger_doc_generated.go | 721 +- .../pelletier/go-toml/v2/.gitignore | 1 + .../pelletier/go-toml/v2/.golangci.toml | 74 +- .../pelletier/go-toml/v2/.goreleaser.yaml | 3 - .../github.com/pelletier/go-toml/v2/AGENTS.md | 64 + .../pelletier/go-toml/v2/CONTRIBUTING.md | 60 +- .../github.com/pelletier/go-toml/v2/README.md | 116 +- vendor/github.com/pelletier/go-toml/v2/ci.sh | 7 +- .../github.com/pelletier/go-toml/v2/decode.go | 5 +- .../github.com/pelletier/go-toml/v2/errors.go | 39 +- .../go-toml/v2/internal/characters/ascii.go | 6 +- .../go-toml/v2/internal/characters/utf8.go | 68 +- .../go-toml/v2/internal/danger/danger.go | 65 - .../go-toml/v2/internal/danger/typeid.go | 23 - .../go-toml/v2/internal/tracker/key.go | 2 +- .../go-toml/v2/internal/tracker/seen.go | 13 +- .../go-toml/v2/internal/tracker/tracker.go | 1 + .../pelletier/go-toml/v2/localtime.go | 2 +- .../pelletier/go-toml/v2/marshaler.go | 165 +- .../github.com/pelletier/go-toml/v2/strict.go | 23 +- .../pelletier/go-toml/v2/test-go-versions.sh | 597 + .../github.com/pelletier/go-toml/v2/types.go | 21 +- .../pelletier/go-toml/v2/unmarshaler.go | 228 +- .../pelletier/go-toml/v2/unstable/ast.go 
| 71 +- .../pelletier/go-toml/v2/unstable/builder.go | 27 +- .../pelletier/go-toml/v2/unstable/kind.go | 20 +- .../pelletier/go-toml/v2/unstable/parser.go | 103 +- .../go-toml/v2/unstable/unmarshaler.go | 31 +- .../apis/monitoring/v1/alertmanager_types.go | 81 +- .../apis/monitoring/v1/prometheus_types.go | 46 +- .../pkg/apis/monitoring/v1/thanos_types.go | 49 +- .../pkg/apis/monitoring/v1/types.go | 10 +- .../monitoring/v1/zz_generated.deepcopy.go | 5 + .../prometheus/procfs/.golangci.yml | 8 + .../prometheus/procfs/Makefile.common | 247 +- .../github.com/prometheus/procfs/cpuinfo.go | 3 +- .../prometheus/procfs/cpuinfo_armx.go | 2 - .../prometheus/procfs/cpuinfo_loong64.go | 1 - .../prometheus/procfs/cpuinfo_mipsx.go | 2 - .../prometheus/procfs/cpuinfo_others.go | 1 - .../prometheus/procfs/cpuinfo_ppcx.go | 2 - .../prometheus/procfs/cpuinfo_riscvx.go | 2 - .../prometheus/procfs/cpuinfo_s390x.go | 1 - .../prometheus/procfs/cpuinfo_x86.go | 2 - .../prometheus/procfs/fs_statfs_notype.go | 1 - .../prometheus/procfs/fs_statfs_type.go | 1 - .../procfs/internal/util/sysreadfile.go | 2 - .../internal/util/sysreadfile_compat.go | 1 - .../prometheus/procfs/kernel_hung.go | 1 - .../prometheus/procfs/kernel_random.go | 1 - .../github.com/prometheus/procfs/net_tcp.go | 4 + .../prometheus/procfs/proc_interrupts.go | 2 +- .../github.com/prometheus/procfs/proc_maps.go | 2 - .../prometheus/procfs/proc_smaps.go | 1 - .../prometheus/procfs/proc_statm.go | 1 + .../prometheus/procfs/proc_status.go | 43 + vendor/github.com/prometheus/procfs/vm.go | 1 - .../github.com/prometheus/procfs/zoneinfo.go | 1 - .../protocolbuffers/txtpbfmt/ast/ast.go | 81 +- .../protocolbuffers/txtpbfmt/config/config.go | 19 + .../txtpbfmt/descriptor/descriptor.go | 83 + .../protocolbuffers/txtpbfmt/impl/impl.go | 332 +- .../protocolbuffers/txtpbfmt/sort/sort.go | 68 +- .../github.com/rcrowley/go-metrics/README.md | 9 + .../github.com/sagikazarmark/locafero/.envrc | 8 +- .../sagikazarmark/locafero/.gitignore | 
11 +- .../sagikazarmark/locafero/devenv.lock | 103 + .../sagikazarmark/locafero/devenv.nix | 17 + .../sagikazarmark/locafero/devenv.yaml | 4 + .../sagikazarmark/locafero/finder.go | 10 +- .../sagikazarmark/locafero/flake.lock | 255 - .../sagikazarmark/locafero/flake.nix | 42 - .../locafero/internal/queue/eager.go | 53 + .../locafero/internal/queue/queue.go | 8 + .../cjson/canonicaljson.go | 1 - .../go-securesystemslib/dsse/envelope.go | 2 +- .../go-securesystemslib/dsse/sign.go | 2 +- .../go-securesystemslib/dsse/verify.go | 8 +- .../signerverifier/ecdsa.go | 7 +- .../signerverifier/ed25519.go | 7 +- .../go-securesystemslib/signerverifier/rsa.go | 7 +- vendor/github.com/segmentio/asm/LICENSE | 31 +- .../segmentio/asm/base64/decode_arm64.s | 14 +- .../rekor/pkg/types/dsse/v0.0.1/entry.go | 101 +- .../sigstore/rekor/pkg/verify/verify.go | 11 +- .../sigstore/pkg/oauth/interactive.go | 45 +- .../sigstore/sigstore/pkg/oauthflow/device.go | 7 +- .../sigstore/pkg/oauthflow/interactive.go | 1 + .../pkg/signature/algorithm_registry.go | 4 +- .../kms/cliplugin/encoding/options.go | 45 +- .../sigstore/pkg/signature/message.go | 8 +- .../sigstore/pkg/signature/payload/payload.go | 6 +- .../sigstore/sigstore/pkg/signature/util.go | 4 +- .../v2/pkg/verification/verify.go | 3 +- .../github.com/sourcegraph/conc/.golangci.yml | 11 - vendor/github.com/sourcegraph/conc/Makefile | 24 - vendor/github.com/sourcegraph/conc/README.md | 464 - .../sourcegraph/conc/panics/panics.go | 102 - .../github.com/sourcegraph/conc/panics/try.go | 11 - .../sourcegraph/conc/pool/context_pool.go | 104 - .../sourcegraph/conc/pool/error_pool.go | 100 - .../github.com/sourcegraph/conc/pool/pool.go | 174 - .../conc/pool/result_context_pool.go | 85 - .../conc/pool/result_error_pool.go | 80 - .../sourcegraph/conc/pool/result_pool.go | 142 - .../github.com/sourcegraph/conc/waitgroup.go | 52 - .../github.com/stoewer/go-strcase/.gitignore | 17 - .../stoewer/go-strcase/.golangci.yml | 19 - 
vendor/github.com/stoewer/go-strcase/LICENSE | 21 - .../github.com/stoewer/go-strcase/README.md | 50 - vendor/github.com/stoewer/go-strcase/camel.go | 43 - vendor/github.com/stoewer/go-strcase/doc.go | 8 - .../github.com/stoewer/go-strcase/helper.go | 77 - vendor/github.com/stoewer/go-strcase/kebab.go | 14 - vendor/github.com/stoewer/go-strcase/snake.go | 58 - .../go-tuf/v2/metadata/config/config.go | 7 +- .../go-tuf/v2/metadata/keys.go | 9 +- .../go-tuf/v2/metadata/marshal.go | 10 + .../go-tuf/v2/metadata/metadata.go | 69 +- .../trustedmetadata/trustedmetadata.go | 6 +- .../go-tuf/v2/metadata/updater/updater.go | 11 +- vendor/github.com/valyala/fastjson/arena.go | 8 +- vendor/github.com/valyala/fastjson/doc.go | 1 - vendor/github.com/valyala/fastjson/fuzz.go | 1 + vendor/github.com/valyala/fastjson/parser.go | 58 +- vendor/github.com/valyala/fastjson/pool.go | 1 + vendor/github.com/valyala/fastjson/scanner.go | 2 +- vendor/github.com/valyala/fastjson/update.go | 16 +- .../github.com/valyala/fastjson/validate.go | 6 +- .../vektah/gqlparser/v2/ast/argmap.go | 10 +- .../vektah/gqlparser/v2/ast/definition.go | 9 +- .../vektah/gqlparser/v2/ast/directive.go | 9 +- .../vektah/gqlparser/v2/ast/document.go | 7 +- .../vektah/gqlparser/v2/ast/dumper.go | 14 +- .../vektah/gqlparser/v2/ast/path.go | 4 +- .../vektah/gqlparser/v2/ast/selection.go | 5 +- .../vektah/gqlparser/v2/ast/source.go | 2 +- .../vektah/gqlparser/v2/ast/value.go | 13 +- .../vektah/gqlparser/v2/gqlerror/error.go | 31 +- .../vektah/gqlparser/v2/lexer/lexer.go | 101 +- .../vektah/gqlparser/v2/parser/parser.go | 6 +- .../vektah/gqlparser/v2/parser/query.go | 10 +- .../gqlparser/v2/validator/core/helpers.go | 19 +- .../gqlparser/v2/validator/core/walk.go | 10 +- .../validator/rules/fields_on_correct_type.go | 28 +- .../rules/fragments_on_composite_types.go | 15 +- .../validator/rules/known_argument_names.go | 15 +- .../v2/validator/rules/known_directives.go | 15 +- .../validator/rules/known_fragment_names.go | 1 
- .../v2/validator/rules/known_root_type.go | 1 - .../v2/validator/rules/known_type_names.go | 1 - .../rules/lone_anonymous_operation.go | 1 - .../rules/max_introspection_depth.go | 13 +- .../v2/validator/rules/no_fragment_cycles.go | 7 +- .../validator/rules/no_undefined_variables.go | 10 +- .../v2/validator/rules/no_unused_fragments.go | 1 - .../v2/validator/rules/no_unused_variables.go | 7 +- .../rules/overlapping_fields_can_be_merged.go | 138 +- .../rules/possible_fragment_spreads.go | 29 +- .../gqlparser/v2/validator/rules/rules.go | 9 +- .../v2/validator/rules/scalar_leafs.go | 13 +- .../rules/single_field_subscriptions.go | 1 - .../validator/rules/unique_argument_names.go | 1 - .../rules/unique_directives_per_location.go | 6 +- .../validator/rules/unique_fragment_names.go | 1 - .../rules/unique_input_field_names.go | 1 - .../validator/rules/unique_operation_names.go | 1 - .../validator/rules/unique_variable_names.go | 1 - .../validator/rules/values_of_correct_type.go | 95 +- .../rules/variables_are_input_types.go | 1 - .../rules/variables_in_allowed_position.go | 13 +- .../vektah/gqlparser/v2/validator/schema.go | 230 +- .../gqlparser/v2/validator/validator.go | 19 +- .../vektah/gqlparser/v2/validator/vars.go | 48 +- vendor/github.com/xrash/smetrics/jaro.go | 2 +- .../gitlab-org/api/client-go/.gitignore | 3 + .../gitlab-org/api/client-go/.gitlab-ci.yml | 154 +- .../gitlab-org/api/client-go/.golangci.yml | 52 +- .../gitlab-org/api/client-go/.releaserc.json | 19 +- .../gitlab-org/api/client-go/.tool-versions | 5 +- .../gitlab-org/api/client-go/AGENTS.md | 401 + .../gitlab-org/api/client-go/CHANGELOG.md | 1424 ++ .../gitlab-org/api/client-go/CONTRIBUTING.md | 12 +- .../gitlab-org/api/client-go/Makefile | 17 +- .../gitlab-org/api/client-go/README.md | 57 +- .../api/client-go/access_requests.go | 297 +- .../admin_compliance_policy_settings.go | 85 + .../api/client-go/alert_management.go | 145 +- .../gitlab-org/api/client-go/appearance.go | 58 +- 
.../api/client-go/application_statistics.go | 46 +- .../gitlab-org/api/client-go/applications.go | 89 +- .../gitlab-org/api/client-go/attestations.go | 64 + .../gitlab-org/api/client-go/audit_events.go | 208 +- .../gitlab-org/api/client-go/avatar.go | 28 +- .../gitlab-org/api/client-go/award_emojis.go | 556 +- .../gitlab-org/api/client-go/boards.go | 441 +- .../gitlab-org/api/client-go/branches.go | 154 +- .../api/client-go/broadcast_messages.go | 152 +- .../gitlab-org/api/client-go/buf.gen.yaml | 2 +- .../gitlab-org/api/client-go/bulk_imports.go | 23 +- .../api/client-go/ci_yml_templates.go | 59 +- .../api/client-go/client_options.go | 27 + .../api/client-go/cluster_agents.go | 296 +- .../api/client-go/commitlint.config.mjs | 8 - .../gitlab-org/api/client-go/commits.go | 467 +- .../api/client-go/container_registry.go | 305 +- .../container_registry_protection_rules.go | 156 +- .../api/client-go/custom_attributes.go | 225 +- .../api/client-go/database_migrations.go | 32 +- .../gitlab-org/api/client-go/dependencies.go | 44 +- .../api/client-go/dependency_list_export.go | 128 +- .../api/client-go/dependency_proxy.go | 33 +- .../gitlab-org/api/client-go/deploy_keys.go | 328 +- .../gitlab-org/api/client-go/deploy_tokens.go | 306 +- .../gitlab-org/api/client-go/deployments.go | 284 +- .../client-go/deployments_merge_requests.go | 41 +- .../gitlab-org/api/client-go/discussions.go | 1205 +- .../api/client-go/docker-compose.yml | 61 + .../api/client-go/dockerfile_templates.go | 62 +- .../gitlab-org/api/client-go/dora_metrics.go | 72 +- .../gitlab-org/api/client-go/draft_notes.go | 249 +- .../api/client-go/enterprise_users.go | 99 +- .../gitlab-org/api/client-go/environments.go | 190 +- .../gitlab-org/api/client-go/epic_issues.go | 128 +- .../gitlab-org/api/client-go/epics.go | 192 +- .../api/client-go/error_tracking.go | 167 +- .../gitlab-org/api/client-go/event_parsing.go | 27 +- .../api/client-go/event_systemhook_types.go | 258 +- .../api/client-go/event_webhook_types.go 
| 2484 ++- .../gitlab-org/api/client-go/events.go | 291 +- .../api/client-go/external_status_checks.go | 463 +- .../api/client-go/feature_flag_user_lists.go | 130 +- .../gitlab-org/api/client-go/feature_flags.go | 69 +- .../api/client-go/freeze_periods.go | 130 +- .../api/client-go/generic_packages.go | 72 +- .../gitlab-org/api/client-go/geo_nodes.go | 455 +- .../gitlab-org/api/client-go/geo_sites.go | 618 +- .../api/client-go/gitignore_templates.go | 45 +- .../gitlab-org/api/client-go/gitlab.go | 346 +- .../gitlab-org/api/client-go/graphql.go | 94 +- .../api/client-go/group_access_tokens.go | 153 +- .../api/client-go/group_activity_analytics.go | 89 +- .../gitlab-org/api/client-go/group_badges.go | 153 +- .../gitlab-org/api/client-go/group_boards.go | 364 +- .../api/client-go/group_clusters.go | 125 +- .../api/client-go/group_credentials.go | 140 + .../api/client-go/group_epic_boards.go | 60 +- .../gitlab-org/api/client-go/group_hooks.go | 258 +- .../api/client-go/group_import_export.go | 51 +- .../api/client-go/group_integrations.go | 184 + .../api/client-go/group_iterations.go | 39 +- .../gitlab-org/api/client-go/group_labels.go | 200 +- .../api/client-go/group_markdown_uploads.go | 62 +- .../gitlab-org/api/client-go/group_members.go | 373 +- .../api/client-go/group_milestones.go | 226 +- .../api/client-go/group_protected_branches.go | 177 + .../client-go/group_protected_environments.go | 172 +- .../api/client-go/group_relations_export.go | 110 + .../api/client-go/group_releases.go | 27 +- .../group_repository_storage_move.go | 130 +- .../gitlab-org/api/client-go/group_scim.go | 80 +- .../api/client-go/group_security_settings.go | 32 +- .../api/client-go/group_serviceaccounts.go | 229 +- .../api/client-go/group_ssh_certificates.go | 77 +- .../api/client-go/group_variables.go | 159 +- .../gitlab-org/api/client-go/group_wikis.go | 116 +- .../gitlab-org/api/client-go/groups.go | 886 +- .../gitlab-org/api/client-go/import.go | 127 +- 
.../api/client-go/instance_clusters.go | 105 +- .../api/client-go/instance_variables.go | 137 +- .../gitlab-org/api/client-go/integrations.go | 814 +- .../gitlab-org/api/client-go/invites.go | 97 +- .../gitlab-org/api/client-go/issue_links.go | 126 +- .../gitlab-org/api/client-go/issues.go | 596 +- .../api/client-go/issues_statistics.go | 133 +- .../api/client-go/job_token_scope.go | 197 +- .../gitlab-org/api/client-go/jobs.go | 711 +- .../gitlab-org/api/client-go/keys.go | 47 +- .../gitlab-org/api/client-go/labels.go | 225 +- .../gitlab-org/api/client-go/license.go | 119 +- .../api/client-go/license_templates.go | 41 +- .../gitlab-org/api/client-go/markdown.go | 24 +- .../api/client-go/markdown_uploads.go | 125 +- .../gitlab-org/api/client-go/member_roles.go | 143 +- .../merge_request_approval_settings.go | 122 +- .../api/client-go/merge_request_approvals.go | 327 +- .../merge_request_context_commits.go | 84 + .../api/client-go/merge_requests.go | 967 +- .../gitlab-org/api/client-go/merge_trains.go | 117 +- .../gitlab-org/api/client-go/metadata.go | 45 +- .../gitlab-org/api/client-go/milestones.go | 261 +- .../api/client-go/model_registry.go | 50 + .../gitlab-org/api/client-go/namespaces.go | 90 +- .../gitlab-org/api/client-go/notes.go | 602 +- .../gitlab-org/api/client-go/notifications.go | 181 +- .../gitlab-org/api/client-go/packages.go | 138 +- .../gitlab-org/api/client-go/pages.go | 99 +- .../gitlab-org/api/client-go/pages_domains.go | 165 +- .../gitlab-org/api/client-go/pagination.go | 58 +- .../api/client-go/personal_access_tokens.go | 148 +- .../api/client-go/pipeline_schedules.go | 293 +- .../api/client-go/pipeline_triggers.go | 158 +- .../gitlab-org/api/client-go/pipelines.go | 454 +- .../gitlab-org/api/client-go/plan_limits.go | 67 +- .../api/client-go/project_access_tokens.go | 146 +- .../api/client-go/project_aliases.go | 90 + .../api/client-go/project_badges.go | 149 +- .../api/client-go/project_clusters.go | 127 +- 
.../api/client-go/project_feature_flags.go | 173 +- .../api/client-go/project_import_export.go | 197 +- .../api/client-go/project_iterations.go | 39 +- .../api/client-go/project_markdown_uploads.go | 101 +- .../api/client-go/project_members.go | 195 +- .../api/client-go/project_mirror.go | 216 +- .../project_repository_storage_move.go | 182 +- .../client-go/project_security_settings.go | 67 +- .../api/client-go/project_snippets.go | 152 +- .../api/client-go/project_statistics.go | 44 + .../api/client-go/project_templates.go | 66 +- .../api/client-go/project_variables.go | 160 +- .../api/client-go/project_vulnerabilities.go | 72 +- .../gitlab-org/api/client-go/projects.go | 2162 +- .../api/client-go/protected_branches.go | 137 +- .../api/client-go/protected_environments.go | 172 +- .../api/client-go/protected_packages.go | 120 + .../api/client-go/protected_tags.go | 104 +- .../gitlab-org/api/client-go/releaselinks.go | 147 +- .../gitlab-org/api/client-go/releases.go | 226 +- .../gitlab-org/api/client-go/repositories.go | 292 +- .../api/client-go/repository_files.go | 229 +- .../api/client-go/repository_submodules.go | 27 +- .../api/client-go/request_handler.go | 276 + .../api/client-go/request_options.go | 82 +- .../api/client-go/resource_group.go | 93 +- .../client-go/resource_iteration_events.go | 69 +- .../api/client-go/resource_label_events.go | 200 +- .../client-go/resource_milestone_events.go | 112 +- .../api/client-go/resource_state_events.go | 112 +- .../api/client-go/resource_weight_events.go | 37 +- .../api/client-go/runner_controller_scopes.go | 131 + .../api/client-go/runner_controller_tokens.go | 125 + .../api/client-go/runner_controllers.go | 145 + .../gitlab-org/api/client-go/runners.go | 545 +- .../gitlab-org/api/client-go/search.go | 254 +- .../gitlab-org/api/client-go/secure_files.go | 126 +- .../gitlab-org/api/client-go/services.go | 1399 +- .../gitlab-org/api/client-go/settings.go | 682 +- .../api/client-go/sidekiq_metrics.go | 121 +- 
.../snippet_repository_storage_move.go | 132 +- .../gitlab-org/api/client-go/snippets.go | 241 +- .../gitlab-org/api/client-go/strings.go | 2 +- .../gitlab-org/api/client-go/system_hooks.go | 145 +- .../gitlab-org/api/client-go/tags.go | 163 +- .../api/client-go/terraform_states.go | 116 +- .../gitlab-org/api/client-go/time_stats.go | 131 +- .../gitlab-org/api/client-go/todos.go | 100 +- .../gitlab-org/api/client-go/topics.go | 178 +- .../gitlab-org/api/client-go/types.go | 88 +- .../gitlab-org/api/client-go/usage_data.go | 164 +- .../gitlab-org/api/client-go/users.go | 1772 +- .../gitlab-org/api/client-go/validate.go | 68 +- .../gitlab-org/api/client-go/version.go | 18 +- .../gitlab-org/api/client-go/wikis.go | 195 +- .../gitlab-org/api/client-go/workitems.go | 455 + .../go.mongodb.org/mongo-driver/bson/bson.go | 50 - .../bson/bsoncodec/array_codec.go | 55 - .../mongo-driver/bson/bsoncodec/bsoncodec.go | 382 - .../bson/bsoncodec/byte_slice_codec.go | 138 - .../bson/bsoncodec/codec_cache.go | 166 - .../bson/bsoncodec/cond_addr_codec.go | 63 - .../bson/bsoncodec/default_value_decoders.go | 1819 -- .../bson/bsoncodec/default_value_encoders.go | 856 - .../mongo-driver/bson/bsoncodec/doc.go | 95 - .../bson/bsoncodec/empty_interface_codec.go | 173 - .../mongo-driver/bson/bsoncodec/map_codec.go | 343 - .../mongo-driver/bson/bsoncodec/mode.go | 65 - .../bson/bsoncodec/pointer_codec.go | 108 - .../mongo-driver/bson/bsoncodec/proxy.go | 14 - .../mongo-driver/bson/bsoncodec/registry.go | 524 - .../bson/bsoncodec/slice_codec.go | 214 - .../bson/bsoncodec/string_codec.go | 140 - .../bson/bsoncodec/struct_codec.go | 736 - .../bson/bsoncodec/struct_tag_parser.go | 148 - .../mongo-driver/bson/bsoncodec/time_codec.go | 151 - .../mongo-driver/bson/bsoncodec/types.go | 58 - .../mongo-driver/bson/bsoncodec/uint_codec.go | 202 - .../bsonoptions/byte_slice_codec_options.go | 49 - .../mongo-driver/bson/bsonoptions/doc.go | 8 - .../empty_interface_codec_options.go | 49 - 
.../bson/bsonoptions/map_codec_options.go | 82 - .../bson/bsonoptions/slice_codec_options.go | 49 - .../bson/bsonoptions/string_codec_options.go | 52 - .../bson/bsonoptions/struct_codec_options.go | 107 - .../bson/bsonoptions/time_codec_options.go | 49 - .../bson/bsonoptions/uint_codec_options.go | 49 - .../mongo-driver/bson/bsonrw/copier.go | 489 - .../mongo-driver/bson/bsonrw/doc.go | 9 - .../bson/bsonrw/extjson_parser.go | 806 - .../bson/bsonrw/extjson_reader.go | 653 - .../bson/bsonrw/extjson_tables.go | 223 - .../bson/bsonrw/extjson_wrappers.go | 492 - .../bson/bsonrw/extjson_writer.go | 751 - .../mongo-driver/bson/bsonrw/json_scanner.go | 533 - .../mongo-driver/bson/bsonrw/mode.go | 108 - .../mongo-driver/bson/bsonrw/reader.go | 65 - .../mongo-driver/bson/bsonrw/value_reader.go | 888 - .../mongo-driver/bson/bsonrw/value_writer.go | 640 - .../mongo-driver/bson/bsonrw/writer.go | 87 - .../mongo-driver/bson/bsontype/bsontype.go | 116 - .../mongo-driver/bson/decoder.go | 208 - .../go.mongodb.org/mongo-driver/bson/doc.go | 142 - .../mongo-driver/bson/encoder.go | 199 - .../mongo-driver/bson/marshal.go | 453 - .../mongo-driver/bson/primitive/decimal.go | 432 - .../mongo-driver/bson/primitive/objectid.go | 206 - .../mongo-driver/bson/primitive/primitive.go | 231 - .../mongo-driver/bson/primitive_codecs.go | 122 - .../go.mongodb.org/mongo-driver/bson/raw.go | 101 - .../mongo-driver/bson/raw_element.go | 48 - .../mongo-driver/bson/raw_value.go | 324 - .../mongo-driver/bson/registry.go | 47 - .../go.mongodb.org/mongo-driver/bson/types.go | 50 - .../mongo-driver/bson/unmarshal.go | 180 - .../mongo-driver/x/bsonx/bsoncore/array.go | 164 - .../x/bsonx/bsoncore/bson_arraybuilder.go | 201 - .../x/bsonx/bsoncore/bson_documentbuilder.go | 189 - .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 842 - .../mongo-driver/x/bsonx/bsoncore/doc.go | 34 - .../mongo-driver/x/bsonx/bsoncore/document.go | 386 - .../x/bsonx/bsoncore/document_sequence.go | 189 - 
.../mongo-driver/x/bsonx/bsoncore/element.go | 152 - .../mongo-driver/x/bsonx/bsoncore/tables.go | 223 - .../mongo-driver/x/bsonx/bsoncore/value.go | 964 - .../net/http/otelhttp/client.go | 50 - .../net/http/otelhttp/common.go | 2 +- .../net/http/otelhttp/config.go | 24 +- .../instrumentation/net/http/otelhttp/doc.go | 3 +- .../net/http/otelhttp/handler.go | 45 +- .../internal/request/resp_writer_wrapper.go | 9 +- .../http/otelhttp/internal/semconv/client.go | 291 + .../net/http/otelhttp/internal/semconv/env.go | 248 - .../net/http/otelhttp/internal/semconv/gen.go | 8 +- .../otelhttp/internal/semconv/httpconv.go | 517 - .../http/otelhttp/internal/semconv/server.go | 396 + .../http/otelhttp/internal/semconv/util.go | 8 +- .../net/http/otelhttp/transport.go | 93 +- .../net/http/otelhttp/version.go | 5 +- vendor/go.opentelemetry.io/otel/.golangci.yml | 5 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 84 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 6 +- vendor/go.opentelemetry.io/otel/Makefile | 11 +- vendor/go.opentelemetry.io/otel/README.md | 14 +- .../otel/attribute/internal/attribute.go | 12 +- .../go.opentelemetry.io/otel/attribute/set.go | 2 +- .../otel/attribute/value.go | 3 +- .../otel/baggage/baggage.go | 113 +- .../otel/dependencies.Dockerfile | 2 +- .../otlptrace/internal/tracetransform/span.go | 16 +- .../otlp/otlptrace/otlptracegrpc/client.go | 43 +- .../otlptracegrpc/internal/counter/counter.go | 31 + .../otlptrace/otlptracegrpc/internal/gen.go | 9 + .../otlptracegrpc/internal/observ/doc.go | 6 + .../internal/observ/instrumentation.go | 350 + .../otlptracegrpc/internal/observ/target.go | 143 + .../otlptracegrpc/internal/partialsuccess.go | 11 + .../otlptracegrpc/internal/retry/retry.go | 5 + .../otlptracegrpc/internal/version.go | 8 + .../otlptracegrpc/internal/x/README.md | 36 + .../otlptracegrpc/internal/x/observ.go | 22 + .../otlptrace/otlptracegrpc/internal/x/x.go | 58 + .../otel/exporters/otlp/otlptrace/version.go | 2 +- 
.../internal/errorhandler/errorhandler.go | 96 + .../otel/internal/global/handler.go | 34 +- .../otel/internal/global/instruments.go | 56 + .../otel/internal/global/meter.go | 28 +- .../otel/internal/global/state.go | 36 +- .../otel/metric/asyncfloat64.go | 9 +- .../otel/metric/asyncint64.go | 9 +- .../go.opentelemetry.io/otel/metric/meter.go | 60 +- .../otel/metric/noop/noop.go | 24 + .../otel/metric/syncfloat64.go | 48 + .../otel/metric/syncint64.go | 48 + .../otel/propagation/baggage.go | 24 +- .../otel/propagation/trace_context.go | 13 +- .../go.opentelemetry.io/otel/requirements.txt | 2 +- .../otel/sdk/resource/builtin.go | 2 +- .../otel/sdk/resource/container.go | 2 +- .../otel/sdk/resource/env.go | 2 +- .../otel/sdk/resource/host_id.go | 9 +- .../otel/sdk/resource/host_id_readfile.go | 2 +- .../otel/sdk/resource/os.go | 2 +- .../otel/sdk/resource/process.go | 2 +- .../otel/sdk/trace/batch_span_processor.go | 6 +- .../internal/observ/batch_span_processor.go | 4 +- .../internal/observ/simple_span_processor.go | 4 +- .../otel/sdk/trace/internal/observ/tracer.go | 2 +- .../otel/sdk/trace/provider.go | 8 +- .../otel/sdk/trace/sampling.go | 28 + .../otel/sdk/trace/span.go | 2 +- .../go.opentelemetry.io/otel/sdk/version.go | 2 +- .../otel/semconv/v1.40.0/MIGRATION.md | 27 + .../otel/semconv/v1.40.0/README.md | 3 + .../otel/semconv/v1.40.0/attribute_group.go | 16861 +++++++++++++++ .../otel/semconv/v1.40.0/doc.go | 9 + .../otel/semconv/v1.40.0/error_type.go | 66 + .../otel/semconv/v1.40.0/exception.go | 9 + .../{v1.37.0 => v1.40.0}/httpconv/metric.go | 91 +- .../{v1.37.0 => v1.40.0}/otelconv/metric.go | 52 +- .../otel/semconv/v1.40.0/schema.go | 9 + vendor/go.opentelemetry.io/otel/trace/auto.go | 2 +- .../go.opentelemetry.io/otel/trace/trace.go | 5 + .../otel/trace/tracestate.go | 9 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 11 +- .../proto/otlp/common/v1/common.pb.go | 168 +- 
.../proto/otlp/resource/v1/resource.pb.go | 3 +- .../proto/otlp/trace/v1/trace.pb.go | 36 +- .../common/pkg/seccomp/default_linux.go | 33 +- .../common/pkg/seccomp/seccomp.json | 43 +- .../chacha20poly1305/chacha20poly1305.go | 3 + .../chacha20poly1305/fips140only_compat.go | 9 + .../chacha20poly1305/fips140only_go1.26.go | 11 + .../chacha20poly1305/xchacha20poly1305.go | 3 + vendor/golang.org/x/crypto/scrypt/scrypt.go | 3 + .../x/net/http2/client_priority_go126.go | 20 + .../x/net/http2/client_priority_go127.go | 13 + vendor/golang.org/x/net/http2/frame.go | 188 +- vendor/golang.org/x/net/http2/http2.go | 18 +- vendor/golang.org/x/net/http2/server.go | 88 +- vendor/golang.org/x/net/http2/transport.go | 15 +- vendor/golang.org/x/net/http2/writesched.go | 6 + .../net/http2/writesched_priority_rfc7540.go | 9 + .../net/http2/writesched_priority_rfc9218.go | 15 + .../x/net/http2/writesched_random.go | 2 + .../x/net/internal/httpsfv/httpsfv.go | 665 + vendor/golang.org/x/net/websocket/hybi.go | 1 + vendor/golang.org/x/oauth2/google/default.go | 131 +- vendor/golang.org/x/oauth2/google/google.go | 3 +- .../x/sync/singleflight/singleflight.go | 14 +- .../x/sys/cpu/asm_darwin_arm64_gc.s | 12 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 9 +- .../golang.org/x/sys/cpu/cpu_darwin_arm64.go | 67 + .../x/sys/cpu/cpu_darwin_arm64_other.go | 29 + .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 1 + .../golang.org/x/sys/cpu/cpu_other_arm64.go | 6 +- .../golang.org/x/sys/cpu/cpu_windows_arm64.go | 42 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 174 +- .../x/sys/cpu/syscall_darwin_arm64_gc.go | 54 + .../golang.org/x/sys/plan9/syscall_plan9.go | 8 +- vendor/golang.org/x/sys/unix/ioctl_signed.go | 11 +- .../golang.org/x/sys/unix/ioctl_unsigned.go | 11 +- .../golang.org/x/sys/unix/syscall_solaris.go | 8 - vendor/golang.org/x/sys/unix/syscall_unix.go | 10 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 229 +- vendor/golang.org/x/sys/windows/aliases.go | 1 + 
.../golang.org/x/sys/windows/registry/key.go | 15 +- .../x/sys/windows/syscall_windows.go | 15 +- .../golang.org/x/sys/windows/types_windows.go | 85 + .../x/sys/windows/zsyscall_windows.go | 7 + vendor/golang.org/x/term/terminal.go | 28 +- .../golang.org/x/text/cases/tables10.0.0.go | 2255 -- .../golang.org/x/text/cases/tables11.0.0.go | 2316 -- .../golang.org/x/text/cases/tables12.0.0.go | 2359 --- .../golang.org/x/text/cases/tables15.0.0.go | 2 +- .../{tables13.0.0.go => tables17.0.0.go} | 1473 +- vendor/golang.org/x/text/cases/tables9.0.0.go | 2215 -- .../x/text/message/catalog/catalog.go | 2 +- .../golang.org/x/text/message/catalog/dict.go | 6 +- .../golang.org/x/text/message/catalog/go19.go | 15 - .../x/text/message/catalog/gopre19.go | 23 - .../x/text/secure/bidirule/bidirule.go | 4 + .../x/text/secure/bidirule/bidirule10.0.0.go | 11 - .../x/text/secure/bidirule/bidirule9.0.0.go | 14 - .../x/text/unicode/bidi/tables10.0.0.go | 1815 -- .../x/text/unicode/bidi/tables11.0.0.go | 1887 -- .../x/text/unicode/bidi/tables12.0.0.go | 1923 -- .../x/text/unicode/bidi/tables13.0.0.go | 1955 -- .../x/text/unicode/bidi/tables15.0.0.go | 2 +- .../x/text/unicode/bidi/tables17.0.0.go | 2135 ++ .../x/text/unicode/bidi/tables9.0.0.go | 1781 -- .../x/text/unicode/norm/forminfo.go | 26 +- .../x/text/unicode/norm/tables10.0.0.go | 7657 ------- .../x/text/unicode/norm/tables11.0.0.go | 7693 ------- .../x/text/unicode/norm/tables12.0.0.go | 7710 ------- .../x/text/unicode/norm/tables15.0.0.go | 2820 +-- .../norm/{tables13.0.0.go => tables17.0.0.go} | 6716 +++--- .../x/text/unicode/norm/tables9.0.0.go | 7637 ------- .../x/tools/go/ast/inspector/cursor.go | 44 +- .../x/tools/go/ast/inspector/inspector.go | 4 +- .../x/tools/go/ast/inspector/iter.go | 36 +- .../golang.org/x/tools/go/packages/golist.go | 33 +- .../x/tools/go/packages/packages.go | 19 +- .../x/tools/go/types/objectpath/objectpath.go | 16 +- .../x/tools/internal/aliases/aliases.go | 30 +- 
.../x/tools/internal/aliases/aliases_go122.go | 80 - .../x/tools/internal/event/core/event.go | 23 +- .../x/tools/internal/event/keys/keys.go | 439 +- .../x/tools/internal/event/label/label.go | 11 +- .../x/tools/internal/gcimporter/iexport.go | 11 +- .../x/tools/internal/gcimporter/iimport.go | 4 +- .../tools/internal/gcimporter/ureader_yes.go | 4 +- .../x/tools/internal/modindex/index.go | 11 +- .../x/tools/internal/stdlib/deps.go | 630 +- .../x/tools/internal/stdlib/manifest.go | 74 +- .../x/tools/internal/typeparams/free.go | 4 +- .../x/tools/internal/typesinternal/types.go | 3 +- .../api/googleapi/googleapi.go | 552 + .../google.golang.org/api/googleapi/types.go | 202 + .../api/internal/settings.go | 7 + .../internal/third_party/uritemplates/LICENSE | 27 + .../third_party/uritemplates/METADATA | 14 + .../third_party/uritemplates/uritemplates.go | 248 + .../third_party/uritemplates/utils.go | 17 + .../google.golang.org/api/internal/version.go | 2 +- .../option/internaloption/internaloption.go | 18 + .../option/internaloption/unsaferesolver.go | 106 + .../api/transport/http/dial.go | 1 + .../googleapis/api/annotations/client.pb.go | 926 +- .../api/annotations/field_behavior.pb.go | 2 +- .../api/annotations/field_info.pb.go | 2 +- .../googleapis/api/annotations/http.pb.go | 2 +- .../googleapis/api/annotations/resource.pb.go | 2 +- .../googleapis/api/annotations/routing.pb.go | 12 +- .../googleapis/api/httpbody/httpbody.pb.go | 2 +- .../googleapis/api/launch_stage.pb.go | 2 +- .../genproto/googleapis/rpc/code/code.pb.go | 336 + .../grpc/balancer/balancer.go | 2 - .../balancer/pickfirst/internal/internal.go | 2 + .../grpc/balancer/pickfirst/pickfirst.go | 57 +- .../grpc/balancer/subconn.go | 14 - .../grpc/balancer_wrapper.go | 6 +- vendor/google.golang.org/grpc/clientconn.go | 20 +- .../grpc/cmd/protoc-gen-go-grpc/main.go | 2 +- .../google.golang.org/grpc/credentials/tls.go | 6 +- .../grpc/encoding/encoding.go | 4 - .../grpc/encoding/gzip/gzip.go | 12 - 
.../grpc/experimental/stats/metrics.go | 69 +- vendor/google.golang.org/grpc/interceptor.go | 12 +- .../grpc/internal/balancer/weight/weight.go | 66 + .../grpc/internal/envconfig/envconfig.go | 22 + .../grpc/internal/experimental.go | 3 + .../grpc/internal/internal.go | 25 +- .../internal/resolver/dns/dns_resolver.go | 15 +- .../internal/stats/metrics_recorder_list.go | 60 + .../grpc/internal/transport/client_stream.go | 10 +- .../grpc/internal/transport/controlbuf.go | 24 +- .../grpc/internal/transport/http2_client.go | 25 +- .../grpc/internal/transport/http2_server.go | 94 +- .../grpc/internal/transport/transport.go | 29 +- .../google.golang.org/grpc/mem/buffer_pool.go | 2 +- vendor/google.golang.org/grpc/mem/buffers.go | 65 +- .../grpc/resolver/resolver.go | 1 + vendor/google.golang.org/grpc/server.go | 61 +- vendor/google.golang.org/grpc/stream.go | 19 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/k8s.io/klog/v2/README.md | 2 - .../klog/v2/internal/serialize/keyvalues.go | 232 +- .../internal/serialize/keyvalues_no_slog.go | 10 +- .../v2/internal/serialize/keyvalues_slog.go | 12 +- vendor/k8s.io/klog/v2/klog.go | 87 +- vendor/k8s.io/klog/v2/klogr.go | 4 +- vendor/k8s.io/klog/v2/klogr_slog.go | 11 +- vendor/k8s.io/klog/v2/textlogger/options.go | 18 + .../k8s.io/klog/v2/textlogger/textlogger.go | 53 +- .../kube-openapi/pkg/validation/spec/ref.go | 50 - vendor/k8s.io/utils/buffer/ring_fixed.go | 120 + vendor/k8s.io/utils/ptr/ptr.go | 2 +- vendor/modules.txt | 569 +- .../controller-runtime/.golangci.yml | 17 +- .../controller-runtime/.gomodcheck.yaml | 4 + .../sigs.k8s.io/controller-runtime/Makefile | 12 +- .../sigs.k8s.io/controller-runtime/README.md | 1 + .../sigs.k8s.io/controller-runtime/alias.go | 17 +- .../pkg/builder/controller.go | 2 +- .../controller-runtime/pkg/builder/webhook.go | 178 +- .../controller-runtime/pkg/cache/cache.go | 4 +- .../pkg/cache/delegating_by_gvk_cache.go | 6 +- .../pkg/cache/informer_cache.go | 29 +- 
.../pkg/cache/internal/cache_reader.go | 16 +- .../pkg/cache/internal/informers.go | 25 +- .../pkg/client/apiutil/apimachinery.go | 2 +- .../pkg/client/apiutil/errors.go | 4 +- .../controller-runtime/pkg/client/client.go | 60 + .../pkg/client/config/config.go | 6 - .../controller-runtime/pkg/client/dryrun.go | 4 + .../pkg/client/fake/client.go | 339 +- .../pkg/client/fake/versioned_tracker.go | 368 + .../pkg/client/fieldowner.go | 4 + .../pkg/client/fieldvalidation.go | 7 + .../pkg/client/interceptor/intercept.go | 8 + .../pkg/client/interfaces.go | 3 + .../pkg/client/namespaced_client.go | 49 +- .../controller-runtime/pkg/client/options.go | 19 + .../controller-runtime/pkg/client/patch.go | 11 +- .../pkg/client/typed_client.go | 33 + .../pkg/client/unstructured_client.go | 32 + .../controller-runtime/pkg/cluster/cluster.go | 28 +- .../pkg/cluster/internal.go | 5 + .../pkg/config/controller.go | 2 +- .../pkg/controller/controller.go | 6 +- .../controllerutil/controllerutil.go | 16 +- .../controller/priorityqueue/priorityqueue.go | 385 +- .../controller-runtime/pkg/healthz/healthz.go | 4 +- .../pkg/internal/controller/controller.go | 29 +- .../internal/controller/metrics/metrics.go | 10 + .../pkg/internal/metrics/workqueue.go | 48 +- .../pkg/internal/recorder/recorder.go | 106 +- .../pkg/internal/source/event_handler.go | 6 +- .../pkg/internal/source/kind.go | 11 +- .../pkg/leaderelection/leader_election.go | 11 +- .../controller-runtime/pkg/log/deleg.go | 10 +- .../controller-runtime/pkg/log/log.go | 2 +- .../controller-runtime/pkg/log/null.go | 6 +- .../pkg/manager/internal.go | 24 +- .../controller-runtime/pkg/manager/manager.go | 36 +- .../pkg/manager/signals/signal_posix.go | 1 - .../pkg/reconcile/reconcile.go | 7 +- .../pkg/recorder/recorder.go | 7 +- .../pkg/webhook/admission/decode.go | 2 +- .../pkg/webhook/admission/defaulter_custom.go | 75 +- .../pkg/webhook/admission/validator_custom.go | 60 +- .../controller-runtime/pkg/webhook/alias.go | 8 +- 
.../pkg/webhook/conversion/conversion.go | 17 +- .../webhook/conversion/conversion_hubspoke.go | 173 + .../webhook/conversion/conversion_registry.go | 57 + .../pkg/webhook/internal/metrics/metrics.go | 1 + .../apis/v1/backendtlspolicy_types.go | 58 +- .../gateway-api/apis/v1/gateway_types.go | 281 +- .../gateway-api/apis/v1/gatewayclass_types.go | 2 +- .../gateway-api/apis/v1/grpcroute_types.go | 4 +- .../gateway-api/apis/v1/httproute_types.go | 102 +- .../gateway-api/apis/v1/listenerset_types.go | 632 + .../apis/v1/object_reference_types.go | 4 +- .../apis/v1/referencegrant_types.go | 157 + .../gateway-api/apis/v1/shared_types.go | 37 +- .../gateway-api/apis/v1/tlsroute_types.go | 162 + .../apis/v1/zz_generated.deepcopy.go | 455 + .../apis/v1/zz_generated.register.go | 7 + .../v6/schema/elements.go | 47 +- .../structured-merge-diff/v6/typed/remove.go | 65 +- .../v6/value/reflectcache.go | 4 + 2183 files changed, 246873 insertions(+), 146991 deletions(-) create mode 100644 vendor/buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/LICENSE create mode 100644 vendor/buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/buf/validate/validate.pb.go create mode 100644 vendor/buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/buf/validate/validate_protoopaque.pb.go create mode 100644 vendor/connectrpc.com/connect/.gitignore create mode 100644 vendor/connectrpc.com/connect/.golangci.yml create mode 100644 vendor/connectrpc.com/connect/LICENSE create mode 100644 vendor/connectrpc.com/connect/MAINTAINERS.md create mode 100644 vendor/connectrpc.com/connect/Makefile create mode 100644 vendor/connectrpc.com/connect/README.md create mode 100644 vendor/connectrpc.com/connect/RELEASE.md create mode 100644 vendor/connectrpc.com/connect/SECURITY.md create mode 100644 vendor/connectrpc.com/connect/buf.gen.yaml create mode 100644 vendor/connectrpc.com/connect/buf.yaml create mode 100644 vendor/connectrpc.com/connect/buffer_pool.go create mode 100644 
vendor/connectrpc.com/connect/client.go create mode 100644 vendor/connectrpc.com/connect/client_stream.go create mode 100644 vendor/connectrpc.com/connect/code.go create mode 100644 vendor/connectrpc.com/connect/codec.go create mode 100644 vendor/connectrpc.com/connect/compression.go create mode 100644 vendor/connectrpc.com/connect/connect.go create mode 100644 vendor/connectrpc.com/connect/context.go create mode 100644 vendor/connectrpc.com/connect/duplex_http_call.go create mode 100644 vendor/connectrpc.com/connect/envelope.go create mode 100644 vendor/connectrpc.com/connect/error.go create mode 100644 vendor/connectrpc.com/connect/error_writer.go create mode 100644 vendor/connectrpc.com/connect/handler.go create mode 100644 vendor/connectrpc.com/connect/handler_stream.go create mode 100644 vendor/connectrpc.com/connect/header.go create mode 100644 vendor/connectrpc.com/connect/idempotency_level.go create mode 100644 vendor/connectrpc.com/connect/interceptor.go create mode 100644 vendor/connectrpc.com/connect/internal/gen/connectext/grpc/status/v1/status.pb.go create mode 100644 vendor/connectrpc.com/connect/option.go create mode 100644 vendor/connectrpc.com/connect/protobuf_util.go create mode 100644 vendor/connectrpc.com/connect/protocol.go create mode 100644 vendor/connectrpc.com/connect/protocol_connect.go create mode 100644 vendor/connectrpc.com/connect/protocol_grpc.go create mode 100644 vendor/connectrpc.com/connect/recover.go create mode 100644 vendor/cuelang.org/go/encoding/json/pointer.go create mode 100644 vendor/cuelang.org/go/encoding/jsonschema/generate.go create mode 100644 vendor/cuelang.org/go/encoding/jsonschema/generate_items.go delete mode 100644 vendor/cuelang.org/go/encoding/jsonschema/pointer.go create mode 100644 vendor/cuelang.org/go/internal/anyunique/unique.go create mode 100644 vendor/cuelang.org/go/internal/core/adt/arctype_string.go delete mode 100644 vendor/cuelang.org/go/internal/core/adt/closed2.go create mode 100644 
vendor/cuelang.org/go/internal/core/adt/defidtype_string.go create mode 100644 vendor/cuelang.org/go/internal/core/adt/flags.go delete mode 100644 vendor/cuelang.org/go/internal/core/adt/optional.go create mode 100644 vendor/cuelang.org/go/internal/core/adt/weakmap.go create mode 100644 vendor/cuelang.org/go/internal/core/format/printer.go rename vendor/cuelang.org/go/{cue/ast/astutil/walk.go => internal/core/layer/layer.go} (53%) delete mode 100644 vendor/cuelang.org/go/internal/core/runtime/errors.go delete mode 100644 vendor/cuelang.org/go/internal/core/runtime/resolve.go create mode 100644 vendor/cuelang.org/go/internal/encoding/json/patch.go rename vendor/cuelang.org/go/{cue/builtin.go => internal/iterutil/iter.go} (69%) create mode 100644 vendor/cuelang.org/go/pkg/encoding/openapi/openapi.cue create mode 100644 vendor/cuelang.org/go/pkg/encoding/openapi/openapi.go create mode 100644 vendor/cuelang.org/go/pkg/encoding/openapi/pkg.go create mode 100644 vendor/cuelang.org/go/pkg/tool/http/serve.go create mode 100644 vendor/cuelang.org/go/tools/flow/cycle.go create mode 100644 vendor/cuelang.org/go/tools/flow/flow.go create mode 100644 vendor/cuelang.org/go/tools/flow/run.go create mode 100644 vendor/cuelang.org/go/tools/flow/state_string.go create mode 100644 vendor/cuelang.org/go/tools/flow/tasks.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/aliyuncli.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/aliyuncli_configuration.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/aliyuncli_profile.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/file_cache.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ini.go create mode 100644 
vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ini_provider.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/remote_provider.go rename vendor/github.com/{oklog/ulid => AliyunContainerService/ack-ram-tool/pkg/ecsmetadata}/LICENSE (100%) create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/base.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/client.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/disk.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/dynamic.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/errors.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/instance.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/interfaces.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/net.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/ram.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/retry.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/test.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/userdata.go create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/utils.go delete mode 100644 vendor/github.com/alibabacloud-go/debug/debug/assert.go create mode 100644 vendor/github.com/alibabacloud-go/tea-utils/v2/LICENSE create mode 100644 vendor/github.com/alibabacloud-go/tea-utils/v2/service/service.go create mode 100644 vendor/github.com/aliyun/credentials-go/.gitignore create mode 100644 vendor/github.com/aliyun/credentials-go/.scrutinizer.yml create mode 
100644 vendor/github.com/aliyun/credentials-go/CONTRIBUTING.md create mode 100644 vendor/github.com/aliyun/credentials-go/README-CN.md create mode 100644 vendor/github.com/aliyun/credentials-go/README.md delete mode 100644 vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/doc.go rename vendor/github.com/aliyun/credentials-go/credentials/{ecs_ram_role.go => ecs_ram_role_credentials_provider.go} (54%) create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/internal/http/http.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/internal/utils/path.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/internal/utils/runtime.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/internal/utils/utils.go delete mode 100644 vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go delete mode 100644 vendor/github.com/aliyun/credentials-go/credentials/oidc_token create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/cli_profile.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/cloud_sso.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/credentials.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/default.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/ecs_ram_role.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/env.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/external.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/hook.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/lock_unix.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/lock_windows.go create mode 
100644 vendor/github.com/aliyun/credentials-go/credentials/providers/oauth.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/oidc.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/profile.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/ram_role_arn.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/static_ak.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/static_sts.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/uri.go rename vendor/github.com/aliyun/credentials-go/credentials/{sts_role_arn_credential.go => ram_role_arn_credentials_provider.go} (70%) create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/request/doc.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/response/doc.go rename vendor/github.com/aliyun/credentials-go/credentials/{rsa_key_pair_credential.go => rsa_key_pair_credentials_provider.go} (82%) delete mode 100644 vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/utils/doc.go create mode 100644 vendor/github.com/aliyun/credentials-go/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteSigningConfiguration.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeregisterPullTimeUpdateExclusion.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageSigningStatus.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetSigningConfiguration.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImageReferrers.go create mode 100644 
vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListPullTimeUpdateExclusions.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutSigningConfiguration.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_RegisterPullTimeUpdateExclusion.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdateImageStorageClass.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/eventstream_middleware.go create mode 100644 vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/url_redactor.go create mode 100644 vendor/github.com/buildkite/agent/v3/api/pings_streaming.go create mode 100644 vendor/github.com/buildkite/agent/v3/api/proto/gen/BUILD.bazel create mode 100644 vendor/github.com/buildkite/agent/v3/api/proto/gen/agentedge.pb.go create mode 100644 vendor/github.com/buildkite/agent/v3/api/proto/gen/agentedgev1connect/BUILD.bazel create mode 100644 vendor/github.com/buildkite/agent/v3/api/proto/gen/agentedgev1connect/agentedge.connect.go create mode 100644 vendor/github.com/buildkite/agent/v3/api/token.go create mode 100644 vendor/github.com/buildkite/go-pipeline/secret.go create mode 100644 vendor/github.com/buildkite/go-pipeline/secrets.go create mode 100644 vendor/github.com/clipperhouse/displaywidth/options.go delete mode 100644 vendor/github.com/clipperhouse/displaywidth/tables.go create mode 100644 vendor/github.com/clipperhouse/displaywidth/truncate.go delete mode 100644 vendor/github.com/clipperhouse/stringish/.gitignore delete mode 100644 vendor/github.com/clipperhouse/stringish/LICENSE delete mode 100644 vendor/github.com/clipperhouse/stringish/README.md delete mode 100644 vendor/github.com/clipperhouse/stringish/interface.go create mode 100644 vendor/github.com/clipperhouse/uax29/v2/graphemes/ansi.go create mode 100644 vendor/github.com/clipperhouse/uax29/v2/graphemes/ansi8.go delete mode 100644 
vendor/github.com/clipperhouse/uax29/v2/internal/iterators/iterator.go create mode 100644 vendor/github.com/digitorus/pkcs7/.golangci.yml delete mode 100644 vendor/github.com/digitorus/pkcs7/verify_test_dsa.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/decode_map_utils.go delete mode 100644 vendor/github.com/go-ini/ini/.editorconfig delete mode 100644 vendor/github.com/go-ini/ini/.gitignore delete mode 100644 vendor/github.com/go-ini/ini/.golangci.yml delete mode 100644 vendor/github.com/go-ini/ini/LICENSE delete mode 100644 vendor/github.com/go-ini/ini/Makefile delete mode 100644 vendor/github.com/go-ini/ini/README.md delete mode 100644 vendor/github.com/go-ini/ini/codecov.yml delete mode 100644 vendor/github.com/go-ini/ini/data_source.go delete mode 100644 vendor/github.com/go-ini/ini/deprecated.go delete mode 100644 vendor/github.com/go-ini/ini/error.go delete mode 100644 vendor/github.com/go-ini/ini/file.go delete mode 100644 vendor/github.com/go-ini/ini/helper.go delete mode 100644 vendor/github.com/go-ini/ini/ini.go delete mode 100644 vendor/github.com/go-ini/ini/key.go delete mode 100644 vendor/github.com/go-ini/ini/parser.go delete mode 100644 vendor/github.com/go-ini/ini/section.go delete mode 100644 vendor/github.com/go-ini/ini/struct.go create mode 100644 vendor/github.com/go-openapi/analysis/.editorconfig create mode 100644 vendor/github.com/go-openapi/analysis/CONTRIBUTORS.md create mode 100644 vendor/github.com/go-openapi/analysis/SECURITY.md create mode 100644 vendor/github.com/go-openapi/analysis/go.work create mode 100644 vendor/github.com/go-openapi/analysis/go.work.sum delete mode 100644 vendor/github.com/go-openapi/errors/.cliff.toml delete mode 100644 vendor/github.com/go-openapi/jsonreference/.cliff.toml delete mode 100644 vendor/github.com/go-openapi/loads/.travis.yml create mode 100644 vendor/github.com/go-openapi/loads/CONTRIBUTORS.md create mode 100644 vendor/github.com/go-openapi/loads/SECURITY.md create mode 100644 
vendor/github.com/go-openapi/runtime/NOTICE create mode 100644 vendor/github.com/go-openapi/runtime/SECURITY.md create mode 100644 vendor/github.com/go-openapi/runtime/doc.go delete mode 100644 vendor/github.com/go-openapi/spec/.cliff.toml create mode 100644 vendor/github.com/go-openapi/strfmt/.codecov.yml create mode 100644 vendor/github.com/go-openapi/strfmt/CONTRIBUTORS.md create mode 100644 vendor/github.com/go-openapi/strfmt/SECURITY.md create mode 100644 vendor/github.com/go-openapi/strfmt/go.work create mode 100644 vendor/github.com/go-openapi/strfmt/go.work.sum create mode 100644 vendor/github.com/go-openapi/strfmt/internal/bsonlite/codec.go create mode 100644 vendor/github.com/go-openapi/strfmt/internal/bsonlite/lite.go create mode 100644 vendor/github.com/go-openapi/swag/CONTRIBUTORS.md create mode 100644 vendor/github.com/go-openapi/validate/CONTRIBUTORS.md create mode 100644 vendor/github.com/go-openapi/validate/SECURITY.md create mode 100644 vendor/github.com/go-viper/mapstructure/v2/devenv.lock create mode 100644 vendor/github.com/go-viper/mapstructure/v2/devenv.nix create mode 100644 vendor/github.com/go-viper/mapstructure/v2/devenv.yaml delete mode 100644 vendor/github.com/go-viper/mapstructure/v2/flake.lock delete mode 100644 vendor/github.com/go-viper/mapstructure/v2/flake.nix create mode 100644 vendor/github.com/godbus/dbus/v5/.cirrus.yml create mode 100644 vendor/github.com/godbus/dbus/v5/.golangci.yml create mode 100644 vendor/github.com/godbus/dbus/v5/SECURITY.md create mode 100644 vendor/github.com/godbus/dbus/v5/auth_default_other.go create mode 100644 vendor/github.com/godbus/dbus/v5/auth_default_windows.go rename vendor/github.com/godbus/dbus/v5/{auth_sha1.go => auth_sha1_windows.go} (95%) delete mode 100644 vendor/github.com/godbus/dbus/v5/homedir.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json create mode 100644 vendor/github.com/googleapis/gax-go/v2/CHANGES.md create mode 100644 
vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto create mode 100644 vendor/github.com/googleapis/gax-go/v2/call_option.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/content_type.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/feature.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/gax.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/header.go rename vendor/{cuelang.org/go/internal/core/adt/dev.go => github.com/googleapis/gax-go/v2/internal/version.go} (56%) create mode 100644 vendor/github.com/googleapis/gax-go/v2/invoke.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/release-please-config.json create mode 100644 vendor/github.com/googleapis/gax-go/v2/telemetry.go create mode 100644 vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore create mode 100644 vendor/github.com/lestrrat-go/dsig-secp256k1/Changes rename vendor/github.com/lestrrat-go/{option => dsig-secp256k1}/LICENSE (97%) create mode 100644 vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go create mode 100644 vendor/github.com/lestrrat-go/dsig/.gitignore create mode 100644 vendor/github.com/lestrrat-go/dsig/Changes rename vendor/github.com/{sourcegraph/conc => lestrrat-go/dsig}/LICENSE (97%) create mode 100644 vendor/github.com/lestrrat-go/dsig/README.md create mode 100644 
vendor/github.com/lestrrat-go/dsig/algorithms.go create mode 100644 vendor/github.com/lestrrat-go/dsig/crypto_signer.go create mode 100644 vendor/github.com/lestrrat-go/dsig/dsig.go create mode 100644 vendor/github.com/lestrrat-go/dsig/ecdsa.go create mode 100644 vendor/github.com/lestrrat-go/dsig/eddsa.go create mode 100644 vendor/github.com/lestrrat-go/dsig/hmac.go create mode 100644 vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go create mode 100644 vendor/github.com/lestrrat-go/dsig/rsa.go create mode 100644 vendor/github.com/lestrrat-go/dsig/sign.go create mode 100644 vendor/github.com/lestrrat-go/dsig/validation.go create mode 100644 vendor/github.com/lestrrat-go/dsig/verify.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go delete mode 100644 vendor/github.com/lestrrat-go/option/.gitignore delete mode 100644 vendor/github.com/lestrrat-go/option/README.md delete mode 100644 vendor/github.com/lestrrat-go/option/option.go delete mode 100644 vendor/github.com/oklog/ulid/.travis.yml delete mode 100644 vendor/github.com/oklog/ulid/Gopkg.lock delete mode 100644 vendor/github.com/oklog/ulid/Gopkg.toml rename vendor/github.com/oklog/ulid/{ => v2}/.gitignore (100%) rename vendor/github.com/oklog/ulid/{ => v2}/AUTHORS.md (100%) rename vendor/github.com/oklog/ulid/{ => v2}/CHANGELOG.md (100%) rename vendor/github.com/oklog/ulid/{ => v2}/CONTRIBUTING.md (100%) rename vendor/{go.mongodb.org/mongo-driver => github.com/oklog/ulid/v2}/LICENSE (100%) rename vendor/github.com/oklog/ulid/{ => v2}/README.md (55%) rename vendor/github.com/oklog/ulid/{ => v2}/ulid.go (73%) create mode 100644 vendor/github.com/olekukonko/ll/.goreleaser.yaml create mode 100644 vendor/github.com/olekukonko/ll/Makefile create mode 100644 vendor/github.com/olekukonko/ll/comb.hcl create mode 100644 vendor/github.com/olekukonko/ll/dbg.go create mode 100644 vendor/github.com/olekukonko/ll/lh/colorized_unix.go create mode 100644 
vendor/github.com/olekukonko/ll/lh/colorized_windows.go create mode 100644 vendor/github.com/olekukonko/ll/lh/dedup.go create mode 100644 vendor/github.com/olekukonko/ll/lh/lh.go create mode 100644 vendor/github.com/olekukonko/ll/lh/pipe.go create mode 100644 vendor/github.com/olekukonko/ll/lh/rotate.go create mode 100644 vendor/github.com/olekukonko/ll/lx/field.go create mode 100644 vendor/github.com/olekukonko/ll/lx/interface.go rename vendor/github.com/olekukonko/ll/lx/{ns.go => namespace.go} (72%) create mode 100644 vendor/github.com/olekukonko/ll/lx/types.go create mode 100644 vendor/github.com/olekukonko/ll/options.go create mode 100644 vendor/github.com/olekukonko/ll/since.go create mode 100644 vendor/github.com/olekukonko/ll/writer.go delete mode 100644 vendor/github.com/olekukonko/tablewriter/benchstat.txt create mode 100644 vendor/github.com/olekukonko/tablewriter/comb.hcl delete mode 100644 vendor/github.com/olekukonko/tablewriter/new.txt delete mode 100644 vendor/github.com/olekukonko/tablewriter/old.txt create mode 100644 vendor/github.com/olekukonko/tablewriter/pkg/twwidth/cache.go create mode 100644 vendor/github.com/olekukonko/tablewriter/pkg/twwidth/ea.go create mode 100644 vendor/github.com/olekukonko/tablewriter/pkg/twwidth/tab.go create mode 100644 vendor/github.com/olekukonko/tablewriter/renderer/tint.go create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.10.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.11.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.11.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.12.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.12.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.12.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.12.3.json create mode 100644 
vendor/github.com/open-policy-agent/opa/capabilities/v1.13.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.13.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.13.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.14.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.14.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.15.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.15.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json delete mode 100644 vendor/github.com/open-policy-agent/opa/internal/config/config.go delete mode 100644 vendor/github.com/open-policy-agent/opa/internal/report/report.go delete mode 100644 vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go delete mode 100644 vendor/github.com/open-policy-agent/opa/internal/strvals/doc.go delete mode 100644 vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/index_debug.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/performance.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/policy_appenders.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/slices.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/string_length.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/term_appenders.go delete mode 100644 vendor/github.com/open-policy-agent/opa/v1/config/config.go delete mode 100644 vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/logging/buffered_logger.go delete mode 100644 vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go delete mode 100644 vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go delete mode 
100644 vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go delete mode 100644 vendor/github.com/open-policy-agent/opa/v1/plugins/rest/azure.go delete mode 100644 vendor/github.com/open-policy-agent/opa/v1/plugins/rest/gcp.go delete mode 100644 vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/sink.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/template_string.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/strings.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_image_policy.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_insights.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_kmsencryption.go create mode 100644 vendor/github.com/pelletier/go-toml/v2/AGENTS.md delete mode 100644 vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go delete mode 100644 vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go create mode 100644 vendor/github.com/pelletier/go-toml/v2/test-go-versions.sh create mode 100644 vendor/github.com/protocolbuffers/txtpbfmt/descriptor/descriptor.go create mode 100644 vendor/github.com/sagikazarmark/locafero/devenv.lock create mode 100644 vendor/github.com/sagikazarmark/locafero/devenv.nix create mode 100644 vendor/github.com/sagikazarmark/locafero/devenv.yaml delete mode 100644 vendor/github.com/sagikazarmark/locafero/flake.lock delete mode 100644 vendor/github.com/sagikazarmark/locafero/flake.nix create mode 100644 vendor/github.com/sagikazarmark/locafero/internal/queue/eager.go create mode 100644 vendor/github.com/sagikazarmark/locafero/internal/queue/queue.go delete mode 100644 vendor/github.com/sourcegraph/conc/.golangci.yml delete mode 100644 vendor/github.com/sourcegraph/conc/Makefile delete mode 100644 
vendor/github.com/sourcegraph/conc/README.md delete mode 100644 vendor/github.com/sourcegraph/conc/panics/panics.go delete mode 100644 vendor/github.com/sourcegraph/conc/panics/try.go delete mode 100644 vendor/github.com/sourcegraph/conc/pool/context_pool.go delete mode 100644 vendor/github.com/sourcegraph/conc/pool/error_pool.go delete mode 100644 vendor/github.com/sourcegraph/conc/pool/pool.go delete mode 100644 vendor/github.com/sourcegraph/conc/pool/result_context_pool.go delete mode 100644 vendor/github.com/sourcegraph/conc/pool/result_error_pool.go delete mode 100644 vendor/github.com/sourcegraph/conc/pool/result_pool.go delete mode 100644 vendor/github.com/sourcegraph/conc/waitgroup.go delete mode 100644 vendor/github.com/stoewer/go-strcase/.gitignore delete mode 100644 vendor/github.com/stoewer/go-strcase/.golangci.yml delete mode 100644 vendor/github.com/stoewer/go-strcase/LICENSE delete mode 100644 vendor/github.com/stoewer/go-strcase/README.md delete mode 100644 vendor/github.com/stoewer/go-strcase/camel.go delete mode 100644 vendor/github.com/stoewer/go-strcase/doc.go delete mode 100644 vendor/github.com/stoewer/go-strcase/helper.go delete mode 100644 vendor/github.com/stoewer/go-strcase/kebab.go delete mode 100644 vendor/github.com/stoewer/go-strcase/snake.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/AGENTS.md create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/admin_compliance_policy_settings.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/attestations.go delete mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/commitlint.config.mjs create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/docker-compose.yml create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/group_credentials.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/group_integrations.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/group_protected_branches.go create mode 100644 
vendor/gitlab.com/gitlab-org/api/client-go/group_relations_export.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/merge_request_context_commits.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/model_registry.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/project_aliases.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/project_statistics.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/protected_packages.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/request_handler.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/runner_controller_scopes.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/runner_controller_tokens.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/runner_controllers.go create mode 100644 vendor/gitlab.com/gitlab-org/api/client-go/workitems.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bson.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go delete mode 100644 
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go delete mode 100644 
vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/mode.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/decoder.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/encoder.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/marshal.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_element.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_value.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/registry.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/types.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_arraybuilder.go delete mode 100644 
vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/tables.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md create mode 100644 
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/errorhandler/errorhandler.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.40.0/MIGRATION.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.40.0/README.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.40.0/attribute_group.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.40.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.40.0/error_type.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.40.0/exception.go rename vendor/go.opentelemetry.io/otel/semconv/{v1.37.0 => v1.40.0}/httpconv/metric.go (94%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.37.0 => v1.40.0}/otelconv/metric.go (96%) create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.40.0/schema.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/fips140only_compat.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/fips140only_go1.26.go create mode 100644 vendor/golang.org/x/net/http2/client_priority_go126.go create mode 100644 vendor/golang.org/x/net/http2/client_priority_go127.go create mode 100644 vendor/golang.org/x/net/internal/httpsfv/httpsfv.go create mode 100644 vendor/golang.org/x/sys/cpu/asm_darwin_arm64_gc.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_darwin_arm64_other.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_windows_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_darwin_arm64_gc.go delete mode 100644 vendor/golang.org/x/text/cases/tables10.0.0.go delete mode 100644 vendor/golang.org/x/text/cases/tables11.0.0.go delete mode 100644 vendor/golang.org/x/text/cases/tables12.0.0.go rename 
vendor/golang.org/x/text/cases/{tables13.0.0.go => tables17.0.0.go} (60%) delete mode 100644 vendor/golang.org/x/text/cases/tables9.0.0.go delete mode 100644 vendor/golang.org/x/text/message/catalog/go19.go delete mode 100644 vendor/golang.org/x/text/message/catalog/gopre19.go delete mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go delete mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables17.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go rename vendor/golang.org/x/text/unicode/norm/{tables13.0.0.go => tables17.0.0.go} (53%) delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go delete mode 100644 vendor/golang.org/x/tools/internal/aliases/aliases_go122.go create mode 100644 vendor/google.golang.org/api/googleapi/googleapi.go create mode 100644 vendor/google.golang.org/api/googleapi/types.go create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go create mode 100644 vendor/google.golang.org/api/option/internaloption/unsaferesolver.go create mode 100644 
vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go create mode 100644 vendor/google.golang.org/grpc/internal/balancer/weight/weight.go create mode 100644 vendor/k8s.io/utils/buffer/ring_fixed.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/versioned_tracker.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion_hubspoke.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion_registry.go create mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1/listenerset_types.go create mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1/referencegrant_types.go create mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1/tlsroute_types.go diff --git a/go.mod b/go.mod index 0a2ef19f0c..ac5c0b3df9 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module sigs.k8s.io/security-profiles-operator -go 1.25.0 +go 1.25.7 // TODO: remove when https://github.com/maxbrunsfeld/counterfeiter/issues/344 // got resolved. 
@@ -11,10 +11,10 @@ require ( github.com/acobaugh/osrelease v0.1.0 github.com/aquasecurity/libbpfgo v0.9.2-libbpf-1.5.1 github.com/blang/semver/v4 v4.0.0 - github.com/cert-manager/cert-manager v1.19.2 + github.com/cert-manager/cert-manager v1.20.1 github.com/go-logr/logr v1.4.3 github.com/google/go-cmp v0.7.0 - github.com/google/go-containerregistry v0.20.7 + github.com/google/go-containerregistry v0.21.3 github.com/google/uuid v1.6.0 github.com/hairyhenderson/go-which v0.2.2 github.com/jellydator/ttlcache/v3 v3.4.0 @@ -22,191 +22,194 @@ require ( github.com/mogensen/kubernetes-split-yaml v0.4.0 github.com/nxadm/tail v1.4.11 github.com/opencontainers/image-spec v1.1.1 - github.com/opencontainers/runc v1.4.0 + github.com/opencontainers/runc v1.4.1 github.com/opencontainers/runtime-spec v1.3.0 - github.com/openshift/api v0.0.0-20241204141306-c1fdeb0788c1 + github.com/openshift/api v0.0.0-20260330162214-96f1f5ac7ff2 github.com/pjbgf/go-apparmor v0.1.3-0.20241107184909-1375e5e7aa89 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.88.1 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.90.1 github.com/prometheus/client_golang v1.23.2 github.com/prometheus/client_model v0.6.2 github.com/seccomp/libseccomp-golang v0.11.1 github.com/sigstore/cosign/v2 v2.6.2 github.com/stretchr/testify v1.11.1 github.com/urfave/cli/v2 v2.27.7 - go.podman.io/common v0.66.1 - golang.org/x/mod v0.32.0 - golang.org/x/sync v0.19.0 + go.podman.io/common v0.67.0 + golang.org/x/mod v0.34.0 + golang.org/x/sync v0.20.0 gomodules.xyz/jsonpatch/v2 v2.5.0 - google.golang.org/grpc v1.78.0 - google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.0 + google.golang.org/grpc v1.79.3 + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.1 google.golang.org/protobuf v1.36.11 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.35.0 - k8s.io/apimachinery v0.35.0 - k8s.io/cli-runtime v0.35.0 - k8s.io/client-go 
v0.35.0 - k8s.io/klog/v2 v2.130.1 + k8s.io/api v0.35.3 + k8s.io/apimachinery v0.35.3 + k8s.io/cli-runtime v0.35.3 + k8s.io/client-go v0.35.3 + k8s.io/klog/v2 v2.140.0 oras.land/oras-go/v2 v2.6.0 - sigs.k8s.io/controller-runtime v0.22.4 + sigs.k8s.io/controller-runtime v0.23.3 sigs.k8s.io/controller-tools v0.20.0 sigs.k8s.io/release-utils v0.12.3 sigs.k8s.io/yaml v1.6.0 ) require ( - cel.dev/expr v0.24.0 // indirect - cloud.google.com/go/auth v0.18.0 // indirect + buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1 // indirect + cel.dev/expr v0.25.1 // indirect + cloud.google.com/go/auth v0.19.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect - cuelabs.dev/go/oci/ociregistry v0.0.0-20250715075730-49cab49c8e9d // indirect - cuelang.org/go v0.14.1 // indirect - cyphar.com/go-pathrs v0.2.1 // indirect - github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect + connectrpc.com/connect v1.19.1 // indirect + cuelabs.dev/go/oci/ociregistry v0.0.0-20251212221603-3adeb8663819 // indirect + cuelang.org/go v0.16.0 // indirect + cyphar.com/go-pathrs v0.2.4 // indirect + github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.20.0 // indirect + github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata v0.0.10 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.29 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect 
- github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/Azure/go-autorest/autorest v0.11.30 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.1 // indirect + github.com/Azure/go-autorest/logger v0.2.2 // indirect + github.com/Azure/go-autorest/tracing v0.6.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ThalesIgnite/crypto11 v1.2.5 // indirect github.com/agnivade/levenshtein v1.2.1 // indirect - github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect + github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.5 // indirect github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect github.com/alibabacloud-go/cr-20181201 v1.0.10 // indirect github.com/alibabacloud-go/darabonba-openapi v0.2.1 // indirect - github.com/alibabacloud-go/debug v1.0.0 // indirect + github.com/alibabacloud-go/debug v1.0.1 // indirect github.com/alibabacloud-go/endpoint-util v1.1.1 // indirect - github.com/alibabacloud-go/openapi-util v0.1.0 // indirect - github.com/alibabacloud-go/tea v1.2.1 // indirect + github.com/alibabacloud-go/openapi-util v0.1.2 // indirect + github.com/alibabacloud-go/tea v1.4.0 // indirect github.com/alibabacloud-go/tea-utils v1.4.5 // indirect + github.com/alibabacloud-go/tea-utils/v2 v2.0.9 // indirect github.com/alibabacloud-go/tea-xml v1.1.3 // indirect - github.com/aliyun/credentials-go v1.3.2 // indirect + github.com/aliyun/credentials-go v1.4.12 // indirect github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2 v1.41.0 // indirect - github.com/aws/aws-sdk-go-v2/config v1.32.5 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.19.5 // indirect - 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect - github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect - github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.30.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect - github.com/aws/smithy-go v1.24.0 // indirect - github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.10.1 // indirect + github.com/aws/aws-sdk-go-v2 v1.41.5 // indirect + github.com/aws/aws-sdk-go-v2/config v1.32.13 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.19.13 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.56.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.38.13 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.14 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.18 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.10 // indirect + 
github.com/aws/smithy-go v1.24.2 // indirect + github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.12.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect - github.com/buildkite/agent/v3 v3.104.0 // indirect - github.com/buildkite/go-pipeline v0.15.0 // indirect + github.com/buildkite/agent/v3 v3.121.0 // indirect + github.com/buildkite/go-pipeline v0.16.0 // indirect github.com/buildkite/interpolate v0.1.5 // indirect github.com/buildkite/roko v1.4.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect github.com/clbanning/mxj/v2 v2.7.0 // indirect - github.com/clipperhouse/displaywidth v0.6.0 // indirect - github.com/clipperhouse/stringish v0.1.1 // indirect - github.com/clipperhouse/uax29/v2 v2.3.0 // indirect - github.com/cockroachdb/apd/v3 v3.2.1 // indirect + github.com/clipperhouse/displaywidth v0.11.0 // indirect + github.com/clipperhouse/uax29/v2 v2.7.0 // indirect + github.com/cockroachdb/apd/v3 v3.2.3 // indirect github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect - github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.2 // indirect github.com/coreos/go-oidc/v3 v3.17.0 // indirect - github.com/coreos/go-systemd/v22 v22.6.0 // indirect + github.com/coreos/go-systemd/v22 v22.7.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect github.com/cyphar/filepath-securejoin v0.6.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect - github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect - github.com/digitorus/timestamp 
v0.0.0-20231217203849-220c5c2851b7 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1 // indirect + github.com/digitorus/pkcs7 v0.0.0-20250730155240-ffadbf3f398c // indirect + github.com/digitorus/timestamp v0.0.0-20250524132541-c45532741eea // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/cli v29.0.3+incompatible // indirect + github.com/docker/cli v29.3.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker-credential-helpers v0.9.4 // indirect + github.com/docker/docker-credential-helpers v0.9.5 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/go-restful/v3 v3.13.0 // indirect - github.com/emicklei/proto v1.14.2 // indirect + github.com/emicklei/proto v1.14.3 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/fatih/color v1.18.0 // indirect + github.com/fatih/color v1.19.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/fxamacker/cbor/v2 v2.9.0 // indirect - github.com/go-chi/chi/v5 v5.2.4 // indirect - github.com/go-ini/ini v1.67.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.1 // indirect + github.com/go-chi/chi/v5 v5.2.5 // indirect github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.24.1 // indirect - github.com/go-openapi/errors v0.22.6 // indirect - github.com/go-openapi/jsonpointer v0.22.4 // indirect - github.com/go-openapi/jsonreference v0.21.4 // indirect - github.com/go-openapi/loads v0.23.2 // indirect - github.com/go-openapi/runtime v0.29.2 // indirect - github.com/go-openapi/spec v0.22.3 // indirect - github.com/go-openapi/strfmt v0.25.0 // indirect - github.com/go-openapi/swag v0.25.4 // indirect - github.com/go-openapi/swag/cmdutils v0.25.4 // indirect - github.com/go-openapi/swag/conv v0.25.4 // indirect - 
github.com/go-openapi/swag/fileutils v0.25.4 // indirect - github.com/go-openapi/swag/jsonname v0.25.4 // indirect - github.com/go-openapi/swag/jsonutils v0.25.4 // indirect - github.com/go-openapi/swag/loading v0.25.4 // indirect - github.com/go-openapi/swag/mangling v0.25.4 // indirect - github.com/go-openapi/swag/netutils v0.25.4 // indirect - github.com/go-openapi/swag/stringutils v0.25.4 // indirect - github.com/go-openapi/swag/typeutils v0.25.4 // indirect - github.com/go-openapi/swag/yamlutils v0.25.4 // indirect - github.com/go-openapi/validate v0.25.1 // indirect - github.com/go-piv/piv-go/v2 v2.4.0 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/go-openapi/analysis v0.25.0 // indirect + github.com/go-openapi/errors v0.22.7 // indirect + github.com/go-openapi/jsonpointer v0.22.5 // indirect + github.com/go-openapi/jsonreference v0.21.5 // indirect + github.com/go-openapi/loads v0.23.3 // indirect + github.com/go-openapi/runtime v0.29.3 // indirect + github.com/go-openapi/spec v0.22.4 // indirect + github.com/go-openapi/strfmt v0.26.1 // indirect + github.com/go-openapi/swag v0.25.5 // indirect + github.com/go-openapi/swag/cmdutils v0.25.5 // indirect + github.com/go-openapi/swag/conv v0.25.5 // indirect + github.com/go-openapi/swag/fileutils v0.25.5 // indirect + github.com/go-openapi/swag/jsonname v0.25.5 // indirect + github.com/go-openapi/swag/jsonutils v0.25.5 // indirect + github.com/go-openapi/swag/loading v0.25.5 // indirect + github.com/go-openapi/swag/mangling v0.25.5 // indirect + github.com/go-openapi/swag/netutils v0.25.5 // indirect + github.com/go-openapi/swag/stringutils v0.25.5 // indirect + github.com/go-openapi/swag/typeutils v0.25.5 // indirect + github.com/go-openapi/swag/yamlutils v0.25.5 // indirect + github.com/go-openapi/validate v0.25.2 // indirect + github.com/go-piv/piv-go/v2 v2.5.0 // indirect + github.com/go-viper/mapstructure/v2 v2.5.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect 
github.com/gobwas/glob v0.2.3 // indirect - github.com/goccy/go-json v0.10.3 // indirect - github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/goccy/go-json v0.10.6 // indirect + github.com/godbus/dbus/v5 v5.2.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect - github.com/golang/snappy v0.0.4 // indirect + github.com/golang/snappy v1.0.0 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.26.1 // indirect - github.com/google/certificate-transparency-go v1.3.2 // indirect - github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/cel-go v0.27.0 // indirect + github.com/google/certificate-transparency-go v1.3.3 // indirect + github.com/google/gnostic-models v0.7.1 // indirect github.com/google/go-github/v73 v73.0.0 // indirect - github.com/google/go-querystring v1.1.0 // indirect + github.com/google/go-querystring v1.2.0 // indirect github.com/google/s2a-go v0.1.9 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.9 // indirect - github.com/googleapis/gax-go/v2 v2.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.14 // indirect + github.com/googleapis/gax-go/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.8 // indirect - github.com/in-toto/attestation v1.1.2 // indirect - github.com/in-toto/in-toto-golang v0.9.0 // indirect + github.com/in-toto/attestation v1.2.0 // indirect + github.com/in-toto/in-toto-golang v0.10.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect + github.com/jedisct1/go-minisign v0.0.0-20241212093149-d2f9f49435c7 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.1 // indirect + 
github.com/klauspost/compress v1.18.5 // indirect github.com/lestrrat-go/blackmagic v1.0.4 // indirect + github.com/lestrrat-go/dsig v1.0.0 // indirect + github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect - github.com/lestrrat-go/httprc/v3 v3.0.0 // indirect - github.com/lestrrat-go/jwx/v3 v3.0.10 // indirect - github.com/lestrrat-go/option v1.0.1 // indirect + github.com/lestrrat-go/httprc/v3 v3.0.5 // indirect + github.com/lestrrat-go/jwx/v3 v3.0.13 // indirect github.com/lestrrat-go/option/v2 v2.0.0 // indirect - github.com/letsencrypt/boulder v0.20251110.0 // indirect + github.com/letsencrypt/boulder v0.20260324.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.19 // indirect - github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/mattn/go-runewidth v0.0.21 // indirect + github.com/miekg/pkcs11 v1.1.2 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect @@ -217,106 +220,103 @@ require ( github.com/mozillazg/docker-credential-acr-helper v0.4.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect - github.com/oklog/ulid v1.3.1 // indirect + github.com/oklog/ulid/v2 v2.1.1 // indirect github.com/oleiade/reflections v1.1.0 // indirect github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 // indirect - github.com/olekukonko/errors v1.1.0 // indirect - github.com/olekukonko/ll v0.1.3 // indirect - github.com/olekukonko/tablewriter v1.1.2 // indirect - github.com/open-policy-agent/opa v1.8.0 // indirect + github.com/olekukonko/errors v1.2.0 // indirect + github.com/olekukonko/ll v0.1.8 // indirect + 
github.com/olekukonko/tablewriter v1.1.4 // indirect + github.com/open-policy-agent/opa v1.15.1 // indirect github.com/opencontainers/cgroups v0.0.6 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/pborman/uuid v1.2.1 // indirect - github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/pelletier/go-toml/v2 v2.3.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/common v0.67.5 // indirect - github.com/prometheus/procfs v0.19.2 // indirect - github.com/protocolbuffers/txtpbfmt v0.0.0-20250627152318-f293424e46b5 // indirect - github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/prometheus/procfs v0.20.1 // indirect + github.com/protocolbuffers/txtpbfmt v0.0.0-20260217160748-a481f6a22f94 // indirect + github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/sagikazarmark/locafero v0.12.0 // indirect github.com/sassoftware/relic v7.2.1+incompatible // indirect - github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect - github.com/segmentio/asm v1.2.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.10.0 // indirect + github.com/segmentio/asm v1.2.1 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/sigstore/fulcio v1.8.5 // indirect github.com/sigstore/protobuf-specs v0.5.0 // indirect - github.com/sigstore/rekor v1.5.0 // indirect - github.com/sigstore/rekor-tiles/v2 v2.1.0 // indirect - github.com/sigstore/sigstore v1.10.4 // indirect + github.com/sigstore/rekor v1.5.1 // indirect + github.com/sigstore/rekor-tiles/v2 v2.2.1 // indirect + github.com/sigstore/sigstore v1.10.5 // 
indirect github.com/sigstore/sigstore-go v1.1.4 // indirect - github.com/sigstore/timestamp-authority/v2 v2.0.4 // indirect + github.com/sigstore/timestamp-authority/v2 v2.0.5 // indirect github.com/sirupsen/logrus v1.9.4 // indirect - github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spf13/afero v1.15.0 // indirect github.com/spf13/cast v1.10.0 // indirect github.com/spf13/cobra v1.10.2 // indirect github.com/spf13/pflag v1.0.10 // indirect github.com/spf13/viper v1.21.0 // indirect github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect - github.com/stoewer/go-strcase v1.3.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tchap/go-patricia/v2 v2.3.3 // indirect github.com/thales-e-security/pool v0.0.2 // indirect github.com/theupdateframework/go-tuf v0.7.0 // indirect - github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect + github.com/theupdateframework/go-tuf/v2 v2.4.1 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/tjfoc/gmsm v1.4.1 // indirect - github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect + github.com/transparency-dev/formats v0.1.0 // indirect github.com/transparency-dev/merkle v0.0.2 // indirect - github.com/valyala/fastjson v1.6.4 // indirect + github.com/valyala/fastjson v1.6.10 // indirect github.com/vbatts/tar-split v0.12.2 // indirect - github.com/vektah/gqlparser/v2 v2.5.30 // indirect + github.com/vektah/gqlparser/v2 v2.5.32 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect + github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect github.com/yashtewari/glob-intersection 
v0.2.0 // indirect - gitlab.com/gitlab-org/api/client-go v0.143.3 // indirect - go.mongodb.org/mongo-driver v1.17.6 // indirect + gitlab.com/gitlab-org/api/client-go v1.46.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect - go.opentelemetry.io/otel v1.39.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect - go.opentelemetry.io/otel/metric v1.39.0 // indirect - go.opentelemetry.io/otel/sdk v1.39.0 // indirect - go.opentelemetry.io/otel/trace v1.39.0 // indirect - go.opentelemetry.io/proto/otlp v1.7.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect + go.opentelemetry.io/otel v1.42.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 // indirect + go.opentelemetry.io/otel/metric v1.42.0 // indirect + go.opentelemetry.io/otel/sdk v1.42.0 // indirect + go.opentelemetry.io/otel/trace v1.42.0 // indirect + go.opentelemetry.io/proto/otlp v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.1 // indirect - go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v2 v2.4.4 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.46.0 // indirect - golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect - golang.org/x/net v0.48.0 // indirect - golang.org/x/oauth2 v0.34.0 // indirect - golang.org/x/sys v0.39.0 // indirect - golang.org/x/term v0.38.0 // indirect - golang.org/x/text v0.32.0 // indirect - golang.org/x/time v0.14.0 // indirect - golang.org/x/tools v0.40.0 // indirect - google.golang.org/api v0.260.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b // indirect - google.golang.org/genproto/googleapis/rpc 
v0.0.0-20251222181119-0a764e51fe1b // indirect + golang.org/x/crypto v0.49.0 // indirect + golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 // indirect + golang.org/x/net v0.52.0 // indirect + golang.org/x/oauth2 v0.36.0 // indirect + golang.org/x/sys v0.42.0 // indirect + golang.org/x/term v0.41.0 // indirect + golang.org/x/text v0.35.0 // indirect + golang.org/x/time v0.15.0 // indirect + golang.org/x/tools v0.43.0 // indirect + google.golang.org/api v0.273.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260330182312-d5a96adf58d8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260330182312-d5a96adf58d8 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - k8s.io/apiextensions-apiserver v0.35.0 // indirect - k8s.io/apiserver v0.35.0 // indirect - k8s.io/code-generator v0.35.0 // indirect - k8s.io/component-base v0.35.0 // indirect + k8s.io/apiextensions-apiserver v0.35.3 // indirect + k8s.io/apiserver v0.35.3 // indirect + k8s.io/code-generator v0.35.3 // indirect + k8s.io/component-base v0.35.3 // indirect k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b // indirect - k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect - k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 // indirect - sigs.k8s.io/gateway-api v1.4.0 // indirect + k8s.io/kube-openapi v0.0.0-20260330154417-16be699c7b31 // indirect + k8s.io/utils v0.0.0-20260319190234-28399d86e0b5 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0 // indirect + sigs.k8s.io/gateway-api v1.5.1 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.2 // indirect 
) diff --git a/go.sum b/go.sum index 566ca30175..bebee50a0e 100644 --- a/go.sum +++ b/go.sum @@ -1,40 +1,46 @@ al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= -cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= -cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1 h1:PMmTMyvHScV9Mn8wc6ASge9uRcHy0jtqPd+fM35LmsQ= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1/go.mod h1:tvtbpgaVXZX4g6Pn+AnzFycuRK3MOz5HJfEGeEllXYM= +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= +cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= -cloud.google.com/go/auth v0.18.0 h1:wnqy5hrv7p3k7cShwAU/Br3nzod7fxoqG+k0VZ+/Pk0= -cloud.google.com/go/auth v0.18.0/go.mod h1:wwkPM1AgE1f2u6dG443MiWoD8C3BtOywNsUMcUTVDRo= +cloud.google.com/go/auth v0.19.0 h1:DGYwtbcsGsT1ywuxsIoWi1u/vlks0moIblQHgSDgQkQ= +cloud.google.com/go/auth v0.19.0/go.mod h1:2Aph7BT2KnaSFOM0JDPyiYgNh6PL9vGMiP8CUIXZ+IY= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= cloud.google.com/go/iam v1.5.3/go.mod 
h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= -cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= -cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= -cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E= -cloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY= -cuelabs.dev/go/oci/ociregistry v0.0.0-20250715075730-49cab49c8e9d h1:lX0EawyoAu4kgMJJfy7MmNkIHioBcdBGFRSKDZ+CWo0= -cuelabs.dev/go/oci/ociregistry v0.0.0-20250715075730-49cab49c8e9d/go.mod h1:4WWeZNxUO1vRoZWAHIG0KZOd6dA25ypyWuwD3ti0Tdc= -cuelang.org/go v0.14.1 h1:kxFAHr7bvrCikbtVps2chPIARazVdnRmlz65dAzKyWg= -cuelang.org/go v0.14.1/go.mod h1:aSP9UZUM5m2izHAHUvqtq0wTlWn5oLjuv2iBMQZBLLs= -cyphar.com/go-pathrs v0.2.1 h1:9nx1vOgwVvX1mNBWDu93+vaceedpbsDqo+XuBGL40b8= -cyphar.com/go-pathrs v0.2.1/go.mod h1:y8f1EMG7r+hCuFf/rXsKqMJrJAUoADZGNh5/vZPKcGc= +cloud.google.com/go/kms v1.26.0 h1:cK9mN2cf+9V63D3H1f6koxTatWy39aTI/hCjz1I+adU= +cloud.google.com/go/kms v1.26.0/go.mod h1:pHKOdFJm63hxBsiPkYtowZPltu9dW0MWvBa6IA4HM58= +cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8= +cloud.google.com/go/longrunning v0.8.0/go.mod h1:UmErU2Onzi+fKDg2gR7dusz11Pe26aknR4kHmJJqIfk= +connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14= +connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w= +cuelabs.dev/go/oci/ociregistry v0.0.0-20251212221603-3adeb8663819 h1:Zh+Ur3OsoWpvALHPLT45nOekHkgOt+IOfutBbPqM17I= +cuelabs.dev/go/oci/ociregistry v0.0.0-20251212221603-3adeb8663819/go.mod h1:WjmQxb+W6nVNCgj8nXrF24lIz95AHwnSl36tpjDZSU8= +cuelang.org/go v0.16.0 h1:mmt9SL/IzfSIiBKuP5wxdO4xLjvIHr3urpbjCDdMV5U= +cuelang.org/go v0.16.0/go.mod h1:4veMX+GpsK0B91b1seGXoozG80LJCczvG1M1Re/knxo= +cyphar.com/go-pathrs v0.2.4 h1:iD/mge36swa1UFKdINkr1Frkpp6wZsy3YYEildj9cLY= +cyphar.com/go-pathrs v0.2.4/go.mod 
h1:y8f1EMG7r+hCuFf/rXsKqMJrJAUoADZGNh5/vZPKcGc= dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +filippo.io/edwards25519 v1.1.1 h1:YpjwWWlNmGIDyXOn8zLzqiD+9TyIlPhGFG96P39uBpw= +filippo.io/edwards25519 v1.1.1/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= -github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 h1:kcnfY4vljxXliXDBrA9K9lwF8IoEZ4Up6Eg9kWTIm28= -github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0/go.mod h1:tlqp9mUGbsP+0z3Q+c0Q5MgSdq/OMwQhm5bffR3Q3ss= +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.20.0 h1:LU830/Tuj5c6xSpEjyrymfY5fGInchwMWRp1aSBXbS8= +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.20.0/go.mod h1:XPLlCbtg9ajYOsp4nuW/VLcOFOT5bPqtzwhF5rgJ6Eg= +github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata v0.0.10 h1:03PGMqfdIoNIuiBy1XNLnXHbZOgkWEZdOK1q7zAjw/I= +github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata v0.0.10/go.mod h1:QM3VKYNyD5thMEWqKef+uOfpNmZG7RjG7wOsCdavj9w= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= 
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 h1:fou+2+WFTib47nS+nz/ozhEBnvU96bKHy6LjRsY4E28= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0/go.mod h1:t76Ruy8AHvUAC8GfMWJMa0ElSbuIcO03NLpynfbgsPA= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= @@ -47,29 +53,32 @@ github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEK github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= -github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= -github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest v0.11.30 h1:iaZ1RGz/ALZtN5eq4Nr1SOFSlf2E4pDI3Tcsl+dZPVE= +github.com/Azure/go-autorest/autorest v0.11.30/go.mod h1:t1kpPIOpIVX7annvothKvb0stsrXa37i7b+xpmBW8Fs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= 
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 h1:Q9R3utmFg9K1B4OYtAZ7ZUUvIUdzQt7G2MN5Hi/d670= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.7/go.mod h1:bVrAueELJ0CKLBpUHDIvD516TwmHmzqwCpvONWRsw3s= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/date v0.3.1 h1:o9Z8Jyt+VJJTCZ/UORishuHOusBwolhjokt9s5k8I4w= +github.com/Azure/go-autorest/autorest/date v0.3.1/go.mod h1:Dz/RDmXlfiFFS/eW+b/xMUSFs1tboPVy6UjgADToWDM= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= 
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/logger v0.2.2 h1:hYqBsEBywrrOSW24kkOCXRcKfKhK76OzLTfF+MYDE2o= +github.com/Azure/go-autorest/logger v0.2.2/go.mod h1:I5fg9K52o+iuydlWfa9T5K6WFos9XYr9dYTFzpqgibw= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= -github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= +github.com/Azure/go-autorest/tracing v0.6.1 h1:YUMSrC/CeD1ZnnXcNYU4a/fzsO35u2Fsful9L/2nyR0= +github.com/Azure/go-autorest/tracing v0.6.1/go.mod h1:/3EgjbsjraOqiicERAeu3m7/z0x1TzjQGAwDrJrXGkc= +github.com/AzureAD/microsoft-authentication-library-for-go v1.7.0 h1:4iB+IesclUXdP0ICgAabvq2FYLXrJWKx1fJQ+GxSo3Y= +github.com/AzureAD/microsoft-authentication-library-for-go v1.7.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= @@ -83,8 +92,9 @@ github.com/acobaugh/osrelease v0.1.0/go.mod h1:4bFEs0MtgHNHBrmHCt67gNisnabCRAlzd github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM= github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.2/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= -github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo= github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod 
h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.5 h1:zE8vH9C7JiZLNJJQ5OwjU9mSi4T9ef9u3BURT6LCLC8= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.5/go.mod h1:tWnyE9AjF8J8qqLk645oUmVUnFybApTQWklQmi5tY6g= github.com/alibabacloud-go/cr-20160607 v1.0.1 h1:WEnP1iPFKJU74ryUKh/YDPHoxMZawqlPajOymyNAkts= github.com/alibabacloud-go/cr-20160607 v1.0.1/go.mod h1:QHeKZtZ3F3FOE+/uIXCBAp8POwnUYekpLwr1dtQa5r0= github.com/alibabacloud-go/cr-20181201 v1.0.10 h1:B60f6S1imsgn2fgC6X6FrVNrONDrbCT0NwYhsJ0C9/c= @@ -95,35 +105,43 @@ github.com/alibabacloud-go/darabonba-openapi v0.2.1 h1:WyzxxKvhdVDlwpAMOHgAiCJ+N github.com/alibabacloud-go/darabonba-openapi v0.2.1/go.mod h1:zXOqLbpIqq543oioL9IuuZYOQgHQ5B8/n5OPrnko8aY= github.com/alibabacloud-go/darabonba-string v1.0.0/go.mod h1:93cTfV3vuPhhEwGGpKKqhVW4jLe7tDpo3LUM0i0g6mA= github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68/go.mod h1:6pb/Qy8c+lqua8cFpEy7g39NRRqOWc3rOwAy8m5Y2BY= -github.com/alibabacloud-go/debug v1.0.0 h1:3eIEQWfay1fB24PQIEzXAswlVJtdQok8f3EVN5VrBnA= github.com/alibabacloud-go/debug v1.0.0/go.mod h1:8gfgZCCAC3+SCzjWtY053FrOcd4/qlH6IHTI4QyICOc= +github.com/alibabacloud-go/debug v1.0.1 h1:MsW9SmUtbb1Fnt3ieC6NNZi6aEwrXfDksD4QA6GSbPg= +github.com/alibabacloud-go/debug v1.0.1/go.mod h1:8gfgZCCAC3+SCzjWtY053FrOcd4/qlH6IHTI4QyICOc= github.com/alibabacloud-go/endpoint-util v1.1.0/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= github.com/alibabacloud-go/endpoint-util v1.1.1 h1:ZkBv2/jnghxtU0p+upSU0GGzW1VL9GQdZO3mcSUTUy8= github.com/alibabacloud-go/endpoint-util v1.1.1/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= github.com/alibabacloud-go/openapi-util v0.0.9/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= github.com/alibabacloud-go/openapi-util v0.0.10/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= github.com/alibabacloud-go/openapi-util v0.0.11/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= 
-github.com/alibabacloud-go/openapi-util v0.1.0 h1:0z75cIULkDrdEhkLWgi9tnLe+KhAFE/r5Pb3312/eAY= -github.com/alibabacloud-go/openapi-util v0.1.0/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/openapi-util v0.1.2 h1:aljdyAPotH4xHymo5wzARjcHb3Org0zKnLP4RxS0JGY= +github.com/alibabacloud-go/openapi-util v0.1.2/go.mod h1:/UehBSE2cf1gYT43GV4E+RxTdLRzURImCYY0aRmlXpw= github.com/alibabacloud-go/tea v1.1.0/go.mod h1:IkGyUSX4Ba1V+k4pCtJUc6jDpZLFph9QMy2VUPTwukg= github.com/alibabacloud-go/tea v1.1.7/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= github.com/alibabacloud-go/tea v1.1.8/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= github.com/alibabacloud-go/tea v1.1.11/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= github.com/alibabacloud-go/tea v1.1.17/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= github.com/alibabacloud-go/tea v1.1.19/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= -github.com/alibabacloud-go/tea v1.2.1 h1:rFF1LnrAdhaiPmKwH5xwYOKlMh66CqRwPUTzIK74ask= -github.com/alibabacloud-go/tea v1.2.1/go.mod h1:qbzof29bM/IFhLMtJPrgTGK3eauV5J2wSyEUo4OEmnA= +github.com/alibabacloud-go/tea v1.2.2/go.mod h1:CF3vOzEMAG+bR4WOql8gc2G9H3EkH3ZLAQdpmpXMgwk= +github.com/alibabacloud-go/tea v1.4.0 h1:MSKhu/kWLPX7mplWMngki8nNt+CyUZ+kfkzaR5VpMhA= +github.com/alibabacloud-go/tea v1.4.0/go.mod h1:A560v/JTQ1n5zklt2BEpurJzZTI8TUT+Psg2drWlxRg= github.com/alibabacloud-go/tea-utils v1.3.1/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= github.com/alibabacloud-go/tea-utils v1.3.9/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= github.com/alibabacloud-go/tea-utils v1.4.3/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= github.com/alibabacloud-go/tea-utils v1.4.5 h1:h0/6Xd2f3bPE4XHTvkpjwxowIwRCJAJOqY6Eq8f3zfA= github.com/alibabacloud-go/tea-utils v1.4.5/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= +github.com/alibabacloud-go/tea-utils/v2 v2.0.6/go.mod 
h1:qxn986l+q33J5VkialKMqT/TTs3E+U9MJpd001iWQ9I= +github.com/alibabacloud-go/tea-utils/v2 v2.0.7/go.mod h1:qxn986l+q33J5VkialKMqT/TTs3E+U9MJpd001iWQ9I= +github.com/alibabacloud-go/tea-utils/v2 v2.0.9 h1:y6pUIlhjxbZl9ObDAcmA1H3c21eaAxADHTDQmBnAIgA= +github.com/alibabacloud-go/tea-utils/v2 v2.0.9/go.mod h1:qxn986l+q33J5VkialKMqT/TTs3E+U9MJpd001iWQ9I= github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= github.com/alibabacloud-go/tea-xml v1.1.3 h1:7LYnm+JbOq2B+T/B0fHC4Ies4/FofC4zHzYtqw7dgt0= github.com/alibabacloud-go/tea-xml v1.1.3/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw= -github.com/aliyun/credentials-go v1.3.2 h1:L4WppI9rctC8PdlMgyTkF8bBsy9pyKQEzBD1bHMRl+g= -github.com/aliyun/credentials-go v1.3.2/go.mod h1:tlpz4uys4Rn7Ik4/piGRrTbXy2uLKvePgQJJduE+Y5c= +github.com/aliyun/credentials-go v1.3.6/go.mod h1:1LxUuX7L5YrZUWzBrRyk0SwSdH4OmPrib8NVePL3fxM= +github.com/aliyun/credentials-go v1.4.5/go.mod h1:Jm6d+xIgwJVLVWT561vy67ZRP4lPTQxMbEYRuT2Ti1U= +github.com/aliyun/credentials-go v1.4.12 h1:7D8eXGotNwthZuUEgAMgBoqxmIHwfaPVwW+/04LIJSQ= +github.com/aliyun/credentials-go v1.4.12/go.mod h1:Jm6d+xIgwJVLVWT561vy67ZRP4lPTQxMbEYRuT2Ti1U= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= @@ -136,65 +154,66 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= github.com/aws/aws-sdk-go v1.55.8/go.mod 
h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= -github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4= -github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= -github.com/aws/aws-sdk-go-v2/config v1.32.5 h1:pz3duhAfUgnxbtVhIK39PGF/AHYyrzGEyRD9Og0QrE8= -github.com/aws/aws-sdk-go-v2/config v1.32.5/go.mod h1:xmDjzSUs/d0BB7ClzYPAZMmgQdrodNjPPhd6bGASwoE= -github.com/aws/aws-sdk-go-v2/credentials v1.19.5 h1:xMo63RlqP3ZZydpJDMBsH9uJ10hgHYfQFIk1cHDXrR4= -github.com/aws/aws-sdk-go-v2/credentials v1.19.5/go.mod h1:hhbH6oRcou+LpXfA/0vPElh/e0M3aFeOblE1sssAAEk= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= -github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1 h1:Bwzh202Aq7/MYnAjXA9VawCf6u+hjwMdoYmZ4HYsdf8= -github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1/go.mod h1:xZzWl9AXYa6zsLLH41HBFW8KRKJRIzlGmvSM0mVMIX4= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2 h1:XJ/AEFYj9VFPJdF+VFi4SUPEDfz1akHwxxm07JfZJcs= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2/go.mod h1:JUBHdhvKbbKmhaHjLsKJAWnQL80T6nURmhB/LEprV+4= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 
h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.7 h1:eYnlt6QxnFINKzwxP5/Ucs1vkG7VT3Iezmvfgc2waUw= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.7/go.mod h1:+fWt2UHSb4kS7Pu8y+BMBvJF0EWx+4H0hzNwtDNRTrg= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 h1:AHDr0DaHIAo8c9t1emrzAlVDFp+iMMKnPdYy6XO4MCE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12/go.mod h1:GQ73XawFFiWxyWXMHWfhiomvP3tXtdNar/fi8z18sx0= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk= -github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= -github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.10.1 h1:6lMw4/QGLFPvbKQ0eri/9Oh3YX5Nm6BPrUlZR8yuJHg= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.10.1/go.mod h1:EVJOSYOVeoD3VFFZ/dWCAzWJp5wZr9lTOCjW8ejAmO0= +github.com/aws/aws-sdk-go-v2 v1.41.5 h1:dj5kopbwUsVUVFgO4Fi5BIT3t4WyqIDjGKCangnV/yY= +github.com/aws/aws-sdk-go-v2 v1.41.5/go.mod 
h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= +github.com/aws/aws-sdk-go-v2/config v1.32.13 h1:5KgbxMaS2coSWRrx9TX/QtWbqzgQkOdEa3sZPhBhCSg= +github.com/aws/aws-sdk-go-v2/config v1.32.13/go.mod h1:8zz7wedqtCbw5e9Mi2doEwDyEgHcEE9YOJp6a8jdSMY= +github.com/aws/aws-sdk-go-v2/credentials v1.19.13 h1:mA59E3fokBvyEGHKFdnpNNrvaR351cqiHgRg+JzOSRI= +github.com/aws/aws-sdk-go-v2/credentials v1.19.13/go.mod h1:yoTXOQKea18nrM69wGF9jBdG4WocSZA1h38A+t/MAsk= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21 h1:NUS3K4BTDArQqNu2ih7yeDLaS3bmHD0YndtA6UP884g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21/go.mod h1:YWNWJQNjKigKY1RHVJCuupeWDrrHjRqHm0N9rdrWzYI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 h1:Rgg6wvjjtX8bNHcvi9OnXWwcE0a2vGpbwmtICOsvcf4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21/go.mod h1:A/kJFst/nm//cyqonihbdpQZwiUhhzpqTsdbhDdRF9c= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 h1:PEgGVtPoB6NTpPrBgqSE5hE/o47Ij9qk/SEZFbUOe9A= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21/go.mod h1:p+hz+PRAYlY3zcpJhPwXlLC4C+kqn70WIHwnzAfs6ps= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 h1:qYQ4pzQ2Oz6WpQ8T3HvGHnZydA72MnLuFK9tJwmrbHw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY= +github.com/aws/aws-sdk-go-v2/service/ecr v1.56.2 h1:B/psparkCzFM8Ct1MHyOLQ9I6GxpC8GaIgWLMgp1uv8= +github.com/aws/aws-sdk-go-v2/service/ecr v1.56.2/go.mod h1:BcTorrilUv1q7JyC3X8qiVc48qJpttMTIb3bdVV92lA= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.38.13 h1:mRgG1o6IKIDYiOtpLmQ18yf1GxDOSCzqv2ch4gf9kZU= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.38.13/go.mod h1:9NhDlaA8e8G5r64GicBAHiIC/1ZOIIZqrKP9D6/WwLg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI= 
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 h1:c31//R3xgIJMSC8S6hEVq+38DcvUlgFY0FM6mSI5oto= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21/go.mod h1:r6+pf23ouCB718FUxaqzZdbpYFyDtehyZcmP5KL9FkA= +github.com/aws/aws-sdk-go-v2/service/kms v1.50.3 h1:s/zDSG/a/Su9aX+v0Ld9cimUCdkr5FWPmBV8owaEbZY= +github.com/aws/aws-sdk-go-v2/service/kms v1.50.3/go.mod h1:/iSgiUor15ZuxFGQSTf3lA2FmKxFsQoc2tADOarQBSw= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.9 h1:QKZH0S178gCmFEgst8hN0mCX1KxLgHBKKY/CLqwP8lg= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.9/go.mod h1:7yuQJoT+OoH8aqIxw9vwF+8KpvLZ8AWmvmUWHsGQZvI= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.14 h1:GcLE9ba5ehAQma6wlopUesYg/hbcOhFNWTjELkiWkh4= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.14/go.mod h1:WSvS1NLr7JaPunCXqpJnWk1Bjo7IxzZXrZi1QQCkuqM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.18 h1:mP49nTpfKtpXLt5SLn8Uv8z6W+03jYVoOSAl/c02nog= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.18/go.mod h1:YO8TrYtFdl5w/4vmjL8zaBSsiNp3w0L1FfKVKenZT7w= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.10 h1:p8ogvvLugcR/zLBXTXrTkj0RYBUdErbMnAFFp12Lm/U= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.10/go.mod h1:60dv0eZJfeVXfbT1tFJinbHrDfSJ2GZl4Q//OSSNAVw= +github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= +github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.12.0 h1:JFWXO6QPihCknDdnL6VaQE57km4ZKheHIGd9YiOGcTo= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.12.0/go.mod h1:046/oLyFlYdAghYQE2yHXi/E//VM5Cf3/dFmA+3CZ0c= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod 
h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/buildkite/agent/v3 v3.104.0 h1:VLwNHHb5cmOeWp7clutY3Qnz88lfKb8yj+OTWrwDp+o= -github.com/buildkite/agent/v3 v3.104.0/go.mod h1:HPO/Bv8C/aC2wfwVarAgU8LSXjUh/fTa9P3pxuGB+vw= -github.com/buildkite/go-pipeline v0.15.0 h1:ae/TEXC/4HhajbED2vKcRL5vZTtb9C71cajzwoBlP8s= -github.com/buildkite/go-pipeline v0.15.0/go.mod h1:VE37qY3X5pmAKKUMoDZvPsHOQuyakB9cmXj9Qn6QasA= +github.com/buildkite/agent/v3 v3.121.0 h1:3XZilxC68EDHetNIxrkGgYGiBktIEHi7USkspTMe1Is= +github.com/buildkite/agent/v3 v3.121.0/go.mod h1:wH8bnT10nFhpxgUJ2ToLf1OZ4WaQh5Y/RxybFYkEVsY= +github.com/buildkite/go-pipeline v0.16.0 h1:wEgWUMRAgSg1ZnWOoA3AovtYYdTvN0dLY1zwUWmPP+4= +github.com/buildkite/go-pipeline v0.16.0/go.mod h1:VE37qY3X5pmAKKUMoDZvPsHOQuyakB9cmXj9Qn6QasA= github.com/buildkite/interpolate v0.1.5 h1:v2Ji3voik69UZlbfoqzx+qfcsOKLA61nHdU79VV+tPU= github.com/buildkite/interpolate v0.1.5/go.mod h1:dHnrwHew5O8VNOAgMDpwRlFnhL5VSN6M1bHVmRZ9Ccc= github.com/buildkite/roko v1.4.0 h1:DxixoCdpNqxu4/1lXrXbfsKbJSd7r1qoxtef/TT2J80= github.com/buildkite/roko v1.4.0/go.mod h1:0vbODqUFEcVf4v2xVXRfZZRsqJVsCCHTG/TBRByGK4E= -github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= -github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= +github.com/bytecodealliance/wasmtime-go/v39 v39.0.1 h1:RibaT47yiyCRxMOj/l2cvL8cWiWBSqDXHyqsa9sGcCE= +github.com/bytecodealliance/wasmtime-go/v39 v39.0.1/go.mod h1:miR4NYIEBXeDNamZIzpskhJ0z/p8al+lwMWylQ/ZJb4= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod 
h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cert-manager/cert-manager v1.19.2 h1:jSprN1h5pgNDSl7HClAmIzXuTxic/5FXJ32kbQHqjlM= -github.com/cert-manager/cert-manager v1.19.2/go.mod h1:e9NzLtOKxTw7y99qLyWGmPo6mrC1Nh0EKKcMkRfK+GE= +github.com/cert-manager/cert-manager v1.20.1 h1:99ExHJu5TPp1V92AvvE4oY6BkOSyJiWLxxMkbqbdGaY= +github.com/cert-manager/cert-manager v1.20.1/go.mod h1:ut67FnggYJJqAdDWLhSPnj10P06QwbNU88RYNh9MvMc= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4= @@ -206,30 +225,28 @@ github.com/clbanning/mxj/v2 v2.5.5/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn github.com/clbanning/mxj/v2 v2.7.0 h1:WA/La7UGCanFe5NpHF0Q3DNtnCsVoxbPKuyBNHWRyME= github.com/clbanning/mxj/v2 v2.7.0/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/clipperhouse/displaywidth v0.6.0 h1:k32vueaksef9WIKCNcoqRNyKbyvkvkysNYnAWz2fN4s= -github.com/clipperhouse/displaywidth v0.6.0/go.mod h1:R+kHuzaYWFkTm7xoMmK1lFydbci4X2CicfbGstSGg0o= -github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= -github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= -github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4= -github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= 
+github.com/clipperhouse/displaywidth v0.11.0 h1:lBc6kY44VFw+TDx4I8opi/EtL9m20WSEFgwIwO+UVM8= +github.com/clipperhouse/displaywidth v0.11.0/go.mod h1:bkrFNkf81G8HyVqmKGxsPufD3JhNl3dSqnGhOoSD/o0= +github.com/clipperhouse/uax29/v2 v2.7.0 h1:+gs4oBZ2gPfVrKPthwbMzWZDaAFPGYK72F0NJv2v7Vk= +github.com/clipperhouse/uax29/v2 v2.7.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= -github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= +github.com/cockroachdb/apd/v3 v3.2.3 h1:4Zx+I3R35bFXMnltzmjP79i2cravE4jTRL6ps9Aux80= +github.com/cockroachdb/apd/v3 v3.2.3/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ= github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w= -github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= -github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= +github.com/containerd/stargz-snapshotter/estargz v0.18.2 h1:yXkZFYIzz3eoLwlTUZKz2iQ4MrckBxJjkmD16ynUTrw= +github.com/containerd/stargz-snapshotter/estargz v0.18.2/go.mod h1:XyVU5tcJ3PRpkA9XS2T5us6Eg35yM0214Y+wvrZTBrY= github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= -github.com/coreos/go-systemd/v22 v22.6.0 
h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= -github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= +github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA= +github.com/coreos/go-systemd/v22 v22.7.0/go.mod h1:xNUYtjHu2EDXbsxz1i41wouACIwT7Ybq9o0BQhMwD0w= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creack/pty v1.1.19 h1:tUN6H7LWqNx4hQVxomd0CVsDwaDr9gaRQaI4GpSmrsA= -github.com/creack/pty v1.1.19/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= +github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= @@ -240,35 +257,35 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1 
h1:5RVFMOWjMyRy8cARdy79nAmgYw3hK/4HUq48LQ6Wwqo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936 h1:foGzavPWwtoyBvjWyKJYDYsyzy+23iBV7NKTwdk+LRY= github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936/go.mod h1:ttKPnOepYt4LLzD+loXQ1rT6EmpyIYHro7TAJuIIlHo= -github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs= -github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w= +github.com/dgraph-io/badger/v4 v4.9.1 h1:DocZXZkg5JJHJPtUErA0ibyHxOVUDVoXLSCV6t8NC8w= +github.com/dgraph-io/badger/v4 v4.9.1/go.mod h1:5/MEx97uzdPUHR4KtkNt8asfI2T4JiEiQlV7kWUo8c0= github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM= github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI= github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo= github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= -github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= -github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= -github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= -github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= +github.com/digitorus/pkcs7 v0.0.0-20250730155240-ffadbf3f398c h1:g349iS+CtAvba7i0Ee9EP1TlTZ9w+UncBY6HSmsFZa0= +github.com/digitorus/pkcs7 v0.0.0-20250730155240-ffadbf3f398c/go.mod h1:mCGGmWkOQvEuLdIRfPIpXViBfpWto4AhwtJlAvo62SQ= 
+github.com/digitorus/timestamp v0.0.0-20250524132541-c45532741eea h1:ALRwvjsSP53QmnN3Bcj0NpR8SsFLnskny/EIMebAk1c= +github.com/digitorus/timestamp v0.0.0-20250524132541-c45532741eea/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= -github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v29.3.1+incompatible h1:M04FDj2TRehDacrosh7Vlkgc7AuQoWloQkf1PA5hmoI= +github.com/docker/cli v29.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= -github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= +github.com/docker/docker-credential-helpers v0.9.5 h1:EFNN8DHvaiK8zVqFA2DT6BjXE0GzfLOZ38ggPTKePkY= +github.com/docker/docker-credential-helpers v0.9.5/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/proto v1.14.2 h1:wJPxPy2Xifja9cEMrcA/g08art5+7CGJNFNk35iXC1I= -github.com/emicklei/proto v1.14.2/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= +github.com/emicklei/proto v1.14.3 
h1:zEhlzNkpP8kN6utonKMzlPfIvy82t5Kb9mufaJxSe1Q= +github.com/emicklei/proto v1.14.3/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -276,14 +293,14 @@ github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lSh github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= -github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= -github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fatih/color v1.19.0 h1:Zp3PiM21/9Ld6FzSKyL5c/BULoe/ONr9KlbYVOfG8+w= +github.com/fatih/color v1.19.0/go.mod h1:zNk67I0ZUT1bEGsSGyCZYZNrHuTkJJB+r6Q9VuMi0LE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= -github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= +github.com/foxcpp/go-mockdns v1.2.0 h1:omK3OrHRD1IWJz1FuFBCFquhXslXoF17OvBS6JPzZF0= +github.com/foxcpp/go-mockdns v1.2.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest 
v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -292,12 +309,10 @@ github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmV github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= -github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/go-chi/chi/v5 v5.2.4 h1:WtFKPHwlywe8Srng8j2BhOD9312j9cGUxG1SP4V2cR4= -github.com/go-chi/chi/v5 v5.2.4/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0= -github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= -github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/fxamacker/cbor/v2 v2.9.1 h1:2rWm8B193Ll4VdjsJY28jxs70IdDsHRWgQYAI80+rMQ= +github.com/fxamacker/cbor/v2 v2.9.1/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug= +github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0= github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -307,56 +322,56 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= 
-github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM= -github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84= -github.com/go-openapi/errors v0.22.6 h1:eDxcf89O8odEnohIXwEjY1IB4ph5vmbUsBMsFNwXWPo= -github.com/go-openapi/errors v0.22.6/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk= -github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4= -github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80= -github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8= -github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4= -github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4= -github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY= -github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0= -github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0= -github.com/go-openapi/spec v0.22.3 h1:qRSmj6Smz2rEBxMnLRBMeBWxbbOvuOoElvSvObIgwQc= -github.com/go-openapi/spec v0.22.3/go.mod h1:iIImLODL2loCh3Vnox8TY2YWYJZjMAKYyLH2Mu8lOZs= -github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ= -github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8= -github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= -github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= -github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= -github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= -github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= -github.com/go-openapi/swag/conv v0.25.4/go.mod 
h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= -github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= -github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= -github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= -github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= -github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= -github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= -github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= -github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= -github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= -github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= -github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= -github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= -github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= -github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= -github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= -github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= -github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= -github.com/go-openapi/swag/yamlutils v0.25.4/go.mod 
h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= -github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= -github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= -github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw= -github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc= -github.com/go-piv/piv-go/v2 v2.4.0 h1:xamQ/fR4MJiw/Ndbk6yi7MVwhjrwlnDAPuaH9zcGb+I= -github.com/go-piv/piv-go/v2 v2.4.0/go.mod h1:ShZi74nnrWNQEdWzRUd/3cSig3uNOcEZp+EWl0oewnI= +github.com/go-openapi/analysis v0.25.0 h1:EnjAq1yO8wEO9HbPmY8vLPEIkdZuuFhCAKBPvCB7bCs= +github.com/go-openapi/analysis v0.25.0/go.mod h1:5WFTRE43WLkPG9r9OtlMfqkkvUTYLVVCIxLlEpyF8kE= +github.com/go-openapi/errors v0.22.7 h1:JLFBGC0Apwdzw3484MmBqspjPbwa2SHvpDm0u5aGhUA= +github.com/go-openapi/errors v0.22.7/go.mod h1://QW6SD9OsWtH6gHllUCddOXDL0tk0ZGNYHwsw4sW3w= +github.com/go-openapi/jsonpointer v0.22.5 h1:8on/0Yp4uTb9f4XvTrM2+1CPrV05QPZXu+rvu2o9jcA= +github.com/go-openapi/jsonpointer v0.22.5/go.mod h1:gyUR3sCvGSWchA2sUBJGluYMbe1zazrYWIkWPjjMUY0= +github.com/go-openapi/jsonreference v0.21.5 h1:6uCGVXU/aNF13AQNggxfysJ+5ZcU4nEAe+pJyVWRdiE= +github.com/go-openapi/jsonreference v0.21.5/go.mod h1:u25Bw85sX4E2jzFodh1FOKMTZLcfifd1Q+iKKOUxExw= +github.com/go-openapi/loads v0.23.3 h1:g5Xap1JfwKkUnZdn+S0L3SzBDpcTIYzZ5Qaag0YDkKQ= +github.com/go-openapi/loads v0.23.3/go.mod h1:NOH07zLajXo8y55hom0omlHWDVVvCwBM/S+csCK8LqA= +github.com/go-openapi/runtime v0.29.3 h1:h5twGaEqxtQg40ePiYm9vFFH1q06Czd7Ot6ufdK0w/Y= +github.com/go-openapi/runtime v0.29.3/go.mod h1:8A1W0/L5eyNJvKciqZtvIVQvYO66NlB7INMSZ9bw/oI= +github.com/go-openapi/spec v0.22.4 h1:4pxGjipMKu0FzFiu/DPwN3CTBRlVM2yLf/YTWorYfDQ= 
+github.com/go-openapi/spec v0.22.4/go.mod h1:WQ6Ai0VPWMZgMT4XySjlRIE6GP1bGQOtEThn3gcWLtQ= +github.com/go-openapi/strfmt v0.26.1 h1:7zGCHji7zSYDC2tCXIusoxYQz/48jAf2q+sF6wXTG+c= +github.com/go-openapi/strfmt v0.26.1/go.mod h1:Zslk5VZPOISLwmWTMBIS7oiVFem1o1EI6zULY8Uer7Y= +github.com/go-openapi/swag v0.25.5 h1:pNkwbUEeGwMtcgxDr+2GBPAk4kT+kJ+AaB+TMKAg+TU= +github.com/go-openapi/swag v0.25.5/go.mod h1:B3RT6l8q7X803JRxa2e59tHOiZlX1t8viplOcs9CwTA= +github.com/go-openapi/swag/cmdutils v0.25.5 h1:yh5hHrpgsw4NwM9KAEtaDTXILYzdXh/I8Whhx9hKj7c= +github.com/go-openapi/swag/cmdutils v0.25.5/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.5 h1:wAXBYEXJjoKwE5+vc9YHhpQOFj2JYBMF2DUi+tGu97g= +github.com/go-openapi/swag/conv v0.25.5/go.mod h1:CuJ1eWvh1c4ORKx7unQnFGyvBbNlRKbnRyAvDvzWA4k= +github.com/go-openapi/swag/fileutils v0.25.5 h1:B6JTdOcs2c0dBIs9HnkyTW+5gC+8NIhVBUwERkFhMWk= +github.com/go-openapi/swag/fileutils v0.25.5/go.mod h1:V3cT9UdMQIaH4WiTrUc9EPtVA4txS0TOmRURmhGF4kc= +github.com/go-openapi/swag/jsonname v0.25.5 h1:8p150i44rv/Drip4vWI3kGi9+4W9TdI3US3uUYSFhSo= +github.com/go-openapi/swag/jsonname v0.25.5/go.mod h1:jNqqikyiAK56uS7n8sLkdaNY/uq6+D2m2LANat09pKU= +github.com/go-openapi/swag/jsonutils v0.25.5 h1:XUZF8awQr75MXeC+/iaw5usY/iM7nXPDwdG3Jbl9vYo= +github.com/go-openapi/swag/jsonutils v0.25.5/go.mod h1:48FXUaz8YsDAA9s5AnaUvAmry1UcLcNVWUjY42XkrN4= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.5 h1:SX6sE4FrGb4sEnnxbFL/25yZBb5Hcg1inLeErd86Y1U= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.5/go.mod h1:/2KvOTrKWjVA5Xli3DZWdMCZDzz3uV/T7bXwrKWPquo= +github.com/go-openapi/swag/loading v0.25.5 h1:odQ/umlIZ1ZVRteI6ckSrvP6e2w9UTF5qgNdemJHjuU= +github.com/go-openapi/swag/loading v0.25.5/go.mod h1:I8A8RaaQ4DApxhPSWLNYWh9NvmX2YKMoB9nwvv6oW6g= +github.com/go-openapi/swag/mangling v0.25.5 h1:hyrnvbQRS7vKePQPHHDso+k6CGn5ZBs5232UqWZmJZw= +github.com/go-openapi/swag/mangling v0.25.5/go.mod 
h1:6hadXM/o312N/h98RwByLg088U61TPGiltQn71Iw0NY= +github.com/go-openapi/swag/netutils v0.25.5 h1:LZq2Xc2QI8+7838elRAaPCeqJnHODfSyOa7ZGfxDKlU= +github.com/go-openapi/swag/netutils v0.25.5/go.mod h1:lHbtmj4m57APG/8H7ZcMMSWzNqIQcu0RFiXrPUara14= +github.com/go-openapi/swag/stringutils v0.25.5 h1:NVkoDOA8YBgtAR/zvCx5rhJKtZF3IzXcDdwOsYzrB6M= +github.com/go-openapi/swag/stringutils v0.25.5/go.mod h1:PKK8EZdu4QJq8iezt17HM8RXnLAzY7gW0O1KKarrZII= +github.com/go-openapi/swag/typeutils v0.25.5 h1:EFJ+PCga2HfHGdo8s8VJXEVbeXRCYwzzr9u4rJk7L7E= +github.com/go-openapi/swag/typeutils v0.25.5/go.mod h1:itmFmScAYE1bSD8C4rS0W+0InZUBrB2xSPbWt6DLGuc= +github.com/go-openapi/swag/yamlutils v0.25.5 h1:kASCIS+oIeoc55j28T4o8KwlV2S4ZLPT6G0iq2SSbVQ= +github.com/go-openapi/swag/yamlutils v0.25.5/go.mod h1:Gek1/SjjfbYvM+Iq4QGwa/2lEXde9n2j4a3wI3pNuOQ= +github.com/go-openapi/testify/enable/yaml/v2 v2.4.1 h1:NZOrZmIb6PTv5LTFxr5/mKV/FjbUzGE7E6gLz7vFoOQ= +github.com/go-openapi/testify/enable/yaml/v2 v2.4.1/go.mod h1:r7dwsujEHawapMsxA69i+XMGZrQ5tRauhLAjV/sxg3Q= +github.com/go-openapi/testify/v2 v2.4.1 h1:zB34HDKj4tHwyUQHrUkpV0Q0iXQ6dUCOQtIqn8hE6Iw= +github.com/go-openapi/testify/v2 v2.4.1/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-openapi/validate v0.25.2 h1:12NsfLAwGegqbGWr2CnvT65X/Q2USJipmJ9b7xDJZz0= +github.com/go-openapi/validate v0.25.2/go.mod h1:Pgl1LpPPGFnZ+ys4/hTlDiRYQdI1ocKypgE+8Q8BLfY= +github.com/go-piv/piv-go/v2 v2.5.0 h1:w4KZ3GytEGZt8zm+S7olcIHZk0giL23xVqCa2HgwuqA= +github.com/go-piv/piv-go/v2 v2.5.0/go.mod h1:ShZi74nnrWNQEdWzRUd/3cSig3uNOcEZp+EWl0oewnI= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= @@ -367,23 +382,23 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8Wd github.com/go-task/slim-sprig 
v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro= +github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/goccy/go-json v0.10.6 h1:p8HrPJzOakx/mn/bQtjgNjdTcN+/S6FcG2CTtQOrHVU= +github.com/goccy/go-json v0.10.6/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/godbus/dbus/v5 v5.2.2 h1:TUR3TgtSVDmjiXOgAAyaZbYmIeP3DPkld3jgKGV8mXQ= +github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.2 
h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= -github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY= +github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -399,32 +414,33 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ= -github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= -github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= -github.com/google/certificate-transparency-go v1.3.2/go.mod 
h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= +github.com/google/cel-go v0.27.0 h1:e7ih85+4qVrBuqQWTW4FKSqZYokVuc3HnhH5keboFTo= +github.com/google/cel-go v0.27.0/go.mod h1:tTJ11FWqnhw5KKpnWpvW9CJC3Y9GK4EIS0WXnBbebzw= +github.com/google/certificate-transparency-go v1.3.3 h1:hq/rSxztSkXN2tx/3jQqF6Xc0O565UQPdHrOWvZwybo= +github.com/google/certificate-transparency-go v1.3.3/go.mod h1:iR17ZgSaXRzSa5qvjFl8TnVD5h8ky2JMVio+dzoKMgA= github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= -github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c= +github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= -github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= 
+github.com/google/go-containerregistry v0.21.3 h1:Xr+yt3VvwOOn/5nJzd7UoOhwPGiPkYW0zWDLLUXqAi4= +github.com/google/go-containerregistry v0.21.3/go.mod h1:D5ZrJF1e6dMzvInpBPuMCX0FxURz7GLq2rV3Us9aPkc= github.com/google/go-github/v73 v73.0.0 h1:aR+Utnh+Y4mMkS+2qLQwcQ/cF9mOTpdwnzlaw//rG24= github.com/google/go-github/v73 v73.0.0/go.mod h1:fa6w8+/V+edSU0muqdhCVY7Beh1M8F1IlQPZIANKIYw= -github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= -github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0= +github.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -438,16 +454,18 @@ github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81z github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.9 h1:TOpi/QG8iDcZlkQlGlFUti/ZtyLkliXvHDcyUIMuFrU= -github.com/googleapis/enterprise-certificate-proxy v0.3.9/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= -github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y= -github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14= +github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8= +github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg= 
+github.com/googleapis/gax-go/v2 v2.20.0 h1:NIKVuLhDlIV74muWlsMM4CcQZqN6JJ20Qcxd9YMuYcs= +github.com/googleapis/gax-go/v2 v2.20.0/go.mod h1:But/NJU6TnZsrLai/xBAQLLz+Hc7fHZJt/hsCz3Fih4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/graph-gophers/graphql-go v1.9.0 h1:yu0ucKHLc5qGpRwLYKIWtr9bOoxovkWasuBrPQwlHls= +github.com/graph-gophers/graphql-go v1.9.0/go.mod h1:23olKZ7duEvHlF/2ELEoSZaY1aNPfShjP782SOoNTyM= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 h1:kEISI/Gx67NzH3nJxAmY/dGac80kKZgZt134u7Y/k1s= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4/go.mod h1:6Nz966r3vQYCqIzWsuEl9d7cf7mRhtDmm++sOxlnfxI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c= github.com/hairyhenderson/go-which v0.2.2 h1:yMyAHo4InxHiTAboIeOji8nZ5EXwIp116a2uo/MFkFI= github.com/hairyhenderson/go-which v0.2.2/go.mod h1:vBfncX6hXWQhY1Qte8qQNWuJNnsGPqFLjgmwEETyOAo= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= @@ -478,22 +496,22 @@ github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/in-toto/attestation v1.1.2 
h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E= -github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM= -github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= -github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= +github.com/in-toto/attestation v1.2.0 h1:aPRUZ3azbqD7yEBD5fP3TD8Dszf+YHo284SOcpahjQk= +github.com/in-toto/attestation v1.2.0/go.mod h1:r79G45gOmzPismgObLSL+rZTFxUgZLOQJI6LofTZgXk= +github.com/in-toto/in-toto-golang v0.10.0 h1:+s2eZQSK3WmWfYV85qXVSBfqgawi/5L02MaqA4o/tpM= +github.com/in-toto/in-toto-golang v0.10.0/go.mod h1:wjT4RiyFlLWCmLUJjwB8oZcjaq7HA390aMJcD3xXgmg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo= +github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= -github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod 
h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= +github.com/jedisct1/go-minisign v0.0.0-20241212093149-d2f9f49435c7 h1:FWpSWRD8FbVkKQu8M1DM9jF5oXFLyE+XpisIYfdzbic= +github.com/jedisct1/go-minisign v0.0.0-20241212093149-d2f9f49435c7/go.mod h1:BMxO138bOokdgt4UaxZiEfypcSHX0t6SIFimVP1oRfk= github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4= github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= @@ -504,8 +522,8 @@ github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= -github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= +github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= +github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -516,18 +534,20 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA= github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw= +github.com/lestrrat-go/dsig v1.0.0 
h1:OE09s2r9Z81kxzJYRn07TFM9XA4akrUdoMwr0L8xj38= +github.com/lestrrat-go/dsig v1.0.0/go.mod h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo= +github.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7gcrVVMFPOzY= +github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU= github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= -github.com/lestrrat-go/httprc/v3 v3.0.0 h1:nZUx/zFg5uc2rhlu1L1DidGr5Sj02JbXvGSpnY4LMrc= -github.com/lestrrat-go/httprc/v3 v3.0.0/go.mod h1:k2U1QIiyVqAKtkffbg+cUmsyiPGQsb9aAfNQiNFuQ9Q= -github.com/lestrrat-go/jwx/v3 v3.0.10 h1:XuoCBhZBncRIjMQ32HdEc76rH0xK/Qv2wq5TBouYJDw= -github.com/lestrrat-go/jwx/v3 v3.0.10/go.mod h1:kNMedLgTpHvPJkK5EMVa1JFz+UVyY2dMmZKu3qjl/Pk= -github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= -github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lestrrat-go/httprc/v3 v3.0.5 h1:S+Mb4L2I+bM6JGTibLmxExhyTOqnXjqx+zi9MoXw/TM= +github.com/lestrrat-go/httprc/v3 v3.0.5/go.mod h1:mSMtkZW92Z98M5YoNNztbRGxbXHql7tSitCvaxvo9l0= +github.com/lestrrat-go/jwx/v3 v3.0.13 h1:AdHKiPIYeCSnOJtvdpipPg/0SuFh9rdkN+HF3O0VdSk= +github.com/lestrrat-go/jwx/v3 v3.0.13/go.mod h1:2m0PV1A9tM4b/jVLMx8rh6rBl7F6WGb3EG2hufN9OQU= github.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss= github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg= -github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc= -github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8= +github.com/letsencrypt/boulder v0.20260324.0 h1:smPvtVYHqGlGSqZCoXWYEsUG/tAMK+Ss6I/6KPFmCjI= +github.com/letsencrypt/boulder v0.20260324.0/go.mod h1:HcwzXNudqSEtHxh7mD7wj8Oqf76oSCwNa9ifwFLzukU= 
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -536,15 +556,15 @@ github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHP github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= -github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= +github.com/mattn/go-runewidth v0.0.21 h1:jJKAZiQH+2mIinzCJIaIG9Be1+0NR+5sz/lYEEjdM8w= +github.com/mattn/go-runewidth v0.0.21/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= github.com/maxbrunsfeld/counterfeiter/v6 v6.12.1 h1:D4O2wLxB384TS3ohBJMfolnxb4qGmoZ1PnWNtit8LYo= github.com/maxbrunsfeld/counterfeiter/v6 v6.12.1/go.mod h1:RuJdxo0oI6dClIaMzdl3hewq3a065RH65dofJP03h8I= -github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= -github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= +github.com/miekg/dns v1.1.72 h1:vhmr+TF2A3tuoGNkLDFK9zi36F2LS+hKTRW0Uf8kbzI= +github.com/miekg/dns v1.1.72/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs= github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= -github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.1.2 h1:/VxmeAX5qU6Q3EwafypogwWbYryHFmF2RpkJmw3m4MQ= +github.com/miekg/pkcs11 v1.1.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= 
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= @@ -582,54 +602,55 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s= +github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ= github.com/oleiade/reflections v1.1.0 h1:D+I/UsXQB4esMathlt0kkZRJZdUDmhv5zGi/HOwYTWo= github.com/oleiade/reflections v1.1.0/go.mod h1:mCxx0QseeVCHs5Um5HhJeCKVC7AwS8kO67tky4rdisA= github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 h1:zrbMGy9YXpIeTnGj4EljqMiZsIcE09mmF8XsD5AYOJc= github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6/go.mod h1:rEKTHC9roVVicUIfZK7DYrdIoM0EOr8mK1Hj5s3JjH0= -github.com/olekukonko/errors v1.1.0 h1:RNuGIh15QdDenh+hNvKrJkmxxjV4hcS50Db478Ou5sM= -github.com/olekukonko/errors v1.1.0/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y= -github.com/olekukonko/ll v0.1.3 h1:sV2jrhQGq5B3W0nENUISCR6azIPf7UBUpVq0x/y70Fg= -github.com/olekukonko/ll v0.1.3/go.mod h1:b52bVQRRPObe+yyBl0TxNfhesL0nedD4Cht0/zx55Ew= -github.com/olekukonko/tablewriter v1.1.2 h1:L2kI1Y5tZBct/O/TyZK1zIE9GlBj/TVs+AY5tZDCDSc= -github.com/olekukonko/tablewriter v1.1.2/go.mod h1:z7SYPugVqGVavWoA2sGsFIoOVNmEHxUAAMrhXONtfkg= +github.com/olekukonko/errors v1.2.0 h1:10Zcn4GeV59t/EGqJc8fUjtFT/FuUh5bTMzZ1XwmCRo= 
+github.com/olekukonko/errors v1.2.0/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y= +github.com/olekukonko/ll v0.1.8 h1:ysHCJRGHYKzmBSdz9w5AySztx7lG8SQY+naTGYUbsz8= +github.com/olekukonko/ll v0.1.8/go.mod h1:RPRC6UcscfFZgjo1nulkfMH5IM0QAYim0LfnMvUuozw= +github.com/olekukonko/tablewriter v1.1.4 h1:ORUMI3dXbMnRlRggJX3+q7OzQFDdvgbN9nVWj1drm6I= +github.com/olekukonko/tablewriter v1.1.4/go.mod h1:+kedxuyTtgoZLwif3P1Em4hARJs+mVnzKxmsCL/C5RY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= -github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/ginkgo/v2 v2.28.0 h1:Rrf+lVLmtlBIKv6KrIGJCjyY8N36vDVcutbGJkyqjJc= +github.com/onsi/ginkgo/v2 v2.28.0/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM= -github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4= -github.com/open-policy-agent/opa v1.8.0 h1:4JdYuZcANeUF1v/87NGpirocpaZzJA0PcuL7xfmsMNM= -github.com/open-policy-agent/opa v1.8.0/go.mod h1:vOVZuIJQISnaYcZtQ58yTDkVCp1FmGPwK43pO9qPDqM= 
+github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28= +github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg= +github.com/open-policy-agent/opa v1.15.1 h1:ZE4JaXsVUzDiHFSlOMBS3nJohR5BRGB/RNz6gTNugzE= +github.com/open-policy-agent/opa v1.15.1/go.mod h1:c6SN+7jSsUcKJLQc5P4yhwx8YYDRbjpAiGkBOTqxaa4= github.com/opencontainers/cgroups v0.0.6 h1:tfZFWTIIGaUUFImTyuTg+Mr5x8XRiSdZESgEBW7UxuI= github.com/opencontainers/cgroups v0.0.6/go.mod h1:oWVzJsKK0gG9SCRBfTpnn16WcGEqDI8PAcpMGbqWxcs= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/opencontainers/runc v1.4.0 h1:FG1Hw0GBYPsNki+mBz1QOrSzbwbAcerhrAD2r097QCc= -github.com/opencontainers/runc v1.4.0/go.mod h1:sch3Bh3c1NlyAkALoAUz5Br9ubMLZzFcxuovZbnkErk= +github.com/opencontainers/runc v1.4.1 h1:eg1A930KKiZ3IShaYHPRiDi6uMrHMnd7OiElHBLgqfY= +github.com/opencontainers/runc v1.4.1/go.mod h1:ufk5PTTsy5pnGBAvTh50e+eqGk01pYH2YcVxh557Qlk= github.com/opencontainers/runtime-spec v1.3.0 h1:YZupQUdctfhpZy3TM39nN9Ika5CBWT5diQ8ibYCRkxg= github.com/opencontainers/runtime-spec v1.3.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2 h1:2xZEHOdeQBV6PW8ZtimN863bIOl7OCW/X10K0cnxKeA= github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2/go.mod h1:MXdPzqAA8pHC58USHqNCSjyLnRQ6D+NjbpP+02Z1U/0= github.com/opencontainers/selinux v1.13.0 h1:Zza88GWezyT7RLql12URvoxsbLfjFx988+LGaWfbL84= github.com/opencontainers/selinux v1.13.0/go.mod h1:XxWTed+A/s5NNq4GmYScVy+9jzXhGBVEOAyucdRUY8s= -github.com/openshift/api v0.0.0-20241204141306-c1fdeb0788c1 
h1:L4hBXzk+9B34Q8Y2tcOpWPByM+vW4tPiIzTqB4oZ9Gc= -github.com/openshift/api v0.0.0-20241204141306-c1fdeb0788c1/go.mod h1:Shkl4HanLwDiiBzakv+con/aMGnVE2MAGvoKp5oyYUo= +github.com/openshift/api v0.0.0-20260330162214-96f1f5ac7ff2 h1:q89bR1UvKEH9kNh9me1oqLYszKuhaeghorpkO3+DNwY= +github.com/openshift/api v0.0.0-20260330162214-96f1f5ac7ff2/go.mod h1:pyVjK0nZ4sRs4fuQVQ4rubsJdahI1PB94LnQ8sGdvxo= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= -github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pelletier/go-toml/v2 v2.3.0 h1:k59bC/lIZREW0/iVaQR8nDHxVq8OVlIzYCOJf421CaM= +github.com/pelletier/go-toml/v2 v2.3.0/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pjbgf/go-apparmor v0.1.3-0.20241107184909-1375e5e7aa89 h1:wBXahBOWK72QV3tnaMNtbAGnOAH0a/n0lpgxruYirWs= github.com/pjbgf/go-apparmor v0.1.3-0.20241107184909-1375e5e7aa89/go.mod h1:AXUw6FFDoh4deKxcQ883jqJMnEnA/b1oU5nU29i2lPA= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -640,8 +661,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.88.1 h1:K/r+qPGyr/Fx9vbN7biV9q2/PV5ETj+bVVH5RUvqEG8= 
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.88.1/go.mod h1:IJwk1oNs212afqGbNnE84GAB95OHtJR/BuI1rKESiYk= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.90.1 h1:URbjn501/IBFTzPtGXrYDXHi+ZcbP2W60o6JeTrY3vQ= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.90.1/go.mod h1:Gfzi4500QCMnptFIQc8YdDi8YZ4QA0vs22LROWZ3+YU= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -649,20 +670,20 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= -github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= -github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= -github.com/protocolbuffers/txtpbfmt v0.0.0-20250627152318-f293424e46b5 h1:WWs1ZFnGobK5ZXNu+N9If+8PDNVB9xAqrib/stUXsV4= -github.com/protocolbuffers/txtpbfmt v0.0.0-20250627152318-f293424e46b5/go.mod h1:BnHogPTyzYAReeQLZrOxyxzS739DaTNtTvohVdbENmA= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/prometheus/procfs v0.20.1 h1:XwbrGOIplXW/AU3YhIhLODXMJYyC1isLFfYCsTEycfc= +github.com/prometheus/procfs v0.20.1/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo= +github.com/protocolbuffers/txtpbfmt 
v0.0.0-20260217160748-a481f6a22f94 h1:2PC6Ql3jipz1KvBlqUHjjk6v4aMwE86mfDu1XMH0LR8= +github.com/protocolbuffers/txtpbfmt v0.0.0-20260217160748-a481f6a22f94/go.mod h1:JSbkp0BviKovYYt9XunS95M3mLPibE9bGg+Y95DsEEY= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= -github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4= +github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI= github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= @@ -671,10 +692,10 @@ github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= github.com/seccomp/libseccomp-golang v0.11.1 h1:wuk4ZjSx6kyQII4rj6G6fvVzRHQaSiPvccJazDagu4g= github.com/seccomp/libseccomp-golang v0.11.1/go.mod 
h1:5m1Lk8E9OwgZTTVz4bBOer7JuazaBa+xTkM895tDiWc= -github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= -github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= -github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= -github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/secure-systems-lab/go-securesystemslib v0.10.0 h1:l+H5ErcW0PAehBNrBxoGv1jjNpGYdZ9RcheFkB2WI14= +github.com/secure-systems-lab/go-securesystemslib v0.10.0/go.mod h1:MRKONWmRoFzPNQ9USRF9i1mc7MvAVvF1LlW8X5VWDvk= +github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0= +github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= @@ -685,31 +706,29 @@ github.com/sigstore/fulcio v1.8.5 h1:HYTD1/L5wlBp8JxsWxUf8hmfaNBBF/x3r3p5l6tZwbA github.com/sigstore/fulcio v1.8.5/go.mod h1:tSLYK3JsKvJpDW1BsIsVHZgHj+f8TjXARzqIUWSsSPQ= github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= -github.com/sigstore/rekor v1.5.0 h1:rL7SghHd5HLCtsCrxw0yQg+NczGvM75EjSPPWuGjaiQ= -github.com/sigstore/rekor v1.5.0/go.mod h1:D7JoVCUkxwQOpPDNYeu+CE8zeBC18Y5uDo6tF8s2rcQ= -github.com/sigstore/rekor-tiles/v2 v2.1.0 h1:lSxhMwVYkMsCok2rFKU3eRJXz7ppTkLEVjUnH+g8aZY= -github.com/sigstore/rekor-tiles/v2 v2.1.0/go.mod h1:qRw4VXl35azi8ENjSWbdmGtzdviLd7H08fDcp5C+97Y= -github.com/sigstore/sigstore v1.10.4 h1:ytOmxMgLdcUed3w1SbbZOgcxqwMG61lh1TmZLN+WeZE= -github.com/sigstore/sigstore v1.10.4/go.mod h1:tDiyrdOref3q6qJxm2G+JHghqfmvifB7hw+EReAfnbI= 
+github.com/sigstore/rekor v1.5.1 h1:Ca1egHRWRuDvXV4tZu9aXEXc3Gej9FG+HKeapV9OAMQ= +github.com/sigstore/rekor v1.5.1/go.mod h1:gTLDuZuo3SyQCuZvKqwRPA79Qo/2rw39/WtLP/rZjUQ= +github.com/sigstore/rekor-tiles/v2 v2.2.1 h1:UmV1CBQ3SjxxPGpFmwDoOhoIwiKpM2Qm1pU5tPGmvNk= +github.com/sigstore/rekor-tiles/v2 v2.2.1/go.mod h1:z8n6l6oidpaLjjE6rJERuQqY9X38ulnHZCXyL+DEL7U= +github.com/sigstore/sigstore v1.10.5 h1:KqrOjDhNOVY+uOzQFat2FrGLClPPCb3uz8pK3wuI+ow= +github.com/sigstore/sigstore v1.10.5/go.mod h1:k/mcVVXw3I87dYG/iCVTSW2xTrW7vPzxxGic4KqsqXs= github.com/sigstore/sigstore-go v1.1.4 h1:wTTsgCHOfqiEzVyBYA6mDczGtBkN7cM8mPpjJj5QvMg= github.com/sigstore/sigstore-go v1.1.4/go.mod h1:2U/mQOT9cjjxrtIUeKDVhL+sHBKsnWddn8URlswdBsg= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.3 h1:D/FRl5J9UYAJPGZRAJbP0dH78pfwWnKsyCSBwFBU8CI= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.3/go.mod h1:2GIWuNvTRMvrzd0Nl8RNqxrt9H7X0OBStwOSzGYRjYw= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.3 h1:k5VMLf/ms7hh6MLgVoorM0K+hSMwZLXoywlxh4CXqP8= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.3/go.mod h1:S1Bp3dmP7jYlXcGLAxG81wRbE01NIZING8ZIy0dJlAI= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.3 h1:AVWs0E6rVZMoDTE0Iyezrpo1J6RlI5B4QZhAC4FLE30= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.3/go.mod h1:nxQYF0D6u7mVtiP1azj1YVDIrtz7S0RYCVTqUG8IcCk= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.3 h1:lJSdaC/aOlFHlvqmmV696n1HdXLMLEKGwpNZMV0sKts= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.3/go.mod h1:b2rV9qPbt/jv/Yy75AIOZThP8j+pe1ZdLEjOwmjPdoA= -github.com/sigstore/timestamp-authority/v2 v2.0.4 h1:65IBa4LUeFWDQu9hiTt5lBpi/F5jonJWZtH6VLn4InU= -github.com/sigstore/timestamp-authority/v2 v2.0.4/go.mod h1:EXJLiMDBqRPlzC02hPiFSiYTCqSuUpU68a4vr0DFePM= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.4 h1:VZ+L6SKVWbLPHznIF0tBuO7qKMFdJiJMVwFKu9DlY5o= +github.com/sigstore/sigstore/pkg/signature/kms/aws 
v1.10.4/go.mod h1:Rstj47WpJym25il8j4jTL0BfikzP/9AhVD+DsBcYzZc= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.4 h1:G7yOv8bxk3zIEEZyVCixPxtePIAm+t3ZWSaKRPzVw+o= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.4/go.mod h1:hxJelB/bRItMYOzi6qD9xEKjse2QZcikh4TbysfdDHc= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.4 h1:Qxt6dE4IwhJ6gIXmg2q4S/SeqEDSZ29nmfsv7Zb6LL4= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.4/go.mod h1:hJVeNOwarqfyALjOwsf0OR8YA/A96NABucEaQumPr30= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.4 h1:KVavYMPfSf5NryOl6VrZ9nRG3fXOOJOPp7Czk/YCPkM= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.4/go.mod h1:J7CA1AaBkyK8dYq6EdQANhj+8oEcsA7PrIp088qgPiY= +github.com/sigstore/timestamp-authority/v2 v2.0.5 h1:WT17MU4bNRvjRLlTvTO5gmrSIWJVbzwrNXgwsjB+53U= +github.com/sigstore/timestamp-authority/v2 v2.0.5/go.mod h1:oV+Yy0GsfgNAeDZcv/WJjQE42wFtMTtuD85bPLAQk5M= github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= @@ -723,8 +742,6 @@ github.com/spf13/viper v1.21.0 
h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= -github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs= -github.com/stoewer/go-strcase v1.3.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -733,7 +750,6 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -752,8 +768,8 @@ github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gt github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= -github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA= 
-github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw= +github.com/theupdateframework/go-tuf/v2 v2.4.1 h1:K6ewW064rKZCPkRo1W/CTbTtm/+IB4+coG1iNURAGCw= +github.com/theupdateframework/go-tuf/v2 v2.4.1/go.mod h1:Nex2enPVYDFCklrnbTzl3OVwD7fgIAj0J5++z/rvCj8= github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= @@ -767,26 +783,26 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHT github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho= github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= +github.com/transparency-dev/formats v0.1.0 h1:oL0zUFuYUjg8AbtjPMnIRDmjbaHo5jCjEWU5yaNuz0g= +github.com/transparency-dev/formats v0.1.0/go.mod h1:d2FibUOHfCMdCe/+/rbKt1IPLBbPTDfwj46kt541/mU= github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU= github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4= -github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= -github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= +github.com/valyala/fastjson v1.6.10 h1:/yjJg8jaVQdYR3arGxPE2X5z89xrlhS0eGXdv+ADTh4= 
+github.com/valyala/fastjson v1.6.10/go.mod h1:e6FubmQouUNP73jtMLmcbxS6ydWIpOfhz34TSfO3JaE= github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= -github.com/vektah/gqlparser/v2 v2.5.30 h1:EqLwGAFLIzt1wpx1IPpY67DwUujF1OfzgEyDsLrN6kE= -github.com/vektah/gqlparser/v2 v2.5.30/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= +github.com/vektah/gqlparser/v2 v2.5.32 h1:k9QPJd4sEDTL+qB4ncPLflqTJ3MmjB9SrVzJrawpFSc= +github.com/vektah/gqlparser/v2 v2.5.32/go.mod h1:c1I28gSOVNzlfc4WuDlqU7voQnsqI6OG2amkBAFmgts= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= -github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= +github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAzt5X7s6266i6cSVkkFPS0TuXWbIg= +github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= github.com/ysmood/fetchup v0.2.3 
h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= @@ -805,46 +821,42 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s= github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI= -gitlab.com/gitlab-org/api/client-go v0.143.3 h1:4Q4zumLVUnxn/s06RD9U3fyibD1/zr43gTDDtRkjqbA= -gitlab.com/gitlab-org/api/client-go v0.143.3/go.mod h1:rw89Kl9AsKmxRhzkfUSfZ+1jpTewwueKvAYwoYmUoQ8= -go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= -go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +gitlab.com/gitlab-org/api/client-go v1.46.0 h1:YxBWFZIFYKcGESCb9fpkwzouo+apyB9pr/XTWzNoL24= +gitlab.com/gitlab-org/api/client-go v1.46.0/go.mod h1:FtgyU6g2HS5+fMhw6nLK96GBEEBx5MzntOiJWfIaiN8= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= -go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= -go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 h1:bDMKF3RUSxshZ5OjOTi8rsHGaPKsAt76FaqgvIUySLc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0/go.mod h1:dDT67G/IkA46Mr2l9Uj7HsQVwsjASyV9SjGofsiUZDA= -go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= -go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= -go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= -go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= -go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= -go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= -go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= -go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= -go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= -go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= -go.podman.io/common v0.66.1 h1:zDyd4HhVgQAN8LupBHCnhtM3FEOJ9DwmThjulXZq2qA= -go.podman.io/common v0.66.1/go.mod h1:aNd2a0S7pY+fx1X5kpQYuF4hbwLU8ZOccuVrhu7h1Xc= -go.step.sm/crypto v0.75.0 h1:UAHYD6q6ggYyzLlIKHv1MCUVjZIesXRZpGTlRC/HSHw= -go.step.sm/crypto v0.75.0/go.mod h1:wwQ57+ajmDype9mrI/2hRyrvJd7yja5xVgWYqpUN3PE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= +go.opentelemetry.io/otel v1.42.0 h1:lSQGzTgVR3+sgJDAU/7/ZMjN9Z+vUip7leaqBKy4sho= +go.opentelemetry.io/otel v1.42.0/go.mod h1:lJNsdRMxCUIWuMlVJWzecSMuNjE7dOYyWlqOXWkdqCc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 h1:THuZiwpQZuHPul65w4WcwEnkX2QIuMT+UFoOrygtoJw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0/go.mod h1:J2pvYM5NGHofZ2/Ru6zw/TNWnEQp5crgyDeSrYpXkAw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 h1:zWWrB1U6nqhS/k6zYB74CjRpuiitRtLLi68VcgmOEto= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0/go.mod h1:2qXPNBX1OVRC0IwOnfo1ljoid+RD0QK3443EaqVlsOU= +go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= +go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= +go.opentelemetry.io/otel/sdk v1.42.0 h1:LyC8+jqk6UJwdrI/8VydAq/hvkFKNHZVIWuslJXYsDo= +go.opentelemetry.io/otel/sdk v1.42.0/go.mod h1:rGHCAxd9DAph0joO4W6OPwxjNTYWghRWmkHuGbayMts= +go.opentelemetry.io/otel/sdk/metric v1.42.0 h1:D/1QR46Clz6ajyZ3G8SgNlTJKBdGp84q9RKCAZ3YGuA= +go.opentelemetry.io/otel/sdk/metric v1.42.0/go.mod h1:Ua6AAlDKdZ7tdvaQKfSmnFTdHx37+J4ba8MwVCYM5hc= +go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= +go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= +go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= +go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= +go.podman.io/common v0.67.0 
h1:6Ci5oU1ek08OAxBLkHEqSyWmjNh5zf03PRqZ04cPdwU= +go.podman.io/common v0.67.0/go.mod h1:sB9L8LMtmf5Hpek2qkEyRrcSzpb+gYpG3vq5Khima3U= +go.step.sm/crypto v0.76.2 h1:JJ/yMcs/rmcCAwlo+afrHjq74XBFRTJw5B2y4Q4Z4c4= +go.step.sm/crypto v0.76.2/go.mod h1:m6KlB/HzIuGFep0UWI5e0SYi38UxpoKeCg6qUaHV6/Q= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= -go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ= +go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -854,16 +866,18 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 
-golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= +golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4= -golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -871,8 +885,11 @@ golang.org/x/mod v0.2.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= -golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -893,13 +910,16 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.20.0/go.mod 
h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= -golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= +golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -908,9 +928,11 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= -golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= 
+golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -936,18 +958,27 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU= +golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -955,12 +986,14 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= 
-golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= -golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= -golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= +golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= +golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U= +golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -972,8 +1005,10 @@ golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= -golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.43.0 
h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= +golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= @@ -987,26 +1022,26 @@ gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0 gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/api v0.260.0 h1:XbNi5E6bOVEj/uLXQRlt6TKuEzMD7zvW/6tNwltE4P4= -google.golang.org/api v0.260.0/go.mod h1:Shj1j0Phr/9sloYrKomICzdYgsSDImpTxME8rGLaZ/o= +google.golang.org/api v0.273.0 h1:r/Bcv36Xa/te1ugaN1kdJ5LoA5Wj/cL+a4gj6FiPBjQ= +google.golang.org/api v0.273.0/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934= -google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0= -google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E= -google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod 
h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 h1:XzmzkmB14QhVhgnawEVsOn6OFsnpyxNPRY9QV01dNB0= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:L43LFes82YgSonw6iTXTxXUX1OlULt4AQtkik4ULL/I= +google.golang.org/genproto/googleapis/api v0.0.0-20260330182312-d5a96adf58d8 h1:udju5p8o61FW6K2fxHWPIZhChk4FHl2Hjk8+uuLNnpM= +google.golang.org/genproto/googleapis/api v0.0.0-20260330182312-d5a96adf58d8/go.mod h1:EIQZ5bFCfRQDV4MhRle7+OgjNtZ6P1PiZBgAKuxXu/Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260330182312-d5a96adf58d8 h1:OHkuo1i98/05rzpm9NBbfEtpJH/k3abEgZUKaAuCI7Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260330182312-d5a96adf58d8/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= -google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.0 h1:6Al3kEFFP9VJhRz3DID6quisgPnTeZVr4lep9kkxdPA= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.0/go.mod h1:QLvsjh0OIR0TYBeiu2bkWGTJBUNQ64st52iWj/yA93I= +google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= +google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.1 
h1:/WILD1UcXj/ujCxgoL/DvRgt2CP3txG8+FwkUbb9110= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.1/go.mod h1:YNKnb2OAApgYn2oYY47Rn7alMr1zWjb2U8Q0aoGWiNc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1027,6 +1062,7 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.1 h1:tVBILHy0R6e4wkYOn3XmiITt/hEVH4TFMYvAX2Ytz6k= gopkg.in/ini.v1 v1.67.1/go.mod h1:x/cyOwCgZqOkJoDIJ3c1KNHMo10+nLGAhh+kn3Zizss= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= @@ -1046,53 +1082,53 @@ gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= -k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= -k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= -k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= -k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= -k8s.io/apimachinery v0.35.0/go.mod 
h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= -k8s.io/apiserver v0.35.0 h1:CUGo5o+7hW9GcAEF3x3usT3fX4f9r8xmgQeCBDaOgX4= -k8s.io/apiserver v0.35.0/go.mod h1:QUy1U4+PrzbJaM3XGu2tQ7U9A4udRRo5cyxkFX0GEds= -k8s.io/cli-runtime v0.35.0 h1:PEJtYS/Zr4p20PfZSLCbY6YvaoLrfByd6THQzPworUE= -k8s.io/cli-runtime v0.35.0/go.mod h1:VBRvHzosVAoVdP3XwUQn1Oqkvaa8facnokNkD7jOTMY= -k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= -k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= -k8s.io/code-generator v0.35.0 h1:TvrtfKYZTm9oDF2z+veFKSCcgZE3Igv0svY+ehCmjHQ= -k8s.io/code-generator v0.35.0/go.mod h1:iS1gvVf3c/T71N5DOGYO+Gt3PdJ6B9LYSvIyQ4FHzgc= -k8s.io/component-base v0.35.0 h1:+yBrOhzri2S1BVqyVSvcM3PtPyx5GUxCK2tinZz1G94= -k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0= +k8s.io/api v0.35.3 h1:pA2fiBc6+N9PDf7SAiluKGEBuScsTzd2uYBkA5RzNWQ= +k8s.io/api v0.35.3/go.mod h1:9Y9tkBcFwKNq2sxwZTQh1Njh9qHl81D0As56tu42GA4= +k8s.io/apiextensions-apiserver v0.35.3 h1:2fQUhEO7P17sijylbdwt0nBdXP0TvHrHj0KeqHD8FiU= +k8s.io/apiextensions-apiserver v0.35.3/go.mod h1:tK4Kz58ykRpwAEkXUb634HD1ZAegEElktz/B3jgETd8= +k8s.io/apimachinery v0.35.3 h1:MeaUwQCV3tjKP4bcwWGgZ/cp/vpsRnQzqO6J6tJyoF8= +k8s.io/apimachinery v0.35.3/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/apiserver v0.35.3 h1:D2eIcfJ05hEAEewoSDg+05e0aSRwx8Y4Agvd/wiomUI= +k8s.io/apiserver v0.35.3/go.mod h1:JI0n9bHYzSgIxgIrfe21dbduJ9NHzKJ6RchcsmIKWKY= +k8s.io/cli-runtime v0.35.3 h1:UZq4ipNimtzBmhN7PPKbfAdqo8quK0H0UdGl6qAQnqI= +k8s.io/cli-runtime v0.35.3/go.mod h1:O7MUmCqcKSd5xI+O5X7/pRkB5l0O2NIhOdUVwbHLXu4= +k8s.io/client-go v0.35.3 h1:s1lZbpN4uI6IxeTM2cpdtrwHcSOBML1ODNTCCfsP1pg= +k8s.io/client-go v0.35.3/go.mod h1:RzoXkc0mzpWIDvBrRnD+VlfXP+lRzqQjCmKtiwZ8Q9c= +k8s.io/code-generator v0.35.3 h1:NDGCLkEm6Ho65wTdSe2EgErmmtsrezOPwwOchlNc6FQ= +k8s.io/code-generator v0.35.3/go.mod h1:LAVriRGXQusHQ0Ns64SE1ublSswm1KrK7cXn0GuQETg= +k8s.io/component-base v0.35.3 
h1:mbKbzoIMy7JDWS/wqZobYW1JDVRn/RKRaoMQHP9c4P0= +k8s.io/component-base v0.35.3/go.mod h1:IZ8LEG30kPN4Et5NeC7vjNv5aU73ku5MS15iZyvyMYk= k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b h1:gMplByicHV/TJBizHd9aVEsTYoJBnnUAT5MHlTkbjhQ= k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b/go.mod h1:CgujABENc3KuTrcsdpGmrrASjtQsWCT7R99mEV4U/fM= -k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= -k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/klog/v2 v2.140.0 h1:Tf+J3AH7xnUzZyVVXhTgGhEKnFqye14aadWv7bzXdzc= +k8s.io/klog/v2 v2.140.0/go.mod h1:o+/RWfJ6PwpnFn7OyAG3QnO47BFsymfEfrz6XyYSSp0= +k8s.io/kube-openapi v0.0.0-20260330154417-16be699c7b31 h1:V+sn9a/1fEYDGwnllCmqXBk8x7obZ+hl869Q3Abumkg= +k8s.io/kube-openapi v0.0.0-20260330154417-16be699c7b31/go.mod h1:uGBT7iTA6c6MvqUvSXIaYZo9ukscABYi2btjhvgKGZ0= +k8s.io/utils v0.0.0-20260319190234-28399d86e0b5 h1:kBawHLSnx/mYHmRnNUf9d4CpjREbeZuxoSGOX/J+aYM= +k8s.io/utils v0.0.0-20260319190234-28399d86e0b5/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= kernel.org/pub/linux/libs/security/libcap/cap v1.2.76 h1:mrdLPj8ujM6eIKGtd1PkkuCIodpFFDM42Cfm0YODkIM= kernel.org/pub/linux/libs/security/libcap/cap v1.2.76/go.mod h1:7V2BQeHnVAQwhCnCPJ977giCeGDiywVewWF+8vkpPlc= kernel.org/pub/linux/libs/security/libcap/psx v1.2.76 h1:3DyzQ30OHt3wiOZVL1se2g1PAPJIU7+tMUyvfMUj1dY= kernel.org/pub/linux/libs/security/libcap/psx v1.2.76/go.mod h1:+l6Ee2F59XiJ2I6WR5ObpC1utCQJZ/VLsEbQCD8RG24= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 
v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= -sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0 h1:hSfpvjjTQXQY2Fol2CS0QHMNs/WI1MOSGzCm1KhM5ec= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.23.3 h1:VjB/vhoPoA9l1kEKZHBMnQF33tdCLQKJtydy4iqwZ80= +sigs.k8s.io/controller-runtime v0.23.3/go.mod h1:B6COOxKptp+YaUT5q4l6LqUJTRpizbgf9KSRNdQGns0= sigs.k8s.io/controller-tools v0.20.0 h1:VWZF71pwSQ2lZZCt7hFGJsOfDc5dVG28/IysjjMWXL8= sigs.k8s.io/controller-tools v0.20.0/go.mod h1:b4qPmjGU3iZwqn34alUU5tILhNa9+VXK+J3QV0fT/uU= -sigs.k8s.io/gateway-api v1.4.0 h1:ZwlNM6zOHq0h3WUX2gfByPs2yAEsy/EenYJB78jpQfQ= -sigs.k8s.io/gateway-api v1.4.0/go.mod h1:AR5RSqciWP98OPckEjOjh2XJhAe2Na4LHyXD2FUY7Qk= +sigs.k8s.io/gateway-api v1.5.1 h1:RqVRIlkhLhUO8wOHKTLnTJA6o/1un4po4/6M1nRzdd0= +sigs.k8s.io/gateway-api v1.5.1/go.mod h1:GvCETiaMAlLym5CovLxGjS0NysqFk3+Yuq3/rh6QL2o= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/release-utils v0.12.3 h1:iNVJY81QfmMCmXxMg8IvvkkeQNk6ZWlLj+iPKSlKyVQ= sigs.k8s.io/release-utils v0.12.3/go.mod h1:BvbNmm1BmM3cnEpBmNHWL3wOSziOdGlsYR8vCFq/Q0o= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 
h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2 h1:kwVWMx5yS1CrnFWA/2QHyRVJ8jM6dBA80uLmm0wJkk8= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= -software.sslmate.com/src/go-pkcs12 v0.6.0 h1:f3sQittAeF+pao32Vb+mkli+ZyT+VwKaD014qFGq6oU= -software.sslmate.com/src/go-pkcs12 v0.6.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +software.sslmate.com/src/go-pkcs12 v0.7.0 h1:Db8W44cB54TWD7stUFFSWxdfpdn6fZVcDl0w3R4RVM0= +software.sslmate.com/src/go-pkcs12 v0.7.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/internal/pkg/util/mock_client.go b/internal/pkg/util/mock_client.go index 2b2176d2ba..7b4ac943f0 100644 --- a/internal/pkg/util/mock_client.go +++ b/internal/pkg/util/mock_client.go @@ -66,7 +66,7 @@ type MockSubResourceWriterPatchFn func( // A MockSubResourceWriterApplyFn is used to mock client.Client's SubResourceWriterUpdate implementation. type MockSubResourceWriterApplyFn func( - ctx context.Context, config runtime.ApplyConfiguration, opts ...client.ApplyOption, + ctx context.Context, config runtime.ApplyConfiguration, opts ...client.SubResourceApplyOption, ) error // A MockSubResourceReaderGetFn is used to mock client.Client's SubResourceReaderGet implementation. @@ -434,7 +434,7 @@ func (m *MockSubResourceWriter) Patch( // Apply mocks the apply method. func (m *MockSubResourceWriter) Apply( - ctx context.Context, config runtime.ApplyConfiguration, opts ...client.ApplyOption, + ctx context.Context, config runtime.ApplyConfiguration, opts ...client.SubResourceApplyOption, ) error { return m.MockApply(ctx, config, opts...) 
} diff --git a/vendor/buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/LICENSE b/vendor/buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/LICENSE new file mode 100644 index 0000000000..347d9fc993 --- /dev/null +++ b/vendor/buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023-2026 Buf Technologies, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/buf/validate/validate.pb.go b/vendor/buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/buf/validate/validate.pb.go new file mode 100644 index 0000000000..27ad09c39d --- /dev/null +++ b/vendor/buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/buf/validate/validate.pb.go @@ -0,0 +1,17632 @@ +// Copyright 2023-2026 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: buf/validate/validate.proto + +// [Protovalidate](https://protovalidate.com/) is the semantic validation library for Protobuf. +// It provides standard annotations to validate common rules on messages and fields, as well as the ability to use [CEL](https://cel.dev) to write custom rules. +// It's the next generation of [protoc-gen-validate](https://github.com/bufbuild/protoc-gen-validate). +// +// This package provides the options, messages, and enums that power Protovalidate. +// Apply its options to messages, fields, and oneofs in your Protobuf schemas to add validation rules: +// +// ```proto +// message User { +// string id = 1 [(buf.validate.field).string.uuid = true]; +// string first_name = 2 [(buf.validate.field).string.max_len = 64]; +// string last_name = 3 [(buf.validate.field).string.max_len = 64]; +// +// option (buf.validate.message).cel = { +// id: "first_name_requires_last_name" +// message: "last_name must be present if first_name is present" +// expression: "!has(this.first_name) || has(this.last_name)" +// }; +// } +// ``` +// +// These rules are enforced at runtime by language-specific libraries. 
+// See the [developer quickstart](https://protovalidate.com/quickstart/) to get started, or go directly to the runtime library for your language: +// [Go](https://github.com/bufbuild/protovalidate-go) +// [JavaScript/TypeScript](https://github.com/bufbuild/protovalidate-es), +// [Java](https://github.com/bufbuild/protovalidate-java), +// [Python](https://github.com/bufbuild/protovalidate-python), +// or [C++](https://github.com/bufbuild/protovalidate-cc). + +//go:build !protoopaque + +package validate + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies how `FieldRules.ignore` behaves, depending on the field's value, and +// whether the field tracks presence. +type Ignore int32 + +const ( + // Ignore rules if the field tracks presence and is unset. This is the default + // behavior. + // + // In proto3, only message fields, members of a Protobuf `oneof`, and fields + // with the `optional` label track presence. 
Consequently, the following fields + // are always validated, whether a value is set or not: + // + // ```proto + // syntax="proto3"; + // + // message RulesApply { + // string email = 1 [ + // (buf.validate.field).string.email = true + // ]; + // int32 age = 2 [ + // (buf.validate.field).int32.gt = 0 + // ]; + // repeated string labels = 3 [ + // (buf.validate.field).repeated.min_items = 1 + // ]; + // } + // + // ``` + // + // In contrast, the following fields track presence, and are only validated if + // a value is set: + // + // ```proto + // syntax="proto3"; + // + // message RulesApplyIfSet { + // optional string email = 1 [ + // (buf.validate.field).string.email = true + // ]; + // oneof ref { + // string reference = 2 [ + // (buf.validate.field).string.uuid = true + // ]; + // string name = 3 [ + // (buf.validate.field).string.min_len = 4 + // ]; + // } + // SomeMessage msg = 4 [ + // (buf.validate.field).cel = {/* ... */} + // ]; + // } + // + // ``` + // + // To ensure that such a field is set, add the `required` rule. + // + // To learn which fields track presence, see the + // [Field Presence cheat sheet](https://protobuf.dev/programming-guides/field_presence/#cheat). + Ignore_IGNORE_UNSPECIFIED Ignore = 0 + // Ignore rules if the field is unset, or set to the zero value. + // + // The zero value depends on the field type: + // - For strings, the zero value is the empty string. + // - For bytes, the zero value is empty bytes. + // - For bool, the zero value is false. + // - For numeric types, the zero value is zero. + // - For enums, the zero value is the first defined enum value. + // - For repeated fields, the zero is an empty list. + // - For map fields, the zero is an empty map. + // - For message fields, absence of the message (typically a null-value) is considered zero value. + // + // For fields that track presence (e.g. adding the `optional` label in proto3), + // this a no-op and behavior is the same as the default `IGNORE_UNSPECIFIED`. 
+ Ignore_IGNORE_IF_ZERO_VALUE Ignore = 1 + // Always ignore rules, including the `required` rule. + // + // This is useful for ignoring the rules of a referenced message, or to + // temporarily ignore rules during development. + // + // ```proto + // + // message MyMessage { + // // The field's rules will always be ignored, including any validations + // // on value's fields. + // MyOtherMessage value = 1 [ + // (buf.validate.field).ignore = IGNORE_ALWAYS + // ]; + // } + // + // ``` + Ignore_IGNORE_ALWAYS Ignore = 3 +) + +// Enum value maps for Ignore. +var ( + Ignore_name = map[int32]string{ + 0: "IGNORE_UNSPECIFIED", + 1: "IGNORE_IF_ZERO_VALUE", + 3: "IGNORE_ALWAYS", + } + Ignore_value = map[string]int32{ + "IGNORE_UNSPECIFIED": 0, + "IGNORE_IF_ZERO_VALUE": 1, + "IGNORE_ALWAYS": 3, + } +) + +func (x Ignore) Enum() *Ignore { + p := new(Ignore) + *p = x + return p +} + +func (x Ignore) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Ignore) Descriptor() protoreflect.EnumDescriptor { + return file_buf_validate_validate_proto_enumTypes[0].Descriptor() +} + +func (Ignore) Type() protoreflect.EnumType { + return &file_buf_validate_validate_proto_enumTypes[0] +} + +func (x Ignore) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// KnownRegex contains some well-known patterns. +type KnownRegex int32 + +const ( + KnownRegex_KNOWN_REGEX_UNSPECIFIED KnownRegex = 0 + // HTTP header name as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2). + KnownRegex_KNOWN_REGEX_HTTP_HEADER_NAME KnownRegex = 1 + // HTTP header value as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.4). + KnownRegex_KNOWN_REGEX_HTTP_HEADER_VALUE KnownRegex = 2 +) + +// Enum value maps for KnownRegex. 
+var ( + KnownRegex_name = map[int32]string{ + 0: "KNOWN_REGEX_UNSPECIFIED", + 1: "KNOWN_REGEX_HTTP_HEADER_NAME", + 2: "KNOWN_REGEX_HTTP_HEADER_VALUE", + } + KnownRegex_value = map[string]int32{ + "KNOWN_REGEX_UNSPECIFIED": 0, + "KNOWN_REGEX_HTTP_HEADER_NAME": 1, + "KNOWN_REGEX_HTTP_HEADER_VALUE": 2, + } +) + +func (x KnownRegex) Enum() *KnownRegex { + p := new(KnownRegex) + *p = x + return p +} + +func (x KnownRegex) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (KnownRegex) Descriptor() protoreflect.EnumDescriptor { + return file_buf_validate_validate_proto_enumTypes[1].Descriptor() +} + +func (KnownRegex) Type() protoreflect.EnumType { + return &file_buf_validate_validate_proto_enumTypes[1] +} + +func (x KnownRegex) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// `Rule` represents a validation rule written in the Common Expression +// Language (CEL) syntax. Each Rule includes a unique identifier, an +// optional error message, and the CEL expression to evaluate. For more +// information, [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). +// +// ```proto +// +// message Foo { +// option (buf.validate.message).cel = { +// id: "foo.bar" +// message: "bar must be greater than 0" +// expression: "this.bar > 0" +// }; +// int32 bar = 1; +// } +// +// ``` +type Rule struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `id` is a string that serves as a machine-readable name for this Rule. + // It should be unique within its scope, which could be either a message or a field. + Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // `message` is an optional field that provides a human-readable error message + // for this Rule when the CEL expression evaluates to false. If a + // non-empty message is provided, any strings resulting from the CEL + // expression evaluation are ignored. 
+ Message *string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + // `expression` is the actual CEL expression that will be evaluated for + // validation. This string must resolve to either a boolean or a string + // value. If the expression evaluates to false or a non-empty string, the + // validation is considered failed, and the message is rejected. + Expression *string `protobuf:"bytes,3,opt,name=expression" json:"expression,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Rule) Reset() { + *x = Rule{} + mi := &file_buf_validate_validate_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Rule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Rule) ProtoMessage() {} + +func (x *Rule) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Rule) GetId() string { + if x != nil && x.Id != nil { + return *x.Id + } + return "" +} + +func (x *Rule) GetMessage() string { + if x != nil && x.Message != nil { + return *x.Message + } + return "" +} + +func (x *Rule) GetExpression() string { + if x != nil && x.Expression != nil { + return *x.Expression + } + return "" +} + +func (x *Rule) SetId(v string) { + x.Id = &v +} + +func (x *Rule) SetMessage(v string) { + x.Message = &v +} + +func (x *Rule) SetExpression(v string) { + x.Expression = &v +} + +func (x *Rule) HasId() bool { + if x == nil { + return false + } + return x.Id != nil +} + +func (x *Rule) HasMessage() bool { + if x == nil { + return false + } + return x.Message != nil +} + +func (x *Rule) HasExpression() bool { + if x == nil { + return false + } + return x.Expression != nil +} + +func (x *Rule) ClearId() { + x.Id = nil +} + +func 
(x *Rule) ClearMessage() { + x.Message = nil +} + +func (x *Rule) ClearExpression() { + x.Expression = nil +} + +type Rule_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `id` is a string that serves as a machine-readable name for this Rule. + // It should be unique within its scope, which could be either a message or a field. + Id *string + // `message` is an optional field that provides a human-readable error message + // for this Rule when the CEL expression evaluates to false. If a + // non-empty message is provided, any strings resulting from the CEL + // expression evaluation are ignored. + Message *string + // `expression` is the actual CEL expression that will be evaluated for + // validation. This string must resolve to either a boolean or a string + // value. If the expression evaluates to false or a non-empty string, the + // validation is considered failed, and the message is rejected. + Expression *string +} + +func (b0 Rule_builder) Build() *Rule { + m0 := &Rule{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.Message = b.Message + x.Expression = b.Expression + return m0 +} + +// MessageRules represents validation rules that are applied to the entire message. +// It includes disabling options and a list of Rule messages representing Common Expression Language (CEL) validation rules. +type MessageRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `cel_expression` is a repeated field CEL expressions. Each expression specifies a validation + // rule to be applied to this message. These rules are written in Common Expression Language (CEL) syntax. + // + // This is a simplified form of the `cel` Rule field, where only `expression` is set. This allows for + // simpler syntax when defining CEL Rules where `id` and `message` derived from the `expression`. `id` will + // be same as the `expression`. 
+ // + // For more information, [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `foo` must be greater than 42. + // option (buf.validate.message).cel_expression = "this.foo > 42"; + // // The field `foo` must be less than 84. + // option (buf.validate.message).cel_expression = "this.foo < 84"; + // optional int32 foo = 1; + // } + // + // ``` + CelExpression []string `protobuf:"bytes,5,rep,name=cel_expression,json=celExpression" json:"cel_expression,omitempty"` + // `cel` is a repeated field of type Rule. Each Rule specifies a validation rule to be applied to this message. + // These rules are written in Common Expression Language (CEL) syntax. For more information, + // [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `foo` must be greater than 42. + // option (buf.validate.message).cel = { + // id: "my_message.value", + // message: "value must be greater than 42", + // expression: "this.foo > 42", + // }; + // optional int32 foo = 1; + // } + // + // ``` + Cel []*Rule `protobuf:"bytes,3,rep,name=cel" json:"cel,omitempty"` + // `oneof` is a repeated field of type MessageOneofRule that specifies a list of fields + // of which at most one can be present. If `required` is also specified, then exactly one + // of the specified fields _must_ be present. + // + // This will enforce oneof-like constraints with a few features not provided by + // actual Protobuf oneof declarations: + // 1. Repeated and map fields are allowed in this validation. In a Protobuf oneof, + // only scalar fields are allowed. + // 2. Fields with implicit presence are allowed. In a Protobuf oneof, all member + // fields have explicit presence. 
This means that, for the purpose of determining + // how many fields are set, explicitly setting such a field to its zero value is + // effectively the same as not setting it at all. + // 3. This will always generate validation errors for a message unmarshalled from + // serialized data that sets more than one field. With a Protobuf oneof, when + // multiple fields are present in the serialized form, earlier values are usually + // silently ignored when unmarshalling, with only the last field being set when + // unmarshalling completes. + // + // Note that adding a field to a `oneof` will also set the IGNORE_IF_ZERO_VALUE on the fields. This means + // only the field that is set will be validated and the unset fields are not validated according to the field rules. + // This behavior can be overridden by setting `ignore` against a field. + // + // ```proto + // + // message MyMessage { + // // Only one of `field1` or `field2` _can_ be present in this message. + // option (buf.validate.message).oneof = { fields: ["field1", "field2"] }; + // // Exactly one of `field3` or `field4` _must_ be present in this message. 
+ // option (buf.validate.message).oneof = { fields: ["field3", "field4"], required: true }; + // string field1 = 1; + // bytes field2 = 2; + // bool field3 = 3; + // int32 field4 = 4; + // } + // + // ``` + Oneof []*MessageOneofRule `protobuf:"bytes,4,rep,name=oneof" json:"oneof,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageRules) Reset() { + *x = MessageRules{} + mi := &file_buf_validate_validate_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageRules) ProtoMessage() {} + +func (x *MessageRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *MessageRules) GetCelExpression() []string { + if x != nil { + return x.CelExpression + } + return nil +} + +func (x *MessageRules) GetCel() []*Rule { + if x != nil { + return x.Cel + } + return nil +} + +func (x *MessageRules) GetOneof() []*MessageOneofRule { + if x != nil { + return x.Oneof + } + return nil +} + +func (x *MessageRules) SetCelExpression(v []string) { + x.CelExpression = v +} + +func (x *MessageRules) SetCel(v []*Rule) { + x.Cel = v +} + +func (x *MessageRules) SetOneof(v []*MessageOneofRule) { + x.Oneof = v +} + +type MessageRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `cel_expression` is a repeated field CEL expressions. Each expression specifies a validation + // rule to be applied to this message. These rules are written in Common Expression Language (CEL) syntax. + // + // This is a simplified form of the `cel` Rule field, where only `expression` is set. 
This allows for + // simpler syntax when defining CEL Rules where `id` and `message` derived from the `expression`. `id` will + // be same as the `expression`. + // + // For more information, [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `foo` must be greater than 42. + // option (buf.validate.message).cel_expression = "this.foo > 42"; + // // The field `foo` must be less than 84. + // option (buf.validate.message).cel_expression = "this.foo < 84"; + // optional int32 foo = 1; + // } + // + // ``` + CelExpression []string + // `cel` is a repeated field of type Rule. Each Rule specifies a validation rule to be applied to this message. + // These rules are written in Common Expression Language (CEL) syntax. For more information, + // [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `foo` must be greater than 42. + // option (buf.validate.message).cel = { + // id: "my_message.value", + // message: "value must be greater than 42", + // expression: "this.foo > 42", + // }; + // optional int32 foo = 1; + // } + // + // ``` + Cel []*Rule + // `oneof` is a repeated field of type MessageOneofRule that specifies a list of fields + // of which at most one can be present. If `required` is also specified, then exactly one + // of the specified fields _must_ be present. + // + // This will enforce oneof-like constraints with a few features not provided by + // actual Protobuf oneof declarations: + // 1. Repeated and map fields are allowed in this validation. In a Protobuf oneof, + // only scalar fields are allowed. + // 2. Fields with implicit presence are allowed. In a Protobuf oneof, all member + // fields have explicit presence. 
This means that, for the purpose of determining + // how many fields are set, explicitly setting such a field to its zero value is + // effectively the same as not setting it at all. + // 3. This will always generate validation errors for a message unmarshalled from + // serialized data that sets more than one field. With a Protobuf oneof, when + // multiple fields are present in the serialized form, earlier values are usually + // silently ignored when unmarshalling, with only the last field being set when + // unmarshalling completes. + // + // Note that adding a field to a `oneof` will also set the IGNORE_IF_ZERO_VALUE on the fields. This means + // only the field that is set will be validated and the unset fields are not validated according to the field rules. + // This behavior can be overridden by setting `ignore` against a field. + // + // ```proto + // + // message MyMessage { + // // Only one of `field1` or `field2` _can_ be present in this message. + // option (buf.validate.message).oneof = { fields: ["field1", "field2"] }; + // // Exactly one of `field3` or `field4` _must_ be present in this message. + // option (buf.validate.message).oneof = { fields: ["field3", "field4"], required: true }; + // string field1 = 1; + // bytes field2 = 2; + // bool field3 = 3; + // int32 field4 = 4; + // } + // + // ``` + Oneof []*MessageOneofRule +} + +func (b0 MessageRules_builder) Build() *MessageRules { + m0 := &MessageRules{} + b, x := &b0, m0 + _, _ = b, x + x.CelExpression = b.CelExpression + x.Cel = b.Cel + x.Oneof = b.Oneof + return m0 +} + +type MessageOneofRule struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // A list of field names to include in the oneof. All field names must be + // defined in the message. At least one field must be specified, and + // duplicates are not permitted. + Fields []string `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty"` + // If true, one of the fields specified _must_ be set. 
+ Required *bool `protobuf:"varint,2,opt,name=required" json:"required,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageOneofRule) Reset() { + *x = MessageOneofRule{} + mi := &file_buf_validate_validate_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageOneofRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageOneofRule) ProtoMessage() {} + +func (x *MessageOneofRule) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *MessageOneofRule) GetFields() []string { + if x != nil { + return x.Fields + } + return nil +} + +func (x *MessageOneofRule) GetRequired() bool { + if x != nil && x.Required != nil { + return *x.Required + } + return false +} + +func (x *MessageOneofRule) SetFields(v []string) { + x.Fields = v +} + +func (x *MessageOneofRule) SetRequired(v bool) { + x.Required = &v +} + +func (x *MessageOneofRule) HasRequired() bool { + if x == nil { + return false + } + return x.Required != nil +} + +func (x *MessageOneofRule) ClearRequired() { + x.Required = nil +} + +type MessageOneofRule_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A list of field names to include in the oneof. All field names must be + // defined in the message. At least one field must be specified, and + // duplicates are not permitted. + Fields []string + // If true, one of the fields specified _must_ be set. 
+ Required *bool +} + +func (b0 MessageOneofRule_builder) Build() *MessageOneofRule { + m0 := &MessageOneofRule{} + b, x := &b0, m0 + _, _ = b, x + x.Fields = b.Fields + x.Required = b.Required + return m0 +} + +// The `OneofRules` message type enables you to manage rules for +// oneof fields in your protobuf messages. +type OneofRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // If `required` is true, exactly one field of the oneof must be set. A + // validation error is returned if no fields in the oneof are set. Further rules + // should be placed on the fields themselves to ensure they are valid values, + // such as `min_len` or `gt`. + // + // ```proto + // + // message MyMessage { + // oneof value { + // // Either `a` or `b` must be set. If `a` is set, it must also be + // // non-empty; whereas if `b` is set, it can still be an empty string. + // option (buf.validate.oneof).required = true; + // string a = 1 [(buf.validate.field).string.min_len = 1]; + // string b = 2; + // } + // } + // + // ``` + Required *bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *OneofRules) Reset() { + *x = OneofRules{} + mi := &file_buf_validate_validate_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *OneofRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofRules) ProtoMessage() {} + +func (x *OneofRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *OneofRules) GetRequired() bool { + if x != nil && x.Required != nil { + return *x.Required + } + return false +} + +func (x *OneofRules) SetRequired(v bool) { + x.Required 
= &v +} + +func (x *OneofRules) HasRequired() bool { + if x == nil { + return false + } + return x.Required != nil +} + +func (x *OneofRules) ClearRequired() { + x.Required = nil +} + +type OneofRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // If `required` is true, exactly one field of the oneof must be set. A + // validation error is returned if no fields in the oneof are set. Further rules + // should be placed on the fields themselves to ensure they are valid values, + // such as `min_len` or `gt`. + // + // ```proto + // + // message MyMessage { + // oneof value { + // // Either `a` or `b` must be set. If `a` is set, it must also be + // // non-empty; whereas if `b` is set, it can still be an empty string. + // option (buf.validate.oneof).required = true; + // string a = 1 [(buf.validate.field).string.min_len = 1]; + // string b = 2; + // } + // } + // + // ``` + Required *bool +} + +func (b0 OneofRules_builder) Build() *OneofRules { + m0 := &OneofRules{} + b, x := &b0, m0 + _, _ = b, x + x.Required = b.Required + return m0 +} + +// FieldRules encapsulates the rules for each type of field. Depending on +// the field, the correct set should be used to ensure proper validations. +type FieldRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `cel_expression` is a repeated field CEL expressions. Each expression specifies a validation + // rule to be applied to this message. These rules are written in Common Expression Language (CEL) syntax. + // + // This is a simplified form of the `cel` Rule field, where only `expression` is set. This allows for + // simpler syntax when defining CEL Rules where `id` and `message` derived from the `expression`. `id` will + // be same as the `expression`. + // + // For more information, [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). 
+ // + // ```proto + // + // message MyMessage { + // // The field `value` must be greater than 42. + // optional int32 value = 1 [(buf.validate.field).cel_expression = "this > 42"]; + // } + // + // ``` + CelExpression []string `protobuf:"bytes,29,rep,name=cel_expression,json=celExpression" json:"cel_expression,omitempty"` + // `cel` is a repeated field used to represent a textual expression + // in the Common Expression Language (CEL) syntax. For more information, + // [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `value` must be greater than 42. + // optional int32 value = 1 [(buf.validate.field).cel = { + // id: "my_message.value", + // message: "value must be greater than 42", + // expression: "this > 42", + // }]; + // } + // + // ``` + Cel []*Rule `protobuf:"bytes,23,rep,name=cel" json:"cel,omitempty"` + // If `required` is true, the field must be set. A validation error is returned + // if the field is not set. + // + // ```proto + // syntax="proto3"; + // + // message FieldsWithPresence { + // // Requires any string to be set, including the empty string. + // optional string link = 1 [ + // (buf.validate.field).required = true + // ]; + // // Requires true or false to be set. + // optional bool disabled = 2 [ + // (buf.validate.field).required = true + // ]; + // // Requires a message to be set, including the empty message. + // SomeMessage msg = 4 [ + // (buf.validate.field).required = true + // ]; + // } + // + // ``` + // + // All fields in the example above track presence. By default, Protovalidate + // ignores rules on those fields if no value is set. `required` ensures that + // the fields are set and valid. + // + // Fields that don't track presence are always validated by Protovalidate, + // whether they are set or not. It is not necessary to add `required`. It + // can be added to indicate that the field cannot be the zero value. 
+ // + // ```proto + // syntax="proto3"; + // + // message FieldsWithoutPresence { + // // `string.email` always applies, even to an empty string. + // string link = 1 [ + // (buf.validate.field).string.email = true + // ]; + // // `repeated.min_items` always applies, even to an empty list. + // repeated string labels = 2 [ + // (buf.validate.field).repeated.min_items = 1 + // ]; + // // `required`, for fields that don't track presence, indicates + // // the value of the field can't be the zero value. + // int32 zero_value_not_allowed = 3 [ + // (buf.validate.field).required = true + // ]; + // } + // + // ``` + // + // To learn which fields track presence, see the + // [Field Presence cheat sheet](https://protobuf.dev/programming-guides/field_presence/#cheat). + // + // Note: While field rules can be applied to repeated items, map keys, and map + // values, the elements are always considered to be set. Consequently, + // specifying `repeated.items.required` is redundant. + Required *bool `protobuf:"varint,25,opt,name=required" json:"required,omitempty"` + // Ignore validation rules on the field if its value matches the specified + // criteria. See the `Ignore` enum for details. + // + // ```proto + // + // message UpdateRequest { + // // The uri rule only applies if the field is not an empty string. 
+ // string url = 1 [ + // (buf.validate.field).ignore = IGNORE_IF_ZERO_VALUE, + // (buf.validate.field).string.uri = true + // ]; + // } + // + // ``` + Ignore *Ignore `protobuf:"varint,27,opt,name=ignore,enum=buf.validate.Ignore" json:"ignore,omitempty"` + // Types that are valid to be assigned to Type: + // + // *FieldRules_Float + // *FieldRules_Double + // *FieldRules_Int32 + // *FieldRules_Int64 + // *FieldRules_Uint32 + // *FieldRules_Uint64 + // *FieldRules_Sint32 + // *FieldRules_Sint64 + // *FieldRules_Fixed32 + // *FieldRules_Fixed64 + // *FieldRules_Sfixed32 + // *FieldRules_Sfixed64 + // *FieldRules_Bool + // *FieldRules_String_ + // *FieldRules_Bytes + // *FieldRules_Enum + // *FieldRules_Repeated + // *FieldRules_Map + // *FieldRules_Any + // *FieldRules_Duration + // *FieldRules_FieldMask + // *FieldRules_Timestamp + Type isFieldRules_Type `protobuf_oneof:"type"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FieldRules) Reset() { + *x = FieldRules{} + mi := &file_buf_validate_validate_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FieldRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldRules) ProtoMessage() {} + +func (x *FieldRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FieldRules) GetCelExpression() []string { + if x != nil { + return x.CelExpression + } + return nil +} + +func (x *FieldRules) GetCel() []*Rule { + if x != nil { + return x.Cel + } + return nil +} + +func (x *FieldRules) GetRequired() bool { + if x != nil && x.Required != nil { + return *x.Required + } + return false +} + +func (x *FieldRules) GetIgnore() Ignore { + if x != nil && x.Ignore != nil { + 
return *x.Ignore + } + return Ignore_IGNORE_UNSPECIFIED +} + +func (x *FieldRules) GetType() isFieldRules_Type { + if x != nil { + return x.Type + } + return nil +} + +func (x *FieldRules) GetFloat() *FloatRules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Float); ok { + return x.Float + } + } + return nil +} + +func (x *FieldRules) GetDouble() *DoubleRules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Double); ok { + return x.Double + } + } + return nil +} + +func (x *FieldRules) GetInt32() *Int32Rules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Int32); ok { + return x.Int32 + } + } + return nil +} + +func (x *FieldRules) GetInt64() *Int64Rules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Int64); ok { + return x.Int64 + } + } + return nil +} + +func (x *FieldRules) GetUint32() *UInt32Rules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Uint32); ok { + return x.Uint32 + } + } + return nil +} + +func (x *FieldRules) GetUint64() *UInt64Rules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Uint64); ok { + return x.Uint64 + } + } + return nil +} + +func (x *FieldRules) GetSint32() *SInt32Rules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Sint32); ok { + return x.Sint32 + } + } + return nil +} + +func (x *FieldRules) GetSint64() *SInt64Rules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Sint64); ok { + return x.Sint64 + } + } + return nil +} + +func (x *FieldRules) GetFixed32() *Fixed32Rules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Fixed32); ok { + return x.Fixed32 + } + } + return nil +} + +func (x *FieldRules) GetFixed64() *Fixed64Rules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Fixed64); ok { + return x.Fixed64 + } + } + return nil +} + +func (x *FieldRules) GetSfixed32() *SFixed32Rules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Sfixed32); ok { + return x.Sfixed32 + } + } + return nil +} + +func (x *FieldRules) GetSfixed64() *SFixed64Rules { + if x != nil { + if x, ok := 
x.Type.(*FieldRules_Sfixed64); ok { + return x.Sfixed64 + } + } + return nil +} + +func (x *FieldRules) GetBool() *BoolRules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Bool); ok { + return x.Bool + } + } + return nil +} + +func (x *FieldRules) GetString() *StringRules { + if x != nil { + if x, ok := x.Type.(*FieldRules_String_); ok { + return x.String_ + } + } + return nil +} + +// Deprecated: Use GetString instead. +func (x *FieldRules) GetString_() *StringRules { + return x.GetString() +} + +func (x *FieldRules) GetBytes() *BytesRules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Bytes); ok { + return x.Bytes + } + } + return nil +} + +func (x *FieldRules) GetEnum() *EnumRules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Enum); ok { + return x.Enum + } + } + return nil +} + +func (x *FieldRules) GetRepeated() *RepeatedRules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Repeated); ok { + return x.Repeated + } + } + return nil +} + +func (x *FieldRules) GetMap() *MapRules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Map); ok { + return x.Map + } + } + return nil +} + +func (x *FieldRules) GetAny() *AnyRules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Any); ok { + return x.Any + } + } + return nil +} + +func (x *FieldRules) GetDuration() *DurationRules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Duration); ok { + return x.Duration + } + } + return nil +} + +func (x *FieldRules) GetFieldMask() *FieldMaskRules { + if x != nil { + if x, ok := x.Type.(*FieldRules_FieldMask); ok { + return x.FieldMask + } + } + return nil +} + +func (x *FieldRules) GetTimestamp() *TimestampRules { + if x != nil { + if x, ok := x.Type.(*FieldRules_Timestamp); ok { + return x.Timestamp + } + } + return nil +} + +func (x *FieldRules) SetCelExpression(v []string) { + x.CelExpression = v +} + +func (x *FieldRules) SetCel(v []*Rule) { + x.Cel = v +} + +func (x *FieldRules) SetRequired(v bool) { + x.Required = &v +} + +func (x *FieldRules) 
SetIgnore(v Ignore) { + x.Ignore = &v +} + +func (x *FieldRules) SetFloat(v *FloatRules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Float{v} +} + +func (x *FieldRules) SetDouble(v *DoubleRules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Double{v} +} + +func (x *FieldRules) SetInt32(v *Int32Rules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Int32{v} +} + +func (x *FieldRules) SetInt64(v *Int64Rules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Int64{v} +} + +func (x *FieldRules) SetUint32(v *UInt32Rules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Uint32{v} +} + +func (x *FieldRules) SetUint64(v *UInt64Rules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Uint64{v} +} + +func (x *FieldRules) SetSint32(v *SInt32Rules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Sint32{v} +} + +func (x *FieldRules) SetSint64(v *SInt64Rules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Sint64{v} +} + +func (x *FieldRules) SetFixed32(v *Fixed32Rules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Fixed32{v} +} + +func (x *FieldRules) SetFixed64(v *Fixed64Rules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Fixed64{v} +} + +func (x *FieldRules) SetSfixed32(v *SFixed32Rules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Sfixed32{v} +} + +func (x *FieldRules) SetSfixed64(v *SFixed64Rules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Sfixed64{v} +} + +func (x *FieldRules) SetBool(v *BoolRules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Bool{v} +} + +func (x *FieldRules) SetString(v *StringRules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_String_{v} +} + +func (x *FieldRules) SetBytes(v *BytesRules) { + if v == nil { + x.Type = nil + return + } + x.Type = 
&FieldRules_Bytes{v} +} + +func (x *FieldRules) SetEnum(v *EnumRules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Enum{v} +} + +func (x *FieldRules) SetRepeated(v *RepeatedRules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Repeated{v} +} + +func (x *FieldRules) SetMap(v *MapRules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Map{v} +} + +func (x *FieldRules) SetAny(v *AnyRules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Any{v} +} + +func (x *FieldRules) SetDuration(v *DurationRules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Duration{v} +} + +func (x *FieldRules) SetFieldMask(v *FieldMaskRules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_FieldMask{v} +} + +func (x *FieldRules) SetTimestamp(v *TimestampRules) { + if v == nil { + x.Type = nil + return + } + x.Type = &FieldRules_Timestamp{v} +} + +func (x *FieldRules) HasRequired() bool { + if x == nil { + return false + } + return x.Required != nil +} + +func (x *FieldRules) HasIgnore() bool { + if x == nil { + return false + } + return x.Ignore != nil +} + +func (x *FieldRules) HasType() bool { + if x == nil { + return false + } + return x.Type != nil +} + +func (x *FieldRules) HasFloat() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Float) + return ok +} + +func (x *FieldRules) HasDouble() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Double) + return ok +} + +func (x *FieldRules) HasInt32() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Int32) + return ok +} + +func (x *FieldRules) HasInt64() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Int64) + return ok +} + +func (x *FieldRules) HasUint32() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Uint32) + return ok +} + +func (x *FieldRules) HasUint64() bool { + if x == nil { + return false 
+ } + _, ok := x.Type.(*FieldRules_Uint64) + return ok +} + +func (x *FieldRules) HasSint32() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Sint32) + return ok +} + +func (x *FieldRules) HasSint64() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Sint64) + return ok +} + +func (x *FieldRules) HasFixed32() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Fixed32) + return ok +} + +func (x *FieldRules) HasFixed64() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Fixed64) + return ok +} + +func (x *FieldRules) HasSfixed32() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Sfixed32) + return ok +} + +func (x *FieldRules) HasSfixed64() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Sfixed64) + return ok +} + +func (x *FieldRules) HasBool() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Bool) + return ok +} + +func (x *FieldRules) HasString() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_String_) + return ok +} + +func (x *FieldRules) HasBytes() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Bytes) + return ok +} + +func (x *FieldRules) HasEnum() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Enum) + return ok +} + +func (x *FieldRules) HasRepeated() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Repeated) + return ok +} + +func (x *FieldRules) HasMap() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Map) + return ok +} + +func (x *FieldRules) HasAny() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Any) + return ok +} + +func (x *FieldRules) HasDuration() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Duration) + return ok +} + +func (x *FieldRules) HasFieldMask() bool { + if x == nil { + return false + } + _, ok := 
x.Type.(*FieldRules_FieldMask) + return ok +} + +func (x *FieldRules) HasTimestamp() bool { + if x == nil { + return false + } + _, ok := x.Type.(*FieldRules_Timestamp) + return ok +} + +func (x *FieldRules) ClearRequired() { + x.Required = nil +} + +func (x *FieldRules) ClearIgnore() { + x.Ignore = nil +} + +func (x *FieldRules) ClearType() { + x.Type = nil +} + +func (x *FieldRules) ClearFloat() { + if _, ok := x.Type.(*FieldRules_Float); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearDouble() { + if _, ok := x.Type.(*FieldRules_Double); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearInt32() { + if _, ok := x.Type.(*FieldRules_Int32); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearInt64() { + if _, ok := x.Type.(*FieldRules_Int64); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearUint32() { + if _, ok := x.Type.(*FieldRules_Uint32); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearUint64() { + if _, ok := x.Type.(*FieldRules_Uint64); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearSint32() { + if _, ok := x.Type.(*FieldRules_Sint32); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearSint64() { + if _, ok := x.Type.(*FieldRules_Sint64); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearFixed32() { + if _, ok := x.Type.(*FieldRules_Fixed32); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearFixed64() { + if _, ok := x.Type.(*FieldRules_Fixed64); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearSfixed32() { + if _, ok := x.Type.(*FieldRules_Sfixed32); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearSfixed64() { + if _, ok := x.Type.(*FieldRules_Sfixed64); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearBool() { + if _, ok := x.Type.(*FieldRules_Bool); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearString() { + if _, ok := x.Type.(*FieldRules_String_); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearBytes() { + if _, ok := x.Type.(*FieldRules_Bytes); ok { + 
x.Type = nil + } +} + +func (x *FieldRules) ClearEnum() { + if _, ok := x.Type.(*FieldRules_Enum); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearRepeated() { + if _, ok := x.Type.(*FieldRules_Repeated); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearMap() { + if _, ok := x.Type.(*FieldRules_Map); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearAny() { + if _, ok := x.Type.(*FieldRules_Any); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearDuration() { + if _, ok := x.Type.(*FieldRules_Duration); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearFieldMask() { + if _, ok := x.Type.(*FieldRules_FieldMask); ok { + x.Type = nil + } +} + +func (x *FieldRules) ClearTimestamp() { + if _, ok := x.Type.(*FieldRules_Timestamp); ok { + x.Type = nil + } +} + +const FieldRules_Type_not_set_case case_FieldRules_Type = 0 +const FieldRules_Float_case case_FieldRules_Type = 1 +const FieldRules_Double_case case_FieldRules_Type = 2 +const FieldRules_Int32_case case_FieldRules_Type = 3 +const FieldRules_Int64_case case_FieldRules_Type = 4 +const FieldRules_Uint32_case case_FieldRules_Type = 5 +const FieldRules_Uint64_case case_FieldRules_Type = 6 +const FieldRules_Sint32_case case_FieldRules_Type = 7 +const FieldRules_Sint64_case case_FieldRules_Type = 8 +const FieldRules_Fixed32_case case_FieldRules_Type = 9 +const FieldRules_Fixed64_case case_FieldRules_Type = 10 +const FieldRules_Sfixed32_case case_FieldRules_Type = 11 +const FieldRules_Sfixed64_case case_FieldRules_Type = 12 +const FieldRules_Bool_case case_FieldRules_Type = 13 +const FieldRules_String__case case_FieldRules_Type = 14 +const FieldRules_Bytes_case case_FieldRules_Type = 15 +const FieldRules_Enum_case case_FieldRules_Type = 16 +const FieldRules_Repeated_case case_FieldRules_Type = 18 +const FieldRules_Map_case case_FieldRules_Type = 19 +const FieldRules_Any_case case_FieldRules_Type = 20 +const FieldRules_Duration_case case_FieldRules_Type = 21 +const FieldRules_FieldMask_case 
case_FieldRules_Type = 28 +const FieldRules_Timestamp_case case_FieldRules_Type = 22 + +func (x *FieldRules) WhichType() case_FieldRules_Type { + if x == nil { + return FieldRules_Type_not_set_case + } + switch x.Type.(type) { + case *FieldRules_Float: + return FieldRules_Float_case + case *FieldRules_Double: + return FieldRules_Double_case + case *FieldRules_Int32: + return FieldRules_Int32_case + case *FieldRules_Int64: + return FieldRules_Int64_case + case *FieldRules_Uint32: + return FieldRules_Uint32_case + case *FieldRules_Uint64: + return FieldRules_Uint64_case + case *FieldRules_Sint32: + return FieldRules_Sint32_case + case *FieldRules_Sint64: + return FieldRules_Sint64_case + case *FieldRules_Fixed32: + return FieldRules_Fixed32_case + case *FieldRules_Fixed64: + return FieldRules_Fixed64_case + case *FieldRules_Sfixed32: + return FieldRules_Sfixed32_case + case *FieldRules_Sfixed64: + return FieldRules_Sfixed64_case + case *FieldRules_Bool: + return FieldRules_Bool_case + case *FieldRules_String_: + return FieldRules_String__case + case *FieldRules_Bytes: + return FieldRules_Bytes_case + case *FieldRules_Enum: + return FieldRules_Enum_case + case *FieldRules_Repeated: + return FieldRules_Repeated_case + case *FieldRules_Map: + return FieldRules_Map_case + case *FieldRules_Any: + return FieldRules_Any_case + case *FieldRules_Duration: + return FieldRules_Duration_case + case *FieldRules_FieldMask: + return FieldRules_FieldMask_case + case *FieldRules_Timestamp: + return FieldRules_Timestamp_case + default: + return FieldRules_Type_not_set_case + } +} + +type FieldRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `cel_expression` is a repeated field CEL expressions. Each expression specifies a validation + // rule to be applied to this message. These rules are written in Common Expression Language (CEL) syntax. 
+ // + // This is a simplified form of the `cel` Rule field, where only `expression` is set. This allows for + // simpler syntax when defining CEL Rules where `id` and `message` derived from the `expression`. `id` will + // be same as the `expression`. + // + // For more information, [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `value` must be greater than 42. + // optional int32 value = 1 [(buf.validate.field).cel_expression = "this > 42"]; + // } + // + // ``` + CelExpression []string + // `cel` is a repeated field used to represent a textual expression + // in the Common Expression Language (CEL) syntax. For more information, + // [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `value` must be greater than 42. + // optional int32 value = 1 [(buf.validate.field).cel = { + // id: "my_message.value", + // message: "value must be greater than 42", + // expression: "this > 42", + // }]; + // } + // + // ``` + Cel []*Rule + // If `required` is true, the field must be set. A validation error is returned + // if the field is not set. + // + // ```proto + // syntax="proto3"; + // + // message FieldsWithPresence { + // // Requires any string to be set, including the empty string. + // optional string link = 1 [ + // (buf.validate.field).required = true + // ]; + // // Requires true or false to be set. + // optional bool disabled = 2 [ + // (buf.validate.field).required = true + // ]; + // // Requires a message to be set, including the empty message. + // SomeMessage msg = 4 [ + // (buf.validate.field).required = true + // ]; + // } + // + // ``` + // + // All fields in the example above track presence. By default, Protovalidate + // ignores rules on those fields if no value is set. `required` ensures that + // the fields are set and valid. 
+ // + // Fields that don't track presence are always validated by Protovalidate, + // whether they are set or not. It is not necessary to add `required`. It + // can be added to indicate that the field cannot be the zero value. + // + // ```proto + // syntax="proto3"; + // + // message FieldsWithoutPresence { + // // `string.email` always applies, even to an empty string. + // string link = 1 [ + // (buf.validate.field).string.email = true + // ]; + // // `repeated.min_items` always applies, even to an empty list. + // repeated string labels = 2 [ + // (buf.validate.field).repeated.min_items = 1 + // ]; + // // `required`, for fields that don't track presence, indicates + // // the value of the field can't be the zero value. + // int32 zero_value_not_allowed = 3 [ + // (buf.validate.field).required = true + // ]; + // } + // + // ``` + // + // To learn which fields track presence, see the + // [Field Presence cheat sheet](https://protobuf.dev/programming-guides/field_presence/#cheat). + // + // Note: While field rules can be applied to repeated items, map keys, and map + // values, the elements are always considered to be set. Consequently, + // specifying `repeated.items.required` is redundant. + Required *bool + // Ignore validation rules on the field if its value matches the specified + // criteria. See the `Ignore` enum for details. + // + // ```proto + // + // message UpdateRequest { + // // The uri rule only applies if the field is not an empty string. 
+ // string url = 1 [ + // (buf.validate.field).ignore = IGNORE_IF_ZERO_VALUE, + // (buf.validate.field).string.uri = true + // ]; + // } + // + // ``` + Ignore *Ignore + // Fields of oneof Type: + // Scalar Field Types + Float *FloatRules + Double *DoubleRules + Int32 *Int32Rules + Int64 *Int64Rules + Uint32 *UInt32Rules + Uint64 *UInt64Rules + Sint32 *SInt32Rules + Sint64 *SInt64Rules + Fixed32 *Fixed32Rules + Fixed64 *Fixed64Rules + Sfixed32 *SFixed32Rules + Sfixed64 *SFixed64Rules + Bool *BoolRules + String *StringRules + Bytes *BytesRules + // Complex Field Types + Enum *EnumRules + Repeated *RepeatedRules + Map *MapRules + // Well-Known Field Types + Any *AnyRules + Duration *DurationRules + FieldMask *FieldMaskRules + Timestamp *TimestampRules + // -- end of Type +} + +func (b0 FieldRules_builder) Build() *FieldRules { + m0 := &FieldRules{} + b, x := &b0, m0 + _, _ = b, x + x.CelExpression = b.CelExpression + x.Cel = b.Cel + x.Required = b.Required + x.Ignore = b.Ignore + if b.Float != nil { + x.Type = &FieldRules_Float{b.Float} + } + if b.Double != nil { + x.Type = &FieldRules_Double{b.Double} + } + if b.Int32 != nil { + x.Type = &FieldRules_Int32{b.Int32} + } + if b.Int64 != nil { + x.Type = &FieldRules_Int64{b.Int64} + } + if b.Uint32 != nil { + x.Type = &FieldRules_Uint32{b.Uint32} + } + if b.Uint64 != nil { + x.Type = &FieldRules_Uint64{b.Uint64} + } + if b.Sint32 != nil { + x.Type = &FieldRules_Sint32{b.Sint32} + } + if b.Sint64 != nil { + x.Type = &FieldRules_Sint64{b.Sint64} + } + if b.Fixed32 != nil { + x.Type = &FieldRules_Fixed32{b.Fixed32} + } + if b.Fixed64 != nil { + x.Type = &FieldRules_Fixed64{b.Fixed64} + } + if b.Sfixed32 != nil { + x.Type = &FieldRules_Sfixed32{b.Sfixed32} + } + if b.Sfixed64 != nil { + x.Type = &FieldRules_Sfixed64{b.Sfixed64} + } + if b.Bool != nil { + x.Type = &FieldRules_Bool{b.Bool} + } + if b.String != nil { + x.Type = &FieldRules_String_{b.String} + } + if b.Bytes != nil { + x.Type = &FieldRules_Bytes{b.Bytes} + } + 
if b.Enum != nil { + x.Type = &FieldRules_Enum{b.Enum} + } + if b.Repeated != nil { + x.Type = &FieldRules_Repeated{b.Repeated} + } + if b.Map != nil { + x.Type = &FieldRules_Map{b.Map} + } + if b.Any != nil { + x.Type = &FieldRules_Any{b.Any} + } + if b.Duration != nil { + x.Type = &FieldRules_Duration{b.Duration} + } + if b.FieldMask != nil { + x.Type = &FieldRules_FieldMask{b.FieldMask} + } + if b.Timestamp != nil { + x.Type = &FieldRules_Timestamp{b.Timestamp} + } + return m0 +} + +type case_FieldRules_Type protoreflect.FieldNumber + +func (x case_FieldRules_Type) String() string { + md := file_buf_validate_validate_proto_msgTypes[4].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isFieldRules_Type interface { + isFieldRules_Type() +} + +type FieldRules_Float struct { + // Scalar Field Types + Float *FloatRules `protobuf:"bytes,1,opt,name=float,oneof"` +} + +type FieldRules_Double struct { + Double *DoubleRules `protobuf:"bytes,2,opt,name=double,oneof"` +} + +type FieldRules_Int32 struct { + Int32 *Int32Rules `protobuf:"bytes,3,opt,name=int32,oneof"` +} + +type FieldRules_Int64 struct { + Int64 *Int64Rules `protobuf:"bytes,4,opt,name=int64,oneof"` +} + +type FieldRules_Uint32 struct { + Uint32 *UInt32Rules `protobuf:"bytes,5,opt,name=uint32,oneof"` +} + +type FieldRules_Uint64 struct { + Uint64 *UInt64Rules `protobuf:"bytes,6,opt,name=uint64,oneof"` +} + +type FieldRules_Sint32 struct { + Sint32 *SInt32Rules `protobuf:"bytes,7,opt,name=sint32,oneof"` +} + +type FieldRules_Sint64 struct { + Sint64 *SInt64Rules `protobuf:"bytes,8,opt,name=sint64,oneof"` +} + +type FieldRules_Fixed32 struct { + Fixed32 *Fixed32Rules `protobuf:"bytes,9,opt,name=fixed32,oneof"` +} + +type FieldRules_Fixed64 struct { + Fixed64 *Fixed64Rules `protobuf:"bytes,10,opt,name=fixed64,oneof"` +} + +type FieldRules_Sfixed32 struct { + Sfixed32 *SFixed32Rules `protobuf:"bytes,11,opt,name=sfixed32,oneof"` +} 
+ +type FieldRules_Sfixed64 struct { + Sfixed64 *SFixed64Rules `protobuf:"bytes,12,opt,name=sfixed64,oneof"` +} + +type FieldRules_Bool struct { + Bool *BoolRules `protobuf:"bytes,13,opt,name=bool,oneof"` +} + +type FieldRules_String_ struct { + String_ *StringRules `protobuf:"bytes,14,opt,name=string,oneof"` +} + +type FieldRules_Bytes struct { + Bytes *BytesRules `protobuf:"bytes,15,opt,name=bytes,oneof"` +} + +type FieldRules_Enum struct { + // Complex Field Types + Enum *EnumRules `protobuf:"bytes,16,opt,name=enum,oneof"` +} + +type FieldRules_Repeated struct { + Repeated *RepeatedRules `protobuf:"bytes,18,opt,name=repeated,oneof"` +} + +type FieldRules_Map struct { + Map *MapRules `protobuf:"bytes,19,opt,name=map,oneof"` +} + +type FieldRules_Any struct { + // Well-Known Field Types + Any *AnyRules `protobuf:"bytes,20,opt,name=any,oneof"` +} + +type FieldRules_Duration struct { + Duration *DurationRules `protobuf:"bytes,21,opt,name=duration,oneof"` +} + +type FieldRules_FieldMask struct { + FieldMask *FieldMaskRules `protobuf:"bytes,28,opt,name=field_mask,json=fieldMask,oneof"` +} + +type FieldRules_Timestamp struct { + Timestamp *TimestampRules `protobuf:"bytes,22,opt,name=timestamp,oneof"` +} + +func (*FieldRules_Float) isFieldRules_Type() {} + +func (*FieldRules_Double) isFieldRules_Type() {} + +func (*FieldRules_Int32) isFieldRules_Type() {} + +func (*FieldRules_Int64) isFieldRules_Type() {} + +func (*FieldRules_Uint32) isFieldRules_Type() {} + +func (*FieldRules_Uint64) isFieldRules_Type() {} + +func (*FieldRules_Sint32) isFieldRules_Type() {} + +func (*FieldRules_Sint64) isFieldRules_Type() {} + +func (*FieldRules_Fixed32) isFieldRules_Type() {} + +func (*FieldRules_Fixed64) isFieldRules_Type() {} + +func (*FieldRules_Sfixed32) isFieldRules_Type() {} + +func (*FieldRules_Sfixed64) isFieldRules_Type() {} + +func (*FieldRules_Bool) isFieldRules_Type() {} + +func (*FieldRules_String_) isFieldRules_Type() {} + +func (*FieldRules_Bytes) isFieldRules_Type() {} 
+ +func (*FieldRules_Enum) isFieldRules_Type() {} + +func (*FieldRules_Repeated) isFieldRules_Type() {} + +func (*FieldRules_Map) isFieldRules_Type() {} + +func (*FieldRules_Any) isFieldRules_Type() {} + +func (*FieldRules_Duration) isFieldRules_Type() {} + +func (*FieldRules_FieldMask) isFieldRules_Type() {} + +func (*FieldRules_Timestamp) isFieldRules_Type() {} + +// PredefinedRules are custom rules that can be re-used with +// multiple fields. +type PredefinedRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `cel` is a repeated field used to represent a textual expression + // in the Common Expression Language (CEL) syntax. For more information, + // [see our documentation](https://buf.build/docs/protovalidate/schemas/predefined-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `value` must be greater than 42. + // optional int32 value = 1 [(buf.validate.predefined).cel = { + // id: "my_message.value", + // message: "value must be greater than 42", + // expression: "this > 42", + // }]; + // } + // + // ``` + Cel []*Rule `protobuf:"bytes,1,rep,name=cel" json:"cel,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PredefinedRules) Reset() { + *x = PredefinedRules{} + mi := &file_buf_validate_validate_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PredefinedRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PredefinedRules) ProtoMessage() {} + +func (x *PredefinedRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *PredefinedRules) GetCel() []*Rule { + if x != nil { + return x.Cel + } + return nil +} + +func (x *PredefinedRules) SetCel(v 
[]*Rule) { + x.Cel = v +} + +type PredefinedRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `cel` is a repeated field used to represent a textual expression + // in the Common Expression Language (CEL) syntax. For more information, + // [see our documentation](https://buf.build/docs/protovalidate/schemas/predefined-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `value` must be greater than 42. + // optional int32 value = 1 [(buf.validate.predefined).cel = { + // id: "my_message.value", + // message: "value must be greater than 42", + // expression: "this > 42", + // }]; + // } + // + // ``` + Cel []*Rule +} + +func (b0 PredefinedRules_builder) Build() *PredefinedRules { + m0 := &PredefinedRules{} + b, x := &b0, m0 + _, _ = b, x + x.Cel = b.Cel + return m0 +} + +// FloatRules describes the rules applied to `float` values. These +// rules may also be applied to the `google.protobuf.FloatValue` Well-Known-Type. +type FloatRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must equal 42.0 + // float value = 1 [(buf.validate.field).float.const = 42.0]; + // } + // + // ``` + Const *float32 `protobuf:"fixed32,1,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *FloatRules_Lt + // *FloatRules_Lte + LessThan isFloatRules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *FloatRules_Gt + // *FloatRules_Gte + GreaterThan isFloatRules_GreaterThan `protobuf_oneof:"greater_than"` + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message + // is generated. 
+ // + // ```proto + // + // message MyFloat { + // // value must be in list [1.0, 2.0, 3.0] + // float value = 1 [(buf.validate.field).float = { in: [1.0, 2.0, 3.0] }]; + // } + // + // ``` + In []float32 `protobuf:"fixed32,6,rep,name=in" json:"in,omitempty"` + // `in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyFloat { + // // value must not be in list [1.0, 2.0, 3.0] + // float value = 1 [(buf.validate.field).float = { not_in: [1.0, 2.0, 3.0] }]; + // } + // + // ``` + NotIn []float32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `finite` requires the field value to be finite. If the field value is + // infinite or NaN, an error message is generated. + Finite *bool `protobuf:"varint,8,opt,name=finite" json:"finite,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyFloat { + // float value = 1 [ + // (buf.validate.field).float.example = 1.0, + // (buf.validate.field).float.example = inf + // ]; + // } + // + // ``` + Example []float32 `protobuf:"fixed32,9,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FloatRules) Reset() { + *x = FloatRules{} + mi := &file_buf_validate_validate_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FloatRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FloatRules) ProtoMessage() {} + +func (x *FloatRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FloatRules) GetConst() float32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *FloatRules) GetLessThan() isFloatRules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *FloatRules) GetLt() float32 { + if x != nil { + if x, ok := x.LessThan.(*FloatRules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *FloatRules) GetLte() float32 { + if x != nil { + if x, ok := x.LessThan.(*FloatRules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *FloatRules) GetGreaterThan() isFloatRules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *FloatRules) GetGt() float32 { + if x != nil { + if x, ok := x.GreaterThan.(*FloatRules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *FloatRules) GetGte() float32 { + if x != nil { + if x, ok := x.GreaterThan.(*FloatRules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *FloatRules) GetIn() []float32 { + 
if x != nil { + return x.In + } + return nil +} + +func (x *FloatRules) GetNotIn() []float32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *FloatRules) GetFinite() bool { + if x != nil && x.Finite != nil { + return *x.Finite + } + return false +} + +func (x *FloatRules) GetExample() []float32 { + if x != nil { + return x.Example + } + return nil +} + +func (x *FloatRules) SetConst(v float32) { + x.Const = &v +} + +func (x *FloatRules) SetLt(v float32) { + x.LessThan = &FloatRules_Lt{v} +} + +func (x *FloatRules) SetLte(v float32) { + x.LessThan = &FloatRules_Lte{v} +} + +func (x *FloatRules) SetGt(v float32) { + x.GreaterThan = &FloatRules_Gt{v} +} + +func (x *FloatRules) SetGte(v float32) { + x.GreaterThan = &FloatRules_Gte{v} +} + +func (x *FloatRules) SetIn(v []float32) { + x.In = v +} + +func (x *FloatRules) SetNotIn(v []float32) { + x.NotIn = v +} + +func (x *FloatRules) SetFinite(v bool) { + x.Finite = &v +} + +func (x *FloatRules) SetExample(v []float32) { + x.Example = v +} + +func (x *FloatRules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *FloatRules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != nil +} + +func (x *FloatRules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*FloatRules_Lt) + return ok +} + +func (x *FloatRules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*FloatRules_Lte) + return ok +} + +func (x *FloatRules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan != nil +} + +func (x *FloatRules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*FloatRules_Gt) + return ok +} + +func (x *FloatRules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*FloatRules_Gte) + return ok +} + +func (x *FloatRules) HasFinite() bool { + if x == nil { + return false + } + return x.Finite != nil +} + +func (x *FloatRules) 
ClearConst() { + x.Const = nil +} + +func (x *FloatRules) ClearLessThan() { + x.LessThan = nil +} + +func (x *FloatRules) ClearLt() { + if _, ok := x.LessThan.(*FloatRules_Lt); ok { + x.LessThan = nil + } +} + +func (x *FloatRules) ClearLte() { + if _, ok := x.LessThan.(*FloatRules_Lte); ok { + x.LessThan = nil + } +} + +func (x *FloatRules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *FloatRules) ClearGt() { + if _, ok := x.GreaterThan.(*FloatRules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *FloatRules) ClearGte() { + if _, ok := x.GreaterThan.(*FloatRules_Gte); ok { + x.GreaterThan = nil + } +} + +func (x *FloatRules) ClearFinite() { + x.Finite = nil +} + +const FloatRules_LessThan_not_set_case case_FloatRules_LessThan = 0 +const FloatRules_Lt_case case_FloatRules_LessThan = 2 +const FloatRules_Lte_case case_FloatRules_LessThan = 3 + +func (x *FloatRules) WhichLessThan() case_FloatRules_LessThan { + if x == nil { + return FloatRules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *FloatRules_Lt: + return FloatRules_Lt_case + case *FloatRules_Lte: + return FloatRules_Lte_case + default: + return FloatRules_LessThan_not_set_case + } +} + +const FloatRules_GreaterThan_not_set_case case_FloatRules_GreaterThan = 0 +const FloatRules_Gt_case case_FloatRules_GreaterThan = 4 +const FloatRules_Gte_case case_FloatRules_GreaterThan = 5 + +func (x *FloatRules) WhichGreaterThan() case_FloatRules_GreaterThan { + if x == nil { + return FloatRules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *FloatRules_Gt: + return FloatRules_Gt_case + case *FloatRules_Gte: + return FloatRules_Gte_case + default: + return FloatRules_GreaterThan_not_set_case + } +} + +type FloatRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MyFloat { + // // value must equal 42.0 + // float value = 1 [(buf.validate.field).float.const = 42.0]; + // } + // + // ``` + Const *float32 + // Fields of oneof LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be less than 10.0 + // float value = 1 [(buf.validate.field).float.lt = 10.0]; + // } + // + // ``` + Lt *float32 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be less than or equal to 10.0 + // float value = 1 [(buf.validate.field).float.lte = 10.0]; + // } + // + // ``` + Lte *float32 + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be greater than 5.0 [float.gt] + // float value = 1 [(buf.validate.field).float.gt = 5.0]; + // + // // value must be greater than 5 and less than 10.0 [float.gt_lt] + // float other_value = 2 [(buf.validate.field).float = { gt: 5.0, lt: 10.0 }]; + // + // // value must be greater than 10 or less than 5.0 [float.gt_lt_exclusive] + // float another_value = 3 [(buf.validate.field).float = { gt: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gt *float32 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). 
If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be greater than or equal to 5.0 [float.gte] + // float value = 1 [(buf.validate.field).float.gte = 5.0]; + // + // // value must be greater than or equal to 5.0 and less than 10.0 [float.gte_lt] + // float other_value = 2 [(buf.validate.field).float = { gte: 5.0, lt: 10.0 }]; + // + // // value must be greater than or equal to 10.0 or less than 5.0 [float.gte_lt_exclusive] + // float another_value = 3 [(buf.validate.field).float = { gte: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gte *float32 + // -- end of GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message + // is generated. + // + // ```proto + // + // message MyFloat { + // // value must be in list [1.0, 2.0, 3.0] + // float value = 1 [(buf.validate.field).float = { in: [1.0, 2.0, 3.0] }]; + // } + // + // ``` + In []float32 + // `in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyFloat { + // // value must not be in list [1.0, 2.0, 3.0] + // float value = 1 [(buf.validate.field).float = { not_in: [1.0, 2.0, 3.0] }]; + // } + // + // ``` + NotIn []float32 + // `finite` requires the field value to be finite. If the field value is + // infinite or NaN, an error message is generated. + Finite *bool + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyFloat { + // float value = 1 [ + // (buf.validate.field).float.example = 1.0, + // (buf.validate.field).float.example = inf + // ]; + // } + // + // ``` + Example []float32 +} + +func (b0 FloatRules_builder) Build() *FloatRules { + m0 := &FloatRules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &FloatRules_Lt{*b.Lt} + } + if b.Lte != nil { + x.LessThan = &FloatRules_Lte{*b.Lte} + } + if b.Gt != nil { + x.GreaterThan = &FloatRules_Gt{*b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &FloatRules_Gte{*b.Gte} + } + x.In = b.In + x.NotIn = b.NotIn + x.Finite = b.Finite + x.Example = b.Example + return m0 +} + +type case_FloatRules_LessThan protoreflect.FieldNumber + +func (x case_FloatRules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[6].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_FloatRules_GreaterThan protoreflect.FieldNumber + +func (x case_FloatRules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[6].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isFloatRules_LessThan interface { + isFloatRules_LessThan() +} + +type FloatRules_Lt struct { + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be less than 10.0 + // float value = 1 [(buf.validate.field).float.lt = 10.0]; + // } + // + // ``` + Lt float32 `protobuf:"fixed32,2,opt,name=lt,oneof"` +} + +type FloatRules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). 
If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be less than or equal to 10.0 + // float value = 1 [(buf.validate.field).float.lte = 10.0]; + // } + // + // ``` + Lte float32 `protobuf:"fixed32,3,opt,name=lte,oneof"` +} + +func (*FloatRules_Lt) isFloatRules_LessThan() {} + +func (*FloatRules_Lte) isFloatRules_LessThan() {} + +type isFloatRules_GreaterThan interface { + isFloatRules_GreaterThan() +} + +type FloatRules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be greater than 5.0 [float.gt] + // float value = 1 [(buf.validate.field).float.gt = 5.0]; + // + // // value must be greater than 5 and less than 10.0 [float.gt_lt] + // float other_value = 2 [(buf.validate.field).float = { gt: 5.0, lt: 10.0 }]; + // + // // value must be greater than 10 or less than 5.0 [float.gt_lt_exclusive] + // float another_value = 3 [(buf.validate.field).float = { gt: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gt float32 `protobuf:"fixed32,4,opt,name=gt,oneof"` +} + +type FloatRules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyFloat { + // // value must be greater than or equal to 5.0 [float.gte] + // float value = 1 [(buf.validate.field).float.gte = 5.0]; + // + // // value must be greater than or equal to 5.0 and less than 10.0 [float.gte_lt] + // float other_value = 2 [(buf.validate.field).float = { gte: 5.0, lt: 10.0 }]; + // + // // value must be greater than or equal to 10.0 or less than 5.0 [float.gte_lt_exclusive] + // float another_value = 3 [(buf.validate.field).float = { gte: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gte float32 `protobuf:"fixed32,5,opt,name=gte,oneof"` +} + +func (*FloatRules_Gt) isFloatRules_GreaterThan() {} + +func (*FloatRules_Gte) isFloatRules_GreaterThan() {} + +// DoubleRules describes the rules applied to `double` values. These +// rules may also be applied to the `google.protobuf.DoubleValue` Well-Known-Type. +type DoubleRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyDouble { + // // value must equal 42.0 + // double value = 1 [(buf.validate.field).double.const = 42.0]; + // } + // + // ``` + Const *float64 `protobuf:"fixed64,1,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *DoubleRules_Lt + // *DoubleRules_Lte + LessThan isDoubleRules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *DoubleRules_Gt + // *DoubleRules_Gte + GreaterThan isDoubleRules_GreaterThan `protobuf_oneof:"greater_than"` + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. 
+ // + // ```proto + // + // message MyDouble { + // // value must be in list [1.0, 2.0, 3.0] + // double value = 1 [(buf.validate.field).double = { in: [1.0, 2.0, 3.0] }]; + // } + // + // ``` + In []float64 `protobuf:"fixed64,6,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyDouble { + // // value must not be in list [1.0, 2.0, 3.0] + // double value = 1 [(buf.validate.field).double = { not_in: [1.0, 2.0, 3.0] }]; + // } + // + // ``` + NotIn []float64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `finite` requires the field value to be finite. If the field value is + // infinite or NaN, an error message is generated. + Finite *bool `protobuf:"varint,8,opt,name=finite" json:"finite,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyDouble { + // double value = 1 [ + // (buf.validate.field).double.example = 1.0, + // (buf.validate.field).double.example = inf + // ]; + // } + // + // ``` + Example []float64 `protobuf:"fixed64,9,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DoubleRules) Reset() { + *x = DoubleRules{} + mi := &file_buf_validate_validate_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DoubleRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleRules) ProtoMessage() {} + +func (x *DoubleRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *DoubleRules) GetConst() float64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *DoubleRules) GetLessThan() isDoubleRules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *DoubleRules) GetLt() float64 { + if x != nil { + if x, ok := x.LessThan.(*DoubleRules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *DoubleRules) GetLte() float64 { + if x != nil { + if x, ok := x.LessThan.(*DoubleRules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *DoubleRules) GetGreaterThan() isDoubleRules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *DoubleRules) GetGt() float64 { + if x != nil { + if x, ok := x.GreaterThan.(*DoubleRules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *DoubleRules) GetGte() float64 { + if x != nil { + if x, ok := x.GreaterThan.(*DoubleRules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *DoubleRules) 
GetIn() []float64 { + if x != nil { + return x.In + } + return nil +} + +func (x *DoubleRules) GetNotIn() []float64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *DoubleRules) GetFinite() bool { + if x != nil && x.Finite != nil { + return *x.Finite + } + return false +} + +func (x *DoubleRules) GetExample() []float64 { + if x != nil { + return x.Example + } + return nil +} + +func (x *DoubleRules) SetConst(v float64) { + x.Const = &v +} + +func (x *DoubleRules) SetLt(v float64) { + x.LessThan = &DoubleRules_Lt{v} +} + +func (x *DoubleRules) SetLte(v float64) { + x.LessThan = &DoubleRules_Lte{v} +} + +func (x *DoubleRules) SetGt(v float64) { + x.GreaterThan = &DoubleRules_Gt{v} +} + +func (x *DoubleRules) SetGte(v float64) { + x.GreaterThan = &DoubleRules_Gte{v} +} + +func (x *DoubleRules) SetIn(v []float64) { + x.In = v +} + +func (x *DoubleRules) SetNotIn(v []float64) { + x.NotIn = v +} + +func (x *DoubleRules) SetFinite(v bool) { + x.Finite = &v +} + +func (x *DoubleRules) SetExample(v []float64) { + x.Example = v +} + +func (x *DoubleRules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *DoubleRules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != nil +} + +func (x *DoubleRules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*DoubleRules_Lt) + return ok +} + +func (x *DoubleRules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*DoubleRules_Lte) + return ok +} + +func (x *DoubleRules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan != nil +} + +func (x *DoubleRules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*DoubleRules_Gt) + return ok +} + +func (x *DoubleRules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*DoubleRules_Gte) + return ok +} + +func (x *DoubleRules) HasFinite() bool { + if x == nil { + return false + } + 
return x.Finite != nil +} + +func (x *DoubleRules) ClearConst() { + x.Const = nil +} + +func (x *DoubleRules) ClearLessThan() { + x.LessThan = nil +} + +func (x *DoubleRules) ClearLt() { + if _, ok := x.LessThan.(*DoubleRules_Lt); ok { + x.LessThan = nil + } +} + +func (x *DoubleRules) ClearLte() { + if _, ok := x.LessThan.(*DoubleRules_Lte); ok { + x.LessThan = nil + } +} + +func (x *DoubleRules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *DoubleRules) ClearGt() { + if _, ok := x.GreaterThan.(*DoubleRules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *DoubleRules) ClearGte() { + if _, ok := x.GreaterThan.(*DoubleRules_Gte); ok { + x.GreaterThan = nil + } +} + +func (x *DoubleRules) ClearFinite() { + x.Finite = nil +} + +const DoubleRules_LessThan_not_set_case case_DoubleRules_LessThan = 0 +const DoubleRules_Lt_case case_DoubleRules_LessThan = 2 +const DoubleRules_Lte_case case_DoubleRules_LessThan = 3 + +func (x *DoubleRules) WhichLessThan() case_DoubleRules_LessThan { + if x == nil { + return DoubleRules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *DoubleRules_Lt: + return DoubleRules_Lt_case + case *DoubleRules_Lte: + return DoubleRules_Lte_case + default: + return DoubleRules_LessThan_not_set_case + } +} + +const DoubleRules_GreaterThan_not_set_case case_DoubleRules_GreaterThan = 0 +const DoubleRules_Gt_case case_DoubleRules_GreaterThan = 4 +const DoubleRules_Gte_case case_DoubleRules_GreaterThan = 5 + +func (x *DoubleRules) WhichGreaterThan() case_DoubleRules_GreaterThan { + if x == nil { + return DoubleRules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *DoubleRules_Gt: + return DoubleRules_Gt_case + case *DoubleRules_Gte: + return DoubleRules_Gte_case + default: + return DoubleRules_GreaterThan_not_set_case + } +} + +type DoubleRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyDouble { + // // value must equal 42.0 + // double value = 1 [(buf.validate.field).double.const = 42.0]; + // } + // + // ``` + Const *float64 + // Fields of oneof LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyDouble { + // // value must be less than 10.0 + // double value = 1 [(buf.validate.field).double.lt = 10.0]; + // } + // + // ``` + Lt *float64 + // `lte` requires the field value to be less than or equal to the specified value + // (field <= value). If the field value is greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyDouble { + // // value must be less than or equal to 10.0 + // double value = 1 [(buf.validate.field).double.lte = 10.0]; + // } + // + // ``` + Lte *float64 + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or `lte`, + // the range is reversed, and the field value must be outside the specified + // range. If the field value doesn't meet the required conditions, an error + // message is generated. 
+ // + // ```proto + // + // message MyDouble { + // // value must be greater than 5.0 [double.gt] + // double value = 1 [(buf.validate.field).double.gt = 5.0]; + // + // // value must be greater than 5 and less than 10.0 [double.gt_lt] + // double other_value = 2 [(buf.validate.field).double = { gt: 5.0, lt: 10.0 }]; + // + // // value must be greater than 10 or less than 5.0 [double.gt_lt_exclusive] + // double another_value = 3 [(buf.validate.field).double = { gt: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gt *float64 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyDouble { + // // value must be greater than or equal to 5.0 [double.gte] + // double value = 1 [(buf.validate.field).double.gte = 5.0]; + // + // // value must be greater than or equal to 5.0 and less than 10.0 [double.gte_lt] + // double other_value = 2 [(buf.validate.field).double = { gte: 5.0, lt: 10.0 }]; + // + // // value must be greater than or equal to 10.0 or less than 5.0 [double.gte_lt_exclusive] + // double another_value = 3 [(buf.validate.field).double = { gte: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gte *float64 + // -- end of GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyDouble { + // // value must be in list [1.0, 2.0, 3.0] + // double value = 1 [(buf.validate.field).double = { in: [1.0, 2.0, 3.0] }]; + // } + // + // ``` + In []float64 + // `not_in` requires the field value to not be equal to any of the specified + // values. 
If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyDouble { + // // value must not be in list [1.0, 2.0, 3.0] + // double value = 1 [(buf.validate.field).double = { not_in: [1.0, 2.0, 3.0] }]; + // } + // + // ``` + NotIn []float64 + // `finite` requires the field value to be finite. If the field value is + // infinite or NaN, an error message is generated. + Finite *bool + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyDouble { + // double value = 1 [ + // (buf.validate.field).double.example = 1.0, + // (buf.validate.field).double.example = inf + // ]; + // } + // + // ``` + Example []float64 +} + +func (b0 DoubleRules_builder) Build() *DoubleRules { + m0 := &DoubleRules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &DoubleRules_Lt{*b.Lt} + } + if b.Lte != nil { + x.LessThan = &DoubleRules_Lte{*b.Lte} + } + if b.Gt != nil { + x.GreaterThan = &DoubleRules_Gt{*b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &DoubleRules_Gte{*b.Gte} + } + x.In = b.In + x.NotIn = b.NotIn + x.Finite = b.Finite + x.Example = b.Example + return m0 +} + +type case_DoubleRules_LessThan protoreflect.FieldNumber + +func (x case_DoubleRules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[7].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_DoubleRules_GreaterThan protoreflect.FieldNumber + +func (x case_DoubleRules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[7].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type 
isDoubleRules_LessThan interface { + isDoubleRules_LessThan() +} + +type DoubleRules_Lt struct { + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyDouble { + // // value must be less than 10.0 + // double value = 1 [(buf.validate.field).double.lt = 10.0]; + // } + // + // ``` + Lt float64 `protobuf:"fixed64,2,opt,name=lt,oneof"` +} + +type DoubleRules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified value + // (field <= value). If the field value is greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyDouble { + // // value must be less than or equal to 10.0 + // double value = 1 [(buf.validate.field).double.lte = 10.0]; + // } + // + // ``` + Lte float64 `protobuf:"fixed64,3,opt,name=lte,oneof"` +} + +func (*DoubleRules_Lt) isDoubleRules_LessThan() {} + +func (*DoubleRules_Lte) isDoubleRules_LessThan() {} + +type isDoubleRules_GreaterThan interface { + isDoubleRules_GreaterThan() +} + +type DoubleRules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or `lte`, + // the range is reversed, and the field value must be outside the specified + // range. If the field value doesn't meet the required conditions, an error + // message is generated. 
+ // + // ```proto + // + // message MyDouble { + // // value must be greater than 5.0 [double.gt] + // double value = 1 [(buf.validate.field).double.gt = 5.0]; + // + // // value must be greater than 5 and less than 10.0 [double.gt_lt] + // double other_value = 2 [(buf.validate.field).double = { gt: 5.0, lt: 10.0 }]; + // + // // value must be greater than 10 or less than 5.0 [double.gt_lt_exclusive] + // double another_value = 3 [(buf.validate.field).double = { gt: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gt float64 `protobuf:"fixed64,4,opt,name=gt,oneof"` +} + +type DoubleRules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyDouble { + // // value must be greater than or equal to 5.0 [double.gte] + // double value = 1 [(buf.validate.field).double.gte = 5.0]; + // + // // value must be greater than or equal to 5.0 and less than 10.0 [double.gte_lt] + // double other_value = 2 [(buf.validate.field).double = { gte: 5.0, lt: 10.0 }]; + // + // // value must be greater than or equal to 10.0 or less than 5.0 [double.gte_lt_exclusive] + // double another_value = 3 [(buf.validate.field).double = { gte: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gte float64 `protobuf:"fixed64,5,opt,name=gte,oneof"` +} + +func (*DoubleRules_Gt) isDoubleRules_GreaterThan() {} + +func (*DoubleRules_Gte) isDoubleRules_GreaterThan() {} + +// Int32Rules describes the rules applied to `int32` values. These +// rules may also be applied to the `google.protobuf.Int32Value` Well-Known-Type. +type Int32Rules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified value. 
If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must equal 42 + // int32 value = 1 [(buf.validate.field).int32.const = 42]; + // } + // + // ``` + Const *int32 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *Int32Rules_Lt + // *Int32Rules_Lte + LessThan isInt32Rules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *Int32Rules_Gt + // *Int32Rules_Gte + GreaterThan isInt32Rules_GreaterThan `protobuf_oneof:"greater_than"` + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyInt32 { + // // value must be in list [1, 2, 3] + // int32 value = 1 [(buf.validate.field).int32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int32 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error message + // is generated. + // + // ```proto + // + // message MyInt32 { + // // value must not be in list [1, 2, 3] + // int32 value = 1 [(buf.validate.field).int32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int32 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyInt32 { + // int32 value = 1 [ + // (buf.validate.field).int32.example = 1, + // (buf.validate.field).int32.example = -10 + // ]; + // } + // + // ``` + Example []int32 `protobuf:"varint,8,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Int32Rules) Reset() { + *x = Int32Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Int32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Rules) ProtoMessage() {} + +func (x *Int32Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Int32Rules) GetConst() int32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *Int32Rules) GetLessThan() isInt32Rules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *Int32Rules) GetLt() int32 { + if x != nil { + if x, ok := x.LessThan.(*Int32Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *Int32Rules) GetLte() int32 { + if x != nil { + if x, ok := x.LessThan.(*Int32Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *Int32Rules) GetGreaterThan() isInt32Rules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *Int32Rules) GetGt() int32 { + if x != nil { + if x, ok := x.GreaterThan.(*Int32Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *Int32Rules) GetGte() int32 { + if x != nil { + if x, ok := x.GreaterThan.(*Int32Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *Int32Rules) GetIn() []int32 { + if x != nil { + 
return x.In + } + return nil +} + +func (x *Int32Rules) GetNotIn() []int32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *Int32Rules) GetExample() []int32 { + if x != nil { + return x.Example + } + return nil +} + +func (x *Int32Rules) SetConst(v int32) { + x.Const = &v +} + +func (x *Int32Rules) SetLt(v int32) { + x.LessThan = &Int32Rules_Lt{v} +} + +func (x *Int32Rules) SetLte(v int32) { + x.LessThan = &Int32Rules_Lte{v} +} + +func (x *Int32Rules) SetGt(v int32) { + x.GreaterThan = &Int32Rules_Gt{v} +} + +func (x *Int32Rules) SetGte(v int32) { + x.GreaterThan = &Int32Rules_Gte{v} +} + +func (x *Int32Rules) SetIn(v []int32) { + x.In = v +} + +func (x *Int32Rules) SetNotIn(v []int32) { + x.NotIn = v +} + +func (x *Int32Rules) SetExample(v []int32) { + x.Example = v +} + +func (x *Int32Rules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *Int32Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != nil +} + +func (x *Int32Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*Int32Rules_Lt) + return ok +} + +func (x *Int32Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*Int32Rules_Lte) + return ok +} + +func (x *Int32Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan != nil +} + +func (x *Int32Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*Int32Rules_Gt) + return ok +} + +func (x *Int32Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*Int32Rules_Gte) + return ok +} + +func (x *Int32Rules) ClearConst() { + x.Const = nil +} + +func (x *Int32Rules) ClearLessThan() { + x.LessThan = nil +} + +func (x *Int32Rules) ClearLt() { + if _, ok := x.LessThan.(*Int32Rules_Lt); ok { + x.LessThan = nil + } +} + +func (x *Int32Rules) ClearLte() { + if _, ok := x.LessThan.(*Int32Rules_Lte); ok { + x.LessThan = nil + } +} + 
+func (x *Int32Rules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *Int32Rules) ClearGt() { + if _, ok := x.GreaterThan.(*Int32Rules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *Int32Rules) ClearGte() { + if _, ok := x.GreaterThan.(*Int32Rules_Gte); ok { + x.GreaterThan = nil + } +} + +const Int32Rules_LessThan_not_set_case case_Int32Rules_LessThan = 0 +const Int32Rules_Lt_case case_Int32Rules_LessThan = 2 +const Int32Rules_Lte_case case_Int32Rules_LessThan = 3 + +func (x *Int32Rules) WhichLessThan() case_Int32Rules_LessThan { + if x == nil { + return Int32Rules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *Int32Rules_Lt: + return Int32Rules_Lt_case + case *Int32Rules_Lte: + return Int32Rules_Lte_case + default: + return Int32Rules_LessThan_not_set_case + } +} + +const Int32Rules_GreaterThan_not_set_case case_Int32Rules_GreaterThan = 0 +const Int32Rules_Gt_case case_Int32Rules_GreaterThan = 4 +const Int32Rules_Gte_case case_Int32Rules_GreaterThan = 5 + +func (x *Int32Rules) WhichGreaterThan() case_Int32Rules_GreaterThan { + if x == nil { + return Int32Rules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *Int32Rules_Gt: + return Int32Rules_Gt_case + case *Int32Rules_Gte: + return Int32Rules_Gte_case + default: + return Int32Rules_GreaterThan_not_set_case + } +} + +type Int32Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must equal 42 + // int32 value = 1 [(buf.validate.field).int32.const = 42]; + // } + // + // ``` + Const *int32 + // Fields of oneof LessThan: + // `lt` requires the field value to be less than the specified value (field + // < value). 
If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must be less than 10 + // int32 value = 1 [(buf.validate.field).int32.lt = 10]; + // } + // + // ``` + Lt *int32 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must be less than or equal to 10 + // int32 value = 1 [(buf.validate.field).int32.lte = 10]; + // } + // + // ``` + Lte *int32 + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must be greater than 5 [int32.gt] + // int32 value = 1 [(buf.validate.field).int32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [int32.gt_lt] + // int32 other_value = 2 [(buf.validate.field).int32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [int32.gt_lt_exclusive] + // int32 another_value = 3 [(buf.validate.field).int32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *int32 + // `gte` requires the field value to be greater than or equal to the specified value + // (exclusive). If the value of `gte` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyInt32 { + // // value must be greater than or equal to 5 [int32.gte] + // int32 value = 1 [(buf.validate.field).int32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [int32.gte_lt] + // int32 other_value = 2 [(buf.validate.field).int32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [int32.gte_lt_exclusive] + // int32 another_value = 3 [(buf.validate.field).int32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *int32 + // -- end of GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyInt32 { + // // value must be in list [1, 2, 3] + // int32 value = 1 [(buf.validate.field).int32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int32 + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error message + // is generated. + // + // ```proto + // + // message MyInt32 { + // // value must not be in list [1, 2, 3] + // int32 value = 1 [(buf.validate.field).int32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int32 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyInt32 { + // int32 value = 1 [ + // (buf.validate.field).int32.example = 1, + // (buf.validate.field).int32.example = -10 + // ]; + // } + // + // ``` + Example []int32 +} + +func (b0 Int32Rules_builder) Build() *Int32Rules { + m0 := &Int32Rules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &Int32Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.LessThan = &Int32Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.GreaterThan = &Int32Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &Int32Rules_Gte{*b.Gte} + } + x.In = b.In + x.NotIn = b.NotIn + x.Example = b.Example + return m0 +} + +type case_Int32Rules_LessThan protoreflect.FieldNumber + +func (x case_Int32Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[8].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_Int32Rules_GreaterThan protoreflect.FieldNumber + +func (x case_Int32Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[8].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isInt32Rules_LessThan interface { + isInt32Rules_LessThan() +} + +type Int32Rules_Lt struct { + // `lt` requires the field value to be less than the specified value (field + // < value). If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must be less than 10 + // int32 value = 1 [(buf.validate.field).int32.lt = 10]; + // } + // + // ``` + Lt int32 `protobuf:"varint,2,opt,name=lt,oneof"` +} + +type Int32Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). 
If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must be less than or equal to 10 + // int32 value = 1 [(buf.validate.field).int32.lte = 10]; + // } + // + // ``` + Lte int32 `protobuf:"varint,3,opt,name=lte,oneof"` +} + +func (*Int32Rules_Lt) isInt32Rules_LessThan() {} + +func (*Int32Rules_Lte) isInt32Rules_LessThan() {} + +type isInt32Rules_GreaterThan interface { + isInt32Rules_GreaterThan() +} + +type Int32Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must be greater than 5 [int32.gt] + // int32 value = 1 [(buf.validate.field).int32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [int32.gt_lt] + // int32 other_value = 2 [(buf.validate.field).int32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [int32.gt_lt_exclusive] + // int32 another_value = 3 [(buf.validate.field).int32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt int32 `protobuf:"varint,4,opt,name=gt,oneof"` +} + +type Int32Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified value + // (exclusive). If the value of `gte` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyInt32 { + // // value must be greater than or equal to 5 [int32.gte] + // int32 value = 1 [(buf.validate.field).int32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [int32.gte_lt] + // int32 other_value = 2 [(buf.validate.field).int32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [int32.gte_lt_exclusive] + // int32 another_value = 3 [(buf.validate.field).int32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte int32 `protobuf:"varint,5,opt,name=gte,oneof"` +} + +func (*Int32Rules_Gt) isInt32Rules_GreaterThan() {} + +func (*Int32Rules_Gte) isInt32Rules_GreaterThan() {} + +// Int64Rules describes the rules applied to `int64` values. These +// rules may also be applied to the `google.protobuf.Int64Value` Well-Known-Type. +type Int64Rules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must equal 42 + // int64 value = 1 [(buf.validate.field).int64.const = 42]; + // } + // + // ``` + Const *int64 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *Int64Rules_Lt + // *Int64Rules_Lte + LessThan isInt64Rules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *Int64Rules_Gt + // *Int64Rules_Gte + GreaterThan isInt64Rules_GreaterThan `protobuf_oneof:"greater_than"` + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. 
+ // + // ```proto + // + // message MyInt64 { + // // value must be in list [1, 2, 3] + // int64 value = 1 [(buf.validate.field).int64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int64 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must not be in list [1, 2, 3] + // int64 value = 1 [(buf.validate.field).int64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int64 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyInt64 { + // int64 value = 1 [ + // (buf.validate.field).int64.example = 1, + // (buf.validate.field).int64.example = -10 + // ]; + // } + // + // ``` + Example []int64 `protobuf:"varint,9,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Int64Rules) Reset() { + *x = Int64Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Int64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Rules) ProtoMessage() {} + +func (x *Int64Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Int64Rules) GetConst() int64 { + if x != nil && 
x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *Int64Rules) GetLessThan() isInt64Rules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *Int64Rules) GetLt() int64 { + if x != nil { + if x, ok := x.LessThan.(*Int64Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *Int64Rules) GetLte() int64 { + if x != nil { + if x, ok := x.LessThan.(*Int64Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *Int64Rules) GetGreaterThan() isInt64Rules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *Int64Rules) GetGt() int64 { + if x != nil { + if x, ok := x.GreaterThan.(*Int64Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *Int64Rules) GetGte() int64 { + if x != nil { + if x, ok := x.GreaterThan.(*Int64Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *Int64Rules) GetIn() []int64 { + if x != nil { + return x.In + } + return nil +} + +func (x *Int64Rules) GetNotIn() []int64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *Int64Rules) GetExample() []int64 { + if x != nil { + return x.Example + } + return nil +} + +func (x *Int64Rules) SetConst(v int64) { + x.Const = &v +} + +func (x *Int64Rules) SetLt(v int64) { + x.LessThan = &Int64Rules_Lt{v} +} + +func (x *Int64Rules) SetLte(v int64) { + x.LessThan = &Int64Rules_Lte{v} +} + +func (x *Int64Rules) SetGt(v int64) { + x.GreaterThan = &Int64Rules_Gt{v} +} + +func (x *Int64Rules) SetGte(v int64) { + x.GreaterThan = &Int64Rules_Gte{v} +} + +func (x *Int64Rules) SetIn(v []int64) { + x.In = v +} + +func (x *Int64Rules) SetNotIn(v []int64) { + x.NotIn = v +} + +func (x *Int64Rules) SetExample(v []int64) { + x.Example = v +} + +func (x *Int64Rules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *Int64Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != nil +} + +func (x *Int64Rules) HasLt() bool { + if x == nil { + 
return false + } + _, ok := x.LessThan.(*Int64Rules_Lt) + return ok +} + +func (x *Int64Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*Int64Rules_Lte) + return ok +} + +func (x *Int64Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan != nil +} + +func (x *Int64Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*Int64Rules_Gt) + return ok +} + +func (x *Int64Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*Int64Rules_Gte) + return ok +} + +func (x *Int64Rules) ClearConst() { + x.Const = nil +} + +func (x *Int64Rules) ClearLessThan() { + x.LessThan = nil +} + +func (x *Int64Rules) ClearLt() { + if _, ok := x.LessThan.(*Int64Rules_Lt); ok { + x.LessThan = nil + } +} + +func (x *Int64Rules) ClearLte() { + if _, ok := x.LessThan.(*Int64Rules_Lte); ok { + x.LessThan = nil + } +} + +func (x *Int64Rules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *Int64Rules) ClearGt() { + if _, ok := x.GreaterThan.(*Int64Rules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *Int64Rules) ClearGte() { + if _, ok := x.GreaterThan.(*Int64Rules_Gte); ok { + x.GreaterThan = nil + } +} + +const Int64Rules_LessThan_not_set_case case_Int64Rules_LessThan = 0 +const Int64Rules_Lt_case case_Int64Rules_LessThan = 2 +const Int64Rules_Lte_case case_Int64Rules_LessThan = 3 + +func (x *Int64Rules) WhichLessThan() case_Int64Rules_LessThan { + if x == nil { + return Int64Rules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *Int64Rules_Lt: + return Int64Rules_Lt_case + case *Int64Rules_Lte: + return Int64Rules_Lte_case + default: + return Int64Rules_LessThan_not_set_case + } +} + +const Int64Rules_GreaterThan_not_set_case case_Int64Rules_GreaterThan = 0 +const Int64Rules_Gt_case case_Int64Rules_GreaterThan = 4 +const Int64Rules_Gte_case case_Int64Rules_GreaterThan = 5 + +func (x *Int64Rules) WhichGreaterThan() case_Int64Rules_GreaterThan { 
+ if x == nil { + return Int64Rules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *Int64Rules_Gt: + return Int64Rules_Gt_case + case *Int64Rules_Gte: + return Int64Rules_Gte_case + default: + return Int64Rules_GreaterThan_not_set_case + } +} + +type Int64Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must equal 42 + // int64 value = 1 [(buf.validate.field).int64.const = 42]; + // } + // + // ``` + Const *int64 + // Fields of oneof LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must be less than 10 + // int64 value = 1 [(buf.validate.field).int64.lt = 10]; + // } + // + // ``` + Lt *int64 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must be less than or equal to 10 + // int64 value = 1 [(buf.validate.field).int64.lte = 10]; + // } + // + // ``` + Lte *int64 + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyInt64 { + // // value must be greater than 5 [int64.gt] + // int64 value = 1 [(buf.validate.field).int64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [int64.gt_lt] + // int64 other_value = 2 [(buf.validate.field).int64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [int64.gt_lt_exclusive] + // int64 another_value = 3 [(buf.validate.field).int64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *int64 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must be greater than or equal to 5 [int64.gte] + // int64 value = 1 [(buf.validate.field).int64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [int64.gte_lt] + // int64 other_value = 2 [(buf.validate.field).int64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [int64.gte_lt_exclusive] + // int64 another_value = 3 [(buf.validate.field).int64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *int64 + // -- end of GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyInt64 { + // // value must be in list [1, 2, 3] + // int64 value = 1 [(buf.validate.field).int64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int64 + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. 
+ // + // ```proto + // + // message MyInt64 { + // // value must not be in list [1, 2, 3] + // int64 value = 1 [(buf.validate.field).int64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int64 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyInt64 { + // int64 value = 1 [ + // (buf.validate.field).int64.example = 1, + // (buf.validate.field).int64.example = -10 + // ]; + // } + // + // ``` + Example []int64 +} + +func (b0 Int64Rules_builder) Build() *Int64Rules { + m0 := &Int64Rules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &Int64Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.LessThan = &Int64Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.GreaterThan = &Int64Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &Int64Rules_Gte{*b.Gte} + } + x.In = b.In + x.NotIn = b.NotIn + x.Example = b.Example + return m0 +} + +type case_Int64Rules_LessThan protoreflect.FieldNumber + +func (x case_Int64Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[9].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_Int64Rules_GreaterThan protoreflect.FieldNumber + +func (x case_Int64Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[9].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isInt64Rules_LessThan interface { + isInt64Rules_LessThan() +} + +type Int64Rules_Lt struct { + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. 
+ // + // ```proto + // + // message MyInt64 { + // // value must be less than 10 + // int64 value = 1 [(buf.validate.field).int64.lt = 10]; + // } + // + // ``` + Lt int64 `protobuf:"varint,2,opt,name=lt,oneof"` +} + +type Int64Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must be less than or equal to 10 + // int64 value = 1 [(buf.validate.field).int64.lte = 10]; + // } + // + // ``` + Lte int64 `protobuf:"varint,3,opt,name=lte,oneof"` +} + +func (*Int64Rules_Lt) isInt64Rules_LessThan() {} + +func (*Int64Rules_Lte) isInt64Rules_LessThan() {} + +type isInt64Rules_GreaterThan interface { + isInt64Rules_GreaterThan() +} + +type Int64Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must be greater than 5 [int64.gt] + // int64 value = 1 [(buf.validate.field).int64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [int64.gt_lt] + // int64 other_value = 2 [(buf.validate.field).int64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [int64.gt_lt_exclusive] + // int64 another_value = 3 [(buf.validate.field).int64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt int64 `protobuf:"varint,4,opt,name=gt,oneof"` +} + +type Int64Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). 
If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must be greater than or equal to 5 [int64.gte] + // int64 value = 1 [(buf.validate.field).int64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [int64.gte_lt] + // int64 other_value = 2 [(buf.validate.field).int64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [int64.gte_lt_exclusive] + // int64 another_value = 3 [(buf.validate.field).int64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte int64 `protobuf:"varint,5,opt,name=gte,oneof"` +} + +func (*Int64Rules_Gt) isInt64Rules_GreaterThan() {} + +func (*Int64Rules_Gte) isInt64Rules_GreaterThan() {} + +// UInt32Rules describes the rules applied to `uint32` values. These +// rules may also be applied to the `google.protobuf.UInt32Value` Well-Known-Type. +type UInt32Rules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must equal 42 + // uint32 value = 1 [(buf.validate.field).uint32.const = 42]; + // } + // + // ``` + Const *uint32 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *UInt32Rules_Lt + // *UInt32Rules_Lte + LessThan isUInt32Rules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *UInt32Rules_Gt + // *UInt32Rules_Gte + GreaterThan isUInt32Rules_GreaterThan `protobuf_oneof:"greater_than"` + // `in` requires the field value to be equal to one of the specified values. 
+ // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be in list [1, 2, 3] + // uint32 value = 1 [(buf.validate.field).uint32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []uint32 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must not be in list [1, 2, 3] + // uint32 value = 1 [(buf.validate.field).uint32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []uint32 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyUInt32 { + // uint32 value = 1 [ + // (buf.validate.field).uint32.example = 1, + // (buf.validate.field).uint32.example = 10 + // ]; + // } + // + // ``` + Example []uint32 `protobuf:"varint,8,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UInt32Rules) Reset() { + *x = UInt32Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UInt32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt32Rules) ProtoMessage() {} + +func (x *UInt32Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *UInt32Rules) GetConst() uint32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *UInt32Rules) GetLessThan() isUInt32Rules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *UInt32Rules) GetLt() uint32 { + if x != nil { + if x, ok := x.LessThan.(*UInt32Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *UInt32Rules) GetLte() uint32 { + if x != nil { + if x, ok := x.LessThan.(*UInt32Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *UInt32Rules) GetGreaterThan() isUInt32Rules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *UInt32Rules) GetGt() uint32 { + if x != nil { + if x, ok := x.GreaterThan.(*UInt32Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *UInt32Rules) GetGte() uint32 { + if x != nil { + if x, ok := x.GreaterThan.(*UInt32Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *UInt32Rules) GetIn() 
[]uint32 { + if x != nil { + return x.In + } + return nil +} + +func (x *UInt32Rules) GetNotIn() []uint32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *UInt32Rules) GetExample() []uint32 { + if x != nil { + return x.Example + } + return nil +} + +func (x *UInt32Rules) SetConst(v uint32) { + x.Const = &v +} + +func (x *UInt32Rules) SetLt(v uint32) { + x.LessThan = &UInt32Rules_Lt{v} +} + +func (x *UInt32Rules) SetLte(v uint32) { + x.LessThan = &UInt32Rules_Lte{v} +} + +func (x *UInt32Rules) SetGt(v uint32) { + x.GreaterThan = &UInt32Rules_Gt{v} +} + +func (x *UInt32Rules) SetGte(v uint32) { + x.GreaterThan = &UInt32Rules_Gte{v} +} + +func (x *UInt32Rules) SetIn(v []uint32) { + x.In = v +} + +func (x *UInt32Rules) SetNotIn(v []uint32) { + x.NotIn = v +} + +func (x *UInt32Rules) SetExample(v []uint32) { + x.Example = v +} + +func (x *UInt32Rules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *UInt32Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != nil +} + +func (x *UInt32Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*UInt32Rules_Lt) + return ok +} + +func (x *UInt32Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*UInt32Rules_Lte) + return ok +} + +func (x *UInt32Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan != nil +} + +func (x *UInt32Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*UInt32Rules_Gt) + return ok +} + +func (x *UInt32Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*UInt32Rules_Gte) + return ok +} + +func (x *UInt32Rules) ClearConst() { + x.Const = nil +} + +func (x *UInt32Rules) ClearLessThan() { + x.LessThan = nil +} + +func (x *UInt32Rules) ClearLt() { + if _, ok := x.LessThan.(*UInt32Rules_Lt); ok { + x.LessThan = nil + } +} + +func (x *UInt32Rules) ClearLte() { + if _, ok 
:= x.LessThan.(*UInt32Rules_Lte); ok { + x.LessThan = nil + } +} + +func (x *UInt32Rules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *UInt32Rules) ClearGt() { + if _, ok := x.GreaterThan.(*UInt32Rules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *UInt32Rules) ClearGte() { + if _, ok := x.GreaterThan.(*UInt32Rules_Gte); ok { + x.GreaterThan = nil + } +} + +const UInt32Rules_LessThan_not_set_case case_UInt32Rules_LessThan = 0 +const UInt32Rules_Lt_case case_UInt32Rules_LessThan = 2 +const UInt32Rules_Lte_case case_UInt32Rules_LessThan = 3 + +func (x *UInt32Rules) WhichLessThan() case_UInt32Rules_LessThan { + if x == nil { + return UInt32Rules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *UInt32Rules_Lt: + return UInt32Rules_Lt_case + case *UInt32Rules_Lte: + return UInt32Rules_Lte_case + default: + return UInt32Rules_LessThan_not_set_case + } +} + +const UInt32Rules_GreaterThan_not_set_case case_UInt32Rules_GreaterThan = 0 +const UInt32Rules_Gt_case case_UInt32Rules_GreaterThan = 4 +const UInt32Rules_Gte_case case_UInt32Rules_GreaterThan = 5 + +func (x *UInt32Rules) WhichGreaterThan() case_UInt32Rules_GreaterThan { + if x == nil { + return UInt32Rules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *UInt32Rules_Gt: + return UInt32Rules_Gt_case + case *UInt32Rules_Gte: + return UInt32Rules_Gte_case + default: + return UInt32Rules_GreaterThan_not_set_case + } +} + +type UInt32Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MyUInt32 { + // // value must equal 42 + // uint32 value = 1 [(buf.validate.field).uint32.const = 42]; + // } + // + // ``` + Const *uint32 + // Fields of oneof LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be less than 10 + // uint32 value = 1 [(buf.validate.field).uint32.lt = 10]; + // } + // + // ``` + Lt *uint32 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be less than or equal to 10 + // uint32 value = 1 [(buf.validate.field).uint32.lte = 10]; + // } + // + // ``` + Lte *uint32 + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be greater than 5 [uint32.gt] + // uint32 value = 1 [(buf.validate.field).uint32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [uint32.gt_lt] + // uint32 other_value = 2 [(buf.validate.field).uint32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [uint32.gt_lt_exclusive] + // uint32 another_value = 3 [(buf.validate.field).uint32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *uint32 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). 
If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be greater than or equal to 5 [uint32.gte] + // uint32 value = 1 [(buf.validate.field).uint32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [uint32.gte_lt] + // uint32 other_value = 2 [(buf.validate.field).uint32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [uint32.gte_lt_exclusive] + // uint32 another_value = 3 [(buf.validate.field).uint32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *uint32 + // -- end of GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be in list [1, 2, 3] + // uint32 value = 1 [(buf.validate.field).uint32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []uint32 + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must not be in list [1, 2, 3] + // uint32 value = 1 [(buf.validate.field).uint32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []uint32 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyUInt32 { + // uint32 value = 1 [ + // (buf.validate.field).uint32.example = 1, + // (buf.validate.field).uint32.example = 10 + // ]; + // } + // + // ``` + Example []uint32 +} + +func (b0 UInt32Rules_builder) Build() *UInt32Rules { + m0 := &UInt32Rules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &UInt32Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.LessThan = &UInt32Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.GreaterThan = &UInt32Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &UInt32Rules_Gte{*b.Gte} + } + x.In = b.In + x.NotIn = b.NotIn + x.Example = b.Example + return m0 +} + +type case_UInt32Rules_LessThan protoreflect.FieldNumber + +func (x case_UInt32Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[10].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_UInt32Rules_GreaterThan protoreflect.FieldNumber + +func (x case_UInt32Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[10].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isUInt32Rules_LessThan interface { + isUInt32Rules_LessThan() +} + +type UInt32Rules_Lt struct { + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be less than 10 + // uint32 value = 1 [(buf.validate.field).uint32.lt = 10]; + // } + // + // ``` + Lt uint32 `protobuf:"varint,2,opt,name=lt,oneof"` +} + +type UInt32Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). 
If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be less than or equal to 10 + // uint32 value = 1 [(buf.validate.field).uint32.lte = 10]; + // } + // + // ``` + Lte uint32 `protobuf:"varint,3,opt,name=lte,oneof"` +} + +func (*UInt32Rules_Lt) isUInt32Rules_LessThan() {} + +func (*UInt32Rules_Lte) isUInt32Rules_LessThan() {} + +type isUInt32Rules_GreaterThan interface { + isUInt32Rules_GreaterThan() +} + +type UInt32Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be greater than 5 [uint32.gt] + // uint32 value = 1 [(buf.validate.field).uint32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [uint32.gt_lt] + // uint32 other_value = 2 [(buf.validate.field).uint32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [uint32.gt_lt_exclusive] + // uint32 another_value = 3 [(buf.validate.field).uint32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt uint32 `protobuf:"varint,4,opt,name=gt,oneof"` +} + +type UInt32Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyUInt32 { + // // value must be greater than or equal to 5 [uint32.gte] + // uint32 value = 1 [(buf.validate.field).uint32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [uint32.gte_lt] + // uint32 other_value = 2 [(buf.validate.field).uint32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [uint32.gte_lt_exclusive] + // uint32 another_value = 3 [(buf.validate.field).uint32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte uint32 `protobuf:"varint,5,opt,name=gte,oneof"` +} + +func (*UInt32Rules_Gt) isUInt32Rules_GreaterThan() {} + +func (*UInt32Rules_Gte) isUInt32Rules_GreaterThan() {} + +// UInt64Rules describes the rules applied to `uint64` values. These +// rules may also be applied to the `google.protobuf.UInt64Value` Well-Known-Type. +type UInt64Rules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must equal 42 + // uint64 value = 1 [(buf.validate.field).uint64.const = 42]; + // } + // + // ``` + Const *uint64 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *UInt64Rules_Lt + // *UInt64Rules_Lte + LessThan isUInt64Rules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *UInt64Rules_Gt + // *UInt64Rules_Gte + GreaterThan isUInt64Rules_GreaterThan `protobuf_oneof:"greater_than"` + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. 
+ // + // ```proto + // + // message MyUInt64 { + // // value must be in list [1, 2, 3] + // uint64 value = 1 [(buf.validate.field).uint64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []uint64 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must not be in list [1, 2, 3] + // uint64 value = 1 [(buf.validate.field).uint64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []uint64 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyUInt64 { + // uint64 value = 1 [ + // (buf.validate.field).uint64.example = 1, + // (buf.validate.field).uint64.example = -10 + // ]; + // } + // + // ``` + Example []uint64 `protobuf:"varint,8,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UInt64Rules) Reset() { + *x = UInt64Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UInt64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt64Rules) ProtoMessage() {} + +func (x *UInt64Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *UInt64Rules) GetConst() uint64 
{ + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *UInt64Rules) GetLessThan() isUInt64Rules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *UInt64Rules) GetLt() uint64 { + if x != nil { + if x, ok := x.LessThan.(*UInt64Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *UInt64Rules) GetLte() uint64 { + if x != nil { + if x, ok := x.LessThan.(*UInt64Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *UInt64Rules) GetGreaterThan() isUInt64Rules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *UInt64Rules) GetGt() uint64 { + if x != nil { + if x, ok := x.GreaterThan.(*UInt64Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *UInt64Rules) GetGte() uint64 { + if x != nil { + if x, ok := x.GreaterThan.(*UInt64Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *UInt64Rules) GetIn() []uint64 { + if x != nil { + return x.In + } + return nil +} + +func (x *UInt64Rules) GetNotIn() []uint64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *UInt64Rules) GetExample() []uint64 { + if x != nil { + return x.Example + } + return nil +} + +func (x *UInt64Rules) SetConst(v uint64) { + x.Const = &v +} + +func (x *UInt64Rules) SetLt(v uint64) { + x.LessThan = &UInt64Rules_Lt{v} +} + +func (x *UInt64Rules) SetLte(v uint64) { + x.LessThan = &UInt64Rules_Lte{v} +} + +func (x *UInt64Rules) SetGt(v uint64) { + x.GreaterThan = &UInt64Rules_Gt{v} +} + +func (x *UInt64Rules) SetGte(v uint64) { + x.GreaterThan = &UInt64Rules_Gte{v} +} + +func (x *UInt64Rules) SetIn(v []uint64) { + x.In = v +} + +func (x *UInt64Rules) SetNotIn(v []uint64) { + x.NotIn = v +} + +func (x *UInt64Rules) SetExample(v []uint64) { + x.Example = v +} + +func (x *UInt64Rules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *UInt64Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != 
nil +} + +func (x *UInt64Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*UInt64Rules_Lt) + return ok +} + +func (x *UInt64Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*UInt64Rules_Lte) + return ok +} + +func (x *UInt64Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan != nil +} + +func (x *UInt64Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*UInt64Rules_Gt) + return ok +} + +func (x *UInt64Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*UInt64Rules_Gte) + return ok +} + +func (x *UInt64Rules) ClearConst() { + x.Const = nil +} + +func (x *UInt64Rules) ClearLessThan() { + x.LessThan = nil +} + +func (x *UInt64Rules) ClearLt() { + if _, ok := x.LessThan.(*UInt64Rules_Lt); ok { + x.LessThan = nil + } +} + +func (x *UInt64Rules) ClearLte() { + if _, ok := x.LessThan.(*UInt64Rules_Lte); ok { + x.LessThan = nil + } +} + +func (x *UInt64Rules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *UInt64Rules) ClearGt() { + if _, ok := x.GreaterThan.(*UInt64Rules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *UInt64Rules) ClearGte() { + if _, ok := x.GreaterThan.(*UInt64Rules_Gte); ok { + x.GreaterThan = nil + } +} + +const UInt64Rules_LessThan_not_set_case case_UInt64Rules_LessThan = 0 +const UInt64Rules_Lt_case case_UInt64Rules_LessThan = 2 +const UInt64Rules_Lte_case case_UInt64Rules_LessThan = 3 + +func (x *UInt64Rules) WhichLessThan() case_UInt64Rules_LessThan { + if x == nil { + return UInt64Rules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *UInt64Rules_Lt: + return UInt64Rules_Lt_case + case *UInt64Rules_Lte: + return UInt64Rules_Lte_case + default: + return UInt64Rules_LessThan_not_set_case + } +} + +const UInt64Rules_GreaterThan_not_set_case case_UInt64Rules_GreaterThan = 0 +const UInt64Rules_Gt_case case_UInt64Rules_GreaterThan = 4 +const UInt64Rules_Gte_case 
case_UInt64Rules_GreaterThan = 5 + +func (x *UInt64Rules) WhichGreaterThan() case_UInt64Rules_GreaterThan { + if x == nil { + return UInt64Rules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *UInt64Rules_Gt: + return UInt64Rules_Gt_case + case *UInt64Rules_Gte: + return UInt64Rules_Gte_case + default: + return UInt64Rules_GreaterThan_not_set_case + } +} + +type UInt64Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must equal 42 + // uint64 value = 1 [(buf.validate.field).uint64.const = 42]; + // } + // + // ``` + Const *uint64 + // Fields of oneof LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be less than 10 + // uint64 value = 1 [(buf.validate.field).uint64.lt = 10]; + // } + // + // ``` + Lt *uint64 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be less than or equal to 10 + // uint64 value = 1 [(buf.validate.field).uint64.lte = 10]; + // } + // + // ``` + Lte *uint64 + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. 
If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be greater than 5 [uint64.gt] + // uint64 value = 1 [(buf.validate.field).uint64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [uint64.gt_lt] + // uint64 other_value = 2 [(buf.validate.field).uint64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [uint64.gt_lt_exclusive] + // uint64 another_value = 3 [(buf.validate.field).uint64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *uint64 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be greater than or equal to 5 [uint64.gte] + // uint64 value = 1 [(buf.validate.field).uint64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [uint64.gte_lt] + // uint64 other_value = 2 [(buf.validate.field).uint64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [uint64.gte_lt_exclusive] + // uint64 another_value = 3 [(buf.validate.field).uint64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *uint64 + // -- end of GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be in list [1, 2, 3] + // uint64 value = 1 [(buf.validate.field).uint64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []uint64 + // `not_in` requires the field value to not be equal to any of the specified + // values. 
If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must not be in list [1, 2, 3] + // uint64 value = 1 [(buf.validate.field).uint64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []uint64 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyUInt64 { + // uint64 value = 1 [ + // (buf.validate.field).uint64.example = 1, + // (buf.validate.field).uint64.example = -10 + // ]; + // } + // + // ``` + Example []uint64 +} + +func (b0 UInt64Rules_builder) Build() *UInt64Rules { + m0 := &UInt64Rules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &UInt64Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.LessThan = &UInt64Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.GreaterThan = &UInt64Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &UInt64Rules_Gte{*b.Gte} + } + x.In = b.In + x.NotIn = b.NotIn + x.Example = b.Example + return m0 +} + +type case_UInt64Rules_LessThan protoreflect.FieldNumber + +func (x case_UInt64Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[11].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_UInt64Rules_GreaterThan protoreflect.FieldNumber + +func (x case_UInt64Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[11].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isUInt64Rules_LessThan interface { + isUInt64Rules_LessThan() +} + +type UInt64Rules_Lt struct { + // `lt` requires the field value to be less than the specified value (field < + // value). 
If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be less than 10 + // uint64 value = 1 [(buf.validate.field).uint64.lt = 10]; + // } + // + // ``` + Lt uint64 `protobuf:"varint,2,opt,name=lt,oneof"` +} + +type UInt64Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be less than or equal to 10 + // uint64 value = 1 [(buf.validate.field).uint64.lte = 10]; + // } + // + // ``` + Lte uint64 `protobuf:"varint,3,opt,name=lte,oneof"` +} + +func (*UInt64Rules_Lt) isUInt64Rules_LessThan() {} + +func (*UInt64Rules_Lte) isUInt64Rules_LessThan() {} + +type isUInt64Rules_GreaterThan interface { + isUInt64Rules_GreaterThan() +} + +type UInt64Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyUInt64 { + // // value must be greater than 5 [uint64.gt] + // uint64 value = 1 [(buf.validate.field).uint64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [uint64.gt_lt] + // uint64 other_value = 2 [(buf.validate.field).uint64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [uint64.gt_lt_exclusive] + // uint64 another_value = 3 [(buf.validate.field).uint64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt uint64 `protobuf:"varint,4,opt,name=gt,oneof"` +} + +type UInt64Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be greater than or equal to 5 [uint64.gte] + // uint64 value = 1 [(buf.validate.field).uint64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [uint64.gte_lt] + // uint64 other_value = 2 [(buf.validate.field).uint64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [uint64.gte_lt_exclusive] + // uint64 another_value = 3 [(buf.validate.field).uint64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte uint64 `protobuf:"varint,5,opt,name=gte,oneof"` +} + +func (*UInt64Rules_Gt) isUInt64Rules_GreaterThan() {} + +func (*UInt64Rules_Gte) isUInt64Rules_GreaterThan() {} + +// SInt32Rules describes the rules applied to `sint32` values. +type SInt32Rules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MySInt32 { + // // value must equal 42 + // sint32 value = 1 [(buf.validate.field).sint32.const = 42]; + // } + // + // ``` + Const *int32 `protobuf:"zigzag32,1,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *SInt32Rules_Lt + // *SInt32Rules_Lte + LessThan isSInt32Rules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *SInt32Rules_Gt + // *SInt32Rules_Gte + GreaterThan isSInt32Rules_GreaterThan `protobuf_oneof:"greater_than"` + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MySInt32 { + // // value must be in list [1, 2, 3] + // sint32 value = 1 [(buf.validate.field).sint32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int32 `protobuf:"zigzag32,6,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must not be in list [1, 2, 3] + // sint32 value = 1 [(buf.validate.field).sint32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int32 `protobuf:"zigzag32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MySInt32 { + // sint32 value = 1 [ + // (buf.validate.field).sint32.example = 1, + // (buf.validate.field).sint32.example = -10 + // ]; + // } + // + // ``` + Example []int32 `protobuf:"zigzag32,8,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SInt32Rules) Reset() { + *x = SInt32Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SInt32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SInt32Rules) ProtoMessage() {} + +func (x *SInt32Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SInt32Rules) GetConst() int32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *SInt32Rules) GetLessThan() isSInt32Rules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *SInt32Rules) GetLt() int32 { + if x != nil { + if x, ok := x.LessThan.(*SInt32Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *SInt32Rules) GetLte() int32 { + if x != nil { + if x, ok := x.LessThan.(*SInt32Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *SInt32Rules) GetGreaterThan() isSInt32Rules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *SInt32Rules) GetGt() int32 { + if x != nil { + if x, ok := x.GreaterThan.(*SInt32Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *SInt32Rules) GetGte() int32 { + if x != nil { + if x, ok := x.GreaterThan.(*SInt32Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *SInt32Rules) GetIn() 
[]int32 { + if x != nil { + return x.In + } + return nil +} + +func (x *SInt32Rules) GetNotIn() []int32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *SInt32Rules) GetExample() []int32 { + if x != nil { + return x.Example + } + return nil +} + +func (x *SInt32Rules) SetConst(v int32) { + x.Const = &v +} + +func (x *SInt32Rules) SetLt(v int32) { + x.LessThan = &SInt32Rules_Lt{v} +} + +func (x *SInt32Rules) SetLte(v int32) { + x.LessThan = &SInt32Rules_Lte{v} +} + +func (x *SInt32Rules) SetGt(v int32) { + x.GreaterThan = &SInt32Rules_Gt{v} +} + +func (x *SInt32Rules) SetGte(v int32) { + x.GreaterThan = &SInt32Rules_Gte{v} +} + +func (x *SInt32Rules) SetIn(v []int32) { + x.In = v +} + +func (x *SInt32Rules) SetNotIn(v []int32) { + x.NotIn = v +} + +func (x *SInt32Rules) SetExample(v []int32) { + x.Example = v +} + +func (x *SInt32Rules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *SInt32Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != nil +} + +func (x *SInt32Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*SInt32Rules_Lt) + return ok +} + +func (x *SInt32Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*SInt32Rules_Lte) + return ok +} + +func (x *SInt32Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan != nil +} + +func (x *SInt32Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*SInt32Rules_Gt) + return ok +} + +func (x *SInt32Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*SInt32Rules_Gte) + return ok +} + +func (x *SInt32Rules) ClearConst() { + x.Const = nil +} + +func (x *SInt32Rules) ClearLessThan() { + x.LessThan = nil +} + +func (x *SInt32Rules) ClearLt() { + if _, ok := x.LessThan.(*SInt32Rules_Lt); ok { + x.LessThan = nil + } +} + +func (x *SInt32Rules) ClearLte() { + if _, ok := 
x.LessThan.(*SInt32Rules_Lte); ok { + x.LessThan = nil + } +} + +func (x *SInt32Rules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *SInt32Rules) ClearGt() { + if _, ok := x.GreaterThan.(*SInt32Rules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *SInt32Rules) ClearGte() { + if _, ok := x.GreaterThan.(*SInt32Rules_Gte); ok { + x.GreaterThan = nil + } +} + +const SInt32Rules_LessThan_not_set_case case_SInt32Rules_LessThan = 0 +const SInt32Rules_Lt_case case_SInt32Rules_LessThan = 2 +const SInt32Rules_Lte_case case_SInt32Rules_LessThan = 3 + +func (x *SInt32Rules) WhichLessThan() case_SInt32Rules_LessThan { + if x == nil { + return SInt32Rules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *SInt32Rules_Lt: + return SInt32Rules_Lt_case + case *SInt32Rules_Lte: + return SInt32Rules_Lte_case + default: + return SInt32Rules_LessThan_not_set_case + } +} + +const SInt32Rules_GreaterThan_not_set_case case_SInt32Rules_GreaterThan = 0 +const SInt32Rules_Gt_case case_SInt32Rules_GreaterThan = 4 +const SInt32Rules_Gte_case case_SInt32Rules_GreaterThan = 5 + +func (x *SInt32Rules) WhichGreaterThan() case_SInt32Rules_GreaterThan { + if x == nil { + return SInt32Rules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *SInt32Rules_Gt: + return SInt32Rules_Gt_case + case *SInt32Rules_Gte: + return SInt32Rules_Gte_case + default: + return SInt32Rules_GreaterThan_not_set_case + } +} + +type SInt32Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MySInt32 { + // // value must equal 42 + // sint32 value = 1 [(buf.validate.field).sint32.const = 42]; + // } + // + // ``` + Const *int32 + // Fields of oneof LessThan: + // `lt` requires the field value to be less than the specified value (field + // < value). If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must be less than 10 + // sint32 value = 1 [(buf.validate.field).sint32.lt = 10]; + // } + // + // ``` + Lt *int32 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must be less than or equal to 10 + // sint32 value = 1 [(buf.validate.field).sint32.lte = 10]; + // } + // + // ``` + Lte *int32 + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must be greater than 5 [sint32.gt] + // sint32 value = 1 [(buf.validate.field).sint32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sint32.gt_lt] + // sint32 other_value = 2 [(buf.validate.field).sint32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sint32.gt_lt_exclusive] + // sint32 another_value = 3 [(buf.validate.field).sint32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *int32 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). 
If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must be greater than or equal to 5 [sint32.gte] + // sint32 value = 1 [(buf.validate.field).sint32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sint32.gte_lt] + // sint32 other_value = 2 [(buf.validate.field).sint32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sint32.gte_lt_exclusive] + // sint32 another_value = 3 [(buf.validate.field).sint32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *int32 + // -- end of GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MySInt32 { + // // value must be in list [1, 2, 3] + // sint32 value = 1 [(buf.validate.field).sint32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int32 + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must not be in list [1, 2, 3] + // sint32 value = 1 [(buf.validate.field).sint32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int32 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MySInt32 { + // sint32 value = 1 [ + // (buf.validate.field).sint32.example = 1, + // (buf.validate.field).sint32.example = -10 + // ]; + // } + // + // ``` + Example []int32 +} + +func (b0 SInt32Rules_builder) Build() *SInt32Rules { + m0 := &SInt32Rules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &SInt32Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.LessThan = &SInt32Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.GreaterThan = &SInt32Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &SInt32Rules_Gte{*b.Gte} + } + x.In = b.In + x.NotIn = b.NotIn + x.Example = b.Example + return m0 +} + +type case_SInt32Rules_LessThan protoreflect.FieldNumber + +func (x case_SInt32Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[12].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_SInt32Rules_GreaterThan protoreflect.FieldNumber + +func (x case_SInt32Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[12].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isSInt32Rules_LessThan interface { + isSInt32Rules_LessThan() +} + +type SInt32Rules_Lt struct { + // `lt` requires the field value to be less than the specified value (field + // < value). If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must be less than 10 + // sint32 value = 1 [(buf.validate.field).sint32.lt = 10]; + // } + // + // ``` + Lt int32 `protobuf:"zigzag32,2,opt,name=lt,oneof"` +} + +type SInt32Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). 
If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must be less than or equal to 10 + // sint32 value = 1 [(buf.validate.field).sint32.lte = 10]; + // } + // + // ``` + Lte int32 `protobuf:"zigzag32,3,opt,name=lte,oneof"` +} + +func (*SInt32Rules_Lt) isSInt32Rules_LessThan() {} + +func (*SInt32Rules_Lte) isSInt32Rules_LessThan() {} + +type isSInt32Rules_GreaterThan interface { + isSInt32Rules_GreaterThan() +} + +type SInt32Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must be greater than 5 [sint32.gt] + // sint32 value = 1 [(buf.validate.field).sint32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sint32.gt_lt] + // sint32 other_value = 2 [(buf.validate.field).sint32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sint32.gt_lt_exclusive] + // sint32 another_value = 3 [(buf.validate.field).sint32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt int32 `protobuf:"zigzag32,4,opt,name=gt,oneof"` +} + +type SInt32Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySInt32 { + // // value must be greater than or equal to 5 [sint32.gte] + // sint32 value = 1 [(buf.validate.field).sint32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sint32.gte_lt] + // sint32 other_value = 2 [(buf.validate.field).sint32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sint32.gte_lt_exclusive] + // sint32 another_value = 3 [(buf.validate.field).sint32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte int32 `protobuf:"zigzag32,5,opt,name=gte,oneof"` +} + +func (*SInt32Rules_Gt) isSInt32Rules_GreaterThan() {} + +func (*SInt32Rules_Gte) isSInt32Rules_GreaterThan() {} + +// SInt64Rules describes the rules applied to `sint64` values. +type SInt64Rules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must equal 42 + // sint64 value = 1 [(buf.validate.field).sint64.const = 42]; + // } + // + // ``` + Const *int64 `protobuf:"zigzag64,1,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *SInt64Rules_Lt + // *SInt64Rules_Lte + LessThan isSInt64Rules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *SInt64Rules_Gt + // *SInt64Rules_Gte + GreaterThan isSInt64Rules_GreaterThan `protobuf_oneof:"greater_than"` + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message + // is generated. 
+ // + // ```proto + // + // message MySInt64 { + // // value must be in list [1, 2, 3] + // sint64 value = 1 [(buf.validate.field).sint64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int64 `protobuf:"zigzag64,6,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must not be in list [1, 2, 3] + // sint64 value = 1 [(buf.validate.field).sint64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int64 `protobuf:"zigzag64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MySInt64 { + // sint64 value = 1 [ + // (buf.validate.field).sint64.example = 1, + // (buf.validate.field).sint64.example = -10 + // ]; + // } + // + // ``` + Example []int64 `protobuf:"zigzag64,8,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SInt64Rules) Reset() { + *x = SInt64Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SInt64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SInt64Rules) ProtoMessage() {} + +func (x *SInt64Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SInt64Rules) GetConst() 
int64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *SInt64Rules) GetLessThan() isSInt64Rules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *SInt64Rules) GetLt() int64 { + if x != nil { + if x, ok := x.LessThan.(*SInt64Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *SInt64Rules) GetLte() int64 { + if x != nil { + if x, ok := x.LessThan.(*SInt64Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *SInt64Rules) GetGreaterThan() isSInt64Rules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *SInt64Rules) GetGt() int64 { + if x != nil { + if x, ok := x.GreaterThan.(*SInt64Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *SInt64Rules) GetGte() int64 { + if x != nil { + if x, ok := x.GreaterThan.(*SInt64Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *SInt64Rules) GetIn() []int64 { + if x != nil { + return x.In + } + return nil +} + +func (x *SInt64Rules) GetNotIn() []int64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *SInt64Rules) GetExample() []int64 { + if x != nil { + return x.Example + } + return nil +} + +func (x *SInt64Rules) SetConst(v int64) { + x.Const = &v +} + +func (x *SInt64Rules) SetLt(v int64) { + x.LessThan = &SInt64Rules_Lt{v} +} + +func (x *SInt64Rules) SetLte(v int64) { + x.LessThan = &SInt64Rules_Lte{v} +} + +func (x *SInt64Rules) SetGt(v int64) { + x.GreaterThan = &SInt64Rules_Gt{v} +} + +func (x *SInt64Rules) SetGte(v int64) { + x.GreaterThan = &SInt64Rules_Gte{v} +} + +func (x *SInt64Rules) SetIn(v []int64) { + x.In = v +} + +func (x *SInt64Rules) SetNotIn(v []int64) { + x.NotIn = v +} + +func (x *SInt64Rules) SetExample(v []int64) { + x.Example = v +} + +func (x *SInt64Rules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *SInt64Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != nil +} + 
+func (x *SInt64Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*SInt64Rules_Lt) + return ok +} + +func (x *SInt64Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*SInt64Rules_Lte) + return ok +} + +func (x *SInt64Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan != nil +} + +func (x *SInt64Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*SInt64Rules_Gt) + return ok +} + +func (x *SInt64Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*SInt64Rules_Gte) + return ok +} + +func (x *SInt64Rules) ClearConst() { + x.Const = nil +} + +func (x *SInt64Rules) ClearLessThan() { + x.LessThan = nil +} + +func (x *SInt64Rules) ClearLt() { + if _, ok := x.LessThan.(*SInt64Rules_Lt); ok { + x.LessThan = nil + } +} + +func (x *SInt64Rules) ClearLte() { + if _, ok := x.LessThan.(*SInt64Rules_Lte); ok { + x.LessThan = nil + } +} + +func (x *SInt64Rules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *SInt64Rules) ClearGt() { + if _, ok := x.GreaterThan.(*SInt64Rules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *SInt64Rules) ClearGte() { + if _, ok := x.GreaterThan.(*SInt64Rules_Gte); ok { + x.GreaterThan = nil + } +} + +const SInt64Rules_LessThan_not_set_case case_SInt64Rules_LessThan = 0 +const SInt64Rules_Lt_case case_SInt64Rules_LessThan = 2 +const SInt64Rules_Lte_case case_SInt64Rules_LessThan = 3 + +func (x *SInt64Rules) WhichLessThan() case_SInt64Rules_LessThan { + if x == nil { + return SInt64Rules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *SInt64Rules_Lt: + return SInt64Rules_Lt_case + case *SInt64Rules_Lte: + return SInt64Rules_Lte_case + default: + return SInt64Rules_LessThan_not_set_case + } +} + +const SInt64Rules_GreaterThan_not_set_case case_SInt64Rules_GreaterThan = 0 +const SInt64Rules_Gt_case case_SInt64Rules_GreaterThan = 4 +const SInt64Rules_Gte_case 
case_SInt64Rules_GreaterThan = 5 + +func (x *SInt64Rules) WhichGreaterThan() case_SInt64Rules_GreaterThan { + if x == nil { + return SInt64Rules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *SInt64Rules_Gt: + return SInt64Rules_Gt_case + case *SInt64Rules_Gte: + return SInt64Rules_Gte_case + default: + return SInt64Rules_GreaterThan_not_set_case + } +} + +type SInt64Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must equal 42 + // sint64 value = 1 [(buf.validate.field).sint64.const = 42]; + // } + // + // ``` + Const *int64 + // Fields of oneof LessThan: + // `lt` requires the field value to be less than the specified value (field + // < value). If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be less than 10 + // sint64 value = 1 [(buf.validate.field).sint64.lt = 10]; + // } + // + // ``` + Lt *int64 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be less than or equal to 10 + // sint64 value = 1 [(buf.validate.field).sint64.lte = 10]; + // } + // + // ``` + Lte *int64 + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. 
If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be greater than 5 [sint64.gt] + // sint64 value = 1 [(buf.validate.field).sint64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sint64.gt_lt] + // sint64 other_value = 2 [(buf.validate.field).sint64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sint64.gt_lt_exclusive] + // sint64 another_value = 3 [(buf.validate.field).sint64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *int64 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be greater than or equal to 5 [sint64.gte] + // sint64 value = 1 [(buf.validate.field).sint64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sint64.gte_lt] + // sint64 other_value = 2 [(buf.validate.field).sint64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sint64.gte_lt_exclusive] + // sint64 another_value = 3 [(buf.validate.field).sint64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *int64 + // -- end of GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message + // is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be in list [1, 2, 3] + // sint64 value = 1 [(buf.validate.field).sint64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int64 + // `not_in` requires the field value to not be equal to any of the specified + // values. 
If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must not be in list [1, 2, 3] + // sint64 value = 1 [(buf.validate.field).sint64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int64 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MySInt64 { + // sint64 value = 1 [ + // (buf.validate.field).sint64.example = 1, + // (buf.validate.field).sint64.example = -10 + // ]; + // } + // + // ``` + Example []int64 +} + +func (b0 SInt64Rules_builder) Build() *SInt64Rules { + m0 := &SInt64Rules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &SInt64Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.LessThan = &SInt64Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.GreaterThan = &SInt64Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &SInt64Rules_Gte{*b.Gte} + } + x.In = b.In + x.NotIn = b.NotIn + x.Example = b.Example + return m0 +} + +type case_SInt64Rules_LessThan protoreflect.FieldNumber + +func (x case_SInt64Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[13].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_SInt64Rules_GreaterThan protoreflect.FieldNumber + +func (x case_SInt64Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[13].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isSInt64Rules_LessThan interface { + isSInt64Rules_LessThan() +} + +type SInt64Rules_Lt struct { + // `lt` requires the field value to be less than the specified value (field + // < value). 
If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be less than 10 + // sint64 value = 1 [(buf.validate.field).sint64.lt = 10]; + // } + // + // ``` + Lt int64 `protobuf:"zigzag64,2,opt,name=lt,oneof"` +} + +type SInt64Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be less than or equal to 10 + // sint64 value = 1 [(buf.validate.field).sint64.lte = 10]; + // } + // + // ``` + Lte int64 `protobuf:"zigzag64,3,opt,name=lte,oneof"` +} + +func (*SInt64Rules_Lt) isSInt64Rules_LessThan() {} + +func (*SInt64Rules_Lte) isSInt64Rules_LessThan() {} + +type isSInt64Rules_GreaterThan interface { + isSInt64Rules_GreaterThan() +} + +type SInt64Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySInt64 { + // // value must be greater than 5 [sint64.gt] + // sint64 value = 1 [(buf.validate.field).sint64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sint64.gt_lt] + // sint64 other_value = 2 [(buf.validate.field).sint64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sint64.gt_lt_exclusive] + // sint64 another_value = 3 [(buf.validate.field).sint64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt int64 `protobuf:"zigzag64,4,opt,name=gt,oneof"` +} + +type SInt64Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be greater than or equal to 5 [sint64.gte] + // sint64 value = 1 [(buf.validate.field).sint64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sint64.gte_lt] + // sint64 other_value = 2 [(buf.validate.field).sint64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sint64.gte_lt_exclusive] + // sint64 another_value = 3 [(buf.validate.field).sint64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte int64 `protobuf:"zigzag64,5,opt,name=gte,oneof"` +} + +func (*SInt64Rules_Gt) isSInt64Rules_GreaterThan() {} + +func (*SInt64Rules_Gte) isSInt64Rules_GreaterThan() {} + +// Fixed32Rules describes the rules applied to `fixed32` values. +type Fixed32Rules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified value. + // If the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MyFixed32 { + // // value must equal 42 + // fixed32 value = 1 [(buf.validate.field).fixed32.const = 42]; + // } + // + // ``` + Const *uint32 `protobuf:"fixed32,1,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *Fixed32Rules_Lt + // *Fixed32Rules_Lte + LessThan isFixed32Rules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *Fixed32Rules_Gt + // *Fixed32Rules_Gte + GreaterThan isFixed32Rules_GreaterThan `protobuf_oneof:"greater_than"` + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message + // is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be in list [1, 2, 3] + // fixed32 value = 1 [(buf.validate.field).fixed32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []uint32 `protobuf:"fixed32,6,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must not be in list [1, 2, 3] + // fixed32 value = 1 [(buf.validate.field).fixed32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []uint32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyFixed32 { + // fixed32 value = 1 [ + // (buf.validate.field).fixed32.example = 1, + // (buf.validate.field).fixed32.example = 2 + // ]; + // } + // + // ``` + Example []uint32 `protobuf:"fixed32,8,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Fixed32Rules) Reset() { + *x = Fixed32Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Fixed32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Fixed32Rules) ProtoMessage() {} + +func (x *Fixed32Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Fixed32Rules) GetConst() uint32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *Fixed32Rules) GetLessThan() isFixed32Rules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *Fixed32Rules) GetLt() uint32 { + if x != nil { + if x, ok := x.LessThan.(*Fixed32Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *Fixed32Rules) GetLte() uint32 { + if x != nil { + if x, ok := x.LessThan.(*Fixed32Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *Fixed32Rules) GetGreaterThan() isFixed32Rules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *Fixed32Rules) GetGt() uint32 { + if x != nil { + if x, ok := x.GreaterThan.(*Fixed32Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *Fixed32Rules) GetGte() uint32 { + if x != nil { + if x, ok := x.GreaterThan.(*Fixed32Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x 
*Fixed32Rules) GetIn() []uint32 { + if x != nil { + return x.In + } + return nil +} + +func (x *Fixed32Rules) GetNotIn() []uint32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *Fixed32Rules) GetExample() []uint32 { + if x != nil { + return x.Example + } + return nil +} + +func (x *Fixed32Rules) SetConst(v uint32) { + x.Const = &v +} + +func (x *Fixed32Rules) SetLt(v uint32) { + x.LessThan = &Fixed32Rules_Lt{v} +} + +func (x *Fixed32Rules) SetLte(v uint32) { + x.LessThan = &Fixed32Rules_Lte{v} +} + +func (x *Fixed32Rules) SetGt(v uint32) { + x.GreaterThan = &Fixed32Rules_Gt{v} +} + +func (x *Fixed32Rules) SetGte(v uint32) { + x.GreaterThan = &Fixed32Rules_Gte{v} +} + +func (x *Fixed32Rules) SetIn(v []uint32) { + x.In = v +} + +func (x *Fixed32Rules) SetNotIn(v []uint32) { + x.NotIn = v +} + +func (x *Fixed32Rules) SetExample(v []uint32) { + x.Example = v +} + +func (x *Fixed32Rules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *Fixed32Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != nil +} + +func (x *Fixed32Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*Fixed32Rules_Lt) + return ok +} + +func (x *Fixed32Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*Fixed32Rules_Lte) + return ok +} + +func (x *Fixed32Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan != nil +} + +func (x *Fixed32Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*Fixed32Rules_Gt) + return ok +} + +func (x *Fixed32Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*Fixed32Rules_Gte) + return ok +} + +func (x *Fixed32Rules) ClearConst() { + x.Const = nil +} + +func (x *Fixed32Rules) ClearLessThan() { + x.LessThan = nil +} + +func (x *Fixed32Rules) ClearLt() { + if _, ok := x.LessThan.(*Fixed32Rules_Lt); ok { + x.LessThan = nil + } 
+} + +func (x *Fixed32Rules) ClearLte() { + if _, ok := x.LessThan.(*Fixed32Rules_Lte); ok { + x.LessThan = nil + } +} + +func (x *Fixed32Rules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *Fixed32Rules) ClearGt() { + if _, ok := x.GreaterThan.(*Fixed32Rules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *Fixed32Rules) ClearGte() { + if _, ok := x.GreaterThan.(*Fixed32Rules_Gte); ok { + x.GreaterThan = nil + } +} + +const Fixed32Rules_LessThan_not_set_case case_Fixed32Rules_LessThan = 0 +const Fixed32Rules_Lt_case case_Fixed32Rules_LessThan = 2 +const Fixed32Rules_Lte_case case_Fixed32Rules_LessThan = 3 + +func (x *Fixed32Rules) WhichLessThan() case_Fixed32Rules_LessThan { + if x == nil { + return Fixed32Rules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *Fixed32Rules_Lt: + return Fixed32Rules_Lt_case + case *Fixed32Rules_Lte: + return Fixed32Rules_Lte_case + default: + return Fixed32Rules_LessThan_not_set_case + } +} + +const Fixed32Rules_GreaterThan_not_set_case case_Fixed32Rules_GreaterThan = 0 +const Fixed32Rules_Gt_case case_Fixed32Rules_GreaterThan = 4 +const Fixed32Rules_Gte_case case_Fixed32Rules_GreaterThan = 5 + +func (x *Fixed32Rules) WhichGreaterThan() case_Fixed32Rules_GreaterThan { + if x == nil { + return Fixed32Rules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *Fixed32Rules_Gt: + return Fixed32Rules_Gt_case + case *Fixed32Rules_Gte: + return Fixed32Rules_Gte_case + default: + return Fixed32Rules_GreaterThan_not_set_case + } +} + +type Fixed32Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. + // If the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MyFixed32 { + // // value must equal 42 + // fixed32 value = 1 [(buf.validate.field).fixed32.const = 42]; + // } + // + // ``` + Const *uint32 + // Fields of oneof LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be less than 10 + // fixed32 value = 1 [(buf.validate.field).fixed32.lt = 10]; + // } + // + // ``` + Lt *uint32 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be less than or equal to 10 + // fixed32 value = 1 [(buf.validate.field).fixed32.lte = 10]; + // } + // + // ``` + Lte *uint32 + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyFixed32 { + // // value must be greater than 5 [fixed32.gt] + // fixed32 value = 1 [(buf.validate.field).fixed32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [fixed32.gt_lt] + // fixed32 other_value = 2 [(buf.validate.field).fixed32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [fixed32.gt_lt_exclusive] + // fixed32 another_value = 3 [(buf.validate.field).fixed32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *uint32 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be greater than or equal to 5 [fixed32.gte] + // fixed32 value = 1 [(buf.validate.field).fixed32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [fixed32.gte_lt] + // fixed32 other_value = 2 [(buf.validate.field).fixed32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [fixed32.gte_lt_exclusive] + // fixed32 another_value = 3 [(buf.validate.field).fixed32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *uint32 + // -- end of GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message + // is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be in list [1, 2, 3] + // fixed32 value = 1 [(buf.validate.field).fixed32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []uint32 + // `not_in` requires the field value to not be equal to any of the specified + // values. 
If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must not be in list [1, 2, 3] + // fixed32 value = 1 [(buf.validate.field).fixed32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []uint32 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyFixed32 { + // fixed32 value = 1 [ + // (buf.validate.field).fixed32.example = 1, + // (buf.validate.field).fixed32.example = 2 + // ]; + // } + // + // ``` + Example []uint32 +} + +func (b0 Fixed32Rules_builder) Build() *Fixed32Rules { + m0 := &Fixed32Rules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &Fixed32Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.LessThan = &Fixed32Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.GreaterThan = &Fixed32Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &Fixed32Rules_Gte{*b.Gte} + } + x.In = b.In + x.NotIn = b.NotIn + x.Example = b.Example + return m0 +} + +type case_Fixed32Rules_LessThan protoreflect.FieldNumber + +func (x case_Fixed32Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[14].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_Fixed32Rules_GreaterThan protoreflect.FieldNumber + +func (x case_Fixed32Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[14].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isFixed32Rules_LessThan interface { + isFixed32Rules_LessThan() +} + +type Fixed32Rules_Lt struct { + // `lt` requires the field value to be less than the specified value (field 
< + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be less than 10 + // fixed32 value = 1 [(buf.validate.field).fixed32.lt = 10]; + // } + // + // ``` + Lt uint32 `protobuf:"fixed32,2,opt,name=lt,oneof"` +} + +type Fixed32Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be less than or equal to 10 + // fixed32 value = 1 [(buf.validate.field).fixed32.lte = 10]; + // } + // + // ``` + Lte uint32 `protobuf:"fixed32,3,opt,name=lte,oneof"` +} + +func (*Fixed32Rules_Lt) isFixed32Rules_LessThan() {} + +func (*Fixed32Rules_Lte) isFixed32Rules_LessThan() {} + +type isFixed32Rules_GreaterThan interface { + isFixed32Rules_GreaterThan() +} + +type Fixed32Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyFixed32 { + // // value must be greater than 5 [fixed32.gt] + // fixed32 value = 1 [(buf.validate.field).fixed32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [fixed32.gt_lt] + // fixed32 other_value = 2 [(buf.validate.field).fixed32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [fixed32.gt_lt_exclusive] + // fixed32 another_value = 3 [(buf.validate.field).fixed32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt uint32 `protobuf:"fixed32,4,opt,name=gt,oneof"` +} + +type Fixed32Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be greater than or equal to 5 [fixed32.gte] + // fixed32 value = 1 [(buf.validate.field).fixed32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [fixed32.gte_lt] + // fixed32 other_value = 2 [(buf.validate.field).fixed32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [fixed32.gte_lt_exclusive] + // fixed32 another_value = 3 [(buf.validate.field).fixed32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte uint32 `protobuf:"fixed32,5,opt,name=gte,oneof"` +} + +func (*Fixed32Rules_Gt) isFixed32Rules_GreaterThan() {} + +func (*Fixed32Rules_Gte) isFixed32Rules_GreaterThan() {} + +// Fixed64Rules describes the rules applied to `fixed64` values. +type Fixed64Rules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MyFixed64 { + // // value must equal 42 + // fixed64 value = 1 [(buf.validate.field).fixed64.const = 42]; + // } + // + // ``` + Const *uint64 `protobuf:"fixed64,1,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *Fixed64Rules_Lt + // *Fixed64Rules_Lte + LessThan isFixed64Rules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *Fixed64Rules_Gt + // *Fixed64Rules_Gte + GreaterThan isFixed64Rules_GreaterThan `protobuf_oneof:"greater_than"` + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be in list [1, 2, 3] + // fixed64 value = 1 [(buf.validate.field).fixed64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []uint64 `protobuf:"fixed64,6,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must not be in list [1, 2, 3] + // fixed64 value = 1 [(buf.validate.field).fixed64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []uint64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyFixed64 { + // fixed64 value = 1 [ + // (buf.validate.field).fixed64.example = 1, + // (buf.validate.field).fixed64.example = 2 + // ]; + // } + // + // ``` + Example []uint64 `protobuf:"fixed64,8,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Fixed64Rules) Reset() { + *x = Fixed64Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Fixed64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Fixed64Rules) ProtoMessage() {} + +func (x *Fixed64Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Fixed64Rules) GetConst() uint64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *Fixed64Rules) GetLessThan() isFixed64Rules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *Fixed64Rules) GetLt() uint64 { + if x != nil { + if x, ok := x.LessThan.(*Fixed64Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *Fixed64Rules) GetLte() uint64 { + if x != nil { + if x, ok := x.LessThan.(*Fixed64Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *Fixed64Rules) GetGreaterThan() isFixed64Rules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *Fixed64Rules) GetGt() uint64 { + if x != nil { + if x, ok := x.GreaterThan.(*Fixed64Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *Fixed64Rules) GetGte() uint64 { + if x != nil { + if x, ok := x.GreaterThan.(*Fixed64Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x 
*Fixed64Rules) GetIn() []uint64 { + if x != nil { + return x.In + } + return nil +} + +func (x *Fixed64Rules) GetNotIn() []uint64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *Fixed64Rules) GetExample() []uint64 { + if x != nil { + return x.Example + } + return nil +} + +func (x *Fixed64Rules) SetConst(v uint64) { + x.Const = &v +} + +func (x *Fixed64Rules) SetLt(v uint64) { + x.LessThan = &Fixed64Rules_Lt{v} +} + +func (x *Fixed64Rules) SetLte(v uint64) { + x.LessThan = &Fixed64Rules_Lte{v} +} + +func (x *Fixed64Rules) SetGt(v uint64) { + x.GreaterThan = &Fixed64Rules_Gt{v} +} + +func (x *Fixed64Rules) SetGte(v uint64) { + x.GreaterThan = &Fixed64Rules_Gte{v} +} + +func (x *Fixed64Rules) SetIn(v []uint64) { + x.In = v +} + +func (x *Fixed64Rules) SetNotIn(v []uint64) { + x.NotIn = v +} + +func (x *Fixed64Rules) SetExample(v []uint64) { + x.Example = v +} + +func (x *Fixed64Rules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *Fixed64Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != nil +} + +func (x *Fixed64Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*Fixed64Rules_Lt) + return ok +} + +func (x *Fixed64Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*Fixed64Rules_Lte) + return ok +} + +func (x *Fixed64Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan != nil +} + +func (x *Fixed64Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*Fixed64Rules_Gt) + return ok +} + +func (x *Fixed64Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*Fixed64Rules_Gte) + return ok +} + +func (x *Fixed64Rules) ClearConst() { + x.Const = nil +} + +func (x *Fixed64Rules) ClearLessThan() { + x.LessThan = nil +} + +func (x *Fixed64Rules) ClearLt() { + if _, ok := x.LessThan.(*Fixed64Rules_Lt); ok { + x.LessThan = nil + } 
+} + +func (x *Fixed64Rules) ClearLte() { + if _, ok := x.LessThan.(*Fixed64Rules_Lte); ok { + x.LessThan = nil + } +} + +func (x *Fixed64Rules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *Fixed64Rules) ClearGt() { + if _, ok := x.GreaterThan.(*Fixed64Rules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *Fixed64Rules) ClearGte() { + if _, ok := x.GreaterThan.(*Fixed64Rules_Gte); ok { + x.GreaterThan = nil + } +} + +const Fixed64Rules_LessThan_not_set_case case_Fixed64Rules_LessThan = 0 +const Fixed64Rules_Lt_case case_Fixed64Rules_LessThan = 2 +const Fixed64Rules_Lte_case case_Fixed64Rules_LessThan = 3 + +func (x *Fixed64Rules) WhichLessThan() case_Fixed64Rules_LessThan { + if x == nil { + return Fixed64Rules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *Fixed64Rules_Lt: + return Fixed64Rules_Lt_case + case *Fixed64Rules_Lte: + return Fixed64Rules_Lte_case + default: + return Fixed64Rules_LessThan_not_set_case + } +} + +const Fixed64Rules_GreaterThan_not_set_case case_Fixed64Rules_GreaterThan = 0 +const Fixed64Rules_Gt_case case_Fixed64Rules_GreaterThan = 4 +const Fixed64Rules_Gte_case case_Fixed64Rules_GreaterThan = 5 + +func (x *Fixed64Rules) WhichGreaterThan() case_Fixed64Rules_GreaterThan { + if x == nil { + return Fixed64Rules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *Fixed64Rules_Gt: + return Fixed64Rules_Gt_case + case *Fixed64Rules_Gte: + return Fixed64Rules_Gte_case + default: + return Fixed64Rules_GreaterThan_not_set_case + } +} + +type Fixed64Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MyFixed64 { + // // value must equal 42 + // fixed64 value = 1 [(buf.validate.field).fixed64.const = 42]; + // } + // + // ``` + Const *uint64 + // Fields of oneof LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be less than 10 + // fixed64 value = 1 [(buf.validate.field).fixed64.lt = 10]; + // } + // + // ``` + Lt *uint64 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be less than or equal to 10 + // fixed64 value = 1 [(buf.validate.field).fixed64.lte = 10]; + // } + // + // ``` + Lte *uint64 + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyFixed64 { + // // value must be greater than 5 [fixed64.gt] + // fixed64 value = 1 [(buf.validate.field).fixed64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [fixed64.gt_lt] + // fixed64 other_value = 2 [(buf.validate.field).fixed64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [fixed64.gt_lt_exclusive] + // fixed64 another_value = 3 [(buf.validate.field).fixed64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *uint64 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be greater than or equal to 5 [fixed64.gte] + // fixed64 value = 1 [(buf.validate.field).fixed64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [fixed64.gte_lt] + // fixed64 other_value = 2 [(buf.validate.field).fixed64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [fixed64.gte_lt_exclusive] + // fixed64 another_value = 3 [(buf.validate.field).fixed64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *uint64 + // -- end of GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be in list [1, 2, 3] + // fixed64 value = 1 [(buf.validate.field).fixed64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []uint64 + // `not_in` requires the field value to not be equal to any of the specified + // values. 
If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must not be in list [1, 2, 3] + // fixed64 value = 1 [(buf.validate.field).fixed64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []uint64 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyFixed64 { + // fixed64 value = 1 [ + // (buf.validate.field).fixed64.example = 1, + // (buf.validate.field).fixed64.example = 2 + // ]; + // } + // + // ``` + Example []uint64 +} + +func (b0 Fixed64Rules_builder) Build() *Fixed64Rules { + m0 := &Fixed64Rules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &Fixed64Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.LessThan = &Fixed64Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.GreaterThan = &Fixed64Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &Fixed64Rules_Gte{*b.Gte} + } + x.In = b.In + x.NotIn = b.NotIn + x.Example = b.Example + return m0 +} + +type case_Fixed64Rules_LessThan protoreflect.FieldNumber + +func (x case_Fixed64Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[15].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_Fixed64Rules_GreaterThan protoreflect.FieldNumber + +func (x case_Fixed64Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[15].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isFixed64Rules_LessThan interface { + isFixed64Rules_LessThan() +} + +type Fixed64Rules_Lt struct { + // `lt` requires the field value to be less than the specified value (field 
< + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be less than 10 + // fixed64 value = 1 [(buf.validate.field).fixed64.lt = 10]; + // } + // + // ``` + Lt uint64 `protobuf:"fixed64,2,opt,name=lt,oneof"` +} + +type Fixed64Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be less than or equal to 10 + // fixed64 value = 1 [(buf.validate.field).fixed64.lte = 10]; + // } + // + // ``` + Lte uint64 `protobuf:"fixed64,3,opt,name=lte,oneof"` +} + +func (*Fixed64Rules_Lt) isFixed64Rules_LessThan() {} + +func (*Fixed64Rules_Lte) isFixed64Rules_LessThan() {} + +type isFixed64Rules_GreaterThan interface { + isFixed64Rules_GreaterThan() +} + +type Fixed64Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyFixed64 { + // // value must be greater than 5 [fixed64.gt] + // fixed64 value = 1 [(buf.validate.field).fixed64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [fixed64.gt_lt] + // fixed64 other_value = 2 [(buf.validate.field).fixed64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [fixed64.gt_lt_exclusive] + // fixed64 another_value = 3 [(buf.validate.field).fixed64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt uint64 `protobuf:"fixed64,4,opt,name=gt,oneof"` +} + +type Fixed64Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be greater than or equal to 5 [fixed64.gte] + // fixed64 value = 1 [(buf.validate.field).fixed64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [fixed64.gte_lt] + // fixed64 other_value = 2 [(buf.validate.field).fixed64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [fixed64.gte_lt_exclusive] + // fixed64 another_value = 3 [(buf.validate.field).fixed64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte uint64 `protobuf:"fixed64,5,opt,name=gte,oneof"` +} + +func (*Fixed64Rules_Gt) isFixed64Rules_GreaterThan() {} + +func (*Fixed64Rules_Gte) isFixed64Rules_GreaterThan() {} + +// SFixed32Rules describes the rules applied to `fixed32` values. +type SFixed32Rules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MySFixed32 { + // // value must equal 42 + // sfixed32 value = 1 [(buf.validate.field).sfixed32.const = 42]; + // } + // + // ``` + Const *int32 `protobuf:"fixed32,1,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *SFixed32Rules_Lt + // *SFixed32Rules_Lte + LessThan isSFixed32Rules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *SFixed32Rules_Gt + // *SFixed32Rules_Gte + GreaterThan isSFixed32Rules_GreaterThan `protobuf_oneof:"greater_than"` + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be in list [1, 2, 3] + // sfixed32 value = 1 [(buf.validate.field).sfixed32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int32 `protobuf:"fixed32,6,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must not be in list [1, 2, 3] + // sfixed32 value = 1 [(buf.validate.field).sfixed32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MySFixed32 { + // sfixed32 value = 1 [ + // (buf.validate.field).sfixed32.example = 1, + // (buf.validate.field).sfixed32.example = 2 + // ]; + // } + // + // ``` + Example []int32 `protobuf:"fixed32,8,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SFixed32Rules) Reset() { + *x = SFixed32Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SFixed32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SFixed32Rules) ProtoMessage() {} + +func (x *SFixed32Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SFixed32Rules) GetConst() int32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *SFixed32Rules) GetLessThan() isSFixed32Rules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *SFixed32Rules) GetLt() int32 { + if x != nil { + if x, ok := x.LessThan.(*SFixed32Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *SFixed32Rules) GetLte() int32 { + if x != nil { + if x, ok := x.LessThan.(*SFixed32Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *SFixed32Rules) GetGreaterThan() isSFixed32Rules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *SFixed32Rules) GetGt() int32 { + if x != nil { + if x, ok := x.GreaterThan.(*SFixed32Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *SFixed32Rules) GetGte() int32 { + if x != nil { + if x, ok := x.GreaterThan.(*SFixed32Rules_Gte); ok { + return x.Gte + } + } + return 
0 +} + +func (x *SFixed32Rules) GetIn() []int32 { + if x != nil { + return x.In + } + return nil +} + +func (x *SFixed32Rules) GetNotIn() []int32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *SFixed32Rules) GetExample() []int32 { + if x != nil { + return x.Example + } + return nil +} + +func (x *SFixed32Rules) SetConst(v int32) { + x.Const = &v +} + +func (x *SFixed32Rules) SetLt(v int32) { + x.LessThan = &SFixed32Rules_Lt{v} +} + +func (x *SFixed32Rules) SetLte(v int32) { + x.LessThan = &SFixed32Rules_Lte{v} +} + +func (x *SFixed32Rules) SetGt(v int32) { + x.GreaterThan = &SFixed32Rules_Gt{v} +} + +func (x *SFixed32Rules) SetGte(v int32) { + x.GreaterThan = &SFixed32Rules_Gte{v} +} + +func (x *SFixed32Rules) SetIn(v []int32) { + x.In = v +} + +func (x *SFixed32Rules) SetNotIn(v []int32) { + x.NotIn = v +} + +func (x *SFixed32Rules) SetExample(v []int32) { + x.Example = v +} + +func (x *SFixed32Rules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *SFixed32Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != nil +} + +func (x *SFixed32Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*SFixed32Rules_Lt) + return ok +} + +func (x *SFixed32Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*SFixed32Rules_Lte) + return ok +} + +func (x *SFixed32Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan != nil +} + +func (x *SFixed32Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*SFixed32Rules_Gt) + return ok +} + +func (x *SFixed32Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*SFixed32Rules_Gte) + return ok +} + +func (x *SFixed32Rules) ClearConst() { + x.Const = nil +} + +func (x *SFixed32Rules) ClearLessThan() { + x.LessThan = nil +} + +func (x *SFixed32Rules) ClearLt() { + if _, ok := 
x.LessThan.(*SFixed32Rules_Lt); ok { + x.LessThan = nil + } +} + +func (x *SFixed32Rules) ClearLte() { + if _, ok := x.LessThan.(*SFixed32Rules_Lte); ok { + x.LessThan = nil + } +} + +func (x *SFixed32Rules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *SFixed32Rules) ClearGt() { + if _, ok := x.GreaterThan.(*SFixed32Rules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *SFixed32Rules) ClearGte() { + if _, ok := x.GreaterThan.(*SFixed32Rules_Gte); ok { + x.GreaterThan = nil + } +} + +const SFixed32Rules_LessThan_not_set_case case_SFixed32Rules_LessThan = 0 +const SFixed32Rules_Lt_case case_SFixed32Rules_LessThan = 2 +const SFixed32Rules_Lte_case case_SFixed32Rules_LessThan = 3 + +func (x *SFixed32Rules) WhichLessThan() case_SFixed32Rules_LessThan { + if x == nil { + return SFixed32Rules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *SFixed32Rules_Lt: + return SFixed32Rules_Lt_case + case *SFixed32Rules_Lte: + return SFixed32Rules_Lte_case + default: + return SFixed32Rules_LessThan_not_set_case + } +} + +const SFixed32Rules_GreaterThan_not_set_case case_SFixed32Rules_GreaterThan = 0 +const SFixed32Rules_Gt_case case_SFixed32Rules_GreaterThan = 4 +const SFixed32Rules_Gte_case case_SFixed32Rules_GreaterThan = 5 + +func (x *SFixed32Rules) WhichGreaterThan() case_SFixed32Rules_GreaterThan { + if x == nil { + return SFixed32Rules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *SFixed32Rules_Gt: + return SFixed32Rules_Gt_case + case *SFixed32Rules_Gte: + return SFixed32Rules_Gte_case + default: + return SFixed32Rules_GreaterThan_not_set_case + } +} + +type SFixed32Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MySFixed32 { + // // value must equal 42 + // sfixed32 value = 1 [(buf.validate.field).sfixed32.const = 42]; + // } + // + // ``` + Const *int32 + // Fields of oneof LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be less than 10 + // sfixed32 value = 1 [(buf.validate.field).sfixed32.lt = 10]; + // } + // + // ``` + Lt *int32 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be less than or equal to 10 + // sfixed32 value = 1 [(buf.validate.field).sfixed32.lte = 10]; + // } + // + // ``` + Lte *int32 + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySFixed32 { + // // value must be greater than 5 [sfixed32.gt] + // sfixed32 value = 1 [(buf.validate.field).sfixed32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sfixed32.gt_lt] + // sfixed32 other_value = 2 [(buf.validate.field).sfixed32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sfixed32.gt_lt_exclusive] + // sfixed32 another_value = 3 [(buf.validate.field).sfixed32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *int32 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be greater than or equal to 5 [sfixed32.gte] + // sfixed32 value = 1 [(buf.validate.field).sfixed32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sfixed32.gte_lt] + // sfixed32 other_value = 2 [(buf.validate.field).sfixed32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sfixed32.gte_lt_exclusive] + // sfixed32 another_value = 3 [(buf.validate.field).sfixed32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *int32 + // -- end of GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be in list [1, 2, 3] + // sfixed32 value = 1 [(buf.validate.field).sfixed32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int32 + // `not_in` requires the field value to not be equal to any of the specified + // values. 
If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must not be in list [1, 2, 3] + // sfixed32 value = 1 [(buf.validate.field).sfixed32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int32 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MySFixed32 { + // sfixed32 value = 1 [ + // (buf.validate.field).sfixed32.example = 1, + // (buf.validate.field).sfixed32.example = 2 + // ]; + // } + // + // ``` + Example []int32 +} + +func (b0 SFixed32Rules_builder) Build() *SFixed32Rules { + m0 := &SFixed32Rules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &SFixed32Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.LessThan = &SFixed32Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.GreaterThan = &SFixed32Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &SFixed32Rules_Gte{*b.Gte} + } + x.In = b.In + x.NotIn = b.NotIn + x.Example = b.Example + return m0 +} + +type case_SFixed32Rules_LessThan protoreflect.FieldNumber + +func (x case_SFixed32Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[16].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_SFixed32Rules_GreaterThan protoreflect.FieldNumber + +func (x case_SFixed32Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[16].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isSFixed32Rules_LessThan interface { + isSFixed32Rules_LessThan() +} + +type SFixed32Rules_Lt struct { + // `lt` requires the field value to be less than the 
specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be less than 10 + // sfixed32 value = 1 [(buf.validate.field).sfixed32.lt = 10]; + // } + // + // ``` + Lt int32 `protobuf:"fixed32,2,opt,name=lt,oneof"` +} + +type SFixed32Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be less than or equal to 10 + // sfixed32 value = 1 [(buf.validate.field).sfixed32.lte = 10]; + // } + // + // ``` + Lte int32 `protobuf:"fixed32,3,opt,name=lte,oneof"` +} + +func (*SFixed32Rules_Lt) isSFixed32Rules_LessThan() {} + +func (*SFixed32Rules_Lte) isSFixed32Rules_LessThan() {} + +type isSFixed32Rules_GreaterThan interface { + isSFixed32Rules_GreaterThan() +} + +type SFixed32Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySFixed32 { + // // value must be greater than 5 [sfixed32.gt] + // sfixed32 value = 1 [(buf.validate.field).sfixed32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sfixed32.gt_lt] + // sfixed32 other_value = 2 [(buf.validate.field).sfixed32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sfixed32.gt_lt_exclusive] + // sfixed32 another_value = 3 [(buf.validate.field).sfixed32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt int32 `protobuf:"fixed32,4,opt,name=gt,oneof"` +} + +type SFixed32Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be greater than or equal to 5 [sfixed32.gte] + // sfixed32 value = 1 [(buf.validate.field).sfixed32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sfixed32.gte_lt] + // sfixed32 other_value = 2 [(buf.validate.field).sfixed32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sfixed32.gte_lt_exclusive] + // sfixed32 another_value = 3 [(buf.validate.field).sfixed32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte int32 `protobuf:"fixed32,5,opt,name=gte,oneof"` +} + +func (*SFixed32Rules_Gt) isSFixed32Rules_GreaterThan() {} + +func (*SFixed32Rules_Gte) isSFixed32Rules_GreaterThan() {} + +// SFixed64Rules describes the rules applied to `fixed64` values. +type SFixed64Rules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MySFixed64 { + // // value must equal 42 + // sfixed64 value = 1 [(buf.validate.field).sfixed64.const = 42]; + // } + // + // ``` + Const *int64 `protobuf:"fixed64,1,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *SFixed64Rules_Lt + // *SFixed64Rules_Lte + LessThan isSFixed64Rules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *SFixed64Rules_Gt + // *SFixed64Rules_Gte + GreaterThan isSFixed64Rules_GreaterThan `protobuf_oneof:"greater_than"` + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be in list [1, 2, 3] + // sfixed64 value = 1 [(buf.validate.field).sfixed64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int64 `protobuf:"fixed64,6,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must not be in list [1, 2, 3] + // sfixed64 value = 1 [(buf.validate.field).sfixed64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MySFixed64 { + // sfixed64 value = 1 [ + // (buf.validate.field).sfixed64.example = 1, + // (buf.validate.field).sfixed64.example = 2 + // ]; + // } + // + // ``` + Example []int64 `protobuf:"fixed64,8,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SFixed64Rules) Reset() { + *x = SFixed64Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SFixed64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SFixed64Rules) ProtoMessage() {} + +func (x *SFixed64Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SFixed64Rules) GetConst() int64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *SFixed64Rules) GetLessThan() isSFixed64Rules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *SFixed64Rules) GetLt() int64 { + if x != nil { + if x, ok := x.LessThan.(*SFixed64Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *SFixed64Rules) GetLte() int64 { + if x != nil { + if x, ok := x.LessThan.(*SFixed64Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *SFixed64Rules) GetGreaterThan() isSFixed64Rules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *SFixed64Rules) GetGt() int64 { + if x != nil { + if x, ok := x.GreaterThan.(*SFixed64Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *SFixed64Rules) GetGte() int64 { + if x != nil { + if x, ok := x.GreaterThan.(*SFixed64Rules_Gte); ok { + return x.Gte + } + } + return 
0 +} + +func (x *SFixed64Rules) GetIn() []int64 { + if x != nil { + return x.In + } + return nil +} + +func (x *SFixed64Rules) GetNotIn() []int64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *SFixed64Rules) GetExample() []int64 { + if x != nil { + return x.Example + } + return nil +} + +func (x *SFixed64Rules) SetConst(v int64) { + x.Const = &v +} + +func (x *SFixed64Rules) SetLt(v int64) { + x.LessThan = &SFixed64Rules_Lt{v} +} + +func (x *SFixed64Rules) SetLte(v int64) { + x.LessThan = &SFixed64Rules_Lte{v} +} + +func (x *SFixed64Rules) SetGt(v int64) { + x.GreaterThan = &SFixed64Rules_Gt{v} +} + +func (x *SFixed64Rules) SetGte(v int64) { + x.GreaterThan = &SFixed64Rules_Gte{v} +} + +func (x *SFixed64Rules) SetIn(v []int64) { + x.In = v +} + +func (x *SFixed64Rules) SetNotIn(v []int64) { + x.NotIn = v +} + +func (x *SFixed64Rules) SetExample(v []int64) { + x.Example = v +} + +func (x *SFixed64Rules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *SFixed64Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != nil +} + +func (x *SFixed64Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*SFixed64Rules_Lt) + return ok +} + +func (x *SFixed64Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*SFixed64Rules_Lte) + return ok +} + +func (x *SFixed64Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan != nil +} + +func (x *SFixed64Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*SFixed64Rules_Gt) + return ok +} + +func (x *SFixed64Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*SFixed64Rules_Gte) + return ok +} + +func (x *SFixed64Rules) ClearConst() { + x.Const = nil +} + +func (x *SFixed64Rules) ClearLessThan() { + x.LessThan = nil +} + +func (x *SFixed64Rules) ClearLt() { + if _, ok := 
x.LessThan.(*SFixed64Rules_Lt); ok { + x.LessThan = nil + } +} + +func (x *SFixed64Rules) ClearLte() { + if _, ok := x.LessThan.(*SFixed64Rules_Lte); ok { + x.LessThan = nil + } +} + +func (x *SFixed64Rules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *SFixed64Rules) ClearGt() { + if _, ok := x.GreaterThan.(*SFixed64Rules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *SFixed64Rules) ClearGte() { + if _, ok := x.GreaterThan.(*SFixed64Rules_Gte); ok { + x.GreaterThan = nil + } +} + +const SFixed64Rules_LessThan_not_set_case case_SFixed64Rules_LessThan = 0 +const SFixed64Rules_Lt_case case_SFixed64Rules_LessThan = 2 +const SFixed64Rules_Lte_case case_SFixed64Rules_LessThan = 3 + +func (x *SFixed64Rules) WhichLessThan() case_SFixed64Rules_LessThan { + if x == nil { + return SFixed64Rules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *SFixed64Rules_Lt: + return SFixed64Rules_Lt_case + case *SFixed64Rules_Lte: + return SFixed64Rules_Lte_case + default: + return SFixed64Rules_LessThan_not_set_case + } +} + +const SFixed64Rules_GreaterThan_not_set_case case_SFixed64Rules_GreaterThan = 0 +const SFixed64Rules_Gt_case case_SFixed64Rules_GreaterThan = 4 +const SFixed64Rules_Gte_case case_SFixed64Rules_GreaterThan = 5 + +func (x *SFixed64Rules) WhichGreaterThan() case_SFixed64Rules_GreaterThan { + if x == nil { + return SFixed64Rules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *SFixed64Rules_Gt: + return SFixed64Rules_Gt_case + case *SFixed64Rules_Gte: + return SFixed64Rules_Gte_case + default: + return SFixed64Rules_GreaterThan_not_set_case + } +} + +type SFixed64Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MySFixed64 { + // // value must equal 42 + // sfixed64 value = 1 [(buf.validate.field).sfixed64.const = 42]; + // } + // + // ``` + Const *int64 + // Fields of oneof LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be less than 10 + // sfixed64 value = 1 [(buf.validate.field).sfixed64.lt = 10]; + // } + // + // ``` + Lt *int64 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be less than or equal to 10 + // sfixed64 value = 1 [(buf.validate.field).sfixed64.lte = 10]; + // } + // + // ``` + Lte *int64 + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySFixed64 { + // // value must be greater than 5 [sfixed64.gt] + // sfixed64 value = 1 [(buf.validate.field).sfixed64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sfixed64.gt_lt] + // sfixed64 other_value = 2 [(buf.validate.field).sfixed64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sfixed64.gt_lt_exclusive] + // sfixed64 another_value = 3 [(buf.validate.field).sfixed64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *int64 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be greater than or equal to 5 [sfixed64.gte] + // sfixed64 value = 1 [(buf.validate.field).sfixed64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sfixed64.gte_lt] + // sfixed64 other_value = 2 [(buf.validate.field).sfixed64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sfixed64.gte_lt_exclusive] + // sfixed64 another_value = 3 [(buf.validate.field).sfixed64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *int64 + // -- end of GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be in list [1, 2, 3] + // sfixed64 value = 1 [(buf.validate.field).sfixed64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int64 + // `not_in` requires the field value to not be equal to any of the specified + // values. 
If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must not be in list [1, 2, 3] + // sfixed64 value = 1 [(buf.validate.field).sfixed64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int64 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MySFixed64 { + // sfixed64 value = 1 [ + // (buf.validate.field).sfixed64.example = 1, + // (buf.validate.field).sfixed64.example = 2 + // ]; + // } + // + // ``` + Example []int64 +} + +func (b0 SFixed64Rules_builder) Build() *SFixed64Rules { + m0 := &SFixed64Rules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &SFixed64Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.LessThan = &SFixed64Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.GreaterThan = &SFixed64Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &SFixed64Rules_Gte{*b.Gte} + } + x.In = b.In + x.NotIn = b.NotIn + x.Example = b.Example + return m0 +} + +type case_SFixed64Rules_LessThan protoreflect.FieldNumber + +func (x case_SFixed64Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[17].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_SFixed64Rules_GreaterThan protoreflect.FieldNumber + +func (x case_SFixed64Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[17].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isSFixed64Rules_LessThan interface { + isSFixed64Rules_LessThan() +} + +type SFixed64Rules_Lt struct { + // `lt` requires the field value to be less than the 
specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be less than 10 + // sfixed64 value = 1 [(buf.validate.field).sfixed64.lt = 10]; + // } + // + // ``` + Lt int64 `protobuf:"fixed64,2,opt,name=lt,oneof"` +} + +type SFixed64Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be less than or equal to 10 + // sfixed64 value = 1 [(buf.validate.field).sfixed64.lte = 10]; + // } + // + // ``` + Lte int64 `protobuf:"fixed64,3,opt,name=lte,oneof"` +} + +func (*SFixed64Rules_Lt) isSFixed64Rules_LessThan() {} + +func (*SFixed64Rules_Lte) isSFixed64Rules_LessThan() {} + +type isSFixed64Rules_GreaterThan interface { + isSFixed64Rules_GreaterThan() +} + +type SFixed64Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySFixed64 { + // // value must be greater than 5 [sfixed64.gt] + // sfixed64 value = 1 [(buf.validate.field).sfixed64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sfixed64.gt_lt] + // sfixed64 other_value = 2 [(buf.validate.field).sfixed64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sfixed64.gt_lt_exclusive] + // sfixed64 another_value = 3 [(buf.validate.field).sfixed64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt int64 `protobuf:"fixed64,4,opt,name=gt,oneof"` +} + +type SFixed64Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be greater than or equal to 5 [sfixed64.gte] + // sfixed64 value = 1 [(buf.validate.field).sfixed64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sfixed64.gte_lt] + // sfixed64 other_value = 2 [(buf.validate.field).sfixed64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sfixed64.gte_lt_exclusive] + // sfixed64 another_value = 3 [(buf.validate.field).sfixed64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte int64 `protobuf:"fixed64,5,opt,name=gte,oneof"` +} + +func (*SFixed64Rules_Gt) isSFixed64Rules_GreaterThan() {} + +func (*SFixed64Rules_Gte) isSFixed64Rules_GreaterThan() {} + +// BoolRules describes the rules applied to `bool` values. These rules +// may also be applied to the `google.protobuf.BoolValue` Well-Known-Type. 
+type BoolRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified boolean value. + // If the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyBool { + // // value must equal true + // bool value = 1 [(buf.validate.field).bool.const = true]; + // } + // + // ``` + Const *bool `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyBool { + // bool value = 1 [ + // (buf.validate.field).bool.example = 1, + // (buf.validate.field).bool.example = 2 + // ]; + // } + // + // ``` + Example []bool `protobuf:"varint,2,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BoolRules) Reset() { + *x = BoolRules{} + mi := &file_buf_validate_validate_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BoolRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoolRules) ProtoMessage() {} + +func (x *BoolRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BoolRules) GetConst() bool { + if x != nil && x.Const != nil { + return *x.Const + } + return false +} + +func (x *BoolRules) GetExample() []bool { + if x != nil { + return x.Example + } + return nil +} + +func (x *BoolRules) SetConst(v bool) { + x.Const = &v +} + +func (x *BoolRules) 
SetExample(v []bool) { + x.Example = v +} + +func (x *BoolRules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *BoolRules) ClearConst() { + x.Const = nil +} + +type BoolRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified boolean value. + // If the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyBool { + // // value must equal true + // bool value = 1 [(buf.validate.field).bool.const = true]; + // } + // + // ``` + Const *bool + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyBool { + // bool value = 1 [ + // (buf.validate.field).bool.example = 1, + // (buf.validate.field).bool.example = 2 + // ]; + // } + // + // ``` + Example []bool +} + +func (b0 BoolRules_builder) Build() *BoolRules { + m0 := &BoolRules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + x.Example = b.Example + return m0 +} + +// StringRules describes the rules applied to `string` values These +// rules may also be applied to the `google.protobuf.StringValue` Well-Known-Type. +type StringRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MyString { + // // value must equal `hello` + // string value = 1 [(buf.validate.field).string.const = "hello"]; + // } + // + // ``` + Const *string `protobuf:"bytes,1,opt,name=const" json:"const,omitempty"` + // `len` dictates that the field value must have the specified + // number of characters (Unicode code points), which may differ from the number + // of bytes in the string. If the field value does not meet the specified + // length, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be 5 characters + // string value = 1 [(buf.validate.field).string.len = 5]; + // } + // + // ``` + Len *uint64 `protobuf:"varint,19,opt,name=len" json:"len,omitempty"` + // `min_len` specifies that the field value must have at least the specified + // number of characters (Unicode code points), which may differ from the number + // of bytes in the string. If the field value contains fewer characters, an error + // message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be at least 3 characters + // string value = 1 [(buf.validate.field).string.min_len = 3]; + // } + // + // ``` + MinLen *uint64 `protobuf:"varint,2,opt,name=min_len,json=minLen" json:"min_len,omitempty"` + // `max_len` specifies that the field value must have no more than the specified + // number of characters (Unicode code points), which may differ from the + // number of bytes in the string. If the field value contains more characters, + // an error message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be at most 10 characters + // string value = 1 [(buf.validate.field).string.max_len = 10]; + // } + // + // ``` + MaxLen *uint64 `protobuf:"varint,3,opt,name=max_len,json=maxLen" json:"max_len,omitempty"` + // `len_bytes` dictates that the field value must have the specified number of + // bytes. 
If the field value does not match the specified length in bytes, + // an error message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be 6 bytes + // string value = 1 [(buf.validate.field).string.len_bytes = 6]; + // } + // + // ``` + LenBytes *uint64 `protobuf:"varint,20,opt,name=len_bytes,json=lenBytes" json:"len_bytes,omitempty"` + // `min_bytes` specifies that the field value must have at least the specified + // number of bytes. If the field value contains fewer bytes, an error message + // will be generated. + // + // ```proto + // + // message MyString { + // // value length must be at least 4 bytes + // string value = 1 [(buf.validate.field).string.min_bytes = 4]; + // } + // + // ``` + MinBytes *uint64 `protobuf:"varint,4,opt,name=min_bytes,json=minBytes" json:"min_bytes,omitempty"` + // `max_bytes` specifies that the field value must have no more than the + // specified number of bytes. If the field value contains more bytes, an + // error message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be at most 8 bytes + // string value = 1 [(buf.validate.field).string.max_bytes = 8]; + // } + // + // ``` + MaxBytes *uint64 `protobuf:"varint,5,opt,name=max_bytes,json=maxBytes" json:"max_bytes,omitempty"` + // `pattern` specifies that the field value must match the specified + // regular expression (RE2 syntax), with the expression provided without any + // delimiters. If the field value doesn't match the regular expression, an + // error message will be generated. + // + // ```proto + // + // message MyString { + // // value does not match regex pattern `^[a-zA-Z]//$` + // string value = 1 [(buf.validate.field).string.pattern = "^[a-zA-Z]//$"]; + // } + // + // ``` + Pattern *string `protobuf:"bytes,6,opt,name=pattern" json:"pattern,omitempty"` + // `prefix` specifies that the field value must have the + // specified substring at the beginning of the string. 
If the field value + // doesn't start with the specified prefix, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value does not have prefix `pre` + // string value = 1 [(buf.validate.field).string.prefix = "pre"]; + // } + // + // ``` + Prefix *string `protobuf:"bytes,7,opt,name=prefix" json:"prefix,omitempty"` + // `suffix` specifies that the field value must have the + // specified substring at the end of the string. If the field value doesn't + // end with the specified suffix, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value does not have suffix `post` + // string value = 1 [(buf.validate.field).string.suffix = "post"]; + // } + // + // ``` + Suffix *string `protobuf:"bytes,8,opt,name=suffix" json:"suffix,omitempty"` + // `contains` specifies that the field value must have the + // specified substring anywhere in the string. If the field value doesn't + // contain the specified substring, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value does not contain substring `inside`. + // string value = 1 [(buf.validate.field).string.contains = "inside"]; + // } + // + // ``` + Contains *string `protobuf:"bytes,9,opt,name=contains" json:"contains,omitempty"` + // `not_contains` specifies that the field value must not have the + // specified substring anywhere in the string. If the field value contains + // the specified substring, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value contains substring `inside`. + // string value = 1 [(buf.validate.field).string.not_contains = "inside"]; + // } + // + // ``` + NotContains *string `protobuf:"bytes,23,opt,name=not_contains,json=notContains" json:"not_contains,omitempty"` + // `in` specifies that the field value must be equal to one of the specified + // values. 
If the field value isn't one of the specified values, an error + // message will be generated. + // + // ```proto + // + // message MyString { + // // value must be in list ["apple", "banana"] + // string value = 1 [(buf.validate.field).string.in = "apple", (buf.validate.field).string.in = "banana"]; + // } + // + // ``` + In []string `protobuf:"bytes,10,rep,name=in" json:"in,omitempty"` + // `not_in` specifies that the field value cannot be equal to any + // of the specified values. If the field value is one of the specified values, + // an error message will be generated. + // ```proto + // + // message MyString { + // // value must not be in list ["orange", "grape"] + // string value = 1 [(buf.validate.field).string.not_in = "orange", (buf.validate.field).string.not_in = "grape"]; + // } + // + // ``` + NotIn []string `protobuf:"bytes,11,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `WellKnown` rules provide advanced rules against common string + // patterns. + // + // Types that are valid to be assigned to WellKnown: + // + // *StringRules_Email + // *StringRules_Hostname + // *StringRules_Ip + // *StringRules_Ipv4 + // *StringRules_Ipv6 + // *StringRules_Uri + // *StringRules_UriRef + // *StringRules_Address + // *StringRules_Uuid + // *StringRules_Tuuid + // *StringRules_IpWithPrefixlen + // *StringRules_Ipv4WithPrefixlen + // *StringRules_Ipv6WithPrefixlen + // *StringRules_IpPrefix + // *StringRules_Ipv4Prefix + // *StringRules_Ipv6Prefix + // *StringRules_HostAndPort + // *StringRules_Ulid + // *StringRules_WellKnownRegex + WellKnown isStringRules_WellKnown `protobuf_oneof:"well_known"` + // This applies to regexes `HTTP_HEADER_NAME` and `HTTP_HEADER_VALUE` to + // enable strict header validation. By default, this is true, and HTTP header + // validations are [RFC-compliant](https://datatracker.ietf.org/doc/html/rfc7230#section-3). 
Setting to false will enable looser + // validations that only disallow `\r\n\0` characters, which can be used to + // bypass header matching rules. + // + // ```proto + // + // message MyString { + // // The field `value` must have be a valid HTTP headers, but not enforced with strict rules. + // string value = 1 [(buf.validate.field).string.strict = false]; + // } + // + // ``` + Strict *bool `protobuf:"varint,25,opt,name=strict" json:"strict,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyString { + // string value = 1 [ + // (buf.validate.field).string.example = "hello", + // (buf.validate.field).string.example = "world" + // ]; + // } + // + // ``` + Example []string `protobuf:"bytes,34,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StringRules) Reset() { + *x = StringRules{} + mi := &file_buf_validate_validate_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StringRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringRules) ProtoMessage() {} + +func (x *StringRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *StringRules) GetConst() string { + if x != nil && x.Const != nil { + return *x.Const + } + return "" +} + +func (x *StringRules) GetLen() uint64 { + if x != nil && x.Len != nil { + return *x.Len + } + return 0 +} + +func (x *StringRules) GetMinLen() uint64 { + if x != 
nil && x.MinLen != nil { + return *x.MinLen + } + return 0 +} + +func (x *StringRules) GetMaxLen() uint64 { + if x != nil && x.MaxLen != nil { + return *x.MaxLen + } + return 0 +} + +func (x *StringRules) GetLenBytes() uint64 { + if x != nil && x.LenBytes != nil { + return *x.LenBytes + } + return 0 +} + +func (x *StringRules) GetMinBytes() uint64 { + if x != nil && x.MinBytes != nil { + return *x.MinBytes + } + return 0 +} + +func (x *StringRules) GetMaxBytes() uint64 { + if x != nil && x.MaxBytes != nil { + return *x.MaxBytes + } + return 0 +} + +func (x *StringRules) GetPattern() string { + if x != nil && x.Pattern != nil { + return *x.Pattern + } + return "" +} + +func (x *StringRules) GetPrefix() string { + if x != nil && x.Prefix != nil { + return *x.Prefix + } + return "" +} + +func (x *StringRules) GetSuffix() string { + if x != nil && x.Suffix != nil { + return *x.Suffix + } + return "" +} + +func (x *StringRules) GetContains() string { + if x != nil && x.Contains != nil { + return *x.Contains + } + return "" +} + +func (x *StringRules) GetNotContains() string { + if x != nil && x.NotContains != nil { + return *x.NotContains + } + return "" +} + +func (x *StringRules) GetIn() []string { + if x != nil { + return x.In + } + return nil +} + +func (x *StringRules) GetNotIn() []string { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *StringRules) GetWellKnown() isStringRules_WellKnown { + if x != nil { + return x.WellKnown + } + return nil +} + +func (x *StringRules) GetEmail() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_Email); ok { + return x.Email + } + } + return false +} + +func (x *StringRules) GetHostname() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_Hostname); ok { + return x.Hostname + } + } + return false +} + +func (x *StringRules) GetIp() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_Ip); ok { + return x.Ip + } + } + return false +} + +func (x *StringRules) GetIpv4() bool { + if 
x != nil { + if x, ok := x.WellKnown.(*StringRules_Ipv4); ok { + return x.Ipv4 + } + } + return false +} + +func (x *StringRules) GetIpv6() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_Ipv6); ok { + return x.Ipv6 + } + } + return false +} + +func (x *StringRules) GetUri() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_Uri); ok { + return x.Uri + } + } + return false +} + +func (x *StringRules) GetUriRef() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_UriRef); ok { + return x.UriRef + } + } + return false +} + +func (x *StringRules) GetAddress() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_Address); ok { + return x.Address + } + } + return false +} + +func (x *StringRules) GetUuid() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_Uuid); ok { + return x.Uuid + } + } + return false +} + +func (x *StringRules) GetTuuid() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_Tuuid); ok { + return x.Tuuid + } + } + return false +} + +func (x *StringRules) GetIpWithPrefixlen() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_IpWithPrefixlen); ok { + return x.IpWithPrefixlen + } + } + return false +} + +func (x *StringRules) GetIpv4WithPrefixlen() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_Ipv4WithPrefixlen); ok { + return x.Ipv4WithPrefixlen + } + } + return false +} + +func (x *StringRules) GetIpv6WithPrefixlen() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_Ipv6WithPrefixlen); ok { + return x.Ipv6WithPrefixlen + } + } + return false +} + +func (x *StringRules) GetIpPrefix() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_IpPrefix); ok { + return x.IpPrefix + } + } + return false +} + +func (x *StringRules) GetIpv4Prefix() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_Ipv4Prefix); ok { + return x.Ipv4Prefix + } + } + return false +} + +func (x *StringRules) GetIpv6Prefix() bool { + if x != nil { + 
if x, ok := x.WellKnown.(*StringRules_Ipv6Prefix); ok { + return x.Ipv6Prefix + } + } + return false +} + +func (x *StringRules) GetHostAndPort() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_HostAndPort); ok { + return x.HostAndPort + } + } + return false +} + +func (x *StringRules) GetUlid() bool { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_Ulid); ok { + return x.Ulid + } + } + return false +} + +func (x *StringRules) GetWellKnownRegex() KnownRegex { + if x != nil { + if x, ok := x.WellKnown.(*StringRules_WellKnownRegex); ok { + return x.WellKnownRegex + } + } + return KnownRegex_KNOWN_REGEX_UNSPECIFIED +} + +func (x *StringRules) GetStrict() bool { + if x != nil && x.Strict != nil { + return *x.Strict + } + return false +} + +func (x *StringRules) GetExample() []string { + if x != nil { + return x.Example + } + return nil +} + +func (x *StringRules) SetConst(v string) { + x.Const = &v +} + +func (x *StringRules) SetLen(v uint64) { + x.Len = &v +} + +func (x *StringRules) SetMinLen(v uint64) { + x.MinLen = &v +} + +func (x *StringRules) SetMaxLen(v uint64) { + x.MaxLen = &v +} + +func (x *StringRules) SetLenBytes(v uint64) { + x.LenBytes = &v +} + +func (x *StringRules) SetMinBytes(v uint64) { + x.MinBytes = &v +} + +func (x *StringRules) SetMaxBytes(v uint64) { + x.MaxBytes = &v +} + +func (x *StringRules) SetPattern(v string) { + x.Pattern = &v +} + +func (x *StringRules) SetPrefix(v string) { + x.Prefix = &v +} + +func (x *StringRules) SetSuffix(v string) { + x.Suffix = &v +} + +func (x *StringRules) SetContains(v string) { + x.Contains = &v +} + +func (x *StringRules) SetNotContains(v string) { + x.NotContains = &v +} + +func (x *StringRules) SetIn(v []string) { + x.In = v +} + +func (x *StringRules) SetNotIn(v []string) { + x.NotIn = v +} + +func (x *StringRules) SetEmail(v bool) { + x.WellKnown = &StringRules_Email{v} +} + +func (x *StringRules) SetHostname(v bool) { + x.WellKnown = &StringRules_Hostname{v} +} + +func (x 
*StringRules) SetIp(v bool) { + x.WellKnown = &StringRules_Ip{v} +} + +func (x *StringRules) SetIpv4(v bool) { + x.WellKnown = &StringRules_Ipv4{v} +} + +func (x *StringRules) SetIpv6(v bool) { + x.WellKnown = &StringRules_Ipv6{v} +} + +func (x *StringRules) SetUri(v bool) { + x.WellKnown = &StringRules_Uri{v} +} + +func (x *StringRules) SetUriRef(v bool) { + x.WellKnown = &StringRules_UriRef{v} +} + +func (x *StringRules) SetAddress(v bool) { + x.WellKnown = &StringRules_Address{v} +} + +func (x *StringRules) SetUuid(v bool) { + x.WellKnown = &StringRules_Uuid{v} +} + +func (x *StringRules) SetTuuid(v bool) { + x.WellKnown = &StringRules_Tuuid{v} +} + +func (x *StringRules) SetIpWithPrefixlen(v bool) { + x.WellKnown = &StringRules_IpWithPrefixlen{v} +} + +func (x *StringRules) SetIpv4WithPrefixlen(v bool) { + x.WellKnown = &StringRules_Ipv4WithPrefixlen{v} +} + +func (x *StringRules) SetIpv6WithPrefixlen(v bool) { + x.WellKnown = &StringRules_Ipv6WithPrefixlen{v} +} + +func (x *StringRules) SetIpPrefix(v bool) { + x.WellKnown = &StringRules_IpPrefix{v} +} + +func (x *StringRules) SetIpv4Prefix(v bool) { + x.WellKnown = &StringRules_Ipv4Prefix{v} +} + +func (x *StringRules) SetIpv6Prefix(v bool) { + x.WellKnown = &StringRules_Ipv6Prefix{v} +} + +func (x *StringRules) SetHostAndPort(v bool) { + x.WellKnown = &StringRules_HostAndPort{v} +} + +func (x *StringRules) SetUlid(v bool) { + x.WellKnown = &StringRules_Ulid{v} +} + +func (x *StringRules) SetWellKnownRegex(v KnownRegex) { + x.WellKnown = &StringRules_WellKnownRegex{v} +} + +func (x *StringRules) SetStrict(v bool) { + x.Strict = &v +} + +func (x *StringRules) SetExample(v []string) { + x.Example = v +} + +func (x *StringRules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *StringRules) HasLen() bool { + if x == nil { + return false + } + return x.Len != nil +} + +func (x *StringRules) HasMinLen() bool { + if x == nil { + return false + } + return x.MinLen != nil +} + 
+func (x *StringRules) HasMaxLen() bool { + if x == nil { + return false + } + return x.MaxLen != nil +} + +func (x *StringRules) HasLenBytes() bool { + if x == nil { + return false + } + return x.LenBytes != nil +} + +func (x *StringRules) HasMinBytes() bool { + if x == nil { + return false + } + return x.MinBytes != nil +} + +func (x *StringRules) HasMaxBytes() bool { + if x == nil { + return false + } + return x.MaxBytes != nil +} + +func (x *StringRules) HasPattern() bool { + if x == nil { + return false + } + return x.Pattern != nil +} + +func (x *StringRules) HasPrefix() bool { + if x == nil { + return false + } + return x.Prefix != nil +} + +func (x *StringRules) HasSuffix() bool { + if x == nil { + return false + } + return x.Suffix != nil +} + +func (x *StringRules) HasContains() bool { + if x == nil { + return false + } + return x.Contains != nil +} + +func (x *StringRules) HasNotContains() bool { + if x == nil { + return false + } + return x.NotContains != nil +} + +func (x *StringRules) HasWellKnown() bool { + if x == nil { + return false + } + return x.WellKnown != nil +} + +func (x *StringRules) HasEmail() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Email) + return ok +} + +func (x *StringRules) HasHostname() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Hostname) + return ok +} + +func (x *StringRules) HasIp() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Ip) + return ok +} + +func (x *StringRules) HasIpv4() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Ipv4) + return ok +} + +func (x *StringRules) HasIpv6() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Ipv6) + return ok +} + +func (x *StringRules) HasUri() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Uri) + return ok +} + +func (x *StringRules) HasUriRef() bool { + if x == nil { + return false + } + _, 
ok := x.WellKnown.(*StringRules_UriRef) + return ok +} + +func (x *StringRules) HasAddress() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Address) + return ok +} + +func (x *StringRules) HasUuid() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Uuid) + return ok +} + +func (x *StringRules) HasTuuid() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Tuuid) + return ok +} + +func (x *StringRules) HasIpWithPrefixlen() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_IpWithPrefixlen) + return ok +} + +func (x *StringRules) HasIpv4WithPrefixlen() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Ipv4WithPrefixlen) + return ok +} + +func (x *StringRules) HasIpv6WithPrefixlen() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Ipv6WithPrefixlen) + return ok +} + +func (x *StringRules) HasIpPrefix() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_IpPrefix) + return ok +} + +func (x *StringRules) HasIpv4Prefix() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Ipv4Prefix) + return ok +} + +func (x *StringRules) HasIpv6Prefix() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Ipv6Prefix) + return ok +} + +func (x *StringRules) HasHostAndPort() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_HostAndPort) + return ok +} + +func (x *StringRules) HasUlid() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_Ulid) + return ok +} + +func (x *StringRules) HasWellKnownRegex() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*StringRules_WellKnownRegex) + return ok +} + +func (x *StringRules) HasStrict() bool { + if x == nil { + return false + } + return x.Strict != nil +} + +func (x *StringRules) ClearConst() { + x.Const = nil +} + +func (x 
*StringRules) ClearLen() { + x.Len = nil +} + +func (x *StringRules) ClearMinLen() { + x.MinLen = nil +} + +func (x *StringRules) ClearMaxLen() { + x.MaxLen = nil +} + +func (x *StringRules) ClearLenBytes() { + x.LenBytes = nil +} + +func (x *StringRules) ClearMinBytes() { + x.MinBytes = nil +} + +func (x *StringRules) ClearMaxBytes() { + x.MaxBytes = nil +} + +func (x *StringRules) ClearPattern() { + x.Pattern = nil +} + +func (x *StringRules) ClearPrefix() { + x.Prefix = nil +} + +func (x *StringRules) ClearSuffix() { + x.Suffix = nil +} + +func (x *StringRules) ClearContains() { + x.Contains = nil +} + +func (x *StringRules) ClearNotContains() { + x.NotContains = nil +} + +func (x *StringRules) ClearWellKnown() { + x.WellKnown = nil +} + +func (x *StringRules) ClearEmail() { + if _, ok := x.WellKnown.(*StringRules_Email); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearHostname() { + if _, ok := x.WellKnown.(*StringRules_Hostname); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearIp() { + if _, ok := x.WellKnown.(*StringRules_Ip); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearIpv4() { + if _, ok := x.WellKnown.(*StringRules_Ipv4); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearIpv6() { + if _, ok := x.WellKnown.(*StringRules_Ipv6); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearUri() { + if _, ok := x.WellKnown.(*StringRules_Uri); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearUriRef() { + if _, ok := x.WellKnown.(*StringRules_UriRef); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearAddress() { + if _, ok := x.WellKnown.(*StringRules_Address); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearUuid() { + if _, ok := x.WellKnown.(*StringRules_Uuid); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearTuuid() { + if _, ok := x.WellKnown.(*StringRules_Tuuid); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearIpWithPrefixlen() { + if 
_, ok := x.WellKnown.(*StringRules_IpWithPrefixlen); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearIpv4WithPrefixlen() { + if _, ok := x.WellKnown.(*StringRules_Ipv4WithPrefixlen); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearIpv6WithPrefixlen() { + if _, ok := x.WellKnown.(*StringRules_Ipv6WithPrefixlen); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearIpPrefix() { + if _, ok := x.WellKnown.(*StringRules_IpPrefix); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearIpv4Prefix() { + if _, ok := x.WellKnown.(*StringRules_Ipv4Prefix); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearIpv6Prefix() { + if _, ok := x.WellKnown.(*StringRules_Ipv6Prefix); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearHostAndPort() { + if _, ok := x.WellKnown.(*StringRules_HostAndPort); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearUlid() { + if _, ok := x.WellKnown.(*StringRules_Ulid); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearWellKnownRegex() { + if _, ok := x.WellKnown.(*StringRules_WellKnownRegex); ok { + x.WellKnown = nil + } +} + +func (x *StringRules) ClearStrict() { + x.Strict = nil +} + +const StringRules_WellKnown_not_set_case case_StringRules_WellKnown = 0 +const StringRules_Email_case case_StringRules_WellKnown = 12 +const StringRules_Hostname_case case_StringRules_WellKnown = 13 +const StringRules_Ip_case case_StringRules_WellKnown = 14 +const StringRules_Ipv4_case case_StringRules_WellKnown = 15 +const StringRules_Ipv6_case case_StringRules_WellKnown = 16 +const StringRules_Uri_case case_StringRules_WellKnown = 17 +const StringRules_UriRef_case case_StringRules_WellKnown = 18 +const StringRules_Address_case case_StringRules_WellKnown = 21 +const StringRules_Uuid_case case_StringRules_WellKnown = 22 +const StringRules_Tuuid_case case_StringRules_WellKnown = 33 +const StringRules_IpWithPrefixlen_case case_StringRules_WellKnown = 26 +const 
StringRules_Ipv4WithPrefixlen_case case_StringRules_WellKnown = 27 +const StringRules_Ipv6WithPrefixlen_case case_StringRules_WellKnown = 28 +const StringRules_IpPrefix_case case_StringRules_WellKnown = 29 +const StringRules_Ipv4Prefix_case case_StringRules_WellKnown = 30 +const StringRules_Ipv6Prefix_case case_StringRules_WellKnown = 31 +const StringRules_HostAndPort_case case_StringRules_WellKnown = 32 +const StringRules_Ulid_case case_StringRules_WellKnown = 35 +const StringRules_WellKnownRegex_case case_StringRules_WellKnown = 24 + +func (x *StringRules) WhichWellKnown() case_StringRules_WellKnown { + if x == nil { + return StringRules_WellKnown_not_set_case + } + switch x.WellKnown.(type) { + case *StringRules_Email: + return StringRules_Email_case + case *StringRules_Hostname: + return StringRules_Hostname_case + case *StringRules_Ip: + return StringRules_Ip_case + case *StringRules_Ipv4: + return StringRules_Ipv4_case + case *StringRules_Ipv6: + return StringRules_Ipv6_case + case *StringRules_Uri: + return StringRules_Uri_case + case *StringRules_UriRef: + return StringRules_UriRef_case + case *StringRules_Address: + return StringRules_Address_case + case *StringRules_Uuid: + return StringRules_Uuid_case + case *StringRules_Tuuid: + return StringRules_Tuuid_case + case *StringRules_IpWithPrefixlen: + return StringRules_IpWithPrefixlen_case + case *StringRules_Ipv4WithPrefixlen: + return StringRules_Ipv4WithPrefixlen_case + case *StringRules_Ipv6WithPrefixlen: + return StringRules_Ipv6WithPrefixlen_case + case *StringRules_IpPrefix: + return StringRules_IpPrefix_case + case *StringRules_Ipv4Prefix: + return StringRules_Ipv4Prefix_case + case *StringRules_Ipv6Prefix: + return StringRules_Ipv6Prefix_case + case *StringRules_HostAndPort: + return StringRules_HostAndPort_case + case *StringRules_Ulid: + return StringRules_Ulid_case + case *StringRules_WellKnownRegex: + return StringRules_WellKnownRegex_case + default: + return StringRules_WellKnown_not_set_case 
+ } +} + +type StringRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyString { + // // value must equal `hello` + // string value = 1 [(buf.validate.field).string.const = "hello"]; + // } + // + // ``` + Const *string + // `len` dictates that the field value must have the specified + // number of characters (Unicode code points), which may differ from the number + // of bytes in the string. If the field value does not meet the specified + // length, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be 5 characters + // string value = 1 [(buf.validate.field).string.len = 5]; + // } + // + // ``` + Len *uint64 + // `min_len` specifies that the field value must have at least the specified + // number of characters (Unicode code points), which may differ from the number + // of bytes in the string. If the field value contains fewer characters, an error + // message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be at least 3 characters + // string value = 1 [(buf.validate.field).string.min_len = 3]; + // } + // + // ``` + MinLen *uint64 + // `max_len` specifies that the field value must have no more than the specified + // number of characters (Unicode code points), which may differ from the + // number of bytes in the string. If the field value contains more characters, + // an error message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be at most 10 characters + // string value = 1 [(buf.validate.field).string.max_len = 10]; + // } + // + // ``` + MaxLen *uint64 + // `len_bytes` dictates that the field value must have the specified number of + // bytes. 
If the field value does not match the specified length in bytes, + // an error message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be 6 bytes + // string value = 1 [(buf.validate.field).string.len_bytes = 6]; + // } + // + // ``` + LenBytes *uint64 + // `min_bytes` specifies that the field value must have at least the specified + // number of bytes. If the field value contains fewer bytes, an error message + // will be generated. + // + // ```proto + // + // message MyString { + // // value length must be at least 4 bytes + // string value = 1 [(buf.validate.field).string.min_bytes = 4]; + // } + // + // ``` + MinBytes *uint64 + // `max_bytes` specifies that the field value must have no more than the + // specified number of bytes. If the field value contains more bytes, an + // error message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be at most 8 bytes + // string value = 1 [(buf.validate.field).string.max_bytes = 8]; + // } + // + // ``` + MaxBytes *uint64 + // `pattern` specifies that the field value must match the specified + // regular expression (RE2 syntax), with the expression provided without any + // delimiters. If the field value doesn't match the regular expression, an + // error message will be generated. + // + // ```proto + // + // message MyString { + // // value does not match regex pattern `^[a-zA-Z]//$` + // string value = 1 [(buf.validate.field).string.pattern = "^[a-zA-Z]//$"]; + // } + // + // ``` + Pattern *string + // `prefix` specifies that the field value must have the + // specified substring at the beginning of the string. If the field value + // doesn't start with the specified prefix, an error message will be + // generated. 
+ // + // ```proto + // + // message MyString { + // // value does not have prefix `pre` + // string value = 1 [(buf.validate.field).string.prefix = "pre"]; + // } + // + // ``` + Prefix *string + // `suffix` specifies that the field value must have the + // specified substring at the end of the string. If the field value doesn't + // end with the specified suffix, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value does not have suffix `post` + // string value = 1 [(buf.validate.field).string.suffix = "post"]; + // } + // + // ``` + Suffix *string + // `contains` specifies that the field value must have the + // specified substring anywhere in the string. If the field value doesn't + // contain the specified substring, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value does not contain substring `inside`. + // string value = 1 [(buf.validate.field).string.contains = "inside"]; + // } + // + // ``` + Contains *string + // `not_contains` specifies that the field value must not have the + // specified substring anywhere in the string. If the field value contains + // the specified substring, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value contains substring `inside`. + // string value = 1 [(buf.validate.field).string.not_contains = "inside"]; + // } + // + // ``` + NotContains *string + // `in` specifies that the field value must be equal to one of the specified + // values. If the field value isn't one of the specified values, an error + // message will be generated. + // + // ```proto + // + // message MyString { + // // value must be in list ["apple", "banana"] + // string value = 1 [(buf.validate.field).string.in = "apple", (buf.validate.field).string.in = "banana"]; + // } + // + // ``` + In []string + // `not_in` specifies that the field value cannot be equal to any + // of the specified values. 
If the field value is one of the specified values, + // an error message will be generated. + // ```proto + // + // message MyString { + // // value must not be in list ["orange", "grape"] + // string value = 1 [(buf.validate.field).string.not_in = "orange", (buf.validate.field).string.not_in = "grape"]; + // } + // + // ``` + NotIn []string + // `WellKnown` rules provide advanced rules against common string + // patterns. + + // Fields of oneof WellKnown: + // `email` specifies that the field value must be a valid email address, for + // example "foo@example.com". + // + // Conforms to the definition for a valid email address from the [HTML standard](https://html.spec.whatwg.org/multipage/input.html#valid-e-mail-address). + // Note that this standard willfully deviates from [RFC 5322](https://datatracker.ietf.org/doc/html/rfc5322), + // which allows many unexpected forms of email addresses and will easily match + // a typographical error. + // + // If the field value isn't a valid email address, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid email address + // string value = 1 [(buf.validate.field).string.email = true]; + // } + // + // ``` + Email *bool + // `hostname` specifies that the field value must be a valid hostname, for + // example "foo.example.com". + // + // A valid hostname follows the rules below: + // - The name consists of one or more labels, separated by a dot ("."). + // - Each label can be 1 to 63 alphanumeric characters. + // - A label can contain hyphens ("-"), but must not start or end with a hyphen. + // - The right-most label must not be digits only. + // - The name can have a trailing dot—for example, "foo.example.com.". + // - The name can be 253 characters at most, excluding the optional trailing dot. + // + // If the field value isn't a valid hostname, an error message will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid hostname + // string value = 1 [(buf.validate.field).string.hostname = true]; + // } + // + // ``` + Hostname *bool + // `ip` specifies that the field value must be a valid IP (v4 or v6) address. + // + // IPv4 addresses are expected in the dotted decimal format—for example, "192.168.5.21". + // IPv6 addresses are expected in their text representation—for example, "::1", + // or "2001:0DB8:ABCD:0012::0". + // + // Both formats are well-defined in the internet standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). + // Zone identifiers for IPv6 addresses (for example, "fe80::a%en1") are supported. + // + // If the field value isn't a valid IP address, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IP address + // string value = 1 [(buf.validate.field).string.ip = true]; + // } + // + // ``` + Ip *bool + // `ipv4` specifies that the field value must be a valid IPv4 address—for + // example "192.168.5.21". If the field value isn't a valid IPv4 address, an + // error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv4 address + // string value = 1 [(buf.validate.field).string.ipv4 = true]; + // } + // + // ``` + Ipv4 *bool + // `ipv6` specifies that the field value must be a valid IPv6 address—for + // example "::1", or "d7a:115c:a1e0:ab12:4843:cd96:626b:430b". If the field + // value is not a valid IPv6 address, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv6 address + // string value = 1 [(buf.validate.field).string.ipv6 = true]; + // } + // + // ``` + Ipv6 *bool + // `uri` specifies that the field value must be a valid URI, for example + // "https://example.com/foo/bar?baz=quux#frag". 
+ // + // URI is defined in the internet standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). + // Zone Identifiers in IPv6 address literals are supported ([RFC 6874](https://datatracker.ietf.org/doc/html/rfc6874)). + // + // If the field value isn't a valid URI, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid URI + // string value = 1 [(buf.validate.field).string.uri = true]; + // } + // + // ``` + Uri *bool + // `uri_ref` specifies that the field value must be a valid URI Reference—either + // a URI such as "https://example.com/foo/bar?baz=quux#frag", or a Relative + // Reference such as "./foo/bar?query". + // + // URI, URI Reference, and Relative Reference are defined in the internet + // standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). Zone + // Identifiers in IPv6 address literals are supported ([RFC 6874](https://datatracker.ietf.org/doc/html/rfc6874)). + // + // If the field value isn't a valid URI Reference, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value must be a valid URI Reference + // string value = 1 [(buf.validate.field).string.uri_ref = true]; + // } + // + // ``` + UriRef *bool + // `address` specifies that the field value must be either a valid hostname + // (for example, "example.com"), or a valid IP (v4 or v6) address (for example, + // "192.168.0.1", or "::1"). If the field value isn't a valid hostname or IP, + // an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid hostname, or ip address + // string value = 1 [(buf.validate.field).string.address = true]; + // } + // + // ``` + Address *bool + // `uuid` specifies that the field value must be a valid UUID as defined by + // [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2). If the + // field value isn't a valid UUID, an error message will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid UUID + // string value = 1 [(buf.validate.field).string.uuid = true]; + // } + // + // ``` + Uuid *bool + // `tuuid` (trimmed UUID) specifies that the field value must be a valid UUID as + // defined by [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2) with all dashes + // omitted. If the field value isn't a valid UUID without dashes, an error message + // will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid trimmed UUID + // string value = 1 [(buf.validate.field).string.tuuid = true]; + // } + // + // ``` + Tuuid *bool + // `ip_with_prefixlen` specifies that the field value must be a valid IP + // (v4 or v6) address with prefix length—for example, "192.168.5.21/16" or + // "2001:0DB8:ABCD:0012::F1/64". If the field value isn't a valid IP with + // prefix length, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IP with prefix length + // string value = 1 [(buf.validate.field).string.ip_with_prefixlen = true]; + // } + // + // ``` + IpWithPrefixlen *bool + // `ipv4_with_prefixlen` specifies that the field value must be a valid + // IPv4 address with prefix length—for example, "192.168.5.21/16". If the + // field value isn't a valid IPv4 address with prefix length, an error + // message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv4 address with prefix length + // string value = 1 [(buf.validate.field).string.ipv4_with_prefixlen = true]; + // } + // + // ``` + Ipv4WithPrefixlen *bool + // `ipv6_with_prefixlen` specifies that the field value must be a valid + // IPv6 address with prefix length—for example, "2001:0DB8:ABCD:0012::F1/64". + // If the field value is not a valid IPv6 address with prefix length, + // an error message will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid IPv6 address prefix length + // string value = 1 [(buf.validate.field).string.ipv6_with_prefixlen = true]; + // } + // + // ``` + Ipv6WithPrefixlen *bool + // `ip_prefix` specifies that the field value must be a valid IP (v4 or v6) + // prefix—for example, "192.168.0.0/16" or "2001:0DB8:ABCD:0012::0/64". + // + // The prefix must have all zeros for the unmasked bits. For example, + // "2001:0DB8:ABCD:0012::0/64" designates the left-most 64 bits for the + // prefix, and the remaining 64 bits must be zero. + // + // If the field value isn't a valid IP prefix, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IP prefix + // string value = 1 [(buf.validate.field).string.ip_prefix = true]; + // } + // + // ``` + IpPrefix *bool + // `ipv4_prefix` specifies that the field value must be a valid IPv4 + // prefix, for example "192.168.0.0/16". + // + // The prefix must have all zeros for the unmasked bits. For example, + // "192.168.0.0/16" designates the left-most 16 bits for the prefix, + // and the remaining 16 bits must be zero. + // + // If the field value isn't a valid IPv4 prefix, an error message + // will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv4 prefix + // string value = 1 [(buf.validate.field).string.ipv4_prefix = true]; + // } + // + // ``` + Ipv4Prefix *bool + // `ipv6_prefix` specifies that the field value must be a valid IPv6 prefix—for + // example, "2001:0DB8:ABCD:0012::0/64". + // + // The prefix must have all zeros for the unmasked bits. For example, + // "2001:0DB8:ABCD:0012::0/64" designates the left-most 64 bits for the + // prefix, and the remaining 64 bits must be zero. + // + // If the field value is not a valid IPv6 prefix, an error message will be + // generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid IPv6 prefix + // string value = 1 [(buf.validate.field).string.ipv6_prefix = true]; + // } + // + // ``` + Ipv6Prefix *bool + // `host_and_port` specifies that the field value must be valid host/port + // pair—for example, "example.com:8080". + // + // The host can be one of: + // - An IPv4 address in dotted decimal format—for example, "192.168.5.21". + // - An IPv6 address enclosed in square brackets—for example, "[2001:0DB8:ABCD:0012::F1]". + // - A hostname—for example, "example.com". + // + // The port is separated by a colon. It must be non-empty, with a decimal number + // in the range of 0-65535, inclusive. + HostAndPort *bool + // `ulid` specifies that the field value must be a valid ULID (Universally Unique + // Lexicographically Sortable Identifier) as defined by the [ULID specification](https://github.com/ulid/spec). + // If the field value isn't a valid ULID, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid ULID + // string value = 1 [(buf.validate.field).string.ulid = true]; + // } + // + // ``` + Ulid *bool + // `well_known_regex` specifies a common well-known pattern + // defined as a regex. If the field value doesn't match the well-known + // regex, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid HTTP header value + // string value = 1 [(buf.validate.field).string.well_known_regex = KNOWN_REGEX_HTTP_HEADER_VALUE]; + // } + // + // ``` + // + // #### KnownRegex + // + // `well_known_regex` contains some well-known patterns. 
+ // + // | Name | Number | Description | + // |-------------------------------|--------|-------------------------------------------| + // | KNOWN_REGEX_UNSPECIFIED | 0 | | + // | KNOWN_REGEX_HTTP_HEADER_NAME | 1 | HTTP header name as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2) | + // | KNOWN_REGEX_HTTP_HEADER_VALUE | 2 | HTTP header value as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.4) | + WellKnownRegex *KnownRegex + // -- end of WellKnown + // This applies to regexes `HTTP_HEADER_NAME` and `HTTP_HEADER_VALUE` to + // enable strict header validation. By default, this is true, and HTTP header + // validations are [RFC-compliant](https://datatracker.ietf.org/doc/html/rfc7230#section-3). Setting to false will enable looser + // validations that only disallow `\r\n\0` characters, which can be used to + // bypass header matching rules. + // + // ```proto + // + // message MyString { + // // The field `value` must have be a valid HTTP headers, but not enforced with strict rules. + // string value = 1 [(buf.validate.field).string.strict = false]; + // } + // + // ``` + Strict *bool + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyString { + // string value = 1 [ + // (buf.validate.field).string.example = "hello", + // (buf.validate.field).string.example = "world" + // ]; + // } + // + // ``` + Example []string +} + +func (b0 StringRules_builder) Build() *StringRules { + m0 := &StringRules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + x.Len = b.Len + x.MinLen = b.MinLen + x.MaxLen = b.MaxLen + x.LenBytes = b.LenBytes + x.MinBytes = b.MinBytes + x.MaxBytes = b.MaxBytes + x.Pattern = b.Pattern + x.Prefix = b.Prefix + x.Suffix = b.Suffix + x.Contains = b.Contains + x.NotContains = b.NotContains + x.In = b.In + x.NotIn = b.NotIn + if b.Email != nil { + x.WellKnown = &StringRules_Email{*b.Email} + } + if b.Hostname != nil { + x.WellKnown = &StringRules_Hostname{*b.Hostname} + } + if b.Ip != nil { + x.WellKnown = &StringRules_Ip{*b.Ip} + } + if b.Ipv4 != nil { + x.WellKnown = &StringRules_Ipv4{*b.Ipv4} + } + if b.Ipv6 != nil { + x.WellKnown = &StringRules_Ipv6{*b.Ipv6} + } + if b.Uri != nil { + x.WellKnown = &StringRules_Uri{*b.Uri} + } + if b.UriRef != nil { + x.WellKnown = &StringRules_UriRef{*b.UriRef} + } + if b.Address != nil { + x.WellKnown = &StringRules_Address{*b.Address} + } + if b.Uuid != nil { + x.WellKnown = &StringRules_Uuid{*b.Uuid} + } + if b.Tuuid != nil { + x.WellKnown = &StringRules_Tuuid{*b.Tuuid} + } + if b.IpWithPrefixlen != nil { + x.WellKnown = &StringRules_IpWithPrefixlen{*b.IpWithPrefixlen} + } + if b.Ipv4WithPrefixlen != nil { + x.WellKnown = &StringRules_Ipv4WithPrefixlen{*b.Ipv4WithPrefixlen} + } + if b.Ipv6WithPrefixlen != nil { + x.WellKnown = &StringRules_Ipv6WithPrefixlen{*b.Ipv6WithPrefixlen} + } + if b.IpPrefix != nil { + x.WellKnown = &StringRules_IpPrefix{*b.IpPrefix} + } + if b.Ipv4Prefix != nil { + x.WellKnown = &StringRules_Ipv4Prefix{*b.Ipv4Prefix} + } + if b.Ipv6Prefix != nil { + x.WellKnown = &StringRules_Ipv6Prefix{*b.Ipv6Prefix} + } + if b.HostAndPort != nil { + x.WellKnown = 
&StringRules_HostAndPort{*b.HostAndPort} + } + if b.Ulid != nil { + x.WellKnown = &StringRules_Ulid{*b.Ulid} + } + if b.WellKnownRegex != nil { + x.WellKnown = &StringRules_WellKnownRegex{*b.WellKnownRegex} + } + x.Strict = b.Strict + x.Example = b.Example + return m0 +} + +type case_StringRules_WellKnown protoreflect.FieldNumber + +func (x case_StringRules_WellKnown) String() string { + md := file_buf_validate_validate_proto_msgTypes[19].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isStringRules_WellKnown interface { + isStringRules_WellKnown() +} + +type StringRules_Email struct { + // `email` specifies that the field value must be a valid email address, for + // example "foo@example.com". + // + // Conforms to the definition for a valid email address from the [HTML standard](https://html.spec.whatwg.org/multipage/input.html#valid-e-mail-address). + // Note that this standard willfully deviates from [RFC 5322](https://datatracker.ietf.org/doc/html/rfc5322), + // which allows many unexpected forms of email addresses and will easily match + // a typographical error. + // + // If the field value isn't a valid email address, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid email address + // string value = 1 [(buf.validate.field).string.email = true]; + // } + // + // ``` + Email bool `protobuf:"varint,12,opt,name=email,oneof"` +} + +type StringRules_Hostname struct { + // `hostname` specifies that the field value must be a valid hostname, for + // example "foo.example.com". + // + // A valid hostname follows the rules below: + // - The name consists of one or more labels, separated by a dot ("."). + // - Each label can be 1 to 63 alphanumeric characters. + // - A label can contain hyphens ("-"), but must not start or end with a hyphen. + // - The right-most label must not be digits only. 
+ // - The name can have a trailing dot—for example, "foo.example.com.". + // - The name can be 253 characters at most, excluding the optional trailing dot. + // + // If the field value isn't a valid hostname, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid hostname + // string value = 1 [(buf.validate.field).string.hostname = true]; + // } + // + // ``` + Hostname bool `protobuf:"varint,13,opt,name=hostname,oneof"` +} + +type StringRules_Ip struct { + // `ip` specifies that the field value must be a valid IP (v4 or v6) address. + // + // IPv4 addresses are expected in the dotted decimal format—for example, "192.168.5.21". + // IPv6 addresses are expected in their text representation—for example, "::1", + // or "2001:0DB8:ABCD:0012::0". + // + // Both formats are well-defined in the internet standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). + // Zone identifiers for IPv6 addresses (for example, "fe80::a%en1") are supported. + // + // If the field value isn't a valid IP address, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IP address + // string value = 1 [(buf.validate.field).string.ip = true]; + // } + // + // ``` + Ip bool `protobuf:"varint,14,opt,name=ip,oneof"` +} + +type StringRules_Ipv4 struct { + // `ipv4` specifies that the field value must be a valid IPv4 address—for + // example "192.168.5.21". If the field value isn't a valid IPv4 address, an + // error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv4 address + // string value = 1 [(buf.validate.field).string.ipv4 = true]; + // } + // + // ``` + Ipv4 bool `protobuf:"varint,15,opt,name=ipv4,oneof"` +} + +type StringRules_Ipv6 struct { + // `ipv6` specifies that the field value must be a valid IPv6 address—for + // example "::1", or "d7a:115c:a1e0:ab12:4843:cd96:626b:430b". 
If the field + // value is not a valid IPv6 address, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv6 address + // string value = 1 [(buf.validate.field).string.ipv6 = true]; + // } + // + // ``` + Ipv6 bool `protobuf:"varint,16,opt,name=ipv6,oneof"` +} + +type StringRules_Uri struct { + // `uri` specifies that the field value must be a valid URI, for example + // "https://example.com/foo/bar?baz=quux#frag". + // + // URI is defined in the internet standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). + // Zone Identifiers in IPv6 address literals are supported ([RFC 6874](https://datatracker.ietf.org/doc/html/rfc6874)). + // + // If the field value isn't a valid URI, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid URI + // string value = 1 [(buf.validate.field).string.uri = true]; + // } + // + // ``` + Uri bool `protobuf:"varint,17,opt,name=uri,oneof"` +} + +type StringRules_UriRef struct { + // `uri_ref` specifies that the field value must be a valid URI Reference—either + // a URI such as "https://example.com/foo/bar?baz=quux#frag", or a Relative + // Reference such as "./foo/bar?query". + // + // URI, URI Reference, and Relative Reference are defined in the internet + // standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). Zone + // Identifiers in IPv6 address literals are supported ([RFC 6874](https://datatracker.ietf.org/doc/html/rfc6874)). + // + // If the field value isn't a valid URI Reference, an error message will be + // generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid URI Reference + // string value = 1 [(buf.validate.field).string.uri_ref = true]; + // } + // + // ``` + UriRef bool `protobuf:"varint,18,opt,name=uri_ref,json=uriRef,oneof"` +} + +type StringRules_Address struct { + // `address` specifies that the field value must be either a valid hostname + // (for example, "example.com"), or a valid IP (v4 or v6) address (for example, + // "192.168.0.1", or "::1"). If the field value isn't a valid hostname or IP, + // an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid hostname, or ip address + // string value = 1 [(buf.validate.field).string.address = true]; + // } + // + // ``` + Address bool `protobuf:"varint,21,opt,name=address,oneof"` +} + +type StringRules_Uuid struct { + // `uuid` specifies that the field value must be a valid UUID as defined by + // [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2). If the + // field value isn't a valid UUID, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid UUID + // string value = 1 [(buf.validate.field).string.uuid = true]; + // } + // + // ``` + Uuid bool `protobuf:"varint,22,opt,name=uuid,oneof"` +} + +type StringRules_Tuuid struct { + // `tuuid` (trimmed UUID) specifies that the field value must be a valid UUID as + // defined by [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2) with all dashes + // omitted. If the field value isn't a valid UUID without dashes, an error message + // will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid trimmed UUID + // string value = 1 [(buf.validate.field).string.tuuid = true]; + // } + // + // ``` + Tuuid bool `protobuf:"varint,33,opt,name=tuuid,oneof"` +} + +type StringRules_IpWithPrefixlen struct { + // `ip_with_prefixlen` specifies that the field value must be a valid IP + // (v4 or v6) address with prefix length—for example, "192.168.5.21/16" or + // "2001:0DB8:ABCD:0012::F1/64". If the field value isn't a valid IP with + // prefix length, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IP with prefix length + // string value = 1 [(buf.validate.field).string.ip_with_prefixlen = true]; + // } + // + // ``` + IpWithPrefixlen bool `protobuf:"varint,26,opt,name=ip_with_prefixlen,json=ipWithPrefixlen,oneof"` +} + +type StringRules_Ipv4WithPrefixlen struct { + // `ipv4_with_prefixlen` specifies that the field value must be a valid + // IPv4 address with prefix length—for example, "192.168.5.21/16". If the + // field value isn't a valid IPv4 address with prefix length, an error + // message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv4 address with prefix length + // string value = 1 [(buf.validate.field).string.ipv4_with_prefixlen = true]; + // } + // + // ``` + Ipv4WithPrefixlen bool `protobuf:"varint,27,opt,name=ipv4_with_prefixlen,json=ipv4WithPrefixlen,oneof"` +} + +type StringRules_Ipv6WithPrefixlen struct { + // `ipv6_with_prefixlen` specifies that the field value must be a valid + // IPv6 address with prefix length—for example, "2001:0DB8:ABCD:0012::F1/64". + // If the field value is not a valid IPv6 address with prefix length, + // an error message will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid IPv6 address prefix length + // string value = 1 [(buf.validate.field).string.ipv6_with_prefixlen = true]; + // } + // + // ``` + Ipv6WithPrefixlen bool `protobuf:"varint,28,opt,name=ipv6_with_prefixlen,json=ipv6WithPrefixlen,oneof"` +} + +type StringRules_IpPrefix struct { + // `ip_prefix` specifies that the field value must be a valid IP (v4 or v6) + // prefix—for example, "192.168.0.0/16" or "2001:0DB8:ABCD:0012::0/64". + // + // The prefix must have all zeros for the unmasked bits. For example, + // "2001:0DB8:ABCD:0012::0/64" designates the left-most 64 bits for the + // prefix, and the remaining 64 bits must be zero. + // + // If the field value isn't a valid IP prefix, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IP prefix + // string value = 1 [(buf.validate.field).string.ip_prefix = true]; + // } + // + // ``` + IpPrefix bool `protobuf:"varint,29,opt,name=ip_prefix,json=ipPrefix,oneof"` +} + +type StringRules_Ipv4Prefix struct { + // `ipv4_prefix` specifies that the field value must be a valid IPv4 + // prefix, for example "192.168.0.0/16". + // + // The prefix must have all zeros for the unmasked bits. For example, + // "192.168.0.0/16" designates the left-most 16 bits for the prefix, + // and the remaining 16 bits must be zero. + // + // If the field value isn't a valid IPv4 prefix, an error message + // will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv4 prefix + // string value = 1 [(buf.validate.field).string.ipv4_prefix = true]; + // } + // + // ``` + Ipv4Prefix bool `protobuf:"varint,30,opt,name=ipv4_prefix,json=ipv4Prefix,oneof"` +} + +type StringRules_Ipv6Prefix struct { + // `ipv6_prefix` specifies that the field value must be a valid IPv6 prefix—for + // example, "2001:0DB8:ABCD:0012::0/64". 
+ // + // The prefix must have all zeros for the unmasked bits. For example, + // "2001:0DB8:ABCD:0012::0/64" designates the left-most 64 bits for the + // prefix, and the remaining 64 bits must be zero. + // + // If the field value is not a valid IPv6 prefix, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv6 prefix + // string value = 1 [(buf.validate.field).string.ipv6_prefix = true]; + // } + // + // ``` + Ipv6Prefix bool `protobuf:"varint,31,opt,name=ipv6_prefix,json=ipv6Prefix,oneof"` +} + +type StringRules_HostAndPort struct { + // `host_and_port` specifies that the field value must be valid host/port + // pair—for example, "example.com:8080". + // + // The host can be one of: + // - An IPv4 address in dotted decimal format—for example, "192.168.5.21". + // - An IPv6 address enclosed in square brackets—for example, "[2001:0DB8:ABCD:0012::F1]". + // - A hostname—for example, "example.com". + // + // The port is separated by a colon. It must be non-empty, with a decimal number + // in the range of 0-65535, inclusive. + HostAndPort bool `protobuf:"varint,32,opt,name=host_and_port,json=hostAndPort,oneof"` +} + +type StringRules_Ulid struct { + // `ulid` specifies that the field value must be a valid ULID (Universally Unique + // Lexicographically Sortable Identifier) as defined by the [ULID specification](https://github.com/ulid/spec). + // If the field value isn't a valid ULID, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid ULID + // string value = 1 [(buf.validate.field).string.ulid = true]; + // } + // + // ``` + Ulid bool `protobuf:"varint,35,opt,name=ulid,oneof"` +} + +type StringRules_WellKnownRegex struct { + // `well_known_regex` specifies a common well-known pattern + // defined as a regex. If the field value doesn't match the well-known + // regex, an error message will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid HTTP header value + // string value = 1 [(buf.validate.field).string.well_known_regex = KNOWN_REGEX_HTTP_HEADER_VALUE]; + // } + // + // ``` + // + // #### KnownRegex + // + // `well_known_regex` contains some well-known patterns. + // + // | Name | Number | Description | + // |-------------------------------|--------|-------------------------------------------| + // | KNOWN_REGEX_UNSPECIFIED | 0 | | + // | KNOWN_REGEX_HTTP_HEADER_NAME | 1 | HTTP header name as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2) | + // | KNOWN_REGEX_HTTP_HEADER_VALUE | 2 | HTTP header value as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.4) | + WellKnownRegex KnownRegex `protobuf:"varint,24,opt,name=well_known_regex,json=wellKnownRegex,enum=buf.validate.KnownRegex,oneof"` +} + +func (*StringRules_Email) isStringRules_WellKnown() {} + +func (*StringRules_Hostname) isStringRules_WellKnown() {} + +func (*StringRules_Ip) isStringRules_WellKnown() {} + +func (*StringRules_Ipv4) isStringRules_WellKnown() {} + +func (*StringRules_Ipv6) isStringRules_WellKnown() {} + +func (*StringRules_Uri) isStringRules_WellKnown() {} + +func (*StringRules_UriRef) isStringRules_WellKnown() {} + +func (*StringRules_Address) isStringRules_WellKnown() {} + +func (*StringRules_Uuid) isStringRules_WellKnown() {} + +func (*StringRules_Tuuid) isStringRules_WellKnown() {} + +func (*StringRules_IpWithPrefixlen) isStringRules_WellKnown() {} + +func (*StringRules_Ipv4WithPrefixlen) isStringRules_WellKnown() {} + +func (*StringRules_Ipv6WithPrefixlen) isStringRules_WellKnown() {} + +func (*StringRules_IpPrefix) isStringRules_WellKnown() {} + +func (*StringRules_Ipv4Prefix) isStringRules_WellKnown() {} + +func (*StringRules_Ipv6Prefix) isStringRules_WellKnown() {} + +func (*StringRules_HostAndPort) isStringRules_WellKnown() {} + +func (*StringRules_Ulid) 
isStringRules_WellKnown() {} + +func (*StringRules_WellKnownRegex) isStringRules_WellKnown() {} + +// BytesRules describe the rules applied to `bytes` values. These rules +// may also be applied to the `google.protobuf.BytesValue` Well-Known-Type. +type BytesRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified bytes + // value. If the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must be "\x01\x02\x03\x04" + // bytes value = 1 [(buf.validate.field).bytes.const = "\x01\x02\x03\x04"]; + // } + // + // ``` + Const []byte `protobuf:"bytes,1,opt,name=const" json:"const,omitempty"` + // `len` requires the field value to have the specified length in bytes. + // If the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value length must be 4 bytes. + // optional bytes value = 1 [(buf.validate.field).bytes.len = 4]; + // } + // + // ``` + Len *uint64 `protobuf:"varint,13,opt,name=len" json:"len,omitempty"` + // `min_len` requires the field value to have at least the specified minimum + // length in bytes. + // If the field value doesn't meet the requirement, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value length must be at least 2 bytes. + // optional bytes value = 1 [(buf.validate.field).bytes.min_len = 2]; + // } + // + // ``` + MinLen *uint64 `protobuf:"varint,2,opt,name=min_len,json=minLen" json:"min_len,omitempty"` + // `max_len` requires the field value to have at most the specified maximum + // length in bytes. + // If the field value exceeds the requirement, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must be at most 6 bytes. 
+ // optional bytes value = 1 [(buf.validate.field).bytes.max_len = 6]; + // } + // + // ``` + MaxLen *uint64 `protobuf:"varint,3,opt,name=max_len,json=maxLen" json:"max_len,omitempty"` + // `pattern` requires the field value to match the specified regular + // expression ([RE2 syntax](https://github.com/google/re2/wiki/Syntax)). + // The value of the field must be valid UTF-8 or validation will fail with a + // runtime error. + // If the field value doesn't match the pattern, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must match regex pattern "^[a-zA-Z0-9]+$". + // optional bytes value = 1 [(buf.validate.field).bytes.pattern = "^[a-zA-Z0-9]+$"]; + // } + // + // ``` + Pattern *string `protobuf:"bytes,4,opt,name=pattern" json:"pattern,omitempty"` + // `prefix` requires the field value to have the specified bytes at the + // beginning of the string. + // If the field value doesn't meet the requirement, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value does not have prefix \x01\x02 + // optional bytes value = 1 [(buf.validate.field).bytes.prefix = "\x01\x02"]; + // } + // + // ``` + Prefix []byte `protobuf:"bytes,5,opt,name=prefix" json:"prefix,omitempty"` + // `suffix` requires the field value to have the specified bytes at the end + // of the string. + // If the field value doesn't meet the requirement, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value does not have suffix \x03\x04 + // optional bytes value = 1 [(buf.validate.field).bytes.suffix = "\x03\x04"]; + // } + // + // ``` + Suffix []byte `protobuf:"bytes,6,opt,name=suffix" json:"suffix,omitempty"` + // `contains` requires the field value to have the specified bytes anywhere in + // the string. + // If the field value doesn't meet the requirement, an error message is generated. 
+ // + // ```proto + // + // message MyBytes { + // // value does not contain \x02\x03 + // optional bytes value = 1 [(buf.validate.field).bytes.contains = "\x02\x03"]; + // } + // + // ``` + Contains []byte `protobuf:"bytes,7,opt,name=contains" json:"contains,omitempty"` + // `in` requires the field value to be equal to one of the specified + // values. If the field value doesn't match any of the specified values, an + // error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must in ["\x01\x02", "\x02\x03", "\x03\x04"] + // optional bytes value = 1 [(buf.validate.field).bytes.in = {"\x01\x02", "\x02\x03", "\x03\x04"}]; + // } + // + // ``` + In [][]byte `protobuf:"bytes,8,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to be not equal to any of the specified + // values. + // If the field value matches any of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyBytes { + // // value must not in ["\x01\x02", "\x02\x03", "\x03\x04"] + // optional bytes value = 1 [(buf.validate.field).bytes.not_in = {"\x01\x02", "\x02\x03", "\x03\x04"}]; + // } + // + // ``` + NotIn [][]byte `protobuf:"bytes,9,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // WellKnown rules provide advanced rules against common byte + // patterns + // + // Types that are valid to be assigned to WellKnown: + // + // *BytesRules_Ip + // *BytesRules_Ipv4 + // *BytesRules_Ipv6 + // *BytesRules_Uuid + WellKnown isBytesRules_WellKnown `protobuf_oneof:"well_known"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyBytes { + // bytes value = 1 [ + // (buf.validate.field).bytes.example = "\x01\x02", + // (buf.validate.field).bytes.example = "\x02\x03" + // ]; + // } + // + // ``` + Example [][]byte `protobuf:"bytes,14,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BytesRules) Reset() { + *x = BytesRules{} + mi := &file_buf_validate_validate_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BytesRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BytesRules) ProtoMessage() {} + +func (x *BytesRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BytesRules) GetConst() []byte { + if x != nil { + return x.Const + } + return nil +} + +func (x *BytesRules) GetLen() uint64 { + if x != nil && x.Len != nil { + return *x.Len + } + return 0 +} + +func (x *BytesRules) GetMinLen() uint64 { + if x != nil && x.MinLen != nil { + return *x.MinLen + } + return 0 +} + +func (x *BytesRules) GetMaxLen() uint64 { + if x != nil && x.MaxLen != nil { + return *x.MaxLen + } + return 0 +} + +func (x *BytesRules) GetPattern() string { + if x != nil && x.Pattern != nil { + return *x.Pattern + } + return "" +} + +func (x *BytesRules) GetPrefix() []byte { + if x != nil { + return x.Prefix + } + return nil +} + +func (x *BytesRules) GetSuffix() []byte { + if x != nil { + return x.Suffix + } + return nil +} + +func (x *BytesRules) GetContains() []byte { + if x != nil { + return x.Contains + } + return nil +} + +func (x *BytesRules) GetIn() [][]byte { + if x != nil { + return x.In + } + return nil +} + +func (x 
*BytesRules) GetNotIn() [][]byte { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *BytesRules) GetWellKnown() isBytesRules_WellKnown { + if x != nil { + return x.WellKnown + } + return nil +} + +func (x *BytesRules) GetIp() bool { + if x != nil { + if x, ok := x.WellKnown.(*BytesRules_Ip); ok { + return x.Ip + } + } + return false +} + +func (x *BytesRules) GetIpv4() bool { + if x != nil { + if x, ok := x.WellKnown.(*BytesRules_Ipv4); ok { + return x.Ipv4 + } + } + return false +} + +func (x *BytesRules) GetIpv6() bool { + if x != nil { + if x, ok := x.WellKnown.(*BytesRules_Ipv6); ok { + return x.Ipv6 + } + } + return false +} + +func (x *BytesRules) GetUuid() bool { + if x != nil { + if x, ok := x.WellKnown.(*BytesRules_Uuid); ok { + return x.Uuid + } + } + return false +} + +func (x *BytesRules) GetExample() [][]byte { + if x != nil { + return x.Example + } + return nil +} + +func (x *BytesRules) SetConst(v []byte) { + if v == nil { + v = []byte{} + } + x.Const = v +} + +func (x *BytesRules) SetLen(v uint64) { + x.Len = &v +} + +func (x *BytesRules) SetMinLen(v uint64) { + x.MinLen = &v +} + +func (x *BytesRules) SetMaxLen(v uint64) { + x.MaxLen = &v +} + +func (x *BytesRules) SetPattern(v string) { + x.Pattern = &v +} + +func (x *BytesRules) SetPrefix(v []byte) { + if v == nil { + v = []byte{} + } + x.Prefix = v +} + +func (x *BytesRules) SetSuffix(v []byte) { + if v == nil { + v = []byte{} + } + x.Suffix = v +} + +func (x *BytesRules) SetContains(v []byte) { + if v == nil { + v = []byte{} + } + x.Contains = v +} + +func (x *BytesRules) SetIn(v [][]byte) { + x.In = v +} + +func (x *BytesRules) SetNotIn(v [][]byte) { + x.NotIn = v +} + +func (x *BytesRules) SetIp(v bool) { + x.WellKnown = &BytesRules_Ip{v} +} + +func (x *BytesRules) SetIpv4(v bool) { + x.WellKnown = &BytesRules_Ipv4{v} +} + +func (x *BytesRules) SetIpv6(v bool) { + x.WellKnown = &BytesRules_Ipv6{v} +} + +func (x *BytesRules) SetUuid(v bool) { + x.WellKnown = &BytesRules_Uuid{v} 
+} + +func (x *BytesRules) SetExample(v [][]byte) { + x.Example = v +} + +func (x *BytesRules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *BytesRules) HasLen() bool { + if x == nil { + return false + } + return x.Len != nil +} + +func (x *BytesRules) HasMinLen() bool { + if x == nil { + return false + } + return x.MinLen != nil +} + +func (x *BytesRules) HasMaxLen() bool { + if x == nil { + return false + } + return x.MaxLen != nil +} + +func (x *BytesRules) HasPattern() bool { + if x == nil { + return false + } + return x.Pattern != nil +} + +func (x *BytesRules) HasPrefix() bool { + if x == nil { + return false + } + return x.Prefix != nil +} + +func (x *BytesRules) HasSuffix() bool { + if x == nil { + return false + } + return x.Suffix != nil +} + +func (x *BytesRules) HasContains() bool { + if x == nil { + return false + } + return x.Contains != nil +} + +func (x *BytesRules) HasWellKnown() bool { + if x == nil { + return false + } + return x.WellKnown != nil +} + +func (x *BytesRules) HasIp() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*BytesRules_Ip) + return ok +} + +func (x *BytesRules) HasIpv4() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*BytesRules_Ipv4) + return ok +} + +func (x *BytesRules) HasIpv6() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*BytesRules_Ipv6) + return ok +} + +func (x *BytesRules) HasUuid() bool { + if x == nil { + return false + } + _, ok := x.WellKnown.(*BytesRules_Uuid) + return ok +} + +func (x *BytesRules) ClearConst() { + x.Const = nil +} + +func (x *BytesRules) ClearLen() { + x.Len = nil +} + +func (x *BytesRules) ClearMinLen() { + x.MinLen = nil +} + +func (x *BytesRules) ClearMaxLen() { + x.MaxLen = nil +} + +func (x *BytesRules) ClearPattern() { + x.Pattern = nil +} + +func (x *BytesRules) ClearPrefix() { + x.Prefix = nil +} + +func (x *BytesRules) ClearSuffix() { + x.Suffix = nil +} + +func (x *BytesRules) 
ClearContains() { + x.Contains = nil +} + +func (x *BytesRules) ClearWellKnown() { + x.WellKnown = nil +} + +func (x *BytesRules) ClearIp() { + if _, ok := x.WellKnown.(*BytesRules_Ip); ok { + x.WellKnown = nil + } +} + +func (x *BytesRules) ClearIpv4() { + if _, ok := x.WellKnown.(*BytesRules_Ipv4); ok { + x.WellKnown = nil + } +} + +func (x *BytesRules) ClearIpv6() { + if _, ok := x.WellKnown.(*BytesRules_Ipv6); ok { + x.WellKnown = nil + } +} + +func (x *BytesRules) ClearUuid() { + if _, ok := x.WellKnown.(*BytesRules_Uuid); ok { + x.WellKnown = nil + } +} + +const BytesRules_WellKnown_not_set_case case_BytesRules_WellKnown = 0 +const BytesRules_Ip_case case_BytesRules_WellKnown = 10 +const BytesRules_Ipv4_case case_BytesRules_WellKnown = 11 +const BytesRules_Ipv6_case case_BytesRules_WellKnown = 12 +const BytesRules_Uuid_case case_BytesRules_WellKnown = 15 + +func (x *BytesRules) WhichWellKnown() case_BytesRules_WellKnown { + if x == nil { + return BytesRules_WellKnown_not_set_case + } + switch x.WellKnown.(type) { + case *BytesRules_Ip: + return BytesRules_Ip_case + case *BytesRules_Ipv4: + return BytesRules_Ipv4_case + case *BytesRules_Ipv6: + return BytesRules_Ipv6_case + case *BytesRules_Uuid: + return BytesRules_Uuid_case + default: + return BytesRules_WellKnown_not_set_case + } +} + +type BytesRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified bytes + // value. If the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must be "\x01\x02\x03\x04" + // bytes value = 1 [(buf.validate.field).bytes.const = "\x01\x02\x03\x04"]; + // } + // + // ``` + Const []byte + // `len` requires the field value to have the specified length in bytes. + // If the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MyBytes { + // // value length must be 4 bytes. + // optional bytes value = 1 [(buf.validate.field).bytes.len = 4]; + // } + // + // ``` + Len *uint64 + // `min_len` requires the field value to have at least the specified minimum + // length in bytes. + // If the field value doesn't meet the requirement, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value length must be at least 2 bytes. + // optional bytes value = 1 [(buf.validate.field).bytes.min_len = 2]; + // } + // + // ``` + MinLen *uint64 + // `max_len` requires the field value to have at most the specified maximum + // length in bytes. + // If the field value exceeds the requirement, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must be at most 6 bytes. + // optional bytes value = 1 [(buf.validate.field).bytes.max_len = 6]; + // } + // + // ``` + MaxLen *uint64 + // `pattern` requires the field value to match the specified regular + // expression ([RE2 syntax](https://github.com/google/re2/wiki/Syntax)). + // The value of the field must be valid UTF-8 or validation will fail with a + // runtime error. + // If the field value doesn't match the pattern, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must match regex pattern "^[a-zA-Z0-9]+$". + // optional bytes value = 1 [(buf.validate.field).bytes.pattern = "^[a-zA-Z0-9]+$"]; + // } + // + // ``` + Pattern *string + // `prefix` requires the field value to have the specified bytes at the + // beginning of the string. + // If the field value doesn't meet the requirement, an error message is generated. 
+ // + // ```proto + // + // message MyBytes { + // // value does not have prefix \x01\x02 + // optional bytes value = 1 [(buf.validate.field).bytes.prefix = "\x01\x02"]; + // } + // + // ``` + Prefix []byte + // `suffix` requires the field value to have the specified bytes at the end + // of the string. + // If the field value doesn't meet the requirement, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value does not have suffix \x03\x04 + // optional bytes value = 1 [(buf.validate.field).bytes.suffix = "\x03\x04"]; + // } + // + // ``` + Suffix []byte + // `contains` requires the field value to have the specified bytes anywhere in + // the string. + // If the field value doesn't meet the requirement, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value does not contain \x02\x03 + // optional bytes value = 1 [(buf.validate.field).bytes.contains = "\x02\x03"]; + // } + // + // ``` + Contains []byte + // `in` requires the field value to be equal to one of the specified + // values. If the field value doesn't match any of the specified values, an + // error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must in ["\x01\x02", "\x02\x03", "\x03\x04"] + // optional bytes value = 1 [(buf.validate.field).bytes.in = {"\x01\x02", "\x02\x03", "\x03\x04"}]; + // } + // + // ``` + In [][]byte + // `not_in` requires the field value to be not equal to any of the specified + // values. + // If the field value matches any of the specified values, an error message is + // generated. 
+ // + // ```proto + // + // message MyBytes { + // // value must not in ["\x01\x02", "\x02\x03", "\x03\x04"] + // optional bytes value = 1 [(buf.validate.field).bytes.not_in = {"\x01\x02", "\x02\x03", "\x03\x04"}]; + // } + // + // ``` + NotIn [][]byte + // WellKnown rules provide advanced rules against common byte + // patterns + + // Fields of oneof WellKnown: + // `ip` ensures that the field `value` is a valid IP address (v4 or v6) in byte format. + // If the field value doesn't meet this rule, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must be a valid IP address + // optional bytes value = 1 [(buf.validate.field).bytes.ip = true]; + // } + // + // ``` + Ip *bool + // `ipv4` ensures that the field `value` is a valid IPv4 address in byte format. + // If the field value doesn't meet this rule, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must be a valid IPv4 address + // optional bytes value = 1 [(buf.validate.field).bytes.ipv4 = true]; + // } + // + // ``` + Ipv4 *bool + // `ipv6` ensures that the field `value` is a valid IPv6 address in byte format. + // If the field value doesn't meet this rule, an error message is generated. + // ```proto + // + // message MyBytes { + // // value must be a valid IPv6 address + // optional bytes value = 1 [(buf.validate.field).bytes.ipv6 = true]; + // } + // + // ``` + Ipv6 *bool + // `uuid` ensures that the field `value` encodes the 128-bit UUID data as + // defined by [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2). + // The field must contain exactly 16 bytes + // representing the UUID. If the field value isn't a valid UUID, an error + // message will be generated. 
+ // + // ```proto + // + // message MyBytes { + // // value must be a valid UUID + // optional bytes value = 1 [(buf.validate.field).bytes.uuid = true]; + // } + // + // ``` + Uuid *bool + // -- end of WellKnown + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyBytes { + // bytes value = 1 [ + // (buf.validate.field).bytes.example = "\x01\x02", + // (buf.validate.field).bytes.example = "\x02\x03" + // ]; + // } + // + // ``` + Example [][]byte +} + +func (b0 BytesRules_builder) Build() *BytesRules { + m0 := &BytesRules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + x.Len = b.Len + x.MinLen = b.MinLen + x.MaxLen = b.MaxLen + x.Pattern = b.Pattern + x.Prefix = b.Prefix + x.Suffix = b.Suffix + x.Contains = b.Contains + x.In = b.In + x.NotIn = b.NotIn + if b.Ip != nil { + x.WellKnown = &BytesRules_Ip{*b.Ip} + } + if b.Ipv4 != nil { + x.WellKnown = &BytesRules_Ipv4{*b.Ipv4} + } + if b.Ipv6 != nil { + x.WellKnown = &BytesRules_Ipv6{*b.Ipv6} + } + if b.Uuid != nil { + x.WellKnown = &BytesRules_Uuid{*b.Uuid} + } + x.Example = b.Example + return m0 +} + +type case_BytesRules_WellKnown protoreflect.FieldNumber + +func (x case_BytesRules_WellKnown) String() string { + md := file_buf_validate_validate_proto_msgTypes[20].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isBytesRules_WellKnown interface { + isBytesRules_WellKnown() +} + +type BytesRules_Ip struct { + // `ip` ensures that the field `value` is a valid IP address (v4 or v6) in byte format. + // If the field value doesn't meet this rule, an error message is generated. 
+ // + // ```proto + // + // message MyBytes { + // // value must be a valid IP address + // optional bytes value = 1 [(buf.validate.field).bytes.ip = true]; + // } + // + // ``` + Ip bool `protobuf:"varint,10,opt,name=ip,oneof"` +} + +type BytesRules_Ipv4 struct { + // `ipv4` ensures that the field `value` is a valid IPv4 address in byte format. + // If the field value doesn't meet this rule, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must be a valid IPv4 address + // optional bytes value = 1 [(buf.validate.field).bytes.ipv4 = true]; + // } + // + // ``` + Ipv4 bool `protobuf:"varint,11,opt,name=ipv4,oneof"` +} + +type BytesRules_Ipv6 struct { + // `ipv6` ensures that the field `value` is a valid IPv6 address in byte format. + // If the field value doesn't meet this rule, an error message is generated. + // ```proto + // + // message MyBytes { + // // value must be a valid IPv6 address + // optional bytes value = 1 [(buf.validate.field).bytes.ipv6 = true]; + // } + // + // ``` + Ipv6 bool `protobuf:"varint,12,opt,name=ipv6,oneof"` +} + +type BytesRules_Uuid struct { + // `uuid` ensures that the field `value` encodes the 128-bit UUID data as + // defined by [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2). + // The field must contain exactly 16 bytes + // representing the UUID. If the field value isn't a valid UUID, an error + // message will be generated. + // + // ```proto + // + // message MyBytes { + // // value must be a valid UUID + // optional bytes value = 1 [(buf.validate.field).bytes.uuid = true]; + // } + // + // ``` + Uuid bool `protobuf:"varint,15,opt,name=uuid,oneof"` +} + +func (*BytesRules_Ip) isBytesRules_WellKnown() {} + +func (*BytesRules_Ipv4) isBytesRules_WellKnown() {} + +func (*BytesRules_Ipv6) isBytesRules_WellKnown() {} + +func (*BytesRules_Uuid) isBytesRules_WellKnown() {} + +// EnumRules describe the rules applied to `enum` values. 
+type EnumRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` requires the field value to exactly match the specified enum value. + // If the field value doesn't match, an error message is generated. + // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // // The field `value` must be exactly MY_ENUM_VALUE1. + // MyEnum value = 1 [(buf.validate.field).enum.const = 1]; + // } + // + // ``` + Const *int32 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // `defined_only` requires the field value to be one of the defined values for + // this enum, failing on any undefined value. + // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // // The field `value` must be a defined value of MyEnum. + // MyEnum value = 1 [(buf.validate.field).enum.defined_only = true]; + // } + // + // ``` + DefinedOnly *bool `protobuf:"varint,2,opt,name=defined_only,json=definedOnly" json:"defined_only,omitempty"` + // `in` requires the field value to be equal to one of the + // specified enum values. If the field value doesn't match any of the + // specified values, an error message is generated. + // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // // The field `value` must be equal to one of the specified values. + // MyEnum value = 1 [(buf.validate.field).enum = { in: [1, 2]}]; + // } + // + // ``` + In []int32 `protobuf:"varint,3,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to be not equal to any of the + // specified enum values. If the field value matches one of the specified + // values, an error message is generated. 
+ // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // // The field `value` must not be equal to any of the specified values. + // MyEnum value = 1 [(buf.validate.field).enum = { not_in: [1, 2]}]; + // } + // + // ``` + NotIn []int32 `protobuf:"varint,4,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // (buf.validate.field).enum.example = 1, + // (buf.validate.field).enum.example = 2 + // } + // + // ``` + Example []int32 `protobuf:"varint,5,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EnumRules) Reset() { + *x = EnumRules{} + mi := &file_buf_validate_validate_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EnumRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumRules) ProtoMessage() {} + +func (x *EnumRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EnumRules) GetConst() int32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *EnumRules) GetDefinedOnly() bool { + if x != nil && x.DefinedOnly != nil { + return *x.DefinedOnly + } + return false +} + +func (x *EnumRules) 
GetIn() []int32 { + if x != nil { + return x.In + } + return nil +} + +func (x *EnumRules) GetNotIn() []int32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *EnumRules) GetExample() []int32 { + if x != nil { + return x.Example + } + return nil +} + +func (x *EnumRules) SetConst(v int32) { + x.Const = &v +} + +func (x *EnumRules) SetDefinedOnly(v bool) { + x.DefinedOnly = &v +} + +func (x *EnumRules) SetIn(v []int32) { + x.In = v +} + +func (x *EnumRules) SetNotIn(v []int32) { + x.NotIn = v +} + +func (x *EnumRules) SetExample(v []int32) { + x.Example = v +} + +func (x *EnumRules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *EnumRules) HasDefinedOnly() bool { + if x == nil { + return false + } + return x.DefinedOnly != nil +} + +func (x *EnumRules) ClearConst() { + x.Const = nil +} + +func (x *EnumRules) ClearDefinedOnly() { + x.DefinedOnly = nil +} + +type EnumRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified enum value. + // If the field value doesn't match, an error message is generated. + // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // // The field `value` must be exactly MY_ENUM_VALUE1. + // MyEnum value = 1 [(buf.validate.field).enum.const = 1]; + // } + // + // ``` + Const *int32 + // `defined_only` requires the field value to be one of the defined values for + // this enum, failing on any undefined value. + // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // // The field `value` must be a defined value of MyEnum. 
+ // MyEnum value = 1 [(buf.validate.field).enum.defined_only = true]; + // } + // + // ``` + DefinedOnly *bool + // `in` requires the field value to be equal to one of the + // specified enum values. If the field value doesn't match any of the + // specified values, an error message is generated. + // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // // The field `value` must be equal to one of the specified values. + // MyEnum value = 1 [(buf.validate.field).enum = { in: [1, 2]}]; + // } + // + // ``` + In []int32 + // `not_in` requires the field value to be not equal to any of the + // specified enum values. If the field value matches one of the specified + // values, an error message is generated. + // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // // The field `value` must not be equal to any of the specified values. + // MyEnum value = 1 [(buf.validate.field).enum = { not_in: [1, 2]}]; + // } + // + // ``` + NotIn []int32 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // (buf.validate.field).enum.example = 1, + // (buf.validate.field).enum.example = 2 + // } + // + // ``` + Example []int32 +} + +func (b0 EnumRules_builder) Build() *EnumRules { + m0 := &EnumRules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + x.DefinedOnly = b.DefinedOnly + x.In = b.In + x.NotIn = b.NotIn + x.Example = b.Example + return m0 +} + +// RepeatedRules describe the rules applied to `repeated` values. 
+type RepeatedRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `min_items` requires that this field must contain at least the specified + // minimum number of items. + // + // Note that `min_items = 1` is equivalent to setting a field as `required`. + // + // ```proto + // + // message MyRepeated { + // // value must contain at least 2 items + // repeated string value = 1 [(buf.validate.field).repeated.min_items = 2]; + // } + // + // ``` + MinItems *uint64 `protobuf:"varint,1,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + // `max_items` denotes that this field must not exceed a + // certain number of items as the upper limit. If the field contains more + // items than specified, an error message will be generated, requiring the + // field to maintain no more than the specified number of items. + // + // ```proto + // + // message MyRepeated { + // // value must contain no more than 3 item(s) + // repeated string value = 1 [(buf.validate.field).repeated.max_items = 3]; + // } + // + // ``` + MaxItems *uint64 `protobuf:"varint,2,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + // `unique` indicates that all elements in this field must + // be unique. This rule is strictly applicable to scalar and enum + // types, with message types not being supported. + // + // ```proto + // + // message MyRepeated { + // // repeated value must contain unique items + // repeated string value = 1 [(buf.validate.field).repeated.unique = true]; + // } + // + // ``` + Unique *bool `protobuf:"varint,3,opt,name=unique" json:"unique,omitempty"` + // `items` details the rules to be applied to each item + // in the field. Even for repeated message fields, validation is executed + // against each item unless `ignore` is specified. + // + // ```proto + // + // message MyRepeated { + // // The items in the field `value` must follow the specified rules. 
+ // repeated string value = 1 [(buf.validate.field).repeated.items = { + // string: { + // min_len: 3 + // max_len: 10 + // } + // }]; + // } + // + // ``` + // + // Note that the `required` rule does not apply. Repeated items + // cannot be unset. + Items *FieldRules `protobuf:"bytes,4,opt,name=items" json:"items,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RepeatedRules) Reset() { + *x = RepeatedRules{} + mi := &file_buf_validate_validate_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RepeatedRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RepeatedRules) ProtoMessage() {} + +func (x *RepeatedRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RepeatedRules) GetMinItems() uint64 { + if x != nil && x.MinItems != nil { + return *x.MinItems + } + return 0 +} + +func (x *RepeatedRules) GetMaxItems() uint64 { + if x != nil && x.MaxItems != nil { + return *x.MaxItems + } + return 0 +} + +func (x *RepeatedRules) GetUnique() bool { + if x != nil && x.Unique != nil { + return *x.Unique + } + return false +} + +func (x *RepeatedRules) GetItems() *FieldRules { + if x != nil { + return x.Items + } + return nil +} + +func (x *RepeatedRules) SetMinItems(v uint64) { + x.MinItems = &v +} + +func (x *RepeatedRules) SetMaxItems(v uint64) { + x.MaxItems = &v +} + +func (x *RepeatedRules) SetUnique(v bool) { + x.Unique = &v +} + +func (x *RepeatedRules) SetItems(v *FieldRules) { + x.Items = v +} + +func (x *RepeatedRules) HasMinItems() bool { + if x == nil { + return false + } + return x.MinItems != nil +} + +func (x *RepeatedRules) HasMaxItems() bool 
{ + if x == nil { + return false + } + return x.MaxItems != nil +} + +func (x *RepeatedRules) HasUnique() bool { + if x == nil { + return false + } + return x.Unique != nil +} + +func (x *RepeatedRules) HasItems() bool { + if x == nil { + return false + } + return x.Items != nil +} + +func (x *RepeatedRules) ClearMinItems() { + x.MinItems = nil +} + +func (x *RepeatedRules) ClearMaxItems() { + x.MaxItems = nil +} + +func (x *RepeatedRules) ClearUnique() { + x.Unique = nil +} + +func (x *RepeatedRules) ClearItems() { + x.Items = nil +} + +type RepeatedRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `min_items` requires that this field must contain at least the specified + // minimum number of items. + // + // Note that `min_items = 1` is equivalent to setting a field as `required`. + // + // ```proto + // + // message MyRepeated { + // // value must contain at least 2 items + // repeated string value = 1 [(buf.validate.field).repeated.min_items = 2]; + // } + // + // ``` + MinItems *uint64 + // `max_items` denotes that this field must not exceed a + // certain number of items as the upper limit. If the field contains more + // items than specified, an error message will be generated, requiring the + // field to maintain no more than the specified number of items. + // + // ```proto + // + // message MyRepeated { + // // value must contain no more than 3 item(s) + // repeated string value = 1 [(buf.validate.field).repeated.max_items = 3]; + // } + // + // ``` + MaxItems *uint64 + // `unique` indicates that all elements in this field must + // be unique. This rule is strictly applicable to scalar and enum + // types, with message types not being supported. 
+ // + // ```proto + // + // message MyRepeated { + // // repeated value must contain unique items + // repeated string value = 1 [(buf.validate.field).repeated.unique = true]; + // } + // + // ``` + Unique *bool + // `items` details the rules to be applied to each item + // in the field. Even for repeated message fields, validation is executed + // against each item unless `ignore` is specified. + // + // ```proto + // + // message MyRepeated { + // // The items in the field `value` must follow the specified rules. + // repeated string value = 1 [(buf.validate.field).repeated.items = { + // string: { + // min_len: 3 + // max_len: 10 + // } + // }]; + // } + // + // ``` + // + // Note that the `required` rule does not apply. Repeated items + // cannot be unset. + Items *FieldRules +} + +func (b0 RepeatedRules_builder) Build() *RepeatedRules { + m0 := &RepeatedRules{} + b, x := &b0, m0 + _, _ = b, x + x.MinItems = b.MinItems + x.MaxItems = b.MaxItems + x.Unique = b.Unique + x.Items = b.Items + return m0 +} + +// MapRules describe the rules applied to `map` values. +type MapRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // Specifies the minimum number of key-value pairs allowed. If the field has + // fewer key-value pairs than specified, an error message is generated. + // + // ```proto + // + // message MyMap { + // // The field `value` must have at least 2 key-value pairs. + // map value = 1 [(buf.validate.field).map.min_pairs = 2]; + // } + // + // ``` + MinPairs *uint64 `protobuf:"varint,1,opt,name=min_pairs,json=minPairs" json:"min_pairs,omitempty"` + // Specifies the maximum number of key-value pairs allowed. If the field has + // more key-value pairs than specified, an error message is generated. + // + // ```proto + // + // message MyMap { + // // The field `value` must have at most 3 key-value pairs. 
+ // map value = 1 [(buf.validate.field).map.max_pairs = 3]; + // } + // + // ``` + MaxPairs *uint64 `protobuf:"varint,2,opt,name=max_pairs,json=maxPairs" json:"max_pairs,omitempty"` + // Specifies the rules to be applied to each key in the field. + // + // ```proto + // + // message MyMap { + // // The keys in the field `value` must follow the specified rules. + // map value = 1 [(buf.validate.field).map.keys = { + // string: { + // min_len: 3 + // max_len: 10 + // } + // }]; + // } + // + // ``` + // + // Note that the `required` rule does not apply. Map keys cannot be unset. + Keys *FieldRules `protobuf:"bytes,4,opt,name=keys" json:"keys,omitempty"` + // Specifies the rules to be applied to the value of each key in the + // field. Message values will still have their validations evaluated unless + // `ignore` is specified. + // + // ```proto + // + // message MyMap { + // // The values in the field `value` must follow the specified rules. + // map value = 1 [(buf.validate.field).map.values = { + // string: { + // min_len: 5 + // max_len: 20 + // } + // }]; + // } + // + // ``` + // Note that the `required` rule does not apply. Map values cannot be unset. 
+ Values *FieldRules `protobuf:"bytes,5,opt,name=values" json:"values,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MapRules) Reset() { + *x = MapRules{} + mi := &file_buf_validate_validate_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MapRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapRules) ProtoMessage() {} + +func (x *MapRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *MapRules) GetMinPairs() uint64 { + if x != nil && x.MinPairs != nil { + return *x.MinPairs + } + return 0 +} + +func (x *MapRules) GetMaxPairs() uint64 { + if x != nil && x.MaxPairs != nil { + return *x.MaxPairs + } + return 0 +} + +func (x *MapRules) GetKeys() *FieldRules { + if x != nil { + return x.Keys + } + return nil +} + +func (x *MapRules) GetValues() *FieldRules { + if x != nil { + return x.Values + } + return nil +} + +func (x *MapRules) SetMinPairs(v uint64) { + x.MinPairs = &v +} + +func (x *MapRules) SetMaxPairs(v uint64) { + x.MaxPairs = &v +} + +func (x *MapRules) SetKeys(v *FieldRules) { + x.Keys = v +} + +func (x *MapRules) SetValues(v *FieldRules) { + x.Values = v +} + +func (x *MapRules) HasMinPairs() bool { + if x == nil { + return false + } + return x.MinPairs != nil +} + +func (x *MapRules) HasMaxPairs() bool { + if x == nil { + return false + } + return x.MaxPairs != nil +} + +func (x *MapRules) HasKeys() bool { + if x == nil { + return false + } + return x.Keys != nil +} + +func (x *MapRules) HasValues() bool { + if x == nil { + return false + } + return x.Values != nil +} + +func (x *MapRules) ClearMinPairs() { + x.MinPairs = nil +} + 
+func (x *MapRules) ClearMaxPairs() { + x.MaxPairs = nil +} + +func (x *MapRules) ClearKeys() { + x.Keys = nil +} + +func (x *MapRules) ClearValues() { + x.Values = nil +} + +type MapRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Specifies the minimum number of key-value pairs allowed. If the field has + // fewer key-value pairs than specified, an error message is generated. + // + // ```proto + // + // message MyMap { + // // The field `value` must have at least 2 key-value pairs. + // map value = 1 [(buf.validate.field).map.min_pairs = 2]; + // } + // + // ``` + MinPairs *uint64 + // Specifies the maximum number of key-value pairs allowed. If the field has + // more key-value pairs than specified, an error message is generated. + // + // ```proto + // + // message MyMap { + // // The field `value` must have at most 3 key-value pairs. + // map value = 1 [(buf.validate.field).map.max_pairs = 3]; + // } + // + // ``` + MaxPairs *uint64 + // Specifies the rules to be applied to each key in the field. + // + // ```proto + // + // message MyMap { + // // The keys in the field `value` must follow the specified rules. + // map value = 1 [(buf.validate.field).map.keys = { + // string: { + // min_len: 3 + // max_len: 10 + // } + // }]; + // } + // + // ``` + // + // Note that the `required` rule does not apply. Map keys cannot be unset. + Keys *FieldRules + // Specifies the rules to be applied to the value of each key in the + // field. Message values will still have their validations evaluated unless + // `ignore` is specified. + // + // ```proto + // + // message MyMap { + // // The values in the field `value` must follow the specified rules. + // map value = 1 [(buf.validate.field).map.values = { + // string: { + // min_len: 5 + // max_len: 20 + // } + // }]; + // } + // + // ``` + // Note that the `required` rule does not apply. Map values cannot be unset. 
+ Values *FieldRules +} + +func (b0 MapRules_builder) Build() *MapRules { + m0 := &MapRules{} + b, x := &b0, m0 + _, _ = b, x + x.MinPairs = b.MinPairs + x.MaxPairs = b.MaxPairs + x.Keys = b.Keys + x.Values = b.Values + return m0 +} + +// AnyRules describe rules applied exclusively to the `google.protobuf.Any` well-known type. +type AnyRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `in` requires the field's `type_url` to be equal to one of the + // specified values. If it doesn't match any of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyAny { + // // The `value` field must have a `type_url` equal to one of the specified values. + // google.protobuf.Any value = 1 [(buf.validate.field).any = { + // in: ["type.googleapis.com/MyType1", "type.googleapis.com/MyType2"] + // }]; + // } + // + // ``` + In []string `protobuf:"bytes,2,rep,name=in" json:"in,omitempty"` + // requires the field's type_url to be not equal to any of the specified values. If it matches any of the specified values, an error message is generated. + // + // ```proto + // + // message MyAny { + // // The `value` field must not have a `type_url` equal to any of the specified values. 
+ // google.protobuf.Any value = 1 [(buf.validate.field).any = { + // not_in: ["type.googleapis.com/ForbiddenType1", "type.googleapis.com/ForbiddenType2"] + // }]; + // } + // + // ``` + NotIn []string `protobuf:"bytes,3,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AnyRules) Reset() { + *x = AnyRules{} + mi := &file_buf_validate_validate_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AnyRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnyRules) ProtoMessage() {} + +func (x *AnyRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *AnyRules) GetIn() []string { + if x != nil { + return x.In + } + return nil +} + +func (x *AnyRules) GetNotIn() []string { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *AnyRules) SetIn(v []string) { + x.In = v +} + +func (x *AnyRules) SetNotIn(v []string) { + x.NotIn = v +} + +type AnyRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `in` requires the field's `type_url` to be equal to one of the + // specified values. If it doesn't match any of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyAny { + // // The `value` field must have a `type_url` equal to one of the specified values. + // google.protobuf.Any value = 1 [(buf.validate.field).any = { + // in: ["type.googleapis.com/MyType1", "type.googleapis.com/MyType2"] + // }]; + // } + // + // ``` + In []string + // requires the field's type_url to be not equal to any of the specified values. 
If it matches any of the specified values, an error message is generated. + // + // ```proto + // + // message MyAny { + // // The `value` field must not have a `type_url` equal to any of the specified values. + // google.protobuf.Any value = 1 [(buf.validate.field).any = { + // not_in: ["type.googleapis.com/ForbiddenType1", "type.googleapis.com/ForbiddenType2"] + // }]; + // } + // + // ``` + NotIn []string +} + +func (b0 AnyRules_builder) Build() *AnyRules { + m0 := &AnyRules{} + b, x := &b0, m0 + _, _ = b, x + x.In = b.In + x.NotIn = b.NotIn + return m0 +} + +// DurationRules describe the rules applied exclusively to the `google.protobuf.Duration` well-known type. +type DurationRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` dictates that the field must match the specified value of the `google.protobuf.Duration` type exactly. + // If the field's value deviates from the specified value, an error message + // will be generated. + // + // ```proto + // + // message MyDuration { + // // value must equal 5s + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.const = "5s"]; + // } + // + // ``` + Const *durationpb.Duration `protobuf:"bytes,2,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *DurationRules_Lt + // *DurationRules_Lte + LessThan isDurationRules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *DurationRules_Gt + // *DurationRules_Gte + GreaterThan isDurationRules_GreaterThan `protobuf_oneof:"greater_than"` + // `in` asserts that the field must be equal to one of the specified values of the `google.protobuf.Duration` type. + // If the field's value doesn't correspond to any of the specified values, + // an error message will be generated. 
+ // + // ```proto + // + // message MyDuration { + // // value must be in list [1s, 2s, 3s] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.in = ["1s", "2s", "3s"]]; + // } + // + // ``` + In []*durationpb.Duration `protobuf:"bytes,7,rep,name=in" json:"in,omitempty"` + // `not_in` denotes that the field must not be equal to + // any of the specified values of the `google.protobuf.Duration` type. + // If the field's value matches any of these values, an error message will be + // generated. + // + // ```proto + // + // message MyDuration { + // // value must not be in list [1s, 2s, 3s] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.not_in = ["1s", "2s", "3s"]]; + // } + // + // ``` + NotIn []*durationpb.Duration `protobuf:"bytes,8,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyDuration { + // google.protobuf.Duration value = 1 [ + // (buf.validate.field).duration.example = { seconds: 1 }, + // (buf.validate.field).duration.example = { seconds: 2 }, + // ]; + // } + // + // ``` + Example []*durationpb.Duration `protobuf:"bytes,9,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DurationRules) Reset() { + *x = DurationRules{} + mi := &file_buf_validate_validate_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DurationRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurationRules) ProtoMessage() {} + +func (x *DurationRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *DurationRules) GetConst() *durationpb.Duration { + if x != nil { + return x.Const + } + return nil +} + +func (x *DurationRules) GetLessThan() isDurationRules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *DurationRules) GetLt() *durationpb.Duration { + if x != nil { + if x, ok := x.LessThan.(*DurationRules_Lt); ok { + return x.Lt + } + } + return nil +} + +func (x *DurationRules) GetLte() *durationpb.Duration { + if x != nil { + if x, ok := x.LessThan.(*DurationRules_Lte); ok { + return x.Lte + } + } + return nil +} + +func (x *DurationRules) GetGreaterThan() isDurationRules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *DurationRules) GetGt() *durationpb.Duration { + if x != nil { + if x, ok := x.GreaterThan.(*DurationRules_Gt); ok { + return x.Gt + } + } + return nil +} + +func (x *DurationRules) GetGte() 
*durationpb.Duration { + if x != nil { + if x, ok := x.GreaterThan.(*DurationRules_Gte); ok { + return x.Gte + } + } + return nil +} + +func (x *DurationRules) GetIn() []*durationpb.Duration { + if x != nil { + return x.In + } + return nil +} + +func (x *DurationRules) GetNotIn() []*durationpb.Duration { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *DurationRules) GetExample() []*durationpb.Duration { + if x != nil { + return x.Example + } + return nil +} + +func (x *DurationRules) SetConst(v *durationpb.Duration) { + x.Const = v +} + +func (x *DurationRules) SetLt(v *durationpb.Duration) { + if v == nil { + x.LessThan = nil + return + } + x.LessThan = &DurationRules_Lt{v} +} + +func (x *DurationRules) SetLte(v *durationpb.Duration) { + if v == nil { + x.LessThan = nil + return + } + x.LessThan = &DurationRules_Lte{v} +} + +func (x *DurationRules) SetGt(v *durationpb.Duration) { + if v == nil { + x.GreaterThan = nil + return + } + x.GreaterThan = &DurationRules_Gt{v} +} + +func (x *DurationRules) SetGte(v *durationpb.Duration) { + if v == nil { + x.GreaterThan = nil + return + } + x.GreaterThan = &DurationRules_Gte{v} +} + +func (x *DurationRules) SetIn(v []*durationpb.Duration) { + x.In = v +} + +func (x *DurationRules) SetNotIn(v []*durationpb.Duration) { + x.NotIn = v +} + +func (x *DurationRules) SetExample(v []*durationpb.Duration) { + x.Example = v +} + +func (x *DurationRules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *DurationRules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != nil +} + +func (x *DurationRules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*DurationRules_Lt) + return ok +} + +func (x *DurationRules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*DurationRules_Lte) + return ok +} + +func (x *DurationRules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan 
!= nil +} + +func (x *DurationRules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*DurationRules_Gt) + return ok +} + +func (x *DurationRules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*DurationRules_Gte) + return ok +} + +func (x *DurationRules) ClearConst() { + x.Const = nil +} + +func (x *DurationRules) ClearLessThan() { + x.LessThan = nil +} + +func (x *DurationRules) ClearLt() { + if _, ok := x.LessThan.(*DurationRules_Lt); ok { + x.LessThan = nil + } +} + +func (x *DurationRules) ClearLte() { + if _, ok := x.LessThan.(*DurationRules_Lte); ok { + x.LessThan = nil + } +} + +func (x *DurationRules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *DurationRules) ClearGt() { + if _, ok := x.GreaterThan.(*DurationRules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *DurationRules) ClearGte() { + if _, ok := x.GreaterThan.(*DurationRules_Gte); ok { + x.GreaterThan = nil + } +} + +const DurationRules_LessThan_not_set_case case_DurationRules_LessThan = 0 +const DurationRules_Lt_case case_DurationRules_LessThan = 3 +const DurationRules_Lte_case case_DurationRules_LessThan = 4 + +func (x *DurationRules) WhichLessThan() case_DurationRules_LessThan { + if x == nil { + return DurationRules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *DurationRules_Lt: + return DurationRules_Lt_case + case *DurationRules_Lte: + return DurationRules_Lte_case + default: + return DurationRules_LessThan_not_set_case + } +} + +const DurationRules_GreaterThan_not_set_case case_DurationRules_GreaterThan = 0 +const DurationRules_Gt_case case_DurationRules_GreaterThan = 5 +const DurationRules_Gte_case case_DurationRules_GreaterThan = 6 + +func (x *DurationRules) WhichGreaterThan() case_DurationRules_GreaterThan { + if x == nil { + return DurationRules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *DurationRules_Gt: + return DurationRules_Gt_case + case *DurationRules_Gte: + return 
DurationRules_Gte_case + default: + return DurationRules_GreaterThan_not_set_case + } +} + +type DurationRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` dictates that the field must match the specified value of the `google.protobuf.Duration` type exactly. + // If the field's value deviates from the specified value, an error message + // will be generated. + // + // ```proto + // + // message MyDuration { + // // value must equal 5s + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.const = "5s"]; + // } + // + // ``` + Const *durationpb.Duration + // Fields of oneof LessThan: + // `lt` stipulates that the field must be less than the specified value of the `google.protobuf.Duration` type, + // exclusive. If the field's value is greater than or equal to the specified + // value, an error message will be generated. + // + // ```proto + // + // message MyDuration { + // // value must be less than 5s + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.lt = "5s"]; + // } + // + // ``` + Lt *durationpb.Duration + // `lte` indicates that the field must be less than or equal to the specified + // value of the `google.protobuf.Duration` type, inclusive. If the field's value is greater than the specified value, + // an error message will be generated. + // + // ```proto + // + // message MyDuration { + // // value must be less than or equal to 10s + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.lte = "10s"]; + // } + // + // ``` + Lte *durationpb.Duration + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the duration field value to be greater than the specified + // value (exclusive). If the value of `gt` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. 
If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyDuration { + // // duration must be greater than 5s [duration.gt] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.gt = { seconds: 5 }]; + // + // // duration must be greater than 5s and less than 10s [duration.gt_lt] + // google.protobuf.Duration another_value = 2 [(buf.validate.field).duration = { gt: { seconds: 5 }, lt: { seconds: 10 } }]; + // + // // duration must be greater than 10s or less than 5s [duration.gt_lt_exclusive] + // google.protobuf.Duration other_value = 3 [(buf.validate.field).duration = { gt: { seconds: 10 }, lt: { seconds: 5 } }]; + // } + // + // ``` + Gt *durationpb.Duration + // `gte` requires the duration field value to be greater than or equal to the + // specified value (exclusive). If the value of `gte` is larger than a + // specified `lt` or `lte`, the range is reversed, and the field value must + // be outside the specified range. If the field value doesn't meet the + // required conditions, an error message is generated. + // + // ```proto + // + // message MyDuration { + // // duration must be greater than or equal to 5s [duration.gte] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.gte = { seconds: 5 }]; + // + // // duration must be greater than or equal to 5s and less than 10s [duration.gte_lt] + // google.protobuf.Duration another_value = 2 [(buf.validate.field).duration = { gte: { seconds: 5 }, lt: { seconds: 10 } }]; + // + // // duration must be greater than or equal to 10s or less than 5s [duration.gte_lt_exclusive] + // google.protobuf.Duration other_value = 3 [(buf.validate.field).duration = { gte: { seconds: 10 }, lt: { seconds: 5 } }]; + // } + // + // ``` + Gte *durationpb.Duration + // -- end of GreaterThan + // `in` asserts that the field must be equal to one of the specified values of the `google.protobuf.Duration` type. 
+ // If the field's value doesn't correspond to any of the specified values, + // an error message will be generated. + // + // ```proto + // + // message MyDuration { + // // value must be in list [1s, 2s, 3s] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.in = ["1s", "2s", "3s"]]; + // } + // + // ``` + In []*durationpb.Duration + // `not_in` denotes that the field must not be equal to + // any of the specified values of the `google.protobuf.Duration` type. + // If the field's value matches any of these values, an error message will be + // generated. + // + // ```proto + // + // message MyDuration { + // // value must not be in list [1s, 2s, 3s] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.not_in = ["1s", "2s", "3s"]]; + // } + // + // ``` + NotIn []*durationpb.Duration + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyDuration { + // google.protobuf.Duration value = 1 [ + // (buf.validate.field).duration.example = { seconds: 1 }, + // (buf.validate.field).duration.example = { seconds: 2 }, + // ]; + // } + // + // ``` + Example []*durationpb.Duration +} + +func (b0 DurationRules_builder) Build() *DurationRules { + m0 := &DurationRules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &DurationRules_Lt{b.Lt} + } + if b.Lte != nil { + x.LessThan = &DurationRules_Lte{b.Lte} + } + if b.Gt != nil { + x.GreaterThan = &DurationRules_Gt{b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &DurationRules_Gte{b.Gte} + } + x.In = b.In + x.NotIn = b.NotIn + x.Example = b.Example + return m0 +} + +type case_DurationRules_LessThan protoreflect.FieldNumber + +func (x case_DurationRules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[25].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_DurationRules_GreaterThan protoreflect.FieldNumber + +func (x case_DurationRules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[25].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isDurationRules_LessThan interface { + isDurationRules_LessThan() +} + +type DurationRules_Lt struct { + // `lt` stipulates that the field must be less than the specified value of the `google.protobuf.Duration` type, + // exclusive. If the field's value is greater than or equal to the specified + // value, an error message will be generated. 
+ // + // ```proto + // + // message MyDuration { + // // value must be less than 5s + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.lt = "5s"]; + // } + // + // ``` + Lt *durationpb.Duration `protobuf:"bytes,3,opt,name=lt,oneof"` +} + +type DurationRules_Lte struct { + // `lte` indicates that the field must be less than or equal to the specified + // value of the `google.protobuf.Duration` type, inclusive. If the field's value is greater than the specified value, + // an error message will be generated. + // + // ```proto + // + // message MyDuration { + // // value must be less than or equal to 10s + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.lte = "10s"]; + // } + // + // ``` + Lte *durationpb.Duration `protobuf:"bytes,4,opt,name=lte,oneof"` +} + +func (*DurationRules_Lt) isDurationRules_LessThan() {} + +func (*DurationRules_Lte) isDurationRules_LessThan() {} + +type isDurationRules_GreaterThan interface { + isDurationRules_GreaterThan() +} + +type DurationRules_Gt struct { + // `gt` requires the duration field value to be greater than the specified + // value (exclusive). If the value of `gt` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyDuration { + // // duration must be greater than 5s [duration.gt] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.gt = { seconds: 5 }]; + // + // // duration must be greater than 5s and less than 10s [duration.gt_lt] + // google.protobuf.Duration another_value = 2 [(buf.validate.field).duration = { gt: { seconds: 5 }, lt: { seconds: 10 } }]; + // + // // duration must be greater than 10s or less than 5s [duration.gt_lt_exclusive] + // google.protobuf.Duration other_value = 3 [(buf.validate.field).duration = { gt: { seconds: 10 }, lt: { seconds: 5 } }]; + // } + // + // ``` + Gt *durationpb.Duration `protobuf:"bytes,5,opt,name=gt,oneof"` +} + +type DurationRules_Gte struct { + // `gte` requires the duration field value to be greater than or equal to the + // specified value (exclusive). If the value of `gte` is larger than a + // specified `lt` or `lte`, the range is reversed, and the field value must + // be outside the specified range. If the field value doesn't meet the + // required conditions, an error message is generated. 
+ // + // ```proto + // + // message MyDuration { + // // duration must be greater than or equal to 5s [duration.gte] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.gte = { seconds: 5 }]; + // + // // duration must be greater than or equal to 5s and less than 10s [duration.gte_lt] + // google.protobuf.Duration another_value = 2 [(buf.validate.field).duration = { gte: { seconds: 5 }, lt: { seconds: 10 } }]; + // + // // duration must be greater than or equal to 10s or less than 5s [duration.gte_lt_exclusive] + // google.protobuf.Duration other_value = 3 [(buf.validate.field).duration = { gte: { seconds: 10 }, lt: { seconds: 5 } }]; + // } + // + // ``` + Gte *durationpb.Duration `protobuf:"bytes,6,opt,name=gte,oneof"` +} + +func (*DurationRules_Gt) isDurationRules_GreaterThan() {} + +func (*DurationRules_Gte) isDurationRules_GreaterThan() {} + +// FieldMaskRules describe rules applied exclusively to the `google.protobuf.FieldMask` well-known type. +type FieldMaskRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` dictates that the field must match the specified value of the `google.protobuf.FieldMask` type exactly. + // If the field's value deviates from the specified value, an error message + // will be generated. + // + // ```proto + // + // message MyFieldMask { + // // value must equal ["a"] + // google.protobuf.FieldMask value = 1 [(buf.validate.field).field_mask.const = { + // paths: ["a"] + // }]; + // } + // + // ``` + Const *fieldmaskpb.FieldMask `protobuf:"bytes,1,opt,name=const" json:"const,omitempty"` + // `in` requires the field value to only contain paths matching specified + // values or their subpaths. + // If any of the field value's paths doesn't match the rule, + // an error message is generated. + // See: https://protobuf.dev/reference/protobuf/google.protobuf/#field-mask + // + // ```proto + // + // message MyFieldMask { + // // The `value` FieldMask must only contain paths listed in `in`. 
+ // google.protobuf.FieldMask value = 1 [(buf.validate.field).field_mask = { + // in: ["a", "b", "c.a"] + // }]; + // } + // + // ``` + In []string `protobuf:"bytes,2,rep,name=in" json:"in,omitempty"` + // `not_in` requires the field value to not contain paths matching specified + // values or their subpaths. + // If any of the field value's paths matches the rule, + // an error message is generated. + // See: https://protobuf.dev/reference/protobuf/google.protobuf/#field-mask + // + // ```proto + // + // message MyFieldMask { + // // The `value` FieldMask shall not contain paths listed in `not_in`. + // google.protobuf.FieldMask value = 1 [(buf.validate.field).field_mask = { + // not_in: ["forbidden", "immutable", "c.a"] + // }]; + // } + // + // ``` + NotIn []string `protobuf:"bytes,3,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyFieldMask { + // google.protobuf.FieldMask value = 1 [ + // (buf.validate.field).field_mask.example = { paths: ["a", "b"] }, + // (buf.validate.field).field_mask.example = { paths: ["c.a", "d"] }, + // ]; + // } + // + // ``` + Example []*fieldmaskpb.FieldMask `protobuf:"bytes,4,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FieldMaskRules) Reset() { + *x = FieldMaskRules{} + mi := &file_buf_validate_validate_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FieldMaskRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldMaskRules) ProtoMessage() {} + +func (x *FieldMaskRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FieldMaskRules) GetConst() *fieldmaskpb.FieldMask { + if x != nil { + return x.Const + } + return nil +} + +func (x *FieldMaskRules) GetIn() []string { + if x != nil { + return x.In + } + return nil +} + +func (x *FieldMaskRules) GetNotIn() []string { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *FieldMaskRules) GetExample() []*fieldmaskpb.FieldMask { + if x != nil { + return x.Example + } + return nil +} + +func (x *FieldMaskRules) SetConst(v *fieldmaskpb.FieldMask) { + x.Const = v +} + +func (x *FieldMaskRules) SetIn(v []string) { + x.In = v +} + +func (x *FieldMaskRules) SetNotIn(v []string) { + x.NotIn = v +} + +func (x *FieldMaskRules) SetExample(v []*fieldmaskpb.FieldMask) { + x.Example = v +} + +func (x *FieldMaskRules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *FieldMaskRules) 
ClearConst() { + x.Const = nil +} + +type FieldMaskRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` dictates that the field must match the specified value of the `google.protobuf.FieldMask` type exactly. + // If the field's value deviates from the specified value, an error message + // will be generated. + // + // ```proto + // + // message MyFieldMask { + // // value must equal ["a"] + // google.protobuf.FieldMask value = 1 [(buf.validate.field).field_mask.const = { + // paths: ["a"] + // }]; + // } + // + // ``` + Const *fieldmaskpb.FieldMask + // `in` requires the field value to only contain paths matching specified + // values or their subpaths. + // If any of the field value's paths doesn't match the rule, + // an error message is generated. + // See: https://protobuf.dev/reference/protobuf/google.protobuf/#field-mask + // + // ```proto + // + // message MyFieldMask { + // // The `value` FieldMask must only contain paths listed in `in`. + // google.protobuf.FieldMask value = 1 [(buf.validate.field).field_mask = { + // in: ["a", "b", "c.a"] + // }]; + // } + // + // ``` + In []string + // `not_in` requires the field value to not contain paths matching specified + // values or their subpaths. + // If any of the field value's paths matches the rule, + // an error message is generated. + // See: https://protobuf.dev/reference/protobuf/google.protobuf/#field-mask + // + // ```proto + // + // message MyFieldMask { + // // The `value` FieldMask shall not contain paths listed in `not_in`. + // google.protobuf.FieldMask value = 1 [(buf.validate.field).field_mask = { + // not_in: ["forbidden", "immutable", "c.a"] + // }]; + // } + // + // ``` + NotIn []string + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyFieldMask { + // google.protobuf.FieldMask value = 1 [ + // (buf.validate.field).field_mask.example = { paths: ["a", "b"] }, + // (buf.validate.field).field_mask.example = { paths: ["c.a", "d"] }, + // ]; + // } + // + // ``` + Example []*fieldmaskpb.FieldMask +} + +func (b0 FieldMaskRules_builder) Build() *FieldMaskRules { + m0 := &FieldMaskRules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + x.In = b.In + x.NotIn = b.NotIn + x.Example = b.Example + return m0 +} + +// TimestampRules describe the rules applied exclusively to the `google.protobuf.Timestamp` well-known type. +type TimestampRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `const` dictates that this field, of the `google.protobuf.Timestamp` type, must exactly match the specified value. If the field value doesn't correspond to the specified timestamp, an error message will be generated. + // + // ```proto + // + // message MyTimestamp { + // // value must equal 2023-05-03T10:00:00Z + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.const = {seconds: 1727998800}]; + // } + // + // ``` + Const *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=const" json:"const,omitempty"` + // Types that are valid to be assigned to LessThan: + // + // *TimestampRules_Lt + // *TimestampRules_Lte + // *TimestampRules_LtNow + LessThan isTimestampRules_LessThan `protobuf_oneof:"less_than"` + // Types that are valid to be assigned to GreaterThan: + // + // *TimestampRules_Gt + // *TimestampRules_Gte + // *TimestampRules_GtNow + GreaterThan isTimestampRules_GreaterThan `protobuf_oneof:"greater_than"` + // `within` specifies that this field, of the `google.protobuf.Timestamp` type, must be within the specified duration of the current time. If the field value isn't within the duration, an error message is generated. 
+ // + // ```proto + // + // message MyTimestamp { + // // value must be within 1 hour of now + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.within = {seconds: 3600}]; + // } + // + // ``` + Within *durationpb.Duration `protobuf:"bytes,9,opt,name=within" json:"within,omitempty"` + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyTimestamp { + // google.protobuf.Timestamp value = 1 [ + // (buf.validate.field).timestamp.example = { seconds: 1672444800 }, + // (buf.validate.field).timestamp.example = { seconds: 1672531200 }, + // ]; + // } + // + // ``` + Example []*timestamppb.Timestamp `protobuf:"bytes,10,rep,name=example" json:"example,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TimestampRules) Reset() { + *x = TimestampRules{} + mi := &file_buf_validate_validate_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TimestampRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimestampRules) ProtoMessage() {} + +func (x *TimestampRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TimestampRules) GetConst() *timestamppb.Timestamp { + if x != nil { + return x.Const + } + return nil +} + +func (x *TimestampRules) GetLessThan() isTimestampRules_LessThan { + if x != nil { + return x.LessThan + } + return nil +} + +func (x *TimestampRules) GetLt() *timestamppb.Timestamp { + if x != nil { + if x, ok := 
x.LessThan.(*TimestampRules_Lt); ok { + return x.Lt + } + } + return nil +} + +func (x *TimestampRules) GetLte() *timestamppb.Timestamp { + if x != nil { + if x, ok := x.LessThan.(*TimestampRules_Lte); ok { + return x.Lte + } + } + return nil +} + +func (x *TimestampRules) GetLtNow() bool { + if x != nil { + if x, ok := x.LessThan.(*TimestampRules_LtNow); ok { + return x.LtNow + } + } + return false +} + +func (x *TimestampRules) GetGreaterThan() isTimestampRules_GreaterThan { + if x != nil { + return x.GreaterThan + } + return nil +} + +func (x *TimestampRules) GetGt() *timestamppb.Timestamp { + if x != nil { + if x, ok := x.GreaterThan.(*TimestampRules_Gt); ok { + return x.Gt + } + } + return nil +} + +func (x *TimestampRules) GetGte() *timestamppb.Timestamp { + if x != nil { + if x, ok := x.GreaterThan.(*TimestampRules_Gte); ok { + return x.Gte + } + } + return nil +} + +func (x *TimestampRules) GetGtNow() bool { + if x != nil { + if x, ok := x.GreaterThan.(*TimestampRules_GtNow); ok { + return x.GtNow + } + } + return false +} + +func (x *TimestampRules) GetWithin() *durationpb.Duration { + if x != nil { + return x.Within + } + return nil +} + +func (x *TimestampRules) GetExample() []*timestamppb.Timestamp { + if x != nil { + return x.Example + } + return nil +} + +func (x *TimestampRules) SetConst(v *timestamppb.Timestamp) { + x.Const = v +} + +func (x *TimestampRules) SetLt(v *timestamppb.Timestamp) { + if v == nil { + x.LessThan = nil + return + } + x.LessThan = &TimestampRules_Lt{v} +} + +func (x *TimestampRules) SetLte(v *timestamppb.Timestamp) { + if v == nil { + x.LessThan = nil + return + } + x.LessThan = &TimestampRules_Lte{v} +} + +func (x *TimestampRules) SetLtNow(v bool) { + x.LessThan = &TimestampRules_LtNow{v} +} + +func (x *TimestampRules) SetGt(v *timestamppb.Timestamp) { + if v == nil { + x.GreaterThan = nil + return + } + x.GreaterThan = &TimestampRules_Gt{v} +} + +func (x *TimestampRules) SetGte(v *timestamppb.Timestamp) { + if v == nil { + 
x.GreaterThan = nil + return + } + x.GreaterThan = &TimestampRules_Gte{v} +} + +func (x *TimestampRules) SetGtNow(v bool) { + x.GreaterThan = &TimestampRules_GtNow{v} +} + +func (x *TimestampRules) SetWithin(v *durationpb.Duration) { + x.Within = v +} + +func (x *TimestampRules) SetExample(v []*timestamppb.Timestamp) { + x.Example = v +} + +func (x *TimestampRules) HasConst() bool { + if x == nil { + return false + } + return x.Const != nil +} + +func (x *TimestampRules) HasLessThan() bool { + if x == nil { + return false + } + return x.LessThan != nil +} + +func (x *TimestampRules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*TimestampRules_Lt) + return ok +} + +func (x *TimestampRules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*TimestampRules_Lte) + return ok +} + +func (x *TimestampRules) HasLtNow() bool { + if x == nil { + return false + } + _, ok := x.LessThan.(*TimestampRules_LtNow) + return ok +} + +func (x *TimestampRules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.GreaterThan != nil +} + +func (x *TimestampRules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*TimestampRules_Gt) + return ok +} + +func (x *TimestampRules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*TimestampRules_Gte) + return ok +} + +func (x *TimestampRules) HasGtNow() bool { + if x == nil { + return false + } + _, ok := x.GreaterThan.(*TimestampRules_GtNow) + return ok +} + +func (x *TimestampRules) HasWithin() bool { + if x == nil { + return false + } + return x.Within != nil +} + +func (x *TimestampRules) ClearConst() { + x.Const = nil +} + +func (x *TimestampRules) ClearLessThan() { + x.LessThan = nil +} + +func (x *TimestampRules) ClearLt() { + if _, ok := x.LessThan.(*TimestampRules_Lt); ok { + x.LessThan = nil + } +} + +func (x *TimestampRules) ClearLte() { + if _, ok := x.LessThan.(*TimestampRules_Lte); ok { + x.LessThan = nil + } 
+} + +func (x *TimestampRules) ClearLtNow() { + if _, ok := x.LessThan.(*TimestampRules_LtNow); ok { + x.LessThan = nil + } +} + +func (x *TimestampRules) ClearGreaterThan() { + x.GreaterThan = nil +} + +func (x *TimestampRules) ClearGt() { + if _, ok := x.GreaterThan.(*TimestampRules_Gt); ok { + x.GreaterThan = nil + } +} + +func (x *TimestampRules) ClearGte() { + if _, ok := x.GreaterThan.(*TimestampRules_Gte); ok { + x.GreaterThan = nil + } +} + +func (x *TimestampRules) ClearGtNow() { + if _, ok := x.GreaterThan.(*TimestampRules_GtNow); ok { + x.GreaterThan = nil + } +} + +func (x *TimestampRules) ClearWithin() { + x.Within = nil +} + +const TimestampRules_LessThan_not_set_case case_TimestampRules_LessThan = 0 +const TimestampRules_Lt_case case_TimestampRules_LessThan = 3 +const TimestampRules_Lte_case case_TimestampRules_LessThan = 4 +const TimestampRules_LtNow_case case_TimestampRules_LessThan = 7 + +func (x *TimestampRules) WhichLessThan() case_TimestampRules_LessThan { + if x == nil { + return TimestampRules_LessThan_not_set_case + } + switch x.LessThan.(type) { + case *TimestampRules_Lt: + return TimestampRules_Lt_case + case *TimestampRules_Lte: + return TimestampRules_Lte_case + case *TimestampRules_LtNow: + return TimestampRules_LtNow_case + default: + return TimestampRules_LessThan_not_set_case + } +} + +const TimestampRules_GreaterThan_not_set_case case_TimestampRules_GreaterThan = 0 +const TimestampRules_Gt_case case_TimestampRules_GreaterThan = 5 +const TimestampRules_Gte_case case_TimestampRules_GreaterThan = 6 +const TimestampRules_GtNow_case case_TimestampRules_GreaterThan = 8 + +func (x *TimestampRules) WhichGreaterThan() case_TimestampRules_GreaterThan { + if x == nil { + return TimestampRules_GreaterThan_not_set_case + } + switch x.GreaterThan.(type) { + case *TimestampRules_Gt: + return TimestampRules_Gt_case + case *TimestampRules_Gte: + return TimestampRules_Gte_case + case *TimestampRules_GtNow: + return TimestampRules_GtNow_case + 
default: + return TimestampRules_GreaterThan_not_set_case + } +} + +type TimestampRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` dictates that this field, of the `google.protobuf.Timestamp` type, must exactly match the specified value. If the field value doesn't correspond to the specified timestamp, an error message will be generated. + // + // ```proto + // + // message MyTimestamp { + // // value must equal 2023-05-03T10:00:00Z + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.const = {seconds: 1727998800}]; + // } + // + // ``` + Const *timestamppb.Timestamp + // Fields of oneof LessThan: + // requires the duration field value to be less than the specified value (field < value). If the field value doesn't meet the required conditions, an error message is generated. + // + // ```proto + // + // message MyDuration { + // // duration must be less than 'P3D' [duration.lt] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.lt = { seconds: 259200 }]; + // } + // + // ``` + Lt *timestamppb.Timestamp + // requires the timestamp field value to be less than or equal to the specified value (field <= value). If the field value doesn't meet the required conditions, an error message is generated. + // + // ```proto + // + // message MyTimestamp { + // // timestamp must be less than or equal to '2023-05-14T00:00:00Z' [timestamp.lte] + // google.protobuf.Timestamp value = 1 [(buf.validate.field).timestamp.lte = { seconds: 1678867200 }]; + // } + // + // ``` + Lte *timestamppb.Timestamp + // `lt_now` specifies that this field, of the `google.protobuf.Timestamp` type, must be less than the current time. `lt_now` can only be used with the `within` rule. 
+ // + // ```proto + // + // message MyTimestamp { + // // value must be less than now + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.lt_now = true]; + // } + // + // ``` + LtNow *bool + // -- end of LessThan + // Fields of oneof GreaterThan: + // `gt` requires the timestamp field value to be greater than the specified + // value (exclusive). If the value of `gt` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyTimestamp { + // // timestamp must be greater than '2023-01-01T00:00:00Z' [timestamp.gt] + // google.protobuf.Timestamp value = 1 [(buf.validate.field).timestamp.gt = { seconds: 1672444800 }]; + // + // // timestamp must be greater than '2023-01-01T00:00:00Z' and less than '2023-01-02T00:00:00Z' [timestamp.gt_lt] + // google.protobuf.Timestamp another_value = 2 [(buf.validate.field).timestamp = { gt: { seconds: 1672444800 }, lt: { seconds: 1672531200 } }]; + // + // // timestamp must be greater than '2023-01-02T00:00:00Z' or less than '2023-01-01T00:00:00Z' [timestamp.gt_lt_exclusive] + // google.protobuf.Timestamp other_value = 3 [(buf.validate.field).timestamp = { gt: { seconds: 1672531200 }, lt: { seconds: 1672444800 } }]; + // } + // + // ``` + Gt *timestamppb.Timestamp + // `gte` requires the timestamp field value to be greater than or equal to the + // specified value (exclusive). If the value of `gte` is larger than a + // specified `lt` or `lte`, the range is reversed, and the field value + // must be outside the specified range. If the field value doesn't meet + // the required conditions, an error message is generated. 
+ // + // ```proto + // + // message MyTimestamp { + // // timestamp must be greater than or equal to '2023-01-01T00:00:00Z' [timestamp.gte] + // google.protobuf.Timestamp value = 1 [(buf.validate.field).timestamp.gte = { seconds: 1672444800 }]; + // + // // timestamp must be greater than or equal to '2023-01-01T00:00:00Z' and less than '2023-01-02T00:00:00Z' [timestamp.gte_lt] + // google.protobuf.Timestamp another_value = 2 [(buf.validate.field).timestamp = { gte: { seconds: 1672444800 }, lt: { seconds: 1672531200 } }]; + // + // // timestamp must be greater than or equal to '2023-01-02T00:00:00Z' or less than '2023-01-01T00:00:00Z' [timestamp.gte_lt_exclusive] + // google.protobuf.Timestamp other_value = 3 [(buf.validate.field).timestamp = { gte: { seconds: 1672531200 }, lt: { seconds: 1672444800 } }]; + // } + // + // ``` + Gte *timestamppb.Timestamp + // `gt_now` specifies that this field, of the `google.protobuf.Timestamp` type, must be greater than the current time. `gt_now` can only be used with the `within` rule. + // + // ```proto + // + // message MyTimestamp { + // // value must be greater than now + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.gt_now = true]; + // } + // + // ``` + GtNow *bool + // -- end of GreaterThan + // `within` specifies that this field, of the `google.protobuf.Timestamp` type, must be within the specified duration of the current time. If the field value isn't within the duration, an error message is generated. + // + // ```proto + // + // message MyTimestamp { + // // value must be within 1 hour of now + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.within = {seconds: 3600}]; + // } + // + // ``` + Within *durationpb.Duration + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyTimestamp { + // google.protobuf.Timestamp value = 1 [ + // (buf.validate.field).timestamp.example = { seconds: 1672444800 }, + // (buf.validate.field).timestamp.example = { seconds: 1672531200 }, + // ]; + // } + // + // ``` + Example []*timestamppb.Timestamp +} + +func (b0 TimestampRules_builder) Build() *TimestampRules { + m0 := &TimestampRules{} + b, x := &b0, m0 + _, _ = b, x + x.Const = b.Const + if b.Lt != nil { + x.LessThan = &TimestampRules_Lt{b.Lt} + } + if b.Lte != nil { + x.LessThan = &TimestampRules_Lte{b.Lte} + } + if b.LtNow != nil { + x.LessThan = &TimestampRules_LtNow{*b.LtNow} + } + if b.Gt != nil { + x.GreaterThan = &TimestampRules_Gt{b.Gt} + } + if b.Gte != nil { + x.GreaterThan = &TimestampRules_Gte{b.Gte} + } + if b.GtNow != nil { + x.GreaterThan = &TimestampRules_GtNow{*b.GtNow} + } + x.Within = b.Within + x.Example = b.Example + return m0 +} + +type case_TimestampRules_LessThan protoreflect.FieldNumber + +func (x case_TimestampRules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[27].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_TimestampRules_GreaterThan protoreflect.FieldNumber + +func (x case_TimestampRules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[27].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isTimestampRules_LessThan interface { + isTimestampRules_LessThan() +} + +type TimestampRules_Lt struct { + // requires the duration field value to be less than the specified value (field < value). If the field value doesn't meet the required conditions, an error message is generated. 
+ // + // ```proto + // + // message MyDuration { + // // duration must be less than 'P3D' [duration.lt] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.lt = { seconds: 259200 }]; + // } + // + // ``` + Lt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=lt,oneof"` +} + +type TimestampRules_Lte struct { + // requires the timestamp field value to be less than or equal to the specified value (field <= value). If the field value doesn't meet the required conditions, an error message is generated. + // + // ```proto + // + // message MyTimestamp { + // // timestamp must be less than or equal to '2023-05-14T00:00:00Z' [timestamp.lte] + // google.protobuf.Timestamp value = 1 [(buf.validate.field).timestamp.lte = { seconds: 1678867200 }]; + // } + // + // ``` + Lte *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=lte,oneof"` +} + +type TimestampRules_LtNow struct { + // `lt_now` specifies that this field, of the `google.protobuf.Timestamp` type, must be less than the current time. `lt_now` can only be used with the `within` rule. + // + // ```proto + // + // message MyTimestamp { + // // value must be less than now + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.lt_now = true]; + // } + // + // ``` + LtNow bool `protobuf:"varint,7,opt,name=lt_now,json=ltNow,oneof"` +} + +func (*TimestampRules_Lt) isTimestampRules_LessThan() {} + +func (*TimestampRules_Lte) isTimestampRules_LessThan() {} + +func (*TimestampRules_LtNow) isTimestampRules_LessThan() {} + +type isTimestampRules_GreaterThan interface { + isTimestampRules_GreaterThan() +} + +type TimestampRules_Gt struct { + // `gt` requires the timestamp field value to be greater than the specified + // value (exclusive). If the value of `gt` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyTimestamp { + // // timestamp must be greater than '2023-01-01T00:00:00Z' [timestamp.gt] + // google.protobuf.Timestamp value = 1 [(buf.validate.field).timestamp.gt = { seconds: 1672444800 }]; + // + // // timestamp must be greater than '2023-01-01T00:00:00Z' and less than '2023-01-02T00:00:00Z' [timestamp.gt_lt] + // google.protobuf.Timestamp another_value = 2 [(buf.validate.field).timestamp = { gt: { seconds: 1672444800 }, lt: { seconds: 1672531200 } }]; + // + // // timestamp must be greater than '2023-01-02T00:00:00Z' or less than '2023-01-01T00:00:00Z' [timestamp.gt_lt_exclusive] + // google.protobuf.Timestamp other_value = 3 [(buf.validate.field).timestamp = { gt: { seconds: 1672531200 }, lt: { seconds: 1672444800 } }]; + // } + // + // ``` + Gt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=gt,oneof"` +} + +type TimestampRules_Gte struct { + // `gte` requires the timestamp field value to be greater than or equal to the + // specified value (exclusive). If the value of `gte` is larger than a + // specified `lt` or `lte`, the range is reversed, and the field value + // must be outside the specified range. If the field value doesn't meet + // the required conditions, an error message is generated. 
+ // + // ```proto + // + // message MyTimestamp { + // // timestamp must be greater than or equal to '2023-01-01T00:00:00Z' [timestamp.gte] + // google.protobuf.Timestamp value = 1 [(buf.validate.field).timestamp.gte = { seconds: 1672444800 }]; + // + // // timestamp must be greater than or equal to '2023-01-01T00:00:00Z' and less than '2023-01-02T00:00:00Z' [timestamp.gte_lt] + // google.protobuf.Timestamp another_value = 2 [(buf.validate.field).timestamp = { gte: { seconds: 1672444800 }, lt: { seconds: 1672531200 } }]; + // + // // timestamp must be greater than or equal to '2023-01-02T00:00:00Z' or less than '2023-01-01T00:00:00Z' [timestamp.gte_lt_exclusive] + // google.protobuf.Timestamp other_value = 3 [(buf.validate.field).timestamp = { gte: { seconds: 1672531200 }, lt: { seconds: 1672444800 } }]; + // } + // + // ``` + Gte *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=gte,oneof"` +} + +type TimestampRules_GtNow struct { + // `gt_now` specifies that this field, of the `google.protobuf.Timestamp` type, must be greater than the current time. `gt_now` can only be used with the `within` rule. + // + // ```proto + // + // message MyTimestamp { + // // value must be greater than now + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.gt_now = true]; + // } + // + // ``` + GtNow bool `protobuf:"varint,8,opt,name=gt_now,json=gtNow,oneof"` +} + +func (*TimestampRules_Gt) isTimestampRules_GreaterThan() {} + +func (*TimestampRules_Gte) isTimestampRules_GreaterThan() {} + +func (*TimestampRules_GtNow) isTimestampRules_GreaterThan() {} + +// `Violations` is a collection of `Violation` messages. This message type is returned by +// Protovalidate when a proto message fails to meet the requirements set by the `Rule` validation rules. +// Each individual violation is represented by a `Violation` message. 
+type Violations struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `violations` is a repeated field that contains all the `Violation` messages corresponding to the violations detected. + Violations []*Violation `protobuf:"bytes,1,rep,name=violations" json:"violations,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Violations) Reset() { + *x = Violations{} + mi := &file_buf_validate_validate_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Violations) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Violations) ProtoMessage() {} + +func (x *Violations) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[28] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Violations) GetViolations() []*Violation { + if x != nil { + return x.Violations + } + return nil +} + +func (x *Violations) SetViolations(v []*Violation) { + x.Violations = v +} + +type Violations_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `violations` is a repeated field that contains all the `Violation` messages corresponding to the violations detected. + Violations []*Violation +} + +func (b0 Violations_builder) Build() *Violations { + m0 := &Violations{} + b, x := &b0, m0 + _, _ = b, x + x.Violations = b.Violations + return m0 +} + +// `Violation` represents a single instance where a validation rule, expressed +// as a `Rule`, was not met. It provides information about the field that +// caused the violation, the specific rule that wasn't fulfilled, and a +// human-readable error message. 
+// +// For example, consider the following message: +// +// ```proto +// +// message User { +// int32 age = 1 [(buf.validate.field).cel = { +// id: "user.age", +// expression: "this < 18 ? 'User must be at least 18 years old' : ''", +// }]; +// } +// +// ``` +// +// It could produce the following violation: +// +// ```json +// +// { +// "ruleId": "user.age", +// "message": "User must be at least 18 years old", +// "field": { +// "elements": [ +// { +// "fieldNumber": 1, +// "fieldName": "age", +// "fieldType": "TYPE_INT32" +// } +// ] +// }, +// "rule": { +// "elements": [ +// { +// "fieldNumber": 23, +// "fieldName": "cel", +// "fieldType": "TYPE_MESSAGE", +// "index": "0" +// } +// ] +// } +// } +// +// ``` +type Violation struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `field` is a machine-readable path to the field that failed validation. + // This could be a nested field, in which case the path will include all the parent fields leading to the actual field that caused the violation. + // + // For example, consider the following message: + // + // ```proto + // + // message Message { + // bool a = 1 [(buf.validate.field).required = true]; + // } + // + // ``` + // + // It could produce the following violation: + // + // ```textproto + // + // violation { + // field { element { field_number: 1, field_name: "a", field_type: 8 } } + // ... + // } + // + // ``` + Field *FieldPath `protobuf:"bytes,5,opt,name=field" json:"field,omitempty"` + // `rule` is a machine-readable path that points to the specific rule that failed validation. + // This will be a nested field starting from the FieldRules of the field that failed validation. + // For custom rules, this will provide the path of the rule, e.g. `cel[0]`. 
+ // + // For example, consider the following message: + // + // ```proto + // + // message Message { + // bool a = 1 [(buf.validate.field).required = true]; + // bool b = 2 [(buf.validate.field).cel = { + // id: "custom_rule", + // expression: "!this ? 'b must be true': ''" + // }] + // } + // + // ``` + // + // It could produce the following violations: + // + // ```textproto + // + // violation { + // rule { element { field_number: 25, field_name: "required", field_type: 8 } } + // ... + // } + // + // violation { + // rule { element { field_number: 23, field_name: "cel", field_type: 11, index: 0 } } + // ... + // } + // + // ``` + Rule *FieldPath `protobuf:"bytes,6,opt,name=rule" json:"rule,omitempty"` + // `rule_id` is the unique identifier of the `Rule` that was not fulfilled. + // This is the same `id` that was specified in the `Rule` message, allowing easy tracing of which rule was violated. + RuleId *string `protobuf:"bytes,2,opt,name=rule_id,json=ruleId" json:"rule_id,omitempty"` + // `message` is a human-readable error message that describes the nature of the violation. + // This can be the default error message from the violated `Rule`, or it can be a custom message that gives more context about the violation. + Message *string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` + // `for_key` indicates whether the violation was caused by a map key, rather than a value. 
+ ForKey *bool `protobuf:"varint,4,opt,name=for_key,json=forKey" json:"for_key,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Violation) Reset() { + *x = Violation{} + mi := &file_buf_validate_validate_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Violation) ProtoMessage() {} + +func (x *Violation) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[29] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Violation) GetField() *FieldPath { + if x != nil { + return x.Field + } + return nil +} + +func (x *Violation) GetRule() *FieldPath { + if x != nil { + return x.Rule + } + return nil +} + +func (x *Violation) GetRuleId() string { + if x != nil && x.RuleId != nil { + return *x.RuleId + } + return "" +} + +func (x *Violation) GetMessage() string { + if x != nil && x.Message != nil { + return *x.Message + } + return "" +} + +func (x *Violation) GetForKey() bool { + if x != nil && x.ForKey != nil { + return *x.ForKey + } + return false +} + +func (x *Violation) SetField(v *FieldPath) { + x.Field = v +} + +func (x *Violation) SetRule(v *FieldPath) { + x.Rule = v +} + +func (x *Violation) SetRuleId(v string) { + x.RuleId = &v +} + +func (x *Violation) SetMessage(v string) { + x.Message = &v +} + +func (x *Violation) SetForKey(v bool) { + x.ForKey = &v +} + +func (x *Violation) HasField() bool { + if x == nil { + return false + } + return x.Field != nil +} + +func (x *Violation) HasRule() bool { + if x == nil { + return false + } + return x.Rule != nil +} + +func (x *Violation) HasRuleId() bool { + if x == nil { + return false + } + return x.RuleId != nil +} + +func (x *Violation) HasMessage() 
bool { + if x == nil { + return false + } + return x.Message != nil +} + +func (x *Violation) HasForKey() bool { + if x == nil { + return false + } + return x.ForKey != nil +} + +func (x *Violation) ClearField() { + x.Field = nil +} + +func (x *Violation) ClearRule() { + x.Rule = nil +} + +func (x *Violation) ClearRuleId() { + x.RuleId = nil +} + +func (x *Violation) ClearMessage() { + x.Message = nil +} + +func (x *Violation) ClearForKey() { + x.ForKey = nil +} + +type Violation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `field` is a machine-readable path to the field that failed validation. + // This could be a nested field, in which case the path will include all the parent fields leading to the actual field that caused the violation. + // + // For example, consider the following message: + // + // ```proto + // + // message Message { + // bool a = 1 [(buf.validate.field).required = true]; + // } + // + // ``` + // + // It could produce the following violation: + // + // ```textproto + // + // violation { + // field { element { field_number: 1, field_name: "a", field_type: 8 } } + // ... + // } + // + // ``` + Field *FieldPath + // `rule` is a machine-readable path that points to the specific rule that failed validation. + // This will be a nested field starting from the FieldRules of the field that failed validation. + // For custom rules, this will provide the path of the rule, e.g. `cel[0]`. + // + // For example, consider the following message: + // + // ```proto + // + // message Message { + // bool a = 1 [(buf.validate.field).required = true]; + // bool b = 2 [(buf.validate.field).cel = { + // id: "custom_rule", + // expression: "!this ? 'b must be true': ''" + // }] + // } + // + // ``` + // + // It could produce the following violations: + // + // ```textproto + // + // violation { + // rule { element { field_number: 25, field_name: "required", field_type: 8 } } + // ... 
+ // } + // + // violation { + // rule { element { field_number: 23, field_name: "cel", field_type: 11, index: 0 } } + // ... + // } + // + // ``` + Rule *FieldPath + // `rule_id` is the unique identifier of the `Rule` that was not fulfilled. + // This is the same `id` that was specified in the `Rule` message, allowing easy tracing of which rule was violated. + RuleId *string + // `message` is a human-readable error message that describes the nature of the violation. + // This can be the default error message from the violated `Rule`, or it can be a custom message that gives more context about the violation. + Message *string + // `for_key` indicates whether the violation was caused by a map key, rather than a value. + ForKey *bool +} + +func (b0 Violation_builder) Build() *Violation { + m0 := &Violation{} + b, x := &b0, m0 + _, _ = b, x + x.Field = b.Field + x.Rule = b.Rule + x.RuleId = b.RuleId + x.Message = b.Message + x.ForKey = b.ForKey + return m0 +} + +// `FieldPath` provides a path to a nested protobuf field. +// +// This message provides enough information to render a dotted field path even without protobuf descriptors. +// It also provides enough information to resolve a nested field through unknown wire data. +type FieldPath struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `elements` contains each element of the path, starting from the root and recursing downward. 
+ Elements []*FieldPathElement `protobuf:"bytes,1,rep,name=elements" json:"elements,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FieldPath) Reset() { + *x = FieldPath{} + mi := &file_buf_validate_validate_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FieldPath) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldPath) ProtoMessage() {} + +func (x *FieldPath) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FieldPath) GetElements() []*FieldPathElement { + if x != nil { + return x.Elements + } + return nil +} + +func (x *FieldPath) SetElements(v []*FieldPathElement) { + x.Elements = v +} + +type FieldPath_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `elements` contains each element of the path, starting from the root and recursing downward. + Elements []*FieldPathElement +} + +func (b0 FieldPath_builder) Build() *FieldPath { + m0 := &FieldPath{} + b, x := &b0, m0 + _, _ = b, x + x.Elements = b.Elements + return m0 +} + +// `FieldPathElement` provides enough information to nest through a single protobuf field. +// +// If the selected field is a map or repeated field, the `subscript` value selects a specific element from it. +// A path that refers to a value nested under a map key or repeated field index will have a `subscript` value. +// The `field_type` field allows unambiguous resolution of a field even if descriptors are not available. +type FieldPathElement struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `field_number` is the field number this path element refers to. 
+ FieldNumber *int32 `protobuf:"varint,1,opt,name=field_number,json=fieldNumber" json:"field_number,omitempty"` + // `field_name` contains the field name this path element refers to. + // This can be used to display a human-readable path even if the field number is unknown. + FieldName *string `protobuf:"bytes,2,opt,name=field_name,json=fieldName" json:"field_name,omitempty"` + // `field_type` specifies the type of this field. When using reflection, this value is not needed. + // + // This value is provided to make it possible to traverse unknown fields through wire data. + // When traversing wire data, be mindful of both packed[1] and delimited[2] encoding schemes. + // + // N.B.: Although groups are deprecated, the corresponding delimited encoding scheme is not, and + // can be explicitly used in Protocol Buffers 2023 Edition. + // + // [1]: https://protobuf.dev/programming-guides/encoding/#packed + // [2]: https://protobuf.dev/programming-guides/encoding/#groups + FieldType *descriptorpb.FieldDescriptorProto_Type `protobuf:"varint,3,opt,name=field_type,json=fieldType,enum=google.protobuf.FieldDescriptorProto_Type" json:"field_type,omitempty"` + // `key_type` specifies the map key type of this field. This value is useful when traversing + // unknown fields through wire data: specifically, it allows handling the differences between + // different integer encodings. + KeyType *descriptorpb.FieldDescriptorProto_Type `protobuf:"varint,4,opt,name=key_type,json=keyType,enum=google.protobuf.FieldDescriptorProto_Type" json:"key_type,omitempty"` + // `value_type` specifies map value type of this field. This is useful if you want to display a + // value inside unknown fields through wire data. 
+ ValueType *descriptorpb.FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=value_type,json=valueType,enum=google.protobuf.FieldDescriptorProto_Type" json:"value_type,omitempty"` + // `subscript` contains a repeated index or map key, if this path element nests into a repeated or map field. + // + // Types that are valid to be assigned to Subscript: + // + // *FieldPathElement_Index + // *FieldPathElement_BoolKey + // *FieldPathElement_IntKey + // *FieldPathElement_UintKey + // *FieldPathElement_StringKey + Subscript isFieldPathElement_Subscript `protobuf_oneof:"subscript"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FieldPathElement) Reset() { + *x = FieldPathElement{} + mi := &file_buf_validate_validate_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FieldPathElement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldPathElement) ProtoMessage() {} + +func (x *FieldPathElement) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[31] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FieldPathElement) GetFieldNumber() int32 { + if x != nil && x.FieldNumber != nil { + return *x.FieldNumber + } + return 0 +} + +func (x *FieldPathElement) GetFieldName() string { + if x != nil && x.FieldName != nil { + return *x.FieldName + } + return "" +} + +func (x *FieldPathElement) GetFieldType() descriptorpb.FieldDescriptorProto_Type { + if x != nil && x.FieldType != nil { + return *x.FieldType + } + return descriptorpb.FieldDescriptorProto_Type(1) +} + +func (x *FieldPathElement) GetKeyType() descriptorpb.FieldDescriptorProto_Type { + if x != nil && x.KeyType != nil { + return *x.KeyType + } + return descriptorpb.FieldDescriptorProto_Type(1) +} + +func (x 
*FieldPathElement) GetValueType() descriptorpb.FieldDescriptorProto_Type { + if x != nil && x.ValueType != nil { + return *x.ValueType + } + return descriptorpb.FieldDescriptorProto_Type(1) +} + +func (x *FieldPathElement) GetSubscript() isFieldPathElement_Subscript { + if x != nil { + return x.Subscript + } + return nil +} + +func (x *FieldPathElement) GetIndex() uint64 { + if x != nil { + if x, ok := x.Subscript.(*FieldPathElement_Index); ok { + return x.Index + } + } + return 0 +} + +func (x *FieldPathElement) GetBoolKey() bool { + if x != nil { + if x, ok := x.Subscript.(*FieldPathElement_BoolKey); ok { + return x.BoolKey + } + } + return false +} + +func (x *FieldPathElement) GetIntKey() int64 { + if x != nil { + if x, ok := x.Subscript.(*FieldPathElement_IntKey); ok { + return x.IntKey + } + } + return 0 +} + +func (x *FieldPathElement) GetUintKey() uint64 { + if x != nil { + if x, ok := x.Subscript.(*FieldPathElement_UintKey); ok { + return x.UintKey + } + } + return 0 +} + +func (x *FieldPathElement) GetStringKey() string { + if x != nil { + if x, ok := x.Subscript.(*FieldPathElement_StringKey); ok { + return x.StringKey + } + } + return "" +} + +func (x *FieldPathElement) SetFieldNumber(v int32) { + x.FieldNumber = &v +} + +func (x *FieldPathElement) SetFieldName(v string) { + x.FieldName = &v +} + +func (x *FieldPathElement) SetFieldType(v descriptorpb.FieldDescriptorProto_Type) { + x.FieldType = &v +} + +func (x *FieldPathElement) SetKeyType(v descriptorpb.FieldDescriptorProto_Type) { + x.KeyType = &v +} + +func (x *FieldPathElement) SetValueType(v descriptorpb.FieldDescriptorProto_Type) { + x.ValueType = &v +} + +func (x *FieldPathElement) SetIndex(v uint64) { + x.Subscript = &FieldPathElement_Index{v} +} + +func (x *FieldPathElement) SetBoolKey(v bool) { + x.Subscript = &FieldPathElement_BoolKey{v} +} + +func (x *FieldPathElement) SetIntKey(v int64) { + x.Subscript = &FieldPathElement_IntKey{v} +} + +func (x *FieldPathElement) SetUintKey(v uint64) { + 
x.Subscript = &FieldPathElement_UintKey{v} +} + +func (x *FieldPathElement) SetStringKey(v string) { + x.Subscript = &FieldPathElement_StringKey{v} +} + +func (x *FieldPathElement) HasFieldNumber() bool { + if x == nil { + return false + } + return x.FieldNumber != nil +} + +func (x *FieldPathElement) HasFieldName() bool { + if x == nil { + return false + } + return x.FieldName != nil +} + +func (x *FieldPathElement) HasFieldType() bool { + if x == nil { + return false + } + return x.FieldType != nil +} + +func (x *FieldPathElement) HasKeyType() bool { + if x == nil { + return false + } + return x.KeyType != nil +} + +func (x *FieldPathElement) HasValueType() bool { + if x == nil { + return false + } + return x.ValueType != nil +} + +func (x *FieldPathElement) HasSubscript() bool { + if x == nil { + return false + } + return x.Subscript != nil +} + +func (x *FieldPathElement) HasIndex() bool { + if x == nil { + return false + } + _, ok := x.Subscript.(*FieldPathElement_Index) + return ok +} + +func (x *FieldPathElement) HasBoolKey() bool { + if x == nil { + return false + } + _, ok := x.Subscript.(*FieldPathElement_BoolKey) + return ok +} + +func (x *FieldPathElement) HasIntKey() bool { + if x == nil { + return false + } + _, ok := x.Subscript.(*FieldPathElement_IntKey) + return ok +} + +func (x *FieldPathElement) HasUintKey() bool { + if x == nil { + return false + } + _, ok := x.Subscript.(*FieldPathElement_UintKey) + return ok +} + +func (x *FieldPathElement) HasStringKey() bool { + if x == nil { + return false + } + _, ok := x.Subscript.(*FieldPathElement_StringKey) + return ok +} + +func (x *FieldPathElement) ClearFieldNumber() { + x.FieldNumber = nil +} + +func (x *FieldPathElement) ClearFieldName() { + x.FieldName = nil +} + +func (x *FieldPathElement) ClearFieldType() { + x.FieldType = nil +} + +func (x *FieldPathElement) ClearKeyType() { + x.KeyType = nil +} + +func (x *FieldPathElement) ClearValueType() { + x.ValueType = nil +} + +func (x 
*FieldPathElement) ClearSubscript() { + x.Subscript = nil +} + +func (x *FieldPathElement) ClearIndex() { + if _, ok := x.Subscript.(*FieldPathElement_Index); ok { + x.Subscript = nil + } +} + +func (x *FieldPathElement) ClearBoolKey() { + if _, ok := x.Subscript.(*FieldPathElement_BoolKey); ok { + x.Subscript = nil + } +} + +func (x *FieldPathElement) ClearIntKey() { + if _, ok := x.Subscript.(*FieldPathElement_IntKey); ok { + x.Subscript = nil + } +} + +func (x *FieldPathElement) ClearUintKey() { + if _, ok := x.Subscript.(*FieldPathElement_UintKey); ok { + x.Subscript = nil + } +} + +func (x *FieldPathElement) ClearStringKey() { + if _, ok := x.Subscript.(*FieldPathElement_StringKey); ok { + x.Subscript = nil + } +} + +const FieldPathElement_Subscript_not_set_case case_FieldPathElement_Subscript = 0 +const FieldPathElement_Index_case case_FieldPathElement_Subscript = 6 +const FieldPathElement_BoolKey_case case_FieldPathElement_Subscript = 7 +const FieldPathElement_IntKey_case case_FieldPathElement_Subscript = 8 +const FieldPathElement_UintKey_case case_FieldPathElement_Subscript = 9 +const FieldPathElement_StringKey_case case_FieldPathElement_Subscript = 10 + +func (x *FieldPathElement) WhichSubscript() case_FieldPathElement_Subscript { + if x == nil { + return FieldPathElement_Subscript_not_set_case + } + switch x.Subscript.(type) { + case *FieldPathElement_Index: + return FieldPathElement_Index_case + case *FieldPathElement_BoolKey: + return FieldPathElement_BoolKey_case + case *FieldPathElement_IntKey: + return FieldPathElement_IntKey_case + case *FieldPathElement_UintKey: + return FieldPathElement_UintKey_case + case *FieldPathElement_StringKey: + return FieldPathElement_StringKey_case + default: + return FieldPathElement_Subscript_not_set_case + } +} + +type FieldPathElement_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `field_number` is the field number this path element refers to. 
+ FieldNumber *int32 + // `field_name` contains the field name this path element refers to. + // This can be used to display a human-readable path even if the field number is unknown. + FieldName *string + // `field_type` specifies the type of this field. When using reflection, this value is not needed. + // + // This value is provided to make it possible to traverse unknown fields through wire data. + // When traversing wire data, be mindful of both packed[1] and delimited[2] encoding schemes. + // + // N.B.: Although groups are deprecated, the corresponding delimited encoding scheme is not, and + // can be explicitly used in Protocol Buffers 2023 Edition. + // + // [1]: https://protobuf.dev/programming-guides/encoding/#packed + // [2]: https://protobuf.dev/programming-guides/encoding/#groups + FieldType *descriptorpb.FieldDescriptorProto_Type + // `key_type` specifies the map key type of this field. This value is useful when traversing + // unknown fields through wire data: specifically, it allows handling the differences between + // different integer encodings. + KeyType *descriptorpb.FieldDescriptorProto_Type + // `value_type` specifies map value type of this field. This is useful if you want to display a + // value inside unknown fields through wire data. + ValueType *descriptorpb.FieldDescriptorProto_Type + // `subscript` contains a repeated index or map key, if this path element nests into a repeated or map field. + + // Fields of oneof Subscript: + // `index` specifies a 0-based index into a repeated field. + Index *uint64 + // `bool_key` specifies a map key of type bool. + BoolKey *bool + // `int_key` specifies a map key of type int32, int64, sint32, sint64, sfixed32 or sfixed64. + IntKey *int64 + // `uint_key` specifies a map key of type uint32, uint64, fixed32 or fixed64. + UintKey *uint64 + // `string_key` specifies a map key of type string. 
+ StringKey *string + // -- end of Subscript +} + +func (b0 FieldPathElement_builder) Build() *FieldPathElement { + m0 := &FieldPathElement{} + b, x := &b0, m0 + _, _ = b, x + x.FieldNumber = b.FieldNumber + x.FieldName = b.FieldName + x.FieldType = b.FieldType + x.KeyType = b.KeyType + x.ValueType = b.ValueType + if b.Index != nil { + x.Subscript = &FieldPathElement_Index{*b.Index} + } + if b.BoolKey != nil { + x.Subscript = &FieldPathElement_BoolKey{*b.BoolKey} + } + if b.IntKey != nil { + x.Subscript = &FieldPathElement_IntKey{*b.IntKey} + } + if b.UintKey != nil { + x.Subscript = &FieldPathElement_UintKey{*b.UintKey} + } + if b.StringKey != nil { + x.Subscript = &FieldPathElement_StringKey{*b.StringKey} + } + return m0 +} + +type case_FieldPathElement_Subscript protoreflect.FieldNumber + +func (x case_FieldPathElement_Subscript) String() string { + md := file_buf_validate_validate_proto_msgTypes[31].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isFieldPathElement_Subscript interface { + isFieldPathElement_Subscript() +} + +type FieldPathElement_Index struct { + // `index` specifies a 0-based index into a repeated field. + Index uint64 `protobuf:"varint,6,opt,name=index,oneof"` +} + +type FieldPathElement_BoolKey struct { + // `bool_key` specifies a map key of type bool. + BoolKey bool `protobuf:"varint,7,opt,name=bool_key,json=boolKey,oneof"` +} + +type FieldPathElement_IntKey struct { + // `int_key` specifies a map key of type int32, int64, sint32, sint64, sfixed32 or sfixed64. + IntKey int64 `protobuf:"varint,8,opt,name=int_key,json=intKey,oneof"` +} + +type FieldPathElement_UintKey struct { + // `uint_key` specifies a map key of type uint32, uint64, fixed32 or fixed64. + UintKey uint64 `protobuf:"varint,9,opt,name=uint_key,json=uintKey,oneof"` +} + +type FieldPathElement_StringKey struct { + // `string_key` specifies a map key of type string. 
+ StringKey string `protobuf:"bytes,10,opt,name=string_key,json=stringKey,oneof"` +} + +func (*FieldPathElement_Index) isFieldPathElement_Subscript() {} + +func (*FieldPathElement_BoolKey) isFieldPathElement_Subscript() {} + +func (*FieldPathElement_IntKey) isFieldPathElement_Subscript() {} + +func (*FieldPathElement_UintKey) isFieldPathElement_Subscript() {} + +func (*FieldPathElement_StringKey) isFieldPathElement_Subscript() {} + +var file_buf_validate_validate_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*MessageRules)(nil), + Field: 1159, + Name: "buf.validate.message", + Tag: "bytes,1159,opt,name=message", + Filename: "buf/validate/validate.proto", + }, + { + ExtendedType: (*descriptorpb.OneofOptions)(nil), + ExtensionType: (*OneofRules)(nil), + Field: 1159, + Name: "buf.validate.oneof", + Tag: "bytes,1159,opt,name=oneof", + Filename: "buf/validate/validate.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldRules)(nil), + Field: 1159, + Name: "buf.validate.field", + Tag: "bytes,1159,opt,name=field", + Filename: "buf/validate/validate.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*PredefinedRules)(nil), + Field: 1160, + Name: "buf.validate.predefined", + Tag: "bytes,1160,opt,name=predefined", + Filename: "buf/validate/validate.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. +var ( + // Rules specify the validations to be performed on this message. By default, + // no validation is performed against a message. + // + // optional buf.validate.MessageRules message = 1159; + E_Message = &file_buf_validate_validate_proto_extTypes[0] +) + +// Extension fields to descriptorpb.OneofOptions. +var ( + // Rules specify the validations to be performed on this oneof. By default, + // no validation is performed against a oneof. 
+ // + // optional buf.validate.OneofRules oneof = 1159; + E_Oneof = &file_buf_validate_validate_proto_extTypes[1] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Rules specify the validations to be performed on this field. By default, + // no validation is performed against a field. + // + // optional buf.validate.FieldRules field = 1159; + E_Field = &file_buf_validate_validate_proto_extTypes[2] + // Specifies predefined rules. When extending a standard rule message, + // this adds additional CEL expressions that apply when the extension is used. + // + // ```proto + // + // extend buf.validate.Int32Rules { + // bool is_zero [(buf.validate.predefined).cel = { + // id: "int32.is_zero", + // message: "value must be zero", + // expression: "!rule || this == 0", + // }]; + // } + // + // message Foo { + // int32 reserved = 1 [(buf.validate.field).int32.(is_zero) = true]; + // } + // + // ``` + // + // optional buf.validate.PredefinedRules predefined = 1160; + E_Predefined = &file_buf_validate_validate_proto_extTypes[3] +) + +var File_buf_validate_validate_proto protoreflect.FileDescriptor + +const file_buf_validate_validate_proto_rawDesc = "" + + "\n" + + "\x1bbuf/validate/validate.proto\x12\fbuf.validate\x1a google/protobuf/descriptor.proto\x1a\x1egoogle/protobuf/duration.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"P\n" + + "\x04Rule\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\x12\x1e\n" + + "\n" + + "expression\x18\x03 \x01(\tR\n" + + "expression\"\xa1\x01\n" + + "\fMessageRules\x12%\n" + + "\x0ecel_expression\x18\x05 \x03(\tR\rcelExpression\x12$\n" + + "\x03cel\x18\x03 \x03(\v2\x12.buf.validate.RuleR\x03cel\x124\n" + + "\x05oneof\x18\x04 \x03(\v2\x1e.buf.validate.MessageOneofRuleR\x05oneofJ\x04\b\x01\x10\x02R\bdisabled\"F\n" + + "\x10MessageOneofRule\x12\x16\n" + + "\x06fields\x18\x01 \x03(\tR\x06fields\x12\x1a\n" + + "\brequired\x18\x02 
\x01(\bR\brequired\"(\n" + + "\n" + + "OneofRules\x12\x1a\n" + + "\brequired\x18\x01 \x01(\bR\brequired\"\xe3\n" + + "\n" + + "\n" + + "FieldRules\x12%\n" + + "\x0ecel_expression\x18\x1d \x03(\tR\rcelExpression\x12$\n" + + "\x03cel\x18\x17 \x03(\v2\x12.buf.validate.RuleR\x03cel\x12\x1a\n" + + "\brequired\x18\x19 \x01(\bR\brequired\x12,\n" + + "\x06ignore\x18\x1b \x01(\x0e2\x14.buf.validate.IgnoreR\x06ignore\x120\n" + + "\x05float\x18\x01 \x01(\v2\x18.buf.validate.FloatRulesH\x00R\x05float\x123\n" + + "\x06double\x18\x02 \x01(\v2\x19.buf.validate.DoubleRulesH\x00R\x06double\x120\n" + + "\x05int32\x18\x03 \x01(\v2\x18.buf.validate.Int32RulesH\x00R\x05int32\x120\n" + + "\x05int64\x18\x04 \x01(\v2\x18.buf.validate.Int64RulesH\x00R\x05int64\x123\n" + + "\x06uint32\x18\x05 \x01(\v2\x19.buf.validate.UInt32RulesH\x00R\x06uint32\x123\n" + + "\x06uint64\x18\x06 \x01(\v2\x19.buf.validate.UInt64RulesH\x00R\x06uint64\x123\n" + + "\x06sint32\x18\a \x01(\v2\x19.buf.validate.SInt32RulesH\x00R\x06sint32\x123\n" + + "\x06sint64\x18\b \x01(\v2\x19.buf.validate.SInt64RulesH\x00R\x06sint64\x126\n" + + "\afixed32\x18\t \x01(\v2\x1a.buf.validate.Fixed32RulesH\x00R\afixed32\x126\n" + + "\afixed64\x18\n" + + " \x01(\v2\x1a.buf.validate.Fixed64RulesH\x00R\afixed64\x129\n" + + "\bsfixed32\x18\v \x01(\v2\x1b.buf.validate.SFixed32RulesH\x00R\bsfixed32\x129\n" + + "\bsfixed64\x18\f \x01(\v2\x1b.buf.validate.SFixed64RulesH\x00R\bsfixed64\x12-\n" + + "\x04bool\x18\r \x01(\v2\x17.buf.validate.BoolRulesH\x00R\x04bool\x123\n" + + "\x06string\x18\x0e \x01(\v2\x19.buf.validate.StringRulesH\x00R\x06string\x120\n" + + "\x05bytes\x18\x0f \x01(\v2\x18.buf.validate.BytesRulesH\x00R\x05bytes\x12-\n" + + "\x04enum\x18\x10 \x01(\v2\x17.buf.validate.EnumRulesH\x00R\x04enum\x129\n" + + "\brepeated\x18\x12 \x01(\v2\x1b.buf.validate.RepeatedRulesH\x00R\brepeated\x12*\n" + + "\x03map\x18\x13 \x01(\v2\x16.buf.validate.MapRulesH\x00R\x03map\x12*\n" + + "\x03any\x18\x14 
\x01(\v2\x16.buf.validate.AnyRulesH\x00R\x03any\x129\n" + + "\bduration\x18\x15 \x01(\v2\x1b.buf.validate.DurationRulesH\x00R\bduration\x12=\n" + + "\n" + + "field_mask\x18\x1c \x01(\v2\x1c.buf.validate.FieldMaskRulesH\x00R\tfieldMask\x12<\n" + + "\ttimestamp\x18\x16 \x01(\v2\x1c.buf.validate.TimestampRulesH\x00R\ttimestampB\x06\n" + + "\x04typeJ\x04\b\x18\x10\x19J\x04\b\x1a\x10\x1bR\askippedR\fignore_empty\"Z\n" + + "\x0fPredefinedRules\x12$\n" + + "\x03cel\x18\x01 \x03(\v2\x12.buf.validate.RuleR\x03celJ\x04\b\x18\x10\x19J\x04\b\x1a\x10\x1bR\askippedR\fignore_empty\"\x90\x18\n" + + "\n" + + "FloatRules\x12\x8a\x01\n" + + "\x05const\x18\x01 \x01(\x02Bt\xc2Hq\n" + + "o\n" + + "\vfloat.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\xa3\x01\n" + + "\x02lt\x18\x02 \x01(\x02B\x90\x01\xc2H\x8c\x01\n" + + "\x89\x01\n" + + "\bfloat.lt\x1a}!has(rules.gte) && !has(rules.gt) && (this.isNan() || this >= rules.lt)? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xb4\x01\n" + + "\x03lte\x18\x03 \x01(\x02B\x9f\x01\xc2H\x9b\x01\n" + + "\x98\x01\n" + + "\tfloat.lte\x1a\x8a\x01!has(rules.gte) && !has(rules.gt) && (this.isNan() || this > rules.lte)? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xf3\a\n" + + "\x02gt\x18\x04 \x01(\x02B\xe0\a\xc2H\xdc\a\n" + + "\x8d\x01\n" + + "\bfloat.gt\x1a\x80\x01!has(rules.lt) && !has(rules.lte) && (this.isNan() || this <= rules.gt)? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xc3\x01\n" + + "\vfloat.gt_lt\x1a\xb3\x01has(rules.lt) && rules.lt >= rules.gt && (this.isNan() || this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xcd\x01\n" + + "\x15float.gt_lt_exclusive\x1a\xb3\x01has(rules.lt) && rules.lt < rules.gt && (this.isNan() || (rules.lt <= this && this <= rules.gt))? 
'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xd3\x01\n" + + "\ffloat.gt_lte\x1a\xc2\x01has(rules.lte) && rules.lte >= rules.gt && (this.isNan() || this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xdd\x01\n" + + "\x16float.gt_lte_exclusive\x1a\xc2\x01has(rules.lte) && rules.lte < rules.gt && (this.isNan() || (rules.lte < this && this <= rules.gt))? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xbf\b\n" + + "\x03gte\x18\x05 \x01(\x02B\xaa\b\xc2H\xa6\b\n" + + "\x9b\x01\n" + + "\tfloat.gte\x1a\x8d\x01!has(rules.lt) && !has(rules.lte) && (this.isNan() || this < rules.gte)? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xd2\x01\n" + + "\ffloat.gte_lt\x1a\xc1\x01has(rules.lt) && rules.lt >= rules.gte && (this.isNan() || this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xdc\x01\n" + + "\x16float.gte_lt_exclusive\x1a\xc1\x01has(rules.lt) && rules.lt < rules.gte && (this.isNan() || (rules.lt <= this && this < rules.gte))? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xe2\x01\n" + + "\rfloat.gte_lte\x1a\xd0\x01has(rules.lte) && rules.lte >= rules.gte && (this.isNan() || this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xec\x01\n" + + "\x17float.gte_lte_exclusive\x1a\xd0\x01has(rules.lte) && rules.lte < rules.gte && (this.isNan() || (rules.lte < this && this < rules.gte))? 
'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x83\x01\n" + + "\x02in\x18\x06 \x03(\x02Bs\xc2Hp\n" + + "n\n" + + "\bfloat.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12}\n" + + "\x06not_in\x18\a \x03(\x02Bf\xc2Hc\n" + + "a\n" + + "\ffloat.not_in\x1aQthis in rules.not_in ? 'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x12}\n" + + "\x06finite\x18\b \x01(\bBe\xc2Hb\n" + + "`\n" + + "\ffloat.finite\x1aPrules.finite ? (this.isNan() || this.isInf() ? 'value must be finite' : '') : ''R\x06finite\x124\n" + + "\aexample\x18\t \x03(\x02B\x1a\xc2H\x17\n" + + "\x15\n" + + "\rfloat.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xa2\x18\n" + + "\vDoubleRules\x12\x8b\x01\n" + + "\x05const\x18\x01 \x01(\x01Bu\xc2Hr\n" + + "p\n" + + "\fdouble.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\xa4\x01\n" + + "\x02lt\x18\x02 \x01(\x01B\x91\x01\xc2H\x8d\x01\n" + + "\x8a\x01\n" + + "\tdouble.lt\x1a}!has(rules.gte) && !has(rules.gt) && (this.isNan() || this >= rules.lt)? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xb5\x01\n" + + "\x03lte\x18\x03 \x01(\x01B\xa0\x01\xc2H\x9c\x01\n" + + "\x99\x01\n" + + "\n" + + "double.lte\x1a\x8a\x01!has(rules.gte) && !has(rules.gt) && (this.isNan() || this > rules.lte)? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xf8\a\n" + + "\x02gt\x18\x04 \x01(\x01B\xe5\a\xc2H\xe1\a\n" + + "\x8e\x01\n" + + "\tdouble.gt\x1a\x80\x01!has(rules.lt) && !has(rules.lte) && (this.isNan() || this <= rules.gt)? 
'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xc4\x01\n" + + "\fdouble.gt_lt\x1a\xb3\x01has(rules.lt) && rules.lt >= rules.gt && (this.isNan() || this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xce\x01\n" + + "\x16double.gt_lt_exclusive\x1a\xb3\x01has(rules.lt) && rules.lt < rules.gt && (this.isNan() || (rules.lt <= this && this <= rules.gt))? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xd4\x01\n" + + "\rdouble.gt_lte\x1a\xc2\x01has(rules.lte) && rules.lte >= rules.gt && (this.isNan() || this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xde\x01\n" + + "\x17double.gt_lte_exclusive\x1a\xc2\x01has(rules.lte) && rules.lte < rules.gt && (this.isNan() || (rules.lte < this && this <= rules.gt))? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xc4\b\n" + + "\x03gte\x18\x05 \x01(\x01B\xaf\b\xc2H\xab\b\n" + + "\x9c\x01\n" + + "\n" + + "double.gte\x1a\x8d\x01!has(rules.lt) && !has(rules.lte) && (this.isNan() || this < rules.gte)? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xd3\x01\n" + + "\rdouble.gte_lt\x1a\xc1\x01has(rules.lt) && rules.lt >= rules.gte && (this.isNan() || this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xdd\x01\n" + + "\x17double.gte_lt_exclusive\x1a\xc1\x01has(rules.lt) && rules.lt < rules.gte && (this.isNan() || (rules.lt <= this && this < rules.gte))? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xe3\x01\n" + + "\x0edouble.gte_lte\x1a\xd0\x01has(rules.lte) && rules.lte >= rules.gte && (this.isNan() || this > rules.lte || this < rules.gte)? 
'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xed\x01\n" + + "\x18double.gte_lte_exclusive\x1a\xd0\x01has(rules.lte) && rules.lte < rules.gte && (this.isNan() || (rules.lte < this && this < rules.gte))? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x84\x01\n" + + "\x02in\x18\x06 \x03(\x01Bt\xc2Hq\n" + + "o\n" + + "\tdouble.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12~\n" + + "\x06not_in\x18\a \x03(\x01Bg\xc2Hd\n" + + "b\n" + + "\rdouble.not_in\x1aQthis in rules.not_in ? 'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x12~\n" + + "\x06finite\x18\b \x01(\bBf\xc2Hc\n" + + "a\n" + + "\rdouble.finite\x1aPrules.finite ? (this.isNan() || this.isInf() ? 'value must be finite' : '') : ''R\x06finite\x125\n" + + "\aexample\x18\t \x03(\x01B\x1b\xc2H\x18\n" + + "\x16\n" + + "\x0edouble.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xba\x15\n" + + "\n" + + "Int32Rules\x12\x8a\x01\n" + + "\x05const\x18\x01 \x01(\x05Bt\xc2Hq\n" + + "o\n" + + "\vint32.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x8e\x01\n" + + "\x02lt\x18\x02 \x01(\x05B|\xc2Hy\n" + + "w\n" + + "\bint32.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa1\x01\n" + + "\x03lte\x18\x03 \x01(\x05B\x8c\x01\xc2H\x88\x01\n" + + "\x85\x01\n" + + "\tint32.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\x9b\a\n" + + "\x02gt\x18\x04 \x01(\x05B\x88\a\xc2H\x84\a\n" + + "z\n" + + "\bint32.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 
'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb3\x01\n" + + "\vint32.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbb\x01\n" + + "\x15int32.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc3\x01\n" + + "\fint32.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcb\x01\n" + + "\x16int32.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xe8\a\n" + + "\x03gte\x18\x05 \x01(\x05B\xd3\a\xc2H\xcf\a\n" + + "\x88\x01\n" + + "\tint32.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc2\x01\n" + + "\fint32.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xca\x01\n" + + "\x16int32.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd2\x01\n" + + "\rint32.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 
'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xda\x01\n" + + "\x17int32.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x83\x01\n" + + "\x02in\x18\x06 \x03(\x05Bs\xc2Hp\n" + + "n\n" + + "\bint32.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12}\n" + + "\x06not_in\x18\a \x03(\x05Bf\xc2Hc\n" + + "a\n" + + "\fint32.not_in\x1aQthis in rules.not_in ? 'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x124\n" + + "\aexample\x18\b \x03(\x05B\x1a\xc2H\x17\n" + + "\x15\n" + + "\rint32.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xba\x15\n" + + "\n" + + "Int64Rules\x12\x8a\x01\n" + + "\x05const\x18\x01 \x01(\x03Bt\xc2Hq\n" + + "o\n" + + "\vint64.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x8e\x01\n" + + "\x02lt\x18\x02 \x01(\x03B|\xc2Hy\n" + + "w\n" + + "\bint64.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa1\x01\n" + + "\x03lte\x18\x03 \x01(\x03B\x8c\x01\xc2H\x88\x01\n" + + "\x85\x01\n" + + "\tint64.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\x9b\a\n" + + "\x02gt\x18\x04 \x01(\x03B\x88\a\xc2H\x84\a\n" + + "z\n" + + "\bint64.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb3\x01\n" + + "\vint64.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 
'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbb\x01\n" + + "\x15int64.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc3\x01\n" + + "\fint64.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcb\x01\n" + + "\x16int64.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xe8\a\n" + + "\x03gte\x18\x05 \x01(\x03B\xd3\a\xc2H\xcf\a\n" + + "\x88\x01\n" + + "\tint64.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc2\x01\n" + + "\fint64.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xca\x01\n" + + "\x16int64.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd2\x01\n" + + "\rint64.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xda\x01\n" + + "\x17int64.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 
'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x83\x01\n" + + "\x02in\x18\x06 \x03(\x03Bs\xc2Hp\n" + + "n\n" + + "\bint64.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12}\n" + + "\x06not_in\x18\a \x03(\x03Bf\xc2Hc\n" + + "a\n" + + "\fint64.not_in\x1aQthis in rules.not_in ? 'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x124\n" + + "\aexample\x18\t \x03(\x03B\x1a\xc2H\x17\n" + + "\x15\n" + + "\rint64.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xcb\x15\n" + + "\vUInt32Rules\x12\x8b\x01\n" + + "\x05const\x18\x01 \x01(\rBu\xc2Hr\n" + + "p\n" + + "\fuint32.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x8f\x01\n" + + "\x02lt\x18\x02 \x01(\rB}\xc2Hz\n" + + "x\n" + + "\tuint32.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa2\x01\n" + + "\x03lte\x18\x03 \x01(\rB\x8d\x01\xc2H\x89\x01\n" + + "\x86\x01\n" + + "\n" + + "uint32.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xa0\a\n" + + "\x02gt\x18\x04 \x01(\rB\x8d\a\xc2H\x89\a\n" + + "{\n" + + "\tuint32.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb4\x01\n" + + "\fuint32.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbc\x01\n" + + "\x16uint32.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 
'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc4\x01\n" + + "\ruint32.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcc\x01\n" + + "\x17uint32.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xed\a\n" + + "\x03gte\x18\x05 \x01(\rB\xd8\a\xc2H\xd4\a\n" + + "\x89\x01\n" + + "\n" + + "uint32.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc3\x01\n" + + "\ruint32.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcb\x01\n" + + "\x17uint32.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd3\x01\n" + + "\x0euint32.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdb\x01\n" + + "\x18uint32.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x84\x01\n" + + "\x02in\x18\x06 \x03(\rBt\xc2Hq\n" + + "o\n" + + "\tuint32.in\x1ab!(this in getField(rules, 'in')) ? 
'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12~\n" + + "\x06not_in\x18\a \x03(\rBg\xc2Hd\n" + + "b\n" + + "\ruint32.not_in\x1aQthis in rules.not_in ? 'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x125\n" + + "\aexample\x18\b \x03(\rB\x1b\xc2H\x18\n" + + "\x16\n" + + "\x0euint32.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xcb\x15\n" + + "\vUInt64Rules\x12\x8b\x01\n" + + "\x05const\x18\x01 \x01(\x04Bu\xc2Hr\n" + + "p\n" + + "\fuint64.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x8f\x01\n" + + "\x02lt\x18\x02 \x01(\x04B}\xc2Hz\n" + + "x\n" + + "\tuint64.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa2\x01\n" + + "\x03lte\x18\x03 \x01(\x04B\x8d\x01\xc2H\x89\x01\n" + + "\x86\x01\n" + + "\n" + + "uint64.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xa0\a\n" + + "\x02gt\x18\x04 \x01(\x04B\x8d\a\xc2H\x89\a\n" + + "{\n" + + "\tuint64.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb4\x01\n" + + "\fuint64.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbc\x01\n" + + "\x16uint64.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc4\x01\n" + + "\ruint64.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 
'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcc\x01\n" + + "\x17uint64.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xed\a\n" + + "\x03gte\x18\x05 \x01(\x04B\xd8\a\xc2H\xd4\a\n" + + "\x89\x01\n" + + "\n" + + "uint64.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc3\x01\n" + + "\ruint64.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcb\x01\n" + + "\x17uint64.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd3\x01\n" + + "\x0euint64.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdb\x01\n" + + "\x18uint64.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x84\x01\n" + + "\x02in\x18\x06 \x03(\x04Bt\xc2Hq\n" + + "o\n" + + "\tuint64.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12~\n" + + "\x06not_in\x18\a \x03(\x04Bg\xc2Hd\n" + + "b\n" + + "\ruint64.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x125\n" + + "\aexample\x18\b \x03(\x04B\x1b\xc2H\x18\n" + + "\x16\n" + + "\x0euint64.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xcb\x15\n" + + "\vSInt32Rules\x12\x8b\x01\n" + + "\x05const\x18\x01 \x01(\x11Bu\xc2Hr\n" + + "p\n" + + "\fsint32.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x8f\x01\n" + + "\x02lt\x18\x02 \x01(\x11B}\xc2Hz\n" + + "x\n" + + "\tsint32.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa2\x01\n" + + "\x03lte\x18\x03 \x01(\x11B\x8d\x01\xc2H\x89\x01\n" + + "\x86\x01\n" + + "\n" + + "sint32.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xa0\a\n" + + "\x02gt\x18\x04 \x01(\x11B\x8d\a\xc2H\x89\a\n" + + "{\n" + + "\tsint32.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb4\x01\n" + + "\fsint32.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbc\x01\n" + + "\x16sint32.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc4\x01\n" + + "\rsint32.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcc\x01\n" + + "\x17sint32.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 
'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xed\a\n" + + "\x03gte\x18\x05 \x01(\x11B\xd8\a\xc2H\xd4\a\n" + + "\x89\x01\n" + + "\n" + + "sint32.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc3\x01\n" + + "\rsint32.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcb\x01\n" + + "\x17sint32.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd3\x01\n" + + "\x0esint32.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdb\x01\n" + + "\x18sint32.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x84\x01\n" + + "\x02in\x18\x06 \x03(\x11Bt\xc2Hq\n" + + "o\n" + + "\tsint32.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12~\n" + + "\x06not_in\x18\a \x03(\x11Bg\xc2Hd\n" + + "b\n" + + "\rsint32.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x125\n" + + "\aexample\x18\b \x03(\x11B\x1b\xc2H\x18\n" + + "\x16\n" + + "\x0esint32.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xcb\x15\n" + + "\vSInt64Rules\x12\x8b\x01\n" + + "\x05const\x18\x01 \x01(\x12Bu\xc2Hr\n" + + "p\n" + + "\fsint64.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x8f\x01\n" + + "\x02lt\x18\x02 \x01(\x12B}\xc2Hz\n" + + "x\n" + + "\tsint64.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa2\x01\n" + + "\x03lte\x18\x03 \x01(\x12B\x8d\x01\xc2H\x89\x01\n" + + "\x86\x01\n" + + "\n" + + "sint64.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xa0\a\n" + + "\x02gt\x18\x04 \x01(\x12B\x8d\a\xc2H\x89\a\n" + + "{\n" + + "\tsint64.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb4\x01\n" + + "\fsint64.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbc\x01\n" + + "\x16sint64.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc4\x01\n" + + "\rsint64.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcc\x01\n" + + "\x17sint64.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 
'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xed\a\n" + + "\x03gte\x18\x05 \x01(\x12B\xd8\a\xc2H\xd4\a\n" + + "\x89\x01\n" + + "\n" + + "sint64.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc3\x01\n" + + "\rsint64.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcb\x01\n" + + "\x17sint64.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd3\x01\n" + + "\x0esint64.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdb\x01\n" + + "\x18sint64.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x84\x01\n" + + "\x02in\x18\x06 \x03(\x12Bt\xc2Hq\n" + + "o\n" + + "\tsint64.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12~\n" + + "\x06not_in\x18\a \x03(\x12Bg\xc2Hd\n" + + "b\n" + + "\rsint64.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x125\n" + + "\aexample\x18\b \x03(\x12B\x1b\xc2H\x18\n" + + "\x16\n" + + "\x0esint64.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xdc\x15\n" + + "\fFixed32Rules\x12\x8c\x01\n" + + "\x05const\x18\x01 \x01(\aBv\xc2Hs\n" + + "q\n" + + "\rfixed32.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x90\x01\n" + + "\x02lt\x18\x02 \x01(\aB~\xc2H{\n" + + "y\n" + + "\n" + + "fixed32.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa3\x01\n" + + "\x03lte\x18\x03 \x01(\aB\x8e\x01\xc2H\x8a\x01\n" + + "\x87\x01\n" + + "\vfixed32.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xa5\a\n" + + "\x02gt\x18\x04 \x01(\aB\x92\a\xc2H\x8e\a\n" + + "|\n" + + "\n" + + "fixed32.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb5\x01\n" + + "\rfixed32.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbd\x01\n" + + "\x17fixed32.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc5\x01\n" + + "\x0efixed32.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcd\x01\n" + + "\x18fixed32.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 
'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xf2\a\n" + + "\x03gte\x18\x05 \x01(\aB\xdd\a\xc2H\xd9\a\n" + + "\x8a\x01\n" + + "\vfixed32.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc4\x01\n" + + "\x0efixed32.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcc\x01\n" + + "\x18fixed32.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd4\x01\n" + + "\x0ffixed32.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdc\x01\n" + + "\x19fixed32.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x85\x01\n" + + "\x02in\x18\x06 \x03(\aBu\xc2Hr\n" + + "p\n" + + "\n" + + "fixed32.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12\x7f\n" + + "\x06not_in\x18\a \x03(\aBh\xc2He\n" + + "c\n" + + "\x0efixed32.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x126\n" + + "\aexample\x18\b \x03(\aB\x1c\xc2H\x19\n" + + "\x17\n" + + "\x0ffixed32.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xdc\x15\n" + + "\fFixed64Rules\x12\x8c\x01\n" + + "\x05const\x18\x01 \x01(\x06Bv\xc2Hs\n" + + "q\n" + + "\rfixed64.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x90\x01\n" + + "\x02lt\x18\x02 \x01(\x06B~\xc2H{\n" + + "y\n" + + "\n" + + "fixed64.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa3\x01\n" + + "\x03lte\x18\x03 \x01(\x06B\x8e\x01\xc2H\x8a\x01\n" + + "\x87\x01\n" + + "\vfixed64.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xa5\a\n" + + "\x02gt\x18\x04 \x01(\x06B\x92\a\xc2H\x8e\a\n" + + "|\n" + + "\n" + + "fixed64.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb5\x01\n" + + "\rfixed64.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbd\x01\n" + + "\x17fixed64.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc5\x01\n" + + "\x0efixed64.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcd\x01\n" + + "\x18fixed64.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 
'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xf2\a\n" + + "\x03gte\x18\x05 \x01(\x06B\xdd\a\xc2H\xd9\a\n" + + "\x8a\x01\n" + + "\vfixed64.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc4\x01\n" + + "\x0efixed64.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcc\x01\n" + + "\x18fixed64.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd4\x01\n" + + "\x0ffixed64.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdc\x01\n" + + "\x19fixed64.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x85\x01\n" + + "\x02in\x18\x06 \x03(\x06Bu\xc2Hr\n" + + "p\n" + + "\n" + + "fixed64.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12\x7f\n" + + "\x06not_in\x18\a \x03(\x06Bh\xc2He\n" + + "c\n" + + "\x0efixed64.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x126\n" + + "\aexample\x18\b \x03(\x06B\x1c\xc2H\x19\n" + + "\x17\n" + + "\x0ffixed64.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xee\x15\n" + + "\rSFixed32Rules\x12\x8d\x01\n" + + "\x05const\x18\x01 \x01(\x0fBw\xc2Ht\n" + + "r\n" + + "\x0esfixed32.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x91\x01\n" + + "\x02lt\x18\x02 \x01(\x0fB\x7f\xc2H|\n" + + "z\n" + + "\vsfixed32.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa4\x01\n" + + "\x03lte\x18\x03 \x01(\x0fB\x8f\x01\xc2H\x8b\x01\n" + + "\x88\x01\n" + + "\fsfixed32.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xaa\a\n" + + "\x02gt\x18\x04 \x01(\x0fB\x97\a\xc2H\x93\a\n" + + "}\n" + + "\vsfixed32.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb6\x01\n" + + "\x0esfixed32.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbe\x01\n" + + "\x18sfixed32.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc6\x01\n" + + "\x0fsfixed32.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 
'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xce\x01\n" + + "\x19sfixed32.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xf7\a\n" + + "\x03gte\x18\x05 \x01(\x0fB\xe2\a\xc2H\xde\a\n" + + "\x8b\x01\n" + + "\fsfixed32.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc5\x01\n" + + "\x0fsfixed32.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcd\x01\n" + + "\x19sfixed32.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd5\x01\n" + + "\x10sfixed32.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdd\x01\n" + + "\x1asfixed32.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x86\x01\n" + + "\x02in\x18\x06 \x03(\x0fBv\xc2Hs\n" + + "q\n" + + "\vsfixed32.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12\x80\x01\n" + + "\x06not_in\x18\a \x03(\x0fBi\xc2Hf\n" + + "d\n" + + "\x0fsfixed32.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x127\n" + + "\aexample\x18\b \x03(\x0fB\x1d\xc2H\x1a\n" + + "\x18\n" + + "\x10sfixed32.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xee\x15\n" + + "\rSFixed64Rules\x12\x8d\x01\n" + + "\x05const\x18\x01 \x01(\x10Bw\xc2Ht\n" + + "r\n" + + "\x0esfixed64.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x91\x01\n" + + "\x02lt\x18\x02 \x01(\x10B\x7f\xc2H|\n" + + "z\n" + + "\vsfixed64.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa4\x01\n" + + "\x03lte\x18\x03 \x01(\x10B\x8f\x01\xc2H\x8b\x01\n" + + "\x88\x01\n" + + "\fsfixed64.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xaa\a\n" + + "\x02gt\x18\x04 \x01(\x10B\x97\a\xc2H\x93\a\n" + + "}\n" + + "\vsfixed64.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb6\x01\n" + + "\x0esfixed64.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbe\x01\n" + + "\x18sfixed64.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc6\x01\n" + + "\x0fsfixed64.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 
'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xce\x01\n" + + "\x19sfixed64.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xf7\a\n" + + "\x03gte\x18\x05 \x01(\x10B\xe2\a\xc2H\xde\a\n" + + "\x8b\x01\n" + + "\fsfixed64.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc5\x01\n" + + "\x0fsfixed64.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcd\x01\n" + + "\x19sfixed64.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd5\x01\n" + + "\x10sfixed64.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdd\x01\n" + + "\x1asfixed64.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x86\x01\n" + + "\x02in\x18\x06 \x03(\x10Bv\xc2Hs\n" + + "q\n" + + "\vsfixed64.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12\x80\x01\n" + + "\x06not_in\x18\a \x03(\x10Bi\xc2Hf\n" + + "d\n" + + "\x0fsfixed64.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x127\n" + + "\aexample\x18\b \x03(\x10B\x1d\xc2H\x1a\n" + + "\x18\n" + + "\x10sfixed64.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xd7\x01\n" + + "\tBoolRules\x12\x89\x01\n" + + "\x05const\x18\x01 \x01(\bBs\xc2Hp\n" + + "n\n" + + "\n" + + "bool.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x123\n" + + "\aexample\x18\x02 \x03(\bB\x19\xc2H\x16\n" + + "\x14\n" + + "\fbool.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xcf;\n" + + "\vStringRules\x12\x8d\x01\n" + + "\x05const\x18\x01 \x01(\tBw\xc2Ht\n" + + "r\n" + + "\fstring.const\x1abthis != getField(rules, 'const') ? 'value must equal `%s`'.format([getField(rules, 'const')]) : ''R\x05const\x12\x83\x01\n" + + "\x03len\x18\x13 \x01(\x04Bq\xc2Hn\n" + + "l\n" + + "\n" + + "string.len\x1a^uint(this.size()) != rules.len ? 'value length must be %s characters'.format([rules.len]) : ''R\x03len\x12\xa1\x01\n" + + "\amin_len\x18\x02 \x01(\x04B\x87\x01\xc2H\x83\x01\n" + + "\x80\x01\n" + + "\x0estring.min_len\x1anuint(this.size()) < rules.min_len ? 'value length must be at least %s characters'.format([rules.min_len]) : ''R\x06minLen\x12\x9f\x01\n" + + "\amax_len\x18\x03 \x01(\x04B\x85\x01\xc2H\x81\x01\n" + + "\x7f\n" + + "\x0estring.max_len\x1amuint(this.size()) > rules.max_len ? 'value length must be at most %s characters'.format([rules.max_len]) : ''R\x06maxLen\x12\xa5\x01\n" + + "\tlen_bytes\x18\x14 \x01(\x04B\x87\x01\xc2H\x83\x01\n" + + "\x80\x01\n" + + "\x10string.len_bytes\x1aluint(bytes(this).size()) != rules.len_bytes ? 'value length must be %s bytes'.format([rules.len_bytes]) : ''R\blenBytes\x12\xad\x01\n" + + "\tmin_bytes\x18\x04 \x01(\x04B\x8f\x01\xc2H\x8b\x01\n" + + "\x88\x01\n" + + "\x10string.min_bytes\x1atuint(bytes(this).size()) < rules.min_bytes ? 
'value length must be at least %s bytes'.format([rules.min_bytes]) : ''R\bminBytes\x12\xac\x01\n" + + "\tmax_bytes\x18\x05 \x01(\x04B\x8e\x01\xc2H\x8a\x01\n" + + "\x87\x01\n" + + "\x10string.max_bytes\x1asuint(bytes(this).size()) > rules.max_bytes ? 'value length must be at most %s bytes'.format([rules.max_bytes]) : ''R\bmaxBytes\x12\x96\x01\n" + + "\apattern\x18\x06 \x01(\tB|\xc2Hy\n" + + "w\n" + + "\x0estring.pattern\x1ae!this.matches(rules.pattern) ? 'value does not match regex pattern `%s`'.format([rules.pattern]) : ''R\apattern\x12\x8c\x01\n" + + "\x06prefix\x18\a \x01(\tBt\xc2Hq\n" + + "o\n" + + "\rstring.prefix\x1a^!this.startsWith(rules.prefix) ? 'value does not have prefix `%s`'.format([rules.prefix]) : ''R\x06prefix\x12\x8a\x01\n" + + "\x06suffix\x18\b \x01(\tBr\xc2Ho\n" + + "m\n" + + "\rstring.suffix\x1a\\!this.endsWith(rules.suffix) ? 'value does not have suffix `%s`'.format([rules.suffix]) : ''R\x06suffix\x12\x9a\x01\n" + + "\bcontains\x18\t \x01(\tB~\xc2H{\n" + + "y\n" + + "\x0fstring.contains\x1af!this.contains(rules.contains) ? 'value does not contain substring `%s`'.format([rules.contains]) : ''R\bcontains\x12\xa5\x01\n" + + "\fnot_contains\x18\x17 \x01(\tB\x81\x01\xc2H~\n" + + "|\n" + + "\x13string.not_contains\x1aethis.contains(rules.not_contains) ? 'value contains substring `%s`'.format([rules.not_contains]) : ''R\vnotContains\x12\x84\x01\n" + + "\x02in\x18\n" + + " \x03(\tBt\xc2Hq\n" + + "o\n" + + "\tstring.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12~\n" + + "\x06not_in\x18\v \x03(\tBg\xc2Hd\n" + + "b\n" + + "\rstring.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x12\xe6\x01\n" + + "\x05email\x18\f \x01(\bB\xcd\x01\xc2H\xc9\x01\n" + + "a\n" + + "\fstring.email\x12#value must be a valid email address\x1a,!rules.email || this == '' || this.isEmail()\n" + + "d\n" + + "\x12string.email_empty\x122value is empty, which is not a valid email address\x1a\x1a!rules.email || this != ''H\x00R\x05email\x12\xf1\x01\n" + + "\bhostname\x18\r \x01(\bB\xd2\x01\xc2H\xce\x01\n" + + "e\n" + + "\x0fstring.hostname\x12\x1evalue must be a valid hostname\x1a2!rules.hostname || this == '' || this.isHostname()\n" + + "e\n" + + "\x15string.hostname_empty\x12-value is empty, which is not a valid hostname\x1a\x1d!rules.hostname || this != ''H\x00R\bhostname\x12\xcb\x01\n" + + "\x02ip\x18\x0e \x01(\bB\xb8\x01\xc2H\xb4\x01\n" + + "U\n" + + "\tstring.ip\x12 value must be a valid IP address\x1a&!rules.ip || this == '' || this.isIp()\n" + + "[\n" + + "\x0fstring.ip_empty\x12/value is empty, which is not a valid IP address\x1a\x17!rules.ip || this != ''H\x00R\x02ip\x12\xdc\x01\n" + + "\x04ipv4\x18\x0f \x01(\bB\xc5\x01\xc2H\xc1\x01\n" + + "\\\n" + + "\vstring.ipv4\x12\"value must be a valid IPv4 address\x1a)!rules.ipv4 || this == '' || this.isIp(4)\n" + + "a\n" + + "\x11string.ipv4_empty\x121value is empty, which is not a valid IPv4 address\x1a\x19!rules.ipv4 || this != ''H\x00R\x04ipv4\x12\xdc\x01\n" + + "\x04ipv6\x18\x10 \x01(\bB\xc5\x01\xc2H\xc1\x01\n" + + "\\\n" + + "\vstring.ipv6\x12\"value must be a valid IPv6 address\x1a)!rules.ipv6 || this == '' || this.isIp(6)\n" + + "a\n" + + "\x11string.ipv6_empty\x121value is empty, which is not a valid IPv6 address\x1a\x19!rules.ipv6 || this != ''H\x00R\x04ipv6\x12\xc4\x01\n" + + "\x03uri\x18\x11 \x01(\bB\xaf\x01\xc2H\xab\x01\n" + + "Q\n" + + "\n" + + "string.uri\x12\x19value must be a valid URI\x1a(!rules.uri || this == '' || this.isUri()\n" + + "V\n" + + "\x10string.uri_empty\x12(value is empty, which is not a valid URI\x1a\x18!rules.uri || 
this != ''H\x00R\x03uri\x12x\n" + + "\auri_ref\x18\x12 \x01(\bB]\xc2HZ\n" + + "X\n" + + "\x0estring.uri_ref\x12#value must be a valid URI Reference\x1a!!rules.uri_ref || this.isUriRef()H\x00R\x06uriRef\x12\x99\x02\n" + + "\aaddress\x18\x15 \x01(\bB\xfc\x01\xc2H\xf8\x01\n" + + "\x81\x01\n" + + "\x0estring.address\x12-value must be a valid hostname, or ip address\x1a@!rules.address || this == '' || this.isHostname() || this.isIp()\n" + + "r\n" + + "\x14string.address_empty\x12!rules.ipv4_with_prefixlen || this == '' || this.isIpPrefix(4)\n" + + "\x92\x01\n" + + " string.ipv4_with_prefixlen_empty\x12Dvalue is empty, which is not a valid IPv4 address with prefix length\x1a(!rules.ipv4_with_prefixlen || this != ''H\x00R\x11ipv4WithPrefixlen\x12\xe2\x02\n" + + "\x13ipv6_with_prefixlen\x18\x1c \x01(\bB\xaf\x02\xc2H\xab\x02\n" + + "\x93\x01\n" + + "\x1astring.ipv6_with_prefixlen\x125value must be a valid IPv6 address with prefix length\x1a>!rules.ipv6_with_prefixlen || this == '' || this.isIpPrefix(6)\n" + + "\x92\x01\n" + + " string.ipv6_with_prefixlen_empty\x12Dvalue is empty, which is not a valid IPv6 address with prefix length\x1a(!rules.ipv6_with_prefixlen || this != ''H\x00R\x11ipv6WithPrefixlen\x12\xfc\x01\n" + + "\tip_prefix\x18\x1d \x01(\bB\xdc\x01\xc2H\xd8\x01\n" + + "l\n" + + "\x10string.ip_prefix\x12\x1fvalue must be a valid IP prefix\x1a7!rules.ip_prefix || this == '' || this.isIpPrefix(true)\n" + + "h\n" + + "\x16string.ip_prefix_empty\x12.value is empty, which is not a valid IP prefix\x1a\x1e!rules.ip_prefix || this != ''H\x00R\bipPrefix\x12\x8f\x02\n" + + "\vipv4_prefix\x18\x1e \x01(\bB\xeb\x01\xc2H\xe7\x01\n" + + "u\n" + + "\x12string.ipv4_prefix\x12!value must be a valid IPv4 prefix\x1a!rules.host_and_port || this == '' || this.isHostAndPort(true)\n" + + "y\n" + + "\x1astring.host_and_port_empty\x127value is empty, which is not a valid host and port pair\x1a\"!rules.host_and_port || this != ''H\x00R\vhostAndPort\x12\xfb\x01\n" + + "\x04ulid\x18# 
\x01(\bB\xe4\x01\xc2H\xe0\x01\n" + + "\x82\x01\n" + + "\vstring.ulid\x12\x1avalue must be a valid ULID\x1aW!rules.ulid || this == '' || this.matches('^[0-7][0-9A-HJKMNP-TV-Za-hjkmnp-tv-z]{25}$')\n" + + "Y\n" + + "\x11string.ulid_empty\x12)value is empty, which is not a valid ULID\x1a\x19!rules.ulid || this != ''H\x00R\x04ulid\x12\xb8\x05\n" + + "\x10well_known_regex\x18\x18 \x01(\x0e2\x18.buf.validate.KnownRegexB\xf1\x04\xc2H\xed\x04\n" + + "\xf0\x01\n" + + "#string.well_known_regex.header_name\x12&value must be a valid HTTP header name\x1a\xa0\x01rules.well_known_regex != 1 || this == '' || this.matches(!has(rules.strict) || rules.strict ?'^:?[0-9a-zA-Z!#$%&\\'*+-.^_|~\\x60]+$' :'^[^\\u0000\\u000A\\u000D]+$')\n" + + "\x8d\x01\n" + + ")string.well_known_regex.header_name_empty\x125value is empty, which is not a valid HTTP header name\x1a)rules.well_known_regex != 1 || this != ''\n" + + "\xe7\x01\n" + + "$string.well_known_regex.header_value\x12'value must be a valid HTTP header value\x1a\x95\x01rules.well_known_regex != 2 || this.matches(!has(rules.strict) || rules.strict ?'^[^\\u0000-\\u0008\\u000A-\\u001F\\u007F]*$' :'^[^\\u0000\\u000A\\u000D]*$')H\x00R\x0ewellKnownRegex\x12\x16\n" + + "\x06strict\x18\x19 \x01(\bR\x06strict\x125\n" + + "\aexample\x18\" \x03(\tB\x1b\xc2H\x18\n" + + "\x16\n" + + "\x0estring.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\f\n" + + "\n" + + "well_known\"\xac\x13\n" + + "\n" + + "BytesRules\x12\x87\x01\n" + + "\x05const\x18\x01 \x01(\fBq\xc2Hn\n" + + "l\n" + + "\vbytes.const\x1a]this != getField(rules, 'const') ? 'value must be %x'.format([getField(rules, 'const')]) : ''R\x05const\x12}\n" + + "\x03len\x18\r \x01(\x04Bk\xc2Hh\n" + + "f\n" + + "\tbytes.len\x1aYuint(this.size()) != rules.len ? 'value length must be %s bytes'.format([rules.len]) : ''R\x03len\x12\x98\x01\n" + + "\amin_len\x18\x02 \x01(\x04B\x7f\xc2H|\n" + + "z\n" + + "\rbytes.min_len\x1aiuint(this.size()) < rules.min_len ? 
'value length must be at least %s bytes'.format([rules.min_len]) : ''R\x06minLen\x12\x90\x01\n" + + "\amax_len\x18\x03 \x01(\x04Bw\xc2Ht\n" + + "r\n" + + "\rbytes.max_len\x1aauint(this.size()) > rules.max_len ? 'value must be at most %s bytes'.format([rules.max_len]) : ''R\x06maxLen\x12\x99\x01\n" + + "\apattern\x18\x04 \x01(\tB\x7f\xc2H|\n" + + "z\n" + + "\rbytes.pattern\x1ai!string(this).matches(rules.pattern) ? 'value must match regex pattern `%s`'.format([rules.pattern]) : ''R\apattern\x12\x89\x01\n" + + "\x06prefix\x18\x05 \x01(\fBq\xc2Hn\n" + + "l\n" + + "\fbytes.prefix\x1a\\!this.startsWith(rules.prefix) ? 'value does not have prefix %x'.format([rules.prefix]) : ''R\x06prefix\x12\x87\x01\n" + + "\x06suffix\x18\x06 \x01(\fBo\xc2Hl\n" + + "j\n" + + "\fbytes.suffix\x1aZ!this.endsWith(rules.suffix) ? 'value does not have suffix %x'.format([rules.suffix]) : ''R\x06suffix\x12\x8d\x01\n" + + "\bcontains\x18\a \x01(\fBq\xc2Hn\n" + + "l\n" + + "\x0ebytes.contains\x1aZ!this.contains(rules.contains) ? 'value does not contain %x'.format([rules.contains]) : ''R\bcontains\x12\xab\x01\n" + + "\x02in\x18\b \x03(\fB\x9a\x01\xc2H\x96\x01\n" + + "\x93\x01\n" + + "\bbytes.in\x1a\x86\x01getField(rules, 'in').size() > 0 && !(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12}\n" + + "\x06not_in\x18\t \x03(\fBf\xc2Hc\n" + + "a\n" + + "\fbytes.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x12\xef\x01\n" + + "\x02ip\x18\n" + + " \x01(\bB\xdc\x01\xc2H\xd8\x01\n" + + "t\n" + + "\bbytes.ip\x12 value must be a valid IP address\x1aF!rules.ip || this.size() == 0 || this.size() == 4 || this.size() == 16\n" + + "`\n" + + "\x0ebytes.ip_empty\x12/value is empty, which is not a valid IP address\x1a\x1d!rules.ip || this.size() != 0H\x00R\x02ip\x12\xea\x01\n" + + "\x04ipv4\x18\v \x01(\bB\xd3\x01\xc2H\xcf\x01\n" + + "e\n" + + "\n" + + "bytes.ipv4\x12\"value must be a valid IPv4 address\x1a3!rules.ipv4 || this.size() == 0 || this.size() == 4\n" + + "f\n" + + "\x10bytes.ipv4_empty\x121value is empty, which is not a valid IPv4 address\x1a\x1f!rules.ipv4 || this.size() != 0H\x00R\x04ipv4\x12\xeb\x01\n" + + "\x04ipv6\x18\f \x01(\bB\xd4\x01\xc2H\xd0\x01\n" + + "f\n" + + "\n" + + "bytes.ipv6\x12\"value must be a valid IPv6 address\x1a4!rules.ipv6 || this.size() == 0 || this.size() == 16\n" + + "f\n" + + "\x10bytes.ipv6_empty\x121value is empty, which is not a valid IPv6 address\x1a\x1f!rules.ipv6 || this.size() != 0H\x00R\x04ipv6\x12\xdb\x01\n" + + "\x04uuid\x18\x0f \x01(\bB\xc4\x01\xc2H\xc0\x01\n" + + "^\n" + + "\n" + + "bytes.uuid\x12\x1avalue must be a valid UUID\x1a4!rules.uuid || this.size() == 0 || this.size() == 16\n" + + "^\n" + + "\x10bytes.uuid_empty\x12)value is empty, which is not a valid UUID\x1a\x1f!rules.uuid || this.size() != 0H\x00R\x04uuid\x124\n" + + "\aexample\x18\x0e \x03(\fB\x1a\xc2H\x17\n" + + "\x15\n" + + "\rbytes.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\f\n" + + "\n" + + "well_known\"\xfd\x03\n" + + "\tEnumRules\x12\x89\x01\n" + + "\x05const\x18\x01 \x01(\x05Bs\xc2Hp\n" + + "n\n" + + "\n" + + "enum.const\x1a`this != getField(rules, 'const') ? 
'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12!\n" + + "\fdefined_only\x18\x02 \x01(\bR\vdefinedOnly\x12\x82\x01\n" + + "\x02in\x18\x03 \x03(\x05Br\xc2Ho\n" + + "m\n" + + "\aenum.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12|\n" + + "\x06not_in\x18\x04 \x03(\x05Be\xc2Hb\n" + + "`\n" + + "\venum.not_in\x1aQthis in rules.not_in ? 'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x123\n" + + "\aexample\x18\x05 \x03(\x05B\x19\xc2H\x16\n" + + "\x14\n" + + "\fenum.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9e\x04\n" + + "\rRepeatedRules\x12\xa8\x01\n" + + "\tmin_items\x18\x01 \x01(\x04B\x8a\x01\xc2H\x86\x01\n" + + "\x83\x01\n" + + "\x12repeated.min_items\x1amuint(this.size()) < rules.min_items ? 'value must contain at least %d item(s)'.format([rules.min_items]) : ''R\bminItems\x12\xac\x01\n" + + "\tmax_items\x18\x02 \x01(\x04B\x8e\x01\xc2H\x8a\x01\n" + + "\x87\x01\n" + + "\x12repeated.max_items\x1aquint(this.size()) > rules.max_items ? 'value must contain no more than %s item(s)'.format([rules.max_items]) : ''R\bmaxItems\x12x\n" + + "\x06unique\x18\x03 \x01(\bB`\xc2H]\n" + + "[\n" + + "\x0frepeated.unique\x12(repeated value must contain unique items\x1a\x1e!rules.unique || this.unique()R\x06unique\x12.\n" + + "\x05items\x18\x04 \x01(\v2\x18.buf.validate.FieldRulesR\x05items*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xac\x03\n" + + "\bMapRules\x12\x99\x01\n" + + "\tmin_pairs\x18\x01 \x01(\x04B|\xc2Hy\n" + + "w\n" + + "\rmap.min_pairs\x1afuint(this.size()) < rules.min_pairs ? 'map must be at least %d entries'.format([rules.min_pairs]) : ''R\bminPairs\x12\x98\x01\n" + + "\tmax_pairs\x18\x02 \x01(\x04B{\xc2Hx\n" + + "v\n" + + "\rmap.max_pairs\x1aeuint(this.size()) > rules.max_pairs ? 
'map must be at most %d entries'.format([rules.max_pairs]) : ''R\bmaxPairs\x12,\n" + + "\x04keys\x18\x04 \x01(\v2\x18.buf.validate.FieldRulesR\x04keys\x120\n" + + "\x06values\x18\x05 \x01(\v2\x18.buf.validate.FieldRulesR\x06values*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"1\n" + + "\bAnyRules\x12\x0e\n" + + "\x02in\x18\x02 \x03(\tR\x02in\x12\x15\n" + + "\x06not_in\x18\x03 \x03(\tR\x05notIn\"\xc6\x17\n" + + "\rDurationRules\x12\xa8\x01\n" + + "\x05const\x18\x02 \x01(\v2\x19.google.protobuf.DurationBw\xc2Ht\n" + + "r\n" + + "\x0eduration.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\xac\x01\n" + + "\x02lt\x18\x03 \x01(\v2\x19.google.protobuf.DurationB\x7f\xc2H|\n" + + "z\n" + + "\vduration.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xbf\x01\n" + + "\x03lte\x18\x04 \x01(\v2\x19.google.protobuf.DurationB\x8f\x01\xc2H\x8b\x01\n" + + "\x88\x01\n" + + "\fduration.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xc5\a\n" + + "\x02gt\x18\x05 \x01(\v2\x19.google.protobuf.DurationB\x97\a\xc2H\x93\a\n" + + "}\n" + + "\vduration.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb6\x01\n" + + "\x0eduration.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbe\x01\n" + + "\x18duration.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc6\x01\n" + + "\x0fduration.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 
'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xce\x01\n" + + "\x19duration.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\x92\b\n" + + "\x03gte\x18\x06 \x01(\v2\x19.google.protobuf.DurationB\xe2\a\xc2H\xde\a\n" + + "\x8b\x01\n" + + "\fduration.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc5\x01\n" + + "\x0fduration.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcd\x01\n" + + "\x19duration.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd5\x01\n" + + "\x10duration.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdd\x01\n" + + "\x1aduration.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\xa1\x01\n" + + "\x02in\x18\a \x03(\v2\x19.google.protobuf.DurationBv\xc2Hs\n" + + "q\n" + + "\vduration.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12\x9b\x01\n" + + "\x06not_in\x18\b \x03(\v2\x19.google.protobuf.DurationBi\xc2Hf\n" + + "d\n" + + "\x0fduration.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x12R\n" + + "\aexample\x18\t \x03(\v2\x19.google.protobuf.DurationB\x1d\xc2H\x1a\n" + + "\x18\n" + + "\x10duration.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\x98\x06\n" + + "\x0eFieldMaskRules\x12\xc6\x01\n" + + "\x05const\x18\x01 \x01(\v2\x1a.google.protobuf.FieldMaskB\x93\x01\xc2H\x8f\x01\n" + + "\x8c\x01\n" + + "\x10field_mask.const\x1axthis.paths != getField(rules, 'const').paths ? 'value must equal paths %s'.format([getField(rules, 'const').paths]) : ''R\x05const\x12\xdd\x01\n" + + "\x02in\x18\x02 \x03(\tB\xcc\x01\xc2H\xc8\x01\n" + + "\xc5\x01\n" + + "\rfield_mask.in\x1a\xb3\x01!this.paths.all(p, p in getField(rules, 'in') || getField(rules, 'in').exists(f, p.startsWith(f+'.'))) ? 'value must only contain paths in %s'.format([getField(rules, 'in')]) : ''R\x02in\x12\xfa\x01\n" + + "\x06not_in\x18\x03 \x03(\tB\xe2\x01\xc2H\xde\x01\n" + + "\xdb\x01\n" + + "\x11field_mask.not_in\x1a\xc5\x01!this.paths.all(p, !(p in getField(rules, 'not_in') || getField(rules, 'not_in').exists(f, p.startsWith(f+'.')))) ? 'value must not contain any paths in %s'.format([getField(rules, 'not_in')]) : ''R\x05notIn\x12U\n" + + "\aexample\x18\x04 \x03(\v2\x1a.google.protobuf.FieldMaskB\x1f\xc2H\x1c\n" + + "\x1a\n" + + "\x12field_mask.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xca\x18\n" + + "\x0eTimestampRules\x12\xaa\x01\n" + + "\x05const\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampBx\xc2Hu\n" + + "s\n" + + "\x0ftimestamp.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\xaf\x01\n" + + "\x02lt\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampB\x80\x01\xc2H}\n" + + "{\n" + + "\ftimestamp.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 
'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xc1\x01\n" + + "\x03lte\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampB\x90\x01\xc2H\x8c\x01\n" + + "\x89\x01\n" + + "\rtimestamp.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12s\n" + + "\x06lt_now\x18\a \x01(\bBZ\xc2HW\n" + + "U\n" + + "\x10timestamp.lt_now\x1aA(rules.lt_now && this > now) ? 'value must be less than now' : ''H\x00R\x05ltNow\x12\xcb\a\n" + + "\x02gt\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampB\x9c\a\xc2H\x98\a\n" + + "~\n" + + "\ftimestamp.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb7\x01\n" + + "\x0ftimestamp.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbf\x01\n" + + "\x19timestamp.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc7\x01\n" + + "\x10timestamp.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcf\x01\n" + + "\x1atimestamp.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\x98\b\n" + + "\x03gte\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampB\xe7\a\xc2H\xe3\a\n" + + "\x8c\x01\n" + + "\rtimestamp.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 
'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc6\x01\n" + + "\x10timestamp.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xce\x01\n" + + "\x1atimestamp.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd6\x01\n" + + "\x11timestamp.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xde\x01\n" + + "\x1btimestamp.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12v\n" + + "\x06gt_now\x18\b \x01(\bB]\xc2HZ\n" + + "X\n" + + "\x10timestamp.gt_now\x1aD(rules.gt_now && this < now) ? 'value must be greater than now' : ''H\x01R\x05gtNow\x12\xc0\x01\n" + + "\x06within\x18\t \x01(\v2\x19.google.protobuf.DurationB\x8c\x01\xc2H\x88\x01\n" + + "\x85\x01\n" + + "\x10timestamp.within\x1aqthis < now-rules.within || this > now+rules.within ? 
'value must be within %s of now'.format([rules.within]) : ''R\x06within\x12T\n" + + "\aexample\x18\n" + + " \x03(\v2\x1a.google.protobuf.TimestampB\x1e\xc2H\x1b\n" + + "\x19\n" + + "\x11timestamp.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"E\n" + + "\n" + + "Violations\x127\n" + + "\n" + + "violations\x18\x01 \x03(\v2\x17.buf.validate.ViolationR\n" + + "violations\"\xc5\x01\n" + + "\tViolation\x12-\n" + + "\x05field\x18\x05 \x01(\v2\x17.buf.validate.FieldPathR\x05field\x12+\n" + + "\x04rule\x18\x06 \x01(\v2\x17.buf.validate.FieldPathR\x04rule\x12\x17\n" + + "\arule_id\x18\x02 \x01(\tR\x06ruleId\x12\x18\n" + + "\amessage\x18\x03 \x01(\tR\amessage\x12\x17\n" + + "\afor_key\x18\x04 \x01(\bR\x06forKeyJ\x04\b\x01\x10\x02R\n" + + "field_path\"G\n" + + "\tFieldPath\x12:\n" + + "\belements\x18\x01 \x03(\v2\x1e.buf.validate.FieldPathElementR\belements\"\xcc\x03\n" + + "\x10FieldPathElement\x12!\n" + + "\ffield_number\x18\x01 \x01(\x05R\vfieldNumber\x12\x1d\n" + + "\n" + + "field_name\x18\x02 \x01(\tR\tfieldName\x12I\n" + + "\n" + + "field_type\x18\x03 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\tfieldType\x12E\n" + + "\bkey_type\x18\x04 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\akeyType\x12I\n" + + "\n" + + "value_type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\tvalueType\x12\x16\n" + + "\x05index\x18\x06 \x01(\x04H\x00R\x05index\x12\x1b\n" + + "\bbool_key\x18\a \x01(\bH\x00R\aboolKey\x12\x19\n" + + "\aint_key\x18\b \x01(\x03H\x00R\x06intKey\x12\x1b\n" + + "\buint_key\x18\t \x01(\x04H\x00R\auintKey\x12\x1f\n" + + "\n" + + "string_key\x18\n" + + " \x01(\tH\x00R\tstringKeyB\v\n" + + "\tsubscript*\xa1\x01\n" + + "\x06Ignore\x12\x16\n" + + "\x12IGNORE_UNSPECIFIED\x10\x00\x12\x18\n" + + "\x14IGNORE_IF_ZERO_VALUE\x10\x01\x12\x11\n" + + 
"\rIGNORE_ALWAYS\x10\x03\"\x04\b\x02\x10\x02*\fIGNORE_EMPTY*\x0eIGNORE_DEFAULT*\x17IGNORE_IF_DEFAULT_VALUE*\x15IGNORE_IF_UNPOPULATED*n\n" + + "\n" + + "KnownRegex\x12\x1b\n" + + "\x17KNOWN_REGEX_UNSPECIFIED\x10\x00\x12 \n" + + "\x1cKNOWN_REGEX_HTTP_HEADER_NAME\x10\x01\x12!\n" + + "\x1dKNOWN_REGEX_HTTP_HEADER_VALUE\x10\x02:V\n" + + "\amessage\x12\x1f.google.protobuf.MessageOptions\x18\x87\t \x01(\v2\x1a.buf.validate.MessageRulesR\amessage:N\n" + + "\x05oneof\x12\x1d.google.protobuf.OneofOptions\x18\x87\t \x01(\v2\x18.buf.validate.OneofRulesR\x05oneof:N\n" + + "\x05field\x12\x1d.google.protobuf.FieldOptions\x18\x87\t \x01(\v2\x18.buf.validate.FieldRulesR\x05field:]\n" + + "\n" + + "predefined\x12\x1d.google.protobuf.FieldOptions\x18\x88\t \x01(\v2\x1d.buf.validate.PredefinedRulesR\n" + + "predefinedBn\n" + + "\x12build.buf.validateB\rValidateProtoP\x01ZGbuf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/buf/validate" + +var file_buf_validate_validate_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_buf_validate_validate_proto_msgTypes = make([]protoimpl.MessageInfo, 32) +var file_buf_validate_validate_proto_goTypes = []any{ + (Ignore)(0), // 0: buf.validate.Ignore + (KnownRegex)(0), // 1: buf.validate.KnownRegex + (*Rule)(nil), // 2: buf.validate.Rule + (*MessageRules)(nil), // 3: buf.validate.MessageRules + (*MessageOneofRule)(nil), // 4: buf.validate.MessageOneofRule + (*OneofRules)(nil), // 5: buf.validate.OneofRules + (*FieldRules)(nil), // 6: buf.validate.FieldRules + (*PredefinedRules)(nil), // 7: buf.validate.PredefinedRules + (*FloatRules)(nil), // 8: buf.validate.FloatRules + (*DoubleRules)(nil), // 9: buf.validate.DoubleRules + (*Int32Rules)(nil), // 10: buf.validate.Int32Rules + (*Int64Rules)(nil), // 11: buf.validate.Int64Rules + (*UInt32Rules)(nil), // 12: buf.validate.UInt32Rules + (*UInt64Rules)(nil), // 13: buf.validate.UInt64Rules + (*SInt32Rules)(nil), // 14: buf.validate.SInt32Rules + (*SInt64Rules)(nil), // 15: 
buf.validate.SInt64Rules + (*Fixed32Rules)(nil), // 16: buf.validate.Fixed32Rules + (*Fixed64Rules)(nil), // 17: buf.validate.Fixed64Rules + (*SFixed32Rules)(nil), // 18: buf.validate.SFixed32Rules + (*SFixed64Rules)(nil), // 19: buf.validate.SFixed64Rules + (*BoolRules)(nil), // 20: buf.validate.BoolRules + (*StringRules)(nil), // 21: buf.validate.StringRules + (*BytesRules)(nil), // 22: buf.validate.BytesRules + (*EnumRules)(nil), // 23: buf.validate.EnumRules + (*RepeatedRules)(nil), // 24: buf.validate.RepeatedRules + (*MapRules)(nil), // 25: buf.validate.MapRules + (*AnyRules)(nil), // 26: buf.validate.AnyRules + (*DurationRules)(nil), // 27: buf.validate.DurationRules + (*FieldMaskRules)(nil), // 28: buf.validate.FieldMaskRules + (*TimestampRules)(nil), // 29: buf.validate.TimestampRules + (*Violations)(nil), // 30: buf.validate.Violations + (*Violation)(nil), // 31: buf.validate.Violation + (*FieldPath)(nil), // 32: buf.validate.FieldPath + (*FieldPathElement)(nil), // 33: buf.validate.FieldPathElement + (*durationpb.Duration)(nil), // 34: google.protobuf.Duration + (*fieldmaskpb.FieldMask)(nil), // 35: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 36: google.protobuf.Timestamp + (descriptorpb.FieldDescriptorProto_Type)(0), // 37: google.protobuf.FieldDescriptorProto.Type + (*descriptorpb.MessageOptions)(nil), // 38: google.protobuf.MessageOptions + (*descriptorpb.OneofOptions)(nil), // 39: google.protobuf.OneofOptions + (*descriptorpb.FieldOptions)(nil), // 40: google.protobuf.FieldOptions +} +var file_buf_validate_validate_proto_depIdxs = []int32{ + 2, // 0: buf.validate.MessageRules.cel:type_name -> buf.validate.Rule + 4, // 1: buf.validate.MessageRules.oneof:type_name -> buf.validate.MessageOneofRule + 2, // 2: buf.validate.FieldRules.cel:type_name -> buf.validate.Rule + 0, // 3: buf.validate.FieldRules.ignore:type_name -> buf.validate.Ignore + 8, // 4: buf.validate.FieldRules.float:type_name -> buf.validate.FloatRules + 9, // 5: 
buf.validate.FieldRules.double:type_name -> buf.validate.DoubleRules + 10, // 6: buf.validate.FieldRules.int32:type_name -> buf.validate.Int32Rules + 11, // 7: buf.validate.FieldRules.int64:type_name -> buf.validate.Int64Rules + 12, // 8: buf.validate.FieldRules.uint32:type_name -> buf.validate.UInt32Rules + 13, // 9: buf.validate.FieldRules.uint64:type_name -> buf.validate.UInt64Rules + 14, // 10: buf.validate.FieldRules.sint32:type_name -> buf.validate.SInt32Rules + 15, // 11: buf.validate.FieldRules.sint64:type_name -> buf.validate.SInt64Rules + 16, // 12: buf.validate.FieldRules.fixed32:type_name -> buf.validate.Fixed32Rules + 17, // 13: buf.validate.FieldRules.fixed64:type_name -> buf.validate.Fixed64Rules + 18, // 14: buf.validate.FieldRules.sfixed32:type_name -> buf.validate.SFixed32Rules + 19, // 15: buf.validate.FieldRules.sfixed64:type_name -> buf.validate.SFixed64Rules + 20, // 16: buf.validate.FieldRules.bool:type_name -> buf.validate.BoolRules + 21, // 17: buf.validate.FieldRules.string:type_name -> buf.validate.StringRules + 22, // 18: buf.validate.FieldRules.bytes:type_name -> buf.validate.BytesRules + 23, // 19: buf.validate.FieldRules.enum:type_name -> buf.validate.EnumRules + 24, // 20: buf.validate.FieldRules.repeated:type_name -> buf.validate.RepeatedRules + 25, // 21: buf.validate.FieldRules.map:type_name -> buf.validate.MapRules + 26, // 22: buf.validate.FieldRules.any:type_name -> buf.validate.AnyRules + 27, // 23: buf.validate.FieldRules.duration:type_name -> buf.validate.DurationRules + 28, // 24: buf.validate.FieldRules.field_mask:type_name -> buf.validate.FieldMaskRules + 29, // 25: buf.validate.FieldRules.timestamp:type_name -> buf.validate.TimestampRules + 2, // 26: buf.validate.PredefinedRules.cel:type_name -> buf.validate.Rule + 1, // 27: buf.validate.StringRules.well_known_regex:type_name -> buf.validate.KnownRegex + 6, // 28: buf.validate.RepeatedRules.items:type_name -> buf.validate.FieldRules + 6, // 29: 
buf.validate.MapRules.keys:type_name -> buf.validate.FieldRules + 6, // 30: buf.validate.MapRules.values:type_name -> buf.validate.FieldRules + 34, // 31: buf.validate.DurationRules.const:type_name -> google.protobuf.Duration + 34, // 32: buf.validate.DurationRules.lt:type_name -> google.protobuf.Duration + 34, // 33: buf.validate.DurationRules.lte:type_name -> google.protobuf.Duration + 34, // 34: buf.validate.DurationRules.gt:type_name -> google.protobuf.Duration + 34, // 35: buf.validate.DurationRules.gte:type_name -> google.protobuf.Duration + 34, // 36: buf.validate.DurationRules.in:type_name -> google.protobuf.Duration + 34, // 37: buf.validate.DurationRules.not_in:type_name -> google.protobuf.Duration + 34, // 38: buf.validate.DurationRules.example:type_name -> google.protobuf.Duration + 35, // 39: buf.validate.FieldMaskRules.const:type_name -> google.protobuf.FieldMask + 35, // 40: buf.validate.FieldMaskRules.example:type_name -> google.protobuf.FieldMask + 36, // 41: buf.validate.TimestampRules.const:type_name -> google.protobuf.Timestamp + 36, // 42: buf.validate.TimestampRules.lt:type_name -> google.protobuf.Timestamp + 36, // 43: buf.validate.TimestampRules.lte:type_name -> google.protobuf.Timestamp + 36, // 44: buf.validate.TimestampRules.gt:type_name -> google.protobuf.Timestamp + 36, // 45: buf.validate.TimestampRules.gte:type_name -> google.protobuf.Timestamp + 34, // 46: buf.validate.TimestampRules.within:type_name -> google.protobuf.Duration + 36, // 47: buf.validate.TimestampRules.example:type_name -> google.protobuf.Timestamp + 31, // 48: buf.validate.Violations.violations:type_name -> buf.validate.Violation + 32, // 49: buf.validate.Violation.field:type_name -> buf.validate.FieldPath + 32, // 50: buf.validate.Violation.rule:type_name -> buf.validate.FieldPath + 33, // 51: buf.validate.FieldPath.elements:type_name -> buf.validate.FieldPathElement + 37, // 52: buf.validate.FieldPathElement.field_type:type_name -> 
google.protobuf.FieldDescriptorProto.Type + 37, // 53: buf.validate.FieldPathElement.key_type:type_name -> google.protobuf.FieldDescriptorProto.Type + 37, // 54: buf.validate.FieldPathElement.value_type:type_name -> google.protobuf.FieldDescriptorProto.Type + 38, // 55: buf.validate.message:extendee -> google.protobuf.MessageOptions + 39, // 56: buf.validate.oneof:extendee -> google.protobuf.OneofOptions + 40, // 57: buf.validate.field:extendee -> google.protobuf.FieldOptions + 40, // 58: buf.validate.predefined:extendee -> google.protobuf.FieldOptions + 3, // 59: buf.validate.message:type_name -> buf.validate.MessageRules + 5, // 60: buf.validate.oneof:type_name -> buf.validate.OneofRules + 6, // 61: buf.validate.field:type_name -> buf.validate.FieldRules + 7, // 62: buf.validate.predefined:type_name -> buf.validate.PredefinedRules + 63, // [63:63] is the sub-list for method output_type + 63, // [63:63] is the sub-list for method input_type + 59, // [59:63] is the sub-list for extension type_name + 55, // [55:59] is the sub-list for extension extendee + 0, // [0:55] is the sub-list for field type_name +} + +func init() { file_buf_validate_validate_proto_init() } +func file_buf_validate_validate_proto_init() { + if File_buf_validate_validate_proto != nil { + return + } + file_buf_validate_validate_proto_msgTypes[4].OneofWrappers = []any{ + (*FieldRules_Float)(nil), + (*FieldRules_Double)(nil), + (*FieldRules_Int32)(nil), + (*FieldRules_Int64)(nil), + (*FieldRules_Uint32)(nil), + (*FieldRules_Uint64)(nil), + (*FieldRules_Sint32)(nil), + (*FieldRules_Sint64)(nil), + (*FieldRules_Fixed32)(nil), + (*FieldRules_Fixed64)(nil), + (*FieldRules_Sfixed32)(nil), + (*FieldRules_Sfixed64)(nil), + (*FieldRules_Bool)(nil), + (*FieldRules_String_)(nil), + (*FieldRules_Bytes)(nil), + (*FieldRules_Enum)(nil), + (*FieldRules_Repeated)(nil), + (*FieldRules_Map)(nil), + (*FieldRules_Any)(nil), + (*FieldRules_Duration)(nil), + (*FieldRules_FieldMask)(nil), + 
(*FieldRules_Timestamp)(nil), + } + file_buf_validate_validate_proto_msgTypes[6].OneofWrappers = []any{ + (*FloatRules_Lt)(nil), + (*FloatRules_Lte)(nil), + (*FloatRules_Gt)(nil), + (*FloatRules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[7].OneofWrappers = []any{ + (*DoubleRules_Lt)(nil), + (*DoubleRules_Lte)(nil), + (*DoubleRules_Gt)(nil), + (*DoubleRules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[8].OneofWrappers = []any{ + (*Int32Rules_Lt)(nil), + (*Int32Rules_Lte)(nil), + (*Int32Rules_Gt)(nil), + (*Int32Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[9].OneofWrappers = []any{ + (*Int64Rules_Lt)(nil), + (*Int64Rules_Lte)(nil), + (*Int64Rules_Gt)(nil), + (*Int64Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[10].OneofWrappers = []any{ + (*UInt32Rules_Lt)(nil), + (*UInt32Rules_Lte)(nil), + (*UInt32Rules_Gt)(nil), + (*UInt32Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[11].OneofWrappers = []any{ + (*UInt64Rules_Lt)(nil), + (*UInt64Rules_Lte)(nil), + (*UInt64Rules_Gt)(nil), + (*UInt64Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[12].OneofWrappers = []any{ + (*SInt32Rules_Lt)(nil), + (*SInt32Rules_Lte)(nil), + (*SInt32Rules_Gt)(nil), + (*SInt32Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[13].OneofWrappers = []any{ + (*SInt64Rules_Lt)(nil), + (*SInt64Rules_Lte)(nil), + (*SInt64Rules_Gt)(nil), + (*SInt64Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[14].OneofWrappers = []any{ + (*Fixed32Rules_Lt)(nil), + (*Fixed32Rules_Lte)(nil), + (*Fixed32Rules_Gt)(nil), + (*Fixed32Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[15].OneofWrappers = []any{ + (*Fixed64Rules_Lt)(nil), + (*Fixed64Rules_Lte)(nil), + (*Fixed64Rules_Gt)(nil), + (*Fixed64Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[16].OneofWrappers = []any{ + (*SFixed32Rules_Lt)(nil), + (*SFixed32Rules_Lte)(nil), + (*SFixed32Rules_Gt)(nil), + 
(*SFixed32Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[17].OneofWrappers = []any{ + (*SFixed64Rules_Lt)(nil), + (*SFixed64Rules_Lte)(nil), + (*SFixed64Rules_Gt)(nil), + (*SFixed64Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[19].OneofWrappers = []any{ + (*StringRules_Email)(nil), + (*StringRules_Hostname)(nil), + (*StringRules_Ip)(nil), + (*StringRules_Ipv4)(nil), + (*StringRules_Ipv6)(nil), + (*StringRules_Uri)(nil), + (*StringRules_UriRef)(nil), + (*StringRules_Address)(nil), + (*StringRules_Uuid)(nil), + (*StringRules_Tuuid)(nil), + (*StringRules_IpWithPrefixlen)(nil), + (*StringRules_Ipv4WithPrefixlen)(nil), + (*StringRules_Ipv6WithPrefixlen)(nil), + (*StringRules_IpPrefix)(nil), + (*StringRules_Ipv4Prefix)(nil), + (*StringRules_Ipv6Prefix)(nil), + (*StringRules_HostAndPort)(nil), + (*StringRules_Ulid)(nil), + (*StringRules_WellKnownRegex)(nil), + } + file_buf_validate_validate_proto_msgTypes[20].OneofWrappers = []any{ + (*BytesRules_Ip)(nil), + (*BytesRules_Ipv4)(nil), + (*BytesRules_Ipv6)(nil), + (*BytesRules_Uuid)(nil), + } + file_buf_validate_validate_proto_msgTypes[25].OneofWrappers = []any{ + (*DurationRules_Lt)(nil), + (*DurationRules_Lte)(nil), + (*DurationRules_Gt)(nil), + (*DurationRules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[27].OneofWrappers = []any{ + (*TimestampRules_Lt)(nil), + (*TimestampRules_Lte)(nil), + (*TimestampRules_LtNow)(nil), + (*TimestampRules_Gt)(nil), + (*TimestampRules_Gte)(nil), + (*TimestampRules_GtNow)(nil), + } + file_buf_validate_validate_proto_msgTypes[31].OneofWrappers = []any{ + (*FieldPathElement_Index)(nil), + (*FieldPathElement_BoolKey)(nil), + (*FieldPathElement_IntKey)(nil), + (*FieldPathElement_UintKey)(nil), + (*FieldPathElement_StringKey)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
unsafe.Slice(unsafe.StringData(file_buf_validate_validate_proto_rawDesc), len(file_buf_validate_validate_proto_rawDesc)), + NumEnums: 2, + NumMessages: 32, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_buf_validate_validate_proto_goTypes, + DependencyIndexes: file_buf_validate_validate_proto_depIdxs, + EnumInfos: file_buf_validate_validate_proto_enumTypes, + MessageInfos: file_buf_validate_validate_proto_msgTypes, + ExtensionInfos: file_buf_validate_validate_proto_extTypes, + }.Build() + File_buf_validate_validate_proto = out.File + file_buf_validate_validate_proto_goTypes = nil + file_buf_validate_validate_proto_depIdxs = nil +} diff --git a/vendor/buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/buf/validate/validate_protoopaque.pb.go b/vendor/buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/buf/validate/validate_protoopaque.pb.go new file mode 100644 index 0000000000..4ebc3935c1 --- /dev/null +++ b/vendor/buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/buf/validate/validate_protoopaque.pb.go @@ -0,0 +1,15998 @@ +// Copyright 2023-2026 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: buf/validate/validate.proto + +// [Protovalidate](https://protovalidate.com/) is the semantic validation library for Protobuf. 
+// It provides standard annotations to validate common rules on messages and fields, as well as the ability to use [CEL](https://cel.dev) to write custom rules. +// It's the next generation of [protoc-gen-validate](https://github.com/bufbuild/protoc-gen-validate). +// +// This package provides the options, messages, and enums that power Protovalidate. +// Apply its options to messages, fields, and oneofs in your Protobuf schemas to add validation rules: +// +// ```proto +// message User { +// string id = 1 [(buf.validate.field).string.uuid = true]; +// string first_name = 2 [(buf.validate.field).string.max_len = 64]; +// string last_name = 3 [(buf.validate.field).string.max_len = 64]; +// +// option (buf.validate.message).cel = { +// id: "first_name_requires_last_name" +// message: "last_name must be present if first_name is present" +// expression: "!has(this.first_name) || has(this.last_name)" +// }; +// } +// ``` +// +// These rules are enforced at runtime by language-specific libraries. +// See the [developer quickstart](https://protovalidate.com/quickstart/) to get started, or go directly to the runtime library for your language: +// [Go](https://github.com/bufbuild/protovalidate-go) +// [JavaScript/TypeScript](https://github.com/bufbuild/protovalidate-es), +// [Java](https://github.com/bufbuild/protovalidate-java), +// [Python](https://github.com/bufbuild/protovalidate-python), +// or [C++](https://github.com/bufbuild/protovalidate-cc). 
+ +//go:build protoopaque + +package validate + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies how `FieldRules.ignore` behaves, depending on the field's value, and +// whether the field tracks presence. +type Ignore int32 + +const ( + // Ignore rules if the field tracks presence and is unset. This is the default + // behavior. + // + // In proto3, only message fields, members of a Protobuf `oneof`, and fields + // with the `optional` label track presence. 
Consequently, the following fields + // are always validated, whether a value is set or not: + // + // ```proto + // syntax="proto3"; + // + // message RulesApply { + // string email = 1 [ + // (buf.validate.field).string.email = true + // ]; + // int32 age = 2 [ + // (buf.validate.field).int32.gt = 0 + // ]; + // repeated string labels = 3 [ + // (buf.validate.field).repeated.min_items = 1 + // ]; + // } + // + // ``` + // + // In contrast, the following fields track presence, and are only validated if + // a value is set: + // + // ```proto + // syntax="proto3"; + // + // message RulesApplyIfSet { + // optional string email = 1 [ + // (buf.validate.field).string.email = true + // ]; + // oneof ref { + // string reference = 2 [ + // (buf.validate.field).string.uuid = true + // ]; + // string name = 3 [ + // (buf.validate.field).string.min_len = 4 + // ]; + // } + // SomeMessage msg = 4 [ + // (buf.validate.field).cel = {/* ... */} + // ]; + // } + // + // ``` + // + // To ensure that such a field is set, add the `required` rule. + // + // To learn which fields track presence, see the + // [Field Presence cheat sheet](https://protobuf.dev/programming-guides/field_presence/#cheat). + Ignore_IGNORE_UNSPECIFIED Ignore = 0 + // Ignore rules if the field is unset, or set to the zero value. + // + // The zero value depends on the field type: + // - For strings, the zero value is the empty string. + // - For bytes, the zero value is empty bytes. + // - For bool, the zero value is false. + // - For numeric types, the zero value is zero. + // - For enums, the zero value is the first defined enum value. + // - For repeated fields, the zero is an empty list. + // - For map fields, the zero is an empty map. + // - For message fields, absence of the message (typically a null-value) is considered zero value. + // + // For fields that track presence (e.g. adding the `optional` label in proto3), + // this a no-op and behavior is the same as the default `IGNORE_UNSPECIFIED`. 
+ Ignore_IGNORE_IF_ZERO_VALUE Ignore = 1 + // Always ignore rules, including the `required` rule. + // + // This is useful for ignoring the rules of a referenced message, or to + // temporarily ignore rules during development. + // + // ```proto + // + // message MyMessage { + // // The field's rules will always be ignored, including any validations + // // on value's fields. + // MyOtherMessage value = 1 [ + // (buf.validate.field).ignore = IGNORE_ALWAYS + // ]; + // } + // + // ``` + Ignore_IGNORE_ALWAYS Ignore = 3 +) + +// Enum value maps for Ignore. +var ( + Ignore_name = map[int32]string{ + 0: "IGNORE_UNSPECIFIED", + 1: "IGNORE_IF_ZERO_VALUE", + 3: "IGNORE_ALWAYS", + } + Ignore_value = map[string]int32{ + "IGNORE_UNSPECIFIED": 0, + "IGNORE_IF_ZERO_VALUE": 1, + "IGNORE_ALWAYS": 3, + } +) + +func (x Ignore) Enum() *Ignore { + p := new(Ignore) + *p = x + return p +} + +func (x Ignore) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Ignore) Descriptor() protoreflect.EnumDescriptor { + return file_buf_validate_validate_proto_enumTypes[0].Descriptor() +} + +func (Ignore) Type() protoreflect.EnumType { + return &file_buf_validate_validate_proto_enumTypes[0] +} + +func (x Ignore) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// KnownRegex contains some well-known patterns. +type KnownRegex int32 + +const ( + KnownRegex_KNOWN_REGEX_UNSPECIFIED KnownRegex = 0 + // HTTP header name as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2). + KnownRegex_KNOWN_REGEX_HTTP_HEADER_NAME KnownRegex = 1 + // HTTP header value as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.4). + KnownRegex_KNOWN_REGEX_HTTP_HEADER_VALUE KnownRegex = 2 +) + +// Enum value maps for KnownRegex. 
+var ( + KnownRegex_name = map[int32]string{ + 0: "KNOWN_REGEX_UNSPECIFIED", + 1: "KNOWN_REGEX_HTTP_HEADER_NAME", + 2: "KNOWN_REGEX_HTTP_HEADER_VALUE", + } + KnownRegex_value = map[string]int32{ + "KNOWN_REGEX_UNSPECIFIED": 0, + "KNOWN_REGEX_HTTP_HEADER_NAME": 1, + "KNOWN_REGEX_HTTP_HEADER_VALUE": 2, + } +) + +func (x KnownRegex) Enum() *KnownRegex { + p := new(KnownRegex) + *p = x + return p +} + +func (x KnownRegex) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (KnownRegex) Descriptor() protoreflect.EnumDescriptor { + return file_buf_validate_validate_proto_enumTypes[1].Descriptor() +} + +func (KnownRegex) Type() protoreflect.EnumType { + return &file_buf_validate_validate_proto_enumTypes[1] +} + +func (x KnownRegex) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// `Rule` represents a validation rule written in the Common Expression +// Language (CEL) syntax. Each Rule includes a unique identifier, an +// optional error message, and the CEL expression to evaluate. For more +// information, [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). 
+// +// ```proto +// +// message Foo { +// option (buf.validate.message).cel = { +// id: "foo.bar" +// message: "bar must be greater than 0" +// expression: "this.bar > 0" +// }; +// int32 bar = 1; +// } +// +// ``` +type Rule struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id *string `protobuf:"bytes,1,opt,name=id"` + xxx_hidden_Message *string `protobuf:"bytes,2,opt,name=message"` + xxx_hidden_Expression *string `protobuf:"bytes,3,opt,name=expression"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Rule) Reset() { + *x = Rule{} + mi := &file_buf_validate_validate_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Rule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Rule) ProtoMessage() {} + +func (x *Rule) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Rule) GetId() string { + if x != nil { + if x.xxx_hidden_Id != nil { + return *x.xxx_hidden_Id + } + return "" + } + return "" +} + +func (x *Rule) GetMessage() string { + if x != nil { + if x.xxx_hidden_Message != nil { + return *x.xxx_hidden_Message + } + return "" + } + return "" +} + +func (x *Rule) GetExpression() string { + if x != nil { + if x.xxx_hidden_Expression != nil { + return *x.xxx_hidden_Expression + } + return "" + } + return "" +} + +func (x *Rule) SetId(v string) { + x.xxx_hidden_Id = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 3) +} + +func (x *Rule) SetMessage(v string) { + x.xxx_hidden_Message = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 1, 3) +} + +func (x *Rule) SetExpression(v string) { + 
x.xxx_hidden_Expression = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 2, 3) +} + +func (x *Rule) HasId() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *Rule) HasMessage() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 1) +} + +func (x *Rule) HasExpression() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 2) +} + +func (x *Rule) ClearId() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Id = nil +} + +func (x *Rule) ClearMessage() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 1) + x.xxx_hidden_Message = nil +} + +func (x *Rule) ClearExpression() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 2) + x.xxx_hidden_Expression = nil +} + +type Rule_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `id` is a string that serves as a machine-readable name for this Rule. + // It should be unique within its scope, which could be either a message or a field. + Id *string + // `message` is an optional field that provides a human-readable error message + // for this Rule when the CEL expression evaluates to false. If a + // non-empty message is provided, any strings resulting from the CEL + // expression evaluation are ignored. + Message *string + // `expression` is the actual CEL expression that will be evaluated for + // validation. This string must resolve to either a boolean or a string + // value. If the expression evaluates to false or a non-empty string, the + // validation is considered failed, and the message is rejected. 
+ Expression *string +} + +func (b0 Rule_builder) Build() *Rule { + m0 := &Rule{} + b, x := &b0, m0 + _, _ = b, x + if b.Id != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 3) + x.xxx_hidden_Id = b.Id + } + if b.Message != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 1, 3) + x.xxx_hidden_Message = b.Message + } + if b.Expression != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 2, 3) + x.xxx_hidden_Expression = b.Expression + } + return m0 +} + +// MessageRules represents validation rules that are applied to the entire message. +// It includes disabling options and a list of Rule messages representing Common Expression Language (CEL) validation rules. +type MessageRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_CelExpression []string `protobuf:"bytes,5,rep,name=cel_expression,json=celExpression"` + xxx_hidden_Cel *[]*Rule `protobuf:"bytes,3,rep,name=cel"` + xxx_hidden_Oneof *[]*MessageOneofRule `protobuf:"bytes,4,rep,name=oneof"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageRules) Reset() { + *x = MessageRules{} + mi := &file_buf_validate_validate_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageRules) ProtoMessage() {} + +func (x *MessageRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *MessageRules) GetCelExpression() []string { + if x != nil { + return x.xxx_hidden_CelExpression + } + return nil +} + +func (x *MessageRules) GetCel() []*Rule { + if x != nil { + if x.xxx_hidden_Cel != nil { + return *x.xxx_hidden_Cel + } + } + return nil 
+} + +func (x *MessageRules) GetOneof() []*MessageOneofRule { + if x != nil { + if x.xxx_hidden_Oneof != nil { + return *x.xxx_hidden_Oneof + } + } + return nil +} + +func (x *MessageRules) SetCelExpression(v []string) { + x.xxx_hidden_CelExpression = v +} + +func (x *MessageRules) SetCel(v []*Rule) { + x.xxx_hidden_Cel = &v +} + +func (x *MessageRules) SetOneof(v []*MessageOneofRule) { + x.xxx_hidden_Oneof = &v +} + +type MessageRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `cel_expression` is a repeated field CEL expressions. Each expression specifies a validation + // rule to be applied to this message. These rules are written in Common Expression Language (CEL) syntax. + // + // This is a simplified form of the `cel` Rule field, where only `expression` is set. This allows for + // simpler syntax when defining CEL Rules where `id` and `message` derived from the `expression`. `id` will + // be same as the `expression`. + // + // For more information, [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `foo` must be greater than 42. + // option (buf.validate.message).cel_expression = "this.foo > 42"; + // // The field `foo` must be less than 84. + // option (buf.validate.message).cel_expression = "this.foo < 84"; + // optional int32 foo = 1; + // } + // + // ``` + CelExpression []string + // `cel` is a repeated field of type Rule. Each Rule specifies a validation rule to be applied to this message. + // These rules are written in Common Expression Language (CEL) syntax. For more information, + // [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `foo` must be greater than 42. 
+ // option (buf.validate.message).cel = { + // id: "my_message.value", + // message: "value must be greater than 42", + // expression: "this.foo > 42", + // }; + // optional int32 foo = 1; + // } + // + // ``` + Cel []*Rule + // `oneof` is a repeated field of type MessageOneofRule that specifies a list of fields + // of which at most one can be present. If `required` is also specified, then exactly one + // of the specified fields _must_ be present. + // + // This will enforce oneof-like constraints with a few features not provided by + // actual Protobuf oneof declarations: + // 1. Repeated and map fields are allowed in this validation. In a Protobuf oneof, + // only scalar fields are allowed. + // 2. Fields with implicit presence are allowed. In a Protobuf oneof, all member + // fields have explicit presence. This means that, for the purpose of determining + // how many fields are set, explicitly setting such a field to its zero value is + // effectively the same as not setting it at all. + // 3. This will always generate validation errors for a message unmarshalled from + // serialized data that sets more than one field. With a Protobuf oneof, when + // multiple fields are present in the serialized form, earlier values are usually + // silently ignored when unmarshalling, with only the last field being set when + // unmarshalling completes. + // + // Note that adding a field to a `oneof` will also set the IGNORE_IF_ZERO_VALUE on the fields. This means + // only the field that is set will be validated and the unset fields are not validated according to the field rules. + // This behavior can be overridden by setting `ignore` against a field. + // + // ```proto + // + // message MyMessage { + // // Only one of `field1` or `field2` _can_ be present in this message. + // option (buf.validate.message).oneof = { fields: ["field1", "field2"] }; + // // Exactly one of `field3` or `field4` _must_ be present in this message. 
+ // option (buf.validate.message).oneof = { fields: ["field3", "field4"], required: true }; + // string field1 = 1; + // bytes field2 = 2; + // bool field3 = 3; + // int32 field4 = 4; + // } + // + // ``` + Oneof []*MessageOneofRule +} + +func (b0 MessageRules_builder) Build() *MessageRules { + m0 := &MessageRules{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_CelExpression = b.CelExpression + x.xxx_hidden_Cel = &b.Cel + x.xxx_hidden_Oneof = &b.Oneof + return m0 +} + +type MessageOneofRule struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Fields []string `protobuf:"bytes,1,rep,name=fields"` + xxx_hidden_Required bool `protobuf:"varint,2,opt,name=required"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageOneofRule) Reset() { + *x = MessageOneofRule{} + mi := &file_buf_validate_validate_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageOneofRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageOneofRule) ProtoMessage() {} + +func (x *MessageOneofRule) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *MessageOneofRule) GetFields() []string { + if x != nil { + return x.xxx_hidden_Fields + } + return nil +} + +func (x *MessageOneofRule) GetRequired() bool { + if x != nil { + return x.xxx_hidden_Required + } + return false +} + +func (x *MessageOneofRule) SetFields(v []string) { + x.xxx_hidden_Fields = v +} + +func (x *MessageOneofRule) SetRequired(v bool) { + x.xxx_hidden_Required = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 1, 2) +} + +func (x *MessageOneofRule) HasRequired() 
bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 1) +} + +func (x *MessageOneofRule) ClearRequired() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 1) + x.xxx_hidden_Required = false +} + +type MessageOneofRule_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A list of field names to include in the oneof. All field names must be + // defined in the message. At least one field must be specified, and + // duplicates are not permitted. + Fields []string + // If true, one of the fields specified _must_ be set. + Required *bool +} + +func (b0 MessageOneofRule_builder) Build() *MessageOneofRule { + m0 := &MessageOneofRule{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Fields = b.Fields + if b.Required != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 1, 2) + x.xxx_hidden_Required = *b.Required + } + return m0 +} + +// The `OneofRules` message type enables you to manage rules for +// oneof fields in your protobuf messages. 
+type OneofRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Required bool `protobuf:"varint,1,opt,name=required"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *OneofRules) Reset() { + *x = OneofRules{} + mi := &file_buf_validate_validate_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *OneofRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofRules) ProtoMessage() {} + +func (x *OneofRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *OneofRules) GetRequired() bool { + if x != nil { + return x.xxx_hidden_Required + } + return false +} + +func (x *OneofRules) SetRequired(v bool) { + x.xxx_hidden_Required = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 1) +} + +func (x *OneofRules) HasRequired() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *OneofRules) ClearRequired() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Required = false +} + +type OneofRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // If `required` is true, exactly one field of the oneof must be set. A + // validation error is returned if no fields in the oneof are set. Further rules + // should be placed on the fields themselves to ensure they are valid values, + // such as `min_len` or `gt`. + // + // ```proto + // + // message MyMessage { + // oneof value { + // // Either `a` or `b` must be set. 
If `a` is set, it must also be + // // non-empty; whereas if `b` is set, it can still be an empty string. + // option (buf.validate.oneof).required = true; + // string a = 1 [(buf.validate.field).string.min_len = 1]; + // string b = 2; + // } + // } + // + // ``` + Required *bool +} + +func (b0 OneofRules_builder) Build() *OneofRules { + m0 := &OneofRules{} + b, x := &b0, m0 + _, _ = b, x + if b.Required != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 1) + x.xxx_hidden_Required = *b.Required + } + return m0 +} + +// FieldRules encapsulates the rules for each type of field. Depending on +// the field, the correct set should be used to ensure proper validations. +type FieldRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_CelExpression []string `protobuf:"bytes,29,rep,name=cel_expression,json=celExpression"` + xxx_hidden_Cel *[]*Rule `protobuf:"bytes,23,rep,name=cel"` + xxx_hidden_Required bool `protobuf:"varint,25,opt,name=required"` + xxx_hidden_Ignore Ignore `protobuf:"varint,27,opt,name=ignore,enum=buf.validate.Ignore"` + xxx_hidden_Type isFieldRules_Type `protobuf_oneof:"type"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FieldRules) Reset() { + *x = FieldRules{} + mi := &file_buf_validate_validate_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FieldRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldRules) ProtoMessage() {} + +func (x *FieldRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FieldRules) GetCelExpression() []string { + if x != nil { + return 
x.xxx_hidden_CelExpression + } + return nil +} + +func (x *FieldRules) GetCel() []*Rule { + if x != nil { + if x.xxx_hidden_Cel != nil { + return *x.xxx_hidden_Cel + } + } + return nil +} + +func (x *FieldRules) GetRequired() bool { + if x != nil { + return x.xxx_hidden_Required + } + return false +} + +func (x *FieldRules) GetIgnore() Ignore { + if x != nil { + if protoimpl.X.Present(&(x.XXX_presence[0]), 3) { + return x.xxx_hidden_Ignore + } + } + return Ignore_IGNORE_UNSPECIFIED +} + +func (x *FieldRules) GetFloat() *FloatRules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Float); ok { + return x.Float + } + } + return nil +} + +func (x *FieldRules) GetDouble() *DoubleRules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Double); ok { + return x.Double + } + } + return nil +} + +func (x *FieldRules) GetInt32() *Int32Rules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Int32); ok { + return x.Int32 + } + } + return nil +} + +func (x *FieldRules) GetInt64() *Int64Rules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Int64); ok { + return x.Int64 + } + } + return nil +} + +func (x *FieldRules) GetUint32() *UInt32Rules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Uint32); ok { + return x.Uint32 + } + } + return nil +} + +func (x *FieldRules) GetUint64() *UInt64Rules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Uint64); ok { + return x.Uint64 + } + } + return nil +} + +func (x *FieldRules) GetSint32() *SInt32Rules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Sint32); ok { + return x.Sint32 + } + } + return nil +} + +func (x *FieldRules) GetSint64() *SInt64Rules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Sint64); ok { + return x.Sint64 + } + } + return nil +} + +func (x *FieldRules) GetFixed32() *Fixed32Rules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Fixed32); ok { + return x.Fixed32 + } + } + return nil +} + 
+func (x *FieldRules) GetFixed64() *Fixed64Rules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Fixed64); ok { + return x.Fixed64 + } + } + return nil +} + +func (x *FieldRules) GetSfixed32() *SFixed32Rules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Sfixed32); ok { + return x.Sfixed32 + } + } + return nil +} + +func (x *FieldRules) GetSfixed64() *SFixed64Rules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Sfixed64); ok { + return x.Sfixed64 + } + } + return nil +} + +func (x *FieldRules) GetBool() *BoolRules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Bool); ok { + return x.Bool + } + } + return nil +} + +func (x *FieldRules) GetString() *StringRules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_String_); ok { + return x.String_ + } + } + return nil +} + +func (x *FieldRules) GetBytes() *BytesRules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Bytes); ok { + return x.Bytes + } + } + return nil +} + +func (x *FieldRules) GetEnum() *EnumRules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Enum); ok { + return x.Enum + } + } + return nil +} + +func (x *FieldRules) GetRepeated() *RepeatedRules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Repeated); ok { + return x.Repeated + } + } + return nil +} + +func (x *FieldRules) GetMap() *MapRules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Map); ok { + return x.Map + } + } + return nil +} + +func (x *FieldRules) GetAny() *AnyRules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Any); ok { + return x.Any + } + } + return nil +} + +func (x *FieldRules) GetDuration() *DurationRules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Duration); ok { + return x.Duration + } + } + return nil +} + +func (x *FieldRules) GetFieldMask() *FieldMaskRules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_FieldMask); ok { + return x.FieldMask + } 
+ } + return nil +} + +func (x *FieldRules) GetTimestamp() *TimestampRules { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*fieldRules_Timestamp); ok { + return x.Timestamp + } + } + return nil +} + +func (x *FieldRules) SetCelExpression(v []string) { + x.xxx_hidden_CelExpression = v +} + +func (x *FieldRules) SetCel(v []*Rule) { + x.xxx_hidden_Cel = &v +} + +func (x *FieldRules) SetRequired(v bool) { + x.xxx_hidden_Required = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 2, 5) +} + +func (x *FieldRules) SetIgnore(v Ignore) { + x.xxx_hidden_Ignore = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 3, 5) +} + +func (x *FieldRules) SetFloat(v *FloatRules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Float{v} +} + +func (x *FieldRules) SetDouble(v *DoubleRules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Double{v} +} + +func (x *FieldRules) SetInt32(v *Int32Rules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Int32{v} +} + +func (x *FieldRules) SetInt64(v *Int64Rules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Int64{v} +} + +func (x *FieldRules) SetUint32(v *UInt32Rules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Uint32{v} +} + +func (x *FieldRules) SetUint64(v *UInt64Rules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Uint64{v} +} + +func (x *FieldRules) SetSint32(v *SInt32Rules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Sint32{v} +} + +func (x *FieldRules) SetSint64(v *SInt64Rules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Sint64{v} +} + +func (x *FieldRules) SetFixed32(v *Fixed32Rules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Fixed32{v} +} 
+ +func (x *FieldRules) SetFixed64(v *Fixed64Rules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Fixed64{v} +} + +func (x *FieldRules) SetSfixed32(v *SFixed32Rules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Sfixed32{v} +} + +func (x *FieldRules) SetSfixed64(v *SFixed64Rules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Sfixed64{v} +} + +func (x *FieldRules) SetBool(v *BoolRules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Bool{v} +} + +func (x *FieldRules) SetString(v *StringRules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_String_{v} +} + +func (x *FieldRules) SetBytes(v *BytesRules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Bytes{v} +} + +func (x *FieldRules) SetEnum(v *EnumRules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Enum{v} +} + +func (x *FieldRules) SetRepeated(v *RepeatedRules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Repeated{v} +} + +func (x *FieldRules) SetMap(v *MapRules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Map{v} +} + +func (x *FieldRules) SetAny(v *AnyRules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Any{v} +} + +func (x *FieldRules) SetDuration(v *DurationRules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_Duration{v} +} + +func (x *FieldRules) SetFieldMask(v *FieldMaskRules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &fieldRules_FieldMask{v} +} + +func (x *FieldRules) SetTimestamp(v *TimestampRules) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = 
&fieldRules_Timestamp{v} +} + +func (x *FieldRules) HasRequired() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 2) +} + +func (x *FieldRules) HasIgnore() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 3) +} + +func (x *FieldRules) HasType() bool { + if x == nil { + return false + } + return x.xxx_hidden_Type != nil +} + +func (x *FieldRules) HasFloat() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Float) + return ok +} + +func (x *FieldRules) HasDouble() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Double) + return ok +} + +func (x *FieldRules) HasInt32() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Int32) + return ok +} + +func (x *FieldRules) HasInt64() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Int64) + return ok +} + +func (x *FieldRules) HasUint32() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Uint32) + return ok +} + +func (x *FieldRules) HasUint64() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Uint64) + return ok +} + +func (x *FieldRules) HasSint32() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Sint32) + return ok +} + +func (x *FieldRules) HasSint64() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Sint64) + return ok +} + +func (x *FieldRules) HasFixed32() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Fixed32) + return ok +} + +func (x *FieldRules) HasFixed64() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Fixed64) + return ok +} + +func (x *FieldRules) HasSfixed32() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Sfixed32) + return ok +} + +func (x 
*FieldRules) HasSfixed64() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Sfixed64) + return ok +} + +func (x *FieldRules) HasBool() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Bool) + return ok +} + +func (x *FieldRules) HasString() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_String_) + return ok +} + +func (x *FieldRules) HasBytes() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Bytes) + return ok +} + +func (x *FieldRules) HasEnum() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Enum) + return ok +} + +func (x *FieldRules) HasRepeated() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Repeated) + return ok +} + +func (x *FieldRules) HasMap() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Map) + return ok +} + +func (x *FieldRules) HasAny() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Any) + return ok +} + +func (x *FieldRules) HasDuration() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Duration) + return ok +} + +func (x *FieldRules) HasFieldMask() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_FieldMask) + return ok +} + +func (x *FieldRules) HasTimestamp() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*fieldRules_Timestamp) + return ok +} + +func (x *FieldRules) ClearRequired() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 2) + x.xxx_hidden_Required = false +} + +func (x *FieldRules) ClearIgnore() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 3) + x.xxx_hidden_Ignore = Ignore_IGNORE_UNSPECIFIED +} + +func (x *FieldRules) ClearType() { + x.xxx_hidden_Type = nil +} + +func (x *FieldRules) ClearFloat() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Float); 
ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearDouble() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Double); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearInt32() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Int32); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearInt64() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Int64); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearUint32() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Uint32); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearUint64() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Uint64); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearSint32() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Sint32); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearSint64() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Sint64); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearFixed32() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Fixed32); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearFixed64() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Fixed64); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearSfixed32() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Sfixed32); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearSfixed64() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Sfixed64); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearBool() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Bool); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearString() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_String_); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearBytes() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Bytes); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearEnum() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Enum); ok { + 
x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearRepeated() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Repeated); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearMap() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Map); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearAny() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Any); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearDuration() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Duration); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearFieldMask() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_FieldMask); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *FieldRules) ClearTimestamp() { + if _, ok := x.xxx_hidden_Type.(*fieldRules_Timestamp); ok { + x.xxx_hidden_Type = nil + } +} + +const FieldRules_Type_not_set_case case_FieldRules_Type = 0 +const FieldRules_Float_case case_FieldRules_Type = 1 +const FieldRules_Double_case case_FieldRules_Type = 2 +const FieldRules_Int32_case case_FieldRules_Type = 3 +const FieldRules_Int64_case case_FieldRules_Type = 4 +const FieldRules_Uint32_case case_FieldRules_Type = 5 +const FieldRules_Uint64_case case_FieldRules_Type = 6 +const FieldRules_Sint32_case case_FieldRules_Type = 7 +const FieldRules_Sint64_case case_FieldRules_Type = 8 +const FieldRules_Fixed32_case case_FieldRules_Type = 9 +const FieldRules_Fixed64_case case_FieldRules_Type = 10 +const FieldRules_Sfixed32_case case_FieldRules_Type = 11 +const FieldRules_Sfixed64_case case_FieldRules_Type = 12 +const FieldRules_Bool_case case_FieldRules_Type = 13 +const FieldRules_String__case case_FieldRules_Type = 14 +const FieldRules_Bytes_case case_FieldRules_Type = 15 +const FieldRules_Enum_case case_FieldRules_Type = 16 +const FieldRules_Repeated_case case_FieldRules_Type = 18 +const FieldRules_Map_case case_FieldRules_Type = 19 +const FieldRules_Any_case case_FieldRules_Type = 20 +const FieldRules_Duration_case case_FieldRules_Type = 
21 +const FieldRules_FieldMask_case case_FieldRules_Type = 28 +const FieldRules_Timestamp_case case_FieldRules_Type = 22 + +func (x *FieldRules) WhichType() case_FieldRules_Type { + if x == nil { + return FieldRules_Type_not_set_case + } + switch x.xxx_hidden_Type.(type) { + case *fieldRules_Float: + return FieldRules_Float_case + case *fieldRules_Double: + return FieldRules_Double_case + case *fieldRules_Int32: + return FieldRules_Int32_case + case *fieldRules_Int64: + return FieldRules_Int64_case + case *fieldRules_Uint32: + return FieldRules_Uint32_case + case *fieldRules_Uint64: + return FieldRules_Uint64_case + case *fieldRules_Sint32: + return FieldRules_Sint32_case + case *fieldRules_Sint64: + return FieldRules_Sint64_case + case *fieldRules_Fixed32: + return FieldRules_Fixed32_case + case *fieldRules_Fixed64: + return FieldRules_Fixed64_case + case *fieldRules_Sfixed32: + return FieldRules_Sfixed32_case + case *fieldRules_Sfixed64: + return FieldRules_Sfixed64_case + case *fieldRules_Bool: + return FieldRules_Bool_case + case *fieldRules_String_: + return FieldRules_String__case + case *fieldRules_Bytes: + return FieldRules_Bytes_case + case *fieldRules_Enum: + return FieldRules_Enum_case + case *fieldRules_Repeated: + return FieldRules_Repeated_case + case *fieldRules_Map: + return FieldRules_Map_case + case *fieldRules_Any: + return FieldRules_Any_case + case *fieldRules_Duration: + return FieldRules_Duration_case + case *fieldRules_FieldMask: + return FieldRules_FieldMask_case + case *fieldRules_Timestamp: + return FieldRules_Timestamp_case + default: + return FieldRules_Type_not_set_case + } +} + +type FieldRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `cel_expression` is a repeated field CEL expressions. Each expression specifies a validation + // rule to be applied to this message. These rules are written in Common Expression Language (CEL) syntax. 
+ // + // This is a simplified form of the `cel` Rule field, where only `expression` is set. This allows for + // simpler syntax when defining CEL Rules where `id` and `message` derived from the `expression`. `id` will + // be same as the `expression`. + // + // For more information, [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `value` must be greater than 42. + // optional int32 value = 1 [(buf.validate.field).cel_expression = "this > 42"]; + // } + // + // ``` + CelExpression []string + // `cel` is a repeated field used to represent a textual expression + // in the Common Expression Language (CEL) syntax. For more information, + // [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `value` must be greater than 42. + // optional int32 value = 1 [(buf.validate.field).cel = { + // id: "my_message.value", + // message: "value must be greater than 42", + // expression: "this > 42", + // }]; + // } + // + // ``` + Cel []*Rule + // If `required` is true, the field must be set. A validation error is returned + // if the field is not set. + // + // ```proto + // syntax="proto3"; + // + // message FieldsWithPresence { + // // Requires any string to be set, including the empty string. + // optional string link = 1 [ + // (buf.validate.field).required = true + // ]; + // // Requires true or false to be set. + // optional bool disabled = 2 [ + // (buf.validate.field).required = true + // ]; + // // Requires a message to be set, including the empty message. + // SomeMessage msg = 4 [ + // (buf.validate.field).required = true + // ]; + // } + // + // ``` + // + // All fields in the example above track presence. By default, Protovalidate + // ignores rules on those fields if no value is set. `required` ensures that + // the fields are set and valid. 
+ // + // Fields that don't track presence are always validated by Protovalidate, + // whether they are set or not. It is not necessary to add `required`. It + // can be added to indicate that the field cannot be the zero value. + // + // ```proto + // syntax="proto3"; + // + // message FieldsWithoutPresence { + // // `string.email` always applies, even to an empty string. + // string link = 1 [ + // (buf.validate.field).string.email = true + // ]; + // // `repeated.min_items` always applies, even to an empty list. + // repeated string labels = 2 [ + // (buf.validate.field).repeated.min_items = 1 + // ]; + // // `required`, for fields that don't track presence, indicates + // // the value of the field can't be the zero value. + // int32 zero_value_not_allowed = 3 [ + // (buf.validate.field).required = true + // ]; + // } + // + // ``` + // + // To learn which fields track presence, see the + // [Field Presence cheat sheet](https://protobuf.dev/programming-guides/field_presence/#cheat). + // + // Note: While field rules can be applied to repeated items, map keys, and map + // values, the elements are always considered to be set. Consequently, + // specifying `repeated.items.required` is redundant. + Required *bool + // Ignore validation rules on the field if its value matches the specified + // criteria. See the `Ignore` enum for details. + // + // ```proto + // + // message UpdateRequest { + // // The uri rule only applies if the field is not an empty string. 
+ // string url = 1 [ + // (buf.validate.field).ignore = IGNORE_IF_ZERO_VALUE, + // (buf.validate.field).string.uri = true + // ]; + // } + // + // ``` + Ignore *Ignore + // Fields of oneof xxx_hidden_Type: + // Scalar Field Types + Float *FloatRules + Double *DoubleRules + Int32 *Int32Rules + Int64 *Int64Rules + Uint32 *UInt32Rules + Uint64 *UInt64Rules + Sint32 *SInt32Rules + Sint64 *SInt64Rules + Fixed32 *Fixed32Rules + Fixed64 *Fixed64Rules + Sfixed32 *SFixed32Rules + Sfixed64 *SFixed64Rules + Bool *BoolRules + String *StringRules + Bytes *BytesRules + // Complex Field Types + Enum *EnumRules + Repeated *RepeatedRules + Map *MapRules + // Well-Known Field Types + Any *AnyRules + Duration *DurationRules + FieldMask *FieldMaskRules + Timestamp *TimestampRules + // -- end of xxx_hidden_Type +} + +func (b0 FieldRules_builder) Build() *FieldRules { + m0 := &FieldRules{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_CelExpression = b.CelExpression + x.xxx_hidden_Cel = &b.Cel + if b.Required != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 2, 5) + x.xxx_hidden_Required = *b.Required + } + if b.Ignore != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 3, 5) + x.xxx_hidden_Ignore = *b.Ignore + } + if b.Float != nil { + x.xxx_hidden_Type = &fieldRules_Float{b.Float} + } + if b.Double != nil { + x.xxx_hidden_Type = &fieldRules_Double{b.Double} + } + if b.Int32 != nil { + x.xxx_hidden_Type = &fieldRules_Int32{b.Int32} + } + if b.Int64 != nil { + x.xxx_hidden_Type = &fieldRules_Int64{b.Int64} + } + if b.Uint32 != nil { + x.xxx_hidden_Type = &fieldRules_Uint32{b.Uint32} + } + if b.Uint64 != nil { + x.xxx_hidden_Type = &fieldRules_Uint64{b.Uint64} + } + if b.Sint32 != nil { + x.xxx_hidden_Type = &fieldRules_Sint32{b.Sint32} + } + if b.Sint64 != nil { + x.xxx_hidden_Type = &fieldRules_Sint64{b.Sint64} + } + if b.Fixed32 != nil { + x.xxx_hidden_Type = &fieldRules_Fixed32{b.Fixed32} + } + if b.Fixed64 != nil { + x.xxx_hidden_Type = 
&fieldRules_Fixed64{b.Fixed64} + } + if b.Sfixed32 != nil { + x.xxx_hidden_Type = &fieldRules_Sfixed32{b.Sfixed32} + } + if b.Sfixed64 != nil { + x.xxx_hidden_Type = &fieldRules_Sfixed64{b.Sfixed64} + } + if b.Bool != nil { + x.xxx_hidden_Type = &fieldRules_Bool{b.Bool} + } + if b.String != nil { + x.xxx_hidden_Type = &fieldRules_String_{b.String} + } + if b.Bytes != nil { + x.xxx_hidden_Type = &fieldRules_Bytes{b.Bytes} + } + if b.Enum != nil { + x.xxx_hidden_Type = &fieldRules_Enum{b.Enum} + } + if b.Repeated != nil { + x.xxx_hidden_Type = &fieldRules_Repeated{b.Repeated} + } + if b.Map != nil { + x.xxx_hidden_Type = &fieldRules_Map{b.Map} + } + if b.Any != nil { + x.xxx_hidden_Type = &fieldRules_Any{b.Any} + } + if b.Duration != nil { + x.xxx_hidden_Type = &fieldRules_Duration{b.Duration} + } + if b.FieldMask != nil { + x.xxx_hidden_Type = &fieldRules_FieldMask{b.FieldMask} + } + if b.Timestamp != nil { + x.xxx_hidden_Type = &fieldRules_Timestamp{b.Timestamp} + } + return m0 +} + +type case_FieldRules_Type protoreflect.FieldNumber + +func (x case_FieldRules_Type) String() string { + md := file_buf_validate_validate_proto_msgTypes[4].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isFieldRules_Type interface { + isFieldRules_Type() +} + +type fieldRules_Float struct { + // Scalar Field Types + Float *FloatRules `protobuf:"bytes,1,opt,name=float,oneof"` +} + +type fieldRules_Double struct { + Double *DoubleRules `protobuf:"bytes,2,opt,name=double,oneof"` +} + +type fieldRules_Int32 struct { + Int32 *Int32Rules `protobuf:"bytes,3,opt,name=int32,oneof"` +} + +type fieldRules_Int64 struct { + Int64 *Int64Rules `protobuf:"bytes,4,opt,name=int64,oneof"` +} + +type fieldRules_Uint32 struct { + Uint32 *UInt32Rules `protobuf:"bytes,5,opt,name=uint32,oneof"` +} + +type fieldRules_Uint64 struct { + Uint64 *UInt64Rules `protobuf:"bytes,6,opt,name=uint64,oneof"` +} + +type 
fieldRules_Sint32 struct { + Sint32 *SInt32Rules `protobuf:"bytes,7,opt,name=sint32,oneof"` +} + +type fieldRules_Sint64 struct { + Sint64 *SInt64Rules `protobuf:"bytes,8,opt,name=sint64,oneof"` +} + +type fieldRules_Fixed32 struct { + Fixed32 *Fixed32Rules `protobuf:"bytes,9,opt,name=fixed32,oneof"` +} + +type fieldRules_Fixed64 struct { + Fixed64 *Fixed64Rules `protobuf:"bytes,10,opt,name=fixed64,oneof"` +} + +type fieldRules_Sfixed32 struct { + Sfixed32 *SFixed32Rules `protobuf:"bytes,11,opt,name=sfixed32,oneof"` +} + +type fieldRules_Sfixed64 struct { + Sfixed64 *SFixed64Rules `protobuf:"bytes,12,opt,name=sfixed64,oneof"` +} + +type fieldRules_Bool struct { + Bool *BoolRules `protobuf:"bytes,13,opt,name=bool,oneof"` +} + +type fieldRules_String_ struct { + String_ *StringRules `protobuf:"bytes,14,opt,name=string,oneof"` +} + +type fieldRules_Bytes struct { + Bytes *BytesRules `protobuf:"bytes,15,opt,name=bytes,oneof"` +} + +type fieldRules_Enum struct { + // Complex Field Types + Enum *EnumRules `protobuf:"bytes,16,opt,name=enum,oneof"` +} + +type fieldRules_Repeated struct { + Repeated *RepeatedRules `protobuf:"bytes,18,opt,name=repeated,oneof"` +} + +type fieldRules_Map struct { + Map *MapRules `protobuf:"bytes,19,opt,name=map,oneof"` +} + +type fieldRules_Any struct { + // Well-Known Field Types + Any *AnyRules `protobuf:"bytes,20,opt,name=any,oneof"` +} + +type fieldRules_Duration struct { + Duration *DurationRules `protobuf:"bytes,21,opt,name=duration,oneof"` +} + +type fieldRules_FieldMask struct { + FieldMask *FieldMaskRules `protobuf:"bytes,28,opt,name=field_mask,json=fieldMask,oneof"` +} + +type fieldRules_Timestamp struct { + Timestamp *TimestampRules `protobuf:"bytes,22,opt,name=timestamp,oneof"` +} + +func (*fieldRules_Float) isFieldRules_Type() {} + +func (*fieldRules_Double) isFieldRules_Type() {} + +func (*fieldRules_Int32) isFieldRules_Type() {} + +func (*fieldRules_Int64) isFieldRules_Type() {} + +func (*fieldRules_Uint32) isFieldRules_Type() 
{} + +func (*fieldRules_Uint64) isFieldRules_Type() {} + +func (*fieldRules_Sint32) isFieldRules_Type() {} + +func (*fieldRules_Sint64) isFieldRules_Type() {} + +func (*fieldRules_Fixed32) isFieldRules_Type() {} + +func (*fieldRules_Fixed64) isFieldRules_Type() {} + +func (*fieldRules_Sfixed32) isFieldRules_Type() {} + +func (*fieldRules_Sfixed64) isFieldRules_Type() {} + +func (*fieldRules_Bool) isFieldRules_Type() {} + +func (*fieldRules_String_) isFieldRules_Type() {} + +func (*fieldRules_Bytes) isFieldRules_Type() {} + +func (*fieldRules_Enum) isFieldRules_Type() {} + +func (*fieldRules_Repeated) isFieldRules_Type() {} + +func (*fieldRules_Map) isFieldRules_Type() {} + +func (*fieldRules_Any) isFieldRules_Type() {} + +func (*fieldRules_Duration) isFieldRules_Type() {} + +func (*fieldRules_FieldMask) isFieldRules_Type() {} + +func (*fieldRules_Timestamp) isFieldRules_Type() {} + +// PredefinedRules are custom rules that can be re-used with +// multiple fields. +type PredefinedRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Cel *[]*Rule `protobuf:"bytes,1,rep,name=cel"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PredefinedRules) Reset() { + *x = PredefinedRules{} + mi := &file_buf_validate_validate_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PredefinedRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PredefinedRules) ProtoMessage() {} + +func (x *PredefinedRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *PredefinedRules) GetCel() []*Rule { + if x != nil { + if x.xxx_hidden_Cel != nil { + return *x.xxx_hidden_Cel + } + } + return nil +} + +func (x 
*PredefinedRules) SetCel(v []*Rule) { + x.xxx_hidden_Cel = &v +} + +type PredefinedRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `cel` is a repeated field used to represent a textual expression + // in the Common Expression Language (CEL) syntax. For more information, + // [see our documentation](https://buf.build/docs/protovalidate/schemas/predefined-rules/). + // + // ```proto + // + // message MyMessage { + // // The field `value` must be greater than 42. + // optional int32 value = 1 [(buf.validate.predefined).cel = { + // id: "my_message.value", + // message: "value must be greater than 42", + // expression: "this > 42", + // }]; + // } + // + // ``` + Cel []*Rule +} + +func (b0 PredefinedRules_builder) Build() *PredefinedRules { + m0 := &PredefinedRules{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Cel = &b.Cel + return m0 +} + +// FloatRules describes the rules applied to `float` values. These +// rules may also be applied to the `google.protobuf.FloatValue` Well-Known-Type. 
+type FloatRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const float32 `protobuf:"fixed32,1,opt,name=const"` + xxx_hidden_LessThan isFloatRules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isFloatRules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_In []float32 `protobuf:"fixed32,6,rep,name=in"` + xxx_hidden_NotIn []float32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn"` + xxx_hidden_Finite bool `protobuf:"varint,8,opt,name=finite"` + xxx_hidden_Example []float32 `protobuf:"fixed32,9,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FloatRules) Reset() { + *x = FloatRules{} + mi := &file_buf_validate_validate_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FloatRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FloatRules) ProtoMessage() {} + +func (x *FloatRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FloatRules) GetConst() float32 { + if x != nil { + return x.xxx_hidden_Const + } + return 0 +} + +func (x *FloatRules) GetLt() float32 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*floatRules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *FloatRules) GetLte() float32 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*floatRules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *FloatRules) GetGt() float32 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*floatRules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *FloatRules) GetGte() 
float32 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*floatRules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *FloatRules) GetIn() []float32 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *FloatRules) GetNotIn() []float32 { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *FloatRules) GetFinite() bool { + if x != nil { + return x.xxx_hidden_Finite + } + return false +} + +func (x *FloatRules) GetExample() []float32 { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *FloatRules) SetConst(v float32) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 7) +} + +func (x *FloatRules) SetLt(v float32) { + x.xxx_hidden_LessThan = &floatRules_Lt{v} +} + +func (x *FloatRules) SetLte(v float32) { + x.xxx_hidden_LessThan = &floatRules_Lte{v} +} + +func (x *FloatRules) SetGt(v float32) { + x.xxx_hidden_GreaterThan = &floatRules_Gt{v} +} + +func (x *FloatRules) SetGte(v float32) { + x.xxx_hidden_GreaterThan = &floatRules_Gte{v} +} + +func (x *FloatRules) SetIn(v []float32) { + x.xxx_hidden_In = v +} + +func (x *FloatRules) SetNotIn(v []float32) { + x.xxx_hidden_NotIn = v +} + +func (x *FloatRules) SetFinite(v bool) { + x.xxx_hidden_Finite = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 5, 7) +} + +func (x *FloatRules) SetExample(v []float32) { + x.xxx_hidden_Example = v +} + +func (x *FloatRules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *FloatRules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *FloatRules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*floatRules_Lt) + return ok +} + +func (x *FloatRules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*floatRules_Lte) + return ok +} + +func (x *FloatRules) HasGreaterThan() bool { 
+ if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *FloatRules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*floatRules_Gt) + return ok +} + +func (x *FloatRules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*floatRules_Gte) + return ok +} + +func (x *FloatRules) HasFinite() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 5) +} + +func (x *FloatRules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = 0 +} + +func (x *FloatRules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *FloatRules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*floatRules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *FloatRules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*floatRules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *FloatRules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x *FloatRules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*floatRules_Gt); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *FloatRules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*floatRules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *FloatRules) ClearFinite() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 5) + x.xxx_hidden_Finite = false +} + +const FloatRules_LessThan_not_set_case case_FloatRules_LessThan = 0 +const FloatRules_Lt_case case_FloatRules_LessThan = 2 +const FloatRules_Lte_case case_FloatRules_LessThan = 3 + +func (x *FloatRules) WhichLessThan() case_FloatRules_LessThan { + if x == nil { + return FloatRules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *floatRules_Lt: + return FloatRules_Lt_case + case *floatRules_Lte: + return FloatRules_Lte_case + default: + return FloatRules_LessThan_not_set_case + } +} + +const 
FloatRules_GreaterThan_not_set_case case_FloatRules_GreaterThan = 0 +const FloatRules_Gt_case case_FloatRules_GreaterThan = 4 +const FloatRules_Gte_case case_FloatRules_GreaterThan = 5 + +func (x *FloatRules) WhichGreaterThan() case_FloatRules_GreaterThan { + if x == nil { + return FloatRules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *floatRules_Gt: + return FloatRules_Gt_case + case *floatRules_Gte: + return FloatRules_Gte_case + default: + return FloatRules_GreaterThan_not_set_case + } +} + +type FloatRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must equal 42.0 + // float value = 1 [(buf.validate.field).float.const = 42.0]; + // } + // + // ``` + Const *float32 + // Fields of oneof xxx_hidden_LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be less than 10.0 + // float value = 1 [(buf.validate.field).float.lt = 10.0]; + // } + // + // ``` + Lt *float32 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be less than or equal to 10.0 + // float value = 1 [(buf.validate.field).float.lte = 10.0]; + // } + // + // ``` + Lte *float32 + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). 
If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be greater than 5.0 [float.gt] + // float value = 1 [(buf.validate.field).float.gt = 5.0]; + // + // // value must be greater than 5 and less than 10.0 [float.gt_lt] + // float other_value = 2 [(buf.validate.field).float = { gt: 5.0, lt: 10.0 }]; + // + // // value must be greater than 10 or less than 5.0 [float.gt_lt_exclusive] + // float another_value = 3 [(buf.validate.field).float = { gt: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gt *float32 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be greater than or equal to 5.0 [float.gte] + // float value = 1 [(buf.validate.field).float.gte = 5.0]; + // + // // value must be greater than or equal to 5.0 and less than 10.0 [float.gte_lt] + // float other_value = 2 [(buf.validate.field).float = { gte: 5.0, lt: 10.0 }]; + // + // // value must be greater than or equal to 10.0 or less than 5.0 [float.gte_lt_exclusive] + // float another_value = 3 [(buf.validate.field).float = { gte: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gte *float32 + // -- end of xxx_hidden_GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message + // is generated. 
+ // + // ```proto + // + // message MyFloat { + // // value must be in list [1.0, 2.0, 3.0] + // float value = 1 [(buf.validate.field).float = { in: [1.0, 2.0, 3.0] }]; + // } + // + // ``` + In []float32 + // `in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyFloat { + // // value must not be in list [1.0, 2.0, 3.0] + // float value = 1 [(buf.validate.field).float = { not_in: [1.0, 2.0, 3.0] }]; + // } + // + // ``` + NotIn []float32 + // `finite` requires the field value to be finite. If the field value is + // infinite or NaN, an error message is generated. + Finite *bool + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyFloat { + // float value = 1 [ + // (buf.validate.field).float.example = 1.0, + // (buf.validate.field).float.example = inf + // ]; + // } + // + // ``` + Example []float32 +} + +func (b0 FloatRules_builder) Build() *FloatRules { + m0 := &FloatRules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 7) + x.xxx_hidden_Const = *b.Const + } + if b.Lt != nil { + x.xxx_hidden_LessThan = &floatRules_Lt{*b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = &floatRules_Lte{*b.Lte} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = &floatRules_Gt{*b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = &floatRules_Gte{*b.Gte} + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + if b.Finite != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 5, 7) + x.xxx_hidden_Finite = *b.Finite + } + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_FloatRules_LessThan protoreflect.FieldNumber + 
+func (x case_FloatRules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[6].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_FloatRules_GreaterThan protoreflect.FieldNumber + +func (x case_FloatRules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[6].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isFloatRules_LessThan interface { + isFloatRules_LessThan() +} + +type floatRules_Lt struct { + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be less than 10.0 + // float value = 1 [(buf.validate.field).float.lt = 10.0]; + // } + // + // ``` + Lt float32 `protobuf:"fixed32,2,opt,name=lt,oneof"` +} + +type floatRules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be less than or equal to 10.0 + // float value = 1 [(buf.validate.field).float.lte = 10.0]; + // } + // + // ``` + Lte float32 `protobuf:"fixed32,3,opt,name=lte,oneof"` +} + +func (*floatRules_Lt) isFloatRules_LessThan() {} + +func (*floatRules_Lte) isFloatRules_LessThan() {} + +type isFloatRules_GreaterThan interface { + isFloatRules_GreaterThan() +} + +type floatRules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. 
If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be greater than 5.0 [float.gt] + // float value = 1 [(buf.validate.field).float.gt = 5.0]; + // + // // value must be greater than 5 and less than 10.0 [float.gt_lt] + // float other_value = 2 [(buf.validate.field).float = { gt: 5.0, lt: 10.0 }]; + // + // // value must be greater than 10 or less than 5.0 [float.gt_lt_exclusive] + // float another_value = 3 [(buf.validate.field).float = { gt: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gt float32 `protobuf:"fixed32,4,opt,name=gt,oneof"` +} + +type floatRules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFloat { + // // value must be greater than or equal to 5.0 [float.gte] + // float value = 1 [(buf.validate.field).float.gte = 5.0]; + // + // // value must be greater than or equal to 5.0 and less than 10.0 [float.gte_lt] + // float other_value = 2 [(buf.validate.field).float = { gte: 5.0, lt: 10.0 }]; + // + // // value must be greater than or equal to 10.0 or less than 5.0 [float.gte_lt_exclusive] + // float another_value = 3 [(buf.validate.field).float = { gte: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gte float32 `protobuf:"fixed32,5,opt,name=gte,oneof"` +} + +func (*floatRules_Gt) isFloatRules_GreaterThan() {} + +func (*floatRules_Gte) isFloatRules_GreaterThan() {} + +// DoubleRules describes the rules applied to `double` values. These +// rules may also be applied to the `google.protobuf.DoubleValue` Well-Known-Type. 
+type DoubleRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const float64 `protobuf:"fixed64,1,opt,name=const"` + xxx_hidden_LessThan isDoubleRules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isDoubleRules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_In []float64 `protobuf:"fixed64,6,rep,name=in"` + xxx_hidden_NotIn []float64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn"` + xxx_hidden_Finite bool `protobuf:"varint,8,opt,name=finite"` + xxx_hidden_Example []float64 `protobuf:"fixed64,9,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DoubleRules) Reset() { + *x = DoubleRules{} + mi := &file_buf_validate_validate_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DoubleRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleRules) ProtoMessage() {} + +func (x *DoubleRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *DoubleRules) GetConst() float64 { + if x != nil { + return x.xxx_hidden_Const + } + return 0 +} + +func (x *DoubleRules) GetLt() float64 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*doubleRules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *DoubleRules) GetLte() float64 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*doubleRules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *DoubleRules) GetGt() float64 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*doubleRules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x 
*DoubleRules) GetGte() float64 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*doubleRules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *DoubleRules) GetIn() []float64 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *DoubleRules) GetNotIn() []float64 { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *DoubleRules) GetFinite() bool { + if x != nil { + return x.xxx_hidden_Finite + } + return false +} + +func (x *DoubleRules) GetExample() []float64 { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *DoubleRules) SetConst(v float64) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 7) +} + +func (x *DoubleRules) SetLt(v float64) { + x.xxx_hidden_LessThan = &doubleRules_Lt{v} +} + +func (x *DoubleRules) SetLte(v float64) { + x.xxx_hidden_LessThan = &doubleRules_Lte{v} +} + +func (x *DoubleRules) SetGt(v float64) { + x.xxx_hidden_GreaterThan = &doubleRules_Gt{v} +} + +func (x *DoubleRules) SetGte(v float64) { + x.xxx_hidden_GreaterThan = &doubleRules_Gte{v} +} + +func (x *DoubleRules) SetIn(v []float64) { + x.xxx_hidden_In = v +} + +func (x *DoubleRules) SetNotIn(v []float64) { + x.xxx_hidden_NotIn = v +} + +func (x *DoubleRules) SetFinite(v bool) { + x.xxx_hidden_Finite = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 5, 7) +} + +func (x *DoubleRules) SetExample(v []float64) { + x.xxx_hidden_Example = v +} + +func (x *DoubleRules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *DoubleRules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *DoubleRules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*doubleRules_Lt) + return ok +} + +func (x *DoubleRules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*doubleRules_Lte) + return ok +} + 
+func (x *DoubleRules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *DoubleRules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*doubleRules_Gt) + return ok +} + +func (x *DoubleRules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*doubleRules_Gte) + return ok +} + +func (x *DoubleRules) HasFinite() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 5) +} + +func (x *DoubleRules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = 0 +} + +func (x *DoubleRules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *DoubleRules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*doubleRules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *DoubleRules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*doubleRules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *DoubleRules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x *DoubleRules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*doubleRules_Gt); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *DoubleRules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*doubleRules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *DoubleRules) ClearFinite() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 5) + x.xxx_hidden_Finite = false +} + +const DoubleRules_LessThan_not_set_case case_DoubleRules_LessThan = 0 +const DoubleRules_Lt_case case_DoubleRules_LessThan = 2 +const DoubleRules_Lte_case case_DoubleRules_LessThan = 3 + +func (x *DoubleRules) WhichLessThan() case_DoubleRules_LessThan { + if x == nil { + return DoubleRules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *doubleRules_Lt: + return DoubleRules_Lt_case + case *doubleRules_Lte: + return DoubleRules_Lte_case + default: + return 
DoubleRules_LessThan_not_set_case + } +} + +const DoubleRules_GreaterThan_not_set_case case_DoubleRules_GreaterThan = 0 +const DoubleRules_Gt_case case_DoubleRules_GreaterThan = 4 +const DoubleRules_Gte_case case_DoubleRules_GreaterThan = 5 + +func (x *DoubleRules) WhichGreaterThan() case_DoubleRules_GreaterThan { + if x == nil { + return DoubleRules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *doubleRules_Gt: + return DoubleRules_Gt_case + case *doubleRules_Gte: + return DoubleRules_Gte_case + default: + return DoubleRules_GreaterThan_not_set_case + } +} + +type DoubleRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyDouble { + // // value must equal 42.0 + // double value = 1 [(buf.validate.field).double.const = 42.0]; + // } + // + // ``` + Const *float64 + // Fields of oneof xxx_hidden_LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyDouble { + // // value must be less than 10.0 + // double value = 1 [(buf.validate.field).double.lt = 10.0]; + // } + // + // ``` + Lt *float64 + // `lte` requires the field value to be less than or equal to the specified value + // (field <= value). If the field value is greater than the specified value, + // an error message is generated. 
+ // + // ```proto + // + // message MyDouble { + // // value must be less than or equal to 10.0 + // double value = 1 [(buf.validate.field).double.lte = 10.0]; + // } + // + // ``` + Lte *float64 + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or `lte`, + // the range is reversed, and the field value must be outside the specified + // range. If the field value doesn't meet the required conditions, an error + // message is generated. + // + // ```proto + // + // message MyDouble { + // // value must be greater than 5.0 [double.gt] + // double value = 1 [(buf.validate.field).double.gt = 5.0]; + // + // // value must be greater than 5 and less than 10.0 [double.gt_lt] + // double other_value = 2 [(buf.validate.field).double = { gt: 5.0, lt: 10.0 }]; + // + // // value must be greater than 10 or less than 5.0 [double.gt_lt_exclusive] + // double another_value = 3 [(buf.validate.field).double = { gt: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gt *float64 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyDouble { + // // value must be greater than or equal to 5.0 [double.gte] + // double value = 1 [(buf.validate.field).double.gte = 5.0]; + // + // // value must be greater than or equal to 5.0 and less than 10.0 [double.gte_lt] + // double other_value = 2 [(buf.validate.field).double = { gte: 5.0, lt: 10.0 }]; + // + // // value must be greater than or equal to 10.0 or less than 5.0 [double.gte_lt_exclusive] + // double another_value = 3 [(buf.validate.field).double = { gte: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gte *float64 + // -- end of xxx_hidden_GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyDouble { + // // value must be in list [1.0, 2.0, 3.0] + // double value = 1 [(buf.validate.field).double = { in: [1.0, 2.0, 3.0] }]; + // } + // + // ``` + In []float64 + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyDouble { + // // value must not be in list [1.0, 2.0, 3.0] + // double value = 1 [(buf.validate.field).double = { not_in: [1.0, 2.0, 3.0] }]; + // } + // + // ``` + NotIn []float64 + // `finite` requires the field value to be finite. If the field value is + // infinite or NaN, an error message is generated. + Finite *bool + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyDouble { + // double value = 1 [ + // (buf.validate.field).double.example = 1.0, + // (buf.validate.field).double.example = inf + // ]; + // } + // + // ``` + Example []float64 +} + +func (b0 DoubleRules_builder) Build() *DoubleRules { + m0 := &DoubleRules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 7) + x.xxx_hidden_Const = *b.Const + } + if b.Lt != nil { + x.xxx_hidden_LessThan = &doubleRules_Lt{*b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = &doubleRules_Lte{*b.Lte} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = &doubleRules_Gt{*b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = &doubleRules_Gte{*b.Gte} + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + if b.Finite != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 5, 7) + x.xxx_hidden_Finite = *b.Finite + } + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_DoubleRules_LessThan protoreflect.FieldNumber + +func (x case_DoubleRules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[7].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_DoubleRules_GreaterThan protoreflect.FieldNumber + +func (x case_DoubleRules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[7].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isDoubleRules_LessThan interface { + isDoubleRules_LessThan() +} + +type doubleRules_Lt struct { + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified + // value, an error message is generated. 
+ // + // ```proto + // + // message MyDouble { + // // value must be less than 10.0 + // double value = 1 [(buf.validate.field).double.lt = 10.0]; + // } + // + // ``` + Lt float64 `protobuf:"fixed64,2,opt,name=lt,oneof"` +} + +type doubleRules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified value + // (field <= value). If the field value is greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyDouble { + // // value must be less than or equal to 10.0 + // double value = 1 [(buf.validate.field).double.lte = 10.0]; + // } + // + // ``` + Lte float64 `protobuf:"fixed64,3,opt,name=lte,oneof"` +} + +func (*doubleRules_Lt) isDoubleRules_LessThan() {} + +func (*doubleRules_Lte) isDoubleRules_LessThan() {} + +type isDoubleRules_GreaterThan interface { + isDoubleRules_GreaterThan() +} + +type doubleRules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or `lte`, + // the range is reversed, and the field value must be outside the specified + // range. If the field value doesn't meet the required conditions, an error + // message is generated. + // + // ```proto + // + // message MyDouble { + // // value must be greater than 5.0 [double.gt] + // double value = 1 [(buf.validate.field).double.gt = 5.0]; + // + // // value must be greater than 5 and less than 10.0 [double.gt_lt] + // double other_value = 2 [(buf.validate.field).double = { gt: 5.0, lt: 10.0 }]; + // + // // value must be greater than 10 or less than 5.0 [double.gt_lt_exclusive] + // double another_value = 3 [(buf.validate.field).double = { gt: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gt float64 `protobuf:"fixed64,4,opt,name=gt,oneof"` +} + +type doubleRules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). 
If the value of `gte` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyDouble { + // // value must be greater than or equal to 5.0 [double.gte] + // double value = 1 [(buf.validate.field).double.gte = 5.0]; + // + // // value must be greater than or equal to 5.0 and less than 10.0 [double.gte_lt] + // double other_value = 2 [(buf.validate.field).double = { gte: 5.0, lt: 10.0 }]; + // + // // value must be greater than or equal to 10.0 or less than 5.0 [double.gte_lt_exclusive] + // double another_value = 3 [(buf.validate.field).double = { gte: 10.0, lt: 5.0 }]; + // } + // + // ``` + Gte float64 `protobuf:"fixed64,5,opt,name=gte,oneof"` +} + +func (*doubleRules_Gt) isDoubleRules_GreaterThan() {} + +func (*doubleRules_Gte) isDoubleRules_GreaterThan() {} + +// Int32Rules describes the rules applied to `int32` values. These +// rules may also be applied to the `google.protobuf.Int32Value` Well-Known-Type. 
+type Int32Rules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const int32 `protobuf:"varint,1,opt,name=const"` + xxx_hidden_LessThan isInt32Rules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isInt32Rules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_In []int32 `protobuf:"varint,6,rep,name=in"` + xxx_hidden_NotIn []int32 `protobuf:"varint,7,rep,name=not_in,json=notIn"` + xxx_hidden_Example []int32 `protobuf:"varint,8,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Int32Rules) Reset() { + *x = Int32Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Int32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Rules) ProtoMessage() {} + +func (x *Int32Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Int32Rules) GetConst() int32 { + if x != nil { + return x.xxx_hidden_Const + } + return 0 +} + +func (x *Int32Rules) GetLt() int32 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*int32Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *Int32Rules) GetLte() int32 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*int32Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *Int32Rules) GetGt() int32 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*int32Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *Int32Rules) GetGte() int32 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*int32Rules_Gte); ok { + 
return x.Gte + } + } + return 0 +} + +func (x *Int32Rules) GetIn() []int32 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *Int32Rules) GetNotIn() []int32 { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *Int32Rules) GetExample() []int32 { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *Int32Rules) SetConst(v int32) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 6) +} + +func (x *Int32Rules) SetLt(v int32) { + x.xxx_hidden_LessThan = &int32Rules_Lt{v} +} + +func (x *Int32Rules) SetLte(v int32) { + x.xxx_hidden_LessThan = &int32Rules_Lte{v} +} + +func (x *Int32Rules) SetGt(v int32) { + x.xxx_hidden_GreaterThan = &int32Rules_Gt{v} +} + +func (x *Int32Rules) SetGte(v int32) { + x.xxx_hidden_GreaterThan = &int32Rules_Gte{v} +} + +func (x *Int32Rules) SetIn(v []int32) { + x.xxx_hidden_In = v +} + +func (x *Int32Rules) SetNotIn(v []int32) { + x.xxx_hidden_NotIn = v +} + +func (x *Int32Rules) SetExample(v []int32) { + x.xxx_hidden_Example = v +} + +func (x *Int32Rules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *Int32Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *Int32Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*int32Rules_Lt) + return ok +} + +func (x *Int32Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*int32Rules_Lte) + return ok +} + +func (x *Int32Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *Int32Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*int32Rules_Gt) + return ok +} + +func (x *Int32Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*int32Rules_Gte) 
+ return ok +} + +func (x *Int32Rules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = 0 +} + +func (x *Int32Rules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *Int32Rules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*int32Rules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *Int32Rules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*int32Rules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *Int32Rules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x *Int32Rules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*int32Rules_Gt); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *Int32Rules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*int32Rules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +const Int32Rules_LessThan_not_set_case case_Int32Rules_LessThan = 0 +const Int32Rules_Lt_case case_Int32Rules_LessThan = 2 +const Int32Rules_Lte_case case_Int32Rules_LessThan = 3 + +func (x *Int32Rules) WhichLessThan() case_Int32Rules_LessThan { + if x == nil { + return Int32Rules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *int32Rules_Lt: + return Int32Rules_Lt_case + case *int32Rules_Lte: + return Int32Rules_Lte_case + default: + return Int32Rules_LessThan_not_set_case + } +} + +const Int32Rules_GreaterThan_not_set_case case_Int32Rules_GreaterThan = 0 +const Int32Rules_Gt_case case_Int32Rules_GreaterThan = 4 +const Int32Rules_Gte_case case_Int32Rules_GreaterThan = 5 + +func (x *Int32Rules) WhichGreaterThan() case_Int32Rules_GreaterThan { + if x == nil { + return Int32Rules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *int32Rules_Gt: + return Int32Rules_Gt_case + case *int32Rules_Gte: + return Int32Rules_Gte_case + default: + return Int32Rules_GreaterThan_not_set_case + } +} + +type Int32Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for 
the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must equal 42 + // int32 value = 1 [(buf.validate.field).int32.const = 42]; + // } + // + // ``` + Const *int32 + // Fields of oneof xxx_hidden_LessThan: + // `lt` requires the field value to be less than the specified value (field + // < value). If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must be less than 10 + // int32 value = 1 [(buf.validate.field).int32.lt = 10]; + // } + // + // ``` + Lt *int32 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must be less than or equal to 10 + // int32 value = 1 [(buf.validate.field).int32.lte = 10]; + // } + // + // ``` + Lte *int32 + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyInt32 { + // // value must be greater than 5 [int32.gt] + // int32 value = 1 [(buf.validate.field).int32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [int32.gt_lt] + // int32 other_value = 2 [(buf.validate.field).int32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [int32.gt_lt_exclusive] + // int32 another_value = 3 [(buf.validate.field).int32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *int32 + // `gte` requires the field value to be greater than or equal to the specified value + // (exclusive). If the value of `gte` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must be greater than or equal to 5 [int32.gte] + // int32 value = 1 [(buf.validate.field).int32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [int32.gte_lt] + // int32 other_value = 2 [(buf.validate.field).int32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [int32.gte_lt_exclusive] + // int32 another_value = 3 [(buf.validate.field).int32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *int32 + // -- end of xxx_hidden_GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyInt32 { + // // value must be in list [1, 2, 3] + // int32 value = 1 [(buf.validate.field).int32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int32 + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error message + // is generated. 
+ // + // ```proto + // + // message MyInt32 { + // // value must not be in list [1, 2, 3] + // int32 value = 1 [(buf.validate.field).int32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int32 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyInt32 { + // int32 value = 1 [ + // (buf.validate.field).int32.example = 1, + // (buf.validate.field).int32.example = -10 + // ]; + // } + // + // ``` + Example []int32 +} + +func (b0 Int32Rules_builder) Build() *Int32Rules { + m0 := &Int32Rules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 6) + x.xxx_hidden_Const = *b.Const + } + if b.Lt != nil { + x.xxx_hidden_LessThan = &int32Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = &int32Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = &int32Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = &int32Rules_Gte{*b.Gte} + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_Int32Rules_LessThan protoreflect.FieldNumber + +func (x case_Int32Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[8].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_Int32Rules_GreaterThan protoreflect.FieldNumber + +func (x case_Int32Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[8].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isInt32Rules_LessThan interface { + isInt32Rules_LessThan() +} + +type int32Rules_Lt struct { + // `lt` requires the field 
value to be less than the specified value (field + // < value). If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must be less than 10 + // int32 value = 1 [(buf.validate.field).int32.lt = 10]; + // } + // + // ``` + Lt int32 `protobuf:"varint,2,opt,name=lt,oneof"` +} + +type int32Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must be less than or equal to 10 + // int32 value = 1 [(buf.validate.field).int32.lte = 10]; + // } + // + // ``` + Lte int32 `protobuf:"varint,3,opt,name=lte,oneof"` +} + +func (*int32Rules_Lt) isInt32Rules_LessThan() {} + +func (*int32Rules_Lte) isInt32Rules_LessThan() {} + +type isInt32Rules_GreaterThan interface { + isInt32Rules_GreaterThan() +} + +type int32Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyInt32 { + // // value must be greater than 5 [int32.gt] + // int32 value = 1 [(buf.validate.field).int32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [int32.gt_lt] + // int32 other_value = 2 [(buf.validate.field).int32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [int32.gt_lt_exclusive] + // int32 another_value = 3 [(buf.validate.field).int32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt int32 `protobuf:"varint,4,opt,name=gt,oneof"` +} + +type int32Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified value + // (exclusive). If the value of `gte` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyInt32 { + // // value must be greater than or equal to 5 [int32.gte] + // int32 value = 1 [(buf.validate.field).int32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [int32.gte_lt] + // int32 other_value = 2 [(buf.validate.field).int32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [int32.gte_lt_exclusive] + // int32 another_value = 3 [(buf.validate.field).int32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte int32 `protobuf:"varint,5,opt,name=gte,oneof"` +} + +func (*int32Rules_Gt) isInt32Rules_GreaterThan() {} + +func (*int32Rules_Gte) isInt32Rules_GreaterThan() {} + +// Int64Rules describes the rules applied to `int64` values. These +// rules may also be applied to the `google.protobuf.Int64Value` Well-Known-Type. 
+type Int64Rules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const int64 `protobuf:"varint,1,opt,name=const"` + xxx_hidden_LessThan isInt64Rules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isInt64Rules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_In []int64 `protobuf:"varint,6,rep,name=in"` + xxx_hidden_NotIn []int64 `protobuf:"varint,7,rep,name=not_in,json=notIn"` + xxx_hidden_Example []int64 `protobuf:"varint,9,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Int64Rules) Reset() { + *x = Int64Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Int64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Rules) ProtoMessage() {} + +func (x *Int64Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Int64Rules) GetConst() int64 { + if x != nil { + return x.xxx_hidden_Const + } + return 0 +} + +func (x *Int64Rules) GetLt() int64 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*int64Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *Int64Rules) GetLte() int64 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*int64Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *Int64Rules) GetGt() int64 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*int64Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *Int64Rules) GetGte() int64 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*int64Rules_Gte); ok { + 
return x.Gte + } + } + return 0 +} + +func (x *Int64Rules) GetIn() []int64 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *Int64Rules) GetNotIn() []int64 { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *Int64Rules) GetExample() []int64 { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *Int64Rules) SetConst(v int64) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 6) +} + +func (x *Int64Rules) SetLt(v int64) { + x.xxx_hidden_LessThan = &int64Rules_Lt{v} +} + +func (x *Int64Rules) SetLte(v int64) { + x.xxx_hidden_LessThan = &int64Rules_Lte{v} +} + +func (x *Int64Rules) SetGt(v int64) { + x.xxx_hidden_GreaterThan = &int64Rules_Gt{v} +} + +func (x *Int64Rules) SetGte(v int64) { + x.xxx_hidden_GreaterThan = &int64Rules_Gte{v} +} + +func (x *Int64Rules) SetIn(v []int64) { + x.xxx_hidden_In = v +} + +func (x *Int64Rules) SetNotIn(v []int64) { + x.xxx_hidden_NotIn = v +} + +func (x *Int64Rules) SetExample(v []int64) { + x.xxx_hidden_Example = v +} + +func (x *Int64Rules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *Int64Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *Int64Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*int64Rules_Lt) + return ok +} + +func (x *Int64Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*int64Rules_Lte) + return ok +} + +func (x *Int64Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *Int64Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*int64Rules_Gt) + return ok +} + +func (x *Int64Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*int64Rules_Gte) 
+ return ok +} + +func (x *Int64Rules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = 0 +} + +func (x *Int64Rules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *Int64Rules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*int64Rules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *Int64Rules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*int64Rules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *Int64Rules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x *Int64Rules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*int64Rules_Gt); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *Int64Rules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*int64Rules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +const Int64Rules_LessThan_not_set_case case_Int64Rules_LessThan = 0 +const Int64Rules_Lt_case case_Int64Rules_LessThan = 2 +const Int64Rules_Lte_case case_Int64Rules_LessThan = 3 + +func (x *Int64Rules) WhichLessThan() case_Int64Rules_LessThan { + if x == nil { + return Int64Rules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *int64Rules_Lt: + return Int64Rules_Lt_case + case *int64Rules_Lte: + return Int64Rules_Lte_case + default: + return Int64Rules_LessThan_not_set_case + } +} + +const Int64Rules_GreaterThan_not_set_case case_Int64Rules_GreaterThan = 0 +const Int64Rules_Gt_case case_Int64Rules_GreaterThan = 4 +const Int64Rules_Gte_case case_Int64Rules_GreaterThan = 5 + +func (x *Int64Rules) WhichGreaterThan() case_Int64Rules_GreaterThan { + if x == nil { + return Int64Rules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *int64Rules_Gt: + return Int64Rules_Gt_case + case *int64Rules_Gte: + return Int64Rules_Gte_case + default: + return Int64Rules_GreaterThan_not_set_case + } +} + +type Int64Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for 
the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must equal 42 + // int64 value = 1 [(buf.validate.field).int64.const = 42]; + // } + // + // ``` + Const *int64 + // Fields of oneof xxx_hidden_LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must be less than 10 + // int64 value = 1 [(buf.validate.field).int64.lt = 10]; + // } + // + // ``` + Lt *int64 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must be less than or equal to 10 + // int64 value = 1 [(buf.validate.field).int64.lte = 10]; + // } + // + // ``` + Lte *int64 + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyInt64 { + // // value must be greater than 5 [int64.gt] + // int64 value = 1 [(buf.validate.field).int64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [int64.gt_lt] + // int64 other_value = 2 [(buf.validate.field).int64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [int64.gt_lt_exclusive] + // int64 another_value = 3 [(buf.validate.field).int64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *int64 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must be greater than or equal to 5 [int64.gte] + // int64 value = 1 [(buf.validate.field).int64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [int64.gte_lt] + // int64 other_value = 2 [(buf.validate.field).int64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [int64.gte_lt_exclusive] + // int64 another_value = 3 [(buf.validate.field).int64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *int64 + // -- end of xxx_hidden_GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyInt64 { + // // value must be in list [1, 2, 3] + // int64 value = 1 [(buf.validate.field).int64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int64 + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. 
+ // + // ```proto + // + // message MyInt64 { + // // value must not be in list [1, 2, 3] + // int64 value = 1 [(buf.validate.field).int64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int64 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyInt64 { + // int64 value = 1 [ + // (buf.validate.field).int64.example = 1, + // (buf.validate.field).int64.example = -10 + // ]; + // } + // + // ``` + Example []int64 +} + +func (b0 Int64Rules_builder) Build() *Int64Rules { + m0 := &Int64Rules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 6) + x.xxx_hidden_Const = *b.Const + } + if b.Lt != nil { + x.xxx_hidden_LessThan = &int64Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = &int64Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = &int64Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = &int64Rules_Gte{*b.Gte} + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_Int64Rules_LessThan protoreflect.FieldNumber + +func (x case_Int64Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[9].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_Int64Rules_GreaterThan protoreflect.FieldNumber + +func (x case_Int64Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[9].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isInt64Rules_LessThan interface { + isInt64Rules_LessThan() +} + +type int64Rules_Lt struct { + // `lt` requires the field 
value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must be less than 10 + // int64 value = 1 [(buf.validate.field).int64.lt = 10]; + // } + // + // ``` + Lt int64 `protobuf:"varint,2,opt,name=lt,oneof"` +} + +type int64Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must be less than or equal to 10 + // int64 value = 1 [(buf.validate.field).int64.lte = 10]; + // } + // + // ``` + Lte int64 `protobuf:"varint,3,opt,name=lte,oneof"` +} + +func (*int64Rules_Lt) isInt64Rules_LessThan() {} + +func (*int64Rules_Lte) isInt64Rules_LessThan() {} + +type isInt64Rules_GreaterThan interface { + isInt64Rules_GreaterThan() +} + +type int64Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyInt64 { + // // value must be greater than 5 [int64.gt] + // int64 value = 1 [(buf.validate.field).int64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [int64.gt_lt] + // int64 other_value = 2 [(buf.validate.field).int64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [int64.gt_lt_exclusive] + // int64 another_value = 3 [(buf.validate.field).int64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt int64 `protobuf:"varint,4,opt,name=gt,oneof"` +} + +type int64Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyInt64 { + // // value must be greater than or equal to 5 [int64.gte] + // int64 value = 1 [(buf.validate.field).int64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [int64.gte_lt] + // int64 other_value = 2 [(buf.validate.field).int64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [int64.gte_lt_exclusive] + // int64 another_value = 3 [(buf.validate.field).int64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte int64 `protobuf:"varint,5,opt,name=gte,oneof"` +} + +func (*int64Rules_Gt) isInt64Rules_GreaterThan() {} + +func (*int64Rules_Gte) isInt64Rules_GreaterThan() {} + +// UInt32Rules describes the rules applied to `uint32` values. These +// rules may also be applied to the `google.protobuf.UInt32Value` Well-Known-Type. 
+type UInt32Rules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const uint32 `protobuf:"varint,1,opt,name=const"` + xxx_hidden_LessThan isUInt32Rules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isUInt32Rules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_In []uint32 `protobuf:"varint,6,rep,name=in"` + xxx_hidden_NotIn []uint32 `protobuf:"varint,7,rep,name=not_in,json=notIn"` + xxx_hidden_Example []uint32 `protobuf:"varint,8,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UInt32Rules) Reset() { + *x = UInt32Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UInt32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt32Rules) ProtoMessage() {} + +func (x *UInt32Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *UInt32Rules) GetConst() uint32 { + if x != nil { + return x.xxx_hidden_Const + } + return 0 +} + +func (x *UInt32Rules) GetLt() uint32 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*uInt32Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *UInt32Rules) GetLte() uint32 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*uInt32Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *UInt32Rules) GetGt() uint32 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*uInt32Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *UInt32Rules) GetGte() uint32 { + if x != nil { + if x, ok := 
x.xxx_hidden_GreaterThan.(*uInt32Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *UInt32Rules) GetIn() []uint32 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *UInt32Rules) GetNotIn() []uint32 { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *UInt32Rules) GetExample() []uint32 { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *UInt32Rules) SetConst(v uint32) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 6) +} + +func (x *UInt32Rules) SetLt(v uint32) { + x.xxx_hidden_LessThan = &uInt32Rules_Lt{v} +} + +func (x *UInt32Rules) SetLte(v uint32) { + x.xxx_hidden_LessThan = &uInt32Rules_Lte{v} +} + +func (x *UInt32Rules) SetGt(v uint32) { + x.xxx_hidden_GreaterThan = &uInt32Rules_Gt{v} +} + +func (x *UInt32Rules) SetGte(v uint32) { + x.xxx_hidden_GreaterThan = &uInt32Rules_Gte{v} +} + +func (x *UInt32Rules) SetIn(v []uint32) { + x.xxx_hidden_In = v +} + +func (x *UInt32Rules) SetNotIn(v []uint32) { + x.xxx_hidden_NotIn = v +} + +func (x *UInt32Rules) SetExample(v []uint32) { + x.xxx_hidden_Example = v +} + +func (x *UInt32Rules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *UInt32Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *UInt32Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*uInt32Rules_Lt) + return ok +} + +func (x *UInt32Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*uInt32Rules_Lte) + return ok +} + +func (x *UInt32Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *UInt32Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*uInt32Rules_Gt) + return ok +} + +func (x *UInt32Rules) HasGte() bool { 
+ if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*uInt32Rules_Gte) + return ok +} + +func (x *UInt32Rules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = 0 +} + +func (x *UInt32Rules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *UInt32Rules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*uInt32Rules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *UInt32Rules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*uInt32Rules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *UInt32Rules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x *UInt32Rules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*uInt32Rules_Gt); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *UInt32Rules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*uInt32Rules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +const UInt32Rules_LessThan_not_set_case case_UInt32Rules_LessThan = 0 +const UInt32Rules_Lt_case case_UInt32Rules_LessThan = 2 +const UInt32Rules_Lte_case case_UInt32Rules_LessThan = 3 + +func (x *UInt32Rules) WhichLessThan() case_UInt32Rules_LessThan { + if x == nil { + return UInt32Rules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *uInt32Rules_Lt: + return UInt32Rules_Lt_case + case *uInt32Rules_Lte: + return UInt32Rules_Lte_case + default: + return UInt32Rules_LessThan_not_set_case + } +} + +const UInt32Rules_GreaterThan_not_set_case case_UInt32Rules_GreaterThan = 0 +const UInt32Rules_Gt_case case_UInt32Rules_GreaterThan = 4 +const UInt32Rules_Gte_case case_UInt32Rules_GreaterThan = 5 + +func (x *UInt32Rules) WhichGreaterThan() case_UInt32Rules_GreaterThan { + if x == nil { + return UInt32Rules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *uInt32Rules_Gt: + return UInt32Rules_Gt_case + case *uInt32Rules_Gte: + return UInt32Rules_Gte_case + default: + return 
UInt32Rules_GreaterThan_not_set_case + } +} + +type UInt32Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must equal 42 + // uint32 value = 1 [(buf.validate.field).uint32.const = 42]; + // } + // + // ``` + Const *uint32 + // Fields of oneof xxx_hidden_LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be less than 10 + // uint32 value = 1 [(buf.validate.field).uint32.lt = 10]; + // } + // + // ``` + Lt *uint32 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be less than or equal to 10 + // uint32 value = 1 [(buf.validate.field).uint32.lte = 10]; + // } + // + // ``` + Lte *uint32 + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyUInt32 { + // // value must be greater than 5 [uint32.gt] + // uint32 value = 1 [(buf.validate.field).uint32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [uint32.gt_lt] + // uint32 other_value = 2 [(buf.validate.field).uint32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [uint32.gt_lt_exclusive] + // uint32 another_value = 3 [(buf.validate.field).uint32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *uint32 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be greater than or equal to 5 [uint32.gte] + // uint32 value = 1 [(buf.validate.field).uint32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [uint32.gte_lt] + // uint32 other_value = 2 [(buf.validate.field).uint32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [uint32.gte_lt_exclusive] + // uint32 another_value = 3 [(buf.validate.field).uint32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *uint32 + // -- end of xxx_hidden_GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be in list [1, 2, 3] + // uint32 value = 1 [(buf.validate.field).uint32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []uint32 + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. 
+ // + // ```proto + // + // message MyUInt32 { + // // value must not be in list [1, 2, 3] + // uint32 value = 1 [(buf.validate.field).uint32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []uint32 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyUInt32 { + // uint32 value = 1 [ + // (buf.validate.field).uint32.example = 1, + // (buf.validate.field).uint32.example = 10 + // ]; + // } + // + // ``` + Example []uint32 +} + +func (b0 UInt32Rules_builder) Build() *UInt32Rules { + m0 := &UInt32Rules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 6) + x.xxx_hidden_Const = *b.Const + } + if b.Lt != nil { + x.xxx_hidden_LessThan = &uInt32Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = &uInt32Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = &uInt32Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = &uInt32Rules_Gte{*b.Gte} + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_UInt32Rules_LessThan protoreflect.FieldNumber + +func (x case_UInt32Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[10].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_UInt32Rules_GreaterThan protoreflect.FieldNumber + +func (x case_UInt32Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[10].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isUInt32Rules_LessThan interface { + isUInt32Rules_LessThan() +} + +type uInt32Rules_Lt struct { + // 
`lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be less than 10 + // uint32 value = 1 [(buf.validate.field).uint32.lt = 10]; + // } + // + // ``` + Lt uint32 `protobuf:"varint,2,opt,name=lt,oneof"` +} + +type uInt32Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be less than or equal to 10 + // uint32 value = 1 [(buf.validate.field).uint32.lte = 10]; + // } + // + // ``` + Lte uint32 `protobuf:"varint,3,opt,name=lte,oneof"` +} + +func (*uInt32Rules_Lt) isUInt32Rules_LessThan() {} + +func (*uInt32Rules_Lte) isUInt32Rules_LessThan() {} + +type isUInt32Rules_GreaterThan interface { + isUInt32Rules_GreaterThan() +} + +type uInt32Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyUInt32 { + // // value must be greater than 5 [uint32.gt] + // uint32 value = 1 [(buf.validate.field).uint32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [uint32.gt_lt] + // uint32 other_value = 2 [(buf.validate.field).uint32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [uint32.gt_lt_exclusive] + // uint32 another_value = 3 [(buf.validate.field).uint32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt uint32 `protobuf:"varint,4,opt,name=gt,oneof"` +} + +type uInt32Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyUInt32 { + // // value must be greater than or equal to 5 [uint32.gte] + // uint32 value = 1 [(buf.validate.field).uint32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [uint32.gte_lt] + // uint32 other_value = 2 [(buf.validate.field).uint32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [uint32.gte_lt_exclusive] + // uint32 another_value = 3 [(buf.validate.field).uint32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte uint32 `protobuf:"varint,5,opt,name=gte,oneof"` +} + +func (*uInt32Rules_Gt) isUInt32Rules_GreaterThan() {} + +func (*uInt32Rules_Gte) isUInt32Rules_GreaterThan() {} + +// UInt64Rules describes the rules applied to `uint64` values. These +// rules may also be applied to the `google.protobuf.UInt64Value` Well-Known-Type. 
+type UInt64Rules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const uint64 `protobuf:"varint,1,opt,name=const"` + xxx_hidden_LessThan isUInt64Rules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isUInt64Rules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_In []uint64 `protobuf:"varint,6,rep,name=in"` + xxx_hidden_NotIn []uint64 `protobuf:"varint,7,rep,name=not_in,json=notIn"` + xxx_hidden_Example []uint64 `protobuf:"varint,8,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UInt64Rules) Reset() { + *x = UInt64Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UInt64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt64Rules) ProtoMessage() {} + +func (x *UInt64Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *UInt64Rules) GetConst() uint64 { + if x != nil { + return x.xxx_hidden_Const + } + return 0 +} + +func (x *UInt64Rules) GetLt() uint64 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*uInt64Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *UInt64Rules) GetLte() uint64 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*uInt64Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *UInt64Rules) GetGt() uint64 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*uInt64Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *UInt64Rules) GetGte() uint64 { + if x != nil { + if x, ok := 
x.xxx_hidden_GreaterThan.(*uInt64Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *UInt64Rules) GetIn() []uint64 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *UInt64Rules) GetNotIn() []uint64 { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *UInt64Rules) GetExample() []uint64 { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *UInt64Rules) SetConst(v uint64) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 6) +} + +func (x *UInt64Rules) SetLt(v uint64) { + x.xxx_hidden_LessThan = &uInt64Rules_Lt{v} +} + +func (x *UInt64Rules) SetLte(v uint64) { + x.xxx_hidden_LessThan = &uInt64Rules_Lte{v} +} + +func (x *UInt64Rules) SetGt(v uint64) { + x.xxx_hidden_GreaterThan = &uInt64Rules_Gt{v} +} + +func (x *UInt64Rules) SetGte(v uint64) { + x.xxx_hidden_GreaterThan = &uInt64Rules_Gte{v} +} + +func (x *UInt64Rules) SetIn(v []uint64) { + x.xxx_hidden_In = v +} + +func (x *UInt64Rules) SetNotIn(v []uint64) { + x.xxx_hidden_NotIn = v +} + +func (x *UInt64Rules) SetExample(v []uint64) { + x.xxx_hidden_Example = v +} + +func (x *UInt64Rules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *UInt64Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *UInt64Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*uInt64Rules_Lt) + return ok +} + +func (x *UInt64Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*uInt64Rules_Lte) + return ok +} + +func (x *UInt64Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *UInt64Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*uInt64Rules_Gt) + return ok +} + +func (x *UInt64Rules) HasGte() bool { 
+ if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*uInt64Rules_Gte) + return ok +} + +func (x *UInt64Rules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = 0 +} + +func (x *UInt64Rules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *UInt64Rules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*uInt64Rules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *UInt64Rules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*uInt64Rules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *UInt64Rules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x *UInt64Rules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*uInt64Rules_Gt); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *UInt64Rules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*uInt64Rules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +const UInt64Rules_LessThan_not_set_case case_UInt64Rules_LessThan = 0 +const UInt64Rules_Lt_case case_UInt64Rules_LessThan = 2 +const UInt64Rules_Lte_case case_UInt64Rules_LessThan = 3 + +func (x *UInt64Rules) WhichLessThan() case_UInt64Rules_LessThan { + if x == nil { + return UInt64Rules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *uInt64Rules_Lt: + return UInt64Rules_Lt_case + case *uInt64Rules_Lte: + return UInt64Rules_Lte_case + default: + return UInt64Rules_LessThan_not_set_case + } +} + +const UInt64Rules_GreaterThan_not_set_case case_UInt64Rules_GreaterThan = 0 +const UInt64Rules_Gt_case case_UInt64Rules_GreaterThan = 4 +const UInt64Rules_Gte_case case_UInt64Rules_GreaterThan = 5 + +func (x *UInt64Rules) WhichGreaterThan() case_UInt64Rules_GreaterThan { + if x == nil { + return UInt64Rules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *uInt64Rules_Gt: + return UInt64Rules_Gt_case + case *uInt64Rules_Gte: + return UInt64Rules_Gte_case + default: + return 
UInt64Rules_GreaterThan_not_set_case + } +} + +type UInt64Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must equal 42 + // uint64 value = 1 [(buf.validate.field).uint64.const = 42]; + // } + // + // ``` + Const *uint64 + // Fields of oneof xxx_hidden_LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be less than 10 + // uint64 value = 1 [(buf.validate.field).uint64.lt = 10]; + // } + // + // ``` + Lt *uint64 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be less than or equal to 10 + // uint64 value = 1 [(buf.validate.field).uint64.lte = 10]; + // } + // + // ``` + Lte *uint64 + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyUInt64 { + // // value must be greater than 5 [uint64.gt] + // uint64 value = 1 [(buf.validate.field).uint64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [uint64.gt_lt] + // uint64 other_value = 2 [(buf.validate.field).uint64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [uint64.gt_lt_exclusive] + // uint64 another_value = 3 [(buf.validate.field).uint64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *uint64 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be greater than or equal to 5 [uint64.gte] + // uint64 value = 1 [(buf.validate.field).uint64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [uint64.gte_lt] + // uint64 other_value = 2 [(buf.validate.field).uint64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [uint64.gte_lt_exclusive] + // uint64 another_value = 3 [(buf.validate.field).uint64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *uint64 + // -- end of xxx_hidden_GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be in list [1, 2, 3] + // uint64 value = 1 [(buf.validate.field).uint64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []uint64 + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. 
+ // + // ```proto + // + // message MyUInt64 { + // // value must not be in list [1, 2, 3] + // uint64 value = 1 [(buf.validate.field).uint64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []uint64 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyUInt64 { + // uint64 value = 1 [ + // (buf.validate.field).uint64.example = 1, + // (buf.validate.field).uint64.example = -10 + // ]; + // } + // + // ``` + Example []uint64 +} + +func (b0 UInt64Rules_builder) Build() *UInt64Rules { + m0 := &UInt64Rules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 6) + x.xxx_hidden_Const = *b.Const + } + if b.Lt != nil { + x.xxx_hidden_LessThan = &uInt64Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = &uInt64Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = &uInt64Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = &uInt64Rules_Gte{*b.Gte} + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_UInt64Rules_LessThan protoreflect.FieldNumber + +func (x case_UInt64Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[11].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_UInt64Rules_GreaterThan protoreflect.FieldNumber + +func (x case_UInt64Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[11].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isUInt64Rules_LessThan interface { + isUInt64Rules_LessThan() +} + +type uInt64Rules_Lt struct { + // 
`lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be less than 10 + // uint64 value = 1 [(buf.validate.field).uint64.lt = 10]; + // } + // + // ``` + Lt uint64 `protobuf:"varint,2,opt,name=lt,oneof"` +} + +type uInt64Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be less than or equal to 10 + // uint64 value = 1 [(buf.validate.field).uint64.lte = 10]; + // } + // + // ``` + Lte uint64 `protobuf:"varint,3,opt,name=lte,oneof"` +} + +func (*uInt64Rules_Lt) isUInt64Rules_LessThan() {} + +func (*uInt64Rules_Lte) isUInt64Rules_LessThan() {} + +type isUInt64Rules_GreaterThan interface { + isUInt64Rules_GreaterThan() +} + +type uInt64Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyUInt64 { + // // value must be greater than 5 [uint64.gt] + // uint64 value = 1 [(buf.validate.field).uint64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [uint64.gt_lt] + // uint64 other_value = 2 [(buf.validate.field).uint64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [uint64.gt_lt_exclusive] + // uint64 another_value = 3 [(buf.validate.field).uint64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt uint64 `protobuf:"varint,4,opt,name=gt,oneof"` +} + +type uInt64Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyUInt64 { + // // value must be greater than or equal to 5 [uint64.gte] + // uint64 value = 1 [(buf.validate.field).uint64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [uint64.gte_lt] + // uint64 other_value = 2 [(buf.validate.field).uint64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [uint64.gte_lt_exclusive] + // uint64 another_value = 3 [(buf.validate.field).uint64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte uint64 `protobuf:"varint,5,opt,name=gte,oneof"` +} + +func (*uInt64Rules_Gt) isUInt64Rules_GreaterThan() {} + +func (*uInt64Rules_Gte) isUInt64Rules_GreaterThan() {} + +// SInt32Rules describes the rules applied to `sint32` values. 
+type SInt32Rules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const int32 `protobuf:"zigzag32,1,opt,name=const"` + xxx_hidden_LessThan isSInt32Rules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isSInt32Rules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_In []int32 `protobuf:"zigzag32,6,rep,name=in"` + xxx_hidden_NotIn []int32 `protobuf:"zigzag32,7,rep,name=not_in,json=notIn"` + xxx_hidden_Example []int32 `protobuf:"zigzag32,8,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SInt32Rules) Reset() { + *x = SInt32Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SInt32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SInt32Rules) ProtoMessage() {} + +func (x *SInt32Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SInt32Rules) GetConst() int32 { + if x != nil { + return x.xxx_hidden_Const + } + return 0 +} + +func (x *SInt32Rules) GetLt() int32 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*sInt32Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *SInt32Rules) GetLte() int32 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*sInt32Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *SInt32Rules) GetGt() int32 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*sInt32Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *SInt32Rules) GetGte() int32 { + if x != nil { + if x, ok := 
x.xxx_hidden_GreaterThan.(*sInt32Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *SInt32Rules) GetIn() []int32 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *SInt32Rules) GetNotIn() []int32 { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *SInt32Rules) GetExample() []int32 { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *SInt32Rules) SetConst(v int32) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 6) +} + +func (x *SInt32Rules) SetLt(v int32) { + x.xxx_hidden_LessThan = &sInt32Rules_Lt{v} +} + +func (x *SInt32Rules) SetLte(v int32) { + x.xxx_hidden_LessThan = &sInt32Rules_Lte{v} +} + +func (x *SInt32Rules) SetGt(v int32) { + x.xxx_hidden_GreaterThan = &sInt32Rules_Gt{v} +} + +func (x *SInt32Rules) SetGte(v int32) { + x.xxx_hidden_GreaterThan = &sInt32Rules_Gte{v} +} + +func (x *SInt32Rules) SetIn(v []int32) { + x.xxx_hidden_In = v +} + +func (x *SInt32Rules) SetNotIn(v []int32) { + x.xxx_hidden_NotIn = v +} + +func (x *SInt32Rules) SetExample(v []int32) { + x.xxx_hidden_Example = v +} + +func (x *SInt32Rules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *SInt32Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *SInt32Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*sInt32Rules_Lt) + return ok +} + +func (x *SInt32Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*sInt32Rules_Lte) + return ok +} + +func (x *SInt32Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *SInt32Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*sInt32Rules_Gt) + return ok +} + +func (x *SInt32Rules) HasGte() bool { + if x == 
nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*sInt32Rules_Gte) + return ok +} + +func (x *SInt32Rules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = 0 +} + +func (x *SInt32Rules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *SInt32Rules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*sInt32Rules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *SInt32Rules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*sInt32Rules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *SInt32Rules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x *SInt32Rules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*sInt32Rules_Gt); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *SInt32Rules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*sInt32Rules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +const SInt32Rules_LessThan_not_set_case case_SInt32Rules_LessThan = 0 +const SInt32Rules_Lt_case case_SInt32Rules_LessThan = 2 +const SInt32Rules_Lte_case case_SInt32Rules_LessThan = 3 + +func (x *SInt32Rules) WhichLessThan() case_SInt32Rules_LessThan { + if x == nil { + return SInt32Rules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *sInt32Rules_Lt: + return SInt32Rules_Lt_case + case *sInt32Rules_Lte: + return SInt32Rules_Lte_case + default: + return SInt32Rules_LessThan_not_set_case + } +} + +const SInt32Rules_GreaterThan_not_set_case case_SInt32Rules_GreaterThan = 0 +const SInt32Rules_Gt_case case_SInt32Rules_GreaterThan = 4 +const SInt32Rules_Gte_case case_SInt32Rules_GreaterThan = 5 + +func (x *SInt32Rules) WhichGreaterThan() case_SInt32Rules_GreaterThan { + if x == nil { + return SInt32Rules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *sInt32Rules_Gt: + return SInt32Rules_Gt_case + case *sInt32Rules_Gte: + return SInt32Rules_Gte_case + default: + return 
SInt32Rules_GreaterThan_not_set_case + } +} + +type SInt32Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must equal 42 + // sint32 value = 1 [(buf.validate.field).sint32.const = 42]; + // } + // + // ``` + Const *int32 + // Fields of oneof xxx_hidden_LessThan: + // `lt` requires the field value to be less than the specified value (field + // < value). If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must be less than 10 + // sint32 value = 1 [(buf.validate.field).sint32.lt = 10]; + // } + // + // ``` + Lt *int32 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must be less than or equal to 10 + // sint32 value = 1 [(buf.validate.field).sint32.lte = 10]; + // } + // + // ``` + Lte *int32 + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySInt32 { + // // value must be greater than 5 [sint32.gt] + // sint32 value = 1 [(buf.validate.field).sint32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sint32.gt_lt] + // sint32 other_value = 2 [(buf.validate.field).sint32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sint32.gt_lt_exclusive] + // sint32 another_value = 3 [(buf.validate.field).sint32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *int32 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must be greater than or equal to 5 [sint32.gte] + // sint32 value = 1 [(buf.validate.field).sint32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sint32.gte_lt] + // sint32 other_value = 2 [(buf.validate.field).sint32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sint32.gte_lt_exclusive] + // sint32 another_value = 3 [(buf.validate.field).sint32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *int32 + // -- end of xxx_hidden_GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MySInt32 { + // // value must be in list [1, 2, 3] + // sint32 value = 1 [(buf.validate.field).sint32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int32 + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. 
+ // + // ```proto + // + // message MySInt32 { + // // value must not be in list [1, 2, 3] + // sint32 value = 1 [(buf.validate.field).sint32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int32 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MySInt32 { + // sint32 value = 1 [ + // (buf.validate.field).sint32.example = 1, + // (buf.validate.field).sint32.example = -10 + // ]; + // } + // + // ``` + Example []int32 +} + +func (b0 SInt32Rules_builder) Build() *SInt32Rules { + m0 := &SInt32Rules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 6) + x.xxx_hidden_Const = *b.Const + } + if b.Lt != nil { + x.xxx_hidden_LessThan = &sInt32Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = &sInt32Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = &sInt32Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = &sInt32Rules_Gte{*b.Gte} + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_SInt32Rules_LessThan protoreflect.FieldNumber + +func (x case_SInt32Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[12].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_SInt32Rules_GreaterThan protoreflect.FieldNumber + +func (x case_SInt32Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[12].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isSInt32Rules_LessThan interface { + isSInt32Rules_LessThan() +} + +type sInt32Rules_Lt struct { + // `lt` 
requires the field value to be less than the specified value (field + // < value). If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must be less than 10 + // sint32 value = 1 [(buf.validate.field).sint32.lt = 10]; + // } + // + // ``` + Lt int32 `protobuf:"zigzag32,2,opt,name=lt,oneof"` +} + +type sInt32Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must be less than or equal to 10 + // sint32 value = 1 [(buf.validate.field).sint32.lte = 10]; + // } + // + // ``` + Lte int32 `protobuf:"zigzag32,3,opt,name=lte,oneof"` +} + +func (*sInt32Rules_Lt) isSInt32Rules_LessThan() {} + +func (*sInt32Rules_Lte) isSInt32Rules_LessThan() {} + +type isSInt32Rules_GreaterThan interface { + isSInt32Rules_GreaterThan() +} + +type sInt32Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySInt32 { + // // value must be greater than 5 [sint32.gt] + // sint32 value = 1 [(buf.validate.field).sint32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sint32.gt_lt] + // sint32 other_value = 2 [(buf.validate.field).sint32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sint32.gt_lt_exclusive] + // sint32 another_value = 3 [(buf.validate.field).sint32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt int32 `protobuf:"zigzag32,4,opt,name=gt,oneof"` +} + +type sInt32Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySInt32 { + // // value must be greater than or equal to 5 [sint32.gte] + // sint32 value = 1 [(buf.validate.field).sint32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sint32.gte_lt] + // sint32 other_value = 2 [(buf.validate.field).sint32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sint32.gte_lt_exclusive] + // sint32 another_value = 3 [(buf.validate.field).sint32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte int32 `protobuf:"zigzag32,5,opt,name=gte,oneof"` +} + +func (*sInt32Rules_Gt) isSInt32Rules_GreaterThan() {} + +func (*sInt32Rules_Gte) isSInt32Rules_GreaterThan() {} + +// SInt64Rules describes the rules applied to `sint64` values. 
+type SInt64Rules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const int64 `protobuf:"zigzag64,1,opt,name=const"` + xxx_hidden_LessThan isSInt64Rules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isSInt64Rules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_In []int64 `protobuf:"zigzag64,6,rep,name=in"` + xxx_hidden_NotIn []int64 `protobuf:"zigzag64,7,rep,name=not_in,json=notIn"` + xxx_hidden_Example []int64 `protobuf:"zigzag64,8,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SInt64Rules) Reset() { + *x = SInt64Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SInt64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SInt64Rules) ProtoMessage() {} + +func (x *SInt64Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SInt64Rules) GetConst() int64 { + if x != nil { + return x.xxx_hidden_Const + } + return 0 +} + +func (x *SInt64Rules) GetLt() int64 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*sInt64Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *SInt64Rules) GetLte() int64 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*sInt64Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *SInt64Rules) GetGt() int64 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*sInt64Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *SInt64Rules) GetGte() int64 { + if x != nil { + if x, ok := 
x.xxx_hidden_GreaterThan.(*sInt64Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *SInt64Rules) GetIn() []int64 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *SInt64Rules) GetNotIn() []int64 { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *SInt64Rules) GetExample() []int64 { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *SInt64Rules) SetConst(v int64) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 6) +} + +func (x *SInt64Rules) SetLt(v int64) { + x.xxx_hidden_LessThan = &sInt64Rules_Lt{v} +} + +func (x *SInt64Rules) SetLte(v int64) { + x.xxx_hidden_LessThan = &sInt64Rules_Lte{v} +} + +func (x *SInt64Rules) SetGt(v int64) { + x.xxx_hidden_GreaterThan = &sInt64Rules_Gt{v} +} + +func (x *SInt64Rules) SetGte(v int64) { + x.xxx_hidden_GreaterThan = &sInt64Rules_Gte{v} +} + +func (x *SInt64Rules) SetIn(v []int64) { + x.xxx_hidden_In = v +} + +func (x *SInt64Rules) SetNotIn(v []int64) { + x.xxx_hidden_NotIn = v +} + +func (x *SInt64Rules) SetExample(v []int64) { + x.xxx_hidden_Example = v +} + +func (x *SInt64Rules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *SInt64Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *SInt64Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*sInt64Rules_Lt) + return ok +} + +func (x *SInt64Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*sInt64Rules_Lte) + return ok +} + +func (x *SInt64Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *SInt64Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*sInt64Rules_Gt) + return ok +} + +func (x *SInt64Rules) HasGte() bool { + if x == 
nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*sInt64Rules_Gte) + return ok +} + +func (x *SInt64Rules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = 0 +} + +func (x *SInt64Rules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *SInt64Rules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*sInt64Rules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *SInt64Rules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*sInt64Rules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *SInt64Rules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x *SInt64Rules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*sInt64Rules_Gt); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *SInt64Rules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*sInt64Rules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +const SInt64Rules_LessThan_not_set_case case_SInt64Rules_LessThan = 0 +const SInt64Rules_Lt_case case_SInt64Rules_LessThan = 2 +const SInt64Rules_Lte_case case_SInt64Rules_LessThan = 3 + +func (x *SInt64Rules) WhichLessThan() case_SInt64Rules_LessThan { + if x == nil { + return SInt64Rules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *sInt64Rules_Lt: + return SInt64Rules_Lt_case + case *sInt64Rules_Lte: + return SInt64Rules_Lte_case + default: + return SInt64Rules_LessThan_not_set_case + } +} + +const SInt64Rules_GreaterThan_not_set_case case_SInt64Rules_GreaterThan = 0 +const SInt64Rules_Gt_case case_SInt64Rules_GreaterThan = 4 +const SInt64Rules_Gte_case case_SInt64Rules_GreaterThan = 5 + +func (x *SInt64Rules) WhichGreaterThan() case_SInt64Rules_GreaterThan { + if x == nil { + return SInt64Rules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *sInt64Rules_Gt: + return SInt64Rules_Gt_case + case *sInt64Rules_Gte: + return SInt64Rules_Gte_case + default: + return 
SInt64Rules_GreaterThan_not_set_case + } +} + +type SInt64Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must equal 42 + // sint64 value = 1 [(buf.validate.field).sint64.const = 42]; + // } + // + // ``` + Const *int64 + // Fields of oneof xxx_hidden_LessThan: + // `lt` requires the field value to be less than the specified value (field + // < value). If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be less than 10 + // sint64 value = 1 [(buf.validate.field).sint64.lt = 10]; + // } + // + // ``` + Lt *int64 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be less than or equal to 10 + // sint64 value = 1 [(buf.validate.field).sint64.lte = 10]; + // } + // + // ``` + Lte *int64 + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySInt64 { + // // value must be greater than 5 [sint64.gt] + // sint64 value = 1 [(buf.validate.field).sint64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sint64.gt_lt] + // sint64 other_value = 2 [(buf.validate.field).sint64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sint64.gt_lt_exclusive] + // sint64 another_value = 3 [(buf.validate.field).sint64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *int64 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be greater than or equal to 5 [sint64.gte] + // sint64 value = 1 [(buf.validate.field).sint64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sint64.gte_lt] + // sint64 other_value = 2 [(buf.validate.field).sint64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sint64.gte_lt_exclusive] + // sint64 another_value = 3 [(buf.validate.field).sint64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *int64 + // -- end of xxx_hidden_GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message + // is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be in list [1, 2, 3] + // sint64 value = 1 [(buf.validate.field).sint64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int64 + // `not_in` requires the field value to not be equal to any of the specified + // values. If the field value is one of the specified values, an error + // message is generated. 
+ // + // ```proto + // + // message MySInt64 { + // // value must not be in list [1, 2, 3] + // sint64 value = 1 [(buf.validate.field).sint64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int64 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MySInt64 { + // sint64 value = 1 [ + // (buf.validate.field).sint64.example = 1, + // (buf.validate.field).sint64.example = -10 + // ]; + // } + // + // ``` + Example []int64 +} + +func (b0 SInt64Rules_builder) Build() *SInt64Rules { + m0 := &SInt64Rules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 6) + x.xxx_hidden_Const = *b.Const + } + if b.Lt != nil { + x.xxx_hidden_LessThan = &sInt64Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = &sInt64Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = &sInt64Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = &sInt64Rules_Gte{*b.Gte} + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_SInt64Rules_LessThan protoreflect.FieldNumber + +func (x case_SInt64Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[13].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_SInt64Rules_GreaterThan protoreflect.FieldNumber + +func (x case_SInt64Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[13].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isSInt64Rules_LessThan interface { + isSInt64Rules_LessThan() +} + +type sInt64Rules_Lt struct { + // `lt` 
requires the field value to be less than the specified value (field + // < value). If the field value is equal to or greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be less than 10 + // sint64 value = 1 [(buf.validate.field).sint64.lt = 10]; + // } + // + // ``` + Lt int64 `protobuf:"zigzag64,2,opt,name=lt,oneof"` +} + +type sInt64Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be less than or equal to 10 + // sint64 value = 1 [(buf.validate.field).sint64.lte = 10]; + // } + // + // ``` + Lte int64 `protobuf:"zigzag64,3,opt,name=lte,oneof"` +} + +func (*sInt64Rules_Lt) isSInt64Rules_LessThan() {} + +func (*sInt64Rules_Lte) isSInt64Rules_LessThan() {} + +type isSInt64Rules_GreaterThan interface { + isSInt64Rules_GreaterThan() +} + +type sInt64Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySInt64 { + // // value must be greater than 5 [sint64.gt] + // sint64 value = 1 [(buf.validate.field).sint64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sint64.gt_lt] + // sint64 other_value = 2 [(buf.validate.field).sint64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sint64.gt_lt_exclusive] + // sint64 another_value = 3 [(buf.validate.field).sint64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt int64 `protobuf:"zigzag64,4,opt,name=gt,oneof"` +} + +type sInt64Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySInt64 { + // // value must be greater than or equal to 5 [sint64.gte] + // sint64 value = 1 [(buf.validate.field).sint64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sint64.gte_lt] + // sint64 other_value = 2 [(buf.validate.field).sint64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sint64.gte_lt_exclusive] + // sint64 another_value = 3 [(buf.validate.field).sint64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte int64 `protobuf:"zigzag64,5,opt,name=gte,oneof"` +} + +func (*sInt64Rules_Gt) isSInt64Rules_GreaterThan() {} + +func (*sInt64Rules_Gte) isSInt64Rules_GreaterThan() {} + +// Fixed32Rules describes the rules applied to `fixed32` values. 
+type Fixed32Rules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const uint32 `protobuf:"fixed32,1,opt,name=const"` + xxx_hidden_LessThan isFixed32Rules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isFixed32Rules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_In []uint32 `protobuf:"fixed32,6,rep,name=in"` + xxx_hidden_NotIn []uint32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn"` + xxx_hidden_Example []uint32 `protobuf:"fixed32,8,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Fixed32Rules) Reset() { + *x = Fixed32Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Fixed32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Fixed32Rules) ProtoMessage() {} + +func (x *Fixed32Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Fixed32Rules) GetConst() uint32 { + if x != nil { + return x.xxx_hidden_Const + } + return 0 +} + +func (x *Fixed32Rules) GetLt() uint32 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*fixed32Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *Fixed32Rules) GetLte() uint32 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*fixed32Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *Fixed32Rules) GetGt() uint32 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*fixed32Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *Fixed32Rules) GetGte() uint32 { + if x != nil { + if x, ok := 
x.xxx_hidden_GreaterThan.(*fixed32Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *Fixed32Rules) GetIn() []uint32 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *Fixed32Rules) GetNotIn() []uint32 { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *Fixed32Rules) GetExample() []uint32 { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *Fixed32Rules) SetConst(v uint32) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 6) +} + +func (x *Fixed32Rules) SetLt(v uint32) { + x.xxx_hidden_LessThan = &fixed32Rules_Lt{v} +} + +func (x *Fixed32Rules) SetLte(v uint32) { + x.xxx_hidden_LessThan = &fixed32Rules_Lte{v} +} + +func (x *Fixed32Rules) SetGt(v uint32) { + x.xxx_hidden_GreaterThan = &fixed32Rules_Gt{v} +} + +func (x *Fixed32Rules) SetGte(v uint32) { + x.xxx_hidden_GreaterThan = &fixed32Rules_Gte{v} +} + +func (x *Fixed32Rules) SetIn(v []uint32) { + x.xxx_hidden_In = v +} + +func (x *Fixed32Rules) SetNotIn(v []uint32) { + x.xxx_hidden_NotIn = v +} + +func (x *Fixed32Rules) SetExample(v []uint32) { + x.xxx_hidden_Example = v +} + +func (x *Fixed32Rules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *Fixed32Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *Fixed32Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*fixed32Rules_Lt) + return ok +} + +func (x *Fixed32Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*fixed32Rules_Lte) + return ok +} + +func (x *Fixed32Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *Fixed32Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*fixed32Rules_Gt) + return ok +} + +func (x 
*Fixed32Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*fixed32Rules_Gte) + return ok +} + +func (x *Fixed32Rules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = 0 +} + +func (x *Fixed32Rules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *Fixed32Rules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*fixed32Rules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *Fixed32Rules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*fixed32Rules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *Fixed32Rules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x *Fixed32Rules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*fixed32Rules_Gt); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *Fixed32Rules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*fixed32Rules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +const Fixed32Rules_LessThan_not_set_case case_Fixed32Rules_LessThan = 0 +const Fixed32Rules_Lt_case case_Fixed32Rules_LessThan = 2 +const Fixed32Rules_Lte_case case_Fixed32Rules_LessThan = 3 + +func (x *Fixed32Rules) WhichLessThan() case_Fixed32Rules_LessThan { + if x == nil { + return Fixed32Rules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *fixed32Rules_Lt: + return Fixed32Rules_Lt_case + case *fixed32Rules_Lte: + return Fixed32Rules_Lte_case + default: + return Fixed32Rules_LessThan_not_set_case + } +} + +const Fixed32Rules_GreaterThan_not_set_case case_Fixed32Rules_GreaterThan = 0 +const Fixed32Rules_Gt_case case_Fixed32Rules_GreaterThan = 4 +const Fixed32Rules_Gte_case case_Fixed32Rules_GreaterThan = 5 + +func (x *Fixed32Rules) WhichGreaterThan() case_Fixed32Rules_GreaterThan { + if x == nil { + return Fixed32Rules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *fixed32Rules_Gt: + return Fixed32Rules_Gt_case + case *fixed32Rules_Gte: + 
return Fixed32Rules_Gte_case + default: + return Fixed32Rules_GreaterThan_not_set_case + } +} + +type Fixed32Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. + // If the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must equal 42 + // fixed32 value = 1 [(buf.validate.field).fixed32.const = 42]; + // } + // + // ``` + Const *uint32 + // Fields of oneof xxx_hidden_LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be less than 10 + // fixed32 value = 1 [(buf.validate.field).fixed32.lt = 10]; + // } + // + // ``` + Lt *uint32 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be less than or equal to 10 + // fixed32 value = 1 [(buf.validate.field).fixed32.lte = 10]; + // } + // + // ``` + Lte *uint32 + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyFixed32 { + // // value must be greater than 5 [fixed32.gt] + // fixed32 value = 1 [(buf.validate.field).fixed32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [fixed32.gt_lt] + // fixed32 other_value = 2 [(buf.validate.field).fixed32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [fixed32.gt_lt_exclusive] + // fixed32 another_value = 3 [(buf.validate.field).fixed32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *uint32 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be greater than or equal to 5 [fixed32.gte] + // fixed32 value = 1 [(buf.validate.field).fixed32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [fixed32.gte_lt] + // fixed32 other_value = 2 [(buf.validate.field).fixed32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [fixed32.gte_lt_exclusive] + // fixed32 another_value = 3 [(buf.validate.field).fixed32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *uint32 + // -- end of xxx_hidden_GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message + // is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be in list [1, 2, 3] + // fixed32 value = 1 [(buf.validate.field).fixed32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []uint32 + // `not_in` requires the field value to not be equal to any of the specified + // values. 
If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must not be in list [1, 2, 3] + // fixed32 value = 1 [(buf.validate.field).fixed32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []uint32 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyFixed32 { + // fixed32 value = 1 [ + // (buf.validate.field).fixed32.example = 1, + // (buf.validate.field).fixed32.example = 2 + // ]; + // } + // + // ``` + Example []uint32 +} + +func (b0 Fixed32Rules_builder) Build() *Fixed32Rules { + m0 := &Fixed32Rules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 6) + x.xxx_hidden_Const = *b.Const + } + if b.Lt != nil { + x.xxx_hidden_LessThan = &fixed32Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = &fixed32Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = &fixed32Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = &fixed32Rules_Gte{*b.Gte} + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_Fixed32Rules_LessThan protoreflect.FieldNumber + +func (x case_Fixed32Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[14].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_Fixed32Rules_GreaterThan protoreflect.FieldNumber + +func (x case_Fixed32Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[14].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type 
isFixed32Rules_LessThan interface { + isFixed32Rules_LessThan() +} + +type fixed32Rules_Lt struct { + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be less than 10 + // fixed32 value = 1 [(buf.validate.field).fixed32.lt = 10]; + // } + // + // ``` + Lt uint32 `protobuf:"fixed32,2,opt,name=lt,oneof"` +} + +type fixed32Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be less than or equal to 10 + // fixed32 value = 1 [(buf.validate.field).fixed32.lte = 10]; + // } + // + // ``` + Lte uint32 `protobuf:"fixed32,3,opt,name=lte,oneof"` +} + +func (*fixed32Rules_Lt) isFixed32Rules_LessThan() {} + +func (*fixed32Rules_Lte) isFixed32Rules_LessThan() {} + +type isFixed32Rules_GreaterThan interface { + isFixed32Rules_GreaterThan() +} + +type fixed32Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyFixed32 { + // // value must be greater than 5 [fixed32.gt] + // fixed32 value = 1 [(buf.validate.field).fixed32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [fixed32.gt_lt] + // fixed32 other_value = 2 [(buf.validate.field).fixed32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [fixed32.gt_lt_exclusive] + // fixed32 another_value = 3 [(buf.validate.field).fixed32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt uint32 `protobuf:"fixed32,4,opt,name=gt,oneof"` +} + +type fixed32Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFixed32 { + // // value must be greater than or equal to 5 [fixed32.gte] + // fixed32 value = 1 [(buf.validate.field).fixed32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [fixed32.gte_lt] + // fixed32 other_value = 2 [(buf.validate.field).fixed32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [fixed32.gte_lt_exclusive] + // fixed32 another_value = 3 [(buf.validate.field).fixed32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte uint32 `protobuf:"fixed32,5,opt,name=gte,oneof"` +} + +func (*fixed32Rules_Gt) isFixed32Rules_GreaterThan() {} + +func (*fixed32Rules_Gte) isFixed32Rules_GreaterThan() {} + +// Fixed64Rules describes the rules applied to `fixed64` values. 
+type Fixed64Rules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const uint64 `protobuf:"fixed64,1,opt,name=const"` + xxx_hidden_LessThan isFixed64Rules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isFixed64Rules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_In []uint64 `protobuf:"fixed64,6,rep,name=in"` + xxx_hidden_NotIn []uint64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn"` + xxx_hidden_Example []uint64 `protobuf:"fixed64,8,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Fixed64Rules) Reset() { + *x = Fixed64Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Fixed64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Fixed64Rules) ProtoMessage() {} + +func (x *Fixed64Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Fixed64Rules) GetConst() uint64 { + if x != nil { + return x.xxx_hidden_Const + } + return 0 +} + +func (x *Fixed64Rules) GetLt() uint64 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*fixed64Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *Fixed64Rules) GetLte() uint64 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*fixed64Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *Fixed64Rules) GetGt() uint64 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*fixed64Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *Fixed64Rules) GetGte() uint64 { + if x != nil { + if x, ok := 
x.xxx_hidden_GreaterThan.(*fixed64Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *Fixed64Rules) GetIn() []uint64 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *Fixed64Rules) GetNotIn() []uint64 { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *Fixed64Rules) GetExample() []uint64 { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *Fixed64Rules) SetConst(v uint64) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 6) +} + +func (x *Fixed64Rules) SetLt(v uint64) { + x.xxx_hidden_LessThan = &fixed64Rules_Lt{v} +} + +func (x *Fixed64Rules) SetLte(v uint64) { + x.xxx_hidden_LessThan = &fixed64Rules_Lte{v} +} + +func (x *Fixed64Rules) SetGt(v uint64) { + x.xxx_hidden_GreaterThan = &fixed64Rules_Gt{v} +} + +func (x *Fixed64Rules) SetGte(v uint64) { + x.xxx_hidden_GreaterThan = &fixed64Rules_Gte{v} +} + +func (x *Fixed64Rules) SetIn(v []uint64) { + x.xxx_hidden_In = v +} + +func (x *Fixed64Rules) SetNotIn(v []uint64) { + x.xxx_hidden_NotIn = v +} + +func (x *Fixed64Rules) SetExample(v []uint64) { + x.xxx_hidden_Example = v +} + +func (x *Fixed64Rules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *Fixed64Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *Fixed64Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*fixed64Rules_Lt) + return ok +} + +func (x *Fixed64Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*fixed64Rules_Lte) + return ok +} + +func (x *Fixed64Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *Fixed64Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*fixed64Rules_Gt) + return ok +} + +func (x 
*Fixed64Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*fixed64Rules_Gte) + return ok +} + +func (x *Fixed64Rules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = 0 +} + +func (x *Fixed64Rules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *Fixed64Rules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*fixed64Rules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *Fixed64Rules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*fixed64Rules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *Fixed64Rules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x *Fixed64Rules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*fixed64Rules_Gt); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *Fixed64Rules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*fixed64Rules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +const Fixed64Rules_LessThan_not_set_case case_Fixed64Rules_LessThan = 0 +const Fixed64Rules_Lt_case case_Fixed64Rules_LessThan = 2 +const Fixed64Rules_Lte_case case_Fixed64Rules_LessThan = 3 + +func (x *Fixed64Rules) WhichLessThan() case_Fixed64Rules_LessThan { + if x == nil { + return Fixed64Rules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *fixed64Rules_Lt: + return Fixed64Rules_Lt_case + case *fixed64Rules_Lte: + return Fixed64Rules_Lte_case + default: + return Fixed64Rules_LessThan_not_set_case + } +} + +const Fixed64Rules_GreaterThan_not_set_case case_Fixed64Rules_GreaterThan = 0 +const Fixed64Rules_Gt_case case_Fixed64Rules_GreaterThan = 4 +const Fixed64Rules_Gte_case case_Fixed64Rules_GreaterThan = 5 + +func (x *Fixed64Rules) WhichGreaterThan() case_Fixed64Rules_GreaterThan { + if x == nil { + return Fixed64Rules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *fixed64Rules_Gt: + return Fixed64Rules_Gt_case + case *fixed64Rules_Gte: + 
return Fixed64Rules_Gte_case + default: + return Fixed64Rules_GreaterThan_not_set_case + } +} + +type Fixed64Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must equal 42 + // fixed64 value = 1 [(buf.validate.field).fixed64.const = 42]; + // } + // + // ``` + Const *uint64 + // Fields of oneof xxx_hidden_LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be less than 10 + // fixed64 value = 1 [(buf.validate.field).fixed64.lt = 10]; + // } + // + // ``` + Lt *uint64 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be less than or equal to 10 + // fixed64 value = 1 [(buf.validate.field).fixed64.lte = 10]; + // } + // + // ``` + Lte *uint64 + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyFixed64 { + // // value must be greater than 5 [fixed64.gt] + // fixed64 value = 1 [(buf.validate.field).fixed64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [fixed64.gt_lt] + // fixed64 other_value = 2 [(buf.validate.field).fixed64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [fixed64.gt_lt_exclusive] + // fixed64 another_value = 3 [(buf.validate.field).fixed64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *uint64 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be greater than or equal to 5 [fixed64.gte] + // fixed64 value = 1 [(buf.validate.field).fixed64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [fixed64.gte_lt] + // fixed64 other_value = 2 [(buf.validate.field).fixed64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [fixed64.gte_lt_exclusive] + // fixed64 another_value = 3 [(buf.validate.field).fixed64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *uint64 + // -- end of xxx_hidden_GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be in list [1, 2, 3] + // fixed64 value = 1 [(buf.validate.field).fixed64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []uint64 + // `not_in` requires the field value to not be equal to any of the specified + // values. 
If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must not be in list [1, 2, 3] + // fixed64 value = 1 [(buf.validate.field).fixed64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []uint64 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyFixed64 { + // fixed64 value = 1 [ + // (buf.validate.field).fixed64.example = 1, + // (buf.validate.field).fixed64.example = 2 + // ]; + // } + // + // ``` + Example []uint64 +} + +func (b0 Fixed64Rules_builder) Build() *Fixed64Rules { + m0 := &Fixed64Rules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 6) + x.xxx_hidden_Const = *b.Const + } + if b.Lt != nil { + x.xxx_hidden_LessThan = &fixed64Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = &fixed64Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = &fixed64Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = &fixed64Rules_Gte{*b.Gte} + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_Fixed64Rules_LessThan protoreflect.FieldNumber + +func (x case_Fixed64Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[15].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_Fixed64Rules_GreaterThan protoreflect.FieldNumber + +func (x case_Fixed64Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[15].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type 
isFixed64Rules_LessThan interface { + isFixed64Rules_LessThan() +} + +type fixed64Rules_Lt struct { + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be less than 10 + // fixed64 value = 1 [(buf.validate.field).fixed64.lt = 10]; + // } + // + // ``` + Lt uint64 `protobuf:"fixed64,2,opt,name=lt,oneof"` +} + +type fixed64Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be less than or equal to 10 + // fixed64 value = 1 [(buf.validate.field).fixed64.lte = 10]; + // } + // + // ``` + Lte uint64 `protobuf:"fixed64,3,opt,name=lte,oneof"` +} + +func (*fixed64Rules_Lt) isFixed64Rules_LessThan() {} + +func (*fixed64Rules_Lte) isFixed64Rules_LessThan() {} + +type isFixed64Rules_GreaterThan interface { + isFixed64Rules_GreaterThan() +} + +type fixed64Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyFixed64 { + // // value must be greater than 5 [fixed64.gt] + // fixed64 value = 1 [(buf.validate.field).fixed64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [fixed64.gt_lt] + // fixed64 other_value = 2 [(buf.validate.field).fixed64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [fixed64.gt_lt_exclusive] + // fixed64 another_value = 3 [(buf.validate.field).fixed64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt uint64 `protobuf:"fixed64,4,opt,name=gt,oneof"` +} + +type fixed64Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyFixed64 { + // // value must be greater than or equal to 5 [fixed64.gte] + // fixed64 value = 1 [(buf.validate.field).fixed64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [fixed64.gte_lt] + // fixed64 other_value = 2 [(buf.validate.field).fixed64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [fixed64.gte_lt_exclusive] + // fixed64 another_value = 3 [(buf.validate.field).fixed64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte uint64 `protobuf:"fixed64,5,opt,name=gte,oneof"` +} + +func (*fixed64Rules_Gt) isFixed64Rules_GreaterThan() {} + +func (*fixed64Rules_Gte) isFixed64Rules_GreaterThan() {} + +// SFixed32Rules describes the rules applied to `fixed32` values. 
+type SFixed32Rules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const int32 `protobuf:"fixed32,1,opt,name=const"` + xxx_hidden_LessThan isSFixed32Rules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isSFixed32Rules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_In []int32 `protobuf:"fixed32,6,rep,name=in"` + xxx_hidden_NotIn []int32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn"` + xxx_hidden_Example []int32 `protobuf:"fixed32,8,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SFixed32Rules) Reset() { + *x = SFixed32Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SFixed32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SFixed32Rules) ProtoMessage() {} + +func (x *SFixed32Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SFixed32Rules) GetConst() int32 { + if x != nil { + return x.xxx_hidden_Const + } + return 0 +} + +func (x *SFixed32Rules) GetLt() int32 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*sFixed32Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *SFixed32Rules) GetLte() int32 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*sFixed32Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *SFixed32Rules) GetGt() int32 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*sFixed32Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *SFixed32Rules) GetGte() int32 { + if x != nil { + if x, ok 
:= x.xxx_hidden_GreaterThan.(*sFixed32Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *SFixed32Rules) GetIn() []int32 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *SFixed32Rules) GetNotIn() []int32 { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *SFixed32Rules) GetExample() []int32 { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *SFixed32Rules) SetConst(v int32) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 6) +} + +func (x *SFixed32Rules) SetLt(v int32) { + x.xxx_hidden_LessThan = &sFixed32Rules_Lt{v} +} + +func (x *SFixed32Rules) SetLte(v int32) { + x.xxx_hidden_LessThan = &sFixed32Rules_Lte{v} +} + +func (x *SFixed32Rules) SetGt(v int32) { + x.xxx_hidden_GreaterThan = &sFixed32Rules_Gt{v} +} + +func (x *SFixed32Rules) SetGte(v int32) { + x.xxx_hidden_GreaterThan = &sFixed32Rules_Gte{v} +} + +func (x *SFixed32Rules) SetIn(v []int32) { + x.xxx_hidden_In = v +} + +func (x *SFixed32Rules) SetNotIn(v []int32) { + x.xxx_hidden_NotIn = v +} + +func (x *SFixed32Rules) SetExample(v []int32) { + x.xxx_hidden_Example = v +} + +func (x *SFixed32Rules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *SFixed32Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *SFixed32Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*sFixed32Rules_Lt) + return ok +} + +func (x *SFixed32Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*sFixed32Rules_Lte) + return ok +} + +func (x *SFixed32Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *SFixed32Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*sFixed32Rules_Gt) + return ok +} 
+ +func (x *SFixed32Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*sFixed32Rules_Gte) + return ok +} + +func (x *SFixed32Rules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = 0 +} + +func (x *SFixed32Rules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *SFixed32Rules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*sFixed32Rules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *SFixed32Rules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*sFixed32Rules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *SFixed32Rules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x *SFixed32Rules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*sFixed32Rules_Gt); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *SFixed32Rules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*sFixed32Rules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +const SFixed32Rules_LessThan_not_set_case case_SFixed32Rules_LessThan = 0 +const SFixed32Rules_Lt_case case_SFixed32Rules_LessThan = 2 +const SFixed32Rules_Lte_case case_SFixed32Rules_LessThan = 3 + +func (x *SFixed32Rules) WhichLessThan() case_SFixed32Rules_LessThan { + if x == nil { + return SFixed32Rules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *sFixed32Rules_Lt: + return SFixed32Rules_Lt_case + case *sFixed32Rules_Lte: + return SFixed32Rules_Lte_case + default: + return SFixed32Rules_LessThan_not_set_case + } +} + +const SFixed32Rules_GreaterThan_not_set_case case_SFixed32Rules_GreaterThan = 0 +const SFixed32Rules_Gt_case case_SFixed32Rules_GreaterThan = 4 +const SFixed32Rules_Gte_case case_SFixed32Rules_GreaterThan = 5 + +func (x *SFixed32Rules) WhichGreaterThan() case_SFixed32Rules_GreaterThan { + if x == nil { + return SFixed32Rules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *sFixed32Rules_Gt: + return 
SFixed32Rules_Gt_case + case *sFixed32Rules_Gte: + return SFixed32Rules_Gte_case + default: + return SFixed32Rules_GreaterThan_not_set_case + } +} + +type SFixed32Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must equal 42 + // sfixed32 value = 1 [(buf.validate.field).sfixed32.const = 42]; + // } + // + // ``` + Const *int32 + // Fields of oneof xxx_hidden_LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be less than 10 + // sfixed32 value = 1 [(buf.validate.field).sfixed32.lt = 10]; + // } + // + // ``` + Lt *int32 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be less than or equal to 10 + // sfixed32 value = 1 [(buf.validate.field).sfixed32.lte = 10]; + // } + // + // ``` + Lte *int32 + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySFixed32 { + // // value must be greater than 5 [sfixed32.gt] + // sfixed32 value = 1 [(buf.validate.field).sfixed32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sfixed32.gt_lt] + // sfixed32 other_value = 2 [(buf.validate.field).sfixed32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sfixed32.gt_lt_exclusive] + // sfixed32 another_value = 3 [(buf.validate.field).sfixed32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *int32 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be greater than or equal to 5 [sfixed32.gte] + // sfixed32 value = 1 [(buf.validate.field).sfixed32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sfixed32.gte_lt] + // sfixed32 other_value = 2 [(buf.validate.field).sfixed32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sfixed32.gte_lt_exclusive] + // sfixed32 another_value = 3 [(buf.validate.field).sfixed32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *int32 + // -- end of xxx_hidden_GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be in list [1, 2, 3] + // sfixed32 value = 1 [(buf.validate.field).sfixed32 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int32 + // `not_in` requires the field value to not be equal to any of the specified + // values. 
If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must not be in list [1, 2, 3] + // sfixed32 value = 1 [(buf.validate.field).sfixed32 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int32 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MySFixed32 { + // sfixed32 value = 1 [ + // (buf.validate.field).sfixed32.example = 1, + // (buf.validate.field).sfixed32.example = 2 + // ]; + // } + // + // ``` + Example []int32 +} + +func (b0 SFixed32Rules_builder) Build() *SFixed32Rules { + m0 := &SFixed32Rules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 6) + x.xxx_hidden_Const = *b.Const + } + if b.Lt != nil { + x.xxx_hidden_LessThan = &sFixed32Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = &sFixed32Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = &sFixed32Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = &sFixed32Rules_Gte{*b.Gte} + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_SFixed32Rules_LessThan protoreflect.FieldNumber + +func (x case_SFixed32Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[16].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_SFixed32Rules_GreaterThan protoreflect.FieldNumber + +func (x case_SFixed32Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[16].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, 
protoreflect.FieldNumber(x)) +} + +type isSFixed32Rules_LessThan interface { + isSFixed32Rules_LessThan() +} + +type sFixed32Rules_Lt struct { + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be less than 10 + // sfixed32 value = 1 [(buf.validate.field).sfixed32.lt = 10]; + // } + // + // ``` + Lt int32 `protobuf:"fixed32,2,opt,name=lt,oneof"` +} + +type sFixed32Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be less than or equal to 10 + // sfixed32 value = 1 [(buf.validate.field).sfixed32.lte = 10]; + // } + // + // ``` + Lte int32 `protobuf:"fixed32,3,opt,name=lte,oneof"` +} + +func (*sFixed32Rules_Lt) isSFixed32Rules_LessThan() {} + +func (*sFixed32Rules_Lte) isSFixed32Rules_LessThan() {} + +type isSFixed32Rules_GreaterThan interface { + isSFixed32Rules_GreaterThan() +} + +type sFixed32Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySFixed32 { + // // value must be greater than 5 [sfixed32.gt] + // sfixed32 value = 1 [(buf.validate.field).sfixed32.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sfixed32.gt_lt] + // sfixed32 other_value = 2 [(buf.validate.field).sfixed32 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sfixed32.gt_lt_exclusive] + // sfixed32 another_value = 3 [(buf.validate.field).sfixed32 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt int32 `protobuf:"fixed32,4,opt,name=gt,oneof"` +} + +type sFixed32Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySFixed32 { + // // value must be greater than or equal to 5 [sfixed32.gte] + // sfixed32 value = 1 [(buf.validate.field).sfixed32.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sfixed32.gte_lt] + // sfixed32 other_value = 2 [(buf.validate.field).sfixed32 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sfixed32.gte_lt_exclusive] + // sfixed32 another_value = 3 [(buf.validate.field).sfixed32 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte int32 `protobuf:"fixed32,5,opt,name=gte,oneof"` +} + +func (*sFixed32Rules_Gt) isSFixed32Rules_GreaterThan() {} + +func (*sFixed32Rules_Gte) isSFixed32Rules_GreaterThan() {} + +// SFixed64Rules describes the rules applied to `fixed64` values. 
+type SFixed64Rules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const int64 `protobuf:"fixed64,1,opt,name=const"` + xxx_hidden_LessThan isSFixed64Rules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isSFixed64Rules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_In []int64 `protobuf:"fixed64,6,rep,name=in"` + xxx_hidden_NotIn []int64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn"` + xxx_hidden_Example []int64 `protobuf:"fixed64,8,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SFixed64Rules) Reset() { + *x = SFixed64Rules{} + mi := &file_buf_validate_validate_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SFixed64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SFixed64Rules) ProtoMessage() {} + +func (x *SFixed64Rules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SFixed64Rules) GetConst() int64 { + if x != nil { + return x.xxx_hidden_Const + } + return 0 +} + +func (x *SFixed64Rules) GetLt() int64 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*sFixed64Rules_Lt); ok { + return x.Lt + } + } + return 0 +} + +func (x *SFixed64Rules) GetLte() int64 { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*sFixed64Rules_Lte); ok { + return x.Lte + } + } + return 0 +} + +func (x *SFixed64Rules) GetGt() int64 { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*sFixed64Rules_Gt); ok { + return x.Gt + } + } + return 0 +} + +func (x *SFixed64Rules) GetGte() int64 { + if x != nil { + if x, ok 
:= x.xxx_hidden_GreaterThan.(*sFixed64Rules_Gte); ok { + return x.Gte + } + } + return 0 +} + +func (x *SFixed64Rules) GetIn() []int64 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *SFixed64Rules) GetNotIn() []int64 { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *SFixed64Rules) GetExample() []int64 { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *SFixed64Rules) SetConst(v int64) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 6) +} + +func (x *SFixed64Rules) SetLt(v int64) { + x.xxx_hidden_LessThan = &sFixed64Rules_Lt{v} +} + +func (x *SFixed64Rules) SetLte(v int64) { + x.xxx_hidden_LessThan = &sFixed64Rules_Lte{v} +} + +func (x *SFixed64Rules) SetGt(v int64) { + x.xxx_hidden_GreaterThan = &sFixed64Rules_Gt{v} +} + +func (x *SFixed64Rules) SetGte(v int64) { + x.xxx_hidden_GreaterThan = &sFixed64Rules_Gte{v} +} + +func (x *SFixed64Rules) SetIn(v []int64) { + x.xxx_hidden_In = v +} + +func (x *SFixed64Rules) SetNotIn(v []int64) { + x.xxx_hidden_NotIn = v +} + +func (x *SFixed64Rules) SetExample(v []int64) { + x.xxx_hidden_Example = v +} + +func (x *SFixed64Rules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *SFixed64Rules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *SFixed64Rules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*sFixed64Rules_Lt) + return ok +} + +func (x *SFixed64Rules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*sFixed64Rules_Lte) + return ok +} + +func (x *SFixed64Rules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *SFixed64Rules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*sFixed64Rules_Gt) + return ok +} 
+ +func (x *SFixed64Rules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*sFixed64Rules_Gte) + return ok +} + +func (x *SFixed64Rules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = 0 +} + +func (x *SFixed64Rules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *SFixed64Rules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*sFixed64Rules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *SFixed64Rules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*sFixed64Rules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *SFixed64Rules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x *SFixed64Rules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*sFixed64Rules_Gt); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *SFixed64Rules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*sFixed64Rules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +const SFixed64Rules_LessThan_not_set_case case_SFixed64Rules_LessThan = 0 +const SFixed64Rules_Lt_case case_SFixed64Rules_LessThan = 2 +const SFixed64Rules_Lte_case case_SFixed64Rules_LessThan = 3 + +func (x *SFixed64Rules) WhichLessThan() case_SFixed64Rules_LessThan { + if x == nil { + return SFixed64Rules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *sFixed64Rules_Lt: + return SFixed64Rules_Lt_case + case *sFixed64Rules_Lte: + return SFixed64Rules_Lte_case + default: + return SFixed64Rules_LessThan_not_set_case + } +} + +const SFixed64Rules_GreaterThan_not_set_case case_SFixed64Rules_GreaterThan = 0 +const SFixed64Rules_Gt_case case_SFixed64Rules_GreaterThan = 4 +const SFixed64Rules_Gte_case case_SFixed64Rules_GreaterThan = 5 + +func (x *SFixed64Rules) WhichGreaterThan() case_SFixed64Rules_GreaterThan { + if x == nil { + return SFixed64Rules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *sFixed64Rules_Gt: + return 
SFixed64Rules_Gt_case + case *sFixed64Rules_Gte: + return SFixed64Rules_Gte_case + default: + return SFixed64Rules_GreaterThan_not_set_case + } +} + +type SFixed64Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must equal 42 + // sfixed64 value = 1 [(buf.validate.field).sfixed64.const = 42]; + // } + // + // ``` + Const *int64 + // Fields of oneof xxx_hidden_LessThan: + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be less than 10 + // sfixed64 value = 1 [(buf.validate.field).sfixed64.lt = 10]; + // } + // + // ``` + Lt *int64 + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be less than or equal to 10 + // sfixed64 value = 1 [(buf.validate.field).sfixed64.lte = 10]; + // } + // + // ``` + Lte *int64 + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySFixed64 { + // // value must be greater than 5 [sfixed64.gt] + // sfixed64 value = 1 [(buf.validate.field).sfixed64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sfixed64.gt_lt] + // sfixed64 other_value = 2 [(buf.validate.field).sfixed64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sfixed64.gt_lt_exclusive] + // sfixed64 another_value = 3 [(buf.validate.field).sfixed64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt *int64 + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be greater than or equal to 5 [sfixed64.gte] + // sfixed64 value = 1 [(buf.validate.field).sfixed64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sfixed64.gte_lt] + // sfixed64 other_value = 2 [(buf.validate.field).sfixed64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sfixed64.gte_lt_exclusive] + // sfixed64 another_value = 3 [(buf.validate.field).sfixed64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte *int64 + // -- end of xxx_hidden_GreaterThan + // `in` requires the field value to be equal to one of the specified values. + // If the field value isn't one of the specified values, an error message is + // generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be in list [1, 2, 3] + // sfixed64 value = 1 [(buf.validate.field).sfixed64 = { in: [1, 2, 3] }]; + // } + // + // ``` + In []int64 + // `not_in` requires the field value to not be equal to any of the specified + // values. 
If the field value is one of the specified values, an error + // message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must not be in list [1, 2, 3] + // sfixed64 value = 1 [(buf.validate.field).sfixed64 = { not_in: [1, 2, 3] }]; + // } + // + // ``` + NotIn []int64 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MySFixed64 { + // sfixed64 value = 1 [ + // (buf.validate.field).sfixed64.example = 1, + // (buf.validate.field).sfixed64.example = 2 + // ]; + // } + // + // ``` + Example []int64 +} + +func (b0 SFixed64Rules_builder) Build() *SFixed64Rules { + m0 := &SFixed64Rules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 6) + x.xxx_hidden_Const = *b.Const + } + if b.Lt != nil { + x.xxx_hidden_LessThan = &sFixed64Rules_Lt{*b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = &sFixed64Rules_Lte{*b.Lte} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = &sFixed64Rules_Gt{*b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = &sFixed64Rules_Gte{*b.Gte} + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_SFixed64Rules_LessThan protoreflect.FieldNumber + +func (x case_SFixed64Rules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[17].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_SFixed64Rules_GreaterThan protoreflect.FieldNumber + +func (x case_SFixed64Rules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[17].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, 
protoreflect.FieldNumber(x)) +} + +type isSFixed64Rules_LessThan interface { + isSFixed64Rules_LessThan() +} + +type sFixed64Rules_Lt struct { + // `lt` requires the field value to be less than the specified value (field < + // value). If the field value is equal to or greater than the specified value, + // an error message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be less than 10 + // sfixed64 value = 1 [(buf.validate.field).sfixed64.lt = 10]; + // } + // + // ``` + Lt int64 `protobuf:"fixed64,2,opt,name=lt,oneof"` +} + +type sFixed64Rules_Lte struct { + // `lte` requires the field value to be less than or equal to the specified + // value (field <= value). If the field value is greater than the specified + // value, an error message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be less than or equal to 10 + // sfixed64 value = 1 [(buf.validate.field).sfixed64.lte = 10]; + // } + // + // ``` + Lte int64 `protobuf:"fixed64,3,opt,name=lte,oneof"` +} + +func (*sFixed64Rules_Lt) isSFixed64Rules_LessThan() {} + +func (*sFixed64Rules_Lte) isSFixed64Rules_LessThan() {} + +type isSFixed64Rules_GreaterThan interface { + isSFixed64Rules_GreaterThan() +} + +type sFixed64Rules_Gt struct { + // `gt` requires the field value to be greater than the specified value + // (exclusive). If the value of `gt` is larger than a specified `lt` or + // `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MySFixed64 { + // // value must be greater than 5 [sfixed64.gt] + // sfixed64 value = 1 [(buf.validate.field).sfixed64.gt = 5]; + // + // // value must be greater than 5 and less than 10 [sfixed64.gt_lt] + // sfixed64 other_value = 2 [(buf.validate.field).sfixed64 = { gt: 5, lt: 10 }]; + // + // // value must be greater than 10 or less than 5 [sfixed64.gt_lt_exclusive] + // sfixed64 another_value = 3 [(buf.validate.field).sfixed64 = { gt: 10, lt: 5 }]; + // } + // + // ``` + Gt int64 `protobuf:"fixed64,4,opt,name=gt,oneof"` +} + +type sFixed64Rules_Gte struct { + // `gte` requires the field value to be greater than or equal to the specified + // value (exclusive). If the value of `gte` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MySFixed64 { + // // value must be greater than or equal to 5 [sfixed64.gte] + // sfixed64 value = 1 [(buf.validate.field).sfixed64.gte = 5]; + // + // // value must be greater than or equal to 5 and less than 10 [sfixed64.gte_lt] + // sfixed64 other_value = 2 [(buf.validate.field).sfixed64 = { gte: 5, lt: 10 }]; + // + // // value must be greater than or equal to 10 or less than 5 [sfixed64.gte_lt_exclusive] + // sfixed64 another_value = 3 [(buf.validate.field).sfixed64 = { gte: 10, lt: 5 }]; + // } + // + // ``` + Gte int64 `protobuf:"fixed64,5,opt,name=gte,oneof"` +} + +func (*sFixed64Rules_Gt) isSFixed64Rules_GreaterThan() {} + +func (*sFixed64Rules_Gte) isSFixed64Rules_GreaterThan() {} + +// BoolRules describes the rules applied to `bool` values. These rules +// may also be applied to the `google.protobuf.BoolValue` Well-Known-Type. 
+type BoolRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const bool `protobuf:"varint,1,opt,name=const"` + xxx_hidden_Example []bool `protobuf:"varint,2,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BoolRules) Reset() { + *x = BoolRules{} + mi := &file_buf_validate_validate_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BoolRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoolRules) ProtoMessage() {} + +func (x *BoolRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BoolRules) GetConst() bool { + if x != nil { + return x.xxx_hidden_Const + } + return false +} + +func (x *BoolRules) GetExample() []bool { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *BoolRules) SetConst(v bool) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 2) +} + +func (x *BoolRules) SetExample(v []bool) { + x.xxx_hidden_Example = v +} + +func (x *BoolRules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *BoolRules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = false +} + +type BoolRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified boolean value. + // If the field value doesn't match, an error message is generated. 
+ // + // ```proto + // + // message MyBool { + // // value must equal true + // bool value = 1 [(buf.validate.field).bool.const = true]; + // } + // + // ``` + Const *bool + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyBool { + // bool value = 1 [ + // (buf.validate.field).bool.example = 1, + // (buf.validate.field).bool.example = 2 + // ]; + // } + // + // ``` + Example []bool +} + +func (b0 BoolRules_builder) Build() *BoolRules { + m0 := &BoolRules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 2) + x.xxx_hidden_Const = *b.Const + } + x.xxx_hidden_Example = b.Example + return m0 +} + +// StringRules describes the rules applied to `string` values These +// rules may also be applied to the `google.protobuf.StringValue` Well-Known-Type. 
+type StringRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const *string `protobuf:"bytes,1,opt,name=const"` + xxx_hidden_Len uint64 `protobuf:"varint,19,opt,name=len"` + xxx_hidden_MinLen uint64 `protobuf:"varint,2,opt,name=min_len,json=minLen"` + xxx_hidden_MaxLen uint64 `protobuf:"varint,3,opt,name=max_len,json=maxLen"` + xxx_hidden_LenBytes uint64 `protobuf:"varint,20,opt,name=len_bytes,json=lenBytes"` + xxx_hidden_MinBytes uint64 `protobuf:"varint,4,opt,name=min_bytes,json=minBytes"` + xxx_hidden_MaxBytes uint64 `protobuf:"varint,5,opt,name=max_bytes,json=maxBytes"` + xxx_hidden_Pattern *string `protobuf:"bytes,6,opt,name=pattern"` + xxx_hidden_Prefix *string `protobuf:"bytes,7,opt,name=prefix"` + xxx_hidden_Suffix *string `protobuf:"bytes,8,opt,name=suffix"` + xxx_hidden_Contains *string `protobuf:"bytes,9,opt,name=contains"` + xxx_hidden_NotContains *string `protobuf:"bytes,23,opt,name=not_contains,json=notContains"` + xxx_hidden_In []string `protobuf:"bytes,10,rep,name=in"` + xxx_hidden_NotIn []string `protobuf:"bytes,11,rep,name=not_in,json=notIn"` + xxx_hidden_WellKnown isStringRules_WellKnown `protobuf_oneof:"well_known"` + xxx_hidden_Strict bool `protobuf:"varint,25,opt,name=strict"` + xxx_hidden_Example []string `protobuf:"bytes,34,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StringRules) Reset() { + *x = StringRules{} + mi := &file_buf_validate_validate_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StringRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringRules) ProtoMessage() {} + +func (x *StringRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[19] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *StringRules) GetConst() string { + if x != nil { + if x.xxx_hidden_Const != nil { + return *x.xxx_hidden_Const + } + return "" + } + return "" +} + +func (x *StringRules) GetLen() uint64 { + if x != nil { + return x.xxx_hidden_Len + } + return 0 +} + +func (x *StringRules) GetMinLen() uint64 { + if x != nil { + return x.xxx_hidden_MinLen + } + return 0 +} + +func (x *StringRules) GetMaxLen() uint64 { + if x != nil { + return x.xxx_hidden_MaxLen + } + return 0 +} + +func (x *StringRules) GetLenBytes() uint64 { + if x != nil { + return x.xxx_hidden_LenBytes + } + return 0 +} + +func (x *StringRules) GetMinBytes() uint64 { + if x != nil { + return x.xxx_hidden_MinBytes + } + return 0 +} + +func (x *StringRules) GetMaxBytes() uint64 { + if x != nil { + return x.xxx_hidden_MaxBytes + } + return 0 +} + +func (x *StringRules) GetPattern() string { + if x != nil { + if x.xxx_hidden_Pattern != nil { + return *x.xxx_hidden_Pattern + } + return "" + } + return "" +} + +func (x *StringRules) GetPrefix() string { + if x != nil { + if x.xxx_hidden_Prefix != nil { + return *x.xxx_hidden_Prefix + } + return "" + } + return "" +} + +func (x *StringRules) GetSuffix() string { + if x != nil { + if x.xxx_hidden_Suffix != nil { + return *x.xxx_hidden_Suffix + } + return "" + } + return "" +} + +func (x *StringRules) GetContains() string { + if x != nil { + if x.xxx_hidden_Contains != nil { + return *x.xxx_hidden_Contains + } + return "" + } + return "" +} + +func (x *StringRules) GetNotContains() string { + if x != nil { + if x.xxx_hidden_NotContains != nil { + return *x.xxx_hidden_NotContains + } + return "" + } + return "" +} + +func (x *StringRules) GetIn() []string { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *StringRules) GetNotIn() []string { + if x != nil { + return 
x.xxx_hidden_NotIn + } + return nil +} + +func (x *StringRules) GetEmail() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_Email); ok { + return x.Email + } + } + return false +} + +func (x *StringRules) GetHostname() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_Hostname); ok { + return x.Hostname + } + } + return false +} + +func (x *StringRules) GetIp() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_Ip); ok { + return x.Ip + } + } + return false +} + +func (x *StringRules) GetIpv4() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv4); ok { + return x.Ipv4 + } + } + return false +} + +func (x *StringRules) GetIpv6() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv6); ok { + return x.Ipv6 + } + } + return false +} + +func (x *StringRules) GetUri() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_Uri); ok { + return x.Uri + } + } + return false +} + +func (x *StringRules) GetUriRef() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_UriRef); ok { + return x.UriRef + } + } + return false +} + +func (x *StringRules) GetAddress() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_Address); ok { + return x.Address + } + } + return false +} + +func (x *StringRules) GetUuid() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_Uuid); ok { + return x.Uuid + } + } + return false +} + +func (x *StringRules) GetTuuid() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_Tuuid); ok { + return x.Tuuid + } + } + return false +} + +func (x *StringRules) GetIpWithPrefixlen() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_IpWithPrefixlen); ok { + return x.IpWithPrefixlen + } + } + return false +} + +func (x *StringRules) GetIpv4WithPrefixlen() bool { + if x != nil { + if x, ok := 
x.xxx_hidden_WellKnown.(*stringRules_Ipv4WithPrefixlen); ok { + return x.Ipv4WithPrefixlen + } + } + return false +} + +func (x *StringRules) GetIpv6WithPrefixlen() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv6WithPrefixlen); ok { + return x.Ipv6WithPrefixlen + } + } + return false +} + +func (x *StringRules) GetIpPrefix() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_IpPrefix); ok { + return x.IpPrefix + } + } + return false +} + +func (x *StringRules) GetIpv4Prefix() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv4Prefix); ok { + return x.Ipv4Prefix + } + } + return false +} + +func (x *StringRules) GetIpv6Prefix() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv6Prefix); ok { + return x.Ipv6Prefix + } + } + return false +} + +func (x *StringRules) GetHostAndPort() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_HostAndPort); ok { + return x.HostAndPort + } + } + return false +} + +func (x *StringRules) GetUlid() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_Ulid); ok { + return x.Ulid + } + } + return false +} + +func (x *StringRules) GetWellKnownRegex() KnownRegex { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*stringRules_WellKnownRegex); ok { + return x.WellKnownRegex + } + } + return KnownRegex_KNOWN_REGEX_UNSPECIFIED +} + +func (x *StringRules) GetStrict() bool { + if x != nil { + return x.xxx_hidden_Strict + } + return false +} + +func (x *StringRules) GetExample() []string { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *StringRules) SetConst(v string) { + x.xxx_hidden_Const = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 17) +} + +func (x *StringRules) SetLen(v uint64) { + x.xxx_hidden_Len = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 1, 17) +} + +func (x *StringRules) SetMinLen(v uint64) { + x.xxx_hidden_MinLen = v + 
protoimpl.X.SetPresent(&(x.XXX_presence[0]), 2, 17) +} + +func (x *StringRules) SetMaxLen(v uint64) { + x.xxx_hidden_MaxLen = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 3, 17) +} + +func (x *StringRules) SetLenBytes(v uint64) { + x.xxx_hidden_LenBytes = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 4, 17) +} + +func (x *StringRules) SetMinBytes(v uint64) { + x.xxx_hidden_MinBytes = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 5, 17) +} + +func (x *StringRules) SetMaxBytes(v uint64) { + x.xxx_hidden_MaxBytes = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 6, 17) +} + +func (x *StringRules) SetPattern(v string) { + x.xxx_hidden_Pattern = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 7, 17) +} + +func (x *StringRules) SetPrefix(v string) { + x.xxx_hidden_Prefix = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 8, 17) +} + +func (x *StringRules) SetSuffix(v string) { + x.xxx_hidden_Suffix = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 9, 17) +} + +func (x *StringRules) SetContains(v string) { + x.xxx_hidden_Contains = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 10, 17) +} + +func (x *StringRules) SetNotContains(v string) { + x.xxx_hidden_NotContains = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 11, 17) +} + +func (x *StringRules) SetIn(v []string) { + x.xxx_hidden_In = v +} + +func (x *StringRules) SetNotIn(v []string) { + x.xxx_hidden_NotIn = v +} + +func (x *StringRules) SetEmail(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Email{v} +} + +func (x *StringRules) SetHostname(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Hostname{v} +} + +func (x *StringRules) SetIp(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Ip{v} +} + +func (x *StringRules) SetIpv4(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Ipv4{v} +} + +func (x *StringRules) SetIpv6(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Ipv6{v} +} + +func (x *StringRules) SetUri(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Uri{v} +} + +func (x *StringRules) 
SetUriRef(v bool) { + x.xxx_hidden_WellKnown = &stringRules_UriRef{v} +} + +func (x *StringRules) SetAddress(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Address{v} +} + +func (x *StringRules) SetUuid(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Uuid{v} +} + +func (x *StringRules) SetTuuid(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Tuuid{v} +} + +func (x *StringRules) SetIpWithPrefixlen(v bool) { + x.xxx_hidden_WellKnown = &stringRules_IpWithPrefixlen{v} +} + +func (x *StringRules) SetIpv4WithPrefixlen(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Ipv4WithPrefixlen{v} +} + +func (x *StringRules) SetIpv6WithPrefixlen(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Ipv6WithPrefixlen{v} +} + +func (x *StringRules) SetIpPrefix(v bool) { + x.xxx_hidden_WellKnown = &stringRules_IpPrefix{v} +} + +func (x *StringRules) SetIpv4Prefix(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Ipv4Prefix{v} +} + +func (x *StringRules) SetIpv6Prefix(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Ipv6Prefix{v} +} + +func (x *StringRules) SetHostAndPort(v bool) { + x.xxx_hidden_WellKnown = &stringRules_HostAndPort{v} +} + +func (x *StringRules) SetUlid(v bool) { + x.xxx_hidden_WellKnown = &stringRules_Ulid{v} +} + +func (x *StringRules) SetWellKnownRegex(v KnownRegex) { + x.xxx_hidden_WellKnown = &stringRules_WellKnownRegex{v} +} + +func (x *StringRules) SetStrict(v bool) { + x.xxx_hidden_Strict = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 15, 17) +} + +func (x *StringRules) SetExample(v []string) { + x.xxx_hidden_Example = v +} + +func (x *StringRules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *StringRules) HasLen() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 1) +} + +func (x *StringRules) HasMinLen() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 2) +} + +func (x *StringRules) 
HasMaxLen() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 3) +} + +func (x *StringRules) HasLenBytes() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 4) +} + +func (x *StringRules) HasMinBytes() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 5) +} + +func (x *StringRules) HasMaxBytes() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 6) +} + +func (x *StringRules) HasPattern() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 7) +} + +func (x *StringRules) HasPrefix() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 8) +} + +func (x *StringRules) HasSuffix() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 9) +} + +func (x *StringRules) HasContains() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 10) +} + +func (x *StringRules) HasNotContains() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 11) +} + +func (x *StringRules) HasWellKnown() bool { + if x == nil { + return false + } + return x.xxx_hidden_WellKnown != nil +} + +func (x *StringRules) HasEmail() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Email) + return ok +} + +func (x *StringRules) HasHostname() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Hostname) + return ok +} + +func (x *StringRules) HasIp() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ip) + return ok +} + +func (x *StringRules) HasIpv4() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv4) + return ok +} + +func (x *StringRules) HasIpv6() bool { + if x == nil { + return 
false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv6) + return ok +} + +func (x *StringRules) HasUri() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Uri) + return ok +} + +func (x *StringRules) HasUriRef() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_UriRef) + return ok +} + +func (x *StringRules) HasAddress() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Address) + return ok +} + +func (x *StringRules) HasUuid() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Uuid) + return ok +} + +func (x *StringRules) HasTuuid() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Tuuid) + return ok +} + +func (x *StringRules) HasIpWithPrefixlen() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_IpWithPrefixlen) + return ok +} + +func (x *StringRules) HasIpv4WithPrefixlen() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv4WithPrefixlen) + return ok +} + +func (x *StringRules) HasIpv6WithPrefixlen() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv6WithPrefixlen) + return ok +} + +func (x *StringRules) HasIpPrefix() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_IpPrefix) + return ok +} + +func (x *StringRules) HasIpv4Prefix() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv4Prefix) + return ok +} + +func (x *StringRules) HasIpv6Prefix() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv6Prefix) + return ok +} + +func (x *StringRules) HasHostAndPort() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_HostAndPort) + return ok +} + +func (x *StringRules) HasUlid() 
bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ulid) + return ok +} + +func (x *StringRules) HasWellKnownRegex() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*stringRules_WellKnownRegex) + return ok +} + +func (x *StringRules) HasStrict() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 15) +} + +func (x *StringRules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = nil +} + +func (x *StringRules) ClearLen() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 1) + x.xxx_hidden_Len = 0 +} + +func (x *StringRules) ClearMinLen() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 2) + x.xxx_hidden_MinLen = 0 +} + +func (x *StringRules) ClearMaxLen() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 3) + x.xxx_hidden_MaxLen = 0 +} + +func (x *StringRules) ClearLenBytes() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 4) + x.xxx_hidden_LenBytes = 0 +} + +func (x *StringRules) ClearMinBytes() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 5) + x.xxx_hidden_MinBytes = 0 +} + +func (x *StringRules) ClearMaxBytes() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 6) + x.xxx_hidden_MaxBytes = 0 +} + +func (x *StringRules) ClearPattern() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 7) + x.xxx_hidden_Pattern = nil +} + +func (x *StringRules) ClearPrefix() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 8) + x.xxx_hidden_Prefix = nil +} + +func (x *StringRules) ClearSuffix() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 9) + x.xxx_hidden_Suffix = nil +} + +func (x *StringRules) ClearContains() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 10) + x.xxx_hidden_Contains = nil +} + +func (x *StringRules) ClearNotContains() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 11) + x.xxx_hidden_NotContains = nil +} + +func (x *StringRules) ClearWellKnown() { + x.xxx_hidden_WellKnown = nil +} + +func 
(x *StringRules) ClearEmail() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Email); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearHostname() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Hostname); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearIp() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ip); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearIpv4() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv4); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearIpv6() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv6); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearUri() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Uri); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearUriRef() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_UriRef); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearAddress() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Address); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearUuid() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Uuid); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearTuuid() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Tuuid); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearIpWithPrefixlen() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_IpWithPrefixlen); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearIpv4WithPrefixlen() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv4WithPrefixlen); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearIpv6WithPrefixlen() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv6WithPrefixlen); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearIpPrefix() { + if _, ok := 
x.xxx_hidden_WellKnown.(*stringRules_IpPrefix); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearIpv4Prefix() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv4Prefix); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearIpv6Prefix() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ipv6Prefix); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearHostAndPort() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_HostAndPort); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearUlid() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_Ulid); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearWellKnownRegex() { + if _, ok := x.xxx_hidden_WellKnown.(*stringRules_WellKnownRegex); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *StringRules) ClearStrict() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 15) + x.xxx_hidden_Strict = false +} + +const StringRules_WellKnown_not_set_case case_StringRules_WellKnown = 0 +const StringRules_Email_case case_StringRules_WellKnown = 12 +const StringRules_Hostname_case case_StringRules_WellKnown = 13 +const StringRules_Ip_case case_StringRules_WellKnown = 14 +const StringRules_Ipv4_case case_StringRules_WellKnown = 15 +const StringRules_Ipv6_case case_StringRules_WellKnown = 16 +const StringRules_Uri_case case_StringRules_WellKnown = 17 +const StringRules_UriRef_case case_StringRules_WellKnown = 18 +const StringRules_Address_case case_StringRules_WellKnown = 21 +const StringRules_Uuid_case case_StringRules_WellKnown = 22 +const StringRules_Tuuid_case case_StringRules_WellKnown = 33 +const StringRules_IpWithPrefixlen_case case_StringRules_WellKnown = 26 +const StringRules_Ipv4WithPrefixlen_case case_StringRules_WellKnown = 27 +const StringRules_Ipv6WithPrefixlen_case case_StringRules_WellKnown = 28 +const StringRules_IpPrefix_case case_StringRules_WellKnown = 29 +const StringRules_Ipv4Prefix_case 
case_StringRules_WellKnown = 30 +const StringRules_Ipv6Prefix_case case_StringRules_WellKnown = 31 +const StringRules_HostAndPort_case case_StringRules_WellKnown = 32 +const StringRules_Ulid_case case_StringRules_WellKnown = 35 +const StringRules_WellKnownRegex_case case_StringRules_WellKnown = 24 + +func (x *StringRules) WhichWellKnown() case_StringRules_WellKnown { + if x == nil { + return StringRules_WellKnown_not_set_case + } + switch x.xxx_hidden_WellKnown.(type) { + case *stringRules_Email: + return StringRules_Email_case + case *stringRules_Hostname: + return StringRules_Hostname_case + case *stringRules_Ip: + return StringRules_Ip_case + case *stringRules_Ipv4: + return StringRules_Ipv4_case + case *stringRules_Ipv6: + return StringRules_Ipv6_case + case *stringRules_Uri: + return StringRules_Uri_case + case *stringRules_UriRef: + return StringRules_UriRef_case + case *stringRules_Address: + return StringRules_Address_case + case *stringRules_Uuid: + return StringRules_Uuid_case + case *stringRules_Tuuid: + return StringRules_Tuuid_case + case *stringRules_IpWithPrefixlen: + return StringRules_IpWithPrefixlen_case + case *stringRules_Ipv4WithPrefixlen: + return StringRules_Ipv4WithPrefixlen_case + case *stringRules_Ipv6WithPrefixlen: + return StringRules_Ipv6WithPrefixlen_case + case *stringRules_IpPrefix: + return StringRules_IpPrefix_case + case *stringRules_Ipv4Prefix: + return StringRules_Ipv4Prefix_case + case *stringRules_Ipv6Prefix: + return StringRules_Ipv6Prefix_case + case *stringRules_HostAndPort: + return StringRules_HostAndPort_case + case *stringRules_Ulid: + return StringRules_Ulid_case + case *stringRules_WellKnownRegex: + return StringRules_WellKnownRegex_case + default: + return StringRules_WellKnown_not_set_case + } +} + +type StringRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified value. 
If + // the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyString { + // // value must equal `hello` + // string value = 1 [(buf.validate.field).string.const = "hello"]; + // } + // + // ``` + Const *string + // `len` dictates that the field value must have the specified + // number of characters (Unicode code points), which may differ from the number + // of bytes in the string. If the field value does not meet the specified + // length, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be 5 characters + // string value = 1 [(buf.validate.field).string.len = 5]; + // } + // + // ``` + Len *uint64 + // `min_len` specifies that the field value must have at least the specified + // number of characters (Unicode code points), which may differ from the number + // of bytes in the string. If the field value contains fewer characters, an error + // message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be at least 3 characters + // string value = 1 [(buf.validate.field).string.min_len = 3]; + // } + // + // ``` + MinLen *uint64 + // `max_len` specifies that the field value must have no more than the specified + // number of characters (Unicode code points), which may differ from the + // number of bytes in the string. If the field value contains more characters, + // an error message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be at most 10 characters + // string value = 1 [(buf.validate.field).string.max_len = 10]; + // } + // + // ``` + MaxLen *uint64 + // `len_bytes` dictates that the field value must have the specified number of + // bytes. If the field value does not match the specified length in bytes, + // an error message will be generated. 
+ // + // ```proto + // + // message MyString { + // // value length must be 6 bytes + // string value = 1 [(buf.validate.field).string.len_bytes = 6]; + // } + // + // ``` + LenBytes *uint64 + // `min_bytes` specifies that the field value must have at least the specified + // number of bytes. If the field value contains fewer bytes, an error message + // will be generated. + // + // ```proto + // + // message MyString { + // // value length must be at least 4 bytes + // string value = 1 [(buf.validate.field).string.min_bytes = 4]; + // } + // + // ``` + MinBytes *uint64 + // `max_bytes` specifies that the field value must have no more than the + // specified number of bytes. If the field value contains more bytes, an + // error message will be generated. + // + // ```proto + // + // message MyString { + // // value length must be at most 8 bytes + // string value = 1 [(buf.validate.field).string.max_bytes = 8]; + // } + // + // ``` + MaxBytes *uint64 + // `pattern` specifies that the field value must match the specified + // regular expression (RE2 syntax), with the expression provided without any + // delimiters. If the field value doesn't match the regular expression, an + // error message will be generated. + // + // ```proto + // + // message MyString { + // // value does not match regex pattern `^[a-zA-Z]//$` + // string value = 1 [(buf.validate.field).string.pattern = "^[a-zA-Z]//$"]; + // } + // + // ``` + Pattern *string + // `prefix` specifies that the field value must have the + // specified substring at the beginning of the string. If the field value + // doesn't start with the specified prefix, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value does not have prefix `pre` + // string value = 1 [(buf.validate.field).string.prefix = "pre"]; + // } + // + // ``` + Prefix *string + // `suffix` specifies that the field value must have the + // specified substring at the end of the string. 
If the field value doesn't + // end with the specified suffix, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value does not have suffix `post` + // string value = 1 [(buf.validate.field).string.suffix = "post"]; + // } + // + // ``` + Suffix *string + // `contains` specifies that the field value must have the + // specified substring anywhere in the string. If the field value doesn't + // contain the specified substring, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value does not contain substring `inside`. + // string value = 1 [(buf.validate.field).string.contains = "inside"]; + // } + // + // ``` + Contains *string + // `not_contains` specifies that the field value must not have the + // specified substring anywhere in the string. If the field value contains + // the specified substring, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value contains substring `inside`. + // string value = 1 [(buf.validate.field).string.not_contains = "inside"]; + // } + // + // ``` + NotContains *string + // `in` specifies that the field value must be equal to one of the specified + // values. If the field value isn't one of the specified values, an error + // message will be generated. + // + // ```proto + // + // message MyString { + // // value must be in list ["apple", "banana"] + // string value = 1 [(buf.validate.field).string.in = "apple", (buf.validate.field).string.in = "banana"]; + // } + // + // ``` + In []string + // `not_in` specifies that the field value cannot be equal to any + // of the specified values. If the field value is one of the specified values, + // an error message will be generated. 
+ // ```proto + // + // message MyString { + // // value must not be in list ["orange", "grape"] + // string value = 1 [(buf.validate.field).string.not_in = "orange", (buf.validate.field).string.not_in = "grape"]; + // } + // + // ``` + NotIn []string + // `WellKnown` rules provide advanced rules against common string + // patterns. + + // Fields of oneof xxx_hidden_WellKnown: + // `email` specifies that the field value must be a valid email address, for + // example "foo@example.com". + // + // Conforms to the definition for a valid email address from the [HTML standard](https://html.spec.whatwg.org/multipage/input.html#valid-e-mail-address). + // Note that this standard willfully deviates from [RFC 5322](https://datatracker.ietf.org/doc/html/rfc5322), + // which allows many unexpected forms of email addresses and will easily match + // a typographical error. + // + // If the field value isn't a valid email address, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid email address + // string value = 1 [(buf.validate.field).string.email = true]; + // } + // + // ``` + Email *bool + // `hostname` specifies that the field value must be a valid hostname, for + // example "foo.example.com". + // + // A valid hostname follows the rules below: + // - The name consists of one or more labels, separated by a dot ("."). + // - Each label can be 1 to 63 alphanumeric characters. + // - A label can contain hyphens ("-"), but must not start or end with a hyphen. + // - The right-most label must not be digits only. + // - The name can have a trailing dot—for example, "foo.example.com.". + // - The name can be 253 characters at most, excluding the optional trailing dot. + // + // If the field value isn't a valid hostname, an error message will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid hostname + // string value = 1 [(buf.validate.field).string.hostname = true]; + // } + // + // ``` + Hostname *bool + // `ip` specifies that the field value must be a valid IP (v4 or v6) address. + // + // IPv4 addresses are expected in the dotted decimal format—for example, "192.168.5.21". + // IPv6 addresses are expected in their text representation—for example, "::1", + // or "2001:0DB8:ABCD:0012::0". + // + // Both formats are well-defined in the internet standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). + // Zone identifiers for IPv6 addresses (for example, "fe80::a%en1") are supported. + // + // If the field value isn't a valid IP address, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IP address + // string value = 1 [(buf.validate.field).string.ip = true]; + // } + // + // ``` + Ip *bool + // `ipv4` specifies that the field value must be a valid IPv4 address—for + // example "192.168.5.21". If the field value isn't a valid IPv4 address, an + // error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv4 address + // string value = 1 [(buf.validate.field).string.ipv4 = true]; + // } + // + // ``` + Ipv4 *bool + // `ipv6` specifies that the field value must be a valid IPv6 address—for + // example "::1", or "d7a:115c:a1e0:ab12:4843:cd96:626b:430b". If the field + // value is not a valid IPv6 address, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv6 address + // string value = 1 [(buf.validate.field).string.ipv6 = true]; + // } + // + // ``` + Ipv6 *bool + // `uri` specifies that the field value must be a valid URI, for example + // "https://example.com/foo/bar?baz=quux#frag". 
+ // + // URI is defined in the internet standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). + // Zone Identifiers in IPv6 address literals are supported ([RFC 6874](https://datatracker.ietf.org/doc/html/rfc6874)). + // + // If the field value isn't a valid URI, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid URI + // string value = 1 [(buf.validate.field).string.uri = true]; + // } + // + // ``` + Uri *bool + // `uri_ref` specifies that the field value must be a valid URI Reference—either + // a URI such as "https://example.com/foo/bar?baz=quux#frag", or a Relative + // Reference such as "./foo/bar?query". + // + // URI, URI Reference, and Relative Reference are defined in the internet + // standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). Zone + // Identifiers in IPv6 address literals are supported ([RFC 6874](https://datatracker.ietf.org/doc/html/rfc6874)). + // + // If the field value isn't a valid URI Reference, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value must be a valid URI Reference + // string value = 1 [(buf.validate.field).string.uri_ref = true]; + // } + // + // ``` + UriRef *bool + // `address` specifies that the field value must be either a valid hostname + // (for example, "example.com"), or a valid IP (v4 or v6) address (for example, + // "192.168.0.1", or "::1"). If the field value isn't a valid hostname or IP, + // an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid hostname, or ip address + // string value = 1 [(buf.validate.field).string.address = true]; + // } + // + // ``` + Address *bool + // `uuid` specifies that the field value must be a valid UUID as defined by + // [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2). If the + // field value isn't a valid UUID, an error message will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid UUID + // string value = 1 [(buf.validate.field).string.uuid = true]; + // } + // + // ``` + Uuid *bool + // `tuuid` (trimmed UUID) specifies that the field value must be a valid UUID as + // defined by [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2) with all dashes + // omitted. If the field value isn't a valid UUID without dashes, an error message + // will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid trimmed UUID + // string value = 1 [(buf.validate.field).string.tuuid = true]; + // } + // + // ``` + Tuuid *bool + // `ip_with_prefixlen` specifies that the field value must be a valid IP + // (v4 or v6) address with prefix length—for example, "192.168.5.21/16" or + // "2001:0DB8:ABCD:0012::F1/64". If the field value isn't a valid IP with + // prefix length, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IP with prefix length + // string value = 1 [(buf.validate.field).string.ip_with_prefixlen = true]; + // } + // + // ``` + IpWithPrefixlen *bool + // `ipv4_with_prefixlen` specifies that the field value must be a valid + // IPv4 address with prefix length—for example, "192.168.5.21/16". If the + // field value isn't a valid IPv4 address with prefix length, an error + // message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv4 address with prefix length + // string value = 1 [(buf.validate.field).string.ipv4_with_prefixlen = true]; + // } + // + // ``` + Ipv4WithPrefixlen *bool + // `ipv6_with_prefixlen` specifies that the field value must be a valid + // IPv6 address with prefix length—for example, "2001:0DB8:ABCD:0012::F1/64". + // If the field value is not a valid IPv6 address with prefix length, + // an error message will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid IPv6 address prefix length + // string value = 1 [(buf.validate.field).string.ipv6_with_prefixlen = true]; + // } + // + // ``` + Ipv6WithPrefixlen *bool + // `ip_prefix` specifies that the field value must be a valid IP (v4 or v6) + // prefix—for example, "192.168.0.0/16" or "2001:0DB8:ABCD:0012::0/64". + // + // The prefix must have all zeros for the unmasked bits. For example, + // "2001:0DB8:ABCD:0012::0/64" designates the left-most 64 bits for the + // prefix, and the remaining 64 bits must be zero. + // + // If the field value isn't a valid IP prefix, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IP prefix + // string value = 1 [(buf.validate.field).string.ip_prefix = true]; + // } + // + // ``` + IpPrefix *bool + // `ipv4_prefix` specifies that the field value must be a valid IPv4 + // prefix, for example "192.168.0.0/16". + // + // The prefix must have all zeros for the unmasked bits. For example, + // "192.168.0.0/16" designates the left-most 16 bits for the prefix, + // and the remaining 16 bits must be zero. + // + // If the field value isn't a valid IPv4 prefix, an error message + // will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv4 prefix + // string value = 1 [(buf.validate.field).string.ipv4_prefix = true]; + // } + // + // ``` + Ipv4Prefix *bool + // `ipv6_prefix` specifies that the field value must be a valid IPv6 prefix—for + // example, "2001:0DB8:ABCD:0012::0/64". + // + // The prefix must have all zeros for the unmasked bits. For example, + // "2001:0DB8:ABCD:0012::0/64" designates the left-most 64 bits for the + // prefix, and the remaining 64 bits must be zero. + // + // If the field value is not a valid IPv6 prefix, an error message will be + // generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid IPv6 prefix + // string value = 1 [(buf.validate.field).string.ipv6_prefix = true]; + // } + // + // ``` + Ipv6Prefix *bool + // `host_and_port` specifies that the field value must be valid host/port + // pair—for example, "example.com:8080". + // + // The host can be one of: + // - An IPv4 address in dotted decimal format—for example, "192.168.5.21". + // - An IPv6 address enclosed in square brackets—for example, "[2001:0DB8:ABCD:0012::F1]". + // - A hostname—for example, "example.com". + // + // The port is separated by a colon. It must be non-empty, with a decimal number + // in the range of 0-65535, inclusive. + HostAndPort *bool + // `ulid` specifies that the field value must be a valid ULID (Universally Unique + // Lexicographically Sortable Identifier) as defined by the [ULID specification](https://github.com/ulid/spec). + // If the field value isn't a valid ULID, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid ULID + // string value = 1 [(buf.validate.field).string.ulid = true]; + // } + // + // ``` + Ulid *bool + // `well_known_regex` specifies a common well-known pattern + // defined as a regex. If the field value doesn't match the well-known + // regex, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid HTTP header value + // string value = 1 [(buf.validate.field).string.well_known_regex = KNOWN_REGEX_HTTP_HEADER_VALUE]; + // } + // + // ``` + // + // #### KnownRegex + // + // `well_known_regex` contains some well-known patterns. 
+ // + // | Name | Number | Description | + // |-------------------------------|--------|-------------------------------------------| + // | KNOWN_REGEX_UNSPECIFIED | 0 | | + // | KNOWN_REGEX_HTTP_HEADER_NAME | 1 | HTTP header name as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2) | + // | KNOWN_REGEX_HTTP_HEADER_VALUE | 2 | HTTP header value as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.4) | + WellKnownRegex *KnownRegex + // -- end of xxx_hidden_WellKnown + // This applies to regexes `HTTP_HEADER_NAME` and `HTTP_HEADER_VALUE` to + // enable strict header validation. By default, this is true, and HTTP header + // validations are [RFC-compliant](https://datatracker.ietf.org/doc/html/rfc7230#section-3). Setting to false will enable looser + // validations that only disallow `\r\n\0` characters, which can be used to + // bypass header matching rules. + // + // ```proto + // + // message MyString { + // // The field `value` must have be a valid HTTP headers, but not enforced with strict rules. + // string value = 1 [(buf.validate.field).string.strict = false]; + // } + // + // ``` + Strict *bool + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyString { + // string value = 1 [ + // (buf.validate.field).string.example = "hello", + // (buf.validate.field).string.example = "world" + // ]; + // } + // + // ``` + Example []string +} + +func (b0 StringRules_builder) Build() *StringRules { + m0 := &StringRules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 17) + x.xxx_hidden_Const = b.Const + } + if b.Len != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 1, 17) + x.xxx_hidden_Len = *b.Len + } + if b.MinLen != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 2, 17) + x.xxx_hidden_MinLen = *b.MinLen + } + if b.MaxLen != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 3, 17) + x.xxx_hidden_MaxLen = *b.MaxLen + } + if b.LenBytes != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 4, 17) + x.xxx_hidden_LenBytes = *b.LenBytes + } + if b.MinBytes != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 5, 17) + x.xxx_hidden_MinBytes = *b.MinBytes + } + if b.MaxBytes != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 6, 17) + x.xxx_hidden_MaxBytes = *b.MaxBytes + } + if b.Pattern != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 7, 17) + x.xxx_hidden_Pattern = b.Pattern + } + if b.Prefix != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 8, 17) + x.xxx_hidden_Prefix = b.Prefix + } + if b.Suffix != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 9, 17) + x.xxx_hidden_Suffix = b.Suffix + } + if b.Contains != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 10, 17) + x.xxx_hidden_Contains = b.Contains + } + if b.NotContains != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 11, 17) + x.xxx_hidden_NotContains = b.NotContains + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + if b.Email != nil { + x.xxx_hidden_WellKnown = &stringRules_Email{*b.Email} + } + if 
b.Hostname != nil { + x.xxx_hidden_WellKnown = &stringRules_Hostname{*b.Hostname} + } + if b.Ip != nil { + x.xxx_hidden_WellKnown = &stringRules_Ip{*b.Ip} + } + if b.Ipv4 != nil { + x.xxx_hidden_WellKnown = &stringRules_Ipv4{*b.Ipv4} + } + if b.Ipv6 != nil { + x.xxx_hidden_WellKnown = &stringRules_Ipv6{*b.Ipv6} + } + if b.Uri != nil { + x.xxx_hidden_WellKnown = &stringRules_Uri{*b.Uri} + } + if b.UriRef != nil { + x.xxx_hidden_WellKnown = &stringRules_UriRef{*b.UriRef} + } + if b.Address != nil { + x.xxx_hidden_WellKnown = &stringRules_Address{*b.Address} + } + if b.Uuid != nil { + x.xxx_hidden_WellKnown = &stringRules_Uuid{*b.Uuid} + } + if b.Tuuid != nil { + x.xxx_hidden_WellKnown = &stringRules_Tuuid{*b.Tuuid} + } + if b.IpWithPrefixlen != nil { + x.xxx_hidden_WellKnown = &stringRules_IpWithPrefixlen{*b.IpWithPrefixlen} + } + if b.Ipv4WithPrefixlen != nil { + x.xxx_hidden_WellKnown = &stringRules_Ipv4WithPrefixlen{*b.Ipv4WithPrefixlen} + } + if b.Ipv6WithPrefixlen != nil { + x.xxx_hidden_WellKnown = &stringRules_Ipv6WithPrefixlen{*b.Ipv6WithPrefixlen} + } + if b.IpPrefix != nil { + x.xxx_hidden_WellKnown = &stringRules_IpPrefix{*b.IpPrefix} + } + if b.Ipv4Prefix != nil { + x.xxx_hidden_WellKnown = &stringRules_Ipv4Prefix{*b.Ipv4Prefix} + } + if b.Ipv6Prefix != nil { + x.xxx_hidden_WellKnown = &stringRules_Ipv6Prefix{*b.Ipv6Prefix} + } + if b.HostAndPort != nil { + x.xxx_hidden_WellKnown = &stringRules_HostAndPort{*b.HostAndPort} + } + if b.Ulid != nil { + x.xxx_hidden_WellKnown = &stringRules_Ulid{*b.Ulid} + } + if b.WellKnownRegex != nil { + x.xxx_hidden_WellKnown = &stringRules_WellKnownRegex{*b.WellKnownRegex} + } + if b.Strict != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 15, 17) + x.xxx_hidden_Strict = *b.Strict + } + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_StringRules_WellKnown protoreflect.FieldNumber + +func (x case_StringRules_WellKnown) String() string { + md := 
file_buf_validate_validate_proto_msgTypes[19].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isStringRules_WellKnown interface { + isStringRules_WellKnown() +} + +type stringRules_Email struct { + // `email` specifies that the field value must be a valid email address, for + // example "foo@example.com". + // + // Conforms to the definition for a valid email address from the [HTML standard](https://html.spec.whatwg.org/multipage/input.html#valid-e-mail-address). + // Note that this standard willfully deviates from [RFC 5322](https://datatracker.ietf.org/doc/html/rfc5322), + // which allows many unexpected forms of email addresses and will easily match + // a typographical error. + // + // If the field value isn't a valid email address, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid email address + // string value = 1 [(buf.validate.field).string.email = true]; + // } + // + // ``` + Email bool `protobuf:"varint,12,opt,name=email,oneof"` +} + +type stringRules_Hostname struct { + // `hostname` specifies that the field value must be a valid hostname, for + // example "foo.example.com". + // + // A valid hostname follows the rules below: + // - The name consists of one or more labels, separated by a dot ("."). + // - Each label can be 1 to 63 alphanumeric characters. + // - A label can contain hyphens ("-"), but must not start or end with a hyphen. + // - The right-most label must not be digits only. + // - The name can have a trailing dot—for example, "foo.example.com.". + // - The name can be 253 characters at most, excluding the optional trailing dot. + // + // If the field value isn't a valid hostname, an error message will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid hostname + // string value = 1 [(buf.validate.field).string.hostname = true]; + // } + // + // ``` + Hostname bool `protobuf:"varint,13,opt,name=hostname,oneof"` +} + +type stringRules_Ip struct { + // `ip` specifies that the field value must be a valid IP (v4 or v6) address. + // + // IPv4 addresses are expected in the dotted decimal format—for example, "192.168.5.21". + // IPv6 addresses are expected in their text representation—for example, "::1", + // or "2001:0DB8:ABCD:0012::0". + // + // Both formats are well-defined in the internet standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). + // Zone identifiers for IPv6 addresses (for example, "fe80::a%en1") are supported. + // + // If the field value isn't a valid IP address, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IP address + // string value = 1 [(buf.validate.field).string.ip = true]; + // } + // + // ``` + Ip bool `protobuf:"varint,14,opt,name=ip,oneof"` +} + +type stringRules_Ipv4 struct { + // `ipv4` specifies that the field value must be a valid IPv4 address—for + // example "192.168.5.21". If the field value isn't a valid IPv4 address, an + // error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv4 address + // string value = 1 [(buf.validate.field).string.ipv4 = true]; + // } + // + // ``` + Ipv4 bool `protobuf:"varint,15,opt,name=ipv4,oneof"` +} + +type stringRules_Ipv6 struct { + // `ipv6` specifies that the field value must be a valid IPv6 address—for + // example "::1", or "d7a:115c:a1e0:ab12:4843:cd96:626b:430b". If the field + // value is not a valid IPv6 address, an error message will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid IPv6 address + // string value = 1 [(buf.validate.field).string.ipv6 = true]; + // } + // + // ``` + Ipv6 bool `protobuf:"varint,16,opt,name=ipv6,oneof"` +} + +type stringRules_Uri struct { + // `uri` specifies that the field value must be a valid URI, for example + // "https://example.com/foo/bar?baz=quux#frag". + // + // URI is defined in the internet standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). + // Zone Identifiers in IPv6 address literals are supported ([RFC 6874](https://datatracker.ietf.org/doc/html/rfc6874)). + // + // If the field value isn't a valid URI, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid URI + // string value = 1 [(buf.validate.field).string.uri = true]; + // } + // + // ``` + Uri bool `protobuf:"varint,17,opt,name=uri,oneof"` +} + +type stringRules_UriRef struct { + // `uri_ref` specifies that the field value must be a valid URI Reference—either + // a URI such as "https://example.com/foo/bar?baz=quux#frag", or a Relative + // Reference such as "./foo/bar?query". + // + // URI, URI Reference, and Relative Reference are defined in the internet + // standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). Zone + // Identifiers in IPv6 address literals are supported ([RFC 6874](https://datatracker.ietf.org/doc/html/rfc6874)). + // + // If the field value isn't a valid URI Reference, an error message will be + // generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid URI Reference + // string value = 1 [(buf.validate.field).string.uri_ref = true]; + // } + // + // ``` + UriRef bool `protobuf:"varint,18,opt,name=uri_ref,json=uriRef,oneof"` +} + +type stringRules_Address struct { + // `address` specifies that the field value must be either a valid hostname + // (for example, "example.com"), or a valid IP (v4 or v6) address (for example, + // "192.168.0.1", or "::1"). If the field value isn't a valid hostname or IP, + // an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid hostname, or ip address + // string value = 1 [(buf.validate.field).string.address = true]; + // } + // + // ``` + Address bool `protobuf:"varint,21,opt,name=address,oneof"` +} + +type stringRules_Uuid struct { + // `uuid` specifies that the field value must be a valid UUID as defined by + // [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2). If the + // field value isn't a valid UUID, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid UUID + // string value = 1 [(buf.validate.field).string.uuid = true]; + // } + // + // ``` + Uuid bool `protobuf:"varint,22,opt,name=uuid,oneof"` +} + +type stringRules_Tuuid struct { + // `tuuid` (trimmed UUID) specifies that the field value must be a valid UUID as + // defined by [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2) with all dashes + // omitted. If the field value isn't a valid UUID without dashes, an error message + // will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid trimmed UUID + // string value = 1 [(buf.validate.field).string.tuuid = true]; + // } + // + // ``` + Tuuid bool `protobuf:"varint,33,opt,name=tuuid,oneof"` +} + +type stringRules_IpWithPrefixlen struct { + // `ip_with_prefixlen` specifies that the field value must be a valid IP + // (v4 or v6) address with prefix length—for example, "192.168.5.21/16" or + // "2001:0DB8:ABCD:0012::F1/64". If the field value isn't a valid IP with + // prefix length, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IP with prefix length + // string value = 1 [(buf.validate.field).string.ip_with_prefixlen = true]; + // } + // + // ``` + IpWithPrefixlen bool `protobuf:"varint,26,opt,name=ip_with_prefixlen,json=ipWithPrefixlen,oneof"` +} + +type stringRules_Ipv4WithPrefixlen struct { + // `ipv4_with_prefixlen` specifies that the field value must be a valid + // IPv4 address with prefix length—for example, "192.168.5.21/16". If the + // field value isn't a valid IPv4 address with prefix length, an error + // message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv4 address with prefix length + // string value = 1 [(buf.validate.field).string.ipv4_with_prefixlen = true]; + // } + // + // ``` + Ipv4WithPrefixlen bool `protobuf:"varint,27,opt,name=ipv4_with_prefixlen,json=ipv4WithPrefixlen,oneof"` +} + +type stringRules_Ipv6WithPrefixlen struct { + // `ipv6_with_prefixlen` specifies that the field value must be a valid + // IPv6 address with prefix length—for example, "2001:0DB8:ABCD:0012::F1/64". + // If the field value is not a valid IPv6 address with prefix length, + // an error message will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid IPv6 address prefix length + // string value = 1 [(buf.validate.field).string.ipv6_with_prefixlen = true]; + // } + // + // ``` + Ipv6WithPrefixlen bool `protobuf:"varint,28,opt,name=ipv6_with_prefixlen,json=ipv6WithPrefixlen,oneof"` +} + +type stringRules_IpPrefix struct { + // `ip_prefix` specifies that the field value must be a valid IP (v4 or v6) + // prefix—for example, "192.168.0.0/16" or "2001:0DB8:ABCD:0012::0/64". + // + // The prefix must have all zeros for the unmasked bits. For example, + // "2001:0DB8:ABCD:0012::0/64" designates the left-most 64 bits for the + // prefix, and the remaining 64 bits must be zero. + // + // If the field value isn't a valid IP prefix, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IP prefix + // string value = 1 [(buf.validate.field).string.ip_prefix = true]; + // } + // + // ``` + IpPrefix bool `protobuf:"varint,29,opt,name=ip_prefix,json=ipPrefix,oneof"` +} + +type stringRules_Ipv4Prefix struct { + // `ipv4_prefix` specifies that the field value must be a valid IPv4 + // prefix, for example "192.168.0.0/16". + // + // The prefix must have all zeros for the unmasked bits. For example, + // "192.168.0.0/16" designates the left-most 16 bits for the prefix, + // and the remaining 16 bits must be zero. + // + // If the field value isn't a valid IPv4 prefix, an error message + // will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv4 prefix + // string value = 1 [(buf.validate.field).string.ipv4_prefix = true]; + // } + // + // ``` + Ipv4Prefix bool `protobuf:"varint,30,opt,name=ipv4_prefix,json=ipv4Prefix,oneof"` +} + +type stringRules_Ipv6Prefix struct { + // `ipv6_prefix` specifies that the field value must be a valid IPv6 prefix—for + // example, "2001:0DB8:ABCD:0012::0/64". 
+ // + // The prefix must have all zeros for the unmasked bits. For example, + // "2001:0DB8:ABCD:0012::0/64" designates the left-most 64 bits for the + // prefix, and the remaining 64 bits must be zero. + // + // If the field value is not a valid IPv6 prefix, an error message will be + // generated. + // + // ```proto + // + // message MyString { + // // value must be a valid IPv6 prefix + // string value = 1 [(buf.validate.field).string.ipv6_prefix = true]; + // } + // + // ``` + Ipv6Prefix bool `protobuf:"varint,31,opt,name=ipv6_prefix,json=ipv6Prefix,oneof"` +} + +type stringRules_HostAndPort struct { + // `host_and_port` specifies that the field value must be valid host/port + // pair—for example, "example.com:8080". + // + // The host can be one of: + // - An IPv4 address in dotted decimal format—for example, "192.168.5.21". + // - An IPv6 address enclosed in square brackets—for example, "[2001:0DB8:ABCD:0012::F1]". + // - A hostname—for example, "example.com". + // + // The port is separated by a colon. It must be non-empty, with a decimal number + // in the range of 0-65535, inclusive. + HostAndPort bool `protobuf:"varint,32,opt,name=host_and_port,json=hostAndPort,oneof"` +} + +type stringRules_Ulid struct { + // `ulid` specifies that the field value must be a valid ULID (Universally Unique + // Lexicographically Sortable Identifier) as defined by the [ULID specification](https://github.com/ulid/spec). + // If the field value isn't a valid ULID, an error message will be generated. + // + // ```proto + // + // message MyString { + // // value must be a valid ULID + // string value = 1 [(buf.validate.field).string.ulid = true]; + // } + // + // ``` + Ulid bool `protobuf:"varint,35,opt,name=ulid,oneof"` +} + +type stringRules_WellKnownRegex struct { + // `well_known_regex` specifies a common well-known pattern + // defined as a regex. If the field value doesn't match the well-known + // regex, an error message will be generated. 
+ // + // ```proto + // + // message MyString { + // // value must be a valid HTTP header value + // string value = 1 [(buf.validate.field).string.well_known_regex = KNOWN_REGEX_HTTP_HEADER_VALUE]; + // } + // + // ``` + // + // #### KnownRegex + // + // `well_known_regex` contains some well-known patterns. + // + // | Name | Number | Description | + // |-------------------------------|--------|-------------------------------------------| + // | KNOWN_REGEX_UNSPECIFIED | 0 | | + // | KNOWN_REGEX_HTTP_HEADER_NAME | 1 | HTTP header name as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2) | + // | KNOWN_REGEX_HTTP_HEADER_VALUE | 2 | HTTP header value as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.4) | + WellKnownRegex KnownRegex `protobuf:"varint,24,opt,name=well_known_regex,json=wellKnownRegex,enum=buf.validate.KnownRegex,oneof"` +} + +func (*stringRules_Email) isStringRules_WellKnown() {} + +func (*stringRules_Hostname) isStringRules_WellKnown() {} + +func (*stringRules_Ip) isStringRules_WellKnown() {} + +func (*stringRules_Ipv4) isStringRules_WellKnown() {} + +func (*stringRules_Ipv6) isStringRules_WellKnown() {} + +func (*stringRules_Uri) isStringRules_WellKnown() {} + +func (*stringRules_UriRef) isStringRules_WellKnown() {} + +func (*stringRules_Address) isStringRules_WellKnown() {} + +func (*stringRules_Uuid) isStringRules_WellKnown() {} + +func (*stringRules_Tuuid) isStringRules_WellKnown() {} + +func (*stringRules_IpWithPrefixlen) isStringRules_WellKnown() {} + +func (*stringRules_Ipv4WithPrefixlen) isStringRules_WellKnown() {} + +func (*stringRules_Ipv6WithPrefixlen) isStringRules_WellKnown() {} + +func (*stringRules_IpPrefix) isStringRules_WellKnown() {} + +func (*stringRules_Ipv4Prefix) isStringRules_WellKnown() {} + +func (*stringRules_Ipv6Prefix) isStringRules_WellKnown() {} + +func (*stringRules_HostAndPort) isStringRules_WellKnown() {} + +func (*stringRules_Ulid) 
isStringRules_WellKnown() {} + +func (*stringRules_WellKnownRegex) isStringRules_WellKnown() {} + +// BytesRules describe the rules applied to `bytes` values. These rules +// may also be applied to the `google.protobuf.BytesValue` Well-Known-Type. +type BytesRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const []byte `protobuf:"bytes,1,opt,name=const"` + xxx_hidden_Len uint64 `protobuf:"varint,13,opt,name=len"` + xxx_hidden_MinLen uint64 `protobuf:"varint,2,opt,name=min_len,json=minLen"` + xxx_hidden_MaxLen uint64 `protobuf:"varint,3,opt,name=max_len,json=maxLen"` + xxx_hidden_Pattern *string `protobuf:"bytes,4,opt,name=pattern"` + xxx_hidden_Prefix []byte `protobuf:"bytes,5,opt,name=prefix"` + xxx_hidden_Suffix []byte `protobuf:"bytes,6,opt,name=suffix"` + xxx_hidden_Contains []byte `protobuf:"bytes,7,opt,name=contains"` + xxx_hidden_In [][]byte `protobuf:"bytes,8,rep,name=in"` + xxx_hidden_NotIn [][]byte `protobuf:"bytes,9,rep,name=not_in,json=notIn"` + xxx_hidden_WellKnown isBytesRules_WellKnown `protobuf_oneof:"well_known"` + xxx_hidden_Example [][]byte `protobuf:"bytes,14,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BytesRules) Reset() { + *x = BytesRules{} + mi := &file_buf_validate_validate_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BytesRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BytesRules) ProtoMessage() {} + +func (x *BytesRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BytesRules) GetConst() 
[]byte { + if x != nil { + return x.xxx_hidden_Const + } + return nil +} + +func (x *BytesRules) GetLen() uint64 { + if x != nil { + return x.xxx_hidden_Len + } + return 0 +} + +func (x *BytesRules) GetMinLen() uint64 { + if x != nil { + return x.xxx_hidden_MinLen + } + return 0 +} + +func (x *BytesRules) GetMaxLen() uint64 { + if x != nil { + return x.xxx_hidden_MaxLen + } + return 0 +} + +func (x *BytesRules) GetPattern() string { + if x != nil { + if x.xxx_hidden_Pattern != nil { + return *x.xxx_hidden_Pattern + } + return "" + } + return "" +} + +func (x *BytesRules) GetPrefix() []byte { + if x != nil { + return x.xxx_hidden_Prefix + } + return nil +} + +func (x *BytesRules) GetSuffix() []byte { + if x != nil { + return x.xxx_hidden_Suffix + } + return nil +} + +func (x *BytesRules) GetContains() []byte { + if x != nil { + return x.xxx_hidden_Contains + } + return nil +} + +func (x *BytesRules) GetIn() [][]byte { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *BytesRules) GetNotIn() [][]byte { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *BytesRules) GetIp() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*bytesRules_Ip); ok { + return x.Ip + } + } + return false +} + +func (x *BytesRules) GetIpv4() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*bytesRules_Ipv4); ok { + return x.Ipv4 + } + } + return false +} + +func (x *BytesRules) GetIpv6() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*bytesRules_Ipv6); ok { + return x.Ipv6 + } + } + return false +} + +func (x *BytesRules) GetUuid() bool { + if x != nil { + if x, ok := x.xxx_hidden_WellKnown.(*bytesRules_Uuid); ok { + return x.Uuid + } + } + return false +} + +func (x *BytesRules) GetExample() [][]byte { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *BytesRules) SetConst(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Const = v + 
protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 12) +} + +func (x *BytesRules) SetLen(v uint64) { + x.xxx_hidden_Len = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 1, 12) +} + +func (x *BytesRules) SetMinLen(v uint64) { + x.xxx_hidden_MinLen = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 2, 12) +} + +func (x *BytesRules) SetMaxLen(v uint64) { + x.xxx_hidden_MaxLen = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 3, 12) +} + +func (x *BytesRules) SetPattern(v string) { + x.xxx_hidden_Pattern = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 4, 12) +} + +func (x *BytesRules) SetPrefix(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Prefix = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 5, 12) +} + +func (x *BytesRules) SetSuffix(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Suffix = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 6, 12) +} + +func (x *BytesRules) SetContains(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Contains = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 7, 12) +} + +func (x *BytesRules) SetIn(v [][]byte) { + x.xxx_hidden_In = v +} + +func (x *BytesRules) SetNotIn(v [][]byte) { + x.xxx_hidden_NotIn = v +} + +func (x *BytesRules) SetIp(v bool) { + x.xxx_hidden_WellKnown = &bytesRules_Ip{v} +} + +func (x *BytesRules) SetIpv4(v bool) { + x.xxx_hidden_WellKnown = &bytesRules_Ipv4{v} +} + +func (x *BytesRules) SetIpv6(v bool) { + x.xxx_hidden_WellKnown = &bytesRules_Ipv6{v} +} + +func (x *BytesRules) SetUuid(v bool) { + x.xxx_hidden_WellKnown = &bytesRules_Uuid{v} +} + +func (x *BytesRules) SetExample(v [][]byte) { + x.xxx_hidden_Example = v +} + +func (x *BytesRules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *BytesRules) HasLen() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 1) +} + +func (x *BytesRules) HasMinLen() bool { + if x == nil { + return false + } + 
return protoimpl.X.Present(&(x.XXX_presence[0]), 2) +} + +func (x *BytesRules) HasMaxLen() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 3) +} + +func (x *BytesRules) HasPattern() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 4) +} + +func (x *BytesRules) HasPrefix() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 5) +} + +func (x *BytesRules) HasSuffix() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 6) +} + +func (x *BytesRules) HasContains() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 7) +} + +func (x *BytesRules) HasWellKnown() bool { + if x == nil { + return false + } + return x.xxx_hidden_WellKnown != nil +} + +func (x *BytesRules) HasIp() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*bytesRules_Ip) + return ok +} + +func (x *BytesRules) HasIpv4() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*bytesRules_Ipv4) + return ok +} + +func (x *BytesRules) HasIpv6() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*bytesRules_Ipv6) + return ok +} + +func (x *BytesRules) HasUuid() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_WellKnown.(*bytesRules_Uuid) + return ok +} + +func (x *BytesRules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = nil +} + +func (x *BytesRules) ClearLen() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 1) + x.xxx_hidden_Len = 0 +} + +func (x *BytesRules) ClearMinLen() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 2) + x.xxx_hidden_MinLen = 0 +} + +func (x *BytesRules) ClearMaxLen() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 3) + x.xxx_hidden_MaxLen = 0 +} + +func (x *BytesRules) ClearPattern() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 4) 
+ x.xxx_hidden_Pattern = nil +} + +func (x *BytesRules) ClearPrefix() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 5) + x.xxx_hidden_Prefix = nil +} + +func (x *BytesRules) ClearSuffix() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 6) + x.xxx_hidden_Suffix = nil +} + +func (x *BytesRules) ClearContains() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 7) + x.xxx_hidden_Contains = nil +} + +func (x *BytesRules) ClearWellKnown() { + x.xxx_hidden_WellKnown = nil +} + +func (x *BytesRules) ClearIp() { + if _, ok := x.xxx_hidden_WellKnown.(*bytesRules_Ip); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *BytesRules) ClearIpv4() { + if _, ok := x.xxx_hidden_WellKnown.(*bytesRules_Ipv4); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *BytesRules) ClearIpv6() { + if _, ok := x.xxx_hidden_WellKnown.(*bytesRules_Ipv6); ok { + x.xxx_hidden_WellKnown = nil + } +} + +func (x *BytesRules) ClearUuid() { + if _, ok := x.xxx_hidden_WellKnown.(*bytesRules_Uuid); ok { + x.xxx_hidden_WellKnown = nil + } +} + +const BytesRules_WellKnown_not_set_case case_BytesRules_WellKnown = 0 +const BytesRules_Ip_case case_BytesRules_WellKnown = 10 +const BytesRules_Ipv4_case case_BytesRules_WellKnown = 11 +const BytesRules_Ipv6_case case_BytesRules_WellKnown = 12 +const BytesRules_Uuid_case case_BytesRules_WellKnown = 15 + +func (x *BytesRules) WhichWellKnown() case_BytesRules_WellKnown { + if x == nil { + return BytesRules_WellKnown_not_set_case + } + switch x.xxx_hidden_WellKnown.(type) { + case *bytesRules_Ip: + return BytesRules_Ip_case + case *bytesRules_Ipv4: + return BytesRules_Ipv4_case + case *bytesRules_Ipv6: + return BytesRules_Ipv6_case + case *bytesRules_Uuid: + return BytesRules_Uuid_case + default: + return BytesRules_WellKnown_not_set_case + } +} + +type BytesRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified bytes + // value. 
If the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must be "\x01\x02\x03\x04" + // bytes value = 1 [(buf.validate.field).bytes.const = "\x01\x02\x03\x04"]; + // } + // + // ``` + Const []byte + // `len` requires the field value to have the specified length in bytes. + // If the field value doesn't match, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value length must be 4 bytes. + // optional bytes value = 1 [(buf.validate.field).bytes.len = 4]; + // } + // + // ``` + Len *uint64 + // `min_len` requires the field value to have at least the specified minimum + // length in bytes. + // If the field value doesn't meet the requirement, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value length must be at least 2 bytes. + // optional bytes value = 1 [(buf.validate.field).bytes.min_len = 2]; + // } + // + // ``` + MinLen *uint64 + // `max_len` requires the field value to have at most the specified maximum + // length in bytes. + // If the field value exceeds the requirement, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must be at most 6 bytes. + // optional bytes value = 1 [(buf.validate.field).bytes.max_len = 6]; + // } + // + // ``` + MaxLen *uint64 + // `pattern` requires the field value to match the specified regular + // expression ([RE2 syntax](https://github.com/google/re2/wiki/Syntax)). + // The value of the field must be valid UTF-8 or validation will fail with a + // runtime error. + // If the field value doesn't match the pattern, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must match regex pattern "^[a-zA-Z0-9]+$". 
+ // optional bytes value = 1 [(buf.validate.field).bytes.pattern = "^[a-zA-Z0-9]+$"]; + // } + // + // ``` + Pattern *string + // `prefix` requires the field value to have the specified bytes at the + // beginning of the string. + // If the field value doesn't meet the requirement, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value does not have prefix \x01\x02 + // optional bytes value = 1 [(buf.validate.field).bytes.prefix = "\x01\x02"]; + // } + // + // ``` + Prefix []byte + // `suffix` requires the field value to have the specified bytes at the end + // of the string. + // If the field value doesn't meet the requirement, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value does not have suffix \x03\x04 + // optional bytes value = 1 [(buf.validate.field).bytes.suffix = "\x03\x04"]; + // } + // + // ``` + Suffix []byte + // `contains` requires the field value to have the specified bytes anywhere in + // the string. + // If the field value doesn't meet the requirement, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value does not contain \x02\x03 + // optional bytes value = 1 [(buf.validate.field).bytes.contains = "\x02\x03"]; + // } + // + // ``` + Contains []byte + // `in` requires the field value to be equal to one of the specified + // values. If the field value doesn't match any of the specified values, an + // error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must in ["\x01\x02", "\x02\x03", "\x03\x04"] + // optional bytes value = 1 [(buf.validate.field).bytes.in = {"\x01\x02", "\x02\x03", "\x03\x04"}]; + // } + // + // ``` + In [][]byte + // `not_in` requires the field value to be not equal to any of the specified + // values. + // If the field value matches any of the specified values, an error message is + // generated. 
+ // + // ```proto + // + // message MyBytes { + // // value must not in ["\x01\x02", "\x02\x03", "\x03\x04"] + // optional bytes value = 1 [(buf.validate.field).bytes.not_in = {"\x01\x02", "\x02\x03", "\x03\x04"}]; + // } + // + // ``` + NotIn [][]byte + // WellKnown rules provide advanced rules against common byte + // patterns + + // Fields of oneof xxx_hidden_WellKnown: + // `ip` ensures that the field `value` is a valid IP address (v4 or v6) in byte format. + // If the field value doesn't meet this rule, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must be a valid IP address + // optional bytes value = 1 [(buf.validate.field).bytes.ip = true]; + // } + // + // ``` + Ip *bool + // `ipv4` ensures that the field `value` is a valid IPv4 address in byte format. + // If the field value doesn't meet this rule, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must be a valid IPv4 address + // optional bytes value = 1 [(buf.validate.field).bytes.ipv4 = true]; + // } + // + // ``` + Ipv4 *bool + // `ipv6` ensures that the field `value` is a valid IPv6 address in byte format. + // If the field value doesn't meet this rule, an error message is generated. + // ```proto + // + // message MyBytes { + // // value must be a valid IPv6 address + // optional bytes value = 1 [(buf.validate.field).bytes.ipv6 = true]; + // } + // + // ``` + Ipv6 *bool + // `uuid` ensures that the field `value` encodes the 128-bit UUID data as + // defined by [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2). + // The field must contain exactly 16 bytes + // representing the UUID. If the field value isn't a valid UUID, an error + // message will be generated. 
+ // + // ```proto + // + // message MyBytes { + // // value must be a valid UUID + // optional bytes value = 1 [(buf.validate.field).bytes.uuid = true]; + // } + // + // ``` + Uuid *bool + // -- end of xxx_hidden_WellKnown + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyBytes { + // bytes value = 1 [ + // (buf.validate.field).bytes.example = "\x01\x02", + // (buf.validate.field).bytes.example = "\x02\x03" + // ]; + // } + // + // ``` + Example [][]byte +} + +func (b0 BytesRules_builder) Build() *BytesRules { + m0 := &BytesRules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 12) + x.xxx_hidden_Const = b.Const + } + if b.Len != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 1, 12) + x.xxx_hidden_Len = *b.Len + } + if b.MinLen != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 2, 12) + x.xxx_hidden_MinLen = *b.MinLen + } + if b.MaxLen != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 3, 12) + x.xxx_hidden_MaxLen = *b.MaxLen + } + if b.Pattern != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 4, 12) + x.xxx_hidden_Pattern = b.Pattern + } + if b.Prefix != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 5, 12) + x.xxx_hidden_Prefix = b.Prefix + } + if b.Suffix != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 6, 12) + x.xxx_hidden_Suffix = b.Suffix + } + if b.Contains != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 7, 12) + x.xxx_hidden_Contains = b.Contains + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + if b.Ip != nil { + x.xxx_hidden_WellKnown = &bytesRules_Ip{*b.Ip} + } + if b.Ipv4 != nil { + x.xxx_hidden_WellKnown = &bytesRules_Ipv4{*b.Ipv4} + } + if b.Ipv6 != nil { + 
x.xxx_hidden_WellKnown = &bytesRules_Ipv6{*b.Ipv6} + } + if b.Uuid != nil { + x.xxx_hidden_WellKnown = &bytesRules_Uuid{*b.Uuid} + } + x.xxx_hidden_Example = b.Example + return m0 +} + +type case_BytesRules_WellKnown protoreflect.FieldNumber + +func (x case_BytesRules_WellKnown) String() string { + md := file_buf_validate_validate_proto_msgTypes[20].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isBytesRules_WellKnown interface { + isBytesRules_WellKnown() +} + +type bytesRules_Ip struct { + // `ip` ensures that the field `value` is a valid IP address (v4 or v6) in byte format. + // If the field value doesn't meet this rule, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must be a valid IP address + // optional bytes value = 1 [(buf.validate.field).bytes.ip = true]; + // } + // + // ``` + Ip bool `protobuf:"varint,10,opt,name=ip,oneof"` +} + +type bytesRules_Ipv4 struct { + // `ipv4` ensures that the field `value` is a valid IPv4 address in byte format. + // If the field value doesn't meet this rule, an error message is generated. + // + // ```proto + // + // message MyBytes { + // // value must be a valid IPv4 address + // optional bytes value = 1 [(buf.validate.field).bytes.ipv4 = true]; + // } + // + // ``` + Ipv4 bool `protobuf:"varint,11,opt,name=ipv4,oneof"` +} + +type bytesRules_Ipv6 struct { + // `ipv6` ensures that the field `value` is a valid IPv6 address in byte format. + // If the field value doesn't meet this rule, an error message is generated. 
+ // ```proto + // + // message MyBytes { + // // value must be a valid IPv6 address + // optional bytes value = 1 [(buf.validate.field).bytes.ipv6 = true]; + // } + // + // ``` + Ipv6 bool `protobuf:"varint,12,opt,name=ipv6,oneof"` +} + +type bytesRules_Uuid struct { + // `uuid` ensures that the field `value` encodes the 128-bit UUID data as + // defined by [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2). + // The field must contain exactly 16 bytes + // representing the UUID. If the field value isn't a valid UUID, an error + // message will be generated. + // + // ```proto + // + // message MyBytes { + // // value must be a valid UUID + // optional bytes value = 1 [(buf.validate.field).bytes.uuid = true]; + // } + // + // ``` + Uuid bool `protobuf:"varint,15,opt,name=uuid,oneof"` +} + +func (*bytesRules_Ip) isBytesRules_WellKnown() {} + +func (*bytesRules_Ipv4) isBytesRules_WellKnown() {} + +func (*bytesRules_Ipv6) isBytesRules_WellKnown() {} + +func (*bytesRules_Uuid) isBytesRules_WellKnown() {} + +// EnumRules describe the rules applied to `enum` values. 
+type EnumRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const int32 `protobuf:"varint,1,opt,name=const"` + xxx_hidden_DefinedOnly bool `protobuf:"varint,2,opt,name=defined_only,json=definedOnly"` + xxx_hidden_In []int32 `protobuf:"varint,3,rep,name=in"` + xxx_hidden_NotIn []int32 `protobuf:"varint,4,rep,name=not_in,json=notIn"` + xxx_hidden_Example []int32 `protobuf:"varint,5,rep,name=example"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EnumRules) Reset() { + *x = EnumRules{} + mi := &file_buf_validate_validate_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EnumRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumRules) ProtoMessage() {} + +func (x *EnumRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EnumRules) GetConst() int32 { + if x != nil { + return x.xxx_hidden_Const + } + return 0 +} + +func (x *EnumRules) GetDefinedOnly() bool { + if x != nil { + return x.xxx_hidden_DefinedOnly + } + return false +} + +func (x *EnumRules) GetIn() []int32 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *EnumRules) GetNotIn() []int32 { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *EnumRules) GetExample() []int32 { + if x != nil { + return x.xxx_hidden_Example + } + return nil +} + +func (x *EnumRules) SetConst(v int32) { + x.xxx_hidden_Const = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 5) +} + +func (x *EnumRules) SetDefinedOnly(v bool) { + x.xxx_hidden_DefinedOnly = v + 
protoimpl.X.SetPresent(&(x.XXX_presence[0]), 1, 5) +} + +func (x *EnumRules) SetIn(v []int32) { + x.xxx_hidden_In = v +} + +func (x *EnumRules) SetNotIn(v []int32) { + x.xxx_hidden_NotIn = v +} + +func (x *EnumRules) SetExample(v []int32) { + x.xxx_hidden_Example = v +} + +func (x *EnumRules) HasConst() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *EnumRules) HasDefinedOnly() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 1) +} + +func (x *EnumRules) ClearConst() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Const = 0 +} + +func (x *EnumRules) ClearDefinedOnly() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 1) + x.xxx_hidden_DefinedOnly = false +} + +type EnumRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` requires the field value to exactly match the specified enum value. + // If the field value doesn't match, an error message is generated. + // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // // The field `value` must be exactly MY_ENUM_VALUE1. + // MyEnum value = 1 [(buf.validate.field).enum.const = 1]; + // } + // + // ``` + Const *int32 + // `defined_only` requires the field value to be one of the defined values for + // this enum, failing on any undefined value. + // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // // The field `value` must be a defined value of MyEnum. + // MyEnum value = 1 [(buf.validate.field).enum.defined_only = true]; + // } + // + // ``` + DefinedOnly *bool + // `in` requires the field value to be equal to one of the + // specified enum values. 
If the field value doesn't match any of the + // specified values, an error message is generated. + // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // // The field `value` must be equal to one of the specified values. + // MyEnum value = 1 [(buf.validate.field).enum = { in: [1, 2]}]; + // } + // + // ``` + In []int32 + // `not_in` requires the field value to be not equal to any of the + // specified enum values. If the field value matches one of the specified + // values, an error message is generated. + // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // // The field `value` must not be equal to any of the specified values. + // MyEnum value = 1 [(buf.validate.field).enum = { not_in: [1, 2]}]; + // } + // + // ``` + NotIn []int32 + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // enum MyEnum { + // MY_ENUM_UNSPECIFIED = 0; + // MY_ENUM_VALUE1 = 1; + // MY_ENUM_VALUE2 = 2; + // } + // + // message MyMessage { + // (buf.validate.field).enum.example = 1, + // (buf.validate.field).enum.example = 2 + // } + // + // ``` + Example []int32 +} + +func (b0 EnumRules_builder) Build() *EnumRules { + m0 := &EnumRules{} + b, x := &b0, m0 + _, _ = b, x + if b.Const != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 5) + x.xxx_hidden_Const = *b.Const + } + if b.DefinedOnly != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 1, 5) + x.xxx_hidden_DefinedOnly = *b.DefinedOnly + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_Example = b.Example + return m0 +} + +// RepeatedRules describe the rules applied to `repeated` values. +type RepeatedRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_MinItems uint64 `protobuf:"varint,1,opt,name=min_items,json=minItems"` + xxx_hidden_MaxItems uint64 `protobuf:"varint,2,opt,name=max_items,json=maxItems"` + xxx_hidden_Unique bool `protobuf:"varint,3,opt,name=unique"` + xxx_hidden_Items *FieldRules `protobuf:"bytes,4,opt,name=items"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RepeatedRules) Reset() { + *x = RepeatedRules{} + mi := &file_buf_validate_validate_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RepeatedRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RepeatedRules) ProtoMessage() {} + +func (x *RepeatedRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + 
return ms + } + return mi.MessageOf(x) +} + +func (x *RepeatedRules) GetMinItems() uint64 { + if x != nil { + return x.xxx_hidden_MinItems + } + return 0 +} + +func (x *RepeatedRules) GetMaxItems() uint64 { + if x != nil { + return x.xxx_hidden_MaxItems + } + return 0 +} + +func (x *RepeatedRules) GetUnique() bool { + if x != nil { + return x.xxx_hidden_Unique + } + return false +} + +func (x *RepeatedRules) GetItems() *FieldRules { + if x != nil { + return x.xxx_hidden_Items + } + return nil +} + +func (x *RepeatedRules) SetMinItems(v uint64) { + x.xxx_hidden_MinItems = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 4) +} + +func (x *RepeatedRules) SetMaxItems(v uint64) { + x.xxx_hidden_MaxItems = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 1, 4) +} + +func (x *RepeatedRules) SetUnique(v bool) { + x.xxx_hidden_Unique = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 2, 4) +} + +func (x *RepeatedRules) SetItems(v *FieldRules) { + x.xxx_hidden_Items = v +} + +func (x *RepeatedRules) HasMinItems() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *RepeatedRules) HasMaxItems() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 1) +} + +func (x *RepeatedRules) HasUnique() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 2) +} + +func (x *RepeatedRules) HasItems() bool { + if x == nil { + return false + } + return x.xxx_hidden_Items != nil +} + +func (x *RepeatedRules) ClearMinItems() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_MinItems = 0 +} + +func (x *RepeatedRules) ClearMaxItems() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 1) + x.xxx_hidden_MaxItems = 0 +} + +func (x *RepeatedRules) ClearUnique() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 2) + x.xxx_hidden_Unique = false +} + +func (x *RepeatedRules) ClearItems() { + x.xxx_hidden_Items = nil +} + +type 
RepeatedRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `min_items` requires that this field must contain at least the specified + // minimum number of items. + // + // Note that `min_items = 1` is equivalent to setting a field as `required`. + // + // ```proto + // + // message MyRepeated { + // // value must contain at least 2 items + // repeated string value = 1 [(buf.validate.field).repeated.min_items = 2]; + // } + // + // ``` + MinItems *uint64 + // `max_items` denotes that this field must not exceed a + // certain number of items as the upper limit. If the field contains more + // items than specified, an error message will be generated, requiring the + // field to maintain no more than the specified number of items. + // + // ```proto + // + // message MyRepeated { + // // value must contain no more than 3 item(s) + // repeated string value = 1 [(buf.validate.field).repeated.max_items = 3]; + // } + // + // ``` + MaxItems *uint64 + // `unique` indicates that all elements in this field must + // be unique. This rule is strictly applicable to scalar and enum + // types, with message types not being supported. + // + // ```proto + // + // message MyRepeated { + // // repeated value must contain unique items + // repeated string value = 1 [(buf.validate.field).repeated.unique = true]; + // } + // + // ``` + Unique *bool + // `items` details the rules to be applied to each item + // in the field. Even for repeated message fields, validation is executed + // against each item unless `ignore` is specified. + // + // ```proto + // + // message MyRepeated { + // // The items in the field `value` must follow the specified rules. + // repeated string value = 1 [(buf.validate.field).repeated.items = { + // string: { + // min_len: 3 + // max_len: 10 + // } + // }]; + // } + // + // ``` + // + // Note that the `required` rule does not apply. Repeated items + // cannot be unset. 
+ Items *FieldRules +} + +func (b0 RepeatedRules_builder) Build() *RepeatedRules { + m0 := &RepeatedRules{} + b, x := &b0, m0 + _, _ = b, x + if b.MinItems != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 4) + x.xxx_hidden_MinItems = *b.MinItems + } + if b.MaxItems != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 1, 4) + x.xxx_hidden_MaxItems = *b.MaxItems + } + if b.Unique != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 2, 4) + x.xxx_hidden_Unique = *b.Unique + } + x.xxx_hidden_Items = b.Items + return m0 +} + +// MapRules describe the rules applied to `map` values. +type MapRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_MinPairs uint64 `protobuf:"varint,1,opt,name=min_pairs,json=minPairs"` + xxx_hidden_MaxPairs uint64 `protobuf:"varint,2,opt,name=max_pairs,json=maxPairs"` + xxx_hidden_Keys *FieldRules `protobuf:"bytes,4,opt,name=keys"` + xxx_hidden_Values *FieldRules `protobuf:"bytes,5,opt,name=values"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MapRules) Reset() { + *x = MapRules{} + mi := &file_buf_validate_validate_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MapRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapRules) ProtoMessage() {} + +func (x *MapRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *MapRules) GetMinPairs() uint64 { + if x != nil { + return x.xxx_hidden_MinPairs + } + return 0 +} + +func (x *MapRules) GetMaxPairs() uint64 { + if x != nil { + return 
x.xxx_hidden_MaxPairs + } + return 0 +} + +func (x *MapRules) GetKeys() *FieldRules { + if x != nil { + return x.xxx_hidden_Keys + } + return nil +} + +func (x *MapRules) GetValues() *FieldRules { + if x != nil { + return x.xxx_hidden_Values + } + return nil +} + +func (x *MapRules) SetMinPairs(v uint64) { + x.xxx_hidden_MinPairs = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 4) +} + +func (x *MapRules) SetMaxPairs(v uint64) { + x.xxx_hidden_MaxPairs = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 1, 4) +} + +func (x *MapRules) SetKeys(v *FieldRules) { + x.xxx_hidden_Keys = v +} + +func (x *MapRules) SetValues(v *FieldRules) { + x.xxx_hidden_Values = v +} + +func (x *MapRules) HasMinPairs() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *MapRules) HasMaxPairs() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 1) +} + +func (x *MapRules) HasKeys() bool { + if x == nil { + return false + } + return x.xxx_hidden_Keys != nil +} + +func (x *MapRules) HasValues() bool { + if x == nil { + return false + } + return x.xxx_hidden_Values != nil +} + +func (x *MapRules) ClearMinPairs() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_MinPairs = 0 +} + +func (x *MapRules) ClearMaxPairs() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 1) + x.xxx_hidden_MaxPairs = 0 +} + +func (x *MapRules) ClearKeys() { + x.xxx_hidden_Keys = nil +} + +func (x *MapRules) ClearValues() { + x.xxx_hidden_Values = nil +} + +type MapRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Specifies the minimum number of key-value pairs allowed. If the field has + // fewer key-value pairs than specified, an error message is generated. + // + // ```proto + // + // message MyMap { + // // The field `value` must have at least 2 key-value pairs. 
+ // map value = 1 [(buf.validate.field).map.min_pairs = 2]; + // } + // + // ``` + MinPairs *uint64 + // Specifies the maximum number of key-value pairs allowed. If the field has + // more key-value pairs than specified, an error message is generated. + // + // ```proto + // + // message MyMap { + // // The field `value` must have at most 3 key-value pairs. + // map value = 1 [(buf.validate.field).map.max_pairs = 3]; + // } + // + // ``` + MaxPairs *uint64 + // Specifies the rules to be applied to each key in the field. + // + // ```proto + // + // message MyMap { + // // The keys in the field `value` must follow the specified rules. + // map value = 1 [(buf.validate.field).map.keys = { + // string: { + // min_len: 3 + // max_len: 10 + // } + // }]; + // } + // + // ``` + // + // Note that the `required` rule does not apply. Map keys cannot be unset. + Keys *FieldRules + // Specifies the rules to be applied to the value of each key in the + // field. Message values will still have their validations evaluated unless + // `ignore` is specified. + // + // ```proto + // + // message MyMap { + // // The values in the field `value` must follow the specified rules. + // map value = 1 [(buf.validate.field).map.values = { + // string: { + // min_len: 5 + // max_len: 20 + // } + // }]; + // } + // + // ``` + // Note that the `required` rule does not apply. Map values cannot be unset. + Values *FieldRules +} + +func (b0 MapRules_builder) Build() *MapRules { + m0 := &MapRules{} + b, x := &b0, m0 + _, _ = b, x + if b.MinPairs != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 4) + x.xxx_hidden_MinPairs = *b.MinPairs + } + if b.MaxPairs != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 1, 4) + x.xxx_hidden_MaxPairs = *b.MaxPairs + } + x.xxx_hidden_Keys = b.Keys + x.xxx_hidden_Values = b.Values + return m0 +} + +// AnyRules describe rules applied exclusively to the `google.protobuf.Any` well-known type. 
+type AnyRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_In []string `protobuf:"bytes,2,rep,name=in"` + xxx_hidden_NotIn []string `protobuf:"bytes,3,rep,name=not_in,json=notIn"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AnyRules) Reset() { + *x = AnyRules{} + mi := &file_buf_validate_validate_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AnyRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnyRules) ProtoMessage() {} + +func (x *AnyRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *AnyRules) GetIn() []string { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *AnyRules) GetNotIn() []string { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *AnyRules) SetIn(v []string) { + x.xxx_hidden_In = v +} + +func (x *AnyRules) SetNotIn(v []string) { + x.xxx_hidden_NotIn = v +} + +type AnyRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `in` requires the field's `type_url` to be equal to one of the + // specified values. If it doesn't match any of the specified values, an error + // message is generated. + // + // ```proto + // + // message MyAny { + // // The `value` field must have a `type_url` equal to one of the specified values. + // google.protobuf.Any value = 1 [(buf.validate.field).any = { + // in: ["type.googleapis.com/MyType1", "type.googleapis.com/MyType2"] + // }]; + // } + // + // ``` + In []string + // requires the field's type_url to be not equal to any of the specified values. 
If it matches any of the specified values, an error message is generated. + // + // ```proto + // + // message MyAny { + // // The `value` field must not have a `type_url` equal to any of the specified values. + // google.protobuf.Any value = 1 [(buf.validate.field).any = { + // not_in: ["type.googleapis.com/ForbiddenType1", "type.googleapis.com/ForbiddenType2"] + // }]; + // } + // + // ``` + NotIn []string +} + +func (b0 AnyRules_builder) Build() *AnyRules { + m0 := &AnyRules{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + return m0 +} + +// DurationRules describe the rules applied exclusively to the `google.protobuf.Duration` well-known type. +type DurationRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const *durationpb.Duration `protobuf:"bytes,2,opt,name=const"` + xxx_hidden_LessThan isDurationRules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isDurationRules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_In *[]*durationpb.Duration `protobuf:"bytes,7,rep,name=in"` + xxx_hidden_NotIn *[]*durationpb.Duration `protobuf:"bytes,8,rep,name=not_in,json=notIn"` + xxx_hidden_Example *[]*durationpb.Duration `protobuf:"bytes,9,rep,name=example"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DurationRules) Reset() { + *x = DurationRules{} + mi := &file_buf_validate_validate_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DurationRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurationRules) ProtoMessage() {} + +func (x *DurationRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +func (x *DurationRules) GetConst() *durationpb.Duration { + if x != nil { + return x.xxx_hidden_Const + } + return nil +} + +func (x *DurationRules) GetLt() *durationpb.Duration { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*durationRules_Lt); ok { + return x.Lt + } + } + return nil +} + +func (x *DurationRules) GetLte() *durationpb.Duration { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*durationRules_Lte); ok { + return x.Lte + } + } + return nil +} + +func (x *DurationRules) GetGt() *durationpb.Duration { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*durationRules_Gt); ok { + return x.Gt + } + } + return nil +} + +func (x *DurationRules) GetGte() *durationpb.Duration { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*durationRules_Gte); ok { + return x.Gte + } + } + return nil +} + +func (x *DurationRules) GetIn() []*durationpb.Duration { + if x != nil { + if x.xxx_hidden_In != nil { + return *x.xxx_hidden_In + } + } + return nil +} + +func (x *DurationRules) GetNotIn() []*durationpb.Duration { + if x != nil { + if x.xxx_hidden_NotIn != nil { + return *x.xxx_hidden_NotIn + } + } + return nil +} + +func (x *DurationRules) GetExample() []*durationpb.Duration { + if x != nil { + if x.xxx_hidden_Example != nil { + return *x.xxx_hidden_Example + } + } + return nil +} + +func (x *DurationRules) SetConst(v *durationpb.Duration) { + x.xxx_hidden_Const = v +} + +func (x *DurationRules) SetLt(v *durationpb.Duration) { + if v == nil { + x.xxx_hidden_LessThan = nil + return + } + x.xxx_hidden_LessThan = &durationRules_Lt{v} +} + +func (x *DurationRules) SetLte(v *durationpb.Duration) { + if v == nil { + x.xxx_hidden_LessThan = nil + return + } + x.xxx_hidden_LessThan = &durationRules_Lte{v} +} + +func (x *DurationRules) SetGt(v *durationpb.Duration) { + if v == nil { + x.xxx_hidden_GreaterThan = nil + return + } + x.xxx_hidden_GreaterThan = &durationRules_Gt{v} +} + +func (x *DurationRules) SetGte(v 
*durationpb.Duration) { + if v == nil { + x.xxx_hidden_GreaterThan = nil + return + } + x.xxx_hidden_GreaterThan = &durationRules_Gte{v} +} + +func (x *DurationRules) SetIn(v []*durationpb.Duration) { + x.xxx_hidden_In = &v +} + +func (x *DurationRules) SetNotIn(v []*durationpb.Duration) { + x.xxx_hidden_NotIn = &v +} + +func (x *DurationRules) SetExample(v []*durationpb.Duration) { + x.xxx_hidden_Example = &v +} + +func (x *DurationRules) HasConst() bool { + if x == nil { + return false + } + return x.xxx_hidden_Const != nil +} + +func (x *DurationRules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *DurationRules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*durationRules_Lt) + return ok +} + +func (x *DurationRules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*durationRules_Lte) + return ok +} + +func (x *DurationRules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *DurationRules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*durationRules_Gt) + return ok +} + +func (x *DurationRules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*durationRules_Gte) + return ok +} + +func (x *DurationRules) ClearConst() { + x.xxx_hidden_Const = nil +} + +func (x *DurationRules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *DurationRules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*durationRules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *DurationRules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*durationRules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *DurationRules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x *DurationRules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*durationRules_Gt); ok { + 
x.xxx_hidden_GreaterThan = nil + } +} + +func (x *DurationRules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*durationRules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +const DurationRules_LessThan_not_set_case case_DurationRules_LessThan = 0 +const DurationRules_Lt_case case_DurationRules_LessThan = 3 +const DurationRules_Lte_case case_DurationRules_LessThan = 4 + +func (x *DurationRules) WhichLessThan() case_DurationRules_LessThan { + if x == nil { + return DurationRules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *durationRules_Lt: + return DurationRules_Lt_case + case *durationRules_Lte: + return DurationRules_Lte_case + default: + return DurationRules_LessThan_not_set_case + } +} + +const DurationRules_GreaterThan_not_set_case case_DurationRules_GreaterThan = 0 +const DurationRules_Gt_case case_DurationRules_GreaterThan = 5 +const DurationRules_Gte_case case_DurationRules_GreaterThan = 6 + +func (x *DurationRules) WhichGreaterThan() case_DurationRules_GreaterThan { + if x == nil { + return DurationRules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *durationRules_Gt: + return DurationRules_Gt_case + case *durationRules_Gte: + return DurationRules_Gte_case + default: + return DurationRules_GreaterThan_not_set_case + } +} + +type DurationRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` dictates that the field must match the specified value of the `google.protobuf.Duration` type exactly. + // If the field's value deviates from the specified value, an error message + // will be generated. 
+ // + // ```proto + // + // message MyDuration { + // // value must equal 5s + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.const = "5s"]; + // } + // + // ``` + Const *durationpb.Duration + // Fields of oneof xxx_hidden_LessThan: + // `lt` stipulates that the field must be less than the specified value of the `google.protobuf.Duration` type, + // exclusive. If the field's value is greater than or equal to the specified + // value, an error message will be generated. + // + // ```proto + // + // message MyDuration { + // // value must be less than 5s + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.lt = "5s"]; + // } + // + // ``` + Lt *durationpb.Duration + // `lte` indicates that the field must be less than or equal to the specified + // value of the `google.protobuf.Duration` type, inclusive. If the field's value is greater than the specified value, + // an error message will be generated. + // + // ```proto + // + // message MyDuration { + // // value must be less than or equal to 10s + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.lte = "10s"]; + // } + // + // ``` + Lte *durationpb.Duration + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the duration field value to be greater than the specified + // value (exclusive). If the value of `gt` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyDuration { + // // duration must be greater than 5s [duration.gt] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.gt = { seconds: 5 }]; + // + // // duration must be greater than 5s and less than 10s [duration.gt_lt] + // google.protobuf.Duration another_value = 2 [(buf.validate.field).duration = { gt: { seconds: 5 }, lt: { seconds: 10 } }]; + // + // // duration must be greater than 10s or less than 5s [duration.gt_lt_exclusive] + // google.protobuf.Duration other_value = 3 [(buf.validate.field).duration = { gt: { seconds: 10 }, lt: { seconds: 5 } }]; + // } + // + // ``` + Gt *durationpb.Duration + // `gte` requires the duration field value to be greater than or equal to the + // specified value (exclusive). If the value of `gte` is larger than a + // specified `lt` or `lte`, the range is reversed, and the field value must + // be outside the specified range. If the field value doesn't meet the + // required conditions, an error message is generated. + // + // ```proto + // + // message MyDuration { + // // duration must be greater than or equal to 5s [duration.gte] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.gte = { seconds: 5 }]; + // + // // duration must be greater than or equal to 5s and less than 10s [duration.gte_lt] + // google.protobuf.Duration another_value = 2 [(buf.validate.field).duration = { gte: { seconds: 5 }, lt: { seconds: 10 } }]; + // + // // duration must be greater than or equal to 10s or less than 5s [duration.gte_lt_exclusive] + // google.protobuf.Duration other_value = 3 [(buf.validate.field).duration = { gte: { seconds: 10 }, lt: { seconds: 5 } }]; + // } + // + // ``` + Gte *durationpb.Duration + // -- end of xxx_hidden_GreaterThan + // `in` asserts that the field must be equal to one of the specified values of the `google.protobuf.Duration` type. 
+ // If the field's value doesn't correspond to any of the specified values, + // an error message will be generated. + // + // ```proto + // + // message MyDuration { + // // value must be in list [1s, 2s, 3s] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.in = ["1s", "2s", "3s"]]; + // } + // + // ``` + In []*durationpb.Duration + // `not_in` denotes that the field must not be equal to + // any of the specified values of the `google.protobuf.Duration` type. + // If the field's value matches any of these values, an error message will be + // generated. + // + // ```proto + // + // message MyDuration { + // // value must not be in list [1s, 2s, 3s] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.not_in = ["1s", "2s", "3s"]]; + // } + // + // ``` + NotIn []*durationpb.Duration + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyDuration { + // google.protobuf.Duration value = 1 [ + // (buf.validate.field).duration.example = { seconds: 1 }, + // (buf.validate.field).duration.example = { seconds: 2 }, + // ]; + // } + // + // ``` + Example []*durationpb.Duration +} + +func (b0 DurationRules_builder) Build() *DurationRules { + m0 := &DurationRules{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Const = b.Const + if b.Lt != nil { + x.xxx_hidden_LessThan = &durationRules_Lt{b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = &durationRules_Lte{b.Lte} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = &durationRules_Gt{b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = &durationRules_Gte{b.Gte} + } + x.xxx_hidden_In = &b.In + x.xxx_hidden_NotIn = &b.NotIn + x.xxx_hidden_Example = &b.Example + return m0 +} + +type case_DurationRules_LessThan protoreflect.FieldNumber + +func (x case_DurationRules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[25].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_DurationRules_GreaterThan protoreflect.FieldNumber + +func (x case_DurationRules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[25].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isDurationRules_LessThan interface { + isDurationRules_LessThan() +} + +type durationRules_Lt struct { + // `lt` stipulates that the field must be less than the specified value of the `google.protobuf.Duration` type, + // exclusive. If the field's value is greater than or equal to the specified + // value, an error message will be generated. 
+ // + // ```proto + // + // message MyDuration { + // // value must be less than 5s + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.lt = "5s"]; + // } + // + // ``` + Lt *durationpb.Duration `protobuf:"bytes,3,opt,name=lt,oneof"` +} + +type durationRules_Lte struct { + // `lte` indicates that the field must be less than or equal to the specified + // value of the `google.protobuf.Duration` type, inclusive. If the field's value is greater than the specified value, + // an error message will be generated. + // + // ```proto + // + // message MyDuration { + // // value must be less than or equal to 10s + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.lte = "10s"]; + // } + // + // ``` + Lte *durationpb.Duration `protobuf:"bytes,4,opt,name=lte,oneof"` +} + +func (*durationRules_Lt) isDurationRules_LessThan() {} + +func (*durationRules_Lte) isDurationRules_LessThan() {} + +type isDurationRules_GreaterThan interface { + isDurationRules_GreaterThan() +} + +type durationRules_Gt struct { + // `gt` requires the duration field value to be greater than the specified + // value (exclusive). If the value of `gt` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyDuration { + // // duration must be greater than 5s [duration.gt] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.gt = { seconds: 5 }]; + // + // // duration must be greater than 5s and less than 10s [duration.gt_lt] + // google.protobuf.Duration another_value = 2 [(buf.validate.field).duration = { gt: { seconds: 5 }, lt: { seconds: 10 } }]; + // + // // duration must be greater than 10s or less than 5s [duration.gt_lt_exclusive] + // google.protobuf.Duration other_value = 3 [(buf.validate.field).duration = { gt: { seconds: 10 }, lt: { seconds: 5 } }]; + // } + // + // ``` + Gt *durationpb.Duration `protobuf:"bytes,5,opt,name=gt,oneof"` +} + +type durationRules_Gte struct { + // `gte` requires the duration field value to be greater than or equal to the + // specified value (exclusive). If the value of `gte` is larger than a + // specified `lt` or `lte`, the range is reversed, and the field value must + // be outside the specified range. If the field value doesn't meet the + // required conditions, an error message is generated. 
+ // + // ```proto + // + // message MyDuration { + // // duration must be greater than or equal to 5s [duration.gte] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.gte = { seconds: 5 }]; + // + // // duration must be greater than or equal to 5s and less than 10s [duration.gte_lt] + // google.protobuf.Duration another_value = 2 [(buf.validate.field).duration = { gte: { seconds: 5 }, lt: { seconds: 10 } }]; + // + // // duration must be greater than or equal to 10s or less than 5s [duration.gte_lt_exclusive] + // google.protobuf.Duration other_value = 3 [(buf.validate.field).duration = { gte: { seconds: 10 }, lt: { seconds: 5 } }]; + // } + // + // ``` + Gte *durationpb.Duration `protobuf:"bytes,6,opt,name=gte,oneof"` +} + +func (*durationRules_Gt) isDurationRules_GreaterThan() {} + +func (*durationRules_Gte) isDurationRules_GreaterThan() {} + +// FieldMaskRules describe rules applied exclusively to the `google.protobuf.FieldMask` well-known type. +type FieldMaskRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const *fieldmaskpb.FieldMask `protobuf:"bytes,1,opt,name=const"` + xxx_hidden_In []string `protobuf:"bytes,2,rep,name=in"` + xxx_hidden_NotIn []string `protobuf:"bytes,3,rep,name=not_in,json=notIn"` + xxx_hidden_Example *[]*fieldmaskpb.FieldMask `protobuf:"bytes,4,rep,name=example"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FieldMaskRules) Reset() { + *x = FieldMaskRules{} + mi := &file_buf_validate_validate_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FieldMaskRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldMaskRules) ProtoMessage() {} + +func (x *FieldMaskRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[26] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FieldMaskRules) GetConst() *fieldmaskpb.FieldMask { + if x != nil { + return x.xxx_hidden_Const + } + return nil +} + +func (x *FieldMaskRules) GetIn() []string { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *FieldMaskRules) GetNotIn() []string { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *FieldMaskRules) GetExample() []*fieldmaskpb.FieldMask { + if x != nil { + if x.xxx_hidden_Example != nil { + return *x.xxx_hidden_Example + } + } + return nil +} + +func (x *FieldMaskRules) SetConst(v *fieldmaskpb.FieldMask) { + x.xxx_hidden_Const = v +} + +func (x *FieldMaskRules) SetIn(v []string) { + x.xxx_hidden_In = v +} + +func (x *FieldMaskRules) SetNotIn(v []string) { + x.xxx_hidden_NotIn = v +} + +func (x *FieldMaskRules) SetExample(v []*fieldmaskpb.FieldMask) { + x.xxx_hidden_Example = &v +} + +func (x *FieldMaskRules) HasConst() bool { + if x == nil { + return false + } + return x.xxx_hidden_Const != nil +} + +func (x *FieldMaskRules) ClearConst() { + x.xxx_hidden_Const = nil +} + +type FieldMaskRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `const` dictates that the field must match the specified value of the `google.protobuf.FieldMask` type exactly. + // If the field's value deviates from the specified value, an error message + // will be generated. + // + // ```proto + // + // message MyFieldMask { + // // value must equal ["a"] + // google.protobuf.FieldMask value = 1 [(buf.validate.field).field_mask.const = { + // paths: ["a"] + // }]; + // } + // + // ``` + Const *fieldmaskpb.FieldMask + // `in` requires the field value to only contain paths matching specified + // values or their subpaths. 
+ // If any of the field value's paths doesn't match the rule, + // an error message is generated. + // See: https://protobuf.dev/reference/protobuf/google.protobuf/#field-mask + // + // ```proto + // + // message MyFieldMask { + // // The `value` FieldMask must only contain paths listed in `in`. + // google.protobuf.FieldMask value = 1 [(buf.validate.field).field_mask = { + // in: ["a", "b", "c.a"] + // }]; + // } + // + // ``` + In []string + // `not_in` requires the field value to not contain paths matching specified + // values or their subpaths. + // If any of the field value's paths matches the rule, + // an error message is generated. + // See: https://protobuf.dev/reference/protobuf/google.protobuf/#field-mask + // + // ```proto + // + // message MyFieldMask { + // // The `value` FieldMask shall not contain paths listed in `not_in`. + // google.protobuf.FieldMask value = 1 [(buf.validate.field).field_mask = { + // not_in: ["forbidden", "immutable", "c.a"] + // }]; + // } + // + // ``` + NotIn []string + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. + // + // ```proto + // + // message MyFieldMask { + // google.protobuf.FieldMask value = 1 [ + // (buf.validate.field).field_mask.example = { paths: ["a", "b"] }, + // (buf.validate.field).field_mask.example = { paths: ["c.a", "d"] }, + // ]; + // } + // + // ``` + Example []*fieldmaskpb.FieldMask +} + +func (b0 FieldMaskRules_builder) Build() *FieldMaskRules { + m0 := &FieldMaskRules{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Const = b.Const + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_Example = &b.Example + return m0 +} + +// TimestampRules describe the rules applied exclusively to the `google.protobuf.Timestamp` well-known type. 
+type TimestampRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Const *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=const"` + xxx_hidden_LessThan isTimestampRules_LessThan `protobuf_oneof:"less_than"` + xxx_hidden_GreaterThan isTimestampRules_GreaterThan `protobuf_oneof:"greater_than"` + xxx_hidden_Within *durationpb.Duration `protobuf:"bytes,9,opt,name=within"` + xxx_hidden_Example *[]*timestamppb.Timestamp `protobuf:"bytes,10,rep,name=example"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TimestampRules) Reset() { + *x = TimestampRules{} + mi := &file_buf_validate_validate_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TimestampRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimestampRules) ProtoMessage() {} + +func (x *TimestampRules) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TimestampRules) GetConst() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_Const + } + return nil +} + +func (x *TimestampRules) GetLt() *timestamppb.Timestamp { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*timestampRules_Lt); ok { + return x.Lt + } + } + return nil +} + +func (x *TimestampRules) GetLte() *timestamppb.Timestamp { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*timestampRules_Lte); ok { + return x.Lte + } + } + return nil +} + +func (x *TimestampRules) GetLtNow() bool { + if x != nil { + if x, ok := x.xxx_hidden_LessThan.(*timestampRules_LtNow); ok { + return x.LtNow + } + } + return false +} + +func (x *TimestampRules) GetGt() *timestamppb.Timestamp { + if x != nil { + if x, ok := 
x.xxx_hidden_GreaterThan.(*timestampRules_Gt); ok { + return x.Gt + } + } + return nil +} + +func (x *TimestampRules) GetGte() *timestamppb.Timestamp { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*timestampRules_Gte); ok { + return x.Gte + } + } + return nil +} + +func (x *TimestampRules) GetGtNow() bool { + if x != nil { + if x, ok := x.xxx_hidden_GreaterThan.(*timestampRules_GtNow); ok { + return x.GtNow + } + } + return false +} + +func (x *TimestampRules) GetWithin() *durationpb.Duration { + if x != nil { + return x.xxx_hidden_Within + } + return nil +} + +func (x *TimestampRules) GetExample() []*timestamppb.Timestamp { + if x != nil { + if x.xxx_hidden_Example != nil { + return *x.xxx_hidden_Example + } + } + return nil +} + +func (x *TimestampRules) SetConst(v *timestamppb.Timestamp) { + x.xxx_hidden_Const = v +} + +func (x *TimestampRules) SetLt(v *timestamppb.Timestamp) { + if v == nil { + x.xxx_hidden_LessThan = nil + return + } + x.xxx_hidden_LessThan = ×tampRules_Lt{v} +} + +func (x *TimestampRules) SetLte(v *timestamppb.Timestamp) { + if v == nil { + x.xxx_hidden_LessThan = nil + return + } + x.xxx_hidden_LessThan = ×tampRules_Lte{v} +} + +func (x *TimestampRules) SetLtNow(v bool) { + x.xxx_hidden_LessThan = ×tampRules_LtNow{v} +} + +func (x *TimestampRules) SetGt(v *timestamppb.Timestamp) { + if v == nil { + x.xxx_hidden_GreaterThan = nil + return + } + x.xxx_hidden_GreaterThan = ×tampRules_Gt{v} +} + +func (x *TimestampRules) SetGte(v *timestamppb.Timestamp) { + if v == nil { + x.xxx_hidden_GreaterThan = nil + return + } + x.xxx_hidden_GreaterThan = ×tampRules_Gte{v} +} + +func (x *TimestampRules) SetGtNow(v bool) { + x.xxx_hidden_GreaterThan = ×tampRules_GtNow{v} +} + +func (x *TimestampRules) SetWithin(v *durationpb.Duration) { + x.xxx_hidden_Within = v +} + +func (x *TimestampRules) SetExample(v []*timestamppb.Timestamp) { + x.xxx_hidden_Example = &v +} + +func (x *TimestampRules) HasConst() bool { + if x == nil { + return false + } + 
return x.xxx_hidden_Const != nil +} + +func (x *TimestampRules) HasLessThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_LessThan != nil +} + +func (x *TimestampRules) HasLt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*timestampRules_Lt) + return ok +} + +func (x *TimestampRules) HasLte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*timestampRules_Lte) + return ok +} + +func (x *TimestampRules) HasLtNow() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_LessThan.(*timestampRules_LtNow) + return ok +} + +func (x *TimestampRules) HasGreaterThan() bool { + if x == nil { + return false + } + return x.xxx_hidden_GreaterThan != nil +} + +func (x *TimestampRules) HasGt() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*timestampRules_Gt) + return ok +} + +func (x *TimestampRules) HasGte() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*timestampRules_Gte) + return ok +} + +func (x *TimestampRules) HasGtNow() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_GreaterThan.(*timestampRules_GtNow) + return ok +} + +func (x *TimestampRules) HasWithin() bool { + if x == nil { + return false + } + return x.xxx_hidden_Within != nil +} + +func (x *TimestampRules) ClearConst() { + x.xxx_hidden_Const = nil +} + +func (x *TimestampRules) ClearLessThan() { + x.xxx_hidden_LessThan = nil +} + +func (x *TimestampRules) ClearLt() { + if _, ok := x.xxx_hidden_LessThan.(*timestampRules_Lt); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *TimestampRules) ClearLte() { + if _, ok := x.xxx_hidden_LessThan.(*timestampRules_Lte); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *TimestampRules) ClearLtNow() { + if _, ok := x.xxx_hidden_LessThan.(*timestampRules_LtNow); ok { + x.xxx_hidden_LessThan = nil + } +} + +func (x *TimestampRules) ClearGreaterThan() { + x.xxx_hidden_GreaterThan = nil +} + +func (x 
*TimestampRules) ClearGt() { + if _, ok := x.xxx_hidden_GreaterThan.(*timestampRules_Gt); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *TimestampRules) ClearGte() { + if _, ok := x.xxx_hidden_GreaterThan.(*timestampRules_Gte); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *TimestampRules) ClearGtNow() { + if _, ok := x.xxx_hidden_GreaterThan.(*timestampRules_GtNow); ok { + x.xxx_hidden_GreaterThan = nil + } +} + +func (x *TimestampRules) ClearWithin() { + x.xxx_hidden_Within = nil +} + +const TimestampRules_LessThan_not_set_case case_TimestampRules_LessThan = 0 +const TimestampRules_Lt_case case_TimestampRules_LessThan = 3 +const TimestampRules_Lte_case case_TimestampRules_LessThan = 4 +const TimestampRules_LtNow_case case_TimestampRules_LessThan = 7 + +func (x *TimestampRules) WhichLessThan() case_TimestampRules_LessThan { + if x == nil { + return TimestampRules_LessThan_not_set_case + } + switch x.xxx_hidden_LessThan.(type) { + case *timestampRules_Lt: + return TimestampRules_Lt_case + case *timestampRules_Lte: + return TimestampRules_Lte_case + case *timestampRules_LtNow: + return TimestampRules_LtNow_case + default: + return TimestampRules_LessThan_not_set_case + } +} + +const TimestampRules_GreaterThan_not_set_case case_TimestampRules_GreaterThan = 0 +const TimestampRules_Gt_case case_TimestampRules_GreaterThan = 5 +const TimestampRules_Gte_case case_TimestampRules_GreaterThan = 6 +const TimestampRules_GtNow_case case_TimestampRules_GreaterThan = 8 + +func (x *TimestampRules) WhichGreaterThan() case_TimestampRules_GreaterThan { + if x == nil { + return TimestampRules_GreaterThan_not_set_case + } + switch x.xxx_hidden_GreaterThan.(type) { + case *timestampRules_Gt: + return TimestampRules_Gt_case + case *timestampRules_Gte: + return TimestampRules_Gte_case + case *timestampRules_GtNow: + return TimestampRules_GtNow_case + default: + return TimestampRules_GreaterThan_not_set_case + } +} + +type TimestampRules_builder struct { + _ [0]func() 
// Prevents comparability and use of unkeyed literals for the builder. + + // `const` dictates that this field, of the `google.protobuf.Timestamp` type, must exactly match the specified value. If the field value doesn't correspond to the specified timestamp, an error message will be generated. + // + // ```proto + // + // message MyTimestamp { + // // value must equal 2023-05-03T10:00:00Z + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.const = {seconds: 1727998800}]; + // } + // + // ``` + Const *timestamppb.Timestamp + // Fields of oneof xxx_hidden_LessThan: + // requires the duration field value to be less than the specified value (field < value). If the field value doesn't meet the required conditions, an error message is generated. + // + // ```proto + // + // message MyDuration { + // // duration must be less than 'P3D' [duration.lt] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.lt = { seconds: 259200 }]; + // } + // + // ``` + Lt *timestamppb.Timestamp + // requires the timestamp field value to be less than or equal to the specified value (field <= value). If the field value doesn't meet the required conditions, an error message is generated. + // + // ```proto + // + // message MyTimestamp { + // // timestamp must be less than or equal to '2023-05-14T00:00:00Z' [timestamp.lte] + // google.protobuf.Timestamp value = 1 [(buf.validate.field).timestamp.lte = { seconds: 1678867200 }]; + // } + // + // ``` + Lte *timestamppb.Timestamp + // `lt_now` specifies that this field, of the `google.protobuf.Timestamp` type, must be less than the current time. `lt_now` can only be used with the `within` rule. 
+ // + // ```proto + // + // message MyTimestamp { + // // value must be less than now + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.lt_now = true]; + // } + // + // ``` + LtNow *bool + // -- end of xxx_hidden_LessThan + // Fields of oneof xxx_hidden_GreaterThan: + // `gt` requires the timestamp field value to be greater than the specified + // value (exclusive). If the value of `gt` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. + // + // ```proto + // + // message MyTimestamp { + // // timestamp must be greater than '2023-01-01T00:00:00Z' [timestamp.gt] + // google.protobuf.Timestamp value = 1 [(buf.validate.field).timestamp.gt = { seconds: 1672444800 }]; + // + // // timestamp must be greater than '2023-01-01T00:00:00Z' and less than '2023-01-02T00:00:00Z' [timestamp.gt_lt] + // google.protobuf.Timestamp another_value = 2 [(buf.validate.field).timestamp = { gt: { seconds: 1672444800 }, lt: { seconds: 1672531200 } }]; + // + // // timestamp must be greater than '2023-01-02T00:00:00Z' or less than '2023-01-01T00:00:00Z' [timestamp.gt_lt_exclusive] + // google.protobuf.Timestamp other_value = 3 [(buf.validate.field).timestamp = { gt: { seconds: 1672531200 }, lt: { seconds: 1672444800 } }]; + // } + // + // ``` + Gt *timestamppb.Timestamp + // `gte` requires the timestamp field value to be greater than or equal to the + // specified value (exclusive). If the value of `gte` is larger than a + // specified `lt` or `lte`, the range is reversed, and the field value + // must be outside the specified range. If the field value doesn't meet + // the required conditions, an error message is generated. 
+ // + // ```proto + // + // message MyTimestamp { + // // timestamp must be greater than or equal to '2023-01-01T00:00:00Z' [timestamp.gte] + // google.protobuf.Timestamp value = 1 [(buf.validate.field).timestamp.gte = { seconds: 1672444800 }]; + // + // // timestamp must be greater than or equal to '2023-01-01T00:00:00Z' and less than '2023-01-02T00:00:00Z' [timestamp.gte_lt] + // google.protobuf.Timestamp another_value = 2 [(buf.validate.field).timestamp = { gte: { seconds: 1672444800 }, lt: { seconds: 1672531200 } }]; + // + // // timestamp must be greater than or equal to '2023-01-02T00:00:00Z' or less than '2023-01-01T00:00:00Z' [timestamp.gte_lt_exclusive] + // google.protobuf.Timestamp other_value = 3 [(buf.validate.field).timestamp = { gte: { seconds: 1672531200 }, lt: { seconds: 1672444800 } }]; + // } + // + // ``` + Gte *timestamppb.Timestamp + // `gt_now` specifies that this field, of the `google.protobuf.Timestamp` type, must be greater than the current time. `gt_now` can only be used with the `within` rule. + // + // ```proto + // + // message MyTimestamp { + // // value must be greater than now + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.gt_now = true]; + // } + // + // ``` + GtNow *bool + // -- end of xxx_hidden_GreaterThan + // `within` specifies that this field, of the `google.protobuf.Timestamp` type, must be within the specified duration of the current time. If the field value isn't within the duration, an error message is generated. + // + // ```proto + // + // message MyTimestamp { + // // value must be within 1 hour of now + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.within = {seconds: 3600}]; + // } + // + // ``` + Within *durationpb.Duration + // `example` specifies values that the field may have. These values SHOULD + // conform to other rules. `example` values will not impact validation + // but may be used as helpful guidance on how to populate the given field. 
+ // + // ```proto + // + // message MyTimestamp { + // google.protobuf.Timestamp value = 1 [ + // (buf.validate.field).timestamp.example = { seconds: 1672444800 }, + // (buf.validate.field).timestamp.example = { seconds: 1672531200 }, + // ]; + // } + // + // ``` + Example []*timestamppb.Timestamp +} + +func (b0 TimestampRules_builder) Build() *TimestampRules { + m0 := &TimestampRules{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Const = b.Const + if b.Lt != nil { + x.xxx_hidden_LessThan = ×tampRules_Lt{b.Lt} + } + if b.Lte != nil { + x.xxx_hidden_LessThan = ×tampRules_Lte{b.Lte} + } + if b.LtNow != nil { + x.xxx_hidden_LessThan = ×tampRules_LtNow{*b.LtNow} + } + if b.Gt != nil { + x.xxx_hidden_GreaterThan = ×tampRules_Gt{b.Gt} + } + if b.Gte != nil { + x.xxx_hidden_GreaterThan = ×tampRules_Gte{b.Gte} + } + if b.GtNow != nil { + x.xxx_hidden_GreaterThan = ×tampRules_GtNow{*b.GtNow} + } + x.xxx_hidden_Within = b.Within + x.xxx_hidden_Example = &b.Example + return m0 +} + +type case_TimestampRules_LessThan protoreflect.FieldNumber + +func (x case_TimestampRules_LessThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[27].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type case_TimestampRules_GreaterThan protoreflect.FieldNumber + +func (x case_TimestampRules_GreaterThan) String() string { + md := file_buf_validate_validate_proto_msgTypes[27].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isTimestampRules_LessThan interface { + isTimestampRules_LessThan() +} + +type timestampRules_Lt struct { + // requires the duration field value to be less than the specified value (field < value). If the field value doesn't meet the required conditions, an error message is generated. 
+ // + // ```proto + // + // message MyDuration { + // // duration must be less than 'P3D' [duration.lt] + // google.protobuf.Duration value = 1 [(buf.validate.field).duration.lt = { seconds: 259200 }]; + // } + // + // ``` + Lt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=lt,oneof"` +} + +type timestampRules_Lte struct { + // requires the timestamp field value to be less than or equal to the specified value (field <= value). If the field value doesn't meet the required conditions, an error message is generated. + // + // ```proto + // + // message MyTimestamp { + // // timestamp must be less than or equal to '2023-05-14T00:00:00Z' [timestamp.lte] + // google.protobuf.Timestamp value = 1 [(buf.validate.field).timestamp.lte = { seconds: 1678867200 }]; + // } + // + // ``` + Lte *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=lte,oneof"` +} + +type timestampRules_LtNow struct { + // `lt_now` specifies that this field, of the `google.protobuf.Timestamp` type, must be less than the current time. `lt_now` can only be used with the `within` rule. + // + // ```proto + // + // message MyTimestamp { + // // value must be less than now + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.lt_now = true]; + // } + // + // ``` + LtNow bool `protobuf:"varint,7,opt,name=lt_now,json=ltNow,oneof"` +} + +func (*timestampRules_Lt) isTimestampRules_LessThan() {} + +func (*timestampRules_Lte) isTimestampRules_LessThan() {} + +func (*timestampRules_LtNow) isTimestampRules_LessThan() {} + +type isTimestampRules_GreaterThan interface { + isTimestampRules_GreaterThan() +} + +type timestampRules_Gt struct { + // `gt` requires the timestamp field value to be greater than the specified + // value (exclusive). If the value of `gt` is larger than a specified `lt` + // or `lte`, the range is reversed, and the field value must be outside the + // specified range. If the field value doesn't meet the required conditions, + // an error message is generated. 
+ // + // ```proto + // + // message MyTimestamp { + // // timestamp must be greater than '2023-01-01T00:00:00Z' [timestamp.gt] + // google.protobuf.Timestamp value = 1 [(buf.validate.field).timestamp.gt = { seconds: 1672444800 }]; + // + // // timestamp must be greater than '2023-01-01T00:00:00Z' and less than '2023-01-02T00:00:00Z' [timestamp.gt_lt] + // google.protobuf.Timestamp another_value = 2 [(buf.validate.field).timestamp = { gt: { seconds: 1672444800 }, lt: { seconds: 1672531200 } }]; + // + // // timestamp must be greater than '2023-01-02T00:00:00Z' or less than '2023-01-01T00:00:00Z' [timestamp.gt_lt_exclusive] + // google.protobuf.Timestamp other_value = 3 [(buf.validate.field).timestamp = { gt: { seconds: 1672531200 }, lt: { seconds: 1672444800 } }]; + // } + // + // ``` + Gt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=gt,oneof"` +} + +type timestampRules_Gte struct { + // `gte` requires the timestamp field value to be greater than or equal to the + // specified value (exclusive). If the value of `gte` is larger than a + // specified `lt` or `lte`, the range is reversed, and the field value + // must be outside the specified range. If the field value doesn't meet + // the required conditions, an error message is generated. 
+ // + // ```proto + // + // message MyTimestamp { + // // timestamp must be greater than or equal to '2023-01-01T00:00:00Z' [timestamp.gte] + // google.protobuf.Timestamp value = 1 [(buf.validate.field).timestamp.gte = { seconds: 1672444800 }]; + // + // // timestamp must be greater than or equal to '2023-01-01T00:00:00Z' and less than '2023-01-02T00:00:00Z' [timestamp.gte_lt] + // google.protobuf.Timestamp another_value = 2 [(buf.validate.field).timestamp = { gte: { seconds: 1672444800 }, lt: { seconds: 1672531200 } }]; + // + // // timestamp must be greater than or equal to '2023-01-02T00:00:00Z' or less than '2023-01-01T00:00:00Z' [timestamp.gte_lt_exclusive] + // google.protobuf.Timestamp other_value = 3 [(buf.validate.field).timestamp = { gte: { seconds: 1672531200 }, lt: { seconds: 1672444800 } }]; + // } + // + // ``` + Gte *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=gte,oneof"` +} + +type timestampRules_GtNow struct { + // `gt_now` specifies that this field, of the `google.protobuf.Timestamp` type, must be greater than the current time. `gt_now` can only be used with the `within` rule. + // + // ```proto + // + // message MyTimestamp { + // // value must be greater than now + // google.protobuf.Timestamp created_at = 1 [(buf.validate.field).timestamp.gt_now = true]; + // } + // + // ``` + GtNow bool `protobuf:"varint,8,opt,name=gt_now,json=gtNow,oneof"` +} + +func (*timestampRules_Gt) isTimestampRules_GreaterThan() {} + +func (*timestampRules_Gte) isTimestampRules_GreaterThan() {} + +func (*timestampRules_GtNow) isTimestampRules_GreaterThan() {} + +// `Violations` is a collection of `Violation` messages. This message type is returned by +// Protovalidate when a proto message fails to meet the requirements set by the `Rule` validation rules. +// Each individual violation is represented by a `Violation` message. 
+type Violations struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Violations *[]*Violation `protobuf:"bytes,1,rep,name=violations"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Violations) Reset() { + *x = Violations{} + mi := &file_buf_validate_validate_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Violations) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Violations) ProtoMessage() {} + +func (x *Violations) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[28] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Violations) GetViolations() []*Violation { + if x != nil { + if x.xxx_hidden_Violations != nil { + return *x.xxx_hidden_Violations + } + } + return nil +} + +func (x *Violations) SetViolations(v []*Violation) { + x.xxx_hidden_Violations = &v +} + +type Violations_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `violations` is a repeated field that contains all the `Violation` messages corresponding to the violations detected. + Violations []*Violation +} + +func (b0 Violations_builder) Build() *Violations { + m0 := &Violations{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Violations = &b.Violations + return m0 +} + +// `Violation` represents a single instance where a validation rule, expressed +// as a `Rule`, was not met. It provides information about the field that +// caused the violation, the specific rule that wasn't fulfilled, and a +// human-readable error message. 
+// +// For example, consider the following message: +// +// ```proto +// +// message User { +// int32 age = 1 [(buf.validate.field).cel = { +// id: "user.age", +// expression: "this < 18 ? 'User must be at least 18 years old' : ''", +// }]; +// } +// +// ``` +// +// It could produce the following violation: +// +// ```json +// +// { +// "ruleId": "user.age", +// "message": "User must be at least 18 years old", +// "field": { +// "elements": [ +// { +// "fieldNumber": 1, +// "fieldName": "age", +// "fieldType": "TYPE_INT32" +// } +// ] +// }, +// "rule": { +// "elements": [ +// { +// "fieldNumber": 23, +// "fieldName": "cel", +// "fieldType": "TYPE_MESSAGE", +// "index": "0" +// } +// ] +// } +// } +// +// ``` +type Violation struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Field *FieldPath `protobuf:"bytes,5,opt,name=field"` + xxx_hidden_Rule *FieldPath `protobuf:"bytes,6,opt,name=rule"` + xxx_hidden_RuleId *string `protobuf:"bytes,2,opt,name=rule_id,json=ruleId"` + xxx_hidden_Message *string `protobuf:"bytes,3,opt,name=message"` + xxx_hidden_ForKey bool `protobuf:"varint,4,opt,name=for_key,json=forKey"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Violation) Reset() { + *x = Violation{} + mi := &file_buf_validate_validate_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Violation) ProtoMessage() {} + +func (x *Violation) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[29] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Violation) GetField() *FieldPath { + if x != nil { + return 
x.xxx_hidden_Field + } + return nil +} + +func (x *Violation) GetRule() *FieldPath { + if x != nil { + return x.xxx_hidden_Rule + } + return nil +} + +func (x *Violation) GetRuleId() string { + if x != nil { + if x.xxx_hidden_RuleId != nil { + return *x.xxx_hidden_RuleId + } + return "" + } + return "" +} + +func (x *Violation) GetMessage() string { + if x != nil { + if x.xxx_hidden_Message != nil { + return *x.xxx_hidden_Message + } + return "" + } + return "" +} + +func (x *Violation) GetForKey() bool { + if x != nil { + return x.xxx_hidden_ForKey + } + return false +} + +func (x *Violation) SetField(v *FieldPath) { + x.xxx_hidden_Field = v +} + +func (x *Violation) SetRule(v *FieldPath) { + x.xxx_hidden_Rule = v +} + +func (x *Violation) SetRuleId(v string) { + x.xxx_hidden_RuleId = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 2, 5) +} + +func (x *Violation) SetMessage(v string) { + x.xxx_hidden_Message = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 3, 5) +} + +func (x *Violation) SetForKey(v bool) { + x.xxx_hidden_ForKey = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 4, 5) +} + +func (x *Violation) HasField() bool { + if x == nil { + return false + } + return x.xxx_hidden_Field != nil +} + +func (x *Violation) HasRule() bool { + if x == nil { + return false + } + return x.xxx_hidden_Rule != nil +} + +func (x *Violation) HasRuleId() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 2) +} + +func (x *Violation) HasMessage() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 3) +} + +func (x *Violation) HasForKey() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 4) +} + +func (x *Violation) ClearField() { + x.xxx_hidden_Field = nil +} + +func (x *Violation) ClearRule() { + x.xxx_hidden_Rule = nil +} + +func (x *Violation) ClearRuleId() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 2) + x.xxx_hidden_RuleId = nil +} + 
+func (x *Violation) ClearMessage() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 3) + x.xxx_hidden_Message = nil +} + +func (x *Violation) ClearForKey() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 4) + x.xxx_hidden_ForKey = false +} + +type Violation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `field` is a machine-readable path to the field that failed validation. + // This could be a nested field, in which case the path will include all the parent fields leading to the actual field that caused the violation. + // + // For example, consider the following message: + // + // ```proto + // + // message Message { + // bool a = 1 [(buf.validate.field).required = true]; + // } + // + // ``` + // + // It could produce the following violation: + // + // ```textproto + // + // violation { + // field { element { field_number: 1, field_name: "a", field_type: 8 } } + // ... + // } + // + // ``` + Field *FieldPath + // `rule` is a machine-readable path that points to the specific rule that failed validation. + // This will be a nested field starting from the FieldRules of the field that failed validation. + // For custom rules, this will provide the path of the rule, e.g. `cel[0]`. + // + // For example, consider the following message: + // + // ```proto + // + // message Message { + // bool a = 1 [(buf.validate.field).required = true]; + // bool b = 2 [(buf.validate.field).cel = { + // id: "custom_rule", + // expression: "!this ? 'b must be true': ''" + // }] + // } + // + // ``` + // + // It could produce the following violations: + // + // ```textproto + // + // violation { + // rule { element { field_number: 25, field_name: "required", field_type: 8 } } + // ... + // } + // + // violation { + // rule { element { field_number: 23, field_name: "cel", field_type: 11, index: 0 } } + // ... 
+ // } + // + // ``` + Rule *FieldPath + // `rule_id` is the unique identifier of the `Rule` that was not fulfilled. + // This is the same `id` that was specified in the `Rule` message, allowing easy tracing of which rule was violated. + RuleId *string + // `message` is a human-readable error message that describes the nature of the violation. + // This can be the default error message from the violated `Rule`, or it can be a custom message that gives more context about the violation. + Message *string + // `for_key` indicates whether the violation was caused by a map key, rather than a value. + ForKey *bool +} + +func (b0 Violation_builder) Build() *Violation { + m0 := &Violation{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Field = b.Field + x.xxx_hidden_Rule = b.Rule + if b.RuleId != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 2, 5) + x.xxx_hidden_RuleId = b.RuleId + } + if b.Message != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 3, 5) + x.xxx_hidden_Message = b.Message + } + if b.ForKey != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 4, 5) + x.xxx_hidden_ForKey = *b.ForKey + } + return m0 +} + +// `FieldPath` provides a path to a nested protobuf field. +// +// This message provides enough information to render a dotted field path even without protobuf descriptors. +// It also provides enough information to resolve a nested field through unknown wire data. 
+type FieldPath struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Elements *[]*FieldPathElement `protobuf:"bytes,1,rep,name=elements"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FieldPath) Reset() { + *x = FieldPath{} + mi := &file_buf_validate_validate_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FieldPath) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldPath) ProtoMessage() {} + +func (x *FieldPath) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FieldPath) GetElements() []*FieldPathElement { + if x != nil { + if x.xxx_hidden_Elements != nil { + return *x.xxx_hidden_Elements + } + } + return nil +} + +func (x *FieldPath) SetElements(v []*FieldPathElement) { + x.xxx_hidden_Elements = &v +} + +type FieldPath_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `elements` contains each element of the path, starting from the root and recursing downward. + Elements []*FieldPathElement +} + +func (b0 FieldPath_builder) Build() *FieldPath { + m0 := &FieldPath{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Elements = &b.Elements + return m0 +} + +// `FieldPathElement` provides enough information to nest through a single protobuf field. +// +// If the selected field is a map or repeated field, the `subscript` value selects a specific element from it. +// A path that refers to a value nested under a map key or repeated field index will have a `subscript` value. +// The `field_type` field allows unambiguous resolution of a field even if descriptors are not available. 
+type FieldPathElement struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_FieldNumber int32 `protobuf:"varint,1,opt,name=field_number,json=fieldNumber"` + xxx_hidden_FieldName *string `protobuf:"bytes,2,opt,name=field_name,json=fieldName"` + xxx_hidden_FieldType descriptorpb.FieldDescriptorProto_Type `protobuf:"varint,3,opt,name=field_type,json=fieldType,enum=google.protobuf.FieldDescriptorProto_Type"` + xxx_hidden_KeyType descriptorpb.FieldDescriptorProto_Type `protobuf:"varint,4,opt,name=key_type,json=keyType,enum=google.protobuf.FieldDescriptorProto_Type"` + xxx_hidden_ValueType descriptorpb.FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=value_type,json=valueType,enum=google.protobuf.FieldDescriptorProto_Type"` + xxx_hidden_Subscript isFieldPathElement_Subscript `protobuf_oneof:"subscript"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FieldPathElement) Reset() { + *x = FieldPathElement{} + mi := &file_buf_validate_validate_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FieldPathElement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldPathElement) ProtoMessage() {} + +func (x *FieldPathElement) ProtoReflect() protoreflect.Message { + mi := &file_buf_validate_validate_proto_msgTypes[31] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FieldPathElement) GetFieldNumber() int32 { + if x != nil { + return x.xxx_hidden_FieldNumber + } + return 0 +} + +func (x *FieldPathElement) GetFieldName() string { + if x != nil { + if x.xxx_hidden_FieldName != nil { + return *x.xxx_hidden_FieldName + } + return "" + } + return "" +} + +func (x *FieldPathElement) GetFieldType() 
descriptorpb.FieldDescriptorProto_Type { + if x != nil { + if protoimpl.X.Present(&(x.XXX_presence[0]), 2) { + return x.xxx_hidden_FieldType + } + } + return descriptorpb.FieldDescriptorProto_Type(1) +} + +func (x *FieldPathElement) GetKeyType() descriptorpb.FieldDescriptorProto_Type { + if x != nil { + if protoimpl.X.Present(&(x.XXX_presence[0]), 3) { + return x.xxx_hidden_KeyType + } + } + return descriptorpb.FieldDescriptorProto_Type(1) +} + +func (x *FieldPathElement) GetValueType() descriptorpb.FieldDescriptorProto_Type { + if x != nil { + if protoimpl.X.Present(&(x.XXX_presence[0]), 4) { + return x.xxx_hidden_ValueType + } + } + return descriptorpb.FieldDescriptorProto_Type(1) +} + +func (x *FieldPathElement) GetIndex() uint64 { + if x != nil { + if x, ok := x.xxx_hidden_Subscript.(*fieldPathElement_Index); ok { + return x.Index + } + } + return 0 +} + +func (x *FieldPathElement) GetBoolKey() bool { + if x != nil { + if x, ok := x.xxx_hidden_Subscript.(*fieldPathElement_BoolKey); ok { + return x.BoolKey + } + } + return false +} + +func (x *FieldPathElement) GetIntKey() int64 { + if x != nil { + if x, ok := x.xxx_hidden_Subscript.(*fieldPathElement_IntKey); ok { + return x.IntKey + } + } + return 0 +} + +func (x *FieldPathElement) GetUintKey() uint64 { + if x != nil { + if x, ok := x.xxx_hidden_Subscript.(*fieldPathElement_UintKey); ok { + return x.UintKey + } + } + return 0 +} + +func (x *FieldPathElement) GetStringKey() string { + if x != nil { + if x, ok := x.xxx_hidden_Subscript.(*fieldPathElement_StringKey); ok { + return x.StringKey + } + } + return "" +} + +func (x *FieldPathElement) SetFieldNumber(v int32) { + x.xxx_hidden_FieldNumber = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 6) +} + +func (x *FieldPathElement) SetFieldName(v string) { + x.xxx_hidden_FieldName = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 1, 6) +} + +func (x *FieldPathElement) SetFieldType(v descriptorpb.FieldDescriptorProto_Type) { + x.xxx_hidden_FieldType = v + 
protoimpl.X.SetPresent(&(x.XXX_presence[0]), 2, 6) +} + +func (x *FieldPathElement) SetKeyType(v descriptorpb.FieldDescriptorProto_Type) { + x.xxx_hidden_KeyType = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 3, 6) +} + +func (x *FieldPathElement) SetValueType(v descriptorpb.FieldDescriptorProto_Type) { + x.xxx_hidden_ValueType = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 4, 6) +} + +func (x *FieldPathElement) SetIndex(v uint64) { + x.xxx_hidden_Subscript = &fieldPathElement_Index{v} +} + +func (x *FieldPathElement) SetBoolKey(v bool) { + x.xxx_hidden_Subscript = &fieldPathElement_BoolKey{v} +} + +func (x *FieldPathElement) SetIntKey(v int64) { + x.xxx_hidden_Subscript = &fieldPathElement_IntKey{v} +} + +func (x *FieldPathElement) SetUintKey(v uint64) { + x.xxx_hidden_Subscript = &fieldPathElement_UintKey{v} +} + +func (x *FieldPathElement) SetStringKey(v string) { + x.xxx_hidden_Subscript = &fieldPathElement_StringKey{v} +} + +func (x *FieldPathElement) HasFieldNumber() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *FieldPathElement) HasFieldName() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 1) +} + +func (x *FieldPathElement) HasFieldType() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 2) +} + +func (x *FieldPathElement) HasKeyType() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 3) +} + +func (x *FieldPathElement) HasValueType() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 4) +} + +func (x *FieldPathElement) HasSubscript() bool { + if x == nil { + return false + } + return x.xxx_hidden_Subscript != nil +} + +func (x *FieldPathElement) HasIndex() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Subscript.(*fieldPathElement_Index) + return ok +} + +func (x *FieldPathElement) HasBoolKey() 
bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Subscript.(*fieldPathElement_BoolKey) + return ok +} + +func (x *FieldPathElement) HasIntKey() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Subscript.(*fieldPathElement_IntKey) + return ok +} + +func (x *FieldPathElement) HasUintKey() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Subscript.(*fieldPathElement_UintKey) + return ok +} + +func (x *FieldPathElement) HasStringKey() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Subscript.(*fieldPathElement_StringKey) + return ok +} + +func (x *FieldPathElement) ClearFieldNumber() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_FieldNumber = 0 +} + +func (x *FieldPathElement) ClearFieldName() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 1) + x.xxx_hidden_FieldName = nil +} + +func (x *FieldPathElement) ClearFieldType() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 2) + x.xxx_hidden_FieldType = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE +} + +func (x *FieldPathElement) ClearKeyType() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 3) + x.xxx_hidden_KeyType = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE +} + +func (x *FieldPathElement) ClearValueType() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 4) + x.xxx_hidden_ValueType = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE +} + +func (x *FieldPathElement) ClearSubscript() { + x.xxx_hidden_Subscript = nil +} + +func (x *FieldPathElement) ClearIndex() { + if _, ok := x.xxx_hidden_Subscript.(*fieldPathElement_Index); ok { + x.xxx_hidden_Subscript = nil + } +} + +func (x *FieldPathElement) ClearBoolKey() { + if _, ok := x.xxx_hidden_Subscript.(*fieldPathElement_BoolKey); ok { + x.xxx_hidden_Subscript = nil + } +} + +func (x *FieldPathElement) ClearIntKey() { + if _, ok := x.xxx_hidden_Subscript.(*fieldPathElement_IntKey); ok { + x.xxx_hidden_Subscript = nil + } +} + +func (x *FieldPathElement) ClearUintKey() { + 
if _, ok := x.xxx_hidden_Subscript.(*fieldPathElement_UintKey); ok { + x.xxx_hidden_Subscript = nil + } +} + +func (x *FieldPathElement) ClearStringKey() { + if _, ok := x.xxx_hidden_Subscript.(*fieldPathElement_StringKey); ok { + x.xxx_hidden_Subscript = nil + } +} + +const FieldPathElement_Subscript_not_set_case case_FieldPathElement_Subscript = 0 +const FieldPathElement_Index_case case_FieldPathElement_Subscript = 6 +const FieldPathElement_BoolKey_case case_FieldPathElement_Subscript = 7 +const FieldPathElement_IntKey_case case_FieldPathElement_Subscript = 8 +const FieldPathElement_UintKey_case case_FieldPathElement_Subscript = 9 +const FieldPathElement_StringKey_case case_FieldPathElement_Subscript = 10 + +func (x *FieldPathElement) WhichSubscript() case_FieldPathElement_Subscript { + if x == nil { + return FieldPathElement_Subscript_not_set_case + } + switch x.xxx_hidden_Subscript.(type) { + case *fieldPathElement_Index: + return FieldPathElement_Index_case + case *fieldPathElement_BoolKey: + return FieldPathElement_BoolKey_case + case *fieldPathElement_IntKey: + return FieldPathElement_IntKey_case + case *fieldPathElement_UintKey: + return FieldPathElement_UintKey_case + case *fieldPathElement_StringKey: + return FieldPathElement_StringKey_case + default: + return FieldPathElement_Subscript_not_set_case + } +} + +type FieldPathElement_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `field_number` is the field number this path element refers to. + FieldNumber *int32 + // `field_name` contains the field name this path element refers to. + // This can be used to display a human-readable path even if the field number is unknown. + FieldName *string + // `field_type` specifies the type of this field. When using reflection, this value is not needed. + // + // This value is provided to make it possible to traverse unknown fields through wire data. 
+ // When traversing wire data, be mindful of both packed[1] and delimited[2] encoding schemes. + // + // N.B.: Although groups are deprecated, the corresponding delimited encoding scheme is not, and + // can be explicitly used in Protocol Buffers 2023 Edition. + // + // [1]: https://protobuf.dev/programming-guides/encoding/#packed + // [2]: https://protobuf.dev/programming-guides/encoding/#groups + FieldType *descriptorpb.FieldDescriptorProto_Type + // `key_type` specifies the map key type of this field. This value is useful when traversing + // unknown fields through wire data: specifically, it allows handling the differences between + // different integer encodings. + KeyType *descriptorpb.FieldDescriptorProto_Type + // `value_type` specifies map value type of this field. This is useful if you want to display a + // value inside unknown fields through wire data. + ValueType *descriptorpb.FieldDescriptorProto_Type + // `subscript` contains a repeated index or map key, if this path element nests into a repeated or map field. + + // Fields of oneof xxx_hidden_Subscript: + // `index` specifies a 0-based index into a repeated field. + Index *uint64 + // `bool_key` specifies a map key of type bool. + BoolKey *bool + // `int_key` specifies a map key of type int32, int64, sint32, sint64, sfixed32 or sfixed64. + IntKey *int64 + // `uint_key` specifies a map key of type uint32, uint64, fixed32 or fixed64. + UintKey *uint64 + // `string_key` specifies a map key of type string. 
+ StringKey *string + // -- end of xxx_hidden_Subscript +} + +func (b0 FieldPathElement_builder) Build() *FieldPathElement { + m0 := &FieldPathElement{} + b, x := &b0, m0 + _, _ = b, x + if b.FieldNumber != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 6) + x.xxx_hidden_FieldNumber = *b.FieldNumber + } + if b.FieldName != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 1, 6) + x.xxx_hidden_FieldName = b.FieldName + } + if b.FieldType != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 2, 6) + x.xxx_hidden_FieldType = *b.FieldType + } + if b.KeyType != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 3, 6) + x.xxx_hidden_KeyType = *b.KeyType + } + if b.ValueType != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 4, 6) + x.xxx_hidden_ValueType = *b.ValueType + } + if b.Index != nil { + x.xxx_hidden_Subscript = &fieldPathElement_Index{*b.Index} + } + if b.BoolKey != nil { + x.xxx_hidden_Subscript = &fieldPathElement_BoolKey{*b.BoolKey} + } + if b.IntKey != nil { + x.xxx_hidden_Subscript = &fieldPathElement_IntKey{*b.IntKey} + } + if b.UintKey != nil { + x.xxx_hidden_Subscript = &fieldPathElement_UintKey{*b.UintKey} + } + if b.StringKey != nil { + x.xxx_hidden_Subscript = &fieldPathElement_StringKey{*b.StringKey} + } + return m0 +} + +type case_FieldPathElement_Subscript protoreflect.FieldNumber + +func (x case_FieldPathElement_Subscript) String() string { + md := file_buf_validate_validate_proto_msgTypes[31].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isFieldPathElement_Subscript interface { + isFieldPathElement_Subscript() +} + +type fieldPathElement_Index struct { + // `index` specifies a 0-based index into a repeated field. + Index uint64 `protobuf:"varint,6,opt,name=index,oneof"` +} + +type fieldPathElement_BoolKey struct { + // `bool_key` specifies a map key of type bool. 
+ BoolKey bool `protobuf:"varint,7,opt,name=bool_key,json=boolKey,oneof"` +} + +type fieldPathElement_IntKey struct { + // `int_key` specifies a map key of type int32, int64, sint32, sint64, sfixed32 or sfixed64. + IntKey int64 `protobuf:"varint,8,opt,name=int_key,json=intKey,oneof"` +} + +type fieldPathElement_UintKey struct { + // `uint_key` specifies a map key of type uint32, uint64, fixed32 or fixed64. + UintKey uint64 `protobuf:"varint,9,opt,name=uint_key,json=uintKey,oneof"` +} + +type fieldPathElement_StringKey struct { + // `string_key` specifies a map key of type string. + StringKey string `protobuf:"bytes,10,opt,name=string_key,json=stringKey,oneof"` +} + +func (*fieldPathElement_Index) isFieldPathElement_Subscript() {} + +func (*fieldPathElement_BoolKey) isFieldPathElement_Subscript() {} + +func (*fieldPathElement_IntKey) isFieldPathElement_Subscript() {} + +func (*fieldPathElement_UintKey) isFieldPathElement_Subscript() {} + +func (*fieldPathElement_StringKey) isFieldPathElement_Subscript() {} + +var file_buf_validate_validate_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*MessageRules)(nil), + Field: 1159, + Name: "buf.validate.message", + Tag: "bytes,1159,opt,name=message", + Filename: "buf/validate/validate.proto", + }, + { + ExtendedType: (*descriptorpb.OneofOptions)(nil), + ExtensionType: (*OneofRules)(nil), + Field: 1159, + Name: "buf.validate.oneof", + Tag: "bytes,1159,opt,name=oneof", + Filename: "buf/validate/validate.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldRules)(nil), + Field: 1159, + Name: "buf.validate.field", + Tag: "bytes,1159,opt,name=field", + Filename: "buf/validate/validate.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*PredefinedRules)(nil), + Field: 1160, + Name: "buf.validate.predefined", + Tag: "bytes,1160,opt,name=predefined", + Filename: 
"buf/validate/validate.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. +var ( + // Rules specify the validations to be performed on this message. By default, + // no validation is performed against a message. + // + // optional buf.validate.MessageRules message = 1159; + E_Message = &file_buf_validate_validate_proto_extTypes[0] +) + +// Extension fields to descriptorpb.OneofOptions. +var ( + // Rules specify the validations to be performed on this oneof. By default, + // no validation is performed against a oneof. + // + // optional buf.validate.OneofRules oneof = 1159; + E_Oneof = &file_buf_validate_validate_proto_extTypes[1] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Rules specify the validations to be performed on this field. By default, + // no validation is performed against a field. + // + // optional buf.validate.FieldRules field = 1159; + E_Field = &file_buf_validate_validate_proto_extTypes[2] + // Specifies predefined rules. When extending a standard rule message, + // this adds additional CEL expressions that apply when the extension is used. 
+ // + // ```proto + // + // extend buf.validate.Int32Rules { + // bool is_zero [(buf.validate.predefined).cel = { + // id: "int32.is_zero", + // message: "value must be zero", + // expression: "!rule || this == 0", + // }]; + // } + // + // message Foo { + // int32 reserved = 1 [(buf.validate.field).int32.(is_zero) = true]; + // } + // + // ``` + // + // optional buf.validate.PredefinedRules predefined = 1160; + E_Predefined = &file_buf_validate_validate_proto_extTypes[3] +) + +var File_buf_validate_validate_proto protoreflect.FileDescriptor + +const file_buf_validate_validate_proto_rawDesc = "" + + "\n" + + "\x1bbuf/validate/validate.proto\x12\fbuf.validate\x1a google/protobuf/descriptor.proto\x1a\x1egoogle/protobuf/duration.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"P\n" + + "\x04Rule\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\x12\x1e\n" + + "\n" + + "expression\x18\x03 \x01(\tR\n" + + "expression\"\xa1\x01\n" + + "\fMessageRules\x12%\n" + + "\x0ecel_expression\x18\x05 \x03(\tR\rcelExpression\x12$\n" + + "\x03cel\x18\x03 \x03(\v2\x12.buf.validate.RuleR\x03cel\x124\n" + + "\x05oneof\x18\x04 \x03(\v2\x1e.buf.validate.MessageOneofRuleR\x05oneofJ\x04\b\x01\x10\x02R\bdisabled\"F\n" + + "\x10MessageOneofRule\x12\x16\n" + + "\x06fields\x18\x01 \x03(\tR\x06fields\x12\x1a\n" + + "\brequired\x18\x02 \x01(\bR\brequired\"(\n" + + "\n" + + "OneofRules\x12\x1a\n" + + "\brequired\x18\x01 \x01(\bR\brequired\"\xe3\n" + + "\n" + + "\n" + + "FieldRules\x12%\n" + + "\x0ecel_expression\x18\x1d \x03(\tR\rcelExpression\x12$\n" + + "\x03cel\x18\x17 \x03(\v2\x12.buf.validate.RuleR\x03cel\x12\x1a\n" + + "\brequired\x18\x19 \x01(\bR\brequired\x12,\n" + + "\x06ignore\x18\x1b \x01(\x0e2\x14.buf.validate.IgnoreR\x06ignore\x120\n" + + "\x05float\x18\x01 \x01(\v2\x18.buf.validate.FloatRulesH\x00R\x05float\x123\n" + + "\x06double\x18\x02 \x01(\v2\x19.buf.validate.DoubleRulesH\x00R\x06double\x120\n" + 
+ "\x05int32\x18\x03 \x01(\v2\x18.buf.validate.Int32RulesH\x00R\x05int32\x120\n" + + "\x05int64\x18\x04 \x01(\v2\x18.buf.validate.Int64RulesH\x00R\x05int64\x123\n" + + "\x06uint32\x18\x05 \x01(\v2\x19.buf.validate.UInt32RulesH\x00R\x06uint32\x123\n" + + "\x06uint64\x18\x06 \x01(\v2\x19.buf.validate.UInt64RulesH\x00R\x06uint64\x123\n" + + "\x06sint32\x18\a \x01(\v2\x19.buf.validate.SInt32RulesH\x00R\x06sint32\x123\n" + + "\x06sint64\x18\b \x01(\v2\x19.buf.validate.SInt64RulesH\x00R\x06sint64\x126\n" + + "\afixed32\x18\t \x01(\v2\x1a.buf.validate.Fixed32RulesH\x00R\afixed32\x126\n" + + "\afixed64\x18\n" + + " \x01(\v2\x1a.buf.validate.Fixed64RulesH\x00R\afixed64\x129\n" + + "\bsfixed32\x18\v \x01(\v2\x1b.buf.validate.SFixed32RulesH\x00R\bsfixed32\x129\n" + + "\bsfixed64\x18\f \x01(\v2\x1b.buf.validate.SFixed64RulesH\x00R\bsfixed64\x12-\n" + + "\x04bool\x18\r \x01(\v2\x17.buf.validate.BoolRulesH\x00R\x04bool\x123\n" + + "\x06string\x18\x0e \x01(\v2\x19.buf.validate.StringRulesH\x00R\x06string\x120\n" + + "\x05bytes\x18\x0f \x01(\v2\x18.buf.validate.BytesRulesH\x00R\x05bytes\x12-\n" + + "\x04enum\x18\x10 \x01(\v2\x17.buf.validate.EnumRulesH\x00R\x04enum\x129\n" + + "\brepeated\x18\x12 \x01(\v2\x1b.buf.validate.RepeatedRulesH\x00R\brepeated\x12*\n" + + "\x03map\x18\x13 \x01(\v2\x16.buf.validate.MapRulesH\x00R\x03map\x12*\n" + + "\x03any\x18\x14 \x01(\v2\x16.buf.validate.AnyRulesH\x00R\x03any\x129\n" + + "\bduration\x18\x15 \x01(\v2\x1b.buf.validate.DurationRulesH\x00R\bduration\x12=\n" + + "\n" + + "field_mask\x18\x1c \x01(\v2\x1c.buf.validate.FieldMaskRulesH\x00R\tfieldMask\x12<\n" + + "\ttimestamp\x18\x16 \x01(\v2\x1c.buf.validate.TimestampRulesH\x00R\ttimestampB\x06\n" + + "\x04typeJ\x04\b\x18\x10\x19J\x04\b\x1a\x10\x1bR\askippedR\fignore_empty\"Z\n" + + "\x0fPredefinedRules\x12$\n" + + "\x03cel\x18\x01 \x03(\v2\x12.buf.validate.RuleR\x03celJ\x04\b\x18\x10\x19J\x04\b\x1a\x10\x1bR\askippedR\fignore_empty\"\x90\x18\n" + + "\n" + + "FloatRules\x12\x8a\x01\n" + + 
"\x05const\x18\x01 \x01(\x02Bt\xc2Hq\n" + + "o\n" + + "\vfloat.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\xa3\x01\n" + + "\x02lt\x18\x02 \x01(\x02B\x90\x01\xc2H\x8c\x01\n" + + "\x89\x01\n" + + "\bfloat.lt\x1a}!has(rules.gte) && !has(rules.gt) && (this.isNan() || this >= rules.lt)? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xb4\x01\n" + + "\x03lte\x18\x03 \x01(\x02B\x9f\x01\xc2H\x9b\x01\n" + + "\x98\x01\n" + + "\tfloat.lte\x1a\x8a\x01!has(rules.gte) && !has(rules.gt) && (this.isNan() || this > rules.lte)? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xf3\a\n" + + "\x02gt\x18\x04 \x01(\x02B\xe0\a\xc2H\xdc\a\n" + + "\x8d\x01\n" + + "\bfloat.gt\x1a\x80\x01!has(rules.lt) && !has(rules.lte) && (this.isNan() || this <= rules.gt)? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xc3\x01\n" + + "\vfloat.gt_lt\x1a\xb3\x01has(rules.lt) && rules.lt >= rules.gt && (this.isNan() || this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xcd\x01\n" + + "\x15float.gt_lt_exclusive\x1a\xb3\x01has(rules.lt) && rules.lt < rules.gt && (this.isNan() || (rules.lt <= this && this <= rules.gt))? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xd3\x01\n" + + "\ffloat.gt_lte\x1a\xc2\x01has(rules.lte) && rules.lte >= rules.gt && (this.isNan() || this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xdd\x01\n" + + "\x16float.gt_lte_exclusive\x1a\xc2\x01has(rules.lte) && rules.lte < rules.gt && (this.isNan() || (rules.lte < this && this <= rules.gt))? 
'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xbf\b\n" + + "\x03gte\x18\x05 \x01(\x02B\xaa\b\xc2H\xa6\b\n" + + "\x9b\x01\n" + + "\tfloat.gte\x1a\x8d\x01!has(rules.lt) && !has(rules.lte) && (this.isNan() || this < rules.gte)? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xd2\x01\n" + + "\ffloat.gte_lt\x1a\xc1\x01has(rules.lt) && rules.lt >= rules.gte && (this.isNan() || this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xdc\x01\n" + + "\x16float.gte_lt_exclusive\x1a\xc1\x01has(rules.lt) && rules.lt < rules.gte && (this.isNan() || (rules.lt <= this && this < rules.gte))? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xe2\x01\n" + + "\rfloat.gte_lte\x1a\xd0\x01has(rules.lte) && rules.lte >= rules.gte && (this.isNan() || this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xec\x01\n" + + "\x17float.gte_lte_exclusive\x1a\xd0\x01has(rules.lte) && rules.lte < rules.gte && (this.isNan() || (rules.lte < this && this < rules.gte))? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x83\x01\n" + + "\x02in\x18\x06 \x03(\x02Bs\xc2Hp\n" + + "n\n" + + "\bfloat.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12}\n" + + "\x06not_in\x18\a \x03(\x02Bf\xc2Hc\n" + + "a\n" + + "\ffloat.not_in\x1aQthis in rules.not_in ? 'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x12}\n" + + "\x06finite\x18\b \x01(\bBe\xc2Hb\n" + + "`\n" + + "\ffloat.finite\x1aPrules.finite ? (this.isNan() || this.isInf() ? 
'value must be finite' : '') : ''R\x06finite\x124\n" + + "\aexample\x18\t \x03(\x02B\x1a\xc2H\x17\n" + + "\x15\n" + + "\rfloat.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xa2\x18\n" + + "\vDoubleRules\x12\x8b\x01\n" + + "\x05const\x18\x01 \x01(\x01Bu\xc2Hr\n" + + "p\n" + + "\fdouble.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\xa4\x01\n" + + "\x02lt\x18\x02 \x01(\x01B\x91\x01\xc2H\x8d\x01\n" + + "\x8a\x01\n" + + "\tdouble.lt\x1a}!has(rules.gte) && !has(rules.gt) && (this.isNan() || this >= rules.lt)? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xb5\x01\n" + + "\x03lte\x18\x03 \x01(\x01B\xa0\x01\xc2H\x9c\x01\n" + + "\x99\x01\n" + + "\n" + + "double.lte\x1a\x8a\x01!has(rules.gte) && !has(rules.gt) && (this.isNan() || this > rules.lte)? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xf8\a\n" + + "\x02gt\x18\x04 \x01(\x01B\xe5\a\xc2H\xe1\a\n" + + "\x8e\x01\n" + + "\tdouble.gt\x1a\x80\x01!has(rules.lt) && !has(rules.lte) && (this.isNan() || this <= rules.gt)? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xc4\x01\n" + + "\fdouble.gt_lt\x1a\xb3\x01has(rules.lt) && rules.lt >= rules.gt && (this.isNan() || this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xce\x01\n" + + "\x16double.gt_lt_exclusive\x1a\xb3\x01has(rules.lt) && rules.lt < rules.gt && (this.isNan() || (rules.lt <= this && this <= rules.gt))? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xd4\x01\n" + + "\rdouble.gt_lte\x1a\xc2\x01has(rules.lte) && rules.lte >= rules.gt && (this.isNan() || this > rules.lte || this <= rules.gt)? 
'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xde\x01\n" + + "\x17double.gt_lte_exclusive\x1a\xc2\x01has(rules.lte) && rules.lte < rules.gt && (this.isNan() || (rules.lte < this && this <= rules.gt))? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xc4\b\n" + + "\x03gte\x18\x05 \x01(\x01B\xaf\b\xc2H\xab\b\n" + + "\x9c\x01\n" + + "\n" + + "double.gte\x1a\x8d\x01!has(rules.lt) && !has(rules.lte) && (this.isNan() || this < rules.gte)? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xd3\x01\n" + + "\rdouble.gte_lt\x1a\xc1\x01has(rules.lt) && rules.lt >= rules.gte && (this.isNan() || this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xdd\x01\n" + + "\x17double.gte_lt_exclusive\x1a\xc1\x01has(rules.lt) && rules.lt < rules.gte && (this.isNan() || (rules.lt <= this && this < rules.gte))? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xe3\x01\n" + + "\x0edouble.gte_lte\x1a\xd0\x01has(rules.lte) && rules.lte >= rules.gte && (this.isNan() || this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xed\x01\n" + + "\x18double.gte_lte_exclusive\x1a\xd0\x01has(rules.lte) && rules.lte < rules.gte && (this.isNan() || (rules.lte < this && this < rules.gte))? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x84\x01\n" + + "\x02in\x18\x06 \x03(\x01Bt\xc2Hq\n" + + "o\n" + + "\tdouble.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12~\n" + + "\x06not_in\x18\a \x03(\x01Bg\xc2Hd\n" + + "b\n" + + "\rdouble.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x12~\n" + + "\x06finite\x18\b \x01(\bBf\xc2Hc\n" + + "a\n" + + "\rdouble.finite\x1aPrules.finite ? (this.isNan() || this.isInf() ? 'value must be finite' : '') : ''R\x06finite\x125\n" + + "\aexample\x18\t \x03(\x01B\x1b\xc2H\x18\n" + + "\x16\n" + + "\x0edouble.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xba\x15\n" + + "\n" + + "Int32Rules\x12\x8a\x01\n" + + "\x05const\x18\x01 \x01(\x05Bt\xc2Hq\n" + + "o\n" + + "\vint32.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x8e\x01\n" + + "\x02lt\x18\x02 \x01(\x05B|\xc2Hy\n" + + "w\n" + + "\bint32.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa1\x01\n" + + "\x03lte\x18\x03 \x01(\x05B\x8c\x01\xc2H\x88\x01\n" + + "\x85\x01\n" + + "\tint32.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\x9b\a\n" + + "\x02gt\x18\x04 \x01(\x05B\x88\a\xc2H\x84\a\n" + + "z\n" + + "\bint32.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb3\x01\n" + + "\vint32.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbb\x01\n" + + "\x15int32.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc3\x01\n" + + "\fint32.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 
'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcb\x01\n" + + "\x16int32.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xe8\a\n" + + "\x03gte\x18\x05 \x01(\x05B\xd3\a\xc2H\xcf\a\n" + + "\x88\x01\n" + + "\tint32.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc2\x01\n" + + "\fint32.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xca\x01\n" + + "\x16int32.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd2\x01\n" + + "\rint32.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xda\x01\n" + + "\x17int32.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x83\x01\n" + + "\x02in\x18\x06 \x03(\x05Bs\xc2Hp\n" + + "n\n" + + "\bint32.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12}\n" + + "\x06not_in\x18\a \x03(\x05Bf\xc2Hc\n" + + "a\n" + + "\fint32.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x124\n" + + "\aexample\x18\b \x03(\x05B\x1a\xc2H\x17\n" + + "\x15\n" + + "\rint32.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xba\x15\n" + + "\n" + + "Int64Rules\x12\x8a\x01\n" + + "\x05const\x18\x01 \x01(\x03Bt\xc2Hq\n" + + "o\n" + + "\vint64.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x8e\x01\n" + + "\x02lt\x18\x02 \x01(\x03B|\xc2Hy\n" + + "w\n" + + "\bint64.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa1\x01\n" + + "\x03lte\x18\x03 \x01(\x03B\x8c\x01\xc2H\x88\x01\n" + + "\x85\x01\n" + + "\tint64.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\x9b\a\n" + + "\x02gt\x18\x04 \x01(\x03B\x88\a\xc2H\x84\a\n" + + "z\n" + + "\bint64.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb3\x01\n" + + "\vint64.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbb\x01\n" + + "\x15int64.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc3\x01\n" + + "\fint64.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcb\x01\n" + + "\x16int64.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 
'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xe8\a\n" + + "\x03gte\x18\x05 \x01(\x03B\xd3\a\xc2H\xcf\a\n" + + "\x88\x01\n" + + "\tint64.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc2\x01\n" + + "\fint64.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xca\x01\n" + + "\x16int64.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd2\x01\n" + + "\rint64.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xda\x01\n" + + "\x17int64.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x83\x01\n" + + "\x02in\x18\x06 \x03(\x03Bs\xc2Hp\n" + + "n\n" + + "\bint64.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12}\n" + + "\x06not_in\x18\a \x03(\x03Bf\xc2Hc\n" + + "a\n" + + "\fint64.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x124\n" + + "\aexample\x18\t \x03(\x03B\x1a\xc2H\x17\n" + + "\x15\n" + + "\rint64.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xcb\x15\n" + + "\vUInt32Rules\x12\x8b\x01\n" + + "\x05const\x18\x01 \x01(\rBu\xc2Hr\n" + + "p\n" + + "\fuint32.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x8f\x01\n" + + "\x02lt\x18\x02 \x01(\rB}\xc2Hz\n" + + "x\n" + + "\tuint32.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa2\x01\n" + + "\x03lte\x18\x03 \x01(\rB\x8d\x01\xc2H\x89\x01\n" + + "\x86\x01\n" + + "\n" + + "uint32.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xa0\a\n" + + "\x02gt\x18\x04 \x01(\rB\x8d\a\xc2H\x89\a\n" + + "{\n" + + "\tuint32.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb4\x01\n" + + "\fuint32.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbc\x01\n" + + "\x16uint32.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc4\x01\n" + + "\ruint32.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcc\x01\n" + + "\x17uint32.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 
'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xed\a\n" + + "\x03gte\x18\x05 \x01(\rB\xd8\a\xc2H\xd4\a\n" + + "\x89\x01\n" + + "\n" + + "uint32.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc3\x01\n" + + "\ruint32.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcb\x01\n" + + "\x17uint32.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd3\x01\n" + + "\x0euint32.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdb\x01\n" + + "\x18uint32.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x84\x01\n" + + "\x02in\x18\x06 \x03(\rBt\xc2Hq\n" + + "o\n" + + "\tuint32.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12~\n" + + "\x06not_in\x18\a \x03(\rBg\xc2Hd\n" + + "b\n" + + "\ruint32.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x125\n" + + "\aexample\x18\b \x03(\rB\x1b\xc2H\x18\n" + + "\x16\n" + + "\x0euint32.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xcb\x15\n" + + "\vUInt64Rules\x12\x8b\x01\n" + + "\x05const\x18\x01 \x01(\x04Bu\xc2Hr\n" + + "p\n" + + "\fuint64.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x8f\x01\n" + + "\x02lt\x18\x02 \x01(\x04B}\xc2Hz\n" + + "x\n" + + "\tuint64.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa2\x01\n" + + "\x03lte\x18\x03 \x01(\x04B\x8d\x01\xc2H\x89\x01\n" + + "\x86\x01\n" + + "\n" + + "uint64.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xa0\a\n" + + "\x02gt\x18\x04 \x01(\x04B\x8d\a\xc2H\x89\a\n" + + "{\n" + + "\tuint64.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb4\x01\n" + + "\fuint64.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbc\x01\n" + + "\x16uint64.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc4\x01\n" + + "\ruint64.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcc\x01\n" + + "\x17uint64.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 
'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xed\a\n" + + "\x03gte\x18\x05 \x01(\x04B\xd8\a\xc2H\xd4\a\n" + + "\x89\x01\n" + + "\n" + + "uint64.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc3\x01\n" + + "\ruint64.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcb\x01\n" + + "\x17uint64.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd3\x01\n" + + "\x0euint64.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdb\x01\n" + + "\x18uint64.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x84\x01\n" + + "\x02in\x18\x06 \x03(\x04Bt\xc2Hq\n" + + "o\n" + + "\tuint64.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12~\n" + + "\x06not_in\x18\a \x03(\x04Bg\xc2Hd\n" + + "b\n" + + "\ruint64.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x125\n" + + "\aexample\x18\b \x03(\x04B\x1b\xc2H\x18\n" + + "\x16\n" + + "\x0euint64.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xcb\x15\n" + + "\vSInt32Rules\x12\x8b\x01\n" + + "\x05const\x18\x01 \x01(\x11Bu\xc2Hr\n" + + "p\n" + + "\fsint32.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x8f\x01\n" + + "\x02lt\x18\x02 \x01(\x11B}\xc2Hz\n" + + "x\n" + + "\tsint32.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa2\x01\n" + + "\x03lte\x18\x03 \x01(\x11B\x8d\x01\xc2H\x89\x01\n" + + "\x86\x01\n" + + "\n" + + "sint32.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xa0\a\n" + + "\x02gt\x18\x04 \x01(\x11B\x8d\a\xc2H\x89\a\n" + + "{\n" + + "\tsint32.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb4\x01\n" + + "\fsint32.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbc\x01\n" + + "\x16sint32.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc4\x01\n" + + "\rsint32.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcc\x01\n" + + "\x17sint32.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 
'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xed\a\n" + + "\x03gte\x18\x05 \x01(\x11B\xd8\a\xc2H\xd4\a\n" + + "\x89\x01\n" + + "\n" + + "sint32.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc3\x01\n" + + "\rsint32.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcb\x01\n" + + "\x17sint32.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd3\x01\n" + + "\x0esint32.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdb\x01\n" + + "\x18sint32.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x84\x01\n" + + "\x02in\x18\x06 \x03(\x11Bt\xc2Hq\n" + + "o\n" + + "\tsint32.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12~\n" + + "\x06not_in\x18\a \x03(\x11Bg\xc2Hd\n" + + "b\n" + + "\rsint32.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x125\n" + + "\aexample\x18\b \x03(\x11B\x1b\xc2H\x18\n" + + "\x16\n" + + "\x0esint32.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xcb\x15\n" + + "\vSInt64Rules\x12\x8b\x01\n" + + "\x05const\x18\x01 \x01(\x12Bu\xc2Hr\n" + + "p\n" + + "\fsint64.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x8f\x01\n" + + "\x02lt\x18\x02 \x01(\x12B}\xc2Hz\n" + + "x\n" + + "\tsint64.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa2\x01\n" + + "\x03lte\x18\x03 \x01(\x12B\x8d\x01\xc2H\x89\x01\n" + + "\x86\x01\n" + + "\n" + + "sint64.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xa0\a\n" + + "\x02gt\x18\x04 \x01(\x12B\x8d\a\xc2H\x89\a\n" + + "{\n" + + "\tsint64.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb4\x01\n" + + "\fsint64.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbc\x01\n" + + "\x16sint64.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc4\x01\n" + + "\rsint64.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcc\x01\n" + + "\x17sint64.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 
'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xed\a\n" + + "\x03gte\x18\x05 \x01(\x12B\xd8\a\xc2H\xd4\a\n" + + "\x89\x01\n" + + "\n" + + "sint64.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc3\x01\n" + + "\rsint64.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcb\x01\n" + + "\x17sint64.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd3\x01\n" + + "\x0esint64.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdb\x01\n" + + "\x18sint64.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x84\x01\n" + + "\x02in\x18\x06 \x03(\x12Bt\xc2Hq\n" + + "o\n" + + "\tsint64.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12~\n" + + "\x06not_in\x18\a \x03(\x12Bg\xc2Hd\n" + + "b\n" + + "\rsint64.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x125\n" + + "\aexample\x18\b \x03(\x12B\x1b\xc2H\x18\n" + + "\x16\n" + + "\x0esint64.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xdc\x15\n" + + "\fFixed32Rules\x12\x8c\x01\n" + + "\x05const\x18\x01 \x01(\aBv\xc2Hs\n" + + "q\n" + + "\rfixed32.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x90\x01\n" + + "\x02lt\x18\x02 \x01(\aB~\xc2H{\n" + + "y\n" + + "\n" + + "fixed32.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa3\x01\n" + + "\x03lte\x18\x03 \x01(\aB\x8e\x01\xc2H\x8a\x01\n" + + "\x87\x01\n" + + "\vfixed32.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xa5\a\n" + + "\x02gt\x18\x04 \x01(\aB\x92\a\xc2H\x8e\a\n" + + "|\n" + + "\n" + + "fixed32.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb5\x01\n" + + "\rfixed32.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbd\x01\n" + + "\x17fixed32.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc5\x01\n" + + "\x0efixed32.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcd\x01\n" + + "\x18fixed32.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 
'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xf2\a\n" + + "\x03gte\x18\x05 \x01(\aB\xdd\a\xc2H\xd9\a\n" + + "\x8a\x01\n" + + "\vfixed32.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc4\x01\n" + + "\x0efixed32.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcc\x01\n" + + "\x18fixed32.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd4\x01\n" + + "\x0ffixed32.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdc\x01\n" + + "\x19fixed32.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x85\x01\n" + + "\x02in\x18\x06 \x03(\aBu\xc2Hr\n" + + "p\n" + + "\n" + + "fixed32.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12\x7f\n" + + "\x06not_in\x18\a \x03(\aBh\xc2He\n" + + "c\n" + + "\x0efixed32.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x126\n" + + "\aexample\x18\b \x03(\aB\x1c\xc2H\x19\n" + + "\x17\n" + + "\x0ffixed32.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xdc\x15\n" + + "\fFixed64Rules\x12\x8c\x01\n" + + "\x05const\x18\x01 \x01(\x06Bv\xc2Hs\n" + + "q\n" + + "\rfixed64.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x90\x01\n" + + "\x02lt\x18\x02 \x01(\x06B~\xc2H{\n" + + "y\n" + + "\n" + + "fixed64.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa3\x01\n" + + "\x03lte\x18\x03 \x01(\x06B\x8e\x01\xc2H\x8a\x01\n" + + "\x87\x01\n" + + "\vfixed64.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xa5\a\n" + + "\x02gt\x18\x04 \x01(\x06B\x92\a\xc2H\x8e\a\n" + + "|\n" + + "\n" + + "fixed64.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb5\x01\n" + + "\rfixed64.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbd\x01\n" + + "\x17fixed64.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc5\x01\n" + + "\x0efixed64.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcd\x01\n" + + "\x18fixed64.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 
'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xf2\a\n" + + "\x03gte\x18\x05 \x01(\x06B\xdd\a\xc2H\xd9\a\n" + + "\x8a\x01\n" + + "\vfixed64.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc4\x01\n" + + "\x0efixed64.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcc\x01\n" + + "\x18fixed64.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd4\x01\n" + + "\x0ffixed64.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdc\x01\n" + + "\x19fixed64.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x85\x01\n" + + "\x02in\x18\x06 \x03(\x06Bu\xc2Hr\n" + + "p\n" + + "\n" + + "fixed64.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12\x7f\n" + + "\x06not_in\x18\a \x03(\x06Bh\xc2He\n" + + "c\n" + + "\x0efixed64.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x126\n" + + "\aexample\x18\b \x03(\x06B\x1c\xc2H\x19\n" + + "\x17\n" + + "\x0ffixed64.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xee\x15\n" + + "\rSFixed32Rules\x12\x8d\x01\n" + + "\x05const\x18\x01 \x01(\x0fBw\xc2Ht\n" + + "r\n" + + "\x0esfixed32.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x91\x01\n" + + "\x02lt\x18\x02 \x01(\x0fB\x7f\xc2H|\n" + + "z\n" + + "\vsfixed32.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa4\x01\n" + + "\x03lte\x18\x03 \x01(\x0fB\x8f\x01\xc2H\x8b\x01\n" + + "\x88\x01\n" + + "\fsfixed32.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xaa\a\n" + + "\x02gt\x18\x04 \x01(\x0fB\x97\a\xc2H\x93\a\n" + + "}\n" + + "\vsfixed32.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb6\x01\n" + + "\x0esfixed32.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbe\x01\n" + + "\x18sfixed32.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc6\x01\n" + + "\x0fsfixed32.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 
'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xce\x01\n" + + "\x19sfixed32.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xf7\a\n" + + "\x03gte\x18\x05 \x01(\x0fB\xe2\a\xc2H\xde\a\n" + + "\x8b\x01\n" + + "\fsfixed32.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc5\x01\n" + + "\x0fsfixed32.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcd\x01\n" + + "\x19sfixed32.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd5\x01\n" + + "\x10sfixed32.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdd\x01\n" + + "\x1asfixed32.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x86\x01\n" + + "\x02in\x18\x06 \x03(\x0fBv\xc2Hs\n" + + "q\n" + + "\vsfixed32.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12\x80\x01\n" + + "\x06not_in\x18\a \x03(\x0fBi\xc2Hf\n" + + "d\n" + + "\x0fsfixed32.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x127\n" + + "\aexample\x18\b \x03(\x0fB\x1d\xc2H\x1a\n" + + "\x18\n" + + "\x10sfixed32.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xee\x15\n" + + "\rSFixed64Rules\x12\x8d\x01\n" + + "\x05const\x18\x01 \x01(\x10Bw\xc2Ht\n" + + "r\n" + + "\x0esfixed64.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\x91\x01\n" + + "\x02lt\x18\x02 \x01(\x10B\x7f\xc2H|\n" + + "z\n" + + "\vsfixed64.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xa4\x01\n" + + "\x03lte\x18\x03 \x01(\x10B\x8f\x01\xc2H\x8b\x01\n" + + "\x88\x01\n" + + "\fsfixed64.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xaa\a\n" + + "\x02gt\x18\x04 \x01(\x10B\x97\a\xc2H\x93\a\n" + + "}\n" + + "\vsfixed64.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb6\x01\n" + + "\x0esfixed64.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbe\x01\n" + + "\x18sfixed64.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc6\x01\n" + + "\x0fsfixed64.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 
'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xce\x01\n" + + "\x19sfixed64.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\xf7\a\n" + + "\x03gte\x18\x05 \x01(\x10B\xe2\a\xc2H\xde\a\n" + + "\x8b\x01\n" + + "\fsfixed64.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc5\x01\n" + + "\x0fsfixed64.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcd\x01\n" + + "\x19sfixed64.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd5\x01\n" + + "\x10sfixed64.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdd\x01\n" + + "\x1asfixed64.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\x86\x01\n" + + "\x02in\x18\x06 \x03(\x10Bv\xc2Hs\n" + + "q\n" + + "\vsfixed64.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12\x80\x01\n" + + "\x06not_in\x18\a \x03(\x10Bi\xc2Hf\n" + + "d\n" + + "\x0fsfixed64.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x127\n" + + "\aexample\x18\b \x03(\x10B\x1d\xc2H\x1a\n" + + "\x18\n" + + "\x10sfixed64.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\xd7\x01\n" + + "\tBoolRules\x12\x89\x01\n" + + "\x05const\x18\x01 \x01(\bBs\xc2Hp\n" + + "n\n" + + "\n" + + "bool.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x123\n" + + "\aexample\x18\x02 \x03(\bB\x19\xc2H\x16\n" + + "\x14\n" + + "\fbool.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xcf;\n" + + "\vStringRules\x12\x8d\x01\n" + + "\x05const\x18\x01 \x01(\tBw\xc2Ht\n" + + "r\n" + + "\fstring.const\x1abthis != getField(rules, 'const') ? 'value must equal `%s`'.format([getField(rules, 'const')]) : ''R\x05const\x12\x83\x01\n" + + "\x03len\x18\x13 \x01(\x04Bq\xc2Hn\n" + + "l\n" + + "\n" + + "string.len\x1a^uint(this.size()) != rules.len ? 'value length must be %s characters'.format([rules.len]) : ''R\x03len\x12\xa1\x01\n" + + "\amin_len\x18\x02 \x01(\x04B\x87\x01\xc2H\x83\x01\n" + + "\x80\x01\n" + + "\x0estring.min_len\x1anuint(this.size()) < rules.min_len ? 'value length must be at least %s characters'.format([rules.min_len]) : ''R\x06minLen\x12\x9f\x01\n" + + "\amax_len\x18\x03 \x01(\x04B\x85\x01\xc2H\x81\x01\n" + + "\x7f\n" + + "\x0estring.max_len\x1amuint(this.size()) > rules.max_len ? 'value length must be at most %s characters'.format([rules.max_len]) : ''R\x06maxLen\x12\xa5\x01\n" + + "\tlen_bytes\x18\x14 \x01(\x04B\x87\x01\xc2H\x83\x01\n" + + "\x80\x01\n" + + "\x10string.len_bytes\x1aluint(bytes(this).size()) != rules.len_bytes ? 'value length must be %s bytes'.format([rules.len_bytes]) : ''R\blenBytes\x12\xad\x01\n" + + "\tmin_bytes\x18\x04 \x01(\x04B\x8f\x01\xc2H\x8b\x01\n" + + "\x88\x01\n" + + "\x10string.min_bytes\x1atuint(bytes(this).size()) < rules.min_bytes ? 
'value length must be at least %s bytes'.format([rules.min_bytes]) : ''R\bminBytes\x12\xac\x01\n" + + "\tmax_bytes\x18\x05 \x01(\x04B\x8e\x01\xc2H\x8a\x01\n" + + "\x87\x01\n" + + "\x10string.max_bytes\x1asuint(bytes(this).size()) > rules.max_bytes ? 'value length must be at most %s bytes'.format([rules.max_bytes]) : ''R\bmaxBytes\x12\x96\x01\n" + + "\apattern\x18\x06 \x01(\tB|\xc2Hy\n" + + "w\n" + + "\x0estring.pattern\x1ae!this.matches(rules.pattern) ? 'value does not match regex pattern `%s`'.format([rules.pattern]) : ''R\apattern\x12\x8c\x01\n" + + "\x06prefix\x18\a \x01(\tBt\xc2Hq\n" + + "o\n" + + "\rstring.prefix\x1a^!this.startsWith(rules.prefix) ? 'value does not have prefix `%s`'.format([rules.prefix]) : ''R\x06prefix\x12\x8a\x01\n" + + "\x06suffix\x18\b \x01(\tBr\xc2Ho\n" + + "m\n" + + "\rstring.suffix\x1a\\!this.endsWith(rules.suffix) ? 'value does not have suffix `%s`'.format([rules.suffix]) : ''R\x06suffix\x12\x9a\x01\n" + + "\bcontains\x18\t \x01(\tB~\xc2H{\n" + + "y\n" + + "\x0fstring.contains\x1af!this.contains(rules.contains) ? 'value does not contain substring `%s`'.format([rules.contains]) : ''R\bcontains\x12\xa5\x01\n" + + "\fnot_contains\x18\x17 \x01(\tB\x81\x01\xc2H~\n" + + "|\n" + + "\x13string.not_contains\x1aethis.contains(rules.not_contains) ? 'value contains substring `%s`'.format([rules.not_contains]) : ''R\vnotContains\x12\x84\x01\n" + + "\x02in\x18\n" + + " \x03(\tBt\xc2Hq\n" + + "o\n" + + "\tstring.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12~\n" + + "\x06not_in\x18\v \x03(\tBg\xc2Hd\n" + + "b\n" + + "\rstring.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x12\xe6\x01\n" + + "\x05email\x18\f \x01(\bB\xcd\x01\xc2H\xc9\x01\n" + + "a\n" + + "\fstring.email\x12#value must be a valid email address\x1a,!rules.email || this == '' || this.isEmail()\n" + + "d\n" + + "\x12string.email_empty\x122value is empty, which is not a valid email address\x1a\x1a!rules.email || this != ''H\x00R\x05email\x12\xf1\x01\n" + + "\bhostname\x18\r \x01(\bB\xd2\x01\xc2H\xce\x01\n" + + "e\n" + + "\x0fstring.hostname\x12\x1evalue must be a valid hostname\x1a2!rules.hostname || this == '' || this.isHostname()\n" + + "e\n" + + "\x15string.hostname_empty\x12-value is empty, which is not a valid hostname\x1a\x1d!rules.hostname || this != ''H\x00R\bhostname\x12\xcb\x01\n" + + "\x02ip\x18\x0e \x01(\bB\xb8\x01\xc2H\xb4\x01\n" + + "U\n" + + "\tstring.ip\x12 value must be a valid IP address\x1a&!rules.ip || this == '' || this.isIp()\n" + + "[\n" + + "\x0fstring.ip_empty\x12/value is empty, which is not a valid IP address\x1a\x17!rules.ip || this != ''H\x00R\x02ip\x12\xdc\x01\n" + + "\x04ipv4\x18\x0f \x01(\bB\xc5\x01\xc2H\xc1\x01\n" + + "\\\n" + + "\vstring.ipv4\x12\"value must be a valid IPv4 address\x1a)!rules.ipv4 || this == '' || this.isIp(4)\n" + + "a\n" + + "\x11string.ipv4_empty\x121value is empty, which is not a valid IPv4 address\x1a\x19!rules.ipv4 || this != ''H\x00R\x04ipv4\x12\xdc\x01\n" + + "\x04ipv6\x18\x10 \x01(\bB\xc5\x01\xc2H\xc1\x01\n" + + "\\\n" + + "\vstring.ipv6\x12\"value must be a valid IPv6 address\x1a)!rules.ipv6 || this == '' || this.isIp(6)\n" + + "a\n" + + "\x11string.ipv6_empty\x121value is empty, which is not a valid IPv6 address\x1a\x19!rules.ipv6 || this != ''H\x00R\x04ipv6\x12\xc4\x01\n" + + "\x03uri\x18\x11 \x01(\bB\xaf\x01\xc2H\xab\x01\n" + + "Q\n" + + "\n" + + "string.uri\x12\x19value must be a valid URI\x1a(!rules.uri || this == '' || this.isUri()\n" + + "V\n" + + "\x10string.uri_empty\x12(value is empty, which is not a valid URI\x1a\x18!rules.uri || 
this != ''H\x00R\x03uri\x12x\n" + + "\auri_ref\x18\x12 \x01(\bB]\xc2HZ\n" + + "X\n" + + "\x0estring.uri_ref\x12#value must be a valid URI Reference\x1a!!rules.uri_ref || this.isUriRef()H\x00R\x06uriRef\x12\x99\x02\n" + + "\aaddress\x18\x15 \x01(\bB\xfc\x01\xc2H\xf8\x01\n" + + "\x81\x01\n" + + "\x0estring.address\x12-value must be a valid hostname, or ip address\x1a@!rules.address || this == '' || this.isHostname() || this.isIp()\n" + + "r\n" + + "\x14string.address_empty\x12!rules.ipv4_with_prefixlen || this == '' || this.isIpPrefix(4)\n" + + "\x92\x01\n" + + " string.ipv4_with_prefixlen_empty\x12Dvalue is empty, which is not a valid IPv4 address with prefix length\x1a(!rules.ipv4_with_prefixlen || this != ''H\x00R\x11ipv4WithPrefixlen\x12\xe2\x02\n" + + "\x13ipv6_with_prefixlen\x18\x1c \x01(\bB\xaf\x02\xc2H\xab\x02\n" + + "\x93\x01\n" + + "\x1astring.ipv6_with_prefixlen\x125value must be a valid IPv6 address with prefix length\x1a>!rules.ipv6_with_prefixlen || this == '' || this.isIpPrefix(6)\n" + + "\x92\x01\n" + + " string.ipv6_with_prefixlen_empty\x12Dvalue is empty, which is not a valid IPv6 address with prefix length\x1a(!rules.ipv6_with_prefixlen || this != ''H\x00R\x11ipv6WithPrefixlen\x12\xfc\x01\n" + + "\tip_prefix\x18\x1d \x01(\bB\xdc\x01\xc2H\xd8\x01\n" + + "l\n" + + "\x10string.ip_prefix\x12\x1fvalue must be a valid IP prefix\x1a7!rules.ip_prefix || this == '' || this.isIpPrefix(true)\n" + + "h\n" + + "\x16string.ip_prefix_empty\x12.value is empty, which is not a valid IP prefix\x1a\x1e!rules.ip_prefix || this != ''H\x00R\bipPrefix\x12\x8f\x02\n" + + "\vipv4_prefix\x18\x1e \x01(\bB\xeb\x01\xc2H\xe7\x01\n" + + "u\n" + + "\x12string.ipv4_prefix\x12!value must be a valid IPv4 prefix\x1a!rules.host_and_port || this == '' || this.isHostAndPort(true)\n" + + "y\n" + + "\x1astring.host_and_port_empty\x127value is empty, which is not a valid host and port pair\x1a\"!rules.host_and_port || this != ''H\x00R\vhostAndPort\x12\xfb\x01\n" + + "\x04ulid\x18# 
\x01(\bB\xe4\x01\xc2H\xe0\x01\n" + + "\x82\x01\n" + + "\vstring.ulid\x12\x1avalue must be a valid ULID\x1aW!rules.ulid || this == '' || this.matches('^[0-7][0-9A-HJKMNP-TV-Za-hjkmnp-tv-z]{25}$')\n" + + "Y\n" + + "\x11string.ulid_empty\x12)value is empty, which is not a valid ULID\x1a\x19!rules.ulid || this != ''H\x00R\x04ulid\x12\xb8\x05\n" + + "\x10well_known_regex\x18\x18 \x01(\x0e2\x18.buf.validate.KnownRegexB\xf1\x04\xc2H\xed\x04\n" + + "\xf0\x01\n" + + "#string.well_known_regex.header_name\x12&value must be a valid HTTP header name\x1a\xa0\x01rules.well_known_regex != 1 || this == '' || this.matches(!has(rules.strict) || rules.strict ?'^:?[0-9a-zA-Z!#$%&\\'*+-.^_|~\\x60]+$' :'^[^\\u0000\\u000A\\u000D]+$')\n" + + "\x8d\x01\n" + + ")string.well_known_regex.header_name_empty\x125value is empty, which is not a valid HTTP header name\x1a)rules.well_known_regex != 1 || this != ''\n" + + "\xe7\x01\n" + + "$string.well_known_regex.header_value\x12'value must be a valid HTTP header value\x1a\x95\x01rules.well_known_regex != 2 || this.matches(!has(rules.strict) || rules.strict ?'^[^\\u0000-\\u0008\\u000A-\\u001F\\u007F]*$' :'^[^\\u0000\\u000A\\u000D]*$')H\x00R\x0ewellKnownRegex\x12\x16\n" + + "\x06strict\x18\x19 \x01(\bR\x06strict\x125\n" + + "\aexample\x18\" \x03(\tB\x1b\xc2H\x18\n" + + "\x16\n" + + "\x0estring.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\f\n" + + "\n" + + "well_known\"\xac\x13\n" + + "\n" + + "BytesRules\x12\x87\x01\n" + + "\x05const\x18\x01 \x01(\fBq\xc2Hn\n" + + "l\n" + + "\vbytes.const\x1a]this != getField(rules, 'const') ? 'value must be %x'.format([getField(rules, 'const')]) : ''R\x05const\x12}\n" + + "\x03len\x18\r \x01(\x04Bk\xc2Hh\n" + + "f\n" + + "\tbytes.len\x1aYuint(this.size()) != rules.len ? 'value length must be %s bytes'.format([rules.len]) : ''R\x03len\x12\x98\x01\n" + + "\amin_len\x18\x02 \x01(\x04B\x7f\xc2H|\n" + + "z\n" + + "\rbytes.min_len\x1aiuint(this.size()) < rules.min_len ? 
'value length must be at least %s bytes'.format([rules.min_len]) : ''R\x06minLen\x12\x90\x01\n" + + "\amax_len\x18\x03 \x01(\x04Bw\xc2Ht\n" + + "r\n" + + "\rbytes.max_len\x1aauint(this.size()) > rules.max_len ? 'value must be at most %s bytes'.format([rules.max_len]) : ''R\x06maxLen\x12\x99\x01\n" + + "\apattern\x18\x04 \x01(\tB\x7f\xc2H|\n" + + "z\n" + + "\rbytes.pattern\x1ai!string(this).matches(rules.pattern) ? 'value must match regex pattern `%s`'.format([rules.pattern]) : ''R\apattern\x12\x89\x01\n" + + "\x06prefix\x18\x05 \x01(\fBq\xc2Hn\n" + + "l\n" + + "\fbytes.prefix\x1a\\!this.startsWith(rules.prefix) ? 'value does not have prefix %x'.format([rules.prefix]) : ''R\x06prefix\x12\x87\x01\n" + + "\x06suffix\x18\x06 \x01(\fBo\xc2Hl\n" + + "j\n" + + "\fbytes.suffix\x1aZ!this.endsWith(rules.suffix) ? 'value does not have suffix %x'.format([rules.suffix]) : ''R\x06suffix\x12\x8d\x01\n" + + "\bcontains\x18\a \x01(\fBq\xc2Hn\n" + + "l\n" + + "\x0ebytes.contains\x1aZ!this.contains(rules.contains) ? 'value does not contain %x'.format([rules.contains]) : ''R\bcontains\x12\xab\x01\n" + + "\x02in\x18\b \x03(\fB\x9a\x01\xc2H\x96\x01\n" + + "\x93\x01\n" + + "\bbytes.in\x1a\x86\x01getField(rules, 'in').size() > 0 && !(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12}\n" + + "\x06not_in\x18\t \x03(\fBf\xc2Hc\n" + + "a\n" + + "\fbytes.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x12\xef\x01\n" + + "\x02ip\x18\n" + + " \x01(\bB\xdc\x01\xc2H\xd8\x01\n" + + "t\n" + + "\bbytes.ip\x12 value must be a valid IP address\x1aF!rules.ip || this.size() == 0 || this.size() == 4 || this.size() == 16\n" + + "`\n" + + "\x0ebytes.ip_empty\x12/value is empty, which is not a valid IP address\x1a\x1d!rules.ip || this.size() != 0H\x00R\x02ip\x12\xea\x01\n" + + "\x04ipv4\x18\v \x01(\bB\xd3\x01\xc2H\xcf\x01\n" + + "e\n" + + "\n" + + "bytes.ipv4\x12\"value must be a valid IPv4 address\x1a3!rules.ipv4 || this.size() == 0 || this.size() == 4\n" + + "f\n" + + "\x10bytes.ipv4_empty\x121value is empty, which is not a valid IPv4 address\x1a\x1f!rules.ipv4 || this.size() != 0H\x00R\x04ipv4\x12\xeb\x01\n" + + "\x04ipv6\x18\f \x01(\bB\xd4\x01\xc2H\xd0\x01\n" + + "f\n" + + "\n" + + "bytes.ipv6\x12\"value must be a valid IPv6 address\x1a4!rules.ipv6 || this.size() == 0 || this.size() == 16\n" + + "f\n" + + "\x10bytes.ipv6_empty\x121value is empty, which is not a valid IPv6 address\x1a\x1f!rules.ipv6 || this.size() != 0H\x00R\x04ipv6\x12\xdb\x01\n" + + "\x04uuid\x18\x0f \x01(\bB\xc4\x01\xc2H\xc0\x01\n" + + "^\n" + + "\n" + + "bytes.uuid\x12\x1avalue must be a valid UUID\x1a4!rules.uuid || this.size() == 0 || this.size() == 16\n" + + "^\n" + + "\x10bytes.uuid_empty\x12)value is empty, which is not a valid UUID\x1a\x1f!rules.uuid || this.size() != 0H\x00R\x04uuid\x124\n" + + "\aexample\x18\x0e \x03(\fB\x1a\xc2H\x17\n" + + "\x15\n" + + "\rbytes.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\f\n" + + "\n" + + "well_known\"\xfd\x03\n" + + "\tEnumRules\x12\x89\x01\n" + + "\x05const\x18\x01 \x01(\x05Bs\xc2Hp\n" + + "n\n" + + "\n" + + "enum.const\x1a`this != getField(rules, 'const') ? 
'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12!\n" + + "\fdefined_only\x18\x02 \x01(\bR\vdefinedOnly\x12\x82\x01\n" + + "\x02in\x18\x03 \x03(\x05Br\xc2Ho\n" + + "m\n" + + "\aenum.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12|\n" + + "\x06not_in\x18\x04 \x03(\x05Be\xc2Hb\n" + + "`\n" + + "\venum.not_in\x1aQthis in rules.not_in ? 'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x123\n" + + "\aexample\x18\x05 \x03(\x05B\x19\xc2H\x16\n" + + "\x14\n" + + "\fenum.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9e\x04\n" + + "\rRepeatedRules\x12\xa8\x01\n" + + "\tmin_items\x18\x01 \x01(\x04B\x8a\x01\xc2H\x86\x01\n" + + "\x83\x01\n" + + "\x12repeated.min_items\x1amuint(this.size()) < rules.min_items ? 'value must contain at least %d item(s)'.format([rules.min_items]) : ''R\bminItems\x12\xac\x01\n" + + "\tmax_items\x18\x02 \x01(\x04B\x8e\x01\xc2H\x8a\x01\n" + + "\x87\x01\n" + + "\x12repeated.max_items\x1aquint(this.size()) > rules.max_items ? 'value must contain no more than %s item(s)'.format([rules.max_items]) : ''R\bmaxItems\x12x\n" + + "\x06unique\x18\x03 \x01(\bB`\xc2H]\n" + + "[\n" + + "\x0frepeated.unique\x12(repeated value must contain unique items\x1a\x1e!rules.unique || this.unique()R\x06unique\x12.\n" + + "\x05items\x18\x04 \x01(\v2\x18.buf.validate.FieldRulesR\x05items*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xac\x03\n" + + "\bMapRules\x12\x99\x01\n" + + "\tmin_pairs\x18\x01 \x01(\x04B|\xc2Hy\n" + + "w\n" + + "\rmap.min_pairs\x1afuint(this.size()) < rules.min_pairs ? 'map must be at least %d entries'.format([rules.min_pairs]) : ''R\bminPairs\x12\x98\x01\n" + + "\tmax_pairs\x18\x02 \x01(\x04B{\xc2Hx\n" + + "v\n" + + "\rmap.max_pairs\x1aeuint(this.size()) > rules.max_pairs ? 
'map must be at most %d entries'.format([rules.max_pairs]) : ''R\bmaxPairs\x12,\n" + + "\x04keys\x18\x04 \x01(\v2\x18.buf.validate.FieldRulesR\x04keys\x120\n" + + "\x06values\x18\x05 \x01(\v2\x18.buf.validate.FieldRulesR\x06values*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"1\n" + + "\bAnyRules\x12\x0e\n" + + "\x02in\x18\x02 \x03(\tR\x02in\x12\x15\n" + + "\x06not_in\x18\x03 \x03(\tR\x05notIn\"\xc6\x17\n" + + "\rDurationRules\x12\xa8\x01\n" + + "\x05const\x18\x02 \x01(\v2\x19.google.protobuf.DurationBw\xc2Ht\n" + + "r\n" + + "\x0eduration.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\xac\x01\n" + + "\x02lt\x18\x03 \x01(\v2\x19.google.protobuf.DurationB\x7f\xc2H|\n" + + "z\n" + + "\vduration.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xbf\x01\n" + + "\x03lte\x18\x04 \x01(\v2\x19.google.protobuf.DurationB\x8f\x01\xc2H\x8b\x01\n" + + "\x88\x01\n" + + "\fduration.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12\xc5\a\n" + + "\x02gt\x18\x05 \x01(\v2\x19.google.protobuf.DurationB\x97\a\xc2H\x93\a\n" + + "}\n" + + "\vduration.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb6\x01\n" + + "\x0eduration.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbe\x01\n" + + "\x18duration.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc6\x01\n" + + "\x0fduration.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 
'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xce\x01\n" + + "\x19duration.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\x92\b\n" + + "\x03gte\x18\x06 \x01(\v2\x19.google.protobuf.DurationB\xe2\a\xc2H\xde\a\n" + + "\x8b\x01\n" + + "\fduration.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc5\x01\n" + + "\x0fduration.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xcd\x01\n" + + "\x19duration.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd5\x01\n" + + "\x10duration.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xdd\x01\n" + + "\x1aduration.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12\xa1\x01\n" + + "\x02in\x18\a \x03(\v2\x19.google.protobuf.DurationBv\xc2Hs\n" + + "q\n" + + "\vduration.in\x1ab!(this in getField(rules, 'in')) ? 'value must be in list %s'.format([getField(rules, 'in')]) : ''R\x02in\x12\x9b\x01\n" + + "\x06not_in\x18\b \x03(\v2\x19.google.protobuf.DurationBi\xc2Hf\n" + + "d\n" + + "\x0fduration.not_in\x1aQthis in rules.not_in ? 
'value must not be in list %s'.format([rules.not_in]) : ''R\x05notIn\x12R\n" + + "\aexample\x18\t \x03(\v2\x19.google.protobuf.DurationB\x1d\xc2H\x1a\n" + + "\x18\n" + + "\x10duration.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"\x98\x06\n" + + "\x0eFieldMaskRules\x12\xc6\x01\n" + + "\x05const\x18\x01 \x01(\v2\x1a.google.protobuf.FieldMaskB\x93\x01\xc2H\x8f\x01\n" + + "\x8c\x01\n" + + "\x10field_mask.const\x1axthis.paths != getField(rules, 'const').paths ? 'value must equal paths %s'.format([getField(rules, 'const').paths]) : ''R\x05const\x12\xdd\x01\n" + + "\x02in\x18\x02 \x03(\tB\xcc\x01\xc2H\xc8\x01\n" + + "\xc5\x01\n" + + "\rfield_mask.in\x1a\xb3\x01!this.paths.all(p, p in getField(rules, 'in') || getField(rules, 'in').exists(f, p.startsWith(f+'.'))) ? 'value must only contain paths in %s'.format([getField(rules, 'in')]) : ''R\x02in\x12\xfa\x01\n" + + "\x06not_in\x18\x03 \x03(\tB\xe2\x01\xc2H\xde\x01\n" + + "\xdb\x01\n" + + "\x11field_mask.not_in\x1a\xc5\x01!this.paths.all(p, !(p in getField(rules, 'not_in') || getField(rules, 'not_in').exists(f, p.startsWith(f+'.')))) ? 'value must not contain any paths in %s'.format([getField(rules, 'not_in')]) : ''R\x05notIn\x12U\n" + + "\aexample\x18\x04 \x03(\v2\x1a.google.protobuf.FieldMaskB\x1f\xc2H\x1c\n" + + "\x1a\n" + + "\x12field_mask.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xca\x18\n" + + "\x0eTimestampRules\x12\xaa\x01\n" + + "\x05const\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampBx\xc2Hu\n" + + "s\n" + + "\x0ftimestamp.const\x1a`this != getField(rules, 'const') ? 'value must equal %s'.format([getField(rules, 'const')]) : ''R\x05const\x12\xaf\x01\n" + + "\x02lt\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampB\x80\x01\xc2H}\n" + + "{\n" + + "\ftimestamp.lt\x1ak!has(rules.gte) && !has(rules.gt) && this >= rules.lt? 
'value must be less than %s'.format([rules.lt]) : ''H\x00R\x02lt\x12\xc1\x01\n" + + "\x03lte\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampB\x90\x01\xc2H\x8c\x01\n" + + "\x89\x01\n" + + "\rtimestamp.lte\x1ax!has(rules.gte) && !has(rules.gt) && this > rules.lte? 'value must be less than or equal to %s'.format([rules.lte]) : ''H\x00R\x03lte\x12s\n" + + "\x06lt_now\x18\a \x01(\bBZ\xc2HW\n" + + "U\n" + + "\x10timestamp.lt_now\x1aA(rules.lt_now && this > now) ? 'value must be less than now' : ''H\x00R\x05ltNow\x12\xcb\a\n" + + "\x02gt\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampB\x9c\a\xc2H\x98\a\n" + + "~\n" + + "\ftimestamp.gt\x1an!has(rules.lt) && !has(rules.lte) && this <= rules.gt? 'value must be greater than %s'.format([rules.gt]) : ''\n" + + "\xb7\x01\n" + + "\x0ftimestamp.gt_lt\x1a\xa3\x01has(rules.lt) && rules.lt >= rules.gt && (this >= rules.lt || this <= rules.gt)? 'value must be greater than %s and less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xbf\x01\n" + + "\x19timestamp.gt_lt_exclusive\x1a\xa1\x01has(rules.lt) && rules.lt < rules.gt && (rules.lt <= this && this <= rules.gt)? 'value must be greater than %s or less than %s'.format([rules.gt, rules.lt]) : ''\n" + + "\xc7\x01\n" + + "\x10timestamp.gt_lte\x1a\xb2\x01has(rules.lte) && rules.lte >= rules.gt && (this > rules.lte || this <= rules.gt)? 'value must be greater than %s and less than or equal to %s'.format([rules.gt, rules.lte]) : ''\n" + + "\xcf\x01\n" + + "\x1atimestamp.gt_lte_exclusive\x1a\xb0\x01has(rules.lte) && rules.lte < rules.gt && (rules.lte < this && this <= rules.gt)? 'value must be greater than %s or less than or equal to %s'.format([rules.gt, rules.lte]) : ''H\x01R\x02gt\x12\x98\b\n" + + "\x03gte\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampB\xe7\a\xc2H\xe3\a\n" + + "\x8c\x01\n" + + "\rtimestamp.gte\x1a{!has(rules.lt) && !has(rules.lte) && this < rules.gte? 
'value must be greater than or equal to %s'.format([rules.gte]) : ''\n" + + "\xc6\x01\n" + + "\x10timestamp.gte_lt\x1a\xb1\x01has(rules.lt) && rules.lt >= rules.gte && (this >= rules.lt || this < rules.gte)? 'value must be greater than or equal to %s and less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xce\x01\n" + + "\x1atimestamp.gte_lt_exclusive\x1a\xaf\x01has(rules.lt) && rules.lt < rules.gte && (rules.lt <= this && this < rules.gte)? 'value must be greater than or equal to %s or less than %s'.format([rules.gte, rules.lt]) : ''\n" + + "\xd6\x01\n" + + "\x11timestamp.gte_lte\x1a\xc0\x01has(rules.lte) && rules.lte >= rules.gte && (this > rules.lte || this < rules.gte)? 'value must be greater than or equal to %s and less than or equal to %s'.format([rules.gte, rules.lte]) : ''\n" + + "\xde\x01\n" + + "\x1btimestamp.gte_lte_exclusive\x1a\xbe\x01has(rules.lte) && rules.lte < rules.gte && (rules.lte < this && this < rules.gte)? 'value must be greater than or equal to %s or less than or equal to %s'.format([rules.gte, rules.lte]) : ''H\x01R\x03gte\x12v\n" + + "\x06gt_now\x18\b \x01(\bB]\xc2HZ\n" + + "X\n" + + "\x10timestamp.gt_now\x1aD(rules.gt_now && this < now) ? 'value must be greater than now' : ''H\x01R\x05gtNow\x12\xc0\x01\n" + + "\x06within\x18\t \x01(\v2\x19.google.protobuf.DurationB\x8c\x01\xc2H\x88\x01\n" + + "\x85\x01\n" + + "\x10timestamp.within\x1aqthis < now-rules.within || this > now+rules.within ? 
'value must be within %s of now'.format([rules.within]) : ''R\x06within\x12T\n" + + "\aexample\x18\n" + + " \x03(\v2\x1a.google.protobuf.TimestampB\x1e\xc2H\x1b\n" + + "\x19\n" + + "\x11timestamp.example\x1a\x04trueR\aexample*\t\b\xe8\a\x10\x80\x80\x80\x80\x02B\v\n" + + "\tless_thanB\x0e\n" + + "\fgreater_than\"E\n" + + "\n" + + "Violations\x127\n" + + "\n" + + "violations\x18\x01 \x03(\v2\x17.buf.validate.ViolationR\n" + + "violations\"\xc5\x01\n" + + "\tViolation\x12-\n" + + "\x05field\x18\x05 \x01(\v2\x17.buf.validate.FieldPathR\x05field\x12+\n" + + "\x04rule\x18\x06 \x01(\v2\x17.buf.validate.FieldPathR\x04rule\x12\x17\n" + + "\arule_id\x18\x02 \x01(\tR\x06ruleId\x12\x18\n" + + "\amessage\x18\x03 \x01(\tR\amessage\x12\x17\n" + + "\afor_key\x18\x04 \x01(\bR\x06forKeyJ\x04\b\x01\x10\x02R\n" + + "field_path\"G\n" + + "\tFieldPath\x12:\n" + + "\belements\x18\x01 \x03(\v2\x1e.buf.validate.FieldPathElementR\belements\"\xcc\x03\n" + + "\x10FieldPathElement\x12!\n" + + "\ffield_number\x18\x01 \x01(\x05R\vfieldNumber\x12\x1d\n" + + "\n" + + "field_name\x18\x02 \x01(\tR\tfieldName\x12I\n" + + "\n" + + "field_type\x18\x03 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\tfieldType\x12E\n" + + "\bkey_type\x18\x04 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\akeyType\x12I\n" + + "\n" + + "value_type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\tvalueType\x12\x16\n" + + "\x05index\x18\x06 \x01(\x04H\x00R\x05index\x12\x1b\n" + + "\bbool_key\x18\a \x01(\bH\x00R\aboolKey\x12\x19\n" + + "\aint_key\x18\b \x01(\x03H\x00R\x06intKey\x12\x1b\n" + + "\buint_key\x18\t \x01(\x04H\x00R\auintKey\x12\x1f\n" + + "\n" + + "string_key\x18\n" + + " \x01(\tH\x00R\tstringKeyB\v\n" + + "\tsubscript*\xa1\x01\n" + + "\x06Ignore\x12\x16\n" + + "\x12IGNORE_UNSPECIFIED\x10\x00\x12\x18\n" + + "\x14IGNORE_IF_ZERO_VALUE\x10\x01\x12\x11\n" + + 
"\rIGNORE_ALWAYS\x10\x03\"\x04\b\x02\x10\x02*\fIGNORE_EMPTY*\x0eIGNORE_DEFAULT*\x17IGNORE_IF_DEFAULT_VALUE*\x15IGNORE_IF_UNPOPULATED*n\n" + + "\n" + + "KnownRegex\x12\x1b\n" + + "\x17KNOWN_REGEX_UNSPECIFIED\x10\x00\x12 \n" + + "\x1cKNOWN_REGEX_HTTP_HEADER_NAME\x10\x01\x12!\n" + + "\x1dKNOWN_REGEX_HTTP_HEADER_VALUE\x10\x02:V\n" + + "\amessage\x12\x1f.google.protobuf.MessageOptions\x18\x87\t \x01(\v2\x1a.buf.validate.MessageRulesR\amessage:N\n" + + "\x05oneof\x12\x1d.google.protobuf.OneofOptions\x18\x87\t \x01(\v2\x18.buf.validate.OneofRulesR\x05oneof:N\n" + + "\x05field\x12\x1d.google.protobuf.FieldOptions\x18\x87\t \x01(\v2\x18.buf.validate.FieldRulesR\x05field:]\n" + + "\n" + + "predefined\x12\x1d.google.protobuf.FieldOptions\x18\x88\t \x01(\v2\x1d.buf.validate.PredefinedRulesR\n" + + "predefinedBn\n" + + "\x12build.buf.validateB\rValidateProtoP\x01ZGbuf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/buf/validate" + +var file_buf_validate_validate_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_buf_validate_validate_proto_msgTypes = make([]protoimpl.MessageInfo, 32) +var file_buf_validate_validate_proto_goTypes = []any{ + (Ignore)(0), // 0: buf.validate.Ignore + (KnownRegex)(0), // 1: buf.validate.KnownRegex + (*Rule)(nil), // 2: buf.validate.Rule + (*MessageRules)(nil), // 3: buf.validate.MessageRules + (*MessageOneofRule)(nil), // 4: buf.validate.MessageOneofRule + (*OneofRules)(nil), // 5: buf.validate.OneofRules + (*FieldRules)(nil), // 6: buf.validate.FieldRules + (*PredefinedRules)(nil), // 7: buf.validate.PredefinedRules + (*FloatRules)(nil), // 8: buf.validate.FloatRules + (*DoubleRules)(nil), // 9: buf.validate.DoubleRules + (*Int32Rules)(nil), // 10: buf.validate.Int32Rules + (*Int64Rules)(nil), // 11: buf.validate.Int64Rules + (*UInt32Rules)(nil), // 12: buf.validate.UInt32Rules + (*UInt64Rules)(nil), // 13: buf.validate.UInt64Rules + (*SInt32Rules)(nil), // 14: buf.validate.SInt32Rules + (*SInt64Rules)(nil), // 15: 
buf.validate.SInt64Rules + (*Fixed32Rules)(nil), // 16: buf.validate.Fixed32Rules + (*Fixed64Rules)(nil), // 17: buf.validate.Fixed64Rules + (*SFixed32Rules)(nil), // 18: buf.validate.SFixed32Rules + (*SFixed64Rules)(nil), // 19: buf.validate.SFixed64Rules + (*BoolRules)(nil), // 20: buf.validate.BoolRules + (*StringRules)(nil), // 21: buf.validate.StringRules + (*BytesRules)(nil), // 22: buf.validate.BytesRules + (*EnumRules)(nil), // 23: buf.validate.EnumRules + (*RepeatedRules)(nil), // 24: buf.validate.RepeatedRules + (*MapRules)(nil), // 25: buf.validate.MapRules + (*AnyRules)(nil), // 26: buf.validate.AnyRules + (*DurationRules)(nil), // 27: buf.validate.DurationRules + (*FieldMaskRules)(nil), // 28: buf.validate.FieldMaskRules + (*TimestampRules)(nil), // 29: buf.validate.TimestampRules + (*Violations)(nil), // 30: buf.validate.Violations + (*Violation)(nil), // 31: buf.validate.Violation + (*FieldPath)(nil), // 32: buf.validate.FieldPath + (*FieldPathElement)(nil), // 33: buf.validate.FieldPathElement + (*durationpb.Duration)(nil), // 34: google.protobuf.Duration + (*fieldmaskpb.FieldMask)(nil), // 35: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 36: google.protobuf.Timestamp + (descriptorpb.FieldDescriptorProto_Type)(0), // 37: google.protobuf.FieldDescriptorProto.Type + (*descriptorpb.MessageOptions)(nil), // 38: google.protobuf.MessageOptions + (*descriptorpb.OneofOptions)(nil), // 39: google.protobuf.OneofOptions + (*descriptorpb.FieldOptions)(nil), // 40: google.protobuf.FieldOptions +} +var file_buf_validate_validate_proto_depIdxs = []int32{ + 2, // 0: buf.validate.MessageRules.cel:type_name -> buf.validate.Rule + 4, // 1: buf.validate.MessageRules.oneof:type_name -> buf.validate.MessageOneofRule + 2, // 2: buf.validate.FieldRules.cel:type_name -> buf.validate.Rule + 0, // 3: buf.validate.FieldRules.ignore:type_name -> buf.validate.Ignore + 8, // 4: buf.validate.FieldRules.float:type_name -> buf.validate.FloatRules + 9, // 5: 
buf.validate.FieldRules.double:type_name -> buf.validate.DoubleRules + 10, // 6: buf.validate.FieldRules.int32:type_name -> buf.validate.Int32Rules + 11, // 7: buf.validate.FieldRules.int64:type_name -> buf.validate.Int64Rules + 12, // 8: buf.validate.FieldRules.uint32:type_name -> buf.validate.UInt32Rules + 13, // 9: buf.validate.FieldRules.uint64:type_name -> buf.validate.UInt64Rules + 14, // 10: buf.validate.FieldRules.sint32:type_name -> buf.validate.SInt32Rules + 15, // 11: buf.validate.FieldRules.sint64:type_name -> buf.validate.SInt64Rules + 16, // 12: buf.validate.FieldRules.fixed32:type_name -> buf.validate.Fixed32Rules + 17, // 13: buf.validate.FieldRules.fixed64:type_name -> buf.validate.Fixed64Rules + 18, // 14: buf.validate.FieldRules.sfixed32:type_name -> buf.validate.SFixed32Rules + 19, // 15: buf.validate.FieldRules.sfixed64:type_name -> buf.validate.SFixed64Rules + 20, // 16: buf.validate.FieldRules.bool:type_name -> buf.validate.BoolRules + 21, // 17: buf.validate.FieldRules.string:type_name -> buf.validate.StringRules + 22, // 18: buf.validate.FieldRules.bytes:type_name -> buf.validate.BytesRules + 23, // 19: buf.validate.FieldRules.enum:type_name -> buf.validate.EnumRules + 24, // 20: buf.validate.FieldRules.repeated:type_name -> buf.validate.RepeatedRules + 25, // 21: buf.validate.FieldRules.map:type_name -> buf.validate.MapRules + 26, // 22: buf.validate.FieldRules.any:type_name -> buf.validate.AnyRules + 27, // 23: buf.validate.FieldRules.duration:type_name -> buf.validate.DurationRules + 28, // 24: buf.validate.FieldRules.field_mask:type_name -> buf.validate.FieldMaskRules + 29, // 25: buf.validate.FieldRules.timestamp:type_name -> buf.validate.TimestampRules + 2, // 26: buf.validate.PredefinedRules.cel:type_name -> buf.validate.Rule + 1, // 27: buf.validate.StringRules.well_known_regex:type_name -> buf.validate.KnownRegex + 6, // 28: buf.validate.RepeatedRules.items:type_name -> buf.validate.FieldRules + 6, // 29: 
buf.validate.MapRules.keys:type_name -> buf.validate.FieldRules + 6, // 30: buf.validate.MapRules.values:type_name -> buf.validate.FieldRules + 34, // 31: buf.validate.DurationRules.const:type_name -> google.protobuf.Duration + 34, // 32: buf.validate.DurationRules.lt:type_name -> google.protobuf.Duration + 34, // 33: buf.validate.DurationRules.lte:type_name -> google.protobuf.Duration + 34, // 34: buf.validate.DurationRules.gt:type_name -> google.protobuf.Duration + 34, // 35: buf.validate.DurationRules.gte:type_name -> google.protobuf.Duration + 34, // 36: buf.validate.DurationRules.in:type_name -> google.protobuf.Duration + 34, // 37: buf.validate.DurationRules.not_in:type_name -> google.protobuf.Duration + 34, // 38: buf.validate.DurationRules.example:type_name -> google.protobuf.Duration + 35, // 39: buf.validate.FieldMaskRules.const:type_name -> google.protobuf.FieldMask + 35, // 40: buf.validate.FieldMaskRules.example:type_name -> google.protobuf.FieldMask + 36, // 41: buf.validate.TimestampRules.const:type_name -> google.protobuf.Timestamp + 36, // 42: buf.validate.TimestampRules.lt:type_name -> google.protobuf.Timestamp + 36, // 43: buf.validate.TimestampRules.lte:type_name -> google.protobuf.Timestamp + 36, // 44: buf.validate.TimestampRules.gt:type_name -> google.protobuf.Timestamp + 36, // 45: buf.validate.TimestampRules.gte:type_name -> google.protobuf.Timestamp + 34, // 46: buf.validate.TimestampRules.within:type_name -> google.protobuf.Duration + 36, // 47: buf.validate.TimestampRules.example:type_name -> google.protobuf.Timestamp + 31, // 48: buf.validate.Violations.violations:type_name -> buf.validate.Violation + 32, // 49: buf.validate.Violation.field:type_name -> buf.validate.FieldPath + 32, // 50: buf.validate.Violation.rule:type_name -> buf.validate.FieldPath + 33, // 51: buf.validate.FieldPath.elements:type_name -> buf.validate.FieldPathElement + 37, // 52: buf.validate.FieldPathElement.field_type:type_name -> 
google.protobuf.FieldDescriptorProto.Type + 37, // 53: buf.validate.FieldPathElement.key_type:type_name -> google.protobuf.FieldDescriptorProto.Type + 37, // 54: buf.validate.FieldPathElement.value_type:type_name -> google.protobuf.FieldDescriptorProto.Type + 38, // 55: buf.validate.message:extendee -> google.protobuf.MessageOptions + 39, // 56: buf.validate.oneof:extendee -> google.protobuf.OneofOptions + 40, // 57: buf.validate.field:extendee -> google.protobuf.FieldOptions + 40, // 58: buf.validate.predefined:extendee -> google.protobuf.FieldOptions + 3, // 59: buf.validate.message:type_name -> buf.validate.MessageRules + 5, // 60: buf.validate.oneof:type_name -> buf.validate.OneofRules + 6, // 61: buf.validate.field:type_name -> buf.validate.FieldRules + 7, // 62: buf.validate.predefined:type_name -> buf.validate.PredefinedRules + 63, // [63:63] is the sub-list for method output_type + 63, // [63:63] is the sub-list for method input_type + 59, // [59:63] is the sub-list for extension type_name + 55, // [55:59] is the sub-list for extension extendee + 0, // [0:55] is the sub-list for field type_name +} + +func init() { file_buf_validate_validate_proto_init() } +func file_buf_validate_validate_proto_init() { + if File_buf_validate_validate_proto != nil { + return + } + file_buf_validate_validate_proto_msgTypes[4].OneofWrappers = []any{ + (*fieldRules_Float)(nil), + (*fieldRules_Double)(nil), + (*fieldRules_Int32)(nil), + (*fieldRules_Int64)(nil), + (*fieldRules_Uint32)(nil), + (*fieldRules_Uint64)(nil), + (*fieldRules_Sint32)(nil), + (*fieldRules_Sint64)(nil), + (*fieldRules_Fixed32)(nil), + (*fieldRules_Fixed64)(nil), + (*fieldRules_Sfixed32)(nil), + (*fieldRules_Sfixed64)(nil), + (*fieldRules_Bool)(nil), + (*fieldRules_String_)(nil), + (*fieldRules_Bytes)(nil), + (*fieldRules_Enum)(nil), + (*fieldRules_Repeated)(nil), + (*fieldRules_Map)(nil), + (*fieldRules_Any)(nil), + (*fieldRules_Duration)(nil), + (*fieldRules_FieldMask)(nil), + 
(*fieldRules_Timestamp)(nil), + } + file_buf_validate_validate_proto_msgTypes[6].OneofWrappers = []any{ + (*floatRules_Lt)(nil), + (*floatRules_Lte)(nil), + (*floatRules_Gt)(nil), + (*floatRules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[7].OneofWrappers = []any{ + (*doubleRules_Lt)(nil), + (*doubleRules_Lte)(nil), + (*doubleRules_Gt)(nil), + (*doubleRules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[8].OneofWrappers = []any{ + (*int32Rules_Lt)(nil), + (*int32Rules_Lte)(nil), + (*int32Rules_Gt)(nil), + (*int32Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[9].OneofWrappers = []any{ + (*int64Rules_Lt)(nil), + (*int64Rules_Lte)(nil), + (*int64Rules_Gt)(nil), + (*int64Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[10].OneofWrappers = []any{ + (*uInt32Rules_Lt)(nil), + (*uInt32Rules_Lte)(nil), + (*uInt32Rules_Gt)(nil), + (*uInt32Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[11].OneofWrappers = []any{ + (*uInt64Rules_Lt)(nil), + (*uInt64Rules_Lte)(nil), + (*uInt64Rules_Gt)(nil), + (*uInt64Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[12].OneofWrappers = []any{ + (*sInt32Rules_Lt)(nil), + (*sInt32Rules_Lte)(nil), + (*sInt32Rules_Gt)(nil), + (*sInt32Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[13].OneofWrappers = []any{ + (*sInt64Rules_Lt)(nil), + (*sInt64Rules_Lte)(nil), + (*sInt64Rules_Gt)(nil), + (*sInt64Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[14].OneofWrappers = []any{ + (*fixed32Rules_Lt)(nil), + (*fixed32Rules_Lte)(nil), + (*fixed32Rules_Gt)(nil), + (*fixed32Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[15].OneofWrappers = []any{ + (*fixed64Rules_Lt)(nil), + (*fixed64Rules_Lte)(nil), + (*fixed64Rules_Gt)(nil), + (*fixed64Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[16].OneofWrappers = []any{ + (*sFixed32Rules_Lt)(nil), + (*sFixed32Rules_Lte)(nil), + (*sFixed32Rules_Gt)(nil), + 
(*sFixed32Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[17].OneofWrappers = []any{ + (*sFixed64Rules_Lt)(nil), + (*sFixed64Rules_Lte)(nil), + (*sFixed64Rules_Gt)(nil), + (*sFixed64Rules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[19].OneofWrappers = []any{ + (*stringRules_Email)(nil), + (*stringRules_Hostname)(nil), + (*stringRules_Ip)(nil), + (*stringRules_Ipv4)(nil), + (*stringRules_Ipv6)(nil), + (*stringRules_Uri)(nil), + (*stringRules_UriRef)(nil), + (*stringRules_Address)(nil), + (*stringRules_Uuid)(nil), + (*stringRules_Tuuid)(nil), + (*stringRules_IpWithPrefixlen)(nil), + (*stringRules_Ipv4WithPrefixlen)(nil), + (*stringRules_Ipv6WithPrefixlen)(nil), + (*stringRules_IpPrefix)(nil), + (*stringRules_Ipv4Prefix)(nil), + (*stringRules_Ipv6Prefix)(nil), + (*stringRules_HostAndPort)(nil), + (*stringRules_Ulid)(nil), + (*stringRules_WellKnownRegex)(nil), + } + file_buf_validate_validate_proto_msgTypes[20].OneofWrappers = []any{ + (*bytesRules_Ip)(nil), + (*bytesRules_Ipv4)(nil), + (*bytesRules_Ipv6)(nil), + (*bytesRules_Uuid)(nil), + } + file_buf_validate_validate_proto_msgTypes[25].OneofWrappers = []any{ + (*durationRules_Lt)(nil), + (*durationRules_Lte)(nil), + (*durationRules_Gt)(nil), + (*durationRules_Gte)(nil), + } + file_buf_validate_validate_proto_msgTypes[27].OneofWrappers = []any{ + (*timestampRules_Lt)(nil), + (*timestampRules_Lte)(nil), + (*timestampRules_LtNow)(nil), + (*timestampRules_Gt)(nil), + (*timestampRules_Gte)(nil), + (*timestampRules_GtNow)(nil), + } + file_buf_validate_validate_proto_msgTypes[31].OneofWrappers = []any{ + (*fieldPathElement_Index)(nil), + (*fieldPathElement_BoolKey)(nil), + (*fieldPathElement_IntKey)(nil), + (*fieldPathElement_UintKey)(nil), + (*fieldPathElement_StringKey)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
unsafe.Slice(unsafe.StringData(file_buf_validate_validate_proto_rawDesc), len(file_buf_validate_validate_proto_rawDesc)), + NumEnums: 2, + NumMessages: 32, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_buf_validate_validate_proto_goTypes, + DependencyIndexes: file_buf_validate_validate_proto_depIdxs, + EnumInfos: file_buf_validate_validate_proto_enumTypes, + MessageInfos: file_buf_validate_validate_proto_msgTypes, + ExtensionInfos: file_buf_validate_validate_proto_extTypes, + }.Build() + File_buf_validate_validate_proto = out.File + file_buf_validate_validate_proto_goTypes = nil + file_buf_validate_validate_proto_depIdxs = nil +} diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel index 37d8adc950..f5bda3bb17 100644 --- a/vendor/cel.dev/expr/BUILD.bazel +++ b/vendor/cel.dev/expr/BUILD.bazel @@ -16,7 +16,6 @@ go_library( importpath = "cel.dev/expr", visibility = ["//visibility:public"], deps = [ - "@org_golang_google_genproto_googleapis_rpc//status:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect", "@org_golang_google_protobuf//runtime/protoimpl", "@org_golang_google_protobuf//types/known/anypb", diff --git a/vendor/cel.dev/expr/MODULE.bazel b/vendor/cel.dev/expr/MODULE.bazel index 85ac9ff617..cb98ed5991 100644 --- a/vendor/cel.dev/expr/MODULE.bazel +++ b/vendor/cel.dev/expr/MODULE.bazel @@ -11,26 +11,9 @@ bazel_dep( version = "0.39.1", repo_name = "bazel_gazelle", ) -bazel_dep( - name = "googleapis", - version = "0.0.0-20241220-5e258e33.bcr.1", - repo_name = "com_google_googleapis", -) -bazel_dep( - name = "googleapis-cc", - version = "1.0.0", -) -bazel_dep( - name = "googleapis-java", - version = "1.0.0", -) -bazel_dep( - name = "googleapis-go", - version = "1.0.0", -) bazel_dep( name = "protobuf", - version = "27.0", + version = "27.1", repo_name = "com_google_protobuf", ) bazel_dep( @@ -63,12 +46,11 @@ python.toolchain( ) go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk") 
-go_sdk.download(version = "1.22.0") +go_sdk.download(version = "1.23.0") go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps") go_deps.from_file(go_mod = "//:go.mod") use_repo( go_deps, - "org_golang_google_genproto_googleapis_rpc", "org_golang_google_protobuf", ) diff --git a/vendor/cel.dev/expr/checked.pb.go b/vendor/cel.dev/expr/checked.pb.go index bb225c8ab3..b18085e9b6 100644 --- a/vendor/cel.dev/expr/checked.pb.go +++ b/vendor/cel.dev/expr/checked.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.36.10 +// protoc v5.27.1 // source: cel/expr/checked.proto package expr @@ -13,6 +13,7 @@ import ( structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -136,24 +137,21 @@ func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) { } type CheckedExpr struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` + ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"` + Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"` unknownFields protoimpl.UnknownFields - - ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" 
protobuf_val:"bytes,2,opt,name=value,proto3"` - TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` - ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"` - Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"` + sizeCache protoimpl.SizeCache } func (x *CheckedExpr) Reset() { *x = CheckedExpr{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CheckedExpr) String() string { @@ -164,7 +162,7 @@ func (*CheckedExpr) ProtoMessage() {} func (x *CheckedExpr) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -215,11 +213,8 @@ func (x *CheckedExpr) GetExpr() *Expr { } type Type struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to TypeKind: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to TypeKind: // // *Type_Dyn // *Type_Null @@ -234,16 +229,16 @@ type Type struct { // *Type_Type // *Type_Error // *Type_AbstractType_ - TypeKind isType_TypeKind `protobuf_oneof:"type_kind"` + TypeKind isType_TypeKind `protobuf_oneof:"type_kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Type) Reset() { *x = 
Type{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Type) String() string { @@ -254,7 +249,7 @@ func (*Type) ProtoMessage() {} func (x *Type) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -269,100 +264,126 @@ func (*Type) Descriptor() ([]byte, []int) { return file_cel_expr_checked_proto_rawDescGZIP(), []int{1} } -func (m *Type) GetTypeKind() isType_TypeKind { - if m != nil { - return m.TypeKind +func (x *Type) GetTypeKind() isType_TypeKind { + if x != nil { + return x.TypeKind } return nil } func (x *Type) GetDyn() *emptypb.Empty { - if x, ok := x.GetTypeKind().(*Type_Dyn); ok { - return x.Dyn + if x != nil { + if x, ok := x.TypeKind.(*Type_Dyn); ok { + return x.Dyn + } } return nil } func (x *Type) GetNull() structpb.NullValue { - if x, ok := x.GetTypeKind().(*Type_Null); ok { - return x.Null + if x != nil { + if x, ok := x.TypeKind.(*Type_Null); ok { + return x.Null + } } return structpb.NullValue(0) } func (x *Type) GetPrimitive() Type_PrimitiveType { - if x, ok := x.GetTypeKind().(*Type_Primitive); ok { - return x.Primitive + if x != nil { + if x, ok := x.TypeKind.(*Type_Primitive); ok { + return x.Primitive + } } return Type_PRIMITIVE_TYPE_UNSPECIFIED } func (x *Type) GetWrapper() Type_PrimitiveType { - if x, ok := x.GetTypeKind().(*Type_Wrapper); ok { - return x.Wrapper + if x != nil { + if x, ok := x.TypeKind.(*Type_Wrapper); ok { + return x.Wrapper + } } return Type_PRIMITIVE_TYPE_UNSPECIFIED } func (x *Type) GetWellKnown() Type_WellKnownType { - if x, ok := 
x.GetTypeKind().(*Type_WellKnown); ok { - return x.WellKnown + if x != nil { + if x, ok := x.TypeKind.(*Type_WellKnown); ok { + return x.WellKnown + } } return Type_WELL_KNOWN_TYPE_UNSPECIFIED } func (x *Type) GetListType() *Type_ListType { - if x, ok := x.GetTypeKind().(*Type_ListType_); ok { - return x.ListType + if x != nil { + if x, ok := x.TypeKind.(*Type_ListType_); ok { + return x.ListType + } } return nil } func (x *Type) GetMapType() *Type_MapType { - if x, ok := x.GetTypeKind().(*Type_MapType_); ok { - return x.MapType + if x != nil { + if x, ok := x.TypeKind.(*Type_MapType_); ok { + return x.MapType + } } return nil } func (x *Type) GetFunction() *Type_FunctionType { - if x, ok := x.GetTypeKind().(*Type_Function); ok { - return x.Function + if x != nil { + if x, ok := x.TypeKind.(*Type_Function); ok { + return x.Function + } } return nil } func (x *Type) GetMessageType() string { - if x, ok := x.GetTypeKind().(*Type_MessageType); ok { - return x.MessageType + if x != nil { + if x, ok := x.TypeKind.(*Type_MessageType); ok { + return x.MessageType + } } return "" } func (x *Type) GetTypeParam() string { - if x, ok := x.GetTypeKind().(*Type_TypeParam); ok { - return x.TypeParam + if x != nil { + if x, ok := x.TypeKind.(*Type_TypeParam); ok { + return x.TypeParam + } } return "" } func (x *Type) GetType() *Type { - if x, ok := x.GetTypeKind().(*Type_Type); ok { - return x.Type + if x != nil { + if x, ok := x.TypeKind.(*Type_Type); ok { + return x.Type + } } return nil } func (x *Type) GetError() *emptypb.Empty { - if x, ok := x.GetTypeKind().(*Type_Error); ok { - return x.Error + if x != nil { + if x, ok := x.TypeKind.(*Type_Error); ok { + return x.Error + } } return nil } func (x *Type) GetAbstractType() *Type_AbstractType { - if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok { - return x.AbstractType + if x != nil { + if x, ok := x.TypeKind.(*Type_AbstractType_); ok { + return x.AbstractType + } } return nil } @@ -450,25 +471,22 @@ func (*Type_Error) 
isType_TypeKind() {} func (*Type_AbstractType_) isType_TypeKind() {} type Decl struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Types that are assignable to DeclKind: + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are valid to be assigned to DeclKind: // // *Decl_Ident // *Decl_Function - DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"` + DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Decl) Reset() { *x = Decl{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Decl) String() string { @@ -479,7 +497,7 @@ func (*Decl) ProtoMessage() {} func (x *Decl) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -501,23 +519,27 @@ func (x *Decl) GetName() string { return "" } -func (m *Decl) GetDeclKind() isDecl_DeclKind { - if m != nil { - return m.DeclKind +func (x *Decl) GetDeclKind() isDecl_DeclKind { + if x != nil { + return x.DeclKind } return nil } func (x *Decl) GetIdent() *Decl_IdentDecl { - if x, ok := x.GetDeclKind().(*Decl_Ident); ok { - return x.Ident + if x != nil { + if x, ok := x.DeclKind.(*Decl_Ident); ok { + return x.Ident + } } return nil } func (x *Decl) GetFunction() *Decl_FunctionDecl { - if x, ok := x.GetDeclKind().(*Decl_Function); ok { - return x.Function + 
if x != nil { + if x, ok := x.DeclKind.(*Decl_Function); ok { + return x.Function + } } return nil } @@ -539,22 +561,19 @@ func (*Decl_Ident) isDecl_DeclKind() {} func (*Decl_Function) isDecl_DeclKind() {} type Reference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` - Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Reference) Reset() { *x = Reference{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Reference) String() string { @@ -565,7 +584,7 @@ func (*Reference) ProtoMessage() {} func (x *Reference) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -602,20 +621,17 @@ func (x *Reference) GetValue() *Constant { } type Type_ListType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" 
json:"elem_type,omitempty"` unknownFields protoimpl.UnknownFields - - ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Type_ListType) Reset() { *x = Type_ListType{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Type_ListType) String() string { @@ -626,7 +642,7 @@ func (*Type_ListType) ProtoMessage() {} func (x *Type_ListType) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -649,21 +665,18 @@ func (x *Type_ListType) GetElemType() *Type { } type Type_MapType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` + ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` unknownFields protoimpl.UnknownFields - - KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` - ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Type_MapType) Reset() { *x = Type_MapType{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *Type_MapType) String() string { @@ -674,7 +687,7 @@ func (*Type_MapType) ProtoMessage() {} func (x *Type_MapType) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -704,21 +717,18 @@ func (x *Type_MapType) GetValueType() *Type { } type Type_FunctionType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"` unknownFields protoimpl.UnknownFields - - ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` - ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Type_FunctionType) Reset() { *x = Type_FunctionType{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Type_FunctionType) String() string { @@ -729,7 +739,7 @@ func (*Type_FunctionType) ProtoMessage() {} func (x *Type_FunctionType) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -759,21 +769,18 @@ func (x *Type_FunctionType) GetArgTypes() []*Type { } type Type_AbstractType struct 
{ - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Type_AbstractType) Reset() { *x = Type_AbstractType{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Type_AbstractType) String() string { @@ -784,7 +791,7 @@ func (*Type_AbstractType) ProtoMessage() {} func (x *Type_AbstractType) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -814,22 +821,19 @@ func (x *Type_AbstractType) GetParameterTypes() []*Type { } type Decl_IdentDecl struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"` unknownFields protoimpl.UnknownFields - - Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Value 
*Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Decl_IdentDecl) Reset() { *x = Decl_IdentDecl{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Decl_IdentDecl) String() string { @@ -840,7 +844,7 @@ func (*Decl_IdentDecl) ProtoMessage() {} func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -877,20 +881,18 @@ func (x *Decl_IdentDecl) GetDoc() string { } type Decl_FunctionDecl struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"` + Doc string `protobuf:"bytes,2,opt,name=doc,proto3" json:"doc,omitempty"` unknownFields protoimpl.UnknownFields - - Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Decl_FunctionDecl) Reset() { *x = Decl_FunctionDecl{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Decl_FunctionDecl) String() string { @@ -901,7 +903,7 @@ func (*Decl_FunctionDecl) ProtoMessage() 
{} func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -923,26 +925,30 @@ func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload { return nil } -type Decl_FunctionDecl_Overload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *Decl_FunctionDecl) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} - OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` - Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"` - TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"` - ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` - IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"` - Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"` +type Decl_FunctionDecl_Overload struct { + state protoimpl.MessageState `protogen:"open.v1"` + OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"` + TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"` + ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"` + Doc string 
`protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Decl_FunctionDecl_Overload) Reset() { *x = Decl_FunctionDecl_Overload{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Decl_FunctionDecl_Overload) String() string { @@ -953,7 +959,7 @@ func (*Decl_FunctionDecl_Overload) ProtoMessage() {} func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1012,185 +1018,113 @@ func (x *Decl_FunctionDecl_Overload) GetDoc() string { var File_cel_expr_checked_proto protoreflect.FileDescriptor -var file_cel_expr_checked_proto_rawDesc = []byte{ - 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, - 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, - 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 
0x65, 0x6e, 0x63, - 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, - 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, - 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, - 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61, - 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65, - 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a, - 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, - 0x2e, 0x52, 0x65, 
0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, - 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d, - 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, - 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69, - 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, - 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, - 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 
0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, - 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, - 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, - 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c, - 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, - 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08, - 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a, - 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 
0x74, - 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72, - 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72, - 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, - 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, - 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b, - 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 
0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79, - 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73, - 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, - 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, - 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a, - 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59, - 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, - 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 
0x41, 0x4e, 0x59, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12, - 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a, - 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44, - 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c, - 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, - 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f, - 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, - 0x01, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, - 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65, - 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x6c, - 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, - 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b, - 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a, - 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, - 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75, - 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, - 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, - 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, - 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49, - 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64, - 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63, - 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, - 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} +const file_cel_expr_checked_proto_rawDesc = "" + + "\n" + + "\x16cel/expr/checked.proto\x12\bcel.expr\x1a\x15cel/expr/syntax.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xba\x03\n" + + "\vCheckedExpr\x12L\n" + + "\rreference_map\x18\x02 \x03(\v2'.cel.expr.CheckedExpr.ReferenceMapEntryR\freferenceMap\x12=\n" + + "\btype_map\x18\x03 \x03(\v2\".cel.expr.CheckedExpr.TypeMapEntryR\atypeMap\x125\n" + + "\vsource_info\x18\x05 \x01(\v2\x14.cel.expr.SourceInfoR\n" + + "sourceInfo\x12!\n" + + "\fexpr_version\x18\x06 \x01(\tR\vexprVersion\x12\"\n" + + "\x04expr\x18\x04 \x01(\v2\x0e.cel.expr.ExprR\x04expr\x1aT\n" + + "\x11ReferenceMapEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12)\n" + + "\x05value\x18\x02 \x01(\v2\x13.cel.expr.ReferenceR\x05value:\x028\x01\x1aJ\n" + + "\fTypeMapEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12$\n" + + "\x05value\x18\x02 \x01(\v2\x0e.cel.expr.TypeR\x05value:\x028\x01\"\xe6\t\n" + + "\x04Type\x12*\n" + + "\x03dyn\x18\x01 \x01(\v2\x16.google.protobuf.EmptyH\x00R\x03dyn\x120\n" + + "\x04null\x18\x02 
\x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\x04null\x12<\n" + + "\tprimitive\x18\x03 \x01(\x0e2\x1c.cel.expr.Type.PrimitiveTypeH\x00R\tprimitive\x128\n" + + "\awrapper\x18\x04 \x01(\x0e2\x1c.cel.expr.Type.PrimitiveTypeH\x00R\awrapper\x12=\n" + + "\n" + + "well_known\x18\x05 \x01(\x0e2\x1c.cel.expr.Type.WellKnownTypeH\x00R\twellKnown\x126\n" + + "\tlist_type\x18\x06 \x01(\v2\x17.cel.expr.Type.ListTypeH\x00R\blistType\x123\n" + + "\bmap_type\x18\a \x01(\v2\x16.cel.expr.Type.MapTypeH\x00R\amapType\x129\n" + + "\bfunction\x18\b \x01(\v2\x1b.cel.expr.Type.FunctionTypeH\x00R\bfunction\x12#\n" + + "\fmessage_type\x18\t \x01(\tH\x00R\vmessageType\x12\x1f\n" + + "\n" + + "type_param\x18\n" + + " \x01(\tH\x00R\ttypeParam\x12$\n" + + "\x04type\x18\v \x01(\v2\x0e.cel.expr.TypeH\x00R\x04type\x12.\n" + + "\x05error\x18\f \x01(\v2\x16.google.protobuf.EmptyH\x00R\x05error\x12B\n" + + "\rabstract_type\x18\x0e \x01(\v2\x1b.cel.expr.Type.AbstractTypeH\x00R\fabstractType\x1a7\n" + + "\bListType\x12+\n" + + "\telem_type\x18\x01 \x01(\v2\x0e.cel.expr.TypeR\belemType\x1ac\n" + + "\aMapType\x12)\n" + + "\bkey_type\x18\x01 \x01(\v2\x0e.cel.expr.TypeR\akeyType\x12-\n" + + "\n" + + "value_type\x18\x02 \x01(\v2\x0e.cel.expr.TypeR\tvalueType\x1al\n" + + "\fFunctionType\x12/\n" + + "\vresult_type\x18\x01 \x01(\v2\x0e.cel.expr.TypeR\n" + + "resultType\x12+\n" + + "\targ_types\x18\x02 \x03(\v2\x0e.cel.expr.TypeR\bargTypes\x1a[\n" + + "\fAbstractType\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x127\n" + + "\x0fparameter_types\x18\x02 \x03(\v2\x0e.cel.expr.TypeR\x0eparameterTypes\"s\n" + + "\rPrimitiveType\x12\x1e\n" + + "\x1aPRIMITIVE_TYPE_UNSPECIFIED\x10\x00\x12\b\n" + + "\x04BOOL\x10\x01\x12\t\n" + + "\x05INT64\x10\x02\x12\n" + + "\n" + + "\x06UINT64\x10\x03\x12\n" + + "\n" + + "\x06DOUBLE\x10\x04\x12\n" + + "\n" + + "\x06STRING\x10\x05\x12\t\n" + + "\x05BYTES\x10\x06\"V\n" + + "\rWellKnownType\x12\x1f\n" + + "\x1bWELL_KNOWN_TYPE_UNSPECIFIED\x10\x00\x12\a\n" + + 
"\x03ANY\x10\x01\x12\r\n" + + "\tTIMESTAMP\x10\x02\x12\f\n" + + "\bDURATION\x10\x03B\v\n" + + "\ttype_kind\"\xd4\x04\n" + + "\x04Decl\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x120\n" + + "\x05ident\x18\x02 \x01(\v2\x18.cel.expr.Decl.IdentDeclH\x00R\x05ident\x129\n" + + "\bfunction\x18\x03 \x01(\v2\x1b.cel.expr.Decl.FunctionDeclH\x00R\bfunction\x1ak\n" + + "\tIdentDecl\x12\"\n" + + "\x04type\x18\x01 \x01(\v2\x0e.cel.expr.TypeR\x04type\x12(\n" + + "\x05value\x18\x02 \x01(\v2\x12.cel.expr.ConstantR\x05value\x12\x10\n" + + "\x03doc\x18\x03 \x01(\tR\x03doc\x1a\xd0\x02\n" + + "\fFunctionDecl\x12B\n" + + "\toverloads\x18\x01 \x03(\v2$.cel.expr.Decl.FunctionDecl.OverloadR\toverloads\x12\x10\n" + + "\x03doc\x18\x02 \x01(\tR\x03doc\x1a\xe9\x01\n" + + "\bOverload\x12\x1f\n" + + "\voverload_id\x18\x01 \x01(\tR\n" + + "overloadId\x12&\n" + + "\x06params\x18\x02 \x03(\v2\x0e.cel.expr.TypeR\x06params\x12\x1f\n" + + "\vtype_params\x18\x03 \x03(\tR\n" + + "typeParams\x12/\n" + + "\vresult_type\x18\x04 \x01(\v2\x0e.cel.expr.TypeR\n" + + "resultType\x120\n" + + "\x14is_instance_function\x18\x05 \x01(\bR\x12isInstanceFunction\x12\x10\n" + + "\x03doc\x18\x06 \x01(\tR\x03docB\v\n" + + "\tdecl_kind\"j\n" + + "\tReference\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1f\n" + + "\voverload_id\x18\x03 \x03(\tR\n" + + "overloadId\x12(\n" + + "\x05value\x18\x04 \x01(\v2\x12.cel.expr.ConstantR\x05valueB,\n" + + "\fdev.cel.exprB\tDeclProtoP\x01Z\fcel.dev/expr\xf8\x01\x01b\x06proto3" var ( file_cel_expr_checked_proto_rawDescOnce sync.Once - file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc + file_cel_expr_checked_proto_rawDescData []byte ) func file_cel_expr_checked_proto_rawDescGZIP() []byte { file_cel_expr_checked_proto_rawDescOnce.Do(func() { - file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData) + file_cel_expr_checked_proto_rawDescData = 
protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cel_expr_checked_proto_rawDesc), len(file_cel_expr_checked_proto_rawDesc))) }) return file_cel_expr_checked_proto_rawDescData } var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13) -var file_cel_expr_checked_proto_goTypes = []interface{}{ +var file_cel_expr_checked_proto_goTypes = []any{ (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr @@ -1257,141 +1191,7 @@ func file_cel_expr_checked_proto_init() { return } file_cel_expr_syntax_proto_init() - if !protoimpl.UnsafeEnabled { - file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckedExpr); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Decl); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Reference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_ListType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields 
- default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_MapType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_FunctionType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_AbstractType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Decl_IdentDecl); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Decl_FunctionDecl); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Decl_FunctionDecl_Overload); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []any{ (*Type_Dyn)(nil), (*Type_Null)(nil), (*Type_Primitive)(nil), @@ -1406,7 +1206,7 @@ func file_cel_expr_checked_proto_init() { (*Type_Error)(nil), (*Type_AbstractType_)(nil), } - file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = 
[]interface{}{ + file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []any{ (*Decl_Ident)(nil), (*Decl_Function)(nil), } @@ -1414,7 +1214,7 @@ func file_cel_expr_checked_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_cel_expr_checked_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_cel_expr_checked_proto_rawDesc), len(file_cel_expr_checked_proto_rawDesc)), NumEnums: 2, NumMessages: 13, NumExtensions: 0, @@ -1426,7 +1226,6 @@ func file_cel_expr_checked_proto_init() { MessageInfos: file_cel_expr_checked_proto_msgTypes, }.Build() File_cel_expr_checked_proto = out.File - file_cel_expr_checked_proto_rawDesc = nil file_cel_expr_checked_proto_goTypes = nil file_cel_expr_checked_proto_depIdxs = nil } diff --git a/vendor/cel.dev/expr/eval.pb.go b/vendor/cel.dev/expr/eval.pb.go index a7aae0900c..83acb8935b 100644 --- a/vendor/cel.dev/expr/eval.pb.go +++ b/vendor/cel.dev/expr/eval.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.3 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // source: cel/expr/eval.proto @@ -12,6 +12,7 @@ import ( anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -373,58 +374,39 @@ func (x *EvalState_Result) GetValue() int64 { var File_cel_expr_eval_proto protoreflect.FileDescriptor -var file_cel_expr_eval_proto_rawDesc = []byte{ - 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a, - 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, - 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0xa2, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, - 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, - 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, - 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, - 
0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, - 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, - 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, - 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x28, - 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, - 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, - 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, - 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, - 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_cel_expr_eval_proto_rawDesc = "" + + "\n" + + "\x13cel/expr/eval.proto\x12\bcel.expr\x1a\x19google/protobuf/any.proto\x1a\x14cel/expr/value.proto\"\xa2\x01\n" + + "\tEvalState\x12+\n" + + "\x06values\x18\x01 \x03(\v2\x13.cel.expr.ExprValueR\x06values\x124\n" + + "\aresults\x18\x03 \x03(\v2\x1a.cel.expr.EvalState.ResultR\aresults\x1a2\n" + + "\x06Result\x12\x12\n" + + "\x04expr\x18\x01 \x01(\x03R\x04expr\x12\x14\n" + + "\x05value\x18\x02 \x01(\x03R\x05value\"\x9a\x01\n" + + "\tExprValue\x12'\n" + + "\x05value\x18\x01 \x01(\v2\x0f.cel.expr.ValueH\x00R\x05value\x12*\n" + + "\x05error\x18\x02 \x01(\v2\x12.cel.expr.ErrorSetH\x00R\x05error\x120\n" + + "\aunknown\x18\x03 \x01(\v2\x14.cel.expr.UnknownSetH\x00R\aunknownB\x06\n" + + "\x04kind\"4\n" + + "\bErrorSet\x12(\n" + + "\x06errors\x18\x01 \x03(\v2\x10.cel.expr.StatusR\x06errors\"f\n" + + "\x06Status\x12\x12\n" + + "\x04code\x18\x01 \x01(\x05R\x04code\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\x12.\n" + + "\adetails\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\adetails\"\"\n" + + "\n" + + "UnknownSet\x12\x14\n" + + "\x05exprs\x18\x01 \x03(\x03R\x05exprsB,\n" + + "\fdev.cel.exprB\tEvalProtoP\x01Z\fcel.dev/expr\xf8\x01\x01b\x06proto3" var ( file_cel_expr_eval_proto_rawDescOnce sync.Once - file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc + file_cel_expr_eval_proto_rawDescData []byte ) func file_cel_expr_eval_proto_rawDescGZIP() []byte { file_cel_expr_eval_proto_rawDescOnce.Do(func() { - file_cel_expr_eval_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData) + file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cel_expr_eval_proto_rawDesc), len(file_cel_expr_eval_proto_rawDesc))) }) return file_cel_expr_eval_proto_rawDescData } @@ -470,7 +452,7 @@ func file_cel_expr_eval_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_cel_expr_eval_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_cel_expr_eval_proto_rawDesc), len(file_cel_expr_eval_proto_rawDesc)), NumEnums: 0, NumMessages: 6, NumExtensions: 0, @@ -481,7 +463,6 @@ func file_cel_expr_eval_proto_init() { MessageInfos: file_cel_expr_eval_proto_msgTypes, }.Build() File_cel_expr_eval_proto = out.File - file_cel_expr_eval_proto_rawDesc = nil file_cel_expr_eval_proto_goTypes = nil file_cel_expr_eval_proto_depIdxs = nil } diff --git a/vendor/cel.dev/expr/explain.pb.go b/vendor/cel.dev/expr/explain.pb.go index 79fd5443b9..4239933978 100644 --- a/vendor/cel.dev/expr/explain.pb.go +++ b/vendor/cel.dev/expr/explain.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.36.10 +// protoc v5.27.1 // source: cel/expr/explain.proto package expr @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -20,23 +21,20 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in cel/expr/explain.proto. 
type Explain struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"` unknownFields protoimpl.UnknownFields - - Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Explain) Reset() { *x = Explain{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_explain_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_explain_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Explain) String() string { @@ -47,7 +45,7 @@ func (*Explain) ProtoMessage() {} func (x *Explain) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_explain_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -77,21 +75,18 @@ func (x *Explain) GetExprSteps() []*Explain_ExprStep { } type Explain_ExprStep struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"` unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"` + sizeCache 
protoimpl.SizeCache } func (x *Explain_ExprStep) Reset() { *x = Explain_ExprStep{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_explain_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_explain_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Explain_ExprStep) String() string { @@ -102,7 +97,7 @@ func (*Explain_ExprStep) ProtoMessage() {} func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_explain_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -133,42 +128,33 @@ func (x *Explain_ExprStep) GetValueIndex() int32 { var File_cel_expr_explain_proto protoreflect.FileDescriptor -var file_cel_expr_explain_proto_rawDesc = []byte{ - 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61, - 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70, - 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a, - 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, - 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65, - 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 
0x0a, 0x08, 0x45, 0x78, 0x70, 0x72, - 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76, - 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61, - 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, - 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} +const file_cel_expr_explain_proto_rawDesc = "" + + "\n" + + "\x16cel/expr/explain.proto\x12\bcel.expr\x1a\x14cel/expr/value.proto\"\xae\x01\n" + + "\aExplain\x12'\n" + + "\x06values\x18\x01 \x03(\v2\x0f.cel.expr.ValueR\x06values\x129\n" + + "\n" + + "expr_steps\x18\x02 \x03(\v2\x1a.cel.expr.Explain.ExprStepR\texprSteps\x1a;\n" + + "\bExprStep\x12\x0e\n" + + "\x02id\x18\x01 \x01(\x03R\x02id\x12\x1f\n" + + "\vvalue_index\x18\x02 \x01(\x05R\n" + + "valueIndex:\x02\x18\x01B/\n" + + "\fdev.cel.exprB\fExplainProtoP\x01Z\fcel.dev/expr\xf8\x01\x01b\x06proto3" var ( file_cel_expr_explain_proto_rawDescOnce sync.Once - file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc + file_cel_expr_explain_proto_rawDescData []byte ) func file_cel_expr_explain_proto_rawDescGZIP() []byte { file_cel_expr_explain_proto_rawDescOnce.Do(func() { - file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData) + file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cel_expr_explain_proto_rawDesc), len(file_cel_expr_explain_proto_rawDesc))) }) return file_cel_expr_explain_proto_rawDescData } var file_cel_expr_explain_proto_msgTypes = 
make([]protoimpl.MessageInfo, 2) -var file_cel_expr_explain_proto_goTypes = []interface{}{ +var file_cel_expr_explain_proto_goTypes = []any{ (*Explain)(nil), // 0: cel.expr.Explain (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep (*Value)(nil), // 2: cel.expr.Value @@ -189,37 +175,11 @@ func file_cel_expr_explain_proto_init() { return } file_cel_expr_value_proto_init() - if !protoimpl.UnsafeEnabled { - file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Explain); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Explain_ExprStep); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_cel_expr_explain_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_cel_expr_explain_proto_rawDesc), len(file_cel_expr_explain_proto_rawDesc)), NumEnums: 0, NumMessages: 2, NumExtensions: 0, @@ -230,7 +190,6 @@ func file_cel_expr_explain_proto_init() { MessageInfos: file_cel_expr_explain_proto_msgTypes, }.Build() File_cel_expr_explain_proto = out.File - file_cel_expr_explain_proto_rawDesc = nil file_cel_expr_explain_proto_goTypes = nil file_cel_expr_explain_proto_depIdxs = nil } diff --git a/vendor/cel.dev/expr/syntax.pb.go b/vendor/cel.dev/expr/syntax.pb.go index 48a952872e..72d19b20d4 100644 --- a/vendor/cel.dev/expr/syntax.pb.go +++ b/vendor/cel.dev/expr/syntax.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.36.10 +// protoc v5.27.1 // source: cel/expr/syntax.proto package expr @@ -14,6 +14,7 @@ import ( timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -76,21 +77,18 @@ func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) { } type ParsedExpr struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"` + SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` unknownFields protoimpl.UnknownFields - - Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"` - SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ParsedExpr) Reset() { *x = ParsedExpr{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParsedExpr) String() string { @@ -101,7 +99,7 @@ func (*ParsedExpr) ProtoMessage() {} func (x *ParsedExpr) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -131,12 +129,9 @@ func (x *ParsedExpr) GetSourceInfo() *SourceInfo { } type Expr struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` - // 
Types that are assignable to ExprKind: + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // Types that are valid to be assigned to ExprKind: // // *Expr_ConstExpr // *Expr_IdentExpr @@ -145,16 +140,16 @@ type Expr struct { // *Expr_ListExpr // *Expr_StructExpr // *Expr_ComprehensionExpr - ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"` + ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Expr) Reset() { *x = Expr{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr) String() string { @@ -165,7 +160,7 @@ func (*Expr) ProtoMessage() {} func (x *Expr) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -187,58 +182,72 @@ func (x *Expr) GetId() int64 { return 0 } -func (m *Expr) GetExprKind() isExpr_ExprKind { - if m != nil { - return m.ExprKind +func (x *Expr) GetExprKind() isExpr_ExprKind { + if x != nil { + return x.ExprKind } return nil } func (x *Expr) GetConstExpr() *Constant { - if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok { - return x.ConstExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_ConstExpr); ok { + return x.ConstExpr + } } return nil } func (x *Expr) GetIdentExpr() *Expr_Ident { - if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok { - return x.IdentExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_IdentExpr); ok { + return x.IdentExpr + } } return nil } func (x *Expr) GetSelectExpr() *Expr_Select { - if x, ok := 
x.GetExprKind().(*Expr_SelectExpr); ok { - return x.SelectExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_SelectExpr); ok { + return x.SelectExpr + } } return nil } func (x *Expr) GetCallExpr() *Expr_Call { - if x, ok := x.GetExprKind().(*Expr_CallExpr); ok { - return x.CallExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_CallExpr); ok { + return x.CallExpr + } } return nil } func (x *Expr) GetListExpr() *Expr_CreateList { - if x, ok := x.GetExprKind().(*Expr_ListExpr); ok { - return x.ListExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_ListExpr); ok { + return x.ListExpr + } } return nil } func (x *Expr) GetStructExpr() *Expr_CreateStruct { - if x, ok := x.GetExprKind().(*Expr_StructExpr); ok { - return x.StructExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_StructExpr); ok { + return x.StructExpr + } } return nil } func (x *Expr) GetComprehensionExpr() *Expr_Comprehension { - if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok { - return x.ComprehensionExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_ComprehensionExpr); ok { + return x.ComprehensionExpr + } } return nil } @@ -290,11 +299,8 @@ func (*Expr_StructExpr) isExpr_ExprKind() {} func (*Expr_ComprehensionExpr) isExpr_ExprKind() {} type Constant struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to ConstantKind: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to ConstantKind: // // *Constant_NullValue // *Constant_BoolValue @@ -305,16 +311,16 @@ type Constant struct { // *Constant_BytesValue // *Constant_DurationValue // *Constant_TimestampValue - ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"` + ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Constant) Reset() { *x = Constant{} - if protoimpl.UnsafeEnabled { - mi := 
&file_cel_expr_syntax_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Constant) String() string { @@ -325,7 +331,7 @@ func (*Constant) ProtoMessage() {} func (x *Constant) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -340,74 +346,92 @@ func (*Constant) Descriptor() ([]byte, []int) { return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2} } -func (m *Constant) GetConstantKind() isConstant_ConstantKind { - if m != nil { - return m.ConstantKind +func (x *Constant) GetConstantKind() isConstant_ConstantKind { + if x != nil { + return x.ConstantKind } return nil } func (x *Constant) GetNullValue() structpb.NullValue { - if x, ok := x.GetConstantKind().(*Constant_NullValue); ok { - return x.NullValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_NullValue); ok { + return x.NullValue + } } return structpb.NullValue(0) } func (x *Constant) GetBoolValue() bool { - if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok { - return x.BoolValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_BoolValue); ok { + return x.BoolValue + } } return false } func (x *Constant) GetInt64Value() int64 { - if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok { - return x.Int64Value + if x != nil { + if x, ok := x.ConstantKind.(*Constant_Int64Value); ok { + return x.Int64Value + } } return 0 } func (x *Constant) GetUint64Value() uint64 { - if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok { - return x.Uint64Value + if x != nil { + if x, ok := x.ConstantKind.(*Constant_Uint64Value); ok { + return x.Uint64Value + } } return 0 } func (x *Constant) 
GetDoubleValue() float64 { - if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok { - return x.DoubleValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_DoubleValue); ok { + return x.DoubleValue + } } return 0 } func (x *Constant) GetStringValue() string { - if x, ok := x.GetConstantKind().(*Constant_StringValue); ok { - return x.StringValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_StringValue); ok { + return x.StringValue + } } return "" } func (x *Constant) GetBytesValue() []byte { - if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok { - return x.BytesValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_BytesValue); ok { + return x.BytesValue + } } return nil } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in cel/expr/syntax.proto. func (x *Constant) GetDurationValue() *durationpb.Duration { - if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok { - return x.DurationValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_DurationValue); ok { + return x.DurationValue + } } return nil } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in cel/expr/syntax.proto. func (x *Constant) GetTimestampValue() *timestamppb.Timestamp { - if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok { - return x.TimestampValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_TimestampValue); ok { + return x.TimestampValue + } } return nil } @@ -445,12 +469,12 @@ type Constant_BytesValue struct { } type Constant_DurationValue struct { - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in cel/expr/syntax.proto. DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"` } type Constant_TimestampValue struct { - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in cel/expr/syntax.proto. 
TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"` } @@ -473,25 +497,22 @@ func (*Constant_DurationValue) isConstant_ConstantKind() {} func (*Constant_TimestampValue) isConstant_ConstantKind() {} type SourceInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"` Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"` - Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceInfo) Reset() { *x = SourceInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_cel_expr_syntax_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceInfo) String() string { @@ -502,7 +523,7 @@ func (*SourceInfo) ProtoMessage() {} func (x *SourceInfo) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -560,20 +581,17 @@ func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension { } type Expr_Ident struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Expr_Ident) Reset() { *x = Expr_Ident{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_Ident) String() string { @@ -584,7 +602,7 @@ func (*Expr_Ident) ProtoMessage() {} func (x *Expr_Ident) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -607,22 +625,19 @@ func (x *Expr_Ident) GetName() string { } type Expr_Select struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` + Field string 
`protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"` + TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"` unknownFields protoimpl.UnknownFields - - Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` - Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"` - TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Expr_Select) Reset() { *x = Expr_Select{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_Select) String() string { @@ -633,7 +648,7 @@ func (*Expr_Select) ProtoMessage() {} func (x *Expr_Select) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -670,22 +685,19 @@ func (x *Expr_Select) GetTestOnly() bool { } type Expr_Call struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"` + Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` unknownFields protoimpl.UnknownFields - - Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` - Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"` - Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` + 
sizeCache protoimpl.SizeCache } func (x *Expr_Call) Reset() { *x = Expr_Call{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_Call) String() string { @@ -696,7 +708,7 @@ func (*Expr_Call) ProtoMessage() {} func (x *Expr_Call) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -733,21 +745,18 @@ func (x *Expr_Call) GetArgs() []*Expr { } type Expr_CreateList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"` - OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"` + OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Expr_CreateList) Reset() { *x = Expr_CreateList{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_CreateList) String() string { @@ -758,7 +767,7 @@ func (*Expr_CreateList) 
ProtoMessage() {} func (x *Expr_CreateList) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -788,21 +797,18 @@ func (x *Expr_CreateList) GetOptionalIndices() []int32 { } type Expr_CreateStruct struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"` + Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"` unknownFields protoimpl.UnknownFields - - MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"` - Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Expr_CreateStruct) Reset() { *x = Expr_CreateStruct{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_CreateStruct) String() string { @@ -813,7 +819,7 @@ func (*Expr_CreateStruct) ProtoMessage() {} func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -843,26 +849,24 @@ func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry { } type Expr_Comprehension struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + 
state protoimpl.MessageState `protogen:"open.v1"` + IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` + IterVar2 string `protobuf:"bytes,8,opt,name=iter_var2,json=iterVar2,proto3" json:"iter_var2,omitempty"` + IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` + AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` + AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` + LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` + LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` + Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"` unknownFields protoimpl.UnknownFields - - IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` - IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` - AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` - AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` - LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` - LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` - Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Expr_Comprehension) Reset() { *x = Expr_Comprehension{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[9] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_Comprehension) String() string { @@ -873,7 +877,7 @@ func (*Expr_Comprehension) ProtoMessage() {} func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -895,6 +899,13 @@ func (x *Expr_Comprehension) GetIterVar() string { return "" } +func (x *Expr_Comprehension) GetIterVar2() string { + if x != nil { + return x.IterVar2 + } + return "" +} + func (x *Expr_Comprehension) GetIterRange() *Expr { if x != nil { return x.IterRange @@ -938,27 +949,24 @@ func (x *Expr_Comprehension) GetResult() *Expr { } type Expr_CreateStruct_Entry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - // Types that are assignable to KeyKind: + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Types that are valid to be assigned to KeyKind: // // *Expr_CreateStruct_Entry_FieldKey // *Expr_CreateStruct_Entry_MapKey KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"` Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Expr_CreateStruct_Entry) Reset() { *x = Expr_CreateStruct_Entry{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[10] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_CreateStruct_Entry) String() string { @@ -969,7 +977,7 @@ func (*Expr_CreateStruct_Entry) ProtoMessage() {} func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -991,23 +999,27 @@ func (x *Expr_CreateStruct_Entry) GetId() int64 { return 0 } -func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind { - if m != nil { - return m.KeyKind +func (x *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind { + if x != nil { + return x.KeyKind } return nil } func (x *Expr_CreateStruct_Entry) GetFieldKey() string { - if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok { - return x.FieldKey + if x != nil { + if x, ok := x.KeyKind.(*Expr_CreateStruct_Entry_FieldKey); ok { + return x.FieldKey + } } return "" } func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr { - if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok { - return x.MapKey + if x != nil { + if x, ok := x.KeyKind.(*Expr_CreateStruct_Entry_MapKey); ok { + return x.MapKey + } } return nil } @@ -1043,22 +1055,19 @@ func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {} func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {} type SourceInfo_Extension struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" 
json:"affected_components,omitempty"` Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceInfo_Extension) Reset() { *x = SourceInfo_Extension{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceInfo_Extension) String() string { @@ -1069,7 +1078,7 @@ func (*SourceInfo_Extension) ProtoMessage() {} func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1106,21 +1115,18 @@ func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version { } type SourceInfo_Extension_Version struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` unknownFields protoimpl.UnknownFields - - Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` - Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SourceInfo_Extension_Version) Reset() { *x = SourceInfo_Extension_Version{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[14] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceInfo_Extension_Version) String() string { @@ -1131,7 +1137,7 @@ func (*SourceInfo_Extension_Version) ProtoMessage() {} func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1162,210 +1168,124 @@ func (x *SourceInfo_Extension_Version) GetMinor() int64 { var File_cel_expr_syntax_proto protoreflect.FileDescriptor -var file_cel_expr_syntax_proto_rawDesc = []byte{ - 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, - 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22, - 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, - 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, - 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, - 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x53, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78, - 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, - 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, - 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, - 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c, - 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09, - 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69, - 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 
0x73, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, - 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, - 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07, - 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, - 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09, - 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c, - 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, - 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e, - 
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, - 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, - 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab, - 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, - 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, - 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, - 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 
0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f, - 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70, - 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, - 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a, - 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, - 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65, - 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, - 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, - 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, - 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, - 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, - 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 
0x65, 0x6c, 0x2e, 0x65, - 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, - 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, - 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, - 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, - 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, - 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, - 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x01, 
0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, - 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06, - 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, - 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 
0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, - 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e, - 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61, - 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 
0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, - 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12, - 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, - 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43, - 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50, - 0x4f, 0x4e, 0x45, 0x4e, 
0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, - 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, - 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, - 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, - 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c, - 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79, - 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, - 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} +const file_cel_expr_syntax_proto_rawDesc = "" + + "\n" + + "\x15cel/expr/syntax.proto\x12\bcel.expr\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"g\n" + + "\n" + + "ParsedExpr\x12\"\n" + + "\x04expr\x18\x02 \x01(\v2\x0e.cel.expr.ExprR\x04expr\x125\n" + + "\vsource_info\x18\x03 \x01(\v2\x14.cel.expr.SourceInfoR\n" + + "sourceInfo\"\x9a\v\n" + + "\x04Expr\x12\x0e\n" + + "\x02id\x18\x02 \x01(\x03R\x02id\x123\n" + + "\n" + + "const_expr\x18\x03 \x01(\v2\x12.cel.expr.ConstantH\x00R\tconstExpr\x125\n" + + "\n" + + "ident_expr\x18\x04 \x01(\v2\x14.cel.expr.Expr.IdentH\x00R\tidentExpr\x128\n" + + "\vselect_expr\x18\x05 \x01(\v2\x15.cel.expr.Expr.SelectH\x00R\n" + + "selectExpr\x122\n" + + "\tcall_expr\x18\x06 \x01(\v2\x13.cel.expr.Expr.CallH\x00R\bcallExpr\x128\n" + + "\tlist_expr\x18\a \x01(\v2\x19.cel.expr.Expr.CreateListH\x00R\blistExpr\x12>\n" + + "\vstruct_expr\x18\b \x01(\v2\x1b.cel.expr.Expr.CreateStructH\x00R\n" + + "structExpr\x12M\n" + + "\x12comprehension_expr\x18\t 
\x01(\v2\x1c.cel.expr.Expr.ComprehensionH\x00R\x11comprehensionExpr\x1a\x1b\n" + + "\x05Ident\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x1ae\n" + + "\x06Select\x12(\n" + + "\aoperand\x18\x01 \x01(\v2\x0e.cel.expr.ExprR\aoperand\x12\x14\n" + + "\x05field\x18\x02 \x01(\tR\x05field\x12\x1b\n" + + "\ttest_only\x18\x03 \x01(\bR\btestOnly\x1an\n" + + "\x04Call\x12&\n" + + "\x06target\x18\x01 \x01(\v2\x0e.cel.expr.ExprR\x06target\x12\x1a\n" + + "\bfunction\x18\x02 \x01(\tR\bfunction\x12\"\n" + + "\x04args\x18\x03 \x03(\v2\x0e.cel.expr.ExprR\x04args\x1ac\n" + + "\n" + + "CreateList\x12*\n" + + "\belements\x18\x01 \x03(\v2\x0e.cel.expr.ExprR\belements\x12)\n" + + "\x10optional_indices\x18\x02 \x03(\x05R\x0foptionalIndices\x1a\xab\x02\n" + + "\fCreateStruct\x12!\n" + + "\fmessage_name\x18\x01 \x01(\tR\vmessageName\x12;\n" + + "\aentries\x18\x02 \x03(\v2!.cel.expr.Expr.CreateStruct.EntryR\aentries\x1a\xba\x01\n" + + "\x05Entry\x12\x0e\n" + + "\x02id\x18\x01 \x01(\x03R\x02id\x12\x1d\n" + + "\tfield_key\x18\x02 \x01(\tH\x00R\bfieldKey\x12)\n" + + "\amap_key\x18\x03 \x01(\v2\x0e.cel.expr.ExprH\x00R\x06mapKey\x12$\n" + + "\x05value\x18\x04 \x01(\v2\x0e.cel.expr.ExprR\x05value\x12%\n" + + "\x0eoptional_entry\x18\x05 \x01(\bR\roptionalEntryB\n" + + "\n" + + "\bkey_kind\x1a\xca\x02\n" + + "\rComprehension\x12\x19\n" + + "\biter_var\x18\x01 \x01(\tR\aiterVar\x12\x1b\n" + + "\titer_var2\x18\b \x01(\tR\biterVar2\x12-\n" + + "\n" + + "iter_range\x18\x02 \x01(\v2\x0e.cel.expr.ExprR\titerRange\x12\x19\n" + + "\baccu_var\x18\x03 \x01(\tR\aaccuVar\x12+\n" + + "\taccu_init\x18\x04 \x01(\v2\x0e.cel.expr.ExprR\baccuInit\x125\n" + + "\x0eloop_condition\x18\x05 \x01(\v2\x0e.cel.expr.ExprR\rloopCondition\x12+\n" + + "\tloop_step\x18\x06 \x01(\v2\x0e.cel.expr.ExprR\bloopStep\x12&\n" + + "\x06result\x18\a \x01(\v2\x0e.cel.expr.ExprR\x06resultB\v\n" + + "\texpr_kind\"\xc1\x03\n" + + "\bConstant\x12;\n" + + "\n" + + "null_value\x18\x01 
\x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12\x1f\n" + + "\n" + + "bool_value\x18\x02 \x01(\bH\x00R\tboolValue\x12!\n" + + "\vint64_value\x18\x03 \x01(\x03H\x00R\n" + + "int64Value\x12#\n" + + "\fuint64_value\x18\x04 \x01(\x04H\x00R\vuint64Value\x12#\n" + + "\fdouble_value\x18\x05 \x01(\x01H\x00R\vdoubleValue\x12#\n" + + "\fstring_value\x18\x06 \x01(\tH\x00R\vstringValue\x12!\n" + + "\vbytes_value\x18\a \x01(\fH\x00R\n" + + "bytesValue\x12F\n" + + "\x0eduration_value\x18\b \x01(\v2\x19.google.protobuf.DurationB\x02\x18\x01H\x00R\rdurationValue\x12I\n" + + "\x0ftimestamp_value\x18\t \x01(\v2\x1a.google.protobuf.TimestampB\x02\x18\x01H\x00R\x0etimestampValueB\x0f\n" + + "\rconstant_kind\"\xac\x06\n" + + "\n" + + "SourceInfo\x12%\n" + + "\x0esyntax_version\x18\x01 \x01(\tR\rsyntaxVersion\x12\x1a\n" + + "\blocation\x18\x02 \x01(\tR\blocation\x12!\n" + + "\fline_offsets\x18\x03 \x03(\x05R\vlineOffsets\x12A\n" + + "\tpositions\x18\x04 \x03(\v2#.cel.expr.SourceInfo.PositionsEntryR\tpositions\x12E\n" + + "\vmacro_calls\x18\x05 \x03(\v2$.cel.expr.SourceInfo.MacroCallsEntryR\n" + + "macroCalls\x12>\n" + + "\n" + + "extensions\x18\x06 \x03(\v2\x1e.cel.expr.SourceInfo.ExtensionR\n" + + "extensions\x1a<\n" + + "\x0ePositionsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x05R\x05value:\x028\x01\x1aM\n" + + "\x0fMacroCallsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12$\n" + + "\x05value\x18\x02 \x01(\v2\x0e.cel.expr.ExprR\x05value:\x028\x01\x1a\xe0\x02\n" + + "\tExtension\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12Y\n" + + "\x13affected_components\x18\x02 \x03(\x0e2(.cel.expr.SourceInfo.Extension.ComponentR\x12affectedComponents\x12@\n" + + "\aversion\x18\x03 \x01(\v2&.cel.expr.SourceInfo.Extension.VersionR\aversion\x1a5\n" + + "\aVersion\x12\x14\n" + + "\x05major\x18\x01 \x01(\x03R\x05major\x12\x14\n" + + "\x05minor\x18\x02 \x01(\x03R\x05minor\"o\n" + + "\tComponent\x12\x19\n" + + 
"\x15COMPONENT_UNSPECIFIED\x10\x00\x12\x14\n" + + "\x10COMPONENT_PARSER\x10\x01\x12\x1a\n" + + "\x16COMPONENT_TYPE_CHECKER\x10\x02\x12\x15\n" + + "\x11COMPONENT_RUNTIME\x10\x03B.\n" + + "\fdev.cel.exprB\vSyntaxProtoP\x01Z\fcel.dev/expr\xf8\x01\x01b\x06proto3" var ( file_cel_expr_syntax_proto_rawDescOnce sync.Once - file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc + file_cel_expr_syntax_proto_rawDescData []byte ) func file_cel_expr_syntax_proto_rawDescGZIP() []byte { file_cel_expr_syntax_proto_rawDescOnce.Do(func() { - file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData) + file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cel_expr_syntax_proto_rawDesc), len(file_cel_expr_syntax_proto_rawDesc))) }) return file_cel_expr_syntax_proto_rawDescData } var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15) -var file_cel_expr_syntax_proto_goTypes = []interface{}{ +var file_cel_expr_syntax_proto_goTypes = []any{ (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr (*Expr)(nil), // 2: cel.expr.Expr @@ -1429,165 +1349,7 @@ func file_cel_expr_syntax_proto_init() { if File_cel_expr_syntax_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParsedExpr); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Constant); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_Ident); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_Select); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_Call); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_CreateList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_CreateStruct); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_Comprehension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - 
case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_CreateStruct_Entry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceInfo_Extension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceInfo_Extension_Version); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []any{ (*Expr_ConstExpr)(nil), (*Expr_IdentExpr)(nil), (*Expr_SelectExpr)(nil), @@ -1596,7 +1358,7 @@ func file_cel_expr_syntax_proto_init() { (*Expr_StructExpr)(nil), (*Expr_ComprehensionExpr)(nil), } - file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []any{ (*Constant_NullValue)(nil), (*Constant_BoolValue)(nil), (*Constant_Int64Value)(nil), @@ -1607,7 +1369,7 @@ func file_cel_expr_syntax_proto_init() { (*Constant_DurationValue)(nil), (*Constant_TimestampValue)(nil), } - file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{ + file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []any{ (*Expr_CreateStruct_Entry_FieldKey)(nil), (*Expr_CreateStruct_Entry_MapKey)(nil), } @@ -1615,7 +1377,7 @@ func file_cel_expr_syntax_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: 
file_cel_expr_syntax_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_cel_expr_syntax_proto_rawDesc), len(file_cel_expr_syntax_proto_rawDesc)), NumEnums: 1, NumMessages: 15, NumExtensions: 0, @@ -1627,7 +1389,6 @@ func file_cel_expr_syntax_proto_init() { MessageInfos: file_cel_expr_syntax_proto_msgTypes, }.Build() File_cel_expr_syntax_proto = out.File - file_cel_expr_syntax_proto_rawDesc = nil file_cel_expr_syntax_proto_goTypes = nil file_cel_expr_syntax_proto_depIdxs = nil } diff --git a/vendor/cel.dev/expr/value.pb.go b/vendor/cel.dev/expr/value.pb.go index e5e29228c2..1f53a6a296 100644 --- a/vendor/cel.dev/expr/value.pb.go +++ b/vendor/cel.dev/expr/value.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.36.10 +// protoc v5.27.1 // source: cel/expr/value.proto package expr @@ -13,6 +13,7 @@ import ( structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -23,11 +24,8 @@ const ( ) type Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Kind: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Kind: // // *Value_NullValue // *Value_BoolValue @@ -41,16 +39,16 @@ type Value struct { // *Value_MapValue // *Value_ListValue // *Value_TypeValue - Kind isValue_Kind `protobuf_oneof:"kind"` + Kind isValue_Kind `protobuf_oneof:"kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Value) Reset() { *x = Value{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_value_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_value_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x 
*Value) String() string { @@ -61,7 +59,7 @@ func (*Value) ProtoMessage() {} func (x *Value) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_value_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -76,93 +74,117 @@ func (*Value) Descriptor() ([]byte, []int) { return file_cel_expr_value_proto_rawDescGZIP(), []int{0} } -func (m *Value) GetKind() isValue_Kind { - if m != nil { - return m.Kind +func (x *Value) GetKind() isValue_Kind { + if x != nil { + return x.Kind } return nil } func (x *Value) GetNullValue() structpb.NullValue { - if x, ok := x.GetKind().(*Value_NullValue); ok { - return x.NullValue + if x != nil { + if x, ok := x.Kind.(*Value_NullValue); ok { + return x.NullValue + } } return structpb.NullValue(0) } func (x *Value) GetBoolValue() bool { - if x, ok := x.GetKind().(*Value_BoolValue); ok { - return x.BoolValue + if x != nil { + if x, ok := x.Kind.(*Value_BoolValue); ok { + return x.BoolValue + } } return false } func (x *Value) GetInt64Value() int64 { - if x, ok := x.GetKind().(*Value_Int64Value); ok { - return x.Int64Value + if x != nil { + if x, ok := x.Kind.(*Value_Int64Value); ok { + return x.Int64Value + } } return 0 } func (x *Value) GetUint64Value() uint64 { - if x, ok := x.GetKind().(*Value_Uint64Value); ok { - return x.Uint64Value + if x != nil { + if x, ok := x.Kind.(*Value_Uint64Value); ok { + return x.Uint64Value + } } return 0 } func (x *Value) GetDoubleValue() float64 { - if x, ok := x.GetKind().(*Value_DoubleValue); ok { - return x.DoubleValue + if x != nil { + if x, ok := x.Kind.(*Value_DoubleValue); ok { + return x.DoubleValue + } } return 0 } func (x *Value) GetStringValue() string { - if x, ok := x.GetKind().(*Value_StringValue); ok { - return x.StringValue + if x != nil { + if x, ok := x.Kind.(*Value_StringValue); ok { + return x.StringValue + } } return "" } func (x *Value) 
GetBytesValue() []byte { - if x, ok := x.GetKind().(*Value_BytesValue); ok { - return x.BytesValue + if x != nil { + if x, ok := x.Kind.(*Value_BytesValue); ok { + return x.BytesValue + } } return nil } func (x *Value) GetEnumValue() *EnumValue { - if x, ok := x.GetKind().(*Value_EnumValue); ok { - return x.EnumValue + if x != nil { + if x, ok := x.Kind.(*Value_EnumValue); ok { + return x.EnumValue + } } return nil } func (x *Value) GetObjectValue() *anypb.Any { - if x, ok := x.GetKind().(*Value_ObjectValue); ok { - return x.ObjectValue + if x != nil { + if x, ok := x.Kind.(*Value_ObjectValue); ok { + return x.ObjectValue + } } return nil } func (x *Value) GetMapValue() *MapValue { - if x, ok := x.GetKind().(*Value_MapValue); ok { - return x.MapValue + if x != nil { + if x, ok := x.Kind.(*Value_MapValue); ok { + return x.MapValue + } } return nil } func (x *Value) GetListValue() *ListValue { - if x, ok := x.GetKind().(*Value_ListValue); ok { - return x.ListValue + if x != nil { + if x, ok := x.Kind.(*Value_ListValue); ok { + return x.ListValue + } } return nil } func (x *Value) GetTypeValue() string { - if x, ok := x.GetKind().(*Value_TypeValue); ok { - return x.TypeValue + if x != nil { + if x, ok := x.Kind.(*Value_TypeValue); ok { + return x.TypeValue + } } return "" } @@ -244,21 +266,18 @@ func (*Value_ListValue) isValue_Kind() {} func (*Value_TypeValue) isValue_Kind() {} type EnumValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EnumValue) Reset() { *x = EnumValue{} - if 
protoimpl.UnsafeEnabled { - mi := &file_cel_expr_value_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_value_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValue) String() string { @@ -269,7 +288,7 @@ func (*EnumValue) ProtoMessage() {} func (x *EnumValue) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_value_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -299,20 +318,17 @@ func (x *EnumValue) GetValue() int32 { } type ListValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` unknownFields protoimpl.UnknownFields - - Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ListValue) Reset() { *x = ListValue{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_value_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_value_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListValue) String() string { @@ -323,7 +339,7 @@ func (*ListValue) ProtoMessage() {} func (x *ListValue) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_value_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -346,20 +362,17 @@ func (x *ListValue) GetValues() []*Value { } type MapValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state 
protoimpl.MessageState `protogen:"open.v1"` + Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` unknownFields protoimpl.UnknownFields - - Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + sizeCache protoimpl.SizeCache } func (x *MapValue) Reset() { *x = MapValue{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_value_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_value_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MapValue) String() string { @@ -370,7 +383,7 @@ func (*MapValue) ProtoMessage() {} func (x *MapValue) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_value_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -393,21 +406,18 @@ func (x *MapValue) GetEntries() []*MapValue_Entry { } type MapValue_Entry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *MapValue_Entry) Reset() { *x = MapValue_Entry{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_value_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_value_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x 
*MapValue_Entry) String() string { @@ -418,7 +428,7 @@ func (*MapValue_Entry) ProtoMessage() {} func (x *MapValue_Entry) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_value_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -449,83 +459,58 @@ func (x *MapValue_Entry) GetValue() *Value { var File_cel_expr_value_proto protoreflect.FileDescriptor -var file_cel_expr_value_proto_rawDesc = []byte{ - 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, - 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 
0x36, 0x34, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69, - 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75, - 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, - 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, - 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, - 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, - 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69, - 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x63, 
0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75, - 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, - 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, - 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 
0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65, - 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, - 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} +const file_cel_expr_value_proto_rawDesc = "" + + "\n" + + "\x14cel/expr/value.proto\x12\bcel.expr\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x9d\x04\n" + + "\x05Value\x12;\n" + + "\n" + + "null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12\x1f\n" + + "\n" + + "bool_value\x18\x02 \x01(\bH\x00R\tboolValue\x12!\n" + + "\vint64_value\x18\x03 \x01(\x03H\x00R\n" + + "int64Value\x12#\n" + + "\fuint64_value\x18\x04 \x01(\x04H\x00R\vuint64Value\x12#\n" + + "\fdouble_value\x18\x05 \x01(\x01H\x00R\vdoubleValue\x12#\n" + + "\fstring_value\x18\x06 \x01(\tH\x00R\vstringValue\x12!\n" + + "\vbytes_value\x18\a \x01(\fH\x00R\n" + + "bytesValue\x124\n" + + "\n" + + "enum_value\x18\t \x01(\v2\x13.cel.expr.EnumValueH\x00R\tenumValue\x129\n" + + "\fobject_value\x18\n" + + " \x01(\v2\x14.google.protobuf.AnyH\x00R\vobjectValue\x121\n" + + "\tmap_value\x18\v \x01(\v2\x12.cel.expr.MapValueH\x00R\bmapValue\x124\n" + + "\n" + + "list_value\x18\f \x01(\v2\x13.cel.expr.ListValueH\x00R\tlistValue\x12\x1f\n" + + "\n" + + "type_value\x18\x0f \x01(\tH\x00R\ttypeValueB\x06\n" + + "\x04kind\"5\n" + + "\tEnumValue\x12\x12\n" + + "\x04type\x18\x01 \x01(\tR\x04type\x12\x14\n" + + "\x05value\x18\x02 \x01(\x05R\x05value\"4\n" + + "\tListValue\x12'\n" + + "\x06values\x18\x01 \x03(\v2\x0f.cel.expr.ValueR\x06values\"\x91\x01\n" + + "\bMapValue\x122\n" + + "\aentries\x18\x01 \x03(\v2\x18.cel.expr.MapValue.EntryR\aentries\x1aQ\n" + + "\x05Entry\x12!\n" + + "\x03key\x18\x01 \x01(\v2\x0f.cel.expr.ValueR\x03key\x12%\n" + + "\x05value\x18\x02 \x01(\v2\x0f.cel.expr.ValueR\x05valueB-\n" + + "\fdev.cel.exprB\n" + 
+ "ValueProtoP\x01Z\fcel.dev/expr\xf8\x01\x01b\x06proto3" var ( file_cel_expr_value_proto_rawDescOnce sync.Once - file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc + file_cel_expr_value_proto_rawDescData []byte ) func file_cel_expr_value_proto_rawDescGZIP() []byte { file_cel_expr_value_proto_rawDescOnce.Do(func() { - file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData) + file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cel_expr_value_proto_rawDesc), len(file_cel_expr_value_proto_rawDesc))) }) return file_cel_expr_value_proto_rawDescData } var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_cel_expr_value_proto_goTypes = []interface{}{ +var file_cel_expr_value_proto_goTypes = []any{ (*Value)(nil), // 0: cel.expr.Value (*EnumValue)(nil), // 1: cel.expr.EnumValue (*ListValue)(nil), // 2: cel.expr.ListValue @@ -556,69 +541,7 @@ func file_cel_expr_value_proto_init() { if File_cel_expr_value_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MapValue); i { - 
case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MapValue_Entry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_BoolValue)(nil), (*Value_Int64Value)(nil), @@ -636,7 +559,7 @@ func file_cel_expr_value_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_cel_expr_value_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_cel_expr_value_proto_rawDesc), len(file_cel_expr_value_proto_rawDesc)), NumEnums: 0, NumMessages: 5, NumExtensions: 0, @@ -647,7 +570,6 @@ func file_cel_expr_value_proto_init() { MessageInfos: file_cel_expr_value_proto_msgTypes, }.Build() File_cel_expr_value_proto = out.File - file_cel_expr_value_proto_rawDesc = nil file_cel_expr_value_proto_goTypes = nil file_cel_expr_value_proto_depIdxs = nil } diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index 36c7db49c5..38014c4fb2 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,24 @@ # Changes +## [0.19.0](https://github.com/googleapis/google-cloud-go/releases/tag/auth%2Fv0.19.0) (2026-03-23) + +### Features + +* add OpenTelemetry gRPC and HTTP wrappers for T4 tracing (#14133) ([d38abf9](https://github.com/googleapis/google-cloud-go/commit/d38abf988d4017b4832434abae9a90874bec5ce9)) + +## [0.18.2](https://github.com/googleapis/google-cloud-go/releases/tag/auth%2Fv0.18.2) (2026-02-13) + +### Bug Fixes + +* fixes gdch credentials logic (#13741) 
([f82cda5](https://github.com/googleapis/google-cloud-go/commit/f82cda58bd9885b7b8a9d8b15126f5a1e0add0dc)) + +## [0.18.1](https://github.com/googleapis/google-cloud-go/releases/tag/auth%2Fv0.18.1) (2026-01-21) + +### Bug Fixes + +* add InternalOptions.TelemetryAttributes for internal client use (#13641) ([3876978](https://github.com/googleapis/google-cloud-go/commit/38769789755ed47d85e85dcd56596109de65f780)) +* remove singleton and restore normal usage of otelgrpc.clientHandler (#13522) ([673d4b0](https://github.com/googleapis/google-cloud-go/commit/673d4b05617f833aa433f7f6a350b5cb888ea20d)) + ## [0.18.0](https://github.com/googleapis/google-cloud-go/releases/tag/auth%2Fv0.18.0) (2025-12-15) ### Features diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go index c2d320fdf4..19a2051411 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go @@ -15,6 +15,7 @@ package gdch import ( + "bytes" "context" "crypto" "crypto/tls" @@ -24,9 +25,7 @@ import ( "fmt" "log/slog" "net/http" - "net/url" "os" - "strings" "time" "cloud.google.com/go/auth" @@ -121,27 +120,34 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { Exp: exp.Unix(), } h := jwt.Header{ - Algorithm: jwt.HeaderAlgRSA256, + Algorithm: jwt.HeaderAlgES256, Type: jwt.HeaderType, - KeyID: string(g.pkID), + KeyID: g.pkID, } payload, err := jwt.EncodeJWS(&h, &claims, g.signer) if err != nil { return nil, err } - v := url.Values{} - v.Set("grant_type", GrantType) - v.Set("audience", g.aud) - v.Set("requested_token_type", requestTokenType) - v.Set("subject_token", payload) - v.Set("subject_token_type", subjectTokenType) - req, err := http.NewRequestWithContext(ctx, "POST", g.tokenURL, strings.NewReader(v.Encode())) + v := map[string]string{ + "grant_type": GrantType, + "audience": g.aud, + "requested_token_type": 
requestTokenType, + "subject_token": payload, + "subject_token_type": subjectTokenType, + } + + r, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("credentials: cannot marshal token request: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "POST", g.tokenURL, bytes.NewReader(r)) if err != nil { return nil, err } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) + req.Header.Set("Content-Type", "application/json") + g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, r)) resp, body, err := internal.DoRequest(g.client, req) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) @@ -188,4 +194,5 @@ func addCertToTransport(hc *http.Client, certPool *x509.CertPool) { trans.TLSClientConfig = &tls.Config{ RootCAs: certPool, } + hc.Transport = trans } diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index c9126535d7..bd693907f9 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -168,6 +168,15 @@ type InternalOptions struct { // for the credentials. It should only be used internally for clients that // need more control over their transport. The default is false. SkipUniverseDomainValidation bool + // TelemetryAttributes specifies a map of telemetry attributes to be added + // to all OpenTelemetry signals, such as tracing and metrics, for purposes + // including representing the static identity of the client (e.g., service + // name, version). These attributes are expected to be consistent across all + // signals to enable cross-signal correlation. + // + // It should only be used internally by generated clients. 
Callers should not + // modify the map after it is passed in. + TelemetryAttributes map[string]string } // AddAuthorizationMiddleware adds a middleware to the provided client's diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go index 3feb997c76..2ece1ea360 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -17,9 +17,12 @@ package httptransport import ( "context" "crypto/tls" + "errors" + "fmt" "net" "net/http" "os" + "strconv" "time" "cloud.google.com/go/auth" @@ -28,7 +31,11 @@ import ( "cloud.google.com/go/auth/internal/transport" "cloud.google.com/go/auth/internal/transport/cert" "cloud.google.com/go/auth/internal/transport/headers" + "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/callctx" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "golang.org/x/net/http2" ) @@ -173,7 +180,81 @@ func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.Roun if opts.DisableTelemetry { return trans } - return otelhttp.NewTransport(trans) + if !gax.IsFeatureEnabled("TRACING") { + return otelhttp.NewTransport(trans) + } + var staticAttrs []attribute.KeyValue + if opts.InternalOptions != nil { + staticAttrs = transport.StaticTelemetryAttributes(opts.InternalOptions.TelemetryAttributes) + } + otelOpts := []otelhttp.Option{ + otelhttp.WithSpanOptions(trace.WithAttributes(staticAttrs...)), + } + return otelhttp.NewTransport(&otelAttributeTransport{ + base: trans, + }, otelOpts...) +} + +// otelAttributeTransport is a wrapper around an http.RoundTripper that adds +// custom Google Cloud-specific attributes to OpenTelemetry spans. 
+type otelAttributeTransport struct { + base http.RoundTripper +} + +// RoundTrip intercepts the HTTP request and response to enrich the active +// OpenTelemetry span with static and dynamic attributes, as well as detailed +// error information. +func (t *otelAttributeTransport) RoundTrip(req *http.Request) (*http.Response, error) { + span := trace.SpanFromContext(req.Context()) + if span.IsRecording() { + var attrs []attribute.KeyValue + attrs = append(attrs, attribute.String("rpc.system.name", "http")) + if resName, ok := callctx.TelemetryFromContext(req.Context(), "resource_name"); ok { + attrs = append(attrs, attribute.String("gcp.resource.destination.id", resName)) + } + if resendCountStr, ok := callctx.TelemetryFromContext(req.Context(), "resend_count"); ok { + if count, err := strconv.Atoi(resendCountStr); err == nil { + attrs = append(attrs, attribute.Int("http.request.resend_count", count)) + } + } + if urlTemplate, ok := callctx.TelemetryFromContext(req.Context(), "url_template"); ok { + attrs = append(attrs, attribute.String("url.template", urlTemplate)) + span.SetName(fmt.Sprintf("%s %s", req.Method, urlTemplate)) + } + span.SetAttributes(attrs...) 
+ } + + resp, err := t.base.RoundTrip(req) + + if span.IsRecording() { + if err != nil { + var errorType string + switch { + case errors.Is(err, context.DeadlineExceeded): + errorType = "CLIENT_TIMEOUT" + case errors.Is(err, context.Canceled): + errorType = "CLIENT_CANCELLED" + default: + errorType = "CLIENT_CONNECTION_ERROR" + } + span.SetAttributes( + attribute.String("error.type", errorType), + attribute.String("status.message", err.Error()), + attribute.String("exception.type", fmt.Sprintf("%T", err)), + ) + } else { + span.SetAttributes(attribute.Int("http.response.status_code", resp.StatusCode)) + if resp.StatusCode >= 400 { + errorType := strconv.Itoa(resp.StatusCode) + span.SetAttributes( + attribute.String("error.type", errorType), + attribute.String("status.message", resp.Status), + ) + } + } + } + + return resp, err } type authTransport struct { diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go index 9bd55f510c..027c0dae05 100644 --- a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go +++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go @@ -17,13 +17,16 @@ package jwt import ( "bytes" "crypto" + "crypto/ecdsa" "crypto/rand" "crypto/rsa" "crypto/sha256" + "encoding/asn1" "encoding/base64" "encoding/json" "errors" "fmt" + "math/big" "strings" "time" ) @@ -35,6 +38,8 @@ const ( HeaderAlgES256 = "ES256" // HeaderType is the standard [Header.Type]. HeaderType = "JWT" + // ES256 key size + es256KeySize = 32 ) // Header represents a JWT header. 
@@ -127,6 +132,22 @@ func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error) if err != nil { return "", err } + + if header.Algorithm == HeaderAlgES256 { + var ecSig struct { + R, S *big.Int + } + if _, err := asn1.Unmarshal(sig, &ecSig); err != nil { + return "", err + } + + rawSig := make([]byte, es256KeySize*2) + + ecSig.R.FillBytes(rawSig[:es256KeySize]) + ecSig.S.FillBytes(rawSig[es256KeySize:]) + + sig = rawSig + } return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil } @@ -153,7 +174,7 @@ func DecodeJWS(payload string) (*Claims, error) { // VerifyJWS tests whether the provided JWT token's signature was produced by // the private key associated with the provided public key. -func VerifyJWS(token string, key *rsa.PublicKey) error { +func VerifyJWS(token string, key crypto.PublicKey) error { parts := strings.Split(token, ".") if len(parts) != 3 { return errors.New("jwt: invalid token received, token must have 3 parts") @@ -167,5 +188,21 @@ func VerifyJWS(token string, key *rsa.PublicKey) error { h := sha256.New() h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) + hashed := h.Sum(nil) + + switch pub := key.(type) { + case *rsa.PublicKey: + return rsa.VerifyPKCS1v15(pub, crypto.SHA256, hashed, signatureString) + case *ecdsa.PublicKey: + if len(signatureString) != 2*32 { + return fmt.Errorf("jwt: ecdsa signature size should be 64 bytes, got %d", len(signatureString)) + } + r := new(big.Int).SetBytes(signatureString[:32]) + s := new(big.Int).SetBytes(signatureString[32:]) + if !ecdsa.Verify(pub, hashed, r, s) { + return errors.New("jwt: ecdsa signature verification failed") + } + return nil + } + return fmt.Errorf("jwt: unsupported public key type: %T", key) } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go index 5c8721efa9..fb0a8a1e07 100644 --- 
a/vendor/cloud.google.com/go/auth/internal/transport/transport.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -24,8 +24,38 @@ import ( "time" "cloud.google.com/go/auth/credentials" + "go.opentelemetry.io/otel/attribute" ) +// knownKeys provides keys for reading telemetry attributes from Context. +// It provides an implicit contract with generated client library code +// using the same keys. The keys in this collection should not be removed +// or modified. New keys may be added, but they will need to be explicitly +// used in code referencing this collection in order to appear in telemetry. +var knownKeys = []string{ + "gcp.client.service", + "gcp.client.version", + "gcp.client.repo", + "gcp.client.artifact", + "gcp.client.language", + "url.domain", +} + +// StaticTelemetryAttributes selectively converts known keys from a map of +// strings to Open Telemetry attributes. +func StaticTelemetryAttributes(m map[string]string) []attribute.KeyValue { + var staticAttrs []attribute.KeyValue + if m == nil { + return staticAttrs + } + for _, k := range knownKeys { + if v, ok := m[k]; ok { + staticAttrs = append(staticAttrs, attribute.String(k, v)) + } + } + return staticAttrs +} + // CloneDetectOptions clones a user set detect option into some new memory that // we can internally manipulate before sending onto the detect package. func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOptions { diff --git a/vendor/cloud.google.com/go/auth/internal/version.go b/vendor/cloud.google.com/go/auth/internal/version.go index 702a6840d4..fb1c45739c 100644 --- a/vendor/cloud.google.com/go/auth/internal/version.go +++ b/vendor/cloud.google.com/go/auth/internal/version.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,4 +17,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.18.0" +const Version = "0.19.0" diff --git a/vendor/connectrpc.com/connect/.gitignore b/vendor/connectrpc.com/connect/.gitignore new file mode 100644 index 0000000000..355314397c --- /dev/null +++ b/vendor/connectrpc.com/connect/.gitignore @@ -0,0 +1,5 @@ +/.tmp/ +*.pprof +*.svg +cover.out +connect.test diff --git a/vendor/connectrpc.com/connect/.golangci.yml b/vendor/connectrpc.com/connect/.golangci.yml new file mode 100644 index 0000000000..15eed4409f --- /dev/null +++ b/vendor/connectrpc.com/connect/.golangci.yml @@ -0,0 +1,130 @@ +linters-settings: + errcheck: + check-type-assertions: true + exhaustruct: + include: + # No zero values for param structs. + - 'connectrpc\.com/connect\..*[pP]arams' + forbidigo: + forbid: + - '^fmt\.Print' + - '^log\.' + - '^print$' + - '^println$' + - '^panic$' + godox: + # TODO, OPT, etc. comments are fine to commit. Use FIXME comments for + # temporary hacks, and use godox to prevent committing them. 
+ keywords: [FIXME] + importas: + no-unaliased: true + alias: + - pkg: connectrpc.com/connect/internal/gen/connect/ping/v1 + alias: pingv1 + varnamelen: + ignore-decls: + - T any + - i int + - wg sync.WaitGroup +linters: + enable-all: true + disable: + - cyclop # covered by gocyclo + - depguard # unnecessary for small libraries + - funlen # rely on code review to limit function length + - gocognit # dubious "cognitive overhead" quantification + - gofumpt # prefer standard gofmt + - goimports # rely on gci instead + - inamedparam # convention is not followed + - ireturn # "accept interfaces, return structs" isn't ironclad + - lll # don't want hard limits for line length + - maintidx # covered by gocyclo + - mnd # status codes are clearer than constants + - nlreturn # generous whitespace violates house style + - nonamedreturns # named returns are fine; it's *bare* returns that are bad + - protogetter # too many false positives + - tenv # replaced by usetesting + - testpackage # internal tests are fine + - wrapcheck # don't _always_ need to wrap errors + - wsl # generous whitespace violates house style +issues: + exclude-dirs-use-default: false + + exclude: + # Don't ban use of fmt.Errorf to create new errors, but the remaining + # checks from err113 are useful. + - "do not define dynamic errors, use wrapped static errors instead: .*" + + exclude-rules: + # If future reflect.Kinds are nil-able, we'll find out when a test fails. + - linters: [exhaustive] + path: internal/assert/assert.go + # We need our duplex HTTP call to have access to the context. + - linters: [containedctx] + path: duplex_http_call.go + # We need to init a global in-mem HTTP server for testable examples. + - linters: [gochecknoinits, gochecknoglobals] + path: example_init_test.go + # We purposefully do an ineffectual assignment for an example. + - linters: [ineffassign] + path: client_example_test.go + # The generated file is effectively a global receiver. 
+ - linters: [varnamelen] + path: cmd/protoc-gen-connect-go + text: "parameter name 'g' is too short" + # Thorough error logging and timeout config make this example unreadably long. + - linters: [errcheck, gosec] + path: error_writer_example_test.go + # It should be crystal clear that Connect uses plain *http.Clients. + - linters: [revive, stylecheck] + path: client_example_test.go + # Don't complain about timeout management or lack of output assertions in examples. + - linters: [gosec, testableexamples] + path: handler_example_test.go + # No output assertions needed for these examples. + - linters: [testableexamples] + path: error_writer_example_test.go + - linters: [testableexamples] + path: error_not_modified_example_test.go + - linters: [testableexamples] + path: error_example_test.go + # In examples, it's okay to use http.ListenAndServe. + - linters: [gosec] + path: error_not_modified_example_test.go + # There are many instances where we want to keep unused parameters + # as a matter of style or convention, for example when a context.Context + # is the first parameter, we choose to just globally ignore this. 
+ - linters: [revive] + text: "^unused-parameter: " + # We want to return explicit nils in protocol_grpc.go + - linters: [revive] + text: "^if-return: " + path: protocol_grpc.go + # We want to return explicit nils in protocol_connect.go + - linters: [revive] + text: "^if-return: " + path: protocol_connect.go + # We want to return explicit nils in error_writer.go + - linters: [revive] + text: "^if-return: " + path: error_writer.go + # We want to set http.Server's logger + - linters: [forbidigo] + path: internal/memhttp + text: "use of `log.(New|Logger|Lshortfile)` forbidden by pattern .*" + # We want to show examples with http.Get + - linters: [noctx] + path: internal/memhttp/memhttp_test.go + # Allow fmt.Sprintf for cmd/protoc-gen-connect-go for consistency + - linters: [perfsprint] + path: cmd/protoc-gen-connect-go/main.go + # Allow non-canonical headers in tests + - linters: [canonicalheader] + path: '.*_test.go' + # Allow Code pointer receiver for UnmarshalText method + - linters: [recvcheck] + path: code.go + # Avoid false positives for int overflow in tests + - linters: [gosec] + text: "^G115: integer overflow conversion" + path: ".*_test.go" diff --git a/vendor/connectrpc.com/connect/LICENSE b/vendor/connectrpc.com/connect/LICENSE new file mode 100644 index 0000000000..62b825afba --- /dev/null +++ b/vendor/connectrpc.com/connect/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2021-2025 The Connect Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/connectrpc.com/connect/MAINTAINERS.md b/vendor/connectrpc.com/connect/MAINTAINERS.md new file mode 100644 index 0000000000..0b7495c704 --- /dev/null +++ b/vendor/connectrpc.com/connect/MAINTAINERS.md @@ -0,0 +1,12 @@ +Maintainers +=========== + +## Current +* [Peter Edge](https://github.com/bufdev), [Buf](https://buf.build) +* [Josh Humphries](https://github.com/jhump), [Buf](https://buf.build) +* [Matt Robenolt](https://github.com/mattrobenolt), [PlanetScale](https://planetscale.com) +* [Edward McFarlane](https://github.com/emcfarlane), [Buf](https://buf.build) + +## Former +* [Akshay Shah](https://github.com/akshayjshah) +* [Alex McKinney](https://github.com/amckinney) diff --git a/vendor/connectrpc.com/connect/Makefile b/vendor/connectrpc.com/connect/Makefile new file mode 100644 index 0000000000..8cba3fedfe --- /dev/null +++ b/vendor/connectrpc.com/connect/Makefile @@ -0,0 +1,121 @@ +# See https://tech.davis-hansson.com/p/make/ +SHELL := bash +.DELETE_ON_ERROR: +.SHELLFLAGS := -eu -o pipefail -c +.DEFAULT_GOAL := all +MAKEFLAGS += --warn-undefined-variables +MAKEFLAGS += --no-builtin-rules +MAKEFLAGS += --no-print-directory +BIN := .tmp/bin +export PATH := $(abspath $(BIN)):$(PATH) +export GOBIN := $(abspath $(BIN)) +COPYRIGHT_YEARS := 2021-2025 +LICENSE_IGNORE := --ignore /testdata/ +BUF_VERSION := 1.50.1 + +.PHONY: help +help: ## Describe useful make targets + 
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "%-30s %s\n", $$1, $$2}' + +.PHONY: all +all: ## Build, test, and lint (default) + $(MAKE) test + $(MAKE) lint + +.PHONY: clean +clean: ## Delete intermediate build artifacts + @# -X only removes untracked files, -d recurses into directories, -f actually removes files/dirs + git clean -Xdf + +.PHONY: test +test: shorttest slowtest + +.PHONY: shorttest +shorttest: build ## Run unit tests + go test -vet=off -race -cover -short ./... + +.PHONY: slowtest +# Runs all tests, including known long/slow ones. The +# race detector is not used for a few reasons: +# 1. Race coverage of the short tests should be +# adequate to catch race conditions. +# 2. It slows tests down, which is not good if we +# know these are already slow tests. +# 3. Some of the slow tests can't repro issues and +# find regressions as reliably with the race +# detector enabled. +slowtest: build + go test ./... + +.PHONY: runconformance +runconformance: build ## Run conformance test suite + cd internal/conformance && ./runconformance.sh + +.PHONY: bench +bench: BENCH ?= .* +bench: build ## Run benchmarks for root package + go test -vet=off -run '^$$' -bench '$(BENCH)' -benchmem -cpuprofile cpu.pprof -memprofile mem.pprof . + +.PHONY: build +build: generate ## Build all packages + go build ./... + +.PHONY: install +install: ## Install all binaries + go install ./... + +.PHONY: lint +lint: $(BIN)/golangci-lint $(BIN)/buf ## Lint Go and protobuf + go vet ./... 
+ golangci-lint run --modules-download-mode=readonly --timeout=3m0s + buf lint + buf format -d --exit-code + +.PHONY: lintfix +lintfix: $(BIN)/golangci-lint $(BIN)/buf ## Automatically fix some lint errors + golangci-lint run --fix --modules-download-mode=readonly --timeout=3m0s + buf format -w + +.PHONY: generate +generate: $(BIN)/buf $(BIN)/protoc-gen-go $(BIN)/protoc-gen-connect-go $(BIN)/license-header ## Regenerate code and licenses + go mod tidy + cd ./internal/conformance && go mod tidy + buf generate + cd ./cmd/protoc-gen-connect-go/internal && \ + find ./testdata -maxdepth 1 -type d \( ! -name testdata \) | xargs -n 1 -I % bash -c "cd '%' && buf generate" + license-header \ + --license-type apache \ + --copyright-holder "The Connect Authors" \ + --year-range "$(COPYRIGHT_YEARS)" $(LICENSE_IGNORE) + +.PHONY: upgrade +upgrade: ## Upgrade dependencies + go get -u -t ./... && go mod tidy -v + +.PHONY: checkgenerate +checkgenerate: + @# Used in CI to verify that `make generate` doesn't produce a diff. 
+ test -z "$$(git status --porcelain | tee /dev/stderr)" + +.PHONY: $(BIN)/protoc-gen-connect-go +$(BIN)/protoc-gen-connect-go: + @mkdir -p $(@D) + go build -o $(@) ./cmd/protoc-gen-connect-go + +$(BIN)/buf: Makefile + @mkdir -p $(@D) + go install github.com/bufbuild/buf/cmd/buf@v${BUF_VERSION} + +$(BIN)/license-header: Makefile + @mkdir -p $(@D) + go install github.com/bufbuild/buf/private/pkg/licenseheader/cmd/license-header@v${BUF_VERSION} + +$(BIN)/golangci-lint: Makefile + @mkdir -p $(@D) + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.64.7 + +$(BIN)/protoc-gen-go: Makefile go.mod + @mkdir -p $(@D) + @# The version of protoc-gen-go is determined by the version in go.mod + go install google.golang.org/protobuf/cmd/protoc-gen-go + diff --git a/vendor/connectrpc.com/connect/README.md b/vendor/connectrpc.com/connect/README.md new file mode 100644 index 0000000000..57f4ef6329 --- /dev/null +++ b/vendor/connectrpc.com/connect/README.md @@ -0,0 +1,184 @@ +Connect +======= + +[![Build](https://github.com/connectrpc/connect-go/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/connectrpc/connect-go/actions/workflows/ci.yaml) +[![Report Card](https://goreportcard.com/badge/connectrpc.com/connect)](https://goreportcard.com/report/connectrpc.com/connect) +[![GoDoc](https://pkg.go.dev/badge/connectrpc.com/connect.svg)](https://pkg.go.dev/connectrpc.com/connect) +[![Slack](https://img.shields.io/badge/slack-buf-%23e01563)][slack] +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8972/badge)](https://www.bestpractices.dev/projects/8972) + +Connect is a slim library for building browser and gRPC-compatible HTTP APIs. +You write a short [Protocol Buffer][protobuf] schema and implement your +application logic, and Connect generates code to handle marshaling, routing, +compression, and content type negotiation. It also generates an idiomatic, +type-safe client. 
Handlers and clients support three protocols: gRPC, gRPC-Web, +and Connect's own protocol. + +The [Connect protocol][protocol] is a simple protocol that works over HTTP/1.1 +or HTTP/2. It takes the best portions of gRPC and gRPC-Web, including +streaming, and packages them into a protocol that works equally well in +browsers, monoliths, and microservices. Calling a Connect API is as easy as +using `curl`. Try it with our live demo: + +``` +curl \ + --header "Content-Type: application/json" \ + --data '{"sentence": "I feel happy."}' \ + https://demo.connectrpc.com/connectrpc.eliza.v1.ElizaService/Say +``` + +Handlers and clients also support the gRPC and gRPC-Web protocols, including +streaming, headers, trailers, and error details. gRPC-compatible [server +reflection][grpcreflect] and [health checks][grpchealth] are available as +standalone packages. Instead of cURL, we could call our API with a gRPC client: + +``` +go install github.com/bufbuild/buf/cmd/buf@latest +buf curl --protocol grpc \ + --data '{"sentence": "I feel happy."}' \ + https://demo.connectrpc.com/connectrpc.eliza.v1.ElizaService/Say +``` + +Under the hood, Connect is just [Protocol Buffers][protobuf] and the standard +library: no custom HTTP implementation, no new name resolution or load +balancing APIs, and no surprises. Everything you already know about `net/http` +still applies, and any package that works with an `http.Server`, `http.Client`, +or `http.Handler` also works with Connect. + +For more on Connect, see the [announcement blog post][blog], the documentation +on [connectrpc.com][docs] (especially the [Getting Started] guide for Go), the +[demo service][examples-go], or the [protocol specification][protocol]. + +## A small example + +Curious what all this looks like in practice? From a [Protobuf +schema](internal/proto/connect/ping/v1/ping.proto), we generate [a small RPC +package](internal/gen/simple/connect/ping/v1/pingv1connect/ping.connect.go). 
Using that +package, we can build a server. This example is available at [internal/example](internal/example): + +```go +package main + +import ( + "context" + "log" + "net/http" + + "connectrpc.com/connect" + pingv1 "connectrpc.com/connect/internal/gen/connect/ping/v1" + "connectrpc.com/connect/internal/gen/simple/connect/ping/v1/pingv1connect" + "connectrpc.com/validate" +) + +type PingServer struct { + pingv1connect.UnimplementedPingServiceHandler // returns errors from all methods +} + +func (ps *PingServer) Ping(ctx context.Context, req *pingv1.PingRequest) (*pingv1.PingResponse, error) { + return &pingv1.PingResponse{ + Number: req.Number, + }, nil +} + +func main() { + mux := http.NewServeMux() + // The generated constructors return a path and a plain net/http + // handler. + mux.Handle( + pingv1connect.NewPingServiceHandler( + &PingServer{}, + // Validation via Protovalidate is almost always recommended + connect.WithInterceptors(validate.NewInterceptor()), + ), + ) + p := new(http.Protocols) + p.SetHTTP1(true) + // For gRPC clients, it's convenient to support HTTP/2 without TLS. + p.SetUnencryptedHTTP2(true) + s := &http.Server{ + Addr: "localhost:8080", + Handler: mux, + Protocols: p, + } + if err := s.ListenAndServe(); err != nil { + log.Fatalf("listen failed: %v", err) + } +} +``` + +With that server running, you can make requests with any gRPC or Connect +client. 
To write a client using Connect: + +```go +package main + +import ( + "context" + "log" + "net/http" + + pingv1 "connectrpc.com/connect/internal/gen/connect/ping/v1" + "connectrpc.com/connect/internal/gen/simple/connect/ping/v1/pingv1connect" +) + +func main() { + client := pingv1connect.NewPingServiceClient( + http.DefaultClient, + "http://localhost:8080/", + ) + req := &pingv1.PingRequest{Number: 42} + res, err := client.Ping(context.Background(), req) + if err != nil { + log.Fatalln(err) + } + log.Println(res) +} +``` + +Of course, `http.ListenAndServe` and `http.DefaultClient` aren't fit for +production use! See Connect's [deployment docs][docs-deployment] for a guide to +configuring timeouts, connection pools, observability, and h2c. + +## Ecosystem + +* [grpchealth]: gRPC-compatible health checks for connect-go +* [grpcreflect]: gRPC-compatible server reflection for connect-go +* [validate]: [Protovalidate][protovalidate] interceptor for connect-go +* [examples-go]: service powering [demo.connectrpc.com](https://demo.connectrpc.com), including bidi streaming +* [connect-es]: Type-safe APIs with Protobuf and TypeScript +* [Buf Studio]: web UI for ad-hoc RPCs +* [conformance]: Connect, gRPC, and gRPC-Web interoperability tests + +## Status: Stable + +This module is stable. It supports: + +* The two most recent major releases of Go (the same versions of Go that continue + to [receive security patches][go-support-policy]). +* [APIv2] of Protocol Buffers in Go (`google.golang.org/protobuf`). + +Within those parameters, `connect` follows semantic versioning. We will +_not_ make breaking changes in the 1.x series of releases. + +## Legal + +Offered under the [Apache 2 license][license]. 
+ +[APIv2]: https://blog.golang.org/protobuf-apiv2 +[Buf Studio]: https://buf.build/studio +[Getting Started]: https://connectrpc.com/docs/go/getting-started +[blog]: https://buf.build/blog/connect-a-better-grpc +[conformance]: https://github.com/connectrpc/conformance +[grpchealth]: https://github.com/connectrpc/grpchealth-go +[grpcreflect]: https://github.com/connectrpc/grpcreflect-go +[connect-es]: https://github.com/connectrpc/connect-es +[examples-go]: https://github.com/connectrpc/examples-go +[docs-deployment]: https://connectrpc.com/docs/go/deployment +[docs]: https://connectrpc.com +[go-support-policy]: https://golang.org/doc/devel/release#policy +[license]: https://github.com/connectrpc/connect-go/blob/main/LICENSE +[protobuf]: https://developers.google.com/protocol-buffers +[protocol]: https://connectrpc.com/docs/protocol +[slack]: https://buf.build/links/slack +[validate]: https://github.com/connectrpc/validate-go +[protovalidate]: https://protovalidate.com diff --git a/vendor/connectrpc.com/connect/RELEASE.md b/vendor/connectrpc.com/connect/RELEASE.md new file mode 100644 index 0000000000..22364cd144 --- /dev/null +++ b/vendor/connectrpc.com/connect/RELEASE.md @@ -0,0 +1,44 @@ +# Releasing connect-go + +This document outlines how to create a release of connect-go. + +1. Clone the repo, ensuring you have the latest main. + +2. On a new branch, open [connect.go](connect.go) and change the `Version` constant to an appropriate [semantic version](https://semver.org/). To select the correct version, look at the version number of the [latest release] and the changes that are included in this new release. + * If there are only bug fixes and no new features, remove the `-dev` suffix, set MINOR number to be equal to the [latest release], and set the PATCH number to be 1 more than the PATCH number of the [latest release]. 
+ * If there are features being released, remove the `-dev` suffix, set the MINOR number to be 1 more than the MINOR number of the [latest release], and set the PATCH number to `0`. In the common case, the diff here will just be to remove the `-dev` suffix. + + ```patch + -const Version = "1.14.0-dev" + +const Version = "1.14.0" + ``` + +3. Check for any changes in [cmd/protoc-gen-connect-go/main.go](cmd/protoc-gen-connect-go/main.go) that require a version restriction. A constant `IsAtLeastVersionX_Y_Z` should be defined in [connect.go](connect.go) if generated code has begun to use a new API. Make sure the generated code references this constant. If a new constant has been added since the last release, ensure that the name of the constant matches the version being released ([Example PR #496](https://github.com/connectrpc/connect-go/pull/496)). + +4. Open a PR titled "Prepare for vX.Y.Z" ([Example PR #661](https://github.com/connectrpc/connect-go/pull/661)) and a description tagging all current maintainers. Once it's reviewed and CI passes, merge it. + + *Make sure no new commits are merged until the release is complete.* + +5. Review all commits in the new release and for each PR check an appropriate label is used and edit the title to be meaninful to end users. This will help auto-generated release notes match the final notes as closely as possible. + +6. Using the Github UI, create a new release. + - Under “Choose a tag”, type in “vX.Y.Z” to create a new tag for the release upon publish. + - Target the main branch. + - Title the Release “vX.Y.Z”. + - Click “set as latest release”. + - Set the last version as the “Previous tag”. + - Click “Generate release notes” to autogenerate release notes. + - Edit the release notes. A summary and other sub categories may be added if required but should, in most cases, be left as ### Enhancements and ### Bugfixes. 
Feel free to collect multiple small changes to docs or Github config into one line, but try to tag every contributor. Make especially sure to credit new external contributors! + +7. Publish the release. + +8. On a new branch, open [connect.go](connect.go) and change the `Version` to increment the minor tag and append the `-dev` suffix. Use the next minor release - we never anticipate bugs and patch releases. + + ```patch + -const Version = "1.14.0" + +const Version = "1.15.0-dev" + ``` + +9. Open a PR titled "Back to development" ([Example PR #662](https://github.com/connectrpc/connect-go/pull/662)). Once it's reviewed and CI passes, merge it. + +[latest release]: https://github.com/connectrpc/connect-go/releases/latest diff --git a/vendor/connectrpc.com/connect/SECURITY.md b/vendor/connectrpc.com/connect/SECURITY.md new file mode 100644 index 0000000000..04dcde5210 --- /dev/null +++ b/vendor/connectrpc.com/connect/SECURITY.md @@ -0,0 +1,5 @@ +Security Policy +=============== + +This project follows the [Connect security policy and reporting +process](https://connectrpc.com/docs/governance/security). 
diff --git a/vendor/connectrpc.com/connect/buf.gen.yaml b/vendor/connectrpc.com/connect/buf.gen.yaml new file mode 100644 index 0000000000..45beb82af6 --- /dev/null +++ b/vendor/connectrpc.com/connect/buf.gen.yaml @@ -0,0 +1,19 @@ +version: v2 +managed: + enabled: true + override: + - file_option: go_package_prefix + value: connectrpc.com/connect/internal/gen +plugins: + - local: protoc-gen-go + out: internal/gen + opt: paths=source_relative + - local: protoc-gen-connect-go + out: internal/gen/generics + opt: paths=source_relative + - local: protoc-gen-connect-go + out: internal/gen/simple + opt: + - paths=source_relative + - simple +clean: true diff --git a/vendor/connectrpc.com/connect/buf.yaml b/vendor/connectrpc.com/connect/buf.yaml new file mode 100644 index 0000000000..0ab5a09449 --- /dev/null +++ b/vendor/connectrpc.com/connect/buf.yaml @@ -0,0 +1,14 @@ +version: v2 +modules: + - path: internal/proto +lint: + use: + - STANDARD + ignore: + - internal/proto/connectext/grpc/health/v1/health.proto + - internal/proto/connectext/grpc/reflection/v1alpha/reflection.proto + - internal/proto/connectext/grpc/status/v1/status.proto + disallow_comment_ignores: true +breaking: + use: + - WIRE_JSON diff --git a/vendor/connectrpc.com/connect/buffer_pool.go b/vendor/connectrpc.com/connect/buffer_pool.go new file mode 100644 index 0000000000..8ef9b1dfd1 --- /dev/null +++ b/vendor/connectrpc.com/connect/buffer_pool.go @@ -0,0 +1,54 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "bytes" + "sync" +) + +const ( + initialBufferSize = 512 + maxRecycleBufferSize = 8 * 1024 * 1024 // if >8MiB, don't hold onto a buffer +) + +type bufferPool struct { + sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() any { + return bytes.NewBuffer(make([]byte, 0, initialBufferSize)) + }, + }, + } +} + +func (b *bufferPool) Get() *bytes.Buffer { + if buf, ok := b.Pool.Get().(*bytes.Buffer); ok { + return buf + } + return bytes.NewBuffer(make([]byte, 0, initialBufferSize)) +} + +func (b *bufferPool) Put(buffer *bytes.Buffer) { + if buffer.Cap() > maxRecycleBufferSize { + return + } + buffer.Reset() + b.Pool.Put(buffer) +} diff --git a/vendor/connectrpc.com/connect/client.go b/vendor/connectrpc.com/connect/client.go new file mode 100644 index 0000000000..9cd34ef1f3 --- /dev/null +++ b/vendor/connectrpc.com/connect/client.go @@ -0,0 +1,392 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" +) + +// Client is a reusable, concurrency-safe client for a single procedure. +// Depending on the procedure's type, use the CallUnary, CallClientStream, +// CallServerStream, or CallBidiStream method. 
+// +// By default, clients use the Connect protocol with the binary Protobuf Codec, +// ask for gzipped responses, and send uncompressed requests. To use the gRPC +// or gRPC-Web protocols, use the [WithGRPC] or [WithGRPCWeb] options. +type Client[Req, Res any] struct { + config *clientConfig + callUnary func(context.Context, *Request[Req]) (*Response[Res], error) + protocolClient protocolClient + err error +} + +// NewClient constructs a new Client. +func NewClient[Req, Res any](httpClient HTTPClient, url string, options ...ClientOption) *Client[Req, Res] { + client := &Client[Req, Res]{} + config, err := newClientConfig(url, options) + if err != nil { + client.err = err + return client + } + client.config = config + protocolClient, protocolErr := client.config.Protocol.NewClient( + &protocolClientParams{ + CompressionName: config.RequestCompressionName, + CompressionPools: newReadOnlyCompressionPools( + config.CompressionPools, + config.CompressionNames, + ), + Codec: config.Codec, + Protobuf: config.protobuf(), + CompressMinBytes: config.CompressMinBytes, + HTTPClient: httpClient, + URL: config.URL, + BufferPool: config.BufferPool, + ReadMaxBytes: config.ReadMaxBytes, + SendMaxBytes: config.SendMaxBytes, + EnableGet: config.EnableGet, + GetURLMaxBytes: config.GetURLMaxBytes, + GetUseFallback: config.GetUseFallback, + }, + ) + if protocolErr != nil { + client.err = protocolErr + return client + } + client.protocolClient = protocolClient + // Rather than applying unary interceptors along the hot path, we can do it + // once at client creation. 
+ unarySpec := config.newSpec(StreamTypeUnary) + unaryFunc := UnaryFunc(func(ctx context.Context, request AnyRequest) (AnyResponse, error) { + conn := client.protocolClient.NewConn(ctx, unarySpec, request.Header()) + conn.onRequestSend(func(r *http.Request) { + request.setRequestMethod(r.Method) + callInfo, ok := clientCallInfoForContext(ctx) + if ok { + callInfo.method = r.Method + callInfo.responseSource = conn + } + }) + // Send always returns an io.EOF unless the error is from the client-side. + // We want the user to continue to call Receive in those cases to get the + // full error from the server-side. + if err := conn.Send(request.Any()); err != nil && !errors.Is(err, io.EOF) { + _ = conn.CloseRequest() + _ = conn.CloseResponse() + return nil, err + } + if err := conn.CloseRequest(); err != nil { + _ = conn.CloseResponse() + return nil, err + } + response, err := receiveUnaryResponse[Res](conn, config.Initializer) + if err != nil { + _ = conn.CloseResponse() + return nil, err + } + return response, conn.CloseResponse() + }) + if interceptor := config.Interceptor; interceptor != nil { + // interceptor is the full chain of all interceptors provided + unaryFunc = interceptor.WrapUnary(unaryFunc) + } + client.callUnary = func(ctx context.Context, request *Request[Req]) (*Response[Res], error) { + // To make the specification, peer, and RPC headers visible to the full + // interceptor chain (as though they were supplied by the caller), we'll + // add them here. 
+ request.spec = unarySpec + request.peer = client.protocolClient.Peer() + protocolClient.WriteRequestHeader(StreamTypeUnary, request.Header()) + + // Also set them in the context if there's a call info present + callInfo, callInfoOk := clientCallInfoForContext(ctx) + if callInfoOk { + callInfo.peer = request.Peer() + callInfo.spec = request.Spec() + // A client could have set request headers in the call info OR the request wrapper + // So if a callInfo exists in context, merge any headers from there into the request wrapper + // so that all headers are sent in the request + mergeHeaders(request.Header(), callInfo.requestHeader) + + // Copy the call info into a sentinel value. This is so we can compare + // the sentinel value against the call info in context. If they're different, + // we can stop the request. This protects against changing the context in interceptors. + ctx = context.WithValue(ctx, sentinelContextKey{}, callInfo) + } + + response, err := unaryFunc(ctx, request) + if err != nil { + return nil, err + } + typed, ok := response.(*Response[Res]) + if !ok { + return nil, errorf(CodeInternal, "unexpected client response type %T", response) + } + return typed, nil + } + return client +} + +// CallUnary calls a request-response procedure. +func (c *Client[Req, Res]) CallUnary(ctx context.Context, request *Request[Req]) (*Response[Res], error) { + if c.err != nil { + return nil, c.err + } + return c.callUnary(ctx, request) +} + +// CallClientStream calls a client streaming procedure. +// +// Request headers can be sent via the [ClientStreamForClient.RequestHeader] method on the stream. Note that the +// request headers are not sent automatically when this method is invoked and instead require an explicit call to +// [ClientStreamForClient.Send]. 
+func (c *Client[Req, Res]) CallClientStream(ctx context.Context) *ClientStreamForClient[Req, Res] { + if c.err != nil { + return &ClientStreamForClient[Req, Res]{err: c.err} + } + return &ClientStreamForClient[Req, Res]{ + conn: c.newConn(ctx, StreamTypeClient, nil), + initializer: c.config.Initializer, + } +} + +// CallClientStreamSimple calls a client streaming procedure. +// +// Request headers should be set in a [CallInfo] object inside the context using [NewClientContext]. These headers are +// transmitted when this method is called and do not require an explicit call to [ClientStreamForClientSimple.Send]. +// +// In addition, when calling [ClientStreamForClientSimple.CloseAndReceive] on the returned stream, the returned response +// is the response type defined for the stream and _not_ a Connect [Response] wrapper type. As a result, any response +// headers and trailers should be read from the [CallInfo] object in context. +func (c *Client[Req, Res]) CallClientStreamSimple(ctx context.Context) (*ClientStreamForClientSimple[Req, Res], error) { + if c.err != nil { + return &ClientStreamForClientSimple[Req, Res]{ + stream: &ClientStreamForClient[Req, Res]{err: c.err}, + }, c.err + } + + stream := &ClientStreamForClientSimple[Req, Res]{ + stream: &ClientStreamForClient[Req, Res]{ + conn: c.newConn(ctx, StreamTypeClient, nil), + initializer: c.config.Initializer, + }, + } + if err := stream.Send(nil); err != nil { + return nil, err + } + return stream, nil +} + +// CallServerStream calls a server streaming procedure. 
+func (c *Client[Req, Res]) CallServerStream(ctx context.Context, request *Request[Req]) (*ServerStreamForClient[Res], error) { + if c.err != nil { + return nil, c.err + } + conn := c.newConn(ctx, StreamTypeServer, func(r *http.Request) { + request.method = r.Method + }) + request.peer = conn.Peer() + request.spec = conn.Spec() + + mergeHeaders(conn.RequestHeader(), request.header) + + // Send always returns an io.EOF unless the error is from the client-side. + // We want the user to continue to call Receive in those cases to get the + // full error from the server-side. + if err := conn.Send(request.Msg); err != nil && !errors.Is(err, io.EOF) { + _ = conn.CloseRequest() + _ = conn.CloseResponse() + return nil, err + } + if err := conn.CloseRequest(); err != nil { + return nil, err + } + return &ServerStreamForClient[Res]{ + conn: conn, + initializer: c.config.Initializer, + }, nil +} + +// CallBidiStream calls a bidirectional streaming procedure. +// +// Request headers can be sent via the [BidiStreamForClient.RequestHeader] method. Note that the +// request headers are not sent automatically when this method is invoked and instead require an explicit call to +// [BidiStreamForClient.Send]. +func (c *Client[Req, Res]) CallBidiStream(ctx context.Context) *BidiStreamForClient[Req, Res] { + if c.err != nil { + return &BidiStreamForClient[Req, Res]{err: c.err} + } + return &BidiStreamForClient[Req, Res]{ + conn: c.newConn(ctx, StreamTypeBidi, nil), + initializer: c.config.Initializer, + } +} + +// CallBidiStreamSimple calls a bidirectional streaming procedure. +// +// Request headers should be set in a [CallInfo] object inside the context using [NewClientContext]. These headers +// are transmitted when this method is called and do not require an explicit call to [BidiStreamForClient.Send]. +// +// Likewise, response headers and trailers should be read from the [CallInfo] object in context. 
+func (c *Client[Req, Res]) CallBidiStreamSimple(ctx context.Context) (*BidiStreamForClientSimple[Req, Res], error) { + if c.err != nil { + return &BidiStreamForClientSimple[Req, Res]{ + stream: &BidiStreamForClient[Req, Res]{err: c.err}, + }, c.err + } + + stream := &BidiStreamForClientSimple[Req, Res]{ + stream: &BidiStreamForClient[Req, Res]{ + conn: c.newConn(ctx, StreamTypeBidi, nil), + initializer: c.config.Initializer, + }, + } + + if err := stream.Send(nil); err != nil { + return nil, err + } + return stream, nil +} + +func (c *Client[Req, Res]) newConn(ctx context.Context, streamType StreamType, onRequestSend func(r *http.Request)) StreamingClientConn { + callInfo, callInfoOk := clientCallInfoForContext(ctx) + // Set values in the context if there's a call info present + if callInfoOk { + // Copy the call info into a sentinel value. This is so we can compare + // the sentinel value against the call info in context. If they're different, + // we can stop the request. This protects against changing the context in interceptors. + ctx = context.WithValue(ctx, sentinelContextKey{}, callInfo) + } + newConn := func(ctx context.Context, spec Spec) StreamingClientConn { + header := make(http.Header, 8) // arbitrary power of two, prevent immediate resizing + c.protocolClient.WriteRequestHeader(streamType, header) + conn := c.protocolClient.NewConn(ctx, spec, header) + conn.onRequestSend(onRequestSend) + return conn + } + if interceptor := c.config.Interceptor; interceptor != nil { + newConn = interceptor.WrapStreamingClient(newConn) + } + conn := newConn(ctx, c.config.newSpec(streamType)) + + // Set values in the context if there's a call info present + if callInfoOk { + callInfo.peer = conn.Peer() + callInfo.spec = conn.Spec() + callInfo.responseSource = conn + + // Merge any callInfo request headers first, then do the request, + // so that context headers show first in the list of headers. 
+ mergeHeaders(conn.RequestHeader(), callInfo.RequestHeader()) + } + + return conn +} + +type clientConfig struct { + URL *url.URL + Protocol protocol + Procedure string + Schema any + Initializer maybeInitializer + CompressMinBytes int + Interceptor Interceptor + CompressionPools map[string]*compressionPool + CompressionNames []string + Codec Codec + RequestCompressionName string + BufferPool *bufferPool + ReadMaxBytes int + SendMaxBytes int + EnableGet bool + GetURLMaxBytes int + GetUseFallback bool + IdempotencyLevel IdempotencyLevel +} + +func newClientConfig(rawURL string, options []ClientOption) (*clientConfig, *Error) { + url, err := parseRequestURL(rawURL) + if err != nil { + return nil, err + } + protoPath := extractProtoPath(url.Path) + config := clientConfig{ + URL: url, + Protocol: &protocolConnect{}, + Procedure: protoPath, + CompressionPools: make(map[string]*compressionPool), + BufferPool: newBufferPool(), + } + withProtoBinaryCodec().applyToClient(&config) + withGzip().applyToClient(&config) + for _, opt := range options { + opt.applyToClient(&config) + } + if err := config.validate(); err != nil { + return nil, err + } + return &config, nil +} + +func (c *clientConfig) validate() *Error { + if c.Codec == nil || c.Codec.Name() == "" { + return errorf(CodeUnknown, "no codec configured") + } + if c.RequestCompressionName != "" && c.RequestCompressionName != compressionIdentity { + if _, ok := c.CompressionPools[c.RequestCompressionName]; !ok { + return errorf(CodeUnknown, "unknown compression %q", c.RequestCompressionName) + } + } + return nil +} + +func (c *clientConfig) protobuf() Codec { + if c.Codec.Name() == codecNameProto { + return c.Codec + } + return &protoBinaryCodec{} +} + +func (c *clientConfig) newSpec(t StreamType) Spec { + return Spec{ + StreamType: t, + Procedure: c.Procedure, + Schema: c.Schema, + IsClient: true, + IdempotencyLevel: c.IdempotencyLevel, + } +} + +func parseRequestURL(rawURL string) (*url.URL, *Error) { + url, err := 
url.ParseRequestURI(rawURL) + if err == nil { + return url, nil + } + if !strings.Contains(rawURL, "://") { + // URL doesn't have a scheme, so the user is likely accustomed to + // grpc-go's APIs. + err = fmt.Errorf( + "URL %q missing scheme: use http:// or https:// (unlike grpc-go)", + rawURL, + ) + } + return nil, NewError(CodeUnavailable, err) +} diff --git a/vendor/connectrpc.com/connect/client_stream.go b/vendor/connectrpc.com/connect/client_stream.go new file mode 100644 index 0000000000..5a24f4a3d0 --- /dev/null +++ b/vendor/connectrpc.com/connect/client_stream.go @@ -0,0 +1,441 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "errors" + "io" + "net/http" +) + +var ( + // errNoStreamInitialized signals that a no stream has been initialized when + // attempting to access stream-related methods. + errNoStreamInitialized = errors.New("no stream initialized") +) + +// ClientStreamForClient is the client's view of a client streaming RPC. +// +// It's returned from [Client].CallClientStream, but doesn't currently have an +// exported constructor function. +// +// When using this stream, request headers should be set via the [ClientStreamForClient.RequestHeader] method. +type ClientStreamForClient[Req, Res any] struct { + conn StreamingClientConn + initializer maybeInitializer + // Error from client construction. If non-nil, return for all calls. 
+ err error +} + +// Spec returns the specification for the RPC. +func (c *ClientStreamForClient[_, _]) Spec() Spec { + return c.conn.Spec() +} + +// Peer describes the server for the RPC. +func (c *ClientStreamForClient[_, _]) Peer() Peer { + return c.conn.Peer() +} + +// RequestHeader returns the request headers. Headers are sent to the server with the +// first call to Send. +// +// Headers beginning with "Connect-" and "Grpc-" are reserved for use by the +// Connect and gRPC protocols. Applications shouldn't write them. +func (c *ClientStreamForClient[Req, Res]) RequestHeader() http.Header { + if c.err != nil { + return http.Header{} + } + return c.conn.RequestHeader() +} + +// Send a message to the server. The first call to Send also sends the request +// headers. +// +// If the server returns an error, Send returns an error that wraps [io.EOF]. +// Clients should check for case using the standard library's [errors.Is] and +// unmarshal the error using CloseAndReceive. +func (c *ClientStreamForClient[Req, Res]) Send(request *Req) error { + if c.err != nil { + return c.err + } + if request == nil { + return c.conn.Send(nil) + } + return c.conn.Send(request) +} + +// CloseAndReceive closes the send side of the stream and waits for the +// response. +func (c *ClientStreamForClient[Req, Res]) CloseAndReceive() (*Response[Res], error) { + if c.err != nil { + return nil, c.err + } + if err := c.conn.CloseRequest(); err != nil { + _ = c.conn.CloseResponse() + return nil, err + } + response, err := receiveUnaryResponse[Res](c.conn, c.initializer) + if err != nil { + _ = c.conn.CloseResponse() + return nil, err + } + return response, c.conn.CloseResponse() +} + +// Conn exposes the underlying StreamingClientConn. This may be useful if +// you'd prefer to wrap the connection in a different high-level API. 
+func (c *ClientStreamForClient[Req, Res]) Conn() (StreamingClientConn, error) { + return c.conn, c.err +} + +// ClientStreamForClientSimple is the client's view of a client streaming RPC. +// +// It's returned from [Client.CallClientStreamSimple], but doesn't currently have an +// exported constructor function. +// +// Usage of this stream requires that request headers be set in a [CallInfo] object in context via [NewClientContext]. +// In addition, the response returned by [ClientStreamForClientSimple.CloseAndReceive] is the response type defined for +// the stream and _not_ a Connect [Response] wrapper type. As a result, response headers/trailers should be read from +// the [CallInfo] object in context. +type ClientStreamForClientSimple[Req, Res any] struct { + stream *ClientStreamForClient[Req, Res] +} + +// Spec returns the specification for the RPC. +func (c *ClientStreamForClientSimple[_, _]) Spec() Spec { + if c.stream == nil { + return Spec{} + } + return c.stream.Spec() +} + +// Peer describes the server for the RPC. +func (c *ClientStreamForClientSimple[_, _]) Peer() Peer { + if c.stream == nil { + return Peer{} + } + return c.stream.Peer() +} + +// Send a message to the server. The first call to Send also sends the request +// headers. +// +// If the server returns an error, Send returns an error that wraps [io.EOF]. +// Clients should check for case using the standard library's [errors.Is] and +// unmarshal the error using CloseAndReceive. +func (c *ClientStreamForClientSimple[Req, Res]) Send(request *Req) error { + if c.stream == nil { + return errNoStreamInitialized + } + return c.stream.Send(request) +} + +// CloseAndReceive closes the send side of the stream and waits for the +// response. 
+func (c *ClientStreamForClientSimple[Req, Res]) CloseAndReceive() (*Res, error) { + if c.stream == nil { + return nil, errNoStreamInitialized + } + res, err := c.stream.CloseAndReceive() + if err != nil { + return nil, err + } + return res.Msg, nil +} + +// ServerStreamForClient is the client's view of a server streaming RPC. +// +// It's returned from [Client].CallServerStream, but doesn't currently have an +// exported constructor function. +type ServerStreamForClient[Res any] struct { + conn StreamingClientConn + initializer maybeInitializer + msg *Res + // Error from client construction. If non-nil, return for all calls. + constructErr error + // Error from conn.Receive(). + receiveErr error +} + +// Receive advances the stream to the next message, which will then be +// available through the Msg method. It returns false when the stream stops, +// either by reaching the end or by encountering an unexpected error. After +// Receive returns false, the Err method will return any unexpected error +// encountered. +func (s *ServerStreamForClient[Res]) Receive() bool { + if s.constructErr != nil || s.receiveErr != nil { + return false + } + s.msg = new(Res) + if err := s.initializer.maybe(s.conn.Spec(), s.msg); err != nil { + s.receiveErr = err + return false + } + s.receiveErr = s.conn.Receive(s.msg) + return s.receiveErr == nil +} + +// Msg returns the most recent message unmarshaled by a call to Receive. +func (s *ServerStreamForClient[Res]) Msg() *Res { + if s.msg == nil { + s.msg = new(Res) + } + return s.msg +} + +// Err returns the first non-EOF error that was encountered by Receive. +func (s *ServerStreamForClient[Res]) Err() error { + if s.constructErr != nil { + return s.constructErr + } + if s.receiveErr != nil && !errors.Is(s.receiveErr, io.EOF) { + return s.receiveErr + } + return nil +} + +// ResponseHeader returns the headers received from the server. It blocks until +// the first call to Receive returns. 
+func (s *ServerStreamForClient[Res]) ResponseHeader() http.Header { + if s.constructErr != nil { + return http.Header{} + } + return s.conn.ResponseHeader() +} + +// ResponseTrailer returns the trailers received from the server. Trailers +// aren't fully populated until Receive() returns an error wrapping io.EOF. +func (s *ServerStreamForClient[Res]) ResponseTrailer() http.Header { + if s.constructErr != nil { + return http.Header{} + } + return s.conn.ResponseTrailer() +} + +// Close the receive side of the stream. +// +// Close is non-blocking. To gracefully close the stream and allow for +// connection resuse ensure all messages have been received before calling +// Close. All messages are received when Receive returns false. +func (s *ServerStreamForClient[Res]) Close() error { + if s.constructErr != nil { + return s.constructErr + } + return s.conn.CloseResponse() +} + +// Conn exposes the underlying StreamingClientConn. This may be useful if +// you'd prefer to wrap the connection in a different high-level API. +func (s *ServerStreamForClient[Res]) Conn() (StreamingClientConn, error) { + return s.conn, s.constructErr +} + +// BidiStreamForClient is the client's view of a bidirectional streaming RPC. +// +// It's returned from [Client].CallBidiStream, but doesn't currently have an +// exported constructor function. +type BidiStreamForClient[Req, Res any] struct { + conn StreamingClientConn + initializer maybeInitializer + // Error from client construction. If non-nil, return for all calls. + err error +} + +// Spec returns the specification for the RPC. +func (b *BidiStreamForClient[_, _]) Spec() Spec { + return b.conn.Spec() +} + +// Peer describes the server for the RPC. +func (b *BidiStreamForClient[_, _]) Peer() Peer { + return b.conn.Peer() +} + +// RequestHeader returns the request headers. Headers are sent with the first +// call to Send. +// +// Headers beginning with "Connect-" and "Grpc-" are reserved for use by the +// Connect and gRPC protocols. 
Applications shouldn't write them. +func (b *BidiStreamForClient[Req, Res]) RequestHeader() http.Header { + if b.err != nil { + return http.Header{} + } + return b.conn.RequestHeader() +} + +// Send a message to the server. The first call to Send also sends the request +// headers. To send just the request headers, without a body, call Send with a +// nil pointer. +// +// If the server returns an error, Send returns an error that wraps [io.EOF]. +// Clients should check for EOF using the standard library's [errors.Is] and +// call Receive to retrieve the error. +func (b *BidiStreamForClient[Req, Res]) Send(msg *Req) error { + if b.err != nil { + return b.err + } + if msg == nil { + return b.conn.Send(nil) + } + return b.conn.Send(msg) +} + +// CloseRequest closes the send side of the stream. +func (b *BidiStreamForClient[Req, Res]) CloseRequest() error { + if b.err != nil { + return b.err + } + return b.conn.CloseRequest() +} + +// Receive a message. When the server is done sending messages and no other +// errors have occurred, Receive will return an error that wraps [io.EOF]. +func (b *BidiStreamForClient[Req, Res]) Receive() (*Res, error) { + if b.err != nil { + return nil, b.err + } + var msg Res + if err := b.initializer.maybe(b.conn.Spec(), &msg); err != nil { + return nil, err + } + if err := b.conn.Receive(&msg); err != nil { + return nil, err + } + return &msg, nil +} + +// CloseResponse closes the receive side of the stream. +// +// CloseResponse is non-blocking. To gracefully close the stream and allow for +// connection resuse ensure all messages have been received before calling +// CloseResponse. All messages are received when Receive returns an error +// wrapping [io.EOF]. +func (b *BidiStreamForClient[Req, Res]) CloseResponse() error { + if b.err != nil { + return b.err + } + return b.conn.CloseResponse() +} + +// ResponseHeader returns the headers received from the server. It blocks until +// the first call to Receive returns. 
+func (b *BidiStreamForClient[Req, Res]) ResponseHeader() http.Header { + if b.err != nil { + return http.Header{} + } + return b.conn.ResponseHeader() +} + +// ResponseTrailer returns the trailers received from the server. Trailers +// aren't fully populated until Receive() returns an error wrapping [io.EOF]. +func (b *BidiStreamForClient[Req, Res]) ResponseTrailer() http.Header { + if b.err != nil { + return http.Header{} + } + return b.conn.ResponseTrailer() +} + +// Conn exposes the underlying StreamingClientConn. This may be useful if +// you'd prefer to wrap the connection in a different high-level API. +func (b *BidiStreamForClient[Req, Res]) Conn() (StreamingClientConn, error) { + return b.conn, b.err +} + +// BidiStreamForClientSimple is the client's view of a bidirectional streaming RPC. +// +// It's returned from [Client].CallBidiStream, but doesn't currently have an +// exported constructor function. +type BidiStreamForClientSimple[Req, Res any] struct { + stream *BidiStreamForClient[Req, Res] +} + +// Spec returns the specification for the RPC. +func (b *BidiStreamForClientSimple[_, _]) Spec() Spec { + if b.stream == nil { + return Spec{} + } + return b.stream.Spec() +} + +// Peer describes the server for the RPC. +func (b *BidiStreamForClientSimple[_, _]) Peer() Peer { + if b.stream == nil { + return Peer{} + } + return b.stream.Peer() +} + +// Send a message to the server. The first call to Send also sends the request +// headers. To send just the request headers, without a body, call Send with a +// nil pointer. +// +// If the server returns an error, Send returns an error that wraps [io.EOF]. +// Clients should check for EOF using the standard library's [errors.Is] and +// call Receive to retrieve the error. +func (b *BidiStreamForClientSimple[Req, Res]) Send(msg *Req) error { + if b.stream == nil { + return errNoStreamInitialized + } + return b.stream.Send(msg) +} + +// CloseRequest closes the send side of the stream. 
+func (b *BidiStreamForClientSimple[Req, Res]) CloseRequest() error { + if b.stream == nil { + return errNoStreamInitialized + } + return b.stream.CloseRequest() +} + +// Receive a message. When the server is done sending messages and no other +// errors have occurred, Receive will return an error that wraps [io.EOF]. +func (b *BidiStreamForClientSimple[Req, Res]) Receive() (*Res, error) { + if b.stream == nil { + return nil, errNoStreamInitialized + } + return b.stream.Receive() +} + +// CloseResponse closes the receive side of the stream. +// +// CloseResponse is non-blocking. To gracefully close the stream and allow for +// connection resuse ensure all messages have been received before calling +// CloseResponse. All messages are received when Receive returns an error +// wrapping [io.EOF]. +func (b *BidiStreamForClientSimple[Req, Res]) CloseResponse() error { + if b.stream == nil { + return errNoStreamInitialized + } + return b.stream.CloseResponse() +} + +// ResponseHeader returns the headers received from the server. It blocks until +// the first call to Receive returns. +func (b *BidiStreamForClientSimple[Req, Res]) ResponseHeader() http.Header { + if b.stream == nil { + return make(http.Header) + } + return b.stream.ResponseHeader() +} + +// ResponseTrailer returns the trailers received from the server. Trailers +// aren't fully populated until Receive() returns an error wrapping [io.EOF]. +func (b *BidiStreamForClientSimple[Req, Res]) ResponseTrailer() http.Header { + if b.stream == nil { + return make(http.Header) + } + return b.stream.ResponseTrailer() +} diff --git a/vendor/connectrpc.com/connect/code.go b/vendor/connectrpc.com/connect/code.go new file mode 100644 index 0000000000..3577c072a5 --- /dev/null +++ b/vendor/connectrpc.com/connect/code.go @@ -0,0 +1,226 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "fmt" + "strconv" + "strings" +) + +// A Code is one of the Connect protocol's error codes. There are no user-defined +// codes, so only the codes enumerated below are valid. In both name and +// semantics, these codes match the gRPC status codes. +// +// The descriptions below are optimized for brevity rather than completeness. +// See the [Connect protocol specification] for detailed descriptions of each +// code and example usage. +// +// [Connect protocol specification]: https://connectrpc.com/docs/protocol +type Code uint32 + +const ( + // The zero code in gRPC is OK, which indicates that the operation was a + // success. We don't define a constant for it because it overlaps awkwardly + // with Go's error semantics: what does it mean to have a non-nil error with + // an OK status? (Also, the Connect protocol doesn't use a code for + // successes.) + + // CodeCanceled indicates that the operation was canceled, typically by the + // caller. + CodeCanceled Code = 1 + + // CodeUnknown indicates that the operation failed for an unknown reason. + CodeUnknown Code = 2 + + // CodeInvalidArgument indicates that client supplied an invalid argument. + CodeInvalidArgument Code = 3 + + // CodeDeadlineExceeded indicates that deadline expired before the operation + // could complete. + CodeDeadlineExceeded Code = 4 + + // CodeNotFound indicates that some requested entity (for example, a file or + // directory) was not found. 
+ CodeNotFound Code = 5 + + // CodeAlreadyExists indicates that client attempted to create an entity (for + // example, a file or directory) that already exists. + CodeAlreadyExists Code = 6 + + // CodePermissionDenied indicates that the caller doesn't have permission to + // execute the specified operation. + CodePermissionDenied Code = 7 + + // CodeResourceExhausted indicates that some resource has been exhausted. For + // example, a per-user quota may be exhausted or the entire file system may + // be full. + CodeResourceExhausted Code = 8 + + // CodeFailedPrecondition indicates that the system is not in a state + // required for the operation's execution. + CodeFailedPrecondition Code = 9 + + // CodeAborted indicates that operation was aborted by the system, usually + // because of a concurrency issue such as a sequencer check failure or + // transaction abort. + CodeAborted Code = 10 + + // CodeOutOfRange indicates that the operation was attempted past the valid + // range (for example, seeking past end-of-file). + CodeOutOfRange Code = 11 + + // CodeUnimplemented indicates that the operation isn't implemented, + // supported, or enabled in this service. + CodeUnimplemented Code = 12 + + // CodeInternal indicates that some invariants expected by the underlying + // system have been broken. This code is reserved for serious errors. + CodeInternal Code = 13 + + // CodeUnavailable indicates that the service is currently unavailable. This + // is usually temporary, so clients can back off and retry idempotent + // operations. + CodeUnavailable Code = 14 + + // CodeDataLoss indicates that the operation has resulted in unrecoverable + // data loss or corruption. + CodeDataLoss Code = 15 + + // CodeUnauthenticated indicates that the request does not have valid + // authentication credentials for the operation. 
+ CodeUnauthenticated Code = 16 + + minCode = CodeCanceled + maxCode = CodeUnauthenticated +) + +func (c Code) String() string { + switch c { + case CodeCanceled: + return "canceled" + case CodeUnknown: + return "unknown" + case CodeInvalidArgument: + return "invalid_argument" + case CodeDeadlineExceeded: + return "deadline_exceeded" + case CodeNotFound: + return "not_found" + case CodeAlreadyExists: + return "already_exists" + case CodePermissionDenied: + return "permission_denied" + case CodeResourceExhausted: + return "resource_exhausted" + case CodeFailedPrecondition: + return "failed_precondition" + case CodeAborted: + return "aborted" + case CodeOutOfRange: + return "out_of_range" + case CodeUnimplemented: + return "unimplemented" + case CodeInternal: + return "internal" + case CodeUnavailable: + return "unavailable" + case CodeDataLoss: + return "data_loss" + case CodeUnauthenticated: + return "unauthenticated" + } + return fmt.Sprintf("code_%d", c) +} + +// MarshalText implements [encoding.TextMarshaler]. +func (c Code) MarshalText() ([]byte, error) { + return []byte(c.String()), nil +} + +// UnmarshalText implements [encoding.TextUnmarshaler]. 
+func (c *Code) UnmarshalText(data []byte) error { + dataStr := string(data) + switch dataStr { + case "canceled": + *c = CodeCanceled + return nil + case "unknown": + *c = CodeUnknown + return nil + case "invalid_argument": + *c = CodeInvalidArgument + return nil + case "deadline_exceeded": + *c = CodeDeadlineExceeded + return nil + case "not_found": + *c = CodeNotFound + return nil + case "already_exists": + *c = CodeAlreadyExists + return nil + case "permission_denied": + *c = CodePermissionDenied + return nil + case "resource_exhausted": + *c = CodeResourceExhausted + return nil + case "failed_precondition": + *c = CodeFailedPrecondition + return nil + case "aborted": + *c = CodeAborted + return nil + case "out_of_range": + *c = CodeOutOfRange + return nil + case "unimplemented": + *c = CodeUnimplemented + return nil + case "internal": + *c = CodeInternal + return nil + case "unavailable": + *c = CodeUnavailable + return nil + case "data_loss": + *c = CodeDataLoss + return nil + case "unauthenticated": + *c = CodeUnauthenticated + return nil + } + // Ensure that non-canonical codes round-trip through MarshalText and + // UnmarshalText. + if after, ok := strings.CutPrefix(dataStr, "code_"); ok { + dataStr = after + code, err := strconv.ParseUint(dataStr, 10 /* base */, 32 /* bitsize */) + if err == nil && (code < uint64(minCode) || code > uint64(maxCode)) { + *c = Code(code) + return nil + } + } + return fmt.Errorf("invalid code %q", dataStr) +} + +// CodeOf returns the error's status code if it is or wraps an [*Error] and +// [CodeUnknown] otherwise. 
+func CodeOf(err error) Code { + if connectErr, ok := asError(err); ok { + return connectErr.Code() + } + return CodeUnknown +} diff --git a/vendor/connectrpc.com/connect/codec.go b/vendor/connectrpc.com/connect/codec.go new file mode 100644 index 0000000000..729a28ab6c --- /dev/null +++ b/vendor/connectrpc.com/connect/codec.go @@ -0,0 +1,259 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" +) + +const ( + codecNameProto = "proto" + codecNameJSON = "json" + codecNameJSONCharsetUTF8 = codecNameJSON + "; charset=utf-8" +) + +// Codec marshals structs (typically generated from a schema) to and from bytes. +type Codec interface { + // Name returns the name of the Codec. + // + // This may be used as part of the Content-Type within HTTP. For example, + // with gRPC this is the content subtype, so "application/grpc+proto" will + // map to the Codec with name "proto". + // + // Names must not be empty. + Name() string + // Marshal marshals the given message. + // + // Marshal may expect a specific type of message, and will error if this type + // is not given. + Marshal(any) ([]byte, error) + // Unmarshal unmarshals the given message. 
+ // + // Unmarshal may expect a specific type of message, and will error if this + // type is not given. + Unmarshal([]byte, any) error +} + +// marshalAppender is an extension to Codec for appending to a byte slice. +type marshalAppender interface { + Codec + + // MarshalAppend marshals the given message and appends it to the given + // byte slice. + // + // MarshalAppend may expect a specific type of message, and will error if + // this type is not given. + MarshalAppend([]byte, any) ([]byte, error) +} + +// stableCodec is an extension to Codec for serializing with stable output. +type stableCodec interface { + Codec + + // MarshalStable marshals the given message with stable field ordering. + // + // MarshalStable should return the same output for a given input. Although + // it is not guaranteed to be canonicalized, the marshalling routine for + // MarshalStable will opt for the most normalized output available for a + // given serialization. + // + // For practical reasons, it is possible for MarshalStable to return two + // different results for two inputs considered to be "equal" in their own + // domain, and it may change in the future with codec updates, but for + // any given concrete value and any given version, it should return the + // same output. + MarshalStable(any) ([]byte, error) + + // IsBinary returns true if the marshalled data is binary for this codec. + // + // If this function returns false, the data returned from Marshal and + // MarshalStable are considered valid text and may be used in contexts + // where text is expected. 
+ IsBinary() bool +} + +type protoBinaryCodec struct{} + +var _ Codec = (*protoBinaryCodec)(nil) + +func (c *protoBinaryCodec) Name() string { return codecNameProto } + +func (c *protoBinaryCodec) Marshal(message any) ([]byte, error) { + protoMessage, ok := message.(proto.Message) + if !ok { + return nil, errNotProto(message) + } + return proto.Marshal(protoMessage) +} + +func (c *protoBinaryCodec) MarshalAppend(dst []byte, message any) ([]byte, error) { + protoMessage, ok := message.(proto.Message) + if !ok { + return nil, errNotProto(message) + } + return proto.MarshalOptions{}.MarshalAppend(dst, protoMessage) +} + +func (c *protoBinaryCodec) Unmarshal(data []byte, message any) error { + protoMessage, ok := message.(proto.Message) + if !ok { + return errNotProto(message) + } + err := proto.Unmarshal(data, protoMessage) + if err != nil { + return fmt.Errorf("unmarshal into %T: %w", message, err) + } + return nil +} + +func (c *protoBinaryCodec) MarshalStable(message any) ([]byte, error) { + protoMessage, ok := message.(proto.Message) + if !ok { + return nil, errNotProto(message) + } + // protobuf does not offer a canonical output today, so this format is not + // guaranteed to match deterministic output from other protobuf libraries. + // In addition, unknown fields may cause inconsistent output for otherwise + // equal messages. 
+ // https://github.com/golang/protobuf/issues/1121 + options := proto.MarshalOptions{Deterministic: true} + return options.Marshal(protoMessage) +} + +func (c *protoBinaryCodec) IsBinary() bool { + return true +} + +type protoJSONCodec struct { + name string +} + +var _ Codec = (*protoJSONCodec)(nil) + +func (c *protoJSONCodec) Name() string { return c.name } + +func (c *protoJSONCodec) Marshal(message any) ([]byte, error) { + protoMessage, ok := message.(proto.Message) + if !ok { + return nil, errNotProto(message) + } + return protojson.MarshalOptions{}.Marshal(protoMessage) +} + +func (c *protoJSONCodec) MarshalAppend(dst []byte, message any) ([]byte, error) { + protoMessage, ok := message.(proto.Message) + if !ok { + return nil, errNotProto(message) + } + return protojson.MarshalOptions{}.MarshalAppend(dst, protoMessage) +} + +func (c *protoJSONCodec) Unmarshal(binary []byte, message any) error { + protoMessage, ok := message.(proto.Message) + if !ok { + return errNotProto(message) + } + if len(binary) == 0 { + return errors.New("zero-length payload is not a valid JSON object") + } + // Discard unknown fields so clients and servers aren't forced to always use + // exactly the same version of the schema. + options := protojson.UnmarshalOptions{DiscardUnknown: true} + err := options.Unmarshal(binary, protoMessage) + if err != nil { + return fmt.Errorf("unmarshal into %T: %w", message, err) + } + return nil +} + +func (c *protoJSONCodec) MarshalStable(message any) ([]byte, error) { + // protojson does not offer a "deterministic" field ordering, but fields + // are still ordered consistently by their index. However, protojson can + // output inconsistent whitespace for some reason, therefore it is + // suggested to use a formatter to ensure consistent formatting. 
+ // https://github.com/golang/protobuf/issues/1373 + messageJSON, err := c.Marshal(message) + if err != nil { + return nil, err + } + compactedJSON := bytes.NewBuffer(messageJSON[:0]) + if err = json.Compact(compactedJSON, messageJSON); err != nil { + return nil, err + } + return compactedJSON.Bytes(), nil +} + +func (c *protoJSONCodec) IsBinary() bool { + return false +} + +// readOnlyCodecs is a read-only interface to a map of named codecs. +type readOnlyCodecs interface { + // Get gets the Codec with the given name. + Get(string) Codec + // Protobuf gets the user-supplied protobuf codec, falling back to the default + // implementation if necessary. + // + // This is helpful in the gRPC protocol, where the wire protocol requires + // marshaling protobuf structs to binary even if the RPC procedures were + // generated from a different IDL. + Protobuf() Codec + // Names returns a copy of the registered codec names. The returned slice is + // safe for the caller to mutate. + Names() []string +} + +func newReadOnlyCodecs(nameToCodec map[string]Codec) readOnlyCodecs { + return &codecMap{ + nameToCodec: nameToCodec, + } +} + +type codecMap struct { + nameToCodec map[string]Codec +} + +func (m *codecMap) Get(name string) Codec { + return m.nameToCodec[name] +} + +func (m *codecMap) Protobuf() Codec { + if pb, ok := m.nameToCodec[codecNameProto]; ok { + return pb + } + return &protoBinaryCodec{} +} + +func (m *codecMap) Names() []string { + names := make([]string, 0, len(m.nameToCodec)) + for name := range m.nameToCodec { + names = append(names, name) + } + return names +} + +func errNotProto(message any) error { + if _, ok := message.(protoiface.MessageV1); ok { + return fmt.Errorf("%T uses github.com/golang/protobuf, but connect-go only supports google.golang.org/protobuf: see https://go.dev/blog/protobuf-apiv2", message) + } + return fmt.Errorf("%T doesn't implement proto.Message", message) +} diff --git a/vendor/connectrpc.com/connect/compression.go 
b/vendor/connectrpc.com/connect/compression.go new file mode 100644 index 0000000000..4ff8ab0329 --- /dev/null +++ b/vendor/connectrpc.com/connect/compression.go @@ -0,0 +1,224 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "bytes" + "errors" + "io" + "math" + "net/http" + "strings" + "sync" +) + +const ( + compressionGzip = "gzip" + compressionIdentity = "identity" +) + +// A Decompressor is a reusable wrapper that decompresses an underlying data +// source. The standard library's [*gzip.Reader] implements Decompressor. +type Decompressor interface { + io.Reader + + // Close closes the Decompressor, but not the underlying data source. It may + // return an error if the Decompressor wasn't read to EOF. + Close() error + + // Reset discards the Decompressor's internal state, if any, and prepares it + // to read from a new source of compressed data. + Reset(io.Reader) error +} + +// A Compressor is a reusable wrapper that compresses data written to an +// underlying sink. The standard library's [*gzip.Writer] implements Compressor. +type Compressor interface { + io.Writer + + // Close flushes any buffered data to the underlying sink, then closes the + // Compressor. It must not close the underlying sink. + Close() error + + // Reset discards the Compressor's internal state, if any, and prepares it to + // write compressed data to a new sink. 
+ Reset(io.Writer) +} + +type compressionPool struct { + decompressors sync.Pool + compressors sync.Pool +} + +func newCompressionPool( + newDecompressor func() Decompressor, + newCompressor func() Compressor, +) *compressionPool { + if newDecompressor == nil && newCompressor == nil { + return nil + } + return &compressionPool{ + decompressors: sync.Pool{ + New: func() any { return newDecompressor() }, + }, + compressors: sync.Pool{ + New: func() any { return newCompressor() }, + }, + } +} + +func (c *compressionPool) Decompress(dst *bytes.Buffer, src *bytes.Buffer, readMaxBytes int64) *Error { + decompressor, err := c.getDecompressor(src) + if err != nil { + return errorf(CodeInvalidArgument, "get decompressor: %w", err) + } + reader := io.Reader(decompressor) + if readMaxBytes > 0 && readMaxBytes < math.MaxInt64 { + reader = io.LimitReader(decompressor, readMaxBytes+1) + } + bytesRead, err := dst.ReadFrom(reader) + if err != nil { + _ = c.putDecompressor(decompressor) + err = wrapIfContextError(err) + if connectErr, ok := asError(err); ok { + return connectErr + } + return errorf(CodeInvalidArgument, "decompress: %w", err) + } + if readMaxBytes > 0 && bytesRead > readMaxBytes { + discardedBytes, err := io.Copy(io.Discard, decompressor) + _ = c.putDecompressor(decompressor) + if err != nil { + return errorf(CodeResourceExhausted, "message is larger than configured max %d - unable to determine message size: %w", readMaxBytes, err) + } + return errorf(CodeResourceExhausted, "message size %d is larger than configured max %d", bytesRead+discardedBytes, readMaxBytes) + } + if err := c.putDecompressor(decompressor); err != nil { + return errorf(CodeUnknown, "recycle decompressor: %w", err) + } + return nil +} + +func (c *compressionPool) Compress(dst *bytes.Buffer, src *bytes.Buffer) *Error { + compressor, err := c.getCompressor(dst) + if err != nil { + return errorf(CodeUnknown, "get compressor: %w", err) + } + if _, err := src.WriteTo(compressor); err != nil { + _ = 
c.putCompressor(compressor) + err = wrapIfContextError(err) + if connectErr, ok := asError(err); ok { + return connectErr + } + return errorf(CodeInternal, "compress: %w", err) + } + if err := c.putCompressor(compressor); err != nil { + return errorf(CodeInternal, "recycle compressor: %w", err) + } + return nil +} + +func (c *compressionPool) getDecompressor(reader io.Reader) (Decompressor, error) { + decompressor, ok := c.decompressors.Get().(Decompressor) + if !ok { + return nil, errors.New("expected Decompressor, got incorrect type from pool") + } + return decompressor, decompressor.Reset(reader) +} + +func (c *compressionPool) putDecompressor(decompressor Decompressor) error { + if err := decompressor.Close(); err != nil { + return err + } + // While it's in the pool, we don't want the decompressor to retain a + // reference to the underlying reader. However, most decompressors attempt to + // read some header data from the new data source when Reset; since we don't + // know the compression format, we can't provide a valid header. Since we + // also reset the decompressor when it's pulled out of the pool, we can + // ignore errors here. + _ = decompressor.Reset(http.NoBody) + c.decompressors.Put(decompressor) + return nil +} + +func (c *compressionPool) getCompressor(writer io.Writer) (Compressor, error) { + compressor, ok := c.compressors.Get().(Compressor) + if !ok { + return nil, errors.New("expected Compressor, got incorrect type from pool") + } + compressor.Reset(writer) + return compressor, nil +} + +func (c *compressionPool) putCompressor(compressor Compressor) error { + if err := compressor.Close(); err != nil { + return err + } + compressor.Reset(io.Discard) // don't keep references + c.compressors.Put(compressor) + return nil +} + +// readOnlyCompressionPools is a read-only interface to a map of named +// compressionPools. 
+type readOnlyCompressionPools interface { + Get(string) *compressionPool + Contains(string) bool + // Wordy, but clarifies how this is different from readOnlyCodecs.Names(). + CommaSeparatedNames() string +} + +func newReadOnlyCompressionPools( + nameToPool map[string]*compressionPool, + reversedNames []string, +) readOnlyCompressionPools { + // Client and handler configs keep compression names in registration order, + // but we want the last registered to be the most preferred. + names := make([]string, 0, len(reversedNames)) + seen := make(map[string]struct{}, len(reversedNames)) + for i := len(reversedNames) - 1; i >= 0; i-- { + name := reversedNames[i] + if _, ok := seen[name]; ok { + continue + } + seen[name] = struct{}{} + names = append(names, name) + } + return &namedCompressionPools{ + nameToPool: nameToPool, + commaSeparatedNames: strings.Join(names, ","), + } +} + +type namedCompressionPools struct { + nameToPool map[string]*compressionPool + commaSeparatedNames string +} + +func (m *namedCompressionPools) Get(name string) *compressionPool { + if name == "" || name == compressionIdentity { + return nil + } + return m.nameToPool[name] +} + +func (m *namedCompressionPools) Contains(name string) bool { + _, ok := m.nameToPool[name] + return ok +} + +func (m *namedCompressionPools) CommaSeparatedNames() string { + return m.commaSeparatedNames +} diff --git a/vendor/connectrpc.com/connect/connect.go b/vendor/connectrpc.com/connect/connect.go new file mode 100644 index 0000000000..04bbca85cb --- /dev/null +++ b/vendor/connectrpc.com/connect/connect.go @@ -0,0 +1,482 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package connect is a slim RPC framework built on Protocol Buffers and +// [net/http]. In addition to supporting its own protocol, Connect handlers and +// clients are wire-compatible with gRPC and gRPC-Web, including streaming. +// +// This documentation is intended to explain each type and function in +// isolation. Walkthroughs, FAQs, and other narrative docs are available on the +// [Connect website], and there's a working [demonstration service] on Github. +// +// [Connect website]: https://connectrpc.com +// [demonstration service]: https://github.com/connectrpc/examples-go +package connect + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" +) + +// Version is the semantic version of the connect module. +const Version = "1.19.1" + +// These constants are used in compile-time handshakes with connect's generated +// code. +const ( + IsAtLeastVersion0_0_1 = true + IsAtLeastVersion0_1_0 = true + IsAtLeastVersion1_7_0 = true + IsAtLeastVersion1_13_0 = true +) + +// StreamType describes whether the client, server, neither, or both is +// streaming. 
+type StreamType uint8 + +const ( + StreamTypeUnary StreamType = 0b00 + StreamTypeClient StreamType = 0b01 + StreamTypeServer StreamType = 0b10 + StreamTypeBidi = StreamTypeClient | StreamTypeServer +) + +func (s StreamType) String() string { + switch s { + case StreamTypeUnary: + return "unary" + case StreamTypeClient: + return "client" + case StreamTypeServer: + return "server" + case StreamTypeBidi: + return "bidi" + } + return fmt.Sprintf("stream_%d", s) +} + +// StreamingHandlerConn is the server's view of a bidirectional message +// exchange. Interceptors for streaming RPCs may wrap StreamingHandlerConns. +// +// Like the standard library's [http.ResponseWriter], StreamingHandlerConns write +// response headers to the network with the first call to Send. Any subsequent +// mutations are effectively no-ops. Handlers may mutate response trailers at +// any time before returning. When the client has finished sending data, +// Receive returns an error wrapping [io.EOF]. Handlers should check for this +// using the standard library's [errors.Is]. +// +// Headers and trailers beginning with "Connect-" and "Grpc-" are reserved for +// use by the gRPC and Connect protocols: applications may read them but +// shouldn't write them. +// +// StreamingHandlerConn implementations provided by this module guarantee that +// all returned errors can be cast to [*Error] using the standard library's +// [errors.As]. +// +// StreamingHandlerConn implementations do not need to be safe for concurrent use. +type StreamingHandlerConn interface { + Spec() Spec + Peer() Peer + + Receive(any) error + RequestHeader() http.Header + + Send(any) error + ResponseHeader() http.Header + ResponseTrailer() http.Header +} + +// StreamingClientConn is the client's view of a bidirectional message exchange. +// Interceptors for streaming RPCs may wrap StreamingClientConns. +// +// StreamingClientConns write request headers to the network with the first +// call to Send. 
Any subsequent mutations are effectively no-ops. When the +// server is done sending data, the StreamingClientConn's Receive method +// returns an error wrapping [io.EOF]. Clients should check for this using the +// standard library's [errors.Is]. If the server encounters an error during +// processing, subsequent calls to the StreamingClientConn's Send method will +// return an error wrapping [io.EOF]; clients may then call Receive to unmarshal +// the error. +// +// Headers and trailers beginning with "Connect-" and "Grpc-" are reserved for +// use by the gRPC and Connect protocols: applications may read them but +// shouldn't write them. +// +// StreamingClientConn implementations provided by this module guarantee that +// all returned errors can be cast to [*Error] using the standard library's +// [errors.As]. +// +// In order to support bidirectional streaming RPCs, all StreamingClientConn +// implementations must support limited concurrent use. See the comments on +// each group of methods for details. +type StreamingClientConn interface { + // Spec and Peer must be safe to call concurrently with all other methods. + Spec() Spec + Peer() Peer + + // Send, RequestHeader, and CloseRequest may race with each other, but must + // be safe to call concurrently with all other methods. + Send(any) error + RequestHeader() http.Header + CloseRequest() error + + // Receive, ResponseHeader, ResponseTrailer, and CloseResponse may race with + // each other, but must be safe to call concurrently with all other methods. + Receive(any) error + ResponseHeader() http.Header + ResponseTrailer() http.Header + CloseResponse() error +} + +// Request is a wrapper around a generated request message. It provides +// access to metadata like headers and the RPC specification, as well as +// strongly-typed access to the message itself. 
+type Request[T any] struct { + Msg *T + + spec Spec + peer Peer + header http.Header + method string +} + +// NewRequest wraps a generated request message. +func NewRequest[T any](message *T) *Request[T] { + return &Request[T]{ + Msg: message, + // Initialized lazily so we don't allocate unnecessarily. + header: nil, + } +} + +// Any returns the concrete request message as an empty interface, so that +// *Request implements the [AnyRequest] interface. +func (r *Request[_]) Any() any { + return r.Msg +} + +// Spec returns a description of this RPC. +func (r *Request[_]) Spec() Spec { + return r.spec +} + +// Peer describes the other party for this RPC. +func (r *Request[_]) Peer() Peer { + return r.peer +} + +// Header returns the HTTP headers for this request. Headers beginning with +// "Connect-" and "Grpc-" are reserved for use by the Connect and gRPC +// protocols: applications may read them but shouldn't write them. +func (r *Request[_]) Header() http.Header { + if r.header == nil { + r.header = make(http.Header) + } + return r.header +} + +// HTTPMethod returns the HTTP method for this request. This is nearly always +// POST, but side-effect-free unary RPCs could be made via a GET. +// +// On a newly created request, via NewRequest, this will return the empty +// string until the actual request is actually sent and the HTTP method +// determined. This means that client interceptor functions will see the +// empty string until *after* they delegate to the handler they wrapped. It +// is even possible for this to return the empty string after such delegation, +// if the request was never actually sent to the server (and thus no +// determination ever made about the HTTP method). +func (r *Request[_]) HTTPMethod() string { + return r.method +} + +// internalOnly implements AnyRequest. +func (r *Request[_]) internalOnly() {} + +// setRequestMethod sets the request method to the given value. 
+func (r *Request[_]) setRequestMethod(method string) { + r.method = method +} + +// AnyRequest is the common method set of every [Request], regardless of type +// parameter. It's used in unary interceptors. +// +// Headers and trailers beginning with "Connect-" and "Grpc-" are reserved for +// use by the gRPC and Connect protocols: applications may read them but +// shouldn't write them. +// +// To preserve our ability to add methods to this interface without breaking +// backward compatibility, only types defined in this package can implement +// AnyRequest. +type AnyRequest interface { + Any() any + Spec() Spec + Peer() Peer + Header() http.Header + HTTPMethod() string + + internalOnly() + setRequestMethod(string) +} + +// Response is a wrapper around a generated response message. It provides +// access to metadata like headers and trailers, as well as strongly-typed +// access to the message itself. +type Response[T any] struct { + Msg *T + + header http.Header + trailer http.Header +} + +// NewResponse wraps a generated response message. +func NewResponse[T any](message *T) *Response[T] { + return &Response[T]{ + Msg: message, + // Initialized lazily so we don't allocate unnecessarily. + header: nil, + trailer: nil, + } +} + +// Any returns the concrete response message as an empty interface, so that +// *Response implements the [AnyResponse] interface. +func (r *Response[_]) Any() any { + return r.Msg +} + +// Header returns the HTTP headers for this response. Headers beginning with +// "Connect-" and "Grpc-" are reserved for use by the Connect and gRPC +// protocols: applications may read them but shouldn't write them. +func (r *Response[_]) Header() http.Header { + if r.header == nil { + r.header = make(http.Header) + } + return r.header +} + +// Trailer returns the trailers for this response. Depending on the underlying +// RPC protocol, trailers may be sent as HTTP trailers or a protocol-specific +// block of in-body metadata. 
+// +// Trailers beginning with "Connect-" and "Grpc-" are reserved for use by the +// Connect and gRPC protocols: applications may read them but shouldn't write +// them. +func (r *Response[_]) Trailer() http.Header { + if r.trailer == nil { + r.trailer = make(http.Header) + } + return r.trailer +} + +// internalOnly implements AnyResponse. +func (r *Response[_]) internalOnly() {} + +// AnyResponse is the common method set of every [Response], regardless of type +// parameter. It's used in unary interceptors. +// +// Headers and trailers beginning with "Connect-" and "Grpc-" are reserved for +// use by the gRPC and Connect protocols: applications may read them but +// shouldn't write them. +// +// To preserve our ability to add methods to this interface without breaking +// backward compatibility, only types defined in this package can implement +// AnyResponse. +type AnyResponse interface { + Any() any + Header() http.Header + Trailer() http.Header + + internalOnly() +} + +// HTTPClient is the interface connect expects HTTP clients to implement. The +// standard library's *http.Client implements HTTPClient. +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Spec is a description of a client call or a handler invocation. +// +// If you're using Protobuf, protoc-gen-connect-go generates a constant for the +// fully-qualified Procedure corresponding to each RPC in your schema. +type Spec struct { + StreamType StreamType + Schema any // for protobuf RPCs, a protoreflect.MethodDescriptor + Procedure string // for example, "/acme.foo.v1.FooService/Bar" + IsClient bool // otherwise we're in a handler + IdempotencyLevel IdempotencyLevel +} + +// Peer describes the other party to an RPC. +// +// When accessed client-side, Addr contains the host or host:port from the +// server's URL. When accessed server-side, Addr contains the client's address +// in IP:port format. 
+// +// On both the client and the server, Protocol is the RPC protocol in use. +// Currently, it's either [ProtocolConnect], [ProtocolGRPC], or +// [ProtocolGRPCWeb], but additional protocols may be added in the future. +// +// Query contains the query parameters for the request. For the server, this +// will reflect the actual query parameters sent. For the client, it is unset. +type Peer struct { + Addr string + Protocol string + Query url.Values // server-only +} + +func newPeerForURL(url *url.URL, protocol string) Peer { + return Peer{ + Addr: url.Host, + Protocol: protocol, + } +} + +// handlerConnCloser extends StreamingHandlerConn with a method for handlers to +// terminate the message exchange (and optionally send an error to the client). +type handlerConnCloser interface { + StreamingHandlerConn + + Close(error) error +} + +// receiveConn represents the shared methods of both StreamingClientConn and StreamingHandlerConn +// that the below helper functions use for implementing the rules around a "unary" stream, that +// is expected to have exactly one message (or zero messages followed by a non-EOF error). +type receiveConn interface { + Spec() Spec + Receive(any) error +} + +// hasHTTPMethod is implemented by streaming connections that support HTTP methods other than +// POST. +type hasHTTPMethod interface { + getHTTPMethod() string +} + +// errStreamingClientConn is a sentinel error implementation of StreamingClientConn. 
+type errStreamingClientConn struct { + err error +} + +func (c *errStreamingClientConn) Receive(msg any) error { + return c.err +} + +func (c *errStreamingClientConn) Spec() Spec { + return Spec{} +} + +func (c *errStreamingClientConn) Peer() Peer { + return Peer{} +} + +func (c *errStreamingClientConn) Send(msg any) error { + return c.err +} + +func (c *errStreamingClientConn) CloseRequest() error { + return c.err +} + +func (c *errStreamingClientConn) CloseResponse() error { + return c.err +} + +func (c *errStreamingClientConn) RequestHeader() http.Header { + return make(http.Header) +} + +func (c *errStreamingClientConn) ResponseHeader() http.Header { + return make(http.Header) +} + +func (c *errStreamingClientConn) ResponseTrailer() http.Header { + return make(http.Header) +} + +// receiveUnaryResponse unmarshals a message from a StreamingClientConn, then +// envelopes the message and attaches headers and trailers. It attempts to +// consume the response stream and isn't appropriate when receiving multiple +// messages. +func receiveUnaryResponse[T any](conn StreamingClientConn, initializer maybeInitializer) (*Response[T], error) { + msg, err := receiveUnaryMessage[T](conn, initializer, "response") + if err != nil { + return nil, err + } + return &Response[T]{ + Msg: msg, + header: conn.ResponseHeader(), + trailer: conn.ResponseTrailer(), + }, nil +} + +// receiveUnaryRequest unmarshals a message from a StreamingClientConn, then +// envelopes the message and attaches headers and other request properties. It +// attempts to consume the request stream and isn't appropriate when receiving +// multiple messages. 
+func receiveUnaryRequest[T any](conn StreamingHandlerConn, initializer maybeInitializer) (*Request[T], error) { + msg, err := receiveUnaryMessage[T](conn, initializer, "request") + if err != nil { + return nil, err + } + method := http.MethodPost + if hasRequestMethod, ok := conn.(hasHTTPMethod); ok { + method = hasRequestMethod.getHTTPMethod() + } + return &Request[T]{ + Msg: msg, + spec: conn.Spec(), + peer: conn.Peer(), + header: conn.RequestHeader(), + method: method, + }, nil +} + +func receiveUnaryMessage[T any](conn receiveConn, initializer maybeInitializer, what string) (*T, error) { + var msg T + if err := initializer.maybe(conn.Spec(), &msg); err != nil { + return nil, err + } + // Possibly counter-intuitive, but the gRPC specs about error codes state that both clients + // and servers should return "unimplemented" when they encounter a cardinality violation: where + // the number of messages in the stream is wrong. Search for "cardinality violation" in the + // following docs: + // https://grpc.github.io/grpc/core/md_doc_statuscodes.html + if err := conn.Receive(&msg); err != nil { + if errors.Is(err, io.EOF) { + err = NewError(CodeUnimplemented, fmt.Errorf("unary %s has zero messages", what)) + } + return nil, err + } + // In a well-formed stream, the one message must be the only content in the body. + // To verify that it is well-formed, try to read another message from the stream. 
+ // TODO: optimize this second receive: ideally do it w/out allocation, w/out + // fully reading next message (if one is present), and w/out trying to + // actually unmarshal the bytes) + var msg2 T + if err := initializer.maybe(conn.Spec(), &msg2); err != nil { + return nil, err + } + if err := conn.Receive(&msg2); !errors.Is(err, io.EOF) { + if err == nil { + err = NewError(CodeUnimplemented, fmt.Errorf("unary %s has multiple messages", what)) + } + return nil, err + } + return &msg, nil +} diff --git a/vendor/connectrpc.com/connect/context.go b/vendor/connectrpc.com/connect/context.go new file mode 100644 index 0000000000..9f2d018086 --- /dev/null +++ b/vendor/connectrpc.com/connect/context.go @@ -0,0 +1,242 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "context" + "net/http" +) + +// CallInfo represents information relevant to an RPC call. +type CallInfo interface { + // Spec returns a description of this call. + Spec() Spec + // Peer describes the other party for this call. + Peer() Peer + // RequestHeader returns the HTTP headers for this request. Headers beginning with + // "Connect-" and "Grpc-" are reserved for use by the Connect and gRPC + // protocols: applications may read them but shouldn't write them. + RequestHeader() http.Header + // ResponseHeader returns the HTTP headers for this response. 
Headers beginning with + // "Connect-" and "Grpc-" are reserved for use by the Connect and gRPC + // protocols: applications may read them but shouldn't write them. + // On the client side, this method returns nil before + // the call is actually made. After the call is made, for streaming operations, + // this method will block for the server to actually return response headers. + ResponseHeader() http.Header + // ResponseTrailer returns the trailers for this response. Depending on the underlying + // RPC protocol, trailers may be sent as HTTP trailers or a protocol-specific + // block of in-body metadata. + // + // Trailers beginning with "Connect-" and "Grpc-" are reserved for use by the + // Connect and gRPC protocols: applications may read them but shouldn't write + // them. + // + // On the client side, this method returns nil before the call is actually made. + // After the call is made, for streaming operations, this method will block + // for the server to actually return response trailers. + ResponseTrailer() http.Header + // HTTPMethod returns the HTTP method for this request. This is nearly always + // POST, but side-effect-free unary RPCs could be made via a GET. + // + // On a newly created request, via NewRequest, this will return the empty + // string until the actual request is actually sent and the HTTP method + // determined. This means that client interceptor functions will see the + // empty string until *after* they delegate to the handler they wrapped. It + // is even possible for this to return the empty string after such delegation, + // if the request was never actually sent to the server (and thus no + // determination ever made about the HTTP method). + HTTPMethod() string + + internalOnly() +} + +// Create a new client (i.e. outgoing) context for use from a client. 
When the +// returned context is passed to RPCs, the returned call info can be used to set +// request metadata before the RPC is invoked and to inspect response +// metadata after the RPC completes. +// +// The returned context may be re-used across RPCs as long as they are +// not concurrent. Results of all CallInfo methods other than +// RequestHeader() are undefined if the context is used with concurrent RPCs. +func NewClientContext(ctx context.Context) (context.Context, CallInfo) { + info := &clientCallInfo{} + return context.WithValue(ctx, clientCallInfoContextKey{}, info), info +} + +// CallInfoForHandlerContext returns the CallInfo for the given handler (i.e. incoming) context, if there is one. +func CallInfoForHandlerContext(ctx context.Context) (CallInfo, bool) { + value, ok := ctx.Value(handlerCallInfoContextKey{}).(CallInfo) + return value, ok +} + +// handlerCallInfo is a CallInfo implementation used for unary handlers. +type handlerCallInfo struct { + spec Spec + peer Peer + method string + requestHeader http.Header + responseHeader http.Header + responseTrailer http.Header +} + +func (c *handlerCallInfo) Spec() Spec { + return c.spec +} + +func (c *handlerCallInfo) Peer() Peer { + return c.peer +} + +func (c *handlerCallInfo) RequestHeader() http.Header { + if c.requestHeader == nil { + c.requestHeader = make(http.Header) + } + return c.requestHeader +} + +func (c *handlerCallInfo) ResponseHeader() http.Header { + if c.responseHeader == nil { + c.responseHeader = make(http.Header) + } + return c.responseHeader +} + +func (c *handlerCallInfo) ResponseTrailer() http.Header { + if c.responseTrailer == nil { + c.responseTrailer = make(http.Header) + } + return c.responseTrailer +} + +func (c *handlerCallInfo) HTTPMethod() string { + return c.method +} + +// internalOnly implements CallInfo. +func (c *handlerCallInfo) internalOnly() {} + +// streamingHandlerCallInfo is a CallInfo implementation used for streaming RPC handlers. 
+type streamingHandlerCallInfo struct { + conn StreamingHandlerConn +} + +func (c *streamingHandlerCallInfo) Spec() Spec { + return c.conn.Spec() +} + +func (c *streamingHandlerCallInfo) Peer() Peer { + return c.conn.Peer() +} + +func (c *streamingHandlerCallInfo) RequestHeader() http.Header { + return c.conn.RequestHeader() +} + +func (c *streamingHandlerCallInfo) ResponseHeader() http.Header { + return c.conn.ResponseHeader() +} + +func (c *streamingHandlerCallInfo) ResponseTrailer() http.Header { + return c.conn.ResponseTrailer() +} + +func (c *streamingHandlerCallInfo) HTTPMethod() string { + // All stream calls are POSTs + return http.MethodPost +} + +// internalOnly implements CallInfo. +func (c *streamingHandlerCallInfo) internalOnly() {} + +// clientCallInfo is a CallInfo implementation used for clients. +type clientCallInfo struct { + responseSource + spec Spec + peer Peer + method string + requestHeader http.Header +} + +func (c *clientCallInfo) Spec() Spec { + return c.spec +} + +func (c *clientCallInfo) Peer() Peer { + return c.peer +} + +func (c *clientCallInfo) RequestHeader() http.Header { + if c.requestHeader == nil { + c.requestHeader = make(http.Header) + } + return c.requestHeader +} + +func (c *clientCallInfo) ResponseHeader() http.Header { + if c.responseSource == nil { + return nil + } + return c.responseSource.ResponseHeader() +} + +func (c *clientCallInfo) ResponseTrailer() http.Header { + if c.responseSource == nil { + return nil + } + return c.responseSource.ResponseTrailer() +} + +func (c *clientCallInfo) HTTPMethod() string { + return c.method +} + +// internalOnly implements CallInfo. +func (c *clientCallInfo) internalOnly() {} + +// clientCallInfoContextKey is the key used to store client call info in context. +type clientCallInfoContextKey struct{} + +// sentinelContextKey is the key used to store a copy of client call info in context +// when a request is made. 
+// Each step in an interceptor chain compares the actual call info with the +// sentinel call info. If the two values are different, the request will +// return an error in the interceptor. +// This protects against changing the call info in interceptors, which is prohibited +// as it would allow users to modify call info mid-flight independent of the actual +// request or response. +// Users who wish to modify call info data such as headers and trailers should instead +// use Connect [Request] and [Response] wrapper types. +type sentinelContextKey struct{} + +// handlerCallInfoContextKey is the key used to store handler call info in context. +type handlerCallInfoContextKey struct{} + +// responseSource indicates a type that manages response headers and trailers. +type responseSource interface { + ResponseHeader() http.Header + ResponseTrailer() http.Header +} + +// clientCallInfoForContext gets the call info from a client/outgoing context. +func clientCallInfoForContext(ctx context.Context) (*clientCallInfo, bool) { + info, ok := ctx.Value(clientCallInfoContextKey{}).(*clientCallInfo) + return info, ok +} + +// newHandlerContext creates a new handler/incoming context. +func newHandlerContext(ctx context.Context, info CallInfo) context.Context { + return context.WithValue(ctx, handlerCallInfoContextKey{}, info) +} diff --git a/vendor/connectrpc.com/connect/duplex_http_call.go b/vendor/connectrpc.com/connect/duplex_http_call.go new file mode 100644 index 0000000000..80f6b671ce --- /dev/null +++ b/vendor/connectrpc.com/connect/duplex_http_call.go @@ -0,0 +1,471 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "context" + "errors" + "io" + "net/http" + "net/url" + "sync" + "sync/atomic" +) + +// duplexHTTPCall is a full-duplex stream between the client and server. The +// request body is the stream from client to server, and the response body is +// the reverse. +// +// Be warned: we need to use some lesser-known APIs to do this with net/http. +type duplexHTTPCall struct { + ctx context.Context + httpClient HTTPClient + streamType StreamType + onRequestSend func(*http.Request) + validateResponse func(*http.Response) *Error + + // io.Pipe is used to implement the request body for client streaming calls. + // If the request is unary, requestBodyWriter is nil. + requestBodyWriter *io.PipeWriter + + // requestSent ensures we only send the request once. + requestSent atomic.Bool + request *http.Request + + // responseReady is closed when the response is ready or when the request + // fails. Any error on request initialisation will be set on the + // responseErr. There's always a response if responseErr is nil. + responseReady chan struct{} + response *http.Response + responseErr error +} + +func newDuplexHTTPCall( + ctx context.Context, + httpClient HTTPClient, + url *url.URL, + spec Spec, + header http.Header, +) *duplexHTTPCall { + // ensure we make a copy of the url before we pass along to the + // Request. This ensures if a transport out of our control wants + // to mutate the req.URL, we don't feel the effects of it. 
+ url = cloneURL(url) + + // This is mirroring what http.NewRequestContext did, but + // using an already parsed url.URL object, rather than a string + // and parsing it again. This is a bit funny with HTTP/1.1 + // explicitly, but this is logic copied over from + // NewRequestContext and doesn't effect the actual version + // being transmitted. + request := (&http.Request{ + Method: http.MethodPost, + URL: url, + Header: header, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Body: http.NoBody, + GetBody: getNoBody, + Host: url.Host, + }).WithContext(ctx) + return &duplexHTTPCall{ + ctx: ctx, + httpClient: httpClient, + streamType: spec.StreamType, + request: request, + responseReady: make(chan struct{}), + } +} + +// Send sends a message to the server. +func (d *duplexHTTPCall) Send(payload messagePayload) (int64, error) { + if d.streamType&StreamTypeClient == 0 { + return d.sendUnary(payload) + } + isFirst := d.requestSent.CompareAndSwap(false, true) + if isFirst { + // This is the first time we're sending a message to the server. + // We need to send the request headers and start the request. + pipeReader, pipeWriter := io.Pipe() + d.requestBodyWriter = pipeWriter + d.request.Body = pipeReader + d.request.GetBody = nil // GetBody not supported for client streaming + d.request.ContentLength = -1 + go d.makeRequest() // concurrent request + } + if err := d.ctx.Err(); err != nil { + return 0, wrapIfContextError(err) + } + if isFirst && payload.Len() == 0 { + // On first write a nil Send is used to send request headers. Avoid + // writing a zero-length payload to avoid superfluous errors with close. + return 0, nil + } + // It's safe to write to this side of the pipe while net/http concurrently + // reads from the other side. + bytesWritten, err := payload.WriteTo(d.requestBodyWriter) + if err != nil && errors.Is(err, io.ErrClosedPipe) { + // Signal that the stream is closed with the more-typical io.EOF instead of + // io.ErrClosedPipe. 
This makes it easier for protocol-specific wrappers to + // match grpc-go's behavior. + err = io.EOF + } + return bytesWritten, err +} + +func (d *duplexHTTPCall) sendUnary(payload messagePayload) (int64, error) { + // Unary messages are sent as a single HTTP request. We don't need to use a + // pipe for the request body and we don't need to send headers separately. + if !d.requestSent.CompareAndSwap(false, true) { + return 0, errors.New("request already sent") + } + payloadLength := int64(payload.Len()) + if payloadLength > 0 { + // Build the request body from the payload. + payloadBody := newPayloadCloser(payload) + d.request.Body = payloadBody + d.request.ContentLength = payloadLength + d.request.GetBody = func() (io.ReadCloser, error) { + if !payloadBody.Rewind() { + return nil, errors.New("payload cannot be retried") + } + return payloadBody, nil + } + // Release the payload ensuring that after Send returns the + // payload is safe to be reused. See [http.RoundTripper] for + // more details. + defer payloadBody.Release() + } + d.makeRequest() // synchronous request + if d.responseErr != nil { + // Check on response errors for context errors. Other errors are + // handled on read. + if err := d.ctx.Err(); err != nil { + return 0, wrapIfContextError(err) + } + } + return payloadLength, nil +} + +// CloseWrite closes the request body. Callers *must* call CloseWrite before Read when +// using HTTP/1.x. +func (d *duplexHTTPCall) CloseWrite() error { + // Even if Write was never called, we need to make an HTTP request. This + // ensures that we've sent any headers to the server and that we have an HTTP + // response to read from. + if d.requestSent.CompareAndSwap(false, true) { + go d.makeRequest() + // We never setup a request body, so it's effectively already closed. + // So nothing else to do. + return nil + } + // The user calls CloseWrite to indicate that they're done sending data. 
It's + // safe to close the write side of the pipe while net/http is reading from + // it. + // + // Because connect also supports some RPC types over HTTP/1.1, we need to be + // careful how we expose this method to users. HTTP/1.1 doesn't support + // bidirectional streaming - the write side of the stream (aka request body) + // must be closed before we start reading the response or we'll just block + // forever. To make sure users don't have to worry about this, the generated + // code for unary, client streaming, and server streaming RPCs must call + // CloseWrite automatically rather than requiring the user to do it. + if d.requestBodyWriter != nil { + return d.requestBodyWriter.Close() + } + return d.request.Body.Close() +} + +// Header returns the HTTP request headers. +func (d *duplexHTTPCall) Header() http.Header { + return d.request.Header +} + +// Trailer returns the HTTP request trailers. +func (d *duplexHTTPCall) Trailer() http.Header { + return d.request.Trailer +} + +// URL returns the URL for the request. +func (d *duplexHTTPCall) URL() *url.URL { + return d.request.URL +} + +// Method returns the HTTP method for the request (GET or POST). +func (d *duplexHTTPCall) Method() string { + return d.request.Method +} + +// SetMethod changes the method of the request before it is sent. +func (d *duplexHTTPCall) SetMethod(method string) { + d.request.Method = method +} + +// Read from the response body. Returns the first error passed to SetError. +func (d *duplexHTTPCall) Read(data []byte) (int, error) { + // First, we wait until we've gotten the response headers and established the + // server-to-client side of the stream. + if err := d.BlockUntilResponseReady(); err != nil { + // The stream is already closed or corrupted. + return 0, err + } + // Before we read, check if the context has been canceled. 
+ if err := d.ctx.Err(); err != nil { + return 0, wrapIfContextError(err) + } + n, err := d.response.Body.Read(data) + if err != nil && !errors.Is(err, io.EOF) { + err = wrapIfContextDone(d.ctx, err) + err = wrapIfRSTError(err) + } + return n, err +} + +func (d *duplexHTTPCall) CloseRead() error { + _ = d.BlockUntilResponseReady() + if d.response == nil { + return nil + } + err := d.response.Body.Close() + err = wrapIfContextDone(d.ctx, err) + return wrapIfRSTError(err) +} + +// ResponseStatusCode is the response's HTTP status code. +func (d *duplexHTTPCall) ResponseStatusCode() (int, error) { + if err := d.BlockUntilResponseReady(); err != nil { + return 0, err + } + return d.response.StatusCode, nil +} + +// ResponseHeader returns the response HTTP headers. +func (d *duplexHTTPCall) ResponseHeader() http.Header { + _ = d.BlockUntilResponseReady() + if d.response != nil { + return d.response.Header + } + return make(http.Header) +} + +// ResponseTrailer returns the response HTTP trailers. +func (d *duplexHTTPCall) ResponseTrailer() http.Header { + _ = d.BlockUntilResponseReady() + if d.response != nil { + return d.response.Trailer + } + return make(http.Header) +} + +// SetValidateResponse sets the response validation function. The function runs +// in a background goroutine. +func (d *duplexHTTPCall) SetValidateResponse(validate func(*http.Response) *Error) { + d.validateResponse = validate +} + +// BlockUntilResponseReady returns when the response is ready or reports an +// error from initializing the request. +func (d *duplexHTTPCall) BlockUntilResponseReady() error { + <-d.responseReady + return d.responseErr +} + +func (d *duplexHTTPCall) makeRequest() { + // This runs concurrently with Write and CloseWrite. Read and CloseRead wait + // on d.responseReady, so we can't race with them. + defer close(d.responseReady) + + // Promote the header Host to the request object. 
+ if host := getHeaderCanonical(d.request.Header, headerHost); len(host) > 0 { + d.request.Host = host + } + if d.onRequestSend != nil { + d.onRequestSend(d.request) + } + // Once we send a message to the server, they send a message back and + // establish the receive side of the stream. + // On error, we close the request body using the Write side of the pipe. + // This ensures HTTP2 streams receive an io.EOF from the Read side of the + // pipe. Write's check for io.ErrClosedPipe and will convert this to io.EOF. + response, err := d.httpClient.Do(d.request) //nolint:bodyclose + if err != nil { + if errors.Is(err, io.EOF) { + // We use io.EOF as a sentinel in many places and don't want this + // transport error to be confused for those other situations. + err = io.ErrUnexpectedEOF + } + err = wrapIfContextError(err) + err = wrapIfLikelyH2CNotConfiguredError(d.request, err) + err = wrapIfLikelyWithGRPCNotUsedError(err) + err = wrapIfRSTError(err) + if _, ok := asError(err); !ok { + err = NewError(CodeUnavailable, err) + } + d.responseErr = err + _ = d.CloseWrite() + return + } + // We've got a response. We can now read from the response body. + // Closing the response body is delegated to the caller even on error. + d.response = response + if err := d.validateResponse(response); err != nil { + d.responseErr = err + _ = d.CloseWrite() + return + } + if (d.streamType&StreamTypeBidi) == StreamTypeBidi && response.ProtoMajor < 2 { + // If we somehow dialed an HTTP/1.x server, fail with an explicit message + // rather than returning a more cryptic error later on. + d.responseErr = errorf( + CodeUnimplemented, + "response from %v is HTTP/%d.%d: bidi streams require at least HTTP/2", + d.request.URL, + response.ProtoMajor, + response.ProtoMinor, + ) + _ = d.CloseWrite() + } +} + +// getNoBody is a GetBody function for http.NoBody. +func getNoBody() (io.ReadCloser, error) { + return http.NoBody, nil +} + +// messagePayload is a sized and seekable message payload. 
The interface is +// implemented by [*bytes.Reader] and *envelope. Reads must be non-blocking. +type messagePayload interface { + io.Reader + io.WriterTo + io.Seeker + Len() int +} + +// nopPayload is a message payload that does nothing. It's used to send headers +// to the server. +type nopPayload struct{} + +var _ messagePayload = nopPayload{} + +func (nopPayload) Read([]byte) (int, error) { + return 0, io.EOF +} + +func (nopPayload) WriteTo(io.Writer) (int64, error) { + return 0, nil +} + +func (nopPayload) Seek(int64, int) (int64, error) { + return 0, nil +} + +func (nopPayload) Len() int { + return 0 +} + +// messageSender sends a message payload. The interface is implemented by +// [*duplexHTTPCall] and writeSender. +type messageSender interface { + Send(messagePayload) (int64, error) +} + +// writeSender is a sender that writes to an [io.Writer]. Useful for wrapping +// [http.ResponseWriter]. +type writeSender struct { + writer io.Writer +} + +var _ messageSender = writeSender{} + +func (w writeSender) Send(payload messagePayload) (int64, error) { + return payload.WriteTo(w.writer) +} + +// See: https://cs.opensource.google/go/go/+/refs/tags/go1.20.1:src/net/http/clone.go;l=22-33 +func cloneURL(oldURL *url.URL) *url.URL { + if oldURL == nil { + return nil + } + newURL := new(url.URL) + *newURL = *oldURL + if oldURL.User != nil { + newURL.User = new(url.Userinfo) + *newURL.User = *oldURL.User + } + return newURL +} + +// payloadCloser is an [io.ReadCloser] that wraps a messagePayload. It's used to +// implement the request body for unary calls. To safely reuse the buffer +// call Release after the response is received to ensure the payload is safe for +// reuse. +type payloadCloser struct { + mu sync.Mutex + payload messagePayload // nil after Release +} + +func newPayloadCloser(payload messagePayload) *payloadCloser { + return &payloadCloser{ + payload: payload, + } +} + +// Read implements [io.Reader]. 
+func (p *payloadCloser) Read(dst []byte) (readN int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.payload == nil { + return 0, io.EOF + } + return p.payload.Read(dst) +} + +// WriteTo implements [io.WriterTo]. +func (p *payloadCloser) WriteTo(dst io.Writer) (int64, error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.payload == nil { + return 0, nil + } + return p.payload.WriteTo(dst) +} + +// Close implements [io.Closer]. +func (p *payloadCloser) Close() error { + return nil +} + +// Rewind rewinds the payload to the beginning. It returns false if the +// payload has been discarded from a previous call to Release. +func (p *payloadCloser) Rewind() bool { + p.mu.Lock() + defer p.mu.Unlock() + if p.payload == nil { + return false + } + if _, err := p.payload.Seek(0, io.SeekStart); err != nil { + return false + } + return true +} + +// Release discards the payload. After Release is called, the payload cannot be +// rewound and the payload is safe to reuse. +func (p *payloadCloser) Release() { + p.mu.Lock() + p.payload = nil + p.mu.Unlock() +} diff --git a/vendor/connectrpc.com/connect/envelope.go b/vendor/connectrpc.com/connect/envelope.go new file mode 100644 index 0000000000..21a897de9f --- /dev/null +++ b/vendor/connectrpc.com/connect/envelope.go @@ -0,0 +1,387 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package connect + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +// flagEnvelopeCompressed indicates that the data is compressed. It has the +// same meaning in the gRPC-Web, gRPC-HTTP2, and Connect protocols. +const flagEnvelopeCompressed = 0b00000001 + +var errSpecialEnvelope = errorf( + CodeUnknown, + "final message has protocol-specific flags: %w", + // User code checks for end of stream with errors.Is(err, io.EOF). + io.EOF, +) + +// envelope is a block of arbitrary bytes wrapped in gRPC and Connect's framing +// protocol. +// +// Each message is preceded by a 5-byte prefix. The first byte is a uint8 used +// as a set of bitwise flags, and the remainder is a uint32 indicating the +// message length. gRPC and Connect interpret the bitwise flags differently, so +// envelope leaves their interpretation up to the caller. +type envelope struct { + Data *bytes.Buffer + Flags uint8 + offset int64 +} + +var _ messagePayload = (*envelope)(nil) + +func (e *envelope) IsSet(flag uint8) bool { + return e.Flags&flag == flag +} + +// Read implements [io.Reader]. +func (e *envelope) Read(data []byte) (readN int, err error) { + if e.offset < 5 { + prefix, err := makeEnvelopePrefix(e.Flags, e.Data.Len()) + if err != nil { + return 0, err + } + readN = copy(data, prefix[e.offset:]) + e.offset += int64(readN) + if e.offset < 5 { + return readN, nil + } + data = data[readN:] + } + n := copy(data, e.Data.Bytes()[e.offset-5:]) + e.offset += int64(n) + readN += n + if readN == 0 && e.offset == int64(e.Data.Len()+5) { + err = io.EOF + } + return readN, err +} + +// WriteTo implements [io.WriterTo]. 
+func (e *envelope) WriteTo(dst io.Writer) (wroteN int64, err error) { + if e.offset < 5 { + prefix, err := makeEnvelopePrefix(e.Flags, e.Data.Len()) + if err != nil { + return 0, err + } + prefixN, err := dst.Write(prefix[e.offset:]) + e.offset += int64(prefixN) + wroteN += int64(prefixN) + if e.offset < 5 { + return wroteN, err + } + } + n, err := dst.Write(e.Data.Bytes()[e.offset-5:]) + e.offset += int64(n) + wroteN += int64(n) + return wroteN, err +} + +// Seek implements [io.Seeker]. Based on the implementation of [bytes.Reader]. +func (e *envelope) Seek(offset int64, whence int) (int64, error) { + var abs int64 + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs = e.offset + offset + case io.SeekEnd: + abs = int64(e.Data.Len()) + offset + default: + return 0, errors.New("connect.envelope.Seek: invalid whence") + } + if abs < 0 { + return 0, errors.New("connect.envelope.Seek: negative position") + } + e.offset = abs + return abs, nil +} + +// Len returns the number of bytes of the unread portion of the envelope. +func (e *envelope) Len() int { + if length := int(int64(e.Data.Len()) + 5 - e.offset); length > 0 { + return length + } + return 0 +} + +type envelopeWriter struct { + ctx context.Context //nolint:containedctx + sender messageSender + codec Codec + compressMinBytes int + compressionPool *compressionPool + bufferPool *bufferPool + sendMaxBytes int +} + +func (w *envelopeWriter) Marshal(message any) *Error { + if message == nil { + // Send no-op message to create the request and send headers. + payload := nopPayload{} + if _, err := w.sender.Send(payload); err != nil { + if connectErr, ok := asError(err); ok { + return connectErr + } + return NewError(CodeUnknown, err) + } + return nil + } + if appender, ok := w.codec.(marshalAppender); ok { + return w.marshalAppend(message, appender) + } + return w.marshal(message) +} + +// Write writes the enveloped message, compressing as necessary. 
It doesn't +// retain any references to the supplied envelope or its underlying data. +func (w *envelopeWriter) Write(env *envelope) *Error { + if env.IsSet(flagEnvelopeCompressed) || + w.compressionPool == nil || + env.Data.Len() < w.compressMinBytes { + if w.sendMaxBytes > 0 && env.Data.Len() > w.sendMaxBytes { + return errorf(CodeResourceExhausted, "message size %d exceeds sendMaxBytes %d", env.Data.Len(), w.sendMaxBytes) + } + return w.write(env) + } + data := w.bufferPool.Get() + defer w.bufferPool.Put(data) + if err := w.compressionPool.Compress(data, env.Data); err != nil { + return err + } + if w.sendMaxBytes > 0 && data.Len() > w.sendMaxBytes { + return errorf(CodeResourceExhausted, "compressed message size %d exceeds sendMaxBytes %d", data.Len(), w.sendMaxBytes) + } + return w.write(&envelope{ + Data: data, + Flags: env.Flags | flagEnvelopeCompressed, + }) +} + +func (w *envelopeWriter) marshalAppend(message any, codec marshalAppender) *Error { + // Codec supports MarshalAppend; try to re-use a []byte from the pool. + buffer := w.bufferPool.Get() + defer w.bufferPool.Put(buffer) + raw, err := codec.MarshalAppend(buffer.Bytes(), message) + if err != nil { + return errorf(CodeInternal, "marshal message: %w", err) + } + if cap(raw) > buffer.Cap() { + // The buffer from the pool was too small, so MarshalAppend grew the slice. + // Pessimistically assume that the too-small buffer is insufficient for the + // application workload, so there's no point in keeping it in the pool. + // Instead, replace it with the larger, newly-allocated slice. This + // allocates, but it's a small, constant-size allocation. + *buffer = *bytes.NewBuffer(raw) + } else { + // MarshalAppend didn't allocate, but we need to fix the internal state of + // the buffer. Compared to replacing the buffer (as above), buffer.Write + // copies but avoids allocating. 
+ buffer.Write(raw) + } + envelope := &envelope{Data: buffer} + return w.Write(envelope) +} + +func (w *envelopeWriter) marshal(message any) *Error { + // Codec doesn't support MarshalAppend; let Marshal allocate a []byte. + raw, err := w.codec.Marshal(message) + if err != nil { + return errorf(CodeInternal, "marshal message: %w", err) + } + buffer := bytes.NewBuffer(raw) + // Put our new []byte into the pool for later reuse. + defer w.bufferPool.Put(buffer) + envelope := &envelope{Data: buffer} + return w.Write(envelope) +} + +func (w *envelopeWriter) write(env *envelope) *Error { + if _, err := w.sender.Send(env); err != nil { + err = wrapIfContextDone(w.ctx, err) + if connectErr, ok := asError(err); ok { + return connectErr + } + return errorf(CodeUnknown, "write envelope: %w", err) + } + return nil +} + +type envelopeReader struct { + ctx context.Context //nolint:containedctx + reader io.Reader + bytesRead int64 // detect trailers-only gRPC responses + codec Codec + last envelope + compressionPool *compressionPool + bufferPool *bufferPool + readMaxBytes int +} + +func (r *envelopeReader) Unmarshal(message any) *Error { + buffer := r.bufferPool.Get() + var dontRelease *bytes.Buffer + defer func() { + if buffer != dontRelease { + r.bufferPool.Put(buffer) + } + }() + + env := &envelope{Data: buffer} + err := r.Read(env) + switch { + case err == nil && env.IsSet(flagEnvelopeCompressed) && r.compressionPool == nil: + return errorf( + CodeInternal, + "protocol error: sent compressed message without compression support", + ) + case err == nil && + (env.Flags == 0 || env.Flags == flagEnvelopeCompressed) && + env.Data.Len() == 0: + // This is a standard message (because none of the top 7 bits are set) and + // there's no data, so the zero value of the message is correct. + return nil + case err != nil && errors.Is(err, io.EOF): + // The stream has ended. Propagate the EOF to the caller. + return err + case err != nil: + // Something's wrong. 
+ return err + } + + data := env.Data + if data.Len() > 0 && env.IsSet(flagEnvelopeCompressed) { + decompressed := r.bufferPool.Get() + defer func() { + if decompressed != dontRelease { + r.bufferPool.Put(decompressed) + } + }() + if err := r.compressionPool.Decompress(decompressed, data, int64(r.readMaxBytes)); err != nil { + return err + } + data = decompressed + } + + if env.Flags != 0 && env.Flags != flagEnvelopeCompressed { + // Drain the rest of the stream to ensure there is no extra data. + numBytes, err := discard(r.reader) + r.bytesRead += numBytes + if err != nil { + err = wrapIfContextError(err) + if connErr, ok := asError(err); ok { + return connErr + } + return errorf(CodeInternal, "corrupt response: I/O error after end-stream message: %w", err) + } else if numBytes > 0 { + return errorf(CodeInternal, "corrupt response: %d extra bytes after end of stream", numBytes) + } + // One of the protocol-specific flags are set, so this is the end of the + // stream. Save the message for protocol-specific code to process and + // return a sentinel error. We alias the buffer with dontRelease as a + // way of marking it so above defers don't release it to the pool. + r.last = envelope{ + Data: data, + Flags: env.Flags, + } + dontRelease = data + return errSpecialEnvelope + } + + if err := r.codec.Unmarshal(data.Bytes(), message); err != nil { + return errorf(CodeInvalidArgument, "unmarshal message: %w", err) + } + return nil +} + +func (r *envelopeReader) Read(env *envelope) *Error { + prefixes := [5]byte{} + // io.ReadFull reads the number of bytes requested, or returns an error. + // io.EOF will only be returned if no bytes were read. + n, err := io.ReadFull(r.reader, prefixes[:]) + r.bytesRead += int64(n) + if err != nil { + if errors.Is(err, io.EOF) { + // The stream ended cleanly. That's expected, but we need to propagate an EOF + // to the user so that they know that the stream has ended. We shouldn't + // add any alarming text about protocol errors, though. 
+ return NewError(CodeUnknown, err) + } + err = wrapIfMaxBytesError(err, "read 5 byte message prefix") + err = wrapIfContextDone(r.ctx, err) + if connectErr, ok := asError(err); ok { + return connectErr + } + // Something else has gone wrong - the stream didn't end cleanly. + return errorf( + CodeInvalidArgument, + "protocol error: incomplete envelope: %w", err, + ) + } + size := int64(binary.BigEndian.Uint32(prefixes[1:5])) + if r.readMaxBytes > 0 && size > int64(r.readMaxBytes) { + n, err := io.CopyN(io.Discard, r.reader, size) + r.bytesRead += n + if err != nil && !errors.Is(err, io.EOF) { + return errorf(CodeResourceExhausted, "message is larger than configured max %d - unable to determine message size: %w", r.readMaxBytes, err) + } + return errorf(CodeResourceExhausted, "message size %d is larger than configured max %d", size, r.readMaxBytes) + } + // We've read the prefix, so we know how many bytes to expect. + // CopyN will return an error if it doesn't read the requested + // number of bytes. + readN, err := io.CopyN(env.Data, r.reader, size) + r.bytesRead += readN + if err != nil { + if errors.Is(err, io.EOF) { + // We've gotten fewer bytes than we expected, so the stream has ended + // unexpectedly. + return errorf( + CodeInvalidArgument, + "protocol error: promised %d bytes in enveloped message, got %d bytes", + size, + readN, + ) + } + err = wrapIfMaxBytesError(err, "read %d byte message", size) + err = wrapIfContextDone(r.ctx, err) + if connectErr, ok := asError(err); ok { + return connectErr + } + return errorf(CodeUnknown, "read enveloped message: %w", err) + } + env.Flags = prefixes[0] + return nil +} + +func makeEnvelopePrefix(flags uint8, size int) ([5]byte, error) { + // Cast as 64-bit to ensure comparison works on all architectures. 
+ size64 := int64(size) + if size64 < 0 || size64 > math.MaxUint32 { + return [5]byte{}, fmt.Errorf("connect.makeEnvelopePrefix: size %d out of bounds", size) + } + prefix := [5]byte{} + prefix[0] = flags + binary.BigEndian.PutUint32(prefix[1:5], uint32(size64)) + return prefix, nil +} diff --git a/vendor/connectrpc.com/connect/error.go b/vendor/connectrpc.com/connect/error.go new file mode 100644 index 0000000000..65558065a4 --- /dev/null +++ b/vendor/connectrpc.com/connect/error.go @@ -0,0 +1,458 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "strings" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + commonErrorsURL = "https://connectrpc.com/docs/go/common-errors" + defaultAnyResolverPrefix = "type.googleapis.com/" +) + +var ( + // errNotModified signals Connect-protocol responses to GET requests to use the + // 304 Not Modified HTTP error code. + errNotModified = errors.New("not modified") + // errNotModifiedClient wraps ErrNotModified for use client-side. + errNotModifiedClient = fmt.Errorf("HTTP 304: %w", errNotModified) +) + +// An ErrorDetail is a self-describing Protobuf message attached to an [*Error]. 
+// Error details are sent over the network to clients, which can then work with +// strongly-typed data rather than trying to parse a complex error message. For +// example, you might use details to send a localized error message or retry +// parameters to the client. +// +// The [google.golang.org/genproto/googleapis/rpc/errdetails] package contains a +// variety of Protobuf messages commonly used as error details. +type ErrorDetail struct { + pbAny *anypb.Any + pbInner proto.Message // if nil, must be extracted from pbAny + wireJSON string // preserve human-readable JSON +} + +// NewErrorDetail constructs a new error detail. If msg is an *[anypb.Any] then +// it is used as is. Otherwise, it is first marshalled into an *[anypb.Any] +// value. This returns an error if msg cannot be marshalled. +func NewErrorDetail(msg proto.Message) (*ErrorDetail, error) { + // If it's already an Any, don't wrap it inside another. + if pb, ok := msg.(*anypb.Any); ok { + return &ErrorDetail{pbAny: pb}, nil + } + pb, err := anypb.New(msg) + if err != nil { + return nil, err + } + return &ErrorDetail{pbAny: pb, pbInner: msg}, nil +} + +// Type is the fully-qualified name of the detail's Protobuf message (for +// example, acme.foo.v1.FooDetail). +func (d *ErrorDetail) Type() string { + // proto.Any tries to make messages self-describing by using type URLs rather + // than plain type names, but there aren't any descriptor registries + // deployed. With the current state of the `Any` code, it's not possible to + // build a useful type registry either. To hide this from users, we should + // trim the URL prefix is added to the type name. + // + // If we ever want to support remote registries, we can add an explicit + // `TypeURL` method. + return typeNameForURL(d.pbAny.GetTypeUrl()) +} + +// Bytes returns a copy of the Protobuf-serialized detail. 
+func (d *ErrorDetail) Bytes() []byte { + out := make([]byte, len(d.pbAny.GetValue())) + copy(out, d.pbAny.GetValue()) + return out +} + +// Value uses the Protobuf runtime's package-global registry to unmarshal the +// Detail into a strongly-typed message. Typically, clients use Go type +// assertions to cast from the proto.Message interface to concrete types. +func (d *ErrorDetail) Value() (proto.Message, error) { + if d.pbInner != nil { + // We clone it so that if the caller mutates the returned value, + // they don't inadvertently corrupt this error detail value. + return proto.Clone(d.pbInner), nil + } + return d.pbAny.UnmarshalNew() +} + +// An Error captures four key pieces of information: a [Code], an underlying Go +// error, a map of metadata, and an optional collection of arbitrary Protobuf +// messages called "details" (more on those below). Servers send the code, the +// underlying error's Error() output, the metadata, and details over the wire +// to clients. Remember that the underlying error's message will be sent to +// clients - take care not to leak sensitive information from public APIs! +// +// Service implementations and interceptors should return errors that can be +// cast to an [*Error] (using the standard library's [errors.As]). If the returned +// error can't be cast to an [*Error], connect will use [CodeUnknown] and the +// returned error's message. +// +// Error details are an optional mechanism for servers, interceptors, and +// proxies to attach arbitrary Protobuf messages to the error code and message. +// They're a clearer and more performant alternative to HTTP header +// microformats. See [the documentation on errors] for more details. +// +// [the documentation on errors]: https://connectrpc.com/docs/go/errors +type Error struct { + code Code + err error + details []*ErrorDetail + meta http.Header + wireErr bool +} + +// NewError annotates any Go error with a status code. 
+func NewError(c Code, underlying error) *Error { + return &Error{code: c, err: underlying} +} + +// NewWireError is similar to [NewError], but the resulting *Error returns true +// when tested with [IsWireError]. +// +// This is useful for clients trying to propagate partial failures from +// streaming RPCs. Often, these RPCs include error information in their +// response messages (for example, [gRPC server reflection] and +// OpenTelemetry's [OTLP]). Clients propagating these errors up the stack +// should use NewWireError to clarify that the error code, message, and details +// (if any) were explicitly sent by the server rather than inferred from a +// lower-level networking error or timeout. +// +// [gRPC server reflection]: https://github.com/grpc/grpc/blob/v1.49.2/src/proto/grpc/reflection/v1alpha/reflection.proto#L132-L136 +// [OTLP]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#partial-success +func NewWireError(c Code, underlying error) *Error { + err := NewError(c, underlying) + err.wireErr = true + return err +} + +// IsWireError checks whether the error was returned by the server, as opposed +// to being synthesized by the client. +// +// Clients may find this useful when deciding how to propagate errors. For +// example, an RPC-to-HTTP proxy might expose a server-sent CodeUnknown as an +// HTTP 500 but a client-synthesized CodeUnknown as a 503. +// +// Handlers will strip [Error.Meta] headers propagated from wire errors to avoid +// leaking response headers. To propagate headers recreate the error as a +// non-wire error. +func IsWireError(err error) bool { + se := new(Error) + if !errors.As(err, &se) { + return false + } + return se.wireErr +} + +// NewNotModifiedError indicates that the requested resource hasn't changed. It +// should be used only when handlers wish to respond to conditional HTTP GET +// requests with a 304 Not Modified. 
In all other circumstances, including all +// RPCs using the gRPC or gRPC-Web protocols, it's equivalent to sending an +// error with [CodeUnknown]. The supplied headers should include Etag, +// Cache-Control, or any other headers required by [RFC 9110 § 15.4.5]. +// +// Clients should check for this error using [IsNotModifiedError]. +// +// [RFC 9110 § 15.4.5]: https://httpwg.org/specs/rfc9110.html#status.304 +func NewNotModifiedError(headers http.Header) *Error { + err := NewError(CodeUnknown, errNotModified) + if headers != nil { + err.meta = headers + } + return err +} + +func (e *Error) Error() string { + message := e.Message() + if message == "" { + return e.code.String() + } + return e.code.String() + ": " + message +} + +// Message returns the underlying error message. It may be empty if the +// original error was created with a status code and a nil error. +func (e *Error) Message() string { + if e.err != nil { + return e.err.Error() + } + return "" +} + +// Unwrap allows [errors.Is] and [errors.As] access to the underlying error. +func (e *Error) Unwrap() error { + return e.err +} + +// Code returns the error's status code. +func (e *Error) Code() Code { + return e.code +} + +// Details returns the error's details. +func (e *Error) Details() []*ErrorDetail { + return e.details +} + +// AddDetail appends to the error's details. +func (e *Error) AddDetail(d *ErrorDetail) { + e.details = append(e.details, d) +} + +// Meta allows the error to carry additional information as key-value pairs. +// +// Protocol-specific headers and trailers may be removed to avoid breaking +// protocol semantics. For example, Content-Length and Content-Type headers +// won't be propagated. See the documentation for each protocol for more +// datails. +// +// When clients receive errors, the metadata contains the union of the HTTP +// headers and the protocol-specific trailers (either HTTP trailers or in-body +// metadata). 
+func (e *Error) Meta() http.Header { + if e.meta == nil { + e.meta = make(http.Header) + } + return e.meta +} + +func (e *Error) detailsAsAny() []*anypb.Any { + anys := make([]*anypb.Any, 0, len(e.details)) + for _, detail := range e.details { + anys = append(anys, detail.pbAny) + } + return anys +} + +// IsNotModifiedError checks whether the supplied error indicates that the +// requested resource hasn't changed. It only returns true if the server used +// [NewNotModifiedError] in response to a Connect-protocol RPC made with an +// HTTP GET. +func IsNotModifiedError(err error) bool { + return errors.Is(err, errNotModified) +} + +// errorf calls fmt.Errorf with the supplied template and arguments, then wraps +// the resulting error. +func errorf(c Code, template string, args ...any) *Error { + return NewError(c, fmt.Errorf(template, args...)) +} + +// asError uses errors.As to unwrap any error and look for a connect *Error. +func asError(err error) (*Error, bool) { + var connectErr *Error + ok := errors.As(err, &connectErr) + return connectErr, ok +} + +// wrapIfUncoded ensures that all errors are wrapped. It leaves already-wrapped +// errors unchanged, uses wrapIfContextError to apply codes to context.Canceled +// and context.DeadlineExceeded, and falls back to wrapping other errors with +// CodeUnknown. +func wrapIfUncoded(err error) error { + if err == nil { + return nil + } + maybeCodedErr := wrapIfContextError(err) + if _, ok := asError(maybeCodedErr); ok { + return maybeCodedErr + } + return NewError(CodeUnknown, maybeCodedErr) +} + +// wrapIfContextError applies CodeCanceled or CodeDeadlineExceeded to Go's +// context.Canceled and context.DeadlineExceeded errors, but only if they +// haven't already been wrapped. 
+func wrapIfContextError(err error) error { + if err == nil { + return nil + } + if _, ok := asError(err); ok { + return err + } + if errors.Is(err, context.Canceled) { + return NewError(CodeCanceled, err) + } + if errors.Is(err, context.DeadlineExceeded) { + return NewError(CodeDeadlineExceeded, err) + } + // Ick, some dial errors can be returned as os.ErrDeadlineExceeded + // instead of context.DeadlineExceeded :( + // https://github.com/golang/go/issues/64449 + if errors.Is(err, os.ErrDeadlineExceeded) { + return NewError(CodeDeadlineExceeded, err) + } + return err +} + +// wrapIfContextDone wraps errors with CodeCanceled or CodeDeadlineExceeded +// if the context is done. It leaves already-wrapped errors unchanged. +func wrapIfContextDone(ctx context.Context, err error) error { + if err == nil { + return nil + } + err = wrapIfContextError(err) + if _, ok := asError(err); ok { + return err + } + ctxErr := ctx.Err() + if errors.Is(ctxErr, context.Canceled) { + return NewError(CodeCanceled, err) + } else if errors.Is(ctxErr, context.DeadlineExceeded) { + return NewError(CodeDeadlineExceeded, err) + } + return err +} + +// wrapIfLikelyH2CNotConfiguredError adds a wrapping error that has a message +// telling the caller that they likely need to use h2c but are using a raw http.Client{}. +// +// This happens when running a gRPC-only server. +// This is fragile and may break over time, and this should be considered a best-effort. +func wrapIfLikelyH2CNotConfiguredError(request *http.Request, err error) error { + if err == nil { + return nil + } + if _, ok := asError(err); ok { + return err + } + if url := request.URL; url != nil && url.Scheme != "http" { + // If the scheme is not http, we definitely do not have an h2c error, so just return. 
+ return err + } + // net/http code has been investigated and there is no typing of any of these errors + // they are all created with fmt.Errorf + // grpc-go returns the first error 2/3-3/4 of the time, and the second error 1/4-1/3 of the time + if errString := err.Error(); strings.HasPrefix(errString, `Post "`) && + (strings.Contains(errString, `net/http: HTTP/1.x transport connection broken: malformed HTTP response`) || + strings.HasSuffix(errString, `write: broken pipe`)) { + return fmt.Errorf("possible h2c configuration issue when talking to gRPC server, see %s: %w", commonErrorsURL, err) + } + return err +} + +// wrapIfLikelyWithGRPCNotUsedError adds a wrapping error that has a message +// telling the caller that they likely forgot to use connect.WithGRPC(). +// +// This happens when running a gRPC-only server. +// This is fragile and may break over time, and this should be considered a best-effort. +func wrapIfLikelyWithGRPCNotUsedError(err error) error { + if err == nil { + return nil + } + if _, ok := asError(err); ok { + return err + } + // golang.org/x/net code has been investigated and there is no typing of this error + // it is created with fmt.Errorf + // http2/transport.go:573: return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) + if errString := err.Error(); strings.HasPrefix(errString, `Post "`) && + strings.Contains(errString, `http2: Transport: cannot retry err`) && + strings.HasSuffix(errString, `after Request.Body was written; define Request.GetBody to avoid this error`) { + return fmt.Errorf("possible missing connect.WithGPRC() client option when talking to gRPC server, see %s: %w", commonErrorsURL, err) + } + return err +} + +// HTTP/2 has its own set of error codes, which it sends in RST_STREAM frames. 
+// When the server sends one of these errors, we should map it back into our +// RPC error codes following +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#http2-transport-mapping. +// +// This would be vastly simpler if we were using x/net/http2 directly, since +// the StreamError type is exported. When x/net/http2 gets vendored into +// net/http, though, all these types become unexported...so we're left with +// string munging. +func wrapIfRSTError(err error) error { + const ( + streamErrPrefix = "stream error: " + fromPeerSuffix = "; received from peer" + ) + if err == nil { + return nil + } + if _, ok := asError(err); ok { + return err + } + if urlErr := new(url.Error); errors.As(err, &urlErr) { + // If we get an RST_STREAM error from http.Client.Do, it's wrapped in a + // *url.Error. + err = urlErr.Unwrap() + } + msg := err.Error() + if !strings.HasPrefix(msg, streamErrPrefix) { + return err + } + if !strings.HasSuffix(msg, fromPeerSuffix) { + return err + } + msg = strings.TrimSuffix(msg, fromPeerSuffix) + i := strings.LastIndex(msg, ";") + if i < 0 || i >= len(msg)-1 { + return err + } + msg = msg[i+1:] + msg = strings.TrimSpace(msg) + switch msg { + case "NO_ERROR", "PROTOCOL_ERROR", "INTERNAL_ERROR", "FLOW_CONTROL_ERROR", + "SETTINGS_TIMEOUT", "FRAME_SIZE_ERROR", "COMPRESSION_ERROR", "CONNECT_ERROR": + return NewError(CodeInternal, err) + case "REFUSED_STREAM": + return NewError(CodeUnavailable, err) + case "CANCEL": + return NewError(CodeCanceled, err) + case "ENHANCE_YOUR_CALM": + return NewError(CodeResourceExhausted, fmt.Errorf("bandwidth exhausted: %w", err)) + case "INADEQUATE_SECURITY": + return NewError(CodePermissionDenied, fmt.Errorf("transport protocol insecure: %w", err)) + default: + return err + } +} + +// wrapIfMaxBytesError wraps errors returned reading from a http.MaxBytesHandler +// whose limit has been exceeded. 
+func wrapIfMaxBytesError(err error, tmpl string, args ...any) error { + if err == nil { + return nil + } + if _, ok := asError(err); ok { + return err + } + var maxBytesErr *http.MaxBytesError + if ok := errors.As(err, &maxBytesErr); !ok { + return err + } + prefix := fmt.Sprintf(tmpl, args...) + return errorf(CodeResourceExhausted, "%s: exceeded %d byte http.MaxBytesReader limit", prefix, maxBytesErr.Limit) +} + +func typeNameForURL(url string) string { + return url[strings.LastIndexByte(url, '/')+1:] +} diff --git a/vendor/connectrpc.com/connect/error_writer.go b/vendor/connectrpc.com/connect/error_writer.go new file mode 100644 index 0000000000..b8e33459e3 --- /dev/null +++ b/vendor/connectrpc.com/connect/error_writer.go @@ -0,0 +1,179 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" +) + +// protocolType is one of the supported RPC protocols. +type protocolType uint8 + +const ( + unknownProtocol protocolType = iota + connectUnaryProtocol + connectStreamProtocol + grpcProtocol + grpcWebProtocol +) + +// An ErrorWriter writes errors to an [http.ResponseWriter] in the format +// expected by an RPC client. This is especially useful in server-side net/http +// middleware, where you may wish to handle requests from RPC and non-RPC +// clients with the same code. +// +// ErrorWriters are safe to use concurrently. 
+type ErrorWriter struct { + bufferPool *bufferPool + protobuf Codec + requireConnectProtocolHeader bool +} + +// NewErrorWriter constructs an ErrorWriter. Handler options may be passed to +// configure the error writer behaviour to match the handlers. +// [WithRequireConnectProtocolHeader] will assert that Connect protocol +// requests include the version header allowing the error writer to correctly +// classify the request. +// Options supplied via [WithConditionalHandlerOptions] are ignored. +func NewErrorWriter(opts ...HandlerOption) *ErrorWriter { + config := newHandlerConfig("", StreamTypeUnary, opts) + codecs := newReadOnlyCodecs(config.Codecs) + return &ErrorWriter{ + bufferPool: config.BufferPool, + protobuf: codecs.Protobuf(), + requireConnectProtocolHeader: config.RequireConnectProtocolHeader, + } +} + +func (w *ErrorWriter) classifyRequest(request *http.Request) protocolType { + ctype := canonicalizeContentType(getHeaderCanonical(request.Header, headerContentType)) + isPost := request.Method == http.MethodPost + isGet := request.Method == http.MethodGet + switch { + case isPost && (ctype == grpcContentTypeDefault || strings.HasPrefix(ctype, grpcContentTypePrefix)): + return grpcProtocol + case isPost && (ctype == grpcWebContentTypeDefault || strings.HasPrefix(ctype, grpcWebContentTypePrefix)): + return grpcWebProtocol + case isPost && strings.HasPrefix(ctype, connectStreamingContentTypePrefix): + // Streaming ignores the requireConnectProtocolHeader option as the + // Content-Type is enough to determine the protocol. 
+ if err := connectCheckProtocolVersion(request, false /* required */); err != nil { + return unknownProtocol + } + return connectStreamProtocol + case isPost && strings.HasPrefix(ctype, connectUnaryContentTypePrefix): + if err := connectCheckProtocolVersion(request, w.requireConnectProtocolHeader); err != nil { + return unknownProtocol + } + return connectUnaryProtocol + case isGet: + if err := connectCheckProtocolVersion(request, w.requireConnectProtocolHeader); err != nil { + return unknownProtocol + } + return connectUnaryProtocol + default: + return unknownProtocol + } +} + +// IsSupported checks whether a request is using one of the ErrorWriter's +// supported RPC protocols. +func (w *ErrorWriter) IsSupported(request *http.Request) bool { + return w.classifyRequest(request) != unknownProtocol +} + +// Write an error, using the format appropriate for the RPC protocol in use. +// Callers should first use IsSupported to verify that the request is using one +// of the ErrorWriter's supported RPC protocols. If the protocol is unknown, +// Write will send the error as unprefixed, Connect-formatted JSON. +// +// Write does not read or close the request body. +func (w *ErrorWriter) Write(response http.ResponseWriter, request *http.Request, err error) error { + ctype := canonicalizeContentType(getHeaderCanonical(request.Header, headerContentType)) + switch protocolType := w.classifyRequest(request); protocolType { + case connectStreamProtocol: + setHeaderCanonical(response.Header(), headerContentType, ctype) + return w.writeConnectStreaming(response, err) + case grpcProtocol: + setHeaderCanonical(response.Header(), headerContentType, ctype) + return w.writeGRPC(response, err) + case grpcWebProtocol: + setHeaderCanonical(response.Header(), headerContentType, ctype) + return w.writeGRPCWeb(response, err) + case unknownProtocol, connectUnaryProtocol: + fallthrough + default: + // Unary errors are always JSON. 
Unknown protocols are treated as unary + // because they are likely to be Connect clients and will still be able to + // parse the error as it's in a human-readable format. + setHeaderCanonical(response.Header(), headerContentType, connectUnaryContentTypeJSON) + return w.writeConnectUnary(response, err) + } +} + +func (w *ErrorWriter) writeConnectUnary(response http.ResponseWriter, err error) error { + if connectErr, ok := asError(err); ok && !connectErr.wireErr { + mergeNonProtocolHeaders(response.Header(), connectErr.meta) + } + response.WriteHeader(connectCodeToHTTP(CodeOf(err))) + data, marshalErr := json.Marshal(newConnectWireError(err)) + if marshalErr != nil { + return fmt.Errorf("marshal error: %w", marshalErr) + } + _, writeErr := response.Write(data) + return writeErr +} + +func (w *ErrorWriter) writeConnectStreaming(response http.ResponseWriter, err error) error { + response.WriteHeader(http.StatusOK) + marshaler := &connectStreamingMarshaler{ + envelopeWriter: envelopeWriter{ + sender: writeSender{writer: response}, + bufferPool: w.bufferPool, + }, + } + // MarshalEndStream returns *Error: check return value to avoid typed nils. + if marshalErr := marshaler.MarshalEndStream(err, make(http.Header)); marshalErr != nil { + return marshalErr + } + return nil +} + +func (w *ErrorWriter) writeGRPC(response http.ResponseWriter, err error) error { + trailers := make(http.Header, 2) // need space for at least code & message + grpcErrorToTrailer(trailers, w.protobuf, err) + // To make net/http reliably send trailers without a body, we must set the + // Trailers header rather than using http.TrailerPrefix. See + // https://github.com/golang/go/issues/54723. 
+ keys := make([]string, 0, len(trailers)) + for k := range trailers { + keys = append(keys, k) + } + setHeaderCanonical(response.Header(), headerTrailer, strings.Join(keys, ",")) + response.WriteHeader(http.StatusOK) + mergeHeaders(response.Header(), trailers) + return nil +} + +func (w *ErrorWriter) writeGRPCWeb(response http.ResponseWriter, err error) error { + // This is a trailers-only response. To match the behavior of Envoy and + // protocol_grpc.go, put the trailers in the HTTP headers. + grpcErrorToTrailer(response.Header(), w.protobuf, err) + response.WriteHeader(http.StatusOK) + return nil +} diff --git a/vendor/connectrpc.com/connect/handler.go b/vendor/connectrpc.com/connect/handler.go new file mode 100644 index 0000000000..e33934ea2e --- /dev/null +++ b/vendor/connectrpc.com/connect/handler.go @@ -0,0 +1,423 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "context" + "net/http" +) + +// A Handler is the server-side implementation of a single RPC defined by a +// service schema. +// +// By default, Handlers support the Connect, gRPC, and gRPC-Web protocols with +// the binary Protobuf and JSON codecs. They support gzip compression using the +// standard library's [compress/gzip]. 
+type Handler struct { + spec Spec + implementation StreamingHandlerFunc + protocolHandlers map[string][]protocolHandler // Method to protocol handlers + allowMethod string // Allow header + acceptPost string // Accept-Post header +} + +// NewUnaryHandler constructs a [Handler] for a request-response procedure. +func NewUnaryHandler[Req, Res any]( + procedure string, + unary func(context.Context, *Request[Req]) (*Response[Res], error), + options ...HandlerOption, +) *Handler { + // Wrap the strongly-typed implementation so we can apply interceptors. + untyped := UnaryFunc(func(ctx context.Context, request AnyRequest) (AnyResponse, error) { + if err := ctx.Err(); err != nil { + return nil, err + } + typed, ok := request.(*Request[Req]) + if !ok { + return nil, errorf(CodeInternal, "unexpected handler request type %T", request) + } + res, err := unary(ctx, typed) + if res == nil && err == nil { + // This is going to panic during serialization. Debugging is much easier + // if we panic here instead, so we can include the procedure name. + panic(procedure + " returned nil *connect.Response and nil error") //nolint: forbidigo + } + return res, err + }) + config := newHandlerConfig(procedure, StreamTypeUnary, options) + if interceptor := config.Interceptor; interceptor != nil { + untyped = interceptor.WrapUnary(untyped) + } + // Given a stream, how should we call the unary function? + implementation := func(ctx context.Context, conn StreamingHandlerConn) error { + request, err := receiveUnaryRequest[Req](conn, config.Initializer) + if err != nil { + return err + } + // Add the request header to the context, and store the response header + // and trailer to propagate back to the caller. 
+ info := &handlerCallInfo{ + peer: request.Peer(), + spec: request.Spec(), + method: request.HTTPMethod(), + requestHeader: request.Header(), + } + ctx = newHandlerContext(ctx, info) + response, err := untyped(ctx, request) + // Add response headers/trailers from the context callinfo into the conn if they exist + if info.responseHeader != nil { + mergeNonProtocolHeaders(conn.ResponseHeader(), info.responseHeader) + } + if info.responseTrailer != nil { + mergeNonProtocolHeaders(conn.ResponseTrailer(), info.responseTrailer) + } + if err != nil { + return err + } + + // Add response headers/trailers from the response into the conn if they exist + if len(response.Header()) != 0 { + mergeNonProtocolHeaders(conn.ResponseHeader(), response.Header()) + } + if len(response.Trailer()) != 0 { + mergeNonProtocolHeaders(conn.ResponseTrailer(), response.Trailer()) + } + return conn.Send(response.Any()) + } + + protocolHandlers := config.newProtocolHandlers() + return &Handler{ + spec: config.newSpec(), + implementation: implementation, + protocolHandlers: mappedMethodHandlers(protocolHandlers), + allowMethod: sortedAllowMethodValue(protocolHandlers), + acceptPost: sortedAcceptPostValue(protocolHandlers), + } +} + +// NewUnaryHandlerSimple constructs a [Handler] for a request-response procedure using the +// function signature associated with the "simple" generation option. +// +// This option eliminates the [Request] and [Response] wrappers, and instead uses the +// context.Context to propagate information such as headers. 
+func NewUnaryHandlerSimple[Req, Res any]( + procedure string, + unary func(context.Context, *Req) (*Res, error), + options ...HandlerOption, +) *Handler { + return NewUnaryHandler( + procedure, + func(ctx context.Context, request *Request[Req]) (*Response[Res], error) { + responseMsg, err := unary(ctx, request.Msg) + if err != nil { + return nil, err + } + return NewResponse(responseMsg), nil + }, + options..., + ) +} + +// NewClientStreamHandler constructs a [Handler] for a client streaming procedure. +func NewClientStreamHandler[Req, Res any]( + procedure string, + implementation func(context.Context, *ClientStream[Req]) (*Response[Res], error), + options ...HandlerOption, +) *Handler { + config := newHandlerConfig(procedure, StreamTypeClient, options) + return newStreamHandler( + config, + func(ctx context.Context, conn StreamingHandlerConn) error { + stream := &ClientStream[Req]{ + conn: conn, + initializer: config.Initializer, + } + ctx = newHandlerContext(ctx, &streamingHandlerCallInfo{ + conn: conn, + }) + res, err := implementation(ctx, stream) + if err != nil { + return err + } + if res == nil { + // This is going to panic during serialization. Debugging is much easier + // if we panic here instead, so we can include the procedure name. + panic(procedure + " returned nil *connect.Response and nil error") //nolint: forbidigo + } + mergeHeaders(conn.ResponseHeader(), res.header) + mergeHeaders(conn.ResponseTrailer(), res.trailer) + return conn.Send(res.Msg) + }, + ) +} + +// NewClientStreamHandlerSimple constructs a [Handler] for a request-streaming procedure +// using the function signature associated with the "simple" generation option. +// +// This option eliminates the [Response] wrapper, and instead uses the context.Context +// to propagate information such as headers. 
+func NewClientStreamHandlerSimple[Req, Res any]( + procedure string, + implementation func(context.Context, *ClientStream[Req]) (*Res, error), + options ...HandlerOption, +) *Handler { + return NewClientStreamHandler( + procedure, + func(ctx context.Context, stream *ClientStream[Req]) (*Response[Res], error) { + responseMsg, err := implementation(ctx, stream) + if err != nil { + return nil, err + } + return NewResponse(responseMsg), nil + }, + options..., + ) +} + +// NewServerStreamHandler constructs a [Handler] for a server streaming procedure. +func NewServerStreamHandler[Req, Res any]( + procedure string, + implementation func(context.Context, *Request[Req], *ServerStream[Res]) error, + options ...HandlerOption, +) *Handler { + config := newHandlerConfig(procedure, StreamTypeServer, options) + return newStreamHandler( + config, + func(ctx context.Context, conn StreamingHandlerConn) error { + req, err := receiveUnaryRequest[Req](conn, config.Initializer) + if err != nil { + return err + } + ctx = newHandlerContext(ctx, &streamingHandlerCallInfo{ + conn: conn, + }) + return implementation(ctx, req, &ServerStream[Res]{conn: conn}) + }, + ) +} + +// NewServerStreamHandlerSimple constructs a [Handler] a server streaming procedure using the function +// signature associated with the "simple" generation option. +// +// This option eliminates the [Request] wrapper, and instead uses the context.Context to +// propagate information such as headers. +func NewServerStreamHandlerSimple[Req, Res any]( + procedure string, + implementation func(context.Context, *Req, *ServerStream[Res]) error, + options ...HandlerOption, +) *Handler { + return NewServerStreamHandler( + procedure, + func(ctx context.Context, request *Request[Req], serverStream *ServerStream[Res]) error { + return implementation(ctx, request.Msg, serverStream) + }, + options..., + ) +} + +// NewBidiStreamHandler constructs a [Handler] for a bidirectional streaming procedure. 
+func NewBidiStreamHandler[Req, Res any]( + procedure string, + implementation func(context.Context, *BidiStream[Req, Res]) error, + options ...HandlerOption, +) *Handler { + config := newHandlerConfig(procedure, StreamTypeBidi, options) + return newStreamHandler( + config, + func(ctx context.Context, conn StreamingHandlerConn) error { + ctx = newHandlerContext(ctx, &streamingHandlerCallInfo{ + conn: conn, + }) + return implementation( + ctx, + &BidiStream[Req, Res]{ + conn: conn, + initializer: config.Initializer, + }, + ) + }, + ) +} + +// ServeHTTP implements [http.Handler]. +func (h *Handler) ServeHTTP(responseWriter http.ResponseWriter, request *http.Request) { + // We don't need to defer functions to close the request body or read to + // EOF: the stream we construct later on already does that, and we only + // return early when dealing with misbehaving clients. In those cases, it's + // okay if we can't re-use the connection. + isBidi := (h.spec.StreamType & StreamTypeBidi) == StreamTypeBidi + if isBidi && request.ProtoMajor < 2 { + // Clients coded to expect full-duplex connections may hang if they've + // mistakenly negotiated HTTP/1.1. To unblock them, we must close the + // underlying TCP connection. + responseWriter.Header().Set("Connection", "close") + responseWriter.WriteHeader(http.StatusHTTPVersionNotSupported) + return + } + + protocolHandlers := h.protocolHandlers[request.Method] + if len(protocolHandlers) == 0 { + responseWriter.Header().Set("Allow", h.allowMethod) + responseWriter.WriteHeader(http.StatusMethodNotAllowed) + return + } + + contentType := canonicalizeContentType(getHeaderCanonical(request.Header, headerContentType)) + + // Find our implementation of the RPC protocol in use. 
+ var protocolHandler protocolHandler + for _, handler := range protocolHandlers { + if handler.CanHandlePayload(request, contentType) { + protocolHandler = handler + break + } + } + if protocolHandler == nil { + responseWriter.Header().Set("Accept-Post", h.acceptPost) + responseWriter.WriteHeader(http.StatusUnsupportedMediaType) + return + } + + if request.Method == http.MethodGet { + // A body must not be present. + hasBody := request.ContentLength > 0 + if request.ContentLength < 0 { + // No content-length header. + // Test if body is empty by trying to read a single byte. + var b [1]byte + n, _ := request.Body.Read(b[:]) + hasBody = n > 0 + } + if hasBody { + responseWriter.WriteHeader(http.StatusUnsupportedMediaType) + return + } + _ = request.Body.Close() + } + + // Establish a stream and serve the RPC. + setHeaderCanonical(request.Header, headerContentType, contentType) + setHeaderCanonical(request.Header, headerHost, request.Host) + ctx, cancel, timeoutErr := protocolHandler.SetTimeout(request) //nolint: contextcheck + if timeoutErr != nil { + ctx = request.Context() + } + if cancel != nil { + defer cancel() + } + connCloser, ok := protocolHandler.NewConn( + responseWriter, + request.WithContext(ctx), + ) + if !ok { + // Failed to create stream, usually because client used an unknown + // compression algorithm. Nothing further to do. 
+ return + } + if timeoutErr != nil { + _ = connCloser.Close(timeoutErr) + return + } + _ = connCloser.Close(h.implementation(ctx, connCloser)) +} + +type handlerConfig struct { + CompressionPools map[string]*compressionPool + CompressionNames []string + Codecs map[string]Codec + CompressMinBytes int + Interceptor Interceptor + Procedure string + Schema any + Initializer maybeInitializer + RequireConnectProtocolHeader bool + IdempotencyLevel IdempotencyLevel + BufferPool *bufferPool + ReadMaxBytes int + SendMaxBytes int + StreamType StreamType +} + +func newHandlerConfig(procedure string, streamType StreamType, options []HandlerOption) *handlerConfig { + protoPath := extractProtoPath(procedure) + config := handlerConfig{ + Procedure: protoPath, + CompressionPools: make(map[string]*compressionPool), + Codecs: make(map[string]Codec), + BufferPool: newBufferPool(), + StreamType: streamType, + } + withProtoBinaryCodec().applyToHandler(&config) + withProtoJSONCodecs().applyToHandler(&config) + withGzip().applyToHandler(&config) + for _, opt := range options { + opt.applyToHandler(&config) + } + return &config +} + +func (c *handlerConfig) newSpec() Spec { + return Spec{ + Procedure: c.Procedure, + Schema: c.Schema, + StreamType: c.StreamType, + IdempotencyLevel: c.IdempotencyLevel, + } +} + +func (c *handlerConfig) newProtocolHandlers() []protocolHandler { + protocols := []protocol{ + &protocolConnect{}, + &protocolGRPC{web: false}, + &protocolGRPC{web: true}, + } + handlers := make([]protocolHandler, 0, len(protocols)) + codecs := newReadOnlyCodecs(c.Codecs) + compressors := newReadOnlyCompressionPools( + c.CompressionPools, + c.CompressionNames, + ) + for _, protocol := range protocols { + handlers = append(handlers, protocol.NewHandler(&protocolHandlerParams{ + Spec: c.newSpec(), + Codecs: codecs, + CompressionPools: compressors, + CompressMinBytes: c.CompressMinBytes, + BufferPool: c.BufferPool, + ReadMaxBytes: c.ReadMaxBytes, + SendMaxBytes: c.SendMaxBytes, + 
RequireConnectProtocolHeader: c.RequireConnectProtocolHeader, + IdempotencyLevel: c.IdempotencyLevel, + })) + } + return handlers +} + +func newStreamHandler( + config *handlerConfig, + implementation StreamingHandlerFunc, +) *Handler { + if ic := config.Interceptor; ic != nil { + implementation = ic.WrapStreamingHandler(implementation) + } + protocolHandlers := config.newProtocolHandlers() + return &Handler{ + spec: config.newSpec(), + implementation: implementation, + protocolHandlers: mappedMethodHandlers(protocolHandlers), + allowMethod: sortedAllowMethodValue(protocolHandlers), + acceptPost: sortedAcceptPostValue(protocolHandlers), + } +} diff --git a/vendor/connectrpc.com/connect/handler_stream.go b/vendor/connectrpc.com/connect/handler_stream.go new file mode 100644 index 0000000000..21092717b0 --- /dev/null +++ b/vendor/connectrpc.com/connect/handler_stream.go @@ -0,0 +1,198 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "errors" + "io" + "net/http" +) + +// ClientStream is the handler's view of a client streaming RPC. +// +// It's constructed as part of [Handler] invocation, but doesn't currently have +// an exported constructor. +type ClientStream[Req any] struct { + conn StreamingHandlerConn + initializer maybeInitializer + msg *Req + err error +} + +// Spec returns the specification for the RPC. 
+func (c *ClientStream[_]) Spec() Spec { + return c.conn.Spec() +} + +// Peer describes the client for this RPC. +func (c *ClientStream[_]) Peer() Peer { + return c.conn.Peer() +} + +// RequestHeader returns the headers received from the client. +func (c *ClientStream[Req]) RequestHeader() http.Header { + return c.conn.RequestHeader() +} + +// Receive advances the stream to the next message, which will then be +// available through the Msg method. It returns false when the stream stops, +// either by reaching the end or by encountering an unexpected error. After +// Receive returns false, the Err method will return any unexpected error +// encountered. +func (c *ClientStream[Req]) Receive() bool { + if c.err != nil { + return false + } + c.msg = new(Req) + if err := c.initializer.maybe(c.Spec(), c.msg); err != nil { + c.err = err + return false + } + c.err = c.conn.Receive(c.msg) + return c.err == nil +} + +// Msg returns the most recent message unmarshaled by a call to Receive. +func (c *ClientStream[Req]) Msg() *Req { + if c.msg == nil { + c.msg = new(Req) + } + return c.msg +} + +// Err returns the first non-EOF error that was encountered by Receive. +func (c *ClientStream[Req]) Err() error { + if c.err == nil || errors.Is(c.err, io.EOF) { + return nil + } + return c.err +} + +// Conn exposes the underlying StreamingHandlerConn. This may be useful if +// you'd prefer to wrap the connection in a different high-level API. +func (c *ClientStream[Req]) Conn() StreamingHandlerConn { + return c.conn +} + +// ServerStream is the handler's view of a server streaming RPC. +// +// It's constructed as part of [Handler] invocation, but doesn't currently have +// an exported constructor. +type ServerStream[Res any] struct { + conn StreamingHandlerConn +} + +// ResponseHeader returns the response headers. Headers are sent with the first +// call to Send. +// +// Headers beginning with "Connect-" and "Grpc-" are reserved for use by the +// Connect and gRPC protocols. 
Applications shouldn't write them. +func (s *ServerStream[Res]) ResponseHeader() http.Header { + return s.conn.ResponseHeader() +} + +// ResponseTrailer returns the response trailers. Handlers may write to the +// response trailers at any time before returning. +// +// Trailers beginning with "Connect-" and "Grpc-" are reserved for use by the +// Connect and gRPC protocols. Applications shouldn't write them. +func (s *ServerStream[Res]) ResponseTrailer() http.Header { + return s.conn.ResponseTrailer() +} + +// Send a message to the client. The first call to Send also sends the response +// headers. +func (s *ServerStream[Res]) Send(msg *Res) error { + if msg == nil { + return s.conn.Send(nil) + } + return s.conn.Send(msg) +} + +// Conn exposes the underlying StreamingHandlerConn. This may be useful if +// you'd prefer to wrap the connection in a different high-level API. +func (s *ServerStream[Res]) Conn() StreamingHandlerConn { + return s.conn +} + +// BidiStream is the handler's view of a bidirectional streaming RPC. +// +// It's constructed as part of [Handler] invocation, but doesn't currently have +// an exported constructor. +type BidiStream[Req, Res any] struct { + conn StreamingHandlerConn + initializer maybeInitializer +} + +// Spec returns the specification for the RPC. +func (b *BidiStream[_, _]) Spec() Spec { + return b.conn.Spec() +} + +// Peer describes the client for this RPC. +func (b *BidiStream[_, _]) Peer() Peer { + return b.conn.Peer() +} + +// RequestHeader returns the headers received from the client. +func (b *BidiStream[Req, Res]) RequestHeader() http.Header { + return b.conn.RequestHeader() +} + +// Receive a message. When the client is done sending messages, Receive will +// return an error that wraps [io.EOF]. 
+func (b *BidiStream[Req, Res]) Receive() (*Req, error) { + var req Req + if err := b.initializer.maybe(b.Spec(), &req); err != nil { + return nil, err + } + if err := b.conn.Receive(&req); err != nil { + return nil, err + } + return &req, nil +} + +// ResponseHeader returns the response headers. Headers are sent with the first +// call to Send. +// +// Headers beginning with "Connect-" and "Grpc-" are reserved for use by the +// Connect and gRPC protocols. Applications shouldn't write them. +func (b *BidiStream[Req, Res]) ResponseHeader() http.Header { + return b.conn.ResponseHeader() +} + +// ResponseTrailer returns the response trailers. Handlers may write to the +// response trailers at any time before returning. +// +// Trailers beginning with "Connect-" and "Grpc-" are reserved for use by the +// Connect and gRPC protocols. Applications shouldn't write them. +func (b *BidiStream[Req, Res]) ResponseTrailer() http.Header { + return b.conn.ResponseTrailer() +} + +// Send a message to the client. The first call to Send also sends the response +// headers. +func (b *BidiStream[Req, Res]) Send(msg *Res) error { + if msg == nil { + return b.conn.Send(nil) + } + return b.conn.Send(msg) +} + +// Conn exposes the underlying StreamingHandlerConn. This may be useful if +// you'd prefer to wrap the connection in a different high-level API. +func (b *BidiStream[Req, Res]) Conn() StreamingHandlerConn { + return b.conn +} diff --git a/vendor/connectrpc.com/connect/header.go b/vendor/connectrpc.com/connect/header.go new file mode 100644 index 0000000000..081570a5e3 --- /dev/null +++ b/vendor/connectrpc.com/connect/header.go @@ -0,0 +1,128 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "encoding/base64" + "net/http" +) + +//nolint:gochecknoglobals +var protocolHeaders = map[string]struct{}{ + // HTTP headers. + headerContentType: {}, + headerContentLength: {}, + headerContentEncoding: {}, + headerHost: {}, + headerUserAgent: {}, + headerTrailer: {}, + headerDate: {}, + // Connect headers. + connectUnaryHeaderAcceptCompression: {}, + connectUnaryTrailerPrefix: {}, + connectStreamingHeaderCompression: {}, + connectStreamingHeaderAcceptCompression: {}, + connectHeaderTimeout: {}, + connectHeaderProtocolVersion: {}, + // gRPC headers. + grpcHeaderCompression: {}, + grpcHeaderAcceptCompression: {}, + grpcHeaderTimeout: {}, + grpcHeaderStatus: {}, + grpcHeaderMessage: {}, + grpcHeaderDetails: {}, +} + +// EncodeBinaryHeader base64-encodes the data. It always emits unpadded values. +// +// In the Connect, gRPC, and gRPC-Web protocols, binary headers must have keys +// ending in "-Bin". +func EncodeBinaryHeader(data []byte) string { + // gRPC specification says that implementations should emit unpadded values. + return base64.RawStdEncoding.EncodeToString(data) +} + +// DecodeBinaryHeader base64-decodes the data. It can decode padded or unpadded +// values. Following usual HTTP semantics, multiple base64-encoded values may +// be joined with a comma. When receiving such comma-separated values, split +// them with [strings.Split] before calling DecodeBinaryHeader. +// +// Binary headers sent using the Connect, gRPC, and gRPC-Web protocols have +// keys ending in "-Bin". 
+func DecodeBinaryHeader(data string) ([]byte, error) { + if len(data)%4 != 0 { + // Data definitely isn't padded. + return base64.RawStdEncoding.DecodeString(data) + } + // Either the data was padded, or padding wasn't necessary. In both cases, + // the padding-aware decoder works. + return base64.StdEncoding.DecodeString(data) +} + +func mergeHeaders(into, from http.Header) { + for key, vals := range from { + if len(vals) == 0 { + // For response trailers, net/http will pre-populate entries + // with nil values based on the "Trailer" header. But if there + // are no actual values for those keys, we skip them. + continue + } + into[key] = append(into[key], vals...) + } +} + +// mergeNonProtocolHeaders merges headers excluding protocol headers defined in +// protocolHeaders. +func mergeNonProtocolHeaders(into, from http.Header) { + for key, vals := range from { + if len(vals) == 0 { + // For response trailers, net/http will pre-populate entries + // with nil values based on the "Trailer" header. But if there + // are no actual values for those keys, we skip them. + continue + } + if _, isProtocolHeader := protocolHeaders[key]; !isProtocolHeader { + into[key] = append(into[key], vals...) + } + } +} + +// getHeaderCanonical is a shortcut for Header.Get() which +// bypasses the CanonicalMIMEHeaderKey operation when we +// know the key is already in canonical form. +func getHeaderCanonical(h http.Header, key string) string { + if h == nil { + return "" + } + v := h[key] + if len(v) == 0 { + return "" + } + return v[0] +} + +// setHeaderCanonical is a shortcut for Header.Set() which +// bypasses the CanonicalMIMEHeaderKey operation when we +// know the key is already in canonical form. +func setHeaderCanonical(h http.Header, key, value string) { + h[key] = []string{value} +} + +// delHeaderCanonical is a shortcut for Header.Del() which +// bypasses the CanonicalMIMEHeaderKey operation when we +// know the key is already in canonical form. 
+func delHeaderCanonical(h http.Header, key string) { + delete(h, key) +} diff --git a/vendor/connectrpc.com/connect/idempotency_level.go b/vendor/connectrpc.com/connect/idempotency_level.go new file mode 100644 index 0000000000..5dbf8f531f --- /dev/null +++ b/vendor/connectrpc.com/connect/idempotency_level.go @@ -0,0 +1,68 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import "fmt" + +// An IdempotencyLevel is a value that declares how "idempotent" an RPC is. This +// value can affect RPC behaviors, such as determining whether it is safe to +// retry a request, or what kinds of request modalities are allowed for a given +// procedure. +type IdempotencyLevel int + +// NOTE: For simplicity, these should be kept in sync with the values of the +// google.protobuf.MethodOptions.IdempotencyLevel enumeration. + +const ( + // IdempotencyUnknown is the default idempotency level. A procedure with + // this idempotency level may not be idempotent. This is appropriate for + // any kind of procedure. + IdempotencyUnknown IdempotencyLevel = 0 + + // IdempotencyNoSideEffects is the idempotency level that specifies that a + // given call has no side-effects. This is equivalent to [RFC 9110 § 9.2.1] + // "safe" methods in terms of semantics. This procedure should not mutate + // any state. This idempotency level is appropriate for queries, or anything + // that would be suitable for an HTTP GET request. 
In addition, due to the + // lack of side-effects, such a procedure would be suitable to retry and + // expect that the results will not be altered by preceding attempts. + // + // [RFC 9110 § 9.2.1]: https://www.rfc-editor.org/rfc/rfc9110.html#section-9.2.1 + IdempotencyNoSideEffects IdempotencyLevel = 1 + + // IdempotencyIdempotent is the idempotency level that specifies that a + // given call is "idempotent", such that multiple instances of the same + // request to this procedure would have the same side-effects as a single + // request. This is equivalent to [RFC 9110 § 9.2.2] "idempotent" methods. + // This level is a subset of the previous level. This idempotency level is + // appropriate for any procedure that is safe to retry multiple times + // and be guaranteed that the response and side-effects will not be altered + // as a result of multiple attempts, for example, entity deletion requests. + // + // [RFC 9110 § 9.2.2]: https://www.rfc-editor.org/rfc/rfc9110.html#section-9.2.2 + IdempotencyIdempotent IdempotencyLevel = 2 +) + +func (i IdempotencyLevel) String() string { + switch i { + case IdempotencyUnknown: + return "idempotency_unknown" + case IdempotencyNoSideEffects: + return "no_side_effects" + case IdempotencyIdempotent: + return "idempotent" + } + return fmt.Sprintf("idempotency_%d", i) +} diff --git a/vendor/connectrpc.com/connect/interceptor.go b/vendor/connectrpc.com/connect/interceptor.go new file mode 100644 index 0000000000..d3bc137488 --- /dev/null +++ b/vendor/connectrpc.com/connect/interceptor.go @@ -0,0 +1,138 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "context" + "errors" +) + +var ( + // errNewClientContextProhibited signals that a new client context was created + // in an interceptor, which is prohibited. + errNewClientContextProhibited = errors.New("creating a new context in an interceptor is prohibited") +) + +// UnaryFunc is the generic signature of a unary RPC. Interceptors may wrap +// Funcs. +// +// The type of the request and response structs depend on the codec being used. +// When using Protobuf, request.Any() and response.Any() will always be +// [proto.Message] implementations. +type UnaryFunc func(context.Context, AnyRequest) (AnyResponse, error) + +// StreamingClientFunc is the generic signature of a streaming RPC from the client's +// perspective. Interceptors may wrap StreamingClientFuncs. +type StreamingClientFunc func(context.Context, Spec) StreamingClientConn + +// StreamingHandlerFunc is the generic signature of a streaming RPC from the +// handler's perspective. Interceptors may wrap StreamingHandlerFuncs. +type StreamingHandlerFunc func(context.Context, StreamingHandlerConn) error + +// An Interceptor adds logic to a generated handler or client, like the +// decorators or middleware you may have seen in other libraries. Interceptors +// may mutate requests and responses, handle errors, retry, recover from panics, +// emit logs and metrics, or do nearly anything else. +// +// The returned functions must be safe to call concurrently. 
+type Interceptor interface { + WrapUnary(UnaryFunc) UnaryFunc + WrapStreamingClient(StreamingClientFunc) StreamingClientFunc + WrapStreamingHandler(StreamingHandlerFunc) StreamingHandlerFunc +} + +// UnaryInterceptorFunc is a simple Interceptor implementation that only +// wraps unary RPCs. It has no effect on streaming RPCs. +type UnaryInterceptorFunc func(UnaryFunc) UnaryFunc + +// WrapUnary implements [Interceptor] by applying the interceptor function. +func (f UnaryInterceptorFunc) WrapUnary(next UnaryFunc) UnaryFunc { return f(next) } + +// WrapStreamingClient implements [Interceptor] with a no-op. +func (f UnaryInterceptorFunc) WrapStreamingClient(next StreamingClientFunc) StreamingClientFunc { + return next +} + +// WrapStreamingHandler implements [Interceptor] with a no-op. +func (f UnaryInterceptorFunc) WrapStreamingHandler(next StreamingHandlerFunc) StreamingHandlerFunc { + return next +} + +// A chain composes multiple interceptors into one. +type chain struct { + interceptors []Interceptor +} + +// newChain composes multiple interceptors into one. +func newChain(interceptors []Interceptor) *chain { + // We usually wrap in reverse order to have the first interceptor from + // the slice act first. Rather than doing this dance repeatedly, reverse the + // interceptor order now. 
+ var chain chain + for i := len(interceptors) - 1; i >= 0; i-- { + if interceptor := interceptors[i]; interceptor != nil { + chain.interceptors = append(chain.interceptors, interceptor) + } + } + return &chain +} + +func (c *chain) WrapUnary(next UnaryFunc) UnaryFunc { + for _, interceptor := range c.interceptors { + next = unaryThunk(next) + next = interceptor.WrapUnary(next) + } + return next +} + +func (c *chain) WrapStreamingClient(next StreamingClientFunc) StreamingClientFunc { + for _, interceptor := range c.interceptors { + next = streamingClientThunk(next) + next = interceptor.WrapStreamingClient(next) + } + return next +} + +func (c *chain) WrapStreamingHandler(next StreamingHandlerFunc) StreamingHandlerFunc { + for _, interceptor := range c.interceptors { + next = interceptor.WrapStreamingHandler(next) + } + return next +} + +func unaryThunk(next UnaryFunc) UnaryFunc { + return func(ctx context.Context, req AnyRequest) (AnyResponse, error) { + if err := checkSentinel(ctx); err != nil { + return nil, err + } + return next(ctx, req) + } +} + +func streamingClientThunk(next StreamingClientFunc) StreamingClientFunc { + return func(ctx context.Context, spec Spec) StreamingClientConn { + if err := checkSentinel(ctx); err != nil { + return &errStreamingClientConn{err: err} + } + return next(ctx, spec) + } +} + +func checkSentinel(ctx context.Context) error { + if ctx.Value(clientCallInfoContextKey{}) != ctx.Value(sentinelContextKey{}) { + return errNewClientContextProhibited + } + return nil +} diff --git a/vendor/connectrpc.com/connect/internal/gen/connectext/grpc/status/v1/status.pb.go b/vendor/connectrpc.com/connect/internal/gen/connectext/grpc/status/v1/status.pb.go new file mode 100644 index 0000000000..10dea47410 --- /dev/null +++ b/vendor/connectrpc.com/connect/internal/gen/connectext/grpc/status/v1/status.pb.go @@ -0,0 +1,165 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you 
may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.9 +// protoc (unknown) +// source: connectext/grpc/status/v1/status.proto + +// This package is for internal use by Connect, and provides no backward +// compatibility guarantees whatsoever. + +package statusv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// See https://cloud.google.com/apis/design/errors. +// +// This struct must remain binary-compatible with +// https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto. 
+type Status struct { + state protoimpl.MessageState `protogen:"open.v1"` + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // a google.rpc.Code + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // developer-facing, English (localize in details or client-side) + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Status) Reset() { + *x = Status{} + mi := &file_connectext_grpc_status_v1_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_connectext_grpc_status_v1_status_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_connectext_grpc_status_v1_status_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_connectext_grpc_status_v1_status_proto protoreflect.FileDescriptor + +const file_connectext_grpc_status_v1_status_proto_rawDesc = "" + + "\n" + + "&connectext/grpc/status/v1/status.proto\x12\x0egrpc.status.v1\x1a\x19google/protobuf/any.proto\"f\n" + + "\x06Status\x12\x12\n" + + "\x04code\x18\x01 \x01(\x05R\x04code\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\x12.\n" + + "\adetails\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\adetailsB\xc3\x01\n" + + "\x12com.grpc.status.v1B\vStatusProtoP\x01ZFconnectrpc.com/connect/internal/gen/connectext/grpc/status/v1;statusv1\xa2\x02\x03GSX\xaa\x02\x0eGrpc.Status.V1\xca\x02\x0eGrpc\\Status\\V1\xe2\x02\x1aGrpc\\Status\\V1\\GPBMetadata\xea\x02\x10Grpc::Status::V1b\x06proto3" + +var ( + file_connectext_grpc_status_v1_status_proto_rawDescOnce sync.Once + file_connectext_grpc_status_v1_status_proto_rawDescData []byte +) + +func file_connectext_grpc_status_v1_status_proto_rawDescGZIP() []byte { + file_connectext_grpc_status_v1_status_proto_rawDescOnce.Do(func() { + file_connectext_grpc_status_v1_status_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_connectext_grpc_status_v1_status_proto_rawDesc), len(file_connectext_grpc_status_v1_status_proto_rawDesc))) + }) + return file_connectext_grpc_status_v1_status_proto_rawDescData +} + +var file_connectext_grpc_status_v1_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_connectext_grpc_status_v1_status_proto_goTypes = []any{ + (*Status)(nil), // 0: grpc.status.v1.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any +} 
+var file_connectext_grpc_status_v1_status_proto_depIdxs = []int32{ + 1, // 0: grpc.status.v1.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_connectext_grpc_status_v1_status_proto_init() } +func file_connectext_grpc_status_v1_status_proto_init() { + if File_connectext_grpc_status_v1_status_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_connectext_grpc_status_v1_status_proto_rawDesc), len(file_connectext_grpc_status_v1_status_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_connectext_grpc_status_v1_status_proto_goTypes, + DependencyIndexes: file_connectext_grpc_status_v1_status_proto_depIdxs, + MessageInfos: file_connectext_grpc_status_v1_status_proto_msgTypes, + }.Build() + File_connectext_grpc_status_v1_status_proto = out.File + file_connectext_grpc_status_v1_status_proto_goTypes = nil + file_connectext_grpc_status_v1_status_proto_depIdxs = nil +} diff --git a/vendor/connectrpc.com/connect/option.go b/vendor/connectrpc.com/connect/option.go new file mode 100644 index 0000000000..fe0a2cd9d2 --- /dev/null +++ b/vendor/connectrpc.com/connect/option.go @@ -0,0 +1,647 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "compress/gzip" + "context" + "io" + "net/http" +) + +// A ClientOption configures a [Client]. +// +// In addition to any options grouped in the documentation below, remember that +// any [Option] is also a valid ClientOption. +type ClientOption interface { + applyToClient(*clientConfig) +} + +// WithAcceptCompression makes a compression algorithm available to a client. +// Clients ask servers to compress responses using any of the registered +// algorithms. The first registered algorithm is treated as the least +// preferred, and the last registered algorithm is the most preferred. +// +// It's safe to use this option liberally: servers will ignore any +// compression algorithms they don't support. To compress requests, pair this +// option with [WithSendCompression]. To remove support for a +// previously-registered compression algorithm, use WithAcceptCompression with +// nil decompressor and compressor constructors. +// +// Clients accept gzipped responses by default, using a compressor backed by the +// standard library's [gzip] package with the default compression level. Use +// [WithSendGzip] to compress requests with gzip. +// +// Calling WithAcceptCompression with an empty name is a no-op. +func WithAcceptCompression( + name string, + newDecompressor func() Decompressor, + newCompressor func() Compressor, +) ClientOption { + return &compressionOption{ + Name: name, + CompressionPool: newCompressionPool(newDecompressor, newCompressor), + } +} + +// WithClientOptions composes multiple ClientOptions into one. 
+func WithClientOptions(options ...ClientOption) ClientOption { + return &clientOptionsOption{options} +} + +// WithGRPC configures clients to use the HTTP/2 gRPC protocol. +func WithGRPC() ClientOption { + return &grpcOption{web: false} +} + +// WithGRPCWeb configures clients to use the gRPC-Web protocol. +func WithGRPCWeb() ClientOption { + return &grpcOption{web: true} +} + +// WithProtoJSON configures a client to send JSON-encoded data instead of +// binary Protobuf. It uses the standard Protobuf JSON mapping as implemented +// by [google.golang.org/protobuf/encoding/protojson]: fields are named using +// lowerCamelCase, zero values are omitted, missing required fields are errors, +// enums are emitted as strings, etc. +func WithProtoJSON() ClientOption { + return WithCodec(&protoJSONCodec{codecNameJSON}) +} + +// WithSendCompression configures the client to use the specified algorithm to +// compress request messages. If the algorithm has not been registered using +// [WithAcceptCompression], the client will return errors at runtime. +// +// Because some servers don't support compression, clients default to sending +// uncompressed requests. +func WithSendCompression(name string) ClientOption { + return &sendCompressionOption{Name: name} +} + +// WithSendGzip configures the client to gzip requests. Since clients have +// access to a gzip compressor by default, WithSendGzip doesn't require +// [WithSendCompression]. +// +// Some servers don't support gzip, so clients default to sending uncompressed +// requests. +func WithSendGzip() ClientOption { + return WithSendCompression(compressionGzip) +} + +// A HandlerOption configures a [Handler]. +// +// In addition to any options grouped in the documentation below, remember that +// any [Option] is also a HandlerOption. +type HandlerOption interface { + applyToHandler(*handlerConfig) +} + +// WithCompression configures handlers to support a compression algorithm. 
+// Clients may send messages compressed with that algorithm and/or request +// compressed responses. The [Compressor] and [Decompressor] produced by the +// supplied constructors must use the same algorithm. Internally, Connect pools +// compressors and decompressors. +// +// By default, handlers support gzip using the standard library's +// [compress/gzip] package at the default compression level. To remove support for +// a previously-registered compression algorithm, use WithCompression with nil +// decompressor and compressor constructors. +// +// Calling WithCompression with an empty name is a no-op. +func WithCompression( + name string, + newDecompressor func() Decompressor, + newCompressor func() Compressor, +) HandlerOption { + return &compressionOption{ + Name: name, + CompressionPool: newCompressionPool(newDecompressor, newCompressor), + } +} + +// WithHandlerOptions composes multiple HandlerOptions into one. +func WithHandlerOptions(options ...HandlerOption) HandlerOption { + return &handlerOptionsOption{options} +} + +// WithRecover adds an interceptor that recovers from panics. The supplied +// function receives the context, [Spec], request headers, and the recovered +// value (which may be nil). It must return an error to send back to the +// client. It may also log the panic, emit metrics, or execute other +// error-handling logic. Handler functions must be safe to call concurrently. +// +// To preserve compatibility with [net/http]'s semantics, this interceptor +// doesn't handle panics with [http.ErrAbortHandler]. +// +// By default, handlers don't recover from panics. Because the standard +// library's [http.Server] recovers from panics by default, this option isn't +// usually necessary to prevent crashes. Instead, it helps servers collect +// RPC-specific data during panics and send a more detailed error to +// clients. 
+func WithRecover(handle func(context.Context, Spec, http.Header, any) error) HandlerOption { + return WithInterceptors(&recoverHandlerInterceptor{handle: handle}) +} + +// WithRequireConnectProtocolHeader configures the Handler to require requests +// using the Connect RPC protocol to include the Connect-Protocol-Version +// header. This ensures that HTTP proxies and net/http middleware can easily +// identify valid Connect requests, even if they use a common Content-Type like +// application/json. However, it makes ad-hoc requests with tools like cURL +// more laborious. Streaming requests are not affected by this option. +// +// This option has no effect if the client uses the gRPC or gRPC-Web protocols. +func WithRequireConnectProtocolHeader() HandlerOption { + return &requireConnectProtocolHeaderOption{} +} + +// WithConditionalHandlerOptions allows procedures in the same service to have +// different configurations: for example, one procedure may need a much larger +// WithReadMaxBytes setting than the others. +// +// WithConditionalHandlerOptions takes a function which may inspect each +// procedure's Spec before deciding which options to apply. Returning a nil +// slice is safe. +func WithConditionalHandlerOptions(conditional func(spec Spec) []HandlerOption) HandlerOption { + return &conditionalHandlerOptions{conditional: conditional} +} + +// Option implements both [ClientOption] and [HandlerOption], so it can be +// applied both client-side and server-side. +type Option interface { + ClientOption + HandlerOption +} + +// WithSchema provides a parsed representation of the schema for an RPC to a +// client or handler. The supplied schema is exposed as [Spec].Schema. This +// option is typically added by generated code. +// +// For services using protobuf schemas, the supplied schema should be a +// [google.golang.org/protobuf/reflect/protoreflect.MethodDescriptor]. 
+func WithSchema(schema any) Option { + return &schemaOption{Schema: schema} +} + +// WithRequestInitializer provides a function that initializes a new message. +// It may be used to dynamically construct request messages. It is called on +// server receives to construct the message to be unmarshaled into. The message +// will be a non nil pointer to the type created by the handler. Use the Schema +// field of the [Spec] to determine the type of the message. +func WithRequestInitializer(initializer func(spec Spec, message any) error) HandlerOption { + return &initializerOption{Initializer: initializer} +} + +// WithResponseInitializer provides a function that initializes a new message. +// It may be used to dynamically construct response messages. It is called on +// client receives to construct the message to be unmarshaled into. The message +// will be a non nil pointer to the type created by the client. Use the Schema +// field of the [Spec] to determine the type of the message. +func WithResponseInitializer(initializer func(spec Spec, message any) error) ClientOption { + return &initializerOption{Initializer: initializer} +} + +// WithCodec registers a serialization method with a client or handler. +// Handlers may have multiple codecs registered, and use whichever the client +// chooses. Clients may only have a single codec. +// +// By default, handlers and clients support binary Protocol Buffer data using +// [google.golang.org/protobuf/proto]. Handlers also support JSON by default, +// using the standard Protobuf JSON mapping. Users with more specialized needs +// may override the default codecs by registering a new codec under the "proto" +// or "json" names. When supplying a custom "proto" codec, keep in mind that +// some unexported, protocol-specific messages are serialized using Protobuf - +// take care to fall back to the standard Protobuf implementation if +// necessary. +// +// Registering a codec with an empty name is a no-op. 
+func WithCodec(codec Codec) Option { + return &codecOption{Codec: codec} +} + +// WithCompressMinBytes sets a minimum size threshold for compression: +// regardless of compressor configuration, messages smaller than the configured +// minimum are sent uncompressed. +// +// The default minimum is zero. Setting a minimum compression threshold may +// improve overall performance, because the CPU cost of compressing very small +// messages usually isn't worth the small reduction in network I/O. +func WithCompressMinBytes(minBytes int) Option { + return &compressMinBytesOption{Min: minBytes} +} + +// WithReadMaxBytes limits the performance impact of pathologically large +// messages sent by the other party. For handlers, WithReadMaxBytes limits the size +// of a message that the client can send. For clients, WithReadMaxBytes limits the +// size of a message that the server can respond with. Limits apply to each Protobuf +// message, not to the stream as a whole. +// +// Setting WithReadMaxBytes to zero allows any message size. Both clients and +// handlers default to allowing any request size. +// +// Handlers may also use [http.MaxBytesHandler] to limit the total size of the +// HTTP request stream (rather than the per-message size). Connect handles +// [http.MaxBytesError] specially, so clients still receive errors with the +// appropriate error code and informative messages. +func WithReadMaxBytes(maxBytes int) Option { + return &readMaxBytesOption{Max: maxBytes} +} + +// WithSendMaxBytes prevents sending messages too large for the client/handler +// to handle without significant performance overhead. For handlers, WithSendMaxBytes +// limits the size of a message that the handler can respond with. For clients, +// WithSendMaxBytes limits the size of a message that the client can send. Limits +// apply to each message, not to the stream as a whole. +// +// Setting WithSendMaxBytes to zero allows any message size. 
Both clients and +// handlers default to allowing any message size. +func WithSendMaxBytes(maxBytes int) Option { + return &sendMaxBytesOption{Max: maxBytes} +} + +// WithIdempotency declares the idempotency of the procedure. This can determine +// whether a procedure call can safely be retried, and may affect which request +// modalities are allowed for a given procedure call. +// +// In most cases, you should not need to manually set this. It is normally set +// by the code generator for your schema. For protobuf schemas, it can be set like this: +// +// rpc Ping(PingRequest) returns (PingResponse) { +// option idempotency_level = NO_SIDE_EFFECTS; +// } +func WithIdempotency(idempotencyLevel IdempotencyLevel) Option { + return &idempotencyOption{idempotencyLevel: idempotencyLevel} +} + +// WithHTTPGet allows Connect-protocol clients to use HTTP GET requests for +// side-effect free unary RPC calls. Typically, the service schema indicates +// which procedures are idempotent (see [WithIdempotency] for an example +// protobuf schema). The gRPC and gRPC-Web protocols are POST-only, so this +// option has no effect when combined with [WithGRPC] or [WithGRPCWeb]. +// +// Using HTTP GET requests makes it easier to take advantage of CDNs, caching +// reverse proxies, and browsers' built-in caching. Note, however, that servers +// don't automatically set any cache headers; you can set cache headers using +// interceptors or by adding headers in individual procedure implementations. +// +// By default, all requests are made as HTTP POSTs. +func WithHTTPGet() ClientOption { + return &enableGet{} +} + +// WithInterceptors configures a client or handler's interceptor stack. Repeated +// WithInterceptors options are applied in order, so +// +// WithInterceptors(A) + WithInterceptors(B, C) == WithInterceptors(A, B, C) +// +// Unary interceptors compose like an onion. 
The first interceptor provided is +// the outermost layer of the onion: it acts first on the context and request, +// and last on the response and error. +// +// Stream interceptors also behave like an onion: the first interceptor +// provided is the outermost wrapper for the [StreamingClientConn] or +// [StreamingHandlerConn]. It's the first to see sent messages and the last to +// see received messages. +// +// Applied to client and handler, WithInterceptors(A, B, ..., Y, Z) produces: +// +// client.Send() client.Receive() +// | ^ +// v | +// A --- --- A +// B --- --- B +// : ... ... : +// Y --- --- Y +// Z --- --- Z +// | ^ +// v | +// = = = = = = = = = = = = = = = = +// network +// = = = = = = = = = = = = = = = = +// | ^ +// v | +// A --- --- A +// B --- --- B +// : ... ... : +// Y --- --- Y +// Z --- --- Z +// | ^ +// v | +// handler.Receive() handler.Send() +// | ^ +// | | +// '-> handler logic >-' +// +// Note that in clients, Send handles the request message(s) and Receive +// handles the response message(s). For handlers, it's the reverse. Depending +// on your interceptor's logic, you may need to wrap one method in clients and +// the other in handlers. +func WithInterceptors(interceptors ...Interceptor) Option { + return &interceptorsOption{interceptors} +} + +// WithOptions composes multiple Options into one. 
+func WithOptions(options ...Option) Option { + return &optionsOption{options} +} + +type schemaOption struct { + Schema any +} + +func (o *schemaOption) applyToClient(config *clientConfig) { + config.Schema = o.Schema +} + +func (o *schemaOption) applyToHandler(config *handlerConfig) { + config.Schema = o.Schema +} + +type initializerOption struct { + Initializer func(spec Spec, message any) error +} + +func (o *initializerOption) applyToHandler(config *handlerConfig) { + config.Initializer = maybeInitializer{initializer: o.Initializer} +} + +func (o *initializerOption) applyToClient(config *clientConfig) { + config.Initializer = maybeInitializer{initializer: o.Initializer} +} + +type maybeInitializer struct { + initializer func(spec Spec, message any) error +} + +func (o maybeInitializer) maybe(spec Spec, message any) error { + if o.initializer != nil { + return o.initializer(spec, message) + } + return nil +} + +type clientOptionsOption struct { + options []ClientOption +} + +func (o *clientOptionsOption) applyToClient(config *clientConfig) { + for _, option := range o.options { + option.applyToClient(config) + } +} + +type codecOption struct { + Codec Codec +} + +func (o *codecOption) applyToClient(config *clientConfig) { + if o.Codec == nil || o.Codec.Name() == "" { + return + } + config.Codec = o.Codec +} + +func (o *codecOption) applyToHandler(config *handlerConfig) { + if o.Codec == nil || o.Codec.Name() == "" { + return + } + config.Codecs[o.Codec.Name()] = o.Codec +} + +type compressionOption struct { + Name string + CompressionPool *compressionPool +} + +func (o *compressionOption) applyToClient(config *clientConfig) { + o.apply(&config.CompressionNames, config.CompressionPools) +} + +func (o *compressionOption) applyToHandler(config *handlerConfig) { + o.apply(&config.CompressionNames, config.CompressionPools) +} + +func (o *compressionOption) apply(configuredNames *[]string, configuredPools map[string]*compressionPool) { + if o.Name == "" { + return + 
} + if o.CompressionPool == nil { + delete(configuredPools, o.Name) + var names []string + for _, name := range *configuredNames { + if name == o.Name { + continue + } + names = append(names, name) + } + *configuredNames = names + return + } + configuredPools[o.Name] = o.CompressionPool + *configuredNames = append(*configuredNames, o.Name) +} + +type compressMinBytesOption struct { + Min int +} + +func (o *compressMinBytesOption) applyToClient(config *clientConfig) { + config.CompressMinBytes = o.Min +} + +func (o *compressMinBytesOption) applyToHandler(config *handlerConfig) { + config.CompressMinBytes = o.Min +} + +type readMaxBytesOption struct { + Max int +} + +func (o *readMaxBytesOption) applyToClient(config *clientConfig) { + config.ReadMaxBytes = o.Max +} + +func (o *readMaxBytesOption) applyToHandler(config *handlerConfig) { + config.ReadMaxBytes = o.Max +} + +type sendMaxBytesOption struct { + Max int +} + +func (o *sendMaxBytesOption) applyToClient(config *clientConfig) { + config.SendMaxBytes = o.Max +} + +func (o *sendMaxBytesOption) applyToHandler(config *handlerConfig) { + config.SendMaxBytes = o.Max +} + +type handlerOptionsOption struct { + options []HandlerOption +} + +func (o *handlerOptionsOption) applyToHandler(config *handlerConfig) { + for _, option := range o.options { + option.applyToHandler(config) + } +} + +type requireConnectProtocolHeaderOption struct{} + +func (o *requireConnectProtocolHeaderOption) applyToHandler(config *handlerConfig) { + config.RequireConnectProtocolHeader = true +} + +type idempotencyOption struct { + idempotencyLevel IdempotencyLevel +} + +func (o *idempotencyOption) applyToClient(config *clientConfig) { + config.IdempotencyLevel = o.idempotencyLevel +} + +func (o *idempotencyOption) applyToHandler(config *handlerConfig) { + config.IdempotencyLevel = o.idempotencyLevel +} + +type grpcOption struct { + web bool +} + +func (o *grpcOption) applyToClient(config *clientConfig) { + config.Protocol = &protocolGRPC{web: 
o.web} +} + +type enableGet struct{} + +func (o *enableGet) applyToClient(config *clientConfig) { + config.EnableGet = true +} + +// WithHTTPGetMaxURLSize sets the maximum allowable URL length for GET requests +// made using the Connect protocol. It has no effect on gRPC or gRPC-Web +// clients, since those protocols are POST-only. +// +// Limiting the URL size is useful as most user agents, proxies, and servers +// have limits on the allowable length of a URL. For example, Apache and Nginx +// limit the size of a request line to around 8 KiB, meaning that maximum +// length of a URL is a bit smaller than this. If you run into URL size +// limitations imposed by your network infrastructure and don't know the +// maximum allowable size, or if you'd prefer to be cautious from the start, a +// 4096 byte (4 KiB) limit works with most common proxies and CDNs. +// +// If fallback is set to true and the URL would be longer than the configured +// maximum value, the request will be sent as an HTTP POST instead. If fallback +// is set to false, the request will fail with [CodeResourceExhausted]. +// +// By default, Connect-protocol clients with GET requests enabled may send a +// URL of any size. 
+func WithHTTPGetMaxURLSize(bytes int, fallback bool) ClientOption { + return &getURLMaxBytes{Max: bytes, Fallback: fallback} +} + +type getURLMaxBytes struct { + Max int + Fallback bool +} + +func (o *getURLMaxBytes) applyToClient(config *clientConfig) { + config.GetURLMaxBytes = o.Max + config.GetUseFallback = o.Fallback +} + +type interceptorsOption struct { + Interceptors []Interceptor +} + +func (o *interceptorsOption) applyToClient(config *clientConfig) { + config.Interceptor = o.chainWith(config.Interceptor) +} + +func (o *interceptorsOption) applyToHandler(config *handlerConfig) { + config.Interceptor = o.chainWith(config.Interceptor) +} + +func (o *interceptorsOption) chainWith(current Interceptor) Interceptor { + if len(o.Interceptors) == 0 { + return current + } + if current == nil && len(o.Interceptors) == 1 { + return o.Interceptors[0] + } + if current == nil && len(o.Interceptors) > 1 { + return newChain(o.Interceptors) + } + return newChain(append([]Interceptor{current}, o.Interceptors...)) +} + +type optionsOption struct { + options []Option +} + +func (o *optionsOption) applyToClient(config *clientConfig) { + for _, option := range o.options { + option.applyToClient(config) + } +} + +func (o *optionsOption) applyToHandler(config *handlerConfig) { + for _, option := range o.options { + option.applyToHandler(config) + } +} + +type sendCompressionOption struct { + Name string +} + +func (o *sendCompressionOption) applyToClient(config *clientConfig) { + config.RequestCompressionName = o.Name +} + +func withGzip() Option { + return &compressionOption{ + Name: compressionGzip, + CompressionPool: newCompressionPool( + func() Decompressor { return &gzip.Reader{} }, + func() Compressor { return gzip.NewWriter(io.Discard) }, + ), + } +} + +func withProtoBinaryCodec() Option { + return WithCodec(&protoBinaryCodec{}) +} + +func withProtoJSONCodecs() HandlerOption { + return WithHandlerOptions( + WithCodec(&protoJSONCodec{codecNameJSON}), + 
WithCodec(&protoJSONCodec{codecNameJSONCharsetUTF8}), + ) +} + +type conditionalHandlerOptions struct { + conditional func(spec Spec) []HandlerOption +} + +func (o *conditionalHandlerOptions) applyToHandler(config *handlerConfig) { + spec := config.newSpec() + if spec.Procedure == "" { + return // ignore empty specs + } + for _, option := range o.conditional(spec) { + option.applyToHandler(config) + } +} diff --git a/vendor/connectrpc.com/connect/protobuf_util.go b/vendor/connectrpc.com/connect/protobuf_util.go new file mode 100644 index 0000000000..b9d0056d71 --- /dev/null +++ b/vendor/connectrpc.com/connect/protobuf_util.go @@ -0,0 +1,42 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "strings" +) + +// extractProtoPath returns the trailing portion of the URL's path, +// corresponding to the Protobuf package, service, and method. It always starts +// with a slash. Within connect, we use this as (1) Spec.Procedure and (2) the +// path when mounting handlers on muxes. 
+func extractProtoPath(path string) string { + segments := strings.Split(path, "/") + var pkg, method string + if len(segments) > 0 { + pkg = segments[0] + } + if len(segments) > 1 { + pkg = segments[len(segments)-2] + method = segments[len(segments)-1] + } + if pkg == "" { + return "/" + } + if method == "" { + return "/" + pkg + } + return "/" + pkg + "/" + method +} diff --git a/vendor/connectrpc.com/connect/protocol.go b/vendor/connectrpc.com/connect/protocol.go new file mode 100644 index 0000000000..dc9dc0c8b3 --- /dev/null +++ b/vendor/connectrpc.com/connect/protocol.go @@ -0,0 +1,424 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "context" + "errors" + "fmt" + "io" + "mime" + "net/http" + "net/url" + "sort" + "strings" +) + +// The names of the Connect, gRPC, and gRPC-Web protocols (as exposed by +// [Peer].Protocol). Additional protocols may be added in the future. 
+const ( + ProtocolConnect = "connect" + ProtocolGRPC = "grpc" + ProtocolGRPCWeb = "grpcweb" +) + +const ( + headerContentType = "Content-Type" + headerContentEncoding = "Content-Encoding" + headerContentLength = "Content-Length" + headerHost = "Host" + headerUserAgent = "User-Agent" + headerTrailer = "Trailer" + headerDate = "Date" + + discardLimit = 1024 * 1024 * 4 // 4MiB +) + +var errNoTimeout = errors.New("no timeout") + +// A Protocol defines the HTTP semantics to use when sending and receiving +// messages. It ties together codecs, compressors, and net/http to produce +// Senders and Receivers. +// +// For example, connect supports the gRPC protocol using this abstraction. Among +// many other things, the protocol implementation is responsible for +// translating timeouts from Go contexts to HTTP and vice versa. For gRPC, it +// converts timeouts to and from strings (for example, 10*time.Second <-> +// "10S"), and puts those strings into the "Grpc-Timeout" HTTP header. Other +// protocols might encode durations differently, put them into a different HTTP +// header, or ignore them entirely. +// +// We don't have any short-term plans to export this interface; it's just here +// to separate the protocol-specific portions of connect from the +// protocol-agnostic plumbing. +type protocol interface { + NewHandler(*protocolHandlerParams) protocolHandler + NewClient(*protocolClientParams) (protocolClient, error) +} + +// HandlerParams are the arguments provided to a Protocol's NewHandler +// method, bundled into a struct to allow backward-compatible argument +// additions. Protocol implementations should take care to use the supplied +// Spec rather than constructing their own, since new fields may have been +// added. 
+type protocolHandlerParams struct { + Spec Spec + Codecs readOnlyCodecs + CompressionPools readOnlyCompressionPools + CompressMinBytes int + BufferPool *bufferPool + ReadMaxBytes int + SendMaxBytes int + RequireConnectProtocolHeader bool + IdempotencyLevel IdempotencyLevel +} + +// Handler is the server side of a protocol. HTTP handlers typically support +// multiple protocols, codecs, and compressors. +type protocolHandler interface { + // Methods is the set of HTTP methods the protocol can handle. + Methods() map[string]struct{} + + // ContentTypes is the set of HTTP Content-Types that the protocol can + // handle. + ContentTypes() map[string]struct{} + + // SetTimeout runs before NewStream. Implementations may inspect the HTTP + // request, parse any timeout set by the client, and return a modified + // context and cancellation function. + // + // If the client didn't send a timeout, SetTimeout should return the + // request's context, a nil cancellation function, and a nil error. + SetTimeout(*http.Request) (context.Context, context.CancelFunc, error) + + // CanHandlePayload returns true if the protocol can handle an HTTP request. + // This is called after the request method is validated, so we only need to + // be concerned with the content type/payload specifically. + CanHandlePayload(*http.Request, string) bool + + // NewConn constructs a HandlerConn for the message exchange. + NewConn(http.ResponseWriter, *http.Request) (handlerConnCloser, bool) +} + +// ClientParams are the arguments provided to a Protocol's NewClient method, +// bundled into a struct to allow backward-compatible argument additions. +// Protocol implementations should take care to use the supplied Spec rather +// than constructing their own, since new fields may have been added. 
+type protocolClientParams struct { + CompressionName string + CompressionPools readOnlyCompressionPools + Codec Codec + CompressMinBytes int + HTTPClient HTTPClient + URL *url.URL + BufferPool *bufferPool + ReadMaxBytes int + SendMaxBytes int + EnableGet bool + GetURLMaxBytes int + GetUseFallback bool + // The gRPC family of protocols always needs access to a Protobuf codec to + // marshal and unmarshal errors. + Protobuf Codec +} + +// Client is the client side of a protocol. HTTP clients typically use a single +// protocol, codec, and compressor to send requests. +type protocolClient interface { + // Peer describes the server for the RPC. + Peer() Peer + + // WriteRequestHeader writes any protocol-specific request headers. + WriteRequestHeader(StreamType, http.Header) + + // NewConn constructs a StreamingClientConn for the message exchange. + // + // Implementations should assume that the supplied HTTP headers have already + // been populated by WriteRequestHeader. When constructing a stream for a + // unary call, implementations may assume that the Sender's Send and Close + // methods return before the Receiver's Receive or Close methods are called. + NewConn(context.Context, Spec, http.Header) streamingClientConn +} + +// streamingClientConn extends StreamingClientConn with a method for registering +// a hook when the HTTP request is actually sent. +type streamingClientConn interface { + StreamingClientConn + + onRequestSend(fn func(*http.Request)) +} + +// errorTranslatingHandlerConnCloser wraps a handlerConnCloser to ensure that +// we always return coded errors to users and write coded errors to the +// network. +// +// It's used in protocol implementations. 
+type errorTranslatingHandlerConnCloser struct { + handlerConnCloser + + toWire func(error) error + fromWire func(error) error +} + +func (hc *errorTranslatingHandlerConnCloser) Send(msg any) error { + return hc.fromWire(hc.handlerConnCloser.Send(msg)) +} + +func (hc *errorTranslatingHandlerConnCloser) Receive(msg any) error { + return hc.fromWire(hc.handlerConnCloser.Receive(msg)) +} + +func (hc *errorTranslatingHandlerConnCloser) Close(err error) error { + closeErr := hc.handlerConnCloser.Close(hc.toWire(err)) + return hc.fromWire(closeErr) +} + +func (hc *errorTranslatingHandlerConnCloser) getHTTPMethod() string { + if methoder, ok := hc.handlerConnCloser.(interface{ getHTTPMethod() string }); ok { + return methoder.getHTTPMethod() + } + return http.MethodPost +} + +// errorTranslatingClientConn wraps a StreamingClientConn to make sure that we always +// return coded errors from clients. +// +// It's used in protocol implementations. +type errorTranslatingClientConn struct { + streamingClientConn + + fromWire func(error) error +} + +func (cc *errorTranslatingClientConn) Send(msg any) error { + return cc.fromWire(cc.streamingClientConn.Send(msg)) +} + +func (cc *errorTranslatingClientConn) Receive(msg any) error { + return cc.fromWire(cc.streamingClientConn.Receive(msg)) +} + +func (cc *errorTranslatingClientConn) CloseRequest() error { + return cc.fromWire(cc.streamingClientConn.CloseRequest()) +} + +func (cc *errorTranslatingClientConn) CloseResponse() error { + return cc.fromWire(cc.streamingClientConn.CloseResponse()) +} + +func (cc *errorTranslatingClientConn) onRequestSend(fn func(*http.Request)) { + cc.streamingClientConn.onRequestSend(fn) +} + +// wrapHandlerConnWithCodedErrors ensures that we (1) automatically code +// context-related errors correctly when writing them to the network, and (2) +// return *Errors from all exported APIs. 
+func wrapHandlerConnWithCodedErrors(conn handlerConnCloser) handlerConnCloser { + return &errorTranslatingHandlerConnCloser{ + handlerConnCloser: conn, + toWire: wrapIfContextError, + fromWire: wrapIfUncoded, + } +} + +// wrapClientConnWithCodedErrors ensures that we always return *Errors from +// public APIs. +func wrapClientConnWithCodedErrors(conn streamingClientConn) streamingClientConn { + return &errorTranslatingClientConn{ + streamingClientConn: conn, + fromWire: wrapIfUncoded, + } +} + +func mappedMethodHandlers(handlers []protocolHandler) map[string][]protocolHandler { + methodHandlers := make(map[string][]protocolHandler) + for _, handler := range handlers { + for method := range handler.Methods() { + methodHandlers[method] = append(methodHandlers[method], handler) + } + } + return methodHandlers +} + +func sortedAcceptPostValue(handlers []protocolHandler) string { + contentTypes := make(map[string]struct{}) + for _, handler := range handlers { + for contentType := range handler.ContentTypes() { + contentTypes[contentType] = struct{}{} + } + } + accept := make([]string, 0, len(contentTypes)) + for ct := range contentTypes { + accept = append(accept, ct) + } + sort.Strings(accept) + return strings.Join(accept, ", ") +} + +func sortedAllowMethodValue(handlers []protocolHandler) string { + methods := make(map[string]struct{}) + for _, handler := range handlers { + for method := range handler.Methods() { + methods[method] = struct{}{} + } + } + allow := make([]string, 0, len(methods)) + for ct := range methods { + allow = append(allow, ct) + } + sort.Strings(allow) + return strings.Join(allow, ", ") +} + +func isCommaOrSpace(c rune) bool { + return c == ',' || c == ' ' +} + +func discard(reader io.Reader) (int64, error) { + if lr, ok := reader.(*io.LimitedReader); ok { + return io.Copy(io.Discard, lr) + } + // We don't want to get stuck throwing data away forever, so limit how much + // we're willing to do here. 
+ lr := &io.LimitedReader{R: reader, N: discardLimit} + return io.Copy(io.Discard, lr) +} + +// negotiateCompression determines and validates the request compression and +// response compression using the available compressors and protocol-specific +// Content-Encoding and Accept-Encoding headers. +func negotiateCompression( //nolint:nonamedreturns + availableCompressors readOnlyCompressionPools, + sent, accept string, +) (requestCompression, responseCompression string, clientVisibleErr *Error) { + requestCompression = compressionIdentity + if sent != "" && sent != compressionIdentity { + // We default to identity, so we only care if the client sends something + // other than the empty string or compressIdentity. + if availableCompressors.Contains(sent) { + requestCompression = sent + } else { + // To comply with + // https://github.com/grpc/grpc/blob/master/doc/compression.md and the + // Connect protocol, we should return CodeUnimplemented and specify + // acceptable compression(s) (in addition to setting the a + // protocol-specific accept-encoding header). + return "", "", errorf( + CodeUnimplemented, + "unknown compression %q: supported encodings are %v", + sent, availableCompressors.CommaSeparatedNames(), + ) + } + } + // Support asymmetric compression. This logic follows + // https://github.com/grpc/grpc/blob/master/doc/compression.md and common + // sense. + responseCompression = requestCompression + // If we're not already planning to compress the response, check whether the + // client requested a compression algorithm we support. + if responseCompression == compressionIdentity && accept != "" { + for _, name := range strings.FieldsFunc(accept, isCommaOrSpace) { + if availableCompressors.Contains(name) { + // We found a mutually supported compression algorithm. Unlike standard + // HTTP, there's no preference weighting, so can bail out immediately. 
+ responseCompression = name + break + } + } + } + return requestCompression, responseCompression, nil +} + +// checkServerStreamsCanFlush ensures that bidi and server streaming handlers +// have received an http.ResponseWriter that implements http.Flusher, since +// they must flush data after sending each message. +func checkServerStreamsCanFlush(spec Spec, responseWriter http.ResponseWriter) *Error { + requiresFlusher := (spec.StreamType & StreamTypeServer) == StreamTypeServer + if _, flushable := responseWriter.(http.Flusher); requiresFlusher && !flushable { + return NewError(CodeInternal, fmt.Errorf("%T does not implement http.Flusher", responseWriter)) + } + return nil +} + +func flushResponseWriter(w http.ResponseWriter) { + if f, ok := w.(http.Flusher); ok { + f.Flush() + } +} + +func canonicalizeContentType(contentType string) string { + // Typically, clients send Content-Type in canonical form, without + // parameters. In those cases, we'd like to avoid parsing and + // canonicalization overhead. + // + // See https://www.rfc-editor.org/rfc/rfc2045.html#section-5.1 for a full + // grammar. + var slashes int + for _, r := range contentType { + switch { + case r >= 'a' && r <= 'z': + case r == '.' || r == '+' || r == '-': + case r == '/': + slashes++ + default: + return canonicalizeContentTypeSlow(contentType) + } + } + if slashes == 1 { + return contentType + } + return canonicalizeContentTypeSlow(contentType) +} + +func canonicalizeContentTypeSlow(contentType string) string { + base, params, err := mime.ParseMediaType(contentType) + if err != nil { + return contentType + } + // According to RFC 9110 Section 8.3.2, the charset parameter value should be treated as case-insensitive. + // mime.FormatMediaType canonicalizes parameter names, but not parameter values, + // because the case sensitivity of a parameter value depends on its semantics. + // Therefore, the charset parameter value should be canonicalized here. + // ref.) 
https://httpwg.org/specs/rfc9110.html#rfc.section.8.3.2 + if charset, ok := params["charset"]; ok { + params["charset"] = strings.ToLower(charset) + } + return mime.FormatMediaType(base, params) +} + +func httpToCode(httpCode int) Code { + // https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md + // Note that this is NOT the inverse of the gRPC-to-HTTP or Connect-to-HTTP + // mappings. + + // Literals are easier to compare to the specification (vs named + // constants). + switch httpCode { + case 400: + return CodeInternal + case 401: + return CodeUnauthenticated + case 403: + return CodePermissionDenied + case 404: + return CodeUnimplemented + case 429: + return CodeUnavailable + case 502, 503, 504: + return CodeUnavailable + default: + return CodeUnknown + } +} diff --git a/vendor/connectrpc.com/connect/protocol_connect.go b/vendor/connectrpc.com/connect/protocol_connect.go new file mode 100644 index 0000000000..371f382e34 --- /dev/null +++ b/vendor/connectrpc.com/connect/protocol_connect.go @@ -0,0 +1,1449 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package connect + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net/http" + "net/url" + "runtime" + "strconv" + "strings" + "time" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + connectUnaryHeaderCompression = "Content-Encoding" + connectUnaryHeaderAcceptCompression = "Accept-Encoding" + connectUnaryTrailerPrefix = "Trailer-" + connectStreamingHeaderCompression = "Connect-Content-Encoding" + connectStreamingHeaderAcceptCompression = "Connect-Accept-Encoding" + connectHeaderTimeout = "Connect-Timeout-Ms" + connectHeaderProtocolVersion = "Connect-Protocol-Version" + connectProtocolVersion = "1" + headerVary = "Vary" + + connectFlagEnvelopeEndStream = 0b00000010 + + connectUnaryContentTypePrefix = "application/" + connectUnaryContentTypeJSON = connectUnaryContentTypePrefix + codecNameJSON + connectStreamingContentTypePrefix = "application/connect+" + + connectUnaryEncodingQueryParameter = "encoding" + connectUnaryMessageQueryParameter = "message" + connectUnaryBase64QueryParameter = "base64" + connectUnaryCompressionQueryParameter = "compression" + connectUnaryConnectQueryParameter = "connect" + connectUnaryConnectQueryValue = "v" + connectProtocolVersion +) + +// defaultConnectUserAgent returns a User-Agent string similar to those used in gRPC. +// +//nolint:gochecknoglobals +var defaultConnectUserAgent = fmt.Sprintf("connect-go/%s (%s)", Version, runtime.Version()) + +type protocolConnect struct{} + +// NewHandler implements protocol, so it must return an interface. 
+func (*protocolConnect) NewHandler(params *protocolHandlerParams) protocolHandler { + methods := make(map[string]struct{}) + methods[http.MethodPost] = struct{}{} + + if params.Spec.StreamType == StreamTypeUnary && params.IdempotencyLevel == IdempotencyNoSideEffects { + methods[http.MethodGet] = struct{}{} + } + + contentTypes := make(map[string]struct{}) + for _, name := range params.Codecs.Names() { + if params.Spec.StreamType == StreamTypeUnary { + contentTypes[canonicalizeContentType(connectUnaryContentTypePrefix+name)] = struct{}{} + continue + } + contentTypes[canonicalizeContentType(connectStreamingContentTypePrefix+name)] = struct{}{} + } + + return &connectHandler{ + protocolHandlerParams: *params, + methods: methods, + accept: contentTypes, + } +} + +// NewClient implements protocol, so it must return an interface. +func (*protocolConnect) NewClient(params *protocolClientParams) (protocolClient, error) { + return &connectClient{ + protocolClientParams: *params, + peer: newPeerForURL(params.URL, ProtocolConnect), + }, nil +} + +type connectHandler struct { + protocolHandlerParams + + methods map[string]struct{} + accept map[string]struct{} +} + +func (h *connectHandler) Methods() map[string]struct{} { + return h.methods +} + +func (h *connectHandler) ContentTypes() map[string]struct{} { + return h.accept +} + +func (*connectHandler) SetTimeout(request *http.Request) (context.Context, context.CancelFunc, error) { + timeout := getHeaderCanonical(request.Header, connectHeaderTimeout) + if timeout == "" { + return request.Context(), nil, nil + } + if len(timeout) > 10 { + return nil, nil, errorf(CodeInvalidArgument, "parse timeout: %q has >10 digits", timeout) + } + millis, err := strconv.ParseInt(timeout, 10 /* base */, 64 /* bitsize */) + if err != nil { + return nil, nil, errorf(CodeInvalidArgument, "parse timeout: %w", err) + } + ctx, cancel := context.WithTimeout( + request.Context(), + time.Duration(millis)*time.Millisecond, + ) + return ctx, cancel, 
nil +} + +func (h *connectHandler) CanHandlePayload(request *http.Request, contentType string) bool { + if request.Method == http.MethodGet { + query := request.URL.Query() + codecName := query.Get(connectUnaryEncodingQueryParameter) + contentType = connectContentTypeForCodecName( + h.Spec.StreamType, + codecName, + ) + } + _, ok := h.accept[contentType] + return ok +} + +func (h *connectHandler) NewConn( + responseWriter http.ResponseWriter, + request *http.Request, +) (handlerConnCloser, bool) { + ctx := request.Context() + query := request.URL.Query() + // We need to parse metadata before entering the interceptor stack; we'll + // send the error to the client later on. + var contentEncoding, acceptEncoding string + if h.Spec.StreamType == StreamTypeUnary { + if request.Method == http.MethodGet { + contentEncoding = query.Get(connectUnaryCompressionQueryParameter) + } else { + contentEncoding = getHeaderCanonical(request.Header, connectUnaryHeaderCompression) + } + acceptEncoding = getHeaderCanonical(request.Header, connectUnaryHeaderAcceptCompression) + } else { + contentEncoding = getHeaderCanonical(request.Header, connectStreamingHeaderCompression) + acceptEncoding = getHeaderCanonical(request.Header, connectStreamingHeaderAcceptCompression) + } + requestCompression, responseCompression, failed := negotiateCompression( + h.CompressionPools, + contentEncoding, + acceptEncoding, + ) + if failed == nil { + failed = checkServerStreamsCanFlush(h.Spec, responseWriter) + } + if failed == nil { + required := h.RequireConnectProtocolHeader && (h.Spec.StreamType == StreamTypeUnary) + failed = connectCheckProtocolVersion(request, required) + } + + var requestBody io.ReadCloser + var contentType, codecName string + if request.Method == http.MethodGet { + if failed == nil && !query.Has(connectUnaryEncodingQueryParameter) { + failed = errorf(CodeInvalidArgument, "missing %s parameter", connectUnaryEncodingQueryParameter) + } else if failed == nil && 
!query.Has(connectUnaryMessageQueryParameter) { + failed = errorf(CodeInvalidArgument, "missing %s parameter", connectUnaryMessageQueryParameter) + } + msg := query.Get(connectUnaryMessageQueryParameter) + msgReader := queryValueReader(msg, query.Get(connectUnaryBase64QueryParameter) == "1") + requestBody = io.NopCloser(msgReader) + codecName = query.Get(connectUnaryEncodingQueryParameter) + contentType = connectContentTypeForCodecName( + h.Spec.StreamType, + codecName, + ) + } else { + requestBody = request.Body + contentType = getHeaderCanonical(request.Header, headerContentType) + codecName = connectCodecForContentType( + h.Spec.StreamType, + contentType, + ) + } + + codec := h.Codecs.Get(codecName) + // The codec can be nil in the GET request case; that's okay: when failed + // is non-nil, codec is never used. + if failed == nil && codec == nil { + failed = errorf(CodeInvalidArgument, "invalid message encoding: %q", codecName) + } + + // Write any remaining headers here: + // (1) any writes to the stream will implicitly send the headers, so we + // should get all of gRPC's required response headers ready. + // (2) interceptors should be able to see these headers. + // + // Since we know that these header keys are already in canonical form, we can + // skip the normalization in Header.Set. + header := responseWriter.Header() + header[headerContentType] = []string{contentType} + acceptCompressionHeader := connectUnaryHeaderAcceptCompression + if h.Spec.StreamType != StreamTypeUnary { + acceptCompressionHeader = connectStreamingHeaderAcceptCompression + // We only write the request encoding header here for streaming calls, + // since the streaming envelope lets us choose whether to compress each + // message individually. For unary, we won't know whether we're compressing + // the request until we see how large the payload is. 
+ if responseCompression != compressionIdentity { + header[connectStreamingHeaderCompression] = []string{responseCompression} + } + } + header[acceptCompressionHeader] = []string{h.CompressionPools.CommaSeparatedNames()} + + var conn handlerConnCloser + peer := Peer{ + Addr: request.RemoteAddr, + Protocol: ProtocolConnect, + Query: query, + } + if h.Spec.StreamType == StreamTypeUnary { + conn = &connectUnaryHandlerConn{ + spec: h.Spec, + peer: peer, + request: request, + responseWriter: responseWriter, + marshaler: connectUnaryMarshaler{ + ctx: ctx, + sender: writeSender{writer: responseWriter}, + codec: codec, + compressMinBytes: h.CompressMinBytes, + compressionName: responseCompression, + compressionPool: h.CompressionPools.Get(responseCompression), + bufferPool: h.BufferPool, + header: responseWriter.Header(), + sendMaxBytes: h.SendMaxBytes, + }, + unmarshaler: connectUnaryUnmarshaler{ + ctx: ctx, + reader: requestBody, + codec: codec, + compressionPool: h.CompressionPools.Get(requestCompression), + bufferPool: h.BufferPool, + readMaxBytes: h.ReadMaxBytes, + }, + responseTrailer: make(http.Header), + } + } else { + conn = &connectStreamingHandlerConn{ + spec: h.Spec, + peer: peer, + request: request, + responseWriter: responseWriter, + marshaler: connectStreamingMarshaler{ + envelopeWriter: envelopeWriter{ + ctx: ctx, + sender: writeSender{responseWriter}, + codec: codec, + compressMinBytes: h.CompressMinBytes, + compressionPool: h.CompressionPools.Get(responseCompression), + bufferPool: h.BufferPool, + sendMaxBytes: h.SendMaxBytes, + }, + }, + unmarshaler: connectStreamingUnmarshaler{ + envelopeReader: envelopeReader{ + ctx: ctx, + reader: requestBody, + codec: codec, + compressionPool: h.CompressionPools.Get(requestCompression), + bufferPool: h.BufferPool, + readMaxBytes: h.ReadMaxBytes, + }, + }, + responseTrailer: make(http.Header), + } + } + conn = wrapHandlerConnWithCodedErrors(conn) + + if failed != nil { + // Negotiation failed, so we can't establish a 
stream. + _ = conn.Close(failed) + return nil, false + } + return conn, true +} + +type connectClient struct { + protocolClientParams + + peer Peer +} + +func (c *connectClient) Peer() Peer { + return c.peer +} + +func (c *connectClient) WriteRequestHeader(streamType StreamType, header http.Header) { + // We know these header keys are in canonical form, so we can bypass all the + // checks in Header.Set. + if getHeaderCanonical(header, headerUserAgent) == "" { + header[headerUserAgent] = []string{defaultConnectUserAgent} + } + header[connectHeaderProtocolVersion] = []string{connectProtocolVersion} + header[headerContentType] = []string{ + connectContentTypeForCodecName(streamType, c.Codec.Name()), + } + acceptCompressionHeader := connectUnaryHeaderAcceptCompression + if streamType != StreamTypeUnary { + // If we don't set Accept-Encoding, by default http.Client will ask the + // server to compress the whole stream. Since we're already compressing + // each message, this is a waste. + header[connectUnaryHeaderAcceptCompression] = []string{compressionIdentity} + acceptCompressionHeader = connectStreamingHeaderAcceptCompression + // We only write the request encoding header here for streaming calls, + // since the streaming envelope lets us choose whether to compress each + // message individually. For unary, we won't know whether we're compressing + // the request until we see how large the payload is. 
+ if c.CompressionName != "" && c.CompressionName != compressionIdentity { + header[connectStreamingHeaderCompression] = []string{c.CompressionName} + } + } + if acceptCompression := c.CompressionPools.CommaSeparatedNames(); acceptCompression != "" { + header[acceptCompressionHeader] = []string{acceptCompression} + } +} + +func (c *connectClient) NewConn( + ctx context.Context, + spec Spec, + header http.Header, +) streamingClientConn { + if deadline, ok := ctx.Deadline(); ok { + millis := int64(time.Until(deadline) / time.Millisecond) + if millis > 0 { + encoded := strconv.FormatInt(millis, 10 /* base */) + if len(encoded) <= 10 { + header[connectHeaderTimeout] = []string{encoded} + } // else effectively unbounded + } + } + duplexCall := newDuplexHTTPCall(ctx, c.HTTPClient, c.URL, spec, header) + var conn streamingClientConn + if spec.StreamType == StreamTypeUnary { + unaryConn := &connectUnaryClientConn{ + spec: spec, + peer: c.Peer(), + duplexCall: duplexCall, + compressionPools: c.CompressionPools, + bufferPool: c.BufferPool, + marshaler: connectUnaryRequestMarshaler{ + connectUnaryMarshaler: connectUnaryMarshaler{ + ctx: ctx, + sender: duplexCall, + codec: c.Codec, + compressMinBytes: c.CompressMinBytes, + compressionName: c.CompressionName, + compressionPool: c.CompressionPools.Get(c.CompressionName), + bufferPool: c.BufferPool, + header: duplexCall.Header(), + sendMaxBytes: c.SendMaxBytes, + }, + }, + unmarshaler: connectUnaryUnmarshaler{ + ctx: ctx, + reader: duplexCall, + codec: c.Codec, + bufferPool: c.BufferPool, + readMaxBytes: c.ReadMaxBytes, + }, + responseHeader: make(http.Header), + responseTrailer: make(http.Header), + } + if spec.IdempotencyLevel == IdempotencyNoSideEffects { + unaryConn.marshaler.enableGet = c.EnableGet + unaryConn.marshaler.getURLMaxBytes = c.GetURLMaxBytes + unaryConn.marshaler.getUseFallback = c.GetUseFallback + unaryConn.marshaler.duplexCall = duplexCall + if stableCodec, ok := c.Codec.(stableCodec); ok { + 
unaryConn.marshaler.stableCodec = stableCodec + } + } + conn = unaryConn + duplexCall.SetValidateResponse(unaryConn.validateResponse) + } else { + streamingConn := &connectStreamingClientConn{ + spec: spec, + peer: c.Peer(), + duplexCall: duplexCall, + compressionPools: c.CompressionPools, + bufferPool: c.BufferPool, + codec: c.Codec, + marshaler: connectStreamingMarshaler{ + envelopeWriter: envelopeWriter{ + ctx: ctx, + sender: duplexCall, + codec: c.Codec, + compressMinBytes: c.CompressMinBytes, + compressionPool: c.CompressionPools.Get(c.CompressionName), + bufferPool: c.BufferPool, + sendMaxBytes: c.SendMaxBytes, + }, + }, + unmarshaler: connectStreamingUnmarshaler{ + envelopeReader: envelopeReader{ + ctx: ctx, + reader: duplexCall, + codec: c.Codec, + bufferPool: c.BufferPool, + readMaxBytes: c.ReadMaxBytes, + }, + }, + responseHeader: make(http.Header), + responseTrailer: make(http.Header), + } + conn = streamingConn + duplexCall.SetValidateResponse(streamingConn.validateResponse) + } + return wrapClientConnWithCodedErrors(conn) +} + +type connectUnaryClientConn struct { + spec Spec + peer Peer + duplexCall *duplexHTTPCall + compressionPools readOnlyCompressionPools + bufferPool *bufferPool + marshaler connectUnaryRequestMarshaler + unmarshaler connectUnaryUnmarshaler + responseHeader http.Header + responseTrailer http.Header +} + +func (cc *connectUnaryClientConn) Spec() Spec { + return cc.spec +} + +func (cc *connectUnaryClientConn) Peer() Peer { + return cc.peer +} + +func (cc *connectUnaryClientConn) Send(msg any) error { + if err := cc.marshaler.Marshal(msg); err != nil { + return err + } + return nil // must be a literal nil: nil *Error is a non-nil error +} + +func (cc *connectUnaryClientConn) RequestHeader() http.Header { + return cc.duplexCall.Header() +} + +func (cc *connectUnaryClientConn) CloseRequest() error { + return cc.duplexCall.CloseWrite() +} + +func (cc *connectUnaryClientConn) Receive(msg any) error { + if err := 
cc.duplexCall.BlockUntilResponseReady(); err != nil { + return err + } + if err := cc.unmarshaler.Unmarshal(msg); err != nil { + return err + } + return nil // must be a literal nil: nil *Error is a non-nil error +} + +func (cc *connectUnaryClientConn) ResponseHeader() http.Header { + _ = cc.duplexCall.BlockUntilResponseReady() + return cc.responseHeader +} + +func (cc *connectUnaryClientConn) ResponseTrailer() http.Header { + _ = cc.duplexCall.BlockUntilResponseReady() + return cc.responseTrailer +} + +func (cc *connectUnaryClientConn) CloseResponse() error { + return cc.duplexCall.CloseRead() +} + +func (cc *connectUnaryClientConn) onRequestSend(fn func(*http.Request)) { + cc.duplexCall.onRequestSend = fn +} + +func (cc *connectUnaryClientConn) validateResponse(response *http.Response) *Error { + for k, v := range response.Header { + if !strings.HasPrefix(k, connectUnaryTrailerPrefix) { + cc.responseHeader[k] = v + continue + } + cc.responseTrailer[k[len(connectUnaryTrailerPrefix):]] = v + } + if err := connectValidateUnaryResponseContentType( + cc.marshaler.codec.Name(), + cc.duplexCall.Method(), + response.StatusCode, + response.Status, + getHeaderCanonical(response.Header, headerContentType), + ); err != nil { + if IsNotModifiedError(err) { + // Allow access to response headers for this kind of error. + // RFC 9110 doesn't allow trailers on 304s, so we only need to include headers. 
+ err.meta = cc.responseHeader.Clone() + } + return err + } + compression := getHeaderCanonical(response.Header, connectUnaryHeaderCompression) + if compression != "" && + compression != compressionIdentity && + !cc.compressionPools.Contains(compression) { + return errorf( + CodeInternal, + "unknown encoding %q: accepted encodings are %v", + compression, + cc.compressionPools.CommaSeparatedNames(), + ) + } + cc.unmarshaler.compressionPool = cc.compressionPools.Get(compression) + if response.StatusCode != http.StatusOK { + unmarshaler := connectUnaryUnmarshaler{ + ctx: cc.unmarshaler.ctx, + reader: response.Body, + compressionPool: cc.unmarshaler.compressionPool, + bufferPool: cc.bufferPool, + } + var wireErr connectWireError + if err := unmarshaler.UnmarshalFunc(&wireErr, json.Unmarshal); err != nil { + return NewError( + httpToCode(response.StatusCode), + errors.New(response.Status), + ) + } + if wireErr.Code == 0 { + // code not set? default to one implied by HTTP status + wireErr.Code = httpToCode(response.StatusCode) + } + serverErr := wireErr.asError() + if serverErr == nil { + return nil + } + serverErr.meta = cc.responseHeader.Clone() + mergeHeaders(serverErr.meta, cc.responseTrailer) + return serverErr + } + return nil +} + +type connectStreamingClientConn struct { + spec Spec + peer Peer + duplexCall *duplexHTTPCall + compressionPools readOnlyCompressionPools + bufferPool *bufferPool + codec Codec + marshaler connectStreamingMarshaler + unmarshaler connectStreamingUnmarshaler + responseHeader http.Header + responseTrailer http.Header +} + +func (cc *connectStreamingClientConn) Spec() Spec { + return cc.spec +} + +func (cc *connectStreamingClientConn) Peer() Peer { + return cc.peer +} + +func (cc *connectStreamingClientConn) Send(msg any) error { + if err := cc.marshaler.Marshal(msg); err != nil { + return err + } + return nil // must be a literal nil: nil *Error is a non-nil error +} + +func (cc *connectStreamingClientConn) RequestHeader() http.Header { + 
return cc.duplexCall.Header() +} + +func (cc *connectStreamingClientConn) CloseRequest() error { + return cc.duplexCall.CloseWrite() +} + +func (cc *connectStreamingClientConn) Receive(msg any) error { + if err := cc.duplexCall.BlockUntilResponseReady(); err != nil { + return err + } + err := cc.unmarshaler.Unmarshal(msg) + if err == nil { + return nil + } + // See if the server sent an explicit error in the end-of-stream message. + mergeHeaders(cc.responseTrailer, cc.unmarshaler.Trailer()) + if serverErr := cc.unmarshaler.EndStreamError(); serverErr != nil { + // This is expected from a protocol perspective, but receiving an + // end-of-stream message means that we're _not_ getting a regular message. + // For users to realize that the stream has ended, Receive must return an + // error. + serverErr.meta = cc.responseHeader.Clone() + mergeHeaders(serverErr.meta, cc.responseTrailer) + _ = cc.duplexCall.CloseWrite() + return serverErr + } + // If the error is EOF but not from a last message, we want to return + // io.ErrUnexpectedEOF instead. + if errors.Is(err, io.EOF) && !errors.Is(err, errSpecialEnvelope) { + err = errorf(CodeInternal, "protocol error: %w", io.ErrUnexpectedEOF) + } + // There's no error in the trailers, so this was probably an error + // converting the bytes to a message, an error reading from the network, or + // just an EOF. We're going to return it to the user, but we also want to + // close the writer so Send errors out. 
+ _ = cc.duplexCall.CloseWrite() + return err +} + +func (cc *connectStreamingClientConn) ResponseHeader() http.Header { + _ = cc.duplexCall.BlockUntilResponseReady() + return cc.responseHeader +} + +func (cc *connectStreamingClientConn) ResponseTrailer() http.Header { + _ = cc.duplexCall.BlockUntilResponseReady() + return cc.responseTrailer +} + +func (cc *connectStreamingClientConn) CloseResponse() error { + return cc.duplexCall.CloseRead() +} + +func (cc *connectStreamingClientConn) onRequestSend(fn func(*http.Request)) { + cc.duplexCall.onRequestSend = fn +} + +func (cc *connectStreamingClientConn) validateResponse(response *http.Response) *Error { + if response.StatusCode != http.StatusOK { + return errorf(httpToCode(response.StatusCode), "HTTP status %v", response.Status) + } + if err := connectValidateStreamResponseContentType( + cc.codec.Name(), + cc.spec.StreamType, + getHeaderCanonical(response.Header, headerContentType), + ); err != nil { + return err + } + compression := getHeaderCanonical(response.Header, connectStreamingHeaderCompression) + if compression != "" && + compression != compressionIdentity && + !cc.compressionPools.Contains(compression) { + return errorf( + CodeInternal, + "unknown encoding %q: accepted encodings are %v", + compression, + cc.compressionPools.CommaSeparatedNames(), + ) + } + cc.unmarshaler.compressionPool = cc.compressionPools.Get(compression) + mergeHeaders(cc.responseHeader, response.Header) + return nil +} + +type connectUnaryHandlerConn struct { + spec Spec + peer Peer + request *http.Request + responseWriter http.ResponseWriter + marshaler connectUnaryMarshaler + unmarshaler connectUnaryUnmarshaler + responseTrailer http.Header +} + +func (hc *connectUnaryHandlerConn) Spec() Spec { + return hc.spec +} + +func (hc *connectUnaryHandlerConn) Peer() Peer { + return hc.peer +} + +func (hc *connectUnaryHandlerConn) Receive(msg any) error { + if err := hc.unmarshaler.Unmarshal(msg); err != nil { + return err + } + return nil 
// must be a literal nil: nil *Error is a non-nil error +} + +func (hc *connectUnaryHandlerConn) RequestHeader() http.Header { + return hc.request.Header +} + +func (hc *connectUnaryHandlerConn) Send(msg any) error { + hc.mergeResponseHeader(nil /* error */) + if err := hc.marshaler.Marshal(msg); err != nil { + return err + } + return nil // must be a literal nil: nil *Error is a non-nil error +} + +func (hc *connectUnaryHandlerConn) ResponseHeader() http.Header { + return hc.responseWriter.Header() +} + +func (hc *connectUnaryHandlerConn) ResponseTrailer() http.Header { + return hc.responseTrailer +} + +func (hc *connectUnaryHandlerConn) Close(err error) error { + if !hc.marshaler.wroteHeader { + hc.mergeResponseHeader(err) + // If the handler received a GET request and the resource hasn't changed, + // return a 304. + if len(hc.peer.Query) > 0 && IsNotModifiedError(err) { + hc.responseWriter.WriteHeader(http.StatusNotModified) + return hc.request.Body.Close() + } + } + if err == nil || hc.marshaler.wroteHeader { + return hc.request.Body.Close() + } + // In unary Connect, errors always use application/json. + setHeaderCanonical(hc.responseWriter.Header(), headerContentType, connectUnaryContentTypeJSON) + hc.responseWriter.WriteHeader(connectCodeToHTTP(CodeOf(err))) + data, marshalErr := json.Marshal(newConnectWireError(err)) + if marshalErr != nil { + _ = hc.request.Body.Close() + return errorf(CodeInternal, "marshal error: %w", err) + } + if _, writeErr := hc.responseWriter.Write(data); writeErr != nil { + _ = hc.request.Body.Close() + return writeErr + } + return hc.request.Body.Close() +} + +func (hc *connectUnaryHandlerConn) getHTTPMethod() string { + return hc.request.Method +} + +func (hc *connectUnaryHandlerConn) mergeResponseHeader(err error) { + header := hc.responseWriter.Header() + if hc.request.Method == http.MethodGet { + // The response content varies depending on the compression that the client + // requested (if any). 
GETs are potentially cacheable, so we should ensure + // that the Vary header includes at least Accept-Encoding (and not overwrite any values already set). + header[headerVary] = append(header[headerVary], connectUnaryHeaderAcceptCompression) + } + if err != nil { + if connectErr, ok := asError(err); ok && !connectErr.wireErr { + mergeNonProtocolHeaders(header, connectErr.meta) + } + } + for k, v := range hc.responseTrailer { + header[connectUnaryTrailerPrefix+k] = v + } +} + +type connectStreamingHandlerConn struct { + spec Spec + peer Peer + request *http.Request + responseWriter http.ResponseWriter + marshaler connectStreamingMarshaler + unmarshaler connectStreamingUnmarshaler + responseTrailer http.Header +} + +func (hc *connectStreamingHandlerConn) Spec() Spec { + return hc.spec +} + +func (hc *connectStreamingHandlerConn) Peer() Peer { + return hc.peer +} + +func (hc *connectStreamingHandlerConn) Receive(msg any) error { + if err := hc.unmarshaler.Unmarshal(msg); err != nil { + // Clients may not send end-of-stream metadata, so we don't need to handle + // errSpecialEnvelope. 
+ return err + } + return nil // must be a literal nil: nil *Error is a non-nil error +} + +func (hc *connectStreamingHandlerConn) RequestHeader() http.Header { + return hc.request.Header +} + +func (hc *connectStreamingHandlerConn) Send(msg any) error { + defer flushResponseWriter(hc.responseWriter) + if err := hc.marshaler.Marshal(msg); err != nil { + return err + } + return nil // must be a literal nil: nil *Error is a non-nil error +} + +func (hc *connectStreamingHandlerConn) ResponseHeader() http.Header { + return hc.responseWriter.Header() +} + +func (hc *connectStreamingHandlerConn) ResponseTrailer() http.Header { + return hc.responseTrailer +} + +func (hc *connectStreamingHandlerConn) Close(err error) error { + defer flushResponseWriter(hc.responseWriter) + if err := hc.marshaler.MarshalEndStream(err, hc.responseTrailer); err != nil { + _ = hc.request.Body.Close() + return err + } + // We don't want to copy unread portions of the body to /dev/null here: if + // the client hasn't closed the request body, we'll block until the server + // timeout kicks in. This could happen because the client is malicious, but + // a well-intentioned client may just not expect the server to be returning + // an error for a streaming RPC. Better to accept that we can't always reuse + // TCP connections. 
+ if err := hc.request.Body.Close(); err != nil { + if connectErr, ok := asError(err); ok { + return connectErr + } + return NewError(CodeUnknown, err) + } + return nil // must be a literal nil: nil *Error is a non-nil error +} + +type connectStreamingMarshaler struct { + envelopeWriter +} + +func (m *connectStreamingMarshaler) MarshalEndStream(err error, trailer http.Header) *Error { + end := &connectEndStreamMessage{Trailer: trailer} + if err != nil { + end.Error = newConnectWireError(err) + if connectErr, ok := asError(err); ok && !connectErr.wireErr { + mergeNonProtocolHeaders(end.Trailer, connectErr.meta) + } + } + data, marshalErr := json.Marshal(end) + if marshalErr != nil { + return errorf(CodeInternal, "marshal end stream: %w", marshalErr) + } + raw := bytes.NewBuffer(data) + defer m.envelopeWriter.bufferPool.Put(raw) + return m.Write(&envelope{ + Data: raw, + Flags: connectFlagEnvelopeEndStream, + }) +} + +type connectStreamingUnmarshaler struct { + envelopeReader + + endStreamErr *Error + trailer http.Header +} + +func (u *connectStreamingUnmarshaler) Unmarshal(message any) *Error { + err := u.envelopeReader.Unmarshal(message) + if err == nil { + return nil + } + if !errors.Is(err, errSpecialEnvelope) { + return err + } + env := u.last + data := env.Data + u.last.Data = nil // don't keep a reference to it + defer u.bufferPool.Put(data) + if !env.IsSet(connectFlagEnvelopeEndStream) { + return errorf(CodeInternal, "protocol error: invalid envelope flags %d", env.Flags) + } + var end connectEndStreamMessage + if err := json.Unmarshal(data.Bytes(), &end); err != nil { + return errorf(CodeInternal, "unmarshal end stream message: %w", err) + } + for name, value := range end.Trailer { + canonical := http.CanonicalHeaderKey(name) + if name != canonical { + delHeaderCanonical(end.Trailer, name) + end.Trailer[canonical] = append(end.Trailer[canonical], value...) 
+ } + } + u.trailer = end.Trailer + u.endStreamErr = end.Error.asError() + return errSpecialEnvelope +} + +func (u *connectStreamingUnmarshaler) Trailer() http.Header { + return u.trailer +} + +func (u *connectStreamingUnmarshaler) EndStreamError() *Error { + return u.endStreamErr +} + +type connectUnaryMarshaler struct { + ctx context.Context //nolint:containedctx + sender messageSender + codec Codec + compressMinBytes int + compressionName string + compressionPool *compressionPool + bufferPool *bufferPool + header http.Header + sendMaxBytes int + wroteHeader bool +} + +func (m *connectUnaryMarshaler) Marshal(message any) *Error { + if message == nil { + return m.write(nil) + } + var data []byte + var err error + if appender, ok := m.codec.(marshalAppender); ok { + data, err = appender.MarshalAppend(m.bufferPool.Get().Bytes(), message) + } else { + // Can't avoid allocating the slice, but we'll reuse it. + data, err = m.codec.Marshal(message) + } + if err != nil { + return errorf(CodeInternal, "marshal message: %w", err) + } + uncompressed := bytes.NewBuffer(data) + defer m.bufferPool.Put(uncompressed) + if len(data) < m.compressMinBytes || m.compressionPool == nil { + if m.sendMaxBytes > 0 && len(data) > m.sendMaxBytes { + return NewError(CodeResourceExhausted, fmt.Errorf("message size %d exceeds sendMaxBytes %d", len(data), m.sendMaxBytes)) + } + return m.write(data) + } + compressed := m.bufferPool.Get() + defer m.bufferPool.Put(compressed) + if err := m.compressionPool.Compress(compressed, uncompressed); err != nil { + return err + } + if m.sendMaxBytes > 0 && compressed.Len() > m.sendMaxBytes { + return NewError(CodeResourceExhausted, fmt.Errorf("compressed message size %d exceeds sendMaxBytes %d", compressed.Len(), m.sendMaxBytes)) + } + setHeaderCanonical(m.header, connectUnaryHeaderCompression, m.compressionName) + return m.write(compressed.Bytes()) +} + +func (m *connectUnaryMarshaler) write(data []byte) *Error { + m.wroteHeader = true + payload := 
bytes.NewReader(data) + if _, err := m.sender.Send(payload); err != nil { + err = wrapIfContextError(err) + if connectErr, ok := asError(err); ok { + return connectErr + } + return errorf(CodeUnknown, "write message: %w", err) + } + return nil +} + +type connectUnaryRequestMarshaler struct { + connectUnaryMarshaler + + enableGet bool + getURLMaxBytes int + getUseFallback bool + stableCodec stableCodec + duplexCall *duplexHTTPCall +} + +func (m *connectUnaryRequestMarshaler) Marshal(message any) *Error { + if m.enableGet { + if m.stableCodec == nil && !m.getUseFallback { + return errorf(CodeInternal, "codec %s doesn't support stable marshal; can't use get", m.codec.Name()) + } + if m.stableCodec != nil { + return m.marshalWithGet(message) + } + } + return m.connectUnaryMarshaler.Marshal(message) +} + +func (m *connectUnaryRequestMarshaler) marshalWithGet(message any) *Error { + // TODO(jchadwick-buf): This function is mostly a superset of + // connectUnaryMarshaler.Marshal. This should be reconciled at some point. 
+ var data []byte + var err error + if message != nil { + data, err = m.stableCodec.MarshalStable(message) + if err != nil { + return errorf(CodeInternal, "marshal message stable: %w", err) + } + } + isTooBig := m.sendMaxBytes > 0 && len(data) > m.sendMaxBytes + if isTooBig && m.compressionPool == nil { + return NewError(CodeResourceExhausted, fmt.Errorf( + "message size %d exceeds sendMaxBytes %d: enabling request compression may help", + len(data), + m.sendMaxBytes, + )) + } + if !isTooBig { + url := m.buildGetURL(data, false /* compressed */) + if m.getURLMaxBytes <= 0 || len(url.String()) < m.getURLMaxBytes { + m.writeWithGet(url) + return nil + } + if m.compressionPool == nil { + if m.getUseFallback { + return m.write(data) + } + return NewError(CodeResourceExhausted, fmt.Errorf( + "url size %d exceeds getURLMaxBytes %d: enabling request compression may help", + len(url.String()), + m.getURLMaxBytes, + )) + } + } + // Compress message to try to make it fit in the URL. + uncompressed := bytes.NewBuffer(data) + defer m.bufferPool.Put(uncompressed) + compressed := m.bufferPool.Get() + defer m.bufferPool.Put(compressed) + if err := m.compressionPool.Compress(compressed, uncompressed); err != nil { + return err + } + if m.sendMaxBytes > 0 && compressed.Len() > m.sendMaxBytes { + return NewError(CodeResourceExhausted, fmt.Errorf("compressed message size %d exceeds sendMaxBytes %d", compressed.Len(), m.sendMaxBytes)) + } + url := m.buildGetURL(compressed.Bytes(), true /* compressed */) + if m.getURLMaxBytes <= 0 || len(url.String()) < m.getURLMaxBytes { + m.writeWithGet(url) + return nil + } + if m.getUseFallback { + setHeaderCanonical(m.header, connectUnaryHeaderCompression, m.compressionName) + return m.write(compressed.Bytes()) + } + return NewError(CodeResourceExhausted, fmt.Errorf("compressed url size %d exceeds getURLMaxBytes %d", len(url.String()), m.getURLMaxBytes)) +} + +func (m *connectUnaryRequestMarshaler) buildGetURL(data []byte, compressed bool) 
*url.URL { + url := *m.duplexCall.URL() + query := url.Query() + query.Set(connectUnaryConnectQueryParameter, connectUnaryConnectQueryValue) + query.Set(connectUnaryEncodingQueryParameter, m.codec.Name()) + if m.stableCodec.IsBinary() || compressed { + query.Set(connectUnaryMessageQueryParameter, encodeBinaryQueryValue(data)) + query.Set(connectUnaryBase64QueryParameter, "1") + } else { + query.Set(connectUnaryMessageQueryParameter, string(data)) + } + if compressed { + query.Set(connectUnaryCompressionQueryParameter, m.compressionName) + } + url.RawQuery = query.Encode() + return &url +} + +func (m *connectUnaryRequestMarshaler) writeWithGet(url *url.URL) { + delHeaderCanonical(m.header, connectHeaderProtocolVersion) + delHeaderCanonical(m.header, headerContentType) + delHeaderCanonical(m.header, headerContentEncoding) + delHeaderCanonical(m.header, headerContentLength) + m.duplexCall.SetMethod(http.MethodGet) + *m.duplexCall.URL() = *url +} + +type connectUnaryUnmarshaler struct { + ctx context.Context //nolint:containedctx + reader io.Reader + codec Codec + compressionPool *compressionPool + bufferPool *bufferPool + alreadyRead bool + readMaxBytes int +} + +func (u *connectUnaryUnmarshaler) Unmarshal(message any) *Error { + return u.UnmarshalFunc(message, u.codec.Unmarshal) +} + +func (u *connectUnaryUnmarshaler) UnmarshalFunc(message any, unmarshal func([]byte, any) error) *Error { + if u.alreadyRead { + return NewError(CodeInternal, io.EOF) + } + u.alreadyRead = true + data := u.bufferPool.Get() + defer u.bufferPool.Put(data) + reader := u.reader + if u.readMaxBytes > 0 && int64(u.readMaxBytes) < math.MaxInt64 { + reader = io.LimitReader(u.reader, int64(u.readMaxBytes)+1) + } + // ReadFor ignores io.EOF, so any error here is real. 
+ bytesRead, err := data.ReadFrom(reader) + if err != nil { + err = wrapIfMaxBytesError(err, "read first %d bytes of message", bytesRead) + err = wrapIfContextDone(u.ctx, err) + if connectErr, ok := asError(err); ok { + return connectErr + } + return errorf(CodeUnknown, "read message: %w", err) + } + if u.readMaxBytes > 0 && bytesRead > int64(u.readMaxBytes) { + // Attempt to read to end in order to allow connection re-use + discardedBytes, err := io.Copy(io.Discard, u.reader) + if err != nil { + return errorf(CodeResourceExhausted, "message is larger than configured max %d - unable to determine message size: %w", u.readMaxBytes, err) + } + return errorf(CodeResourceExhausted, "message size %d is larger than configured max %d", bytesRead+discardedBytes, u.readMaxBytes) + } + if data.Len() > 0 && u.compressionPool != nil { + decompressed := u.bufferPool.Get() + defer u.bufferPool.Put(decompressed) + if err := u.compressionPool.Decompress(decompressed, data, int64(u.readMaxBytes)); err != nil { + return err + } + data = decompressed + } + if err := unmarshal(data.Bytes(), message); err != nil { + return errorf(CodeInvalidArgument, "unmarshal message: %w", err) + } + return nil +} + +type connectWireDetail ErrorDetail + +func (d *connectWireDetail) MarshalJSON() ([]byte, error) { + if d.wireJSON != "" { + // If we unmarshaled this detail from JSON, return the original data. This + // lets proxies w/o protobuf descriptors preserve human-readable details. + return []byte(d.wireJSON), nil + } + wire := struct { + Type string `json:"type"` + Value string `json:"value"` + Debug json.RawMessage `json:"debug,omitempty"` + }{ + Type: typeNameForURL(d.pbAny.GetTypeUrl()), + Value: base64.RawStdEncoding.EncodeToString(d.pbAny.GetValue()), + } + // Try to produce debug info, but expect failure when we don't have + // descriptors. 
+ msg, err := d.getInner() + if err == nil { + var codec protoJSONCodec + debug, err := codec.Marshal(msg) + if err == nil { + wire.Debug = debug + } + } + return json.Marshal(wire) +} + +func (d *connectWireDetail) UnmarshalJSON(data []byte) error { + var wire struct { + Type string `json:"type"` + Value string `json:"value"` + } + if err := json.Unmarshal(data, &wire); err != nil { + return err + } + if !strings.Contains(wire.Type, "/") { + wire.Type = defaultAnyResolverPrefix + wire.Type + } + decoded, err := DecodeBinaryHeader(wire.Value) + if err != nil { + return fmt.Errorf("decode base64: %w", err) + } + *d = connectWireDetail{ + pbAny: &anypb.Any{ + TypeUrl: wire.Type, + Value: decoded, + }, + wireJSON: string(data), + } + return nil +} + +func (d *connectWireDetail) getInner() (proto.Message, error) { + if d.pbInner != nil { + return d.pbInner, nil + } + return d.pbAny.UnmarshalNew() +} + +type connectWireError struct { + Code Code `json:"code"` + Message string `json:"message,omitempty"` + Details []*connectWireDetail `json:"details,omitempty"` +} + +func newConnectWireError(err error) *connectWireError { + wire := &connectWireError{ + Code: CodeUnknown, + Message: err.Error(), + } + if connectErr, ok := asError(err); ok { + wire.Code = connectErr.Code() + wire.Message = connectErr.Message() + if len(connectErr.details) > 0 { + wire.Details = make([]*connectWireDetail, len(connectErr.details)) + for i, detail := range connectErr.details { + wire.Details[i] = (*connectWireDetail)(detail) + } + } + } + return wire +} + +func (e *connectWireError) asError() *Error { + if e == nil { + return nil + } + if e.Code < minCode || e.Code > maxCode { + e.Code = CodeUnknown + } + err := NewWireError(e.Code, errors.New(e.Message)) + if len(e.Details) > 0 { + err.details = make([]*ErrorDetail, len(e.Details)) + for i, detail := range e.Details { + err.details[i] = (*ErrorDetail)(detail) + } + } + return err +} + +func (e *connectWireError) UnmarshalJSON(data []byte) 
error { + // We want to be lenient if the JSON has an unrecognized or invalid code. + // So if that occurs, we leave the code unset but can still de-serialize + // the other fields from the input JSON. + var wireError struct { + Code string `json:"code"` + Message string `json:"message"` + Details []*connectWireDetail `json:"details"` + } + err := json.Unmarshal(data, &wireError) + if err != nil { + return err + } + e.Message = wireError.Message + e.Details = wireError.Details + // This will leave e.Code unset if we can't unmarshal the given string. + _ = e.Code.UnmarshalText([]byte(wireError.Code)) + return nil +} + +type connectEndStreamMessage struct { + Error *connectWireError `json:"error,omitempty"` + Trailer http.Header `json:"metadata,omitempty"` +} + +func connectCodeToHTTP(code Code) int { + // Return literals rather than named constants from the HTTP package to make + // it easier to compare this function to the Connect specification. + switch code { + case CodeCanceled: + return 499 + case CodeUnknown: + return 500 + case CodeInvalidArgument: + return 400 + case CodeDeadlineExceeded: + return 504 + case CodeNotFound: + return 404 + case CodeAlreadyExists: + return 409 + case CodePermissionDenied: + return 403 + case CodeResourceExhausted: + return 429 + case CodeFailedPrecondition: + return 400 + case CodeAborted: + return 409 + case CodeOutOfRange: + return 400 + case CodeUnimplemented: + return 501 + case CodeInternal: + return 500 + case CodeUnavailable: + return 503 + case CodeDataLoss: + return 500 + case CodeUnauthenticated: + return 401 + default: + return 500 // same as CodeUnknown + } +} + +func connectCodecForContentType(streamType StreamType, contentType string) string { + if streamType == StreamTypeUnary { + return strings.TrimPrefix(contentType, connectUnaryContentTypePrefix) + } + return strings.TrimPrefix(contentType, connectStreamingContentTypePrefix) +} + +func connectContentTypeForCodecName(streamType StreamType, name string) string { 
+ if streamType == StreamTypeUnary { + return connectUnaryContentTypePrefix + name + } + return connectStreamingContentTypePrefix + name +} + +// encodeBinaryQueryValue URL-safe base64-encodes data, without padding. +func encodeBinaryQueryValue(data []byte) string { + return base64.RawURLEncoding.EncodeToString(data) +} + +// binaryQueryValueReader creates a reader that can read either padded or +// unpadded URL-safe base64 from a string. +func binaryQueryValueReader(data string) io.Reader { + stringReader := strings.NewReader(data) + if len(data)%4 != 0 { + // Data definitely isn't padded. + return base64.NewDecoder(base64.RawURLEncoding, stringReader) + } + // Data is padded, or no padding was necessary. + return base64.NewDecoder(base64.URLEncoding, stringReader) +} + +// queryValueReader creates a reader for a string that may be URL-safe base64 +// encoded. +func queryValueReader(data string, base64Encoded bool) io.Reader { + if base64Encoded { + return binaryQueryValueReader(data) + } + return strings.NewReader(data) +} + +func connectValidateUnaryResponseContentType( + requestCodecName string, + httpMethod string, + statusCode int, + statusMsg string, + responseContentType string, +) *Error { + if statusCode != http.StatusOK { + if statusCode == http.StatusNotModified && httpMethod == http.MethodGet { + return NewWireError(CodeUnknown, errNotModifiedClient) + } + // Error responses must be JSON-encoded. + if responseContentType == connectUnaryContentTypePrefix+codecNameJSON || + responseContentType == connectUnaryContentTypePrefix+codecNameJSONCharsetUTF8 { + return nil + } + return NewError( + httpToCode(statusCode), + errors.New(statusMsg), + ) + } + // Normal responses must have valid content-type that indicates same codec as the request. + if !strings.HasPrefix(responseContentType, connectUnaryContentTypePrefix) { + // Doesn't even look like a Connect response? Use code "unknown". 
+ return errorf( + CodeUnknown, + "invalid content-type: %q; expecting %q", + responseContentType, + connectUnaryContentTypePrefix+requestCodecName, + ) + } + responseCodecName := connectCodecForContentType( + StreamTypeUnary, + responseContentType, + ) + if responseCodecName == requestCodecName { + return nil + } + // HACK: We likely want a better way to handle the optional "charset" parameter + // for application/json, instead of hard-coding. But this suffices for now. + if (responseCodecName == codecNameJSON && requestCodecName == codecNameJSONCharsetUTF8) || + (responseCodecName == codecNameJSONCharsetUTF8 && requestCodecName == codecNameJSON) { + // Both are JSON + return nil + } + return errorf( + CodeInternal, + "invalid content-type: %q; expecting %q", + responseContentType, + connectUnaryContentTypePrefix+requestCodecName, + ) +} + +func connectValidateStreamResponseContentType(requestCodecName string, streamType StreamType, responseContentType string) *Error { + // Responses must have valid content-type that indicates same codec as the request. + if !strings.HasPrefix(responseContentType, connectStreamingContentTypePrefix) { + // Doesn't even look like a Connect response? Use code "unknown". 
+ return errorf( + CodeUnknown, + "invalid content-type: %q; expecting %q", + responseContentType, + connectStreamingContentTypePrefix+requestCodecName, + ) + } + responseCodecName := connectCodecForContentType( + streamType, + responseContentType, + ) + if responseCodecName != requestCodecName { + return errorf( + CodeInternal, + "invalid content-type: %q; expecting %q", + responseContentType, + connectStreamingContentTypePrefix+requestCodecName, + ) + } + return nil +} + +func connectCheckProtocolVersion(request *http.Request, required bool) *Error { + switch request.Method { + case http.MethodGet: + version := request.URL.Query().Get(connectUnaryConnectQueryParameter) + if version == "" && required { + return errorf(CodeInvalidArgument, "missing required query parameter: set %s to %q", connectUnaryConnectQueryParameter, connectUnaryConnectQueryValue) + } else if version != "" && version != connectUnaryConnectQueryValue { + return errorf(CodeInvalidArgument, "%s must be %q: got %q", connectUnaryConnectQueryParameter, connectUnaryConnectQueryValue, version) + } + case http.MethodPost: + version := getHeaderCanonical(request.Header, connectHeaderProtocolVersion) + if version == "" && required { + return errorf(CodeInvalidArgument, "missing required header: set %s to %q", connectHeaderProtocolVersion, connectProtocolVersion) + } else if version != "" && version != connectProtocolVersion { + return errorf(CodeInvalidArgument, "%s must be %q: got %q", connectHeaderProtocolVersion, connectProtocolVersion, version) + } + default: + return errorf(CodeInvalidArgument, "unsupported method: %q", request.Method) + } + return nil +} diff --git a/vendor/connectrpc.com/connect/protocol_grpc.go b/vendor/connectrpc.com/connect/protocol_grpc.go new file mode 100644 index 0000000000..4c0e8125e7 --- /dev/null +++ b/vendor/connectrpc.com/connect/protocol_grpc.go @@ -0,0 +1,1010 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "math" + "net/http" + "net/textproto" + "runtime" + "strconv" + "strings" + "time" + + statusv1 "connectrpc.com/connect/internal/gen/connectext/grpc/status/v1" +) + +const ( + grpcHeaderCompression = "Grpc-Encoding" + grpcHeaderAcceptCompression = "Grpc-Accept-Encoding" + grpcHeaderTimeout = "Grpc-Timeout" + grpcHeaderStatus = "Grpc-Status" + grpcHeaderMessage = "Grpc-Message" + grpcHeaderDetails = "Grpc-Status-Details-Bin" + + grpcFlagEnvelopeTrailer = 0b10000000 + + grpcContentTypeDefault = "application/grpc" + grpcWebContentTypeDefault = "application/grpc-web" + grpcContentTypePrefix = grpcContentTypeDefault + "+" + grpcWebContentTypePrefix = grpcWebContentTypeDefault + "+" + + headerXUserAgent = "X-User-Agent" + + upperhex = "0123456789ABCDEF" +) + +var ( + errTrailersWithoutGRPCStatus = fmt.Errorf("protocol error: no %s trailer: %w", grpcHeaderStatus, io.ErrUnexpectedEOF) + + // defaultGrpcUserAgent follows + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#user-agents: + // + // While the protocol does not require a user-agent to function it is recommended + // that clients provide a structured user-agent string that provides a basic + // description of the calling library, version & platform to facilitate issue diagnosis + // in heterogeneous environments. 
The following structure is recommended to library developers: + // + // User-Agent → "grpc-" Language ?("-" Variant) "/" Version ?( " (" *(AdditionalProperty ";") ")" ) + // + //nolint:gochecknoglobals + defaultGrpcUserAgent = fmt.Sprintf("grpc-go-connect/%s (%s)", Version, runtime.Version()) + //nolint:gochecknoglobals + grpcAllowedMethods = map[string]struct{}{ + http.MethodPost: {}, + } +) + +type protocolGRPC struct { + web bool +} + +// NewHandler implements protocol, so it must return an interface. +func (g *protocolGRPC) NewHandler(params *protocolHandlerParams) protocolHandler { + bare, prefix := grpcContentTypeDefault, grpcContentTypePrefix + if g.web { + bare, prefix = grpcWebContentTypeDefault, grpcWebContentTypePrefix + } + contentTypes := make(map[string]struct{}) + for _, name := range params.Codecs.Names() { + contentTypes[canonicalizeContentType(prefix+name)] = struct{}{} + } + if params.Codecs.Get(codecNameProto) != nil { + contentTypes[bare] = struct{}{} + } + return &grpcHandler{ + protocolHandlerParams: *params, + web: g.web, + accept: contentTypes, + } +} + +// NewClient implements protocol, so it must return an interface. 
+func (g *protocolGRPC) NewClient(params *protocolClientParams) (protocolClient, error) { + peer := newPeerForURL(params.URL, ProtocolGRPC) + if g.web { + peer = newPeerForURL(params.URL, ProtocolGRPCWeb) + } + return &grpcClient{ + protocolClientParams: *params, + web: g.web, + peer: peer, + }, nil +} + +type grpcHandler struct { + protocolHandlerParams + + web bool + accept map[string]struct{} +} + +func (g *grpcHandler) Methods() map[string]struct{} { + return grpcAllowedMethods +} + +func (g *grpcHandler) ContentTypes() map[string]struct{} { + return g.accept +} + +func (*grpcHandler) SetTimeout(request *http.Request) (context.Context, context.CancelFunc, error) { + timeout, err := grpcParseTimeout(getHeaderCanonical(request.Header, grpcHeaderTimeout)) + if err != nil && !errors.Is(err, errNoTimeout) { + // Errors here indicate that the client sent an invalid timeout header, so + // the error text is safe to send back. + return nil, nil, NewError(CodeInvalidArgument, err) + } else if err != nil { + // err wraps errNoTimeout, nothing to do. + return request.Context(), nil, nil //nolint:nilerr + } + ctx, cancel := context.WithTimeout(request.Context(), timeout) + return ctx, cancel, nil +} + +func (g *grpcHandler) CanHandlePayload(_ *http.Request, contentType string) bool { + _, ok := g.accept[contentType] + return ok +} + +func (g *grpcHandler) NewConn( + responseWriter http.ResponseWriter, + request *http.Request, +) (handlerConnCloser, bool) { + ctx := request.Context() + // We need to parse metadata before entering the interceptor stack; we'll + // send the error to the client later on. 
+ requestCompression, responseCompression, failed := negotiateCompression( + g.CompressionPools, + getHeaderCanonical(request.Header, grpcHeaderCompression), + getHeaderCanonical(request.Header, grpcHeaderAcceptCompression), + ) + if failed == nil { + failed = checkServerStreamsCanFlush(g.Spec, responseWriter) + } + + // Write any remaining headers here: + // (1) any writes to the stream will implicitly send the headers, so we + // should get all of gRPC's required response headers ready. + // (2) interceptors should be able to see these headers. + // + // Since we know that these header keys are already in canonical form, we can + // skip the normalization in Header.Set. + header := responseWriter.Header() + header[headerContentType] = []string{getHeaderCanonical(request.Header, headerContentType)} + header[grpcHeaderAcceptCompression] = []string{g.CompressionPools.CommaSeparatedNames()} + if responseCompression != compressionIdentity { + header[grpcHeaderCompression] = []string{responseCompression} + } + + codecName := grpcCodecForContentType(g.web, getHeaderCanonical(request.Header, headerContentType)) + codec := g.Codecs.Get(codecName) // handler.go guarantees this is not nil + protocolName := ProtocolGRPC + if g.web { + protocolName = ProtocolGRPCWeb + } + conn := wrapHandlerConnWithCodedErrors(&grpcHandlerConn{ + spec: g.Spec, + peer: Peer{ + Addr: request.RemoteAddr, + Protocol: protocolName, + }, + web: g.web, + bufferPool: g.BufferPool, + protobuf: g.Codecs.Protobuf(), // for errors + marshaler: grpcMarshaler{ + envelopeWriter: envelopeWriter{ + ctx: ctx, + sender: writeSender{writer: responseWriter}, + compressionPool: g.CompressionPools.Get(responseCompression), + codec: codec, + compressMinBytes: g.CompressMinBytes, + bufferPool: g.BufferPool, + sendMaxBytes: g.SendMaxBytes, + }, + }, + responseWriter: responseWriter, + responseHeader: make(http.Header), + responseTrailer: make(http.Header), + request: request, + unmarshaler: grpcUnmarshaler{ + 
envelopeReader: envelopeReader{ + ctx: ctx, + reader: request.Body, + codec: codec, + compressionPool: g.CompressionPools.Get(requestCompression), + bufferPool: g.BufferPool, + readMaxBytes: g.ReadMaxBytes, + }, + web: g.web, + }, + }) + if failed != nil { + // Negotiation failed, so we can't establish a stream. + _ = conn.Close(failed) + return nil, false + } + return conn, true +} + +type grpcClient struct { + protocolClientParams + + web bool + peer Peer +} + +func (g *grpcClient) Peer() Peer { + return g.peer +} + +func (g *grpcClient) WriteRequestHeader(_ StreamType, header http.Header) { + // We know these header keys are in canonical form, so we can bypass all the + // checks in Header.Set. + if getHeaderCanonical(header, headerUserAgent) == "" { + header[headerUserAgent] = []string{defaultGrpcUserAgent} + } + if g.web && getHeaderCanonical(header, headerXUserAgent) == "" { + // The gRPC-Web pseudo-specification seems to require X-User-Agent rather + // than User-Agent for all clients, even if they're not browser-based. This + // is very odd for a backend client, so we'll split the difference and set + // both. + header[headerXUserAgent] = []string{defaultGrpcUserAgent} + } + header[headerContentType] = []string{grpcContentTypeForCodecName(g.web, g.Codec.Name())} + // gRPC handles compression on a per-message basis, so we don't want to + // compress the whole stream. By default, http.Client will ask the server + // to gzip the stream if we don't set Accept-Encoding. + header["Accept-Encoding"] = []string{compressionIdentity} + if g.CompressionName != "" && g.CompressionName != compressionIdentity { + header[grpcHeaderCompression] = []string{g.CompressionName} + } + if acceptCompression := g.CompressionPools.CommaSeparatedNames(); acceptCompression != "" { + header[grpcHeaderAcceptCompression] = []string{acceptCompression} + } + if !g.web { + // The gRPC-HTTP2 specification requires this - it flushes out proxies that + // don't support HTTP trailers. 
+ header["Te"] = []string{"trailers"} + } +} + +func (g *grpcClient) NewConn( + ctx context.Context, + spec Spec, + header http.Header, +) streamingClientConn { + if deadline, ok := ctx.Deadline(); ok { + encodedDeadline := grpcEncodeTimeout(time.Until(deadline)) + header[grpcHeaderTimeout] = []string{encodedDeadline} + } + duplexCall := newDuplexHTTPCall( + ctx, + g.HTTPClient, + g.URL, + spec, + header, + ) + conn := &grpcClientConn{ + spec: spec, + peer: g.Peer(), + duplexCall: duplexCall, + compressionPools: g.CompressionPools, + bufferPool: g.BufferPool, + protobuf: g.Protobuf, + marshaler: grpcMarshaler{ + envelopeWriter: envelopeWriter{ + ctx: ctx, + sender: duplexCall, + compressionPool: g.CompressionPools.Get(g.CompressionName), + codec: g.Codec, + compressMinBytes: g.CompressMinBytes, + bufferPool: g.BufferPool, + sendMaxBytes: g.SendMaxBytes, + }, + }, + unmarshaler: grpcUnmarshaler{ + envelopeReader: envelopeReader{ + ctx: ctx, + reader: duplexCall, + codec: g.Codec, + bufferPool: g.BufferPool, + readMaxBytes: g.ReadMaxBytes, + }, + }, + responseHeader: make(http.Header), + responseTrailer: make(http.Header), + } + duplexCall.SetValidateResponse(conn.validateResponse) + if g.web { + conn.unmarshaler.web = true + conn.readTrailers = func(unmarshaler *grpcUnmarshaler, _ *duplexHTTPCall) http.Header { + return unmarshaler.WebTrailer() + } + } else { + conn.readTrailers = func(_ *grpcUnmarshaler, call *duplexHTTPCall) http.Header { + // To access HTTP trailers, we need to read the body to EOF. + _, _ = discard(call) + return call.ResponseTrailer() + } + } + return wrapClientConnWithCodedErrors(conn) +} + +// grpcClientConn works for both gRPC and gRPC-Web. 
+type grpcClientConn struct { + spec Spec + peer Peer + duplexCall *duplexHTTPCall + compressionPools readOnlyCompressionPools + bufferPool *bufferPool + protobuf Codec // for errors + marshaler grpcMarshaler + unmarshaler grpcUnmarshaler + responseHeader http.Header + responseTrailer http.Header + readTrailers func(*grpcUnmarshaler, *duplexHTTPCall) http.Header +} + +func (cc *grpcClientConn) Spec() Spec { + return cc.spec +} + +func (cc *grpcClientConn) Peer() Peer { + return cc.peer +} + +func (cc *grpcClientConn) Send(msg any) error { + if err := cc.marshaler.Marshal(msg); err != nil { + return err + } + return nil // must be a literal nil: nil *Error is a non-nil error +} + +func (cc *grpcClientConn) RequestHeader() http.Header { + return cc.duplexCall.Header() +} + +func (cc *grpcClientConn) CloseRequest() error { + return cc.duplexCall.CloseWrite() +} + +func (cc *grpcClientConn) Receive(msg any) error { + if err := cc.duplexCall.BlockUntilResponseReady(); err != nil { + return err + } + err := cc.unmarshaler.Unmarshal(msg) + if err == nil { + return nil + } + mergeHeaders( + cc.responseTrailer, + cc.readTrailers(&cc.unmarshaler, cc.duplexCall), + ) + if errors.Is(err, io.EOF) && cc.unmarshaler.bytesRead == 0 && len(cc.responseTrailer) == 0 { + // No body and no trailers means a trailers-only response. + // Note: per the specification, only the HTTP status code and Content-Type + // should be treated as headers. The rest should be treated as trailing + // metadata. But it would be unsafe to mutate cc.responseHeader at this + // point. So we'll leave cc.responseHeader alone but copy the relevant + // metadata into cc.responseTrailer. + mergeHeaders(cc.responseTrailer, cc.responseHeader) + delHeaderCanonical(cc.responseTrailer, headerContentType) + + // Try to read the status out of the headers. + serverErr := grpcErrorForTrailer(cc.protobuf, cc.responseHeader) + if serverErr == nil { + // Status says "OK". So return original error (io.EOF). 
+ return err + } + serverErr.meta = cc.responseHeader.Clone() + return serverErr + } + + // See if the server sent an explicit error in the HTTP or gRPC-Web trailers. + serverErr := grpcErrorForTrailer(cc.protobuf, cc.responseTrailer) + if serverErr != nil && (errors.Is(err, io.EOF) || !errors.Is(serverErr, errTrailersWithoutGRPCStatus)) { + // We've either: + // - Cleanly read until the end of the response body and *not* received + // gRPC status trailers, which is a protocol error, or + // - Received an explicit error from the server. + // + // This is expected from a protocol perspective, but receiving trailers + // means that we're _not_ getting a message. For users to realize that + // the stream has ended, Receive must return an error. + serverErr.meta = cc.responseHeader.Clone() + mergeHeaders(serverErr.meta, cc.responseTrailer) + _ = cc.duplexCall.CloseWrite() + return serverErr + } + // This was probably an error converting the bytes to a message or an error + // reading from the network. We're going to return it to the + // user, but we also want to close writes so Send errors out. 
+ _ = cc.duplexCall.CloseWrite() + return err +} + +func (cc *grpcClientConn) ResponseHeader() http.Header { + _ = cc.duplexCall.BlockUntilResponseReady() + return cc.responseHeader +} + +func (cc *grpcClientConn) ResponseTrailer() http.Header { + _ = cc.duplexCall.BlockUntilResponseReady() + return cc.responseTrailer +} + +func (cc *grpcClientConn) CloseResponse() error { + return cc.duplexCall.CloseRead() +} + +func (cc *grpcClientConn) onRequestSend(fn func(*http.Request)) { + cc.duplexCall.onRequestSend = fn +} + +func (cc *grpcClientConn) validateResponse(response *http.Response) *Error { + if err := grpcValidateResponse( + response, + cc.responseHeader, + cc.compressionPools, + cc.unmarshaler.web, + cc.marshaler.codec.Name(), + ); err != nil { + return err + } + compression := getHeaderCanonical(response.Header, grpcHeaderCompression) + cc.unmarshaler.compressionPool = cc.compressionPools.Get(compression) + return nil +} + +type grpcHandlerConn struct { + spec Spec + peer Peer + web bool + bufferPool *bufferPool + protobuf Codec // for errors + marshaler grpcMarshaler + responseWriter http.ResponseWriter + responseHeader http.Header + responseTrailer http.Header + wroteToBody bool + request *http.Request + unmarshaler grpcUnmarshaler +} + +func (hc *grpcHandlerConn) Spec() Spec { + return hc.spec +} + +func (hc *grpcHandlerConn) Peer() Peer { + return hc.peer +} + +func (hc *grpcHandlerConn) Receive(msg any) error { + if err := hc.unmarshaler.Unmarshal(msg); err != nil { + return err // already coded + } + return nil // must be a literal nil: nil *Error is a non-nil error +} + +func (hc *grpcHandlerConn) RequestHeader() http.Header { + return hc.request.Header +} + +func (hc *grpcHandlerConn) Send(msg any) error { + defer flushResponseWriter(hc.responseWriter) + if !hc.wroteToBody { + mergeHeaders(hc.responseWriter.Header(), hc.responseHeader) + hc.wroteToBody = true + } + if err := hc.marshaler.Marshal(msg); err != nil { + return err + } + return nil // must 
be a literal nil: nil *Error is a non-nil error +} + +func (hc *grpcHandlerConn) ResponseHeader() http.Header { + return hc.responseHeader +} + +func (hc *grpcHandlerConn) ResponseTrailer() http.Header { + return hc.responseTrailer +} + +func (hc *grpcHandlerConn) Close(err error) (retErr error) { + defer func() { + // We don't want to copy unread portions of the body to /dev/null here: if + // the client hasn't closed the request body, we'll block until the server + // timeout kicks in. This could happen because the client is malicious, but + // a well-intentioned client may just not expect the server to be returning + // an error for a streaming RPC. Better to accept that we can't always reuse + // TCP connections. + closeErr := hc.request.Body.Close() + if retErr == nil { + retErr = closeErr + } + }() + defer flushResponseWriter(hc.responseWriter) + // If we haven't written the headers yet, do so. + if !hc.wroteToBody { + mergeHeaders(hc.responseWriter.Header(), hc.responseHeader) + } + // gRPC always sends the error's code, message, details, and metadata as + // trailing metadata. The Connect protocol doesn't do this, so we don't want + // to mutate the trailers map that the user sees. + mergedTrailers := make( + http.Header, + len(hc.responseTrailer)+2, // always make space for status & message + ) + mergeHeaders(mergedTrailers, hc.responseTrailer) + grpcErrorToTrailer(mergedTrailers, hc.protobuf, err) + if hc.web && !hc.wroteToBody && len(hc.responseHeader) == 0 { + // We're using gRPC-Web, we haven't yet written to the body, and there are no + // custom headers. That means we can send a "trailers-only" response and send + // trailing metadata as HTTP headers (instead of as trailers). + mergeHeaders(hc.responseWriter.Header(), mergedTrailers) + return nil + } + if hc.web { + // We're using gRPC-Web and we've already sent the headers, so we write + // trailing metadata to the HTTP body. 
+ if err := hc.marshaler.MarshalWebTrailers(mergedTrailers); err != nil { + return err + } + return nil // must be a literal nil: nil *Error is a non-nil error + } + // We're using standard gRPC. Even if we haven't written to the body and + // we're sending a "trailers-only" response, we must send trailing metadata + // as HTTP trailers. (If we had frame-level control of the HTTP/2 layer, we + // could send trailers-only responses as a single HEADER frame and no DATA + // frames, but net/http doesn't expose APIs that low-level.) + // + // In net/http's ResponseWriter API, we send HTTP trailers by writing to the + // headers map with a special prefix. This prefixing is an implementation + // detail, so we should hide it and _not_ mutate the user-visible headers. + // + // Note that this is _very_ finicky and difficult to test with net/http, + // since correctness depends on low-level framing details. Breaking this + // logic breaks Envoy's gRPC-Web translation. + for key, values := range mergedTrailers { + for _, value := range values { + // These are potentially user-supplied, so we can't assume they're in + // canonical form. + hc.responseWriter.Header().Add(http.TrailerPrefix+key, value) + } + } + return nil +} + +type grpcMarshaler struct { + envelopeWriter +} + +func (m *grpcMarshaler) MarshalWebTrailers(trailer http.Header) *Error { + raw := m.envelopeWriter.bufferPool.Get() + defer m.envelopeWriter.bufferPool.Put(raw) + for key, values := range trailer { + // Per the Go specification, keys inserted during iteration may be produced + // later in the iteration or may be skipped. For safety, avoid mutating the + // map if the key is already lower-cased. 
+ lower := strings.ToLower(key) + if key == lower { + continue + } + delete(trailer, key) + trailer[lower] = values + } + if err := trailer.Write(raw); err != nil { + return errorf(CodeInternal, "format trailers: %w", err) + } + return m.Write(&envelope{ + Data: raw, + Flags: grpcFlagEnvelopeTrailer, + }) +} + +type grpcUnmarshaler struct { + envelopeReader + + web bool + webTrailer http.Header +} + +func (u *grpcUnmarshaler) Unmarshal(message any) *Error { + err := u.envelopeReader.Unmarshal(message) + if err == nil { + return nil + } + if !errors.Is(err, errSpecialEnvelope) { + return err + } + env := u.last + data := env.Data + u.last.Data = nil // don't keep a reference to it + defer u.bufferPool.Put(data) + if !u.web || !env.IsSet(grpcFlagEnvelopeTrailer) { + return errorf(CodeInternal, "protocol error: invalid envelope flags %d", env.Flags) + } + + // Per the gRPC-Web specification, trailers should be encoded as an HTTP/1 + // headers block _without_ the terminating newline. To make the headers + // parseable by net/textproto, we need to add the newline. 
+ if err := data.WriteByte('\n'); err != nil { + return errorf(CodeInternal, "unmarshal web trailers: %w", err) + } + bufferedReader := bufio.NewReader(data) + mimeReader := textproto.NewReader(bufferedReader) + mimeHeader, mimeErr := mimeReader.ReadMIMEHeader() + if mimeErr != nil { + return errorf( + CodeInternal, + "gRPC-Web protocol error: trailers invalid: %w", + mimeErr, + ) + } + u.webTrailer = http.Header(mimeHeader) + return errSpecialEnvelope +} + +func (u *grpcUnmarshaler) WebTrailer() http.Header { + return u.webTrailer +} + +func grpcValidateResponse( + response *http.Response, + header http.Header, + availableCompressors readOnlyCompressionPools, + web bool, + codecName string, +) *Error { + if response.StatusCode != http.StatusOK { + return errorf(httpToCode(response.StatusCode), "HTTP status %v", response.Status) + } + if err := grpcValidateResponseContentType( + web, + codecName, + getHeaderCanonical(response.Header, headerContentType), + ); err != nil { + return err + } + if compression := getHeaderCanonical(response.Header, grpcHeaderCompression); compression != "" && + compression != compressionIdentity && + !availableCompressors.Contains(compression) { + // Per https://github.com/grpc/grpc/blob/master/doc/compression.md, we + // should return CodeInternal and specify acceptable compression(s) (in + // addition to setting the Grpc-Accept-Encoding header). + return errorf( + CodeInternal, + "unknown encoding %q: accepted encodings are %v", + compression, + availableCompressors.CommaSeparatedNames(), + ) + } + // The response is valid, so we should expose the headers. + mergeHeaders(header, response.Header) + return nil +} + +// The gRPC wire protocol specifies that errors should be serialized using the +// binary Protobuf format, even if the messages in the request/response stream +// use a different codec. Consequently, this function needs a Protobuf codec to +// unmarshal error information in the headers. 
+// +// A nil error is only returned when a grpc-status key IS present, but it +// indicates a code of zero (no error). If no grpc-status key is present, this +// returns a non-nil *Error that wraps errTrailersWithoutGRPCStatus. +func grpcErrorForTrailer(protobuf Codec, trailer http.Header) *Error { + codeHeader := getHeaderCanonical(trailer, grpcHeaderStatus) + if codeHeader == "" { + // If there are no trailers at all, that's an internal error. + // But if it's an error determining the status code from the + // trailers, it's unknown. + code := CodeUnknown + if len(trailer) == 0 { + code = CodeInternal + } + return NewError(code, errTrailersWithoutGRPCStatus) + } + if codeHeader == "0" { + return nil + } + + code, err := strconv.ParseUint(codeHeader, 10 /* base */, 32 /* bitsize */) + if err != nil { + return errorf(CodeUnknown, "protocol error: invalid error code %q", codeHeader) + } + message, err := grpcPercentDecode(getHeaderCanonical(trailer, grpcHeaderMessage)) + if err != nil { + return errorf(CodeInternal, "protocol error: invalid error message %q", message) + } + retErr := NewWireError(Code(code), errors.New(message)) + + detailsBinaryEncoded := getHeaderCanonical(trailer, grpcHeaderDetails) + if len(detailsBinaryEncoded) > 0 { + detailsBinary, err := DecodeBinaryHeader(detailsBinaryEncoded) + if err != nil { + return errorf(CodeInternal, "server returned invalid grpc-status-details-bin trailer: %w", err) + } + var status statusv1.Status + if err := protobuf.Unmarshal(detailsBinary, &status); err != nil { + return errorf(CodeInternal, "server returned invalid protobuf for error details: %w", err) + } + for _, d := range status.GetDetails() { + retErr.details = append(retErr.details, &ErrorDetail{pbAny: d}) + } + // Prefer the Protobuf-encoded data to the headers (grpc-go does this too). 
+ retErr.code = Code(status.GetCode()) //nolint:gosec // No information loss + retErr.err = errors.New(status.GetMessage()) + } + + return retErr +} + +func grpcParseTimeout(timeout string) (time.Duration, error) { + if timeout == "" { + return 0, errNoTimeout + } + unit, err := grpcTimeoutUnitLookup(timeout[len(timeout)-1]) + if err != nil { + return 0, err + } + num, err := strconv.ParseInt(timeout[:len(timeout)-1], 10 /* base */, 64 /* bitsize */) + if err != nil || num < 0 { + return 0, fmt.Errorf("protocol error: invalid timeout %q", timeout) + } + if num > 99999999 { // timeout must be ASCII string of at most 8 digits + return 0, fmt.Errorf("protocol error: timeout %q is too long", timeout) + } + const grpcTimeoutMaxHours = math.MaxInt64 / int64(time.Hour) // how many hours fit into a time.Duration? + if unit == time.Hour && num > grpcTimeoutMaxHours { + // Timeout is effectively unbounded, so ignore it. The grpc-go + // implementation does the same thing. + return 0, errNoTimeout + } + return time.Duration(num) * unit, nil +} + +func grpcEncodeTimeout(timeout time.Duration) string { + if timeout <= 0 { + return "0n" + } + // The gRPC protocol limits timeouts to 8 characters (not counting the unit), + // so timeouts must be strictly less than 1e8 of the appropriate unit. + const grpcTimeoutMaxValue = 1e8 + var ( + size time.Duration + unit byte + ) + switch { + case timeout < time.Nanosecond*grpcTimeoutMaxValue: + size, unit = time.Nanosecond, 'n' + case timeout < time.Microsecond*grpcTimeoutMaxValue: + size, unit = time.Microsecond, 'u' + case timeout < time.Millisecond*grpcTimeoutMaxValue: + size, unit = time.Millisecond, 'm' + case timeout < time.Second*grpcTimeoutMaxValue: + size, unit = time.Second, 'S' + case timeout < time.Minute*grpcTimeoutMaxValue: + size, unit = time.Minute, 'M' + default: + // time.Duration is an int64 number of nanoseconds, so the largest + // expressible duration is less than 1e8 hours. 
+ size, unit = time.Hour, 'H' + } + buf := make([]byte, 0, 9) + buf = strconv.AppendInt(buf, int64(timeout/size), 10 /* base */) + buf = append(buf, unit) + return string(buf) +} + +func grpcTimeoutUnitLookup(unit byte) (time.Duration, error) { + switch unit { + case 'n': + return time.Nanosecond, nil + case 'u': + return time.Microsecond, nil + case 'm': + return time.Millisecond, nil + case 'S': + return time.Second, nil + case 'M': + return time.Minute, nil + case 'H': + return time.Hour, nil + default: + return 0, fmt.Errorf("protocol error: timeout has invalid unit %q", unit) + } +} + +func grpcCodecForContentType(web bool, contentType string) string { + if (!web && contentType == grpcContentTypeDefault) || (web && contentType == grpcWebContentTypeDefault) { + // implicitly protobuf + return codecNameProto + } + prefix := grpcContentTypePrefix + if web { + prefix = grpcWebContentTypePrefix + } + return strings.TrimPrefix(contentType, prefix) +} + +func grpcContentTypeForCodecName(web bool, name string) string { + if web { + return grpcWebContentTypePrefix + name + } + if name == codecNameProto { + // For compatibility with Google Cloud Platform's frontends, prefer an + // implicit default codec. See + // https://github.com/connectrpc/connect-go/pull/655#issuecomment-1915754523 + // for details. 
+ return grpcContentTypeDefault + } + return grpcContentTypePrefix + name +} + +func grpcErrorToTrailer(trailer http.Header, protobuf Codec, err error) { + if err == nil { + setHeaderCanonical(trailer, grpcHeaderStatus, "0") // zero is the gRPC OK status + return + } + if connectErr, ok := asError(err); ok && !connectErr.wireErr { + mergeNonProtocolHeaders(trailer, connectErr.meta) + } + var ( + status = grpcStatusForError(err) + code = status.GetCode() + message = status.GetMessage() + bin []byte + ) + if len(status.Details) > 0 { + var binErr error + bin, binErr = protobuf.Marshal(status) + if binErr != nil { + code = int32(CodeInternal) + message = fmt.Sprintf("marshal protobuf status: %v", binErr) + } + } + setHeaderCanonical(trailer, grpcHeaderStatus, strconv.Itoa(int(code))) + setHeaderCanonical(trailer, grpcHeaderMessage, grpcPercentEncode(message)) + if len(bin) > 0 { + setHeaderCanonical(trailer, grpcHeaderDetails, EncodeBinaryHeader(bin)) + } +} + +func grpcStatusForError(err error) *statusv1.Status { + status := &statusv1.Status{ + Code: int32(CodeUnknown), + Message: err.Error(), + } + if connectErr, ok := asError(err); ok { + status.Code = int32(connectErr.Code()) //nolint:gosec // No information loss + status.Message = connectErr.Message() + status.Details = connectErr.detailsAsAny() + } + return status +} + +// grpcPercentEncode follows RFC 3986 Section 2.1 and the gRPC HTTP/2 spec. +// It's a variant of URL-encoding with fewer reserved characters. It's intended +// to take UTF-8 encoded text and escape non-ASCII bytes so that they're valid +// HTTP/1 headers, while still maximizing readability of the data on the wire. +// +// The grpc-message trailer (used for human-readable error messages) should be +// percent-encoded. 
+// +// References: +// +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#responses +// https://datatracker.ietf.org/doc/html/rfc3986#section-2.1 +func grpcPercentEncode(msg string) string { + var hexCount int + for i := range len(msg) { + if grpcShouldEscape(msg[i]) { + hexCount++ + } + } + if hexCount == 0 { + return msg + } + // We need to escape some characters, so we'll need to allocate a new string. + var out strings.Builder + out.Grow(len(msg) + 2*hexCount) + for i := range len(msg) { + switch char := msg[i]; { + case grpcShouldEscape(char): + out.WriteByte('%') + out.WriteByte(upperhex[char>>4]) + out.WriteByte(upperhex[char&15]) + default: + out.WriteByte(char) + } + } + return out.String() +} + +func grpcPercentDecode(input string) (string, error) { + percentCount := 0 + for i := 0; i < len(input); { + switch input[i] { + case '%': + percentCount++ + if err := validateHex(input[i:]); err != nil { + return "", err + } + i += 3 + default: + i++ + } + } + if percentCount == 0 { + return input, nil + } + // We need to unescape some characters, so we'll need to allocate a new string. + var out strings.Builder + out.Grow(len(input) - 2*percentCount) + for i := 0; i < len(input); i++ { + switch input[i] { + case '%': + out.WriteByte(unhex(input[i+1])<<4 | unhex(input[i+2])) + i += 2 + default: + out.WriteByte(input[i]) + } + } + return out.String(), nil +} + +// Characters that need to be escaped are defined in gRPC's HTTP/2 spec. +// They're different from the generic set defined in RFC 3986. 
+func grpcShouldEscape(char byte) bool { + return char < ' ' || char > '~' || char == '%' +} + +func unhex(char byte) byte { + switch { + case '0' <= char && char <= '9': + return char - '0' + case 'a' <= char && char <= 'f': + return char - 'a' + 10 + case 'A' <= char && char <= 'F': + return char - 'A' + 10 + } + return 0 +} + +func isHex(char byte) bool { + return ('0' <= char && char <= '9') || ('a' <= char && char <= 'f') || ('A' <= char && char <= 'F') +} + +func validateHex(input string) error { + if len(input) < 3 || input[0] != '%' || !isHex(input[1]) || !isHex(input[2]) { + if len(input) > 3 { + input = input[:3] + } + return fmt.Errorf("invalid percent-encoded string %q", input) + } + return nil +} + +func grpcValidateResponseContentType(web bool, requestCodecName string, responseContentType string) *Error { + // Responses must have valid content-type that indicates same codec as the request. + bare, prefix := grpcContentTypeDefault, grpcContentTypePrefix + if web { + bare, prefix = grpcWebContentTypeDefault, grpcWebContentTypePrefix + } + if responseContentType == prefix+requestCodecName || + (requestCodecName == codecNameProto && responseContentType == bare) { + return nil + } + expectedContentType := bare + if requestCodecName != codecNameProto { + expectedContentType = prefix + requestCodecName + } + code := CodeInternal + if responseContentType != bare && !strings.HasPrefix(responseContentType, prefix) { + // Doesn't even look like a gRPC response? Use code "unknown". 
+ code = CodeUnknown + } + return errorf( + code, + "invalid content-type: %q; expecting %q", + responseContentType, + expectedContentType, + ) +} diff --git a/vendor/connectrpc.com/connect/recover.go b/vendor/connectrpc.com/connect/recover.go new file mode 100644 index 0000000000..2fee6ab274 --- /dev/null +++ b/vendor/connectrpc.com/connect/recover.go @@ -0,0 +1,64 @@ +// Copyright 2021-2025 The Connect Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connect + +import ( + "context" + "net/http" +) + +// recoverHandlerInterceptor lets handlers trap panics, perform side effects +// (like emitting logs or metrics), and present a friendlier error message to +// clients. +type recoverHandlerInterceptor struct { + Interceptor + + handle func(context.Context, Spec, http.Header, any) error +} + +func (i *recoverHandlerInterceptor) WrapUnary(next UnaryFunc) UnaryFunc { + return func(ctx context.Context, req AnyRequest) (_ AnyResponse, retErr error) { + if req.Spec().IsClient { + return next(ctx, req) + } + defer func() { + if r := recover(); r != nil { + // net/http checks for ErrAbortHandler with ==, so we should too. 
+ if r == http.ErrAbortHandler { //nolint:errorlint,goerr113 + panic(r) //nolint:forbidigo + } + retErr = i.handle(ctx, req.Spec(), req.Header(), r) + } + }() + res, err := next(ctx, req) + return res, err + } +} + +func (i *recoverHandlerInterceptor) WrapStreamingHandler(next StreamingHandlerFunc) StreamingHandlerFunc { + return func(ctx context.Context, conn StreamingHandlerConn) (retErr error) { + defer func() { + if r := recover(); r != nil { + // net/http checks for ErrAbortHandler with ==, so we should too. + if r == http.ErrAbortHandler { //nolint:errorlint,goerr113 + panic(r) //nolint:forbidigo + } + retErr = i.handle(ctx, conn.Spec(), conn.RequestHeader(), r) + } + }() + err := next(ctx, conn) + return err + } +} diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/func.go b/vendor/cuelabs.dev/go/oci/ociregistry/func.go index e3d44c52cb..83ab3863a0 100644 --- a/vendor/cuelabs.dev/go/oci/ociregistry/func.go +++ b/vendor/cuelabs.dev/go/oci/ociregistry/func.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "io" + "iter" ) var _ Interface = (*Funcs)(nil) @@ -54,9 +55,9 @@ type Funcs struct { DeleteBlob_ func(ctx context.Context, repo string, digest Digest) error DeleteManifest_ func(ctx context.Context, repo string, digest Digest) error DeleteTag_ func(ctx context.Context, repo string, name string) error - Repositories_ func(ctx context.Context, startAfter string) Seq[string] - Tags_ func(ctx context.Context, repo string, startAfter string) Seq[string] - Referrers_ func(ctx context.Context, repo string, digest Digest, artifactType string) Seq[Descriptor] + Repositories_ func(ctx context.Context, startAfter string) iter.Seq2[string, error] + Tags_ func(ctx context.Context, repo string, startAfter string) iter.Seq2[string, error] + Referrers_ func(ctx context.Context, repo string, digest Digest, artifactType string) iter.Seq2[Descriptor, error] } // This blesses Funcs as the canonical Interface implementation. 
@@ -174,21 +175,21 @@ func (f *Funcs) DeleteTag(ctx context.Context, repo string, name string) error { return f.newError(ctx, "DeleteTag", repo) } -func (f *Funcs) Repositories(ctx context.Context, startAfter string) Seq[string] { +func (f *Funcs) Repositories(ctx context.Context, startAfter string) iter.Seq2[string, error] { if f != nil && f.Repositories_ != nil { return f.Repositories_(ctx, startAfter) } return ErrorSeq[string](f.newError(ctx, "Repositories", "")) } -func (f *Funcs) Tags(ctx context.Context, repo string, startAfter string) Seq[string] { +func (f *Funcs) Tags(ctx context.Context, repo string, startAfter string) iter.Seq2[string, error] { if f != nil && f.Tags_ != nil { return f.Tags_(ctx, repo, startAfter) } return ErrorSeq[string](f.newError(ctx, "Tags", repo)) } -func (f *Funcs) Referrers(ctx context.Context, repo string, digest Digest, artifactType string) Seq[Descriptor] { +func (f *Funcs) Referrers(ctx context.Context, repo string, digest Digest, artifactType string) iter.Seq2[Descriptor, error] { if f != nil && f.Referrers_ != nil { return f.Referrers_(ctx, repo, digest, artifactType) } diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/interface.go b/vendor/cuelabs.dev/go/oci/ociregistry/interface.go index ade311d76a..3ea31d2dbb 100644 --- a/vendor/cuelabs.dev/go/oci/ociregistry/interface.go +++ b/vendor/cuelabs.dev/go/oci/ociregistry/interface.go @@ -59,6 +59,7 @@ package ociregistry import ( "context" "io" + "iter" "cuelabs.dev/go/oci/ociregistry/ociref" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -214,20 +215,20 @@ type Lister interface { // over all the repositories in the registry in lexical order. // If startAfter is non-empty, the iteration starts lexically // after, but not including, that repository. 
- Repositories(ctx context.Context, startAfter string) Seq[string] + Repositories(ctx context.Context, startAfter string) iter.Seq2[string, error] // Tags returns an iterator that can be used to iterate over all // the tags in the given repository in lexical order. If // startAfter is non-empty, the tags start lexically after, but // not including that tag. - Tags(ctx context.Context, repo string, startAfter string) Seq[string] + Tags(ctx context.Context, repo string, startAfter string) iter.Seq2[string, error] // Referrers returns an iterator that can be used to iterate over all // the manifests that have the given digest as their Subject. // If artifactType is non-zero, the results will be restricted to // only manifests with that type. // TODO is it possible to ask for multiple artifact types? - Referrers(ctx context.Context, repo string, digest Digest, artifactType string) Seq[Descriptor] + Referrers(ctx context.Context, repo string, digest Digest, artifactType string) iter.Seq2[Descriptor, error] } // BlobWriter provides a handle for uploading a blob to a registry. diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/iter.go b/vendor/cuelabs.dev/go/oci/ociregistry/iter.go index fcc034dc97..d68e4a2138 100644 --- a/vendor/cuelabs.dev/go/oci/ociregistry/iter.go +++ b/vendor/cuelabs.dev/go/oci/ociregistry/iter.go @@ -14,15 +14,16 @@ package ociregistry -// TODO(go1.24) when we can depend on Go 1.24, this should be: -// type Seq[T any] = iter.Seq2[T, error] +import "iter" -// Seq defines the type of an iterator sequence returned from -// the iterator functions. In general, a non-nil -// error means that the item is the last in the sequence. -type Seq[T any] func(yield func(T, error) bool) +// Seq is kept for backwards compatibility with existing implementations +// +// Deprecated: use iter.Seq2. 
+// +//go:fix inline +type Seq[T any] = iter.Seq2[T, error] -func All[T any](it Seq[T]) ([]T, error) { +func All[T any](it iter.Seq2[T, error]) ([]T, error) { xs := []T{} for x, err := range it { if err != nil { @@ -33,7 +34,7 @@ func All[T any](it Seq[T]) ([]T, error) { return xs, nil } -func SliceSeq[T any](xs []T) Seq[T] { +func SliceSeq[T any](xs []T) iter.Seq2[T, error] { return func(yield func(T, error) bool) { for _, x := range xs { if !yield(x, nil) { @@ -45,7 +46,7 @@ func SliceSeq[T any](xs []T) Seq[T] { // ErrorSeq returns an iterator that has no // items and always returns the given error. -func ErrorSeq[T any](err error) Seq[T] { +func ErrorSeq[T any](err error) iter.Seq2[T, error] { return func(yield func(T, error) bool) { yield(*new(T), err) } diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/authfile.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/authfile.go index a1c36eaaae..0d4e89895f 100644 --- a/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/authfile.go +++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/authfile.go @@ -96,6 +96,18 @@ func LoadWithEnv(runner HelperRunner, env []string) (*ConfigFile, error) { if env != nil { getenv = getenvFunc(env) } + // DOCKER_AUTH_CONFIG has precedence, therefore check if + // it has the inlined JSON. + if data := getenv("DOCKER_AUTH_CONFIG"); data != "" { + f, err := decodeConfigFile([]byte(data)) + if err != nil { + return nil, fmt.Errorf("invalid config: %v", err) + } + return &ConfigFile{ + data: f, + runner: runner, + }, nil + } for _, f := range configFileLocations { filename := f(getenv) if filename == "" { @@ -126,7 +138,8 @@ func LoadWithEnv(runner HelperRunner, env []string) (*ConfigFile, error) { // It uses runner to run any external helper commands; if runner // is nil, [ExecHelper] will be used. 
// -// In order it tries: +// In order, it tries: +// - $DOCKER_AUTH_CONFIG (inlined JSON) // - $DOCKER_CONFIG/config.json // - ~/.docker/config.json // - $XDG_RUNTIME_DIR/containers/auth.json diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/lister.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/lister.go index a804e93029..c3e955565b 100644 --- a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/lister.go +++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/lister.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "iter" "net/http" "slices" "strings" @@ -30,7 +31,7 @@ import ( "cuelabs.dev/go/oci/ociregistry/internal/ocirequest" ) -func (c *client) Repositories(ctx context.Context, startAfter string) ociregistry.Seq[string] { +func (c *client) Repositories(ctx context.Context, startAfter string) iter.Seq2[string, error] { return pager(ctx, c, &ocirequest.Request{ Kind: ocirequest.ReqCatalogList, ListN: c.listPageSize, @@ -50,7 +51,7 @@ func (c *client) Repositories(ctx context.Context, startAfter string) ociregistr }) } -func (c *client) Tags(ctx context.Context, repoName, startAfter string) ociregistry.Seq[string] { +func (c *client) Tags(ctx context.Context, repoName, startAfter string) iter.Seq2[string, error] { return pager(ctx, c, &ocirequest.Request{ Kind: ocirequest.ReqTagsList, Repo: repoName, @@ -72,7 +73,7 @@ func (c *client) Tags(ctx context.Context, repoName, startAfter string) ociregis }) } -func (c *client) Referrers(ctx context.Context, repoName string, digest ociregistry.Digest, artifactType string) ociregistry.Seq[ociregistry.Descriptor] { +func (c *client) Referrers(ctx context.Context, repoName string, digest ociregistry.Digest, artifactType string) iter.Seq2[ociregistry.Descriptor, error] { return pager(ctx, c, &ocirequest.Request{ Kind: ocirequest.ReqReferrersList, Repo: repoName, @@ -127,7 +128,7 @@ func (c *client) Referrers(ctx context.Context, repoName string, digest ociregis // parseResponse. 
It tries to use the Link header in each response to continue // the iteration, falling back to using the "last" query parameter if // canUseLast is true. -func pager[T any](ctx context.Context, c *client, initialReq *ocirequest.Request, canUseLast bool, parseResponse func(*http.Response) ([]T, error), okStatuses ...int) ociregistry.Seq[T] { +func pager[T any](ctx context.Context, c *client, initialReq *ocirequest.Request, canUseLast bool, parseResponse func(*http.Response) ([]T, error), okStatuses ...int) iter.Seq2[T, error] { return func(yield func(T, error) bool) { // We assume that the same auth scope is applicable to all page requests. req, err := newRequest(ctx, initialReq, nil) diff --git a/vendor/cuelang.org/go/cue/ast/ast.go b/vendor/cuelang.org/go/cue/ast/ast.go index 3ac2141051..e7d93aa551 100644 --- a/vendor/cuelang.org/go/cue/ast/ast.go +++ b/vendor/cuelang.org/go/cue/ast/ast.go @@ -18,6 +18,8 @@ package ast import ( "fmt" + "iter" + "slices" "strings" "cuelang.org/go/cue/literal" @@ -41,6 +43,12 @@ import ( // A Node represents any node in the abstract syntax tree. type Node interface { + // We should have invariants: + // 1. Pos() <= End() + // 2. If a node has children nodes, then all of those children + // nodes should fall within their parent's Pos() -> End() range. + // TODO: add tests to enforce these. + Pos() token.Pos // position of first character belonging to the node End() token.Pos // position of first character immediately after the node @@ -48,11 +56,6 @@ type Node interface { // the node or nil if there is no such position. pos() *token.Pos - // Deprecated: use [Comments] - Comments() []*CommentGroup - - // Deprecated: use [AddComment] - AddComment(*CommentGroup) commentInfo() *comments } @@ -140,6 +143,12 @@ type comments struct { func (c *comments) commentInfo() *comments { return c } +// TODO: remove these deprecated comment methods in late 2026. 
+// Note that we unfortunately cannot use `//go:fix inline`; +// for example, from the comments.Comments promoted method below, +// we cannot call the Comments API as it works on Node, the embedding type. + +// Deprecated: use [Comments]. func (c *comments) Comments() []*CommentGroup { if c.groups == nil { return []*CommentGroup{} @@ -147,9 +156,7 @@ func (c *comments) Comments() []*CommentGroup { return *c.groups } -// // AddComment adds the given comments to the fields. -// // If line is true the comment is inserted at the preceding token. - +// Deprecated: use [AddComment]. func (c *comments) AddComment(cg *CommentGroup) { if cg == nil { return @@ -167,8 +174,16 @@ func (c *comments) AddComment(cg *CommentGroup) { } } +// Deprecated: use [SetComments]. func (c *comments) SetComments(cgs []*CommentGroup) { if c.groups == nil { + if cgs == nil { + // Replacing no comments with a nil slice is a no-op. + // Avoid allocating below. + // Note that we continue for other zero-length slices, + // as the caller may want to reuse memory. + return + } a := cgs c.groups = &a return @@ -250,27 +265,25 @@ func (g *CommentGroup) Text() string { } // Split on newlines. - cl := strings.Split(c, "\n") + cl := strings.SplitSeq(c, "\n") // Walk lines, stripping trailing white space and adding to list. - for _, l := range cl { + for l := range cl { lines = append(lines, stripTrailingWhitespace(l)) } } // Remove leading blank lines; convert runs of // interior blank lines to a single blank line. - n := 0 - for _, line := range lines { - if line != "" || n > 0 && lines[n-1] != "" { - lines[n] = line - n++ - } - } - lines = lines[0:n] + lastBlank := true + lines = slices.DeleteFunc(lines, func(line string) bool { + remove := lastBlank && line == "" + lastBlank = line == "" + return remove + }) // Add final "" entry to get trailing newline from Join. 
- if n > 0 && lines[n-1] != "" { + if !lastBlank { lines = append(lines, "") } @@ -301,15 +314,12 @@ func (a *Attribute) Split() (key, body string) { // A Field represents a field declaration in a struct. type Field struct { - Label Label // must have at least one element. - // Deprecated: use [Field.Constraint] - Optional token.Pos - Constraint token.Token // token.ILLEGAL, token.OPTION, or token.NOT + Label Label // must have at least one element. + Alias *PostfixAlias // optional postfix alias (nil if no alias) + Constraint token.Token // token.ILLEGAL, token.OPTION, or token.NOT // No TokenPos: Value must be an StructLit with one field. TokenPos token.Pos - // Deprecated: the value is always [token.COLON] - Token token.Token Value Expr // the value associated with this field. @@ -348,10 +358,40 @@ func (a *Alias) Pos() token.Pos { return a.Ident.Pos() } func (a *Alias) pos() *token.Pos { return a.Ident.pos() } func (a *Alias) End() token.Pos { return a.Expr.End() } +// A PostfixAlias represents the new postfix alias syntax using ~. +// It appears in field declarations after the label. 
+// +// Simple form: label~X where X captures the field reference +// Dual form: label~(K,V) where K captures the label name string and V captures the field reference +type PostfixAlias struct { + Tilde token.Pos // position of "~" + + // Dual form: ~(K,V) + Lparen token.Pos // position of "(" (invalid if simple form) + Label *Ident // K: label name capture (nil if simple form) + Comma token.Pos // position of "," (invalid if simple form) + Rparen token.Pos // position of ")" (invalid if simple form) + + // Both forms: the field reference (always non-nil) + Field *Ident // X or V: captures the field reference + + comments +} + +func (a *PostfixAlias) Pos() token.Pos { return a.Tilde } +func (a *PostfixAlias) pos() *token.Pos { return &a.Tilde } +func (a *PostfixAlias) End() token.Pos { + if a.Rparen.IsValid() { + return a.Rparen.Add(1) + } + return a.Field.End() +} + // A Comprehension node represents a comprehension declaration. type Comprehension struct { - Clauses []Clause // There must be at least one clause. - Value Expr // Must be a struct TODO: change to Struct + Clauses []Clause // There must be at least one clause. + Value Expr // Must be a struct TODO: change to Struct + Fallback *FallbackClause // Optional else/fallback clause comments decl @@ -361,6 +401,9 @@ type Comprehension struct { func (x *Comprehension) Pos() token.Pos { return getPos(x) } func (x *Comprehension) pos() *token.Pos { return x.Clauses[0].pos() } func (x *Comprehension) End() token.Pos { + if x.Fallback != nil { + return x.Fallback.Body.End() + } return x.Value.End() } @@ -400,17 +443,49 @@ type Ident struct { Name string Scope Node // scope in which node was found or nil if referring directly - Node Node + Node Node // node referenced by this identifier, if any; see [cuelang.org/go/cue/ast/astutil.Resolve] comments label expr } +// NewPredeclared creates an [Ident] for a predeclared name such as "self", +// "int", or "matchN". name must not have the "__" prefix. 
+// +// When [cuelang.org/go/cue/ast/astutil.Sanitize] encounters an identifier +// created by NewPredeclared and the name is shadowed in scope, it renames the +// identifier to avoid the shadow. It currently does so by writing the +// "__"-prefixed form (e.g. "__self"), but this may change in the future. +// +// Use [Ident.IsPredeclared] to check if an identifier refers to a predeclared +// name. +func NewPredeclared(name string) *Ident { + return &Ident{Name: name, Node: predeclared} +} + +// IsPredeclared reports whether id was created by [NewPredeclared], +// i.e., whether it refers to a predeclared name. +func (id *Ident) IsPredeclared() bool { + return id.Node == predeclared +} + +// predeclared is a sentinel node used to mark identifiers that refer to +// predeclared names. +var predeclared Node = &predeclaredNode{} + +type predeclaredNode struct { + comments +} + +func (n *predeclaredNode) Pos() token.Pos { return token.NoPos } +func (n *predeclaredNode) pos() *token.Pos { return nil } +func (n *predeclaredNode) End() token.Pos { return token.NoPos } + // A BasicLit node represents a literal of basic type. type BasicLit struct { ValuePos token.Pos // literal position - Kind token.Token // INT, FLOAT, DURATION, or STRING + Kind token.Token // INT, FLOAT, STRING, NULL, TRUE, FALSE Value string // literal string; e.g. 42, 0x7f, 3.14, 1_234_567, 1e-9, 2.4i, 'a', '\x7f', "foo", or '\m\n\o' comments @@ -418,10 +493,22 @@ type BasicLit struct { label } -// TODO: introduce and use NewLabel and NewBytes and perhaps NewText (in the +// TODO: introduce and use NewBytes and perhaps NewText (in the // later case NewString would return a string or bytes type) to distinguish from // NewString. Consider how to pass indentation information. +// NewStringLabel creates a new string label with the given string, +// quoting it as a string literal only if necessary, +// as outlined in [StringLabelNeedsQuoting]. 
+// +// To create labels for definition or hidden fields, use [NewIdent]. +func NewStringLabel(name string) Label { + if StringLabelNeedsQuoting(name) { + return NewString(name) + } + return NewIdent(name) +} + // NewString creates a new BasicLit with a string value without position. // It quotes the given string. // Useful for ASTs generated by code other than the CUE parser. @@ -508,7 +595,6 @@ func NewStruct(fields ...interface{}) *StructLit { for i := 0; i < len(fields); i++ { var ( label Label - optional = token.NoPos constraint = token.ILLEGAL expr Expr ) @@ -545,10 +631,7 @@ func NewStruct(fields ...interface{}) *StructLit { break inner case token.Token: switch x { - case token.OPTION: - constraint = x - optional = token.Blank.Pos() - case token.NOT: + case token.OPTION, token.NOT: constraint = x case token.COLON, token.ILLEGAL: default: @@ -563,7 +646,6 @@ func NewStruct(fields ...interface{}) *StructLit { } s.Elts = append(s.Elts, &Field{ Label: label, - Optional: optional, Constraint: constraint, Value: expr, }) @@ -641,6 +723,33 @@ type LetClause struct { decl } +// A FallbackClause node represents an else or fallback clause in a comprehension. +// Used with `else` after if/try clauses, and `fallback` after for clauses. +type FallbackClause struct { + // TODO: note that the support for "else" is likely temporary, as + // we will move that functionality to an "if" and "try" element with an + // optional "else" body. + Fallback token.Pos // Position of "else" or "fallback" keyword + Body *StructLit + + comments + clause +} + +// A TryClause node represents a try clause in a comprehension. 
+// It can have two forms: +// - try { struct } - Ident/Expr are nil; body is in Comprehension.Value +// - try x = expr - Ident/Expr are set +type TryClause struct { + Try token.Pos + Ident *Ident // identifier for assignment form (nil for struct form) + Equal token.Pos // position of "=" (invalid for struct form) + Expr Expr // expression for assignment form (nil for struct form) + + comments + clause +} + // A ParenExpr node represents a parenthesized expression. type ParenExpr struct { Lparen token.Pos // position of "(" @@ -654,8 +763,9 @@ type ParenExpr struct { // A SelectorExpr node represents an expression followed by a selector. type SelectorExpr struct { - X Expr // expression - Sel Label // field selector + X Expr // expression + Period token.Pos // position of . + Sel Label // field selector comments expr @@ -731,6 +841,16 @@ type BinaryExpr struct { expr } +// A PostfixExpr node represents an expression followed by a postfix operator. +type PostfixExpr struct { + X Expr // expression + Op token.Token // postfix operator // ... or ? + OpPos token.Pos // position of operator + + comments + expr +} + // NewBinExpr creates for list of expressions of length 2 or greater a chained // binary expression of the form (((x1 op x2) op x3) ...). For lists of length // 1 it returns the expression itself. It panics for empty lists. 
@@ -766,32 +886,38 @@ func (x *StructLit) pos() *token.Pos { return &x.Lbrace } -func (x *ListLit) Pos() token.Pos { return x.Lbrack } -func (x *ListLit) pos() *token.Pos { return &x.Lbrack } -func (x *Ellipsis) Pos() token.Pos { return x.Ellipsis } -func (x *Ellipsis) pos() *token.Pos { return &x.Ellipsis } -func (x *LetClause) Pos() token.Pos { return x.Let } -func (x *LetClause) pos() *token.Pos { return &x.Let } -func (x *ForClause) Pos() token.Pos { return x.For } -func (x *ForClause) pos() *token.Pos { return &x.For } -func (x *IfClause) Pos() token.Pos { return x.If } -func (x *IfClause) pos() *token.Pos { return &x.If } -func (x *ParenExpr) Pos() token.Pos { return x.Lparen } -func (x *ParenExpr) pos() *token.Pos { return &x.Lparen } -func (x *SelectorExpr) Pos() token.Pos { return x.X.Pos() } -func (x *SelectorExpr) pos() *token.Pos { return x.X.pos() } -func (x *IndexExpr) Pos() token.Pos { return x.X.Pos() } -func (x *IndexExpr) pos() *token.Pos { return x.X.pos() } -func (x *SliceExpr) Pos() token.Pos { return x.X.Pos() } -func (x *SliceExpr) pos() *token.Pos { return x.X.pos() } -func (x *CallExpr) Pos() token.Pos { return x.Fun.Pos() } -func (x *CallExpr) pos() *token.Pos { return x.Fun.pos() } -func (x *UnaryExpr) Pos() token.Pos { return x.OpPos } -func (x *UnaryExpr) pos() *token.Pos { return &x.OpPos } -func (x *BinaryExpr) Pos() token.Pos { return x.X.Pos() } -func (x *BinaryExpr) pos() *token.Pos { return x.X.pos() } -func (x *BottomLit) Pos() token.Pos { return x.Bottom } -func (x *BottomLit) pos() *token.Pos { return &x.Bottom } +func (x *ListLit) Pos() token.Pos { return x.Lbrack } +func (x *ListLit) pos() *token.Pos { return &x.Lbrack } +func (x *Ellipsis) Pos() token.Pos { return x.Ellipsis } +func (x *Ellipsis) pos() *token.Pos { return &x.Ellipsis } +func (x *LetClause) Pos() token.Pos { return x.Let } +func (x *LetClause) pos() *token.Pos { return &x.Let } +func (x *TryClause) Pos() token.Pos { return x.Try } +func (x *TryClause) pos() 
*token.Pos { return &x.Try } +func (x *ForClause) Pos() token.Pos { return x.For } +func (x *ForClause) pos() *token.Pos { return &x.For } +func (x *IfClause) Pos() token.Pos { return x.If } +func (x *IfClause) pos() *token.Pos { return &x.If } +func (x *FallbackClause) Pos() token.Pos { return x.Fallback } +func (x *FallbackClause) pos() *token.Pos { return &x.Fallback } +func (x *ParenExpr) Pos() token.Pos { return x.Lparen } +func (x *ParenExpr) pos() *token.Pos { return &x.Lparen } +func (x *SelectorExpr) Pos() token.Pos { return x.X.Pos() } +func (x *SelectorExpr) pos() *token.Pos { return x.X.pos() } +func (x *IndexExpr) Pos() token.Pos { return x.X.Pos() } +func (x *IndexExpr) pos() *token.Pos { return x.X.pos() } +func (x *SliceExpr) Pos() token.Pos { return x.X.Pos() } +func (x *SliceExpr) pos() *token.Pos { return x.X.pos() } +func (x *CallExpr) Pos() token.Pos { return x.Fun.Pos() } +func (x *CallExpr) pos() *token.Pos { return x.Fun.pos() } +func (x *UnaryExpr) Pos() token.Pos { return x.OpPos } +func (x *UnaryExpr) pos() *token.Pos { return &x.OpPos } +func (x *BinaryExpr) Pos() token.Pos { return x.X.Pos() } +func (x *BinaryExpr) pos() *token.Pos { return x.X.pos() } +func (x *PostfixExpr) Pos() token.Pos { return x.X.Pos() } +func (x *PostfixExpr) pos() *token.Pos { return x.X.pos() } +func (x *BottomLit) Pos() token.Pos { return x.Bottom } +func (x *BottomLit) pos() *token.Pos { return &x.Bottom } func (x *BadExpr) End() token.Pos { return x.To } func (x *Ident) End() token.Pos { @@ -799,7 +925,7 @@ func (x *Ident) End() token.Pos { } func (x *BasicLit) End() token.Pos { return x.ValuePos.Add(len(x.Value)) } -func (x *Interpolation) End() token.Pos { return x.Elts[len(x.Elts)-1].Pos() } +func (x *Interpolation) End() token.Pos { return x.Elts[len(x.Elts)-1].End() } func (x *Func) End() token.Pos { return x.Ret.End() } func (x *StructLit) End() token.Pos { if x.Rbrace == token.NoPos && len(x.Elts) > 0 { @@ -814,17 +940,32 @@ func (x *Ellipsis) End() 
token.Pos { } return x.Ellipsis.Add(3) // len("...") } -func (x *LetClause) End() token.Pos { return x.Expr.End() } -func (x *ForClause) End() token.Pos { return x.Source.End() } -func (x *IfClause) End() token.Pos { return x.Condition.End() } -func (x *ParenExpr) End() token.Pos { return x.Rparen.Add(1) } -func (x *SelectorExpr) End() token.Pos { return x.Sel.End() } -func (x *IndexExpr) End() token.Pos { return x.Rbrack.Add(1) } -func (x *SliceExpr) End() token.Pos { return x.Rbrack.Add(1) } -func (x *CallExpr) End() token.Pos { return x.Rparen.Add(1) } -func (x *UnaryExpr) End() token.Pos { return x.X.End() } -func (x *BinaryExpr) End() token.Pos { return x.Y.End() } -func (x *BottomLit) End() token.Pos { return x.Bottom.Add(1) } +func (x *LetClause) End() token.Pos { return x.Expr.End() } +func (x *TryClause) End() token.Pos { + if x.Expr != nil { + return x.Expr.End() + } + return x.Try.Add(3) // len("try") +} +func (x *ForClause) End() token.Pos { return x.Source.End() } +func (x *IfClause) End() token.Pos { return x.Condition.End() } +func (x *FallbackClause) End() token.Pos { return x.Body.End() } +func (x *ParenExpr) End() token.Pos { return x.Rparen.Add(1) } +func (x *SelectorExpr) End() token.Pos { return x.Sel.End() } +func (x *IndexExpr) End() token.Pos { return x.Rbrack.Add(1) } +func (x *SliceExpr) End() token.Pos { return x.Rbrack.Add(1) } +func (x *CallExpr) End() token.Pos { return x.Rparen.Add(1) } +func (x *UnaryExpr) End() token.Pos { return x.X.End() } +func (x *BinaryExpr) End() token.Pos { return x.Y.End() } +func (x *PostfixExpr) End() token.Pos { + switch x.Op { + case token.ELLIPSIS: + return x.OpPos.Add(3) // len("...") + default: + return x.OpPos.Add(1) // most single-char operators + } +} +func (x *BottomLit) End() token.Pos { return x.Bottom.Add(1) } // ---------------------------------------------------------------------------- // Convenience functions for Idents @@ -832,7 +973,7 @@ func (x *BottomLit) End() token.Pos { return 
x.Bottom.Add(1) } // NewIdent creates a new Ident without position. // Useful for ASTs generated by code other than the CUE parser. func NewIdent(name string) *Ident { - return &Ident{token.NoPos, name, nil, nil, comments{}, label{}, expr{}} + return &Ident{NamePos: token.NoPos, Name: name} } func (id *Ident) String() string { @@ -872,12 +1013,8 @@ func (s *ImportSpec) pos() *token.Pos { return s.Path.pos() } -// func (s *AliasSpec) Pos() token.Pos { return s.Name.Pos() } -// func (s *ValueSpec) Pos() token.Pos { return s.Names[0].Pos() } -// func (s *TypeSpec) Pos() token.Pos { return s.Name.Pos() } - func (s *ImportSpec) End() token.Pos { - if s.EndPos != token.NoPos { + if s.EndPos.IsValid() { return s.EndPos } return s.Path.End() @@ -951,8 +1088,11 @@ type File struct { Filename string Decls []Decl // top-level declarations; or nil - Imports []*ImportSpec // imports in this file - Unresolved []*Ident // unresolved identifiers in this file + // Deprecated: use [File.ImportSpecs]. + // TODO(mvdan): remove in mid 2026. + Imports []*ImportSpec // imports in this file + + Unresolved []*Ident // unresolved identifiers in this file // TODO remove this field: it's here as a temporary // entity so that tests can determine which version @@ -984,16 +1124,45 @@ outer: return f.Decls[:p] } +// VisitImports iterates through the import declarations in the file. +// +// Deprecated: use [File.ImportDecls]. +// +//go:fix inline func (f *File) VisitImports(fn func(d *ImportDecl)) { - for _, d := range f.Decls { - switch x := d.(type) { - case *CommentGroup: - case *Package: - case *Attribute: - case *ImportDecl: - fn(x) - default: - return + for d := range f.ImportDecls() { + fn(d) + } +} + +// ImportDecls iterates through the import declarations in the file. 
+func (f *File) ImportDecls() iter.Seq[*ImportDecl] { + return func(yield func(d *ImportDecl) bool) { + for _, d := range f.Decls { + switch x := d.(type) { + case *CommentGroup: + case *Package: + case *Attribute: + case *ImportDecl: + if !yield(x) { + return + } + default: + return + } + } + } +} + +// ImportSpecs iterates through all the import specs from all the import decls in the file. +func (f *File) ImportSpecs() iter.Seq[*ImportSpec] { + return func(yield func(d *ImportSpec) bool) { + for d := range f.ImportDecls() { + for _, spec := range d.Specs { + if !yield(spec) { + return + } + } } } } @@ -1055,7 +1224,7 @@ type Package struct { func (p *Package) Pos() token.Pos { return getPos(p) } func (p *Package) pos() *token.Pos { - if p.PackagePos != token.NoPos { + if p.PackagePos.IsValid() { return &p.PackagePos } if p.Name != nil { diff --git a/vendor/cuelang.org/go/cue/ast/astutil/apply.go b/vendor/cuelang.org/go/cue/ast/astutil/apply.go index 1f71faf106..5ce7b01dd1 100644 --- a/vendor/cuelang.org/go/cue/ast/astutil/apply.go +++ b/vendor/cuelang.org/go/cue/ast/astutil/apply.go @@ -71,6 +71,14 @@ type Cursor interface { // Unless n is wrapped by ApplyRecursively, Apply does not walk n. InsertBefore(n ast.Node) + // Modified reports whether the cursor has been modified. + // Use ClearEnclosingModified to reset the flag. + Modified() bool + + // ClearEnclosingModified resets the Modified flag of the cursor so that + // the processing of enclosing nodes do not observe the modification. + ClearEnclosingModified() + self() *cursor } @@ -98,6 +106,7 @@ type cursor struct { typ interface{} // the type of the node index int // position of any of the sub types. 
replaced bool + modified bool } func newCursor(parent Cursor, n ast.Node, typ interface{}) *cursor { @@ -118,10 +127,12 @@ func fileInfo(c Cursor) (info *info) { return nil } -func (c *cursor) self() *cursor { return c } -func (c *cursor) Parent() Cursor { return c.parent } -func (c *cursor) Index() int { return c.index } -func (c *cursor) Node() ast.Node { return c.node } +func (c *cursor) self() *cursor { return c } +func (c *cursor) Parent() Cursor { return c.parent } +func (c *cursor) Index() int { return c.index } +func (c *cursor) Node() ast.Node { return c.node } +func (c *cursor) Modified() bool { return c.modified } +func (c *cursor) ClearEnclosingModified() { c.modified = false } // Deprecated: use [ast.NewImport] as an [ast.Ident.Node], and then // [Sanitize]. @@ -131,7 +142,7 @@ func (c *cursor) Import(importPath string) *ast.Ident { return nil } - name := ImportPathName(importPath) + name := ast.ParseImportPath(importPath).Qualifier // TODO: come up with something much better. // For instance, hoist the uniquer form cue/export.go to @@ -158,6 +169,7 @@ func (c *cursor) Replace(n ast.Node) { if ast.Comments(n) != nil { CopyComments(n, c.node) } + c.modified = true if r, ok := n.(recursive); ok { n = r.Node } else { @@ -190,16 +202,34 @@ func (c *cursor) Delete() { panic("unsupported") } // Children are traversed in the order in which they appear in the // respective node's struct definition. func Apply(node ast.Node, before, after func(Cursor) bool) ast.Node { - apply(&applier{before: before, after: after}, nil, &node) + a := &applier{before: before, after: after} + apply(a, nil, &node) + + // Fix certain references. + if a.fieldValueMap != nil { + ast.Walk(node, func(n ast.Node) bool { + if x, ok := n.(*ast.Ident); ok { + if v, ok := a.fieldValueMap[x.Node]; ok { + x.Node = v + } + } + return true + }, nil) + } return node } -// A applyVisitor's before method is invoked for each node encountered by Walk. 
+// A applyVisitor's Before method is invoked for each node encountered by Walk. // If the result applyVisitor w is true, Walk visits each of the children // of node with the applyVisitor w, followed by a call of w.After. +// The Mapping method is used to record changes to values that affect +// Ident.Node and Ident.Scope fields. +// TODO: currently, Mapping is only used to record Field.Value changes. Track +// more changes in the future. type applyVisitor interface { Before(Cursor) applyVisitor After(Cursor) bool + Mapping(before, after ast.Node) } // Helper functions for common node lists. They may be empty. @@ -211,6 +241,7 @@ type declsCursor struct { } func (c *declsCursor) InsertAfter(n ast.Node) { + c.modified = true if r, ok := n.(recursive); ok { n = r.Node c.process = append(c.process, n.(ast.Decl)) @@ -219,6 +250,7 @@ func (c *declsCursor) InsertAfter(n ast.Node) { } func (c *declsCursor) InsertBefore(n ast.Node) { + c.modified = true if r, ok := n.(recursive); ok { n = r.Node c.process = append(c.process, n.(ast.Decl)) @@ -226,7 +258,10 @@ func (c *declsCursor) InsertBefore(n ast.Node) { c.decls = append(c.decls, n.(ast.Decl)) } -func (c *declsCursor) Delete() { c.delete = true } +func (c *declsCursor) Delete() { + c.modified = true + c.delete = true +} func applyDeclList(v applyVisitor, parent Cursor, list []ast.Decl) []ast.Decl { c := &declsCursor{ @@ -244,6 +279,10 @@ func applyDeclList(v applyVisitor, parent Cursor, list []ast.Decl) []ast.Decl { c.decls = append(c.decls, c.node.(ast.Decl)) } c.delete = false + if c.modified { + parent.self().modified = true + c.modified = false + } for i := 0; i < len(c.process); i++ { x := c.process[i] c.node = x @@ -278,10 +317,29 @@ func applyDeclList(v applyVisitor, parent Cursor, list []ast.Decl) []ast.Decl { return c.decls } -func apply[N ast.Node](v applyVisitor, parent Cursor, nodePtr *N) { +type nilableNode interface { + ast.Node + comparable // pointer nodes, which can be compared to nil +} + +func 
applyIfNotNil[N nilableNode](v applyVisitor, parent Cursor, nodePtr *N) { + var zero N // nil + if *nodePtr != zero { + apply(v, parent, nodePtr) + } +} + +func apply[N nilableNode](v applyVisitor, parent Cursor, nodePtr *N) { node := *nodePtr + var zero N // nil + if node == zero { + panic("unexpected nil node; malformed syntax tree?") + } c := newCursor(parent, node, nodePtr) applyCursor(v, c) + if c.modified && parent != nil { + parent.self().modified = true + } if ast.Node(node) != c.node { *nodePtr = c.node.(N) } @@ -292,6 +350,10 @@ func applyList[N ast.Node](v applyVisitor, parent Cursor, list []N) { for i, node := range list { c.index = i c.node = node + if c.modified { + parent.self().modified = true + c.modified = false + } c.typ = &list[i] applyCursor(v, c) if ast.Node(node) != c.node { @@ -316,6 +378,8 @@ func applyCursor(v applyVisitor, c Cursor) { // parsing and printing? applyList(v, c, ast.Comments(node)) + var beforeValue ast.Node // Used for Field + // apply children // (the order of the cases matches the order // of the corresponding node types in go) @@ -331,10 +395,10 @@ func applyCursor(v applyVisitor, c Cursor) { // nothing to do case *ast.Field: + beforeValue = n.Value apply(v, c, &n.Label) - if n.Value != nil { - apply(v, c, &n.Value) - } + applyIfNotNil(v, c, &n.Alias) + applyIfNotNil(v, c, &n.Value) applyList(v, c, n.Attrs) case *ast.StructLit: @@ -351,9 +415,7 @@ func applyCursor(v applyVisitor, c Cursor) { applyList(v, c, n.Elts) case *ast.Ellipsis: - if n.Type != nil { - apply(v, c, &n.Type) - } + applyIfNotNil(v, c, &n.Type) case *ast.ParenExpr: apply(v, c, &n.X) @@ -368,12 +430,8 @@ func applyCursor(v applyVisitor, c Cursor) { case *ast.SliceExpr: apply(v, c, &n.X) - if n.Low != nil { - apply(v, c, &n.Low) - } - if n.High != nil { - apply(v, c, &n.High) - } + applyIfNotNil(v, c, &n.Low) + applyIfNotNil(v, c, &n.High) case *ast.CallExpr: apply(v, c, &n.Fun) @@ -386,11 +444,12 @@ func applyCursor(v applyVisitor, c Cursor) { apply(v, c, 
&n.X) apply(v, c, &n.Y) + case *ast.PostfixExpr: + apply(v, c, &n.X) + // Declarations case *ast.ImportSpec: - if n.Name != nil { - apply(v, c, &n.Name) - } + applyIfNotNil(v, c, &n.Name) apply(v, c, &n.Path) case *ast.BadDecl: @@ -410,9 +469,14 @@ func applyCursor(v applyVisitor, c Cursor) { apply(v, c, &n.Ident) apply(v, c, &n.Expr) + case *ast.PostfixAlias: + applyIfNotNil(v, c, &n.Label) + applyIfNotNil(v, c, &n.Field) + case *ast.Comprehension: applyList(v, c, n.Clauses) apply(v, c, &n.Value) + applyIfNotNil(v, c, &n.Fallback) // Files and packages case *ast.File: @@ -422,20 +486,24 @@ func applyCursor(v applyVisitor, c Cursor) { apply(v, c, &n.Name) case *ast.ForClause: - if n.Key != nil { - apply(v, c, &n.Key) - } + applyIfNotNil(v, c, &n.Key) apply(v, c, &n.Value) apply(v, c, &n.Source) case *ast.IfClause: apply(v, c, &n.Condition) + case *ast.FallbackClause: + apply(v, c, &n.Body) + default: panic(fmt.Sprintf("Walk: unexpected node type %T", n)) } v.After(c) + if f, ok := node.(*ast.Field); ok && beforeValue != f.Value { + v.Mapping(beforeValue, f.Value) + } } type applier struct { @@ -444,6 +512,15 @@ type applier struct { commentStack []commentFrame current commentFrame + + fieldValueMap map[ast.Node]ast.Node +} + +func (f *applier) Mapping(before, after ast.Node) { + if f.fieldValueMap == nil { + f.fieldValueMap = make(map[ast.Node]ast.Node) + } + f.fieldValueMap[before] = after } type commentFrame struct { diff --git a/vendor/cuelang.org/go/cue/ast/astutil/resolve.go b/vendor/cuelang.org/go/cue/ast/astutil/resolve.go index 6b4b4eabe5..b75c0b5023 100644 --- a/vendor/cuelang.org/go/cue/ast/astutil/resolve.go +++ b/vendor/cuelang.org/go/cue/ast/astutil/resolve.go @@ -57,13 +57,20 @@ type ErrFunc func(pos token.Pos, msg string, args ...interface{}) // Value // X in a: X=y Field Alias // Fields -// X in X: y File/Struct Expr (y) +// y in X: y File/Struct Expr (y) // X in X=x: y File/Struct Field // X in X=(x): y File/Struct Field // X in X="\(x)": y 
File/Struct Field // X in [X=x]: y Field Expr (x) // X in X=[x]: y Field Field // +// V in foo~(K,V): v File/Struct Field +// K in foo~(K,V): v Field Expr "foo" +// V in [x]~(K,V): y Field Field +// K in [x]~(K,V): y Field Expr (x) +// V in (x)~(K,V): y File/Struct Field +// K in (x)~(K,V): y Field Expr (x) +// // for k, v in ForClause Ident // let x = y LetClause Ident // @@ -72,22 +79,34 @@ type ErrFunc func(pos token.Pos, msg string, args ...interface{}) // Value Field Field // Pkg nil ImportSpec -// Resolve resolves all identifiers in a file. Unresolved identifiers are -// recorded in Unresolved. It will not overwrite already resolved values. +// Resolve resolves all identifiers in a file, populating [ast.Ident.Node] fields. +// Unresolved identifiers are recorded in [ast.File.Unresolved]. +// It will not overwrite already resolved identifiers. func Resolve(f *ast.File, errFn ErrFunc) { - visitor := &scope{errFn: errFn, identFn: resolveIdent} + stack := make([]*scope, 0, 8) + visitor := &scope{ + errFn: errFn, + identFn: resolveIdent, + scopeStack: &stack, + } ast.Walk(f, visitor.Before, nil) } -// Resolve resolves all identifiers in an expression. +// ResolveExpr resolves all identifiers in an expression. // It will not overwrite already resolved values. func ResolveExpr(e ast.Expr, errFn ErrFunc) { f := &ast.File{} - visitor := &scope{file: f, errFn: errFn, identFn: resolveIdent} + stack := make([]*scope, 0, 8) + visitor := &scope{ + file: f, + errFn: errFn, + identFn: resolveIdent, + scopeStack: &stack, + } ast.Walk(e, visitor.Before, nil) } -// A Scope maintains the set of named language entities declared +// A scope maintains the set of named language entities declared // in the scope and a link to the immediately surrounding (outer) // scope. 
type scope struct { @@ -100,24 +119,60 @@ type scope struct { identFn func(s *scope, n *ast.Ident) bool nameFn func(name string) errFn func(p token.Pos, msg string, args ...interface{}) + + // scopeStack is used to reuse scope allocations. + // The pointer is shared between the root scope and all its children. + scopeStack *[]*scope } type entry struct { - node ast.Node - link ast.Node // Alias, LetClause, or Field + node ast.Node + link ast.Node // Alias, LetClause, or Field + field *ast.Field // Used for LabelAliases } -func newScope(f *ast.File, outer *scope, node ast.Node, decls []ast.Decl) *scope { - const n = 4 // initial scope capacity - s := &scope{ - file: f, - outer: outer, - node: node, - index: make(map[string]entry, n), - identFn: outer.identFn, - nameFn: outer.nameFn, - errFn: outer.errFn, +func (s *scope) allocScope() *scope { + if n := len(*s.scopeStack); n > 0 { + scope := (*s.scopeStack)[n-1] + *s.scopeStack = (*s.scopeStack)[:n-1] + return scope + } + return &scope{ + index: make(map[string]entry, 4), + scopeStack: s.scopeStack, } +} + +func (s *scope) freeScope() { + // Ensure no pointers remain, which can hold onto memory. + // We only reuse the index map capacity, and keep the scopeStack pointer. + *s = scope{index: s.index, scopeStack: s.scopeStack} + clear(s.index) + *s.scopeStack = append(*s.scopeStack, s) +} + +// freeScopesUntil frees all scopes from s up to (but not including) 'ancestor'. 
+func (s *scope) freeScopesUntil(ancestor *scope) { + for s != ancestor { + if s == nil { + panic("ancestor scope not found") + } + next := s.outer + s.freeScope() + s = next + } +} + +func newScope(f *ast.File, outer *scope, node ast.Node, decls []ast.Decl) *scope { + s := outer.allocScope() + s.file = f + s.outer = outer + s.node = node + s.inField = false + s.identFn = outer.identFn + s.nameFn = outer.nameFn + s.errFn = outer.errFn + for _, d := range decls { switch x := d.(type) { case *ast.Field: @@ -126,11 +181,23 @@ func newScope(f *ast.File, outer *scope, node ast.Node, decls []ast.Decl) *scope if a, ok := x.Label.(*ast.Alias); ok { name := a.Ident.Name if _, ok := a.Expr.(*ast.ListLit); !ok { - s.insert(name, x, a) + s.insert(name, x, a, nil) + } + if x.Alias != nil { + // Error: cannot have both old-style label alias and postfix + // alias + s.errFn(x.Pos(), + "field has both label alias and postfix alias") + } + } + if _, isPattern := label.(*ast.ListLit); !isPattern { + if a := x.Alias; a != nil { + insertPostfixAliases(s, x, a.Label) } } - // default: + // TODO(perf): replace labelName with quick tests: this generates an + // error in many cases. 
name, isIdent, _ := ast.LabelName(label) if isIdent { v := x.Value @@ -138,22 +205,22 @@ func newScope(f *ast.File, outer *scope, node ast.Node, decls []ast.Decl) *scope if a, ok := v.(*ast.Alias); ok { v = a.Expr } - s.insert(name, v, x) + s.insert(name, v, x, nil) } case *ast.LetClause: name, isIdent, _ := ast.LabelName(x.Ident) if isIdent { - s.insert(name, x, x) + s.insert(name, x, x, nil) } case *ast.Alias: name, isIdent, _ := ast.LabelName(x.Ident) if isIdent { - s.insert(name, x, x) + s.insert(name, x, x, nil) } case *ast.ImportDecl: for _, spec := range x.Specs { info, _ := ParseImportSpec(spec) - s.insert(info.Ident, spec, spec) + s.insert(info.Ident, spec, spec, nil) } } } @@ -165,7 +232,7 @@ func (s *scope) isLet(n ast.Node) bool { return true } switch n.(type) { - case *ast.LetClause, *ast.Alias, *ast.Field: + case *ast.LetClause, *ast.TryClause, *ast.Alias, *ast.Field: return true } return false @@ -178,13 +245,13 @@ func (s *scope) mustBeUnique(n ast.Node) bool { switch n.(type) { // TODO: add *ast.ImportSpec when some implementations are moved over to // Sanitize. 
- case *ast.ImportSpec, *ast.LetClause, *ast.Alias, *ast.Field: + case *ast.ImportSpec, *ast.LetClause, *ast.TryClause, *ast.Alias, *ast.Field: return true } return false } -func (s *scope) insert(name string, n, link ast.Node) { +func (s *scope) insert(name string, n, link ast.Node, f *ast.Field) { if name == "" { return } @@ -215,7 +282,7 @@ func (s *scope) insert(name string, n, link ast.Node) { // s.errFn(n.Pos(), "alias %q already declared in enclosing scope", name) } } - s.index[name] = entry{node: n, link: link} + s.index[name] = entry{node: n, link: link, field: f} } func (s *scope) resolveScope(name string, node ast.Node) (scope ast.Node, e entry, ok bool) { @@ -243,7 +310,12 @@ func (s *scope) lookup(name string) (p *scope, obj ast.Node, node entry) { if _, ok := n.node.(*ast.ImportSpec); ok { return s, nil, n } - return s, s.node, n + obj := s.node + if n.field != nil { + // Label alias case. + obj = n.field + } + return s, obj, n } // s, last = s.outer, s s = s.outer @@ -251,10 +323,44 @@ func (s *scope) lookup(name string) (p *scope, obj ast.Node, node entry) { return nil, nil, entry{} } +func insertPostfixAliases(s *scope, x *ast.Field, expr ast.Node) { + a := x.Alias + if a == nil { + return + } + hasField := a.Field != nil && a.Field.Name != "_" + + if a.Label == nil { + // Single form: ~X + if !hasField { + s.errFn(a.Pos(), + "single postfix alias %q field cannot be the blank identifier", a.Field.Name) + } else { + s.insert(a.Field.Name, x, a, nil) + } + return + } + + // Double form: ~(X,Y) + hasLabel := a.Label != nil && a.Label.Name != "_" + if !hasField && !hasLabel { + s.errFn(a.Pos(), + "both label and field in postfix alias cannot be the blank identifier") + return + } + if hasLabel { + s.insert(a.Label.Name, expr, a, x) + } + if hasField { + s.insert(a.Field.Name, x, a, nil) + } +} + func (s *scope) Before(n ast.Node) bool { switch x := n.(type) { case *ast.File: - s := newScope(x, s, x, x.Decls) + s = newScope(x, s, x, x.Decls) + defer 
s.freeScope() // Support imports. for _, d := range x.Decls { ast.Walk(d, s.Before, nil) @@ -263,14 +369,22 @@ func (s *scope) Before(n ast.Node) bool { case *ast.StructLit: s = newScope(s.file, s, x, x.Elts) + defer s.freeScope() for _, elt := range x.Elts { ast.Walk(elt, s.Before, nil) } return false case *ast.Comprehension: + outer := s s = scopeClauses(s, x.Clauses) + defer s.freeScopesUntil(outer) ast.Walk(x.Value, s.Before, nil) + // Walk the fallback clause in the OUTER scope, since fallback should not + // have access to for/let variables from the comprehension clauses. + if x.Fallback != nil { + ast.Walk(x.Fallback.Body, outer.Before, nil) + } return false case *ast.Field: @@ -292,15 +406,22 @@ func (s *scope) Before(n ast.Node) bool { break } s = newScope(s.file, s, x, nil) + defer s.freeScope() if alias != nil { if name, _, _ := ast.LabelName(alias.Ident); name != "" { - s.insert(name, x, alias) + s.insert(name, x, alias, nil) } } expr := label.Elts[0] if a, ok := expr.(*ast.Alias); ok { + if x.Alias != nil { + // Error: cannot have both old-style pattern alias and + // postfix alias + s.errFn(x.Pos(), + "pattern constraint has both label alias and postfix alias") + } expr = a.Expr // Add to current scope, instead of the value's, and allow @@ -309,7 +430,9 @@ func (s *scope) Before(n ast.Node) bool { // illegal name clashes, and it allows giving better error // messages. This puts the burden on clients of this library // to detect illegal usage, though. - s.insert(a.Ident.Name, a.Expr, a) + s.insert(a.Ident.Name, a.Expr, a, x) + } else { + insertPostfixAliases(s, x, expr) } ast.Walk(expr, nil, func(n ast.Node) { @@ -326,11 +449,13 @@ func (s *scope) Before(n ast.Node) bool { } if n := x.Value; n != nil { + // Handle value aliases. if alias, ok := x.Value.(*ast.Alias); ok { // TODO: this should move into Before once decl attributes // have been fully deprecated and embed attributes are introduced. 
s = newScope(s.file, s, x, nil) - s.insert(alias.Ident.Name, alias, x) + defer s.freeScope() + s.insert(alias.Ident.Name, alias, x, nil) n = alias.Expr } s.inField = true @@ -346,9 +471,12 @@ func (s *scope) Before(n ast.Node) bool { saved := s.index[name] delete(s.index, name) // The same name may still appear in another scope - if x.Expr != nil { - ast.Walk(x.Expr, s.Before, nil) - } + // Set inField so that the label expression check in pattern constraints + // does not walk beyond the let clause's value. A let clause's value is + // a separate context, just like a field value. + s.inField = true + ast.Walk(x.Expr, s.Before, nil) + s.inField = false s.index[name] = saved return false @@ -358,9 +486,7 @@ func (s *scope) Before(n ast.Node) bool { saved := s.index[name] delete(s.index, name) // The same name may still appear in another scope - if x.Expr != nil { - ast.Walk(x.Expr, s.Before, nil) - } + ast.Walk(x.Expr, s.Before, nil) s.index[name] = saved return false @@ -418,14 +544,25 @@ func scopeClauses(s *scope, clauses []ast.Clause) *scope { ast.Walk(x.Source, s.Before, nil) s = newScope(s.file, s, x, nil) if x.Key != nil { - s.insert(x.Key.Name, x.Key, x) + s.insert(x.Key.Name, x.Key, x, nil) } - s.insert(x.Value.Name, x.Value, x) + s.insert(x.Value.Name, x.Value, x, nil) case *ast.LetClause: ast.Walk(x.Expr, s.Before, nil) s = newScope(s.file, s, x, nil) - s.insert(x.Ident.Name, x.Ident, x) + s.insert(x.Ident.Name, x.Ident, x, nil) + + case *ast.TryClause: + // For the assignment form (try x = expr), handle scope like LetClause. + if x.Ident != nil { + ast.Walk(x.Expr, s.Before, nil) + s = newScope(s.file, s, x, nil) + s.insert(x.Ident.Name, x.Ident, x, nil) + } else { + // For the struct form (try { ... }), just walk normally. 
+ ast.Walk(c, s.Before, nil) + } default: ast.Walk(c, s.Before, nil) diff --git a/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go b/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go index a7f90c687b..ae407fdd79 100644 --- a/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go +++ b/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go @@ -28,8 +28,6 @@ import ( // - handle comprehensions // - change field from foo to "foo" if it isn't referenced, rather than // relying on introducing a unique alias. -// - change a predeclared identifier reference to use the __ident form, -// instead of introducing an alias. // Sanitize rewrites File f in place to be well-formed after automated // construction of an AST. @@ -50,10 +48,12 @@ func Sanitize(f *ast.File) error { } // Gather all names. + stack := make([]*scope, 0, 8) s := &scope{ - errFn: z.errf, - nameFn: z.addName, - identFn: z.markUsed, + errFn: z.errf, + nameFn: z.addName, + identFn: z.markUsed, + scopeStack: &stack, } ast.Walk(f, s.Before, nil) if z.errs != nil { @@ -61,11 +61,13 @@ func Sanitize(f *ast.File) error { } // Add imports and unshadow. 
+ stack = stack[:0] s = &scope{ - file: f, - errFn: z.errf, - identFn: z.handleIdent, - index: make(map[string]entry), + file: f, + errFn: z.errf, + identFn: z.handleIdent, + index: make(map[string]entry), + scopeStack: &stack, } z.fileScope = s ast.Walk(f, s.Before, nil) @@ -170,7 +172,7 @@ func (z *sanitizer) markUsed(s *scope, n *ast.Ident) bool { func (z *sanitizer) cleanImports() { var fileImports []*ast.ImportSpec - z.file.VisitImports(func(decl *ast.ImportDecl) { + for decl := range z.file.ImportDecls() { newLen := 0 for _, spec := range decl.Specs { if _, ok := z.referenced[spec]; ok { @@ -180,18 +182,15 @@ func (z *sanitizer) cleanImports() { } } decl.Specs = decl.Specs[:newLen] - }) + } z.file.Imports = fileImports // Ensure that the first import always starts a new section // so that if the file has a comment, it won't be associated with // the import comment rather than the file. - first := true - z.file.VisitImports(func(decl *ast.ImportDecl) { - if first { - ast.SetRelPos(decl, token.NewSection) - first = false - } - }) + for decl := range z.file.ImportDecls() { + ast.SetRelPos(decl, token.NewSection) + break + } } func (z *sanitizer) handleIdent(s *scope, n *ast.Ident) bool { @@ -212,7 +211,7 @@ func (z *sanitizer) handleIdent(s *scope, n *ast.Ident) bool { _ = z.addImport(spec) info, _ := ParseImportSpec(spec) - z.fileScope.insert(info.Ident, spec, spec) + z.fileScope.insert(info.Ident, spec, spec, nil) return true } @@ -242,7 +241,7 @@ func (z *sanitizer) handleIdent(s *scope, n *ast.Ident) bool { Path: x.Path, }) z.importMap[xi.ID] = spec - z.fileScope.insert(name, spec, spec) + z.fileScope.insert(name, spec, spec, nil) } info, _ := ParseImportSpec(spec) @@ -257,6 +256,15 @@ func (z *sanitizer) handleIdent(s *scope, n *ast.Ident) bool { return true } + // A predeclared reference (e.g. "self") is shadowed by a local + // declaration. Use the "__"-prefixed form to avoid the shadow. 
+ if n.IsPredeclared() { + n.Name = "__" + n.Name + n.Node = nil + n.Scope = nil + return false + } + // n.Node != node and are both not nil and n.Node is not an ImportSpec. // This means that either n.Node is illegal or shadowed. // Look for the scope in which n.Node is defined and add an alias or let. diff --git a/vendor/cuelang.org/go/cue/ast/astutil/util.go b/vendor/cuelang.org/go/cue/ast/astutil/util.go index 625380b65b..2939704929 100644 --- a/vendor/cuelang.org/go/cue/ast/astutil/util.go +++ b/vendor/cuelang.org/go/cue/ast/astutil/util.go @@ -15,9 +15,7 @@ package astutil import ( - "path" "strconv" - "strings" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/token" @@ -33,14 +31,10 @@ import ( // // Deprecated: use [ast.ParseImportPath] instead to obtain the // qualifier. +// +//go:fix inline func ImportPathName(id string) string { - // TODO use ast.ParseImportPath(id).Qualifier and change - // callers to understand that they might receive an empty string. - name := path.Base(id) - if p := strings.LastIndexByte(name, ':'); p > 0 { - name = name[p+1:] - } - return name + return ast.ParseImportPath(id).Qualifier } // ImportInfo describes the information contained in an ImportSpec. diff --git a/vendor/cuelang.org/go/cue/ast/ident.go b/vendor/cuelang.org/go/cue/ast/ident.go index 8e84aef3f9..9827bee91f 100644 --- a/vendor/cuelang.org/go/cue/ast/ident.go +++ b/vendor/cuelang.org/go/cue/ast/ident.go @@ -40,16 +40,12 @@ func IsValidIdent(ident string) bool { return false } - consumed := false - if strings.HasPrefix(ident, "_") { - ident = ident[1:] - consumed = true - if len(ident) == 0 { - return true - } + ident, consumed := strings.CutPrefix(ident, "_") + if ident == "" { + return true // "_" is a valid identifier } - if strings.HasPrefix(ident, "#") { - ident = ident[1:] + ident, consumedHash := strings.CutPrefix(ident, "#") + if consumedHash { // Note: _#0 is not allowed by the spec, although _0 is. // TODO: set consumed to true here to allow #0. 
consumed = false @@ -70,6 +66,21 @@ func IsValidIdent(ident string) bool { return true } +// StringLabelNeedsQuoting reports whether the given string +// must be quoted via [literal.Label].Quote to represent itself +// as a string label, such as a regular field. +// +// Note that a negative result does not mean you can simply use +// [NewIdent](name) to create a valid label without affecting any references. +// In the general case, you should use [Ident.Node] to ensure each identifier references +// exactly what they mean to, or quote any string label which doesn't need to be referenced. +// +// The main use case of this API is for simple scenarios, such as a JSON decoder +// where the input is all data without any references. +func StringLabelNeedsQuoting(name string) bool { + return strings.HasPrefix(name, "#") || strings.HasPrefix(name, "_") || !IsValidIdent(name) +} + // LabelName reports the name of a label, whether it is an identifier // (it binds a value to a scope), and whether it is valid. // Keywords that are allowed in label positions are interpreted accordingly. diff --git a/vendor/cuelang.org/go/cue/ast/importpath.go b/vendor/cuelang.org/go/cue/ast/importpath.go index c96d0e206b..ddec0c78be 100644 --- a/vendor/cuelang.org/go/cue/ast/importpath.go +++ b/vendor/cuelang.org/go/cue/ast/importpath.go @@ -1,6 +1,9 @@ package ast -import "strings" +import ( + "cmp" + "strings" +) // ParseImportPath returns the various components of an import path. // It does not check the result for validity. @@ -8,7 +11,12 @@ func ParseImportPath(p string) ImportPath { var parts ImportPath pathWithoutQualifier := p if i := strings.LastIndexAny(p, "/:"); i >= 0 && p[i] == ':' { - pathWithoutQualifier = p[:i] + // Historically, `:pkgname` has been an alias for `.:pkgname`, + // and some users started relying on that behavior in the CLI + // even though it was never documented in `cue help inputs`. + // Keep support for it around for now, but perhaps reconsider in the future. 
+ pathWithoutQualifier = cmp.Or(p[:i], ".") + parts.Qualifier = p[i+1:] parts.ExplicitQualifier = true } diff --git a/vendor/cuelang.org/go/cue/ast/walk.go b/vendor/cuelang.org/go/cue/ast/walk.go index 53429f975d..a4997ad1ae 100644 --- a/vendor/cuelang.org/go/cue/ast/walk.go +++ b/vendor/cuelang.org/go/cue/ast/walk.go @@ -18,6 +18,18 @@ import ( "fmt" ) +type nilableNode interface { + Node + comparable // pointer nodes, which can be compared to nil +} + +func walkIfNotNil[N nilableNode](node N, before func(Node) bool, after func(Node)) { + var zero N // nil + if node != zero { + Walk(node, before, after) + } +} + func walkList[N Node](list []N, before func(Node) bool, after func(Node)) { for _, node := range list { Walk(node, before, after) @@ -53,9 +65,8 @@ func Walk(node Node, before func(Node) bool, after func(Node)) { case *Field: Walk(n.Label, before, after) - if n.Value != nil { - Walk(n.Value, before, after) - } + walkIfNotNil(n.Alias, before, after) + walkIfNotNil(n.Value, before, after) walkList(n.Attrs, before, after) case *Func: @@ -76,9 +87,7 @@ func Walk(node Node, before func(Node) bool, after func(Node)) { walkList(n.Elts, before, after) case *Ellipsis: - if n.Type != nil { - Walk(n.Type, before, after) - } + walkIfNotNil(n.Type, before, after) case *ParenExpr: Walk(n.X, before, after) @@ -93,12 +102,8 @@ func Walk(node Node, before func(Node) bool, after func(Node)) { case *SliceExpr: Walk(n.X, before, after) - if n.Low != nil { - Walk(n.Low, before, after) - } - if n.High != nil { - Walk(n.High, before, after) - } + walkIfNotNil(n.Low, before, after) + walkIfNotNil(n.High, before, after) case *CallExpr: Walk(n.Fun, before, after) @@ -111,11 +116,12 @@ func Walk(node Node, before func(Node) bool, after func(Node)) { Walk(n.X, before, after) Walk(n.Y, before, after) + case *PostfixExpr: + Walk(n.X, before, after) + // Declarations case *ImportSpec: - if n.Name != nil { - Walk(n.Name, before, after) - } + walkIfNotNil(n.Name, before, after) 
Walk(n.Path, before, after) case *BadDecl: @@ -131,13 +137,26 @@ func Walk(node Node, before func(Node) bool, after func(Node)) { Walk(n.Ident, before, after) Walk(n.Expr, before, after) + case *TryClause: + if n.Ident != nil { + // Assignment form: try x = expr + Walk(n.Ident, before, after) + Walk(n.Expr, before, after) + } + // Struct form: body is in Comprehension.Value, walked separately + case *Alias: Walk(n.Ident, before, after) Walk(n.Expr, before, after) + case *PostfixAlias: + walkIfNotNil(n.Label, before, after) + walkIfNotNil(n.Field, before, after) + case *Comprehension: walkList(n.Clauses, before, after) Walk(n.Value, before, after) + walkIfNotNil(n.Fallback, before, after) // Files and packages case *File: @@ -147,15 +166,16 @@ func Walk(node Node, before func(Node) bool, after func(Node)) { Walk(n.Name, before, after) case *ForClause: - if n.Key != nil { - Walk(n.Key, before, after) - } + walkIfNotNil(n.Key, before, after) Walk(n.Value, before, after) Walk(n.Source, before, after) case *IfClause: Walk(n.Condition, before, after) + case *FallbackClause: + Walk(n.Body, before, after) + default: panic(fmt.Sprintf("Walk: unexpected node type %T", n)) } diff --git a/vendor/cuelang.org/go/cue/build/context.go b/vendor/cuelang.org/go/cue/build/context.go index 240e6149b4..4d11bf1c64 100644 --- a/vendor/cuelang.org/go/cue/build/context.go +++ b/vendor/cuelang.org/go/cue/build/context.go @@ -68,6 +68,12 @@ func (inst *Instance) Complete() error { if err != nil { inst.ReportError(err) } + + // Resolve identifiers after imports are loaded. Store errors separately + // to avoid "imported and not used" errors in dependencies being reported + // as "import failed". 
+ inst.ResolutionErr = inst.resolveIdentifiers() + if inst.Err != nil { inst.Incomplete = true return inst.Err diff --git a/vendor/cuelang.org/go/cue/build/import.go b/vendor/cuelang.org/go/cue/build/import.go index 76d7ef86d7..2fe6a37909 100644 --- a/vendor/cuelang.org/go/cue/build/import.go +++ b/vendor/cuelang.org/go/cue/build/import.go @@ -54,7 +54,7 @@ func (inst *Instance) complete() errors.Error { ) for _, f := range inst.Files { - for _, spec := range f.Imports { + for spec := range f.ImportSpecs() { quoted := spec.Path.Value path, err := strconv.Unquote(quoted) if err != nil { diff --git a/vendor/cuelang.org/go/cue/build/instance.go b/vendor/cuelang.org/go/cue/build/instance.go index 8bc3c5b7d4..ace25c3d0a 100644 --- a/vendor/cuelang.org/go/cue/build/instance.go +++ b/vendor/cuelang.org/go/cue/build/instance.go @@ -18,6 +18,8 @@ import ( "fmt" pathpkg "path" "path/filepath" + "slices" + "strconv" "strings" "unicode" @@ -68,6 +70,11 @@ type Instance struct { // were any errors in dependencies. Err errors.Error + // ResolutionErr contains errors from identifier resolution, such as + // "imported and not used". These are stored separately from Err to + // avoid failing import loading for dependencies. + ResolutionErr errors.Error + parent *Instance // TODO: for cycle detection // The following fields are for informative purposes and are not used by @@ -107,7 +114,10 @@ type Instance struct { Deps []string `api:"alpha"` DepsErrors []error `api:"alpha"` - Match []string `api:"alpha"` + // TODO: Match was declared for years but never set by any of the cue/build logic. + // If any user was trying to use it, we should implement it, + // but that seems unlikely given that it was always empty. + // Match []string `api:"alpha"` } // RelPath reports the path of f relative to the root of the instance's module @@ -286,3 +296,129 @@ func IsLocalImport(path string) bool { return path == "." || path == ".." 
|| strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../") } + +func (inst *Instance) resolveIdentifiers() errors.Error { + // Link top-level declarations. As top-level entries get unified, an entry + // may be linked to any top-level entry of any of the files. + allFields := map[string]ast.Node{} + for _, f := range inst.Files { + if f.PackageName() == "" { + continue + } + for _, d := range f.Decls { + if f, ok := d.(*ast.Field); ok && f.Value != nil { + if ident, ok := f.Label.(*ast.Ident); ok { + allFields[ident.Name] = f.Value + } + } + } + } + + var errs errors.Error + for _, f := range inst.Files { + err := inst.resolveFile(f, allFields) + errs = errors.Append(errs, err) + } + return errs +} + +func (inst *Instance) resolveFile(f *ast.File, allFields map[string]ast.Node) errors.Error { + unresolved := map[string][]*ast.Ident{} + for _, u := range f.Unresolved { + unresolved[u.Name] = append(unresolved[u.Name], u) + } + fields := map[string]ast.Node{} + for _, d := range f.Decls { + if f, ok := d.(*ast.Field); ok && f.Value != nil { + if ident, ok := f.Label.(*ast.Ident); ok { + fields[ident.Name] = d + } + } + } + var errs errors.Error + + specs := []*ast.ImportSpec{} + + for spec := range f.ImportSpecs() { + id, err := strconv.Unquote(spec.Path.Value) + if err != nil { + continue // quietly ignore the error + } + name := pathpkg.Base(id) + if imp := inst.LookupImport(id); imp != nil { + name = imp.PkgName + } + if spec.Name != nil { + name = spec.Name.Name + } + if n, ok := fields[name]; ok { + errs = errors.Append(errs, errors.Newf(spec.Pos(), + "%s redeclared as imported package name\n"+ + "\tprevious declaration at %s", name, n.Pos())) + continue + } + fields[name] = spec + used := false + for _, u := range unresolved[name] { + used = true + u.Node = spec + } + if !used { + specs = append(specs, spec) + } + } + + // Verify each import is used. + if len(specs) > 0 { + // Find references to imports. 
This assumes that identifiers in labels + // are not resolved or that such errors are caught elsewhere. + ast.Walk(f, nil, func(n ast.Node) { + if x, ok := n.(*ast.Ident); ok { + // As we also visit labels, most nodes will be nil. + if x.Node == nil { + return + } + for i, s := range specs { + if s == x.Node { + specs[i] = nil + return + } + } + } + }) + + // Add errors for unused imports. + for _, spec := range specs { + if spec == nil { + continue + } + if spec.Name == nil { + errs = errors.Append(errs, errors.Newf(spec.Pos(), + "imported and not used: %s", spec.Path.Value)) + } else { + errs = errors.Append(errs, errors.Newf(spec.Pos(), + "imported and not used: %s as %s", spec.Path.Value, spec.Name)) + } + } + } + + f.Unresolved = slices.DeleteFunc(f.Unresolved, func(u *ast.Ident) bool { + if n, ok := allFields[u.Name]; ok { + u.Node = n + u.Scope = f + return true + } + if u.Node != nil { + // Keep valid import resolutions; clear any stale + // field references from a previous instance context. + if _, ok := u.Node.(*ast.ImportSpec); ok { + return true + } + u.Node = nil + u.Scope = nil + } + return false + }) + + return errs +} diff --git a/vendor/cuelang.org/go/cue/context.go b/vendor/cuelang.org/go/cue/context.go index 6da0e48f9c..58f06e339e 100644 --- a/vendor/cuelang.org/go/cue/context.go +++ b/vendor/cuelang.org/go/cue/context.go @@ -362,7 +362,7 @@ func NilIsAny(isAny bool) EncodeOption { // Channel, complex, and function values cannot be encoded in CUE. Attempting to // encode such a value results in the returned value being an error, accessible // through the Err method. -func (c *Context) Encode(x interface{}, option ...EncodeOption) Value { +func (c *Context) Encode(x any, option ...EncodeOption) Value { switch v := x.(type) { case adt.Value: return newValueRoot(c.runtime(), c.ctx(), v) @@ -372,40 +372,28 @@ func (c *Context) Encode(x interface{}, option ...EncodeOption) Value { ctx := c.ctx() // TODO: is true the right default? 
- expr := convert.GoValueToValue(ctx, x, options.nilIsTop) - var n *adt.Vertex - if v, ok := expr.(*adt.Vertex); ok { - n = v - } else { - n = &adt.Vertex{} - n.AddConjunct(adt.MakeRootConjunct(nil, expr)) - } + val := convert.FromGoValue(ctx, x, options.nilIsTop) + n := adt.ToVertex(val) // we know val is finalized n.Finalize(ctx) return c.make(n) } -// Encode converts a Go type to a CUE [Value]. +// EncodeType converts a Go type to a CUE [Value]. // // The returned value will represent an error, accessible through [Value.Err], // if any error occurred. -func (c *Context) EncodeType(x interface{}, option ...EncodeOption) Value { +func (c *Context) EncodeType(x any, option ...EncodeOption) Value { switch v := x.(type) { case *adt.Vertex: return c.make(v) } ctx := c.ctx() - expr, err := convert.GoTypeToExpr(ctx, x) + expr, err := convert.FromGoType(ctx, x) if err != nil { return c.makeError(err) } - var n *adt.Vertex - if v, ok := expr.(*adt.Vertex); ok { - n = v - } else { - n = &adt.Vertex{} - n.AddConjunct(adt.MakeRootConjunct(nil, expr)) - } + n := exprToVertex(expr) n.Finalize(ctx) return c.make(n) } diff --git a/vendor/cuelang.org/go/cue/cue.go b/vendor/cuelang.org/go/cue/cue.go index 9fcf5b8aea..4b7d960b0e 100644 --- a/vendor/cuelang.org/go/cue/cue.go +++ b/vendor/cuelang.org/go/cue/cue.go @@ -26,16 +26,9 @@ // // While a context can be used to build values, note that loading a module and its // dependencies should be done with the [cuelang.org/go/cue/load] package. +// To print a value into its string syntax form, use [cuelang.org/go/cue/format]. // -// Note that the following types are DEPRECATED and their usage should be -// avoided if possible: -// -// - [FieldInfo] -// - [Instance] -// - [Runtime] -// - [Struct] -// -// Many types also have deprecated methods. Code that already uses deprecated -// methods can keep using them for at least some time. We aim to provide a +// Note that some types and funcs are deprecated. 
Code that already uses deprecated +// funcs can keep using them for at least some time. We aim to provide a // go or cue fix solution to automatically rewrite code using the new API. package cue diff --git a/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go b/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go index 178c26d6b4..8970f58812 100644 --- a/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go +++ b/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package cuecontext creates [cue.Context] values, +// which are needed for creating [cue.Value] values +// and using the core API in the [cue] package. package cuecontext import ( @@ -77,10 +80,6 @@ const ( // currently [EvalV3]. Note that this version may change without notice. EvalExperiment EvalVersion = internal.DevVersion - // EvalV2 is the previous version of the evaluator. It was introduced in CUE - // version 0.3 and is being maintained until 2024. - EvalV2 EvalVersion = internal.EvalV2 - // EvalV3 is the current version of the evaluator. It was introduced in 2024 // and brought a new disjunction algorithm, a new closedness algorithm, a // new core scheduler, and adds performance enhancements like structure sharing. diff --git a/vendor/cuelang.org/go/cue/decode.go b/vendor/cuelang.org/go/cue/decode.go index 9e78a18009..84e70eb7eb 100644 --- a/vendor/cuelang.org/go/cue/decode.go +++ b/vendor/cuelang.org/go/cue/decode.go @@ -35,6 +35,14 @@ import ( // Decode initializes the value pointed to by x with Value v. // An error is returned if x is nil or not a pointer. // +// If x implements any of these interfaces, they will be used for decoding +// in the following order of precedence: +// 1. Unmarshaler +// 2. json.Unmarshaler +// 3. 
encoding.TextUnmarshaler +// +// If x is a struct, this same applies to all of its fields (matching the behavior of json.Unmarshal) +// // If x is a struct, Decode will validate the constraints specified in the field tags. // // If x contains a [Value], that part of x will be set to the value @@ -51,6 +59,8 @@ func (v Value) Decode(x interface{}) error { return d.errs } +// TODO(mvdan): move decoder to internal/core/convert as ToGoValue + type decoder struct { errs errors.Error } @@ -66,8 +76,8 @@ func incompleteError(v Value) errors.Error { v: v, err: &adt.Bottom{ Code: adt.IncompleteError, - Err: errors.Newf(v.Pos(), - "cannot convert non-concrete value %v", v)}, + Err: errors.Newf(v.Pos(), "cannot convert non-concrete value %v", v), + }, } } @@ -79,6 +89,46 @@ func (d *decoder) clear(x reflect.Value) { var valueType = reflect.TypeFor[Value]() +type valueUnmarshaler interface { + unmarshalValue(v Value) error +} + +type cueUnmarshaler struct{ Unmarshaler } + +func (u cueUnmarshaler) unmarshalValue(v Value) error { + return u.UnmarshalCUE(v) +} + +type jsonUnmarshaler struct{ json.Unmarshaler } + +func (u jsonUnmarshaler) unmarshalValue(v Value) error { + b, err := v.MarshalJSON() + if err != nil { + return err + } + return u.UnmarshalJSON(b) +} + +type textUnmarshaler struct{ encoding.TextUnmarshaler } + +func (u textUnmarshaler) unmarshalValue(v Value) error { + switch x := u.TextUnmarshaler.(type) { + case *big.Float: + f, err := v.Float(nil) + if err != nil { + return errors.Wrapf(err, v.Pos(), "Decode") + } + *x = *f + return nil + default: + b, err := v.Bytes() + if err != nil { + return errors.Wrapf(err, v.Pos(), "Decode") + } + return u.UnmarshalText(b) + } +} + func (d *decoder) decode(x reflect.Value, v Value, isPtr bool) { if !x.IsValid() { d.addErr(errors.Newf(v.Pos(), "cannot decode into invalid value")) @@ -91,7 +141,7 @@ func (d *decoder) decode(x reflect.Value, v Value, isPtr bool) { return } - if err := v.Err(); err != nil { + if err := v.Err(); 
err != nil && !IsIncomplete(err) { d.addErr(err) return } @@ -100,7 +150,14 @@ func (d *decoder) decode(x reflect.Value, v Value, isPtr bool) { return } - switch x.Kind() { + unmarshaler, x := indirect(x, v.IsNull()) + if unmarshaler != nil { + d.addErr(unmarshaler.unmarshalValue(v)) + return + } + + kind := x.Kind() + switch kind { case reflect.Pointer, reflect.Map, reflect.Slice, reflect.Interface: // nullable types if v.IsNull() || !v.IsConcrete() { @@ -116,39 +173,6 @@ func (d *decoder) decode(x reflect.Value, v Value, isPtr bool) { } } - ij, it, x := indirect(x, v.IsNull()) - - if ij != nil { - b, err := v.MarshalJSON() - d.addErr(err) - d.addErr(ij.UnmarshalJSON(b)) - return - } - - if it != nil { - if _, ok := it.(*big.Float); ok { - f, err := v.Float(nil) - if err != nil { - err = errors.Wrapf(err, v.Pos(), "Decode") - d.addErr(err) - return - } - x.Elem().Set(reflect.ValueOf(*f)) - return - } - - b, err := v.Bytes() - if err != nil { - err = errors.Wrapf(err, v.Pos(), "Decode") - d.addErr(err) - return - } - d.addErr(it.UnmarshalText(b)) - return - } - - kind := x.Kind() - if kind == reflect.Interface { value := d.interfaceValue(v) x.Set(reflect.ValueOf(value)) @@ -334,7 +358,7 @@ func (d *decoder) convertMap(x reflect.Value, v Value) { t := x.Type() // Map key must either have string kind, have an integer kind, - // or be an encoding.TextUnmarshaler. + // or be an [encoding.TextUnmarshaler]. 
switch t.Key().Kind() { case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, @@ -361,7 +385,8 @@ func (d *decoder) convertMap(x reflect.Value, v Value) { kt := t.Key() if reflect.PointerTo(kt).Implements(textUnmarshalerType) { kv = reflect.New(kt) - err := kv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(key)) + u, _ := reflect.TypeAssert[encoding.TextUnmarshaler](kv) + err := u.UnmarshalText([]byte(key)) d.addErr(err) kv = kv.Elem() } else { @@ -856,7 +881,7 @@ func simpleLetterEqualFold(s, t []byte) bool { // If it encounters an Unmarshaler, indirect stops and returns that. // If decodingNull is true, indirect stops at the first settable pointer so it // can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { +func indirect(v reflect.Value, decodingNull bool) (valueUnmarshaler, reflect.Value) { // Issue #24153 indicates that it is generally not a guaranteed property // that you may round-trip a reflect.Value by calling Value.Addr().Elem() // and expect the value to still be settable for values derived from @@ -909,12 +934,14 @@ func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.Te v.Set(reflect.New(v.Type().Elem())) } if v.Type().NumMethod() > 0 && v.CanInterface() { - if u, ok := v.Interface().(json.Unmarshaler); ok { - return u, nil, v - } - if !decodingNull { - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, v + switch u := v.Interface().(type) { + case Unmarshaler: + return cueUnmarshaler{u}, v + case json.Unmarshaler: + return jsonUnmarshaler{u}, v + case encoding.TextUnmarshaler: + if !decodingNull { + return textUnmarshaler{u}, v } } } @@ -926,5 +953,5 @@ func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.Te v = v.Elem() } } - return nil, nil, v + return nil, v } diff --git a/vendor/cuelang.org/go/cue/errors.go b/vendor/cuelang.org/go/cue/errors.go 
index 241d80dbc9..bbf87af614 100644 --- a/vendor/cuelang.org/go/cue/errors.go +++ b/vendor/cuelang.org/go/cue/errors.go @@ -58,11 +58,7 @@ func (e *valueError) Position() token.Pos { if e.err.Err != nil { return e.err.Err.Position() } - src := e.err.Source() - if src == nil { - return token.NoPos - } - return src.Pos() + return adt.Pos(e.err) } func (e *valueError) InputPositions() []token.Pos { @@ -95,6 +91,35 @@ var errNotExists = &adt.Bottom{ Err: errors.Newf(token.NoPos, "undefined value"), } +// IsIncomplete reports whether err is an incomplete error. +// Incomplete errors occur when a value cannot be fully evaluated +// due to missing information, such as unresolved references or +// incomplete disjunctions. These errors may be acceptable in +// non-concrete contexts. +// +// If the error returned by [Value.Err] is incomplete but the value +// still exists (i.e., [Value.Exists] returns true), it typically means +// the value is valid CUE but not fully resolved. In such cases, +// [Value.Validate] with default options will return nil. +func IsIncomplete(err error) bool { + if err == nil { + return false + } + // Fast path: + if ve, ok := err.(*valueError); ok { + return ve.err.IsIncomplete() + } + // Handle combined errors + for _, e := range errors.Errors(err) { + if ve, ok := e.(*valueError); ok { + if ve.err.IsIncomplete() { + return true + } + } + } + return false +} + func mkErr(src adt.Node, args ...interface{}) *adt.Bottom { var e *adt.Bottom var code adt.ErrorCode = -1 @@ -115,7 +140,7 @@ outer: case string: args := args[i+1:] // Do not expand message so that errors can be localized. 
- pos := pos(src) + pos := adt.Pos(src) if code < 0 { code = 0 } diff --git a/vendor/cuelang.org/go/cue/errors/errors.go b/vendor/cuelang.org/go/cue/errors/errors.go index 7451ea7d4a..129d02ac42 100644 --- a/vendor/cuelang.org/go/cue/errors/errors.go +++ b/vendor/cuelang.org/go/cue/errors/errors.go @@ -29,6 +29,7 @@ import ( "strings" "cuelang.org/go/cue/token" + "cuelang.org/go/internal/core/format" ) // New is a convenience wrapper for [errors.New] in the core library. @@ -86,6 +87,8 @@ func NewMessagef(format string, args ...interface{}) Message { // NewMessage creates an error message for human consumption. // // Deprecated: Use [NewMessagef] instead. +// +//go:fix inline func NewMessage(format string, args []interface{}) Message { return NewMessagef(format, args...) } @@ -123,8 +126,8 @@ type Error interface { Msg() (format string, args []interface{}) } -// Positions returns all positions returned by an error, sorted -// by relevance when possible and with duplicates removed. +// Positions returns the printable positions returned by an error, +// sorted by relevance when possible and with duplicates removed. func Positions(err error) []token.Pos { e := Error(nil) if !errors.As(err, &e) { @@ -134,17 +137,21 @@ func Positions(err error) []token.Pos { a := make([]token.Pos, 0, 3) pos := e.Position() - if pos.IsValid() { + if pos.File() != nil { a = append(a, pos) } sortOffset := len(a) for _, p := range e.InputPositions() { - if p.IsValid() && p != pos { + if p.File() != nil && p != pos { a = append(a, p) } } + // TODO if the Error we found wraps another error that itself + // has positions, we won't return them here but perhaps we should? + + // TODO(mvdan): we can use [token.Pos.Compare] here and no tests break. 
slices.SortFunc(a[sortOffset:], comparePosWithNoPosFirst) return slices.Compact(a) } @@ -155,9 +162,9 @@ func Positions(err error) []token.Pos { func comparePosWithNoPosFirst(a, b token.Pos) int { if a == b { return 0 - } else if a == token.NoPos { + } else if !a.IsValid() { return -1 - } else if b == token.NoPos { + } else if !b.IsValid() { return +1 } return token.Pos.Compare(a, b) @@ -248,7 +255,7 @@ func (e *wrapped) InputPositions() []token.Pos { } func (e *wrapped) Position() token.Pos { - if p := e.main.Position(); p != token.NoPos { + if p := e.main.Position(); p.IsValid() { return p } if wrap, ok := e.wrap.(Error); ok { @@ -273,10 +280,10 @@ func Promote(err error, msg string) Error { var _ Error = &posError{} -// In an List, an error is represented by an *posError. -// The position Pos, if valid, points to the beginning of +// In a list, an error is represented by a *posError. +// The position pos, if valid, points to the beginning of // the offending token, and the error condition is described -// by Msg. +// by Message. type posError struct { pos token.Pos Message @@ -286,7 +293,11 @@ func (e *posError) Path() []string { return nil } func (e *posError) InputPositions() []token.Pos { return nil } func (e *posError) Position() token.Pos { return e.pos } -// Append combines two errors, flattening Lists as necessary. +// Append combines two errors, flattening lists as necessary. +// +// Note: this may mutate a if it is already a list, so +// must not be used if a might have been shared across multiple +// goroutines. func Append(a, b Error) Error { switch x := a.(type) { case nil: @@ -299,7 +310,7 @@ func Append(a, b Error) Error { } // Errors reports the individual errors associated with an error, which is -// the error itself if there is only one or, if the underlying type is List, +// the error itself if there is only one or, if the underlying type is list, // its individual elements. If the given error is not an Error, it will be // promoted to one. 
func Errors(err error) []Error { @@ -310,8 +321,11 @@ func Errors(err error) []Error { var errorErr Error switch { case As(err, &listErr): + // TODO if err itself wraps a list, then the wrapping + // error information will be lost here. return listErr case As(err, &errorErr): + // TODO similar error loss here. return []Error{errorErr} default: return []Error{Promote(err, "")} @@ -331,10 +345,8 @@ func appendToList(a list, err Error) list { } return a default: - for _, e := range a { - if e == err { - return a - } + if slices.Contains(a, err) { + return a } return append(a, err) } @@ -362,20 +374,6 @@ func (p list) As(target interface{}) bool { return false } -// AddNewf adds an Error with given position and error message to an List. -func (p *list) AddNewf(pos token.Pos, msg string, args ...interface{}) { - err := &posError{pos: pos, Message: Message{format: msg, args: args}} - *p = append(*p, err) -} - -// Add adds an Error with given position and error message to an List. -func (p *list) Add(err Error) { - *p = appendToList(*p, err) -} - -// Reset resets an List to no errors. -func (p *list) Reset() { *p = (*p)[:0] } - // Sanitize sorts multiple errors and removes duplicates on a best effort basis. // If err represents a single or no error, it returns the error as is. func Sanitize(err Error) Error { @@ -397,14 +395,13 @@ func (p list) sanitize() list { return p } a := slices.Clone(p) - a.RemoveMultiples() + a.removeMultiples() return a } -// Sort sorts an List. *posError entries are sorted by position, -// other errors are sorted by error message, and before any *posError -// entry. -func (p list) Sort() { +// sort sorts a list. *posError entries are sorted by position, +// other errors are sorted by error message, and before any *posError entry. 
+func (p list) sort() { slices.SortFunc(p, func(a, b Error) int { if c := comparePosWithNoPosFirst(a.Position(), b.Position()); c != 0 { return c @@ -413,31 +410,44 @@ func (p list) Sort() { return c } return cmp.Compare(a.Error(), b.Error()) - }) } -// RemoveMultiples sorts an List and removes all but the first error per line. -func (p *list) RemoveMultiples() { - p.Sort() +// removeMultiples sorts a list and removes all but the first error per line. +func (p *list) removeMultiples() { + p.sort() *p = slices.CompactFunc(*p, approximateEqual) } func approximateEqual(a, b Error) bool { aPos := a.Position() bPos := b.Position() - if aPos == token.NoPos || bPos == token.NoPos { + if !aPos.IsValid() || !bPos.IsValid() { return a.Error() == b.Error() } - return comparePosWithNoPosFirst(aPos, bPos) == 0 && slices.Compare(a.Path(), b.Path()) == 0 + return aPos.Compare(bPos) == 0 && slices.Compare(a.Path(), b.Path()) == 0 } -// An List implements the error interface. +// A list implements the error interface by returning the +// string for the first error in the list. func (p list) Error() string { + // TODO in general Error.Msg does not include the message + // from errors that are wrapped (see [wrapped.Msg] which does + // not include any text from the wrapped error, so this implementation + // of Error means that we might lose information when + // just printing an error list with regular %v. format, args := p.Msg() return fmt.Sprintf(format, args...) } +// pathlessError wraps an Error to suppress its path, for use as a +// format argument when the path is already provided by the caller. +type pathlessError struct{ cueError } +type cueError = Error // alias to avoid field name conflicting with Error() method + +func (e pathlessError) Path() []string { return nil } +func (e pathlessError) Unwrap() error { return Unwrap(e.cueError) } + // Msg reports the unformatted error message for the first error, if any. 
func (p list) Msg() (format string, args []interface{}) { switch len(p) { @@ -446,7 +456,10 @@ func (p list) Msg() (format string, args []interface{}) { case 1: return p[0].Msg() } - return "%s (and %d more errors)", []interface{}{p[0], len(p) - 1} + // Wrap p[0] to suppress its path. The list's own Path() already + // returns p[0].Path(), so including the path in the format arg + // would cause it to appear twice in the output. + return "%s (and %d more errors)", []interface{}{pathlessError{p[0]}, len(p) - 1} } // Position reports the primary position for the first error, if any. @@ -494,12 +507,18 @@ type Config struct { // ToSlash sets whether to use Unix paths. Mostly used for testing. ToSlash bool + + // OmitPath removes the path prefix from error messages. + OmitPath bool + + // Printer is used internally to detect printing cycles. + Printer format.Printer } var zeroConfig = &Config{} // Print is a utility function that prints a list of errors to w, -// one error per line, if the err parameter is an List. Otherwise +// one error per line, if the err parameter is a list. Otherwise // it prints the err string. func Print(w io.Writer, err error, cfg *Config) { if cfg == nil { @@ -525,10 +544,20 @@ func String(err Error) string { return b.String() } +// StringWithConfig generates a short message from a given Error, using the +// provided configuration. +func StringWithConfig(err Error, cfg *Config) string { + var b strings.Builder + writeErr(&b, err, cfg) + return b.String() +} + func writeErr(w io.Writer, err Error, cfg *Config) { - if path := strings.Join(err.Path(), "."); path != "" { - _, _ = io.WriteString(w, path) - _, _ = io.WriteString(w, ": ") + if !cfg.OmitPath { + if path := strings.Join(err.Path(), "."); path != "" { + _, _ = io.WriteString(w, path) + _, _ = io.WriteString(w, ": ") + } } for { @@ -543,21 +572,33 @@ func writeErr(w io.Writer, err Error, cfg *Config) { // so we make a copy if we need to replace any arguments. 
didCopy := false for i, arg := range args { - var pos token.Position + var alt any switch arg := arg.(type) { case token.Pos: - pos = arg.Position() + pos := arg.Position() + pos.Filename = relPath(pos.Filename, cfg) + alt = pos case token.Position: - pos = arg + pos := arg + pos.Filename = relPath(pos.Filename, cfg) + alt = pos default: - continue + if cfg.Printer == nil { + // We should always do something. Consider replacing + // vertices with a path if this is not set. + continue + } + var replaced bool + alt, replaced = cfg.Printer.ReplaceArg(arg) + if !replaced { + continue + } } if !didCopy { args = slices.Clone(args) didCopy = true } - pos.Filename = relPath(pos.Filename, cfg) - args[i] = pos + args[i] = alt } n, _ := fmt.Fprintf(w, msg, args...) diff --git a/vendor/cuelang.org/go/cue/format/import.go b/vendor/cuelang.org/go/cue/format/import.go index 873de2c7f6..7fef6a97ed 100644 --- a/vendor/cuelang.org/go/cue/format/import.go +++ b/vendor/cuelang.org/go/cue/format/import.go @@ -61,7 +61,7 @@ func setRelativePos(s *ast.ImportSpec, r token.RelPos) { } func hasDoc(s *ast.ImportSpec) bool { - for _, doc := range s.Comments() { + for _, doc := range ast.Comments(s) { if doc.Doc { return true } @@ -86,7 +86,7 @@ func importName(s *ast.ImportSpec) string { } func importComment(s *ast.ImportSpec) string { - for _, c := range s.Comments() { + for _, c := range ast.Comments(s) { if c.Line { return c.Text() } @@ -99,7 +99,7 @@ func collapse(prev, next *ast.ImportSpec) bool { if importPath(next) != importPath(prev) || importName(next) != importName(prev) { return false } - for _, c := range prev.Comments() { + for _, c := range ast.Comments(prev) { if !c.Doc { return false } diff --git a/vendor/cuelang.org/go/cue/format/node.go b/vendor/cuelang.org/go/cue/format/node.go index 4d0e943b14..288e57f6cd 100644 --- a/vendor/cuelang.org/go/cue/format/node.go +++ b/vendor/cuelang.org/go/cue/format/node.go @@ -16,6 +16,7 @@ package format import ( "fmt" + "slices" "strings" 
"cuelang.org/go/cue/ast" @@ -64,10 +65,6 @@ unsupported: return fmt.Errorf("cue/format: unsupported node type %T", node) } -func isRegularField(tok token.Token) bool { - return tok == token.ILLEGAL || tok == token.COLON -} - // Helper functions for common node lists. They may be empty. func nestDepth(f *ast.Field) int { @@ -101,6 +98,18 @@ func hasDocComments(d ast.Decl) bool { return false } +// hasNoSignificantComments checks if an import spec has no comments that +// would require formatting with parentheses. Trailing comments (Position > 1) +// that appear after the import statement don't require parentheses. +func hasNoSignificantComments(spec *ast.ImportSpec) bool { + for _, cg := range ast.Comments(spec) { + if cg.Position <= 1 { + return false + } + } + return true +} + func (f *formatter) walkDeclList(list []ast.Decl) { f.before(nil) d := 0 @@ -127,7 +136,7 @@ func (f *formatter) walkDeclList(list []ast.Decl) { } } } - if f.printer.cfg.simplify && internal.IsEllipsis(x) { + if f.printer.cfg.simplify && isEllipsis(x) { ellipsis = x continue } @@ -166,6 +175,33 @@ func (f *formatter) walkDeclList(list []ast.Decl) { f.after(nil) } +// isEllipsis reports whether the declaration can be represented as an ellipsis. +func isEllipsis(x ast.Decl) bool { + // ... 
+ if _, ok := x.(*ast.Ellipsis); ok { + return true + } + + // [string]: _ or [_]: _ + f, ok := x.(*ast.Field) + if !ok { + return false + } + v, ok := f.Value.(*ast.Ident) + if !ok || v.Name != "_" { + return false + } + l, ok := f.Label.(*ast.ListLit) + if !ok || len(l.Elts) != 1 { + return false + } + i, ok := l.Elts[0].(*ast.Ident) + if !ok { + return false + } + return i.Name == "string" || i.Name == "_" +} + func (f *formatter) walkSpecList(list []*ast.ImportSpec) { f.before(nil) for _, x := range list { @@ -190,22 +226,27 @@ func (f *formatter) walkClauseList(list []ast.Clause, ws whiteSpace) { f.after(nil) } +func fallbackKeyword(n *ast.Comprehension) token.Token { + if len(n.Clauses) > 1 { + return token.FALLBACK + } else if _, ok := n.Clauses[0].(*ast.ForClause); ok { + return token.FALLBACK + } + return token.ELSE +} + func (f *formatter) walkListElems(list []ast.Expr) { f.before(nil) for _, x := range list { f.before(x) - // This is a hack to ensure that comments are printed correctly in lists. - // A comment must be printed after each element in a list, but we can't - // print a comma at the end of a comment because it will be considered - // part of the comment and ignored. - // To fix this we collect all comments that appear after the element, - // and only handle them after it's formatted. + // Collect comments that appear after the element's start position. + // These need to be printed after the comma, not before it. 
var commentsAfter []*ast.CommentGroup splitComments := x.Pos().IsValid() if splitComments { for _, cg := range ast.Comments(x) { - if x.Pos().Before(cg.Pos()) { + if x.Pos().Compare(cg.Pos()) < 0 { commentsAfter = append(commentsAfter, cg) } } @@ -219,9 +260,25 @@ func (f *formatter) walkListElems(list []ast.Expr) { f.walkClauseList(n.Clauses, blank) f.print(blank, nooverride) f.expr(n.Value) + if n.Fallback != nil { + // Use FALLBACK keyword for 'for' comprehensions, ELSE for 'if'/'try' + kw := fallbackKeyword(n) + f.print(blank, n.Fallback.Fallback, kw, blank) + f.expr(n.Fallback.Body) + } case *ast.Ellipsis: - f.ellipsis(n) + // For ellipsis, also collect trailing comments from the type + // since they're attached to the nested node, not the ellipsis itself. + f.print(n.Ellipsis, token.ELLIPSIS) + if n.Type != nil && !isTop(n.Type) { + for _, cg := range ast.Comments(n.Type) { + if n.Type.Pos().Compare(cg.Pos()) < 0 { + commentsAfter = append(commentsAfter, cg) + } + } + f.exprRaw(n.Type, token.LowestPrec, 1) + } case *ast.Alias: f.expr(n.Ident) @@ -305,15 +362,32 @@ func (f *formatter) decl(decl ast.Decl) { switch n := decl.(type) { case *ast.Field: - constraint, _ := internal.ConstraintToken(n) - f.label(n.Label, constraint) + // Format label without constraint (we'll add constraint after alias) + f.label(n.Label, token.ILLEGAL) + + // Format postfix alias if present + if a := n.Alias; a != nil { + f.print(a.Tilde, token.TILDE, noblank) + if a.Label != nil { + // Dual form: ~(K,V) + // Assumes that ILLEGAL tokens are no-ops. + f.print(a.Lparen, token.LPAREN, noblank) + f.expr(a.Label) + f.print(a.Comma, token.COMMA, noblank) + f.expr(a.Field) + f.print(a.Rparen, token.RPAREN, noblank) + } else { + // Simple form: ~X + f.expr(a.Field) + } + } - regular := isRegularField(n.Token) - if regular { - f.print(noblank, nooverride, n.TokenPos, token.COLON) - } else { - f.print(blank, nooverride, n.Token) + // Format constraint marker (?, !) 
if present + if n.Constraint != token.ILLEGAL { + f.print(n.Constraint) } + + f.print(noblank, nooverride, n.TokenPos, token.COLON) f.visitComments(f.current.pos) if mem := f.inlineField(n); mem != nil { @@ -321,7 +395,7 @@ func (f *formatter) decl(decl ast.Decl) { default: fallthrough - case regular && f.cfg.simplify: + case f.cfg.simplify: f.print(blank, nooverride) f.decl(mem) @@ -382,7 +456,7 @@ func (f *formatter) decl(decl ast.Decl) { break } switch { - case len(n.Specs) == 1 && len(n.Specs[0].Comments()) == 0: + case len(n.Specs) == 1 && hasNoSignificantComments(n.Specs[0]): if !n.Lparen.IsValid() { f.print(blank) f.walkSpecList(n.Specs) @@ -434,6 +508,12 @@ func (f *formatter) embedding(decl ast.Expr) { f.walkClauseList(n.Clauses, blank) f.print(blank, nooverride) f.expr(n.Value) + if n.Fallback != nil { + // Use FALLBACK keyword for 'for' comprehensions, ELSE for 'if'/'try' + kw := fallbackKeyword(n) + f.print(blank, n.Fallback.Fallback, kw, blank) + f.expr(n.Fallback.Body) + } case *ast.Ellipsis: f.ellipsis(n) @@ -466,6 +546,8 @@ func (f *formatter) nextNeedsFormfeed(n ast.Expr) bool { return f.nextNeedsFormfeed(x.X) case *ast.UnaryExpr: return f.nextNeedsFormfeed(x.X) + case *ast.PostfixExpr: + return f.nextNeedsFormfeed(x.X) case *ast.BinaryExpr: return f.nextNeedsFormfeed(x.X) || f.nextNeedsFormfeed(x.Y) case *ast.IndexExpr: @@ -473,10 +555,8 @@ func (f *formatter) nextNeedsFormfeed(n ast.Expr) bool { case *ast.SelectorExpr: return f.nextNeedsFormfeed(x.X) case *ast.CallExpr: - for _, arg := range x.Args { - if f.nextNeedsFormfeed(arg) { - return true - } + if slices.ContainsFunc(x.Args, f.nextNeedsFormfeed) { + return true } } return false @@ -506,7 +586,7 @@ func (f *formatter) label(l ast.Label, constraint token.Token) { // if the AST is not generated by the parser. 
name := n.Name if !ast.IsValidIdent(name) { - name = literal.Label.Quote(n.Name) + name = literal.Label.Quote(name) } f.print(n.NamePos, name) @@ -562,7 +642,6 @@ func (f *formatter) expr1(expr ast.Expr, prec1, depth int) { } func (f *formatter) exprRaw(expr ast.Expr, prec1, depth int) { - switch x := expr.(type) { case *ast.BadExpr: f.print(x.From, "_|_") @@ -599,6 +678,10 @@ func (f *formatter) exprRaw(expr ast.Expr, prec1, depth int) { f.expr1(x.X, prec, depth) } + case *ast.PostfixExpr: + f.expr1(x.X, token.HighestPrec, depth) + f.print(x.Op) + case *ast.BasicLit: f.print(x.ValuePos, x) @@ -670,7 +753,7 @@ func (f *formatter) exprRaw(expr ast.Expr, prec1, depth int) { case len(x.Elts) == 0: // collapse curly braces if the body is empty. ffAlt := blank | nooverride - for _, c := range x.Comments() { + for _, c := range ast.Comments(x) { if c.Position == 1 { ffAlt = ff break @@ -700,7 +783,7 @@ func (f *formatter) exprRaw(expr ast.Expr, prec1, depth int) { if len(x.Elts) == 0 { // collapse square brackets if the body is empty. 
collapseWs := blank | nooverride - for _, c := range x.Comments() { + for _, c := range ast.Comments(x) { if c.Position == 1 { collapseWs = ws break @@ -757,6 +840,18 @@ func (f *formatter) clause(clause ast.Clause) { f.expr(n.Expr) f.markUnindentLine() + case *ast.TryClause: + f.print(n.Try, token.TRY) + if n.Ident != nil { + // Assignment form: try x = expr + f.print(blank, nooverride, indent) + f.expr(n.Ident) + f.print(blank, nooverride, n.Equal, token.BIND, blank) + f.expr(n.Expr) + f.markUnindentLine() + } + // Struct form: just "try" - body comes from Comprehension.Value + default: panic("unknown clause type") } diff --git a/vendor/cuelang.org/go/cue/format/printer.go b/vendor/cuelang.org/go/cue/format/printer.go index 68e5b7c6bc..f2ca7e0262 100644 --- a/vendor/cuelang.org/go/cue/format/printer.go +++ b/vendor/cuelang.org/go/cue/format/printer.go @@ -401,7 +401,7 @@ func (p *printer) writeString(s string, isLit bool) { } func (p *printer) writeByte(ch byte, n int) { - for i := 0; i < n; i++ { + for range n { p.output = append(p.output, ch) } @@ -412,7 +412,7 @@ func (p *printer) writeByte(ch byte, n int) { p.pos.Column = 1 n := p.cfg.Indent + p.indent // include base indentation - for i := 0; i < n; i++ { + for range n { p.output = append(p.output, '\t') } diff --git a/vendor/cuelang.org/go/cue/format/simplify.go b/vendor/cuelang.org/go/cue/format/simplify.go index d36c58e700..a3b81d2b60 100644 --- a/vendor/cuelang.org/go/cue/format/simplify.go +++ b/vendor/cuelang.org/go/cue/format/simplify.go @@ -18,8 +18,6 @@ import ( "strconv" "cuelang.org/go/cue/ast" - "cuelang.org/go/cue/ast/astutil" - "cuelang.org/go/internal" ) // labelSimplifier rewrites string labels to identifiers if @@ -52,8 +50,11 @@ func (s *labelSimplifier) processDecls(decls []ast.Decl) { for _, d := range decls { switch x := d.(type) { case *ast.Field: - if _, ok := x.Label.(*ast.BasicLit); ok { - x.Label = astutil.Apply(x.Label, nil, sc.replace).(ast.Label) + if bl, ok := 
x.Label.(*ast.BasicLit); ok { + str, err := strconv.Unquote(bl.Value) + if err == nil && sc.scope[str] { + x.Label = ast.NewIdent(str) + } } } } @@ -89,7 +90,7 @@ func (s *labelSimplifier) markStrings(n ast.Node) bool { switch x := n.(type) { case *ast.BasicLit: str, err := strconv.Unquote(x.Value) - if err != nil || !ast.IsValidIdent(str) || internal.IsDefOrHidden(str) { + if err != nil || ast.StringLabelNeedsQuoting(str) { return false } s.scope[str] = true @@ -102,14 +103,3 @@ func (s *labelSimplifier) markStrings(n ast.Node) bool { } return true } - -func (s *labelSimplifier) replace(c astutil.Cursor) bool { - switch x := c.Node().(type) { - case *ast.BasicLit: - str, err := strconv.Unquote(x.Value) - if err == nil && s.scope[str] && !internal.IsDefOrHidden(str) { - c.Replace(ast.NewIdent(str)) - } - } - return false -} diff --git a/vendor/cuelang.org/go/cue/instance.go b/vendor/cuelang.org/go/cue/instance.go index 13d54ddb92..6614fbd7fe 100644 --- a/vendor/cuelang.org/go/cue/instance.go +++ b/vendor/cuelang.org/go/cue/instance.go @@ -15,7 +15,6 @@ package cue import ( - "cuelang.org/go/cue/ast" "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" "cuelang.org/go/internal/core/adt" @@ -71,7 +70,7 @@ func addInst(x *runtime.Runtime, p *Instance) *Instance { PkgName: p.PkgName, } } - x.AddInst(p.ImportPath, p.root, p.inst) + x.AddInst(p.root, p.inst) x.SetBuildData(p.inst, p) p.index = x return p @@ -119,7 +118,7 @@ func getImportFromNode(x *runtime.Runtime, v *adt.Vertex) *Instance { } func getImportFromPath(x *runtime.Runtime, id string) *Instance { - node := x.LoadImport(id) + node := x.LoadBuiltin(id) if node == nil { return nil } @@ -154,7 +153,7 @@ func newInstance(x *runtime.Runtime, p *build.Instance, v *adt.Vertex) *Instance } } - x.AddInst(p.ImportPath, v, p) + x.AddInst(v, p) x.SetBuildData(p, inst) inst.index = x return inst @@ -188,20 +187,7 @@ func (inst *Instance) Value() Value { return newVertexRoot(inst.index, ctx, inst.root) } -// Eval 
evaluates an expression within an existing instance. -// -// Expressions may refer to builtin packages if they can be uniquely identified. -// -// Deprecated: use -// inst.Value().Context().BuildExpr(expr, Scope(inst.Value), InferBuiltins(true)) -func (inst *hiddenInstance) Eval(expr ast.Expr) Value { - v := inst.Value() - return v.Context().BuildExpr(expr, Scope(v), InferBuiltins(true)) -} - -// DO NOT USE. -// -// Deprecated: do not use. +// Deprecated: do not use; use unification instead. func Merge(inst ...*Instance) *Instance { v := &adt.Vertex{} @@ -233,10 +219,8 @@ func (inst *hiddenInstance) Build(p *build.Instance) *Instance { idx := inst.index r := inst.index - rErr := r.ResolveFiles(p) - cfg := &compile.Config{Scope: valueScope(Value{idx: r, v: inst.root})} - v, err := compile.Files(cfg, r, p.ID(), p.Files...) + v, err := compile.Instance(cfg, r, p) // Just like [runtime.Runtime.Build], ensure that the @embed compiler is run as needed. err = errors.Append(err, r.InjectImplementations(p, v)) @@ -244,8 +228,8 @@ func (inst *hiddenInstance) Build(p *build.Instance) *Instance { v.AddConjunct(adt.MakeRootConjunct(nil, inst.root)) i := newInstance(idx, p, v) - if rErr != nil { - i.setListOrError(rErr) + if p.ResolutionErr != nil { + i.setListOrError(p.ResolutionErr) } if i.Err != nil { i.setListOrError(i.Err) diff --git a/vendor/cuelang.org/go/cue/interpreter/embed/embed.go b/vendor/cuelang.org/go/cue/interpreter/embed/embed.go index 090edf2646..f7d03ec4c8 100644 --- a/vendor/cuelang.org/go/cue/interpreter/embed/embed.go +++ b/vendor/cuelang.org/go/cue/interpreter/embed/embed.go @@ -24,8 +24,9 @@ // all. This allows the @embed attribute to be used to load a file within a CUE // module into a field. // -// References to files are always relative to directory in which the referring -// file resides. Only files that exist within the CUE module are accessible. +// References to files are always relative to the directory in which the +// referring file resides. 
Only files in the same module containing the CUE +// file can be embedded, and parent directory references are not allowed. // // # The @embed attribute // @@ -51,6 +52,12 @@ // the list of supported types. This field is required if a file extension is // unknown, or if a wildcard is used for the file extension in the glob pattern. // +// allowEmptyGlob +// +// By default, a glob pattern that matches no files results in an error. When +// allowEmptyGlob is present, a glob pattern with no matches will return an +// empty struct instead of an error. This option is only supported with glob patterns. +// // # Limitations // // The embed interpreter currently does not support: @@ -79,16 +86,21 @@ // // include all files in the y directory as a map of file paths to binary // // data. The entries are unified into the same map as above. // files: _ @embed(glob=y/*.*, type=binary) +// +// // include all YAML files in the z directory, but allow empty result +// // if no files match (returns empty struct instead of error) +// optionalFiles: _ @embed(glob=z/*.yaml, allowEmptyGlob) package embed import ( - "io/fs" + iofs "io/fs" "os" "path" "path/filepath" "strings" "cuelang.org/go/cue" + "cuelang.org/go/cue/ast" "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" @@ -118,9 +130,11 @@ func New() runtime.Interpreter { } func (i interpreter) Kind() string { - return "embed" + return EmbedKind } +const EmbedKind = "embed" + // NewCompiler returns a compiler that can decode and embed files that exist // within a CUE module. func (i interpreter) NewCompiler(b *build.Instance, r *runtime.Runtime) (runtime.Compiler, errors.Error) { @@ -145,102 +159,118 @@ type compiler struct { // file system cache dir string - fs fs.StatFS + fs iofs.StatFS pos token.Pos } -// Compile interprets an embed attribute to either load a file -// (@embed(file=...)) or a glob of files (@embed(glob=...)). -// and decodes the given files. 
-func (c *compiler) Compile(funcName string, scope adt.Value, a *internal.Attr) (adt.Expr, errors.Error) { +// validateAttr performs logical validation of the attr. It does not +// perform any checks against a filesystem. +func validateAttr(a *internal.Attr) (file, glob, typ string, allowEmptyGlob bool, errs errors.Error) { + pos := a.Pos file, _, err := a.Lookup(0, "file") if err != nil { - return nil, errors.Promote(err, "invalid attribute") + return "", "", "", false, errors.Promote(err, "invalid attribute") } - glob, _, err := a.Lookup(0, "glob") + glob, _, err = a.Lookup(0, "glob") if err != nil { - return nil, errors.Promote(err, "invalid attribute") + return "", "", "", false, errors.Promote(err, "invalid attribute") } - typ, _, err := a.Lookup(0, "type") + typ, _, err = a.Lookup(0, "type") if err != nil { - return nil, errors.Promote(err, "invalid type argument") + return "", "", "", false, errors.Promote(err, "invalid type argument") } - c.opCtx = adt.NewContext((*runtime.Runtime)(c.runtime), nil) - - pos := a.Pos - c.pos = pos - - // Jump through some hoops to get file operations to behave the same for - // Windows and Unix. - // TODO: obtain a fs.FS from load or something similar. 
- dir := filepath.Dir(pos.File().Name()) - if c.dir != dir { - c.fs = os.DirFS(dir).(fs.StatFS) // Documented as implementing fs.StatFS - c.dir = dir + allowEmptyGlob, err = a.Flag(0, "allowEmptyGlob") + if err != nil { + return "", "", "", false, errors.Promote(err, "invalid allowEmptyGlob argument") } switch { case file == "" && glob == "": - return nil, errors.Newf(a.Pos, "attribute must have file or glob field") + return "", "", "", false, errors.Newf(pos, "attribute must have file or glob field") case file != "" && glob != "": - return nil, errors.Newf(a.Pos, "attribute cannot have both file and glob field") + return "", "", "", false, errors.Newf(pos, "attribute cannot have both file and glob field") + case allowEmptyGlob && glob == "": + return "", "", "", false, errors.Newf(pos, "allowEmptyGlob must be specified with a glob field") case file != "": - return c.processFile(file, typ, scope) + file, err := clean(pos, file) + if err != nil { + return "", "", "", false, err + } + return file, "", typ, allowEmptyGlob, nil - default: // glob != "": - return c.processGlob(glob, typ, scope) - } -} + default: + glob, err := clean(pos, glob) + if err != nil { + return "", "", "", false, err + } -func (c *compiler) processFile(file, scope string, schema adt.Value) (adt.Expr, errors.Error) { - file, err := c.clean(file) - if err != nil { - return nil, err - } - for dir := path.Dir(file); dir != "."; dir = path.Dir(dir) { - if _, err := c.fs.Stat(path.Join(dir, "cue.mod")); err == nil { - return nil, errors.Newf(c.pos, "cannot embed file %q: in different module", file) + // Validate that the glob pattern is valid per [pkgpath.Match]. + // Note that we use Unix match semantics because all embed paths are Unix-like. 
+ if _, err := pkgpath.Match(glob, "", pkgpath.Unix); err != nil { + return "", "", "", false, errors.Wrapf(err, pos, "invalid glob pattern %q", glob) } - } - return c.decodeFile(file, scope, schema) + // If we do not have a type, ensure the extension of the base is fully + // specified, i.e. does not contain any meta characters as specified by + // path.Match. + if typ == "" { + ext := path.Ext(path.Base(glob)) + if ext == "" || strings.ContainsAny(ext, "*?[\\") { + return "", "", "", false, errors.Newf(pos, "extension not fully specified; type argument required") + } + } + return "", glob, typ, allowEmptyGlob, nil + } } -func (c *compiler) processGlob(glob, scope string, schema adt.Value) (adt.Expr, errors.Error) { - glob, ce := c.clean(glob) - if ce != nil { - return nil, ce +// Compile interprets an embed attribute to either load a file +// (@embed(file=...)) or a glob of files (@embed(glob=...)). +// and decodes the given files. +func (c *compiler) Compile(funcName string, scope adt.Value, a *internal.Attr) (adt.Expr, errors.Error) { + c.opCtx = adt.NewContext((*runtime.Runtime)(c.runtime), nil) + + pos := a.Pos + c.pos = pos + + // Jump through some hoops to get file operations to behave the same for + // Windows and Unix. + // TODO: obtain an iofs.FS from load or something similar. + dir := filepath.Dir(pos.File().Name()) + if c.dir != dir { + c.fs = os.DirFS(dir).(iofs.StatFS) // Documented as implementing iofs.StatFS + c.dir = dir } - // Validate that the glob pattern is valid per [pkgpath.Match]. - // Note that we use Unix match semantics because all embed paths are Unix-like. - if _, err := pkgpath.Match(glob, "", pkgpath.Unix); err != nil { - return nil, errors.Wrapf(err, c.pos, "invalid glob pattern %q", glob) + file, glob, typ, allowEmptyGlob, err := validateAttr(a) + if err != nil { + return nil, err } - // If we do not have a type, ensure the extension of the base is fully - // specified, i.e. 
does not contain any meta characters as specified by - // path.Match. - if scope == "" { - ext := path.Ext(path.Base(glob)) - if ext == "" || strings.ContainsAny(ext, "*?[\\") { - return nil, errors.Newf(c.pos, "extension not fully specified; type argument required") + if file != "" { + for dir := path.Dir(file); dir != "."; dir = path.Dir(dir) { + if _, err := c.fs.Stat(path.Join(dir, "cue.mod")); err == nil { + return nil, errors.Newf(pos, "cannot embed file %q: in different module", file) + } } + return c.decodeFile(file, typ) } + return c.processGlob(glob, typ, allowEmptyGlob) +} +func (c *compiler) processGlob(glob, scope string, allowEmptyGlob bool) (adt.Expr, errors.Error) { m := &adt.StructLit{} matches, err := fsGlob(c.fs, glob) if err != nil { return nil, errors.Promote(err, "failed to match glob") } - if len(matches) == 0 { + if len(matches) == 0 && !allowEmptyGlob { return nil, errors.Newf(c.pos, "no matches for glob pattern %q", glob) } @@ -262,7 +292,7 @@ func (c *compiler) processGlob(glob, scope string, schema adt.Value) (adt.Expr, dirs[dir] = f } - expr, err := c.decodeFile(f, scope, schema) + expr, err := c.decodeFile(f, scope) if err != nil { return nil, err } @@ -282,29 +312,36 @@ func (c *compiler) processGlob(glob, scope string, schema adt.Value) (adt.Expr, return m, nil } -func (c *compiler) clean(s string) (string, errors.Error) { +func clean(pos token.Pos, s string) (string, errors.Error) { file := path.Clean(s) if file != s { - return file, errors.Newf(c.pos, "path not normalized, use %q instead", file) + return file, errors.Newf(pos, "path not normalized, use %q instead", file) } if path.IsAbs(file) { - return "", errors.Newf(c.pos, "only relative files are allowed") + return "", errors.Newf(pos, "only relative files are allowed") } if file == ".." 
|| strings.HasPrefix(file, "../") { - return "", errors.Newf(c.pos, "cannot refer to parent directory") + return "", errors.Newf(pos, "cannot refer to parent directory") } return file, nil } -// fsGlob is like [fs.Glob] but only includes dot-prefixed files +// fsGlob is like [iofs.Glob] but only includes dot-prefixed files // when the dot is explictly present in an element. // TODO: add option for including dot files? -func fsGlob(fsys fs.FS, pattern string) ([]string, error) { +func fsGlob(fsys iofs.FS, pattern string) ([]string, error) { pattern = path.Clean(pattern) - matches, err := fs.Glob(fsys, pattern) + matches, err := iofs.Glob(fsys, pattern) if err != nil { return nil, err } + return filterFsGlobResults(pattern, matches...), nil +} + +// filterFsGlobResults applies additional filtering on the given +// matches to only include dot-prefixed files when the dot is +// explictly present in the corresponding pattern element. +func filterFsGlobResults(pattern string, matches ...string) []string { patElems := strings.Split(pattern, "/") included := func(m string) bool { for i, elem := range strings.Split(m, "/") { @@ -324,10 +361,10 @@ func fsGlob(fsys fs.FS, pattern string) ([]string, error) { i++ } } - return matches[:i], nil + return matches[:i] } -func (c *compiler) decodeFile(file, scope string, schema adt.Value) (adt.Expr, errors.Error) { +func (c *compiler) decodeFile(file, scope string) (adt.Expr, errors.Error) { // Do not use the most obvious filetypes.Input in order to disable "auto" // mode. f, err := filetypes.ParseFileAndType(file, scope, filetypes.Def) @@ -335,7 +372,7 @@ func (c *compiler) decodeFile(file, scope string, schema adt.Value) (adt.Expr, e return nil, errors.Promote(err, "invalid file type") } - // Open and pre-load the file system using fs.FS, instead of relying + // Open and pre-load the file system using iofs.FS. 
r, err := c.fs.Open(file) if err != nil { return nil, errors.Newf(c.pos, "open %v: no such file or directory", file) @@ -396,3 +433,152 @@ func (c *compiler) decodeFile(file, scope string, schema adt.Value) (adt.Expr, e _, v := value.ToInternal(val) return v, nil } + +// EmbeddedPaths validates the provided attributes as embed +// attributes, returning a slice of [Embed] structs for attributes +// that were successfully validated, and errors for those which were +// not. The filepath should be the filepath of the file from which +// these attributes were extracted, and relative to whatever root is +// going to be used in calls to [Embed.Matches] and [Embed.FindAll]. +func EmbeddedPaths(filepath string, attrsByField map[*ast.Field]*internal.Attr) ([]*Embed, errors.Error) { + if len(attrsByField) == 0 { + return nil, nil + } + var errs errors.Error + embeds := make([]*Embed, 0, len(attrsByField)) + for field, attr := range attrsByField { + if attr.Err != nil { + errs = errors.Append(errs, attr.Err) + continue + } + file, glob, typ, allowEmptyGlob, err := validateAttr(attr) + if err != nil { + errs = errors.Append(errs, err) + continue + } + embed := &Embed{ + Field: field, + Attribute: attr, + FilePath: filepath, + Type: typ, + } + if file != "" { + embed.interpreter = &embeddedFile{ + filepath: file, + } + embeds = append(embeds, embed) + } else if glob != "" { + embed.interpreter = &embeddedGlob{ + glob: glob, + allowEmptyGlob: allowEmptyGlob, + } + embeds = append(embeds, embed) + } + } + return embeds, errs +} + +type Embed struct { + Field *ast.Field + Attribute *internal.Attr + FilePath string + Type string + interpreter embedInterpreter +} + +// Matches reports whether the provided filepath is matched by this +// [Embed] attribute. The filepath should be relative to the same root +// as the filepath provided to [EmbeddedPaths]. E.g. 
if in +// `/wibble/foo/bar.cue` you have `@embed(filename=a/b.json)`, and +// `foo/bar.cue` is the filepath passed to [EmbeddedPaths], then +// [Embed.Matches] will return true if called with `foo/a/b.json`. +func (e *Embed) Matches(filepath string) bool { + return e.interpreter.matches(e, filepath) +} + +// FindAll uses the provided fs to report all the filepaths that +// match this [Embed] attribute. The fs must be relative to the same +// root as the filepath provided to [EmbeddedPaths]. I.e. for the +// filepath provided to [EmbeddedPaths], iofs.Stat(fs, filepath) +// should be accessing the same file which contained this [Embed] +// attribute. +func (e *Embed) FindAll(fs iofs.FS) ([]string, error) { + return e.interpreter.findAll(e, fs) +} + +// IsGlob reports whether this [Embed] attribute represents a glob +// embedding. +func (e *Embed) IsGlob() bool { + _, isGlob := e.interpreter.(*embeddedGlob) + return isGlob +} + +type embedInterpreter interface { + // NB: All filepaths (including any within the Embed) are + // considered relative to the same root. + + matches(e *Embed, filepath string) bool + findAll(e *Embed, fs iofs.FS) ([]string, error) +} + +type embeddedFile struct { + filepath string +} + +func (ef *embeddedFile) matches(e *Embed, filepath string) bool { + dir := path.Dir(e.FilePath) + return filepath == path.Join(dir, ef.filepath) +} + +func (ef *embeddedFile) findAll(e *Embed, fs iofs.FS) ([]string, error) { + dir := path.Dir(e.FilePath) + filepath := path.Join(dir, ef.filepath) + info, err := iofs.Stat(fs, filepath) + if err != nil { + return nil, errors.Wrapf(err, e.Attribute.Pos, "failed to stat %s: %v", filepath, err) + } + if info.IsDir() { + return nil, errors.Newf(e.Attribute.Pos, "%v is a directory", filepath) + } + return []string{filepath}, nil +} + +type embeddedGlob struct { + glob string + allowEmptyGlob bool +} + +func (eg *embeddedGlob) matches(e *Embed, filepath string) bool { + dir := path.Dir(e.FilePath) + if dir != "." 
{ + wasCut := false + filepath, wasCut = strings.CutPrefix(filepath, dir+"/") + if !wasCut { + return false + } + } + result, err := pkgpath.Match(eg.glob, filepath, pkgpath.Unix) + if !result || err != nil { + return false + } + return len(filterFsGlobResults(eg.glob, filepath)) == 1 +} + +func (eg *embeddedGlob) findAll(e *Embed, fs iofs.FS) ([]string, error) { + dir := path.Dir(e.FilePath) + fs, err := iofs.Sub(fs, dir) + if err != nil { + return nil, errors.Wrapf(err, e.Attribute.Pos, "%v", err) + } + filepaths, err := fsGlob(fs, eg.glob) + if err != nil { + return nil, errors.Wrapf(err, e.Attribute.Pos, "%v", err) + } + if !eg.allowEmptyGlob && len(filepaths) == 0 { + return nil, errors.Newf(e.Attribute.Pos, "no matches for glob pattern %q", eg.glob) + } + for i, filepath := range filepaths { + filepaths[i] = path.Join(dir, filepath) + } + return filepaths, nil +} diff --git a/vendor/cuelang.org/go/cue/literal/quote.go b/vendor/cuelang.org/go/cue/literal/quote.go index 2208e2cf7f..78cc355825 100644 --- a/vendor/cuelang.org/go/cue/literal/quote.go +++ b/vendor/cuelang.org/go/cue/literal/quote.go @@ -53,7 +53,7 @@ func (f Form) WithTabIndent(n int) Form { return f } -// WithOptionalIndent is like WithTabIndent, but only returns a multiline +// WithOptionalTabIndent is like [Form.WithTabIndent], but only returns a multiline // strings if it doesn't contain any newline characters. func (f Form) WithOptionalTabIndent(tabs int) Form { f.indent = strings.Repeat("\t", tabs) @@ -82,7 +82,7 @@ var ( // TODO: ExactString: quotes to bytes type if the string cannot be // represented without loss of accuracy. - // Label is like String, but optimized for labels. + // Label is like [String], but optimized for labels. Label Form = stringForm // Bytes defines the format of bytes literal. 
@@ -133,10 +133,11 @@ func (f Form) Append(buf []byte, s string) []byte { buf = append(buf, '#') } if f.multiline { - buf = append(buf, f.quote, f.quote, f.quote, '\n') + buf = append(buf, f.tripleQuote...) + buf = append(buf, '\n') if s == "" { buf = append(buf, f.indent...) - buf = append(buf, f.quote, f.quote, f.quote) + buf = append(buf, f.tripleQuote...) return buf } if len(s) > 0 && s[0] != '\n' { @@ -151,7 +152,7 @@ func (f Form) Append(buf []byte, s string) []byte { if f.multiline { buf = append(buf, '\n') buf = append(buf, f.indent...) - buf = append(buf, f.quote, f.quote, f.quote) + buf = append(buf, f.tripleQuote...) } else { buf = append(buf, f.quote) } @@ -192,7 +193,8 @@ func (f Form) appendEscaped(buf []byte, s string) []byte { r, width = utf8.DecodeRuneInString(s) } if f.exact && width == 1 && r == utf8.RuneError { - buf = append(buf, `\x`...) + buf = f.appendEscape(buf) + buf = append(buf, 'x') buf = append(buf, lowerhex[s[0]>>4]) buf = append(buf, lowerhex[s[0]&0xF]) continue diff --git a/vendor/cuelang.org/go/cue/literal/string.go b/vendor/cuelang.org/go/cue/literal/string.go index e43b093bb5..72ceb56d8c 100644 --- a/vendor/cuelang.org/go/cue/literal/string.go +++ b/vendor/cuelang.org/go/cue/literal/string.go @@ -22,10 +22,12 @@ import ( ) var ( - errSyntax = errors.New("invalid syntax") - errInvalidWhitespace = errors.New("invalid string: invalid whitespace") - errMissingNewline = errors.New( + errSyntax = errors.New("invalid syntax") + errInvalidWhitespace = errors.New("invalid string: invalid whitespace") + errMissingOpeningNewline = errors.New( "invalid string: opening quote of multiline string must be followed by newline") + errMissingClosingNewline = errors.New( + "invalid string: closing quote of multiline string must follow a newline") errUnmatchedQuote = errors.New("invalid string: unmatched quote") // TODO: making this an error is optional according to RFC 4627. But we // could make it not an error if this ever results in an issue. 
@@ -84,7 +86,7 @@ func ParseQuotes(start, end string) (q QuoteInfo, nStart, nEnd int, err error) { switch s[0] { case '"', '\'': q.char = s[0] - if len(s) > 3 && s[1] == s[0] && s[2] == s[0] { + if len(s) > 3 && s[1] == q.char && s[2] == q.char && s[3] != '#' { switch s[3] { case '\n': q.quote = start[:3+q.numHash] @@ -95,7 +97,7 @@ func ParseQuotes(start, end string) (q QuoteInfo, nStart, nEnd int, err error) { } fallthrough default: - return q, 0, 0, errMissingNewline + return q, 0, 0, errMissingOpeningNewline } q.multiline = true q.numChar = 3 @@ -116,13 +118,18 @@ func ParseQuotes(start, end string) (q QuoteInfo, nStart, nEnd int, err error) { } if q.multiline { i := len(end) - len(quote) + hasNewline := false for i > 0 { r, size := utf8.DecodeLastRuneInString(end[:i]) if r == '\n' || !unicode.IsSpace(r) { + hasNewline = r == '\n' break } i -= size } + if !hasNewline { + return q, 0, 0, errMissingClosingNewline + } q.whitespace = end[i : len(end)-len(quote)] if len(start) > nStart && start[nStart] != '\n' { @@ -398,7 +405,7 @@ func unquoteChar(s string, info QuoteInfo) (value rune, multibyte bool, tail str err = errSyntax return } - for j := 0; j < 2; j++ { // one digit already; two more + for j := range 2 { // one digit already; two more x := rune(s[j]) - '0' if x < 0 || x > 7 { err = errSyntax diff --git a/vendor/cuelang.org/go/cue/load/config.go b/vendor/cuelang.org/go/cue/load/config.go index c5ba38500d..14f1d383b7 100644 --- a/vendor/cuelang.org/go/cue/load/config.go +++ b/vendor/cuelang.org/go/cue/load/config.go @@ -29,6 +29,7 @@ import ( "cuelang.org/go/cue/parser" "cuelang.org/go/cue/token" "cuelang.org/go/internal" + "cuelang.org/go/internal/mod/modpkgload" "cuelang.org/go/mod/modconfig" "cuelang.org/go/mod/modfile" "cuelang.org/go/mod/module" @@ -168,6 +169,11 @@ type Config struct { // For example, it is used to determine the main module, // and rooted import paths starting with "./" are relative to it. 
// If Dir is empty, the current directory is used. + // + // When using an Overlay with file entries such as "/foo/bar/baz.cue", + // you can use an absolute path that is a parent of one of the overlaid files, + // such as in this case "/foo" or "/foo/bar", even if these directories + // do not exist in the host filesystem. Dir string // Tags defines boolean tags or key-value pairs to select files to build @@ -276,9 +282,14 @@ type Config struct { // the syntax tree. ParseFile func(name string, src interface{}, cfg parser.Config) (*ast.File, error) - // Overlay provides a mapping of absolute file paths to file contents. If - // the file with the given path already exists, the parser will use the - // alternative file contents provided by the map. + // Overlay provides a mapping of absolute file paths to file contents, + // which are overlaid on top of the host operating system when loading files. + // + // If an overlaid file already exists in the host filesystem, + // the overlaid file contents will be used in its place. + // If an overlaid file does not exist in the host filesystem, + // the loader behaves as if the overlaid file exists with its contents, + // and that that all of its parent directories exist too. Overlay map[string]Source // Stdin defines an alternative for os.Stdin for the file "-". When used, @@ -341,6 +352,8 @@ func addImportQualifier(pkg importPath, name string) (importPath, error) { // It does not initialize c.Context, because that requires the // loader in order to use for build.Loader. func (c Config) complete() (cfg *Config, err error) { + // Ensure [Config.Dir] is a clean and absolute path, + // necessary for matching directory prefixes later. 
if c.Dir == "" { c.Dir, err = os.Getwd() if err != nil { @@ -349,6 +362,9 @@ func (c Config) complete() (cfg *Config, err error) { } else if c.Dir, err = filepath.Abs(c.Dir); err != nil { return nil, err } + if modpkgload.InsideCueMod(c.Dir) { + return nil, fmt.Errorf("cannot load packages inside the %s directory", modDir) + } // TODO: we could populate this already with absolute file paths, // but relative paths cannot be added. Consider what is reasonable. @@ -358,6 +374,9 @@ func (c Config) complete() (cfg *Config, err error) { } c.fileSystem = fsys + // Ensure [Config.ModuleRoot] is a clean and absolute path, + // necessary for matching directory prefixes later. + // // TODO: determine root on a package basis. Maybe we even need a // pkgname.cue.mod // Look to see if there is a cue.mod. @@ -373,6 +392,8 @@ func (c Config) complete() (cfg *Config, err error) { } } else if !filepath.IsAbs(c.ModuleRoot) { c.ModuleRoot = filepath.Join(c.Dir, c.ModuleRoot) + } else { + c.ModuleRoot = filepath.Clean(c.ModuleRoot) } if c.SkipImports { // We should never use the registry in SkipImports mode diff --git a/vendor/cuelang.org/go/cue/load/errors.go b/vendor/cuelang.org/go/cue/load/errors.go index d3442d0db6..8faa41ff2a 100644 --- a/vendor/cuelang.org/go/cue/load/errors.go +++ b/vendor/cuelang.org/go/cue/load/errors.go @@ -43,6 +43,7 @@ func (p *PackageError) fillPos(cwd string, positions []token.Pos) { } // TODO(localize) + func (p *PackageError) Error() string { // Import cycles deserve special treatment. if p.IsImportCycle { @@ -73,6 +74,7 @@ func (e *NoFilesError) InputPositions() []token.Pos { return nil } func (e *NoFilesError) Path() []string { return nil } // TODO(localize) + func (e *NoFilesError) Msg() (string, []interface{}) { // Count files beginning with _, which we will pretend don't exist at all. 
dummy := 0 diff --git a/vendor/cuelang.org/go/cue/load/fs.go b/vendor/cuelang.org/go/cue/load/fs.go index 411a2f2db4..08fc5c63ea 100644 --- a/vendor/cuelang.org/go/cue/load/fs.go +++ b/vendor/cuelang.org/go/cue/load/fs.go @@ -17,6 +17,7 @@ package load import ( "bytes" "cmp" + stderrs "errors" "fmt" "io" iofs "io/fs" @@ -344,11 +345,19 @@ func (fs *ioFS) ReadFile(name string) ([]byte, error) { var _ module.ReadCUEFS = (*ioFS)(nil) +// IsDirWithCUEFiles implements [module.ReadCUEFS] +func (fs *ioFS) IsDirWithCUEFiles(path string) (bool, error) { + return false, stderrs.ErrUnsupported +} + // ReadCUEFile implements [module.ReadCUEFS] by // reading and updating the syntax file cache, which // is shared with the cache used by the [fileSystem.getCUESyntax] // method. func (fs *ioFS) ReadCUEFile(path string, cfg parser.Config) (*ast.File, error) { + if !strings.HasSuffix(path, ".cue") { + return nil, nil + } fpath, err := fs.absPathFromFSPath(path) if err != nil { return nil, err diff --git a/vendor/cuelang.org/go/cue/load/import.go b/vendor/cuelang.org/go/cue/load/import.go index 1e4fb431f0..52fcef8cb0 100644 --- a/vendor/cuelang.org/go/cue/load/import.go +++ b/vendor/cuelang.org/go/cue/load/import.go @@ -64,10 +64,8 @@ func (l *loader) importPkg(pos token.Pos, p *build.Instance) []*build.Instance { return []*build.Instance{p} } - for _, item := range l.stk { - if item == p.ImportPath { - return retErr(&PackageError{Message: errors.NewMessagef("package import cycle not allowed")}) - } + if slices.Contains(l.stk, p.ImportPath) { + return retErr(&PackageError{Message: errors.NewMessagef("package import cycle not allowed")}) } l.stk.Push(p.ImportPath) defer l.stk.Pop() @@ -144,6 +142,11 @@ func (l *loader) importPkg(pos token.Pos, p *build.Instance) []*build.Instance { // See https://cuelang.org/docs/concept/modules-packages-instances/#instances. 
for _, d := range dirs { dir := filepath.Clean(d[1]) + // firstDir keeps track of whether we're still looking at the initial + // directory rather than one of its parents. If there are no CUE files + // in the initial directory, we shouldn't walk to its parents because + // the initial directory isn't itself a CUE package. + firstDir := true for { sd, ok := l.dirCachedBuildFiles[dir] if !ok { @@ -156,6 +159,7 @@ func (l *loader) importPkg(pos token.Pos, p *build.Instance) []*build.Instance { } return retErr(errors.Wrapf(err, token.NoPos, "import failed reading dir %v", dir)) } + added := false for _, name := range sd.filenames { file, err := filetypes.ParseFileAndType(name, "", filetypes.Input) if err != nil { @@ -163,11 +167,11 @@ func (l *loader) importPkg(pos token.Pos, p *build.Instance) []*build.Instance { Filename: name, ExcludeReason: errors.Newf(token.NoPos, "unknown filetype"), }) - } else { - fp.add(dir, file, 0) + } else if fp.add(dir, file, 0) { + added = true } } - if p.PkgName == "" || !inModule || l.cfg.isModRoot(dir) || dir == d[0] { + if p.PkgName == "" || !inModule || l.cfg.isModRoot(dir) || dir == d[0] || (firstDir && !added) { break } @@ -182,6 +186,7 @@ func (l *loader) importPkg(pos token.Pos, p *build.Instance) []*build.Instance { break } dir = parent + firstDir = false } } @@ -390,12 +395,17 @@ func importPathFromAbsDir(c *Config, absDir string, origPath string) (importPath return "", fmt.Errorf("cannot determine import path for %q (root undefined)", origPath) } - dir := filepath.Clean(absDir) - if !strings.HasPrefix(dir, c.ModuleRoot) { + subdir, ok := strings.CutPrefix(filepath.Clean(absDir), c.ModuleRoot) + if !ok { return "", fmt.Errorf("cannot determine import path for %q (dir outside of root)", origPath) } - pkg := filepath.ToSlash(dir[len(c.ModuleRoot):]) + pkg := filepath.ToSlash(subdir) + if pkg != "" && !strings.HasPrefix(pkg, "/") { + // [Config.ModuleRoot] was the root of the filesystem, + // and it had a trailing slash which got 
removed as a prefix; add it back. + pkg = "/" + pkg + } switch { case strings.HasPrefix(pkg, "/cue.mod/"): pkg = pkg[len("/cue.mod/"):] @@ -432,11 +442,11 @@ func (l *loader) newInstance(pos token.Pos, p importPath) *build.Instance { return i } mf, err1 := l.modFileCache.modFile(mv, modRoot) - if err != nil { + if err1 != nil { i.Err = errors.Append(i.Err, errors.Promote(err1, "")) } root, err1 := absPathForSourceLoc(modRoot) - if err != nil { + if err1 != nil { i.Err = errors.Append(i.Err, errors.Promote(err1, "")) } else { i.Root = root diff --git a/vendor/cuelang.org/go/cue/load/instances.go b/vendor/cuelang.org/go/cue/load/instances.go index 3f9499d178..84f5d0a082 100644 --- a/vendor/cuelang.org/go/cue/load/instances.go +++ b/vendor/cuelang.org/go/cue/load/instances.go @@ -46,6 +46,11 @@ func Instances(args []string, c *Config) []*build.Instance { if len(args) == 0 { args = []string{"."} } + // Note that Config is used early on to return error instances; ensure it's not nil. + if c == nil { + c = &Config{} + } + // TODO: This requires packages to be placed before files. At some point this // could be relaxed. i := 0 @@ -65,9 +70,6 @@ func Instances(args []string, c *Config) []*build.Instance { return []*build.Instance{c.newErrInstance(err)} } ctx := context.TODO() - if c == nil { - c = &Config{} - } newC, err := c.complete() if err != nil { return []*build.Instance{c.newErrInstance(err)} @@ -97,6 +99,20 @@ func Instances(args []string, c *Config) []*build.Instance { pkgArgs = pkgArgs1 } + // When outside a module, a major-only version like foo.com/bar@v2 + // cannot be resolved. Provide a helpful error suggesting alternatives. 
+ if c.modFile == nil || c.modFile.Module == "" { + for _, p := range pkgArgs { + ip := ast.ParseImportPath(p) + if ip.Version != "" && semver.Major(ip.Version) == ip.Version { + return []*build.Instance{c.newErrInstance(fmt.Errorf( + "package %s: %[2]s is not a valid version to use as an argument; use a fully qualified version like %[2]s.0.0, %[2]s.latest, or @latest", + p, ip.Version, + ))} + } + } + } + tg := newTagger(c) var pkgs *modpkgload.Packages @@ -218,10 +234,7 @@ func loadAbsPackage( ip := ast.ParseImportPath(pkg) ip.Version = semver.Major(mv.Version()) - pkgs, err := loadPackages(ctx, cfg, mf, loc, []string{ip.String()}, tg) - if err != nil { - return "", nil, err - } + pkgs := loadPackages(ctx, cfg, mf, loc, []string{ip.String()}, tg) return ip.String(), pkgs, nil } @@ -253,7 +266,7 @@ func loadPackagesFromArgs( if err != nil { return nil, fmt.Errorf("cannot get syntax for %q: %w", f.Filename, err) } - for _, imp := range syntax.Imports { + for imp := range syntax.ImportSpecs() { pkgPath, err := strconv.Unquote(imp.Path.Value) if err != nil { // Should never happen. @@ -271,7 +284,7 @@ func loadPackagesFromArgs( }, slices.Sorted(maps.Keys(pkgPaths)), tg, - ) + ), nil } func loadPackages( @@ -281,7 +294,7 @@ func loadPackages( mainModLoc module.SourceLoc, pkgPaths []string, tg *tagger, -) (*modpkgload.Packages, error) { +) *modpkgload.Packages { mainModPath := mainMod.QualifiedModule() reqs := modrequirements.NewRequirements( mainModPath, @@ -325,7 +338,7 @@ func loadPackages( } return true }, - ), nil + ) } func isAbsVersionPackage(p string) bool { diff --git a/vendor/cuelang.org/go/cue/load/loader_common.go b/vendor/cuelang.org/go/cue/load/loader_common.go index 613b6c8e81..95cfbead63 100644 --- a/vendor/cuelang.org/go/cue/load/loader_common.go +++ b/vendor/cuelang.org/go/cue/load/loader_common.go @@ -153,7 +153,11 @@ func (fp *fileProcessor) finalize(p *build.Instance) errors.Error { } // add adds the given file to the appropriate package in fp. 
-func (fp *fileProcessor) add(root string, file *build.File, mode importMode) { +// It reports whether the file might be considered part of the +// package being loaded, even if it ends up not added to +// the build files, for example because of an @if constraint or +// it's a tool file. +func (fp *fileProcessor) add(root string, file *build.File, mode importMode) bool { fullPath := file.Filename if fullPath != "-" { if !filepath.IsAbs(fullPath) { @@ -183,7 +187,7 @@ func (fp *fileProcessor) add(root string, file *build.File, mode importMode) { } if err := setFileSource(fp.c, file); err != nil { badFile(errors.Promote(err, "")) - return + return false } if file.Encoding != build.CUE { @@ -191,7 +195,7 @@ func (fp *fileProcessor) add(root string, file *build.File, mode importMode) { if sameDir { p.OrphanedFiles = append(p.OrphanedFiles, file) } - return + return false } if (mode & allowExcludedFiles) == 0 { var badPrefix string @@ -202,7 +206,7 @@ func (fp *fileProcessor) add(root string, file *build.File, mode importMode) { } if badPrefix != "" { if !sameDir { - return + return false } file.ExcludeReason = errors.Newf(token.NoPos, "filename starts with a '%s'", badPrefix) if file.Interpretation == "" { @@ -210,7 +214,7 @@ func (fp *fileProcessor) add(root string, file *build.File, mode importMode) { } else { p.OrphanedFiles = append(p.OrphanedFiles, file) } - return + return false } } // Note: when path is "-" (stdin), it will already have @@ -219,7 +223,7 @@ func (fp *fileProcessor) add(root string, file *build.File, mode importMode) { pf, perr := fp.c.fileSystem.getCUESyntax(file, fp.c.parserConfig) if perr != nil { badFile(errors.Promote(perr, "add failed")) - return + return false } pkg := pf.PackageName() @@ -239,7 +243,7 @@ func (fp *fileProcessor) add(root string, file *build.File, mode importMode) { if q == nil && !sameDir { // It's a file in a parent directory that doesn't correspond // to a package in the original directory. 
- return + return false } if q == nil { q = fp.c.Context.NewInstance(p.Dir, nil) @@ -262,7 +266,7 @@ func (fp *fileProcessor) add(root string, file *build.File, mode importMode) { file.ExcludeReason = excludeError{errors.Newf(pos, "no package name")} p.IgnoredFiles = append(p.IgnoredFiles, file) } - return + return false } if !fp.c.AllCUEFiles { @@ -282,7 +286,7 @@ func (fp *fileProcessor) add(root string, file *build.File, mode importMode) { } file.ExcludeReason = err p.IgnoredFiles = append(p.IgnoredFiles, file) - return + return true } } @@ -295,7 +299,7 @@ func (fp *fileProcessor) add(root string, file *build.File, mode importMode) { file.ExcludeReason = excludeError{errors.Newf(pos, "package is %s, want %s", pkg, p.PkgName)} p.IgnoredFiles = append(p.IgnoredFiles, file) - return + return false } if !fp.allPackages { badFile(&MultiplePackageError{ @@ -303,7 +307,7 @@ func (fp *fileProcessor) add(root string, file *build.File, mode importMode) { Packages: []string{p.PkgName, pkg}, Files: []string{fp.firstFile, base}, }) - return + return false } } } @@ -311,7 +315,7 @@ func (fp *fileProcessor) add(root string, file *build.File, mode importMode) { isTest := strings.HasSuffix(base, "_test"+cueSuffix) isTool := strings.HasSuffix(base, "_tool"+cueSuffix) - for _, spec := range pf.Imports { + for spec := range pf.ImportSpecs() { quoted := spec.Path.Value path, err := strconv.Unquote(quoted) if err != nil { @@ -344,6 +348,7 @@ func (fp *fileProcessor) add(root string, file *build.File, mode importMode) { default: p.BuildFiles = append(p.BuildFiles, file) } + return true } // isLocalImport reports whether the import path is diff --git a/vendor/cuelang.org/go/cue/load/search.go b/vendor/cuelang.org/go/cue/load/search.go index da5cb24ba0..5782fab16d 100644 --- a/vendor/cuelang.org/go/cue/load/search.go +++ b/vendor/cuelang.org/go/cue/load/search.go @@ -144,8 +144,11 @@ func (l *loader) matchPackagesInFS(pattern, pkgName string) *match { // Could be smarter but this one 
optimization // is enough for now, since ... is usually at the // end of a path. - i := strings.Index(pattern, "...") - dir, _ := path.Split(pattern[:i]) + // + // TODO this logic entirely ignores the pattern that's + // after the "...". See cuelang.org/issue/3212 + before, _, _ := strings.Cut(pattern, "...") + dir, _ := path.Split(before) root := l.abs(dir) @@ -243,15 +246,13 @@ func (l *loader) importPathsQuiet(patterns []string) []*match { orig := a pkgName := l.cfg.Package - switch p := strings.IndexByte(a, ':'); { - case p < 0: - case p == 0: - pkgName = a[1:] - a = "." - default: - pkgName = a[p+1:] - a = a[:p] + ip := ast.ParseImportPath(a) + if ip.ExplicitQualifier { + pkgName = ip.Qualifier } + ip.Qualifier = "" + ip.ExplicitQualifier = false + a = ip.String() if pkgName == "*" { pkgName = "" } @@ -486,15 +487,23 @@ func appendExpandedUnqualifiedPackagePath(pkgPaths []resolvedPackageArg, origp s // Note: // * We know that pattern contains "..." // * We know that pattern is relative to the module root +// +// Note: this logic matches the logic in [loader.loadImportPathsQuiet]. +// TODO de-duplicate the logic so wildcards are expanded exactly once using a single piece of logic. func appendExpandedWildcardPackagePath(pkgPaths []resolvedPackageArg, pattern ast.ImportPath, pkgQual string, mainModRoot module.SourceLoc, mainModPath string, tg *tagger) ([]resolvedPackageArg, error) { modIpath := ast.ParseImportPath(mainModPath) // Find directory to begin the scan. // Could be smarter but this one optimization is enough for now, // since ... is usually at the end of a path. - // TODO: strip package qualifier. + // + // TODO this logic entirely ignores the pattern that's + // after the "...". 
See cuelang.org/issue/3212 i := strings.Index(pattern.Path, "...") dir, _ := path.Split(pattern.Path[:i]) dir = path.Join(mainModRoot.Dir, dir) + if pattern.ExplicitQualifier { + pkgQual = pattern.Qualifier + } var isSelected func(string) bool switch pkgQual { case "_": diff --git a/vendor/cuelang.org/go/cue/load/tags.go b/vendor/cuelang.org/go/cue/load/tags.go index 4d6d6d49e1..e44670b935 100644 --- a/vendor/cuelang.org/go/cue/load/tags.go +++ b/vendor/cuelang.org/go/cue/load/tags.go @@ -83,14 +83,12 @@ type TagVar struct { Description string } -const rfc3339 = "2006-01-02T15:04:05.999999999Z" - // DefaultTagVars creates a new map with a set of supported injection variables. func DefaultTagVars() map[string]TagVar { return map[string]TagVar{ "now": { Func: func() (ast.Expr, error) { - return ast.NewString(time.Now().UTC().Format(rfc3339)), nil + return ast.NewString(time.Now().UTC().Format(time.RFC3339Nano)), nil }, }, "os": { @@ -125,10 +123,7 @@ func DefaultTagVars() map[string]TagVar { "rand": { Func: func() (ast.Expr, error) { var b [16]byte - _, err := rand.Read(b[:]) - if err != nil { - return nil, err - } + rand.Read(b[:]) var hx [34]byte hx[0] = '0' hx[1] = 'x' @@ -143,8 +138,7 @@ func varToString(s string, err error) (ast.Expr, error) { if err != nil { return nil, err } - x := ast.NewString(s) - return x, nil + return ast.NewString(s), nil } // A tag binds an identifier to a field to allow passing command-line values. 
@@ -201,7 +195,7 @@ func parseTag(pos token.Pos, body string) (t *tag, err errors.Error) { } if s, ok, _ := a.Lookup(1, "short"); ok { - for _, s := range strings.Split(s, "|") { + for s := range strings.SplitSeq(s, "|") { if !ast.IsValidIdent(t.key) { return t, errors.Newf(pos, "invalid identifier %q", s) } @@ -218,8 +212,11 @@ func parseTag(pos token.Pos, body string) (t *tag, err errors.Error) { func (t *tag) inject(value string, tg *tagger) errors.Error { e, err := cli.ParseValue(token.NoPos, t.key, value, t.kind) + if err != nil { + return err + } t.injectValue(e, tg) - return err + return nil } func (t *tag) injectValue(x ast.Expr, tg *tagger) { @@ -262,8 +259,7 @@ func findTags(b *build.Instance) (tags []*tag, errs errors.Error) { case *ast.Field: // TODO: allow optional fields? _, _, err := ast.LabelName(x.Label) - _, ok := internal.ConstraintToken(x) - if err != nil || ok { + if err != nil || x.Constraint != token.ILLEGAL { findInvalidTags(n, "@tag not allowed within field constraint") return false } diff --git a/vendor/cuelang.org/go/cue/marshal.go b/vendor/cuelang.org/go/cue/marshal.go index 8b46cc1697..24ad3b6d00 100644 --- a/vendor/cuelang.org/go/cue/marshal.go +++ b/vendor/cuelang.org/go/cue/marshal.go @@ -19,10 +19,9 @@ import ( "compress/gzip" "encoding/gob" "path/filepath" - "strings" + "strconv" "cuelang.org/go/cue/ast" - "cuelang.org/go/cue/ast/astutil" "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/format" @@ -31,6 +30,13 @@ import ( "cuelang.org/go/internal/core/export" ) +// Unmarshaler is the interface implemented by types that can unmarshal themselves from a CUE value. +// While input can be assumed to be a valid Value, +// there is no guarantee there are no errors or that the value is complete. +type Unmarshaler interface { + UnmarshalCUE(v Value) error +} + // root. 
type instanceData struct { Root bool @@ -91,7 +97,6 @@ func compileInstances(r *Runtime, data []*instanceData) (instances []*Instance, } builds = append(builds, b.build(i)) } - return r.BuildInstances(builds) } @@ -135,6 +140,7 @@ func (r *Runtime) Marshal(values ...InstanceOrValue) (b []byte, err error) { var errs errors.Error var stageInstance func(i Value) (pos int) + allStaged := make(map[*build.Instance]bool) stageInstance = func(i Value) (pos int) { inst := i.BuildInstance() if p, ok := done[inst.ImportPath]; ok { @@ -142,16 +148,16 @@ func (r *Runtime) Marshal(values ...InstanceOrValue) (b []byte, err error) { } // TODO: support exporting instance file, _ := export.Def(r.runtime(), inst.ID(), i.instance().root) - imports := []string{} - file.VisitImports(func(i *ast.ImportDecl) { - for _, spec := range i.Specs { - info, _ := astutil.ParseImportSpec(spec) - imports = append(imports, info.ID) + imports := make(map[*build.Instance]bool) + for spec := range file.ImportSpecs() { + path, _ := strconv.Unquote(spec.Path.Value) + if impInst := inst.LookupImport(path); impInst != nil { + imports[impInst] = true } - }) + } if inst.PkgName != "" { - if pkg := internal.Package(file); pkg == nil { + if pkg, _ := internal.Package(file); pkg == nil { pkg := &ast.Package{Name: ast.NewIdent(inst.PkgName)} file.Decls = append([]ast.Decl{pkg}, file.Decls...) } else if pkg.Name.Name != inst.PkgName { @@ -188,21 +194,22 @@ func (r *Runtime) Marshal(values ...InstanceOrValue) (b []byte, err error) { p := len(staged) - 1 - for _, imp := range imports { - i := getImportFromPath(r.runtime(), imp) - if i == nil || !strings.Contains(imp, ".") { - continue // a builtin package. 
+ for impInst := range imports { + if allStaged[impInst] { + continue + } + if v := r.runtime().LoadInstance(impInst); v != nil { + allStaged[impInst] = true + imp := getImportFromBuild(r.runtime(), impInst, v) + stageInstance(imp.Value()) } - stageInstance(i.Value()) } return p } - for _, val := range values { staged[stageInstance(val.Value())].Root = true } - buf := &bytes.Buffer{} buf.WriteByte(version) diff --git a/vendor/cuelang.org/go/cue/op.go b/vendor/cuelang.org/go/cue/op.go index 6f08e4e04a..6e0a46d662 100644 --- a/vendor/cuelang.org/go/cue/op.go +++ b/vendor/cuelang.org/go/cue/op.go @@ -58,4 +58,6 @@ const ( IntModuloOp Op = adt.IntModuloOp InterpolationOp Op = adt.InterpolationOp + + SpreadOp Op = adt.SpreadOp ) diff --git a/vendor/cuelang.org/go/cue/parser/interface.go b/vendor/cuelang.org/go/cue/parser/interface.go index ccf43a53a4..517e1ba996 100644 --- a/vendor/cuelang.org/go/cue/parser/interface.go +++ b/vendor/cuelang.org/go/cue/parser/interface.go @@ -155,31 +155,6 @@ func Version(v string) Option { }) } -// FromVersion specifies until which legacy version the parser should provide -// backwards compatibility. -// Deprecated: use [Version] instead. -func FromVersion(version int) Option { - return optionFunc(func(cfg *Config) {}) -} - -// DeprecationError is a sentinel error to indicate that an error is -// related to an unsupported old CUE syntax. -type DeprecationError struct { - Version int -} - -func (e *DeprecationError) Error() string { - return "try running `cue fix` (possibly with an earlier version, like v0.2.2) to upgrade" -} - -const ( - // Deprecated: see [Version]. - Latest = 0 - - // Deprecated: see [Version]. - FullBackwardCompatibility = 0 -) - // FileOffset specifies the File position info to use. // // Deprecated: this has no effect. 
diff --git a/vendor/cuelang.org/go/cue/parser/parser.go b/vendor/cuelang.org/go/cue/parser/parser.go index 2a488fd714..1934cb46b9 100644 --- a/vendor/cuelang.org/go/cue/parser/parser.go +++ b/vendor/cuelang.org/go/cue/parser/parser.go @@ -16,6 +16,7 @@ package parser import ( "fmt" + "slices" "strings" "unicode" @@ -44,8 +45,9 @@ type parser struct { indent int // indentation used for tracing output // Comments - leadComment *ast.CommentGroup - comments *commentState + leadComment *ast.CommentGroup + comments *commentState + commentStack []*commentState // to reuse [commentState] allocations // Next token, filled by [parser.next0]. pos token.Pos // token position @@ -72,8 +74,6 @@ type parser struct { exprLev int // < 0: in control clause, >= 0: in expression imports []*ast.ImportSpec // list of imports - - version int } func (p *parser) init(filename string, src []byte, opts []Option) { @@ -109,11 +109,26 @@ type commentState struct { lastPos int8 } +func (p *parser) allocCommentState() *commentState { + if n := len(p.commentStack); n > 0 { + c := p.commentStack[n-1] + p.commentStack = p.commentStack[:n-1] + return c + } + return &commentState{} +} + +func (p *parser) freeCommentState(c *commentState) { + // Ensure no pointers remain, which can hold onto memory. + // We only reuse the groups slice capacity. 
+ *c = commentState{groups: c.groups[:0]} + p.commentStack = append(p.commentStack, c) +} + // openComments reserves the next doc comment for the caller and flushes func (p *parser) openComments() *commentState { - child := &commentState{ - parent: p.comments, - } + child := p.allocCommentState() + child.parent = p.comments if c := p.comments; c != nil && c.isList > 0 { if c.lastChild != nil { var groups []*ast.CommentGroup @@ -130,16 +145,14 @@ func (p *parser) openComments() *commentState { } } ast.SetComments(c.lastChild, groups) - c.groups = nil } else { - c.lastChild = nil // attach before next for _, cg := range c.groups { cg.Position = 0 } - child.groups = c.groups - c.groups = nil + child.groups = append(child.groups, c.groups...) } + c.groups = c.groups[:0] } if p.leadComment != nil { child.groups = append(child.groups, p.leadComment) @@ -156,10 +169,9 @@ func (p *parser) openList() { p.comments.isList++ return } - c := &commentState{ - parent: p.comments, - isList: 1, - } + c := p.allocCommentState() + c.parent = p.comments + c.isList = 1 p.comments = c } @@ -175,7 +187,7 @@ func (p *parser) closeList() { cg.Position = c.lastPos ast.AddComment(c.lastChild, cg) } - c.groups = nil + c.groups = c.groups[:0] } switch c.isList--; { case c.isList < 0: @@ -192,6 +204,7 @@ func (p *parser) closeList() { } parent.pos++ p.comments = parent + p.freeCommentState(c) } } @@ -218,7 +231,7 @@ func (c *commentState) closeNode(p *parser, n ast.Node) ast.Node { } } } - c.groups = nil + p.freeCommentState(c) return n } @@ -402,21 +415,6 @@ func (p *parser) next() { } } -// assertV0 indicates the last version at which a certain feature was -// supported. 
-func (p *parser) assertV0(pos token.Pos, minor, patch int, name string) { - v := internal.Version(minor, patch) - base := p.version - if base == 0 { - base = internal.APIVersionSupported - } - if base > v { - p.errors = errors.Append(p.errors, - errors.Wrapf(&DeprecationError{v}, pos, - "use of deprecated %s (deprecated as of v0.%d.%d)", name, minor, patch+1)) - } -} - func (p *parser) errf(pos token.Pos, msg string, args ...interface{}) { // ePos := p.file.Position(pos) ePos := pos @@ -462,6 +460,7 @@ func (p *parser) expect(tok token.Token) token.Pos { pos := p.pos if p.tok != tok { p.errorExpected(pos, "'"+tok.String()+"'") + pos = token.NoPos } p.next() // make progress return pos @@ -478,15 +477,14 @@ func (p *parser) expectClosing(tok token.Token, context string) token.Pos { } func (p *parser) expectComma() { - // semicolon is optional before a closing ')', ']', '}', or newline - if p.tok != token.RPAREN && p.tok != token.RBRACE && p.tok != token.EOF { - switch p.tok { - case token.COMMA: - p.next() - default: - p.errorExpected(p.pos, "','") - syncExpr(p) - } + switch p.tok { + case token.COMMA: + p.next() + // the comma is optional before a closing ')', ']', '}', or newline + case token.RPAREN, token.RBRACE, token.EOF: + default: + p.errorExpected(p.pos, "','") + syncExpr(p) } } @@ -494,10 +492,8 @@ func (p *parser) atComma(context string, follow ...token.Token) bool { if p.tok == token.COMMA { return true } - for _, t := range follow { - if p.tok == t { - return false - } + if slices.Contains(follow, p.tok) { + return false } // TODO: find a way to detect crossing lines now we don't have a semi. 
if p.lit == "\n" { @@ -525,7 +521,7 @@ func syncExpr(p *parser) { p.syncCnt++ return } - if p.syncPos.Before(p.pos) { + if p.syncPos.Compare(p.pos) < 0 { p.syncPos = p.pos p.syncCnt = 0 return @@ -566,19 +562,32 @@ func (p *parser) safePos(pos token.Pos) (res token.Pos) { func (p *parser) parseIdent() *ast.Ident { c := p.openComments() - pos := p.pos - name := "_" - if p.tok == token.IDENT { - name = p.lit - p.next() - } else { - p.expect(token.IDENT) // use expect() error handling + name := p.lit + pos := p.expect(token.IDENT) + if !pos.IsValid() { + name = "_" } ident := &ast.Ident{NamePos: pos, Name: name} c.closeNode(p, ident) return ident } +// checkDeclIdent validates that an identifier is not a reserved +// double-underscore identifier. Use this when an identifier is being declared. +func (p *parser) checkDeclIdent(ident *ast.Ident) { + if strings.HasPrefix(ident.Name, "__") { + p.errf(ident.NamePos, "identifiers starting with '__' are reserved") + } +} + +// parseIdentDecl parses an identifier and validates that it's not a reserved +// double-underscore identifier. Use this for identifier declarations. +func (p *parser) parseIdentDecl() *ast.Ident { + ident := p.parseIdent() + p.checkDeclIdent(ident) + return ident +} + func (p *parser) parseKeyIdent() *ast.Ident { c := p.openComments() pos := p.pos @@ -601,7 +610,17 @@ func (p *parser) parseOperand() (expr ast.Expr) { switch p.tok { case token.IDENT: - return p.parseIdent() + ident := p.parseIdent() + // Check for optional reference marker (?) + // Don't consume ? 
if it's followed by : (that's a field constraint, not optional reference) + if p.tok == token.OPTION { + // Peek ahead to see if this is a field constraint (foo?: value) + p.peek() + if p.peekToken.tok != token.COLON { + return p.wrapOptional(ident, p.tok) + } + } + return ident case token.LBRACE: return p.parseStruct() @@ -645,7 +664,8 @@ func (p *parser) parseOperand() (expr ast.Expr) { return &ast.ParenExpr{ Lparen: lparen, X: x, - Rparen: rparen} + Rparen: rparen, + } default: if p.tok.IsKeyword() { @@ -697,14 +717,26 @@ func (p *parser) parseIndexOrSlice(x ast.Expr) (expr ast.Expr) { Lbrack: lbrack, Low: index[0], High: index[1], - Rbrack: rbrack} + Rbrack: rbrack, + } } - return &ast.IndexExpr{ + result := &ast.IndexExpr{ X: x, Lbrack: lbrack, Index: index[0], - Rbrack: rbrack} + Rbrack: rbrack, + } + return p.wrapOptional(result, p.tok) +} + +func (p *parser) wrapOptional(x ast.Expr, tok token.Token) ast.Expr { + if tok == token.OPTION { + pos := p.pos + p.next() + return &ast.PostfixExpr{X: x, Op: token.OPTION, OpPos: pos} + } + return x } func (p *parser) parseCallOrConversion(fun ast.Expr) (expr *ast.CallExpr) { @@ -735,7 +767,8 @@ func (p *parser) parseCallOrConversion(fun ast.Expr) (expr *ast.CallExpr) { Fun: fun, Lparen: lparen, Args: list, - Rparen: rparen} + Rparen: rparen, + } } // TODO: inline this function in parseFieldList once we no longer user comment @@ -795,7 +828,7 @@ func (p *parser) parseLetDecl() (decl ast.Decl, ident *ast.Ident) { } defer func() { c.closeNode(p, decl) }() - ident = p.parseIdent() + ident = p.parseIdentDecl() assign := p.expect(token.BIND) expr := p.parseRHS() @@ -819,7 +852,7 @@ func (p *parser) parseComprehension() (decl ast.Decl, ident *ast.Ident) { tok := p.tok pos := p.pos - clauses, fc := p.parseComprehensionClauses(true) + clauses, fc := p.parseComprehensionClauses() if fc != nil { ident = &ast.Ident{ NamePos: pos, @@ -833,13 +866,19 @@ func (p *parser) parseComprehension() (decl ast.Decl, ident *ast.Ident) { expr 
:= p.parseStruct() sc.closeExpr(p, expr) + var fallbackClause *ast.FallbackClause + if p.tok == token.ELSE || p.tok == token.FALLBACK { + fallbackClause = p.parseFallbackClause(clauses) + } + if p.atComma("struct literal", token.RBRACE) { // TODO: may be EOF p.next() } return &ast.Comprehension{ - Clauses: clauses, - Value: expr, + Clauses: clauses, + Value: expr, + Fallback: fallbackClause, }, nil } @@ -852,24 +891,19 @@ func (p *parser) parseField() (decl ast.Decl) { defer func() { c.closeNode(p, decl) }() pos := p.pos - - this := &ast.Field{Label: nil} - m := this - tok := p.tok label, expr, decl, ok := p.parseLabel(false) if decl != nil { return decl } - m.Label = label if !ok { if expr == nil { expr = p.parseRHS() } if a, ok := expr.(*ast.Alias); ok { - p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`) + p.errf(a.Pos(), `pre-v0.2 alias; use "let X = expr" instead`) p.consumeDeclComma() return a } @@ -878,9 +912,15 @@ func (p *parser) parseField() (decl ast.Decl) { return e } + this := &ast.Field{} + m := this + m.Label = label + + // Parse postfix alias if present + m.Alias = p.parsePostfixAlias() + switch p.tok { case token.OPTION, token.NOT: - m.Optional = p.pos m.Constraint = p.tok p.next() } @@ -893,20 +933,25 @@ func (p *parser) parseField() (decl ast.Decl) { switch p.tok { case token.COLON: + // Now we know it's being used as a label, validate double-underscore + if ident, ok := label.(*ast.Ident); ok { + p.checkDeclIdent(ident) + } case token.COMMA: p.expectComma() // sync parser. 
fallthrough case token.RBRACE, token.EOF: if a, ok := expr.(*ast.Alias); ok { - p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`) + p.errf(a.Pos(), `pre-v0.2 alias; use "let X = expr" instead`) return a } switch tok { case token.IDENT, token.LBRACK, token.LPAREN, token.STRING, token.INTERPOLATION, token.NULL, token.TRUE, token.FALSE, - token.FOR, token.IF, token.LET, token.IN: + token.FOR, token.IF, token.LET, token.IN, + token.TRY, token.ELSE, token.FALLBACK: return &ast.EmbedDecl{Expr: expr} } fallthrough @@ -917,11 +962,7 @@ func (p *parser) parseField() (decl ast.Decl) { } m.TokenPos = p.pos - m.Token = p.tok - if p.tok != token.COLON { - p.errorExpected(pos, "':'") - } - p.next() // : + p.expect(token.COLON) for { if l, ok := m.Label.(*ast.ListLit); ok && len(l.Elts) != 1 { @@ -929,7 +970,7 @@ func (p *parser) parseField() (decl ast.Decl) { } label, expr, _, ok := p.parseLabel(true) - if !ok || (p.tok != token.COLON && p.tok != token.OPTION && p.tok != token.NOT) { + if !ok || (p.tok != token.COLON && p.tok != token.OPTION && p.tok != token.NOT && p.tok != token.TILDE) { if expr == nil { expr = p.parseRHS() } @@ -940,24 +981,17 @@ func (p *parser) parseField() (decl ast.Decl) { m.Value = &ast.StructLit{Elts: []ast.Decl{field}} m = field + // Parse postfix alias if present + m.Alias = p.parsePostfixAlias() + switch p.tok { case token.OPTION, token.NOT: - m.Optional = p.pos m.Constraint = p.tok p.next() } m.TokenPos = p.pos - m.Token = p.tok - if p.tok != token.COLON { - if p.tok.IsLiteral() { - p.errf(p.pos, "expected ':'; found %s", p.lit) - } else { - p.errf(p.pos, "expected ':'; found %s", p.tok) - } - break - } - p.next() + p.expect(token.COLON) } if attrs := p.parseAttributes(); attrs != nil { @@ -997,7 +1031,7 @@ func (p *parser) parseLabel(rhs bool) (label ast.Label, expr ast.Expr, decl ast. 
tok := p.tok switch tok { - case token.FOR, token.IF: + case token.FOR, token.IF, token.TRY: if rhs { expr = p.parseExpr() break @@ -1008,6 +1042,10 @@ func (p *parser) parseLabel(rhs bool) (label ast.Label, expr ast.Expr, decl ast. } expr = ident + case token.ELSE, token.FALLBACK: + // These keywords can be used as field labels + expr = p.parseExpr() + case token.LET: let, ident := p.parseLetDecl() if let != nil { @@ -1042,10 +1080,6 @@ func (p *parser) parseLabel(rhs bool) (label ast.Label, expr ast.Expr, decl ast. } case *ast.Ident: - if strings.HasPrefix(x.Name, "__") && !rhs { - p.errf(x.NamePos, "identifiers starting with '__' are reserved") - } - expr = p.parseAlias(x) if a, ok := expr.(*ast.Alias); ok { if _, ok = a.Expr.(ast.Label); !ok { @@ -1103,14 +1137,13 @@ func (p *parser) parseStructBody() []ast.Decl { return elts } -// parseComprehensionClauses parses either new-style (first==true) -// or old-style (first==false). +// parseComprehensionClauses parses comprehension clauses. // Should we now disallow keywords as identifiers? If not, we need to // return a list of discovered labels as the alternative. -func (p *parser) parseComprehensionClauses(first bool) (clauses []ast.Clause, c *commentState) { +func (p *parser) parseComprehensionClauses() (clauses []ast.Clause, c *commentState) { // TODO: reuse Template spec, which is possible if it doesn't check the // first is an identifier. 
- + first := true for { switch p.tok { case token.FOR: @@ -1126,11 +1159,11 @@ func (p *parser) parseComprehensionClauses(first bool) (clauses []ast.Clause, c var key, value *ast.Ident var colon token.Pos - value = p.parseIdent() + value = p.parseIdentDecl() if p.tok == token.COMMA { colon = p.expect(token.COMMA) key = value - value = p.parseIdent() + value = p.parseIdentDecl() } c.pos = 4 // params := p.parseParams(nil, ARROW) @@ -1168,7 +1201,7 @@ func (p *parser) parseComprehensionClauses(first bool) (clauses []ast.Clause, c c := p.openComments() letPos := p.expect(token.LET) - ident := p.parseIdent() + ident := p.parseIdentDecl() assign := p.expect(token.BIND) expr := p.parseRHS() @@ -1179,17 +1212,82 @@ func (p *parser) parseComprehensionClauses(first bool) (clauses []ast.Clause, c Expr: expr, })) + case token.TRY: + c := p.openComments() + + tc := &ast.TryClause{Try: p.expect(token.TRY)} + + // Check for assignment form: try x = expr + if p.tok == token.IDENT { + tc.Ident = p.parseIdent() + tc.Equal = p.expect(token.BIND) + tc.Expr = p.parseRHS() + } + + clauses = append(clauses, c.closeClause(p, tc)) + default: return clauses, nil } if p.tok == token.COMMA { p.next() } - first = false } } +// parseFallbackClause parses an else or fallback clause in a comprehension. 
+// It determines the appropriate keyword based on the clause composition: +// - Single if or try clause: expects ELSE, errors on FALLBACK +// - Everything else: expects FALLBACK, errors on ELSE +func (p *parser) parseFallbackClause(clauses []ast.Clause) *ast.FallbackClause { + if p.trace { + defer un(trace(p, "FallbackClause")) + } + c := p.openComments() + + if p.experiments == nil || !p.experiments.Try { + p.errf(p.pos, "%s requires @experiment(try)", p.tok) + } + + // Determine if this is a single if or try clause + isSingleGuard := len(clauses) == 1 + if isSingleGuard { + switch clauses[0].(type) { + case *ast.IfClause, *ast.TryClause: + // Single if or try: use else + default: + isSingleGuard = false + } + } + + var pos token.Pos + if isSingleGuard { + // Single if/try clause: must use else + if p.tok == token.FALLBACK { + p.errf(p.pos, "use 'else' with single 'if' or 'try' clause") + pos = p.pos + p.next() + } else { + pos = p.expect(token.ELSE) + } + } else { + // Everything else: must use fallback + if p.tok == token.ELSE { + p.errf(p.pos, "use 'fallback' for comprehensions with multiple clauses or 'for' clauses") + pos = p.pos + p.next() + } else { + pos = p.expect(token.FALLBACK) + } + } + body := p.parseStruct() + return c.closeClause(p, &ast.FallbackClause{ + Fallback: pos, + Body: body.(*ast.StructLit), + }).(*ast.FallbackClause) +} + func (p *parser) parseFunc() (expr ast.Expr) { if p.trace { defer un(trace(p, "Func")) @@ -1232,9 +1330,7 @@ func (p *parser) parseFuncArgs() (list []ast.Expr) { for p.tok != token.RPAREN && p.tok != token.EOF { list = append(list, p.parseFuncArg()) - if p.tok != token.RPAREN { - p.expectComma() - } + p.expectComma() // skip over a trailing comma or newline } return list @@ -1274,7 +1370,8 @@ func (p *parser) parseList() (expr ast.Expr) { return &ast.ListLit{ Lbrack: lbrack, Elts: elts, - Rbrack: rbrack} + Rbrack: rbrack, + } } func (p *parser) parseListElements() (list []ast.Expr) { @@ -1306,19 +1403,25 @@ func (p 
*parser) parseListElement() (expr ast.Expr, ok bool) { case token.FOR, token.IF: tok := p.tok pos := p.pos - clauses, fc := p.parseComprehensionClauses(true) + clauses, fc := p.parseComprehensionClauses() if clauses != nil { sc := p.openComments() expr := p.parseStruct() sc.closeExpr(p, expr) + var fallbackClause *ast.FallbackClause + if p.tok == token.ELSE || p.tok == token.FALLBACK { + fallbackClause = p.parseFallbackClause(clauses) + } + if p.atComma("list literal", token.RBRACK) { // TODO: may be EOF p.next() } return &ast.Comprehension{ - Clauses: clauses, - Value: expr, + Clauses: clauses, + Value: expr, + Fallback: fallbackClause, }, true } @@ -1360,6 +1463,15 @@ func (p *parser) parseAlias(lhs ast.Expr) (expr ast.Expr) { return lhs } pos := p.pos + + // Check if old-style aliases are disallowed + if p.experiments != nil && p.experiments.AliasV2 { + p.errf(pos, "old-style alias syntax (=) is not allowed with @experiment(aliasv2); use postfix syntax (~X or ~(K,V))") + p.next() + expr = p.parseRHS() + return expr + } + p.next() expr = p.parseRHS() if expr == nil { @@ -1367,12 +1479,88 @@ func (p *parser) parseAlias(lhs ast.Expr) (expr ast.Expr) { } switch x := lhs.(type) { case *ast.Ident: + p.checkDeclIdent(x) return &ast.Alias{Ident: x, Equal: pos, Expr: expr} } - p.errf(p.pos, "expected identifier for alias") + p.errorExpected(p.pos, "identifier for alias") return expr } +// parsePostfixAlias parses the postfix alias syntax: ~X or ~(K,V) +// Returns nil if no alias is present. 
+func (p *parser) parsePostfixAlias() *ast.PostfixAlias { + if p.tok != token.TILDE { + return nil + } + + pos := p.pos + + // Check if postfix alias syntax requires experiment + if p.experiments == nil || !p.experiments.AliasV2 { + p.errf(pos, "postfix alias syntax requires @experiment(aliasv2)") + } + + p.next() + + switch p.tok { + case token.LPAREN: + // Dual form: ~(K,V) + lparen := p.pos + p.next() + + if p.tok != token.IDENT { + p.errorExpected(p.pos, "identifier for label alias") + return nil + } + k := p.parseIdent() + + comma := p.expect(token.COMMA) + if !comma.IsValid() { + // Recovery: treat as simple form with just K + return &ast.PostfixAlias{ + Tilde: pos, + Field: k, + } + } + + if p.tok != token.IDENT { + p.errorExpected(p.pos, "identifier for field alias") + // Recovery: return what we have + return &ast.PostfixAlias{ + Tilde: pos, + Lparen: lparen, + Label: k, + Comma: comma, + Field: k, // Use K as field too for recovery + } + } + v := p.parseIdent() + + rparen := p.expect(token.RPAREN) + + return &ast.PostfixAlias{ + Tilde: pos, + Lparen: lparen, + Label: k, + Comma: comma, + Field: v, + Rparen: rparen, + } + + case token.IDENT: + // Simple form: ~X + ident := p.parseIdent() + return &ast.PostfixAlias{ + Tilde: pos, + Field: ident, + } + + default: + p.errorExpected(p.pos, "identifier or '('") + return nil + } +} + // checkExpr checks that x is an expression (and not a type). 
func (p *parser) checkExpr(x ast.Expr) ast.Expr { switch unparen(x).(type) { @@ -1392,6 +1580,7 @@ func (p *parser) checkExpr(x ast.Expr) ast.Expr { case *ast.CallExpr: case *ast.UnaryExpr: case *ast.BinaryExpr: + case *ast.PostfixExpr: default: // all other nodes are not proper expressions p.errorExpected(x.Pos(), "expression") @@ -1425,15 +1614,18 @@ L: for { switch p.tok { case token.PERIOD: + period := p.pos c := p.openComments() c.pos = 1 p.next() switch p.tok { case token.IDENT: x = &ast.SelectorExpr{ - X: p.checkExpr(x), - Sel: p.parseIdent(), + X: p.checkExpr(x), + Period: period, + Sel: p.parseIdent(), } + x = p.wrapOptional(x, p.tok) case token.STRING: if strings.HasPrefix(p.lit, `"`) && !strings.HasPrefix(p.lit, `""`) { str := &ast.BasicLit{ @@ -1443,31 +1635,58 @@ L: } p.next() x = &ast.SelectorExpr{ - X: p.checkExpr(x), - Sel: str, + X: p.checkExpr(x), + Period: period, + Sel: str, } + x = p.wrapOptional(x, p.tok) break } fallthrough default: if p.tok.IsKeyword() { x = &ast.SelectorExpr{ - X: p.checkExpr(x), - Sel: p.parseKeyIdent(), + X: p.checkExpr(x), + Period: period, + Sel: p.parseKeyIdent(), } + x = p.wrapOptional(x, p.tok) break } pos := p.pos p.errorExpected(pos, "selector") p.next() // make progress - x = &ast.SelectorExpr{X: x, Sel: &ast.Ident{NamePos: pos, Name: "_"}} + x = &ast.SelectorExpr{ + X: x, + Period: period, + Sel: &ast.Ident{NamePos: pos, Name: "_"}, + } } c.closeNode(p, x) case token.LBRACK: x = p.parseIndexOrSlice(p.checkExpr(x)) case token.LPAREN: x = p.parseCallOrConversion(p.checkExpr(x)) + case token.ELLIPSIS: + if p.experiments.ExplicitOpen { + pos := p.pos + c := p.openComments() + p.next() + x = c.closeExpr(p, &ast.PostfixExpr{ + X: p.checkExpr(x), + Op: token.ELLIPSIS, + OpPos: pos, + }) + } else { + // Consume the token and give a clear error + pos := p.pos + p.next() + err := errors.Newf(pos, "postfix ... 
operator requires @experiment(explicitopen)") + p.errors = errors.Append(p.errors, err) + // Return a BadExpr to continue parsing + x = &ast.BadExpr{From: pos, To: p.pos} + } default: break L } @@ -1548,7 +1767,8 @@ func (p *parser) parseBinaryExprTail(prec1 int, x ast.Expr) ast.Expr { OpPos: pos, Op: op, // Treat nested expressions as RHS. - Y: p.checkExpr(p.parseBinaryExpr(prec + 1))}) + Y: p.checkExpr(p.parseBinaryExpr(prec + 1)), + }) } } @@ -1567,7 +1787,9 @@ func (p *parser) parseInterpolation() (expr ast.Expr) { last := &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: lit} exprs := []ast.Expr{last} - for p.tok == token.LPAREN { + // Note: we can only tell if the string returned by ResumeInterpolation + // starts a new interpolated expression by whether it ends in a parenthesis. + for strings.HasSuffix(last.Value, "(") { c.pos = 1 p.expect(token.LPAREN) cc.closeExpr(p, last) @@ -1576,7 +1798,7 @@ func (p *parser) parseInterpolation() (expr ast.Expr) { cc = p.openComments() if p.tok != token.RPAREN { - p.errf(p.pos, "expected ')' for string interpolation") + p.errorExpected(p.pos, "')' for string interpolation") } lit = p.scanner.ResumeInterpolation() pos = p.pos @@ -1635,27 +1857,18 @@ func (p *parser) parseImportSpec(_ int) *ast.ImportSpec { var ident *ast.Ident if p.tok == token.IDENT { - ident = p.parseIdent() - if isDefinition(ident) { + ident = p.parseIdentDecl() + if internal.IsDef(ident.Name) { p.errf(p.pos, "cannot import package as definition identifier") } } - pos := p.pos - var path string - if p.tok == token.STRING { - path = p.lit - if !isValidImport(path) { - p.errf(pos, "invalid import path: %s", path) - } - p.next() - p.expectComma() // call before accessing p.linecomment - } else { - p.expect(token.STRING) // use expect() error handling - if p.tok == token.COMMA { - p.expectComma() // call before accessing p.linecomment - } + path := p.lit + pos := p.expect(token.STRING) + if pos.IsValid() && !isValidImport(path) { + p.errf(pos, 
"invalid import path: %s", path) } + p.expectComma() // skip over a comma or newline // collect imports spec := &ast.ImportSpec{ Name: ident, @@ -1685,7 +1898,7 @@ func (p *parser) parseImports() *ast.ImportDecl { } p.closeList() rparen = p.expect(token.RPAREN) - p.expectComma() + p.expectComma() // skip over a comma or newline } else { list = append(list, p.parseImportSpec(0)) } @@ -1729,10 +1942,11 @@ func (p *parser) parseFile() *ast.File { if err != nil { e := errors.Wrapf(err, p.pos, "parsing experiments for version %q", v) p.errors = errors.Append(p.errors, e) - } else { - p.experiments = exp - p.file.SetExperiments(exp) + // Do not proceed without setting p.experiments. + return nil } + p.experiments = exp + p.file.SetExperiments(exp) // The package clause is not a declaration: it does not appear in any // scope. @@ -1746,7 +1960,7 @@ func (p *parser) parseFile() *ast.File { if name.Name == "_" && p.cfg.Mode&DeclarationErrors != 0 { p.errf(p.pos, "invalid package name _") } - if isDefinition(name) { + if internal.IsDef(name.Name) { p.errf(p.pos, "invalid package name %s", name.Name) } pkg := &ast.Package{ @@ -1754,7 +1968,7 @@ func (p *parser) parseFile() *ast.File { Name: name, } decls = append(decls, pkg) - p.expectComma() + p.expectComma() // skip over a comma or newline c.closeNode(p, pkg) } @@ -1786,8 +2000,3 @@ func (p *parser) parseFile() *ast.File { c.closeNode(p, f) return f } - -func isDefinition(ident *ast.Ident) bool { - return strings.HasPrefix(ident.Name, "#") || - strings.HasPrefix(ident.Name, "_#") -} diff --git a/vendor/cuelang.org/go/cue/path.go b/vendor/cuelang.org/go/cue/path.go index 26d174221b..60a4e758f9 100644 --- a/vendor/cuelang.org/go/cue/path.go +++ b/vendor/cuelang.org/go/cue/path.go @@ -25,8 +25,10 @@ import ( "cuelang.org/go/cue/literal" "cuelang.org/go/cue/parser" "cuelang.org/go/cue/token" + "cuelang.org/go/internal" "cuelang.org/go/internal/astinternal" "cuelang.org/go/internal/core/adt" + 
"cuelang.org/go/internal/core/runtime" "github.com/cockroachdb/apd/v3" ) @@ -138,6 +140,28 @@ func (sel Selector) String() string { return sel.sel.String() } +// ErrNotAPattern is a sentinel error value indicating that a value is not a +// pattern, which may be returned by [Selector.Pattern]. +var ErrNotAPattern = newErrValue( + Value{idx: runtime.New()}, + &adt.Bottom{ + Err: errors.Newf(token.NoPos, "selector is not a pattern"), + Code: adt.EvalError, + }, +) + +// Pattern returns the label pattern for a pattern constraint selector +// returned by an iterator with the [Patterns] option enabled. +// +// For other selectors, it returns [ErrNotAPattern]. +func (sel Selector) Pattern() Value { + switch sel := sel.sel.(type) { + case patternSelector: + return sel.pattern + } + return ErrNotAPattern +} + // Unquoted returns the unquoted value of a string label. // It panics unless [Selector.LabelType] is [StringLabel] and has a concrete name. func (sel Selector) Unquoted() string { @@ -275,6 +299,11 @@ func pathToStrings(p Path) (a []string) { return a } +// Append adds sel as a path component to p. +func (p Path) Append(sel ...Selector) Path { + return Path{path: append(p.path, sel...)} +} + // ParsePath parses a CUE expression into a Path. Any error resulting from // this conversion can be obtained by calling Err on the result. // @@ -463,10 +492,6 @@ func (p Path) Err() error { return errs } -func isHiddenOrDefinition(s string) bool { - return strings.HasPrefix(s, "#") || strings.HasPrefix(s, "_") -} - // Hid returns a selector for a hidden field. It panics if pkg is empty. // Hidden fields are scoped by package, and pkg indicates for which package // the hidden field must apply. For anonymous packages, it must be set to "_". @@ -505,11 +530,11 @@ func (s scopedSelector) feature(r adt.Runtime) adt.Feature { return adt.MakeIdentLabel(r, s.name, s.pkg) } -// A Def marks a string as a definition label. 
An # will be added if a string is +// Def marks a string as a definition label. An # will be added if a string is // not prefixed with a #. It will panic if s cannot be written as a valid // identifier. func Def(s string) Selector { - if !strings.HasPrefix(s, "#") && !strings.HasPrefix(s, "_#") { + if !internal.IsDef(s) { s = "#" + s } if !ast.IsValidIdent(s) { @@ -531,13 +556,13 @@ func (d definitionSelector) labelType() SelectorType { return DefinitionLabel } -func (s definitionSelector) constraintType() SelectorType { return 0 } +func (d definitionSelector) constraintType() SelectorType { return 0 } func (d definitionSelector) feature(r adt.Runtime) adt.Feature { return adt.MakeIdentLabel(r, string(d), "") } -// A Str is a CUE string label. Definition selectors are defined with Def. +// Str creates a CUE string label. Definition selectors are defined with [Def]. func Str(s string) Selector { return Selector{stringSelector(s)} } @@ -546,7 +571,7 @@ type stringSelector string func (s stringSelector) String() string { str := string(s) - if isHiddenOrDefinition(str) || !ast.IsValidIdent(str) { + if ast.StringLabelNeedsQuoting(str) { return literal.Label.Quote(str) } return str @@ -560,7 +585,7 @@ func (s stringSelector) feature(r adt.Runtime) adt.Feature { return adt.MakeStringLabel(r, string(s)) } -// An Index selects a list element by index. +// Index selects a list element by index. // It returns an invalid selector if the index is out of range. 
func Index[T interface{ int | int64 }](x T) Selector { f, err := adt.MakeLabel(nil, int64(x), adt.IntLabel) @@ -603,6 +628,20 @@ func (s anySelector) feature(r adt.Runtime) adt.Feature { return adt.Feature(s) } +type patternSelector struct { + pattern Value + _labelType SelectorType +} + +func (s patternSelector) String() string { return fmt.Sprintf("[%#v]", s.pattern) } +func (s patternSelector) isConstraint() bool { return true } +func (s patternSelector) labelType() SelectorType { return s._labelType } +func (s patternSelector) constraintType() SelectorType { return PatternConstraint } +func (s patternSelector) feature(r adt.Runtime) adt.Feature { + // Only called for non-pattern selectors. + panic("unreachable") +} + // TODO: allow import paths to be represented? // // // ImportPath defines a lookup at the root of an instance. It must be the first @@ -611,6 +650,7 @@ func (s anySelector) feature(r adt.Runtime) adt.Feature { // func ImportPath(s string) Selector { // return importSelector(s) // } + type constraintSelector struct { selector constraint SelectorType diff --git a/vendor/cuelang.org/go/cue/query.go b/vendor/cuelang.org/go/cue/query.go index f410cd0a88..698c72f1d2 100644 --- a/vendor/cuelang.org/go/cue/query.go +++ b/vendor/cuelang.org/go/cue/query.go @@ -15,6 +15,8 @@ package cue import ( + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" "cuelang.org/go/internal/core/adt" ) @@ -49,6 +51,15 @@ func (v Value) LookupPath(p Path) Value { outer: for _, sel := range p.path { + if _, ok := sel.sel.(patternSelector); ok { + // It's not possible to look up pattern constraints. + // TODO: could potentially relax that restriction. 
+ err := errors.Newf( + token.NoPos, + "cannot look up pattern constraints other than AnyString or AnyIndex", + ) + return newErrValue(makeValue(v.idx, n, parent), &adt.Bottom{Err: err}) + } f := sel.sel.feature(v.idx) deref := n.DerefValue() for _, a := range deref.Arcs { @@ -56,6 +67,7 @@ outer: if a.IsConstraint() && !sel.sel.isConstraint() { break } + a.Finalize(ctx) parent = linkParent(parent, n, a) n = a continue outer diff --git a/vendor/cuelang.org/go/cue/scanner/scanner.go b/vendor/cuelang.org/go/cue/scanner/scanner.go index ad94b4c4ee..c76c71ddda 100644 --- a/vendor/cuelang.org/go/cue/scanner/scanner.go +++ b/vendor/cuelang.org/go/cue/scanner/scanner.go @@ -605,6 +605,11 @@ func (s *Scanner) popInterpolation() quoteInfo { } // ResumeInterpolation resumes scanning of a string interpolation. +// It should be called when the final parenthesis (RPAREN) of an expression +// inside a string interpolation has been consumed, and returns +// the next literal segment of the interpolation string, including +// the closing parenthesis, and possible the opening parenthesis +// of the next interpolation expression if there is one. func (s *Scanner) ResumeInterpolation() string { quote := s.popInterpolation() _, str := s.scanString(s.offset-1, quote) @@ -620,9 +625,10 @@ func (s *Scanner) Offset() int { // and its literal string if applicable. The source end is indicated by // EOF. // -// If the returned token is a literal (IDENT, INT, FLOAT, -// IMAG, CHAR, STRING) or COMMENT, the literal string -// has the corresponding value. +// If the returned token is a literal (IDENT, INT, FLOAT, IMAG, CHAR, +// STRING, INTERPOLATION) or COMMENT or ATTRIBUTE, the literal +// string has the corresponding value, but see below for more +// information on INTERPOLATION. // // If the returned token is a keyword, the literal string is the keyword. // @@ -646,6 +652,44 @@ func (s *Scanner) Offset() int { // Scan adds line information to the file added to the file // set with Init. 
Token positions are relative to that file // and thus relative to the file set. +// +// # String Interpolations +// +// The INTERPOLATION token is treated somewhat specially, as the scanner +// does not itself determine the extent of the interpolation +// expressions. +// +// The scanner relies on the caller to read tokens after the literal +// string segments of the interpolation; when the caller has scanned the +// final parenthesis, it must call [Scanner.ResumeInterpolation], which +// returns the next segment of the interpolation, which may or may not +// itself be an interpolation (it's an interpolation iff the final +// character is an open-parenthesis). +// +// Note that the string literal associated with the INTERPOLATION token +// and the string returned by ResumeInterpolation contain the bounding +// parenthesis characters, even though they are also returned from the +// scanner as separate tokens. +// +// For example, scanning the following CUE +// +// #"a\#(foo("c \(d)"))"# +// +// would produce the following tokens and literal values: +// +// INTERPOLATION `#"a\#(` +// LPAREN +// IDENT `b` +// LPAREN +// IDENT `foo` +// LPAREN +// INTERPOLATION `"c \(` +// IDENT `d` +// RPAREN +// ResumeInterpolation -> `)"` +// RPAREN +// RPAREN +// ResumeInterpolation -> `)"#` func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { scanAgain: s.skipWhitespace(1) @@ -796,6 +840,8 @@ scanAgain: case '?': tok = token.OPTION insertEOL = true + case '~': + tok = token.TILDE case '.': if '0' <= s.ch && s.ch <= '9' { insertEOL = true diff --git a/vendor/cuelang.org/go/cue/stats/stats.go b/vendor/cuelang.org/go/cue/stats/stats.go index 220198c8b3..51f8f55d05 100644 --- a/vendor/cuelang.org/go/cue/stats/stats.go +++ b/vendor/cuelang.org/go/cue/stats/stats.go @@ -102,6 +102,11 @@ type Counts struct { // dependency analysis, in which case it is benign. 
SkippedNotification int64 + // Dependency resolution counters + + // ResolveDep counts the number calls to markResolver in dep.go. + ResolveDep int64 + // Buffer counters // // Each unification and disjunct operation is associated with an object @@ -156,6 +161,7 @@ func (c *Counts) Add(other Counts) { if other.MaxRedirect > c.MaxRedirect { c.MaxRedirect = other.MaxRedirect } + c.ResolveDep += other.ResolveDep c.Freed += other.Freed c.Retained += other.Retained @@ -174,6 +180,7 @@ func (c Counts) Since(start Counts) Counts { c.SkippedNotification -= start.SkippedNotification c.NumCloseIDs -= start.NumCloseIDs c.ConjunctInfos -= start.ConjunctInfos + c.ResolveDep -= start.ResolveDep // For max values, we don't subtract since they represent peaks // c.MaxConjunctInfos and c.MaxReqSets and c.MaxRedirect remain as-is @@ -207,7 +214,8 @@ Retain: {{.Retained}} Unifications: {{.Unifications}} Conjuncts: {{.Conjuncts}} Disjuncts: {{.Disjuncts}}{{if .Notifications}} -Notifications: {{.Notifications}}{{end}}{{if or .GenerationMismatch .MisalignedConjunct .MisalignedConstraint .SkippedNotification}} +Notifications: {{.Notifications}}{{end}}{{if .ResolveDep}} +ResolveDep: {{.ResolveDep}}{{end}}{{if or .GenerationMismatch .MisalignedConjunct .MisalignedConstraint .SkippedNotification}} {{if .GenerationMismatch}} GenerationMismatch: {{.GenerationMismatch}}{{end}}{{if .MisalignedConjunct}} MisalignedConjunct: {{.MisalignedConjunct}}{{end}}{{if .MisalignedConstraint}} diff --git a/vendor/cuelang.org/go/cue/token/position.go b/vendor/cuelang.org/go/cue/token/position.go index 5c1228072e..305788a7ba 100644 --- a/vendor/cuelang.org/go/cue/token/position.go +++ b/vendor/cuelang.org/go/cue/token/position.go @@ -17,17 +17,21 @@ package token import ( "cmp" "fmt" + "path/filepath" "sort" "sync" + "cuelang.org/go/internal/core/layer" "cuelang.org/go/internal/cueexperiment" ) // ----------------------------------------------------------------------------- // Positions -// Position describes 
an arbitrary source position -// including the file, line, and column location. +// Position describes an arbitrary and absolute (printable) source +// position within a file, including offset, line, and column +// location, which can be rendered in a human-friendly text form. +// // A Position is valid if the line number is > 0. type Position struct { Filename string // filename, if any @@ -40,7 +44,7 @@ type Position struct { // IsValid reports whether the position is valid. func (pos *Position) IsValid() bool { return pos.Line > 0 } -// String returns a string in one of several forms: +// String returns a human-readable form of a position in one of several forms: // // file:line:column valid position with file name // line:column valid position without file name @@ -60,20 +64,20 @@ func (pos Position) String() string { return s } -// Pos is a compact encoding of a source position within a file, as well as -// relative positioning information. It can be converted into a Position for a -// more convenient, but much larger, representation. +// Pos is a compact encoding of a source position. +// When valid, as reported by [Pos.IsValid], this can be either +// an absolute file position to obtain via [Pos.Position], +// which can be rendered in a human-friendly text form, +// and/or a relative position to obtain via [Pos.RelPos]. type Pos struct { file *File offset int } -// File returns the file that contains the position p or nil if there is no -// such file (for instance for p == NoPos). +// File returns the file that contains the absolute position p +// or nil if there is no such file (for instance for p == [NoPos]). 
func (p Pos) File() *File { - if p.index() == 0 { - return nil - } + // assumed: p.index() != 0 iff p.file != nil return p.file } @@ -82,7 +86,7 @@ func (p Pos) File() *File { type hiddenPos = Pos func (p hiddenPos) Experiment() (x cueexperiment.File) { - if p.file == nil || p.file.experiments == nil { + if !p.HasAbsPos() || p.file.experiments == nil { return x } @@ -90,38 +94,43 @@ func (p hiddenPos) Experiment() (x cueexperiment.File) { return x } -// TODO(mvdan): The methods below don't need to build an entire Position -// just to access some of the information. This could matter particularly for -// Compare, as it is called many times when sorting by position. +// NOTE: this is an internal API and may change at any time without notice. +func (p hiddenPos) Priority() (pr layer.Priority, ok bool) { + if f := p.file; f != nil { + return f.priority, f.isData + } + return 0, false +} +// Line returns the position's line number, starting at 1. func (p Pos) Line() int { - if p.file == nil { - return 0 - } return p.Position().Line } +// Column returns the position's column number counting in bytes, +// starting at 1. func (p Pos) Column() int { - if p.file == nil { - return 0 - } return p.Position().Column } +// Filename returns the name of the file that this position belongs to. func (p Pos) Filename() string { - if p.file == nil { + // Avoid calling [Pos.Position] as it also unpacks line and column info. + if !p.HasAbsPos() { return "" } - return p.Position().Filename + return p.file.name } +// Position unpacks the position information into a flat struct. func (p Pos) Position() Position { - if p.file == nil { + if !p.HasAbsPos() { return Position{} } return p.file.Position(p) } +// String returns a human-readable form of an absolute position. 
func (p Pos) String() string { return p.Position().String() } @@ -131,19 +140,29 @@ func (p Pos) String() string { func (p Pos) Compare(p2 Pos) int { if p == p2 { return 0 - } else if p == NoPos { + } else if !p.IsValid() { return +1 - } else if p2 == NoPos { + } else if !p2.IsValid() { return -1 } - pos, pos2 := p.Position(), p2.Position() - if c := cmp.Compare(pos.Filename, pos2.Filename); c != 0 { + // Avoid calling [Pos.Position] as it also unpacks line and column info; + // comparing positions only needs filenames and offsets. + // + // Note that absolute paths always go first; this naturally happens on Unix-like + // systems given that a leading slash sorts before almost any filename character. + // On Windows, volume names like C: could sort after filenames like Bar, + // which can cause inconsistent ordering depending on the current directory. + f1, f2 := p.Filename(), p2.Filename() + if c := cmpBool(filepath.IsAbs(f1), filepath.IsAbs(f2)); c != 0 { + // Note that this is negated so that absolute paths (IsAbs==true) go first. + return -c + } + if c := cmp.Compare(f1, f2); c != 0 { return c } // Note that CUE doesn't currently use any directives which alter // position information, like Go's //line, so comparing by offset is enough. - return cmp.Compare(pos.Offset, pos2.Offset) - + return cmp.Compare(p.Offset(), p2.Offset()) } // NoPos is the zero value for [Pos]; there is no file and line information @@ -157,7 +176,7 @@ var NoPos = Pos{} // RelPos indicates the relative position of token to the previous token. type RelPos int -//go:generate go run golang.org/x/tools/cmd/stringer -type=RelPos -linecomment +//go:generate go tool stringer -type=RelPos -linecomment const ( // NoRelPos indicates no relative position is specified. @@ -187,27 +206,52 @@ func (p RelPos) Pos() Pos { return Pos{nil, int(p)} } -// HasRelPos reports whether p has a relative position. +// HasRelPos reports whether p has a relative position, which can be +// obtained via [Pos.RelPos]. 
func (p Pos) HasRelPos() bool { return p.offset&relMask != 0 +} +// HasAbsPos reports whether p has an absolute position, which can be +// obtained via [Pos.Position]. +func (p Pos) HasAbsPos() bool { + // It's assumed that p.file == nil iff p.index() == 0 + // + // I.e. a file without an index is pointless (just a filename), and + // an index without a file is an offset in a vacuum. + return p.file != nil } +// Before reports whether p < q, as documented in [Pos.Compare]. +// +// Deprecated: use [Pos.Compare] instead. +// +//go:fix inline func (p Pos) Before(q Pos) bool { - return p.file == q.file && p.Offset() < q.Offset() + return p.Compare(q) < 0 } // Offset reports the byte offset relative to the file. func (p Pos) Offset() int { - return p.Position().Offset + // Avoid calling [Pos.Position] as it also unpacks line and column info. + if !p.HasAbsPos() { + return 0 + } + return p.file.Offset(p) } // Add creates a new position relative to the p offset by n. func (p Pos) Add(n int) Pos { + // If p is not an absolute position, we can't add to its offset. + if !p.HasAbsPos() { + return p + } return Pos{p.file, p.offset + toPos(index(n))} } -// IsValid reports whether the position is valid. +// IsValid reports whether the position contains any useful information, +// meaning either an absolute file position (obtained via [Pos.Position]), +// and/or a relative position (obtained via [Pos.RelPos]). func (p Pos) IsValid() bool { return p != NoPos } @@ -234,6 +278,14 @@ func toPos(x index) int { return (int(x) << relShift) } +// WithinInclusive reports whether offset lies within the range start +// to end, inclusive on both ends. It is up to the caller to ensure +// that start and end are from the same file, and start is before end, +// and that offset is appropriate for the file. 
+func WithinInclusive(offset int, start, end Pos) bool { + return start.Offset() <= offset && offset <= end.Offset() +} + // ----------------------------------------------------------------------------- // File @@ -252,12 +304,15 @@ type File struct { base index size index // file size as provided to AddFile - // lines, infos, and content are protected by set.mutex - lines []index // lines contains the offset of the first character for each line (the first entry is always 0) - infos []lineInfo - content []byte + // lines, infos, content, and revision are protected by [File.mutex] + lines []index // lines contains the offset of the first character for each line (the first entry is always 0) + infos []lineInfo + content []byte + revision int32 experiments *cueexperiment.File + priority layer.Priority + isData bool } // NewFile returns a new file with the given OS file name. The size provides the @@ -269,7 +324,6 @@ func NewFile(filename string, deprecatedBase, size int) *File { deprecatedBase = 1 } return &File{ - mutex: sync.RWMutex{}, name: filename, base: index(deprecatedBase), size: index(size), @@ -277,6 +331,18 @@ func NewFile(filename string, deprecatedBase, size int) *File { } } +// fixOffset fixes an out-of-bounds offset such that 0 <= offset <= f.size. +func (f *File) fixOffset(offset index) index { + switch { + case offset < 0: + return 0 + case offset > f.size: + return f.size + default: + return offset + } +} + // hiddenFile allows defining methods in File that are hidden from public // documentation. type hiddenFile = File @@ -285,6 +351,18 @@ func (f *hiddenFile) SetExperiments(experiments *cueexperiment.File) { f.experiments = experiments } +// NOTE: this is an internal API and may change at any time without notice. +// +// SetLayer sets the layer priority for this file. The priority parameter +// determines the precedence of defaults defined in this file, with higher +// values taking precedence over lower values. 
The isData parameter indicates +// whether this file should be treated as containing data defaults, which +// have different merging semantics from regular defaults. +func (f *hiddenFile) SetLayer(priority int8, isData bool) { + f.priority = layer.Priority(priority) + f.isData = isData +} + // Name returns the file name of file f as registered with AddFile. func (f *File) Name() string { return f.name @@ -421,6 +499,24 @@ func (f *hiddenFile) Content() []byte { return f.content } +// NOTE: this is an internal API and may change at any time without notice. +// +// SetRevision sets the file's version. +func (f *hiddenFile) SetRevision(version int32) { + f.mutex.Lock() + f.revision = version + f.mutex.Unlock() +} + +// NOTE: this is an internal API and may change at any time without notice. +// +// Revision retrieves the file's version. +func (f *hiddenFile) Revision() int32 { + f.mutex.RLock() + defer f.mutex.RUnlock() + return f.revision +} + // A lineInfo object describes alternative file and line number // information (such as provided via a //line comment in a .go // file) for a given file offset. @@ -447,25 +543,31 @@ func (f *File) AddLineInfo(offset int, filename string, line int) { f.mutex.Unlock() } -// Pos returns the Pos value for the given file offset; -// the offset must be <= f.Size(). +// Pos returns the Pos value for the given file offset. +// +// If offset is negative, the result is the file's start +// position; if the offset is too large, the result is +// the file's end position (see also go.dev/issue/57490). +// +// The following invariant, though not true for Pos values +// in general, holds for the result p: // f.Pos(f.Offset(p)) == p. 
func (f *File) Pos(offset int, rel RelPos) Pos { - if index(offset) > f.size { - panic("illegal file offset") - } - return Pos{f, toPos(1+index(offset)) + int(rel)} + return Pos{f, toPos(1+f.fixOffset(index(offset))) + int(rel)} } -// Offset returns the offset for the given file position p; -// p must be a valid Pos value in that file. -// f.Offset(f.Pos(offset)) == offset. +// Offset returns the offset for the given file position p. +// +// If p is before the file's start position (or if p is NoPos), +// the result is 0; if p is past the file's end position, the +// the result is the file size (see also go.dev/issue/57490). +// +// The following invariant, though not true for offset values +// in general, holds for the result offset: +// f.Offset(f.Pos(offset)) == offset func (f *File) Offset(p Pos) int { x := p.index() - if x < 1 || x > 1+f.size { - panic("illegal Pos value") - } - return int(x - 1) + return int(f.fixOffset(x - 1)) } // Line returns the line number for the given file position p; @@ -500,28 +602,26 @@ func (f *File) unpack(offset index, adjusted bool) (filename string, line, colum } func (f *File) position(p Pos, adjusted bool) (pos Position) { - offset := p.index() - 1 - pos.Offset = int(offset) - pos.Filename, pos.Line, pos.Column = f.unpack(offset, adjusted) + offset := f.Offset(p) + pos.Offset = offset + pos.Filename, pos.Line, pos.Column = f.unpack(index(offset), adjusted) return } // PositionFor returns the Position value for the given file position p. +// If p is out of bounds, it is adjusted to match the File.Offset behavior. // If adjusted is set, the position may be adjusted by position-altering // //line comments; otherwise those comments are ignored. // p must be a Pos value in f or NoPos. 
func (f *File) PositionFor(p Pos, adjusted bool) (pos Position) { - x := p.index() - if p != NoPos { - if x < 1 || x > 1+f.size { - panic("illegal Pos value") - } + if p.IsValid() { pos = f.position(p, adjusted) } return } // Position returns the Position value for the given file position p. +// If p is out of bounds, it is adjusted to match the File.Offset behavior. // Calling f.Position(p) is equivalent to calling f.PositionFor(p, true). func (f *File) Position(p Pos) (pos Position) { return f.PositionFor(p, true) @@ -552,3 +652,14 @@ func searchInts(a []index, x index) int { } return i - 1 } + +func cmpBool(x, y bool) int { + switch { + case !x && y: + return -1 + case x && !y: + return +1 + default: + return 0 + } +} diff --git a/vendor/cuelang.org/go/cue/token/relpos_string.go b/vendor/cuelang.org/go/cue/token/relpos_string.go index 0129d7bb62..c6bb71e373 100644 --- a/vendor/cuelang.org/go/cue/token/relpos_string.go +++ b/vendor/cuelang.org/go/cue/token/relpos_string.go @@ -21,8 +21,9 @@ const _RelPos_name = "invalidelidednospaceblanknewlinesection" var _RelPos_index = [...]uint8{0, 7, 13, 20, 25, 32, 39} func (i RelPos) String() string { - if i < 0 || i >= RelPos(len(_RelPos_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_RelPos_index)-1 { return "RelPos(" + strconv.FormatInt(int64(i), 10) + ")" } - return _RelPos_name[_RelPos_index[i]:_RelPos_index[i+1]] + return _RelPos_name[_RelPos_index[idx]:_RelPos_index[idx+1]] } diff --git a/vendor/cuelang.org/go/cue/token/token.go b/vendor/cuelang.org/go/cue/token/token.go index 05579d4049..ee03611052 100644 --- a/vendor/cuelang.org/go/cue/token/token.go +++ b/vendor/cuelang.org/go/cue/token/token.go @@ -19,7 +19,7 @@ package token // Token is the set of lexical tokens of the CUE configuration language. type Token int -//go:generate go run golang.org/x/tools/cmd/stringer -type=Token -linecomment +//go:generate go tool stringer -type=Token -linecomment // The list of tokens. 
const ( @@ -95,14 +95,18 @@ const ( SEMICOLON // ; COLON // : OPTION // ? + TILDE // ~ operatorEnd keywordBeg - IF // if - FOR // for - IN // in - LET // let + IF // if + ELSE // else + FOR // for + IN // in + LET // let + TRY // try + FALLBACK // fallback // experimental FUNC // func diff --git a/vendor/cuelang.org/go/cue/token/token_string.go b/vendor/cuelang.org/go/cue/token/token_string.go index e845d9c2c4..24374da5a9 100644 --- a/vendor/cuelang.org/go/cue/token/token_string.go +++ b/vendor/cuelang.org/go/cue/token/token_string.go @@ -57,26 +57,31 @@ func _() { _ = x[SEMICOLON-46] _ = x[COLON-47] _ = x[OPTION-48] - _ = x[operatorEnd-49] - _ = x[keywordBeg-50] - _ = x[IF-51] - _ = x[FOR-52] - _ = x[IN-53] - _ = x[LET-54] - _ = x[FUNC-55] - _ = x[TRUE-56] - _ = x[FALSE-57] - _ = x[NULL-58] - _ = x[keywordEnd-59] + _ = x[TILDE-49] + _ = x[operatorEnd-50] + _ = x[keywordBeg-51] + _ = x[IF-52] + _ = x[ELSE-53] + _ = x[FOR-54] + _ = x[IN-55] + _ = x[LET-56] + _ = x[TRY-57] + _ = x[FALLBACK-58] + _ = x[FUNC-59] + _ = x[TRUE-60] + _ = x[FALSE-61] + _ = x[NULL-62] + _ = x[keywordEnd-63] } -const _Token_name = "ILLEGALEOFCOMMENTATTRIBUTEliteralBegIDENTINTFLOATSTRINGINTERPOLATION_|_literalEndoperatorBeg+-*^/quoremdivmod&|&&||===<>!<-!=<=>==~!~([{,....)]};:?operatorEndkeywordBegifforinletfunctruefalsenullkeywordEnd" +const _Token_name = "ILLEGALEOFCOMMENTATTRIBUTEliteralBegIDENTINTFLOATSTRINGINTERPOLATION_|_literalEndoperatorBeg+-*^/quoremdivmod&|&&||===<>!<-!=<=>==~!~([{,....)]};:?~operatorEndkeywordBegifelseforinlettryfallbackfunctruefalsenullkeywordEnd" -var _Token_index = [...]uint8{0, 7, 10, 17, 26, 36, 41, 44, 49, 55, 68, 71, 81, 92, 93, 94, 95, 96, 97, 100, 103, 106, 109, 110, 111, 113, 115, 116, 118, 119, 120, 121, 123, 125, 127, 129, 131, 133, 134, 135, 136, 137, 138, 141, 142, 143, 144, 145, 146, 147, 158, 168, 170, 173, 175, 178, 182, 186, 191, 195, 205} +var _Token_index = [...]uint8{0, 7, 10, 17, 26, 36, 41, 44, 49, 55, 68, 71, 81, 92, 93, 94, 95, 96, 97, 100, 
103, 106, 109, 110, 111, 113, 115, 116, 118, 119, 120, 121, 123, 125, 127, 129, 131, 133, 134, 135, 136, 137, 138, 141, 142, 143, 144, 145, 146, 147, 148, 159, 169, 171, 175, 178, 180, 183, 186, 194, 198, 202, 207, 211, 221} func (i Token) String() string { - if i < 0 || i >= Token(len(_Token_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Token_index)-1 { return "Token(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Token_name[_Token_index[i]:_Token_index[i+1]] + return _Token_name[_Token_index[idx]:_Token_index[idx+1]] } diff --git a/vendor/cuelang.org/go/cue/types.go b/vendor/cuelang.org/go/cue/types.go index b9b6db53a1..c9a9d61de0 100644 --- a/vendor/cuelang.org/go/cue/types.go +++ b/vendor/cuelang.org/go/cue/types.go @@ -23,6 +23,7 @@ import ( "math/big" "slices" "strings" + "unicode/utf8" "github.com/cockroachdb/apd/v3" @@ -38,6 +39,7 @@ import ( "cuelang.org/go/internal/core/runtime" "cuelang.org/go/internal/core/subsume" internaljson "cuelang.org/go/internal/encoding/json" + "cuelang.org/go/internal/iterutil" "cuelang.org/go/internal/types" ) @@ -88,10 +90,11 @@ const ( // // TODO: remove type structValue struct { - ctx *adt.OpContext - v Value - obj *adt.Vertex - arcs []*adt.Vertex + ctx *adt.OpContext + v Value + obj *adt.Vertex + arcs []*adt.Vertex + patterns []adt.PatternConstraint } type hiddenStructValue = structValue @@ -210,33 +213,52 @@ func unwrapJSONError(err error) errors.Error { // An Iterator iterates over values. type Iterator struct { - val Value - idx *runtime.Runtime - ctx *adt.OpContext - arcs []*adt.Vertex - p int - cur Value - f adt.Feature - arcType adt.ArcType + val Value + idx *runtime.Runtime + ctx *adt.OpContext + arcs []*adt.Vertex + patterns []adt.PatternConstraint + p int + cur Value + f adt.Feature + arcType adt.ArcType + isPattern bool + isList bool } type hiddenIterator = Iterator // Next advances the iterator to the next value and reports whether there was any. 
// It must be called before the first call to [Iterator.Value] or [Iterator.Selector]. +// +// Note that pattern constraints will be produced by the iterator before +// any other field. func (i *Iterator) Next() bool { - if i.p >= len(i.arcs) { + switch { + case i.p >= len(i.arcs)+len(i.patterns): i.cur = Value{} return false + case i.p < len(i.patterns): + i.isPattern = true + i.arcType = adt.ArcNotPresent + pattern := i.patterns[i.p] + pattern.Constraint.Finalize(i.ctx) + i.cur = makeValue(i.val.idx, pattern.Constraint, + linkParent(i.val.parent_, i.val.v, pattern.Constraint), + ) + i.p++ + return true + + default: + arc := i.arcs[i.p-len(i.patterns)] + arc.Finalize(i.ctx) + i.isPattern = false + i.f = arc.Label + i.arcType = arc.ArcType + i.cur = makeValue(i.val.idx, arc, linkParent(i.val.parent_, i.val.v, arc)) + i.p++ + return true } - arc := i.arcs[i.p] - arc.Finalize(i.ctx) - p := linkParent(i.val.parent_, i.val.v, arc) - i.f = arc.Label - i.arcType = arc.ArcType - i.cur = makeValue(i.val.idx, arc, p) - i.p++ - return true } // Value returns the current value in the list. @@ -247,12 +269,33 @@ func (i *Iterator) Value() Value { // Selector reports the field label of this iteration. func (i *Iterator) Selector() Selector { - sel := featureToSel(i.f, i.idx) - // Only call wrapConstraint if there is any constraint type to wrap with. - if ctype := fromArcType(i.arcType); ctype != 0 { - sel = wrapConstraint(sel, ctype) + if !i.isPattern { + sel := featureToSel(i.f, i.idx) + // Only call wrapConstraint if there is any constraint type to wrap with. 
+ if ctype := fromArcType(i.arcType); ctype != 0 { + sel = wrapConstraint(sel, ctype) + } + return sel + } + pattern := exprToVertex(i.patterns[i.p-1].Pattern) + pattern.Finalize(i.ctx) + + return Selector{ + patternSelector{ + pattern: makeValue(i.val.idx, pattern, + linkParent(i.val.parent_, i.val.v, pattern), + ), + _labelType: i.patternSelectorType().LabelType(), + }, + } +} + +func (i *Iterator) patternSelectorType() SelectorType { + if i.isList { + // Pattern constraints in lists are always indexes. + return IndexLabel | PatternConstraint } - return sel + return StringLabel | PatternConstraint } // Label reports the label of the value if i iterates over struct fields and "" @@ -276,6 +319,9 @@ func (i *Iterator) IsOptional() bool { // FieldType reports the type of the field. func (i *Iterator) FieldType() SelectorType { + if i.isPattern { + return i.patternSelectorType() + } return featureToSelType(i.f, i.arcType) } @@ -302,6 +348,10 @@ func listAppendJSON(b []byte, l *Iterator) ([]byte, error) { func (v Value) getNum(k adt.Kind) (*adt.Num, errors.Error) { v, _ = v.Default() + if num, _ := v.v.BaseValue.(*adt.Num); num != nil && k&v.Kind() != adt.BottomKind { + // In the happy path, avoid creating a new [OpContext], which is wasteful. + return num, nil + } ctx := v.ctx() if err := v.checkKind(ctx, k); err != nil { return nil, v.toErr(err) @@ -401,17 +451,7 @@ func (v Value) Int(z *big.Int) (*big.Int, error) { if err != nil { return nil, err } - if z == nil { - z = &big.Int{} - } - if n.X.Exponent != 0 { - panic("cue: exponent should always be nil for integer types") - } - z.Set(n.X.Coeff.MathBigInt()) - if n.X.Negative { - z.Neg(z) - } - return z, nil + return n.BigInt(z), nil } // Int64 converts the underlying integral number to int64. It reports an @@ -584,9 +624,8 @@ func (v valueScope) Parent() compile.Scope { type hiddenValue = Value // Core is for internal use only. 
-func (v hiddenValue) Core(x *types.Value) { - x.V = v.v - x.R = v.idx +func (v hiddenValue) Core() types.Value { + return types.Value{V: v.v, R: v.idx} } func newErrValue(v Value, b *adt.Bottom) Value { @@ -612,12 +651,16 @@ func newVertexRoot(idx *runtime.Runtime, ctx *adt.OpContext, x *adt.Vertex) Valu } func newValueRoot(idx *runtime.Runtime, ctx *adt.OpContext, x adt.Expr) Value { + return newVertexRoot(idx, ctx, exprToVertex(x)) +} + +func exprToVertex(x adt.Expr) *adt.Vertex { if n, ok := x.(*adt.Vertex); ok { - return newVertexRoot(idx, ctx, n) + return n } - node := &adt.Vertex{} - node.AddConjunct(adt.MakeRootConjunct(nil, x)) - return newVertexRoot(idx, ctx, node) + n := &adt.Vertex{} + n.AddConjunct(adt.MakeRootConjunct(nil, x)) + return n } func newChildValue(o *structValue, i int) Value { @@ -849,16 +892,17 @@ func (v Value) Syntax(opts ...Option) ast.Node { o := getOptions(opts) p := export.Profile{ - Simplify: !o.raw, - TakeDefaults: o.final, - ShowOptional: !o.omitOptional && !o.concrete, - ShowDefinitions: !o.omitDefinitions && !o.concrete, - ShowHidden: !o.omitHidden && !o.concrete, - ShowAttributes: !o.omitAttrs, - ShowDocs: o.docs, - ShowErrors: o.showErrors, - InlineImports: o.inlineImports, - Fragment: o.raw, + Simplify: !o.raw, + TakeDefaults: o.final, + ShowOptional: !o.omitOptional && !o.concrete, + ShowDefinitions: !o.omitDefinitions && !o.concrete, + ShowHidden: !o.omitHidden && !o.concrete, + ShowAttributes: !o.omitAttrs, + ShowDocs: o.docs, + ShowErrors: o.showErrors, + InlineImports: o.inlineImports, + Fragment: o.raw, + ExpandReferences: o.concrete, } pkgID := v.instance().ID() @@ -878,7 +922,7 @@ You could file a bug with the above information at: ` cg := &ast.CommentGroup{Doc: true} msg := fmt.Sprintf(format, name, err, p, v) - for _, line := range strings.Split(msg, "\n") { + for line := range strings.SplitSeq(msg, "\n") { cg.List = append(cg.List, &ast.Comment{Text: "// " + line}) } x := &ast.BadExpr{} @@ -889,7 +933,7 @@ You 
could file a bug with the above information at: // var expr ast.Expr var err error var f *ast.File - if o.concrete || o.final || o.resolveReferences { + if o.concrete || o.final { f, err = p.Vertex(v.idx, pkgID, v.v) if err != nil { return bad(`"cuelang.org/go/internal/core/export".Vertex`, err) @@ -921,14 +965,14 @@ outer: for _, c := range ast.Comments(e.Expr) { ast.AddComment(f, c) } - ast.SetComments(e.Expr, f.Comments()) + ast.SetComments(e.Expr, ast.Comments(f)) return e.Expr } } st := &ast.StructLit{ Elts: f.Decls, } - ast.SetComments(st, f.Comments()) + ast.SetComments(st, ast.Comments(f)) return st } @@ -949,21 +993,19 @@ func (v Value) Source() ast.Node { if v.v == nil { return nil } - count := 0 + c, count := v.v.SingleConjunct() var src ast.Node - v.v.VisitLeafConjuncts(func(c adt.Conjunct) bool { + if count == 1 { src = c.Source() - count++ - return true - }) - if count > 1 || src == nil { + } + if src == nil { src = v.v.Value().Source() } return src } -// If v exactly represents a package, BuildInstance returns -// the build instance corresponding to the value; otherwise it returns nil. +// BuildInstance returns the build instance corresponding to the value +// if v exactly represents a package; otherwise it returns nil. // // The value returned by [Value.ReferencePath] will commonly represent a package. func (v Value) BuildInstance() *build.Instance { @@ -990,25 +1032,21 @@ func (v Value) Pos() token.Pos { } if src := v.Source(); src != nil { - if pos := src.Pos(); pos != token.NoPos { + if pos := src.Pos(); pos.IsValid() { return pos } } // Pick the most-concrete field. var p token.Pos - v.v.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range v.v.LeafConjuncts() { x := c.Elem() - pp := pos(x) - if pp == token.NoPos { - return true + pp := adt.Pos(x) + if !pp.IsValid() { + continue } p = pp - // Prefer struct conjuncts with actual fields. 
- if s, ok := x.(*adt.StructLit); ok && len(s.Fields) > 0 { - return false - } - return true - }) + // TODO: Prefer struct conjuncts with actual fields. + } return p } @@ -1022,11 +1060,57 @@ func (v Value) Allows(sel Selector) bool { if v.v.HasEllipsis { return true } + if _, ok := sel.sel.(patternSelector); ok { + // We can always add a pattern constraint. + return true + } c := v.ctx() f := sel.sel.feature(c) return v.v.Accept(c, f) } +// IsClosed reports whether the value has been closed at the top level, either +// with the close function or by being referenced as a definition. +func (v Value) IsClosed() bool { + if v.v == nil { + return false + } + // Use the non-forwarded node to get the actual closed state + x := v.v + isClosed := x.ClosedNonRecursive || x.ClosedRecursive + for !isClosed { + if v, ok := x.BaseValue.(*adt.Vertex); ok { + isClosed = isClosed || v.ClosedNonRecursive || v.ClosedRecursive + x = v + continue + } + break + } + return isClosed +} + +// IsClosedRecursively reports whether the value has been closed by virtue of +// being referenced as a definition. +func (v Value) IsClosedRecursively() bool { + if v.v == nil { + return false + } + // Use the non-forwarded node to get the actual closed state + x := v.v + isClosed := x.ClosedRecursive + // This loop doesn't seem necessary for ClosedRecursive, but we will keep + // it as a safety net. + for !isClosed { + if v, ok := x.BaseValue.(*adt.Vertex); ok { + isClosed = isClosed || v.ClosedRecursive + x = v + continue + } + break + } + return isClosed +} + // IsConcrete reports whether the current value is a concrete scalar value // (not relying on default values), a terminal error, a list, or a struct. // It does not verify that values of lists or structs are concrete themselves. @@ -1045,14 +1129,6 @@ func (v Value) IsConcrete() bool { return true } -// // Deprecated: IsIncomplete -// // -// // It indicates that the value cannot be fully evaluated due to -// // insufficient information. 
-// func (v Value) IsIncomplete() bool { -// panic("deprecated") -// } - // Exists reports whether this value existed in the configuration. func (v Value) Exists() bool { if v.v == nil { @@ -1126,7 +1202,7 @@ func (v Value) Len() Value { case *adt.Vertex: if x.IsList() { n := &adt.Num{K: adt.IntKind} - n.X.SetInt64(int64(len(x.Elems()))) + n.X.SetInt64(int64(iterutil.Count(x.Elems()))) if x.IsClosedList() { return remakeFinal(v, n) } @@ -1144,7 +1220,7 @@ func (v Value) Len() Value { case *adt.Bytes: return makeInt(v, int64(len(x.B))) case *adt.String: - return makeInt(v, int64(len([]rune(x.Str)))) + return makeInt(v, int64(utf8.RuneCountInString(x.Str))) } } const msg = "len not supported for type %v" @@ -1178,18 +1254,16 @@ func (v Value) List() (Iterator, error) { // mustList is like [Value.List], but reusing ctx and leaving it to the caller // to apply defaults and check the kind. func (v Value) mustList(ctx *adt.OpContext) Iterator { - arcs := []*adt.Vertex{} - for _, a := range v.v.Elems() { - if a.Label.IsInt() { - arcs = append(arcs, a) - } - } - return Iterator{idx: v.idx, ctx: ctx, val: v, arcs: arcs} + return Iterator{idx: v.idx, ctx: ctx, val: v, arcs: slices.Collect(v.v.Elems())} } // Null reports an error if v is not null. func (v Value) Null() error { v, _ = v.Default() + if b, _ := v.v.BaseValue.(*adt.Null); b != nil { + // In the happy path, avoid creating a new [OpContext], which is wasteful. + return nil + } if err := v.checkKind(v.ctx(), adt.NullKind); err != nil { return v.toErr(err) } @@ -1199,12 +1273,20 @@ func (v Value) Null() error { // IsNull reports whether v is null. func (v Value) IsNull() bool { v, _ = v.Default() + if b, _ := v.v.BaseValue.(*adt.Null); b != nil { + // In the happy path, avoid creating a new [OpContext], which is wasteful. + return true + } return v.isKind(v.ctx(), adt.NullKind) } // Bool returns the bool value of v or false and an error if v is not a boolean. 
func (v Value) Bool() (bool, error) { v, _ = v.Default() + if b, _ := v.v.BaseValue.(*adt.Bool); b != nil { + // In the happy path, avoid creating a new [OpContext], which is wasteful. + return b.B, nil + } ctx := v.ctx() if err := v.checkKind(ctx, adt.BoolKind); err != nil { return false, v.toErr(err) @@ -1213,8 +1295,13 @@ func (v Value) Bool() (bool, error) { } // String returns the string value if v is a string or an error otherwise. +// To stringify a CUE value into text form, use [cuelang.org/go/cue/format]. func (v Value) String() (string, error) { v, _ = v.Default() + if str, _ := v.v.BaseValue.(*adt.String); str != nil { + // In the happy path, avoid creating a new [OpContext], which is wasteful. + return str.Str, nil + } ctx := v.ctx() if err := v.checkKind(ctx, adt.StringKind); err != nil { return "", v.toErr(err) @@ -1226,6 +1313,13 @@ func (v Value) String() (string, error) { // otherwise. func (v Value) Bytes() ([]byte, error) { v, _ = v.Default() + switch val := v.v.BaseValue.(type) { + // In the happy path, avoid creating a new [OpContext], which is wasteful. + case *adt.Bytes: + return bytes.Clone(val.B), nil + case *adt.String: + return []byte(val.Str), nil + } ctx := v.ctx() switch x := v.eval(ctx).(type) { case *adt.Bytes: @@ -1273,8 +1367,8 @@ func (v Value) structValOpts(ctx *adt.OpContext, o options) (s structValue, err switch b := v.v.Bottom(); { case b != nil && b.IsIncomplete() && !o.concrete && !o.final: - // Allow scalar values if hidden or definition fields are requested. - case !o.omitHidden, !o.omitDefinitions: + // Allow scalar values if hidden or definition fields or patterns are requested. 
+ case !o.omitHidden, !o.omitDefinitions, o.includePatterns: default: if err := v.checkKind(ctx, adt.StructKind); err != nil && !err.ChildError { return structValue{}, err @@ -1322,7 +1416,11 @@ func (v Value) structValOpts(ctx *adt.OpContext, o options) (s structValue, err } arcs = append(arcs, arc) } - return structValue{ctx, orig, obj, arcs}, nil + var patterns []adt.PatternConstraint + if o.includePatterns && obj.PatternConstraints != nil { + patterns = obj.PatternConstraints.Pairs + } + return structValue{ctx, orig, obj, arcs, patterns}, nil } // Struct returns the underlying struct of a value or an error if the value @@ -1402,7 +1500,11 @@ func (s *hiddenStruct) Fields(opts ...Option) *Iterator { // Fields creates an iterator over v's fields if v is a struct or an error // otherwise. func (v Value) Fields(opts ...Option) (*Iterator, error) { - o := options{omitDefinitions: true, omitHidden: true, omitOptional: true} + o := options{ + omitDefinitions: true, + omitHidden: true, + omitOptional: true, + } o.updateOptions(opts) ctx := v.ctx() obj, err := v.structValOpts(ctx, o) @@ -1410,7 +1512,14 @@ func (v Value) Fields(opts ...Option) (*Iterator, error) { return &Iterator{idx: v.idx, ctx: ctx}, v.toErr(err) } - return &Iterator{idx: v.idx, ctx: ctx, val: v, arcs: obj.arcs}, nil + return &Iterator{ + idx: v.idx, + ctx: ctx, + val: v, + arcs: obj.arcs, + patterns: obj.patterns, + isList: v.Kind() == ListKind, + }, nil } // Lookup reports the value at a path starting from v. The empty path returns v @@ -1597,11 +1706,18 @@ func (v Value) FillPath(p Path, x interface{}) Value { // TODO: inject import path of current package? 
expr = resolveExpr(ctx, n, x) default: - expr = convert.GoValueToValue(ctx, x, true) + expr = convert.FromGoValue(ctx, x, true) } for _, sel := range slices.Backward(p.path) { switch sel.Type() { case StringLabel | PatternConstraint: + if _, ok := sel.sel.(patternSelector); ok { + // TODO consider relaxing this restriction, in which case we'd really + // want a constructor for pattern selectors too. + return newErrValue(v, + mkErr(nil, 0, "cannot use pattern selector in FillPath"), + ) + } expr = &adt.StructLit{Decls: []adt.Decl{ &adt.BulkOptionalField{ Filter: &adt.BasicType{K: adt.StringKind}, @@ -1661,12 +1777,9 @@ func (v hiddenValue) Template() func(label string) Value { return nil } - // Implementation for the old evaluator. - types := v.v.OptionalTypes() - switch { - case types&(adt.HasAdditional|adt.HasPattern) != 0: - case v.v.PatternConstraints != nil: - default: + // Simplified after removing OptionalTypes. + // Check if there are pattern constraints. + if v.v.PatternConstraints == nil { return nil } @@ -1709,7 +1822,7 @@ func (v Value) Subsume(w Value, opts ...Option) error { // TODO: this is likely not correct for V3. There are some cases where this is // still used for V3. Transition away from those. 
func allowed(ctx *adt.OpContext, parent, n *adt.Vertex) *adt.Bottom { - if !parent.IsClosedList() && !parent.IsClosedStruct() { + if !parent.IsClosedList() && parent.IsOpenStruct() { return nil } @@ -1741,18 +1854,6 @@ func (v Value) Unify(w Value) Value { n := adt.Unify(ctx, v.v, w.v) - if ctx.Version == internal.EvalV2 { - if err := n.Err(ctx); err != nil { - return makeValue(v.idx, n, v.parent_) - } - if err := allowed(ctx, v.v, n); err != nil { - return newErrValue(w, err) - } - if err := allowed(ctx, w.v, n); err != nil { - return newErrValue(v, err) - } - } - return makeValue(v.idx, n, v.parent_) } @@ -1775,18 +1876,8 @@ func (v Value) UnifyAccept(w Value, accept Value) Value { n := &adt.Vertex{} ctx := v.ctx() - switch ctx.Version { - case internal.EvalV2: - cv := adt.MakeRootConjunct(nil, v.v) - cw := adt.MakeRootConjunct(nil, w.v) - - n.AddConjunct(cv) - n.AddConjunct(cw) - - case internal.EvalV3: - n.AddOpenConjunct(ctx, v.v) - n.AddOpenConjunct(ctx, w.v) - } + n.AddOpenConjunct(ctx, v.v) + n.AddOpenConjunct(ctx, w.v) n.Finalize(ctx) n.Parent = v.v.Parent @@ -1896,7 +1987,11 @@ func reference(rt *runtime.Runtime, c *adt.OpContext, env *adt.Environment, r ad path = appendSelector(path, valueToSel(v)) case *adt.ImportReference: - inst = rt.LoadImport(rt.LabelStr(x.ImportPath)) + if x.Instance != nil { + inst = rt.LoadInstance(x.Instance) + } else { + inst = rt.LoadBuiltin(rt.LabelStr(x.ImportPath)) + } case *adt.SelectorExpr: inst, path = reference(rt, c, env, x.X) @@ -1924,20 +2019,20 @@ func mkPath(r *runtime.Runtime, a []Selector, v *adt.Vertex) (root *adt.Vertex, } type options struct { - concrete bool // enforce that values are concrete - raw bool // show original values - hasHidden bool - omitHidden bool - omitDefinitions bool - omitOptional bool - omitAttrs bool - inlineImports bool - resolveReferences bool - showErrors bool - final bool - ignoreClosedness bool // used for comparing APIs - docs bool - disallowCycles bool // implied by concrete + 
concrete bool // enforce that values are concrete + raw bool // show original values + hasHidden bool + omitHidden bool + omitDefinitions bool + omitOptional bool + omitAttrs bool + includePatterns bool + inlineImports bool + showErrors bool + final bool + ignoreClosedness bool // used for comparing APIs + docs bool + disallowCycles bool // implied by concrete } // An Option defines modes of evaluation. @@ -1956,7 +2051,7 @@ func Final() Option { } } -// Schema specifies the input is a Schema. Used by Subsume. +// Schema specifies the input is a Schema. Used only by [Value.Subsume]. func Schema() Option { return func(o *options) { o.ignoreClosedness = true @@ -1992,33 +2087,10 @@ func DisallowCycles(disallow bool) Option { return func(p *options) { p.disallowCycles = disallow } } -// ResolveReferences forces the evaluation of references when outputting. -// -// Deprecated: [Value.Syntax] will now always attempt to resolve dangling references and -// make the output self-contained. When [Final] or [Concrete] are used, -// it will already attempt to resolve all references. -// See also [InlineImports]. -func ResolveReferences(resolve bool) Option { - return func(p *options) { - p.resolveReferences = resolve - - // ResolveReferences is implemented as a Value printer, rather than - // a definition printer, even though it should be more like the latter. - // To reflect this we convert incomplete errors to their original - // expression. - // - // TODO: ShowErrors mostly shows incomplete errors, even though this is - // just an approximation. There seems to be some inconsistencies as to - // when child errors are marked as such, making the conversion somewhat - // inconsistent. This option is conservative, though. - p.showErrors = true - } -} - // ErrorsAsValues treats errors as a regular value, including them at the // location in the tree where they occur, instead of interpreting them as a // configuration-wide failure that is returned instead of root value. 
-// Used by Syntax. +// Used by [Value.Syntax]. func ErrorsAsValues(show bool) Option { return func(p *options) { p.showErrors = show } } @@ -2061,6 +2133,21 @@ func Definitions(include bool) Option { } } +// Patterns indicates whether pattern constraints should be included +// when iterating over struct fields. This includes universal pattern +// constraints such as `[_]: int` or `[=~"^a"]: string` but +// not the ellipsis pattern as selected by [AnyString]: that +// can be found with [Value.LookupPath](cue.MakePath(cue.AnyString)). +func Patterns(include bool) Option { + // TODO we can include patterns, but there's no way + // of iterating over patterns _only_ which might be + // useful in some cases. Perhaps we could add: + // func Regular(include bool) Option + return func(p *options) { + p.includePatterns = include + } +} + // Hidden indicates that definitions and hidden fields should be included. func Hidden(include bool) Option { return func(p *options) { @@ -2208,7 +2295,7 @@ func (v Value) Expr() (Op, []Value) { default: a := []Value{} ctx := v.ctx() - v.v.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range v.v.LeafConjuncts() { // Keep parent here. TODO: do we need remove the requirement // from other conjuncts? 
n := &adt.Vertex{ @@ -2218,9 +2305,7 @@ func (v Value) Expr() (Op, []Value) { n.AddConjunct(c) n.Finalize(ctx) a = append(a, makeValue(v.idx, n, v.parent_)) - return true - }) - + } return adt.AndOp, a } @@ -2237,6 +2322,9 @@ process: case *adt.UnaryExpr: a = append(a, remakeValue(v, env, x.X)) op = x.Op + case *adt.OpenExpr: + a = append(a, remakeValue(v, env, x.X)) + op = adt.SpreadOp case *adt.BoundExpr: a = append(a, remakeValue(v, env, x.Expr)) op = x.Op diff --git a/vendor/cuelang.org/go/encoding/json/json.go b/vendor/cuelang.org/go/encoding/json/json.go index 714ff6cb0c..97cc4d96f3 100644 --- a/vendor/cuelang.org/go/encoding/json/json.go +++ b/vendor/cuelang.org/go/encoding/json/json.go @@ -21,15 +21,13 @@ import ( "encoding/json" "fmt" "io" - "strings" "cuelang.org/go/cue" "cuelang.org/go/cue/ast" - "cuelang.org/go/cue/ast/astutil" "cuelang.org/go/cue/errors" - "cuelang.org/go/cue/literal" "cuelang.org/go/cue/parser" "cuelang.org/go/cue/token" + cuejson "cuelang.org/go/internal/encoding/json" "cuelang.org/go/internal/source" ) @@ -63,7 +61,7 @@ func Extract(path string, data []byte) (ast.Expr, error) { if err != nil { return nil, err } - patchExpr(expr, nil) + cuejson.PatchExpr(expr, nil) return expr, nil } @@ -127,7 +125,7 @@ func (d *Decoder) Extract() (ast.Expr, error) { if err != nil { return expr, err } - patchExpr(expr, d.patchPos) + cuejson.PatchExpr(expr, d.patchPos) return expr, nil } @@ -159,115 +157,3 @@ func (d *Decoder) patchPos(n ast.Node) { realPos := d.tokFile.Pos(pos.Offset()+d.startOffset, pos.RelPos()) ast.SetPos(n, realPos) } - -// patchExpr simplifies the AST parsed from JSON. -// TODO: some of the modifications are already done in format, but are -// a package deal of a more aggressive simplify. Other pieces of modification -// should probably be moved to format. 
-func patchExpr(n ast.Node, patchPos func(n ast.Node)) { - type info struct { - reflow bool - } - stack := []info{{true}} - - afterFn := func(n ast.Node) { - switch n.(type) { - case *ast.ListLit, *ast.StructLit: - stack = stack[:len(stack)-1] - } - } - - var beforeFn func(n ast.Node) bool - - beforeFn = func(n ast.Node) bool { - if patchPos != nil { - patchPos(n) - } - - isLarge := n.End().Offset()-n.Pos().Offset() > 50 - descent := true - - switch x := n.(type) { - case *ast.ListLit: - reflow := true - if !isLarge { - for _, e := range x.Elts { - if hasSpaces(e) { - reflow = false - break - } - } - } - stack = append(stack, info{reflow}) - if reflow { - x.Lbrack = x.Lbrack.WithRel(token.NoRelPos) - x.Rbrack = x.Rbrack.WithRel(token.NoRelPos) - } - return true - - case *ast.StructLit: - reflow := true - if !isLarge { - for _, e := range x.Elts { - if f, ok := e.(*ast.Field); !ok || hasSpaces(f) || hasSpaces(f.Value) { - reflow = false - break - } - } - } - stack = append(stack, info{reflow}) - if reflow { - x.Lbrace = x.Lbrace.WithRel(token.NoRelPos) - x.Rbrace = x.Rbrace.WithRel(token.NoRelPos) - } - return true - - case *ast.Field: - // label is always a string for JSON. - switch { - case true: - s, ok := x.Label.(*ast.BasicLit) - if !ok || s.Kind != token.STRING { - break // should not happen: implies invalid JSON - } - - u, err := literal.Unquote(s.Value) - if err != nil { - break // should not happen: implies invalid JSON - } - - // TODO(legacy): remove checking for '_' prefix once hidden - // fields are removed. 
- if !ast.IsValidIdent(u) || strings.HasPrefix(u, "_") { - break // keep string - } - - x.Label = ast.NewIdent(u) - astutil.CopyMeta(x.Label, s) - } - ast.Walk(x.Value, beforeFn, afterFn) - descent = false - - case *ast.BasicLit: - if x.Kind == token.STRING && len(x.Value) > 10 { - s, err := literal.Unquote(x.Value) - if err != nil { - break // should not happen: implies invalid JSON - } - - x.Value = literal.String.WithOptionalTabIndent(len(stack)).Quote(s) - } - } - - if stack[len(stack)-1].reflow { - ast.SetRelPos(n, token.NoRelPos) - } - return descent - } - - ast.Walk(n, beforeFn, afterFn) -} - -func hasSpaces(n ast.Node) bool { - return n.Pos().RelPos() > token.NoSpace -} diff --git a/vendor/cuelang.org/go/encoding/json/pointer.go b/vendor/cuelang.org/go/encoding/json/pointer.go new file mode 100644 index 0000000000..e041d25140 --- /dev/null +++ b/vendor/cuelang.org/go/encoding/json/pointer.go @@ -0,0 +1,98 @@ +// Copyright 2025 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package json + +import ( + "fmt" + "iter" + "strconv" + "strings" + + "cuelang.org/go/cue" +) + +var ( + jsonPtrEsc = strings.NewReplacer("~", "~0", "/", "~1") + jsonPtrUnesc = strings.NewReplacer("~0", "~", "~1", "/") +) + +// Pointer represents a JSON Pointer as defined by RFC 6901. +// It is a slash-separated list of tokens that reference a specific location +// within a JSON document. 
+// TODO(go1.26) alias this to [encoding/json/jsontext.Pointer] +type Pointer string + +// PointerFromTokens returns a JSON Pointer formed from +// the unquoted tokens in the given sequence. Any +// slash (/) or tilde (~) characters will be escaped appropriately. +func PointerFromTokens(tokens iter.Seq[string]) Pointer { + var buf strings.Builder + for tok := range tokens { + buf.WriteByte('/') + buf.WriteString(jsonPtrEsc.Replace(tok)) + } + return Pointer(buf.String()) +} + +// Tokens returns a sequence of all the +// unquoted path elements (tokens) of the JSON Pointer. +func (p Pointer) Tokens() iter.Seq[string] { + s := string(p) + return func(yield func(string) bool) { + needUnesc := strings.IndexByte(s, '~') >= 0 + for len(s) > 0 { + s = strings.TrimPrefix(s, "/") + i := min(uint(strings.IndexByte(s, '/')), uint(len(s))) + tok := s[:i] + if needUnesc { + tok = jsonPtrUnesc.Replace(tok) + } + if !yield(tok) { + return + } + s = s[i:] + } + } +} + +// PointerFromCUEPath returns a JSON Pointer equivalent to the +// given CUE path. It returns an error if the path contains an element +// that cannot be represented as a JSON Pointer. 
+func PointerFromCUEPath(p cue.Path) (Pointer, error) { + var err error + ptr := PointerFromTokens(func(yield func(s string) bool) { + for _, sel := range p.Selectors() { + var token string + switch sel.Type() { + case cue.StringLabel: + token = sel.Unquoted() + case cue.IndexLabel: + token = strconv.Itoa(sel.Index()) + default: + if err == nil { + err = fmt.Errorf("cannot convert selector %v to JSON pointer", sel) + continue + } + } + if !yield(token) { + return + } + } + }) + if err != nil { + return "", err + } + return ptr, nil +} diff --git a/vendor/cuelang.org/go/encoding/jsonschema/constraints_array.go b/vendor/cuelang.org/go/encoding/jsonschema/constraints_array.go index 08868d690a..0131c93d5f 100644 --- a/vendor/cuelang.org/go/encoding/jsonschema/constraints_array.go +++ b/vendor/cuelang.org/go/encoding/jsonschema/constraints_array.go @@ -45,7 +45,7 @@ func constraintAdditionalItems(key string, n cue.Value, s *state) { panic("no elements in list") } last := s.list.Elts[len(s.list.Elts)-1].(*ast.Ellipsis) - if isBottom(elem) { + if isErrorCall(elem) { // No additional elements allowed. Remove the ellipsis. 
s.list.Elts = s.list.Elts[:len(s.list.Elts)-1] return diff --git a/vendor/cuelang.org/go/encoding/jsonschema/constraints_object.go b/vendor/cuelang.org/go/encoding/jsonschema/constraints_object.go index 3ace080973..02240d1095 100644 --- a/vendor/cuelang.org/go/encoding/jsonschema/constraints_object.go +++ b/vendor/cuelang.org/go/encoding/jsonschema/constraints_object.go @@ -20,8 +20,8 @@ import ( "cuelang.org/go/cue" "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/ast/astutil" "cuelang.org/go/cue/token" - "cuelang.org/go/internal" ) // Object constraints @@ -114,7 +114,7 @@ func constraintAdditionalProperties(key string, n cue.Value, s *state) { expr, _ := s.schemaState(n, allTypes, func(s *state) { s.preserveUnknownFields = false }) - f := internal.EmbedStruct(ast.NewStruct(&ast.Field{ + f := embedStruct(ast.NewStruct(&ast.Field{ Label: ast.NewList(ast.NewBinExpr(token.AND, existing...)), Value: expr, })) @@ -128,6 +128,23 @@ func constraintAdditionalProperties(key string, n cue.Value, s *state) { s.hasAdditionalProperties = true } +func embedStruct(s *ast.StructLit) *ast.EmbedDecl { + e := &ast.EmbedDecl{Expr: s} + if len(s.Elts) == 1 { + d := s.Elts[0] + astutil.CopyPosition(e, d) + ast.SetRelPos(d, token.NoSpace) + astutil.CopyComments(e, d) + ast.SetComments(d, nil) + if f, ok := d.(*ast.Field); ok { + ast.SetRelPos(f.Label, token.NoSpace) + } + } + s.Lbrace = token.Newline.Pos() + s.Rbrace = token.NoSpace.Pos() + return e +} + // constraintDependencies is used to implement all of the dependencies, // dependentSchemas and dependentRequired keywords. 
func constraintDependencies(key string, n cue.Value, s *state) { @@ -291,7 +308,6 @@ func constraintPatternProperties(key string, n cue.Value, s *state) { s.errf(n, `value of "patternProperties" must be an object, found %v`, n.Kind()) } obj := s.object(n) - existing := excludeFields(s.obj.Elts) s.processMap(n, func(key string, n cue.Value) { if !s.checkRegexp(n, key) { return @@ -304,12 +320,9 @@ func constraintPatternProperties(key string, n cue.Value, s *state) { &ast.UnaryExpr{Op: token.NMAT, X: ast.NewString(key)}) // We'll make a pattern constraint of the form: - // [pattern & !~(properties)]: schema - f := internal.EmbedStruct(ast.NewStruct(&ast.Field{ - Label: ast.NewList(ast.NewBinExpr( - token.AND, - append([]ast.Expr{&ast.UnaryExpr{Op: token.MAT, X: ast.NewString(key)}}, existing...)..., - )), + // [pattern]: schema + f := embedStruct(ast.NewStruct(&ast.Field{ + Label: ast.NewList(&ast.UnaryExpr{Op: token.MAT, X: ast.NewString(key)}), Value: s.schema(n), })) ast.SetRelPos(f, token.NewSection) @@ -379,17 +392,17 @@ func constraintProperties(key string, n cue.Value, s *state) { f.Value = ast.NewString(s.k8sAPIVersion) hasAPIVersion = true } - if len(obj.Elts) > 0 && len(f.Comments()) > 0 { + if len(obj.Elts) > 0 && len(ast.Comments(f)) > 0 { // TODO: change formatter such that either a NewSection on the // field or doc comment will cause a new section. 
- ast.SetRelPos(f.Comments()[0], token.NewSection) + ast.SetRelPos(ast.Comments(f)[0], token.NewSection) } if state.deprecated { switch expr.(type) { case *ast.StructLit: obj.Elts = append(obj.Elts, addTag(name, "deprecated", "")) default: - f.Attrs = append(f.Attrs, internal.NewAttr("deprecated", "")) + f.Attrs = append(f.Attrs, &ast.Attribute{Text: "@deprecated()"}) } } obj.Elts = append(obj.Elts, f) diff --git a/vendor/cuelang.org/go/encoding/jsonschema/crd.cue b/vendor/cuelang.org/go/encoding/jsonschema/crd.cue index 1d66ee96f1..172829214a 100644 --- a/vendor/cuelang.org/go/encoding/jsonschema/crd.cue +++ b/vendor/cuelang.org/go/encoding/jsonschema/crd.cue @@ -1,3 +1,5 @@ +@experiment(try) + package jsonschema // input holds the parsed YAML document, which may contain multiple @@ -23,8 +25,7 @@ specs: { #CRDSpec }, ] - } - if (input & [...]) == _|_ { + } else { // It's a single document. Include it if it looks like a CRD. if (input & {#crdlike, ...}) != _|_ { [{input, #CRDSpec}] diff --git a/vendor/cuelang.org/go/encoding/jsonschema/crd.go b/vendor/cuelang.org/go/encoding/jsonschema/crd.go index bbcb13dbfe..7788a81e8b 100644 --- a/vendor/cuelang.org/go/encoding/jsonschema/crd.go +++ b/vendor/cuelang.org/go/encoding/jsonschema/crd.go @@ -9,7 +9,7 @@ import ( "cuelang.org/go/cue/token" ) -//go:generate go run cuelang.org/go/cmd/cue exp gengotypes . +//go:generate go tool cue exp gengotypes . //go:embed crd.cue var crdCUE []byte @@ -25,6 +25,10 @@ type ExtractedCRD struct { // version. Versions map[string]*ast.File + // VersionToPath maps each version to the path + // within Source containing the schema for that version. + VersionToPath map[string]cue.Path + // Data holds chosen fields extracted from the source CRD document. 
Data *CRDSpec @@ -49,13 +53,22 @@ func ExtractCRDs(data cue.Value, cfg *CRDConfig) ([]*ExtractedCRD, error) { crds := make([]*ExtractedCRD, len(crdInfos)) for crdIndex, crd := range crdInfos { versions := make(map[string]*ast.File) + versionToPath := make(map[string]cue.Path) for i, version := range crd.Spec.Versions { + rootPath := cue.MakePath( + cue.Str("spec"), + cue.Str("versions"), + cue.Index(i), + cue.Str("schema"), + cue.Str("openAPIV3Schema"), + ) + versionToPath[version.Name] = rootPath f, err := Extract(crdValues[crdIndex], &Config{ PkgName: version.Name, // There are several kubernetes-related keywords that aren't implemented yet StrictFeatures: false, StrictKeywords: true, - Root: fmt.Sprintf("#/spec/versions/%d/schema/openAPIV3Schema", i), + Root: "#" + mustCUEPathToJSONPointer(rootPath), SingleRoot: true, DefaultVersion: VersionKubernetesCRD, }) @@ -102,9 +115,10 @@ func ExtractCRDs(data cue.Value, cfg *CRDConfig) ([]*ExtractedCRD, error) { versions[version.Name] = f } crds[crdIndex] = &ExtractedCRD{ - Versions: versions, - Data: crdInfos[crdIndex], - Source: crdValues[crdIndex], + Versions: versions, + VersionToPath: versionToPath, + Data: crdInfos[crdIndex], + Source: crdValues[crdIndex], } } return crds, nil diff --git a/vendor/cuelang.org/go/encoding/jsonschema/decode.go b/vendor/cuelang.org/go/encoding/jsonschema/decode.go index 38dc3d1b91..62473dbd0b 100644 --- a/vendor/cuelang.org/go/encoding/jsonschema/decode.go +++ b/vendor/cuelang.org/go/encoding/jsonschema/decode.go @@ -646,7 +646,7 @@ func (s *state) finalize() (e ast.Expr) { if s.allowedTypes == 0 { // Nothing is possible. This isn't a necessarily a problem, as // we might be inside an allOf or oneOf with other valid constraints. - return bottom() + return errorDisallowed() } s.finalizeObject() @@ -855,7 +855,7 @@ func (s0 *state) schemaState(n cue.Value, types cue.Kind, init func(*state)) (ex } // do multiple passes over the constraints to ensure they are done in order. 
- for pass := 0; pass < numPhases; pass++ { + for pass := range numPhases { s.processMap(n, func(key string, value cue.Value) { if pass == 0 && key == "$ref" { // Before 2019-19, keywords alongside $ref are ignored so keep @@ -1008,7 +1008,7 @@ func (s *state) addDefinition(n cue.Value) *definedSchema { var loc SchemaLoc schemaRoot := s.schemaRoot() loc.ID = ref(*schemaRoot.id) - loc.ID.Fragment = cuePathToJSONPointer(relPath(n, schemaRoot.pos)) + loc.ID.Fragment = mustCUEPathToJSONPointer(relPath(n, schemaRoot.pos)) idStr := loc.ID.String() def, ok := s.defs[idStr] if ok { @@ -1090,34 +1090,6 @@ func (s *state) constValue(n cue.Value) ast.Expr { } } -func (s *state) value(n cue.Value) ast.Expr { - k := n.Kind() - switch k { - case cue.ListKind: - a := []ast.Expr{} - for i, _ := n.List(); i.Next(); { - a = append(a, s.value(i.Value())) - } - return setPos(ast.NewList(a...), n) - - case cue.StructKind: - a := []ast.Decl{} - s.processMap(n, func(key string, n cue.Value) { - a = append(a, &ast.Field{ - Label: ast.NewString(key), - Value: s.value(n), - }) - }) - return setPos(&ast.StructLit{Elts: a}, n) - - default: - if !n.IsConcrete() { - s.errf(n, "invalid non-concrete value") - } - return n.Syntax(cue.Final()).(ast.Expr) - } -} - // processMap processes a yaml node, expanding merges. // // TODO: in some cases we can translate merges into CUE embeddings. 
@@ -1184,8 +1156,20 @@ func excludeFields(decls []ast.Decl) []ast.Expr { } } -func bottom() ast.Expr { - return &ast.BottomLit{} +func errorDisallowed() ast.Expr { + return ast.NewCall(ast.NewIdent("error"), ast.NewString("disallowed")) +} + +func isErrorCall(e ast.Expr) bool { + call, ok := e.(*ast.CallExpr) + if !ok { + return false + } + target, ok := call.Fun.(*ast.Ident) + if !ok { + return false + } + return target.Name == "error" } func top() ast.Expr { @@ -1196,7 +1180,7 @@ func boolSchema(ok bool) ast.Expr { if ok { return top() } - return bottom() + return errorDisallowed() } func isTop(s ast.Expr) bool { @@ -1204,11 +1188,6 @@ func isTop(s ast.Expr) bool { return ok && i.Name == "_" } -func isBottom(e ast.Expr) bool { - _, ok := e.(*ast.BottomLit) - return ok -} - func addTag(field ast.Label, tag, value string) *ast.Field { return &ast.Field{ Label: field, diff --git a/vendor/cuelang.org/go/encoding/jsonschema/external_teststats.txt b/vendor/cuelang.org/go/encoding/jsonschema/external_teststats.txt index dd27dfbd34..40e8d58142 100644 --- a/vendor/cuelang.org/go/encoding/jsonschema/external_teststats.txt +++ b/vendor/cuelang.org/go/encoding/jsonschema/external_teststats.txt @@ -1,22 +1,25 @@ # Generated by CUE_UPDATE=1 go test. 
DO NOT EDIT -v2: - schema extract (pass / total): 1072 / 1363 = 78.7% - tests (pass / total): 3910 / 4803 = 81.4% - tests on extracted schemas (pass / total): 3910 / 4041 = 96.8% + +Core tests: v3: schema extract (pass / total): 1072 / 1363 = 78.7% - tests (pass / total): 3908 / 4803 = 81.4% - tests on extracted schemas (pass / total): 3908 / 4041 = 96.7% + tests (pass / total): 3917 / 4803 = 81.6% + tests on extracted schemas (pass / total): 3917 / 4041 = 96.9% -Optional tests +v3-roundtrip: + schema extract (pass / total): 240 / 1363 = 17.6% + tests (pass / total): 845 / 4803 = 17.6% + tests on extracted schemas (pass / total): 845 / 920 = 91.8% -v2: - schema extract (pass / total): 235 / 274 = 85.8% - tests (pass / total): 1663 / 2372 = 70.1% - tests on extracted schemas (pass / total): 1663 / 2262 = 73.5% +Optional tests: v3: - schema extract (pass / total): 235 / 274 = 85.8% - tests (pass / total): 1663 / 2372 = 70.1% - tests on extracted schemas (pass / total): 1663 / 2262 = 73.5% + schema extract (pass / total): 255 / 274 = 93.1% + tests (pass / total): 1733 / 2372 = 73.1% + tests on extracted schemas (pass / total): 1733 / 2332 = 74.3% + +v3-roundtrip: + schema extract (pass / total): 62 / 274 = 22.6% + tests (pass / total): 425 / 2372 = 17.9% + tests on extracted schemas (pass / total): 425 / 622 = 68.3% diff --git a/vendor/cuelang.org/go/encoding/jsonschema/generate.go b/vendor/cuelang.org/go/encoding/jsonschema/generate.go new file mode 100644 index 0000000000..b8f7874467 --- /dev/null +++ b/vendor/cuelang.org/go/encoding/jsonschema/generate.go @@ -0,0 +1,1253 @@ +// Copyright 2025 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonschema + +import ( + "fmt" + "iter" + "maps" + "regexp" + "slices" + "strings" + "time" + + "cuelang.org/go/cue" + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +// GenerateConfig configures JSON Schema generation from CUE values. +type GenerateConfig struct { + // Version specifies the version of JSON Schema to generate. + // Currently only [VersionDraft2020_12] is supported. + Version Version + + // NameFunc is used to determine how references map to + // JSON Schema definition names. It is passed the + // root value (usually a package) and the path to that value + // within it, as returned by [cue.Value.ReferencePath]. + // + // If this is nil, [DefaultNameFunc] will be used. + NameFunc func(root cue.Value, path cue.Path) string + + // ExplicitOpen, when true, will never close a schema with `additionalProperties: false` + // (but _will_ explicitly open a schema with `additionalProperties: true` + // when there is an explicit `...` or universal pattern in a struct). + // + // By default (when ExplicitOpen is false), all structs that are closed will + // have an `additionalProperties: false` added. + ExplicitOpen bool +} + +type closedMode byte + +const ( + open = closedMode(iota) + closed + closedRecursively +) + +// descend returns the closed mode that applies to m when +// descending one level of struct field. 
+func (m closedMode) descend() closedMode { + if m == closedRecursively { + return m + } + return open +} + +// Generate generates a JSON Schema for the given CUE value, +// with the returned AST representing the generated JSON result. +func Generate(v cue.Value, cfg *GenerateConfig) (ast.Expr, error) { + if err := v.Validate(); err != nil { + return nil, err + } + if cfg == nil { + cfg = &GenerateConfig{} + } else { + // Prevent mutation of the argument. + cfg = *ref(cfg) + } + if cfg.NameFunc == nil { + cfg.NameFunc = DefaultNameFunc + } + if cfg.Version == VersionUnknown { + cfg.Version = VersionDraft2020_12 + } + if cfg.Version != VersionDraft2020_12 { + return nil, fmt.Errorf("only version %v is supported for generating JSON Schema for now", VersionDraft2020_12) + } + + g := &generator{ + cfg: cfg, + defs: make(map[string]internItem), + unique: newUniqueItems(), + } + mode := open + switch { + case v.IsClosed(): + mode = closed + case v.IsClosedRecursively(): + mode = closedRecursively + } + item := optimize(g.makeItem(v, mode), g.unique) + expr := item.Value().generate(g) + + // Check if the result is a boolean literal + if lit, ok := expr.(*ast.BasicLit); ok && (lit.Kind == token.TRUE || lit.Kind == token.FALSE) { + if lit.Kind == token.FALSE { + // There should already be an error; if not, create one + if g.err == nil { + g.addError(v, fmt.Errorf("schema cannot be satisfied")) + } + return nil, g.err + } + // true means empty struct + expr = &ast.StructLit{} + } + + // The result should be a struct literal + st, ok := expr.(*ast.StructLit) + if !ok { + return nil, fmt.Errorf("expected struct literal from generate, got %T", expr) + } + + // Add schema version metadata and definitions. 
+ fields := []ast.Decl{makeField("$schema", ast.NewString(cfg.Version.String()))} + if len(g.defs) != 0 { + defFields := make([]ast.Decl, 0, len(g.defs)) + for _, name := range slices.Sorted(maps.Keys(g.defs)) { + def := optimize(g.defs[name], g.unique) + defFields = append(defFields, makeField(name, def.Value().generate(g))) + } + fields = append(fields, makeField("$defs", &ast.StructLit{Elts: defFields})) + } + fields = append(fields, st.Elts...) + + if g.err != nil { + return nil, g.err + } + return makeSchemaStructLit(fields...), nil +} + +func optimize(it internItem, u *uniqueItems) internItem { + it = mergeAllOf(it, u) + return enumFromConst(it, u) +} + +// mergeAllOf returns the item with adjacent itemAllOf nodes +// all merged into a single itemAllOf node with all +// the conjuncts in. +func mergeAllOf(it internItem, u *uniqueItems) internItem { + switch it1 := it.Value().(type) { + case *itemAllOf: + it2 := &itemAllOf{ + elems: make([]internItem, 0, len(it1.elems)), + } + for e := range siblings(it1) { + // Remove elements that are entirely redundant. 
+ // TODO we could unify itemType elements here, for example: + // allOf(itemType(number), itemType(integer)) -> itemType(integer) + if !slices.Contains(it2.elems, e) { + it2.elems = append(it2.elems, mergeAllOf(e, u)) + } + } + if len(it2.elems) == 1 { + return it2.elems[0] + } + return u.intern(it2) + default: + return u.apply(it, mergeAllOf) + } +} + +func itemConjuncts(it internItem) iter.Seq[internItem] { + return func(yield func(internItem) bool) { + it1, ok := it.Value().(*itemAllOf) + if !ok { + yield(it) + return + } + yieldSiblings(it1, yield) + } +} + +type elementsItem interface { + elements() []internItem +} + +func siblings[T elementsItem](it T) iter.Seq[internItem] { + return func(yield func(internItem) bool) { + yieldSiblings(it, yield) + } +} + +func yieldSiblings[T elementsItem](it T, yield func(internItem) bool) bool { + for _, e := range it.elements() { + if ae, ok := e.Value().(T); ok { + if !yieldSiblings(ae, yield) { + return false + } + } else { + if !yield(e) { + return false + } + } + } + return true +} + +// enumFromConst returns the item with disjunctive +// constants replaced by itemEnum. +// For example: +// +// anyOf(const("a"), const("b"), const("c")) +// -> +// enum("a", "b", "c") +func enumFromConst(it0 internItem, u *uniqueItems) internItem { + switch it := it0.Value().(type) { + case *itemAnyOf: + if slices.ContainsFunc(it.elems, func(it internItem) bool { + _, ok := it.Value().(*itemConst) + return !ok + }) { + // They're not all consts, so return as-is. + return it0 + } + // All items are const. We can make an enum from this. + // TODO this doesn't cover cases where there are some + // const values and some noncrete values. 
+ it1 := &itemEnum{ + values: make([]ast.Expr, 0, len(it.elems)), + } + for _, e := range it.elems { + it1.values = append(it1.values, e.Value().(*itemConst).value) + } + return u.intern(it1) + default: + return u.apply(it0, enumFromConst) + } +} + +type generator struct { + cfg *GenerateConfig + + // err holds any errors accumulated during translation. + err errors.Error + + // defs holds any definitions made during the course of generation, + // indexed by the entry name within the `$defs` field. + defs map[string]internItem + + // unique ensures that all items are comparable with + // simple equality. + unique *uniqueItems +} + +func (g *generator) addError(pos cue.Value, err error) { + // TODO pos + g.err = errors.Append(g.err, errors.Promote(err, "")) +} + +// isDefinition reports whether the given path represents a definition. +// A definition is indicated by a selector with DefinitionLabel type. +func isDefinition(path cue.Path) bool { + for _, sel := range path.Selectors() { + if sel.LabelType() == cue.DefinitionLabel { + return true + } + } + return false +} + +func (g *generator) addErrorf(pos cue.Value, f string, a ...any) { + g.addError(pos, fmt.Errorf(f, a...)) +} + +// makeItem returns an item representing the JSON Schema +// for v in naive form. +func (g *generator) makeItem(v cue.Value, mode closedMode) internItem { + return g.unique.intern(g.makeItem0(v, mode)) +} + +func (g *generator) makeItem0(v cue.Value, mode closedMode) item { + op, args := v.Expr() + switch op { + case cue.NoOp, cue.SelectorOp: + pkg, path := v.ReferencePath() + if !pkg.Exists() { + break + } + // It's a reference: generate a definition for it. + // TODO Not all references need or should have a definition. + if name := g.cfg.NameFunc(pkg, path); name != "" { + // Lookup path directly rather than following v + // so that we get to see the reference in isolation + // and can follow its value even if it's a reference itself. 
+ v1 := pkg.LookupPath(path) + if !v1.Exists() { + g.addErrorf(v, "reference %v not found", path) + } + v = v1 + ref := &itemRef{ + defName: name, + } + if _, ok := g.defs[name]; ok { + // Already defined. + return ref + } + g.defs[name] = internItem{} // Prevent infinite loops on cycles. + defMode := open + if isDefinition(path) { + defMode = closedRecursively + } + g.defs[name] = g.makeItem(v, defMode) + return ref + } + case cue.AndOp: + if v.Kind() == cue.StructKind { + // It's a conjunction of structs: we want to see all the + // top level fields in one coherent view because JSON + // Schema requires `additionalProperties` to be at the + // same level as `properties`. + return &itemAllOf{ + elems: []internItem{ + g.unique.intern(g.makeStructItem(v, mode)), + g.unique.intern(&itemType{kinds: []string{"object"}}), + }, + } + } + return &itemAllOf{ + elems: mapSlice(args, func(v cue.Value) internItem { return g.makeItem(v, open) }), + } + case cue.OrOp: + return &itemAnyOf{ + elems: mapSlice(args, func(v cue.Value) internItem { return g.makeItem(v, open) }), + } + case cue.RegexMatchOp, + cue.NotRegexMatchOp: + re, err := args[0].String() + if err != nil { + g.addError(args[0], err) + return &itemFalse{} + } + m := g.unique.intern(&itemPattern{ + regexp: re, + }) + if op == cue.NotRegexMatchOp { + m = g.unique.intern(&itemNot{ + elem: m, + }) + } + return &itemAllOf{ + elems: []internItem{ + g.unique.intern(&itemType{ + kinds: []string{"string"}, + }), + m, + }, + } + case cue.EqualOp, + cue.NotEqualOp: + if len(args) > 1 { + // Binary operations can't be expressed in JSON Schema. + break + } + if !args[0].IsConcrete() { + // If it's not concrete, we can't represent it in JSON Schema + // so accept anything. 
+ return &itemTrue{} + } + syntax := args[0].Syntax() + expr, ok := syntax.(ast.Expr) + if !ok { + g.addError(args[0], fmt.Errorf("expected expression from Syntax, got %T", syntax)) + return &itemFalse{} + } + it := g.unique.intern(&itemConst{ + value: expr, + }) + if op == cue.EqualOp { + return it.Value() + } + return &itemNot{ + elem: it, + } + case cue.LessThanOp, + cue.LessThanEqualOp, + cue.GreaterThanOp, + cue.GreaterThanEqualOp: + if len(args) > 1 { + // Binary operations can't be expressed in JSON Schema. + break + } + switch kind := args[0].Kind(); kind { + case cue.FloatKind, cue.IntKind: + n, err := args[0].Float64() + if err != nil { + // Probably non-concrete. + return &itemTrue{} + } + return &itemAllOf{ + elems: []internItem{ + g.unique.intern(&itemBounds{ + constraint: op, + n: n, + }), + g.unique.intern(&itemType{ + kinds: []string{"number"}, + }), + }, + } + case cue.StringKind: + // Can't express bounds on strings in JSON Schema + return &itemType{ + kinds: cueKindToJSONSchemaTypes(kind), + } + default: + g.addError(args[0], fmt.Errorf("bad argument to unary comparison")) + return &itemFalse{} + } + case cue.CallOp: + return g.makeCallItem(v, args, mode) + } + if !v.IsNull() { + // We want to encode null as {type: "null"} not {const: null} + // so then there's a possibility of collapsing it together in + // the same type keyword. + if e, ok := g.constExpr(v, mode); ok { + return &itemConst{ + value: e, + } + } + } + kind := v.IncompleteKind() + if kind == cue.TopKind { + return &itemTrue{} + } + var it item // additional constraints for some known types. 
+ switch kind { + case cue.StructKind: + it = g.makeStructItem(v, mode) + case cue.ListKind: + it = g.makeListItem(v, mode) + } + var elems []internItem + if kinds := cueKindToJSONSchemaTypes(kind); len(kinds) > 0 { + elems = append(elems, g.unique.intern(&itemType{ + kinds: kinds, + })) + } + if it != nil { + elems = append(elems, g.unique.intern(it)) + } + switch len(elems) { + case 0: + return &itemTrue{} + case 1: + return elems[0].Value() + } + return &itemAllOf{ + elems: elems, + } +} + +// constExpr returns the "constant" value of a given +// cue value. There are a few possible ways to represent +// a JSON Schema const in CUE; some examples: +// +// true +// ==true +// close({a!: true}) // Note: this is the representation Extract uses +// [==true] +// [true] +// +// There's some overlap here with the unary == treatment +// in [generator.makeItem] but in that case we know that +// the argument must be constant, and this case we don't. +func (g *generator) constExpr(v cue.Value, mode closedMode) (ast.Expr, bool) { + // Check for unary == operator (e.g., ==1, ==true) + op, args := v.Expr() + if op == cue.EqualOp && len(args) == 1 { + // It's a unary equals: the argument must be concrete and + // there's no need to use [constExpr] any more. + syntax := args[0].Syntax() + expr, ok := syntax.(ast.Expr) + return expr, ok + } + + switch kind := v.Kind(); kind { + case cue.BottomKind: + return nil, false + case cue.StructKind: + if mode == open { + // Open struct is not const. 
+ return nil, false + } + // Closed struct: all fields must be required (no optional fields) + // and we need to recursively check all field values are const + iter, err := v.Fields(cue.Optional(true), cue.Patterns(true)) + if err != nil { + return nil, false + } + var fields []ast.Decl + for iter.Next() { + sel := iter.Selector() + // All fields must be required for the struct to be const + if sel.ConstraintType() != cue.RequiredConstraint { + return nil, false + } + // Recursively check if the field value is const + fieldExpr, ok := g.constExpr(iter.Value(), mode) + if !ok { + return nil, false + } + // Create a regular field (not required marker) + fields = append(fields, makeField(sel.Unquoted(), fieldExpr)) + } + return &ast.StructLit{Elts: fields}, true + case cue.ListKind: + if v.LookupPath(cue.MakePath(cue.AnyIndex)).Exists() { + // Open list is not const. + return nil, false + } + // Closed list: recursively check all elements are const + iter, err := v.List() + if err != nil { + return nil, false + } + var elems []ast.Expr + for iter.Next() { + elemExpr, ok := g.constExpr(iter.Value(), mode) + if !ok { + return nil, false + } + elems = append(elems, elemExpr) + } + return &ast.ListLit{Elts: elems}, true + } + // For other kinds (atoms), if it's concrete, return its syntax + if !v.IsConcrete() { + return nil, false + } + expr, ok := v.Syntax().(ast.Expr) + return expr, ok +} + +func (g *generator) makeCallItem(v cue.Value, args []cue.Value, mode closedMode) item { + if len(args) < 1 { + // Invalid call - not enough arguments + g.addError(v, fmt.Errorf("call operation with no function")) + return &itemFalse{} + } + + // Get the function name from the first argument. + // TODO this might need rethinking if/when functions become more of + // a first class thing within CUE. + funcName := fmt.Sprint(args[0]) + switch funcName { + case "error()", "error": + // Explicit error: don't add an error to g but map it to a `false` schema. 
+ // See https://github.com/cue-lang/cue/issues/4133 for why + // we include "error()" as well as "error" + return &itemFalse{} + case "close": + if mode == open { + mode = closed + } + return g.makeItem(args[1], mode).Value() + case "strings.MinRunes": + if len(args) != 2 { + g.addError(v, fmt.Errorf("strings.MinRunes expects 1 argument, got %d", len(args)-1)) + return &itemFalse{} + } + n, err := args[1].Int64() + if err != nil { + g.addError(args[1], err) + return &itemFalse{} + } + return &itemAllOf{ + elems: []internItem{ + g.unique.intern(&itemType{kinds: []string{"string"}}), + g.unique.intern(&itemLengthBounds{constraint: cue.GreaterThanEqualOp, n: int(n)}), + }, + } + + case "strings.MaxRunes": + if len(args) != 2 { + g.addError(v, fmt.Errorf("strings.MaxRunes expects 1 argument, got %d", len(args)-1)) + return &itemFalse{} + } + n, err := args[1].Int64() + if err != nil { + g.addError(args[1], err) + return &itemFalse{} + } + return &itemAllOf{ + elems: []internItem{ + g.unique.intern(&itemType{kinds: []string{"string"}}), + g.unique.intern(&itemLengthBounds{constraint: cue.LessThanEqualOp, n: int(n)}), + }, + } + + case "math.MultipleOf": + if len(args) != 2 { + g.addError(v, fmt.Errorf("math.MultipleOf expects 1 argument, got %d", len(args)-1)) + return &itemFalse{} + } + n, err := args[1].Float64() + if err != nil { + g.addError(args[1], err) + return &itemFalse{} + } + return &itemAllOf{ + elems: []internItem{ + g.unique.intern(&itemType{kinds: []string{"number"}}), + g.unique.intern(&itemMultipleOf{n: n}), + }, + } + + case "time.Format": + if len(args) != 2 { + g.addError(v, fmt.Errorf("time.Format expects 1 argument, got %d", len(args)-1)) + return &itemFalse{} + } + layout, err := args[1].String() + if err != nil { + // TODO should we just fall back to type=string if we + // can't determine the concrete format? 
+ g.addError(args[1], err) + return &itemFalse{} + } + // Convert CUE time layout to JSON Schema format + var format string + switch layout { + case time.RFC3339, time.RFC3339Nano: + format = "date-time" + case time.DateOnly: + format = "date" + case time.TimeOnly: + format = "time" + default: + // For other layouts, we can't express them in JSON Schema + // but at least we know it's a string. + return &itemType{kinds: []string{"string"}} + } + return &itemAllOf{ + elems: []internItem{ + g.unique.intern(&itemType{kinds: []string{"string"}}), + g.unique.intern(&itemFormat{format: format}), + }, + } + case "list.MinItems", "list.MaxItems": + if len(args) != 2 { + g.addError(v, fmt.Errorf("%s expects 1 argument, got %d", funcName, len(args)-1)) + return &itemFalse{} + } + n, err := args[1].Int64() + if err != nil { + g.addError(args[1], err) + return &itemFalse{} + } + var constraint cue.Op + if funcName == "list.MinItems" { + constraint = cue.GreaterThanEqualOp + } else { + constraint = cue.LessThanEqualOp + } + return &itemAllOf{ + elems: []internItem{ + g.unique.intern(&itemType{kinds: []string{"array"}}), + g.unique.intern(&itemItemsBounds{constraint: constraint, n: int(n)}), + }, + } + + case "list.MatchN": + // list.MatchN is generated by Extract for the contains keyword. 
+ // - list.MatchN(>=N, schema) represents contains with minContains: N + // - list.MatchN(>=N & <=M, schema) represents contains with minContains: N and maxContains: M + if len(args) != 3 { + // Unrecognized form, accept anything + return &itemTrue{} + } + + // Parse the constraint from the first argument + constraintVal := args[1] + var minVal, maxVal *int64 + + op, opArgs := constraintVal.Expr() + switch op { + case cue.NoOp: + // It's a simple expression, could be a literal or something more complex + // Try to parse as an int literal for the minimum + n, err := constraintVal.Int64() + if err == nil { + minVal = ref(n) + } else { + // Not a simple integer, accept anything + return &itemTrue{} + } + case cue.GreaterThanEqualOp: + // >=N constraint for minimum + if len(opArgs) != 1 { + return &itemTrue{} + } + n, err := opArgs[0].Int64() + if err != nil { + return &itemTrue{} + } + minVal = ref(n) + case cue.AndOp: + // Could be >=N & <=M + if len(opArgs) != 2 { + return &itemTrue{} + } + // First operand should be >=N + op1, op1Args := opArgs[0].Expr() + if op1 != cue.GreaterThanEqualOp || len(op1Args) != 1 { + return &itemTrue{} + } + n, err := op1Args[0].Int64() + if err != nil { + return &itemTrue{} + } + minVal = ref(n) + + // Second operand should be <=M + op2, op2Args := opArgs[1].Expr() + if op2 != cue.LessThanEqualOp || len(op2Args) != 1 { + return &itemTrue{} + } + n, err = op2Args[0].Int64() + if err != nil { + return &itemTrue{} + } + maxVal = ref(n) + default: + // Unknown constraint pattern, accept anything + return &itemTrue{} + } + + // Get the schema element from the second argument + // Check if it's bottom first (which represents "contains: false") + // to avoid adding errors to the generator. 
+ var elem internItem + elemVal := args[2] + if err := elemVal.Err(); err != nil { + // Bottom value - represents "contains: false" + elem = g.unique.intern(&itemFalse{}) + } else { + elem = g.makeItem(elemVal, open) + } + + return &itemAllOf{ + elems: []internItem{ + g.unique.intern(&itemType{kinds: []string{"array"}}), + g.unique.intern(&itemContains{elem: elem, min: minVal, max: maxVal}), + }, + } + + case "matchN": + // matchN is generated by Extract for oneOf, anyOf, allOf, and not. + // - matchN(1, [a, b, c, ...]) represents oneOf + // - matchN(0, [x]) represents not + // - matchN(>=1, [a, b, c, ...]) represents anyOf + // - matchN(N, [a, b, c, ...]) where N == len(list) represents allOf + if len(args) != 3 { + // Unrecognized form, accept anything + return &itemTrue{} + } + + constraintVal, listVal := args[1], args[2] + + var items []internItem + for i := 0; ; i++ { + // Unfortunately https://github.com/cue-lang/cue/issues/4132 means + // that we cannot iterate over elements of the list with listVal.List + // when there are error elements (which there could be, as [Extract] + // can generate explicit errors, but we _can_ use [Value.LookupPath] + // to look up explicit indexes. + v := listVal.LookupPath(cue.MakePath(cue.Index(i))) + if !v.Exists() { + break + } + items = append(items, g.makeItem(v, open)) + } + // Extract the list of items from the second argument. + + // Determine which combinator to use based on the constraint + // It can be a literal int (0, 1, N) or a unary expression (>=1). 
+ op, opArgs := constraintVal.Expr() + switch op { + case cue.NoOp: + // It's a simple integer literal + n, err := constraintVal.Int64() + if err != nil { + // Not an integer, accept anything + return &itemTrue{} + } + switch n { + case 0: + // matchN(0, [x]) represents not + if len(items) != 1 { + // Unexpected form, accept anything + return &itemTrue{} + } + return &itemNot{elem: items[0]} + case 1: + if len(items) == 0 { + return &itemFalse{} + } + // matchN(1, [a, b, c, ...]) represents oneOf + return &itemOneOf{elems: items} + default: + // matchN(N, [...]) where N == len(list) represents allOf + if int(n) == len(items) { + return &itemAllOf{elems: items} + } + // Unknown matchN pattern, accept anything + return &itemTrue{} + } + + case cue.GreaterThanEqualOp: + // matchN(>=1, [a, b, c, ...]) represents anyOf + if len(opArgs) != 1 { + return &itemTrue{} + } + n, err := opArgs[0].Int64() + if err != nil || n != 1 { + // Unknown matchN pattern, accept anything + return &itemTrue{} + } + if len(items) == 0 { + return &itemFalse{} + } + return &itemAnyOf{elems: items} + + default: + // Unknown operator, accept anything + return &itemTrue{} + } + + case "matchIf": + // matchIf is generated by Extract for if/then/else constraints. + // - matchIf(ifExpr, thenExpr, elseExpr) + if len(args) != 4 { + // Unrecognized form, accept anything + return &itemTrue{} + } + + return &itemIfThenElse{ + ifElem: g.makeItem(args[1], open), + thenElem: trueAsNil(g.makeItem(args[2], open)), + elseElem: trueAsNil(g.makeItem(args[3], open)), + } + + default: + // For unknown functions, accept anything rather than fail. 
+ // This allows for gradual implementation of more function types + return &itemTrue{} + } +} + +func (g *generator) makeStructItem(v cue.Value, mode closedMode) item { + props := itemProperties{ + properties: make(map[string]internItem), + patternProperties: make(map[string]internItem), + } + required := make(map[string]bool) + + allOf := &itemAllOf{} + addProperty := func(fieldName string, it internItem) { + props.properties[fieldName] = join(props.properties[fieldName], it, g.unique) + } + addPatternProperty := func(pattern string, it internItem) { + props.patternProperties[pattern] = join(props.patternProperties[pattern], it, g.unique) + } + hasUniversalConstraint := false + for v := range valueConjuncts(v) { + pkg, _ := v.ReferencePath() + if pkg.Exists() || v.Kind() != cue.StructKind { + // This conjunct is a reference or some other non-struct literal. + // Let's keep it as such. + allOf.elems = append(allOf.elems, g.makeItem(v, open)) + continue + } + iter, err := v.Fields(cue.Optional(true), cue.Patterns(true)) + if err != nil { + g.addError(v, err) + return &itemFalse{} + } + type pat struct { + pattern *regexp.Regexp + constraints map[internItem]bool + } + // patternConstraints keeps track of the pattern constraints in this + // particular conjunct so we can remove them from the individual fields. + var patternConstraints []pat + outer: + for iter.Next() { + sel := iter.Selector() + switch sel.ConstraintType() { + case cue.PatternConstraint: + re, ok := regexpForValue(sel.Pattern()) + if ok { + if re.String() == "" && acceptsAllString(sel.Pattern()) { + // Record the fact that we've seen a universal constraint + // because then we know that LookupPath(AnyString) + // will return it. 
+ hasUniversalConstraint = true + } + constraint := g.makeItem(iter.Value(), mode.descend()) + addPatternProperty(re.String(), constraint) + p := pat{ + pattern: re, + constraints: make(map[internItem]bool), + } + for c := range itemConjuncts(constraint) { + p.constraints[c] = true + } + patternConstraints = append(patternConstraints, p) + } else { + // We can't express the constraint in JSON Schema, and it + // might cover any number of possible labels, so the + // only thing we can do is treat the whole thing as explicitly + // open. + addPatternProperty("", g.unique.intern(&itemTrue{})) + } + continue outer + case cue.OptionalConstraint: + case cue.RequiredConstraint: + required[sel.Unquoted()] = true + default: + // It's a regular field. If it's concrete, then we can + // consider the field to be optional because it's OK + // to omit it. Otherwise it'll be required. + if err := iter.Value().Validate(cue.Concrete(true)); err != nil { + required[sel.Unquoted()] = true + } + } + propItem := g.makeItem(iter.Value(), mode.descend()) + fieldName := sel.Unquoted() + if len(patternConstraints) == 0 { + addProperty(fieldName, propItem) + continue + } + // There are pattern constraints which will have been unified in with + // the constraints of any matching field. They're redundant with + // respect to patternProperties, so remove them. + // This has the potential to remove explicit constraints on the fields + // themselves, but this will not change behavior, just result in a slightly + // smaller resulting schema. + allof, ok := propItem.Value().(*itemAllOf) + if !ok || len(allof.elems) <= 1 { + // No possibility of removing any conjuncts. + addProperty(fieldName, propItem) + continue + } + var elems []internItem + for _, c := range patternConstraints { + if !c.pattern.MatchString(fieldName) { + continue + } + if elems == nil { + elems = slices.Collect(siblings(allof)) + } + // We've found a pattern constraint that unifies with the field name. 
+ // Its constraint will have been added to this property's constraints + // but are redundant, so remove them. + elems = slices.DeleteFunc(elems, func(it internItem) bool { + return c.constraints[it] + }) + } + if len(elems) == 0 { + propItem = g.unique.intern(&itemTrue{}) + } else { + propItem = g.unique.intern(&itemAllOf{elems: elems}) + } + addProperty(fieldName, propItem) + } + } + + ellipsis := v.LookupPath(cue.MakePath(cue.AnyString)) + if ellipsis.Exists() && !hasUniversalConstraint { + constraint := g.makeItem(ellipsis, mode.descend()) + if isTrue(constraint) { + // `... _` is indistingishable from `[_]: _` so set it as a + // pattern property so we can treat it uniformly. + addPatternProperty("", constraint) + } else { + // Note: currently this will never happen as the CUE evaluator + // does not support `... T` in structs. + props.additionalProperties = constraint + } + } + + if constraint, ok := props.patternProperties[""]; ok && isTrue(constraint) || len(props.properties) == 0 { + // There's a universal pattern constraint and either no + // properties or we accept anything. In both these cases it's + // not possible to tell the difference between + // `additionalProperties` (only applies to properties not + // explicitly mentioned) and `patternProperties` (applies to all + // properties regardless), so use `additionalProperties` in + // preference as it's a little shorter and arguably more + // obvious. 
+ props.additionalProperties = join(props.additionalProperties, constraint, g.unique) + delete(props.patternProperties, "") + } + if mode != open && !g.cfg.ExplicitOpen && props.additionalProperties.Value() == nil { + props.additionalProperties = g.unique.intern(&itemFalse{}) + } + props.required = slices.Sorted(maps.Keys(required)) + hasObjectConstraints := + len(props.properties) == 0 || + len(props.required) == 0 || + len(props.patternProperties) == 0 + if len(allOf.elems) > 0 { + if !hasObjectConstraints { + return allOf + } + allOf.elems = append(allOf.elems, g.unique.intern(&props)) + return allOf + } + if hasObjectConstraints { + return &props + } + return &itemTrue{} +} + +func (g *generator) makeListItem(v cue.Value, mode closedMode) item { + ellipsis := v.LookupPath(cue.MakePath(cue.AnyIndex)) + lenv := v.Len() + var n int64 + if ellipsis.Exists() { + // It's an open list. The length will be in the form int&>=5 + op, args := lenv.Expr() + if op != cue.AndOp || len(args) != 2 { + g.addErrorf(v, "list length has unexpected form; got %v want int&>=N", lenv) + return &itemFalse{} + } + op, args = args[1].Expr() + if op != cue.GreaterThanEqualOp || len(args) != 1 { + g.addErrorf(v, "list length has unexpected form (2); got %v want >=N", lenv) + return &itemFalse{} + } + var err error + n, err = args[0].Int64() + if err != nil { + g.addErrorf(v, "cannot extract list length from %v: %v", v, err) + return &itemFalse{} + } + } else { + var err error + n, err = lenv.Int64() + if err != nil { + g.addErrorf(v, "cannot extract concrete list length from %v: %v", v, err) + } + } + prefix := make([]internItem, n) + for i := range n { + elem := v.LookupPath(cue.MakePath(cue.Index(i))) + if !elem.Exists() { + g.addErrorf(v, "cannot get value at index %d in %v", i, v) + return &itemFalse{} + } + prefix[i] = g.makeItem(elem, mode) + } + a := &itemAllOf{ + elems: []internItem{g.unique.intern(&itemType{kinds: []string{"array"}})}, + } + items := &itemItems{} + if len(prefix) > 
0 { + a.elems = append(a.elems, g.unique.intern(&itemLengthBounds{ + constraint: cue.GreaterThanEqualOp, + n: len(prefix), + })) + items.prefix = prefix + } + if ellipsis.Exists() { + items.rest = trueAsNil(g.makeItem(ellipsis, mode)) + } else { + a.elems = append(a.elems, g.unique.intern(&itemLengthBounds{ + constraint: cue.LessThanEqualOp, + n: len(prefix), + })) + } + if items.rest.Value() != nil || len(items.prefix) > 0 { + a.elems = append(a.elems, g.unique.intern(items)) + } + return a +} + +func join(it1, it2 internItem, u *uniqueItems) internItem { + if it1.Value() == nil || isTrue(it1) { + return it2 + } + if it2.Value() == nil || isTrue(it2) { + return it1 + } + return u.intern(&itemAllOf{ + elems: []internItem{it1, it2}, + }) +} + +// cueKindToJSONSchemaTypes converts a CUE kind to JSON Schema type strings +// as associated with the "type" keyword. +func cueKindToJSONSchemaTypes(kind cue.Kind) []string { + types := make([]string, 0, kind.Count()) + if (kind & cue.FloatKind) != 0 { + // JSON Schema doesn't distinguish between float and number, + // so any float allows all numbers (CUE models "number" as float|int). + kind &^= cue.NumberKind + types = append(types, "number") + } + + for k := range kind.Kinds() { + var t string + switch k { + case cue.NullKind: + t = "null" + case cue.BoolKind: + t = "boolean" + case cue.StringKind: + t = "string" + case cue.IntKind: + t = "integer" + case cue.StructKind: + t = "object" + case cue.ListKind: + t = "array" + default: + continue + } + types = append(types, t) + } + return types +} + +// regexpForValue tries to interpret v as a regular expression constraint, +// It returns the regular expression and reports whether it succeeded. 
+func regexpForValue(v cue.Value) (*regexp.Regexp, bool) { + s, ok := regexpForValue1(v) + if !ok { + return nil, false + } + pat, err := regexp.Compile(s) + return pat, err == nil +} + +func regexpForValue1(v cue.Value) (string, bool) { + op, args := v.Expr() + if op == cue.RegexMatchOp { + if len(args) != 1 { + return "", false + } + s, err := args[0].String() + if err != nil { + return "", false + } + return s, true + } + s, err := v.String() + if err == nil { + // Exact match. + return "^" + regexp.QuoteMeta(s) + "$", true + } + if acceptsAllString(v) { + // It matches all possible string labels: return + // a regular expression that matches all possible + // labels too. + return "", true + } + return "", false +} + +func acceptsAllString(v cue.Value) bool { + // TODO return v.AcceptsAll(cue.StringKind) if/when that + // method is implemented. + sv := v.Context().CompileString("string") + return v.Unify(sv).Subsume(sv, cue.Final()) == nil +} + +// trueAsNil returns the nil item if the item +// is *itemTrue (top). +func trueAsNil(it internItem) internItem { + if isTrue(it) { + return internItem{} + } + return it +} + +func isTrue(it internItem) bool { + _, ok := it.Value().(*itemTrue) + return ok +} + +// isConcreteScalar reports whether v should be considered concrete +// enough to be encoded as a const or enum value. +// +// Structs and lists are excluded for now to avoid O(n^2) +// overhead when checking. +// +// TODO handle struct and list kinds. +func isConcreteScalar(v cue.Value) bool { + if !v.IsConcrete() { + return false + } + return (v.Kind() & (cue.StructKind | cue.ListKind)) == 0 +} + +// DefaultNameFunc holds the default function used by [Generate] +// to generate a JSON Schema definition name from a reference path +// within the value inst, where inst is usually a CUE package value. 
+func DefaultNameFunc(inst cue.Value, ref cue.Path) string { + var buf strings.Builder + for i, sel := range ref.Selectors() { + if i > 0 { + buf.WriteByte('.') + } + // TODO what should this do when it's not a valid identifier? + buf.WriteString(sel.String()) + } + return buf.String() +} + +// mapSlice returns a slice of f(x) for each x in xs. +func mapSlice[T1, T2 any](xs []T1, f func(T1) T2) []T2 { + xs1 := make([]T2, len(xs)) + for i, x := range xs { + xs1[i] = f(x) + } + return xs1 +} +func valueConjuncts(v cue.Value) iter.Seq[cue.Value] { + return func(yield func(cue.Value) bool) { + yieldValueConjuncts(v, yield) + } +} + +func yieldValueConjuncts(v cue.Value, yield func(cue.Value) bool) bool { + op, args := v.Expr() + if op != cue.AndOp { + return yield(v) + } + for _, v := range args { + if !yieldValueConjuncts(v, yield) { + return false + } + } + return true +} diff --git a/vendor/cuelang.org/go/encoding/jsonschema/generate_items.go b/vendor/cuelang.org/go/encoding/jsonschema/generate_items.go new file mode 100644 index 0000000000..d873d8bda4 --- /dev/null +++ b/vendor/cuelang.org/go/encoding/jsonschema/generate_items.go @@ -0,0 +1,926 @@ +// Copyright 2025 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jsonschema + +import ( + "cmp" + "fmt" + "hash/maphash" + "maps" + "net/url" + "reflect" + "slices" + + "cuelang.org/go/cue" + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/format" + "cuelang.org/go/cue/token" + "cuelang.org/go/encoding/json" + "cuelang.org/go/internal/anyunique" +) + +// TODO use a defined order when keywords are marshaled +// so that we always put $schema at the start, for example. + +// item represents a JSON Schema constraint or structure that can be +// converted to an AST representation for serialization. +type item interface { + // generate returns the AST representation of this item. + generate(g *generator) ast.Expr + + // apply invokes f on each sub-item, replacing each with the item + // returned, and returns the new item (or the same if nothing has + // changed). Note that it does not call f on the item itself. It can + // use u to create new unique items. + apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item + + // hash writes the hash of the item to h; it should use u.writeHash + // to write the hash value for any items it contains. 
+ hash(h *maphash.Hash, u *uniqueItems) +} + +// itemTrue represents a schema that accepts any value (true schema) +type itemTrue struct{} + +func (i *itemTrue) generate(g *generator) ast.Expr { + return ast.NewBool(true) +} + +func (it *itemTrue) hash(h *maphash.Hash, u *uniqueItems) { +} + +func (i *itemTrue) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + return i +} + +// itemFalse represents a schema that accepts no values (false schema) +type itemFalse struct{} + +func (i *itemFalse) generate(g *generator) ast.Expr { + return ast.NewBool(false) +} + +func (it *itemFalse) hash(h *maphash.Hash, u *uniqueItems) { +} + +func (i *itemFalse) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + return i +} + +// itemAllOf represents an allOf combinator +type itemAllOf struct { + elems []internItem +} + +func (it *itemAllOf) hash(h *maphash.Hash, u *uniqueItems) { + for _, it := range it.elems { + u.writeHash(h, it) + } +} + +func (i *itemAllOf) add(it internItem) { + i.elems = append(i.elems, it) +} + +var _ elementsItem = (*itemAllOf)(nil) + +// elements implements [elementsItem]. +func (i *itemAllOf) elements() []internItem { + return i.elems +} + +func (i *itemAllOf) generate(g *generator) ast.Expr { + // Because a single json schema object is essentially an allOf itself, + // we can merge objects that don't share keywords + // but we also have to be careful not to merge keywords + // that interact with one another (for example `properties` and `patternProperties`). + var unmerged []ast.Expr + var finalFields []ast.Decl + finalFieldNames := make(map[string]bool) + + for _, e := range i.elems { + expr := e.Value().generate(g) + if lit, ok := expr.(*ast.BasicLit); ok { + switch lit.Kind { + case token.TRUE: + // true does nothing, so can be ignored. + continue + case token.FALSE: + // false means everything is false. 
+ return expr + } + } + + // Try to extract struct literal fields for merging + st, ok := expr.(*ast.StructLit) + if !ok { + // A schema should only ever encode to a bool or a struct. + panic(fmt.Errorf("unexpected expression in itemAllOf: %T", expr)) + } + + // Check if we can merge these fields with existing ones + avoidMerging := false + loop: + for _, decl := range st.Elts { + name := fieldLabel(decl) + if name == "" { + panic(fmt.Errorf("unexpected element in struct %#v", decl)) + } + if finalFieldNames[name] { + // Field already exists in merge target. + avoidMerging = true + break + } + for _, ik := range keywordInteractions[name] { + if finalFieldNames[ik] { + // Field interacts with one of the other fields in merge target. + avoidMerging = true + break loop + } + } + } + + if avoidMerging { + unmerged = append(unmerged, expr) + continue + } + // Merge the fields + for _, decl := range st.Elts { + finalFieldNames[fieldLabel(decl)] = true + finalFields = append(finalFields, decl) + } + } + + if len(unmerged) == 0 { + return makeSchemaStructLit(finalFields...) 
+ } + + // Add the merged fields as one element if non-empty + if len(finalFields) > 0 { + unmerged = append(unmerged, makeSchemaStructLit(finalFields...)) + } + + return singleKeyword("allOf", ast.NewList(unmerged...)) +} + +func (i *itemAllOf) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + elems, changed := applyElems(i.elems, f, u) + if !changed { + return i + } + return &itemAllOf{elems: elems} +} + +// itemOneOf represents a oneOf combinator +type itemOneOf struct { + elems []internItem +} + +func (it *itemOneOf) hash(h *maphash.Hash, u *uniqueItems) { + for _, it := range it.elems { + u.writeHash(h, it) + } +} + +func (i *itemOneOf) generate(g *generator) ast.Expr { + return singleKeyword("oneOf", generateList(g, i.elems)) +} + +func (i *itemOneOf) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + elems, changed := applyElems(i.elems, f, u) + if !changed { + return i + } + return &itemOneOf{elems: elems} +} + +var _ elementsItem = (*itemOneOf)(nil) + +// elements implements [elementsItem]. +func (i *itemOneOf) elements() []internItem { + return i.elems +} + +// itemAnyOf represents an anyOf combinator +type itemAnyOf struct { + elems []internItem +} + +func (it *itemAnyOf) hash(h *maphash.Hash, u *uniqueItems) { + for _, it := range it.elems { + u.writeHash(h, it) + } +} + +func (i *itemAnyOf) generate(g *generator) ast.Expr { + return singleKeyword("anyOf", generateList(g, i.elems)) +} + +func (i *itemAnyOf) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + elems, changed := applyElems(i.elems, f, u) + if !changed { + return i + } + return &itemAnyOf{elems: elems} +} + +var _ elementsItem = (*itemAnyOf)(nil) + +// elements implements [elementsItem]. 
+func (i *itemAnyOf) elements() []internItem { + return i.elems +} + +// itemNot represents a not combinator +type itemNot struct { + elem internItem +} + +func (it *itemNot) hash(h *maphash.Hash, u *uniqueItems) { + u.writeHash(h, it.elem) +} + +func (i *itemNot) generate(g *generator) ast.Expr { + return singleKeyword("not", i.elem.Value().generate(g)) +} + +func (i *itemNot) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + elem := f(i.elem, u) + if elem == i.elem { + return i + } + return &itemNot{elem: elem} +} + +// itemConst represents a constant value constraint. +// The value represents the actual constant in question as an AST expression. +type itemConst struct { + value ast.Expr +} + +func (it *itemConst) hash(h *maphash.Hash, u *uniqueItems) { + writeExprHash(h, it.value) +} + +func (i *itemConst) generate(g *generator) ast.Expr { + return singleKeyword("const", i.value) +} + +func (i *itemConst) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + return i +} + +// itemEnum represents an "enum" constraint. +// Each value represents one possible value of the enum. +type itemEnum struct { + values []ast.Expr +} + +func (it *itemEnum) hash(h *maphash.Hash, u *uniqueItems) { + for _, v := range it.values { + writeExprHash(h, v) + } +} + +func (i *itemEnum) generate(g *generator) ast.Expr { + return singleKeyword("enum", ast.NewList(i.values...)) +} + +func (i *itemEnum) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + return i +} + +type itemRef struct { + defName string +} + +func (it *itemRef) hash(h *maphash.Hash, u *uniqueItems) { + h.WriteString(it.defName) +} + +func (i *itemRef) generate(g *generator) ast.Expr { + // Note: we might need to escape the fragment to produce + // a valid URI. Also, if the definition name itself contains + // a slash, it should be treated as a literal slash not as a JSON Pointer + // separator. 
+ jptr := json.PointerFromTokens(slices.Values([]string{"$defs", i.defName})) + u := &url.URL{ + Fragment: string(jptr), + } + return singleKeyword("$ref", ast.NewString(u.String())) +} + +func (i *itemRef) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + return i +} + +// itemType represents a type constraint +type itemType struct { + kinds []string +} + +func (it *itemType) hash(h *maphash.Hash, u *uniqueItems) { + for _, k := range it.kinds { + h.WriteString(k) + } +} + +func (i *itemType) generate(g *generator) ast.Expr { + if len(i.kinds) == 1 { + return singleKeyword("type", ast.NewString(i.kinds[0])) + } + exprs := make([]ast.Expr, len(i.kinds)) + for i, k := range i.kinds { + exprs[i] = ast.NewString(k) + } + return singleKeyword("type", ast.NewList(exprs...)) +} + +func (i *itemType) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + return i +} + +// itemFormat represents a format constraint +type itemFormat struct { + format string +} + +func (it *itemFormat) hash(h *maphash.Hash, u *uniqueItems) { + h.WriteString(it.format) +} + +func (i *itemFormat) generate(g *generator) ast.Expr { + return singleKeyword("format", ast.NewString(i.format)) +} + +func (i *itemFormat) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + return i +} + +// itemPattern represents a pattern constraint +type itemPattern struct { + regexp string +} + +func (it *itemPattern) hash(h *maphash.Hash, u *uniqueItems) { + h.WriteString(it.regexp) +} + +func (i *itemPattern) generate(g *generator) ast.Expr { + return singleKeyword("pattern", ast.NewString(i.regexp)) +} + +func (i *itemPattern) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + return i +} + +// itemBounds represents numeric bounds constraints +type itemBounds struct { + constraint cue.Op // LessThanEqualOp, LessThanOp, GreaterThanEqualOp, GreaterThanOp + // TODO this encodes awkwardly in CUE (for example 10 becomes 
1e0). It + // would be good to fix that. + n float64 +} + +func (it *itemBounds) hash(h *maphash.Hash, u *uniqueItems) { + maphash.WriteComparable(h, it.constraint) + maphash.WriteComparable(h, it.n) +} + +func (i *itemBounds) generate(g *generator) ast.Expr { + var keyword string + switch i.constraint { + case cue.LessThanOp: + keyword = "exclusiveMaximum" + case cue.LessThanEqualOp: + keyword = "maximum" + case cue.GreaterThanOp: + keyword = "exclusiveMinimum" + case cue.GreaterThanEqualOp: + keyword = "minimum" + default: + panic(fmt.Errorf("unexpected bound operand %v", i.constraint)) + } + return singleKeyword(keyword, ast.NewLit(token.FLOAT, fmt.Sprint(i.n))) +} + +func (i *itemBounds) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + return i +} + +// itemMultipleOf represents a multipleOf constraint +type itemMultipleOf struct { + n float64 +} + +func (it *itemMultipleOf) hash(h *maphash.Hash, u *uniqueItems) { + maphash.WriteComparable(h, it.n) +} + +func (i *itemMultipleOf) generate(g *generator) ast.Expr { + return singleKeyword("multipleOf", ast.NewLit(token.FLOAT, fmt.Sprint(i.n))) +} + +func (i *itemMultipleOf) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + return i +} + +// itemLengthBounds represents string length constraints +type itemLengthBounds struct { + constraint cue.Op // LessThanEqualOp, GreaterThanEqualOp + n int +} + +func (it *itemLengthBounds) hash(h *maphash.Hash, u *uniqueItems) { + maphash.WriteComparable(h, it.constraint) + maphash.WriteComparable(h, it.n) +} + +func (i *itemLengthBounds) generate(g *generator) ast.Expr { + var keyword string + switch i.constraint { + case cue.LessThanEqualOp: + keyword = "maxLength" + case cue.GreaterThanEqualOp: + keyword = "minLength" + default: + panic("unexpected constraint in length bounds") + } + + return singleKeyword(keyword, ast.NewLit(token.INT, fmt.Sprint(i.n))) +} + +func (i *itemLengthBounds) apply(f func(internItem, *uniqueItems) 
internItem, u *uniqueItems) item { + return i +} + +// itemItemsBounds represents array length constraints +type itemItemsBounds struct { + constraint cue.Op // LessThanEqualOp, GreaterThanEqualOp + n int +} + +func (it *itemItemsBounds) hash(h *maphash.Hash, u *uniqueItems) { + maphash.WriteComparable(h, it.constraint) + maphash.WriteComparable(h, it.n) +} + +func (i *itemItemsBounds) generate(g *generator) ast.Expr { + var keyword string + switch i.constraint { + case cue.LessThanEqualOp: + keyword = "maxItems" + case cue.GreaterThanEqualOp: + keyword = "minItems" + default: + panic("unexpected constraint in items bounds") + } + return singleKeyword(keyword, ast.NewLit(token.INT, fmt.Sprint(i.n))) +} + +func (i *itemItemsBounds) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + return i +} + +// itemPropertyBounds represents object property count constraints +type itemPropertyBounds struct { + constraint cue.Op // LessThanEqualOp, GreaterThanEqualOp + n int +} + +func (it *itemPropertyBounds) hash(h *maphash.Hash, u *uniqueItems) { + maphash.WriteComparable(h, it.constraint) + maphash.WriteComparable(h, it.n) +} + +func (i *itemPropertyBounds) generate(g *generator) ast.Expr { + var keyword string + switch i.constraint { + case cue.LessThanEqualOp: + keyword = "maxProperties" + case cue.GreaterThanEqualOp: + keyword = "minProperties" + default: + panic("unexpected constraint in items bounds") + } + return singleKeyword(keyword, ast.NewLit(token.INT, fmt.Sprint(i.n))) +} + +func (i *itemPropertyBounds) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + return i +} + +// itemItems represents the items and prefixItems constraint for arrays. +type itemItems struct { + // known prefix. + prefix []internItem + // all elements beyond the prefix. 
+ rest internItem +} + +func (it *itemItems) hash(h *maphash.Hash, u *uniqueItems) { + for _, p := range it.prefix { + u.writeHash(h, p) + } + u.writeHash(h, it.rest) +} + +func (i *itemItems) generate(g *generator) ast.Expr { + fields := make([]ast.Decl, 0, 2) + if len(i.prefix) > 0 { + items := make([]ast.Expr, len(i.prefix)) + for i, e := range i.prefix { + items[i] = e.Value().generate(g) + } + fields = append(fields, makeField("prefixItems", &ast.ListLit{ + Elts: items, + })) + } + if i.rest.Value() != nil { + fields = append(fields, makeField("items", i.rest.Value().generate(g))) + } + return makeSchemaStructLit(fields...) +} + +func (i *itemItems) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + rest := i.rest + if rest.Value() != nil { + rest = f(rest, u) + } + prefix, changed := applyElems(i.prefix, f, u) + if !changed && rest == i.rest { + return i + } + return &itemItems{prefix: prefix, rest: rest} +} + +// itemContains represents a contains constraint for arrays +type itemContains struct { + elem internItem + min *int64 + max *int64 +} + +func (it *itemContains) hash(h *maphash.Hash, u *uniqueItems) { + u.writeHash(h, it.elem) + maphash.WriteComparable(h, it.min == nil) + if it.min != nil { + maphash.WriteComparable(h, *it.min) + } + maphash.WriteComparable(h, it.max == nil) + if it.max != nil { + maphash.WriteComparable(h, *it.max) + } +} + +func (i *itemContains) generate(g *generator) ast.Expr { + fields := []ast.Decl{makeField("contains", i.elem.Value().generate(g))} + if i.min != nil { + fields = append(fields, makeField("minContains", ast.NewLit(token.INT, fmt.Sprint(*i.min)))) + } + if i.max != nil { + fields = append(fields, makeField("maxContains", ast.NewLit(token.INT, fmt.Sprint(*i.max)))) + } + return makeSchemaStructLit(fields...) 
+} + +func (i *itemContains) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + elem := f(i.elem, u) + if elem == i.elem { + return i + } + return &itemContains{elem: elem, min: i.min, max: i.max} +} + +// property represents an object property +type property struct { + name string + item item +} + +// itemProperties represents object properties and associated keywords. +type itemProperties struct { + properties map[string]internItem + required []string + additionalProperties internItem + patternProperties map[string]internItem +} + +func (it *itemProperties) hash(h *maphash.Hash, u *uniqueItems) { + writeMapHash(h, it.properties, u) + for _, name := range slices.Sorted(slices.Values(it.required)) { + h.WriteString(name) + } + u.writeHash(h, it.additionalProperties) + writeMapHash(h, it.patternProperties, u) +} + +func (i *itemProperties) generate(g *generator) ast.Expr { + fields := []ast.Decl{} + if len(i.properties) > 0 { + propFields := make([]ast.Decl, 0, len(i.properties)) + for name, it := range i.properties { + propFields = append(propFields, makeField(name, it.Value().generate(g))) + } + slices.SortFunc(propFields, func(a, b ast.Decl) int { + return cmp.Compare(fieldLabel(a), fieldLabel(b)) + }) + fields = append(fields, makeField("properties", &ast.StructLit{Elts: propFields})) + } + if len(i.required) > 0 { + reqExprs := make([]ast.Expr, len(i.required)) + for j, r := range i.required { + reqExprs[j] = ast.NewString(r) + } + fields = append(fields, makeField("required", ast.NewList(reqExprs...))) + } + if i.additionalProperties.Value() != nil { + fields = append(fields, makeField("additionalProperties", i.additionalProperties.Value().generate(g))) + } + if len(i.patternProperties) > 0 { + pp := &ast.StructLit{} + for _, p := range slices.Sorted(maps.Keys(i.patternProperties)) { + pp.Elts = append(pp.Elts, makeField(p, i.patternProperties[p].Value().generate(g))) + } + fields = append(fields, makeField("patternProperties", pp)) + 
} + return makeSchemaStructLit(fields...) +} + +func (i *itemProperties) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + properties, changed0 := applyMap(i.properties, f, u) + patternProperties, changed1 := applyMap(i.patternProperties, f, u) + changed := changed0 || changed1 + additionalProperties := i.additionalProperties + if additionalProperties.Value() != nil { + if ap := f(additionalProperties, u); ap != additionalProperties { + additionalProperties = ap + changed = true + } + } + if !changed { + return i + } + return &itemProperties{ + properties: properties, + required: i.required, + additionalProperties: additionalProperties, + patternProperties: patternProperties, + } +} + +// itemIfThenElse represents if/then/else constraints +type itemIfThenElse struct { + ifElem internItem + thenElem internItem + elseElem internItem +} + +func (it *itemIfThenElse) hash(h *maphash.Hash, u *uniqueItems) { + u.writeHash(h, it.ifElem) + u.writeHash(h, it.thenElem) + u.writeHash(h, it.elseElem) +} + +func (i *itemIfThenElse) generate(g *generator) ast.Expr { + fields := []ast.Decl{makeField("if", i.ifElem.Value().generate(g))} + if i.thenElem.Value() != nil { + fields = append(fields, makeField("then", i.thenElem.Value().generate(g))) + } + if i.elseElem.Value() != nil { + fields = append(fields, makeField("else", i.elseElem.Value().generate(g))) + } + return makeSchemaStructLit(fields...) 
+} + +func (i *itemIfThenElse) apply(f func(internItem, *uniqueItems) internItem, u *uniqueItems) item { + ifElem := f(i.ifElem, u) + var thenElem, elseElem internItem + if i.thenElem.Value() != nil { + thenElem = f(i.thenElem, u) + } + if i.elseElem.Value() != nil { + elseElem = f(i.elseElem, u) + } + + if ifElem == i.ifElem && thenElem == i.thenElem && elseElem == i.elseElem { + return i + } + return &itemIfThenElse{ifElem: ifElem, thenElem: thenElem, elseElem: elseElem} +} + +func generateList(g *generator, items []internItem) ast.Expr { + exprs := make([]ast.Expr, len(items)) + for i, it := range items { + exprs[i] = it.Value().generate(g) + } + return ast.NewList(exprs...) +} + +func singleKeyword(name string, val ast.Expr) ast.Expr { + return makeSchemaStructLit(makeField(name, val)) +} + +// keywordGroups holds sets of JSON Schema keywords that +// interact directly with one another and therefore should not +// be merged with other keywords in the same group. +var keywordGroups = [][]string{ + {"properties", "patternProperties", "additionalProperties"}, + {"contains", "maxContains", "minContains"}, + {"items", "additionalItems", "prefixItems"}, + {"if", "then", "else"}, +} + +// keywordInteractions maps from a keyword to the set of +// keywords it interacts with (including itself). +var keywordInteractions = func() map[string][]string { + m := make(map[string][]string) + for _, ks := range keywordGroups { + for _, k := range ks { + m[k] = ks + } + } + return m +}() + +// fieldLabel extracts the field label name from a declaration. +func fieldLabel(d ast.Decl) string { + if f, ok := d.(*ast.Field); ok { + if name, _, _ := ast.LabelName(f.Label); name != "" { + return name + } + } + return "" +} + +// makeField creates a field with a string label and given value. 
+func makeField(name string, value ast.Expr) *ast.Field { + return &ast.Field{ + Label: ast.NewStringLabel(name), + Value: value, + } +} + +// makeSchemaStructLit creates a struct literal representing a JSON Schema +// schema, with fields in schema-centric order. +func makeSchemaStructLit(fields ...ast.Decl) *ast.StructLit { + slices.SortFunc(fields, func(a, b ast.Decl) int { + return cmpSchemaLabels(fieldLabel(a), fieldLabel(b)) + }) + return &ast.StructLit{ + Elts: fields, + } +} + +func cmpSchemaLabels(l1, l2 string) int { + return cmp.Or(cmp.Compare(labelPriority(l1), labelPriority(l2)), cmp.Compare(l1, l2)) +} + +// labelPriorityValues holds priority groups for sorting label names. +var labelPriorityValues = func() map[string]int { + // Always put these keywords at the start. + m := map[string]int{ + "$schema": 0, + "$defs": 1, + "type": 2, + } + // It's nice to group related keywords together. + n := len(m) + for i, g := range keywordGroups { + for _, name := range g { + m[name] = n + i + 1 + } + } + // Anything else gets put at the end in lexical order. + return m +}() + +func labelPriority(s string) int { + if pri, ok := labelPriorityValues[s]; ok { + return pri + } + return 1000 +} + +func writeMapHash[K cmp.Ordered](h *maphash.Hash, m map[K]internItem, u *uniqueItems) { + for _, k := range slices.Sorted(maps.Keys(m)) { + maphash.WriteComparable(h, k) + u.writeHash(h, m[k]) + } +} + +// writeExprHash hashes an AST expression using its formatted representation. +// This is a simple approach that ensures structurally equivalent expressions +// hash to the same value. +func writeExprHash(h *maphash.Hash, expr ast.Expr) { + // Use the formatted string representation of the expression for hashing. + // This ensures that expressions that format the same way will hash the same. 
+ data, err := format.Node(expr, format.Simplify()) + if err != nil { + panic(fmt.Errorf("invalid ast Expr: %v", err)) + } + h.Write(data) +} + +type uniqueItems struct { + items *anyunique.Store[item, *uniqueItems] +} + +func newUniqueItems() *uniqueItems { + u := &uniqueItems{} + u.items = anyunique.New[item, *uniqueItems](u) + return u +} + +func (u *uniqueItems) writeHash(h *maphash.Hash, it internItem) { + u.items.WriteHash(h, it) +} + +func (u *uniqueItems) apply(it internItem, f func(internItem, *uniqueItems) internItem) internItem { + it1 := it.Value().apply(f, u) + if it1 == it.Value() { + return it + } + return u.items.Make(it1) +} + +type internItem = anyunique.Handle[item] + +func (u *uniqueItems) intern(it item) internItem { + return u.items.Make(it) +} + +// Hash implements [anyunique.Hasher.Hash]. +func (u *uniqueItems) Hash(h *maphash.Hash, x item) { + maphash.WriteComparable(h, reflect.TypeOf(x)) + x.hash(h, u) +} + +// Equal implements [anyunique.Hasher.Equal] for two +// items x0 and x1. +func (u *uniqueItems) Equal(x0, x1 item) bool { + if x0 == x1 { + return true + } + // TODO although this is typically only called when items are + // identical (because hash collisions are rare), it could made more + // efficient. It would be better to have custom equality methods for + // each type, or at least a reflect-based equality checker that + // avoids unexported fields and compares [anyunique.Handle] values + // without descending into them. 
+ return reflect.DeepEqual(x0, x1) +} + +func applyMap(m map[string]internItem, f func(internItem, *uniqueItems) internItem, u *uniqueItems) (map[string]internItem, bool) { + var m1 map[string]internItem + for key, e := range m { + e1 := f(e, u) + if e1 == e { + continue + } + if m1 == nil { + m1 = make(map[string]internItem) + } + m1[key] = e1 + } + if m1 == nil { + return m, false + } + if len(m1) == len(m) { + return m1, true + } + for key, e := range m { + if _, ok := m1[key]; !ok { + m1[key] = e + } + } + return m1, true +} + +func applyElems(elems []internItem, f func(internItem, *uniqueItems) internItem, u *uniqueItems) ([]internItem, bool) { + changed := false + for i, e := range elems { + e1 := f(e, u) + if e1 == e { + continue + } + if !changed { + elems = slices.Clone(elems) + changed = true + } + elems[i] = e1 + } + return elems, changed +} diff --git a/vendor/cuelang.org/go/encoding/jsonschema/pointer.go b/vendor/cuelang.org/go/encoding/jsonschema/pointer.go deleted file mode 100644 index bff9b690ff..0000000000 --- a/vendor/cuelang.org/go/encoding/jsonschema/pointer.go +++ /dev/null @@ -1,44 +0,0 @@ -package jsonschema - -import ( - "iter" - "strings" -) - -// TODO this file contains functionality that mimics the JSON Pointer functionality -// in https://pkg.go.dev/github.com/go-json-experiment/json/jsontext#Pointer; -// perhaps use it when it moves into the stdlib as json/v2. 
- -var ( - jsonPtrEsc = strings.NewReplacer("~", "~0", "/", "~1") - jsonPtrUnesc = strings.NewReplacer("~0", "~", "~1", "/") -) - -func jsonPointerFromTokens(tokens iter.Seq[string]) string { - var buf strings.Builder - for tok := range tokens { - buf.WriteByte('/') - buf.WriteString(jsonPtrEsc.Replace(tok)) - } - return buf.String() -} - -func jsonPointerTokens(p string) iter.Seq[string] { - return func(yield func(string) bool) { - needUnesc := strings.IndexByte(p, '~') >= 0 - for len(p) > 0 { - p = strings.TrimPrefix(p, "/") - i := min(uint(strings.IndexByte(p, '/')), uint(len(p))) - var ok bool - if needUnesc { - ok = yield(jsonPtrUnesc.Replace(p[:i])) - } else { - ok = yield(p[:i]) - } - if !ok { - return - } - p = p[i:] - } - } -} diff --git a/vendor/cuelang.org/go/encoding/jsonschema/ref.go b/vendor/cuelang.org/go/encoding/jsonschema/ref.go index ca92b480ea..1d3bc2348e 100644 --- a/vendor/cuelang.org/go/encoding/jsonschema/ref.go +++ b/vendor/cuelang.org/go/encoding/jsonschema/ref.go @@ -27,7 +27,7 @@ import ( "cuelang.org/go/cue/ast" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" - "cuelang.org/go/internal" + "cuelang.org/go/encoding/json" ) func parseRootRef(str string) (cue.Path, error) { @@ -45,7 +45,7 @@ func parseRootRef(str string) (cue.Path, error) { // (technically a trailing slash `/` means there's an empty // final element). 
u.Fragment = strings.TrimSuffix(u.Fragment, "/") - fragmentParts := slices.Collect(jsonPointerTokens(u.Fragment)) + fragmentParts := slices.Collect(json.Pointer(u.Fragment).Tokens()) var selectors []cue.Selector for _, r := range fragmentParts { if i, err := strconv.ParseUint(r, 10, 64); err == nil && strconv.FormatUint(i, 10) == r { @@ -63,7 +63,7 @@ func parseRootRef(str string) (cue.Path, error) { var errRefNotFound = errors.New("JSON Pointer reference not found") func lookupJSONPointer(v cue.Value, p string) (cue.Value, error) { - for part := range jsonPointerTokens(p) { + for part := range json.Pointer(p).Tokens() { // Note: a JSON Pointer doesn't distinguish between indexing // and struct lookup. We have to use the value itself to decide // which operation is appropriate. @@ -160,7 +160,7 @@ func defaultMapRef( ) (importPath string, path cue.Path, err error) { var fragment string if loc.IsLocal { - fragment = cuePathToJSONPointer(loc.Path) + fragment = mustCUEPathToJSONPointer(loc.Path) } else { // It's external: use mapURLFn. u := ref(*loc.ID) @@ -175,7 +175,7 @@ func defaultMapRef( if len(fragment) > 0 && fragment[0] != '/' { return "", cue.Path{}, fmt.Errorf("anchors (%s) not supported", fragment) } - parts := slices.Collect(jsonPointerTokens(fragment)) + parts := slices.Collect(json.Pointer(fragment).Tokens()) labels, err := mapFn(token.Pos{}, parts) if err != nil { return "", cue.Path{}, err @@ -202,13 +202,11 @@ func defaultMap(p token.Pos, a []string) ([]ast.Label, error) { // TODO this is needlessly inefficient, as we're putting something // back together that was already joined before defaultMap was // invoked. This does avoid dual implementations though. 
- p := jsonPointerFromTokens(slices.Values(a)) - return []ast.Label{ast.NewIdent("_#defs"), ast.NewString(p)}, nil + p := json.PointerFromTokens(slices.Values(a)) + return []ast.Label{ast.NewIdent("_#defs"), ast.NewString(string(p))}, nil } name := a[1] - if ast.IsValidIdent(name) && - name != rootDefs[1:] && - !internal.IsDefOrHidden(name) { + if name != rootDefs[1:] && !ast.StringLabelNeedsQuoting(name) { return []ast.Label{ast.NewIdent("#" + name)}, nil } return []ast.Label{ast.NewIdent(rootDefs), ast.NewString(name)}, nil diff --git a/vendor/cuelang.org/go/encoding/jsonschema/util.go b/vendor/cuelang.org/go/encoding/jsonschema/util.go index d119ba8d45..6ee3d56b03 100644 --- a/vendor/cuelang.org/go/encoding/jsonschema/util.go +++ b/vendor/cuelang.org/go/encoding/jsonschema/util.go @@ -17,12 +17,12 @@ package jsonschema import ( "fmt" "slices" - "strconv" "strings" "cuelang.org/go/cue" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/token" + "cuelang.org/go/encoding/json" ) // TODO a bunch of stuff in this file is potentially suitable @@ -159,23 +159,12 @@ func labelForSelector(sel cue.Selector) (ast.Label, error) { } } -func cuePathToJSONPointer(p cue.Path) string { - return jsonPointerFromTokens(func(yield func(s string) bool) { - for _, sel := range p.Selectors() { - var token string - switch sel.Type() { - case cue.StringLabel: - token = sel.Unquoted() - case cue.IndexLabel: - token = strconv.Itoa(sel.Index()) - default: - panic(fmt.Errorf("cannot convert selector %v to JSON pointer", sel)) - } - if !yield(token) { - return - } - } - }) +func mustCUEPathToJSONPointer(p cue.Path) string { + ptr, err := json.PointerFromCUEPath(p) + if err != nil { + panic(err) + } + return string(ptr) } // relPath returns the path to v relative to root, diff --git a/vendor/cuelang.org/go/encoding/jsonschema/version.go b/vendor/cuelang.org/go/encoding/jsonschema/version.go index cff5c0ad8f..d9d11beee3 100644 --- a/vendor/cuelang.org/go/encoding/jsonschema/version.go +++ 
b/vendor/cuelang.org/go/encoding/jsonschema/version.go @@ -19,7 +19,7 @@ import ( "strings" ) -//go:generate go run golang.org/x/tools/cmd/stringer -type=Version -linecomment +//go:generate go tool stringer -type=Version -linecomment type Version int diff --git a/vendor/cuelang.org/go/encoding/jsonschema/version_string.go b/vendor/cuelang.org/go/encoding/jsonschema/version_string.go index b5b43266f6..b9439d1e60 100644 --- a/vendor/cuelang.org/go/encoding/jsonschema/version_string.go +++ b/vendor/cuelang.org/go/encoding/jsonschema/version_string.go @@ -25,8 +25,9 @@ const _Version_name = "unknownhttp://json-schema.org/draft-04/schema#http://json var _Version_index = [...]uint16{0, 7, 46, 85, 124, 168, 212, 219, 230, 244, 258} func (i Version) String() string { - if i < 0 || i >= Version(len(_Version_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Version_index)-1 { return "Version(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Version_name[_Version_index[i]:_Version_index[i+1]] + return _Version_name[_Version_index[idx]:_Version_index[idx+1]] } diff --git a/vendor/cuelang.org/go/encoding/openapi/build.go b/vendor/cuelang.org/go/encoding/openapi/build.go index 89ee9cbafc..04bbe7f54b 100644 --- a/vendor/cuelang.org/go/encoding/openapi/build.go +++ b/vendor/cuelang.org/go/encoding/openapi/build.go @@ -28,8 +28,8 @@ import ( "cuelang.org/go/cue/ast" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" - "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" + "cuelang.org/go/internal/core/subsume" ) type buildContext struct { @@ -79,7 +79,7 @@ func schemas(g *Generator, inst cue.InstanceOrValue) (schemas *ast.StructLit, er } // verify that certain elements are still passed. 
- for _, f := range strings.Split( + for f := range strings.SplitSeq( "version,title,allOf,anyOf,not,enum,Schema/properties,Schema/items"+ "nullable,type", ",") { if fieldFilter.MatchString(f) { @@ -183,6 +183,7 @@ func (c *buildContext) isInternal(sel cue.Selector) bool { func (b *builder) failf(v cue.Value, format string, args ...interface{}) { panic(&openapiError{ errors.NewMessagef(format, args...), + v.Err(), cue.MakePath(b.ctx.path...), v.Pos(), }) @@ -489,7 +490,7 @@ func (b *builder) disjunction(a []cue.Value, f typeFunc) { for _, v := range a { switch { - case v.Null() == nil: + case v.IsNull(): // TODO: for JSON schema, we need to fall through. nullable = true @@ -551,7 +552,7 @@ func (b *builder) disjunction(a []cue.Value, f typeFunc) { continue } err := v.Subsume(w, cue.Schema()) - if err == nil || errors.Is(err, internal.ErrInexact) { + if err == nil || errors.Is(err, subsume.ErrInexact) { subsumed = append(subsumed, schemas[j]) } } @@ -692,6 +693,8 @@ func (b *builder) object(v cue.Value) { // TODO: extract format from specific type. default: + // TODO: consider // TODO(pkg): wrapping may cause issues in the + // builtin package. Seems fine for now though. b.failf(v, "unsupported op %v for object type (%v)", op, v) return } @@ -873,7 +876,7 @@ func (b *builder) listCap(v cue.Value) { // must be type, so okay. 
case cue.NotEqualOp: i := b.int(a[0]) - b.setNot("allOff", ast.NewList( + b.setNot("allOf", ast.NewList( b.kv("minItems", i), b.kv("maxItems", i), )) @@ -913,7 +916,7 @@ func (b *builder) number(v cue.Value) { case cue.NotEqualOp: i := b.big(a[0]) - b.setNot("allOff", ast.NewList( + b.setNot("allOf", ast.NewList( b.kv("minimum", i), b.kv("maximum", i), )) diff --git a/vendor/cuelang.org/go/encoding/openapi/cycle.go b/vendor/cuelang.org/go/encoding/openapi/cycle.go index a8c6933d02..7f0fea5892 100644 --- a/vendor/cuelang.org/go/encoding/openapi/cycle.go +++ b/vendor/cuelang.org/go/encoding/openapi/cycle.go @@ -15,9 +15,11 @@ package openapi import ( + "slices" + "cuelang.org/go/cue" "cuelang.org/go/cue/errors" - "cuelang.org/go/cue/token" + "cuelang.org/go/internal/core/adt" "cuelang.org/go/internal/core/dep" "cuelang.org/go/internal/core/eval" internalvalue "cuelang.org/go/internal/value" @@ -40,17 +42,11 @@ func (b *builder) checkCycle(v cue.Value) bool { ctx := eval.NewContext(r, n) err := dep.Visit(nil, ctx, n, func(d dep.Dependency) error { - for _, m := range b.ctx.cycleNodes { - if m == d.Node { - var p token.Pos - if src := d.Node.Source(); src != nil { - p = src.Pos() - } - err := errors.Newf(p, - "cycle in reference at %v: cyclic structures not allowed when reference expansion is requested", v.Path()) - b.ctx.errs = errors.Append(b.ctx.errs, err) - return err - } + if slices.Contains(b.ctx.cycleNodes, d.Node) { + err := errors.Newf(adt.Pos(d.Node), + "cycle in reference at %v: cyclic structures not allowed when reference expansion is requested", v.Path()) + b.ctx.errs = errors.Append(b.ctx.errs, err) + return err } return nil }) diff --git a/vendor/cuelang.org/go/encoding/openapi/decode.go b/vendor/cuelang.org/go/encoding/openapi/decode.go index 279610fa0c..71a37ee1b9 100644 --- a/vendor/cuelang.org/go/encoding/openapi/decode.go +++ b/vendor/cuelang.org/go/encoding/openapi/decode.go @@ -73,7 +73,7 @@ func Extract(data cue.InstanceOrValue, c *Config) 
(*ast.File, error) { if c.PkgName != "" { p := &ast.Package{Name: ast.NewIdent(c.PkgName)} - p.AddComment(cg) + ast.AddComment(p, cg) add(p) } else if cg != nil { add(cg) @@ -106,7 +106,7 @@ func Extract(data cue.InstanceOrValue, c *Config) (*ast.File, error) { // TODO: do we want to store the OpenAPI version? // if version, _ := v.Lookup("openapi").String(); version != "" { - // add(internal.NewAttr("openapi", "version="+ version)) + // add(&ast.Attribute{Text: fmt.Sprintf("@openapi(version=%s)", version)}) // } if info := v.LookupPath(cue.MakePath(cue.Str("info"))); info.Exists() { @@ -157,9 +157,7 @@ func openAPIMapping(pos token.Pos, a []string) ([]ast.Label, error) { oapiSchemas, strings.Join(a, "/")) } name := a[2] - if ast.IsValidIdent(name) && - name != rootDefs[1:] && - !internal.IsDefOrHidden(name) { + if name != rootDefs[1:] && !ast.StringLabelNeedsQuoting(name) { return []ast.Label{ast.NewIdent("#" + name)}, nil } return []ast.Label{ast.NewIdent(rootDefs), ast.NewString(name)}, nil diff --git a/vendor/cuelang.org/go/encoding/openapi/errors.go b/vendor/cuelang.org/go/encoding/openapi/errors.go index 33d8358fe8..2f48a8037b 100644 --- a/vendor/cuelang.org/go/encoding/openapi/errors.go +++ b/vendor/cuelang.org/go/encoding/openapi/errors.go @@ -18,6 +18,8 @@ import ( "cuelang.org/go/cue" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" + "cuelang.org/go/internal/core/adt" + "cuelang.org/go/internal/pkg" ) var _ errors.Error = &openapiError{} @@ -25,10 +27,20 @@ var _ errors.Error = &openapiError{} // implements cue/Error type openapiError struct { errors.Message + err error path cue.Path pos token.Pos } +// Bottom implements [pkg.Bottomer]. By doing so we ensure that logic that +// checks for an incomplete error can do so, even if wrapped in an openapiError. 
+func (e *openapiError) Bottom() *adt.Bottom { + if x, ok := e.err.(pkg.Bottomer); ok { + return x.Bottom() + } + return nil +} + func (e *openapiError) Position() token.Pos { return e.pos } diff --git a/vendor/cuelang.org/go/encoding/openapi/openapi.go b/vendor/cuelang.org/go/encoding/openapi/openapi.go index f4efc39791..3e68c4c8e7 100644 --- a/vendor/cuelang.org/go/encoding/openapi/openapi.go +++ b/vendor/cuelang.org/go/encoding/openapi/openapi.go @@ -87,19 +87,14 @@ type Generator = Config // Gen generates the set OpenAPI schema for all top-level types of the // given instance. +// +// Deprecated: use [Generate]. func Gen(inst cue.InstanceOrValue, c *Config) ([]byte, error) { - if c == nil { - c = defaultConfig - } - all, err := schemas(c, inst) - if err != nil { - return nil, err - } - top, err := c.compose(inst, all) + f, err := Generate(inst, c) if err != nil { return nil, err } - topValue := inst.Value().Context().BuildExpr(top) + topValue := inst.Value().Context().BuildFile(f) if err := topValue.Err(); err != nil { return nil, err } @@ -131,6 +126,8 @@ func toCUE(name string, x interface{}) (v ast.Expr, err error) { v, err = cuejson.Extract(name, b) } if err != nil { + // TODO(pkg): wrapping may cause issues in the builtin package. Seems + // fine for now though. 
return nil, errors.Wrapf(err, token.NoPos, "openapi: could not encode %s", name) } diff --git a/vendor/cuelang.org/go/encoding/protobuf/jsonpb/decoder.go b/vendor/cuelang.org/go/encoding/protobuf/jsonpb/decoder.go index 14c55c3ff7..2ecba49545 100644 --- a/vendor/cuelang.org/go/encoding/protobuf/jsonpb/decoder.go +++ b/vendor/cuelang.org/go/encoding/protobuf/jsonpb/decoder.go @@ -172,7 +172,7 @@ func (r *rewriter) rewrite(schema cue.Value, expr ast.Expr) (x ast.Expr) { return x case *ast.ListLit: - elem, _ := schema.Elem() + elem := schema.LookupPath(cue.MakePath(cue.AnyIndex)) iter, _ := schema.List() for i, e := range x.Elts { v := elem diff --git a/vendor/cuelang.org/go/encoding/protobuf/parse.go b/vendor/cuelang.org/go/encoding/protobuf/parse.go index 7dd961c45c..10850ad8d4 100644 --- a/vendor/cuelang.org/go/encoding/protobuf/parse.go +++ b/vendor/cuelang.org/go/encoding/protobuf/parse.go @@ -277,7 +277,11 @@ func (p *protoConverter) resolveTopScope(pos scanner.Position, name string, opti if k == -1 { i = len(name) } - if m, ok := p.scope[0][name[:i]]; ok { + curName := name[:i] + if local, ok := strings.CutPrefix(curName, p.protoPkg+"."); ok { + curName = local + } + if m, ok := p.scope[0][curName]; ok { if m.pkg != nil { p.imported[m.pkg.qualifiedImportPath()] = true } @@ -298,7 +302,7 @@ func (p *protoConverter) resolveTopScope(pos scanner.Position, name string, opti } func (p *protoConverter) doImport(v *proto.Import) error { - if v.Filename == "cue/cue.proto" { + if p.mapBuiltinPackage(v.Filename) { return nil } @@ -319,10 +323,6 @@ func (p *protoConverter) doImport(v *proto.Import) error { return err } - if !p.mapBuiltinPackage(v.Position, v.Filename, filename == "") { - return nil - } - imp, err := p.state.parse(filename, nil) if err != nil { fail(v.Position, err) @@ -529,7 +529,7 @@ func (p *protoConverter) messageField(s *ast.StructLit, i int, v proto.Visitee) p.addTag(f, o.tags) if !o.required { - f.Optional = token.NoSpace.Pos() + f.Constraint = 
token.OPTION } case *proto.Enum: @@ -706,7 +706,7 @@ func (p *protoConverter) oneOf(x *proto.Oneof) { s := ast.NewStruct() ast.SetRelPos(s, token.Newline) embed := &ast.EmbedDecl{Expr: s} - embed.AddComment(comment(x.Comment, true)) + ast.AddComment(embed, comment(x.Comment, true)) p.addDecl(embed) @@ -722,7 +722,7 @@ func (p *protoConverter) oneOf(x *proto.Oneof) { case *proto.OneOfField: newStruct() oneOf := p.parseField(s, 0, x.Field) - oneOf.Optional = token.NoPos + oneOf.Constraint = token.ILLEGAL case *proto.Comment: cg := comment(x, false) @@ -761,7 +761,7 @@ func (p *protoConverter) parseField(s *ast.StructLit, i int, x *proto.Field) *as p.addTag(f, o.tags) if !o.required { - f.Optional = token.NoSpace.Pos() + f.Constraint = token.OPTION } return f } @@ -795,7 +795,7 @@ func (p *optionParser) parse(options []*proto.Option) { addComments(constraint, 1, o.Comment, o.InlineComment) p.message.Elts = append(p.message.Elts, constraint) if !p.required { - constraint.Optional = token.NoSpace.Pos() + constraint.Constraint = token.OPTION } case "(google.api.field_behavior)": if o.Constant.Source == "REQUIRED" { diff --git a/vendor/cuelang.org/go/encoding/protobuf/pbinternal/attribute.go b/vendor/cuelang.org/go/encoding/protobuf/pbinternal/attribute.go index 54a1949586..53000453ee 100644 --- a/vendor/cuelang.org/go/encoding/protobuf/pbinternal/attribute.go +++ b/vendor/cuelang.org/go/encoding/protobuf/pbinternal/attribute.go @@ -89,8 +89,7 @@ func FromValue(name string, v cue.Value) (info Info, err error) { switch v.IncompleteKind() { case cue.ListKind: info.CompositeType = List - e, _ := v.Elem() - if e.Exists() { + if e := v.LookupPath(cue.MakePath(cue.AnyIndex)); e.Exists() { v = e } else { for i, _ := v.List(); i.Next(); { @@ -116,7 +115,7 @@ func FromValue(name string, v cue.Value) (info Info, err error) { info.KeyType = Int // Assuming } info.CompositeType = Map - v, _ = v.Elem() + v = v.LookupPath(cue.MakePath(cue.AnyString)) } } diff --git 
a/vendor/cuelang.org/go/encoding/protobuf/textproto/decoder.go b/vendor/cuelang.org/go/encoding/protobuf/textproto/decoder.go index 01f63efc16..4823c70175 100644 --- a/vendor/cuelang.org/go/encoding/protobuf/textproto/decoder.go +++ b/vendor/cuelang.org/go/encoding/protobuf/textproto/decoder.go @@ -165,8 +165,13 @@ func (d *decoder) parseSchema(schema cue.Value) *mapping { msg = d.parseSchema(i.Value()) } - case pbinternal.List, pbinternal.Map: - e, _ := i.Value().Elem() + case pbinternal.List: + e := i.Value().LookupPath(cue.MakePath(cue.AnyIndex)) + if e.IncompleteKind() == cue.StructKind { + msg = d.parseSchema(e) + } + case pbinternal.Map: + e := i.Value().LookupPath(cue.MakePath(cue.AnyString)) if e.IncompleteKind() == cue.StructKind { msg = d.parseSchema(e) } @@ -320,17 +325,10 @@ func (d *decoder) decodeMsg(m *mapping, n []*pbast.Node) ast.Expr { } if value != nil { - var label ast.Label - if s := f.CUEName; ast.IsValidIdent(s) { - label = ast.NewIdent(s) - } else { - label = ast.NewString(s) - - } // TODO: convert line number information. 
However, position // information in textpbfmt packages is too wonky to be useful f := &ast.Field{ - Label: label, + Label: ast.NewStringLabel(f.CUEName), Value: value, // Attrs: []*ast.Attribute{{Text: f.attr.}}, } diff --git a/vendor/cuelang.org/go/encoding/protobuf/textproto/encoder.go b/vendor/cuelang.org/go/encoding/protobuf/textproto/encoder.go index 7994089898..3dbcf61ddf 100644 --- a/vendor/cuelang.org/go/encoding/protobuf/textproto/encoder.go +++ b/vendor/cuelang.org/go/encoding/protobuf/textproto/encoder.go @@ -150,7 +150,7 @@ func (e *encoder) encodeMsg(parent *pbast.Node, v cue.Value) { func copyMeta(x *pbast.Node, v cue.Value) { for _, doc := range v.Doc() { s := strings.TrimRight(doc.Text(), "\n") - for _, c := range strings.Split(s, "\n") { + for c := range strings.SplitSeq(s, "\n") { x.PreComments = append(x.PreComments, "# "+c) } } diff --git a/vendor/cuelang.org/go/encoding/protobuf/types.go b/vendor/cuelang.org/go/encoding/protobuf/types.go index b451fbfbee..beb2b57dea 100644 --- a/vendor/cuelang.org/go/encoding/protobuf/types.go +++ b/vendor/cuelang.org/go/encoding/protobuf/types.go @@ -16,7 +16,6 @@ package protobuf import ( "fmt" - "text/scanner" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/parser" @@ -84,11 +83,11 @@ var ( importStruct = ast.NewImport(nil, "struct") ) -func (p *protoConverter) mapBuiltinPackage(pos scanner.Position, file string, required bool) (generate bool) { +func (p *protoConverter) mapBuiltinPackage(file string) (found bool) { // Map some builtin types to their JSON/CUE mappings. 
switch file { - case "gogoproto/gogo.proto": - + case "cue/cue.proto": + return true case "google/protobuf/struct.proto": p.setBuiltin("google.protobuf.Struct", func() ast.Expr { return ast.NewStruct() @@ -118,7 +117,7 @@ func (p *protoConverter) mapBuiltinPackage(pos scanner.Position, file string, re return predeclared("number") }, nil) - return false + return true case "google/protobuf/empty.proto": f := func() ast.Expr { @@ -129,7 +128,7 @@ func (p *protoConverter) mapBuiltinPackage(pos scanner.Position, file string, re ) } p.setBuiltin("google.protobuf.Empty", f, pkgStruct) - return false + return true case "google/protobuf/duration.proto": f := func() ast.Expr { @@ -137,7 +136,7 @@ func (p *protoConverter) mapBuiltinPackage(pos scanner.Position, file string, re return ast.NewSel(time, "Duration") } p.setBuiltin("google.protobuf.Duration", f, pkgTime) - return false + return true case "google/protobuf/timestamp.proto": f := func() ast.Expr { @@ -145,7 +144,7 @@ func (p *protoConverter) mapBuiltinPackage(pos scanner.Position, file string, re return ast.NewSel(time, "Time") } p.setBuiltin("google.protobuf.Timestamp", f, pkgTime) - return false + return true case "google/protobuf/any.proto": // TODO: technically, the value should be `_` (anything), but that @@ -160,7 +159,7 @@ func (p *protoConverter) mapBuiltinPackage(pos scanner.Position, file string, re // The remaining fields of this object correspond to fields of the proto messsage. If the embedded message is well-known and has a custom JSON representation, that representation is assigned to the 'value' field. 
"@type": string, }`, nil) - return false + return true case "google/protobuf/wrappers.proto": p.setBuiltinParse("google.protobuf.DoubleValue", `null | float`, nil) @@ -172,17 +171,12 @@ func (p *protoConverter) mapBuiltinPackage(pos scanner.Position, file string, re p.setBuiltinParse("google.protobuf.BoolValue", `null | bool`, nil) p.setBuiltinParse("google.protobuf.StringValue", `null | string`, nil) p.setBuiltinParse("google.protobuf.BytesValue", `null | bytes`, nil) - return false - - // case "google/protobuf/field_mask.proto": - // p.setBuiltin("google.protobuf.FieldMask", "protobuf.FieldMask", nil) + return true - // protobuf.Any + // case "google/protobuf/field_mask.proto": + // p.setBuiltin("google.protobuf.FieldMask", "protobuf.FieldMask", nil) - default: - if required { - failf(pos, "import %q not found", file) - } + // protobuf.Any } - return true + return false } diff --git a/vendor/cuelang.org/go/encoding/protobuf/util.go b/vendor/cuelang.org/go/encoding/protobuf/util.go index 13afa50326..43949ebdeb 100644 --- a/vendor/cuelang.org/go/encoding/protobuf/util.go +++ b/vendor/cuelang.org/go/encoding/protobuf/util.go @@ -40,18 +40,15 @@ type protoError struct { error } -var ( - newSection = token.NewSection.Pos() -) +var newSection = token.NewSection.Pos() -func addComments(f ast.Node, i int, doc, inline *proto.Comment) bool { +func addComments(f ast.Node, i int, doc, inline *proto.Comment) { cg := comment(doc, true) if cg != nil && len(cg.List) > 0 && i > 0 { cg.List[0].Slash = newSection } ast.AddComment(f, cg) ast.AddComment(f, comment(inline, false)) - return doc != nil } func comment(c *proto.Comment, doc bool) *ast.CommentGroup { diff --git a/vendor/cuelang.org/go/encoding/toml/decode.go b/vendor/cuelang.org/go/encoding/toml/decode.go index 8c7da995a7..45d4e0a64c 100644 --- a/vendor/cuelang.org/go/encoding/toml/decode.go +++ b/vendor/cuelang.org/go/encoding/toml/decode.go @@ -21,6 +21,8 @@ package toml import ( "fmt" "io" + "maps" + "slices" "strconv" 
"strings" "time" @@ -202,6 +204,7 @@ func (d *Decoder) nextRootNode(tnode *toml.Node) error { case toml.Table: // Tables always begin a new line. key, keyElems := d.decodeKey("", tnode.Key()) + // All table keys must be unique, including for the top-level table. if d.seenTableKeys[key] { return d.nodeErrf(tnode.Child(), "duplicate key: %s", key) @@ -323,6 +326,19 @@ func (d *Decoder) findArrayPrefix(rkey rootedKey) *openTableArray { // Prefer an exact match over a relative prefix match. if arr := d.findArray(rkey); arr != nil { + // TODO: the fact that we need to delete from both structures below + // strongly hints towards merging the two structures in some way. + // We already have a TODO about making openTableArrays a more efficient structure. + + // When we find an exact match, we must forget about its subkeys + // because we're starting an entirely new array element. + d.openTableArrays = slices.DeleteFunc(d.openTableArrays, func(arr openTableArray) bool { + return strings.HasPrefix(arr.rkey, rkey+".") + }) + // We also need to forget about seen table keys. + maps.DeleteFunc(d.seenTableKeys, func(seenRkey rootedKey, _ bool) bool { + return strings.HasPrefix(seenRkey, rkey+".") + }) return arr } // The longest relative key match wins. @@ -392,13 +408,11 @@ func (d *Decoder) inlineFields(tkeys []tomlKey, relPos token.RelPos) (top, leaf } // quoteLabelIfNeeded quotes a label name only if it needs quoting. -// -// TODO(mvdan): this exists in multiple packages; move to cue/literal or cue/ast? func quoteLabelIfNeeded(name string) string { - if ast.IsValidIdent(name) { - return name + if ast.StringLabelNeedsQuoting(name) { + return literal.Label.Quote(name) } - return literal.Label.Quote(name) + return name } // label creates an ast.Label that represents a key with exactly the literal string name. @@ -407,17 +421,9 @@ func quoteLabelIfNeeded(name string) string { // cue/format knows how to quote any other identifiers correctly. 
func (d *Decoder) label(tkey tomlKey, relPos token.RelPos) ast.Label { pos := d.tokenFile.Pos(tkey.shape.Start.Offset, relPos) - if strings.HasPrefix(tkey.name, "_") { - return &ast.BasicLit{ - ValuePos: pos, - Kind: token.STRING, - Value: literal.String.Quote(tkey.name), - } - } - return &ast.Ident{ - NamePos: pos, - Name: tkey.name, - } + label := ast.NewStringLabel(tkey.name) + ast.SetPos(label, pos) + return label } // decodeExpr decodes a single TOML value expression, found on the right side diff --git a/vendor/cuelang.org/go/encoding/xml/koala/decode.go b/vendor/cuelang.org/go/encoding/xml/koala/decode.go index ff54a1ddee..e26e9474fe 100644 --- a/vendor/cuelang.org/go/encoding/xml/koala/decode.go +++ b/vendor/cuelang.org/go/encoding/xml/koala/decode.go @@ -14,7 +14,7 @@ // Package koala converts XML to and from CUE, as described in the proposal for the [koala] encoding. // This encoding is inspired by the [BadgerFish] convention for translating XML to JSON. -// It differs from this to better fit CUE syntax, (as "$" and "@" are special characters), +// It differs from this to better fit CUE syntax, (as "$" and "@" are special characters), // and for improved readability, as described in the koala proposal. // // XML elements are modeled as CUE structs, their attributes are modeled as struct fields @@ -96,6 +96,9 @@ func (dec *Decoder) Decode() (ast.Expr, error) { return nil, io.EOF } dec.decoderRan = true + // TODO(mvdan): note that we read the whole input just for the sake of [token.NewFile]; + // either revamp that API so that it doesn't need the length upfront, + // or lean into it and have internal/encoding pass the input size here. 
xmlText, err := io.ReadAll(dec.reader) if err != nil { return nil, err @@ -145,11 +148,10 @@ func (dec *Decoder) decoderInnerText(xmlToken xml.CharData, contentOffset int64) return fmt.Errorf("text content outside of an XML element is not supported") } pos := dec.tokenFile.Pos(int(contentOffset), token.NoRelPos) - txtContentPosition := pos - txtLabel := ast.NewString(contentAttribute) - txtLabel.ValuePos = txtContentPosition + txtLabel := ast.NewStringLabel(contentAttribute) + ast.SetPos(txtLabel, pos) val := toBasicLit(textContent) - val.ValuePos = txtContentPosition + ast.SetPos(val, pos) textContentNode := &ast.Field{ Label: txtLabel, Value: val, @@ -188,10 +190,7 @@ func (dec *Decoder) decodeStartElement(xmlToken xml.StartElement, startOffset in // Covers the root node. if dec.currField.field == nil { dec.currXmlElement = &xmlElement{xmlName: xmlToken.Name, attr: xmlToken.Attr} - cueElement, err := dec.cueFieldFromXmlElement(xmlToken, dec.currXmlElement, startOffset) - if err != nil { - return err - } + cueElement := dec.cueFieldFromXmlElement(xmlToken, dec.currXmlElement, startOffset) dec.currField.assignNewCurrField(cueElement) dec.astRoot = ast.NewStruct(dec.currField.field) ast.SetPos(dec.astRoot, dec.tokenFile.Pos(0, token.NoRelPos)) @@ -209,10 +208,7 @@ func (dec *Decoder) decodeStartElement(xmlToken xml.StartElement, startOffset in parentXmlNode.children = append(parentXmlNode.children, dec.currXmlElement) // For the CUE ast: step down the CUE hierarchy. dec.ancestors = append(dec.ancestors, dec.currField) - newElement, err := dec.cueFieldFromXmlElement(xmlToken, dec.currXmlElement, startOffset) - if err != nil { - return err - } + newElement := dec.cueFieldFromXmlElement(xmlToken, dec.currXmlElement, startOffset) // Check if this new XML element has a name that's been seen before at the current level. 
prefixedXmlElementName := prefixedElementName(xmlToken, dec.currXmlElement) sameNameElements := dec.currField.currFieldChildren[prefixedXmlElementName] @@ -254,11 +250,11 @@ func isWhiteSpace(s string) bool { // cueFieldFromXmlElement creates a new [ast.Field] to model the given xml element information // in [xml.StartElement] and [xmlElement]. The startOffset represents the offset // for the beginning of the start tag of the given XML element. -func (dec *Decoder) cueFieldFromXmlElement(elem xml.StartElement, xmlNode *xmlElement, startOffset int64) (*ast.Field, error) { +func (dec *Decoder) cueFieldFromXmlElement(elem xml.StartElement, xmlNode *xmlElement, startOffset int64) *ast.Field { elementName := prefixedElementName(elem, xmlNode) - resLabel := ast.NewString(elementName) + resLabel := ast.NewStringLabel(elementName) pos := dec.tokenFile.Pos(int(startOffset), token.NoRelPos) - resLabel.ValuePos = pos + ast.SetPos(resLabel, pos) resultValue := &ast.StructLit{} result := &ast.Field{ Label: resLabel, @@ -268,10 +264,10 @@ func (dec *Decoder) cueFieldFromXmlElement(elem xml.StartElement, xmlNode *xmlEl // Extract attributes as children. 
for _, a := range elem.Attr { attrName := prefixedAttrName(a, elem, xmlNode) - label := ast.NewString(attributeSymbol + attrName) + label := ast.NewStringLabel(attributeSymbol + attrName) value := toBasicLit(a.Value) - label.ValuePos = pos - value.ValuePos = pos + ast.SetPos(label, pos) + ast.SetPos(value, pos) attrExpr := &ast.Field{ Label: label, Value: value, @@ -279,7 +275,7 @@ func (dec *Decoder) cueFieldFromXmlElement(elem xml.StartElement, xmlNode *xmlEl } resultValue.Elts = append(resultValue.Elts, attrExpr) } - return result, nil + return result } // prefixedElementName returns the full name of an element, diff --git a/vendor/cuelang.org/go/encoding/yaml/yaml.go b/vendor/cuelang.org/go/encoding/yaml/yaml.go index f076be3afc..c0a5da3190 100644 --- a/vendor/cuelang.org/go/encoding/yaml/yaml.go +++ b/vendor/cuelang.org/go/encoding/yaml/yaml.go @@ -70,7 +70,9 @@ func Extract(filename string, src interface{}) (*ast.File, error) { // Encode returns the YAML encoding of v. func Encode(v cue.Value) ([]byte, error) { - n := v.Syntax(cue.Final()) + // Note that we use [cue.Concrete] in this package, which expands all references. + // If we want YAML to encode with anchors in the future, we can change this. + n := v.Syntax(cue.Concrete(true)) b, err := cueyaml.Encode(n) return b, err } @@ -84,7 +86,7 @@ func EncodeStream(iter cue.Iterator) ([]byte, error) { if i > 0 { buf.WriteString("---\n") } - n := iter.Value().Syntax(cue.Final()) + n := iter.Value().Syntax(cue.Concrete(true)) b, err := cueyaml.Encode(n) if err != nil { return nil, err @@ -94,6 +96,40 @@ func EncodeStream(iter cue.Iterator) ([]byte, error) { return buf.Bytes(), nil } +// NewDecoder configures a YAML decoder. The path is used to associate position +// information with each node. +// +// Use the Decoder's Extract method to extract YAML values one at a time. +// For YAML streams with multiple documents separated by `---`, each call to +// Extract will return the next document. 
+func NewDecoder(path string, src io.Reader) *Decoder { + b, err := source.ReadAll(path, src) + return &Decoder{ + path: path, + dec: cueyaml.NewDecoder(path, b), + readAllErr: err, + } +} + +// A Decoder converts YAML values to CUE. +type Decoder struct { + path string + dec cueyaml.Decoder + readAllErr error +} + +// Extract converts the current YAML value to a CUE ast. It returns io.EOF +// if the input has been exhausted. +// +// For YAML streams with multiple documents separated by `---`, each call to +// Extract will return the next document as a separate CUE expression. +func (d *Decoder) Extract() (ast.Expr, error) { + if d.readAllErr != nil { + return nil, d.readAllErr + } + return d.dec.Decode() +} + // Validate validates the YAML and confirms it matches the constraints // specified by v. For YAML streams, all values must match v. func Validate(b []byte, v cue.Value) error { diff --git a/vendor/cuelang.org/go/internal/anyunique/unique.go b/vendor/cuelang.org/go/internal/anyunique/unique.go new file mode 100644 index 0000000000..13906a46bc --- /dev/null +++ b/vendor/cuelang.org/go/internal/anyunique/unique.go @@ -0,0 +1,122 @@ +// Package anyunique provides canonicalization of values under a +// caller-defined equivalence relation. +// +// A [Store] holds a set of unique values of a specific type T. Calling +// [Store.Make] with two values that are equivalent according to the provided +// [Hasher] returns [Handle] values that are identical. [Handle] is a lightweight +// wrapper around the canonical value; use [U.Get] to obtain the +// underlying T. +// +// The zero [Handle] represents the zero value of T. Make returns the zero +// [Handle] when called with the zero value of T: it will never try to hash +// the zero value. +// +// [Store.WriteHash] writes a short representation of a canonicalized +// value to a [maphash.Hash]. It is useful when hashing structures that +// themselves contain canonicalized values, avoiding re-hashing the full +// value graph. 
+// +// NOTE this package assumes that T values are treated as immutable. +// That is, after calling [Store.Make] a value must not change. +package anyunique + +import "hash/maphash" + +// A Hasher defines a hash function and an equivalence relation over +// values of type T. +// +// Hash must write a hash of its argument to the provided *maphash.Hash, +// and Equal must report whether two values are equivalent. Hash and +// Equal must be consistent: if Equal(x, y) is true then Hash must +// produce the same output for x and y. +// +// Note: this is an exact copy of the proposed new Hasher interface +// for the Go API. +// See https://go-review.googlesource.com/c/go/+/657296/11/src/hash/maphash/hasher.go +// +// TODO alias this to maphash.Hasher when the above CL lands. +type Hasher[T any] interface { + Hash(*maphash.Hash, T) + Equal(x, y T) bool +} + +// New returns a new store holding a set of unique values +// of type T, using h to determine whether values are the +// same. +// +// The equivalence relation and hash are supplied by the given [Hasher]. +func New[T comparable, H Hasher[T]](h H) *Store[T, H] { + s := &Store[T, H]{ + h: h, + seed: maphash.MakeSeed(), + hashes: make(map[T]uint64), + entries: make(map[uint64][]T), + } + return s +} + +// Store holds a set of unique values of type T. +type Store[T comparable, H Hasher[T]] struct { + h H + seed maphash.Seed + entries map[uint64][]T + hashes map[T]uint64 +} + +// Handle represents a unique value of type T. If two values of type Handle[T] +// originating from the same [Store] compare equal, they are guaranteed +// to be equal according to the equality criteria that the store was +// created with. +type Handle[T comparable] struct { + x T +} + +// Value returns the actual value held in u. +func (u Handle[T]) Value() T { + return u.x +} + +// WriteHash writes a short representation of x to h. 
+// This allows callers to avoid hashing an tree of values +// when hashing a value that itself contains other Handle[T] items. +func (s *Store[T, H]) WriteHash(h *maphash.Hash, x Handle[T]) { + z := isZero(x) + maphash.WriteComparable(h, z) + if !z { + // TODO we _could_ write two independent hashes here + // if we were concerned about collisions. + maphash.WriteComparable(h, s.hashes[x.Value()]) + } +} + +// Make returns a unique value u such that u.Get() is equal to x +// according to the equality criteria defined by the store. +// +// It is assumed that values will not change after passing to Make: the +// caller must take care to preserve immutability. +func (s *Store[T, H]) Make(x T) Handle[T] { + if isZero(x) { + return Handle[T]{} + } + + if _, ok := s.hashes[x]; ok { + return Handle[T]{x} + } + var hasher maphash.Hash + hasher.SetSeed(s.seed) + s.h.Hash(&hasher, x) + h := hasher.Sum64() + entries := s.entries[h] + for _, e := range entries { + if s.h.Equal(x, e) { + return Handle[T]{e} + } + } + s.entries[h] = append(entries, x) + s.hashes[x] = h + return Handle[T]{x} +} + +func isZero[T comparable](x T) bool { + return x == *new(T) +} diff --git a/vendor/cuelang.org/go/internal/astinternal/debug.go b/vendor/cuelang.org/go/internal/astinternal/debug.go index 24e4398237..c09cb751be 100644 --- a/vendor/cuelang.org/go/internal/astinternal/debug.go +++ b/vendor/cuelang.org/go/internal/astinternal/debug.go @@ -23,7 +23,6 @@ import ( "cuelang.org/go/cue/ast" "cuelang.org/go/cue/token" - "cuelang.org/go/internal" ) // AppendDebug writes a multi-line Go-like representation of a syntax tree node, @@ -61,6 +60,10 @@ type DebugConfig struct { // values; setting this also implies [DebugConfig.IncludeNodeRefs] // and references will be printed as pointers. IncludePointers bool + + // AllPositions causes all [ast.Node] implementions to emit their start + // and end positions. 
+ AllPositions bool } type debugPrinter struct { @@ -87,6 +90,7 @@ func (d *debugPrinter) value0(v reflect.Value, impliedType reflect.Type) { // Skip over interfaces and pointers, stopping early if nil. concreteType := v.Type() refName := "" + var startPos, endPos token.Pos ptrVal := uintptr(0) for { k := v.Kind() @@ -100,11 +104,12 @@ func (d *debugPrinter) value0(v reflect.Value, impliedType reflect.Type) { return } if k == reflect.Pointer { - if n, ok := v.Interface().(ast.Node); ok { + if n, ok := reflect.TypeAssert[ast.Node](v); ok { ptrVal = v.Pointer() if id, ok := d.nodeRefs[n]; ok { refName = refIDToName(id) } + startPos, endPos = n.Pos(), n.End() } } v = v.Elem() @@ -130,6 +135,10 @@ func (d *debugPrinter) value0(v reflect.Value, impliedType reflect.Type) { d.printf(")") return case token.Token: + if d.cfg.OmitEmpty && v == token.ILLEGAL { + // ILLEGAL is the zero value, meaning "no token". + return + } d.printf("%s(%q)", t, v) return } @@ -157,6 +166,9 @@ func (d *debugPrinter) value0(v reflect.Value, impliedType reflect.Type) { } else if refName != "" { d.printf("@%s", refName) } + if d.cfg.AllPositions && startPos.IsValid() { + d.printf("[%v]", positionRange(startPos, endPos)) + } d.printf("{") d.level++ var anyElems bool @@ -177,6 +189,26 @@ func (d *debugPrinter) value0(v reflect.Value, impliedType reflect.Type) { } } +// positionRange returns a string representing the range +// of positions between p0 and p1, as returned +// by [ast.Node.Pos] and [ast.Node.End] respectively. 
+func positionRange(p0, p1 token.Pos) string { + if !p1.IsValid() { + return p0.String() + } + pos0, pos1 := p0.Position(), p1.Position() + if pos1.Filename != pos0.Filename { + return fmt.Sprintf("%v,%v", pos0, pos1) + } + var buf strings.Builder + if len(pos0.Filename) != 0 { + buf.WriteString(pos0.Filename) + buf.WriteString(":") + } + fmt.Fprintf(&buf, "%d:%d,%d:%d", pos0.Line, pos0.Column, pos1.Line, pos1.Column) + return buf.String() +} + func (d *debugPrinter) sliceElems(v reflect.Value, elemType reflect.Type) (anyElems bool) { for i := 0; i < v.Len(); i++ { ev := v.Index(i) @@ -196,7 +228,7 @@ func (d *debugPrinter) sliceElems(v reflect.Value, elemType reflect.Type) (anyEl func (d *debugPrinter) structFields(v reflect.Value) (anyElems bool) { t := v.Type() - for i := 0; i < v.NumField(); i++ { + for i := range v.NumField() { f := t.Field(i) if !gotoken.IsExported(f.Name) { continue @@ -231,8 +263,7 @@ func (d *debugPrinter) structFields(v reflect.Value) (anyElems bool) { d.truncate(elemStart) } } - val := v.Addr().Interface() - if val, ok := val.(ast.Node); ok { + if val, ok := reflect.TypeAssert[ast.Node](v.Addr()); ok { // Comments attached to a node aren't a regular field, but are still useful. // The majority of nodes won't have comments, so skip them when empty. 
if comments := ast.Comments(val); len(comments) > 0 { @@ -295,7 +326,7 @@ func (d *debugPrinter) addNodeRefs(v reflect.Value) { } case reflect.Struct: t := v.Type() - for i := 0; i < v.NumField(); i++ { + for i := range v.NumField() { f := t.Field(i) if !gotoken.IsExported(f.Name) { continue @@ -345,6 +376,15 @@ func DebugStr(x interface{}) (out string) { out += DebugStr(v.Expr) return out + case *ast.TryClause: + out := "try" + if v.Ident != nil { + // Assignment form: try x = expr + out += " " + DebugStr(v.Ident) + " = " + DebugStr(v.Expr) + } + // Struct form: body is in Comprehension.Value + return out + case *ast.Alias: out := DebugStr(v.Ident) out += "=" @@ -369,7 +409,7 @@ func DebugStr(x interface{}) (out string) { case *ast.ImportDecl: out := "import " - if v.Lparen != token.NoPos { + if v.Lparen.IsValid() { out += "( " out += DebugStr(v.Specs) out += " )" @@ -381,6 +421,16 @@ func DebugStr(x interface{}) (out string) { case *ast.Comprehension: out := DebugStr(v.Clauses) out += DebugStr(v.Value) + if v.Fallback != nil { + // Use "fallback" for 'for' comprehensions, "else" for 'if'/'try' + kw := "else" + if len(v.Clauses) > 1 { + kw = "fallback" + } else if _, ok := v.Clauses[0].(*ast.ForClause); ok { + kw = "fallback" + } + out += " " + kw + " " + DebugStr(v.Fallback.Body) + } return out case *ast.StructLit: @@ -419,24 +469,34 @@ func DebugStr(x interface{}) (out string) { return out case *ast.Field: - out := DebugStr(v.Label) - if t, ok := internal.ConstraintToken(v); ok { - out += t.String() + var out strings.Builder + out.WriteString(DebugStr(v.Label)) + if v.Alias != nil { + out.WriteString("~") + if v.Alias.Label != nil { + // Dual form + out.WriteString("(") + out.WriteString(DebugStr(v.Alias.Label)) + out.WriteString(",") + out.WriteString(DebugStr(v.Alias.Field)) + out.WriteString(")") + } else { + // Simple form + out.WriteString(DebugStr(v.Alias.Field)) + } + } + if t := v.Constraint; t != token.ILLEGAL { + out.WriteString(t.String()) } if 
v.Value != nil { - switch v.Token { - case token.ILLEGAL, token.COLON: - out += ": " - default: - out += fmt.Sprintf(" %s ", v.Token) - } - out += DebugStr(v.Value) + out.WriteString(": ") + out.WriteString(DebugStr(v.Value)) for _, a := range v.Attrs { - out += " " - out += DebugStr(a) + out.WriteString(" ") + out.WriteString(DebugStr(a)) } } - return out + return out.String() case *ast.Attribute: return v.Text @@ -473,6 +533,9 @@ func DebugStr(x interface{}) (out string) { out += DebugStr(v.Y) return out + case *ast.PostfixExpr: + return DebugStr(v.X) + v.Op.String() + case []*ast.CommentGroup: var a []string for _, c := range v { @@ -538,12 +601,12 @@ func DebugStr(x interface{}) (out string) { if len(v) == 0 { return "" } - out := "" + var out strings.Builder for _, c := range v { - out += DebugStr(c) - out += " " + out.WriteString(DebugStr(c)) + out.WriteString(" ") } - return out + return out.String() case []ast.Expr: if len(v) == 0 { diff --git a/vendor/cuelang.org/go/internal/attrs.go b/vendor/cuelang.org/go/internal/attrs.go index b3d1918eaf..5e082d4f65 100644 --- a/vendor/cuelang.org/go/internal/attrs.go +++ b/vendor/cuelang.org/go/internal/attrs.go @@ -273,7 +273,7 @@ func scanAttributeTokens(s *scanner.Scanner, startPos token.Pos, close uint64) ( func tokenMaskStr(m uint64) string { var buf strings.Builder - for t := token.Token(0); t < 64; t++ { + for t := range token.Token(64) { if (m & (1 << t)) != 0 { if buf.Len() > 0 { buf.WriteByte('|') diff --git a/vendor/cuelang.org/go/internal/cli/cli.go b/vendor/cuelang.org/go/internal/cli/cli.go index d4702e4cc4..aee8a901ef 100644 --- a/vendor/cuelang.org/go/internal/cli/cli.go +++ b/vendor/cuelang.org/go/internal/cli/cli.go @@ -20,7 +20,7 @@ import ( "cuelang.org/go/cue" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/errors" - "cuelang.org/go/cue/parser" + "cuelang.org/go/cue/literal" "cuelang.org/go/cue/token" ) @@ -28,11 +28,18 @@ func ParseValue(pos token.Pos, name, str string, k cue.Kind) (x ast.Expr, errs 
e var expr ast.Expr if k&cue.NumberKind != 0 { - var err error - expr, err = parser.ParseExpr(name, str) - if err != nil { + var info literal.NumInfo + if err := literal.ParseNum(str, &info); err != nil { + // Note that the wrapped err already mentions str. errs = errors.Wrapf(err, pos, "invalid number for injection tag %q", name) + } else if info.IsInt() { + expr = ast.NewLit(token.INT, str) + } else if k&cue.FloatKind == 0 { + errs = errors.Newf(pos, + "invalid int %q for injection tag %q", str, name) + } else { + expr = ast.NewLit(token.FLOAT, str) } } @@ -41,7 +48,7 @@ func ParseValue(pos token.Pos, name, str string, k cue.Kind) (x ast.Expr, errs e b, ok := boolValues[str] if !ok { errs = errors.Append(errs, errors.Newf(pos, - "invalid boolean value %q for injection tag %q", str, name)) + "invalid boolean %q for injection tag %q", str, name)) } else if expr != nil || k&cue.StringKind != 0 { // Convert into an expression bl := ast.NewBool(b) diff --git a/vendor/cuelang.org/go/internal/core/adt/adt.go b/vendor/cuelang.org/go/internal/core/adt/adt.go index 5969a3bc8e..db59945662 100644 --- a/vendor/cuelang.org/go/internal/core/adt/adt.go +++ b/vendor/cuelang.org/go/internal/core/adt/adt.go @@ -39,7 +39,7 @@ func Resolve(ctx *OpContext, c Conjunct) *Vertex { v = x case Resolver: - r, err := ctx.resolveState(c, x, combinedFlags{ + r, err := ctx.resolveState(c, x, Flags{ status: finalized, condition: allKnown, mode: attemptOnly, @@ -112,14 +112,14 @@ type Evaluator interface { // evaluate evaluates the underlying expression. If the expression // is incomplete, it may record the error in ctx and return nil. - evaluate(ctx *OpContext, state combinedFlags) Value + evaluate(ctx *OpContext, state Flags) Value } // A Resolver represents a reference somewhere else within a tree that resolves // a value. 
type Resolver interface { Node - resolve(ctx *OpContext, state combinedFlags) *Vertex + resolve(ctx *OpContext, state Flags) *Vertex } type YieldFunc func(env *Environment) @@ -138,6 +138,9 @@ type Validator interface { // Pos returns the file position of n, or token.NoPos if it is unknown. func Pos(n Node) token.Pos { + if n == nil { + return token.NoPos + } src := n.Source() if src == nil { return token.NoPos @@ -232,6 +235,7 @@ func (*SliceExpr) expr() {} func (*Interpolation) expr() {} func (*UnaryExpr) expr() {} func (*BinaryExpr) expr() {} +func (*OpenExpr) expr() {} func (*CallExpr) expr() {} // Decl and Expr (so allow attaching original source in Conjunct) @@ -329,6 +333,8 @@ func (*UnaryExpr) declNode() {} func (*UnaryExpr) elemNode() {} func (*BinaryExpr) declNode() {} func (*BinaryExpr) elemNode() {} +func (*OpenExpr) declNode() {} +func (*OpenExpr) elemNode() {} func (*CallExpr) declNode() {} func (*CallExpr) elemNode() {} func (*Builtin) declNode() {} @@ -345,6 +351,7 @@ func (*Comprehension) elemNode() {} func (*Vertex) node() {} func (*Conjunction) node() {} +func (*OpenExpr) node() {} func (*ConjunctGroup) node() {} func (*Disjunction) node() {} func (*BoundValue) node() {} @@ -385,3 +392,4 @@ func (*Comprehension) node() {} func (*ForClause) node() {} func (*IfClause) node() {} func (*LetClause) node() {} +func (*TryClause) node() {} diff --git a/vendor/cuelang.org/go/internal/core/adt/arctype_string.go b/vendor/cuelang.org/go/internal/core/adt/arctype_string.go new file mode 100644 index 0000000000..50786dab80 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/arctype_string.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type=ArcType -trimprefix=Arc"; DO NOT EDIT. + +package adt + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ArcMember-0] + _ = x[ArcRequired-1] + _ = x[ArcOptional-2] + _ = x[ArcPending-3] + _ = x[ArcNotPresent-4] +} + +const _ArcType_name = "MemberRequiredOptionalPendingNotPresent" + +var _ArcType_index = [...]uint8{0, 6, 14, 22, 29, 39} + +func (i ArcType) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_ArcType_index)-1 { + return "ArcType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ArcType_name[_ArcType_index[idx]:_ArcType_index[idx+1]] +} diff --git a/vendor/cuelang.org/go/internal/core/adt/binop.go b/vendor/cuelang.org/go/internal/core/adt/binop.go index 7ed760ce63..0a055e0648 100644 --- a/vendor/cuelang.org/go/internal/core/adt/binop.go +++ b/vendor/cuelang.org/go/internal/core/adt/binop.go @@ -17,8 +17,6 @@ package adt import ( "bytes" "strings" - - "cuelang.org/go/cue/token" ) var checkConcrete = &ValidateConfig{ @@ -31,6 +29,21 @@ var checkConcrete = &ValidateConfig{ // kinds. var errOnDiffType = &UnaryExpr{} +// BinOpBool is like [BinOp] but it avoids allocating a [Bool] for boolean operators +// such as [EqualOp]. +func BinOpBool(c *OpContext, node Node, op Op, left, right Value) bool { + // The caller doesn't need a full [Value], so to save allocations, + // use a nil source to ensure that [OpContext.newBool] won't allocate. + // This swap seems fine given that OpContext is not meant for concurrent use. + src := c.src + c.src = nil + defer func() { c.src = src }() + + v := BinOp(c, node, op, left, right) + b, ok := v.(*Bool) + return ok && b.B +} + // BinOp handles all operations except AndOp and OrOp. This includes processing // unary comparators such as '<4' and '=~"foo"'. // @@ -40,25 +53,20 @@ var errOnDiffType = &UnaryExpr{} // // BinOp returns nil if not both left and right are concrete. 
func BinOp(c *OpContext, node Node, op Op, left, right Value) Value { - var p token.Pos - if node != nil { - if src := node.Source(); src != nil { - p = src.Pos() - } - } + p := Pos(node) leftKind := left.Kind() rightKind := right.Kind() if err := validateValue(c, left, checkConcrete); err != nil { const msg = "invalid left-hand value to '%s' (type %s): %v" // TODO: Wrap bottom instead of using NewErrf? - b := c.NewErrf(msg, op, left.Kind(), err.Err) + b := c.NewErrf(msg, op, leftKind, err.Err) b.Code = err.Code return b } if err := validateValue(c, right, checkConcrete); err != nil { const msg = "invalid right-hand value to '%s' (type %s): %v" - b := c.NewErrf(msg, op, left.Kind(), err.Err) + b := c.NewErrf(msg, op, leftKind, err.Err) b.Code = err.Code return b } @@ -77,14 +85,14 @@ func BinOp(c *OpContext, node Node, op Op, left, right Value) Value { if p.Experiment().StructCmp || // compatibility with !structCmp: leftKind == NullKind || rightKind == NullKind { - return c.newBool(false) + return c.NewBool(false) } case leftKind == NullKind: - return c.newBool(true) + return c.NewBool(true) case leftKind == BoolKind: - return c.newBool(c.BoolValue(left) == c.BoolValue(right)) + return c.NewBool(c.BoolValue(left) == c.BoolValue(right)) case leftKind == StringKind: // normalize? 
@@ -94,11 +102,11 @@ func BinOp(c *OpContext, node Node, op Op, left, right Value) Value { return cmpTonode(c, op, bytes.Compare(c.bytesValue(left, op), c.bytesValue(right, op))) case leftKind == ListKind: - return c.newBool(Equal(c, left, right, RegularOnly|IgnoreOptional)) + return c.NewBool(Equal(c, left, right, RegularOnly|IgnoreOptional)) case !p.Experiment().StructCmp: case leftKind == StructKind: - return c.newBool(Equal(c, left, right, RegularOnly|IgnoreOptional)) + return c.NewBool(Equal(c, left, right, RegularOnly|IgnoreOptional)) } case NotEqualOp: @@ -111,14 +119,14 @@ func BinOp(c *OpContext, node Node, op Op, left, right Value) Value { // compatibility with !structCmp: leftKind == NullKind || rightKind == NullKind { - return c.newBool(true) + return c.NewBool(true) } case leftKind == NullKind: - return c.newBool(false) + return c.NewBool(false) case leftKind == BoolKind: - return c.newBool(c.boolValue(left, op) != c.boolValue(right, op)) + return c.NewBool(c.boolValue(left, op) != c.boolValue(right, op)) case leftKind == StringKind: // normalize? 
@@ -128,11 +136,11 @@ func BinOp(c *OpContext, node Node, op Op, left, right Value) Value { return cmpTonode(c, op, bytes.Compare(c.bytesValue(left, op), c.bytesValue(right, op))) case leftKind == ListKind: - return c.newBool(!Equal(c, left, right, RegularOnly|IgnoreOptional)) + return c.NewBool(!Equal(c, left, right, RegularOnly|IgnoreOptional)) case !p.Experiment().StructCmp: case leftKind == StructKind: - return c.newBool(!Equal(c, left, right, RegularOnly|IgnoreOptional)) + return c.NewBool(!Equal(c, left, right, RegularOnly|IgnoreOptional)) } case LessThanOp, LessEqualOp, GreaterEqualOp, GreaterThanOp: @@ -150,10 +158,10 @@ func BinOp(c *OpContext, node Node, op Op, left, right Value) Value { } case BoolAndOp: - return c.newBool(c.boolValue(left, op) && c.boolValue(right, op)) + return c.NewBool(c.boolValue(left, op) && c.boolValue(right, op)) case BoolOrOp: - return c.newBool(c.boolValue(left, op) || c.boolValue(right, op)) + return c.NewBool(c.boolValue(left, op) || c.boolValue(right, op)) case MatchOp: // if y.re == nil { @@ -164,10 +172,10 @@ func BinOp(c *OpContext, node Node, op Op, left, right Value) Value { // } // return boolTonode(Src, b) // } - return c.newBool(c.regexp(right).MatchString(c.stringValue(left, op))) + return c.NewBool(c.regexp(right).MatchString(c.stringValue(left, op))) case NotMatchOp: - return c.newBool(!c.regexp(right).MatchString(c.stringValue(left, op))) + return c.NewBool(!c.regexp(right).MatchString(c.stringValue(left, op))) case AddOp: switch { @@ -266,5 +274,5 @@ func cmpTonode(c *OpContext, op Op, r int) Value { case GreaterThanOp: result = r == 1 } - return c.newBool(result) + return c.NewBool(result) } diff --git a/vendor/cuelang.org/go/internal/core/adt/call.go b/vendor/cuelang.org/go/internal/core/adt/call.go index 670e931a36..f28862eb07 100644 --- a/vendor/cuelang.org/go/internal/core/adt/call.go +++ b/vendor/cuelang.org/go/internal/core/adt/call.go @@ -15,7 +15,6 @@ package adt import ( - "cuelang.org/go/cue/ast" 
"cuelang.org/go/cue/token" ) @@ -29,34 +28,27 @@ type CallContext struct { isValidator bool } -func (c *CallContext) OpContext() *OpContext { +func (c CallContext) OpContext() *OpContext { return c.ctx } -func (c *CallContext) Pos() token.Pos { - var src ast.Node - switch { - case c.call != nil: - src = c.call.Source() - case c.builtin != nil: - src = c.builtin.Source() +func (c CallContext) Pos() token.Pos { + if c.call != nil { + return Pos(c.call) } - if src != nil { - return src.Pos() - } - return token.NoPos + return Pos(c.builtin) } -func (c *CallContext) Value(i int) Value { +func (c CallContext) Value(i int) Value { return c.args[i] } // NumParams returns the total number of parameters to this function. -func (c *CallContext) NumParams() int { +func (c CallContext) NumParams() int { return len(c.args) } -func (c *CallContext) AddPositions(err *ValueError) { +func (c CallContext) AddPositions(err *ValueError) { for _, v := range c.args { err.AddPosition(v) } @@ -65,7 +57,7 @@ func (c *CallContext) AddPositions(err *ValueError) { // Args return the pre-evaluated arguments. This function is only used for // transitioning and will be removed at some point. Use [CallContext.Value] // instead. -func (c *CallContext) Args() []Value { +func (c CallContext) Args() []Value { return c.args } @@ -75,7 +67,7 @@ func (c *CallContext) Args() []Value { // // This method of getting an argument should be used when the argument is used // as a schema and may contain cycles. -func (c *CallContext) Arg(i int) Value { +func (c CallContext) Arg(i int) Value { // If the call context represents a validator call, the argument will be // offset by 1. if c.isValidator { @@ -92,7 +84,7 @@ func (c *CallContext) Arg(i int) Value { } // Expr returns the nth argument expression without evaluating it. -func (c *CallContext) Expr(i int) Expr { +func (c CallContext) Expr(i int) Expr { // If the call context represents a validator call, the argument will be // offset by 1. 
if c.isValidator { @@ -107,6 +99,6 @@ func (c *CallContext) Expr(i int) Expr { return x } -func (c *CallContext) Errf(format string, args ...interface{}) *Bottom { +func (c CallContext) Errf(format string, args ...interface{}) *Bottom { return c.ctx.NewErrf(format, args...) } diff --git a/vendor/cuelang.org/go/internal/core/adt/closed.go b/vendor/cuelang.org/go/internal/core/adt/closed.go index 4c2df89e18..e39e12cd2a 100644 --- a/vendor/cuelang.org/go/internal/core/adt/closed.go +++ b/vendor/cuelang.org/go/internal/core/adt/closed.go @@ -14,6 +14,13 @@ package adt +import ( + "iter" + + "cuelang.org/go/cue/token" + "cuelang.org/go/internal/core/layer" +) + // This file implements the closedness algorithm. // Outline of algorithm @@ -70,51 +77,68 @@ package adt // TODO(errors): return a dedicated ConflictError that can track original // positions on demand. -// IsInOneOf reports whether any of the Structs associated with v is contained -// within any of the span types in the given mask. -func (v *Vertex) IsInOneOf(mask SpanType) bool { - for _, s := range v.Structs { - if s.CloseInfo.IsInOneOf(mask) { - return true - } - } - return false -} - // IsRecursivelyClosed returns true if this value is either a definition or unified // with a definition. func (v *Vertex) IsRecursivelyClosed() bool { - return v.ClosedRecursive || v.IsInOneOf(DefinitionSpan) + return v.ClosedRecursive } -type closeNodeType uint8 +// ShouldRecursivelyClose reports whether this vertex should be closed +// recursively using __reclose. This is to simulate compatibility mode +// with the semantics from before explicitOpen was introduced. +// +// This is the case if any of the embeddings marked with ... were recursively +// closed before opening them up with .... +func (v *Vertex) ShouldRecursivelyClose() bool { + if v.state == nil { + return false + } + return v.state.embedsRecursivelyClosed +} -const ( - // a closeRef node is created when there is a non-definition reference. 
- closeRef closeNodeType = iota +// posInfo is a compact representation of position information for error reporting. +// It stores only the essential fields needed for tracking positions and priority, +// saving significant memory compared to the full CloseInfo struct. +// This is used for scalarID and kindID fields in nodeContext. +type posInfo struct { + // opID is the generation of this conjunct, used for sanity check. + opID uint64 - // closeDef indicates this node was introduced as a result of referencing - // a definition. - closeDef + // defID is a unique ID to track anything that gets inserted from this + // Conjunct. + defID defID - // closeEmbed indicates this node was added as a result of an embedding. - closeEmbed -) + // Priority is used for default resolution. Higher values win. 0 means no + // priority is assigned. Default handling may be more restrictive than + // specified in the spec when a priority is assigned. + Priority layer.Priority +} + +// AncestorPositions returns an iterator over each parent of p, +// starting with the most immediate parent. This is used +// to add positions to errors that are associated with position info. +func (p posInfo) AncestorPositions(ctx *OpContext) iter.Seq[token.Pos] { + return func(yield func(token.Pos) bool) { + if p.opID != ctx.opID { + return + } + for id := p.defID; id != 0; id = ctx.containments[id].id { + pos := ctx.positionTable[ctx.containments[id].posIndex] + if !yield(pos) { + return + } + } + } +} -// TODO: merge with closeInfo: this is a leftover of the refactoring. type CloseInfo struct { - *closeInfo // old implementation (TODO: remove) - // defID is a unique ID to track anything that gets inserted from this - // Conjunct. - opID uint64 // generation of this conjunct, used for sanity check. - defID defID + // Embedded posInfo provides opID, defID, and priority fields. + // These are the core fields needed for position tracking and priority comparison. 
+ posInfo + enclosingEmbed defID // Tracks an embedding within a struct. outerID defID // Tracks the {} that should be closed after unifying. - // IsClosed is true if this conjunct represents a single level of closing - // as indicated by the closed builtin. - IsClosed bool - // FromEmbed indicates whether this conjunct was inserted because of an // embedding. This flag is sticky: it will be set for conjuncts created // from fields defined by this conjunct. @@ -130,115 +154,17 @@ type CloseInfo struct { // Like FromDef, but used by APIs to force FromDef to be true. TopDef bool - // FieldTypes indicates which kinds of fields (optional, dynamic, patterns, - // etc.) are contained in this conjunct. - FieldTypes OptionalType + // This conjunct was opened by the ... postfix operator. + Opened bool CycleInfo } -func (c CloseInfo) Location() Node { - if c.closeInfo == nil { - return nil - } - return c.closeInfo.location -} - -func (c CloseInfo) span() SpanType { - if c.closeInfo == nil { - return 0 - } - return c.closeInfo.span -} - -func (c CloseInfo) RootSpanType() SpanType { - if c.closeInfo == nil { - return 0 - } - return c.root -} - -// IsInOneOf reports whether c is contained within any of the span types in the -// given mask. -func (c CloseInfo) IsInOneOf(t SpanType) bool { - return c.span()&t != 0 -} - -// TODO(perf): remove: error positions should always be computed on demand -// in dedicated error types. -func (c *CloseInfo) AddPositions(ctx *OpContext) { - for s := c.closeInfo; s != nil; s = s.parent { - if loc := s.location; loc != nil { - ctx.AddPosition(loc) - } +func (c CloseInfo) Location(ctx *OpContext) token.Pos { + if c.opID != ctx.opID || c.defID == 0 { + return token.NoPos } -} - -// TODO(perf): use on StructInfo. Then if parent and expression are the same -// it is possible to use cached value. 
-func (c CloseInfo) SpawnEmbed(x Node) CloseInfo { - c.closeInfo = &closeInfo{ - parent: c.closeInfo, - location: x, - mode: closeEmbed, - root: EmbeddingSpan, - span: c.span() | EmbeddingSpan, - } - return c -} - -// SpawnGroup is used for structs that contain embeddings that may end up -// closing the struct. This is to force that `b` is not allowed in -// -// a: {#foo} & {b: int} -func (c CloseInfo) SpawnGroup(x Expr) CloseInfo { - c.closeInfo = &closeInfo{ - parent: c.closeInfo, - location: x, - span: c.span(), - } - return c -} - -// SpawnSpan is used to track that a value is introduced by a comprehension -// or constraint. Definition and embedding spans are introduced with SpawnRef -// and SpawnEmbed, respectively. -func (c CloseInfo) SpawnSpan(x Node, t SpanType) CloseInfo { - c.closeInfo = &closeInfo{ - parent: c.closeInfo, - location: x, - root: t, - span: c.span() | t, - } - return c -} - -func (c CloseInfo) SpawnRef(arc *Vertex, isDef bool, x Expr) CloseInfo { - span := c.span() - found := false - if !isDef { - xnode := Node(x) // Optimization so we're comparing identical interface types. - // TODO: make this work for non-definitions too. - for p := c.closeInfo; p != nil; p = p.parent { - if p.span == span && p.location == xnode { - found = true - break - } - } - } - if !found { - c.closeInfo = &closeInfo{ - parent: c.closeInfo, - location: x, - span: span, - } - } - if isDef { - c.mode = closeDef - c.closeInfo.root = DefinitionSpan - c.closeInfo.span |= DefinitionSpan - } - return c + return ctx.positionTable[ctx.containments[c.defID].posIndex] } // IsDef reports whether an expressions is a reference that references a @@ -265,71 +191,6 @@ func IsDef(x Expr) (isDef bool, depth int) { return isDef, depth } -// A SpanType is used to indicate whether a CUE value is within the scope of -// a certain CUE language construct, the span type. 
-type SpanType uint8 - -const ( - // EmbeddingSpan means that this value was embedded at some point and should - // not be included as a possible root node in the todo field of OpContext. - EmbeddingSpan SpanType = 1 << iota - ConstraintSpan - ComprehensionSpan - DefinitionSpan -) - -type closeInfo struct { - // location records the expression that led to this node's introduction. - location Node - - // The parent node in the tree. - parent *closeInfo - - // TODO(performance): if references are chained, we could have a separate - // parent pointer to skip the chain. - - // mode indicates whether this node was added as part of an embedding, - // definition or non-definition reference. - mode closeNodeType - - // noCheck means this struct is irrelevant for closedness checking. This can - // happen when: - // - it is a sibling of a new definition. - noCheck bool // don't process for inclusion info - - root SpanType - span SpanType -} - -// closeStats holds the administrative fields for a closeInfo value. Each -// closeInfo is associated with a single closeStats value per unification -// operator. This association is done through an OpContext. This allows the -// same value to be used in multiple concurrent unification operations. -// NOTE: there are other parts of the algorithm that are not thread-safe yet. -type closeStats struct { - // the other fields of this closeStats value are only valid if generation - // is equal to the generation in OpContext. This allows for lazy - // initialization of closeStats. - generation uint64 - - // These counts keep track of how many required child nodes need to be - // completed before this node is accepted. - requiredCount int - acceptedCount int - - // accepted is set if this node is accepted. - accepted bool - - required bool - - inTodoList bool // true if added to todo list. 
- next *closeStats -} - -func (c *closeInfo) isClosed() bool { - return c.mode == closeDef -} - // isClosed reports whether v is closed at this level (so not recursively). func isClosed(v *Vertex) bool { // We could have used IsRecursivelyClosed here, but (effectively) @@ -338,213 +199,11 @@ func isClosed(v *Vertex) bool { if v.ClosedRecursive || v.ClosedNonRecursive { return true } - // TODO(evalv3): this can be removed once we delete the evalv2 code. - for _, s := range v.Structs { - if s.IsClosed || s.IsInOneOf(DefinitionSpan) { - return true - } - } return false } // Accept determines whether f is allowed in n. It uses the OpContext for // caching administrative fields. func Accept(ctx *OpContext, n *Vertex, f Feature) (found, required bool) { - if ctx.isDevVersion() { - return n.accept(ctx, f), true - } - ctx.opID++ - ctx.todo = nil - - var optionalTypes OptionalType - - // TODO(perf): more aggressively determine whether a struct is open or - // closed: open structs do not have to be checked, yet they can particularly - // be the ones with performance issues, for instanced as a result of - // embedded for comprehensions. - for _, s := range n.Structs { - if !s.useForAccept() { - continue - } - markCounts(ctx, s.CloseInfo) - optionalTypes |= s.types - } - - var str Value - if f.Index() == MaxIndex { - f &= fTypeMask - } else if optionalTypes&(HasComplexPattern|HasDynamic) != 0 && f.IsString() { - str = f.ToValue(ctx) - } - - for _, s := range n.Structs { - if !s.useForAccept() { - continue - } - if verifyArc(ctx, s, f, str) { - // Beware: don't add to below expression: this relies on the - // side effects of markUp. - ok := markUp(ctx, s.closeInfo, 0) - found = found || ok - } - } - - // Reject if any of the roots is not accepted. 
- for x := ctx.todo; x != nil; x = x.next { - if !x.accepted { - return false, true - } - } - - return found, ctx.todo != nil -} - -func markCounts(ctx *OpContext, info CloseInfo) { - if info.IsClosed { - markRequired(ctx, info.closeInfo) - return - } - for s := info.closeInfo; s != nil; s = s.parent { - if s.isClosed() { - markRequired(ctx, s) - return - } - } -} - -func markRequired(ctx *OpContext, info *closeInfo) { - count := 0 - for ; ; info = info.parent { - var s closeInfo - if info != nil { - s = *info - } - - x := getScratch(ctx, info) - - x.requiredCount += count - - if x.required { - return - } - - if s.span&EmbeddingSpan == 0 && !x.inTodoList { - x.next = ctx.todo - ctx.todo = x - x.inTodoList = true - } - - x.required = true - - if info == nil { - return - } - - count = 0 - if s.mode != closeEmbed { - count = 1 - } - } -} - -func markUp(ctx *OpContext, info *closeInfo, count int) bool { - for ; ; info = info.parent { - var s closeInfo - if info != nil { - s = *info - } - - x := getScratch(ctx, info) - - x.acceptedCount += count - - if x.acceptedCount < x.requiredCount { - return false - } - - x.accepted = true - - if info == nil { - return true - } - - count = 0 - if x.required && s.mode != closeEmbed { - count = 1 - } - } -} - -// getScratch: explain generation. -func getScratch(ctx *OpContext, s *closeInfo) *closeStats { - m := ctx.closed - if m == nil { - m = map[*closeInfo]*closeStats{} - ctx.closed = m - } - - x := m[s] - if x == nil { - x = &closeStats{} - m[s] = x - } - - if x.generation != ctx.opID { - *x = closeStats{generation: ctx.opID} - } - - return x -} - -func verifyArc(ctx *OpContext, s *StructInfo, f Feature, label Value) bool { - isRegular := f.IsString() - - o := s.StructLit - env := s.Env - - if len(o.Additional) > 0 || o.IsOpen { - return true - } - - for _, g := range o.Fields { - if f == g.Label { - return true - } - } - - if !isRegular { - return false - } - - // Do not record errors during this validation. 
- errs := ctx.errs - defer func() { ctx.errs = errs }() - - if len(o.Dynamic) > 0 && f.IsString() && label != nil { - for _, b := range o.Dynamic { - v := env.evalCached(ctx, b.Key) - v, _ = ctx.getDefault(v) - s, ok := Unwrap(v).(*String) - if !ok { - continue - } - if label.(*String).Str == s.Str { - return true - } - } - } - - for _, b := range o.Bulk { - if matchBulk(ctx, env, b, f, label) { - return true - } - } - - // TODO(perf): delay adding this position: create a special error type that - // computes all necessary positions on demand. - if ctx != nil { - ctx.AddPosition(s.StructLit) - } - - return false + return n.accept(ctx, f), true } diff --git a/vendor/cuelang.org/go/internal/core/adt/closed2.go b/vendor/cuelang.org/go/internal/core/adt/closed2.go deleted file mode 100644 index ce25d47009..0000000000 --- a/vendor/cuelang.org/go/internal/core/adt/closed2.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2020 CUE Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adt - -// CloseDef defines how individual fieldSets (corresponding to conjuncts) -// combine to determine whether a field is contained in a closed set. -// -// A CloseDef combines multiple conjuncts and embeddings. All CloseDefs are -// stored in slice. References to other CloseDefs are indices within this slice. -// Together they define the top of the tree of the expression tree of how -// conjuncts combine together (a canopy). 
- -// isComplexStruct reports whether the Closed information should be copied as a -// subtree into the parent node using InsertSubtree. If not, the conjuncts can -// just be inserted at the current ID. -func isComplexStruct(ctx *OpContext, v *Vertex) bool { - return v.IsClosedStruct() -} - -// TODO: cleanup code and error messages. Reduce duplication in some related -// code. -func verifyArc2(ctx *OpContext, f Feature, v *Vertex, isClosed bool) (found bool, err *Bottom) { - unreachableForDev(ctx) - - // Don't check computed, temporary vertices. - if v.Label == InvalidLabel { - return true, nil - } - - // TODO(perf): collect positions in error. - defer ctx.ReleasePositions(ctx.MarkPositions()) - - // Note: it is okay to use parent here as this only needs to be computed - // for the original location. - if ok, required := Accept(ctx, v.Parent, f); ok || (!required && !isClosed) { - return true, nil - } - - if !f.IsString() { - // if f.IsHidden() { Also change Accept in composite.go - return false, nil - } - - if v != nil { - for _, c := range v.Conjuncts { - if pos := c.Field(); pos != nil { - ctx.AddPosition(pos) - } - } - } - - for _, s := range v.Parent.Structs { - s.AddPositions(ctx) - } - - return false, ctx.NewErrf("field not allowed") -} diff --git a/vendor/cuelang.org/go/internal/core/adt/composite.go b/vendor/cuelang.org/go/internal/core/adt/composite.go index 75d6dbdac2..ff6c765951 100644 --- a/vendor/cuelang.org/go/internal/core/adt/composite.go +++ b/vendor/cuelang.org/go/internal/core/adt/composite.go @@ -16,12 +16,13 @@ package adt import ( "fmt" + "iter" "slices" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" - "cuelang.org/go/internal" + "cuelang.org/go/internal/iterutil" ) // TODO: unanswered questions about structural cycles: @@ -83,7 +84,13 @@ import ( // node. Each conjunct that make up node in the tree can be associated with // a different environment (although some conjuncts may share an Environment). 
type Environment struct { - Up *Environment + Up *Environment + + // Vertex should not be accessed directly in most cases. + // Use DerefVertex(ctx) instead to handle overlay mappings correctly. + // + // TODO(mvdan): unexport this field, or give it a longer name + // to clarify it should not be read directly in most cases? Vertex *Vertex // DynamicLabel is only set when instantiating a field from a pattern @@ -98,49 +105,32 @@ type Environment struct { cache map[cacheKey]Value } +// Equal reports whether e and f refer to the same node. +func (e *Environment) Equal(ctx *OpContext, f *Environment) bool { + return e.Up == f.Up && e.DerefVertex(ctx) == f.DerefVertex(ctx) +} + type cacheKey struct { Expr Expr Arc *Vertex } -func (e *Environment) up(ctx *OpContext, count int32) *Environment { - for i := int32(0); i < count; i++ { - e = e.Up - ctx.Assertf(ctx.Pos(), e.Vertex != nil, "Environment.up encountered a nil vertex") +// DerefVertex returns the dereferenced vertex for this environment. +// It must be used instead of directly accessing the Vertex field +// to handle overlay mappings correctly during disjunction evaluation. +func (e *Environment) DerefVertex(ctx *OpContext) *Vertex { + if ctx == nil { + return e.Vertex } - return e + return ctx.derefRoot(e.Vertex) } -type ID int32 - -// evalCached is used to look up dynamic field pattern constraint expressions. -func (e *Environment) evalCached(c *OpContext, x Expr) Value { - if v, ok := x.(Value); ok { - return v - } - key := cacheKey{x, nil} - v, ok := e.cache[key] - if !ok { - if e.cache == nil { - e.cache = map[cacheKey]Value{} - } - env, src := c.e, c.src - c.e, c.src = e, x.Source() - // Save and restore errors to ensure that only relevant errors are - // associated with the cash. - err := c.errs - v = c.evalState(x, combinedFlags{ - status: partial, - condition: allKnown, - mode: yield, - }) // TODO: should this be finalized? 
- c.e, c.src = env, src - c.errs = err - if b, ok := v.(*Bottom); !ok || !b.IsIncomplete() { - e.cache[key] = v - } +func (e *Environment) up(ctx *OpContext, count int32) *Environment { + for range count { + e = e.Up + ctx.Assertf(ctx.Pos(), e.DerefVertex(ctx) != nil, "Environment.up encountered a nil vertex") } - return v + return e } // A Vertex is a node in the value tree. It may be a leaf or internal node. @@ -174,11 +164,6 @@ type Vertex struct { // status indicates the evaluation progress of this vertex. status vertexStatus - // hasAllConjuncts indicates that the set of conjuncts is complete. - // This is the case if the conjuncts of all its ancestors have been - // processed. - hasAllConjuncts bool - // isData indicates that this Vertex is to be interpreted as data: pattern // and additional constraints, as well as optional fields, should be // ignored. @@ -193,6 +178,10 @@ type Vertex struct { // level only. This supports the close builtin. ClosedNonRecursive bool + // Opened is set when a node that is opened with @experiment(explicitopen) + // is structure shared. This will override any of the above booleans. + OpenedShared bool + // HasEllipsis indicates that this Vertex is open by means of an ellipsis. // TODO: combine this field with Closed once we removed the old evaluator. HasEllipsis bool @@ -202,11 +191,6 @@ type Vertex struct { // the per-Environment value cache. MultiLet bool - // After this is set, no more arcs may be added during evaluation. This is - // set, for instance, after a Vertex is used as a source for comprehensions, - // or any other operation that relies on the set of arcs being constant. - LockArcs bool - // IsDynamic signifies whether this struct is computed as part of an // expression and not part of the static evaluation tree. // Used for cycle detection. @@ -230,9 +214,6 @@ type Vertex struct { // the for source of comprehensions and let fields or let clauses. 
anonymous bool - // hasPendingArc is set if this Vertex has a void arc (e.g. for comprehensions) - hasPendingArc bool - // IsDisjunct indicates this Vertex is a disjunct resulting from a // disjunction evaluation. IsDisjunct bool @@ -242,18 +223,9 @@ type Vertex struct { // The debug printer, for instance, takes extra care not to print in a loop. IsShared bool - // IsCyclic is true if a node is cyclic, for instance if its value is - // a cyclic reference to a shared node or if the value is a conjunction - // of which at least one value is cyclic (not yet supported). - IsCyclic bool - // ArcType indicates the level of optionality of this arc. ArcType ArcType - // cyclicReferences is a linked list of internal references pointing to this - // Vertex. This is used to shorten the path of some structural cycles. - cyclicReferences *RefNode - // BaseValue is the value associated with this vertex. For lists and structs // this is a sentinel value indicating its kind. BaseValue BaseValue @@ -280,13 +252,13 @@ type Vertex struct { // the final value of this Vertex. // // TODO: all access to Conjuncts should go through functions like - // VisitLeafConjuncts and VisitAllConjuncts. We should probably make this - // an unexported field. + // [Vertex.LeafConjuncts] and [Vertex.AllConjuncts]. + // We should probably make this an unexported field. Conjuncts ConjunctGroup // Structs is a slice of struct literals that contributed to this value. // This information is used to compute the topological sort of arcs. - Structs []*StructInfo + Structs []StructInfo } func deref(v *Vertex) *Vertex { @@ -308,6 +280,9 @@ func equalDeref(a, b *Vertex) bool { // newInlineVertex creates a Vertex that is needed for computation, but for // which there is no CUE path defined from the root Vertex. func (ctx *OpContext) newInlineVertex(parent *Vertex, v BaseValue, a ...Conjunct) *Vertex { + // TODO: parent is an unused parameter here. 
Setting [Vertex.Parent] to it + // improves paths in a bunch of errors, fixing regressions compared to evalv2. + // However, it also breaks a few tests. Perhaps try with evalv4. n := &Vertex{ BaseValue: v, IsDynamic: true, @@ -318,9 +293,6 @@ func (ctx *OpContext) newInlineVertex(parent *Vertex, v BaseValue, a ...Conjunct state := ctx.freeScope[len(ctx.freeScope)-1] state.toFree = append(state.toFree, n) } - if !ctx.isDevVersion() { - n.Parent = parent - } if ctx.inDetached > 0 { n.anonymous = true } @@ -337,7 +309,6 @@ func (v *Vertex) updateArcType(t ArcType) { return } s := v.state - // NOTE: this condition does not occur in V2. if s != nil && v.isFinal() { c := s.ctx if s.scheduler.frozen.meets(arcTypeKnown) { @@ -350,7 +321,7 @@ func (v *Vertex) updateArcType(t ArcType) { return } } - if v.Parent != nil && v.Parent.ArcType == ArcPending && v.Parent.state != nil && v.Parent.state.ctx.isDevVersion() { + if v.Parent != nil && v.Parent.ArcType == ArcPending && v.Parent.state != nil { // TODO: check that state is always non-nil. v.Parent.state.unshare() } @@ -420,6 +391,8 @@ func (v *Vertex) MayAttach() bool { return !v.Label.IsLet() && !v.anonymous } +//go:generate go tool stringer -type=ArcType -trimprefix=Arc + type ArcType uint8 const ( @@ -454,29 +427,6 @@ const ( // We could also define types for required fields and potentially lets. ) -func (a ArcType) String() string { - switch a { - case ArcMember: - return "Member" - case ArcOptional: - return "Optional" - case ArcRequired: - return "Required" - case ArcPending: - return "Pending" - case ArcNotPresent: - return "NotPresent" - } - return fmt.Sprintf("ArcType(%d)", a) -} - -// definitelyExists reports whether an arc is a constraint or member arc. -// TODO: we should check that users of this call ensure there are no -// ArcPendings. -func (v *Vertex) definitelyExists() bool { - return v.ArcType < ArcPending -} - // ConstraintFromToken converts a given AST constraint token to the // corresponding ArcType. 
func ConstraintFromToken(t token.Token) ArcType { @@ -528,32 +478,20 @@ func (v *Vertex) Clone() *Vertex { type StructInfo struct { *StructLit - Env *Environment - - CloseInfo + // Repeats tracks how many additional times this struct appeared via [Vertex.AddStruct]. + // This is used by toposort to give proper weight to repeated structs. + Repeats int // Embed indicates the struct in which this struct is embedded (originally), // or nil if this is a root structure. // Embed *StructInfo // Context *RefInfo // the location from which this struct originates. - Disable bool - - Embedding bool -} - -// TODO(perf): this could be much more aggressive for eliminating structs that -// are immaterial for closing. -func (s *StructInfo) useForAccept() bool { - if c := s.closeInfo; c != nil { - return !c.noCheck - } - return true } // vertexStatus indicates the evaluation progress of a Vertex. type vertexStatus int8 -//go:generate go run golang.org/x/tools/cmd/stringer -type=vertexStatus +//go:generate go tool stringer -type=vertexStatus const ( // unprocessed indicates a Vertex has not been processed before. @@ -575,16 +513,6 @@ const ( // but without recursively processing arcs. conjuncts - // evaluatingArcs indicates that the arcs of the Vertex are currently being - // evaluated. If this is encountered it indicates a structural cycle. - // Value does not have to be nil - evaluatingArcs - - // TODO: introduce a "frozen" state. Right now a node may marked and used - // as finalized, before all tasks have completed. We should introduce a - // frozen state that simply checks that all remaining tasks are idempotent - // and errors if they are not. - // finalized means that this node is fully evaluated and that the results // are save to use without further consideration. finalized @@ -645,29 +573,32 @@ func (v *Vertex) updateStatus(s vertexStatus) { // received all their conjuncts as well, after which this node will have been // notified of these conjuncts. 
func (v *Vertex) setParentDone() { - v.hasAllConjuncts = true // Could set "Conjuncts" flag of arc at this point. - if n := v.state; n != nil && len(n.conjuncts) == n.conjunctsPos { + if n := v.state; n != nil { for _, a := range v.Arcs { a.setParentDone() } } } -// VisitLeafConjuncts visits all conjuncts that are leafs of the ConjunctGroup tree. -func (v *Vertex) VisitLeafConjuncts(f func(Conjunct) bool) { - VisitConjuncts(v.Conjuncts, f) +// LeafConjuncts iterates over all conjuncts that are leaves of the [ConjunctGroup] tree. +func (v *Vertex) LeafConjuncts() iter.Seq[Conjunct] { + return func(yield func(Conjunct) bool) { + _ = iterConjuncts(v.Conjuncts, yield) + } } -func VisitConjuncts(a []Conjunct, f func(Conjunct) bool) bool { +func iterConjuncts(a []Conjunct, yield func(Conjunct) bool) bool { + // TODO: note that this is iterAllConjuncts but without yielding ConjunctGroups. + // Can we reuse the code in a simple enough way? for _, c := range a { switch x := c.x.(type) { case *ConjunctGroup: - if !VisitConjuncts(*x, f) { + if !iterConjuncts(*x, yield) { return false } default: - if !f(c) { + if !yield(c) { return false } } @@ -675,22 +606,39 @@ func VisitConjuncts(a []Conjunct, f func(Conjunct) bool) bool { return true } -// VisitAllConjuncts visits all conjuncts of v, including ConjunctGroups. +// ConjunctsSeq iterates over all conjuncts that are leafs in the list of trees given. +func ConjunctsSeq(a []Conjunct) iter.Seq[Conjunct] { + return func(yield func(Conjunct) bool) { + _ = iterConjuncts(a, yield) + } +} + +// AllConjuncts iterates through all conjuncts of v, including [ConjunctGroup]s. // Note that ConjunctGroups do not have an Environment associated with them. -func (v *Vertex) VisitAllConjuncts(f func(c Conjunct, isLeaf bool)) { - visitAllConjuncts(v.Conjuncts, f) +// The boolean reports whether the conjunct is a leaf. 
+func (v *Vertex) AllConjuncts() iter.Seq2[Conjunct, bool] { + return func(yield func(Conjunct, bool) bool) { + _ = iterAllConjuncts(v.Conjuncts, yield) + } } -func visitAllConjuncts(a []Conjunct, f func(c Conjunct, isLeaf bool)) { +func iterAllConjuncts(a []Conjunct, yield func(c Conjunct, isLeaf bool) bool) bool { for _, c := range a { switch x := c.x.(type) { case *ConjunctGroup: - f(c, false) - visitAllConjuncts(*x, f) + if !yield(c, false) { + return false + } + if !iterAllConjuncts(*x, yield) { + return false + } default: - f(c, true) + if !yield(c, true) { + return false + } } } + return true } // HasConjuncts reports whether v has any conjuncts. @@ -707,14 +655,11 @@ func (v *Vertex) SingleConjunct() (c Conjunct, count int) { if v == nil { return c, 0 } - v.VisitLeafConjuncts(func(x Conjunct) bool { - c = x + for c = range v.LeafConjuncts() { if count++; count > 1 { - return false + break } - return true - }) - + } return c, count } @@ -827,12 +772,6 @@ func (v *Vertex) toDataAllRec(ctx *OpContext, processed map[*Vertex]*Vertex) *Ve w.ClosedRecursive = false w.ClosedNonRecursive = false - // TODO(perf): this is not strictly necessary for evaluation, but it can - // hurt performance greatly. Drawback is that it may disable ordering. - for _, s := range w.Structs { - s.Disable = true - } - w.Conjuncts = slices.Clone(v.Conjuncts) for i, c := range w.Conjuncts { @@ -905,9 +844,6 @@ func isFinal(v Value, isClosed bool) bool { case *Vertex: closed := isClosed || x.ClosedNonRecursive || x.ClosedRecursive - // TODO(evalv3): this is for V2 compatibility. Remove once V2 is gone. - closed = closed || x.IsClosedList() || x.IsClosedStruct() - // This also dereferences the value. if v, ok := x.BaseValue.(Value); ok { return isFinal(v, closed) @@ -984,23 +920,19 @@ func Unify(c *OpContext, a, b Value) *Vertex { // early enough to error on schemas used for validation. 
if n := c.vertex; n != nil { v.Parent = n.Parent - if c.isDevVersion() { - v.Label = n.Label // this doesn't work in V2 - } + v.Label = n.Label } addConjuncts(c, v, a) addConjuncts(c, v, b) - if c.isDevVersion() { - s := v.getState(c) - // As this is a new node, we should drop all the requirements from - // parent nodes, as these will not be aligned with the reinsertion - // of the conjuncts. - s.dropParentRequirements = true - if p := c.vertex; p != nil && p.state != nil && s != nil { - s.hasNonCyclic = p.state.hasNonCyclic - } + s := v.getState(c) + // As this is a new node, we should drop all the requirements from + // parent nodes, as these will not be aligned with the reinsertion + // of the conjuncts. + s.dropParentRequirements = true + if p := c.vertex; p != nil && p.state != nil && s != nil { + s.hasNonCyclic = p.state.hasNonCyclic } v.Finalize(c) @@ -1018,38 +950,35 @@ func addConjuncts(ctx *OpContext, dst *Vertex, src Value) { c := MakeConjunct(nil, src, closeInfo) if v, ok := src.(*Vertex); ok { - if ctx.Version == internal.EvalV2 { - if v.ClosedRecursive { - var root CloseInfo - c.CloseInfo = root.SpawnRef(v, v.ClosedRecursive, nil) - } - } else { - // By default, all conjuncts in a node are considered to be not - // mutually closed. This means that if one of the arguments to Unify - // closes, but is acquired to embedding, the closeness information - // is disregarded. For instance, for Unify(a, b) where a and b are - // - // a: {#D, #D: d: f: int} - // b: {d: e: 1} - // - // we expect 'e' to be not allowed. - // - // In order to do so, we wrap the outer conjunct in a separate - // scope that will be closed in the presence of closed embeddings - // independently from the other conjuncts. - n := dst.getBareState(ctx) - c.CloseInfo = n.splitScope(c.CloseInfo) - - // Even if a node is marked as ClosedRecursive, it may be that this - // is the first node that references a definition. 
- // We approximate this to see if the path leading up to this - // value is a defintion. This is not fully accurate. We could - // investigate the closedness information contained in the parent. - for p := v; p != nil; p = p.Parent { - if p.Label.IsDef() { - c.CloseInfo.TopDef = true - break - } + // TODO(v1.0.0): we should determine whether to apply the new semantics + // for closedness. However, this is not applicable for a Vertex. + // Ultimately, this logic should be removed. + + // By default, all conjuncts in a node are considered to be not + // mutually closed. This means that if one of the arguments to Unify + // closes, but is acquired to embedding, the closeness information + // is disregarded. For instance, for Unify(a, b) where a and b are + // + // a: {#D, #D: d: f: int} + // b: {d: e: 1} + // + // we expect 'e' to be not allowed. + // + // In order to do so, we wrap the outer conjunct in a separate + // scope that will be closed in the presence of closed embeddings + // independently from the other conjuncts. + n := dst.getBareState(ctx) + c.CloseInfo = n.splitScope(nil, c.CloseInfo) + + // Even if a node is marked as ClosedRecursive, it may be that this + // is the first node that references a definition. + // We approximate this to see if the path leading up to this + // value is a defintion. This is not fully accurate. We could + // investigate the closedness information contained in the parent. + for p := v; p != nil; p = p.Parent { + if p.Label.IsDef() { + c.CloseInfo.TopDef = true + break } } } @@ -1062,28 +991,40 @@ func (v *Vertex) Finalize(c *OpContext) { // case the caller did not handle existing errors in the context. 
err := c.errs c.errs = nil - c.unify(v, combinedFlags{ - status: finalized, - condition: allKnown, - mode: finalize, + c.unify(v, Flags{ + status: finalized, + condition: allKnown, + mode: finalize, + checkTypos: true, }) c.errs = err } +func (v *Vertex) Unify(c *OpContext, flags Flags) { + // Saving and restoring the error context prevents v from panicking in + // case the caller did not handle existing errors in the context. + err := c.errs + c.errs = nil + c.unify(v, flags) + c.errs = err +} + // CompleteArcs ensures the set of arcs has been computed. func (v *Vertex) CompleteArcs(c *OpContext) { - c.unify(v, combinedFlags{ - status: conjuncts, - condition: allKnown, - mode: finalize, + c.unify(v, Flags{ + status: conjuncts, + condition: allKnown, + mode: finalize, + checkTypos: true, }) } func (v *Vertex) CompleteArcsOnly(c *OpContext) { - c.unify(v, combinedFlags{ - status: conjuncts, - condition: fieldSetKnown, - mode: finalize, + c.unify(v, Flags{ + status: conjuncts, + condition: fieldSetKnown, + mode: finalize, + checkTypos: false, }) } @@ -1153,19 +1094,6 @@ func Unwrap(v Value) Value { return x.Value() } -// OptionalType is a bit field of the type of optional constraints in use by an -// Acceptor. -type OptionalType int8 - -const ( - HasField OptionalType = 1 << iota // X: T - HasDynamic // (X): T or "\(X)": T - HasPattern // [X]: T - HasComplexPattern // anything but a basic type - HasAdditional // ...T - IsOpen // Defined for all fields -) - func (v *Vertex) Kind() Kind { // This is possible when evaluating comprehensions. It is potentially // not known at this time what the type is. @@ -1181,14 +1109,6 @@ func (v *Vertex) Kind() Kind { } } -func (v *Vertex) OptionalTypes() OptionalType { - var mask OptionalType - for _, s := range v.Structs { - mask |= s.OptionalTypes() - } - return mask -} - // IsOptional reports whether a field is explicitly defined as optional, // as opposed to whether it is allowed by a pattern constraint. 
func (v *Vertex) IsOptional(label Feature) bool { @@ -1204,17 +1124,33 @@ func (v *Vertex) accepts(ok, required bool) bool { return ok || (!required && !v.ClosedRecursive) } +// IsOpenStruct reports whether any field that is not contained within v is allowed. +// +// TODO: merge this function with IsClosedStruct and possibly IsClosedList. +// right now this causes too many issues if we do so. +func (v *Vertex) IsOpenStruct() bool { + // TODO: move this check to IsClosedStruct. Right now this causes too many + // changes in the debug output, and it also appears to be not entirely + // correct. + if v.HasEllipsis { + return true + } + if v.ClosedNonRecursive { + return false + } + if v.IsClosedStruct() { + return false + } + return true +} + func (v *Vertex) IsClosedStruct() bool { - // TODO: uncomment this. This fixes a bunch of closedness bugs - // in the old and new evaluator. For compability sake, though, we - // keep it as is for now. - // if v.Closed { - // return true - // } + // TODO: add this check. Right now this causes issues. It will have + // to be carefully introduced. // if v.HasEllipsis { // return false // } - switch x := v.BaseValue.(type) { + switch v.BaseValue.(type) { default: return false @@ -1222,10 +1158,6 @@ func (v *Vertex) IsClosedStruct() bool { return v.ClosedRecursive && !v.HasEllipsis case *StructMarker: - if x.NeedClose { - return true - } - case *Disjunction: } return isClosed(v) @@ -1270,7 +1202,7 @@ func (v *Vertex) Accept(ctx *OpContext, f Feature) bool { switch v.BaseValue.(type) { case *ListMarker: // TODO(perf): use precomputed length. - if f.Index() < len(v.Elems()) { + if f.Index() < iterutil.Count(v.Elems()) { return true } return !v.IsClosedList() @@ -1288,15 +1220,7 @@ func (v *Vertex) Accept(ctx *OpContext, f Feature) bool { } } - // TODO: move this check to IsClosedStruct. Right now this causes too many - // changes in the debug output, and it also appears to be not entirely - // correct. 
- if v.HasEllipsis { - return true - - } - - if !v.IsClosedStruct() || v.Lookup(f) != nil { + if v.IsOpenStruct() || v.Lookup(f) != nil { return true } @@ -1315,15 +1239,6 @@ func (v *Vertex) MatchAndInsert(ctx *OpContext, arc *Vertex) { } // Go backwards to simulate old implementation. - if !ctx.isDevVersion() { - for i := len(v.Structs) - 1; i >= 0; i-- { - s := v.Structs[i] - if s.Disable { - continue - } - s.MatchAndInsert(ctx, arc) - } - } // This is the equivalent for the new implementation. if pcs := v.PatternConstraints; pcs != nil { @@ -1387,15 +1302,17 @@ func (v *Vertex) LookupRaw(f Feature) *Vertex { } // Elems returns the regular elements of a list. -func (v *Vertex) Elems() []*Vertex { - // TODO: add bookkeeping for where list arcs start and end. - a := make([]*Vertex, 0, len(v.Arcs)) - for _, x := range v.Arcs { - if x.Label.IsInt() { - a = append(a, x) +func (v *Vertex) Elems() iter.Seq[*Vertex] { + return func(yield func(*Vertex) bool) { + // TODO: add bookkeeping for where list arcs start and end. + for _, x := range v.Arcs { + if x.Label.IsInt() { + if !yield(x) { + break + } + } } } - return a } func (v *Vertex) Init(c *OpContext) { @@ -1411,33 +1328,7 @@ func (v *Vertex) GetArc(c *OpContext, f Feature, t ArcType) (arc *Vertex, isNew return arc, false } - if c.isDevVersion() { - return nil, false - } - - if v.LockArcs { - // TODO(errors): add positions. - if f.IsInt() { - c.addErrf(EvalError, token.NoPos, - "element at index %v not allowed by earlier comprehension or reference cycle", f) - } else { - c.addErrf(EvalError, token.NoPos, - "field %v not allowed by earlier comprehension or reference cycle", f) - } - } - // TODO: consider setting Dynamic here from parent. 
- arc = &Vertex{ - Parent: v, - Label: f, - ArcType: t, - nonRooted: v.IsDynamic || v.Label.IsLet() || v.nonRooted, - anonymous: v.anonymous || v.Label.IsLet(), - } - v.Arcs = append(v.Arcs, arc) - if t == ArcPending { - v.hasPendingArc = true - } - return arc, true + return nil, false } func (v *Vertex) Source() ast.Node { @@ -1491,7 +1382,11 @@ func (v *Vertex) hasConjunct(c Conjunct) (added bool) { default: v.ArcType = ArcMember } - p, _ := findConjunct(v.Conjuncts, c) + var ctx *OpContext + if v.state != nil { + ctx = v.state.ctx + } + p, _ := findConjunct(ctx, v.Conjuncts, c) return p >= 0 } @@ -1499,95 +1394,31 @@ func (v *Vertex) hasConjunct(c Conjunct) (added bool) { // // NOTE: we are not comparing closeContexts. The intended use of this function // is only to add to list of conjuncts within a closeContext. -func findConjunct(cs []Conjunct, c Conjunct) (int, Conjunct) { +func findConjunct(ctx *OpContext, cs []Conjunct, c Conjunct) (int, Conjunct) { for i, x := range cs { // TODO: disregard certain fields from comparison (e.g. Refs)? - if x.CloseInfo.closeInfo == c.CloseInfo.closeInfo && // V2 - x.x == c.x && - x.Env.Up == c.Env.Up && x.Env.Vertex == c.Env.Vertex { + if x.x == c.x && x.Env.Equal(ctx, c.Env) { return i, x } } return -1, Conjunct{} } -func (n *nodeContext) addConjunction(c Conjunct, index int) { - unreachableForDev(n.ctx) - - // NOTE: This does not split binary expressions for comprehensions. - // TODO: split for comprehensions and rewrap? 
- if x, ok := c.Elem().(*BinaryExpr); ok && x.Op == AndOp { - c.x = x.X - n.conjuncts = append(n.conjuncts, conjunct{C: c, index: index}) - c.x = x.Y - n.conjuncts = append(n.conjuncts, conjunct{C: c, index: index}) - } else { - n.conjuncts = append(n.conjuncts, conjunct{C: c, index: index}) - } -} - func (v *Vertex) addConjunctUnchecked(c Conjunct) { - index := len(v.Conjuncts) v.Conjuncts = append(v.Conjuncts, c) - if n := v.state; n != nil && !n.ctx.isDevVersion() { - // TODO(notify): consider this as a central place to send out - // notifications. At the moment this is not necessary, but it may - // be if we move the notification mechanism outside of the path of - // running tasks. - n.addConjunction(c, index) - - // TODO: can we remove notifyConjunct here? This method is only - // used if either Unprocessed is 0, in which case there will be no - // notification recipients, or for "pushed down" comprehensions, - // which should also have been added at an earlier point. - n.notifyConjunct(c) - } -} - -// addConjunctDynamic adds a conjunct to a vertex and immediately evaluates -// it, whilst doing the same for any vertices on the notify list, recursively. -func (n *nodeContext) addConjunctDynamic(c Conjunct) { - unreachableForDev(n.ctx) - - n.node.Conjuncts = append(n.node.Conjuncts, c) - n.addExprConjunct(c, partial) - n.notifyConjunct(c) - } -func (n *nodeContext) notifyConjunct(c Conjunct) { - unreachableForDev(n.ctx) - - for _, rec := range n.notify { - arc := rec.v - if !arc.hasConjunct(c) { - if arc.state == nil { - // TODO: continuing here is likely to result in a faulty - // (incomplete) configuration. But this may be okay. The - // CUE_DEBUG=0 flag disables this assertion. 
- n.ctx.Assertf(n.ctx.pos(), !n.ctx.Strict, "unexpected nil state") - n.ctx.addErrf(0, n.ctx.pos(), "cannot add to field %v", arc.Label) - continue - } - arc.state.addConjunctDynamic(c) +func (v *Vertex) AddStruct(s *StructLit) { + for i, t := range v.Structs { + if t.StructLit == s { + v.Structs[i].Repeats++ + return } } -} - -func (v *Vertex) AddStruct(s *StructLit, env *Environment, ci CloseInfo) *StructInfo { info := StructInfo{ StructLit: s, - Env: env, - CloseInfo: ci, - } - for _, t := range v.Structs { - if *t == info { // TODO: check for different identity. - return t - } } - t := &info - v.Structs = append(v.Structs, t) - return t + v.Structs = append(v.Structs, info) } // Path computes the sequence of Features leading from the root to of the diff --git a/vendor/cuelang.org/go/internal/core/adt/comprehension.go b/vendor/cuelang.org/go/internal/core/adt/comprehension.go index 3c60a35331..31a2f97f5c 100644 --- a/vendor/cuelang.org/go/internal/core/adt/comprehension.go +++ b/vendor/cuelang.org/go/internal/core/adt/comprehension.go @@ -77,8 +77,6 @@ type envComprehension struct { // runtime-related fields - err *Bottom - // envs holds all the environments that define a single "yield" result in // combination with the comprehension struct. envs []*Environment // nil: unprocessed, non-nil: done. @@ -89,6 +87,12 @@ type envComprehension struct { structs []*StructLit } +// addEnv is used as a [YieldFunc] so that we don't need to create a new func +// value for each comprehension. +func (e *envComprehension) addEnv(env *Environment) { + e.envs = append(e.envs, env) +} + // envYield defines a comprehension for a specific field within a comprehension // value. Multiple envYields can be associated with a single envComprehension. // An envComprehension only needs to be evaluated once for multiple envYields. 
@@ -98,8 +102,6 @@ type envYield struct { // Values specific to the field corresponding to this envYield - // This envYield was added to selfComprehensions - self bool // This envYield was successfully executed and the resulting conjuncts were // added. inserted bool @@ -148,13 +150,6 @@ func (n *nodeContext) insertComprehension( x := c.Value - if !n.ctx.isDevVersion() { - ci = ci.SpawnEmbed(c) - ci.closeInfo.span |= ComprehensionSpan - } else { - ci.setOptionalV3(nil) - } - node := n.node.DerefDisjunct() var decls []Decl @@ -177,7 +172,7 @@ func (n *nodeContext) insertComprehension( } // Create partial comprehension - c := &Comprehension{ + partialComp := &Comprehension{ Syntax: c.Syntax, Clauses: c.Clauses, Value: f, @@ -188,13 +183,9 @@ func (n *nodeContext) insertComprehension( arc: node, } - conjunct := MakeConjunct(env, c, ci) - if n.ctx.isDevVersion() { - n.assertInitialized() - n.insertArc(f.Label, ArcPending, conjunct, conjunct.CloseInfo, false) - } else { - n.insertFieldUnchecked(f.Label, ArcPending, conjunct) - } + conjunct := MakeConjunct(env, partialComp, ci) + n.assertInitialized() + n.insertArc(f.Label, ArcPending, conjunct, conjunct.CloseInfo, false) fields = append(fields, f) @@ -204,7 +195,7 @@ func (n *nodeContext) insertComprehension( numFixed++ // Create partial comprehension - c := &Comprehension{ + partialComp := &Comprehension{ Syntax: c.Syntax, Clauses: c.Clauses, Value: f, @@ -214,14 +205,10 @@ func (n *nodeContext) insertComprehension( arc: node, } - conjunct := MakeConjunct(env, c, ci) + conjunct := MakeConjunct(env, partialComp, ci) n.assertInitialized() arc := n.insertFieldUnchecked(f.Label, ArcMember, conjunct) - if n.ctx.isDevVersion() { - arc.MultiLet = true - } else { - arc.MultiLet = f.IsMulti - } + arc.MultiLet = true // NOTE: v2 was f.IsMulti fields = append(fields, f) @@ -245,7 +232,7 @@ func (n *nodeContext) insertComprehension( isComprehension: true, } } - node.AddStruct(st, env, ci) + node.AddStruct(st) switch { case 
!ec.done: ec.structs = append(ec.structs, st) @@ -270,7 +257,17 @@ func (n *nodeContext) insertComprehension( if kind == TopKind { c.kind = StructKind } - return + // If there's an else clause, we still need to schedule a task + // to handle the fallback case when comprehension yields zero values. + if c.Fallback == nil { + return + } + // Use an empty struct as the main value since all fields were + // handled at field level. The else clause will be embedded if + // the comprehension yields zero values. + x = &StructLit{ + isComprehension: true, + } default: // Create a new StructLit with only the fields that need to be @@ -282,19 +279,9 @@ func (n *nodeContext) insertComprehension( } } - if n.ctx.isDevVersion() { - t := n.scheduleTask(handleComprehension, env, x, ci) - t.comp = ec - t.leaf = c - } else { - n.comprehensions = append(n.comprehensions, envYield{ - envComprehension: ec, - leaf: c, - env: env, - id: ci, - expr: x, - }) - } + t := n.scheduleTask(handleComprehension, env, x, ci) + t.comp = ec + t.leaf = c } type compState struct { @@ -311,7 +298,7 @@ func (c *OpContext) yield( node *Vertex, // errors are associated with this node env *Environment, // env for field for which this yield is called comp *Comprehension, - state combinedFlags, + state Flags, f YieldFunc, // called for every result ) *Bottom { s := &compState{ @@ -354,71 +341,6 @@ func (s *compState) yield(env *Environment) (ok bool) { return !c.HasErr() } -// injectComprehension evaluates and inserts embeddings. It first evaluates all -// embeddings before inserting the results to ensure that the order of -// evaluation does not matter. -func (n *nodeContext) injectComprehensions(state vertexStatus) (progress bool) { - unreachableForDev(n.ctx) - - workRemaining := false - - // We use variables, instead of range, as the list may grow dynamically. 
- for i := 0; i < len(n.comprehensions); i++ { - d := &n.comprehensions[i] - if d.self || d.inserted { - continue - } - if err := n.processComprehension(d, state); err != nil { - // TODO: Detect that the nodes are actually equal - if err.ForCycle && err.Value == n.node { - n.selfComprehensions = append(n.selfComprehensions, *d) - progress = true - d.self = true - return - } - - d.err = err - workRemaining = true - - continue - - // TODO: add this when it can be done without breaking other - // things. - // - // // Add comprehension to ensure incomplete error is inserted. - // // This ensures that the error is reported in the Vertex - // // where the comprehension was defined, and not just in the - // // node below. This, in turn, is necessary to support - // // certain logic, like export, that expects to be able to - // // detect an "incomplete" error at the first level where it - // // is necessary. - // n := d.node.getNodeContext(ctx) - // n.addBottom(err) - - } - progress = true - } - - if !workRemaining { - n.comprehensions = n.comprehensions[:0] // Signal that all work is done. - } - - return progress -} - -// injectSelfComprehensions processes comprehensions that were earlier marked -// as iterating over the node in which they are defined. Such comprehensions -// are legal as long as they do not modify the arc set of the node. -func (n *nodeContext) injectSelfComprehensions(state vertexStatus) { - unreachableForDev(n.ctx) - - // We use variables, instead of range, as the list may grow dynamically. - for i := 0; i < len(n.selfComprehensions); i++ { - n.processComprehension(&n.selfComprehensions[i], state) - } - n.selfComprehensions = n.selfComprehensions[:0] // Signal that all work is done. -} - // processComprehension processes a single Comprehension conjunct. // It returns an incomplete error if there was one. Fatal errors are // processed as a "successfully" completed computation. 
@@ -427,16 +349,11 @@ func (n *nodeContext) processComprehension(d *envYield, state vertexStatus) *Bot // Compute environments, if needed. if !d.done { - var envs []*Environment - f := func(env *Environment) { - envs = append(envs, env) - } - - if err := ctx.yield(d.vertex, d.env, d.comp, combinedFlags{ + if err := ctx.yield(d.vertex, d.env, d.comp, Flags{ status: state, condition: allKnown, mode: ignore, - }, f); err != nil { + }, d.addEnv); err != nil { if err.IsIncomplete() { return err } @@ -451,8 +368,6 @@ func (n *nodeContext) processComprehension(d *envYield, state vertexStatus) *Bot return nil } - d.envs = envs - if len(d.envs) > 0 { for _, s := range d.structs { s.Init(n.ctx) @@ -465,6 +380,15 @@ func (n *nodeContext) processComprehension(d *envYield, state vertexStatus) *Bot d.inserted = true if len(d.envs) == 0 { + // If there's an else clause, use it instead of marking arc as not present. + if d.leaf.Fallback != nil { + // Evaluate the else clause in the outer environment. + // We use linkChildren to properly chain the environment, similar to + // normal comprehension yield processing. 
+ env := linkChildren(d.env, d.leaf) + n.scheduleConjunct(Conjunct{env, d.leaf.Fallback, d.id}, d.id) + return nil + } n.node.updateArcType(ArcNotPresent) return nil } @@ -505,11 +429,7 @@ func (n *nodeContext) processComprehension(d *envYield, state vertexStatus) *Bot env = linkChildren(env, d.leaf) - if ctx.isDevVersion() { - n.scheduleConjunct(Conjunct{env, d.expr, id}, id) - } else { - n.addExprConjunct(Conjunct{env, d.expr, id}, state) - } + n.scheduleConjunct(Conjunct{env, d.expr, id}, id) } return nil diff --git a/vendor/cuelang.org/go/internal/core/adt/conjunct.go b/vendor/cuelang.org/go/internal/core/adt/conjunct.go index c91fd4c543..5d394f84da 100644 --- a/vendor/cuelang.org/go/internal/core/adt/conjunct.go +++ b/vendor/cuelang.org/go/internal/core/adt/conjunct.go @@ -16,6 +16,7 @@ package adt import ( "fmt" + "slices" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" @@ -77,10 +78,17 @@ func (n *nodeContext) scheduleConjunct(c Conjunct, id CloseInfo) { } case Value: - // TODO: perhaps some values could be shared. - n.unshare() n.insertValueConjunct(env, x, id) + case *OpenExpr: + // This is not strictly necessary, but it ensures the same code path + // is taken for references that now have been rewritten to have a ... + // suffix. + c.x = x.X + c.CloseInfo.Opened = true // NOTE: seems unnecessary, but just to be sure. + id.Opened = true + n.scheduleConjunct(c, id) + case *BinaryExpr: // NOTE: do not unshare: a conjunction could still allow structure // sharing, such as in the case of `ref & ref`. @@ -104,7 +112,7 @@ func (n *nodeContext) scheduleConjunct(c Conjunct, id CloseInfo) { n.unshare() // At this point we known we have at least an empty list. 
- n.updateCyclicStatusV3(id) + n.updateCyclicStatus(id) env := &Environment{ Up: env, @@ -116,25 +124,24 @@ func (n *nodeContext) scheduleConjunct(c Conjunct, id CloseInfo) { case *DisjunctionExpr: n.unshare() id := id - id.setOptionalV3(n) + id.setOptional(n) - // TODO(perf): reuse envDisjunct values so that we can also reuse the - // disjunct slice. n.ctx.holeID++ - d := envDisjunct{ - env: env, - cloneID: id, - holeID: n.ctx.holeID, - src: x, - expr: x, - } + // Reuse disjunctBuffer to avoid allocating a new slice for each disjunction. + n.ctx.disjunctBuffer = n.ctx.disjunctBuffer[:0] for _, dv := range x.Values { - d.disjuncts = append(d.disjuncts, disjunct{ - expr: dv.Val, - isDefault: dv.Default, - mode: mode(x.HasDefaults, dv.Default), + n.ctx.disjunctBuffer = append(n.ctx.disjunctBuffer, disjunct{ + expr: dv.Val, + mode: mode(x.HasDefaults, dv.Default), }) } + d := envDisjunct{ + env: env, + cloneID: id, + holeID: n.ctx.holeID, + src: x, + disjuncts: slices.Clone(n.ctx.disjunctBuffer), + } n.scheduleDisjunction(d) n.updateConjunctInfo(TopKind, id, 0) @@ -148,17 +155,26 @@ func (n *nodeContext) scheduleConjunct(c Conjunct, id CloseInfo) { case Evaluator: n.unshare() - // Expressions that contain a call may end up in an infinite recursion - // here if we do not ensure that there is non-cyclic data to propagate - // the evaluation. We therefore postpone expressions until we have - // evidence that such non-cyclic conjuncts exist. - if id.CycleType == IsCyclic && !n.hasNonCycle && !n.hasNonCyclic { - n.hasAncestorCycle = true - n.cyclicConjuncts = append(n.cyclicConjuncts, cyclicConjunct{c: c}) - return + // Call expressions may end up in an infinite recursion if we do not + // ensure that there is non-cyclic data to propagate the evaluation. + // We therefore postpone call expressions until we have evidence that + // such non-cyclic conjuncts exist. 
+ // + // We only defer calls that contain references in their arguments, as + // only those can cause infinite recursion. Calls with only literal + // arguments (like `or(["a", "b"])`) are safe to evaluate immediately, + // even when evaluated from within an unrelated cyclic context. + // + // TODO: this is rather hacky. We probably need a better solution + // in a future iteration of the evaluator. + if call, ok := x.(*CallExpr); ok && id.CycleType == IsCyclic && !n.hasNonCycle && !n.hasNonCyclic { + if slices.ContainsFunc(call.Args, exprHasResolver) { + n.hasAncestorCycle = true + n.cyclicConjuncts = append(n.cyclicConjuncts, cyclicConjunct{c: c}) + return + } } - // Interpolation, UnaryExpr, CallExpr n.scheduleTask(handleExpr, env, x, id) default: @@ -168,13 +184,38 @@ func (n *nodeContext) scheduleConjunct(c Conjunct, id CloseInfo) { n.ctx.stats.Conjuncts++ } +// exprHasResolver checks if an expression contains a Resolver (reference). +func exprHasResolver(x Expr) bool { + switch v := x.(type) { + case Resolver: + return true + case Value: + return false // Already evaluated, cannot contain references. + case *BinaryExpr: + return exprHasResolver(v.X) || exprHasResolver(v.Y) + case *UnaryExpr: + return exprHasResolver(v.X) + case *CallExpr: + return slices.ContainsFunc(v.Args, exprHasResolver) + case *ListLit: + for _, elem := range v.Elems { + if expr, ok := elem.(Expr); ok && exprHasResolver(expr) { + return true + } + } + return false + default: + return true // Conservatively assume unknown types may have references. + } +} + // scheduleStruct records all elements of this conjunct in the structure and // then processes it. If an element needs to be inserted for evaluation, // it may be scheduled. 
func (n *nodeContext) scheduleStruct(env *Environment, s *StructLit, ci CloseInfo) { - n.updateCyclicStatusV3(ci) + n.updateCyclicStatus(ci) n.updateConjunctInfo(StructKind, ci, cHasStruct) // NOTE: This is a crucial point in the code: @@ -197,10 +238,8 @@ func (n *nodeContext) scheduleStruct(env *Environment, s.Init(n.ctx) - // TODO: do we still need to AddStruct and do we still need to Disable? - parent := n.node.AddStruct(s, childEnv, ci) - parent.Disable = true // disable until processing is done. - ci.IsClosed = false + // TODO: do we still need to AddStruct? + n.node.AddStruct(s) // TODO(perf): precompile whether struct has embedding. loop1: @@ -217,17 +256,21 @@ loop1: ci = n.splitStruct(s, ci) } + // Struct literals with static elements are common; + // grow the capacity ahead of time to make space for their arcs. + n.node.Arcs = slices.Grow(n.node.Arcs, len(s.Decls)) + // First add fixed fields and schedule expressions. for _, d := range s.Decls { switch x := d.(type) { case *Field: if x.Label.IsString() && x.ArcType == ArcMember { - n.aStruct = s - n.aStructID = ci + n.aStruct = true + n.updateNodeType(StructKind, s, ci) } ci := n.ctx.subField(ci) if x.ArcType == ArcOptional { - ci.setOptionalV3(n) + ci.setOptional(n) } fc := MakeConjunct(childEnv, x, ci) @@ -250,14 +293,14 @@ loop1: case *DynamicField: ci := n.ctx.subField(ci) if x.ArcType == ArcMember { - n.aStruct = s - n.aStructID = ci + n.aStruct = true + n.updateNodeType(StructKind, s, ci) } n.scheduleTask(handleDynamic, childEnv, x, ci) case *BulkOptionalField: ci := n.ctx.subField(ci) - ci.setOptionalV3(n) + ci.setOptional(n) // All do not depend on each other, so can be added at once. n.scheduleTask(handlePatternConstraint, childEnv, x, ci) @@ -274,12 +317,9 @@ loop1: n.updateConjunctInfo(TopKind, ci, cHasEllipsis) } if !hasEmbed { - n.aStruct = s - n.aStructID = ci + n.aStruct = true + n.updateNodeType(StructKind, s, ci) } - - // TODO: probably no longer necessary. 
- parent.Disable = false } // scheduleVertexConjuncts injects the conjuncst of src n. If src was not fully @@ -304,6 +344,9 @@ func (n *nodeContext) scheduleVertexConjuncts(c Conjunct, arc *Vertex, closeInfo closeInfo.enclosingEmbed != 0 { closeInfo.FromDef = false } + if arc.ClosedRecursive && c.CloseInfo.Opened { + n.embedsRecursivelyClosed = true + } // disjunctions, we need to dereference he underlying node. if deref(n.node) == deref(arc) { @@ -318,33 +361,6 @@ func (n *nodeContext) scheduleVertexConjuncts(c Conjunct, arc *Vertex, closeInfo return } - // We need to ensure that each arc is only unified once (or at least) a - // bounded time, witch each conjunct. Comprehensions, for instance, may - // distribute a value across many values that get unified back into the - // same value. If such a value is a disjunction, than a disjunction of N - // disjuncts will result in a factor N more unifications for each - // occurrence of such value, resulting in exponential running time. This - // is especially common values that are used as a type. - // - // However, unification is idempotent, so each such conjunct only needs - // to be unified once. This cache checks for this and prevents an - // exponential blowup in such case. - // - // TODO(perf): this cache ensures the conjuncts of an arc at most once - // per ID. However, we really need to add the conjuncts of an arc only - // once total, and then add the close information once per close ID - // (pointer can probably be shared). Aside from being more performant, - // this is probably the best way to guarantee that conjunctions are - // linear in this case. - - ciKey := closeInfo - ciKey.Refs = nil - ciKey.Inline = false - if n.ctx.isDevVersion() { - // No need to key on CloseInfo with evalv3. - ciKey = CloseInfo{} - } - // Also check arc.Label: definitions themselves do not have the FromDef to // reflect their closedness. 
This means that if we are structure sharing, we // may end up with a Vertex that is a definition without the reference @@ -353,14 +369,17 @@ func (n *nodeContext) scheduleVertexConjuncts(c Conjunct, arc *Vertex, closeInfo // once. switch isDef, _ := IsDef(c.Expr()); { case isDef || arc.Label.IsDef() || closeInfo.TopDef: + if c.CloseInfo.Opened { + n.embedsRecursivelyClosed = true + } n.isDef = true // n.node.ClosedRecursive = true // TODO: should we set this here? closeInfo.FromDef = true closeInfo.TopDef = false - closeInfo = n.addResolver(arc, closeInfo, false) + closeInfo = n.addResolver(c.x, arc, closeInfo, false) default: - closeInfo = n.addResolver(arc, closeInfo, true) + closeInfo = n.addResolver(c.x, arc, closeInfo, true) } if closeInfo.defID != 0 && closeInfo.opID == n.ctx.opID { c.CloseInfo.opID = closeInfo.opID @@ -369,13 +388,28 @@ func (n *nodeContext) scheduleVertexConjuncts(c Conjunct, arc *Vertex, closeInfo c.CloseInfo.enclosingEmbed = closeInfo.enclosingEmbed } - key := arcKey{arc, ciKey} - for _, k := range n.arcMap { - if key == k { - return - } + // We need to ensure that each arc is only unified once (or at least) a + // bounded time, witch each conjunct. Comprehensions, for instance, may + // distribute a value across many values that get unified back into the + // same value. If such a value is a disjunction, than a disjunction of N + // disjuncts will result in a factor N more unifications for each + // occurrence of such value, resulting in exponential running time. This + // is especially common values that are used as a type. + // + // However, unification is idempotent, so each such conjunct only needs + // to be unified once. This cache checks for this and prevents an + // exponential blowup in such case. + // + // TODO(perf): this cache ensures the conjuncts of an arc at most once + // per ID. 
However, we really need to add the conjuncts of an arc only + // once total, and then add the close information once per close ID + // (pointer can probably be shared). Aside from being more performant, + // this is probably the best way to guarantee that conjunctions are + // linear in this case. + if slices.Contains(n.arcMap, arc) { + return } - n.arcMap = append(n.arcMap, key) + n.arcMap = append(n.arcMap, arc) if arc.Parent != nil && (!n.node.nonRooted || n.node.IsDynamic) { // If the arc has a parent that for which the field conjuncts are not @@ -433,7 +467,7 @@ func (n *nodeContext) addNotify2(v *Vertex, c CloseInfo) { // Literal conjuncts -// NoSharingSentinel is a sentinel value that is used to disable sharing of +// NoShareSentinel is a sentinel value that is used to disable sharing of // nodes. We make this an error to make it clear that we discard the value. var NoShareSentinel = &Bottom{ Err: errors.Newf(token.NoPos, "no sharing"), @@ -442,8 +476,6 @@ var NoShareSentinel = &Bottom{ func (n *nodeContext) insertValueConjunct(env *Environment, v Value, id CloseInfo) { ctx := n.ctx - n.updateConjunctInfo(TopKind, id, 0) - switch x := v.(type) { case *Vertex: if x.ClosedNonRecursive { @@ -451,16 +483,23 @@ func (n *nodeContext) insertValueConjunct(env *Environment, v Value, id CloseInf // If this is a definition, it will be repeated in the evaluation. if !x.IsFromDisjunction() { - id = n.addResolver(x, id, false) + id = n.addResolver(v, x, id, false) + } + } else if x.ClosedRecursive { + n.node.ClosedRecursive = true + + // If this is a definition, it will be repeated in the evaluation. 
+ if !x.IsFromDisjunction() { + id = n.addResolver(v, x, id, false) } } if _, ok := x.BaseValue.(*StructMarker); ok { - n.aStruct = x - n.aStructID = id + n.aStruct = true + n.updateNodeType(StructKind, x, id) } if !x.IsData() { - n.updateCyclicStatusV3(id) + n.updateCyclicStatus(id) c := MakeConjunct(env, x, id) n.scheduleVertexConjuncts(c, x, id) @@ -473,7 +512,7 @@ func (n *nodeContext) insertValueConjunct(env *Environment, v Value, id CloseInf panic(fmt.Sprintf("invalid type %T", x.BaseValue)) case *ListMarker: - n.updateCyclicStatusV3(id) + n.updateCyclicStatus(id) // TODO: arguably we know now that the type _must_ be a list. n.scheduleTask(handleListVertex, env, x, id) @@ -498,14 +537,15 @@ func (n *nodeContext) insertValueConjunct(env *Environment, v Value, id CloseInf return case *Bottom: + n.unshare() if x == NoShareSentinel { - n.unshare() return } n.addBottom(x) return case *Builtin: + n.unshare() if v := x.BareValidator(); v != nil { n.insertValueConjunct(env, v, id) return @@ -518,28 +558,28 @@ func (n *nodeContext) insertValueConjunct(env *Environment, v Value, id CloseInf switch x := v.(type) { case *Disjunction: - n.updateCyclicStatusV3(id) + n.updateCyclicStatus(id) + n.unshare() - // TODO(perf): reuse envDisjunct values so that we can also reuse the - // disjunct slice. id := id - id.setOptionalV3(n) + id.setOptional(n) n.ctx.holeID++ - d := envDisjunct{ - env: env, - cloneID: id, - holeID: n.ctx.holeID, - src: x, - value: x, - } + // Reuse disjunctBuffer to avoid allocating a new slice for each disjunction. 
+ n.ctx.disjunctBuffer = n.ctx.disjunctBuffer[:0] for i, dv := range x.Values { - d.disjuncts = append(d.disjuncts, disjunct{ - expr: dv, - isDefault: i < x.NumDefaults, - mode: mode(x.HasDefaults, i < x.NumDefaults), + n.ctx.disjunctBuffer = append(n.ctx.disjunctBuffer, disjunct{ + expr: dv, + mode: mode(x.HasDefaults, i < x.NumDefaults), }) } + d := envDisjunct{ + env: env, + cloneID: id, + holeID: n.ctx.holeID, + src: x, + disjuncts: slices.Clone(n.ctx.disjunctBuffer), + } n.scheduleDisjunction(d) case *Conjunction: @@ -551,75 +591,67 @@ func (n *nodeContext) insertValueConjunct(env *Environment, v Value, id CloseInf } case *Top: - n.updateCyclicStatusV3(id) + n.updateCyclicStatus(id) n.hasTop = true n.updateConjunctInfo(TopKind, id, cHasTop) case *BasicType: - n.updateCyclicStatusV3(id) + n.unshare() + n.updateCyclicStatus(id) if x.K != TopKind { n.updateConjunctInfo(TopKind, id, cHasTop) } case *BoundValue: - n.updateCyclicStatusV3(id) + n.unshare() + n.updateCyclicStatus(id) switch x.Op { - case LessThanOp, LessEqualOp: - if y := n.upperBound; y != nil { - v := SimplifyBounds(ctx, n.kind, x, y) - if err := valueError(v); err != nil { - err.AddPosition(v) - err.AddPosition(n.upperBound) - err.AddClosedPositions(id) - } - n.upperBound = nil - n.insertValueConjunct(env, v, id) - return + case LessThanOp, LessEqualOp, GreaterThanOp, GreaterEqualOp: + bound := &n.upperBound + if x.Op == GreaterThanOp || x.Op == GreaterEqualOp { + bound = &n.lowerBound } - n.upperBound = x - - case GreaterThanOp, GreaterEqualOp: - if y := n.lowerBound; y != nil { - v := SimplifyBounds(ctx, n.kind, x, y) - if err := valueError(v); err != nil { - err.AddPosition(v) - err.AddPosition(n.lowerBound) - err.AddClosedPositions(id) + if y := *bound; y != nil { + if v := SimplifyBounds(ctx, n.kind, x, y); v != nil { + *bound = nil + n.insertValueConjunct(env, v, id) + return } - n.lowerBound = nil - n.insertValueConjunct(env, v, id) - return } - n.lowerBound = x + *bound = x - case EqualOp, 
NotEqualOp, MatchOp, NotMatchOp: + case EqualOp, NotEqualOp: + // We treat equality as an open validator. + n.updateConjunctInfo(TopKind, id, cHasOpenValidator|cHasTop) + fallthrough + + case MatchOp, NotMatchOp: // This check serves as simplifier, but also to remove duplicates. - k := 0 match := false - for _, c := range n.checks { + n.checks = slices.DeleteFunc(n.checks, func(c Conjunct) bool { if y, ok := c.x.(*BoundValue); ok { - switch z := SimplifyBounds(ctx, n.kind, x, y); { - case z == y: + switch SimplifyBounds(ctx, n.kind, x, y) { + case y: match = true - case z == x: - continue + case x: + return true } } - n.checks[k] = c - k++ - } - n.checks = n.checks[:k] + return false + }) // TODO(perf): do an early check to be able to prune further // processing. if !match { n.checks = append(n.checks, MakeConjunct(env, x, id)) } + return } case Validator: + n.unshare() // This check serves as simplifier, but also to remove duplicates. cx := MakeConjunct(env, x, id) kind := x.Kind() @@ -667,16 +699,35 @@ func (n *nodeContext) insertValueConjunct(env *Environment, v Value, id CloseInf // handled above. case Value: // *NullLit, *BoolLit, *NumLit, *StringLit, *BytesLit, *Builtin - n.updateCyclicStatusV3(id) + n.unshare() + if p, isData := Pos(v).Priority(); isData { + id.Priority = p + } + + n.updateCyclicStatus(id) if y := n.scalar; y != nil { - if b, ok := BinOp(ctx, errOnDiffType, EqualOp, x, y).(*Bool); !ok || !b.B { - n.reportConflict(x, y, x.Kind(), y.Kind(), n.scalarID, id) + p1 := n.scalarID.Priority + p2 := id.Priority + if p1 != 0 && p2 != 0 { + if p1 > p2 { + // all good + break + } else if p1 < p2 { + goto patchConjunct + } + } + if !BinOpBool(ctx, errOnDiffType, EqualOp, x, y) { + n.reportConflict(x, y, x.Kind(), y.Kind(), n.scalarID, id.posInfo) } break } + patchConjunct: n.scalar = x - n.scalarID = id + n.scalarID = id.posInfo + // TODO: only set "scalarKnown" if there are no other high priority + // conjuncts. 
Alternatively, we should process high priority conjuncts + // in the scheduler first. n.signal(scalarKnown) default: @@ -685,11 +736,6 @@ func (n *nodeContext) insertValueConjunct(env *Environment, v Value, id CloseInf if n.lowerBound != nil && n.upperBound != nil { if u := SimplifyBounds(ctx, n.kind, n.lowerBound, n.upperBound); u != nil { - if err := valueError(u); err != nil { - err.AddPosition(n.lowerBound) - err.AddPosition(n.upperBound) - err.AddClosedPositions(id) - } n.lowerBound = nil n.upperBound = nil n.insertValueConjunct(env, u, id) diff --git a/vendor/cuelang.org/go/internal/core/adt/constraints.go b/vendor/cuelang.org/go/internal/core/adt/constraints.go index a27fb3336d..a9de171d14 100644 --- a/vendor/cuelang.org/go/internal/core/adt/constraints.go +++ b/vendor/cuelang.org/go/internal/core/adt/constraints.go @@ -98,10 +98,8 @@ func (n *nodeContext) insertConstraint(pattern Value, c Conjunct) bool { Constraint: constraint, }) } else { - found := false - constraint.VisitLeafConjuncts(func(x Conjunct) bool { - if x.x == c.x && x.Env.Up == c.Env.Up && x.Env.Vertex == c.Env.Vertex { - found = true + for x := range constraint.LeafConjuncts() { + if x.x == c.x && x.Env.Equal(ctx, c.Env) { if c.CloseInfo.opID == n.ctx.opID { // TODO: do we need this replacement? src := x.CloseInfo.defID @@ -110,13 +108,9 @@ func (n *nodeContext) insertConstraint(pattern Value, c Conjunct) bool { } else { n.ctx.stats.MisalignedConstraint++ } + // The constraint already existed and the conjunct was already added. return false } - return true - }) - // The constraint already existed and the conjunct was already added. - if found { - return false } } @@ -133,35 +127,18 @@ func matchPattern(ctx *OpContext, pattern Value, f Feature) bool { return false } - // TODO(perf): this assumes that comparing an int64 against apd.Decimal - // is faster than converting this to a Num and using that for comparison. - // This may very well not be the case. 
But it definitely will be if we - // special-case integers that can fit in an int64 (or int32 if we want to - // avoid many bound checks), which we probably should. Especially when we - // allow list constraints, like [<10]: T. - var label Value - if f.IsString() && int64(f.Index()) != MaxIndex { - label = f.ToValue(ctx) - } - - return matchPatternValue(ctx, pattern, f, label) + return matchPatternValue(ctx, pattern, f) } -// matchPatternValue matches a concrete value against f. label must be the -// CUE value that is obtained from converting f. +// matchPatternValue matches a concrete value against f. // // This is an optimization an intended to be faster than regular CUE evaluation // for the majority of cases where pattern constraints are used. -func matchPatternValue(ctx *OpContext, pattern Value, f Feature, label Value) (result bool) { +func matchPatternValue(ctx *OpContext, pattern Value, f Feature) (result bool) { if v, ok := pattern.(*Vertex); ok { - v.unify(ctx, scalarKnown, finalize, false) + v.unify(ctx, Flags{condition: scalarKnown, mode: finalize, checkTypos: false}) } pattern = Unwrap(pattern) - label = Unwrap(label) - - if pattern == label { - return true - } k := IntKind if f.IsString() { @@ -176,11 +153,10 @@ func matchPatternValue(ctx *OpContext, pattern Value, f Feature, label Value) (r case *Bottom: // TODO: hoist and reuse with the identical code in optional.go. 
if x == cycle { - err := ctx.NewPosf(pos(pattern), "cyclic pattern constraint") - ctx.vertex.VisitLeafConjuncts(func(c Conjunct) bool { - addPositions(err, c) - return true - }) + err := ctx.NewPosf(Pos(pattern), "cyclic pattern constraint") + for c := range ctx.vertex.LeafConjuncts() { + addPositions(ctx, err, c) + } ctx.AddBottom(&Bottom{ Err: err, Node: ctx.vertex, @@ -200,10 +176,10 @@ func matchPatternValue(ctx *OpContext, pattern Value, f Feature, label Value) (r case *BoundValue: switch x.Kind() { case StringKind: - if label == nil { + if !f.IsString() || int64(f.Index()) == MaxIndex { return false } - str := label.(*String).Str + str := ctx.IndexToString(f.safeIndex()) return x.validateStr(ctx, str) case NumberKind: @@ -219,15 +195,15 @@ func matchPatternValue(ctx *OpContext, pattern Value, f Feature, label Value) (r return err == nil && xi == yi case *String: - if label == nil { + if !f.IsString() || int64(f.Index()) == MaxIndex { return false } - y, ok := label.(*String) - return ok && x.Str == y.Str + str := ctx.IndexToString(f.safeIndex()) + return x.Str == str case *Conjunction: for _, a := range x.Values { - if !matchPatternValue(ctx, a, f, label) { + if !matchPatternValue(ctx, a, f) { return false } } @@ -235,7 +211,7 @@ func matchPatternValue(ctx *OpContext, pattern Value, f Feature, label Value) (r case *Disjunction: for _, a := range x.Values { - if matchPatternValue(ctx, a, f, label) { + if matchPatternValue(ctx, a, f) { return true } } @@ -249,10 +225,7 @@ func matchPatternValue(ctx *OpContext, pattern Value, f Feature, label Value) (r // slow track. One way to signal this would be to have a "value thunk" at // the root that causes the fast track to be bypassed altogether. 
- if label == nil { - label = f.ToValue(ctx) - } - + label := f.ToValue(ctx) n := ctx.newInlineVertex(nil, nil, MakeConjunct(ctx.e, pattern, ctx.ci), MakeConjunct(ctx.e, label, ctx.ci)) diff --git a/vendor/cuelang.org/go/internal/core/adt/context.go b/vendor/cuelang.org/go/internal/core/adt/context.go index c2f52174ce..5e41a0afb7 100644 --- a/vendor/cuelang.org/go/internal/core/adt/context.go +++ b/vendor/cuelang.org/go/internal/core/adt/context.go @@ -16,14 +16,17 @@ package adt import ( "fmt" + "iter" "reflect" "regexp" + "strings" "sync/atomic" + "unicode/utf8" "github.com/cockroachdb/apd/v3" - "golang.org/x/text/encoding/unicode" "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/stats" "cuelang.org/go/cue/token" @@ -38,9 +41,14 @@ type Runtime interface { // canonical numeric representation. StringIndexer - // LoadImport loads a unique Vertex associated with a given import path. It - // returns nil if no import for this package could be found. - LoadImport(importPath string) *Vertex + // LoadBuiltin loads a unique Vertex associated with a given builtin + // (standard library) import path. It returns nil if no builtin for + // this import path could be found. + LoadBuiltin(importPath string) *Vertex + + // LoadInstance loads a unique Vertex associated with the given build + // instance. It returns nil if no such instance has been compiled. + LoadInstance(inst *build.Instance) *Vertex // StoreType associates a CUE expression with a Go type. StoreType(t reflect.Type, src ast.Expr, expr Expr) @@ -85,43 +93,80 @@ func New(v *Vertex, cfg *Config) *OpContext { return ctx } -// See also: [unreachableForDev] func (c *OpContext) isDevVersion() bool { - if c.Version == internal.EvalVersionUnset { - panic("OpContext was not provided with an evaluator version") - } return c.Version == internal.DevVersion } -// An OpContext implements CUE's unification operation. 
It only -// operates on values that are created with the Runtime with which an OpContext -// is associated. An OpContext is not goroutine safe and only one goroutine may -// use an OpContext at a time. +// An OpContext holds context associated with an on-going CUE +// evaluation. It functions both as an optimized memory store, +// amortizing allocations during an evaluation, and as a record of the +// current state within an evaluation. +// +// It should only be used on values that are created with the Runtime +// with which an OpContext is created. +// +// An OpContext is not goroutine safe and only one goroutine may use an +// OpContext at a time. +// +// An OpContext is typically used for an entire operation involving CUE +// values that are derived from the same [cue.Context], such as any call +// to exported Go APIs like methods on [cue.Value]. +// +// An OpContext stores: +// - errors encountered during the evaluation +// - the current vertex and its parents +// - statistics on evaluation operations +// +// The recorded set of errors is added to by calls to [OpContext.AddErr], +// [OpContext.AddErr], [OpContext.AddErrf], and in general +// any other operation that encounters an error. +// +// The current vertex is modified by calling [OpContext.PushArc], which +// must be balanced by a corresponding call to [OpContext.PopArc]. +// +// The entire state, including recorded errors and the current vertex, can be +// reset by calling [OpContext.PushState], which must be balanced by a +// corresponding call to [OpContext.PopState], causing the original +// errors and vertex to be restored. 
type OpContext struct { Runtime Format func(Runtime, Node) string cuedebug.Config - Version internal.EvaluatorVersion // Copied from Runtime - SimplifyValidators bool // Copied from Runtime + Version internal.EvaluatorVersion // Copied from Runtime taskContext nest int // used in typocheck.go - nextDefID defID // next available defID - containments []defID // parent relations - redirectsBuf []replaceID // reusable buffer used in containsDefID + nextDefID defID // next available defID + containments []containment // parent relations + containsDefIDCache map[uint64]bool // cache for containsDefID results + + // [token.Pos] interning for containments to reduce memory usage, + // given that millions of elements in [OpContext.containments] + // can share the same position. [uint32] is 4 bytes, + // whereas [token.Pos] is 16 bytes with alignment. + positionTable []token.Pos // unique positions + positionIndex map[token.Pos]uint32 // reverse lookup for interning + + // disjunctBuffer is reused when constructing [envDisjunct.disjuncts]. + disjunctBuffer []disjunct stats stats.Counts freeListNode *nodeContext - e *Environment - ci CloseInfo - src ast.Node + e *Environment + ci CloseInfo + + // Source node associated with the CUE operation, if any. + // When nil, created nodes like [Bool] may use sentinels to avoid allocations. + src ast.Node + errs *Bottom positions []Node // keep track of error positions + skipTry bool // set when an option reference is not present // vertex is used to determine the path location in case of error. Turning // this into a stack could also allow determining the cyclic path for @@ -132,7 +177,8 @@ type OpContext struct { // TODO: remove this again once we have a proper way of detecting references // across optional boundaries in hasAncestorV3. We can probably do this // with an optional depth counter. - toFinalize []*Vertex + // See the TODO in unify.go for toFinalize. 
+ // toFinalize []*Vertex // freeScope tracks the nodeContexts that are currently responsible for // allocating new inlined vertices. Only nodes within the current scope can @@ -151,36 +197,16 @@ type OpContext struct { // TODO(perf): have two generations: one for each pass of the closedness // algorithm, so that the results of the first pass can be reused for all // features of a node. - opID uint64 - closed map[*closeInfo]*closeStats - todo *closeStats + opID uint64 // evalDepth indicates the current depth of evaluation. It is used to // detect structural cycles and their severity.s evalDepth int - // optionalMark indicates the evalDepth at which the last optional field, - // pattern constraint or other construct that may contain errors was - // encountered. A value of 0 indicates we are not within such field. - optionalMark int - // holdID is a unique identifier for the current "hole", a choice of // disjunct to be made when processing disjunctions. holeID int - // inDisjunct indicates that non-monotonic checks should be skipped. - // This is used if we want to do some extra work to eliminate disjunctions - // early. The result of unification should be thrown away if this check is - // used. - // - // TODO: replace this with a mechanism to determine the correct set (per - // conjunct) of StructInfos to include in closedness checking. - inDisjunct int - - // inConstaint overrides inDisjunct as field matching should always be - // enabled. - inConstraint int - // inDetached indicates that inline structs evaluated in the current context // should never be shared. This is the case, for instance, with the source // for the for clause in a comprehension. 
@@ -219,13 +245,6 @@ type OpContext struct { func (c *OpContext) CloseInfo() CloseInfo { return c.ci } func (c *OpContext) UpdateCloseInfo(ci CloseInfo) { c.ci = ci } -func (n *nodeContext) skipNonMonotonicChecks() bool { - if n.ctx.inConstraint > 0 { - return false - } - return n.ctx.inDisjunct > 0 -} - func (c *OpContext) Pos() token.Pos { if c.src == nil { return token.NoPos @@ -266,12 +285,13 @@ func (c *OpContext) Env(upCount int32) *Environment { func (c *OpContext) relNode(upCount int32) *Vertex { e := c.e.up(c, upCount) - c.unify(e.Vertex, combinedFlags{ + v := e.DerefVertex(c) + c.unify(v, Flags{ status: partial, condition: allKnown, mode: ignore, }) - return e.Vertex + return v } func (c *OpContext) relLabel(upCount int32) Feature { @@ -290,7 +310,7 @@ func (c *OpContext) concreteIsPossible(op Op, x Expr) bool { return true } -// Assert that the given expression can evaluate to a concrete value. +// AssertConcreteIsPossible reports whether the given expression can evaluate to a concrete value. func AssertConcreteIsPossible(op Op, x Expr) bool { switch v := x.(type) { case *Bottom: @@ -369,6 +389,12 @@ type frame struct { ci CloseInfo } +// PushState resets c as if it was a newly created context +// with the same configuration c was created with, +// returning a value which should be used to restore the current +// state by passing it to a matching call to [OpContext.PopState]. +// +// If src is nil, c will still refer to the same source node. func (c *OpContext) PushState(env *Environment, src ast.Node) (saved frame) { saved.env = c.e saved.err = c.errs @@ -402,6 +428,7 @@ func (c *OpContext) PushConjunct(x Conjunct) (saved frame) { return saved } +// PopState restores a state pushed by [OpContext.PushState]. 
func (c *OpContext) PopState(s frame) *Bottom { err := c.errs c.e = s.env @@ -450,14 +477,14 @@ func (c *OpContext) Resolve(x Conjunct, r Resolver) (v *Vertex, b *Bottom) { panic(x) } }() - return c.resolveState(x, r, combinedFlags{ + return c.resolveState(x, r, Flags{ status: finalized, condition: allKnown, mode: finalize, }) } -func (c *OpContext) resolveState(x Conjunct, r Resolver, state combinedFlags) (*Vertex, *Bottom) { +func (c *OpContext) resolveState(x Conjunct, r Resolver, state Flags) (*Vertex, *Bottom) { s := c.PushConjunct(x) arc := r.resolve(c, state) @@ -482,7 +509,7 @@ func (c *OpContext) resolveState(x Conjunct, r Resolver, state combinedFlags) (* func (c *OpContext) Lookup(env *Environment, r Resolver) (*Vertex, *Bottom) { s := c.PushState(env, r.Source()) - arc := r.resolve(c, combinedFlags{ + arc := r.resolve(c, Flags{ status: partial, condition: allKnown, mode: ignore, @@ -490,12 +517,6 @@ func (c *OpContext) Lookup(env *Environment, r Resolver) (*Vertex, *Bottom) { err := c.PopState(s) - if arc != nil && !c.isDevVersion() { - // TODO(deref): lookup should probably not use DerefValue, but - // rather only dereference disjunctions. 
- arc = arc.DerefValue() - } - return arc, err } @@ -532,7 +553,7 @@ func (c *OpContext) Validate(check Conjunct, value Value) *Bottom { func (c *OpContext) concrete(env *Environment, x Expr, msg interface{}) (result Value, complete bool) { s := c.PushState(env, x.Source()) - state := combinedFlags{ + state := Flags{ status: partial, condition: concreteKnown, mode: yield, @@ -597,7 +618,7 @@ func (c *OpContext) getDefault(v Value) (result Value, ok bool) { func (c *OpContext) Evaluate(env *Environment, x Expr) (result Value, complete bool) { s := c.PushState(env, x.Source()) - val := c.evalState(x, combinedFlags{ + val := c.evalState(x, Flags{ status: partial, condition: concreteKnown, mode: finalize, @@ -614,7 +635,7 @@ func (c *OpContext) Evaluate(env *Environment, x Expr) (result Value, complete b val = &Bottom{ Code: IncompleteError, Err: c.Newf("UNANTICIPATED ERROR"), - Node: env.Vertex, + Node: env.DerefVertex(c), } } @@ -634,7 +655,7 @@ func (c *OpContext) EvaluateKeepState(x Expr) (result Value) { src := c.src c.src = x.Source() - result, ci := c.evalStateCI(x, combinedFlags{ + result, ci := c.evalStateCI(x, Flags{ status: partial, condition: concreteKnown, mode: finalize, @@ -646,29 +667,11 @@ func (c *OpContext) EvaluateKeepState(x Expr) (result Value) { return result } -func (c *OpContext) evaluateRec(v Conjunct, state combinedFlags) Value { - x := v.Expr() - s := c.PushConjunct(v) - - val := c.evalState(x, state) - if val == nil { - // Be defensive: this never happens, but just in case. - Assertf(c, false, "nil return value: unspecified error") - val = &Bottom{ - Code: IncompleteError, - Err: c.Newf("UNANTICIPATED ERROR"), - Node: c.vertex, - } - } - _ = c.PopState(s) - - return val -} - // value evaluates expression v within the current environment. The result may // be nil if the result is incomplete. value leaves errors untouched to that // they can be collected by the caller. 
-func (c *OpContext) value(x Expr, state combinedFlags) (result Value) { +func (c *OpContext) value(x Expr, state Flags) (result Value) { + state.concrete = true v := c.evalState(x, state) v, _ = c.getDefault(v) @@ -676,12 +679,12 @@ func (c *OpContext) value(x Expr, state combinedFlags) (result Value) { return v } -func (c *OpContext) evalState(v Expr, state combinedFlags) (result Value) { +func (c *OpContext) evalState(v Expr, state Flags) (result Value) { result, _ = c.evalStateCI(v, state) return result } -func (c *OpContext) evalStateCI(v Expr, state combinedFlags) (result Value, ci CloseInfo) { +func (c *OpContext) evalStateCI(v Expr, state Flags) (result Value, ci CloseInfo) { savedSrc := c.src c.src = v.Source() err := c.errs @@ -698,10 +701,7 @@ func (c *OpContext) evalStateCI(v Expr, state combinedFlags) (result Value, ci C switch b.Code { case IncompleteError: case CycleError: - if state.status == partial || c.isDevVersion() { - break - } - fallthrough + break default: result = b } @@ -731,6 +731,11 @@ func (c *OpContext) evalStateCI(v Expr, state combinedFlags) (result Value, ci C case Value: return x, c.ci + case *OpenExpr: + v, ci := c.evalStateCI(x.X, state) + ci.Opened = true + return v, ci + case Evaluator: v := x.evaluate(c, state) return v, c.ci @@ -746,7 +751,7 @@ func (c *OpContext) evalStateCI(v Expr, state combinedFlags) (result Value, ci C // TODO(3977): register internal nodes for later verifications. The // following limits the possibility of some common and useful cycles. // - // if arc.Internal() && c.isDevVersion() { + // if arc.Internal() { // mode := state.conditions() // state = final(partial, mode|allTasksCompleted) // } @@ -760,69 +765,60 @@ func (c *OpContext) evalStateCI(v Expr, state combinedFlags) (result Value, ci C // TODO: is this indirect necessary? 
// arc = arc.Indirect() - if c.isDevVersion() { - if n := arc.getState(c); n != nil { - c.ci, _ = n.detectCycleV3(arc, nil, x, c.ci) - } - } else { - if n := arc.state; n != nil { - c.ci, _ = n.markCycle(arc, nil, x, c.ci) - } + if n := arc.getState(c); n != nil { + c.ci, _ = n.detectCycle(arc, nil, x, c.ci) } - if !c.isDevVersion() { - c.ci.Inline = true - } + if s := arc.getState(c); s != nil { + defer s.retainProcess().releaseProcess() - if c.isDevVersion() { - if s := arc.getState(c); s != nil { - defer s.retainProcess().releaseProcess() - - origNeeds := state.condition - needs := origNeeds | arcTypeKnown - runMode := state.mode - - switch runMode { - case finalize: - arc.unify(c, needs, attemptOnly, true) // to set scalar - s.freeze(needs) - case attemptOnly: - arc.unify(c, needs, attemptOnly, true) // to set scalar - - case yield: - arc.unify(c, needs, runMode, true) // to set scalar - - evaluating := arc.status == evaluating - - // We cannot resolve a value that represents an unresolved - // disjunction. 
- if evaluating && orig != arc && arc.IsDisjunct { - task := c.current() - if origNeeds == scalarKnown && !orig.state.meets(scalarKnown) { - orig.state.defaultAttemptInCycle = task.node.node - task.waitFor(&orig.state.scheduler, needs) - s.yield() - panic("unreachable") - } - err := c.Newf("unresolved disjunction: %v", x) - b := &Bottom{Code: CycleError, Err: err} - return b, c.ci - } + origNeeds := state.condition + needs := origNeeds | arcTypeKnown + runMode := state.mode + + switch runMode { + case finalize: + arc.unify(c, Flags{condition: needs, mode: attemptOnly, checkTypos: true}) // to set scalar + s.freeze(needs) + case attemptOnly: + arc.unify(c, Flags{condition: needs, mode: attemptOnly, checkTypos: true}) // to set scalar - hasCycleBreakingValue := s.hasFieldValue || - !isCyclePlaceholder(arc.BaseValue) + case yield: + arc.unify(c, Flags{condition: needs, mode: runMode, checkTypos: true}) // to set scalar + + evaluating := arc.status == evaluating + if state.concrete && orig != arc && orig.state != nil && orig.state.meets(scalarKnown) && IsRecursivelyConcrete(arc) { + evaluating = false + } - if evaluating && !hasCycleBreakingValue { - err := c.Newf("cycle with field: %v", x) - b := &Bottom{Code: CycleError, Err: err} - c.AddBottom(b) - break + // We cannot resolve a value that represents an unresolved + // disjunction. 
+ if evaluating && orig != arc && arc.IsDisjunct { + task := c.current() + if origNeeds == scalarKnown && !orig.state.meets(scalarKnown) { + orig.state.defaultAttemptInCycle = task.node.node + task.waitFor(&orig.state.scheduler, needs) + s.yield() + panic("unreachable") } + err := c.Newf("unresolved disjunction: %v", x) + b := &Bottom{Code: CycleError, Err: err} + return b, c.ci + } - v := c.evaluate(arc, x, state) + hasCycleBreakingValue := s.hasFieldValue || + !isCyclePlaceholder(arc.BaseValue) - return v, c.ci + if evaluating && !hasCycleBreakingValue { + err := c.Newf("cycle with field: %v", x) + b := &Bottom{Code: CycleError, Err: err} + c.AddBottom(b) + break } + + v := c.evaluate(arc, x, state) + + return v, c.ci } } arc = arc.DerefValue() @@ -852,7 +848,7 @@ func (c *OpContext) wrapCycleError(src ast.Node, b *Bottom) *Bottom { // unifyNode returns a possibly partially evaluated node value. // // TODO: maybe return *Vertex, *Bottom -func (c *OpContext) unifyNode(expr Expr, state combinedFlags) (result Value) { +func (c *OpContext) unifyNode(expr Expr, state Flags) (result Value) { savedSrc := c.src c.src = expr.Source() err := c.errs @@ -871,7 +867,7 @@ func (c *OpContext) unifyNode(expr Expr, state combinedFlags) (result Value) { if b, ok := result.(*Bottom); ok { if c.src != nil && b.Code == CycleError && - b.Err.Position() == token.NoPos && + !b.Err.Position().IsValid() && len(b.Err.InputPositions()) == 0 { bb := *b bb.Err = errors.Wrapf(b.Err, c.src.Pos(), "") @@ -915,187 +911,35 @@ func (c *OpContext) unifyNode(expr Expr, state combinedFlags) (result Value) { // TODO: is this indirect necessary? // v = v.Indirect() - if c.isDevVersion() { - if n := v.getState(c); n != nil { - defer n.retainProcess().releaseProcess() - - // A lookup counts as new structure. See the commend in Section - // "Lookups in inline cycles" in cycle.go. - if !c.ci.IsCyclic || v.Label.IsLet() { - // TODO: fix! 
Setting this when we are not structure sharing can - // cause some hangs. We are conservative and not set this in - // this case, with the potential that some configurations will - // break. It is probably related to let. - n.hasNonCycle = true - } - - // Always yield to not get spurious errors. - n.process(arcTypeKnown, yield) - // It is possible that the node is only midway through - // evaluating a disjunction. In this case, we want to ensure - // that disjunctions are finalized, so that disjunction shows - // up in BaseValue. - if len(n.disjuncts) > 0 { - n.node.unify(c, arcTypeKnown, yield, false) - } + if n := v.getState(c); n != nil { + defer n.retainProcess().releaseProcess() + + // A lookup counts as new structure. See the commend in Section + // "Lookups in inline cycles" in cycle.go. + if !c.ci.IsCyclic() || v.Label.IsLet() { + // TODO: fix! Setting this when we are not structure sharing can + // cause some hangs. We are conservative and not set this in + // this case, with the potential that some configurations will + // break. It is probably related to let. + n.hasNonCycle = true } - } else { - if v.isUndefined() || state.status > v.Status() { - c.unify(v, state) + + // Always yield to not get spurious errors. + n.process(arcTypeKnown, yield) + // It is possible that the node is only midway through + // evaluating a disjunction. In this case, we want to ensure + // that disjunctions are finalized, so that disjunction shows + // up in BaseValue. + if len(n.disjuncts) > 0 { + n.node.unify(c, Flags{condition: arcTypeKnown, mode: yield, checkTypos: false}) } } return v } -func (c *OpContext) lookup(x *Vertex, pos token.Pos, l Feature, flags combinedFlags) *Vertex { - if c.isDevVersion() { - return x.lookup(c, pos, l, flags) - } - - state := flags.status - - if l == InvalidLabel || x == nil { - // TODO: is it possible to have an invalid label here? Maybe through the - // API? 
- return &Vertex{} - } - - // var kind Kind - // if x.BaseValue != nil { - // kind = x.BaseValue.Kind() - // } - - switch x.BaseValue.(type) { - case *StructMarker: - if l.Typ() == IntLabel { - c.addErrf(0, pos, "invalid struct selector %v (type int)", l) - return nil - } - - case *ListMarker: - switch { - case l.Typ() == IntLabel: - switch { - case l.Index() < 0: - c.addErrf(0, pos, "invalid list index %v (index must be non-negative)", l) - return nil - case l.Index() > len(x.Arcs): - c.addErrf(0, pos, "invalid list index %v (out of bounds)", l) - return nil - } - - case l.IsDef(), l.IsHidden(), l.IsLet(): - - default: - c.addErrf(0, pos, "invalid list index %v (type string)", l) - return nil - } - - case nil: - // c.addErrf(IncompleteError, pos, "incomplete value %s", x) - // return nil - - case *Bottom: - - default: - kind := x.BaseValue.Kind() - if kind&(ListKind|StructKind) != 0 { - // c.addErrf(IncompleteError, pos, - // "cannot look up %s in incomplete type %s (type %s)", - // l, x.Source(), kind) - // return nil - } else if !l.IsDef() && !l.IsHidden() && !l.IsLet() { - c.addErrf(0, pos, - "invalid selector %v for value of type %s", l, kind) - return nil - } - } - - a := x.Lookup(l) - - var hasCycle bool - - if a != nil { - // Ensure that a's status is at least of the required level. Otherwise, - // ensure that any remaining unprocessed conjuncts are processed by - // calling c.Unify(a, Partial). The ensures that need to rely on - // hasAllConjuncts, but that are finalized too early, get conjuncts - // processed beforehand. - if state > a.status { - c.unify(a, combinedFlags{ - status: state, - }) - } else if a.state != nil { - c.unify(a, combinedFlags{ - status: partial, - }) - } - - // TODO(refRequired): see comment in unify.go:Vertex.lookup near the - // namesake TODO. 
- if a.ArcType == ArcOptional { - code := IncompleteError - if hasCycle { - code = CycleError - } - label := l.SelectorString(c.Runtime) - c.AddBottom(&Bottom{ - Code: code, - Permanent: x.status >= conjuncts, - Err: c.NewPosf(pos, - "cannot reference optional field: %s", label), - Node: x, - }) - } - } else { - if x.state != nil { - x.state.assertInitialized() - - for _, e := range x.state.exprs { - if isCyclePlaceholder(e.err) { - hasCycle = true - } - } - } - code := IncompleteError - // As long as we have incomplete information, we cannot mark the - // inability to look up a field as "final", as it may resolve down the - // line. - permanent := x.status >= conjuncts - if m, _ := x.BaseValue.(*ListMarker); m != nil && !m.IsOpen { - permanent = true - } - if (state > partial || permanent) && !x.Accept(c, l) { - code = 0 - } else if hasCycle { - code = CycleError - } - // TODO: if the struct was a literal struct, we can also treat it as - // closed and make this a permanent error. - label := l.SelectorString(c.Runtime) - - // TODO(errors): add path reference and make message - // "undefined field %s in %s" - var err *ValueError - switch { - case isCyclePlaceholder(x.BaseValue): - err = c.NewPosf(pos, "cycle error referencing %s", label) - permanent = false - case l.IsInt(): - err = c.NewPosf(pos, "index out of range [%d] with length %d", - l.Index(), len(x.Elems())) - default: - err = c.NewPosf(pos, "undefined field: %s", label) - } - c.AddBottom(&Bottom{ - Code: code, - Permanent: permanent, - Err: err, - Node: x, - }) - } - return a +func (c *OpContext) lookup(x *Vertex, pos token.Pos, l Feature, flags Flags) *Vertex { + return x.lookup(c, pos, l, flags) } func (c *OpContext) undefinedFieldError(v *Vertex, code ErrorCode) { @@ -1112,7 +956,7 @@ func (c *OpContext) typeError(v Value, k Kind) { return } if !IsConcrete(v) && v.Kind()&k != 0 { - c.addErrf(IncompleteError, pos(v), "incomplete %s: %s", k, v) + c.addErrf(IncompleteError, Pos(v), "incomplete %s: %s", k, 
v) } else { c.AddErrf("cannot use %s (type %s) as type %s", v, v.Kind(), k) } @@ -1127,7 +971,7 @@ func (c *OpContext) typeErrorAs(v Value, k Kind, as interface{}) { return } if !IsConcrete(v) && v.Kind()&k != 0 { - c.addErrf(IncompleteError, pos(v), + c.addErrf(IncompleteError, Pos(v), "incomplete %s in %v: %s", k, as, v) } else { c.AddErrf("cannot use %s (type %s) as type %s in %v", v, v.Kind(), k, as) @@ -1136,15 +980,8 @@ func (c *OpContext) typeErrorAs(v Value, k Kind, as interface{}) { var emptyNode = &Vertex{status: finalized} -func pos(x Node) token.Pos { - if x.Source() == nil { - return token.NoPos - } - return x.Source().Pos() -} - // node is called by SelectorExpr.resolve and IndexExpr.resolve. -func (c *OpContext) node(orig Node, x Expr, scalar bool, state combinedFlags) *Vertex { +func (c *OpContext) node(orig Node, x Expr, scalar bool, state Flags) *Vertex { // Do not treat inline structs as closed by default if within a schema. // See comment at top of scheduleVertexConjuncts. if _, ok := x.(Resolver); !ok { @@ -1181,7 +1018,7 @@ func (c *OpContext) node(orig Node, x Expr, scalar bool, state combinedFlags) *V switch nv := v.(type) { case nil: - c.addErrf(IncompleteError, pos(x), + c.addErrf(IncompleteError, Pos(x), "%s undefined (%s is incomplete)", orig, x) return emptyNode @@ -1200,17 +1037,17 @@ func (c *OpContext) node(orig Node, x Expr, scalar bool, state combinedFlags) *V // while traversing values. Not evaluating the node here could lead // to a lookup in an unevaluated node, resulting in erroneously failing // lookups. - if c.isDevVersion() && nv.nonRooted { + if nv.nonRooted { nv.CompleteArcsOnly(c) } default: if kind := v.Kind(); kind&StructKind != 0 { - c.addErrf(IncompleteError, pos(x), + c.addErrf(IncompleteError, Pos(x), "%s undefined as %s is incomplete (type %s)", orig, x, kind) return emptyNode } else if !ok { - c.addErrf(0, pos(x), // TODO(error): better message. + c.addErrf(0, Pos(x), // TODO(error): better message. 
"invalid operand %s (found %s, want list or struct)", x.Source(), v.Kind()) return emptyNode @@ -1221,14 +1058,14 @@ func (c *OpContext) node(orig Node, x Expr, scalar bool, state combinedFlags) *V } // Elems returns the evaluated elements of a list. -func (c *OpContext) Elems(v Value) []*Vertex { +func (c *OpContext) Elems(v Value) iter.Seq[*Vertex] { list := c.list(v) list.Finalize(c) return list.Elems() } // RawElems returns the elements of the list without evaluating them. -func (c *OpContext) RawElems(v Value) []*Vertex { +func (c *OpContext) RawElems(v Value) iter.Seq[*Vertex] { list := c.list(v) return list.Elems() } @@ -1247,16 +1084,6 @@ func (c *OpContext) list(v Value) *Vertex { return x } -func (c *OpContext) scalar(v Value) Value { - v = Unwrap(v) - switch v.(type) { - case *Null, *Bool, *Num, *String, *Bytes: - default: - c.typeError(v, ScalarKinds) - } - return v -} - var zero = &Num{K: NumberKind} func (c *OpContext) Num(v Value, as interface{}) *Num { @@ -1389,8 +1216,7 @@ func (c *OpContext) toStringValue(v Value, k Kind, as interface{}) string { } func bytesToString(b []byte) string { - b, _ = unicode.UTF8.NewDecoder().Bytes(b) - return string(b) + return strings.ToValidUTF8(string(b), string(utf8.RuneError)) } func (c *OpContext) bytesValue(v Value, as interface{}) []byte { @@ -1408,45 +1234,40 @@ func (c *OpContext) bytesValue(v Value, as interface{}) []byte { var matchNone = regexp.MustCompile("^$") +// regexpCache caches compiled regular expressions by pattern string. +// Uses weak references so unused patterns can be garbage collected. +var regexpCache = newMemoizer(func(pattern string) (*regexp.Regexp, error) { + // TODO(mvdan): consider simplifying patterns (which regexp/syntax can do) + // before we look up or insert on the weak map? so that e.g. fo[o] bar{1} and + // foo bar share the same entry. 
+ return regexp.Compile(pattern) +}) + +// cachedRegexp returns a compiled regexp for the given pattern, using a shared +// cache to avoid recompilation and enable thread-safe access. +// + func (c *OpContext) regexp(v Value) *regexp.Regexp { v = Unwrap(v) if isError(v) { return matchNone } + var pattern string switch x := v.(type) { case *String: - if x.RE != nil { - return x.RE - } - // TODO: synchronization - p, err := regexp.Compile(x.Str) - if err != nil { - // FatalError? How to cache error - c.AddErrf("invalid regexp: %s", err) - x.RE = matchNone - } else { - x.RE = p - } - return x.RE - + pattern = x.Str case *Bytes: - if x.RE != nil { - return x.RE - } - // TODO: synchronization - p, err := regexp.Compile(string(x.B)) - if err != nil { - c.AddErrf("invalid regexp: %s", err) - x.RE = matchNone - } else { - x.RE = p - } - return x.RE - + pattern = string(x.B) default: c.typeError(v, StringKind|BytesKind) return matchNone } + re, err := regexpCache.get(pattern) + if err != nil { + c.AddErrf("invalid regexp: %s", err) + return matchNone + } + return re } // newNum creates a new number of the given kind. It reports an error value @@ -1480,10 +1301,26 @@ func (c *OpContext) newBytes(b []byte) Value { return &Bytes{Src: c.src, B: b} } -func (c *OpContext) newBool(b bool) Value { +var ( + StaticBoolFalse = &Bool{B: false} + StaticBoolTrue = &Bool{B: true} +) + +func (c *OpContext) NewBool(b bool) Value { if c.HasErr() { return c.Err() } + // Creating boolean values is a very common operation, + // such as when evaluating unary and binary operators. + // A significant portion of the time, no source is attached + // to the operation, so we can reuse Bool allocations. 
+ if c.src == nil { + if b { + return StaticBoolTrue + } else { + return StaticBoolFalse + } + } return &Bool{Src: c.src, B: b} } @@ -1500,15 +1337,31 @@ func (c *OpContext) String(x Node) string { return c.Format(c.Runtime, x) } -type stringerFunc func() string +// Formatter wraps an adt.Node with the necessary information to print it. +// +// TODO: we could eliminate the need for this by ensuring that errors are +// _always_ formatted with a printer. We are not far off from this goal, but +// we need to verify several things. +// This is mainly possible because we intend to have a global string index +// using weak references. It also assumes that errors are always printed +// equally. +type Formatter struct { + X Node + + // F formats Node, resolving references as needed.using Runtime. + // TODO: only used for cases where the debug printer is somehow + // circumvented. Verify this no longer happens. + F func(Runtime, Node) string + + // TODO: is runtime needed? Probably not if we have a global string index. + R Runtime +} -func (f stringerFunc) String() string { return f() } +func (f Formatter) String() string { return f.F(f.R, f.X) } // Str reports a string of x via a [fmt.Stringer], for use in errors or debugging. func (c *OpContext) Str(x Node) fmt.Stringer { - return stringerFunc(func() string { - return c.String(x) - }) + return Formatter{X: x, F: c.Format, R: c.Runtime} } // NewList returns a new list for the given values. diff --git a/vendor/cuelang.org/go/internal/core/adt/cycle.go b/vendor/cuelang.org/go/internal/core/adt/cycle.go index ad7e3a8819..28a6ca70c4 100644 --- a/vendor/cuelang.org/go/internal/core/adt/cycle.go +++ b/vendor/cuelang.org/go/internal/core/adt/cycle.go @@ -374,201 +374,6 @@ package adt // - treatment of let fields // - tighter termination for some mutual cycles in optional conjuncts. -// DEPRECATED: V2 cycle detection. -// -// TODO(evalv3): remove these comments once we have fully moved to V3. 
-// - -// Cycle detection: -// -// - Current algorithm does not allow for early non-cyclic conjunct detection. -// - Record possibly cyclic references. -// - Mark as cyclic if no evidence is found. -// - Note that this also activates the same reference in other (parent) conjuncts. - -// CYCLE DETECTION ALGORITHM -// -// BACKGROUND -// -// The cycle detection is inspired by the cycle detection used by Tomabechi's -// [Tomabechi COLING 1992] and Van Lohuizen's [Van Lohuizen ACL 2000] graph -// unification algorithms. -// -// Unlike with traditional graph unification, however, CUE uses references, -// which, unlike node equivalence, are unidirectional. This means that the -// technique to track equivalence through dereference, as common in graph -// unification algorithms like Tomabechi's, does not work unaltered. -// -// The unidirectional nature of references imply that each reference equates a -// facsimile of the value it points to. This renders the original approach of -// node-pointer equivalence useless. -// -// -// PRINCIPLE OF ALGORITHM -// -// The solution for CUE is based on two observations: -// -// - the CUE algorithm tracks all conjuncts that define a node separately, - -// accumulating used references on a per-conjunct basis causes duplicate -// references to uniquely identify cycles. -// -// A structural cycle, as defined by the spec, can then be detected if all -// conjuncts are marked as a cycle. -// -// References are accumulated as follows: -// -// 1. If a conjunct is a reference the reference is associated with that -// conjunct as well as the conjunct corresponding to the value it refers to. -// 2. If a conjunct is a struct (including lists), its references are associated -// with all embedded values and fields. -// -// To narrow down the specifics of the reference-based cycle detection, let us -// explore structural cycles in a bit more detail. 
-// -// -// STRUCTURAL CYCLES -// -// See the language specification for a higher-level and more complete overview. -// -// We have to define when a cycle is detected. CUE implementations MUST report -// an error upon a structural cycle, and SHOULD report cycles at the shortest -// possible paths at which they occur, but MAY report these at deeper paths. For -// instance, the following CUE has a structural cycle -// -// f: g: f -// -// The shortest path at which the cycle can be reported is f.g, but as all -// failed configurations are logically equal, it is fine for implementations to -// report them at f.g.g, for instance. -// -// It is not, however, correct to assume that a reference to a parent is always -// a cycle. Consider this case: -// -// a: [string]: b: a -// -// Even though reference `a` refers to a parent node, the cycle needs to be fed -// by a concrete field in struct `a` to persist, meaning it cannot result in a -// cycle as defined in the spec as it is defined here. Note however, that a -// specialization of this configuration _can_ result in a cycle. Consider -// -// a: [string]: b: a -// a: c: _ -// -// Here reference `a` is guaranteed to result in a structural cycle, as field -// `c` will match the pattern constraint unconditionally. -// -// In other words, it is not possible to exclude tracking references across -// pattern constraints from cycle checking. -// -// It is tempting to try to find a complete set of these edge cases with the aim -// to statically determine cases in which this occurs. But as [Carpenter 1992] -// demonstrates, it is possible for cycles to be created as a result of unifying -// two graphs that are themselves acyclic. 
The following example is a -// translation of Carpenters example to CUE: -// -// y: { -// f: h: g -// g: _ -// } -// x: { -// f: _ -// g: f -// } -// -// Even though the above contains no cycles, the result of `x & y` is cyclic: -// -// f: h: g -// g: f -// -// This means that, in practice, cycle detection has at least partially a -// dynamic component to it. -// -// -// ABSTRACT ALGORITHM -// -// The algorithm is described declaratively by defining what it means for a -// field to have a structural cycle. In the below, a _reference_ is uniquely -// identified by the pointer identity of a Go Resolver instance. -// -// Cycles are tracked on a per-conjunct basis and are not aggregated per Vertex: -// administrative information is only passed on from parent to child conjunct. -// -// A conjunct is a _parent_ of another conjunct if is a conjunct of one of the -// non-optional fields of the conjunct. For instance, conjunct `x` with value -// `{b: y & z}`, is a parent of conjunct `y` as well as `z`. Within field `b`, -// the conjuncts `y` and `z` would be tracked individually, though. -// -// A conjunct is _associated with a reference_ if its value was obtained by -// evaluating a reference. Note that a conjunct may be associated with many -// references if its evaluation requires evaluating a chain of references. For -// instance, consider -// -// a: {x: d} -// b: a -// c: b & e -// d: y: 1 -// -// the first conjunct of field `c` (reference `b`) has the value `{x: y: 1}` and -// is associated with references `b` and `a`. -// -// The _tracked references_ of a conjunct are all references that are associated -// with it or any of its ancestors (parents of parents). For instance, the -// tracked references of conjunct `b.x` of field `c.x` are `a`, `b`, and `d`. -// -// A conjunct is a violating cycle if it is a reference that: -// - occurs in the tracked references of the conjunct, or -// - directly refers to a parent node of the conjunct. 
-// -// A conjunct is cyclic if it is a violating cycle or if any of its ancestors -// are a violating cycle. -// -// A field has a structural cycle if it is composed of at least one conjunct -// that is a violating cycle and no conjunct that is not cyclic. -// -// Note that a field can be composed of only cyclic conjuncts while still not be -// structural cycle: as long as there are no conjuncts that are a violating -// cycle, it is not a structural cycle. This is important for the following -// case: -// -// a: [string]: b: a -// x: a -// x: c: b: c: {} -// -// Here, reference `a` is never a cycle as the recursive references crosses a -// pattern constraint that only instantiates if it is unified with something -// else. -// -// -// DISCUSSION -// -// The goal of conjunct cycle marking algorithm is twofold: - mark conjuncts -// that are proven to propagate indefinitely - mark them as early as possible -// (shortest CUE path). -// -// TODO: Prove all cyclic conjuncts will eventually be marked as cyclic. -// -// TODO: -// - reference marks whether it crosses a pattern, improving the case -// a: [string]: b: c: b -// This requires a compile-time detection mechanism. -// -// -// REFERENCES -// [Tomabechi COLING 1992]: https://aclanthology.org/C92-2068 -// Hideto Tomabechi. 1992. Quasi-Destructive Graph Unification with -// Structure-Sharing. In COLING 1992 Volume 2: The 14th International -// Conference on Computational Linguistics. -// -// [Van Lohuizen ACL 2000]: https://aclanthology.org/P00-1045/ -// Marcel P. van Lohuizen. 2000. "Memory-Efficient and Thread-Safe -// Quasi-Destructive Graph Unification". In Proceedings of the 38th Annual -// Meeting of the Association for Computational Linguistics, pages 352–359, -// Hong Kong. Association for Computational Linguistics. -// -// [Carpenter 1992]: -// Bob Carpenter, "The logic of typed feature structures." 
-// Cambridge University Press, ISBN:0-521-41932-8 - // TODO: mark references as crossing optional boundaries, rather than // approximating it during evaluation. @@ -577,20 +382,17 @@ type CycleInfo struct { // a cycle is detected and of which type. CycleType CyclicType - // IsCyclic indicates whether this conjunct, or any of its ancestors, - // had a violating cycle. - // TODO: make this a method and use CycleType == IsCyclic after V2 is removed. - IsCyclic bool - - // Inline is used to detect expressions referencing themselves, for instance: - // {x: out, out: x}.out - Inline bool - // TODO(perf): pack this in with CloseInfo. Make an uint32 pointing into // a buffer maintained in OpContext, using a mark-release mechanism. Refs *RefNode } +// IsCyclic indicates whether this conjunct, or any of its ancestors, +// had a violating cycle. +func (ci CycleInfo) IsCyclic() bool { + return ci.CycleType == IsCyclic +} + // A RefNode is a linked list of associated references. type RefNode struct { Ref Resolver @@ -627,7 +429,7 @@ type cyclicConjunct struct { arc *Vertex // cached Vertex } -// CycleType indicates the type of cycle detected. The CyclicType is associated +// CyclicType indicates the type of cycle detected. The CyclicType is associated // with a conjunct and may only increase in value for child conjuncts. type CyclicType uint8 @@ -646,7 +448,7 @@ const ( IsCyclic ) -func (n *nodeContext) detectCycleV3(arc *Vertex, env *Environment, x Resolver, ci CloseInfo) (_ CloseInfo, skip bool) { +func (n *nodeContext) detectCycle(arc *Vertex, env *Environment, x Resolver, ci CloseInfo) (_ CloseInfo, skip bool) { n.assertInitialized() // If we are pointing to a direct ancestor, and we are in an optional arc, @@ -654,8 +456,8 @@ func (n *nodeContext) detectCycleV3(arc *Vertex, env *Environment, x Resolver, c // is okay. If we are pointing to a direct ancestor in a non-optional arc, // we also can terminate, as this is a structural cycle. 
// TODO: use depth or check direct ancestry. - if n.hasAncestorV3(arc) { - return n.markCyclicV3(arc, env, x, ci) + if n.hasAncestor(arc) { + return n.markCyclic(arc, env, x, ci) } // As long as a node-wide cycle has not yet been detected, we allow cycles @@ -695,10 +497,22 @@ func (n *nodeContext) detectCycleV3(arc *Vertex, env *Environment, x Resolver, c return ci, false } - return n.markCyclicPathV3(arc, env, x, ci) + return n.markCyclicPath(arc, env, x, ci) } - if equalDeref(r.Node, n.node) && r.Ref == x && arc.nonRooted { - return n.markCyclicPathV3(arc, env, x, ci) + if r.Ref == x && arc.nonRooted { + if equalDeref(r.Node, n.node) { + return n.markCyclicPath(arc, env, x, ci) + } + // Also detect cycles through StructLit inline vertices + // (e.g. {a}.b), where each evaluation creates a fresh + // vertex that prevents matching by identity above. + // We identify these as IsDynamic with no Parent (unlike + // let vertices which have Parent set). + if p := arc.Parent; r.Arc.nonRooted && + p != nil && p.IsDynamic && p.Parent == nil && + p.state != nil && p.state.hasAncestorCycle { + return n.markCyclic(arc, env, x, ci) + } } } @@ -721,11 +535,10 @@ func (n *nodeContext) markNonCyclic(id CloseInfo) { } } -// markCyclicV3 marks a conjunct as being cyclic. Also, it postpones processing +// markCyclic marks a conjunct as being cyclic. Also, it postpones processing // the conjunct in the absence of evidence of a non-cyclic conjunct. 
-func (n *nodeContext) markCyclicV3(arc *Vertex, env *Environment, x Resolver, ci CloseInfo) (CloseInfo, bool) { +func (n *nodeContext) markCyclic(arc *Vertex, env *Environment, x Resolver, ci CloseInfo) (CloseInfo, bool) { ci.CycleType = IsCyclic - ci.IsCyclic = true n.hasAnyCyclicConjunct = true n.hasAncestorCycle = true @@ -740,9 +553,8 @@ func (n *nodeContext) markCyclicV3(arc *Vertex, env *Environment, x Resolver, ci return ci, false } -func (n *nodeContext) markCyclicPathV3(arc *Vertex, env *Environment, x Resolver, ci CloseInfo) (CloseInfo, bool) { +func (n *nodeContext) markCyclicPath(arc *Vertex, env *Environment, x Resolver, ci CloseInfo) (CloseInfo, bool) { ci.CycleType = IsCyclic - ci.IsCyclic = true n.hasAnyCyclicConjunct = true @@ -761,7 +573,7 @@ func (n *nodeContext) markCyclicPathV3(arc *Vertex, env *Environment, x Resolver // entirety, if present, to avoid getting unrelated data. func (c *OpContext) combineCycleInfo(ci CloseInfo) CloseInfo { cc := c.ci.CycleInfo - if cc.IsCyclic { + if cc.IsCyclic() { ci.CycleInfo = cc } return ci @@ -781,9 +593,9 @@ func (c *OpContext) hasDepthCycle(v *Vertex) bool { return false } -// hasAncestorV3 checks whether a node is currently being processed. The code +// hasAncestor checks whether a node is currently being processed. The code // still assumes that is includes any node that is currently being processed. -func (n *nodeContext) hasAncestorV3(arc *Vertex) bool { +func (n *nodeContext) hasAncestor(arc *Vertex) bool { if n.ctx.hasDepthCycle(arc) { return true } @@ -809,338 +621,21 @@ func (n *nodeContext) hasOnlyCyclicConjuncts() bool { (n.hasAnyCyclicConjunct && !n.hasNonCyclic) } -// setOptionalV3 marks a conjunct as being optional. The nodeContext is +// setOptional marks a conjunct as being optional. The nodeContext is // currently unused, but allows for checks to be added and to add logging during // debugging. 
-func (c *CloseInfo) setOptionalV3(n *nodeContext) { +func (c *CloseInfo) setOptional(n *nodeContext) { _ = n // See comment. if c.CycleType == NoCycle { c.CycleType = IsOptional } } -// markCycle checks whether the reference x is cyclic. There are two cases: -// 1. it was previously used in this conjunct, and -// 2. it directly references a parent node. -// -// Other inputs: -// -// arc the reference to which x points -// env, ci the components of the Conjunct from which x originates -// -// A cyclic node is added to a queue for later processing if no evidence of a -// non-cyclic node has so far been found. updateCyclicStatus processes delayed -// nodes down the line once such evidence is found. -// -// If a cycle is the result of "inline" processing (an expression referencing -// itself), an error is reported immediately. -// -// It returns the CloseInfo with tracked cyclic conjuncts updated, and -// whether or not its processing should be skipped, which is the case either if -// the conjunct seems to be fully cyclic so far or if there is a valid reference -// cycle. -func (n *nodeContext) markCycle(arc *Vertex, env *Environment, x Resolver, ci CloseInfo) (_ CloseInfo, skip bool) { - unreachableForDev(n.ctx) - - n.assertInitialized() - - // TODO(perf): this optimization can work if we also check for any - // references pointing to arc within arc. This can be done with compiler - // support. With this optimization, almost all references could avoid cycle - // checking altogether! - // if arc.status == Finalized && arc.cyclicReferences == nil { - // return v, false - // } - - // Check whether the reference already occurred in the list, signaling - // a potential cycle. - found := false - depth := int32(0) - for r := ci.Refs; r != nil; r = r.Next { - if r.Ref != x { - // TODO(share): this is a bit of a hack. We really should implement - // (*Vertex).cyclicReferences for the new evaluator. 
However, - // implementing cyclicReferences is somewhat tricky, as it requires - // referenced nodes to be evaluated, which is a guarantee we may not - // want to give. Moreover, it seems we can find a simpler solution - // based on structure sharing. So punt on this solution for now. - if r.Arc != arc || !n.ctx.isDevVersion() { - continue - } - found = true - } - - // A reference that is within a graph that is being evaluated - // may repeat with a different arc and will point to a - // non-finalized arc. A repeating reference that points outside the - // graph will always be the same address. Hence, if this is a - // finalized arc with a different address, it resembles a reference that - // is included through a different path and is not a cycle. - if !equalDeref(r.Arc, arc) && arc.status == finalized { - continue - } - - // For dynamically created structs we mark this as an error. Otherwise - // there is only an error if we have visited the arc before. - if ci.Inline && (arc.IsDynamic || equalDeref(r.Arc, arc)) { - n.reportCycleError() - return ci, true - } - - // We have a reference cycle, as distinguished from a structural - // cycle. Reference cycles represent equality, and thus are equal - // to top. We can stop processing here. - // var nn1, nn2 *Vertex - // if u := r.Node.state.underlay; u != nil { - // nn1 = u.node - // } - // if u := n.node.state.underlay; u != nil { - // nn2 = u.node - // } - if equalDeref(r.Node, n.node) { - return ci, true - } - - depth = r.Depth - found = true - - // Mark all conjuncts of this Vertex that refer to the same node as - // cyclic. This is an extra safety measure to ensure that two conjuncts - // cannot work in tandom to circumvent a cycle. It also tightens - // structural cycle detection in some cases. Late detection of cycles - // can result in a lot of redundant work. - // - // TODO: this loop is not on a critical path, but it may be evaluated - // if it is worthy keeping at some point. 
- for i, c := range n.node.Conjuncts { - if c.CloseInfo.IsCyclic { - continue - } - for rr := c.CloseInfo.Refs; rr != nil; rr = rr.Next { - // TODO: Is it necessary to find another way to find - // "parent" conjuncts? This mechanism seems not entirely - // accurate. Maybe a pointer up to find the root and then - // "spread" downwards? - if r.Ref == x && equalDeref(r.Arc, rr.Arc) { - n.node.Conjuncts[i].CloseInfo.IsCyclic = true - break - } - } - } - - break - } - - if arc.state != nil { - if d := arc.state.evalDepth; d > 0 && d >= n.ctx.optionalMark { - arc.IsCyclic = true - } - } - - // The code in this switch statement registers structural cycles caught - // through EvaluatingArcs to the root of the cycle. This way, any node - // referencing this value can track these nodes early. This is mostly an - // optimization to shorten the path for which structural cycles are - // detected, which may be critical for performance. -outer: - switch arc.status { - case evaluatingArcs: // also Evaluating? - if arc.state.evalDepth < n.ctx.optionalMark { - break - } - - // The reference may already be there if we had no-cyclic structure - // invalidating the cycle. - for r := arc.cyclicReferences; r != nil; r = r.Next { - if r.Ref == x { - break outer - } - } - - arc.cyclicReferences = &RefNode{ - Arc: deref(arc), - Ref: x, - Next: arc.cyclicReferences, - } - - case finalized: - // Insert cyclic references from found arc, if any. - for r := arc.cyclicReferences; r != nil; r = r.Next { - if r.Ref == x { - // We have detected a cycle, with the only exception if arc is - // a disjunction, as evaluation always stops at unresolved - // disjunctions. 
- if _, ok := arc.BaseValue.(*Disjunction); !ok { - found = true - } - } - ci.Refs = &RefNode{ - Arc: deref(r.Arc), - Node: deref(n.node), - - Ref: x, - Next: ci.Refs, - Depth: n.depth, - } - } - } - - // NOTE: we need to add a tracked reference even if arc is not cyclic: it - // may still cause a cycle that does not refer to a parent node. For - // instance: - // - // y: [string]: b: y - // x: y - // x: c: x - // - // -> - // - in conjuncts - // - out conjuncts: these count for cycle detection. - // x: { - // [string]: <1: y> b: y - // c: x - // } - // x.c: { - // <1: y> b: y - // <2: x> y - // [string]: <3: x, y> b: y - // <2: x> c: x - // } - // x.c.b: { - // <1: y> y - // [string]: <4: y; Cyclic> b: y - // <3: x, y> b: y - // } - // x.c.b.b: { - // <3: x, y> y - // [string]: <5: x, y, Cyclic> b: y - // <4: y, Cyclic> y - // [string]: <5: x, y, Cyclic> b: y - // } - // x.c.c: { // structural cycle - // <3: x, y> b: y - // <2: x> x - // <6: x, Cyclic>: y - // [string]: <8: x, y; Cyclic> b: y - // <7: x, Cyclic>: c: x - // } - // x.c.c.b: { // structural cycle - // <3: x, y> y - // [string]: <3: x, y; Cyclic> b: y - // <8: x, y; Cyclic> y - // } - // -> - // x: [string]: b: y - // x: c: b: y - // x: c: [string]: b: y - // x: c: b: b: y - // x: c: b: [string]: b: y - // x: c: b: b: b: y - // .... // structural cycle 1 - // x: c: c: x // structural cycle 2 - // - // Note that in this example there is a structural cycle at x.c.c, but we - // would need go guarantee that cycle is detected before the algorithm - // descends into x.c.b. - if !found || depth != n.depth { - // Adding this in case there is a definite cycle is unnecessary, but - // gives somewhat better error messages. - // We also need to add the reference again if the depth differs, as - // the depth is used for tracking "new structure". 
- // var nn *Vertex - // if u := n.node.state.underlay; u != nil { - // nn = u.node - // } - ci.Refs = &RefNode{ - Arc: deref(arc), - Ref: x, - Node: deref(n.node), - Next: ci.Refs, - Depth: n.depth, - } - } - - if !found && arc.status != evaluatingArcs { - // No cycle. - return ci, false - } - - // TODO: consider if we should bail if a cycle is detected using this - // mechanism. Ultimately, especially when the old evaluator is removed - // and the status field purged, this should be used instead of the above. - // if !found && arc.state.evalDepth < n.ctx.optionalMark { - // // No cycle. - // return ci, false - // } - - alreadyCycle := ci.IsCyclic - ci.IsCyclic = true - - // TODO: depth might legitimately be 0 if it is a root vertex. - // In the worst case, this may lead to a spurious cycle. - // Fix this by ensuring the root vertex starts with a depth of 1, for - // instance. - if depth > 0 { - // Look for evidence of "new structure" to invalidate the cycle. - // This is done by checking for non-cyclic conjuncts between the - // current vertex up to the ancestor to which the reference points. - // Note that the cyclic conjunct may not be marked as such, so we - // look for at least one other non-cyclic conjunct if this is the case. - upCount := n.depth - depth - for p := n.node.Parent; p != nil; p = p.Parent { - if upCount--; upCount <= 0 { - break - } - a := p.Conjuncts - count := 0 - for _, c := range a { - count += getNonCyclicCount(c) - } - if !alreadyCycle { - count-- - } - if count > 0 { - return ci, false - } - } - } - - n.hasAnyCyclicConjunct = true - if !n.hasNonCycle && env != nil { - // TODO: investigate if we can get rid of cyclicConjuncts in the new - // evaluator. 
- v := Conjunct{env, x, ci} - n.cyclicConjuncts = append(n.cyclicConjuncts, cyclicConjunct{v, arc}) - return ci, true - } - - return ci, false -} - -func getNonCyclicCount(c Conjunct) int { - switch a, ok := c.x.(*ConjunctGroup); { - case ok: - count := 0 - for _, c := range *a { - count += getNonCyclicCount(c) - } - return count - - case !c.CloseInfo.IsCyclic: - return 1 - - default: - return 0 - } -} - -// updateCyclicStatusV3 looks for proof of non-cyclic conjuncts to override +// updateCyclicStatus looks for proof of non-cyclic conjuncts to override // a structural cycle. -func (n *nodeContext) updateCyclicStatusV3(c CloseInfo) { - if n.ctx.inDisjunct == 0 { - n.hasFieldValue = true - } - if !c.IsCyclic { +func (n *nodeContext) updateCyclicStatus(c CloseInfo) { + n.hasFieldValue = true + if !c.IsCyclic() { n.hasNonCycle = true for _, c := range n.cyclicConjuncts { ci := c.c.CloseInfo @@ -1154,21 +649,7 @@ func (n *nodeContext) updateCyclicStatusV3(c CloseInfo) { } } -// updateCyclicStatus looks for proof of non-cyclic conjuncts to override -// a structural cycle. 
-func (n *nodeContext) updateCyclicStatus(c CloseInfo) { - unreachableForDev(n.ctx) - - if !c.IsCyclic { - n.hasNonCycle = true - for _, c := range n.cyclicConjuncts { - n.addVertexConjuncts(c.c, c.arc, false) - } - n.cyclicConjuncts = n.cyclicConjuncts[:0] - } -} - -func assertStructuralCycleV3(n *nodeContext) bool { +func assertStructuralCycle(n *nodeContext) bool { n.cyclicConjuncts = n.cyclicConjuncts[:0] if n.hasOnlyCyclicConjuncts() { @@ -1178,14 +659,6 @@ func assertStructuralCycleV3(n *nodeContext) bool { return false } -func assertStructuralCycle(n *nodeContext) bool { - if n.hasAnyCyclicConjunct && !n.hasNonCycle { - n.reportCycleError() - return true - } - return false -} - func (n *nodeContext) reportCycleError() { b := &Bottom{ Code: StructuralCycleError, @@ -1209,8 +682,7 @@ func (n *nodeContext) reportCycleError() { func makeAnonymousConjunct(env *Environment, x Expr, refs *RefNode) Conjunct { return Conjunct{ env, x, CloseInfo{CycleInfo: CycleInfo{ - Inline: true, - Refs: refs, + Refs: refs, }}, } } @@ -1227,35 +699,6 @@ func (n *nodeContext) decDepth() { n.ctx.evalDepth-- } -// markOptional marks that we are about to process an "optional element" that -// allows errors. In these cases, structural cycles are not "terminal". -// -// Examples of such constructs are: -// -// Optional fields: -// -// a: b?: a -// -// Pattern constraints: -// -// a: [string]: a -// -// Disjunctions: -// -// a: b: null | a -// -// A call to markOptional should be paired with a call to unmarkOptional. -func (n *nodeContext) markOptional() (saved int) { - saved = n.ctx.evalDepth - n.ctx.optionalMark = n.ctx.evalDepth - return saved -} - -// See markOptional. -func (n *nodeContext) unmarkOptional(saved int) { - n.ctx.optionalMark = saved -} - // markDepth assigns the current evaluation depth to the receiving node. // Any previously assigned depth is saved and returned and should be restored // using unmarkDepth after processing n. 
diff --git a/vendor/cuelang.org/go/internal/core/adt/debug.go b/vendor/cuelang.org/go/internal/core/adt/debug.go index 93fe9cb38a..955b3c4d12 100644 --- a/vendor/cuelang.org/go/internal/core/adt/debug.go +++ b/vendor/cuelang.org/go/internal/core/adt/debug.go @@ -491,7 +491,7 @@ func indentOnNewline(w io.Writer, level int) { } func indent(w io.Writer, level int) { - for i := 0; i < level; i++ { + for range level { io.WriteString(w, " ") } } diff --git a/vendor/cuelang.org/go/internal/core/adt/default.go b/vendor/cuelang.org/go/internal/core/adt/default.go index d1acfe9274..fff73990a0 100644 --- a/vendor/cuelang.org/go/internal/core/adt/default.go +++ b/vendor/cuelang.org/go/internal/core/adt/default.go @@ -62,6 +62,14 @@ func (v *Vertex) Default() *Vertex { return v case 1: w = ToVertex(Default(d.Values[0])) + // If w already has conjuncts, return as-is to avoid race. + if w.Conjuncts != nil { + return w + } + // Make a copy before modifying to avoid racing on shared vertex. + x := *w + x.state = nil + w = &x default: x := *v x.state = nil @@ -71,19 +79,21 @@ func (v *Vertex) Default() *Vertex { NumDefaults: 0, } w = &x - w.Conjuncts = nil } - if w.Conjuncts == nil { - for _, c := range v.Conjuncts { - // TODO: preserve field information. - expr, _ := stripNonDefaults(c.Elem()) - w.Conjuncts = append(w.Conjuncts, MakeRootConjunct(c.Env, expr)) - } + // w is now a fresh copy, safe to modify without race. + w.Conjuncts = make([]Conjunct, 0, len(v.Conjuncts)) + for _, c := range v.Conjuncts { + node := stripNonDefaultsNode(c) + w.Conjuncts = append(w.Conjuncts, MakeRootConjunct(c.Env, node)) } return w case *ListMarker: + if !d.IsOpen { + // If the list is already closed, avoid the copies below. + return v + } m := *d m.IsOpen = false @@ -94,6 +104,10 @@ func (v *Vertex) Default() *Vertex { } } +// stripNonDefaults removes non-default values from disjunctions in the given +// expression. It returns the modified expression and whether any stripping +// occurred. 
For example, `*1 | 2 | 3` becomes `1`. +// // TODO: this should go: record preexpanded disjunctions in Vertex. func stripNonDefaults(elem Elem) (r Elem, stripped bool) { expr, ok := elem.(Expr) @@ -151,3 +165,54 @@ func stripNonDefaults(elem Elem) (r Elem, stripped bool) { return x, false } } + +// stripNonDefaultsNode is like stripNonDefaults but operates on a Conjunct +// and preserves field wrappers (Field, LetField, etc.) so that field metadata +// is retained when computing defaults. +func stripNonDefaultsNode(c Conjunct) Node { + switch x := c.x.(type) { + case *Field: + if expr, stripped := stripNonDefaults(x.Value); stripped { + f := *x + f.Value = expr.(Expr) + return &f + } + return x + case *LetField: + if expr, stripped := stripNonDefaults(x.Value); stripped { + f := *x + f.Value = expr.(Expr) + return &f + } + return x + case *BulkOptionalField: + if expr, stripped := stripNonDefaults(x.Value); stripped { + f := *x + f.Value = expr.(Expr) + return &f + } + return x + case *DynamicField: + if expr, stripped := stripNonDefaults(x.Value); stripped { + f := *x + f.Value = expr.(Expr) + return &f + } + return x + case *Ellipsis: + if x.Value == nil { + return x + } + if expr, stripped := stripNonDefaults(x.Value); stripped { + e := *x + e.Value = expr.(Expr) + return &e + } + return x + default: + if elem, stripped := stripNonDefaults(c.Elem()); stripped { + return elem + } + return c.x + } +} diff --git a/vendor/cuelang.org/go/internal/core/adt/defidtype_string.go b/vendor/cuelang.org/go/internal/core/adt/defidtype_string.go new file mode 100644 index 0000000000..a00577fa8b --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/defidtype_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=defIDType -linecomment"; DO NOT EDIT. + +package adt + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[defIDTypeUnknown-0] + _ = x[defEmbedding-1] + _ = x[defReference-2] + _ = x[defStruct-3] +} + +const _defIDType_name = "*EDS" + +var _defIDType_index = [...]uint8{0, 1, 2, 3, 4} + +func (i defIDType) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_defIDType_index)-1 { + return "defIDType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _defIDType_name[_defIDType_index[idx]:_defIDType_index[idx+1]] +} diff --git a/vendor/cuelang.org/go/internal/core/adt/disjunct.go b/vendor/cuelang.org/go/internal/core/adt/disjunct.go index 323c563634..ae96afc0aa 100644 --- a/vendor/cuelang.org/go/internal/core/adt/disjunct.go +++ b/vendor/cuelang.org/go/internal/core/adt/disjunct.go @@ -15,8 +15,6 @@ package adt import ( - "slices" - "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" ) @@ -89,371 +87,10 @@ type envDisjunct struct { cloneID CloseInfo holeID int - // fields for new evaluator - + // TODO: src is set but never used. It seems like it should be used + // in either [nodeContext.crossProduct] or [nodeContext.collectErrors]? src Node disjuncts []disjunct - - // fields for old evaluator - - expr *DisjunctionExpr - value *Disjunction - hasDefaults bool - - // These are used for book keeping, tracking whether any of the - // disjuncts marked with a default marker remains after unification. - // If no default is used, all other elements are treated as "maybeDefault". - // Otherwise, elements are treated as is. 
- parentDefaultUsed bool - childDefaultUsed bool -} - -func (n *nodeContext) addDisjunction(env *Environment, x *DisjunctionExpr, cloneID CloseInfo) { - - // TODO: precompute - numDefaults := 0 - for _, v := range x.Values { - isDef := v.Default // || n.hasDefaults(env, v.Val) - if isDef { - numDefaults++ - } - } - - n.disjunctions = append(n.disjunctions, envDisjunct{ - env: env, - cloneID: cloneID, - expr: x, - hasDefaults: numDefaults > 0, - }) -} - -func (n *nodeContext) addDisjunctionValue(env *Environment, x *Disjunction, cloneID CloseInfo) { - n.disjunctions = append(n.disjunctions, envDisjunct{ - env: env, - cloneID: cloneID, - value: x, - hasDefaults: x.HasDefaults, - }) - -} - -func (n *nodeContext) expandDisjuncts( - state vertexStatus, - parent *nodeContext, - parentMode defaultMode, // default mode of this disjunct - recursive, last bool) { - - unreachableForDev(n.ctx) - - n.ctx.stats.Disjuncts++ - - // refNode is used to collect cyclicReferences for all disjuncts to be - // passed up to the parent node. Note that because the node in the parent - // context is overwritten in the course of expanding disjunction to retain - // pointer identity, it is not possible to simply record the refNodes in the - // parent directly. - var refNode *RefNode - - node := n.node - defer func() { - n.node = node - }() - - for n.expandOne(partial) { - } - - // save node to snapShot in nodeContex - // save nodeContext. - - if recursive || len(n.disjunctions) > 0 { - n.snapshot = clone(n.node) - } else { - n.snapshot = n.node - } - - defaultOffset := len(n.usedDefault) - - switch { - default: // len(n.disjunctions) == 0 - m := *n - n.postDisjunct(state) - - switch { - case n.hasErr(): - // TODO: consider finalizing the node thusly: - // if recursive { - // n.node.Finalize(n.ctx) - // } - x := n.node - err, ok := x.BaseValue.(*Bottom) - if !ok { - err = n.getErr() - } - if err == nil { - // TODO(disjuncts): Is this always correct? 
Especially for partial - // evaluation it is okay for child errors to have incomplete errors. - // Perhaps introduce an Err() method. - err = x.ChildErrors - } - if err != nil { - parent.disjunctErrs = append(parent.disjunctErrs, err) - } - if recursive { - n.free() - } - return - } - - if recursive { - *n = m - nn := *n.node - n.result = &nn // XXX: n.result = snapshotVertex(n.node)? - n.node = n.result - n.disjuncts = append(n.disjuncts, n) - } - if n.node.BaseValue == nil { - n.setBaseValue(n.getValidators(state)) - } - - n.usedDefault = append(n.usedDefault, defaultInfo{ - parentMode: parentMode, - nestedMode: parentMode, - origMode: parentMode, - }) - - case len(n.disjunctions) > 0: - // Process full disjuncts to ensure that erroneous disjuncts are - // eliminated as early as possible. - state = finalized - - n.disjuncts = append(n.disjuncts, n) - - n.refCount++ - defer n.free() - - for i, d := range n.disjunctions { - a := n.disjuncts - n.disjuncts = n.buffer[:0] - n.buffer = a[:0] - - last := i+1 == len(n.disjunctions) - skipNonMonotonicChecks := i+1 < len(n.disjunctions) - if skipNonMonotonicChecks { - n.ctx.inDisjunct++ - } - - for _, dn := range a { - switch { - case d.expr != nil: - for _, v := range d.expr.Values { - cn := dn.clone() - *cn.node = *clone(dn.snapshot) - cn.node.state = cn - - c := MakeConjunct(d.env, v.Val, d.cloneID) - cn.addExprConjunct(c, state) - - newMode := mode(d.hasDefaults, v.Default) - - cn.expandDisjuncts(state, n, newMode, true, last) - - // Record the cyclicReferences of the conjunct in the - // parent list. - // TODO: avoid the copy. It should be okay to "steal" - // this list and avoid the copy. But this change is best - // done in a separate CL. 
- for r := n.node.cyclicReferences; r != nil; r = r.Next { - s := *r - s.Next = refNode - refNode = &s - } - } - - case d.value != nil: - for i, v := range d.value.Values { - cn := dn.clone() - *cn.node = *clone(dn.snapshot) - cn.node.state = cn - - cn.addValueConjunct(d.env, v, d.cloneID) - - newMode := mode(d.hasDefaults, i < d.value.NumDefaults) - - cn.expandDisjuncts(state, n, newMode, true, last) - - // See comment above. - for r := n.node.cyclicReferences; r != nil; r = r.Next { - s := *r - s.Next = refNode - refNode = &s - } - } - } - } - - if skipNonMonotonicChecks { - n.ctx.inDisjunct-- - } - - if len(n.disjuncts) == 0 { - n.makeError() - } - - if recursive || i > 0 { - for _, x := range a { - x.free() - } - } - - if len(n.disjuncts) == 0 { - break - } - } - - // Annotate disjunctions with whether any of the default disjunctions - // was used. - for _, d := range n.disjuncts { - for i, info := range d.usedDefault[defaultOffset:] { - if info.parentMode == isDefault { - n.disjunctions[i].parentDefaultUsed = true - } - if info.origMode == isDefault { - n.disjunctions[i].childDefaultUsed = true - } - } - } - - // Combine parent and child default markers, considering that a parent - // "notDefault" is treated as "maybeDefault" if none of the disjuncts - // marked as default remain. - // - // NOTE for a parent marked as "notDefault", a child is *never* - // considered as default. It may either be "not" or "maybe" default. - // - // The result for each disjunction is conjoined into a single value. - for _, d := range n.disjuncts { - m := maybeDefault - orig := maybeDefault - for i, info := range d.usedDefault[defaultOffset:] { - parent := info.parentMode - - used := n.disjunctions[i].parentDefaultUsed - childUsed := n.disjunctions[i].childDefaultUsed - hasDefaults := n.disjunctions[i].hasDefaults - - orig = combineDefault(orig, info.parentMode) - orig = combineDefault(orig, info.nestedMode) - - switch { - case childUsed: - // One of the children used a default. 
This is "normal" - // mode. This may also happen when we are in - // hasDefaults/notUsed mode. Consider - // - // ("a" | "b") & (*(*"a" | string) | string) - // - // Here the doubly nested default is called twice, once - // for "a" and then for "b", where the second resolves to - // not using a default. The first does, however, and on that - // basis the "ot default marker cannot be overridden. - m = combineDefault(m, info.parentMode) - m = combineDefault(m, info.origMode) - - case !hasDefaults, used: - m = combineDefault(m, info.parentMode) - m = combineDefault(m, info.nestedMode) - - case hasDefaults && !used: - Assertf(n.ctx, parent == notDefault, "unexpected default mode") - } - } - d.defaultMode = m - - d.usedDefault = d.usedDefault[:defaultOffset] - d.usedDefault = append(d.usedDefault, defaultInfo{ - parentMode: parentMode, - nestedMode: m, - origMode: orig, - }) - - } - - // TODO: this is an old trick that seems no longer necessary for the new - // implementation. Keep around until we finalize the semantics for - // defaults, though. The recursion of nested defaults is not entirely - // proper yet. - // - // A better approach, that avoids the need for recursion (semantically), - // would be to only consider default usage for one level, but then to - // also allow a default to be passed if only one value is remaining. - // This means that a nested subsumption would first have to be evaluated - // in isolation, however, to determine that it is not previous - // disjunctions that cause the disambiguation. - // - // HACK alert: this replaces the hack of the previous algorithm with a - // slightly less worse hack: instead of dropping the default info when - // the value was scalar before, we drop this information when there is - // only one disjunct, while not discarding hard defaults. TODO: a more - // principled approach would be to recognize that there is only one - // default at a point where this does not break commutativity. 
- // if len(n.disjuncts) == 1 && n.disjuncts[0].defaultMode != isDefault { - // n.disjuncts[0].defaultMode = maybeDefault - // } - } - - // Compare to root, but add to this one. - switch p := parent; { - case p != n: - p.disjunctErrs = append(p.disjunctErrs, n.disjunctErrs...) - n.disjunctErrs = n.disjunctErrs[:0] - - outer: - for _, d := range n.disjuncts { - for k, v := range p.disjuncts { - // As long as a vertex isn't finalized, it may be that potential - // errors are not yet detected. This may lead two structs that - // are identical except for closedness information, - // for instance, to appear identical. - if v.result.status < finalized || d.result.status < finalized { - break - } - // Even if a node is finalized, it may still have an - // "incomplete" component that may change down the line. - if !d.done() || !v.done() { - break - } - flags := CheckStructural - if last { - flags |= IgnoreOptional - } - if Equal(n.ctx, v.result, d.result, flags) { - m := maybeDefault - for _, u := range d.usedDefault { - m = combineDefault(m, u.nestedMode) - } - if m == isDefault { - p.disjuncts[k] = d - v.free() - } else { - d.free() - } - continue outer - } - } - - p.disjuncts = append(p.disjuncts, d) - } - - n.disjuncts = n.disjuncts[:0] - } - - // Record the refNodes in the parent. - for r := refNode; r != nil; { - next := r.Next - r.Next = parent.node.cyclicReferences - parent.node.cyclicReferences = r - r = next - } } func (n *nodeContext) makeError() { @@ -489,48 +126,6 @@ func mode(hasDefault, marked bool) defaultMode { return mode } -// clone makes a shallow copy of a Vertex. The purpose is to create different -// disjuncts from the same Vertex under computation. This allows the conjuncts -// of an arc to be reset to a previous position and the reuse of earlier -// computations. -// -// Notes: only Arcs need to be copied recursively. 
Either the arc is finalized -// and can be used as is, or Structs is assumed to not yet be computed at the -// time that a clone is needed and must be nil. Conjuncts no longer needed and -// can become nil. All other fields can be copied shallowly. -func clone(v *Vertex) *Vertex { - v2 := *v - v = &v2 - v.state = nil - if a := v.Arcs; len(a) > 0 { - v.Arcs = make([]*Vertex, len(a)) - for i, arc := range a { - switch arc.status { - case finalized: - v.Arcs[i] = arc - - case unprocessed: - a := *arc - v.Arcs[i] = &a - a.Conjuncts = slices.Clone(arc.Conjuncts) - - default: - a := *arc - a.state = arc.state.clone() - a.state.node = &a - a.state.snapshot = clone(arc) - v.Arcs[i] = &a - } - } - } - - if a := v.Structs; len(a) > 0 { - v.Structs = slices.Clone(a) - } - - return v -} - // Default rules from spec: // // U1: (v1, d1) & v2 => (v1&v2, d1&v2) @@ -579,25 +174,40 @@ func combineDefault(a, b defaultMode) defaultMode { // // TODO(perf): the set of errors is now computed during evaluation. Eventually, // this could be done lazily. 
-func (n *nodeContext) disjunctError() (errs errors.Error) { +func (n *nodeContext) disjunctError() errors.Error { ctx := n.ctx disjuncts := selectErrors(n.disjunctErrs) + var pos errors.Error + + if len(n.userErrs) > 0 { + pos = disjuncts + disjuncts = selectErrors(n.userErrs) + } if disjuncts == nil { - errs = ctx.Newf("empty disjunction") // XXX: add space to sort first - } else { - disjuncts = errors.Sanitize(disjuncts) - k := len(errors.Errors(disjuncts)) - if k == 1 { - return disjuncts + return ctx.Newf("empty disjunction") // XXX: add space to sort first + } + disjuncts = errors.Sanitize(disjuncts) + k := len(errors.Errors(disjuncts)) + if k == 1 { + if pos != nil { + addDisjunctPositions(disjuncts.(*ValueError), pos) } - // prefix '-' to sort to top - errs = ctx.Newf("%d errors in empty disjunction:", k) - errs = errors.Append(errs, disjuncts) + return disjuncts + } + // prefix '-' to sort to top + err := ctx.Newf("%d errors in empty disjunction:", k) + if pos != nil { + addDisjunctPositions(err, pos) } + return errors.Append(err, disjuncts) +} - return errs +func addDisjunctPositions(dst *ValueError, src errors.Error) { + for _, p := range src.InputPositions() { + dst.AddPos(p) + } } func selectErrors(a []*Bottom) (errs errors.Error) { diff --git a/vendor/cuelang.org/go/internal/core/adt/disjunct2.go b/vendor/cuelang.org/go/internal/core/adt/disjunct2.go index 702671e50c..86cd83cc81 100644 --- a/vendor/cuelang.org/go/internal/core/adt/disjunct2.go +++ b/vendor/cuelang.org/go/internal/core/adt/disjunct2.go @@ -14,7 +14,12 @@ package adt -import "slices" +import ( + "math" + "slices" + + "cuelang.org/go/internal/core/layer" +) // # Overview // @@ -201,8 +206,7 @@ type disjunct struct { expr Expr err *Bottom - isDefault bool - mode defaultMode + mode defaultMode } func (n *nodeContext) scheduleDisjunction(d envDisjunct) { @@ -284,7 +288,7 @@ func (n *nodeContext) processDisjunctions() *Bottom { // Slow path for processing all disjunctions. 
Do not use `range` in case // evaluation adds more disjunctions. - for i := 0; i < len(a); i++ { + for i := range a { d := &a[i] n.nextDisjunction(i, len(a), d.holeID) @@ -384,8 +388,8 @@ func (n *nodeContext) processDisjunctions() *Bottom { // crossProduct computes the cross product of the disjuncts of a disjunction // with an existing set of results. func (n *nodeContext) crossProduct(dst, cross []*nodeContext, dn *envDisjunct, mode runMode) []*nodeContext { + // TODO: do we still need this? removing it does not break any tests. defer n.unmarkDepth(n.markDepth()) - defer n.unmarkOptional(n.markOptional()) // TODO(perf): use a pre-allocated buffer in n.ctx. Note that the actual // buffer may grow and has a max size of len(cross) * len(dn.disjuncts). @@ -393,6 +397,7 @@ func (n *nodeContext) crossProduct(dst, cross []*nodeContext, dn *envDisjunct, m leftDropsDefault := true rightDropsDefault := true + priority, _ := Pos(dn.src).Priority() for i, p := range cross { ID := n.nextCrossProduct(i, len(cross), p) @@ -408,11 +413,21 @@ func (n *nodeContext) crossProduct(dst, cross []*nodeContext, dn *envDisjunct, m r, err := p.doDisjunct(c, d.mode, mode, n.node) if err != nil { + if err.Code == UserError { + n.userErrs = append(n.userErrs, err) + } // TODO: store more error context dn.disjuncts[j].err = err continue } + // Promote the priority of defaults. + r.origPriority = priority + r.priority = p.origPriority + if p.defaultMode == isDefault && p.origPriority > priority { + r.origPriority = priority + } + tmp = append(tmp, r) if p.defaultMode == isDefault || p.origDefaultMode == isDefault { leftDropsDefault = false @@ -428,6 +443,17 @@ func (n *nodeContext) crossProduct(dst, cross []*nodeContext, dn *envDisjunct, m // Unroll nested disjunctions. switch len(r.disjuncts) { case 0: + // If a default is not dropped in a higher priority, allow still to + // become a default, even if other other side has dropped defaults. 
+ if !rightDropsDefault && r.origDefaultMode == notDefault && + priority < r.priority { + r.origDefaultMode = maybeDefault + } + if !leftDropsDefault && r.defaultMode == notDefault && + priority > r.priority { + r.defaultMode = maybeDefault + } + r.defaultMode = combineDefault2(r.defaultMode, r.origDefaultMode, leftDropsDefault, rightDropsDefault) // r did not have a nested disjunction. dst = appendDisjunct(n.ctx, dst, r) @@ -475,22 +501,14 @@ func combineDefault2(a, b defaultMode, dropsDefaultA, dropsDefaultB bool) defaul // collectErrors collects errors from a failed disjunctions. func (n *nodeContext) collectErrors(dn *envDisjunct) (errs *Bottom) { code := EvalError - hasUserError := false for _, d := range dn.disjuncts { if b := d.err; b != nil { + if b.Code == UserError { + continue + } if b.Code > code { code = b.Code } - switch { - case b.Code == UserError: - if !hasUserError { - n.disjunctErrs = n.disjunctErrs[:0] - } - hasUserError = true - - case hasUserError: - continue - } n.disjunctErrs = append(n.disjunctErrs, b) } } @@ -550,7 +568,6 @@ func (n *nodeContext) doDisjunct(c Conjunct, m defaultMode, mode runMode, orig * defer n.ctx.popOverlay() d.runMode = mode - c.Env = oc.derefDisjunctsEnv(c.Env) v := d.node @@ -562,6 +579,9 @@ func (n *nodeContext) doDisjunct(c Conjunct, m defaultMode, mode runMode, orig * // a special mode, or evaluating more aggressively if finalize is not given. 
v.status = unprocessed + if m == isDefault { + c.CloseInfo.Priority, _ = Pos(c.x).Priority() + } d.scheduleConjunct(c, c.CloseInfo) oc.unlinkOverlay() @@ -569,7 +589,7 @@ func (n *nodeContext) doDisjunct(c Conjunct, m defaultMode, mode runMode, orig * d.defaultMode = n.defaultMode d.origDefaultMode = m - v.unify(n.ctx, allKnown, mode, true) + v.unify(n.ctx, Flags{condition: allKnown, mode: mode, checkTypos: true}) if err := d.getErrorAll(); err != nil && !isCyclePlaceholder(err) { d.freeDisjunct() @@ -596,7 +616,11 @@ func (n *nodeContext) finalizeDisjunctions() { x.node.Conjuncts = nil if b := x.getErr(); b != nil { - n.disjunctErrs = append(n.disjunctErrs, b) + if b.Code == UserError { + n.userErrs = append(n.userErrs, b) + } else { + n.disjunctErrs = append(n.disjunctErrs, b) + } numErrs++ continue } @@ -607,17 +631,32 @@ func (n *nodeContext) finalizeDisjunctions() { return } + // Determine highest priority and if the default exists. + hasDefaults := false + defaultPriority := layer.Priority(math.MinInt8) + for _, x := range n.disjuncts { + if x.defaultMode == isDefault { + hasDefaults = true + if x.origPriority > defaultPriority { + defaultPriority = x.origPriority + } + } + } + a := make([]Value, len(n.disjuncts)) p := 0 - hasDefaults := false for i, x := range n.disjuncts { + if x.origPriority < defaultPriority && x.defaultMode != isDefault { + x.defaultMode = notDefault + } + switch x.defaultMode { case isDefault: - a[i] = a[p] - a[p] = x.node - p++ - hasDefaults = true - + if x.origPriority == defaultPriority { + a[i] = a[p] + a[p] = x.node + p++ + } case notDefault: hasDefaults = true fallthrough @@ -822,24 +861,7 @@ outer: // TODO: this could be done much more cleanly if we are more deligent in early // evaluation. 
func isEqualNodeValue(x, y *nodeContext) bool { - xk := x.kind - yk := y.kind - - // If a node is mid evaluation, the kind might not be actual if the type is - // a struct, as whether a struct is a struct kind or an embedded type is - // determined later. This is just a limitation of the current - // implementation, we should update the kind more directly so that this code - // is not necessary. - // TODO: verify that this is still necessary and if so fix it so that this - // can be removed. - if x.aStruct != nil { - xk &= StructKind - } - if y.aStruct != nil { - yk &= StructKind - } - - if xk != yk { + if x.kind != y.kind { return false } if x.hasTop != y.hasTop { diff --git a/vendor/cuelang.org/go/internal/core/adt/equality.go b/vendor/cuelang.org/go/internal/core/adt/equality.go index 8ecbd11f36..5c64b9531f 100644 --- a/vendor/cuelang.org/go/internal/core/adt/equality.go +++ b/vendor/cuelang.org/go/internal/core/adt/equality.go @@ -90,9 +90,6 @@ func equalVertex(ctx *OpContext, x *Vertex, v Value, flags Flag) bool { if x.IsClosedList() != y.IsClosedList() { return false } - if !equalClosed(ctx, x, y, flags) { - return false - } } skipRegular := flags&RegularOnly != 0 @@ -141,40 +138,6 @@ loop2: return equalTerminal(ctx, v, w, flags) } -// equalClosed tests if x and y have the same set of close information. -// TODO: the following refinements are possible: -// - unify optional fields and equate the optional fields -// - do the same for pattern constraints, where the pattern constraints -// are collated by pattern equality. -// - a further refinement would collate patterns by ranges. -// -// For all these refinements it would be necessary to have well-working -// structure sharing so as to not repeatedly recompute optional arcs. 
-func equalClosed(ctx *OpContext, x, y *Vertex, flags Flag) bool { - return verifyStructs(x, y, flags) && verifyStructs(y, x, flags) -} - -func verifyStructs(x, y *Vertex, flags Flag) bool { -outer: - for _, s := range x.Structs { - if (flags&IgnoreOptional != 0) && !s.StructLit.HasOptional() { - continue - } - if s.span()&DefinitionSpan == 0 { - if !s.StructLit.HasOptional() { - continue - } - } - for _, t := range y.Structs { - if s.StructLit == t.StructLit { - continue outer - } - } - return false - } - return true -} - func equalTerminal(ctx *OpContext, v, w Value, flags Flag) bool { if v == w { return true @@ -191,10 +154,7 @@ func equalTerminal(ctx *OpContext, v, w Value, flags Flag) bool { return ok case *Num, *String, *Bool, *Bytes, *Null: - if b, ok := BinOp(ctx, errOnDiffType, EqualOp, v, w).(*Bool); ok { - return b.B - } - return false + return BinOpBool(ctx, errOnDiffType, EqualOp, v, w) // TODO: for the remainder we are dealing with non-concrete values, so we // could also just not bother. 
diff --git a/vendor/cuelang.org/go/internal/core/adt/errorcode_string.go b/vendor/cuelang.org/go/internal/core/adt/errorcode_string.go index fd6573a9a3..29030e64a4 100644 --- a/vendor/cuelang.org/go/internal/core/adt/errorcode_string.go +++ b/vendor/cuelang.org/go/internal/core/adt/errorcode_string.go @@ -21,8 +21,9 @@ const _ErrorCode_name = "evaluseruserstructural cycleincompletecycle" var _ErrorCode_index = [...]uint8{0, 4, 8, 12, 28, 38, 43} func (i ErrorCode) String() string { - if i < 0 || i >= ErrorCode(len(_ErrorCode_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_ErrorCode_index)-1 { return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")" } - return _ErrorCode_name[_ErrorCode_index[i]:_ErrorCode_index[i+1]] + return _ErrorCode_name[_ErrorCode_index[idx]:_ErrorCode_index[idx+1]] } diff --git a/vendor/cuelang.org/go/internal/core/adt/errors.go b/vendor/cuelang.org/go/internal/core/adt/errors.go index d15aa8340e..e3376e4d47 100644 --- a/vendor/cuelang.org/go/internal/core/adt/errors.go +++ b/vendor/cuelang.org/go/internal/core/adt/errors.go @@ -32,17 +32,20 @@ package adt // import ( + "slices" + "cuelang.org/go/cue/ast" "cuelang.org/go/cue/errors" cueformat "cuelang.org/go/cue/format" "cuelang.org/go/cue/token" + "cuelang.org/go/internal/iterutil" ) // ErrorCode indicates the type of error. The type of error may influence // control flow. No other aspects of an error may influence control flow. type ErrorCode int8 -//go:generate go run golang.org/x/tools/cmd/stringer -type=ErrorCode -linecomment +//go:generate go tool stringer -type=ErrorCode -linecomment const ( // An EvalError is a fatal evaluation error. 
@@ -99,9 +102,8 @@ type Bottom struct { Node *Vertex } -func (x *Bottom) Source() ast.Node { return x.Src } -func (x *Bottom) Kind() Kind { return BottomKind } -func (x *Bottom) Specialize(k Kind) Value { return x } // XXX remove +func (x *Bottom) Source() ast.Node { return x.Src } +func (x *Bottom) Kind() Kind { return BottomKind } func (b *Bottom) IsIncomplete() bool { if b == nil { @@ -212,7 +214,7 @@ func CombineErrors(src ast.Node, x, y Value) *Bottom { } } -func addPositions(err *ValueError, c Conjunct) { +func addPositions(ctx *OpContext, err *ValueError, c Conjunct) { switch x := c.x.(type) { case *Field: // if x.ArcType == ArcRequired { @@ -220,26 +222,24 @@ func addPositions(err *ValueError, c Conjunct) { // } case *ConjunctGroup: for _, c := range *x { - addPositions(err, c) + addPositions(ctx, err, c) } } - if c.CloseInfo.closeInfo != nil { - err.AddPosition(c.CloseInfo.location) - } + err.AddPos(c.CloseInfo.Location(ctx)) } -func NewRequiredNotPresentError(ctx *OpContext, v *Vertex) *Bottom { +func NewRequiredNotPresentError(ctx *OpContext, v *Vertex, morePositions ...Node) *Bottom { saved := ctx.PushArc(v) err := ctx.Newf("field is required but not present") - v.VisitLeafConjuncts(func(c Conjunct) bool { + for _, p := range morePositions { + err.AddPosition(p) + } + for c := range v.LeafConjuncts() { if f, ok := c.x.(*Field); ok && f.ArcType == ArcRequired { err.AddPosition(c.x) } - if c.CloseInfo.closeInfo != nil { - err.AddPosition(c.CloseInfo.location) - } - return true - }) + err.AddPos(c.CloseInfo.Location(ctx)) + } b := &Bottom{ Code: IncompleteError, @@ -253,10 +253,9 @@ func NewRequiredNotPresentError(ctx *OpContext, v *Vertex) *Bottom { func newRequiredFieldInComprehensionError(ctx *OpContext, x *ForClause, v *Vertex) *Bottom { err := ctx.Newf("missing required field in for comprehension: %v", v.Label) err.AddPosition(x.Src) - v.VisitLeafConjuncts(func(c Conjunct) bool { - addPositions(err, c) - return true - }) + for c := range 
v.LeafConjuncts() { + addPositions(ctx, err, c) + } return &Bottom{ Code: IncompleteError, Err: err, @@ -277,7 +276,10 @@ func (v *Vertex) reportFieldCycleError(c *OpContext, pos token.Pos, f Feature) * func (v *Vertex) reportFieldError(c *OpContext, pos token.Pos, f Feature, intMsg, stringMsg string) *Bottom { code := IncompleteError - if !v.Accept(c, f) { + // If v is an error, we need to adopt the worst error. + if b := v.Bottom(); b != nil && !isCyclePlaceholder(b) { + code = b.Code + } else if !v.Accept(c, f) { code = EvalError } @@ -285,7 +287,7 @@ func (v *Vertex) reportFieldError(c *OpContext, pos token.Pos, f Feature, intMsg var err errors.Error if f.IsInt() { - err = c.NewPosf(pos, intMsg, f.Index(), len(v.Elems())) + err = c.NewPosf(pos, intMsg, f.Index(), iterutil.Count(v.Elems())) } else { err = c.NewPosf(pos, stringMsg, label) } @@ -299,36 +301,60 @@ func (v *Vertex) reportFieldError(c *OpContext, pos token.Pos, f Feature, intMsg return b } -// A ValueError is returned as a result of evaluating a value. -type ValueError struct { +// baseError contains common fields and methods for error types. 
+type baseError struct { r Runtime v *Vertex pos token.Pos auxpos []token.Pos altPath []string - errors.Message } -func (v *ValueError) AddPosition(n Node) { - if n == nil { +func (e *baseError) AddPos(p token.Pos) { + if !p.IsValid() { return } - if p := pos(n); p != token.NoPos { - for _, q := range v.auxpos { - if p == q { - return - } - } - v.auxpos = append(v.auxpos, p) + if slices.Contains(e.auxpos, p) { + return } + e.auxpos = append(e.auxpos, p) } -func (v *ValueError) AddClosedPositions(c CloseInfo) { - for s := c.closeInfo; s != nil; s = s.parent { - if loc := s.location; loc != nil { - v.AddPosition(loc) - } +func (e *baseError) AddClosedPositions(ctx *OpContext, p posInfo) { + for n := range p.AncestorPositions(ctx) { + e.AddPos(n) + } +} + +func (e *baseError) Position() token.Pos { + return e.pos +} + +func (e *baseError) InputPositions() []token.Pos { + return e.auxpos +} + +func (e *baseError) Path() (a []string) { + if len(e.altPath) > 0 { + return e.altPath + } + if e.v == nil { + return nil } + for _, f := range appendPath(nil, e.v) { + a = append(a, f.SelectorString(e.r)) + } + return a +} + +// A ValueError is returned as a result of evaluating a value. +type ValueError struct { + baseError + errors.Message +} + +func (v *ValueError) AddPosition(n Node) { + v.AddPos(Pos(n)) } func (c *OpContext) errNode() *Vertex { @@ -356,14 +382,13 @@ func (c *OpContext) Newf(format string, args ...interface{}) *ValueError { } func appendNodePositions(a []token.Pos, n Node) []token.Pos { - if p := pos(n); p != token.NoPos { + if p := Pos(n); p.IsValid() { a = append(a, p) } if v, ok := n.(*Vertex); ok { - v.VisitLeafConjuncts(func(c Conjunct) bool { + for c := range v.LeafConjuncts() { a = appendNodePositions(a, c.Elem()) - return true - }) + } } return a } @@ -401,7 +426,7 @@ func (c *OpContext) NewPosf(p token.Pos, format string, args ...interface{}) *Va // level packages. This will allow the debug packages to be used // more widely. 
b, _ := cueformat.Node(x) - if p := x.Pos(); p != token.NoPos { + if p := x.Pos(); p.IsValid() { a = append(a, p) } args[i] = string(b) @@ -411,11 +436,13 @@ func (c *OpContext) NewPosf(p token.Pos, format string, args ...interface{}) *Va } return &ValueError{ - r: c.Runtime, - v: c.errNode(), - pos: p, - auxpos: a, - altPath: c.makeAltPath(), + baseError: baseError{ + r: c.Runtime, + v: c.errNode(), + pos: p, + auxpos: a, + altPath: c.makeAltPath(), + }, Message: errors.NewMessagef(format, args...), } } @@ -440,23 +467,25 @@ func (e *ValueError) Error() string { return errors.String(e) } -func (e *ValueError) Position() token.Pos { - return e.pos +// ConflictError defers formatting of conflict messages until the error is +// actually needed, avoiding expensive string conversions and allocations. +type ConflictError struct { + baseError + format func(Runtime, Node) string + v1, v2 Node + k1, k2 Kind } -func (e *ValueError) InputPositions() (a []token.Pos) { - return e.auxpos +func (e *ConflictError) Error() string { + return errors.String(e) } -func (e *ValueError) Path() (a []string) { - if len(e.altPath) > 0 { - return e.altPath +func (e *ConflictError) Msg() (format string, args []interface{}) { + v1Str := Formatter{X: e.v1, F: e.format, R: e.r} + v2Str := Formatter{X: e.v2, F: e.format, R: e.r} + if e.k1 == e.k2 { + return "conflicting values %s and %s", []interface{}{v1Str, v2Str} } - if e.v == nil { - return nil - } - for _, f := range appendPath(nil, e.v) { - a = append(a, f.SelectorString(e.r)) - } - return a + return "conflicting values %s and %s (mismatched types %s and %s)", + []interface{}{v1Str, v2Str, e.k1, e.k2} } diff --git a/vendor/cuelang.org/go/internal/core/adt/eval.go b/vendor/cuelang.org/go/internal/core/adt/eval.go index 897f10536c..44b47cbfd5 100644 --- a/vendor/cuelang.org/go/internal/core/adt/eval.go +++ b/vendor/cuelang.org/go/internal/core/adt/eval.go @@ -12,11 +12,6 @@ // See the License for the specific language governing permissions 
and // limitations under the License. -// Package eval contains the high level CUE evaluation strategy. -// -// CUE allows for a significant amount of freedom in order of evaluation due to -// the commutativity of the unification operation. This package implements one -// of the possible strategies. package adt // TODO: @@ -25,12 +20,12 @@ package adt // import ( - "fmt" + "slices" - "cuelang.org/go/cue/ast" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/stats" "cuelang.org/go/cue/token" + "cuelang.org/go/internal/core/layer" ) // TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO @@ -54,11 +49,6 @@ func (c *OpContext) Stats() *stats.Counts { // return e.NewContext(v) // } -var incompleteSentinel = &Bottom{ - Code: IncompleteError, - Err: errors.Newf(token.NoPos, "incomplete"), -} - // evaluate returns the evaluated value associated with v. It may return a // partial result. That is, if v was not yet unified, it may return a // concrete value that must be the result assuming the configuration has no @@ -71,7 +61,7 @@ var incompleteSentinel = &Bottom{ // error. // // TODO: return *Vertex -func (c *OpContext) evaluate(v *Vertex, r Resolver, state combinedFlags) Value { +func (c *OpContext) evaluate(v *Vertex, r Resolver, state Flags) Value { if v.isUndefined() { // Use node itself to allow for cycle detection. c.unify(v, state) @@ -135,17 +125,6 @@ func (c *OpContext) evaluate(v *Vertex, r Resolver, state combinedFlags) Value { return nil } - if v.status < finalized && v.state != nil && !c.isDevVersion() { - // TODO: errors are slightly better if we always add addNotify, but - // in this case it is less likely to cause a performance penalty. - // See https://cuelang.org/issue/661. It may be possible to - // relax this again once we have proper tests to prevent regressions of - // that issue. 
- if !v.state.done() || v.state.errs != nil { - v.state.addNotify(c.vertex) - } - } - return v } @@ -154,451 +133,8 @@ func (c *OpContext) evaluate(v *Vertex, r Resolver, state combinedFlags) Value { // state can be used to indicate to which extent processing should continue. // state == finalized means it is evaluated to completion. See vertexStatus // for more details. -func (c *OpContext) unify(v *Vertex, flags combinedFlags) { - if c.isDevVersion() { - requires, mode := flags.condition, flags.mode - v.unify(c, requires, mode, true) - return - } - - // defer c.PopVertex(c.PushVertex(v)) - if c.LogEval > 0 { - c.nest++ - c.Logf(v, "Unify") - defer func() { - c.Logf(v, "END Unify") - c.nest-- - }() - } - - // Ensure a node will always have a nodeContext after calling Unify if it is - // not yet Finalized. - n := v.getNodeContext(c, 1) - defer v.freeNode(n) - - state := flags.status - - // TODO(cycle): verify this happens in all cases when we need it. - if n != nil && v.Parent != nil && v.Parent.state != nil { - n.depth = v.Parent.state.depth + 1 - } - - if state <= v.Status() && - state == partial && - v.isDefined() && - n != nil && n.scalar != nil { - return - } - - switch v.Status() { - case evaluating: - n.insertConjuncts(state) - return - - case evaluatingArcs: - Assertf(c, v.status > unprocessed, "unexpected status %d", v.status) - return - - case 0: - if v.Label.IsDef() { - v.ClosedRecursive = true - } - - if v.Parent != nil { - if v.Parent.ClosedRecursive { - v.ClosedRecursive = true - } - } - - defer c.PopArc(c.PushArc(v)) - - v.updateStatus(evaluating) - - if p := v.Parent; p != nil && p.state != nil && v.Label.IsString() { - for _, s := range p.state.node.Structs { - if s.Disable { - continue - } - s.MatchAndInsert(n.ctx, v) - } - } - - c.stats.Unifications++ - - // Set the cache to a cycle error to ensure a cyclic reference will result - // in an error if applicable. A cyclic error may be ignored for - // non-expression references. 
The cycle error may also be removed as soon - // as there is evidence what a correct value must be, but before all - // validation has taken place. - // - // TODO(cycle): having a more recursive algorithm would make this - // special cycle handling unnecessary. - v.BaseValue = cycle - - if c.HasErr() { - n.addBottom(c.errs) - } - - // NOTE: safeguard against accidentally entering the 'unprocessed' state - // twice. - n.conjuncts = n.conjuncts[:0] - - for i, c := range v.Conjuncts { - n.addConjunction(c, i) - } - if n.insertConjuncts(state) { - n.maybeSetCache() - v.updateStatus(partial) - return - } - - fallthrough - - case partial, conjuncts: - // TODO: remove this optimization or make it correct. - // No need to do further processing when we have errors and all values - // have been considered. - // TODO: is checkClosed really still necessary here? - if v.status == conjuncts && (n.hasErr() || !n.checkClosed(state)) { - if err := n.getErr(); err != nil { - b, _ := v.BaseValue.(*Bottom) - v.BaseValue = CombineErrors(nil, b, err) - } - break - } - - defer c.PopArc(c.PushArc(v)) - - n.insertConjuncts(state) - - v.status = evaluating - - // Use maybeSetCache for cycle breaking - for n.maybeSetCache(); n.expandOne(partial); n.maybeSetCache() { - } - - n.doNotify() - - if !n.done() { - switch { - case state < conjuncts: - n.node.updateStatus(partial) - return - - case state == conjuncts: - if err := n.incompleteErrors(true); err != nil && err.Code < CycleError { - n.node.AddErr(c, err) - } else { - n.node.updateStatus(partial) - } - return - } - } - - // Disjunctions should always be finalized. If there are nested - // disjunctions the last one should be finalized. 
- disState := state - if len(n.disjunctions) > 0 && disState != finalized { - disState = finalized - } - n.expandDisjuncts(disState, n, maybeDefault, false, true) - - n.finalizeDisjuncts() - - switch len(n.disjuncts) { - case 0: - case 1: - x := *n.disjuncts[0].result - x.state = nil - x.cyclicReferences = n.node.cyclicReferences - *v = x - - default: - d := n.createDisjunct() - v.BaseValue = d - // The conjuncts will have too much information. Better have no - // information than incorrect information. - for _, d := range d.Values { - d, ok := d.(*Vertex) - if !ok { - continue - } - // We clear the conjuncts for now. As these disjuncts are for API - // use only, we will fill them out when necessary (using Defaults). - d.Conjuncts = nil - - // TODO: use a more principled form of dereferencing. For instance, - // disjuncts could already be assumed to be the given Vertex, and - // the main vertex could be dereferenced during evaluation. - for _, a := range d.Arcs { - for _, x := range a.Conjuncts { - // All the environments for embedded structs need to be - // dereferenced. - for env := x.Env; env != nil && env.Vertex == v; env = env.Up { - env.Vertex = d - } - } - } - } - v.Arcs = nil - v.ChildErrors = nil - // v.Structs = nil // TODO: should we keep or discard the Structs? - // TODO: how to represent closedness information? Do we need it? - } - - // If the state has changed, it is because a disjunct has been run, or - // because a single disjunct has replaced it. Restore the old state as - // to not confuse memory management. - v.state = n - - // We don't do this in postDisjuncts, as it should only be done after - // completing all disjunctions. 
- if !n.done() { - if err := n.incompleteErrors(true); err != nil { - b := n.node.Bottom() - if b != err { - err = CombineErrors(n.ctx.src, b, err) - } - n.node.BaseValue = err - } - } - - assertStructuralCycle(n) - - if state != finalized { - return - } - - if v.BaseValue == nil { - v.BaseValue = n.getValidators(finalized) - } - - // Free memory here? - v.updateStatus(finalized) - - case finalized: - } -} - -// insertConjuncts inserts conjuncts previously not inserted. -func (n *nodeContext) insertConjuncts(state vertexStatus) bool { - unreachableForDev(n.ctx) - - // Exit early if we have a concrete value and only need partial results. - if state == partial { - for n.conjunctsPartialPos < len(n.conjuncts) { - c := &n.conjuncts[n.conjunctsPartialPos] - n.conjunctsPartialPos++ - if c.done { - continue - } - if v, ok := c.C.Elem().(Value); ok && IsConcrete(v) { - c.done = true - n.addValueConjunct(c.C.Env, v, c.C.CloseInfo) - } - } - if n.scalar != nil && n.node.isDefined() { - return true - } - } - for n.conjunctsPos < len(n.conjuncts) { - nInfos := len(n.node.Structs) - p := &n.conjuncts[n.conjunctsPos] - n.conjunctsPos++ - if p.done { - continue - } - - // Initially request a Partial state to allow cyclic references to - // resolve more naturally first. This results in better error messages - // and less operations. - n.addExprConjunct(p.C, partial) - p.done = true - - // Record the OptionalTypes for all structs that were inferred by this - // Conjunct. This information can be used by algorithms such as trim. - for i := nInfos; i < len(n.node.Structs); i++ { - n.node.Conjuncts[p.index].CloseInfo.FieldTypes |= n.node.Structs[i].types - } - } - return false -} - -// finalizeDisjuncts: incomplete errors are kept around and not removed early. -// This call filters the incomplete errors and removes them -// -// This also collects all errors of empty disjunctions. These cannot be -// collected during the finalization state of individual disjuncts. 
Care should -// be taken to only call this after all disjuncts have been finalized. -func (n *nodeContext) finalizeDisjuncts() { - a := n.disjuncts - if len(a) == 0 { - return - } - k := 0 - for i, d := range a { - switch d.finalDone() { - case true: - a[k], a[i] = d, a[k] - k++ - default: - if err := d.incompleteErrors(true); err != nil { - n.disjunctErrs = append(n.disjunctErrs, err) - } - } - d.free() - } - if k == 0 { - n.makeError() - } - n.disjuncts = a[:k] -} - -func (n *nodeContext) doNotify() { - if n.errs == nil || len(n.notify) == 0 { - return - } - for _, rec := range n.notify { - v := rec.v - if v.state == nil { - if b := v.Bottom(); b != nil { - v.BaseValue = CombineErrors(nil, b, n.errs) - } else { - v.BaseValue = n.errs - } - } else { - v.state.addBottom(n.errs) - } - } - n.notify = n.notify[:0] -} - -func (n *nodeContext) postDisjunct(state vertexStatus) { - ctx := n.ctx - unreachableForDev(ctx) - - for { - // Use maybeSetCache for cycle breaking - for n.maybeSetCache(); n.expandOne(state); n.maybeSetCache() { - } - - if !n.addLists(combinedFlags{ - status: state, - condition: allKnown, - mode: ignore, - }) { - break - } - } - - if n.aStruct != nil { - n.updateNodeType(StructKind, n.aStruct, n.aStructID) - } - - if len(n.selfComprehensions) > 0 { - // Up to here all comprehensions with sources other than this node will - // have had a chance to run. We can now run self-referencing - // comprehensions with the restriction that they cannot add new arcs. - // - // Note: we should only set this in case of self-referential - // comprehensions. A comprehension in a parent node may still add - // arcs to this node, even if it has reached AllConjunctsDone status, - // as long as any evaluation did not rely on its specific set of arcs. - // Example: - // - // a: { - // b: _env: c: 1 - // - // // Using dynamic field ("b") prevents the evaluation of the - // // comprehension to be pushed down to env: and instead evaluates - // // it before b is completed. 
Even though b needs to reach state - // // AllConjunctsDone before evaluating b._env, it is still okay - // // to add arcs to b after this evaluation: only the set of arcs - // // in b._env needs to be frozen after that. - // for k2, v2 in b._env { - // ("b"): env: (k2): v2 - // } - // } - n.node.LockArcs = true - - n.injectSelfComprehensions(state) - } - - for n.expandOne(state) { - } - - switch err := n.getErr(); { - case err != nil: - if err.Code < IncompleteError && n.node.ArcType == ArcPending { - n.node.ArcType = ArcMember - } - n.node.BaseValue = err - n.errs = nil - - default: - if isCyclePlaceholder(n.node.BaseValue) { - if !n.done() { - n.node.BaseValue = n.incompleteErrors(true) - } else { - n.node.BaseValue = nil - } - } - // TODO: this ideally should be done here. However, doing so causes - // a somewhat more aggressive cutoff in disjunction cycles, which cause - // some incompatibilities. Fix in another CL. - // - // else if !n.done() { - // n.expandOne() - // if err := n.incompleteErrors(); err != nil { - // n.node.BaseValue = err - // } - // } - - // We are no longer evaluating. - - n.validateValue(state) - - v := n.node.Value() - - // TODO(perf): only delay processing of actual non-monotonic checks. - skip := n.skipNonMonotonicChecks() - if v != nil && IsConcrete(v) && !skip { - for _, v := range n.checks { - // TODO(errors): make Validate return bottom and generate - // optimized conflict message. Also track and inject IDs - // to determine origin location.s - if b := ctx.Validate(v, n.node); b != nil { - n.addBottom(b) - } - } - } - - if v == nil { - break - } - - switch { - case v.Kind() == ListKind: - for _, a := range n.node.Arcs { - if a.Label.Typ() == StringLabel && a.IsDefined(ctx) { - n.addErr(ctx.Newf("list may not have regular fields")) - // TODO(errors): add positions for list and arc definitions. 
- - } - } - - // case !isStruct(n.node) && v.Kind() != BottomKind: - // for _, a := range n.node.Arcs { - // if a.Label.IsRegular() { - // n.addErr(errors.Newf(token.NoPos, - // // TODO(errors): add positions of non-struct values and arcs. - // "cannot combine scalar values with arcs")) - // } - // } - } - } - - n.completeArcs(state) +func (c *OpContext) unify(v *Vertex, flags Flags) { + v.unify(c, flags) } // validateValue checks collected bound validators and checks them against @@ -615,7 +151,7 @@ func (n *nodeContext) validateValue(state vertexStatus) { // TODO: verify and simplify the below code to determine whether // something is a struct. markStruct := false - if n.aStruct != nil { + if n.aStruct { markStruct = true } else if len(n.node.Structs) > 0 { // TODO: do something more principled here. @@ -659,28 +195,17 @@ func (n *nodeContext) validateValue(state vertexStatus) { if v != nil && IsConcrete(v) { // Also check when we already have errors as we may find more // serious errors and would like to know about all errors anyway. - - if n.lowerBound != nil { - c := MakeRootConjunct(nil, n.lowerBound) - if b := ctx.Validate(c, v); b != nil { - // TODO(errors): make Validate return boolean and generate - // optimized conflict message. Also track and inject IDs - // to determine origin location.s - if e, _ := b.Err.(*ValueError); e != nil { - e.AddPosition(n.lowerBound) - e.AddPosition(v) - } - n.addBottom(b) + for _, bound := range []*BoundValue{n.lowerBound, n.upperBound} { + if bound == nil { + continue } - } - if n.upperBound != nil { - c := MakeRootConjunct(nil, n.upperBound) + c := MakeRootConjunct(nil, bound) if b := ctx.Validate(c, v); b != nil { // TODO(errors): make Validate return boolean and generate // optimized conflict message. 
Also track and inject IDs // to determine origin location.s if e, _ := b.Err.(*ValueError); e != nil { - e.AddPosition(n.upperBound) + e.AddPosition(bound) e.AddPosition(v) } n.addBottom(b) @@ -692,279 +217,6 @@ func (n *nodeContext) validateValue(state vertexStatus) { } } -// incompleteErrors reports all errors from uncompleted conjuncts. -// If final is true, errors are permanent and reported to parents. -func (n *nodeContext) incompleteErrors(final bool) *Bottom { - unreachableForDev(n.ctx) - - // collect incomplete errors. - var err *Bottom // n.incomplete - for _, d := range n.dynamicFields { - err = CombineErrors(nil, err, d.err) - } - for _, c := range n.comprehensions { - if c.err == nil { - continue - } - err = CombineErrors(nil, err, c.err) - - // TODO: use this code once possible. - // - // Add comprehension to ensure incomplete error is inserted. This - // ensures that the error is reported in the Vertex where the - // comprehension was defined, and not just in the node below. This, in - // turn, is necessary to support certain logic, like export, that - // expects to be able to detect an "incomplete" error at the first level - // where it is necessary. - // if c.node.status != Finalized { - // n := c.node.getNodeContext(n.ctx) - // n.comprehensions = append(n.comprehensions, c) - // } else { - // n.node.AddErr(n.ctx, err) - // } - // n := d.node.getNodeContext(ctx) - // n.addBottom(err) - if final && c.vertex != nil && c.vertex.status != finalized { - c.vertex.state.assertInitialized() - c.vertex.state.addBottom(err) - c.vertex = nil - } - } - for _, c := range n.selfComprehensions { - if c.err == nil { - continue - } - - err = CombineErrors(nil, err, c.err) - - // TODO: use this code once possible. - // - // Add comprehension to ensure incomplete error is inserted. This - // ensures that the error is reported in the Vertex where the - // comprehension was defined, and not just in the node below. 
This, in - // turn, is necessary to support certain logic, like export, that - // expects to be able to detect an "incomplete" error at the first level - // where it is necessary. - // if c.node.status != Finalized { - // n := c.node.getNodeContext(n.ctx) - // n.comprehensions = append(n.comprehensions, c) - // } else { - // n.node.AddErr(n.ctx, err) - // } - // n := d.node.getNodeContext(ctx) - // n.addBottom(err) - if c.vertex != nil && c.vertex.status != finalized { - c.vertex.state.addBottom(err) - c.vertex = nil - } - } - for _, x := range n.exprs { - err = CombineErrors(nil, err, x.err) - } - if err == nil { - // safeguard. - err = incompleteSentinel - } - if err.Code < IncompleteError { - n.node.ArcType = ArcMember - } - return err -} - -// TODO(perf): ideally we should always perform a closedness check if -// state is Finalized. This is currently not possible when computing a -// partial disjunction as the closedness information is not yet -// complete, possibly leading to a disjunct to be rejected prematurely. -// It is probably possible to fix this if we could add StructInfo -// structures demarked per conjunct. -// -// In practice this should not be a problem: when disjuncts originate -// from the same disjunct, they will have the same StructInfos, and thus -// Equal is able to equate them even in the presence of optional field. -// In general, combining any limited set of disjuncts will soon reach -// a fixed point where duplicate elements can be eliminated this way. -// -// Note that not checking closedness is irrelevant for disjunctions of -// scalars. This means it also doesn't hurt performance where structs -// have a discriminator field (e.g. Kubernetes). We should take care, -// though, that any potential performance issues are eliminated for -// Protobuf-like oneOf fields. 
-func (n *nodeContext) checkClosed(state vertexStatus) bool { - unreachableForDev(n.ctx) - - ignore := state != finalized || n.skipNonMonotonicChecks() - - v := n.node - if !v.Label.IsInt() && v.Parent != nil && !ignore && v.ArcType <= ArcRequired { - ctx := n.ctx - // Visit arcs recursively to validate and compute error. - if _, err := verifyArc2(ctx, v.Label, v, v.ClosedRecursive); err != nil { - // Record error in child node to allow recording multiple - // conflicts at the appropriate place, to allow valid fields to - // be represented normally and, most importantly, to avoid - // recursive processing of a disallowed field. - v.SetValue(ctx, err) - return false - } - } - return true -} - -func (n *nodeContext) completeArcs(state vertexStatus) { - unreachableForDev(n.ctx) - - if n.node.hasAllConjuncts || n.node.Parent == nil { - n.node.setParentDone() - } - - // At this point, if this arc is of type arcVoid, it means that the value - // may still be modified by child arcs. So in this case we must now process - // all arcs to be sure we get the correct result. - // For other cases we terminate early as this results in considerably - // better error messages. - if state <= conjuncts && - // Is allowed to go one step back. See Vertex.UpdateStatus. - n.node.status <= state+1 && - (!n.node.hasPendingArc || n.node.ArcType == ArcMember) { - - n.node.updateStatus(conjuncts) - return - } - - n.node.updateStatus(evaluatingArcs) - - ctx := n.ctx - - if !assertStructuralCycle(n) { - k := 0 - // Visit arcs recursively to validate and compute error. - for _, a := range n.node.Arcs { - // Call UpdateStatus here to be absolutely sure the status is set - // correctly and that we are not regressing. - n.node.updateStatus(evaluatingArcs) - - wasVoid := a.ArcType == ArcPending - - ctx.unify(a, combinedFlags{ - status: finalized, - condition: allKnown, - mode: ignore, - }) - - if a.ArcType == ArcPending { - continue - } - - // Errors are allowed in let fields. 
Handle errors and failure to - // complete accordingly. - if !a.Label.IsLet() && a.ArcType <= ArcRequired { - // Don't set the state to Finalized if the child arcs are not done. - if state == finalized && a.status < finalized { - state = conjuncts - } - - if err := a.Bottom(); err != nil { - n.AddChildError(err) - } - } - - // If a structural cycle is detected, Arcs is cleared to avoid - // going into an infinite loop. If this is the case, we can bail - // from this loop. - if len(n.node.Arcs) == 0 { - goto postChecks - } - n.node.Arcs[k] = a - k++ - - switch { - case a.ArcType > ArcRequired, !a.Label.IsString(): - case n.kind&StructKind == 0: - if !n.node.IsErr() { - n.reportFieldMismatch(pos(a.Value()), nil, a.Label, n.node.Value()) - } - case !wasVoid: - case n.kind == TopKind: - // Theoretically it may be possible that a "void" arc references - // this top value where it really should have been a struct. One - // way to solve this is to have two passes over the arcs, where - // the first pass additionally analyzes whether comprehensions - // will yield values and "un-voids" an arc ahead of the rest. - // - // At this moment, though, I fail to see a possibility to create - // faulty CUE using this mechanism, though. At most error - // messages are a bit unintuitive. This may change once we have - // functionality to reflect on types. - if !n.node.IsErr() { - n.node.BaseValue = &StructMarker{} - n.kind = StructKind - } - } - } - n.node.Arcs = n.node.Arcs[:k] - - postChecks: - for _, c := range n.postChecks { - f := ctx.PushState(c.env, c.expr.Source()) - - // TODO(errors): make Validate return bottom and generate - // optimized conflict message. 
Also track and inject IDs - // to determine origin location.s - v := ctx.evalState(c.expr, combinedFlags{ - status: finalized, - condition: allKnown, - mode: ignore, - }) - v, _ = ctx.getDefault(v) - v = Unwrap(v) - - switch _, isError := v.(*Bottom); { - case isError == c.expectError: - default: - n.node.AddErr(ctx, &Bottom{ - Src: c.expr.Source(), - Code: CycleError, - Node: n.node, - Err: ctx.NewPosf(pos(c.expr), - "circular dependency in evaluation of conditionals: %v changed after evaluation", - ctx.Str(c.expr)), - }) - } - - ctx.PopState(f) - } - } - - if err := n.getErr(); err != nil { - n.errs = nil - if b, _ := n.node.BaseValue.(*Bottom); b != nil { - err = CombineErrors(nil, b, err) - } - n.node.BaseValue = err - } - - b, hasErr := n.node.BaseValue.(*Bottom) - if !hasErr && b != cycle { - n.checkClosed(state) - } - - // Strip struct literals that were not initialized and are not part - // of the output. - // - // TODO(perf): we could keep track if any such structs exist and only - // do this removal if there is a change of shrinking the list. - k := 0 - for _, s := range n.node.Structs { - if s.initialized { - n.node.Structs[k] = s - k++ - } - } - n.node.Structs = n.node.Structs[:k] - - n.node.updateStatus(finalized) -} - // TODO: this is now a sentinel. Use a user-facing error that traces where // the cycle originates. var cycle = &Bottom{ @@ -980,44 +232,6 @@ func isCyclePlaceholder(v BaseValue) bool { return v == cycle } -func (n *nodeContext) createDisjunct() *Disjunction { - a := make([]Value, len(n.disjuncts)) - p := 0 - hasDefaults := false - for i, x := range n.disjuncts { - v := *x.result - v.state = nil - switch x.defaultMode { - case isDefault: - a[i] = a[p] - a[p] = &v - p++ - hasDefaults = true - - case notDefault: - hasDefaults = true - fallthrough - case maybeDefault: - a[i] = &v - } - } - // TODO: disambiguate based on concrete values. - // TODO: consider not storing defaults. 
- // if p > 0 { - // a = a[:p] - // } - return &Disjunction{ - Values: a, - NumDefaults: p, - HasDefaults: hasDefaults, - } -} - -type arcKey struct { - arc *Vertex - id CloseInfo -} - // A nodeContext is used to collate all conjuncts of a value to facilitate // unification. Conceptually order of unification does not matter. However, // order has relevance when performing checks of non-monotic properties. Such @@ -1029,12 +243,9 @@ type nodeContext struct { // This allows checking that we are not using stale nodeContexts. opID uint64 - // refCount: - // evalv2: keeps track of all current usages of the node, such that the - // node can be freed when the counter reaches zero. - // evalv3: keeps track of the number points in the code where this - //. nodeContext is used for processing. A nodeContext that is being - //. processed may not be freed yet. + // refCount keeps track of the number points in the code where this + // nodeContext is used for processing. A nodeContext that is being + // processed may not be freed yet. refCount int // isDisjunct indicates whether this nodeContext is used in a disjunction. @@ -1070,7 +281,7 @@ type nodeContext struct { // slices to nil, we truncate the existing buffers so that they do not // need to be reallocated upon reuse of the nodeContext. - arcMap []arcKey // not copied for cloning + arcMap []*Vertex // not copied for cloning // vertexMap is used to map vertices in disjunctions. vertexMap vertexMap @@ -1086,25 +297,15 @@ type nodeContext struct { // closedness information is correctly computed in such cases. sharedIDs []CloseInfo - // Conjuncts holds a reference to the Vertex Arcs that still need - // processing. It does NOT need to be copied. - conjuncts []conjunct cyclicConjuncts []cyclicConjunct - dynamicFields []envDynamic - comprehensions []envYield - selfComprehensions []envYield // comprehensions iterating over own struct. 
- - // Expression conjuncts - lists []envList - vLists []*Vertex - exprs []envExpr - // These fields are used to track type checking. - reqDefIDs []refInfo - replaceIDs []replaceID - conjunctInfo []conjunctInfo - reqSets reqSets + reqDefIDs []refInfo + replaceIDs []replaceID + flatReplaceIDs []replaceID // including all parents, and sorted by 'to' in descending order + minFlatReplaceIDTo defID // minimum 'to' value in flatReplaceIDs, 0 if empty + conjunctInfo []conjunctInfo + reqSets reqSets // Checks is a list of conjuncts, as we need to preserve the context in // which it was evaluated. The conjunct is always a validator (and thus @@ -1118,39 +319,16 @@ type nodeContext struct { // Disjunction handling disjunctions []envDisjunct - // usedDefault indicates the for each of possibly multiple parent - // disjunctions whether it is unified with a default disjunct or not. - // This is then later used to determine whether a disjunction should - // be treated as a marked disjunction. - usedDefault []defaultInfo - // disjuncts holds disjuncts that evaluated to a non-bottom value. // TODO: come up with a better name. disjuncts []*nodeContext - buffer []*nodeContext disjunctErrs []*Bottom + userErrs []*Bottom // hasDisjunction marks wither any disjunct was added. It is listed here // instead of in nodeContextState as it should be cleared when a disjunction // is split off. TODO: find something more principled. hasDisjunction bool - - // snapshot holds the last value of the vertex before calling postDisjunct. - snapshot *Vertex - - // Result holds the last evaluated value of the vertex after calling - // postDisjunct. - result *Vertex -} - -type conjunct struct { - C Conjunct - - // done marks that this conjunct has been inserted. This prevents a - // conjunct from being processed more than once, for instance, when - // insertConjuncts is called more than once for the same node. 
- done bool - index int // index of the original conjunct in Vertex.Conjuncts } type nodeContextState struct { @@ -1163,6 +341,13 @@ type nodeContextState struct { // node after a corresponding task has been completed. toComplete bool + // embedsRecursivelyClosed is used to implement __reclose. It must be set + // when a vertex that is recursively closed is embedded through a spread + // operator. It is okay to set it if it is just unified with a vertex that + // is recursively closed, but not added through a spread operator. The + // result will just be an unnecessary call to __reclose. + embedsRecursivelyClosed bool + // isCompleting > 0 indicates whether a call to completeNodeTasks is in // progress. isCompleting int @@ -1195,6 +380,7 @@ type nodeContextState struct { dropParentRequirements bool // used for typo checking computedCloseInfo bool // used for typo checking + computedFlatReplaceIDs bool // whether [nodeContext.flatReplaceIDs] has been computed isShared bool // set if we are currently structure sharing noSharing bool // set if structure sharing is not allowed @@ -1204,8 +390,10 @@ type nodeContextState struct { shareDecremented bool // counters of sharedIDs have been decremented depth int32 - defaultMode defaultMode // cumulative default mode - origDefaultMode defaultMode // default mode of the original disjunct + defaultMode defaultMode // cumulative default mode + origDefaultMode defaultMode // default mode of the original disjunct + priority layer.Priority // Priority corresponding to defaultMode + origPriority layer.Priority // Priority of the original disjunct // has a value filled out before the node splits into a disjunction. 
Aside // from detecting a self-reference cycle when there is otherwise just an @@ -1260,16 +448,17 @@ type nodeContextState struct { // Value info - kind Kind - kindExpr Expr // expr that adjust last value (for error reporting) - kindID CloseInfo // for error tracing + kind Kind + constraintKind Kind + defaultKind Kind + kindExpr Expr // expr that adjust last value (for error reporting) + kindID posInfo // for error tracing // Current value (may be under construction) scalar Value // TODO: use Value in node. - scalarID CloseInfo + scalarID posInfo - aStruct Expr - aStructID CloseInfo + aStruct bool // TODO: eventually replace with kind & StructKind // List fields listIsClosed bool @@ -1279,21 +468,9 @@ type nodeContextState struct { lowerBound *BoundValue // > or >= upperBound *BoundValue // < or <= errs *Bottom - - // Slice positions - - // conjunctsPos is an index into conjuncts indicating the next conjunct - // to process. This is used to avoids processing a conjunct twice in some - // cases where there is an evaluation cycle. - conjunctsPos int - // conjunctsPartialPos is like conjunctsPos, but for the 'partial' phase - // of processing where conjuncts are only processed as concrete scalars. - conjunctsPartialPos int } // A receiver receives notifications. -// cc is used for V3 and is nil in V2. -// v is equal to cc.src._cc in V3. type receiver struct { v *Vertex c CloseInfo @@ -1306,107 +483,39 @@ func (n *nodeContext) Logf(format string, args ...interface{}) { n.ctx.Logf(n.node, format, args...) } -type defaultInfo struct { - // parentMode indicates whether this values was used as a default value, - // based on the parent mode. - parentMode defaultMode - - // The result of default evaluation for a nested disjunction. 
- nestedMode defaultMode - - origMode defaultMode -} - -func (n *nodeContext) addNotify(v *Vertex) { - unreachableForDev(n.ctx) - - if v != nil && !n.node.hasAllConjuncts { - n.notify = append(n.notify, receiver{v: v}) - } -} - -func (n *nodeContext) clone() *nodeContext { - unreachableForDev(n.ctx) - - d := n.ctx.newNodeContext(n.node) +func (c *OpContext) newNodeContext(node *Vertex) *nodeContext { + var n *nodeContext + if n = c.freeListNode; n != nil { + c.stats.Reused++ + c.freeListNode = n.nextFree - d.refCount++ - - d.ctx = n.ctx - d.node = n.node - - d.nodeContextState = n.nodeContextState - - d.toFree = append(d.toFree, n.toFree...) - d.arcMap = append(d.arcMap, n.arcMap...) - d.notify = append(d.notify, n.notify...) - d.sharedIDs = append(d.sharedIDs, n.sharedIDs...) - - n.scheduler.cloneInto(&d.scheduler) - - d.conjuncts = append(d.conjuncts, n.conjuncts...) - d.cyclicConjuncts = append(d.cyclicConjuncts, n.cyclicConjuncts...) - d.dynamicFields = append(d.dynamicFields, n.dynamicFields...) - d.comprehensions = append(d.comprehensions, n.comprehensions...) - d.selfComprehensions = append(d.selfComprehensions, n.selfComprehensions...) - d.lists = append(d.lists, n.lists...) - d.vLists = append(d.vLists, n.vLists...) - d.exprs = append(d.exprs, n.exprs...) - - d.reqDefIDs = append(d.reqDefIDs, n.reqDefIDs...) - d.replaceIDs = append(d.replaceIDs, n.replaceIDs...) - d.conjunctInfo = append(d.conjunctInfo, n.conjunctInfo...) - d.reqSets = append(d.reqSets, n.reqSets...) - - d.checks = append(d.checks, n.checks...) - d.postChecks = append(d.postChecks, n.postChecks...) - - d.usedDefault = append(d.usedDefault, n.usedDefault...) - - // Do not clone other disjunction-related slices, like disjuncts and buffer: - // disjunction slices are managed by disjunction processing directly. 
- - return d -} - -func (c *OpContext) newNodeContext(node *Vertex) *nodeContext { - var n *nodeContext - if n = c.freeListNode; n != nil { - c.stats.Reused++ - c.freeListNode = n.nextFree - - n.scheduler.clear() - n.scheduler.ctx = c + n.scheduler.clear() + n.scheduler.ctx = c *n = nodeContext{ scheduler: n.scheduler, node: node, nodeContextState: nodeContextState{ - kind: TopKind, + kind: TopKind, + constraintKind: TopKind, + defaultKind: TopKind, }, - toFree: n.toFree[:0], - arcMap: n.arcMap[:0], - conjuncts: n.conjuncts[:0], - cyclicConjuncts: n.cyclicConjuncts[:0], - notify: n.notify[:0], - sharedIDs: n.sharedIDs[:0], - checks: n.checks[:0], - postChecks: n.postChecks[:0], - dynamicFields: n.dynamicFields[:0], - comprehensions: n.comprehensions[:0], - selfComprehensions: n.selfComprehensions[:0], - lists: n.lists[:0], - vLists: n.vLists[:0], - exprs: n.exprs[:0], - reqDefIDs: n.reqDefIDs[:0], - replaceIDs: n.replaceIDs[:0], - conjunctInfo: n.conjunctInfo[:0], - reqSets: n.reqSets[:0], - disjunctions: n.disjunctions[:0], - usedDefault: n.usedDefault[:0], - disjunctErrs: n.disjunctErrs[:0], - disjuncts: n.disjuncts[:0], - buffer: n.buffer[:0], + toFree: n.toFree[:0], + arcMap: n.arcMap[:0], + cyclicConjuncts: n.cyclicConjuncts[:0], + notify: n.notify[:0], + sharedIDs: n.sharedIDs[:0], + checks: n.checks[:0], + postChecks: n.postChecks[:0], + reqDefIDs: n.reqDefIDs[:0], + replaceIDs: n.replaceIDs[:0], + flatReplaceIDs: n.flatReplaceIDs[:0], + conjunctInfo: n.conjunctInfo[:0], + reqSets: n.reqSets[:0], + disjunctions: n.disjunctions[:0], + disjunctErrs: n.disjunctErrs[:0], + userErrs: n.userErrs[:0], + disjuncts: n.disjuncts[:0], } n.scheduler.clear() } else { @@ -1418,7 +527,11 @@ func (c *OpContext) newNodeContext(node *Vertex) *nodeContext { }, node: node, - nodeContextState: nodeContextState{kind: TopKind}, + nodeContextState: nodeContextState{ + kind: TopKind, + constraintKind: TopKind, + defaultKind: TopKind, + }, } } @@ -1431,50 +544,6 @@ func (c 
*OpContext) newNodeContext(node *Vertex) *nodeContext { return n } -func (v *Vertex) getNodeContext(c *OpContext, ref int) *nodeContext { - unreachableForDev(c) - - if v.state == nil { - if v.status == finalized { - return nil - } - v.state = c.newNodeContext(v) - } else if v.state.node != v { - panic("getNodeContext: nodeContext out of sync") - } - v.state.refCount += ref - return v.state -} - -func (v *Vertex) freeNode(n *nodeContext) { - if n == nil { - return - } - if n.node != v { - panic("freeNode: unpaired free") - } - if v.state != nil && v.state != n { - panic("freeNode: nodeContext out of sync") - } - if n.refCount--; n.refCount == 0 { - if v.status == finalized { - v.freeNodeState() - } else { - n.ctx.stats.Retained++ - } - } -} - -func (v *Vertex) freeNodeState() { - if v.state == nil { - return - } - state := v.state - v.state = nil - - state.ctx.freeNodeContext(state) -} - func (n *nodeContext) free() { if n.refCount--; n.refCount == 0 { n.ctx.freeNodeContext(n) @@ -1499,28 +568,43 @@ func (c *OpContext) freeNodeContext(n *nodeContext) { n.scheduler.clear() } -// TODO(perf): return a dedicated ConflictError that can track original -// positions on demand. -func (n *nodeContext) reportConflict( - v1, v2 Node, - k1, k2 Kind, - ids ...CloseInfo) { - +func (n *nodeContext) reportConflict(v1, v2 Node, k1, k2 Kind, ids ...posInfo) { ctx := n.ctx - var err *ValueError - if k1 == k2 { - err = ctx.NewPosf(token.NoPos, "conflicting values %s and %s", v1, v2) - } else { - err = ctx.NewPosf(token.NoPos, - "conflicting values %s and %s (mismatched types %s and %s)", - v1, v2, k1, k2) + // Collect all positions from the nodes, including their leaf conjuncts + var auxpos []token.Pos + auxpos = appendNodePositions(auxpos, v1) + auxpos = appendNodePositions(auxpos, v2) + + // Make shallow copies of Vertex nodes to avoid endless recursion when + // the error is set as the BaseValue. This matches the behavior in [OpContext.NewPosf]. 
+ if v, ok := v1.(*Vertex); ok { + vcopy := *v + v1 = &vcopy + } + if v, ok := v2.(*Vertex); ok { + vcopy := *v + v2 = &vcopy + } + + // Create a ConflictError that defers formatting until needed + err := &ConflictError{ + baseError: baseError{ + r: ctx.Runtime, + v: ctx.errNode(), + pos: token.NoPos, + auxpos: auxpos, + altPath: ctx.makeAltPath(), + }, + format: ctx.Format, + v1: v1, + v2: v2, + k1: k1, + k2: k2, } - err.AddPosition(v1) - err.AddPosition(v2) for _, id := range ids { - err.AddClosedPositions(id) + err.AddClosedPositions(ctx, id) } n.addErr(err) @@ -1528,39 +612,29 @@ func (n *nodeContext) reportConflict( // reportFieldMismatch reports the mixture of regular fields with non-struct // values. Either s or f needs to be given. -func (n *nodeContext) reportFieldMismatch( - p token.Pos, - s *StructLit, - f Feature, - scalar Expr, - id ...CloseInfo) { - +func (n *nodeContext) reportFieldMismatch(p token.Pos, s *StructLit, f Feature, scalar Expr) { ctx := n.ctx if f == InvalidLabel { for _, a := range s.Decls { if x, ok := a.(*Field); ok && x.Label.IsRegular() { f = x.Label - p = pos(x) + p = Pos(x) break } } if f == InvalidLabel { - n.reportConflict(scalar, s, n.kind, StructKind, id...) 
+ n.reportConflict(scalar, s, n.kind, StructKind) return } } err := ctx.NewPosf(p, "cannot combine regular field %q with %v", f, scalar) - if s != nil { + if s != nil { // important to avoid a typed nil err.AddPosition(s) } - for _, ci := range id { - err.AddClosedPositions(ci) - } - n.addErr(err) } @@ -1585,7 +659,7 @@ func (n *nodeContext) updateNodeType(k Kind, v Expr, id CloseInfo) bool { // n.reportFieldMismatch(token.NoPos, s, 0, n.kindExpr, id, n.kindID) case n.kindExpr != nil: - n.reportConflict(n.kindExpr, v, n.kind, k, n.kindID, id) + n.reportConflict(n.kindExpr, v, n.kind, k, n.kindID, id.posInfo) default: n.addErr(ctx.Newf( @@ -1597,31 +671,10 @@ func (n *nodeContext) updateNodeType(k Kind, v Expr, id CloseInfo) bool { n.kindExpr = v } n.kind = kind + n.kindID = id.posInfo return kind != BottomKind } -func (n *nodeContext) done() bool { - // TODO(v0.7): verify that done() is checking for the right conditions in - // the new evaluator implementation. - return len(n.dynamicFields) == 0 && - len(n.comprehensions) == 0 && - len(n.exprs) == 0 -} - -// finalDone is like done, but allows for cycle errors, which can be ignored -// as they essentially indicate a = a & _. -func (n *nodeContext) finalDone() bool { - // TODO(v0.7): update for new evaluator? - for _, x := range n.exprs { - if x.err.Code != CycleError { - return false - } - } - return len(n.dynamicFields) == 0 && - len(n.comprehensions) == 0 && - len(n.selfComprehensions) == 0 -} - // hasErr is used to determine if an evaluation path, for instance a single // path after expanding all disjunctions, has an error. func (n *nodeContext) hasErr() bool { @@ -1649,7 +702,8 @@ func (n *nodeContext) getValidators(state vertexStatus) BaseValue { ctx := n.ctx - a := []Value{} + // Preallocate some space in the stack. 
+ a := make([]Value, 0, 4) // if n.node.Value != nil { // a = append(a, n.node.Value) // } @@ -1698,48 +752,14 @@ func (n *nodeContext) getValidators(state vertexStatus) BaseValue { v = a[0] default: - v = &Conjunction{Values: a} + // With multiple elements, the slice escapes so it must allocate. + // Make a copy here, so that the len(a)==1 case stays in the stack. + v = &Conjunction{Values: slices.Clone(a)} } return v } -// TODO: this function can probably go as this is now handled in the nodeContext. -func (n *nodeContext) maybeSetCache() { - // Set BaseValue to scalar, but only if it was not set before. Most notably, - // errors should not be discarded. - _, isErr := n.node.BaseValue.(*Bottom) - if n.scalar != nil && (!isErr || isCyclePlaceholder(n.node.BaseValue)) { - n.node.BaseValue = n.scalar - } - // NOTE: this is now handled by associating the nodeContext - // if n.errs != nil { - // n.node.SetValue(n.ctx, Partial, n.errs) - // } -} - -type envExpr struct { - c Conjunct - err *Bottom -} - -type envDynamic struct { - env *Environment - field *DynamicField - id CloseInfo - err *Bottom -} - -type envList struct { - env *Environment - list *ListLit - n int64 // recorded length after evaluator - elipsis *Ellipsis - id CloseInfo - ignore bool // has a self-referencing comprehension and is postponed - self bool // was added as a postponed self-referencing comprehension -} - type envCheck struct { env *Environment expr Expr @@ -1766,947 +786,8 @@ func (n *nodeContext) addErr(err errors.Error) { } } -// addExprConjuncts will attempt to evaluate an Expr and insert the value -// into the nodeContext if successful or queue it for later evaluation if it is -// incomplete or is not value. 
-func (n *nodeContext) addExprConjunct(v Conjunct, state vertexStatus) { - unreachableForDev(n.ctx) - - env := v.Env - id := v.CloseInfo - - switch x := v.Elem().(type) { - case *Vertex: - if x.IsData() { - n.addValueConjunct(env, x, id) - } else { - n.addVertexConjuncts(v, x, true) - } - - case Value: - n.addValueConjunct(env, x, id) - - case *BinaryExpr: - if x.Op == AndOp { - n.addExprConjunct(MakeConjunct(env, x.X, id), state) - n.addExprConjunct(MakeConjunct(env, x.Y, id), state) - return - } else { - n.evalExpr(v, state) - } - - case *StructLit: - n.addStruct(env, x, id) - - case *ListLit: - childEnv := &Environment{ - Up: env, - Vertex: n.node, - } - n.lists = append(n.lists, envList{env: childEnv, list: x, id: id}) - - case *DisjunctionExpr: - n.addDisjunction(env, x, id) - - case *Comprehension: - // always a partial comprehension. - n.insertComprehension(env, x, id) - return - - default: - // Must be Resolver or Evaluator. - n.evalExpr(v, state) - } - n.ctx.stats.Conjuncts++ -} - -// evalExpr is only called by addExprConjunct. If an error occurs, it records -// the error in n and returns nil. -func (n *nodeContext) evalExpr(v Conjunct, state vertexStatus) { - unreachableForDev(n.ctx) - - // Require an Environment. - ctx := n.ctx - - closeID := v.CloseInfo - - switch x := v.Expr().(type) { - case Resolver: - // We elevate a field evaluated to the Conjuncts state to Finalized - // later. For now we allow partial evaluation so that we can break - // cycles and postpone incomplete evaluations until more information is - // available down the line. - if state == finalized { - state = conjuncts - } - arc, err := ctx.resolveState(v, x, combinedFlags{ - status: state, - condition: allKnown, - mode: ignore, - }) - if err != nil && (!err.IsIncomplete() || err.Permanent) { - n.addBottom(err) - break - } - if arc == nil { - n.exprs = append(n.exprs, envExpr{v, err}) - break - } - - // We complete the evaluation. 
Some optimizations will only work when an - // arc is already finalized. So this ensures that such optimizations get - // triggered more often. - // - // NOTE(let finalization): aside from being an optimization, this also - // ensures that let arcs that are not contained as fields of arcs, but - // rather are held in the cash, are finalized. This, in turn, is - // necessary to trigger the notification mechanism, where appropriate. - // - // A node should not Finalize itself as it may erase the state object - // which is still assumed to be present down the line - // (see https://cuelang.org/issues/2171). - if arc.status == conjuncts && arc != n.node && arc.hasAllConjuncts { - arc.Finalize(ctx) - } - - ci, skip := n.markCycle(arc, v.Env, x, v.CloseInfo) - if skip { - return - } - v.CloseInfo = ci - - n.addVertexConjuncts(v, arc, false) - - case Evaluator: - // Interpolation, UnaryExpr, BinaryExpr, CallExpr - // Could be unify? - val := ctx.evaluateRec(v, combinedFlags{ - status: partial, - condition: allKnown, - mode: ignore, - }) - if b, ok := val.(*Bottom); ok && - b.IsIncomplete() { - n.exprs = append(n.exprs, envExpr{v, b}) - break - } - - if v, ok := val.(*Vertex); ok { - // Handle generated disjunctions (as in the 'or' builtin). - // These come as a Vertex, but should not be added as a value. - b, ok := v.BaseValue.(*Bottom) - if ok && b.IsIncomplete() && len(v.Conjuncts) > 0 { - for _, c := range v.Conjuncts { - c.CloseInfo = closeID - n.addExprConjunct(c, state) - } - break - } - } - - // TODO: also to through normal Vertex handling here. At the moment - // addValueConjunct handles StructMarker.NeedsClose, as this is always - // only needed when evaluation an Evaluator, and not a Resolver. - // The two code paths should ideally be merged once this separate - // mechanism is eliminated. 
- // - // if arc, ok := val.(*Vertex); ok && !arc.IsData() { - // n.addVertexConjuncts(v.Env, closeID, v.Expr(), arc) - // break - // } - - // TODO: insert in vertex as well - n.addValueConjunct(v.Env, val, closeID) - - default: - panic(fmt.Sprintf("unknown expression of type %T", x)) - } -} - -func (n *nodeContext) addVertexConjuncts(c Conjunct, arc *Vertex, inline bool) { - unreachableForDev(n.ctx) - - closeInfo := c.CloseInfo - - // We need to ensure that each arc is only unified once (or at least) a - // bounded time, witch each conjunct. Comprehensions, for instance, may - // distribute a value across many values that get unified back into the - // same value. If such a value is a disjunction, than a disjunction of N - // disjuncts will result in a factor N more unifications for each - // occurrence of such value, resulting in exponential running time. This - // is especially common values that are used as a type. - // - // However, unification is idempotent, so each such conjunct only needs - // to be unified once. This cache checks for this and prevents an - // exponential blowup in such case. - // - // TODO(perf): this cache ensures the conjuncts of an arc at most once - // per ID. However, we really need to add the conjuncts of an arc only - // once total, and then add the close information once per close ID - // (pointer can probably be shared). Aside from being more performant, - // this is probably the best way to guarantee that conjunctions are - // linear in this case. - - ckey := closeInfo - ckey.Refs = nil - ckey.Inline = false - key := arcKey{arc, ckey} - for _, k := range n.arcMap { - if key == k { - return - } - } - n.arcMap = append(n.arcMap, key) - - status := arc.status - - switch status { - case evaluating: - // Reference cycle detected. We have reached a fixed point and - // adding conjuncts at this point will not change the value. Also, - // continuing to pursue this value will result in an infinite loop. 
- - // TODO: add a mechanism so that the computation will only have to - // be done once? - - if arc == n.node { - // TODO: we could use node sharing here. This may avoid an - // exponential blowup during evaluation, like is possible with - // YAML. - return - } - - case evaluatingArcs: - // There is a structural cycle, but values may be processed nonetheless - // if there is a non-cyclic conjunct. See cycle.go. - } - - // Performance: the following if check filters cases that are not strictly - // necessary for correct functioning. Not updating the closeInfo may cause - // some position information to be lost for top-level positions of merges - // resulting form APIs. These tend to be fairly uninteresting. - // At the same time, this optimization may prevent considerable slowdown - // in case an API does many calls to Unify. - x := c.Expr() - if !inline || arc.IsClosedStruct() || arc.IsClosedList() { - isDef, _ := IsDef(x) - closeInfo = closeInfo.SpawnRef(arc, isDef, x) - } - - if arc.status == unprocessed && !inline { - // This is a rare condition, but can happen in certain - // evaluation orders. Unfortunately, adding this breaks - // resolution of cyclic mutually referring disjunctions. But it - // is necessary to prevent lookups in unevaluated structs. - // TODO(cycles): this can probably most easily be fixed with a - // having a more recursive implementation. - n.ctx.unify(arc, combinedFlags{ - status: partial, - condition: allKnown, - mode: ignore, - }) - } - - // Don't add conjuncts if a node is referring to itself. - if n.node == arc { - return - } - - if arc.state != nil { - arc.state.addNotify(n.node) - } - - for _, c := range arc.Conjuncts { - // Note that we are resetting the tree here. We hereby assume that - // closedness conflicts resulting from unifying the referenced arc were - // already caught there and that we can ignore further errors here. 
- c.CloseInfo = closeInfo - n.addExprConjunct(c, partial) - } -} - -func (n *nodeContext) addValueConjunct(env *Environment, v Value, id CloseInfo) { - n.updateCyclicStatus(id) - - ctx := n.ctx - - if x, ok := v.(*Vertex); ok { - if m, ok := x.BaseValue.(*StructMarker); ok { - n.aStruct = x - n.aStructID = id - if m.NeedClose { - id.IsClosed = true - } - } - - if !x.IsData() { - // TODO: this really shouldn't happen anymore. - if isComplexStruct(ctx, x) { - // This really shouldn't happen, but just in case. - n.addVertexConjuncts(MakeConjunct(env, x, id), x, true) - return - } - - for _, c := range x.Conjuncts { - c.CloseInfo = id - n.addExprConjunct(c, partial) // TODO: Pass from eval - } - return - } - - // TODO: evaluate value? - switch v := x.BaseValue.(type) { - default: - panic(fmt.Sprintf("invalid type %T", x.BaseValue)) - - case *ListMarker: - n.vLists = append(n.vLists, x) - return - - case *StructMarker: - - case Value: - n.addValueConjunct(env, v, id) - } - - if len(x.Arcs) == 0 { - return - } - - s := &StructLit{} - - // Keep ordering of Go struct for topological sort. - n.node.AddStruct(s, env, id) - n.node.Structs = append(n.node.Structs, x.Structs...) - - for _, a := range x.Arcs { - if !a.definitelyExists() { - continue - } - // TODO(errors): report error when this is a regular field. 
- c := MakeConjunct(nil, a, id) - n.insertField(a.Label, a.ArcType, c) - s.MarkField(a.Label) - } - return - } - - switch b := v.(type) { - case *Bottom: - if b == NoShareSentinel { - return - } - n.addBottom(b) - return - case *Builtin: - if v := b.BareValidator(); v != nil { - n.addValueConjunct(env, v, id) - return - } - } - - if !n.updateNodeType(v.Kind(), v, id) { - return - } - - switch x := v.(type) { - case *Disjunction: - n.addDisjunctionValue(env, x, id) - - case *Conjunction: - for _, x := range x.Values { - n.addValueConjunct(env, x, id) - } - - case *Top: - n.hasTop = true - - case *BasicType: - // handled above - - case *BoundValue: - switch x.Op { - case LessThanOp, LessEqualOp: - if y := n.upperBound; y != nil { - v := SimplifyBounds(ctx, n.kind, x, y) - if err := valueError(v); err != nil { - err.AddPosition(v) - err.AddPosition(n.upperBound) - err.AddClosedPositions(id) - } - n.upperBound = nil - n.addValueConjunct(env, v, id) - return - } - n.upperBound = x - - case GreaterThanOp, GreaterEqualOp: - if y := n.lowerBound; y != nil { - v := SimplifyBounds(ctx, n.kind, x, y) - if err := valueError(v); err != nil { - err.AddPosition(v) - err.AddPosition(n.lowerBound) - err.AddClosedPositions(id) - } - n.lowerBound = nil - n.addValueConjunct(env, v, id) - return - } - n.lowerBound = x - - case EqualOp, NotEqualOp, MatchOp, NotMatchOp: - // This check serves as simplifier, but also to remove duplicates. - k := 0 - match := false - cx := MakeConjunct(env, x, id) - for _, c := range n.checks { - if y, ok := c.x.(*BoundValue); ok { - switch z := SimplifyBounds(ctx, n.kind, x, y); { - case z == y: - match = true - case z == x: - continue - } - } - n.checks[k] = c - k++ - } - n.checks = n.checks[:k] - if !match { - n.checks = append(n.checks, cx) - } - return - } - - case Validator: - // This check serves as simplifier, but also to remove duplicates. 
- cx := MakeConjunct(env, x, id) - for i, y := range n.checks { - if b, ok := SimplifyValidator(ctx, cx, y); ok { - n.checks[i] = b - return - } - } - n.updateNodeType(x.Kind(), x, id) - n.checks = append(n.checks, cx) - // TODO(validatorType): see namesake TODO in conjunct.go. - k := x.Kind() - if k == TopKind { - n.hasTop = true - } - n.updateNodeType(k, x, id) - - case *Vertex: - // handled above. - - case Value: // *NullLit, *BoolLit, *NumLit, *StringLit, *BytesLit, *Builtin - if y := n.scalar; y != nil { - if b, ok := BinOp(ctx, errOnDiffType, EqualOp, x, y).(*Bool); !ok || !b.B { - n.reportConflict(x, y, x.Kind(), y.Kind(), n.scalarID, id) - } - // TODO: do we need to explicitly add again? - // n.scalar = nil - // n.addValueConjunct(c, BinOp(c, EqualOp, x, y)) - break - } - n.scalar = x - n.scalarID = id - if n.node.status >= conjuncts { - n.node.BaseValue = x - } - - default: - panic(fmt.Sprintf("unknown value type %T", x)) - } - - if n.lowerBound != nil && n.upperBound != nil { - if u := SimplifyBounds(ctx, n.kind, n.lowerBound, n.upperBound); u != nil { - if err := valueError(u); err != nil { - err.AddPosition(n.lowerBound) - err.AddPosition(n.upperBound) - err.AddClosedPositions(id) - } - n.lowerBound = nil - n.upperBound = nil - n.addValueConjunct(env, u, id) - } - } -} - -func valueError(v Value) *ValueError { - if v == nil { - return nil - } - b, _ := v.(*Bottom) - if b == nil { - return nil - } - err, _ := b.Err.(*ValueError) - if err == nil { - return nil - } - return err -} - -// addStruct collates the declarations of a struct. -// -// addStruct fulfills two additional pivotal functions: -// 1. Implement vertex unification (this happens through De Bruijn indices -// combined with proper set up of Environments). -// 2. Implied closedness for definitions. 
-func (n *nodeContext) addStruct( - env *Environment, - s *StructLit, - closeInfo CloseInfo) { - - n.updateCyclicStatus(closeInfo) - - // NOTE: This is a crucial point in the code: - // Unification dereferencing happens here. The child nodes are set to - // an Environment linked to the current node. Together with the De Bruijn - // indices, this determines to which Vertex a reference resolves. - - childEnv := &Environment{ - Up: env, - Vertex: n.node, - } - - s.Init(n.ctx) - - if s.HasEmbed && !s.IsFile() { - closeInfo = closeInfo.SpawnGroup(nil) - } - - parent := n.node.AddStruct(s, childEnv, closeInfo) - closeInfo.IsClosed = false - - parent.Disable = true // disable until processing is done. - - for _, d := range s.Decls { - switch x := d.(type) { - case *Field: - if x.Label.IsString() && x.ArcType == ArcMember { - n.aStruct = s - n.aStructID = closeInfo - } - n.insertField(x.Label, x.ArcType, MakeConjunct(childEnv, x, closeInfo)) - - case *LetField: - arc := n.insertField(x.Label, ArcMember, MakeConjunct(childEnv, x, closeInfo)) - if x.IsMulti { - arc.MultiLet = x.IsMulti - } - - case *DynamicField: - n.aStruct = s - n.aStructID = closeInfo - n.dynamicFields = append(n.dynamicFields, envDynamic{childEnv, x, closeInfo, nil}) - - case *Comprehension: - n.insertComprehension(childEnv, x, closeInfo) - - case Expr: - // add embedding to optional - - // TODO(perf): only do this if addExprConjunct below will result in - // a fieldSet. Otherwise the entry will just be removed next. - id := closeInfo.SpawnEmbed(x) - - c := MakeConjunct(childEnv, x, id) - n.addExprConjunct(c, partial) - - case *BulkOptionalField, *Ellipsis: - // Nothing to do here. Note that the presence of these fields do not - // excluded embedded scalars: only when they match actual fields - // does it exclude those. 
- - default: - panic("unreachable") - } - } - - if !s.HasEmbed { - n.aStruct = s - n.aStructID = closeInfo - } - - parent.Disable = false - -} - -// TODO(perf): if an arc is the only arc with that label added to a Vertex, and -// if there are no conjuncts of optional fields to be added, then the arc could -// be added as is until any of these conditions change. This would allow -// structure sharing in many cases. One should be careful, however, to -// recursively track arcs of previously unified evaluated vertices ot make this -// optimization meaningful. -// -// An alternative approach to avoid evaluating optional arcs (if we take that -// route) is to not recursively evaluate those arcs, even for Finalize. This is -// possible as it is not necessary to evaluate optional arcs to evaluate -// disjunctions. -func (n *nodeContext) insertField(f Feature, mode ArcType, x Conjunct) *Vertex { - ctx := n.ctx - if ctx.isDevVersion() { - return n.insertArc(f, mode, x, x.CloseInfo, true) - } - - arc, isNew := n.node.GetArc(ctx, f, mode) - if f.IsLet() && !isNew { - arc.MultiLet = true - return arc - } - if arc.hasConjunct(x) { - return arc - } - - switch { - case arc.state != nil: - arc.state.addConjunctDynamic(x) - - case arc.IsUnprocessed() || arc.status != finalized: - arc.addConjunctUnchecked(x) - - default: - n.addBottom(&Bottom{ - Code: IncompleteError, - Node: n.node, - Err: ctx.NewPosf(pos(x.Field()), - "cannot add field %s: was already used", - f.SelectorString(ctx)), - }) - } - return arc -} - func (n *nodeContext) insertFieldUnchecked(f Feature, mode ArcType, x Conjunct) *Vertex { - ctx := n.ctx - if ctx.isDevVersion() { - return n.insertArc(f, mode, x, x.CloseInfo, false) - } - - arc, isNew := n.node.GetArc(ctx, f, mode) - if f.IsLet() && !isNew { - arc.MultiLet = true - return arc - } - arc.addConjunctUnchecked(x) - return arc -} - -// expandOne adds dynamic fields to a node until a fixed point is reached. 
-// On each iteration, dynamic fields that cannot resolve due to incomplete -// values are skipped. They will be retried on the next iteration until no -// progress can be made. Note that a dynamic field may add more dynamic fields. -// -// forClauses are processed after all other clauses. A struct may be referenced -// before it is complete, meaning that fields added by other forms of injection -// may influence the result of a for clause _after_ it has already been -// processed. We could instead detect such insertion and feed it to the -// ForClause to generate another entry or have the for clause be recomputed. -// This seems to be too complicated and lead to iffy edge cases. -// TODO(errors): detect when a field is added to a struct that is already used -// in a for clause. -func (n *nodeContext) expandOne(state vertexStatus) (done bool) { - unreachableForDev(n.ctx) - - // Don't expand incomplete expressions if we detected a cycle. - if n.done() || (n.hasAnyCyclicConjunct && !n.hasNonCycle) { - return false - } - - var progress bool - - if progress = n.injectDynamic(); progress { - return true - } - - if progress = n.injectComprehensions(state); progress { - return true - } - - // Do expressions after comprehensions, as comprehensions can never - // refer to embedded scalars, whereas expressions may refer to generated - // fields if we were to allow attributes to be defined alongside - // scalars. - exprs := n.exprs - n.exprs = n.exprs[:0] - for _, x := range exprs { - n.addExprConjunct(x.c, state) - - // collect and or - } - if len(n.exprs) < len(exprs) { - return true - } - - // No progress, report error later if needed: unification with - // disjuncts may resolve this later on. - return false -} - -// injectDynamic evaluates and inserts dynamic declarations. 
-func (n *nodeContext) injectDynamic() (progress bool) { - unreachableForDev(n.ctx) - - ctx := n.ctx - k := 0 - - a := n.dynamicFields - for _, d := range n.dynamicFields { - var f Feature - x := d.field.Key - // Push state to capture and remove errors. - s := ctx.PushState(d.env, x.Source()) - v := ctx.evalState(x, combinedFlags{ - status: finalized, - condition: allKnown, - mode: ignore, - }) - b := ctx.PopState(s) - - if b != nil && b.IsIncomplete() { - d.err, _ = v.(*Bottom) - a[k] = d - k++ - continue - } - if b, _ := v.(*Bottom); b != nil { - n.addValueConjunct(nil, b, d.id) - continue - } - f = ctx.Label(d.field.Key, v) - if f.IsInt() { - n.addErr(ctx.NewPosf(pos(d.field.Key), "integer fields not supported")) - } - n.insertField(f, d.field.ArcType, MakeConjunct(d.env, d.field, d.id)) - } - - progress = k < len(n.dynamicFields) - - n.dynamicFields = a[:k] - - return progress -} - -// addLists evaluates the queued list conjuncts and inserts its arcs into the -// Vertex. -// -// TODO: association arrays: -// If an association array marker was present in a struct, create a struct node -// instead of a list node. In either case, a node may only have list fields -// or struct fields and not both. -// -// addLists should be run after the fixpoint expansion: -// - it enforces that comprehensions may not refer to the list itself -// - there may be no other fields within the list. -// -// TODO(embeddedScalars): for embedded scalars, there should be another pass -// of evaluation expressions after expanding lists. -func (n *nodeContext) addLists(state combinedFlags) (progress bool) { - if len(n.lists) == 0 && len(n.vLists) == 0 { - return false - } - - var oneOfTheLists Expr - var anID CloseInfo - - isOpen := true - max := 0 - var maxNode Expr - - if m, ok := n.node.BaseValue.(*ListMarker); ok { - isOpen = m.IsOpen - max = len(n.node.Arcs) - } - - c := n.ctx - - for _, l := range n.vLists { - // XXX: set hasNonCycle if appropriate. 
- - oneOfTheLists = l - - elems := l.Elems() - isClosed := l.IsClosedList() - - switch { - case len(elems) < max: - if isClosed { - n.invalidListLength(len(elems), max, l, maxNode) - continue - } - - case len(elems) > max: - if !isOpen { - n.invalidListLength(max, len(elems), maxNode, l) - continue - } - isOpen = !isClosed - max = len(elems) - maxNode = l - - case isClosed: - isOpen = false - maxNode = l - } - - for _, a := range elems { - if a.Conjuncts == nil { - n.insertField(a.Label, ArcMember, MakeRootConjunct(nil, a)) - continue - } - for _, c := range a.Conjuncts { - n.insertField(a.Label, ArcMember, c) - } - } - } - -outer: - // updateCyclicStatus may grow the list of values, so we cannot use range. - for i := 0; i < len(n.lists); i++ { - l := n.lists[i] - - n.updateCyclicStatus(l.id) - - if l.self { - n.node.LockArcs = true - } - - index := int64(0) - hasComprehension := false - for j, elem := range l.list.Elems { - switch x := elem.(type) { - case *Comprehension: - err := c.yield(nil, l.env, x, state, func(e *Environment) { - label, err := MakeLabel(x.Source(), index, IntLabel) - n.addErr(err) - index++ - c := MakeConjunct(e, x.Value, l.id) - n.insertField(label, ArcMember, c) - }) - hasComprehension = true - if err != nil { - if err.ForCycle && !l.self { - // The list has a comprehension that refers to the list - // itself. This means we should postpone evaluating this - // list until all other lists have been evaluated. - n.lists[i].ignore = true - l.self = true - n.lists = append(n.lists, l) - } else { - n.addBottom(err) - } - continue outer - } - - case *Ellipsis: - if j != len(l.list.Elems)-1 { - n.addErr(c.Newf("ellipsis must be last element in list")) - } - - n.lists[i].elipsis = x - - default: - label, err := MakeLabel(x.Source(), index, IntLabel) - n.addErr(err) - index++ // TODO: don't use insertField. - n.insertField(label, ArcMember, MakeConjunct(l.env, x, l.id)) - } - - // Terminate early in case of runaway comprehension. 
- if !isOpen && int(index) > max { - n.invalidListLength(max, len(l.list.Elems), maxNode, l.list) - continue outer - } - } - - oneOfTheLists = l.list - anID = l.id - - switch closed := n.lists[i].elipsis == nil; { - case int(index) < max: - if closed { - n.invalidListLength(int(index), max, l.list, maxNode) - continue - } - - case int(index) > max, - closed && isOpen, - (!closed == isOpen) && !hasComprehension: - max = int(index) - maxNode = l.list - isOpen = !closed - } - - n.lists[i].n = index - } - - // add additionalItem values to list and construct optionals. - elems := n.node.Elems() - for _, l := range n.vLists { - if !l.IsClosedList() { - continue - } - - newElems := l.Elems() - if len(newElems) >= len(elems) { - continue // error generated earlier, if applicable. - } - - for _, arc := range elems[len(newElems):] { - l.MatchAndInsert(c, arc) - } - } - - for _, l := range n.lists { - if l.elipsis == nil || l.ignore { - continue - } - - s := l.list.info - if s == nil { - s = &StructLit{Decls: []Decl{l.elipsis}} - s.Init(n.ctx) - l.list.info = s - } - info := n.node.AddStruct(s, l.env, l.id) - - for _, arc := range elems[l.n:] { - info.MatchAndInsert(c, arc) - } - } - - sources := []ast.Expr{} - // Add conjuncts for additional items. - for _, l := range n.lists { - if l.elipsis == nil || l.ignore { - continue - } - if src, _ := l.elipsis.Source().(ast.Expr); src != nil { - sources = append(sources, src) - } - } - - if m, ok := n.node.BaseValue.(*ListMarker); !ok { - n.node.setValue(c, partial, &ListMarker{ - Src: ast.NewBinExpr(token.AND, sources...), - IsOpen: isOpen, - }) - } else { - if m.Src != nil { - sources = append(sources, m.Src) - } - m.Src = ast.NewBinExpr(token.AND, sources...) 
- m.IsOpen = m.IsOpen && isOpen - } - - n.lists = n.lists[:0] - n.vLists = n.vLists[:0] - - n.updateNodeType(ListKind, oneOfTheLists, anID) - - return true + return n.insertArc(f, mode, x, x.CloseInfo, false) } func (n *nodeContext) invalidListLength(na, nb int, a, b Expr) { diff --git a/vendor/cuelang.org/go/internal/core/adt/expr.go b/vendor/cuelang.org/go/internal/core/adt/expr.go index 85b412d712..3e80627549 100644 --- a/vendor/cuelang.org/go/internal/core/adt/expr.go +++ b/vendor/cuelang.org/go/internal/core/adt/expr.go @@ -17,11 +17,12 @@ package adt import ( "bytes" "fmt" - "regexp" + "math/big" "github.com/cockroachdb/apd/v3" "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" ) @@ -43,27 +44,14 @@ type StructLit struct { // TODO: record the merge order somewhere. - // The below fields are redundant to Decls and are computed with Init. + // IsOpen is kept as it's used by ListMarker and some validations + IsOpen bool // has a ... - // field marks the optional conjuncts of all explicit Fields. - // Required Fields are marked as empty - Fields []FieldInfo + // initialized is kept as it's used in the Init method and unify.go + initialized bool - Dynamic []*DynamicField - - // excluded are all literal fields that already exist. - Bulk []*BulkOptionalField - - Additional []*Ellipsis - HasEmbed bool - IsOpen bool // has a ... - initialized bool + // isComprehension is kept as it's used in comprehension.go and conjunct.go isComprehension bool - - types OptionalType - - // administrative fields like hasreferences. 
- // hasReferences bool } func (o *StructLit) IsFile() bool { @@ -71,19 +59,11 @@ func (o *StructLit) IsFile() bool { return ok } -type FieldInfo struct { - Label Feature -} - -func (x *StructLit) HasOptional() bool { - return x.types&(HasPattern|HasAdditional) != 0 -} - func (x *StructLit) Source() ast.Node { return x.Src } -func (x *StructLit) evaluate(c *OpContext, state combinedFlags) Value { +func (x *StructLit) evaluate(c *OpContext, state Flags) Value { e := c.Env(0) - v := c.newInlineVertex(e.Vertex, nil, Conjunct{e, x, c.ci}) + v := c.newInlineVertex(e.DerefVertex(c), nil, Conjunct{e, x, c.ci}) // evaluate may not finalize a field, as the resulting value may be // used in a context where more conjuncts are added. It may also lead // to disjuncts being in a partially expanded state, leading to @@ -114,88 +94,11 @@ func (x *StructLit) evaluate(c *OpContext, state combinedFlags) Value { return v } -// TODO: remove this method -func (o *StructLit) MarkField(f Feature) { - o.Fields = append(o.Fields, FieldInfo{Label: f}) -} - func (o *StructLit) Init(ctx *OpContext) { if o.initialized { return } o.initialized = true - - if ctx.isDevVersion() { - return - } - - for _, d := range o.Decls { - switch x := d.(type) { - case *Field: - if o.fieldIndex(x.Label) < 0 { - o.Fields = append(o.Fields, FieldInfo{Label: x.Label}) - } - if x.ArcType > ArcMember { - o.types |= HasField - } - - case *LetField: - if o.fieldIndex(x.Label) >= 0 { - panic("duplicate let identifier") - } - o.Fields = append(o.Fields, FieldInfo{Label: x.Label}) - - case *DynamicField: - o.Dynamic = append(o.Dynamic, x) - o.types |= HasDynamic - - case Expr: - o.HasEmbed = true - - case *Comprehension: - o.HasEmbed = true - - case *LetClause: - o.HasEmbed = true - - case *BulkOptionalField: - o.Bulk = append(o.Bulk, x) - o.types |= HasPattern - switch x.Filter.(type) { - case *BasicType, *Top: - default: - o.types |= HasComplexPattern - } - - case *Ellipsis: - switch x.Value.(type) { - case nil, *Top: 
- o.IsOpen = true - o.types |= IsOpen - - default: - // TODO: consider only adding for non-top. - o.types |= HasAdditional - } - o.Additional = append(o.Additional, x) - - default: - panic("unreachable") - } - } -} - -func (o *StructLit) fieldIndex(f Feature) int { - for i := range o.Fields { - if o.Fields[i].Label == f { - return i - } - } - return -1 -} - -func (o *StructLit) OptionalTypes() OptionalType { - return o.types } // FIELDS @@ -303,8 +206,6 @@ type ListLit struct { // scalars, comprehensions, ...T Elems []Elem - - info *StructLit // Shared closedness info. } func (x *ListLit) Source() ast.Node { @@ -314,10 +215,10 @@ func (x *ListLit) Source() ast.Node { return x.Src } -func (x *ListLit) evaluate(c *OpContext, state combinedFlags) Value { +func (x *ListLit) evaluate(c *OpContext, state Flags) Value { e := c.Env(0) // Pass conditions but at least set fieldSetKnown. - v := c.newInlineVertex(e.Vertex, nil, Conjunct{e, x, c.ci}) + v := c.newInlineVertex(e.DerefVertex(c), nil, Conjunct{e, x, c.ci}) v.CompleteArcsOnly(c) // TODO(evalv3): evaluating more aggressively yields some improvements, but @@ -375,22 +276,24 @@ type Num struct { func (x *Num) Source() ast.Node { return x.Src } func (x *Num) Kind() Kind { return x.K } -// TODO: do we still need this? -// func (x *Num) Specialize(k Kind) Value { -// k = k & x.K -// if k == x.K { -// return x -// } -// y := *x -// y.K = k -// return &y -// } +func (x *Num) BigInt(z *big.Int) *big.Int { + if x.X.Exponent != 0 { + panic("cue: exponent should always be nil for integer types") + } + if z == nil { + z = &big.Int{} + } + z.Set(x.X.Coeff.MathBigInt()) + if x.X.Negative { + z.Neg(z) + } + return z +} // String is a string value. It can be used as a Value and Expr. 
type String struct { Src ast.Node Str string - RE *regexp.Regexp // only set if needed } func (x *String) Source() ast.Node { return x.Src } @@ -400,7 +303,6 @@ func (x *String) Kind() Kind { return StringKind } type Bytes struct { Src ast.Node B []byte - RE *regexp.Regexp // only set if needed } func (x *Bytes) Source() ast.Node { return x.Src } @@ -419,12 +321,9 @@ func (x *ListMarker) Kind() Kind { return ListKind } func (x *ListMarker) node() {} type StructMarker struct { - // NeedClose is used to signal that the evaluator should close this struct. - // It is only set by the close builtin. - // TODO(evalv3: remove this field. Once we removed this, and also introduced - // open by default lists, we can get rid of StructMarker and ListMarker + // TODO: once we introduce open by default lists, + // we can get rid of StructMarker and ListMarker // in its entirety in favor of using type bit masks. - NeedClose bool } func (x *StructMarker) Source() ast.Node { return nil } @@ -462,17 +361,6 @@ func (x *BasicType) Source() ast.Node { } func (x *BasicType) Kind() Kind { return x.K } -// TODO: do we still need this? -// func (x *BasicType) Specialize(k Kind) Value { -// k = x.K & k -// if k == x.K { -// return x -// } -// y := *x -// y.K = k -// return &y -// } - // TODO: should we use UnaryExpr for Bound now we have BoundValue? // BoundExpr represents an unresolved unary comparator. @@ -492,10 +380,10 @@ func (x *BoundExpr) Source() ast.Node { return x.Src } -func (x *BoundExpr) evaluate(ctx *OpContext, state combinedFlags) Value { +func (x *BoundExpr) evaluate(ctx *OpContext, state Flags) Value { // scalarKnown is used here to ensure we know the value. The result does // not have to be concrete, though. 
- v := ctx.value(x.Expr, combinedFlags{ + v := ctx.value(x.Expr, Flags{ status: partial, condition: scalarKnown, mode: yield, @@ -508,7 +396,7 @@ func (x *BoundExpr) evaluate(ctx *OpContext, state combinedFlags) Value { case IntKind, FloatKind, NumberKind, StringKind, BytesKind: case NullKind, StructKind, ListKind: if x.Op != NotEqualOp && x.Op != EqualOp { - err := ctx.NewPosf(pos(x.Expr), + err := ctx.NewPosf(Pos(x.Expr), "cannot use %s for bound %s", k, x.Op) return &Bottom{ Err: err, @@ -525,7 +413,7 @@ func (x *BoundExpr) evaluate(ctx *OpContext, state combinedFlags) Value { "non-concrete value %s for bound %s", x.Expr, x.Op) return nil } - err := ctx.NewPosf(pos(x.Expr), + err := ctx.NewPosf(Pos(x.Expr), "invalid value %s (type %s) for bound %s", v, k, x.Op) return &Bottom{ Err: err, @@ -540,85 +428,6 @@ func (x *BoundExpr) evaluate(ctx *OpContext, state combinedFlags) Value { return &BoundValue{x.Src, x.Op, v} } - if !ctx.SimplifyValidators { - goto finalCheck - } - - // This simplifies boundary expressions. It is an alternative to an - // evaluation strategy that makes nodes increasingly more specific. - // - // For instance, a completely different implementation would be to allow - // the presence of a concrete value to ignore incomplete errors. - // - // TODO: consider an alternative approach. - switch y := v.(type) { - case *BoundValue: - switch { - case y.Op == NotEqualOp: - switch x.Op { - case LessEqualOp, LessThanOp, GreaterEqualOp, GreaterThanOp: - // <(!=3) => number - // Smaller than an arbitrarily large number is any number. - return &BasicType{K: y.Kind()} - case NotEqualOp: - // !=(!=3) ==> 3 - // Not a value that is anything but a given value is that - // given value. - return y.Value - } - - case x.Op == NotEqualOp: - // Invert if applicable. 
- switch y.Op { - case LessEqualOp: - return &BoundValue{x.Src, GreaterThanOp, y.Value} - case LessThanOp: - return &BoundValue{x.Src, GreaterEqualOp, y.Value} - case GreaterEqualOp: - return &BoundValue{x.Src, LessThanOp, y.Value} - case GreaterThanOp: - return &BoundValue{x.Src, LessEqualOp, y.Value} - } - - case (x.Op == LessThanOp || x.Op == LessEqualOp) && - (y.Op == GreaterThanOp || y.Op == GreaterEqualOp), - (x.Op == GreaterThanOp || x.Op == GreaterEqualOp) && - (y.Op == LessThanOp || y.Op == LessEqualOp): - // <(>=3) - // Something smaller than an arbitrarily large number is any number. - return &BasicType{K: y.Kind()} - - case x.Op == LessThanOp && - (y.Op == LessEqualOp || y.Op == LessThanOp), - x.Op == GreaterThanOp && - (y.Op == GreaterEqualOp || y.Op == GreaterThanOp): - // <(<=x) => <=x - // Less or equal than something that is less than x is less than x. - return y - } - - case *BasicType: - switch x.Op { - case LessEqualOp, LessThanOp, GreaterEqualOp, GreaterThanOp: - // TODO: this does not seem correct and results in some weird - // behavior for bounds. - ctx.addErrf(IncompleteError, token.NoPos, - "non-concrete value %s for bound %s", x.Expr, x.Op) - return nil - } - } - -finalCheck: if v.Concreteness() > Concrete { // TODO(errors): analyze dependencies of x.Expr to get positions. ctx.addErrf(IncompleteError, token.NoPos, // TODO(errors): use ctx.pos()? @@ -748,7 +557,7 @@ func (x *NodeLink) Kind() Kind { } func (x *NodeLink) Source() ast.Node { return x.Node.Source() } -func (x *NodeLink) resolve(c *OpContext, state combinedFlags) *Vertex { +func (x *NodeLink) resolve(c *OpContext, state Flags) *Vertex { return x.Node } @@ -756,9 +565,10 @@ func (x *NodeLink) resolve(c *OpContext, state combinedFlags) *Vertex { // // a type FieldReference struct { - Src *ast.Ident - UpCount int32 - Label Feature + Src *ast.Ident + UpCount int32 + Label Feature + Optional bool // true if this is a ?-marked reference (e.g., a?) 
} func (x *FieldReference) Source() ast.Node { @@ -768,10 +578,31 @@ func (x *FieldReference) Source() ast.Node { return x.Src } -func (x *FieldReference) resolve(c *OpContext, state combinedFlags) *Vertex { +func (x *FieldReference) resolve(c *OpContext, state Flags) *Vertex { n := c.relNode(x.UpCount) - pos := pos(x) - return c.lookup(n, pos, x.Label, state) + pos := Pos(x) + + savedErrs := c.errs + c.errs = nil + defer func() { + c.errs = CombineErrors(c.src, c.errs, savedErrs) + }() + + v := c.lookup(n, pos, x.Label, state) + + return c.checkSkipTry(x.Optional, v) +} + +func (c *OpContext) checkSkipTry(optional bool, arc *Vertex) *Vertex { + if arc != nil { + return arc + } + + if optional && c.errs != nil && c.errs.IsIncomplete() { + c.skipTry = true + } + + return nil } // A ValueReference represents a lexical reference to a value. @@ -782,7 +613,7 @@ func (x *FieldReference) resolve(c *OpContext, state combinedFlags) *Vertex { type ValueReference struct { Src *ast.Ident UpCount int32 - Label Feature // for informative purposes + Label Feature // for informative purposes. } func (x *ValueReference) Source() ast.Node { @@ -792,7 +623,7 @@ func (x *ValueReference) Source() ast.Node { return x.Src } -func (x *ValueReference) resolve(c *OpContext, state combinedFlags) *Vertex { +func (x *ValueReference) resolve(c *OpContext, state Flags) *Vertex { if x.UpCount == 0 { return c.vertex } @@ -819,7 +650,7 @@ func (x *LabelReference) Source() ast.Node { return x.Src } -func (x *LabelReference) evaluate(ctx *OpContext, state combinedFlags) Value { +func (x *LabelReference) evaluate(ctx *OpContext, state Flags) Value { label := ctx.relLabel(x.UpCount) if label == 0 { // There is no label. 
This may happen if a LabelReference is evaluated @@ -863,7 +694,7 @@ func (x *DynamicReference) Source() ast.Node { func (x *DynamicReference) EvaluateLabel(ctx *OpContext, env *Environment) Feature { env = env.up(ctx, x.UpCount) frame := ctx.PushState(env, x.Src) - v := ctx.value(x.Label, combinedFlags{ + v := ctx.value(x.Label, Flags{ status: partial, condition: scalarKnown, mode: yield, @@ -872,17 +703,17 @@ func (x *DynamicReference) EvaluateLabel(ctx *OpContext, env *Environment) Featu return ctx.Label(x, v) } -func (x *DynamicReference) resolve(ctx *OpContext, state combinedFlags) *Vertex { +func (x *DynamicReference) resolve(ctx *OpContext, state Flags) *Vertex { e := ctx.Env(x.UpCount) frame := ctx.PushState(e, x.Src) - v := ctx.value(x.Label, combinedFlags{ + v := ctx.value(x.Label, Flags{ status: partial, condition: scalarKnown, mode: yield, }) ctx.PopState(frame) f := ctx.Label(x.Label, v) - return ctx.lookup(e.Vertex, pos(x), f, state) + return ctx.lookup(e.DerefVertex(ctx), Pos(x), f, state) } // An ImportReference refers to an imported package. @@ -895,7 +726,10 @@ func (x *DynamicReference) resolve(ctx *OpContext, state combinedFlags) *Vertex type ImportReference struct { Src *ast.Ident ImportPath Feature - Label Feature // for informative purposes + // Instance holds the build instance that the import + // resolves to. This is nil for standard library imports. 
+ Instance *build.Instance + Label Feature // for informative purposes } func (x *ImportReference) Source() ast.Node { @@ -905,11 +739,16 @@ func (x *ImportReference) Source() ast.Node { return x.Src } -func (x *ImportReference) resolve(ctx *OpContext, state combinedFlags) *Vertex { - path := x.ImportPath.StringValue(ctx) - v := ctx.Runtime.LoadImport(path) +func (x *ImportReference) resolve(ctx *OpContext, state Flags) *Vertex { + var v *Vertex + if x.Instance != nil { + v = ctx.Runtime.LoadInstance(x.Instance) + } else { + v = ctx.Runtime.LoadBuiltin(x.ImportPath.StringValue(ctx)) + } if v == nil { - ctx.addErrf(EvalError, x.Src.Pos(), "cannot find package %q", path) + ctx.addErrf(EvalError, x.Src.Pos(), "cannot find package %q", + x.ImportPath.StringValue(ctx)) } return v } @@ -933,18 +772,14 @@ func (x *LetReference) Source() ast.Node { return x.Src } -func (x *LetReference) resolve(ctx *OpContext, state combinedFlags) *Vertex { +func (x *LetReference) resolve(ctx *OpContext, state Flags) *Vertex { e := ctx.Env(x.UpCount) - n := e.Vertex // No need to Unify n, as Let references can only result from evaluating // an expression within n, in which case evaluation must already have // started. - if n.status < evaluating && !ctx.isDevVersion() { - panic("unexpected node state < Evaluating") - } - arc := ctx.lookup(n, pos(x), x.Label, state) + arc := ctx.lookup(e.DerefVertex(ctx), Pos(x), x.Label, state) if arc == nil { return nil } @@ -965,13 +800,17 @@ func (x *LetReference) resolve(ctx *OpContext, state combinedFlags) *Vertex { // In other words, a Vertex is not necessarily erroneous when a let // field contained in that Vertex is erroneous. + // NOTE: eval v2 used to finalize here. // We should only partly finalize the result here as it is not safe to // finalize any references made by the let. 
- if !ctx.isDevVersion() { - arc.Finalize(ctx) - } + b := arc.Bottom() - if !arc.MultiLet && (b == nil || isCyclePlaceholder(b)) { + // Check if the arc is currently being evaluated to prevent infinite + // recursion when a let references itself through a field selector. + // If the arc has a running state, we must use the cache mechanism + // to properly detect and handle cycles. + arcState := arc.getState(ctx) + if !arc.MultiLet && ((b == nil && arcState == nil) || isCyclePlaceholder(b)) { return arc } @@ -986,7 +825,7 @@ func (x *LetReference) resolve(ctx *OpContext, state combinedFlags) *Vertex { // ensure that Comprehensions, which may be wrapped in ConjunctGroups, // are eliminated. _, isGroup := expr.(*ConjunctGroup) - ctx.Assertf(pos(expr), !isGroup, "unexpected number of expressions") + ctx.Assertf(Pos(expr), !isGroup, "unexpected number of expressions") // TODO(mem): add counter for let cache usage. key := cacheKey{expr, arc} @@ -1008,22 +847,17 @@ func (x *LetReference) resolve(ctx *OpContext, state combinedFlags) *Vertex { } v = n e.cache[key] = n - if ctx.isDevVersion() { - // TODO(mem): enable again once we implement memory management. - // nc := n.getState(ctx) - // TODO: unlike with the old evaluator, we do not allow the first - // cycle to be skipped. Doing so can lead to hanging evaluation. - // As the cycle detection works slightly differently in the new - // evaluator (and is not entirely completed), this can happen. We - // should revisit this once we have completed the structural cycle - // detection. - // nc.hasNonCycle = true - // Allow a first cycle to be skipped. - // nc.free() - } else { - nc := n.getNodeContext(ctx, 0) - nc.hasNonCycle = true // Allow a first cycle to be skipped. - } + // TODO(mem): enable again once we implement memory management. + // nc := n.getState(ctx) + // TODO: unlike with the old evaluator, we do not allow the first + // cycle to be skipped. Doing so can lead to hanging evaluation. 
+ // As the cycle detection works slightly differently in the new + // evaluator (and is not entirely completed), this can happen. We + // should revisit this once we have completed the structural cycle + // detection. + // nc.hasNonCycle = true + // Allow a first cycle to be skipped. + // nc.free() // Parents cannot add more conjuncts to a let expression, so set of // conjuncts is always complete. @@ -1042,10 +876,12 @@ func (x *LetReference) resolve(ctx *OpContext, state combinedFlags) *Vertex { // A SelectorExpr looks up a fixed field in an expression. // // a.sel +// a.sel? (optional - returns OptionalUndefined if field doesn't exist) type SelectorExpr struct { - Src *ast.SelectorExpr - X Expr - Sel Feature + Src *ast.SelectorExpr + X Expr + Sel Feature + Optional bool // true if selector has ? suffix (e.g., foo.bar?) } func (x *SelectorExpr) Source() ast.Node { @@ -1055,8 +891,8 @@ func (x *SelectorExpr) Source() ast.Node { return x.Src } -func (x *SelectorExpr) resolve(c *OpContext, state combinedFlags) *Vertex { - n := c.node(x, x.X, x.Sel.IsRegular(), combinedFlags{ +func (x *SelectorExpr) resolve(c *OpContext, state Flags) *Vertex { + n := c.node(x, x.X, x.Sel.IsRegular(), Flags{ status: partial, condition: needFieldSetKnown, mode: yield, @@ -1064,27 +900,31 @@ func (x *SelectorExpr) resolve(c *OpContext, state combinedFlags) *Vertex { if n == emptyNode { return n } - if n.status == partial && !c.isDevVersion() { - if b := n.state.incompleteErrors(false); b != nil && b.Code < CycleError { - c.AddBottom(b) - return n - } - } // TODO(eval): dynamic nodes should be fully evaluated here as the result // will otherwise be discarded and there will be no other chance to check // the struct is valid. 
+ savedErrs := c.errs + c.errs = nil + defer func() { + c.errs = CombineErrors(c.src, c.errs, savedErrs) + }() + pos := x.Src.Sel.Pos() - return c.lookup(n, pos, x.Sel, state) + result := c.lookup(n, pos, x.Sel, state) + + return c.checkSkipTry(x.Optional, result) } // IndexExpr is like a selector, but selects an index. // // a[index] +// a[index]? (optional - returns OptionalUndefined if index doesn't exist) type IndexExpr struct { - Src *ast.IndexExpr - X Expr - Index Expr + Src *ast.IndexExpr + X Expr + Index Expr + Optional bool // true if index has ? suffix (e.g., foo[0]?) } func (x *IndexExpr) Source() ast.Node { @@ -1094,14 +934,14 @@ func (x *IndexExpr) Source() ast.Node { return x.Src } -func (x *IndexExpr) resolve(ctx *OpContext, state combinedFlags) *Vertex { +func (x *IndexExpr) resolve(ctx *OpContext, state Flags) *Vertex { // TODO: support byte index. - n := ctx.node(x, x.X, true, combinedFlags{ + n := ctx.node(x, x.X, true, Flags{ status: partial, condition: needFieldSetKnown, mode: yield, }) - i := ctx.value(x.Index, combinedFlags{ + i := ctx.value(x.Index, Flags{ status: partial, condition: scalarKnown, mode: yield, @@ -1109,12 +949,6 @@ func (x *IndexExpr) resolve(ctx *OpContext, state combinedFlags) *Vertex { if n == emptyNode { return n } - if n.status == partial && !ctx.isDevVersion() { - if b := n.state.incompleteErrors(false); b != nil && b.Code < CycleError { - ctx.AddBottom(b) - return n - } - } // TODO(eval): dynamic nodes should be fully evaluated here as the result // will otherwise be discarded and there will be no other chance to check // the struct is valid. @@ -1129,8 +963,18 @@ func (x *IndexExpr) resolve(ctx *OpContext, state combinedFlags) *Vertex { if ctx.errs != nil { return nil } + + // TODO: uncomment once above code can be removed. 
+ // savedErrs := ctx.errs + // ctx.errs = nil + // defer func() { + // ctx.errs = CombineErrors(ctx.src, ctx.errs, savedErrs) + // }() + pos := x.Src.Index.Pos() - return ctx.lookup(n, pos, f, state) + result := ctx.lookup(n, pos, f, state) + + return ctx.checkSkipTry(x.Optional, result) } // A SliceExpr represents a slice operation. (Not currently in spec.) @@ -1151,10 +995,10 @@ func (x *SliceExpr) Source() ast.Node { return x.Src } -func (x *SliceExpr) evaluate(c *OpContext, state combinedFlags) Value { +func (x *SliceExpr) evaluate(c *OpContext, state Flags) Value { // TODO: strides - v := c.value(x.X, combinedFlags{ + v := c.value(x.X, Flags{ status: partial, condition: fieldSetKnown, mode: yield, @@ -1175,14 +1019,14 @@ func (x *SliceExpr) evaluate(c *OpContext, state combinedFlags) Value { hi = uint64(len(v.Arcs)) ) if x.Lo != nil { - lo = c.uint64(c.value(x.Lo, combinedFlags{ + lo = c.uint64(c.value(x.Lo, Flags{ status: partial, condition: scalarKnown, mode: yield, }), as) } if x.Hi != nil { - hi = c.uint64(c.value(x.Hi, combinedFlags{ + hi = c.uint64(c.value(x.Hi, Flags{ status: partial, condition: scalarKnown, mode: yield, @@ -1227,14 +1071,14 @@ func (x *SliceExpr) evaluate(c *OpContext, state combinedFlags) Value { hi = uint64(len(v.B)) ) if x.Lo != nil { - lo = c.uint64(c.value(x.Lo, combinedFlags{ + lo = c.uint64(c.value(x.Lo, Flags{ status: partial, condition: scalarKnown, mode: yield, }), as) } if x.Hi != nil { - hi = c.uint64(c.value(x.Hi, combinedFlags{ + hi = c.uint64(c.value(x.Hi, Flags{ status: partial, condition: scalarKnown, mode: yield, @@ -1271,10 +1115,10 @@ func (x *Interpolation) Source() ast.Node { return x.Src } -func (x *Interpolation) evaluate(c *OpContext, state combinedFlags) Value { +func (x *Interpolation) evaluate(c *OpContext, state Flags) Value { buf := bytes.Buffer{} for _, e := range x.Parts { - v := c.value(e, combinedFlags{ + v := c.value(e, Flags{ status: partial, condition: scalarKnown, mode: yield, @@ -1289,16 +1133,16 
@@ func (x *Interpolation) evaluate(c *OpContext, state combinedFlags) Value { err = &Bottom{ Code: err.Code, Node: c.vertex, - Err: errors.Wrapf(err.Err, pos(x), "invalid interpolation"), + Err: errors.Wrapf(err.Err, Pos(x), "invalid interpolation"), } // c.AddBottom(err) // return nil return err } if x.K == BytesKind { - return &Bytes{x.Src, buf.Bytes(), nil} + return &Bytes{x.Src, buf.Bytes()} } - return &String{x.Src, buf.String(), nil} + return &String{x.Src, buf.String()} } // UnaryExpr is a unary expression. @@ -1318,11 +1162,11 @@ func (x *UnaryExpr) Source() ast.Node { return x.Src } -func (x *UnaryExpr) evaluate(c *OpContext, state combinedFlags) Value { +func (x *UnaryExpr) evaluate(c *OpContext, state Flags) Value { if !c.concreteIsPossible(x.Op, x.X) { return nil } - v := c.value(x.X, combinedFlags{ + v := c.value(x.X, Flags{ status: partial, condition: scalarKnown, mode: yield, @@ -1353,12 +1197,12 @@ func (x *UnaryExpr) evaluate(c *OpContext, state combinedFlags) Value { case NotOp: if v, ok := v.(*Bool); ok { - return &Bool{x.Src, !v.B} + return c.NewBool(!v.B) } expectedKind = BoolKind } if k&expectedKind != BottomKind { - c.addErrf(IncompleteError, pos(x.X), + c.addErrf(IncompleteError, Pos(x.X), "operand %s of '%s' not concrete (was %s)", x.X, op, k) return nil } @@ -1383,7 +1227,7 @@ func (x *BinaryExpr) Source() ast.Node { return x.Src } -func (x *BinaryExpr) evaluate(c *OpContext, state combinedFlags) Value { +func (x *BinaryExpr) evaluate(c *OpContext, state Flags) Value { env := c.Env(0) if x.Op == AndOp { v := c.newInlineVertex(nil, nil, makeAnonymousConjunct(env, x, c.ci.Refs)) @@ -1395,7 +1239,8 @@ func (x *BinaryExpr) evaluate(c *OpContext, state combinedFlags) Value { // to the required state. If the struct is already dynamic, we will // evaluate the struct regardless to ensure that cycle reporting // keeps working. 
- if (c.inDetached == 0 && env.Vertex.IsDynamic) || c.inValidator > 0 { + envVertex := env.DerefVertex(c) + if (c.inDetached == 0 && envVertex != nil && envVertex.IsDynamic) || c.inValidator > 0 { v.Finalize(c) } else { v.CompleteArcsOnly(c) @@ -1435,7 +1280,27 @@ func (x *BinaryExpr) evaluate(c *OpContext, state combinedFlags) Value { return BinOp(c, x, x.Op, left, right) } -func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, flags combinedFlags) (r Value) { +// OpenExpr represents the ... operator to disable typo checking. +// +// #A... +type OpenExpr struct { + Src *ast.PostfixExpr + X Expr +} + +func (x *OpenExpr) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *OpenExpr) evaluate(c *OpContext, state Flags) Value { + c.ci.Opened = true + return c.evalState(x.X, state) +} + +func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, flags Flags) (r Value) { state := flags.status s := c.PushState(env, src) @@ -1443,23 +1308,15 @@ func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, flag match := op != EqualOp // non-error case c.inValidator++ - // NOTE: https://cuelang.org/cl/1208898 broke an evalv2 user on Unity. - // For the time being, reverting the change just for evalv2 allows them - // to continue using the old evaluator on the latest CUE version. - if c.isDevVersion() { - // Note that evalState may call yield, so we need to balance the counter - // with a defer. - defer func() { c.inValidator-- }() - } - req := combinedFlags{ + // Note that evalState may call yield, so we need to balance the counter + // with a defer. 
+ defer func() { c.inValidator-- }() + req := Flags{ status: state, condition: needTasksDone, mode: finalize, } v := c.evalState(x, req) - if !c.isDevVersion() { - c.inValidator-- - } u, _ := c.getDefault(v) u = Unwrap(u) @@ -1483,7 +1340,7 @@ func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, flag return nil case IncompleteError: - c.evalState(x, combinedFlags{ + c.evalState(x, Flags{ status: finalized, condition: allKnown, mode: ignore, @@ -1506,37 +1363,14 @@ func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, flag // if c.hasDepthCycle(v) { - // Eval V3 logic - c.verifyNonMonotonicResult(env, x, true) - match = op == EqualOp - break - } - if v.status == evaluatingArcs { - unreachableForDev(c) // Eval V2 logic - - // We have a cycle, which may be an error. Cycle errors may occur - // in chains that are themselves not a cycle. It suffices to check - // for non-monotonic results at the end for this particular path. - // TODO(perf): finding the right path through such comprehensions - // may be expensive. Finding a path in a directed graph is O(n), - // though, so we should ensure that the implementation conforms to - // this. c.verifyNonMonotonicResult(env, x, true) match = op == EqualOp break } + v.Finalize(c) switch { - case !v.IsDefined(c): - c.verifyNonMonotonicResult(env, x, true) // TODO: remove? - - // TODO: mimic comparison to bottom semantics. If it is a valid - // value, check for concreteness that this level only. This - // should ultimately be replaced with an exists and valid - // builtin. - match = op == EqualOp - case isFinalError(v): // Need to recursively check for errors, so we need to evaluate the // Vertex in case it hadn't been evaluated yet. 
@@ -1560,7 +1394,7 @@ func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, flag match = op == EqualOp } - c.evalState(x, combinedFlags{ + c.evalState(x, Flags{ status: state, condition: needTasksDone, mode: yield, @@ -1568,7 +1402,7 @@ func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, flag } c.PopState(s) - return &Bool{src, match} + return c.NewBool(match) } func isFinalError(n *Vertex) bool { @@ -1585,7 +1419,7 @@ func isFinalError(n *Vertex) bool { // change after the fact. // expectError indicates whether the value should evaluate to an error or not. func (c *OpContext) verifyNonMonotonicResult(env *Environment, x Expr, expectError bool) { - if n := env.Vertex.state; n != nil { + if n := env.DerefVertex(c).state; n != nil { n.postChecks = append(n.postChecks, envCheck{ env: env, expr: x, @@ -1611,13 +1445,13 @@ func (x *CallExpr) Source() ast.Node { return x.Src } -func (x *CallExpr) evaluate(c *OpContext, state combinedFlags) Value { - call := &CallContext{ +func (x *CallExpr) evaluate(c *OpContext, state Flags) Value { + call := CallContext{ ctx: c, call: x, } - fun := c.value(x.Fun, combinedFlags{ + fun := c.value(x.Fun, Flags{ status: partial, condition: concreteKnown, mode: yield, @@ -1626,7 +1460,7 @@ func (x *CallExpr) evaluate(c *OpContext, state combinedFlags) Value { case *Builtin: call.builtin = f if f.RawFunc != nil { - if !call.builtin.checkArgs(c, pos(x), len(x.Args)) { + if !call.builtin.checkArgs(c, Pos(x), len(x.Args)) { return nil } return f.RawFunc(call) @@ -1649,7 +1483,11 @@ func (x *CallExpr) evaluate(c *OpContext, state combinedFlags) Value { } default: - c.AddErrf("cannot call non-function %s (type %s)", x.Fun, kind(fun)) + if !IsConcrete(fun) && fun.Kind()&FuncKind != 0 { + c.addErrf(IncompleteError, Pos(x.Fun), "cannot call non-concrete value %s (type %s)", x.Fun, kind(fun)) + } else { + c.AddErrf("cannot call non-function %s (type %s)", x.Fun, kind(fun)) + } return nil } @@ -1663,7 
+1501,7 @@ func (x *CallExpr) evaluate(c *OpContext, state combinedFlags) Value { c.ci.FromEmbed = saved.FromEmbed }() - args := []Value{} + args := make([]Value, 0, len(x.Args)) for i, a := range x.Args { saved := c.errs c.errs = nil @@ -1673,7 +1511,7 @@ func (x *CallExpr) evaluate(c *OpContext, state combinedFlags) Value { cond := state.condition var expr Value if call.builtin.NonConcrete { - state = combinedFlags{ + state = Flags{ status: state.status, condition: cond, mode: runMode, @@ -1689,7 +1527,7 @@ func (x *CallExpr) evaluate(c *OpContext, state combinedFlags) Value { if runMode == finalize { cond |= disjunctionTask } - state = combinedFlags{ + state = Flags{ status: state.status, condition: cond, mode: runMode, @@ -1702,7 +1540,7 @@ func (x *CallExpr) evaluate(c *OpContext, state combinedFlags) Value { if c.errs == nil { // There SHOULD be an error in the context. If not, we generate // one. - c.Assertf(pos(x.Fun), c.HasErr(), + c.Assertf(Pos(x.Fun), c.HasErr(), "argument %d to function %s is incomplete", i, x.Fun) } @@ -1726,7 +1564,7 @@ func (x *CallExpr) evaluate(c *OpContext, state combinedFlags) Value { if result == nil { return nil } - v, ci := c.evalStateCI(result, combinedFlags{status: partial, condition: state.condition, mode: state.mode}) + v, ci := c.evalStateCI(result, Flags{status: partial, condition: state.condition, mode: state.mode}) c.ci = ci return v } @@ -1741,7 +1579,7 @@ type Builtin struct { // arguments. By default, all arguments are checked to be concrete. NonConcrete bool - Func func(call *CallContext) Expr + Func func(call CallContext) Expr // RawFunc gives low-level control to CUE's internals for builtins. // It should be used when fine control over the evaluation process is @@ -1751,7 +1589,7 @@ type Builtin struct { // the Context. // // TODO: consider merging Func and RawFunc into a single field again. 
- RawFunc func(call *CallContext) Value + RawFunc func(call CallContext) Value // Added indicates as of which language version this builtin can be used. Added string @@ -1835,7 +1673,7 @@ func (x *Builtin) checkArgs(c *OpContext, p token.Pos, numArgs int) bool { return true } -func (x *Builtin) call(call *CallContext) Expr { +func (x *Builtin) call(call CallContext) Expr { c := call.ctx p := call.Pos() @@ -1859,7 +1697,7 @@ func (x *Builtin) call(call *CallContext) Expr { if b != nil { code = b.Code } - c.addErrf(code, pos(a), + c.addErrf(code, Pos(a), "cannot use %s (type %s) as %s in argument %d to %v", a, k, x.Params[i].Kind(), i+1, fun) return nil @@ -1871,7 +1709,7 @@ func (x *Builtin) call(call *CallContext) Expr { n := c.newInlineVertex(nil, nil, Conjunct{env, x, c.ci}) n.Finalize(c) if n.IsErr() { - c.addErrf(0, pos(a), + c.addErrf(0, Pos(a), "cannot use %s as %s in argument %d to %v", a, v, i+1, fun) return nil @@ -1918,13 +1756,6 @@ func (x *BuiltinValidator) Source() ast.Node { return x.Src.Source() } -func (x *BuiltinValidator) Pos() token.Pos { - if src := x.Source(); src != nil { - return src.Pos() - } - return token.NoPos -} - func (x *BuiltinValidator) Kind() Kind { return x.Builtin.Params[0].Kind() } @@ -1934,7 +1765,7 @@ func (x *BuiltinValidator) validate(c *OpContext, v Value) *Bottom { args[0] = v copy(args[1:], x.Args) - call := &CallContext{ + call := CallContext{ ctx: c, call: x.Src, builtin: x.Builtin, @@ -1945,7 +1776,7 @@ func (x *BuiltinValidator) validate(c *OpContext, v Value) *Bottom { return validateWithBuiltin(call) } -func validateWithBuiltin(call *CallContext) *Bottom { +func validateWithBuiltin(call CallContext) *Bottom { var severeness ErrorCode var err errors.Error @@ -2011,7 +1842,7 @@ func validateWithBuiltin(call *CallContext) *Bottom { } } -// A Disjunction represents a disjunction, where each disjunct may or may not +// A DisjunctionExpr represents a disjunction, where each disjunct may or may not // be marked as a 
default. type DisjunctionExpr struct { Src *ast.BinaryExpr @@ -2033,7 +1864,7 @@ func (x *DisjunctionExpr) Source() ast.Node { return x.Src } -func (x *DisjunctionExpr) evaluate(c *OpContext, state combinedFlags) Value { +func (x *DisjunctionExpr) evaluate(c *OpContext, state Flags) Value { e := c.Env(0) v := c.newInlineVertex(nil, nil, Conjunct{e, x, c.ci}) v.Finalize(c) // TODO: also partial okay? @@ -2058,8 +1889,8 @@ func (x *Conjunction) Kind() Kind { return k } -// A disjunction is a disjunction of values. It is the result of expanding -// a DisjunctionExpr if the expression cannot be represented as a single value. +// A Disjunction is a disjunction of values. It is the result of expanding +// a [DisjunctionExpr] if the expression cannot be represented as a single value. type Disjunction struct { Src ast.Expr @@ -2096,6 +1927,10 @@ type Comprehension struct { // information as possible. Value Node + // Fallback is the optional else clause that is yielded when the comprehension + // produces zero values. + Fallback *StructLit + // The type of field as which the comprehension is added. arcType ArcType @@ -2159,7 +1994,7 @@ func (x *ForClause) Source() ast.Node { } func (c *OpContext) forSource(x Expr) *Vertex { - state := combinedFlags{ + state := Flags{ status: conjuncts, condition: needFieldSetKnown, mode: attemptOnly, @@ -2172,14 +2007,14 @@ func (c *OpContext) forSource(x Expr) *Vertex { c.inDetached-- node, ok := v.(*Vertex) - if ok && c.isDevVersion() { + if ok { // We do not request to "yield" here, but rather rely on the // call-by-need behavior in combination with the freezing mechanism. // TODO: this seems a bit fragile. At some point we need to make this // more robust by moving to a pure call-by-need mechanism, for instance. 
// TODO: using attemptOnly here will remove the cyclic reference error // of comprehension.t1.ok (which also errors in V2), - node.unify(c, state.condition, finalize, true) + node.unify(c, Flags{condition: state.condition, mode: finalize, checkTypos: true}) } v, ok = c.getDefault(v) @@ -2200,7 +2035,7 @@ func (c *OpContext) forSource(x Expr) *Vertex { switch nv := v.(type) { case nil: - c.addErrf(IncompleteError, pos(x), + c.addErrf(IncompleteError, Pos(x), "cannot range over %s (incomplete)", x) return emptyNode @@ -2218,27 +2053,25 @@ func (c *OpContext) forSource(x Expr) *Vertex { default: if kind := v.Kind(); kind&(StructKind|ListKind) != 0 { - c.addErrf(IncompleteError, pos(x), + c.addErrf(IncompleteError, Pos(x), "cannot range over %s (incomplete type %s)", x, kind) return emptyNode } else if !ok { - c.addErrf(0, pos(x), // TODO(error): better message. + c.addErrf(0, Pos(x), // TODO(error): better message. "cannot range over %s (found %s, want list or struct)", x.Source(), v.Kind()) return emptyNode } } - if c.isDevVersion() { - kind := v.Kind() - // At this point it is possible that the Vertex represents an incomplete - // struct or list, which is the case if it may be struct or list, but - // is also at least some other type, such as is the case with top. - if kind&(StructKind|ListKind) != 0 && kind != StructKind && kind != ListKind { - c.addErrf(IncompleteError, pos(x), - "cannot range over %s (incomplete type %s)", x, kind) - return emptyNode - } + kind := v.Kind() + // At this point it is possible that the Vertex represents an incomplete + // struct or list, which is the case if it may be struct or list, but + // is also at least some other type, such as is the case with top. 
+ if kind&(StructKind|ListKind) != 0 && kind != StructKind && kind != ListKind { + c.addErrf(IncompleteError, Pos(x), + "cannot range over %s (incomplete type %s)", x, kind) + return emptyNode } return node @@ -2246,27 +2079,11 @@ func (c *OpContext) forSource(x Expr) *Vertex { func (x *ForClause) yield(s *compState) { c := s.ctx + env := c.Env(0) n := c.forSource(x.Src) - if c.isDevVersion() { - if s := n.getState(c); s != nil { - s.freeze(fieldSetKnown) - } - } else { - if n.status == evaluating && !n.LockArcs { - c.AddBottom(&Bottom{ - Code: CycleError, - ForCycle: true, - Value: n, - Node: n, - Err: errors.Newf(pos(x.Src), "comprehension source references itself"), - }) - return - } - if c.HasErr() { - return - } - n.LockArcs = true + if s := n.getState(c); s != nil { + s.freeze(fieldSetKnown) } for _, a := range n.Arcs { @@ -2274,18 +2091,9 @@ func (x *ForClause) yield(s *compState) { continue } - if c.isDevVersion() { - // TODO(evalv3): See comment in StructLit.evaluate. - if state := a.getState(c); state != nil { - state.process(arcTypeKnown, attemptOnly) - } - } else { - if !a.isDefined() { - a.Finalize(c) - } - if !a.definitelyExists() { - continue - } + // See comment in StructLit.evaluate. + if state := a.getState(c); state != nil { + state.process(arcTypeKnown, attemptOnly) } switch a.ArcType { @@ -2297,8 +2105,18 @@ func (x *ForClause) yield(s *compState) { continue } - n := &Vertex{ - Parent: c.Env(0).Vertex, + // "for" clauses tend to yield many values; + // group allocations with the same lifetime here + // for the sake of reducing the runtime overhead. 
+ alloc := struct { + v0, v1, v2 Vertex + arcs [2]*Vertex + env Environment + }{} + + n := &alloc.v0 + *n = Vertex{ + Parent: env.DerefVertex(c), // Using Finalized here ensures that no nodeContext is allocated, // preventing a leak, as this "helper" struct bypasses normal @@ -2307,10 +2125,13 @@ func (x *ForClause) yield(s *compState) { IsDynamic: true, anonymous: true, ArcType: ArcMember, + + Arcs: alloc.arcs[:0], } if x.Value != InvalidLabel { - b := &Vertex{ + b := &alloc.v1 + *b = Vertex{ Label: x.Value, BaseValue: a, IsDynamic: true, @@ -2321,18 +2142,30 @@ func (x *ForClause) yield(s *compState) { } if x.Key != InvalidLabel { - v := &Vertex{ + v := &alloc.v2 + *v = Vertex{ Label: x.Key, IsDynamic: true, anonymous: true, } - key := a.Label.ToValue(c) - v.AddConjunct(MakeRootConjunct(c.Env(0), key)) + var key Value + if a.Label.IsString() { + key = &String{Src: c.src, Str: c.IndexToString(a.Label.safeIndex())} + } else { + num := &Num{Src: c.src, K: IntKind} + num.X.SetInt64(int64(a.Label.Index())) + key = num + } + v.AddConjunct(MakeRootConjunct(env, key)) v.SetValue(c, key) n.Arcs = append(n.Arcs, v) } - sub := c.spawn(n) + sub := &alloc.env + *sub = Environment{ + Up: env, + Vertex: n, + } if !s.yield(sub) { break } @@ -2357,7 +2190,7 @@ func (x *IfClause) Source() ast.Node { func (x *IfClause) yield(s *compState) { ctx := s.ctx - if ctx.BoolValue(ctx.value(x.Condition, combinedFlags{ + if ctx.BoolValue(ctx.value(x.Condition, Flags{ status: s.state, condition: scalarKnown, mode: yield, @@ -2395,3 +2228,78 @@ func (x *LetClause) yield(s *compState) { s.yield(c.spawn(n)) } + +// A TryClause represents a try clause in a comprehension. +// It evaluates its body and yields if successful. If a ?-marked reference +// fails due to an undefined optional field, the try clause discards silently. +// Other errors propagate normally. +// +// try { ... } +// +// TryClause represents a try clause in a comprehension. 
+// It can have two forms: +// - try { struct } - Value is set, Label/Expr are zero/nil +// - try x = expr - Label/Expr are set, Value is nil +type TryClause struct { + Src *ast.TryClause + Label Feature // identifier for assignment form (InvalidLabel for struct form) + Expr Expr // expression for assignment form (nil for struct form) + // Struct form: body is in Comprehension.Value +} + +func (x *TryClause) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *TryClause) yield(s *compState) { + c := s.ctx + env := c.e + + // Pre-evaluate the try body to detect OptionalUndefined errors from + // ?-marked references. If any ?-marked reference fails, the try block + // is discarded and the else clause (if present) runs. + // + // Final (non-incomplete) errors are reported immediately as an optimization, + // since they would be encountered during re-evaluation anyway. + + // Save state + savedSkipTry := c.skipTry + c.skipTry = false + defer func() { c.skipTry = savedSkipTry }() + + // TODO(perf): we could capture "final" errors and bail out processing of + // the try expression early. + + var expr Expr + if x.Expr != nil { + expr = x.Expr + } else { + // Struct form: body is in Comprehension.Value + expr = s.comp.Value.(Expr) + } + + v := c.newInlineVertex(env.DerefVertex(c), nil, Conjunct{env, expr, c.ci}) + v.Finalize(c) + + // If any ?-marked reference failed, don't yield - else clause runs. + // Check for OptionalUndefined - set by ?-marked references that fail. + if c.skipTry { + return + } + + // Success - yield with fresh conjuncts (will be re-evaluated). 
+ if x.Expr != nil { + n := &Vertex{Arcs: []*Vertex{{ + Label: x.Label, + IsDynamic: true, + anonymous: true, + Conjuncts: []Conjunct{{c.Env(0), x.Expr, c.ci}}, + }}} + s.yield(c.spawn(n)) + } else { + s.yield(env) + } +} diff --git a/vendor/cuelang.org/go/internal/core/adt/feature.go b/vendor/cuelang.org/go/internal/core/adt/feature.go index 5b9528e3a0..a8938c2b9c 100644 --- a/vendor/cuelang.org/go/internal/core/adt/feature.go +++ b/vendor/cuelang.org/go/internal/core/adt/feature.go @@ -23,7 +23,6 @@ import ( "cuelang.org/go/cue/errors" "cuelang.org/go/cue/literal" "cuelang.org/go/cue/token" - "cuelang.org/go/internal" ) // A Feature is an encoded form of a label which comprises a compact @@ -76,14 +75,14 @@ func (f Feature) SelectorString(index StringIndexer) string { } return strconv.Itoa(int(x)) case StringLabel: - s := index.IndexToString(x) - if ast.IsValidIdent(s) && !internal.IsDefOrHidden(s) { - return s - } if f == AnyString { return "_" } - return literal.Label.Quote(s) + s := index.IndexToString(x) + if ast.StringLabelNeedsQuoting(s) { + return literal.Label.Quote(s) + } + return s default: return f.IdentString(index) } @@ -240,7 +239,7 @@ func LabelFromValue(c *OpContext, src Expr, v Value) Feature { case IntKind, NumberKind: x, _ := Unwrap(v).(*Num) if x == nil { - c.addErrf(IncompleteError, pos(v), msgGround, v, "int") + c.addErrf(IncompleteError, Pos(v), msgGround, v, "int") return InvalidLabel } t = IntLabel @@ -269,7 +268,7 @@ func LabelFromValue(c *OpContext, src Expr, v Value) Feature { case StringKind: x, _ := Unwrap(v).(*String) if x == nil { - c.addErrf(IncompleteError, pos(v), msgGround, v, "string") + c.addErrf(IncompleteError, Pos(v), msgGround, v, "string") return InvalidLabel } t = StringLabel @@ -389,6 +388,3 @@ func (f Feature) safeIndex() int64 { } return int64(x) } - -// TODO: should let declarations be implemented as fields? 
-// func (f Feature) isLet() bool { return f.typ() == letLabel } diff --git a/vendor/cuelang.org/go/internal/core/adt/fields.go b/vendor/cuelang.org/go/internal/core/adt/fields.go index cd098ced21..a10661d739 100644 --- a/vendor/cuelang.org/go/internal/core/adt/fields.go +++ b/vendor/cuelang.org/go/internal/core/adt/fields.go @@ -163,12 +163,21 @@ func (n *nodeContext) getArc(f Feature, mode ArcType) (arc *Vertex, isNew bool) } } - arc = &Vertex{ + // getArc is immediately followed by inserting one conjunct, + // and vertices with one conjunct are rather common. + // When allocating the vertex, also allocate a bootstrap conjuncts array. + alloc := struct { + arc Vertex + conjuncts [1]Conjunct + }{} + arc = &alloc.arc + *arc = Vertex{ Parent: v, Label: f, ArcType: mode, nonRooted: v.IsDynamic || v.nonRooted, anonymous: v.anonymous || v.Label.IsLet(), + Conjuncts: alloc.conjuncts[:0], } if n.scheduler.frozen&fieldSetKnown != 0 { b := n.ctx.NewErrf("adding field %v not allowed as field set was already referenced", f) @@ -203,7 +212,7 @@ func (v *Vertex) insertConjunct(ctx *OpContext, c Conjunct, id CloseInfo, mode A var c2 Conjunct pos = -1 if check { - pos, c2 = findConjunct(v.Conjuncts, c) + pos, c2 = findConjunct(ctx, v.Conjuncts, c) } if pos == -1 { @@ -317,9 +326,6 @@ func (n *nodeContext) addConstraint(arc *Vertex, mode ArcType, c Conjunct, check bulkEnv.DynamicLabel = f c.Env = &bulkEnv - // TODO: can go, but do in separate CL. - arc, _ = n.getArc(f, mode) - arc.insertConjunct(n.ctx, c, c.CloseInfo, mode, check, false) } @@ -414,11 +420,6 @@ func (ctx *OpContext) notAllowedError(arc *Vertex) *Bottom { arc.state.kind = 0 } - // TODO: remove? We are now setting it on both fields, which seems to be - // necessary for now. But we should remove this as it often results in - // a duplicate error. 
- // v.SetValue(ctx, ctx.NewErrf("field not allowed")) - // TODO: create a special kind of error that gets the positions // of the relevant locations upon request from the arc. return err diff --git a/vendor/cuelang.org/go/internal/core/adt/flags.go b/vendor/cuelang.org/go/internal/core/adt/flags.go new file mode 100644 index 0000000000..ccbdfcbf1a --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/flags.go @@ -0,0 +1,41 @@ +// Copyright 2023 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +// Flags describe the mode of evaluation for a vertex. +type Flags struct { + // status is a remnant from evalv2, where it used to request a certain + // state. It is still used here and there. TODO: remove + status vertexStatus + + // condition and runmode indicates properties to satisfy for evalv4 + condition condition + mode runMode + + // concrete indicates whether the result should be concrete. + concrete bool + + // checkTypos indicates whether to check for typos (closedness). 
+ checkTypos bool +} + +var ( + FinalizeWithoutTypoCheck = Flags{ + status: finalized, + condition: allKnown, + mode: finalize, + checkTypos: false, + } +) diff --git a/vendor/cuelang.org/go/internal/core/adt/kind.go b/vendor/cuelang.org/go/internal/core/adt/kind.go index b8a419993b..290d6ed864 100644 --- a/vendor/cuelang.org/go/internal/core/adt/kind.go +++ b/vendor/cuelang.org/go/internal/core/adt/kind.go @@ -16,6 +16,7 @@ package adt import ( "fmt" + "iter" "math/bits" "strings" ) @@ -53,6 +54,26 @@ func IsConcrete(v Value) bool { return v.Concreteness() <= Concrete } +// IsRecursivelyConcrete checks that v is a scalar or that all regular fields +// have concrete values, recursively. +func IsRecursivelyConcrete(v *Vertex) bool { + for _, a := range v.Arcs { + if !a.Label.IsRegular() { + continue + } + switch a.ArcType { + case ArcMember: + case ArcRequired: + return false + } + + if !IsRecursivelyConcrete(a) { + return false + } + } + return v.IsConcrete() +} + // Kind reports the Value kind. type Kind uint16 @@ -113,12 +134,37 @@ func (k Kind) String() string { return toString(k, kindStrs) } -// TypeString is like String, but returns a string representation of a valid -// CUE type. +// TypeString is like [Kind.String], +// but returns a string representation of a valid CUE type. func (k Kind) TypeString() string { return toString(k, typeStrs) } +// Kinds returns an iterator over all the individual +// Kinds in k. There will be k.Count() items in the sequence. +func (k Kind) Kinds() iter.Seq[Kind] { + return func(yield func(Kind) bool) { + k := k + for count := 0; ; count++ { + n := bits.TrailingZeros(uint(k)) + if n == bits.UintSize { + break + } + bit := Kind(1 << uint(n)) + k &^= bit + if !yield(bit) { + return + } + } + } +} + +// Count returns the number of individual Kinds +// making up k. 
+func (k Kind) Count() int { + return bits.OnesCount(uint(k)) +} + func toString(k Kind, m map[Kind]string) string { if k == BottomKind { return "_|_" @@ -130,25 +176,21 @@ func toString(k Kind, m map[Kind]string) string { k = (k &^ NumberKind) | _numberKind } var buf strings.Builder - multiple := bits.OnesCount(uint(k)) > 1 + multiple := k.Count() > 1 if multiple { buf.WriteByte('(') } - for count := 0; ; count++ { - n := bits.TrailingZeros(uint(k)) - if n == bits.UintSize { - break - } - bit := Kind(1 << uint(n)) - k &^= bit + count := 0 + for bit := range k.Kinds() { s, ok := m[bit] if !ok { - s = fmt.Sprintf("bad(%d)", n) + s = fmt.Sprintf("bad(%d)", bits.TrailingZeros(uint(bit))) } if count > 0 { buf.WriteByte('|') } buf.WriteString(s) + count++ } if multiple { buf.WriteByte(')') diff --git a/vendor/cuelang.org/go/internal/core/adt/log.go b/vendor/cuelang.org/go/internal/core/adt/log.go index 73f1dc74cd..2852d7b699 100644 --- a/vendor/cuelang.org/go/internal/core/adt/log.go +++ b/vendor/cuelang.org/go/internal/core/adt/log.go @@ -25,7 +25,7 @@ import ( "cuelang.org/go/cue/token" ) -// Assert panics if the condition is false. Assert can be used to check for +// Assertf panics if the condition is false. Assertf can be used to check for // conditions that are considers to break an internal variant or unexpected // condition, but that nonetheless probably will be handled correctly down the // line. For instance, a faulty condition could lead to error being caught diff --git a/vendor/cuelang.org/go/internal/core/adt/mem.go b/vendor/cuelang.org/go/internal/core/adt/mem.go index 6de0f22a60..c11f96de01 100644 --- a/vendor/cuelang.org/go/internal/core/adt/mem.go +++ b/vendor/cuelang.org/go/internal/core/adt/mem.go @@ -153,13 +153,17 @@ func (r reclaimer) reclaim(v *Vertex) bool { // of a disjunct it is reclaimed later as part of [freeDisjunct]. 
return false } else { + if n.refCount > 0 { + goto skipRoot + } + r.reclaimBaseValueBuffers(v) - if n.refCount > 0 || (v.Parent != nil && !v.Label.IsLet()) { + if v.Parent != nil && !v.Label.IsLet() { goto skipRoot } } - if n.ctx == r.ctx { + if n.node != nil && n.ctx == r.ctx { // TODO(mem): it should be fine to just release the nodeContext into // c unconditionally. But the result is that it can result in // negative values for 'Leaks'. This is because loading imports @@ -169,9 +173,21 @@ func (r reclaimer) reclaim(v *Vertex) bool { } } - if w := v.DerefDisjunct(); v != w { - r.ctx.reclaimRecursive(w) - } + // TODO: this is not generally true. A dereferenced disjunct may already be + // in use to the point it cannot be freed. Mark such disjuncts to prevent + // reclamation or figure out something else. For now we disable to + // optimization as its effect is limited. + // + // See Issue #4055: + // a: "x" + // a: _ | error("a") + // if len(a) > 0 { + // a: _ | error("b") + // } + // + // if w := v.DerefDisjunct(); v != w { + // r.ctx.reclaimRecursive(w) + // } skipRoot: if v.PatternConstraints != nil { diff --git a/vendor/cuelang.org/go/internal/core/adt/op.go b/vendor/cuelang.org/go/internal/core/adt/op.go index dfa83cbbfe..8c50bd0b94 100644 --- a/vendor/cuelang.org/go/internal/core/adt/op.go +++ b/vendor/cuelang.org/go/internal/core/adt/op.go @@ -20,7 +20,7 @@ import "cuelang.org/go/cue/token" // use to evaluate a value. type Op int -//go:generate go run golang.org/x/tools/cmd/stringer -type=Op -linecomment +//go:generate go tool stringer -type=Op -linecomment // Values of Op. const ( @@ -58,6 +58,8 @@ const ( IntModuloOp // mod InterpolationOp // \() + + SpreadOp // ... ) // OpFromToken converts a token.Token to an Op. 
diff --git a/vendor/cuelang.org/go/internal/core/adt/op_string.go b/vendor/cuelang.org/go/internal/core/adt/op_string.go index 1fedcaed84..0573f1157e 100644 --- a/vendor/cuelang.org/go/internal/core/adt/op_string.go +++ b/vendor/cuelang.org/go/internal/core/adt/op_string.go @@ -35,15 +35,17 @@ func _() { _ = x[IntDivideOp-24] _ = x[IntModuloOp-25] _ = x[InterpolationOp-26] + _ = x[SpreadOp-27] } -const _Op_name = "NoOp&|.[][:]()&&||==!!=<<=>>==~!~+-*/quoremdivmod\\()" +const _Op_name = "NoOp&|.[][:]()&&||==!!=<<=>>==~!~+-*/quoremdivmod\\()..." -var _Op_index = [...]uint8{0, 4, 5, 6, 7, 9, 12, 14, 16, 18, 20, 21, 23, 24, 26, 27, 29, 31, 33, 34, 35, 36, 37, 40, 43, 46, 49, 52} +var _Op_index = [...]uint8{0, 4, 5, 6, 7, 9, 12, 14, 16, 18, 20, 21, 23, 24, 26, 27, 29, 31, 33, 34, 35, 36, 37, 40, 43, 46, 49, 52, 55} func (i Op) String() string { - if i < 0 || i >= Op(len(_Op_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Op_index)-1 { return "Op(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Op_name[_Op_index[i]:_Op_index[i+1]] + return _Op_name[_Op_index[idx]:_Op_index[idx+1]] } diff --git a/vendor/cuelang.org/go/internal/core/adt/optional.go b/vendor/cuelang.org/go/internal/core/adt/optional.go deleted file mode 100644 index cb99dd26c4..0000000000 --- a/vendor/cuelang.org/go/internal/core/adt/optional.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2020 CUE Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package adt - -// MatchAndInsert finds matching optional parts for a given Arc and adds its -// conjuncts. Bulk fields are only applied if no fields match, and additional -// constraints are only added if neither regular nor bulk fields match. -func (o *StructInfo) MatchAndInsert(c *OpContext, arc *Vertex) { - env := o.Env - - closeInfo := o.CloseInfo - closeInfo.IsClosed = false - - // Match normal fields - matched := false - // TODO: this could be lookup up more efficiently in the outer Vertex now. - // Keep this logic for now, though. - for _, f := range o.Fields { - if f.Label == arc.Label { - matched = true - break - } - } - - f := arc.Label - if !f.IsRegular() { - return - } - var label Value - - if int64(f.Index()) == MaxIndex { - f = 0 - } else if o.types&HasComplexPattern != 0 && f.IsString() { - label = f.ToValue(c) - } - - if len(o.Bulk) > 0 { - bulkEnv := *env - bulkEnv.DynamicLabel = f - - // match bulk optional fields / pattern properties - for _, b := range o.Bulk { - // if matched && f.additional { - // continue - // } - - // Mark the current arc as cyclic while evaluating pattern - // expressions, but not while adding conjuncts. - // TODO: make MatchAndInsert return a list of conjuncts instead? - // TODO: it could be that we can set the cycle before calling - // MatchAndInsert after the renewed implementation of disjunctions. - saved := arc.BaseValue - arc.BaseValue = cycle - match := matchBulk(c, env, b, f, label) - arc.BaseValue = saved - - if match { - matched = true - info := closeInfo.SpawnSpan(b.Value, ConstraintSpan) - arc.AddConjunct(MakeConjunct(&bulkEnv, b, info)) - } - } - } - - if matched || len(o.Additional) == 0 { - return - } - - // match others - for _, x := range o.Additional { - info := closeInfo - if _, ok := x.expr().(*Top); !ok { - info = info.SpawnSpan(x, ConstraintSpan) - } - // TODO: consider moving in above block (2 lines up). 
- arc.AddConjunct(MakeConjunct(env, x, info)) - } -} - -// matchBulk reports whether feature f matches the filter of x. It evaluation of -// the filter is erroneous, it returns false and the error will be set in c. -func matchBulk(c *OpContext, env *Environment, p *BulkOptionalField, f Feature, label Value) bool { - unreachableForDev(c) - - v := env.evalCached(c, p.Filter) - v = Unwrap(v) - - // Fast-track certain cases. - switch x := v.(type) { - case *Bottom: - if x == cycle { - err := c.NewPosf(pos(p.Filter), "cyclic pattern constraint") - for _, c := range c.vertex.Conjuncts { - err.AddPosition(c.Elem()) - } - c.AddBottom(&Bottom{ - Err: err, - Node: c.vertex, - }) - } - if c.errs == nil { - c.AddBottom(x) - } - return false - case *Top: - return true - - case *BasicType: - return x.K&StringKind != 0 - - case *BoundValue: - switch x.Kind() { - case StringKind: - if label == nil { - return false - } - str := label.(*String).Str - return x.validateStr(c, str) - - case IntKind: - return x.validateInt(c, int64(f.Index())) - } - } - - if label == nil { - return false - } - - n := Vertex{ - IsDynamic: true, - } - m := MakeConjunct(env, v, c.ci) - n.AddConjunct(m) - n.AddConjunct(MakeConjunct(m.Env, label, c.ci)) - - c.inConstraint++ - n.Finalize(c) - c.inConstraint-- - - b, _ := n.BaseValue.(*Bottom) - return b == nil -} diff --git a/vendor/cuelang.org/go/internal/core/adt/overlay.go b/vendor/cuelang.org/go/internal/core/adt/overlay.go index 1b3f9886a9..96f77403d6 100644 --- a/vendor/cuelang.org/go/internal/core/adt/overlay.go +++ b/vendor/cuelang.org/go/internal/core/adt/overlay.go @@ -42,11 +42,17 @@ import ( // could get by with only copying arcs to that are modified in the copy. func newOverlayContext(ctx *OpContext) *overlayContext { + // Reuse the map from the overlays stack if one exists at this depth. 
+ var m vertexMap + if i := len(ctx.overlays); i < cap(ctx.overlays) { + m = ctx.overlays[:cap(ctx.overlays)][i].vertexMap + } + if m == nil { + m = make(map[*Vertex]*Vertex) + } return &overlayContext{ - ctx: ctx, - - // TODO(perf): take a map from a pool of maps and reuse. - vertexMap: make(map[*Vertex]*Vertex), + ctx: ctx, + vertexMap: m, } } @@ -56,13 +62,16 @@ func newOverlayContext(ctx *OpContext) *overlayContext { type overlayContext struct { ctx *OpContext - // vertices holds the original, non-overlay vertices. The overlay for a - // vertex v can be obtained by looking up v.cc.overlay.src. - vertices []*Vertex + // root is the root of the disjunct. + root *Vertex + + // TODO(mvdan): investigate if we can avoid vertexMap by using a field on [nodeContext]. + // Note that this is likely not trivial, as we might need a stacking behavior + // similar to [OpContext.overlays]. - // vertexMap maps Vertex values of an originating node to the ones copied - // for this overlayContext. This is used to update the Vertex values in - // Environment values. + // vertexMap maps original Vertex values to their cloned counterparts. + // This is used to update the Vertex values in Environment values. + // The overlay for a vertex v can be obtained by looking up v.overlay. vertexMap vertexMap // confMap maps envComprehension values to the ones copied for this @@ -95,7 +104,17 @@ func (c *OpContext) pushOverlay(v *Vertex, m vertexMap) { } func (c *OpContext) popOverlay() { - c.overlays = c.overlays[:len(c.overlays)-1] + i := len(c.overlays) - 1 + // TODO(mvdan): unfortunately, clearing maps with large capacities + // is fairly slow as of Go 1.25, and most uses only need few entries. + // Do not reuse large maps to avoid this pitfall for now. 
+ // See: https://github.com/golang/go/issues/70617 + if l := len(c.overlays[i].vertexMap); l > 512 { + c.overlays[i].vertexMap = nil + } else { + clear(c.overlays[i].vertexMap) + } + c.overlays = c.overlays[:i] } func (c *OpContext) deref(v *Vertex) *Vertex { @@ -111,6 +130,19 @@ func (c *OpContext) deref(v *Vertex) *Vertex { return v } +// derefRoot is like deref, but also dereferences the root vertex of each overlay +// frame. This is needed for environment vertex resolution, where the root +// vertex must be mapped to its clone during disjunction evaluation. +func (c *OpContext) derefRoot(v *Vertex) *Vertex { + for i := len(c.overlays) - 1; i >= 0; i-- { + f := c.overlays[i] + if x, ok := f.vertexMap[v]; ok { + return x + } + } + return v +} + // deref reports a replacement of v or v itself if such a replacement does not // exists. It computes the transitive closure of the replacement graph. // TODO(perf): it is probably sufficient to only replace one level. But we need @@ -144,27 +176,20 @@ func (ctx *overlayContext) cloneRoot(root *nodeContext) *nodeContext { v := ctx.cloneVertex(root.node) v.IsDisjunct = true v.state.vertexMap = ctx.vertexMap + ctx.root = v - for _, v := range ctx.vertices { + for v := range ctx.vertexMap { v = v.overlay + if v == nil { + // Skip vertices copied from root.vertexMap that weren't cloned. + continue + } n := v.state if n == nil { continue } - // The group of the root closeContext should point to the Conjuncts field - // of the Vertex. As we already allocated the group, we use that allocation, - // but "move" it to v.Conjuncts. - // TODO: Is this ever necessary? It is certainly necessary to rewrite - // environments from inserted disjunction values, but expressions that - // were already added will typically need to be recomputed and recreated - // anyway. 
We add this in to be a bit defensive and reinvestigate once we - // have more aggressive structure sharing implemented - for i, c := range v.Conjuncts { - v.Conjuncts[i].Env = ctx.derefDisjunctsEnv(c.Env) - } - for _, t := range n.tasks { ctx.rewriteComprehension(t) @@ -198,8 +223,10 @@ func (ctx *overlayContext) cloneRoot(root *nodeContext) *nodeContext { // // TODO(perf): consider using generation counters. func (ctx *overlayContext) unlinkOverlay() { - for _, v := range ctx.vertices { - v.overlay = nil + for orig := range ctx.vertexMap { + if orig.overlay != nil { + orig.overlay = nil + } } } @@ -223,8 +250,6 @@ func (ctx *overlayContext) cloneVertex(x *Vertex) *Vertex { ctx.vertexMap[x] = v x.overlay = v - ctx.vertices = append(ctx.vertices, x) - v.Conjuncts = slices.Clone(v.Conjuncts) if a := x.Arcs; len(a) > 0 { @@ -236,6 +261,9 @@ func (ctx *overlayContext) cloneVertex(x *Vertex) *Vertex { v.Arcs[i] = arc arc.Parent = v } + } else if cap(x.Arcs) > 0 { + // If the original slice has any capacity, don't share it. + v.Arcs = nil } v.Structs = slices.Clone(v.Structs) @@ -263,29 +291,6 @@ func (ctx *overlayContext) cloneVertex(x *Vertex) *Vertex { return v } -// derefDisjunctsEnv creates a new env for each Environment in the Up chain with -// each Environment where Vertex is "from" to one where Vertex is "to". -// -// TODO(perf): we could, instead, just look up the mapped vertex in -// OpContext.Up. This would avoid us having to copy the Environments for each -// disjunct. This requires quite a bit of plumbing, though, so we leave it as -// is until this proves to be a performance issue. 
-func (ctx *overlayContext) derefDisjunctsEnv(env *Environment) *Environment { - if env == nil { - return nil - } - up := ctx.derefDisjunctsEnv(env.Up) - to := ctx.vertexMap.deref(env.Vertex) - if up != env.Up || env.Vertex != to { - env = &Environment{ - Up: up, - Vertex: to, - DynamicLabel: env.DynamicLabel, - } - } - return env -} - func (ctx *overlayContext) cloneNodeContext(n *nodeContext) *nodeContext { n.node.getState(ctx.ctx) // ensure state is initialized. @@ -308,6 +313,8 @@ func (ctx *overlayContext) cloneNodeContext(n *nodeContext) *nodeContext { d.reqDefIDs = append(d.reqDefIDs, n.reqDefIDs...) d.replaceIDs = append(d.replaceIDs, n.replaceIDs...) + d.flatReplaceIDs = append(d.flatReplaceIDs, n.flatReplaceIDs...) + d.minFlatReplaceIDTo = n.minFlatReplaceIDTo d.conjunctInfo = append(d.conjunctInfo, n.conjunctInfo...) // TODO: do we need to add cyclicConjuncts? Typically, cyclicConjuncts @@ -319,14 +326,9 @@ func (ctx *overlayContext) cloneNodeContext(n *nodeContext) *nodeContext { // to correct results. // d.cyclicConjuncts = append(d.cyclicConjuncts, n.cyclicConjuncts...) - if len(n.disjunctions) > 0 { - // Do not clone cc in disjunctions, as it is identified by underlying. - // We only need to clone the cc in disjunctCCs. - for _, x := range n.disjunctions { - x.env = ctx.derefDisjunctsEnv(x.env) - d.disjunctions = append(d.disjunctions, x) - } - } + // Do not clone cc in disjunctions, as it is identified by underlying. + // We only need to clone the cc in disjunctCCs. + d.disjunctions = append(d.disjunctions, n.disjunctions...) return d } @@ -377,28 +379,22 @@ func (ctx *overlayContext) cloneTask(t *task, dst, src *scheduler) *task { id := t.id - env := ctx.derefDisjunctsEnv(t.env) - - // TODO(perf): alloc from buffer. 
- d := &task{ - run: t.run, - state: t.state, - completes: t.completes, - unblocked: t.unblocked, - blockCondition: t.blockCondition, - blockedOn: t.blockedOn, // will be rewritten later - err: t.err, - env: env, - x: t.x, - id: id, - - node: dst.node, - - // These are rewritten after everything is cloned when all vertices are - // known. - comp: t.comp, - leaf: t.leaf, - } + d := ctx.ctx.newTask() + d.run = t.run + d.state = t.state + d.completes = t.completes + d.unblocked = t.unblocked + d.blockCondition = t.blockCondition + d.blockedOn = t.blockedOn // will be rewritten later + d.err = t.err + d.env = t.env + d.x = t.x + d.id = id + d.node = dst.node + // These are rewritten after everything is cloned when all vertices are + // known. + d.comp = t.comp + d.leaf = t.leaf return d } @@ -432,10 +428,16 @@ func (ctx *overlayContext) mapComprehensionContext(ec *envComprehension) *envCom } if ctx.compMap[ec] == nil { + vertex := ctx.vertexMap.deref(ec.vertex) + // Report the error at the root of the disjunction if otherwise the + // error would be reported outside of the disjunction. 
+ if vertex == ec.vertex { + vertex = ctx.root + } x := &envComprehension{ comp: ec.comp, structs: ec.structs, - vertex: ctx.ctx.deref(ec.vertex), + vertex: vertex, } ctx.compMap[ec] = x ec = x diff --git a/vendor/cuelang.org/go/internal/core/adt/runmode_string.go b/vendor/cuelang.org/go/internal/core/adt/runmode_string.go index 1cb34d6ab1..a35a462245 100644 --- a/vendor/cuelang.org/go/internal/core/adt/runmode_string.go +++ b/vendor/cuelang.org/go/internal/core/adt/runmode_string.go @@ -19,9 +19,9 @@ const _runMode_name = "ignoreattemptOnlyyieldfinalize" var _runMode_index = [...]uint8{0, 6, 17, 22, 30} func (i runMode) String() string { - i -= 1 - if i >= runMode(len(_runMode_index)-1) { - return "runMode(" + strconv.FormatInt(int64(i+1), 10) + ")" + idx := int(i) - 1 + if i < 1 || idx >= len(_runMode_index)-1 { + return "runMode(" + strconv.FormatInt(int64(i), 10) + ")" } - return _runMode_name[_runMode_index[i]:_runMode_index[i+1]] + return _runMode_name[_runMode_index[idx]:_runMode_index[idx+1]] } diff --git a/vendor/cuelang.org/go/internal/core/adt/sched.go b/vendor/cuelang.org/go/internal/core/adt/sched.go index cc12a48181..b9d52e633c 100644 --- a/vendor/cuelang.org/go/internal/core/adt/sched.go +++ b/vendor/cuelang.org/go/internal/core/adt/sched.go @@ -76,6 +76,9 @@ type taskContext struct { // can be frozen and the tasks unblocked. blocking []*task + // taskPool is a pool of tasks that can be reused to avoid allocations. + taskPool []*task + // counterMask marks which conditions use counters. Other conditions are // handled by signals only. counterMask condition @@ -110,10 +113,23 @@ func (p *taskContext) popTask() { } func (p *taskContext) newTask() *task { - // TODO: allocate from pool. + if n := len(p.taskPool); n > 0 { + t := p.taskPool[n-1] + p.taskPool = p.taskPool[:n-1] + // Clear task fields to avoid retaining references from previous use. 
+ // We clear here (not in freeTask) because tasks can be in multiple queues + // simultaneously and may still be accessed after being "freed" from one queue. + *t = task{} + return t + } return &task{} } +func (p *taskContext) freeTask(t *task) { + // Add task to pool without clearing. Task will be cleared when reused. + p.taskPool = append(p.taskPool, t) +} + type taskState uint8 const ( @@ -176,7 +192,7 @@ func (s schedState) String() string { // runMode indicates how to proceed after a condition could not be met. type runMode uint8 -//go:generate go run golang.org/x/tools/cmd/stringer -type=runMode +//go:generate go tool stringer -type=runMode const ( // ignore indicates that the new evaluator should not do any processing. @@ -252,7 +268,7 @@ type scheduler struct { // counters keeps track of the number of uncompleted tasks that are // outstanding for each of the possible conditions. A state is // considered completed if the corresponding counter reaches zero. - counters [numCompletionStates]int + counters [numCompletionStates]int32 // tasks lists all tasks that were scheduled for this scheduler. // The list only contains tasks that are associated with this node. @@ -267,17 +283,11 @@ type scheduler struct { } func (s *scheduler) clear() { - // TODO(perf): free tasks into task pool - - // Any tasks blocked on this scheduler are unblocked once the scheduler is cleared. - // Otherwise they might signal a cleared scheduler, which can panic. - // - // TODO(mvdan,mpvl): In principle, all blocks should have been removed when a scheduler - // is cleared. Perhaps this can happen when the scheduler is stopped prematurely. - // For now, this solution seems to work OK. - for _, t := range s.blocking { - t.blockedOn = nil - t.blockCondition = neverKnown + // Free tasks back to the pool for reuse. Tasks are not cleared here because + // they may still be referenced in other schedulers' blocking queues. + // They will be cleared when obtained from the pool for reuse. 
+ for _, t := range s.tasks { + s.ctx.freeTask(t) } *s = scheduler{ @@ -603,8 +613,8 @@ type task struct { // The Conjunct processed by this task. env *Environment - id CloseInfo // TODO: rename to closeInfo? - x Node // The conjunct Expression or Value. + id CloseInfo + x Node // The conjunct Expression or Value. // For Comprehensions: comp *envComprehension diff --git a/vendor/cuelang.org/go/internal/core/adt/share.go b/vendor/cuelang.org/go/internal/core/adt/share.go index c1df735935..0c7599a3a5 100644 --- a/vendor/cuelang.org/go/internal/core/adt/share.go +++ b/vendor/cuelang.org/go/internal/core/adt/share.go @@ -38,6 +38,7 @@ func (n *nodeContext) unshare() { } n.isShared = false n.node.IsShared = false + n.node.OpenedShared = false v := n.node.BaseValue.(*Vertex) @@ -63,11 +64,12 @@ func (n *nodeContext) finalizeSharing() { case *Vertex: if n.shareCycleType == NoCycle { v.Finalize(n.ctx) - } else if !v.isFinal() { - // TODO: ideally we just handle cycles in optional chains directly, - // rather than relying on this mechanism. This requires us to add - // a mechanism to detect that. - n.ctx.toFinalize = append(n.ctx.toFinalize, v) + // See the TODO in unify.go for toFinalize. + // } else if !v.isFinal() { + // // TODO: ideally we just handle cycles in optional chains directly, + // // rather than relying on this mechanism. This requires us to add + // // a mechanism to detect that. + // n.ctx.toFinalize = append(n.ctx.toFinalize, v) } // If state.parent is non-nil, we determined earlier that this Vertex // is not rooted and that it can safely be shared. Because it is @@ -77,10 +79,6 @@ func (n *nodeContext) finalizeSharing() { // its assigned location. if v.state != nil && v.state.parent != nil { v.Parent = v.state.parent - - // TODO: see if this can be removed and why some errors are not - // propagated when removed. - n.isShared = false } case *Bottom: // An error trumps sharing. We can leave it as is. 
@@ -122,6 +120,7 @@ func (n *nodeContext) share(c Conjunct, arc *Vertex, id CloseInfo) { n.isShared = true n.shared = c n.addShared(id) + n.node.OpenedShared = id.Opened if arc.IsDetached() && arc.MayAttach() { // TODO: Second check necessary? // This node can safely be shared. Since it is not rooted, though, it diff --git a/vendor/cuelang.org/go/internal/core/adt/simplify.go b/vendor/cuelang.org/go/internal/core/adt/simplify.go index 6be4b49ac2..7a3f7b9577 100644 --- a/vendor/cuelang.org/go/internal/core/adt/simplify.go +++ b/vendor/cuelang.org/go/internal/core/adt/simplify.go @@ -42,7 +42,7 @@ func SimplifyBounds(ctx *OpContext, k Kind, x, y *BoundValue) Value { // NOTE: EqualOp should not happen, but include it defensively. // Maybe an API would use it, for instance. case EqualOp, NotEqualOp, MatchOp, NotMatchOp: - if test(ctx, EqualOp, xv, yv) { + if BinOpBool(ctx, nil, EqualOp, xv, yv) { return x } return nil // keep both bounds @@ -64,7 +64,7 @@ func SimplifyBounds(ctx *OpContext, k Kind, x, y *BoundValue) Value { // inverse is true as well. // Tighten bound. - if test(ctx, cmp, xv, yv) { + if BinOpBool(ctx, nil, cmp, xv, yv) { return x } return y @@ -86,9 +86,6 @@ func SimplifyBounds(ctx *OpContext, k Kind, x, y *BoundValue) Value { case -1: case 0: if x.Op == GreaterEqualOp && y.Op == LessEqualOp { - if ctx.SimplifyValidators { - return ctx.NewString(a.Str) - } return nil } fallthrough @@ -114,9 +111,6 @@ func SimplifyBounds(ctx *OpContext, k Kind, x, y *BoundValue) Value { case -1: case 0: if x.Op == GreaterEqualOp && y.Op == LessEqualOp { - if ctx.SimplifyValidators { - return ctx.newBytes(a.B) - } return nil } fallthrough @@ -132,32 +126,54 @@ func SimplifyBounds(ctx *OpContext, k Kind, x, y *BoundValue) Value { } a, aOK := xv.(*Num) b, bOK := yv.(*Num) - - if !aOK || !bOK { + if !aOK || !bOK || a.X.Form != apd.Finite || b.X.Form != apd.Finite { + // Nothing to do if either bound is not a finite number. 
break } - var d, lo, hi apd.Decimal - lo.Set(&a.X) - hi.Set(&b.X) + var d apd.Decimal + lo, hi := a.X, b.X if k&FloatKind == 0 { - // Readjust bounds for integers. - if x.Op == GreaterEqualOp { - // >=3.4 ==> >=4 - _, _ = internal.BaseContext.Ceil(&lo, &a.X) - } else { - // >3.4 ==> >3 - _, _ = internal.BaseContext.Floor(&lo, &a.X) + // Readjust bounds for integers if the bounds have decimal places, + // which are represented as a negative exponent to multiply with the coefficient. + // We reset lo and hi to empty apd.Decimal values to not modify the originals, + // given that apd.Decimal contains pointers which are carried with shallow copies. + if a.X.Exponent < 0 { + lo = apd.Decimal{} + if x.Op == GreaterEqualOp { + // >=3.4 ==> >=4 + internal.BaseContext.Ceil(&lo, &a.X) + } else { + // >3.4 ==> >3 + internal.BaseContext.Floor(&lo, &a.X) + } } - if y.Op == LessEqualOp { - // <=2.3 ==> <= 2 - _, _ = internal.BaseContext.Floor(&hi, &b.X) - } else { - // <2.3 ==> < 3 - _, _ = internal.BaseContext.Ceil(&hi, &b.X) + if b.X.Exponent < 0 { + hi = apd.Decimal{} + if y.Op == LessEqualOp { + // <=2.3 ==> <= 2 + internal.BaseContext.Floor(&hi, &b.X) + } else { + // <2.3 ==> < 3 + internal.BaseContext.Ceil(&hi, &b.X) + } } } + // Negative or zero minimum with a large positive maximum + // common with e.g. int32 being ">=-2147483648 & <=2147483647" + // or uint16 being ">=0 & <=65535". + // + // We detect a large positive maximum by checking that the exponent isn't negative + // and that the coefficient has at least two digits, so its value must be at least 10. + // When the maximum is at least 10 and the minimum is at most 0, + // the subtraction will be positive and larger than 2, so we can avoid all the work below. + // This is important given that a subtraction allocates a new number for the result. 
+ hiSign, loSign := hi.Sign(), lo.Sign() + if hiSign > 0 && loSign <= 0 && hi.Exponent >= 0 && hi.NumDigits() > 1 { + break + } + cond, err := internal.BaseContext.Sub(&d, &hi, &lo) if cond.Inexact() || err != nil { break @@ -192,26 +208,14 @@ func SimplifyBounds(ctx *OpContext, k Kind, x, y *BoundValue) Value { if d.Negative { return errIncompatibleBounds(ctx, k, x, y) } - // [apd.Decimal.Int64] on `d = hi - lo` will error if it overflows an int64. - // This is pretty common with CUE bounds like int64, which expands to: - // - // >=-9_223_372_036_854_775_808 & <=9_223_372_036_854_775_807 - // - // Constructing that error is unfortunate as it allocates a few times - // and stringifies the number too, which also has a cost. - // Which is entirely unnecessary, as we don't use the error value at all. - // If we know the integer will have more than one digit, give up early. - if d.NumDigits() > 1 { - break - } switch diff, err := d.Int64(); { case diff == 1: if k&FloatKind == 0 { if x.Op == GreaterEqualOp && y.Op == LessThanOp { - return newNum(ctx, &lo, k&NumberKind, x, y) + return nil } if x.Op == GreaterThanOp && y.Op == LessEqualOp { - return newNum(ctx, &hi, k&NumberKind, x, y) + return nil } if x.Op == GreaterThanOp && y.Op == LessThanOp { return ctx.NewErrf("incompatible integer bounds %v and %v", x, y) @@ -220,24 +224,33 @@ func SimplifyBounds(ctx *OpContext, k Kind, x, y *BoundValue) Value { case diff == 2: if k&FloatKind == 0 && x.Op == GreaterThanOp && y.Op == LessThanOp { - _, _ = internal.BaseContext.Add(&d, d.SetInt64(1), &lo) - return newNum(ctx, &d, k&NumberKind, x, y) + return nil } case diff == 0 && err == nil: if x.Op == GreaterEqualOp && y.Op == LessEqualOp { - return newNum(ctx, &lo, k&NumberKind, x, y) + return nil } return errIncompatibleBounds(ctx, k, x, y) } + case x.Op == EqualOp: + if BinOpBool(ctx, nil, y.Op, xv, yv) { + return x + } + + case y.Op == EqualOp: + if BinOpBool(ctx, nil, x.Op, yv, xv) { + return y + } + case x.Op == NotEqualOp: 
- if !test(ctx, y.Op, xv, yv) { + if !BinOpBool(ctx, nil, y.Op, xv, yv) { return y } case y.Op == NotEqualOp: - if !test(ctx, x.Op, yv, xv) { + if !BinOpBool(ctx, nil, x.Op, yv, xv) { return x } } @@ -251,12 +264,6 @@ func errIncompatibleBounds(ctx *OpContext, k Kind, x, y *BoundValue) *Bottom { return ctx.NewErrf("incompatible number bounds %v and %v", y, x) } } -func newNum(ctx *OpContext, d *apd.Decimal, k Kind, sources ...Node) Value { - if ctx.SimplifyValidators { - return ctx.newNum(d, k, sources...) - } - return nil -} func opInfo(op Op) (cmp Op, norm int) { switch op { @@ -268,6 +275,8 @@ func opInfo(op Op) (cmp Op, norm int) { return LessEqualOp, -1 case LessEqualOp: return LessThanOp, -1 + case EqualOp: + return EqualOp, 4 case NotEqualOp: return NotEqualOp, 0 case MatchOp: @@ -278,13 +287,6 @@ func opInfo(op Op) (cmp Op, norm int) { panic("cue: unreachable") } -func test(ctx *OpContext, op Op, a, b Value) bool { - if b, ok := BinOp(ctx, nil, op, a, b).(*Bool); ok { - return b.B - } - return false -} - // SimplifyValidator simplifies non-bound validators. // // Currently this only checks for pure equality. In the future this can be used diff --git a/vendor/cuelang.org/go/internal/core/adt/states.go b/vendor/cuelang.org/go/internal/core/adt/states.go index 5474258a09..0127fe9eee 100644 --- a/vendor/cuelang.org/go/internal/core/adt/states.go +++ b/vendor/cuelang.org/go/internal/core/adt/states.go @@ -313,7 +313,7 @@ func stateCompletions(s *scheduler) condition { // allChildConjunctsKnown indicates that all conjuncts have been added by // the parents and every conjunct that may add fields to subfields have been // processed. 
-func (v *Vertex) allChildConjunctsKnown() bool { +func (v *Vertex) allChildConjunctsKnown(ctx *OpContext) bool { if v == nil { return true } @@ -328,18 +328,18 @@ func (v *Vertex) allChildConjunctsKnown() bool { return true } - return v.state.meets(fieldConjunctsKnown | allAncestorsProcessed) + n := v.getState(ctx) + + return n.meets(fieldConjunctsKnown | allAncestorsProcessed) } func (n *nodeContext) scheduleTask(r *runner, env *Environment, x Node, ci CloseInfo) *task { - t := &task{ - run: r, - node: n, - - env: env, - id: ci, - x: x, - } + t := n.ctx.newTask() + t.run = r + t.node = n + t.env = env + t.id = ci + t.x = x n.insertTask(t) return t } diff --git a/vendor/cuelang.org/go/internal/core/adt/tasks.go b/vendor/cuelang.org/go/internal/core/adt/tasks.go index b6d5e0a9d2..88f3a414e8 100644 --- a/vendor/cuelang.org/go/internal/core/adt/tasks.go +++ b/vendor/cuelang.org/go/internal/core/adt/tasks.go @@ -16,6 +16,7 @@ package adt import ( "fmt" + "slices" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/token" @@ -84,7 +85,7 @@ func init() { func processExpr(ctx *OpContext, t *task, mode runMode) { x := t.x.(Expr) - state := combinedFlags{ + state := Flags{ condition: concreteKnown, mode: mode, } @@ -104,7 +105,7 @@ func processResolver(ctx *OpContext, t *task, mode runMode) { // be conclusive, we could avoid triggering evaluating disjunctions. This // would be a pretty significant rework, though. - arc := r.resolve(ctx, combinedFlags{ + arc := r.resolve(ctx, Flags{ condition: fieldSetKnown, mode: mode, }) @@ -113,6 +114,11 @@ func processResolver(ctx *OpContext, t *task, mode runMode) { // TODO: yield instead? return } + ci := ctx.ci + if arc.OpenedShared { + ci.Opened = true + } + arc = arc.DerefNonDisjunct() if ctx.LogEval > 0 { @@ -121,11 +127,9 @@ func processResolver(ctx *OpContext, t *task, mode runMode) { // TODO: consider moving after markCycle or removing. 
d := arc.DerefDisjunct() - ci := t.updateCI(ctx.ci) - // A reference that points to itself indicates equality. In that case // we are done computing and we can return the arc as is. - ci, skip := t.node.detectCycleV3(d, t.env, r, ci) + ci, skip := t.node.detectCycle(d, t.env, r, ci) if skip { // Either we have a structure cycle or we are unifying with another // conjunct. In either case, we are no longer structure sharing here. @@ -153,7 +157,7 @@ func processDynamic(ctx *OpContext, t *task, mode runMode) { field := t.x.(*DynamicField) - v := ctx.value(field.Key, combinedFlags{ + v := ctx.value(field.Key, Flags{ condition: scalarValue, mode: mode, }) @@ -165,7 +169,7 @@ func processDynamic(ctx *OpContext, t *task, mode runMode) { n.addBottom(&Bottom{ Code: IncompleteError, Node: n.node, - Err: ctx.NewPosf(pos(field.Key), + Err: ctx.NewPosf(Pos(field.Key), "key value of dynamic field must be concrete, found %v", v), }) return @@ -174,7 +178,7 @@ func processDynamic(ctx *OpContext, t *task, mode runMode) { f := ctx.Label(field.Key, v) // TODO: remove this restriction. if f.IsInt() { - n.addErr(ctx.NewPosf(pos(field.Key), "integer fields not supported")) + n.addErr(ctx.NewPosf(Pos(field.Key), "integer fields not supported")) return } @@ -182,7 +186,20 @@ func processDynamic(ctx *OpContext, t *task, mode runMode) { // unevaluated. ci := t.id - c := MakeConjunct(t.env, field, ci) + // TODO: consider using a different mechanism where we do not have to + // copy the environment every time. If we do not have an alternative, + // we could use the same technique in pattern constraints to at least + // not have to copy it in most cases. 
+ env := t.env + if x := field.Src; x != nil && x.Alias != nil && x.Alias.Label != nil { + e := *(t.env) + if f.Index() < MaxIndex { + e.DynamicLabel = f + } + env = &e + } + + c := MakeConjunct(env, field, ci) n.insertArc(f, field.ArcType, c, ci, true) } @@ -193,7 +210,7 @@ func processPatternConstraint(ctx *OpContext, t *task, mode runMode) { // Note that the result may be a disjunction. Be sure to not take the // default value as we want to retain the options of the disjunction. - v := ctx.evalState(field.Filter, combinedFlags{ + v := ctx.evalState(field.Filter, Flags{ condition: scalarValue, mode: yield, }) @@ -237,22 +254,29 @@ func processListLit(c *OpContext, t *task, mode runMode) { l := t.x.(*ListLit) - n.updateCyclicStatusV3(t.id) + n.updateCyclicStatus(t.id) var ellipsis Node + id := c.subField(t.id) + index := int64(0) hasComprehension := false + + // List literals with static elements are common; + // grow the capacity ahead of time to make space for their arcs. + n.node.Arcs = slices.Grow(n.node.Arcs, len(l.Elems)) + for j, elem := range l.Elems { // TODO: Terminate early in case of runaway comprehension. switch x := elem.(type) { case *Comprehension: - err := c.yield(nil, t.env, x, combinedFlags{status: partial, mode: mode}, func(e *Environment) { + indexBefore := index + err := c.yield(nil, t.env, x, Flags{status: partial, mode: mode}, func(e *Environment) { label, err := MakeLabel(x.Source(), index, IntLabel) n.addErr(err) index++ - id := t.id // id.setOptional(t.node) c := MakeConjunct(e, x.Value, id) n.insertArc(label, ArcMember, c, id, true) @@ -262,6 +286,15 @@ func processListLit(c *OpContext, t *task, mode runMode) { n.addBottom(err) return } + // If comprehension yielded zero values and has an else clause, + // insert the else clause's struct contents as list elements. 
+ if index == indexBefore && x.Fallback != nil { + label, err := MakeLabel(x.Source(), index, IntLabel) + n.addErr(err) + index++ + conj := MakeConjunct(t.env, x.Fallback, id) + n.insertArc(label, ArcMember, conj, id, true) + } case *Ellipsis: // TODO(openlist): this will work once we have the same closedness @@ -277,8 +310,8 @@ func processListLit(c *OpContext, t *task, mode runMode) { elem = &Top{} } - id := t.id - id.setOptionalV3(t.node) + id := id + id.setOptional(t.node) c := MakeConjunct(t.env, elem, id) pat := &BoundValue{ @@ -292,8 +325,8 @@ func processListLit(c *OpContext, t *task, mode runMode) { label, err := MakeLabel(x.Source(), index, IntLabel) n.addErr(err) index++ - c := MakeConjunct(t.env, x, t.id) - n.insertArc(label, ArcMember, c, t.id, true) + c := MakeConjunct(t.env, x, id) + n.insertArc(label, ArcMember, c, id, true) } if max := n.maxListLen; n.listIsClosed && int(index) > max { @@ -319,7 +352,7 @@ func processListLit(c *OpContext, t *task, mode runMode) { n.listIsClosed = isClosed } - n.updateListType(l, t.id, isClosed, ellipsis) + n.updateListType(l, id, isClosed, ellipsis) } func processListVertex(c *OpContext, t *task, mode runMode) { @@ -327,7 +360,7 @@ func processListVertex(c *OpContext, t *task, mode runMode) { l := t.x.(*Vertex) - elems := l.Elems() + elems := slices.Collect(l.Elems()) isClosed := l.IsClosedList() // TODO: Share with code above. @@ -368,7 +401,6 @@ func processListVertex(c *OpContext, t *task, mode runMode) { func (n *nodeContext) updateListType(list Expr, id CloseInfo, isClosed bool, ellipsis Node) { if n.kind == 0 { - n.node.updateStatus(finalized) // TODO(neweval): remove once transitioned. 
return } m, ok := n.node.BaseValue.(*ListMarker) diff --git a/vendor/cuelang.org/go/internal/core/adt/typocheck.go b/vendor/cuelang.org/go/internal/core/adt/typocheck.go index 656bbc2e49..d55d97737d 100644 --- a/vendor/cuelang.org/go/internal/core/adt/typocheck.go +++ b/vendor/cuelang.org/go/internal/core/adt/typocheck.go @@ -166,53 +166,68 @@ package adt // type. // import ( - "math" + "iter" "slices" "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/token" ) type defID uint32 +//go:generate go tool stringer -type=defIDType -linecomment + type defIDType int8 const ( // defIDTypeUnknown indicates that the ID is not a definition. - defIDTypeUnknown defIDType = iota + defIDTypeUnknown defIDType = iota // * - defEmbedding - defReference - defStruct + defEmbedding // E + defReference // D + defStruct // S ) -func (d defIDType) String() string { - switch d { - case defEmbedding: - return "E" - case defReference: - return "D" - case defStruct: - return "S" - default: - return "*" - } +type containment struct { + id defID // parent defID + posIndex uint32 // index into [OpContext.positionTable] } -const deleteID defID = math.MaxUint32 - -func (c *OpContext) getNextDefID() defID { +func (c *OpContext) getNextDefID(n Node) defID { c.stats.NumCloseIDs++ c.nextDefID++ if len(c.containments) == 0 { // Our ID starts at 1. Create an extra element for the zero value. - c.containments = make([]defID, 1, 16) + c.containments = make([]containment, 1, 16) + // Position index 0 is reserved for [token.NoPos]. + c.positionTable = []token.Pos{token.NoPos} + c.positionIndex = make(map[token.Pos]uint32) + c.positionIndex[token.NoPos] = 0 } - c.containments = append(c.containments, 0) + + posIdx := c.internPosition(Pos(n)) + c.containments = append(c.containments, containment{id: 0, posIndex: posIdx}) return c.nextDefID } +// internPosition returns an index into [OpContext.positionTable], +// which deduplicates positions to save memory. 
+func (c *OpContext) internPosition(p token.Pos) uint32 { + if idx, ok := c.positionIndex[p]; ok { + return idx + } + idx := uint32(len(c.positionTable)) + if int(idx) != len(c.positionTable) { + // If we manage to overflow, fall back index zero for [token.NoPos]. + return 0 + } + c.positionTable = append(c.positionTable, p) + c.positionIndex[p] = idx + return idx +} + type refInfo struct { v *Vertex id defID @@ -271,12 +286,12 @@ type replaceID struct { } func (n *nodeContext) addReplacement(x replaceID) { - if x.from == x.to { + if x.from == 0 || x.from == x.to { return } - if x.from < x.to && n.ctx.containments[x.to] == 0 { - n.ctx.containments[x.to] = x.from + if x.from < x.to && n.ctx.containments[x.to].id == 0 { + n.ctx.containments[x.to].id = x.from return } @@ -320,7 +335,7 @@ func (n *nodeContext) updateConjunctInfo(k Kind, id CloseInfo, flags conjunctFla // multiple times to a single node. As we only want to insert each conjunct // once, we need to ensure that within all contexts a single ID assigned to such // a resolver is tracked. -func (n *nodeContext) addResolver(v *Vertex, id CloseInfo, forceIgnore bool) CloseInfo { +func (n *nodeContext) addResolver(p Node, v *Vertex, id CloseInfo, forceIgnore bool) CloseInfo { if n.ctx.OpenDef { return id } @@ -329,7 +344,8 @@ func (n *nodeContext) addResolver(v *Vertex, id CloseInfo, forceIgnore bool) Clo return id } - closeOuter := (id.FromDef && id.FromEmbed) || v.ClosedNonRecursive + // This can only be true when not using the @experiment(explicitopen). + closeOuter := id.FromDef && id.FromEmbed && !id.Opened if closeOuter && !forceIgnore { // Walk up the parent chain of the outer structs to "activate" them. @@ -348,7 +364,7 @@ func (n *nodeContext) addResolver(v *Vertex, id CloseInfo, forceIgnore bool) Clo var ignore bool switch { - case forceIgnore: + case forceIgnore, id.Opened: // Special mode to always ignore the outer enclosing group. 
// This is the case, for instance, if a resolver resolves to a // non-definition. @@ -366,7 +382,7 @@ func (n *nodeContext) addResolver(v *Vertex, id CloseInfo, forceIgnore bool) Clo // // the embedding. // #A & #B // } - isClosed := id.FromDef || v.ClosedNonRecursive + isClosed := id.FromDef || v.ClosedNonRecursive || v.ClosedRecursive ignore = !isClosed default: // In the default case we can disable typo checking this type if it is @@ -375,15 +391,21 @@ func (n *nodeContext) addResolver(v *Vertex, id CloseInfo, forceIgnore bool) Clo } dstID := defID(0) - for _, x := range n.reqDefIDs { + for i, x := range n.reqDefIDs { if x.v == v { dstID = x.id + if x.ignore && !ignore { + // Override settings of a vertex added by #A... + n.reqDefIDs[i].ignore = false + n.reqDefIDs[i].parent = id.outerID + n.reqDefIDs[i].embed = id.enclosingEmbed + } break } } if dstID == 0 || id.enclosingEmbed != 0 { - next := n.ctx.getNextDefID() + next := n.ctx.getNextDefID(p) if dstID != 0 { // If we need to activate an enclosing embed group, and the added // resolver was already before, we need to allocate a new ID and @@ -418,6 +440,10 @@ func (c *OpContext) subField(ci CloseInfo) CloseInfo { // as, at this point, it seems to be only used for debugging. We may // want to consider having a separate field for this, though. ci.FromEmbed = false + + // The priority should be cleared for sub fields: we take default struct + // in its entirety, but not piecemeal. 
+ ci.Priority = 0 return ci } @@ -431,12 +457,12 @@ func (id CloseInfo) clearCloseCheck() CloseInfo { return id } -func (n *nodeContext) newReq(id CloseInfo, kind defIDType) CloseInfo { +func (n *nodeContext) newReq(p Node, id CloseInfo, kind defIDType) CloseInfo { if id.defID != 0 && id.opID != n.ctx.opID { return id.clearCloseCheck() } - dstID := n.ctx.getNextDefID() + dstID := n.ctx.getNextDefID(p) n.addReplacement(replaceID{from: id.defID, to: dstID}) parent := id.defID @@ -496,6 +522,10 @@ func (v *Vertex) AddOpenConjunct(ctx *OpContext, w *Vertex) { // We can then say that requirement 3 (node A) holds if all fields contain // either label 3, or any field within 1 that is not 2. func (n *nodeContext) injectEmbedNode(x Decl, id CloseInfo) CloseInfo { + if Pos(x).Experiment().ExplicitOpen { + return id + } + id.FromEmbed = true // Filter cases where we do not need to track the definition. @@ -506,7 +536,7 @@ func (n *nodeContext) injectEmbedNode(x Decl, id CloseInfo) CloseInfo { } } - return n.newReq(id, defEmbedding) + return n.newReq(x, id, defEmbedding) } // splitStruct is used to mark the outer struct of a field in which embeddings @@ -520,7 +550,7 @@ func (n *nodeContext) injectEmbedNode(x Decl, id CloseInfo) CloseInfo { // definition is embedded within a struct. It can be removed if we implement // the #A vs #A... semantics. 
func (n *nodeContext) splitStruct(s *StructLit, id CloseInfo) CloseInfo { - if n.ctx.OpenDef { + if Pos(s).Experiment().ExplicitOpen || n.ctx.OpenDef { return id } @@ -546,11 +576,11 @@ func (n *nodeContext) splitStruct(s *StructLit, id CloseInfo) CloseInfo { return id } - return n.splitScope(id) + return n.splitScope(s, id) } -func (n *nodeContext) splitScope(id CloseInfo) CloseInfo { - return n.newReq(id, defStruct) +func (n *nodeContext) splitScope(p Node, id CloseInfo) CloseInfo { + return n.newReq(p, id, defStruct) } func (n *nodeContext) checkTypos() { @@ -598,12 +628,11 @@ func (n *nodeContext) checkTypos() { required = slices.Clone(required) } - n.filterSets(&required, func(n *nodeContext, a *reqSet) bool { + for a := range required.elemPtrs() { if id := hasParentEllipsis(n, a, n.conjunctInfo); id != 0 { a.removed = true } - return true - }) + } // TODO(perf): somehow prevent error generation of recursive structures, // or at least make it cheap. Right now if this field is a typo, likely // all descendents will be regarded as typos. @@ -623,7 +652,9 @@ func (n *nodeContext) checkTypos() { // TODO: do not descend on optional? - // openDebugGraph(ctx, a, "NOT ALLOWED") // Uncomment for debugging. + if OpenGraphs { + openDebugGraph(ctx, a, "NOT ALLOWED") // Uncomment for debugging. 
+ } if b := ctx.notAllowedError(a); b != nil && a.ArcType <= ArcRequired { err = CombineErrors(nil, err, b) @@ -668,17 +699,12 @@ func (n *nodeContext) hasEvidenceForOne(all reqSets, i uint32, conjuncts []conju } embedScope, ok := all.lookupSet(a.embed) - if !ok { return false } outerScope, ok := all.lookupSet(a.parent) - if ok && outerScope.removed { - return true - } - outer: for _, c := range conjuncts { if n.containsDefID(embedScope.id, c.embed) { @@ -689,6 +715,9 @@ outer: if !ok || a.parent == 0 { return true } + if outerScope.removed { + return true + } // If this conjunct is within the outer struct, but outside the // embedding scope, this means it was "added" and we do not have @@ -701,50 +730,96 @@ outer: } func (n *nodeContext) containsDefID(node, child defID) bool { - // TODO(perf): cache result - // TODO(perf): we could keep track of the minimum defID that could map so - // that we can use this to bail out early. c := n.ctx - c.redirectsBuf = c.redirectsBuf[:0] - for p := n; p != nil; p = p.node.Parent.state { - if p.opID != n.opID { - break + + // Build sortedReplaceIDs once per nodeContext by traversing the parent chain. + if !n.computedFlatReplaceIDs { + for p := n; p != nil; p = p.node.Parent.state { + if p.opID != n.opID { + break + } + n.flatReplaceIDs = append(n.flatReplaceIDs, p.replaceIDs...) + if p.node.Parent == nil { + break + } } - c.redirectsBuf = append(c.redirectsBuf, p.replaceIDs...) - if p.node.Parent == nil { - break + slices.SortFunc(n.flatReplaceIDs, func(a, b replaceID) int { + return int(b.to) - int(a.to) + }) + n.computedFlatReplaceIDs = true + + // Compute minimum 'to' value for early bailout optimization. + // Since flatReplaceIDs is sorted by 'to' in descending order, + // the last entry has the minimum value. 
+ if len(n.flatReplaceIDs) > 0 { + n.minFlatReplaceIDTo = n.flatReplaceIDs[len(n.flatReplaceIDs)-1].to + } + + if int64(len(n.flatReplaceIDs)) > c.stats.MaxRedirect { + c.stats.MaxRedirect = int64(len(n.flatReplaceIDs)) } } - if int64(len(c.redirectsBuf)) > c.stats.MaxRedirect { - c.stats.MaxRedirect = int64(len(c.redirectsBuf)) + // Caching in [OpContext.containsDefIDCache] adds overhead; + // only do it if we estimate that [nodeContext.containsDefIDRec] + // will do significant work by looking at the number of replaceIDs. + caching := len(n.flatReplaceIDs) > 15 + + // Pack node and child into a single uint64 key for faster map lookups. + key := uint64(node)<<32 | uint64(child) + // Note that even a map lookup has some overhead, which adds up. + if caching { + if result, ok := c.containsDefIDCache[key]; ok { + return result + } + } + + result := n.containsDefIDRec(node, child, child) + + if caching { + if c.containsDefIDCache == nil { + c.containsDefIDCache = make(map[uint64]bool) + } + c.containsDefIDCache[key] = result } - return n.containsDefIDRec(node, child, child) + return result } func (n *nodeContext) containsDefIDRec(node, child, start defID) bool { c := n.ctx - // NOTE: this loop is O(H) + // Walk up the containment hierarchy. + // Since p only decreases and flatReplaceIDs is sorted by 'to' (descending), + // we maintain a cursor and scan linearly forwards through matches. + // This is O(H + R) total, where R is the number of redirects. + cursor := 0 for p := child; p != 0; { if p == node { return true } - // TODO(perf): can be binary search if we keep redirects sorted. Also, p - // should be monotonically decreasing, so we could use this to direct - // the binary search or-- at the very least--to only have to pass the - // array once. 
- for _, r := range c.redirectsBuf { - if r.to == p && r.from != child { - if n.containsDefIDRec(node, r.from, start) { + // Early bailout: if p < minFlatReplaceIDTo, no replaceID entries + // can match p, so we can skip replacement scanning entirely. + if p >= n.minFlatReplaceIDTo { + // Skip entries where 'to' > p. + for cursor < len(n.flatReplaceIDs) && n.flatReplaceIDs[cursor].to > p { + cursor++ + } + + // Process all entries with 'to' == p. + for cursor < len(n.flatReplaceIDs) && n.flatReplaceIDs[cursor].to == p { + from := n.flatReplaceIDs[cursor].from + if from != child && n.containsDefIDRec(node, from, start) { return true } + cursor++ } + // cursor now points to the first entry with 'to' < p (or past the end) + // which is perfect for the next iteration with an even smaller p. } - p = c.containments[p] + p = c.containments[p].id if p == start { // We won't match node we haven't already after one cycle. return false @@ -805,7 +880,7 @@ func mergeCloseInfo(nv, nw *nodeContext) { if w == nil { return } - // Merge missing closeInfos + // Merge missing conjunct infos outer: for _, wci := range nw.conjunctInfo { for _, vci := range nv.conjunctInfo { @@ -844,22 +919,24 @@ outer2: // getReqSets initializes, if necessary, and returns the reqSets for n. func getReqSets(n *nodeContext) reqSets { - if n == nil { - return nil - } - if n.computedCloseInfo { return n.reqSets } - a := n.reqSets + a := n.reqSets[:0] v := n.node - if p := v.Parent; p != nil && !n.dropParentRequirements { - a = append(a, getReqSets(p.state)...) - n.filterNonRecursive(&a) + var parentReqs reqSets + // Only inherit parent reqSets from the same OpContext to avoid stale defIDs. + if p := v.Parent; p != nil && !n.dropParentRequirements && p.state != nil && p.state.opID == n.opID { + parentReqs = getReqSets(p.state) } + // Grow the capacity of the slice ahead of time to avoid incremental growth below. + a = slices.Grow(a, len(parentReqs)+len(n.reqDefIDs)) + a = append(a, parentReqs...) 
+ markIgnored(a) + last := len(a) - 1 outer: @@ -914,7 +991,7 @@ outer: // If 'v' is a hidden field, then all reqSets in 'a' for which there is no // corresponding entry in conjunctInfo should be removed from 'a'. if allowedInClosed(v.Label) { - n.filterSets(&a, func(n *nodeContext, a *reqSet) bool { + a = filterSets(a, func(a *reqSet) bool { for _, c := range n.conjunctInfo { if n.containsDefID(a.id, c.id) { return true // keep the set @@ -929,7 +1006,7 @@ outer: parentConjuncts = p.state.conjunctInfo } - n.filterTop(&a, n.conjunctInfo, parentConjuncts) + a = n.filterTop(a, parentConjuncts) n.computedCloseInfo = true if int64(len(a)) > n.ctx.stats.MaxReqSets { @@ -941,12 +1018,12 @@ outer: // If there is a top or ellipsis for all supported conjuncts, we have // evidence that this node can be dropped. -func (n *nodeContext) filterTop(a *reqSets, conjuncts, parentConjuncts []conjunctInfo) (openLevel bool) { - n.filterSets(a, func(n *nodeContext, a *reqSet) bool { +func (n *nodeContext) filterTop(a reqSets, parentConjuncts []conjunctInfo) reqSets { + return filterSets(a, func(a *reqSet) bool { var f conjunctFlags hasAny := false - for _, c := range conjuncts { + for _, c := range n.conjunctInfo { if n.containsDefID(a.id, c.id) { hasAny = true flags := c.flags @@ -973,7 +1050,7 @@ func (n *nodeContext) filterTop(a *reqSets, conjuncts, parentConjuncts []conjunc // The following logic should only apply to non-structs. 
default: hasAny = false - for _, c := range conjuncts { + for _, c := range n.conjunctInfo { if n.containsDefID(id, c.id) { hasAny = true } @@ -985,7 +1062,6 @@ func (n *nodeContext) filterTop(a *reqSets, conjuncts, parentConjuncts []conjunc return true }) - return openLevel } // hasParentEllipsis reports if the parent has any conjuncts from an ellipsis @@ -1006,27 +1082,34 @@ func hasParentEllipsis(n *nodeContext, a *reqSet, conjuncts []conjunctInfo) defI return 0 } -func (n *nodeContext) filterNonRecursive(a *reqSets) { - n.filterSets(a, func(n *nodeContext, e *reqSet) bool { - x := e - if x.once { // || x.id == 0 +func markIgnored(a reqSets) { + for e := range a.elemPtrs() { + if e.once { // || e.id == 0 e.ignored = true } - return true // keep the entry - }) + } } // filter keeps all reqSets e in a for which f(e) and removes the rest. -func (n *nodeContext) filterSets(a *reqSets, f func(n *nodeContext, e *reqSet) bool) { - temp := (*a)[:0] - for i := range *a { - set := (*a)[i] +func filterSets(a reqSets, f func(e *reqSet) bool) reqSets { + temp := a[:0] + for i := range a { + set := &a[i] + if f(set) { + temp = append(temp, *set) + } + } + return temp +} - if f(n, &set) { - temp = append(temp, set) +func (a reqSets) elemPtrs() iter.Seq[*reqSet] { + return func(yield func(e *reqSet) bool) { + for i := range a { + if !yield(&a[i]) { + return + } } } - *a = temp } // lookupSet returns the set in a with the given id or nil if no such set. 
diff --git a/vendor/cuelang.org/go/internal/core/adt/unify.go b/vendor/cuelang.org/go/internal/core/adt/unify.go index c612d96c85..5bda1ceef0 100644 --- a/vendor/cuelang.org/go/internal/core/adt/unify.go +++ b/vendor/cuelang.org/go/internal/core/adt/unify.go @@ -16,6 +16,7 @@ package adt import ( "fmt" + "slices" "cuelang.org/go/cue/token" ) @@ -26,7 +27,11 @@ func (v *Vertex) isInitialized() bool { } func (n *nodeContext) assertInitialized() { - if n != nil && n.ctx.isDevVersion() { + if n != nil { + if n.node == nil { + // Can happen for unit tests. + return + } if v := n.node; !v.isInitialized() { panic(fmt.Sprintf("vertex %p not initialized", v)) } @@ -116,7 +121,11 @@ func (n *nodeContext) scheduleConjuncts() { // func (v *Vertex) unify@(c *OpContext, needs condition, mode runMode) bool { // return v.unifyC(c, needs, mode, true) // } -func (v *Vertex) unify(c *OpContext, needs condition, mode runMode, checkTypos bool) bool { +func (v *Vertex) unify(c *OpContext, flags Flags) bool { + needs := flags.condition + mode := flags.mode + checkTypos := flags.checkTypos + if c.LogEval > 0 { defer c.Un(c.Indentf(v, "UNIFY(%x, %v)", needs, mode)) } @@ -189,7 +198,7 @@ func (v *Vertex) unify(c *OpContext, needs condition, mode runMode, checkTypos b // Note that if mode is final, we will guarantee that the conditions for // this if clause are met down the line. So we assume this is already the // case and set the signal accordingly if so. 
- if !v.Rooted() || v.Parent.allChildConjunctsKnown() || mode == finalize { + if !v.Rooted() || v.Parent.allChildConjunctsKnown(c) || mode == finalize { n.signal(allAncestorsProcessed) } @@ -232,7 +241,7 @@ func (v *Vertex) unify(c *OpContext, needs condition, mode runMode, checkTypos b n.process(pendingKnown, attemptOnly) if n.node.ArcType == ArcPending { for _, a := range n.node.Arcs { - a.unify(c, needs, attemptOnly, checkTypos) + a.unify(c, Flags{condition: needs, mode: attemptOnly, checkTypos: checkTypos}) } } // TODO(evalv3): do we need this? Error messages are slightly better, @@ -264,10 +273,6 @@ func (v *Vertex) unify(c *OpContext, needs condition, mode runMode, checkTypos b } n.updateScalar() - if n.aStruct != nil { - n.updateNodeType(StructKind, n.aStruct, n.aStructID) - } - // First process all but the subfields. switch { case n.meets(nodeOnlyNeeds): @@ -301,7 +306,7 @@ func (v *Vertex) unify(c *OpContext, needs condition, mode runMode, checkTypos b } // We unify here to proactively detect cycles. We do not need to, // nor should we, if have have already found one. - v.unify(n.ctx, needs, mode, checkTypos) + v.unify(n.ctx, Flags{condition: needs, mode: mode, checkTypos: checkTypos}) } // At this point, no more conjuncts will be added, so we could decrement @@ -313,7 +318,7 @@ func (v *Vertex) unify(c *OpContext, needs condition, mode runMode, checkTypos b case needs&subFieldsProcessed != 0: switch { - case assertStructuralCycleV3(n): + case assertStructuralCycle(n): case n.node.status == finalized: // There is no need to recursively process if the node is already @@ -333,11 +338,6 @@ func (v *Vertex) unify(c *OpContext, needs condition, mode runMode, checkTypos b n.signal(subFieldsProcessed) } - if v.BaseValue == nil { - // TODO: this seems to not be possible. Possibly remove. 
- state := finalized - v.BaseValue = n.getValidators(state) - } if v := n.node.Value(); v != nil && IsConcrete(v) { // Ensure that checks are not run again when this value is used // in a validator. @@ -387,7 +387,7 @@ func (v *Vertex) unify(c *OpContext, needs condition, mode runMode, checkTypos b // Ensure the shared node is processed to the requested level. This is // typically needed for scalar values. if w.status == unprocessed { - w.unify(c, needs, mode, false) + w.unify(c, Flags{condition: needs, mode: mode, checkTypos: false}) } return n.meets(needs) @@ -409,14 +409,14 @@ func (v *Vertex) unify(c *OpContext, needs condition, mode runMode, checkTypos b // TODO: find a more principled way to catch this cycle and avoid this // check. - if n.hasAncestorV3(w) { + if n.hasAncestor(w) { n.reportCycleError() return true } // Ensure that shared nodes comply to the same requirements as we // need for the current node. - w.unify(c, needs, mode, checkTypos) + w.unify(c, Flags{condition: needs, mode: mode, checkTypos: checkTypos}) return true } @@ -515,7 +515,14 @@ func (n *nodeContext) completeNodeTasks(mode runMode) { defer n.ctx.Un(n.ctx.Indentf(n.node, "(%v)", mode)) } - n.assertInitialized() + // In attemptOnly mode, don't assert initialization to allow processing + // of partially initialized vertices + if mode != attemptOnly { + n.assertInitialized() + } else if n.node != nil && !n.node.isInitialized() { + // In attemptOnly mode, skip processing if vertex is not initialized + return + } if n.isCompleting > 0 { return @@ -535,7 +542,7 @@ func (n *nodeContext) completeNodeTasks(mode runMode) { } } - if v.IsDynamic || v.Label.IsLet() || v.Parent.allChildConjunctsKnown() { + if v.IsDynamic || v.Label.IsLet() || v.Parent.allChildConjunctsKnown(n.ctx) { n.signal(allAncestorsProcessed) } @@ -604,7 +611,7 @@ func (n *nodeContext) completeAllArcs(needs condition, mode runMode, checkTypos a := n.node.Arcs[arcPos] // TODO: Consider skipping lets. 
- if !a.unify(n.ctx, needs, mode, checkTypos) { + if !a.unify(n.ctx, Flags{condition: needs, mode: mode, checkTypos: checkTypos}) { success = false } @@ -624,7 +631,7 @@ func (n *nodeContext) completeAllArcs(needs condition, mode runMode, checkTypos case a.ArcType > ArcRequired, !a.Label.IsString(): case n.kind&StructKind == 0: if !n.node.IsErr() && !a.IsErr() { - n.reportFieldMismatch(pos(a.Value()), nil, a.Label, n.node.Value()) + n.reportFieldMismatch(Pos(a.Value()), nil, a.Label, n.node.Value()) } // case !wasVoid: // case n.kind == TopKind: @@ -645,14 +652,9 @@ func (n *nodeContext) completeAllArcs(needs condition, mode runMode, checkTypos } } - k := 0 - for _, a := range n.node.Arcs { - if a.ArcType != ArcNotPresent { - n.node.Arcs[k] = a - k++ - } - } - n.node.Arcs = n.node.Arcs[:k] + n.node.Arcs = slices.DeleteFunc(n.node.Arcs, func(a *Vertex) bool { + return a.ArcType == ArcNotPresent + }) for _, a := range n.node.Arcs { // Errors are allowed in let fields. Handle errors and failure to @@ -672,7 +674,7 @@ func (n *nodeContext) completeAllArcs(needs condition, mode runMode, checkTypos ctx := n.ctx f := ctx.PushState(c.env, c.expr.Source()) - v := ctx.evalState(c.expr, combinedFlags{ + v := ctx.evalState(c.expr, Flags{ status: finalized, condition: allKnown, mode: ignore, @@ -687,7 +689,7 @@ func (n *nodeContext) completeAllArcs(needs condition, mode runMode, checkTypos Src: c.expr.Source(), Code: CycleError, Node: n.node, - Err: ctx.NewPosf(pos(c.expr), + Err: ctx.NewPosf(Pos(c.expr), "circular dependency in evaluation of conditionals: %v changed after evaluation", ctx.Str(c.expr)), }) @@ -706,14 +708,9 @@ func (n *nodeContext) completeAllArcs(needs condition, mode runMode, checkTypos // // TODO(perf): we could keep track if any such structs exist and only // do this removal if there is a change of shrinking the list. 
- k = 0 - for _, s := range n.node.Structs { - if s.initialized { - n.node.Structs[k] = s - k++ - } - } - n.node.Structs = n.node.Structs[:k] + n.node.Structs = slices.DeleteFunc(n.node.Structs, func(s StructInfo) bool { + return !s.initialized + }) // TODO: This seems to be necessary, but enables structural cycles. // Evaluator whether we still need this. @@ -767,7 +764,7 @@ func (n *nodeContext) evalArcTypes(mode runMode) { if a.ArcType != ArcPending { continue } - a.unify(n.ctx, arcTypeKnown, mode, false) + a.unify(n.ctx, Flags{condition: arcTypeKnown, mode: mode, checkTypos: false}) // Ensure the arc is processed up to the desired level if a.ArcType == ArcPending { // TODO: cancel tasks? @@ -783,8 +780,7 @@ func root(v *Vertex) *Vertex { return v } -func (v *Vertex) lookup(c *OpContext, pos token.Pos, f Feature, flags combinedFlags) *Vertex { - task := c.current() +func (v *Vertex) lookup(c *OpContext, pos token.Pos, f Feature, flags Flags) *Vertex { needs := flags.condition runMode := flags.mode @@ -822,11 +818,6 @@ func (v *Vertex) lookup(c *OpContext, pos token.Pos, f Feature, flags combinedFl state.completeNodeTasks(attemptOnly) } - // TODO: remove because unnecessary? - if task != nil && task.state != taskRUNNING { - return nil // abort, task is blocked or terminated in a cycle. - } - // TODO: verify lookup types. arc := v.LookupRaw(f) @@ -876,10 +867,6 @@ func (v *Vertex) lookup(c *OpContext, pos token.Pos, f Feature, flags combinedFl // arcType be known at this point, but that does not seem to work. // Revisit once we have the structural cycle detection in place. - // TODO: should we avoid notifying ArcPending vertices here? - if task != nil { - arcState.addNotify2(task.node.node, task.id) - } if arc.ArcType == ArcPending { return arcReturn } @@ -897,11 +884,11 @@ func (v *Vertex) lookup(c *OpContext, pos token.Pos, f Feature, flags combinedFl // some values to be remain unevaluated. 
switch { case needs == arcTypeKnown|fieldSetKnown: - arc.unify(c, needs, finalize, false) + arc.unify(c, Flags{condition: needs, mode: finalize, checkTypos: false}) default: // Now we can't finalize, at least try to get as far as we // can and only yield if we really have to. - if !arc.unify(c, needs, attemptOnly, false) { + if !arc.unify(c, Flags{condition: needs, mode: attemptOnly, checkTypos: false}) { arcState.process(needs, yield) } } @@ -912,7 +899,17 @@ func (v *Vertex) lookup(c *OpContext, pos token.Pos, f Feature, flags combinedFl } switch arc.ArcType { - case ArcMember, ArcRequired: + case ArcRequired: + label := f.SelectorString(c.Runtime) + b := &Bottom{ + Code: IncompleteError, + Err: c.NewPosf(pos, "required field missing: %s", label), + Node: v, + } + // TODO: yield failure + c.AddBottom(b) // TODO: unify error mechanism. + return arcReturn + case ArcMember: return arcReturn case ArcOptional: diff --git a/vendor/cuelang.org/go/internal/core/adt/validate.go b/vendor/cuelang.org/go/internal/core/adt/validate.go index 6195f4d9b9..6de64d9163 100644 --- a/vendor/cuelang.org/go/internal/core/adt/validate.go +++ b/vendor/cuelang.org/go/internal/core/adt/validate.go @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package validate collects errors from an evaluated Vertex. package adt type ValidateConfig struct { @@ -25,6 +24,10 @@ type ValidateConfig struct { // DisallowCycles indicates that there may not be cycles. DisallowCycles bool + // ReportIncomplete reports an incomplete error even when concrete is not + // requested. + ReportIncomplete bool + // AllErrors continues descending into a Vertex, even if errors are found. AllErrors bool @@ -73,6 +76,8 @@ type validator struct { err *Bottom inDefinition int + sharedPositions []Node + // shared vertices should be visited at least once if referenced by // a non-definition. 
// TODO: we could also keep track of the number of references to a @@ -81,6 +86,12 @@ type validator struct { visited map[*Vertex]bool } +func (v *validator) addPositions(err *ValueError) { + for _, p := range v.sharedPositions { + err.AddPosition(p) + } +} + func (v *validator) checkConcrete() bool { return v.Concrete && v.inDefinition == 0 } @@ -104,6 +115,17 @@ func (v *validator) validate(x *Vertex) { y := x + if x.IsShared { + saved := v.sharedPositions + // assume there is always a single conjunct: multiple references either + // result in the same shared value, or no sharing. And there has to be + // at least one to be able to share in the first place. + c, n := x.SingleConjunct() + if n >= 1 { + v.sharedPositions = append(v.sharedPositions, c.Elem()) + } + defer func() { v.sharedPositions = saved }() + } // Dereference values, but only those that are not shared. This includes let // values. This prevents us from processing structure-shared nodes more than // once and prevents potential cycles. 
@@ -131,7 +153,7 @@ func (v *validator) validate(x *Vertex) { } case IncompleteError: - if v.checkFinal() { + if v.ReportIncomplete || v.checkConcrete() { v.add(b) } @@ -146,9 +168,11 @@ func (v *validator) validate(x *Vertex) { x = x.Default() if !IsConcrete(x) { x := x.Value() + err := v.ctx.Newf("incomplete value %v", x) + v.addPositions(err) v.add(&Bottom{ Code: IncompleteError, - Err: v.ctx.Newf("incomplete value %v", x), + Err: err, }) } } @@ -156,7 +180,7 @@ func (v *validator) validate(x *Vertex) { for _, a := range x.Arcs { if a.ArcType == ArcRequired && v.Final && v.inDefinition == 0 { v.ctx.PushArcAndLabel(a) - v.add(NewRequiredNotPresentError(v.ctx, a)) + v.add(NewRequiredNotPresentError(v.ctx, a, v.sharedPositions...)) v.ctx.PopArcAndLabel(a) continue } diff --git a/vendor/cuelang.org/go/internal/core/adt/vertexstatus_string.go b/vendor/cuelang.org/go/internal/core/adt/vertexstatus_string.go index 789f0883bb..4c7893ad09 100644 --- a/vendor/cuelang.org/go/internal/core/adt/vertexstatus_string.go +++ b/vendor/cuelang.org/go/internal/core/adt/vertexstatus_string.go @@ -12,17 +12,17 @@ func _() { _ = x[evaluating-1] _ = x[partial-2] _ = x[conjuncts-3] - _ = x[evaluatingArcs-4] - _ = x[finalized-5] + _ = x[finalized-4] } -const _vertexStatus_name = "unprocessedevaluatingpartialconjunctsevaluatingArcsfinalized" +const _vertexStatus_name = "unprocessedevaluatingpartialconjunctsfinalized" -var _vertexStatus_index = [...]uint8{0, 11, 21, 28, 37, 51, 60} +var _vertexStatus_index = [...]uint8{0, 11, 21, 28, 37, 46} func (i vertexStatus) String() string { - if i < 0 || i >= vertexStatus(len(_vertexStatus_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_vertexStatus_index)-1 { return "vertexStatus(" + strconv.FormatInt(int64(i), 10) + ")" } - return _vertexStatus_name[_vertexStatus_index[i]:_vertexStatus_index[i+1]] + return _vertexStatus_name[_vertexStatus_index[idx]:_vertexStatus_index[idx+1]] } diff --git 
a/vendor/cuelang.org/go/internal/core/adt/weakmap.go b/vendor/cuelang.org/go/internal/core/adt/weakmap.go new file mode 100644 index 0000000000..0087fb5b2e --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/weakmap.go @@ -0,0 +1,71 @@ +// Copyright 2025 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +import ( + "runtime" + "sync" + "weak" +) + +// TODO: this was inspired by (but rewritten from) a suggestion in +// https://github.com/golang/go/issues/43615. Once this issue is resolved or a +// properly licensed package is released, we should consider using that. + +// newMemoizer returns a new memoizer value that caches +// the results of calling the make function. +// It does not guarantee that there will be at most one +// *V value at any one time or that make won't be invoked concurrently. +// +// It does not memoize results when make returns an error, +func newMemoizer[K comparable, V any](make func(K) (*V, error)) *memoizer[K, V] { + return &memoizer[K, V]{ + make: make, + } +} + +// memoizer implements a garbage-collectable cache of +// results from calling the make function. +type memoizer[K comparable, V any] struct { + // make returns a new result for K. It is expected + // that it will always return an equivalent non-nil value + // for a given key. + make func(K) (*V, error) + // string -> weak.Pointer[V] + m sync.Map +} + +// get returns the result for the key k. 
+func (c *memoizer[K, V]) get(k K) (*V, error) { + if entry, ok := c.m.Load(k); ok { + if v := entry.(weak.Pointer[V]).Value(); v != nil { + return v, nil + } + } + // Could potentially use singleflight or similar to + // avoid redundant make calls in concurrent situations + // but the redundancy probably isn't much of an issue + // in practice. + v, err := c.make(k) + if err != nil { + return nil, err + } + wp := weak.Make(v) + runtime.AddCleanup(v, func(wp weak.Pointer[V]) { + c.m.CompareAndDelete(k, wp) + }, wp) + c.m.Store(k, wp) + return v, nil +} diff --git a/vendor/cuelang.org/go/internal/core/compile/builtin.go b/vendor/cuelang.org/go/internal/core/compile/builtin.go index 13f07a10e2..96216020ed 100644 --- a/vendor/cuelang.org/go/internal/core/compile/builtin.go +++ b/vendor/cuelang.org/go/internal/core/compile/builtin.go @@ -18,8 +18,8 @@ import ( "strings" "cuelang.org/go/cue/errors" - "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" + "cuelang.org/go/internal/iterutil" ) // This file contains predeclared builtins. 
@@ -43,7 +43,7 @@ var errorBuiltin = &adt.Builtin{ Params: []adt.Param{stringParam}, Result: adt.BottomKind, - RawFunc: func(call *adt.CallContext) adt.Value { + RawFunc: func(call adt.CallContext) adt.Value { ctx := call.OpContext() arg := call.Expr(0) @@ -64,7 +64,7 @@ var errorBuiltin = &adt.Builtin{ if err := ctx.Err(); err != nil { args = append(args, x.Parts[i]) } else if y.Concreteness() == adt.Concrete && - y.Kind()&adt.NumberKind|adt.StringKind|adt.BytesKind|adt.BoolKind != 0 { + y.Kind()&(adt.NumberKind|adt.StringKind|adt.BytesKind|adt.BoolKind) != 0 { args = append(args, ctx.ToString(y)) } else { args = append(args, y) @@ -86,19 +86,18 @@ var lenBuiltin = &adt.Builtin{ Name: "len", Params: []adt.Param{{Value: &adt.BasicType{K: supportedByLen}}}, Result: adt.IntKind, - Func: func(call *adt.CallContext) adt.Expr { + Func: func(call adt.CallContext) adt.Expr { c := call.OpContext() args := call.Args() v := args[0] if x, ok := v.(*adt.Vertex); ok { - x.LockArcs = true switch x.BaseValue.(type) { case nil: // This should not happen, but be defensive. return c.NewErrf("unevaluated vertex") case *adt.ListMarker: - return c.NewInt64(int64(len(x.Elems())), v) + return c.NewInt64(int64(iterutil.Count(x.Elems())), v) case *adt.StructMarker: n := 0 @@ -136,7 +135,7 @@ var closeBuiltin = &adt.Builtin{ Name: "close", Params: []adt.Param{structParam}, Result: adt.StructKind, - Func: func(call *adt.CallContext) adt.Expr { + Func: func(call adt.CallContext) adt.Expr { c := call.OpContext() args := call.Args() @@ -144,22 +143,80 @@ var closeBuiltin = &adt.Builtin{ if !ok { return c.NewErrf("struct argument must be concrete") } - var v *adt.Vertex - if c.Version == internal.DevVersion { - // TODO(evalv3) this is a rather convoluted and inefficient way to - // accomplish signaling vertex should be closed. In most cases, it - // would suffice to set IsClosed in the CloseInfo. However, that - // does not cover all code paths. Consider simplifying this. 
- v = c.Wrap(s, c.CloseInfo()) - v.ClosedNonRecursive = true - } else { - if m, ok := s.BaseValue.(*adt.StructMarker); ok && m.NeedClose { - return s + // TODO(evalv3) this is a rather convoluted and inefficient way to + // accomplish signaling vertex should be closed. In most cases, it + // would suffice to set IsClosed in the CloseInfo. However, that + // does not cover all code paths. Consider simplifying this. + v := c.Wrap(s, c.CloseInfo()) + v.ClosedNonRecursive = true + return v + }, +} + +var closeAllBuiltin = &adt.Builtin{ + Name: "__closeAll", + Params: []adt.Param{topParam}, + Result: adt.TopKind, + Func: func(call adt.CallContext) adt.Expr { + c := call.OpContext() + + x := call.Expr(0) + switch x.(type) { + case *adt.StructLit, *adt.ListLit: + if src := x.Source(); src == nil || !src.Pos().Experiment().ExplicitOpen { + // Allow usage if explicit open is set + return c.NewErrf("__closeAll may only be used when explicitopen is enabled") } - v = s.Clone() - v.BaseValue = &adt.StructMarker{NeedClose: true} + default: + return c.NewErrf("argument must be a struct or list literal") } - return v + + // must be literal struct + args := call.Args() + + s, ok := args[0].(*adt.Vertex) + if !ok { + return c.NewErrf("struct argument must be concrete") + } + + s.ClosedRecursive = true + + return s + }, +} + +var recloseBuiltin = &adt.Builtin{ + Name: "__reclose", + Params: []adt.Param{topParam}, + Result: adt.TopKind, + Func: func(call adt.CallContext) adt.Expr { + c := call.OpContext() + + x := call.Expr(0) + switch x.(type) { + case *adt.StructLit, *adt.ListLit: + if src := x.Source(); src == nil || !src.Pos().Experiment().ExplicitOpen { + // Allow usage if explicit open is set + return c.NewErrf("__reclose may only be used when explicitopen is enabled") + } + default: + return c.NewErrf("argument must be a struct or list literal") + } + + // must be literal struct + args := call.Args() + + // Note that we could have an embedded scalar here, so having a struct + 
// or list does not guarantee that the result is that as well. + // + // #Def: 1 + // a: __reclose({ #Def }) + // + if s, ok := args[0].(*adt.Vertex); ok && s.ShouldRecursivelyClose() { + s.ClosedRecursive = true + } + + return args[0] }, } @@ -167,18 +224,18 @@ var andBuiltin = &adt.Builtin{ Name: "and", Params: []adt.Param{listParam}, Result: adt.IntKind, - RawFunc: func(call *adt.CallContext) adt.Value { + Func: func(call adt.CallContext) adt.Expr { c := call.OpContext() - arg := call.Arg(0) + args := call.Args() - list := c.RawElems(arg) - if len(list) == 0 { - return &adt.Top{} - } + seq := c.RawElems(args[0]) a := []adt.Value{} - for _, c := range list { + for c := range seq { a = append(a, c) } + if len(a) == 0 { + return &adt.Top{} + } return &adt.Conjunction{Values: a} }, } @@ -188,12 +245,12 @@ var orBuiltin = &adt.Builtin{ Params: []adt.Param{listParam}, Result: adt.IntKind, NonConcrete: true, - Func: func(call *adt.CallContext) adt.Expr { + Func: func(call adt.CallContext) adt.Expr { c := call.OpContext() args := call.Args() d := []adt.Disjunct{} - for _, c := range c.RawElems(args[0]) { + for c := range c.RawElems(args[0]) { d = append(d, adt.Disjunct{Val: c, Default: false}) } if len(d) == 0 { @@ -226,7 +283,7 @@ var divBuiltin = &adt.Builtin{ Name: "div", Params: []adt.Param{intParam, intParam}, Result: adt.IntKind, - Func: func(call *adt.CallContext) adt.Expr { + Func: func(call adt.CallContext) adt.Expr { c := call.OpContext() args := call.Args() @@ -240,7 +297,7 @@ var modBuiltin = &adt.Builtin{ Name: "mod", Params: []adt.Param{intParam, intParam}, Result: adt.IntKind, - Func: func(call *adt.CallContext) adt.Expr { + Func: func(call adt.CallContext) adt.Expr { c := call.OpContext() args := call.Args() @@ -254,7 +311,7 @@ var quoBuiltin = &adt.Builtin{ Name: "quo", Params: []adt.Param{intParam, intParam}, Result: adt.IntKind, - Func: func(call *adt.CallContext) adt.Expr { + Func: func(call adt.CallContext) adt.Expr { c := call.OpContext() args := 
call.Args() @@ -268,7 +325,7 @@ var remBuiltin = &adt.Builtin{ Name: "rem", Params: []adt.Param{intParam, intParam}, Result: adt.IntKind, - Func: func(call *adt.CallContext) adt.Expr { + Func: func(call adt.CallContext) adt.Expr { c := call.OpContext() args := call.Args() @@ -295,7 +352,7 @@ var testExperiment = &adt.Builtin{ Name: "testExperiment", Params: []adt.Param{topParam}, Result: adt.TopKind, - Func: func(call *adt.CallContext) adt.Expr { + Func: func(call adt.CallContext) adt.Expr { args := call.Args() if call.Pos().Experiment().Testing { diff --git a/vendor/cuelang.org/go/internal/core/compile/compile.go b/vendor/cuelang.org/go/internal/core/compile/compile.go index 7044c8fb27..a547956575 100644 --- a/vendor/cuelang.org/go/internal/core/compile/compile.go +++ b/vendor/cuelang.org/go/internal/core/compile/compile.go @@ -18,6 +18,7 @@ import ( "strings" "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/literal" "cuelang.org/go/cue/token" @@ -46,23 +47,28 @@ type Config struct { // Under normal circumstances, identifiers bind to import specifications, // which get resolved to an ImportReference. Use this option to // automatically resolve identifiers to imports. - Imports func(x *ast.Ident) (pkgPath string) + Imports func(x *ast.Ident) string // pkgPath is used to qualify the scope of hidden fields. The default // scope is "_". pkgPath string } -// Files compiles the given files as a single instance. It disregards -// the package names and it is the responsibility of the user to verify that -// the packages names are consistent. The pkgID must be a unique identifier -// for a package in a module, for instance as obtained from build.Instance.ID. +// Files is a convenience method that wraps [Instance]. 
+func Files(cfg *Config, r adt.Runtime, pkgID string, files ...*ast.File) (*adt.Vertex, errors.Error) { + return Instance(cfg, r, &build.Instance{ + ImportPath: pkgID, + Files: files, + }) +} + +// Instance compiles the given instance. // // Files may return a completed parse even if it has errors. -func Files(cfg *Config, r adt.Runtime, pkgID string, files ...*ast.File) (*adt.Vertex, errors.Error) { - c := newCompiler(cfg, pkgID, r) +func Instance(cfg *Config, r adt.Runtime, inst *build.Instance) (*adt.Vertex, errors.Error) { + c := newCompiler(cfg, inst, r) - v := c.compileFiles(files) + v := c.compileFiles(inst.Files) // TODO use inst.BuildFiles? if c.errs != nil { return v, c.errs @@ -74,7 +80,9 @@ func Files(cfg *Config, r adt.Runtime, pkgID string, files ...*ast.File) (*adt.V // unique identifier for a package in a module, for instance as obtained from // build.Instance.ID. func Expr(cfg *Config, r adt.Runtime, pkgPath string, x ast.Expr) (adt.Conjunct, errors.Error) { - c := newCompiler(cfg, pkgPath, r) + c := newCompiler(cfg, &build.Instance{ + ImportPath: pkgPath, + }, r) v := c.compileExpr(x) @@ -84,30 +92,33 @@ func Expr(cfg *Config, r adt.Runtime, pkgPath string, x ast.Expr) (adt.Conjunct, return v, nil } -func newCompiler(cfg *Config, pkgPath string, r adt.Runtime) *compiler { +func newCompiler(cfg *Config, inst *build.Instance, r adt.Runtime) *compiler { c := &compiler{ + inst: inst, index: r, } if cfg != nil { c.Config = *cfg } - if pkgPath == "" { - pkgPath = "_" - } - c.Config.pkgPath = pkgPath + c.Config.pkgPath = inst.ID() return c } type compiler struct { Config + + // inst holds the build instance within which the current + // expression is being compiler. 
This is used to resolve + // imports to instances + inst *build.Instance + upCountOffset int32 // 1 for files; 0 for expressions index adt.StringIndexer experiments cueexperiment.File - stack []frame - inSelector int + stack []frame // refersToForVariable tracks whether an expression refers to a key or // value produced by a for comprehension embedded within a struct. @@ -120,6 +131,11 @@ type compiler struct { // across different iterations of the same field. refersToForVariable bool + // inTryContext tracks the nesting depth of try clauses. The ? marker on + // references is only valid when inTryContext > 0. In the future, this may + // also be set by other contexts like exists() builtins or query contexts. + inTryContext int + fileScope map[adt.Feature]bool num literal.NumInfo @@ -219,7 +235,8 @@ func (c *compiler) lookupAlias(k int, id *ast.Ident) aliasEntry { entry, ok := m[name] if !ok { - err := c.errf(id, "could not find LetClause associated with identifier %q", name) + err := c.errf(id, + "could not find let or alias associated with identifier %q", name) return aliasEntry{expr: err} } @@ -334,32 +351,64 @@ func (c *compiler) compileExpr(x ast.Expr) adt.Conjunct { return adt.MakeRootConjunct(env, expr) } +// requireVersion checks if a feature is available in the current language version. +// Returns an error if the feature requires a newer version, nil otherwise. +func (c *compiler) requireVersion(n ast.Node, minVersion, feature string) *adt.Bottom { + v := c.experiments.LanguageVersion() + if v == "" { + return nil + } + if semver.Compare(minVersion, v) > 0 { + return c.errf(n, "%s requires language version %s or later; current version is %s", feature, minVersion, v) + } + return nil +} + // verifyVersion checks whether n is a Builtin and then checks whether the // Added version is compatible with the file version registered in c. 
func (c *compiler) verifyVersion(src ast.Node, n adt.Expr) adt.Expr { - b, ok := n.(*adt.Builtin) - if !ok { + var kind, name, added string + switch x := n.(type) { + default: return n - } - if b.Added == "" { - // No version check needed. + + case *adt.Builtin: + if x.Added == "" { + // No version check needed. + return n + } + + kind = "builtin" + name = x.Name + added = x.Added + + case *adt.ValueReference: + // NOTE: this is always self or __self. + kind = "predeclared identifier" + name = x.Src.Name + // Check if Self experiment is enabled + if !c.experiments.AliasV2 { + return c.errf(src, "%s %q requires @experiment(aliasv2)", kind, name) + } + x.Label = adt.MakeStringLabel(c.index, name) return n } + v := c.experiments.LanguageVersion() if v == "" { // We assume "latest" if the file is not associated with a version. return n } - if semver.Compare(b.Added, v) <= 0 { + if semver.Compare(added, v) <= 0 { // The feature is available in the file version. return n } // The feature is not available in the file version. // NonConcrete builtins are not allowed in older versions. - return c.errf(src, "builtin %q is not available in version %v; "+ - "it was added in version %q", b.Name, v, b.Added) + return c.errf(src, "%s %q is not available in version %v; "+ + "it was added in version %q", kind, name, v, added) } // resolve assumes that all existing resolutions are legal. Validation should @@ -371,9 +420,17 @@ func (c *compiler) resolve(n *ast.Ident) adt.Expr { // X in import "path/X" // X in import X "path" if imp, ok := n.Node.(*ast.ImportSpec); ok { + importPath := c.label(imp.Path) + importPathStr := importPath.StringValue(c.index) + inst := c.inst.LookupImport(importPathStr) + if inst == nil && !isStdlibPackage(importPathStr) { + // It's an external package, which should be mentioned in [build.Instance.Imports]. 
+ c.errf(n, "import %q not found", importPathStr) + } return &adt.ImportReference{ Src: n, ImportPath: c.label(imp.Path), + Instance: inst, Label: c.label(n), } } @@ -416,6 +473,8 @@ func (c *compiler) resolve(n *ast.Ident) adt.Expr { if c.Config.Imports != nil { if pkgPath := c.Config.Imports(n); pkgPath != "" { + // Note: Config.Imports is (currently) only used for stdlib + // imports, so we can leave the Instance field nil. return &adt.ImportReference{ Src: n, ImportPath: adt.MakeStringLabel(c.index, pkgPath), @@ -450,9 +509,35 @@ func (c *compiler) resolve(n *ast.Ident) adt.Expr { UpCount: upCount, } - switch f := n.Node.(type) { + switch x := n.Node.(type) { + case *ast.Ident: + // If the identifier refers to a label alias, we link to that. + if f.Alias != nil && f.Alias.Label == x { + switch lab := f.Label.(type) { + case *ast.Ident: + if internal.IsDefOrHidden(lab.Name) { + return c.errf(x, "label alias cannot reference definition or hidden field") + } + return c.expr(ast.NewString(lab.Name)) + case *ast.BasicLit: + return c.expr(lab) + } + } case *ast.Field: - _ = c.lookupAlias(k, f.Label.(*ast.Alias).Ident) // mark as used + var ident *ast.Ident + if alias, _ := x.Label.(*ast.Alias); alias != nil { + if x.Alias != nil { + return c.errf(x, + "field has both label alias and postfix alias") + } + ident = alias.Ident + } else if x.Alias != nil { + ident = x.Alias.Field + } else { + return c.errf(x, "label reference has no alias") + } + + _ = c.lookupAlias(k, ident) // mark as used // The expression of field Label is always done in the same // Environment as pointed to by the UpCount of the DynamicReference // and the evaluation of a DynamicReference assumes this. 
@@ -465,11 +550,11 @@ func (c *compiler) resolve(n *ast.Ident) adt.Expr { } case *ast.Alias: - _ = c.lookupAlias(k, f.Ident) // mark as used + _ = c.lookupAlias(k, x.Ident) // mark as used return &adt.ValueReference{ Src: n, UpCount: upCount, - Label: c.label(f.Ident), + Label: c.label(x.Ident), } } return label @@ -519,26 +604,43 @@ func (c *compiler) resolve(n *ast.Ident) adt.Expr { X: entry.expr, // TODO: remove usage } - // TODO: handle new-style aliases - + // Handle new-style postfix aliases: a~X or a~(K,V) case *ast.Field: - // X=x: y - // X=(x): y - // X="\(x)": y - a, ok := f.Label.(*ast.Alias) - if !ok { + var ident *ast.Ident + lab := f.Label + // Old-style label aliases: X=x: y, X=(x): y, X="\(x)": + + if a, ok := f.Label.(*ast.Alias); ok { + ident = a.Ident + if f.Alias != nil { + return c.errf(f, "field has both label alias and postfix alias") + } + label, ok := a.Expr.(ast.Label) + if !ok { + return c.errf(a.Expr, "invalid label expression") + } + lab = label + } else if f.Alias != nil { + // Check if this identifier refers to the Field alias or Label alias + // The Field alias (X or V) is the value reference + // The Label alias (K) in dual form is a string reference + if f.Alias.Field == nil { + return c.errf(f, "postfix alias must have field component") + } + ident = f.Alias.Field + } else { return c.errf(n, "illegal reference %s", n.Name) } - aliasInfo := c.lookupAlias(k, a.Ident) // marks alias as used. - lab, ok := a.Expr.(ast.Label) - if !ok { - return c.errf(a.Expr, "invalid label expression") - } + + aliasInfo := c.lookupAlias(k, ident) // marks alias as used. + name, _, err := ast.LabelName(lab) switch { case errors.Is(err, ast.ErrIsExpression): if aliasInfo.expr == nil { - panic("unreachable") + // This can happen when we have a cyclic reference like (x)~x: 3 + // where the label expression references the alias being defined. 
+ return c.errf(n, "cyclic reference in field alias") } return &adt.DynamicReference{ Src: n, @@ -578,6 +680,10 @@ func (c *compiler) addDecls(st *adt.StructLit, a []ast.Decl) { } } +func isNonBlank(a *ast.Ident) bool { + return a != nil && a.Name != "_" +} + func (c *compiler) markAlias(d ast.Decl) { switch x := d.(type) { case *ast.Field: @@ -592,6 +698,15 @@ func (c *compiler) markAlias(d ast.Decl) { c.insertAlias(a.Ident, e) } + // Register postfix aliases for regular fields (not pattern constraints) + // Pattern constraints register aliases in value scope only + // Regular field: register in parent scope + // Store the Field in the label so we can find it later + // Skip _ (blank identifier) + if a := x.Alias; a != nil && isNonBlank(a.Field) { + c.insertAlias(a.Field, aliasEntry{source: a}) + } + case *ast.LetClause: a := aliasEntry{ label: (*letScope)(x), @@ -621,6 +736,8 @@ func (c *compiler) decl(d ast.Decl) adt.Decl { v := x.Value var value adt.Expr + + // Handle value aliases. Deprecated in new aliases. 
if a, ok := v.(*ast.Alias); ok { c.pushScope(nil, 0, a) c.insertAlias(a.Ident, aliasEntry{source: a}) @@ -638,12 +755,10 @@ func (c *compiler) decl(d ast.Decl) adt.Decl { return c.errf(x, "cannot use _ as label") } - t, _ := internal.ConstraintToken(x) - return &adt.Field{ Src: x, Label: label, - ArcType: adt.ConstraintFromToken(t), + ArcType: adt.ConstraintFromToken(x.Constraint), Value: value, } @@ -661,6 +776,12 @@ func (c *compiler) decl(d ast.Decl) adt.Decl { elem = a.Expr } + // For postfix aliases, use the Field identifier (X or V) + // For dual form ~(K,V), we use V as the primary label + if a := x.Alias; a != nil && isNonBlank(a.Label) { + label = c.label(a.Label) + } + return &adt.BulkOptionalField{ Src: x, Filter: c.expr(elem), @@ -669,22 +790,18 @@ func (c *compiler) decl(d ast.Decl) adt.Decl { } case *ast.ParenExpr: - t, _ := internal.ConstraintToken(x) - return &adt.DynamicField{ Src: x, Key: c.expr(l), - ArcType: adt.ConstraintFromToken(t), + ArcType: adt.ConstraintFromToken(x.Constraint), Value: value, } case *ast.Interpolation: - t, _ := internal.ConstraintToken(x) - return &adt.DynamicField{ Src: x, Key: c.expr(l), - ArcType: adt.ConstraintFromToken(t), + ArcType: adt.ConstraintFromToken(x.Constraint), Value: value, } } @@ -714,8 +831,6 @@ func (c *compiler) decl(d ast.Decl) adt.Decl { Value: value, } - // case: *ast.Alias: // TODO(value alias) - case *ast.CommentGroup: // Nothing to do for a free-floating comment group. 
@@ -735,7 +850,7 @@ func (c *compiler) decl(d ast.Decl) adt.Decl { for _, c := range ast.Comments(x.Expr) { ast.AddComment(x, c) } - ast.SetComments(x.Expr, x.Comments()) + ast.SetComments(x.Expr, ast.Comments(x)) return c.expr(x.Expr) case ast.Expr: @@ -748,19 +863,31 @@ func (c *compiler) addLetDecl(d ast.Decl) { switch x := d.(type) { case *ast.Field: lab := x.Label + var ident *ast.Ident if a, ok := lab.(*ast.Alias); ok { + if x.Alias != nil { + c.errf(x, "field has both label alias and postfix alias") + return + } + if lab, ok = a.Expr.(ast.Label); !ok { // error reported elsewhere return } + ident = a.Ident - switch lab.(type) { - case *ast.Ident, *ast.BasicLit, *ast.ListLit: - // Even though we won't need the alias, we still register it - // for duplicate and failed reference detection. - default: - c.updateAlias(a.Ident, c.expr(a.Expr)) - } + } else if a := x.Alias; a != nil && isNonBlank(a.Field) { + ident = x.Alias.Field + } else { + break + } + + switch lab.(type) { + case *ast.Ident, *ast.BasicLit, *ast.ListLit: + // Even though we won't need the alias, we still register it + // for duplicate and failed reference detection. + default: + c.updateAlias(ident, c.expr(lab.(ast.Expr))) } case *ast.Alias: @@ -787,6 +914,7 @@ func (c *compiler) elem(n ast.Expr) adt.Elem { func (c *compiler) comprehension(x *ast.Comprehension, inList bool) adt.Elem { var a []adt.Yielder + hasTry := false for _, v := range x.Clauses { switch x := v.(type) { case *ast.ForClause: @@ -830,12 +958,54 @@ func (c *compiler) comprehension(x *ast.Comprehension, inList bool) adt.Elem { defer c.popScope() f.isComprehensionVar = !inList && refsCompVar a = append(a, y) + + case *ast.TryClause: + // Check experiment flag + if !c.experiments.Try { + return c.errf(x, "try clause requires the try experiment (language version v0.16.0 or later)") + } + y := &adt.TryClause{Src: x} + if x.Ident == nil { + // Struct form: try { ... 
} + hasTry = true + } else { + // Bind form try x = expr + savedUses := c.refersToForVariable + c.refersToForVariable = false + + // Enable try context for the expression + c.inTryContext++ + expr := c.expr(x.Expr) + c.inTryContext-- + + refsCompVar := c.refersToForVariable + c.refersToForVariable = savedUses || refsCompVar + + y.Label = c.label(x.Ident) + y.Expr = expr + + // Push a scope for the identifier binding. + f := c.pushScope((*tryScope)(x), 1, v) + defer c.popScope() + f.isComprehensionVar = !inList && refsCompVar + } + // Struct form: body is in Comprehension.Value, compiled with try context + a = append(a, y) } if _, ok := a[0].(*adt.LetClause); ok { return c.errf(x, "first comprehension clause must be 'if' or 'for'") } + + // Check that struct-form try (without assignment) is the last clause. + // It gets its body from Comprehension.Value, so it must be last. + for i, clause := range a[:len(a)-1] { + if tc, ok := clause.(*adt.TryClause); ok && tc.Expr == nil { + return c.errf(x.Clauses[i], + "struct-form try clause must be the last clause in a comprehension") + } + } } // TODO: make x.Value an *ast.StructLit and this is redundant. @@ -844,7 +1014,14 @@ func (c *compiler) comprehension(x *ast.Comprehension, inList bool) adt.Elem { "comprehension value must be struct, found %T", y) } + // Enable try context for struct form try clause body + if hasTry { + c.inTryContext++ + } y := c.expr(x.Value) + if hasTry { + c.inTryContext-- + } st, ok := y.(*adt.StructLit) if !ok { @@ -856,11 +1033,50 @@ func (c *compiler) comprehension(x *ast.Comprehension, inList bool) adt.Elem { return c.errf(x, "comprehension value without clauses") } - return &adt.Comprehension{ + comp := &adt.Comprehension{ Syntax: x, Clauses: a, Value: st, } + + // Compile fallback clause in the outer scope (before comprehension variables). + // The fallback clause should NOT have access to for/let variables. 
+ if x.Fallback != nil { + // Both 'else' (with if) and 'fallback' (with for) require the try experiment. + if !c.experiments.Try { + // Use appropriate keyword in error message based on clause type. + keyword := "fallback" + if len(x.Clauses) == 1 { + switch x.Clauses[0].(type) { + case *ast.IfClause, *ast.TryClause: + keyword = "else" + } + } + return c.errf(x.Fallback, "%s clause requires the try experiment (language version v0.16.0 or later)", keyword) + } + + // Pop all comprehension scopes temporarily to compile fallback in outer scope. + // We need to compile the fallback body outside the comprehension's scope chain. + savedStack := c.stack + // Find the scope depth before the comprehension clauses were pushed. + // Each for/let clause pushes one scope. + outerDepth := len(savedStack) + for _, clause := range x.Clauses { + switch clause.(type) { + case *ast.ForClause, *ast.LetClause: + outerDepth-- + } + } + // Temporarily truncate to outer scope depth. + c.stack = savedStack[:outerDepth] + fallbackBody := c.expr(x.Fallback.Body) + c.stack = savedStack // Restore full stack + if fallbackSt, ok := fallbackBody.(*adt.StructLit); ok { + comp.Fallback = fallbackSt + } + } + + return comp } func (c *compiler) labeledExpr(f ast.Decl, lab labeler, expr ast.Expr) adt.Expr { @@ -870,12 +1086,21 @@ func (c *compiler) labeledExpr(f ast.Decl, lab labeler, expr ast.Expr) adt.Expr func (c *compiler) labeledExprAt(k int, f ast.Decl, lab labeler, expr ast.Expr) adt.Expr { saved := c.stack[k] + savedStack := c.stack c.stack[k].label = lab c.stack[k].field = f + if k < len(c.stack)-1 { + // Limit the capacity, so that if there is growth, we don't overwrite + // any values we need to restore later. This shouldn't happen too often, + // as this will result in a non-reclaimable allocation. 
+ c.stack = c.stack[: k+1 : k+1] + } + value := c.expr(expr) + c.stack = savedStack c.stack[k] = saved return value } @@ -907,7 +1132,7 @@ func (c *compiler) expr(expr ast.Expr) adt.Expr { case *ast.ListLit: c.pushScope(nil, 1, n) v := &adt.ListLit{Src: n} - elts, ellipsis := internal.ListEllipsis(n) + elts, ellipsis := listEllipsis(n) for _, d := range elts { elem := c.elem(d) @@ -930,18 +1155,16 @@ func (c *compiler) expr(expr ast.Expr) adt.Expr { return v case *ast.SelectorExpr: - c.inSelector++ x := c.expr(n.X) // TODO: check if x is an ImportReference, and if so, check if it a // standard library, look up the builtin, and check its version. The // index of standard libraries is available in c.index, which is really // an adt.Runtime under the hood. - ret := &adt.SelectorExpr{ + return &adt.SelectorExpr{ Src: n, X: x, - Sel: c.label(n.Sel)} - c.inSelector-- - return ret + Sel: c.label(n.Sel), + } case *ast.IndexExpr: return &adt.IndexExpr{ @@ -1073,17 +1296,77 @@ func (c *compiler) expr(expr ast.Expr) adt.Expr { return &adt.BinaryExpr{Src: n, Op: op, X: x, Y: y} // ) } + case *ast.PostfixExpr: + switch n.Op { + case token.ELLIPSIS: + if !c.experiments.ExplicitOpen { + // TODO: consider not returning. + return c.errf(n, "postfix ... operator requires @experiment(explicitopen)") + } + + return &adt.OpenExpr{ + Src: n, + X: c.expr(n.X), + } + + case token.OPTION: + if !c.experiments.Try { + c.errf(n, "optional marker (?) requires the try experiment (language version v0.16.0 or later)") + } else if c.inTryContext == 0 { + c.errf(n, "optional marker (?) is only valid within a try clause") + } + + // Compile the inner expression first, then validate. + // This gives better error output by showing the reference. 
+ x := c.expr(n.X) + switch r := x.(type) { + case *adt.FieldReference: + r.Optional = true + return r + case *adt.SelectorExpr: + r.Optional = true + return r + case *adt.IndexExpr: + r.Optional = true + return r + case *adt.Bottom: + return r + default: + // NOTE: as the parser already enforces correct use of ?, this + // can only happen if a user builds an incorrect AST + // programmatically. We do not generate errors for invalid + // references as this just leads to distracting additional error + // messages. + return c.errf(n, "optional marker (?) can only be used on references") + } + default: + return c.errf(n, "unsupported postfix operator %s", n.Op) + } + default: return c.errf(n, "%s values not allowed in this position", ast.Name(n)) } } -func (c *compiler) assertConcreteIsPossible(src ast.Node, op adt.Op, x adt.Expr) bool { +// listEllipsis reports the list type and remaining elements of a list. If we +// ever relax the usage of ellipsis, this function will likely change. Using +// this function will ensure keeping correct behavior or causing a compiler failure. 
+func listEllipsis(n *ast.ListLit) (elts []ast.Expr, e *ast.Ellipsis) { + elts = n.Elts + if n := len(elts); n > 0 { + var ok bool + if e, ok = elts[n-1].(*ast.Ellipsis); ok { + elts = elts[:n-1] + } + } + return elts, e +} + +func (c *compiler) assertConcreteIsPossible(src ast.Node, op adt.Op, x adt.Expr) { if !adt.AssertConcreteIsPossible(op, x) { str := astinternal.DebugStr(src) c.errf(src, "invalid operand %s ('%s' requires concrete value)", str, op) } - return false } func (c *compiler) addDisjunctionElem(d *adt.DisjunctionExpr, n ast.Expr, mark bool) { @@ -1156,7 +1439,14 @@ func parseString(c *compiler, node ast.Expr, q literal.QuoteInfo, s string) (n a return c.errf(node, "invalid string: %v", err) } if q.IsDouble() { - return &adt.String{Src: node, Str: str, RE: nil} + return &adt.String{Src: node, Str: str} } - return &adt.Bytes{Src: node, B: []byte(str), RE: nil} + return &adt.Bytes{Src: node, B: []byte(str)} +} + +// isStdlibPackage reports whether the given import +// path looks like an import path in the standard library. +func isStdlibPackage(pkgPath string) bool { + firstElem, _, _ := strings.Cut(pkgPath, "/") + return !strings.Contains(firstElem, ".") } diff --git a/vendor/cuelang.org/go/internal/core/compile/label.go b/vendor/cuelang.org/go/internal/core/compile/label.go index 2d0daf5732..efe57350b7 100644 --- a/vendor/cuelang.org/go/internal/core/compile/label.go +++ b/vendor/cuelang.org/go/internal/core/compile/label.go @@ -119,7 +119,7 @@ func (l *fieldLabel) labelString() string { case *ast.BasicLit: if x.Kind == token.STRING { s, err := literal.Unquote(x.Value) - if err == nil && ast.IsValidIdent(s) { + if err == nil && !ast.StringLabelNeedsQuoting(s) { return s } } @@ -148,3 +148,10 @@ func (l *letScope) labelString() string { // TODO: include more info in square brackets. return "let[]" } + +type tryScope ast.TryClause + +func (t *tryScope) labelString() string { + // TODO: include more info in square brackets. 
+ return "try[]" +} diff --git a/vendor/cuelang.org/go/internal/core/compile/predeclared.go b/vendor/cuelang.org/go/internal/core/compile/predeclared.go index fb23ede019..eaff9daf7c 100644 --- a/vendor/cuelang.org/go/internal/core/compile/predeclared.go +++ b/vendor/cuelang.org/go/internal/core/compile/predeclared.go @@ -46,6 +46,10 @@ func predeclared(n *ast.Ident) adt.Expr { return lenBuiltin case "close", "__close": return closeBuiltin + case "__closeAll": + return closeAllBuiltin + case "__reclose": + return recloseBuiltin case "matchIf", "__matchIf": return matchIfBuiltin case "matchN", "__matchN": @@ -63,6 +67,10 @@ func predeclared(n *ast.Ident) adt.Expr { case "rem", "__rem": return remBuiltin + case "self", "__self": + // UpCount of 1 gets resolved to relNode(1) + return &adt.ValueReference{Src: n, UpCount: 1} + case "__no_sharing": return adt.NoShareSentinel diff --git a/vendor/cuelang.org/go/internal/core/compile/validator.go b/vendor/cuelang.org/go/internal/core/compile/validator.go index 012847e04a..8a18bdbc40 100644 --- a/vendor/cuelang.org/go/internal/core/compile/validator.go +++ b/vendor/cuelang.org/go/internal/core/compile/validator.go @@ -29,7 +29,7 @@ var matchNBuiltin = &adt.Builtin{ Params: []adt.Param{topParam, intParam, listParam}, // varargs Result: adt.BoolKind, NonConcrete: true, - Func: func(call *adt.CallContext) adt.Expr { + Func: func(call adt.CallContext) adt.Expr { c := call.OpContext() args := call.Args() @@ -39,15 +39,12 @@ var matchNBuiltin = &adt.Builtin{ self := finalizeSelf(c, args[0]) if err := bottom(c, self); err != nil { - return &adt.Bool{B: false} + return adt.StaticBoolFalse } var errs []*adt.Bottom - - constraints := c.Elems(args[2]) - var count, possibleCount int64 - for _, check := range constraints { + for check := range c.Elems(args[2]) { v := adt.Unify(c, self, check) if err := adt.Validate(c, v, finalCfg); err == nil { // TODO: is it always true that the lack of an error signifies @@ -77,7 +74,7 @@ var 
matchNBuiltin = &adt.Builtin{ } return b } - return &adt.Bool{B: true} + return adt.StaticBoolTrue }, } @@ -91,7 +88,7 @@ var matchIfBuiltin = &adt.Builtin{ Params: []adt.Param{topParam, topParam, topParam, topParam}, Result: adt.BoolKind, NonConcrete: true, - Func: func(call *adt.CallContext) adt.Expr { + Func: func(call adt.CallContext) adt.Expr { c := call.OpContext() args := call.Args() @@ -101,7 +98,7 @@ var matchIfBuiltin = &adt.Builtin{ self := finalizeSelf(c, args[0]) if err := bottom(c, self); err != nil { - return &adt.Bool{B: false} + return adt.StaticBoolFalse } ifSchema, thenSchema, elseSchema := args[1], args[2], args[3] v := adt.Unify(c, self, ifSchema) @@ -114,7 +111,7 @@ var matchIfBuiltin = &adt.Builtin{ v = adt.Unify(c, self, chosenSchema) err := adt.Validate(c, v, finalCfg) if err == nil { - return &adt.Bool{B: true} + return adt.StaticBoolTrue } // TODO should we also include in the error something about the fact that // the if condition passed or failed? @@ -122,7 +119,8 @@ var matchIfBuiltin = &adt.Builtin{ }, } -var finalCfg = &adt.ValidateConfig{Final: true} +// Explicitly disallow incomplete errors. +var finalCfg = &adt.ValidateConfig{ReportIncomplete: true, Final: true} // finalizeSelf ensures a value is fully evaluated and then strips it of any // of its validators or default values. diff --git a/vendor/cuelang.org/go/internal/core/convert/go.go b/vendor/cuelang.org/go/internal/core/convert/go.go index 72e8c36f90..1904adfb1e 100644 --- a/vendor/cuelang.org/go/internal/core/convert/go.go +++ b/vendor/cuelang.org/go/internal/core/convert/go.go @@ -19,15 +19,14 @@ import ( "encoding" "encoding/json" "fmt" - "math" "math/big" "reflect" "slices" "strconv" "strings" + "unicode/utf8" "github.com/cockroachdb/apd/v3" - "golang.org/x/text/encoding/unicode" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/ast/astutil" @@ -45,56 +44,46 @@ import ( // The code in this file is a prototype implementation and is far from // optimized. 
-func GoValueToValue(ctx *adt.OpContext, x interface{}, nilIsTop bool) adt.Value { - v := GoValueToExpr(ctx, nilIsTop, x) - // TODO: return Value - return toValue(v) -} +// TODO(mvdan): get rid of the uses of %T below; have the recursive methods return *Bottom +// TODO(mvdan): swap order of parameters in the recursive methods to match the top-level API order -func GoTypeToExpr(ctx *adt.OpContext, x interface{}) (adt.Expr, errors.Error) { - v := newGoConverter(ctx).convertGoType(reflect.TypeOf(x)) - if err := ctx.Err(); err != nil { - return v, err.Err +// FromGoValue converts a Go value to an internal CUE value. +// The returned CUE value is finalized and concrete. +func FromGoValue(ctx *adt.OpContext, x any, nilIsTop bool) adt.Value { + val := reflect.ValueOf(x) + v := fromGoValue(ctx, nilIsTop, val) + if v == nil { + return ctx.AddErrf("unsupported Go type (%T)", x) } - return v, nil -} - -type goConverter struct { - ctx *adt.OpContext - tfile *token.File - offset int + // TODO: return Value + return v } -func newGoConverter(ctx *adt.OpContext) *goConverter { - return &goConverter{ - ctx: ctx, - // Code in *[token.File] uses size+1 in a few places. So do - // MaxInt-2 to be sure to avoid wrap-around issues. - tfile: token.NewFile(pkgID(), -1, math.MaxInt-2), - offset: 1, +// FromGoType converts a Go type to an internal CUE expression. +func FromGoType(ctx *adt.OpContext, x any) (adt.Expr, errors.Error) { + // TODO: if this value will always be unified with a concrete type in Go, + // then many of the fields may be omitted. + // TODO: this can be much more efficient. 
+ // TODO: synchronize + typ := reflect.TypeOf(x) + if _, t, ok := ctx.LoadType(typ); ok { + return t, nil } -} - -func (c *goConverter) setNextPos(n ast.Node) ast.Node { - ast.SetPos(n, c.tfile.Pos(c.offset, 0)) - c.offset++ - return n -} - -func toValue(e adt.Expr) adt.Value { - if v, ok := e.(adt.Value); ok { - return v + _, expr := fromGoType(ctx, true, typ) + if expr == nil { + expr = ctx.AddErrf("unsupported Go type (%v)", typ) + } + if err := ctx.Err(); err != nil { + // TODO: return an error as the expr itself, like [FromGoValue]? + return expr, err.Err } - obj := &adt.Vertex{} - obj.AddConjunct(adt.MakeRootConjunct(nil, e)) - return obj + return expr, nil } func compileExpr(ctx *adt.OpContext, expr ast.Expr) adt.Value { c, err := compile.Expr(nil, ctx, pkgID(), expr) if err != nil { - return &adt.Bottom{ - Err: errors.Promote(err, "compile")} + return &adt.Bottom{Err: errors.Promote(err, "compile")} } return adt.Resolve(ctx, c) } @@ -108,8 +97,7 @@ func parseTag(ctx *adt.OpContext, field, tag string) ast.Expr { expr, err := parser.ParseExpr("", tag) if err != nil { err := errors.Promote(err, "parser") - ctx.AddErr(errors.Wrapf(err, ctx.Pos(), - "invalid tag %q for field %q", tag, field)) + ctx.AddErr(errors.Wrapf(err, ctx.Pos(), "invalid tag %q for field %q", tag, field)) return &ast.BadExpr{} } return expr @@ -151,7 +139,7 @@ func getName(f *reflect.StructField) string { func isOptional(f *reflect.StructField) bool { isOptional := false switch f.Type.Kind() { - case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Interface, reflect.Slice: + case reflect.Pointer, reflect.Map, reflect.Chan, reflect.Interface, reflect.Slice: // Note: it may be confusing to distinguish between an empty slice and // a nil slice. However, it is also surprising to not be able to specify // a default value for a slice. So for now we will allow it. @@ -161,7 +149,7 @@ func isOptional(f *reflect.StructField) bool { // TODO: only if first field is not empty. 
_, opt := splitTag(tag) isOptional = false - for _, f := range strings.Split(opt, ",") { + for f := range strings.SplitSeq(opt, ",") { switch f { case "opt": isOptional = true @@ -171,10 +159,8 @@ func isOptional(f *reflect.StructField) bool { } } else if tag, ok = f.Tag.Lookup("json"); ok { isOptional = false - for _, f := range strings.Split(tag, ",")[1:] { - if f == "omitempty" { - return true - } + if slices.Contains(strings.Split(tag, ",")[1:], "omitempty") { + return true } } return isOptional @@ -184,12 +170,11 @@ func isOptional(f *reflect.StructField) bool { func isOmitEmpty(f *reflect.StructField) bool { isOmitEmpty := false switch f.Type.Kind() { - case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Interface, reflect.Slice: + case reflect.Pointer, reflect.Map, reflect.Chan, reflect.Interface, reflect.Slice: // Note: it may be confusing to distinguish between an empty slice and // a nil slice. However, it is also surprising to not be able to specify // a default value for a slice. So for now we will allow it. isOmitEmpty = true - default: // TODO: we can also infer omit empty if a type cannot be nil if there // is a constraint that unconditionally disallows the zero value. @@ -197,49 +182,36 @@ func isOmitEmpty(f *reflect.StructField) bool { tag, ok := f.Tag.Lookup("json") if ok { isOmitEmpty = false - for _, f := range strings.Split(tag, ",")[1:] { - if f == "omitempty" { - return true - } + if slices.Contains(strings.Split(tag, ",")[1:], "omitempty") { + return true } } return isOmitEmpty } -func GoValueToExpr(ctx *adt.OpContext, nilIsTop bool, x interface{}) adt.Expr { - e := newGoConverter(ctx).convertRec(nilIsTop, x) - if e == nil { - return ctx.AddErrf("unsupported Go type (%T)", x) - } - return e -} - func isNil(x reflect.Value) bool { - switch x.Kind() { - // Only check for supported types; ignore func and chan. 
- case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Interface: + switch x.Kind() { // Only check for supported types; ignore func and chan. + case reflect.Pointer, reflect.Map, reflect.Slice, reflect.Interface: return x.IsNil() } return false } -func (c *goConverter) convertRec(nilIsTop bool, x interface{}) (result adt.Value) { - if t := (&types.Value{}); types.CastValue(t, x) { - // TODO: panic if not the same runtime. - return t.V - } - src := c.ctx.Source() - - switch v := x.(type) { - case nil: +func fromGoValue(ctx *adt.OpContext, nilIsTop bool, val reflect.Value) (result adt.Value) { + src := ctx.Source() + if !val.IsValid() { // untyped nil, or dereferencing a nil pointer/interface if nilIsTop { ident, _ := src.(*ast.Ident) return &adt.Top{Src: ident} } return &adt.Null{Src: src} - - case *ast.File: - x, err := compile.Files(nil, c.ctx, pkgID(), v) + } + env := ctx.Env(0) + typ := val.Type() + switch typ { + case astFile: + v, _ := reflect.TypeAssert[*ast.File](val) + x, err := compile.Files(nil, ctx, pkgID(), v) if err != nil { return &adt.Bottom{Err: errors.Promote(err, "compile")} } @@ -248,47 +220,43 @@ func (c *goConverter) convertRec(nilIsTop bool, x interface{}) (result adt.Value } return x - case ast.Expr: - return compileExpr(c.ctx, v) - - case *big.Int: - v2 := new(apd.BigInt).SetMathBigInt(v) + case bigInt: + v, _ := reflect.TypeAssert[*big.Int](val) return &adt.Num{ Src: src, K: adt.IntKind, - X: *apd.NewWithBigInt(v2, 0), + X: fromGoBigInt(v), } - case *big.Rat: + case bigRat: + v, _ := reflect.TypeAssert[*big.Rat](val) // should we represent this as a binary operation? 
n := &adt.Num{Src: src, K: adt.IntKind} - _, err := internal.BaseContext.Quo(&n.X, - apd.NewWithBigInt(new(apd.BigInt).SetMathBigInt(v.Num()), 0), - apd.NewWithBigInt(new(apd.BigInt).SetMathBigInt(v.Denom()), 0), - ) - if err != nil { - return c.ctx.AddErrf("could not convert *big.Rat: %v", err) + num := fromGoBigInt(v.Num()) + denom := fromGoBigInt(v.Denom()) + if _, err := internal.BaseContext.Quo(&n.X, &num, &denom); err != nil { + return ctx.AddErrf("could not convert *big.Rat: %v", err) } if !v.IsInt() { n.K = adt.FloatKind } return n - case *big.Float: + case bigFloat: + v, _ := reflect.TypeAssert[*big.Float](val) n := &adt.Num{Src: src, K: adt.FloatKind} - _, _, err := n.X.SetString(v.String()) - if err != nil { - return c.ctx.AddErr(errors.Promote(err, "invalid float")) + // NOTE: apd.Decimal has an API to set from a big.Int, but not from a big.Float. + if _, _, err := n.X.SetString(v.String()); err != nil { + return ctx.AddErr(errors.Promote(err, "invalid float")) } return n - case *apd.Decimal: - // TODO: should we allow an "int" bit to be set here? It is a bit - // tricky, as we would also need to pass down the result of rounding. - // So more likely an API must return explicitly whether a value is - // a float or an int after all. - // The code to autodetect whether something is an integer can be done - // with this: + case apdDecimal: + v, _ := reflect.TypeAssert[*apd.Decimal](val) + // TODO: should we allow an "int" bit to be set here? + // It is a bit tricky, as we would also need to pass down the result of rounding. + // So more likely an API must return explicitly whether a value is a float or an int after all. 
+ // The code to autodetect whether something is an integer can be done with this: kind := adt.FloatKind var d apd.Decimal res, _ := internal.BaseContext.RoundToIntegralExact(&d, v) @@ -299,352 +267,314 @@ func (c *goConverter) convertRec(nilIsTop bool, x interface{}) (result adt.Value n := &adt.Num{Src: src, K: kind} n.X = *v return n + } - case json.Marshaler: + if _, ok := implements(typ, typesInterface); ok { + v, _ := reflect.TypeAssert[types.Interface](val) + t := v.Core() + // TODO: panic if not the same runtime. + return t.V + } + if _, ok := implements(typ, astExpr); ok { + v, _ := reflect.TypeAssert[ast.Expr](val) + return compileExpr(ctx, v) + } + if _, ok := implements(typ, jsonMarshaler); ok { + v, _ := reflect.TypeAssert[json.Marshaler](val) b, err := v.MarshalJSON() if err != nil { - return c.ctx.AddErr(errors.Promote(err, "json.Marshaler")) + return ctx.AddErr(errors.Promote(err, "json.Marshaler")) } expr, err := parser.ParseExpr("json", b) if err != nil { panic(err) // cannot happen } - return compileExpr(c.ctx, expr) - - case encoding.TextMarshaler: + return compileExpr(ctx, expr) + } + if _, ok := implements(typ, textMarshaler); ok { + v, _ := reflect.TypeAssert[encoding.TextMarshaler](val) b, err := v.MarshalText() if err != nil { - return c.ctx.AddErr(errors.Promote(err, "encoding.TextMarshaler")) + return ctx.AddErr(errors.Promote(err, "encoding.TextMarshaler")) } - s, _ := unicode.UTF8.NewEncoder().String(string(b)) - return &adt.String{Src: src, Str: s} - - case error: - var errs errors.Error - switch x := v.(type) { - case errors.Error: - errs = x - default: - errs = c.ctx.Newf("%s", x.Error()) + str := strings.ToValidUTF8(string(b), string(utf8.RuneError)) + return &adt.String{Src: src, Str: str} + } + if _, ok := implements(typ, goError); ok { + v, _ := reflect.TypeAssert[error](val) + errs, ok := v.(errors.Error) + if !ok { + errs = ctx.Newf("%s", v.Error()) } return &adt.Bottom{Err: errs} - case bool: - return &adt.Bool{Src: src, B: v} 
- case string: - s, _ := unicode.UTF8.NewEncoder().String(v) - return &adt.String{Src: src, Str: s} - case []byte: - return &adt.Bytes{Src: src, B: v} - case int: - return c.toInt(int64(v)) - case int8: - return c.toInt(int64(v)) - case int16: - return c.toInt(int64(v)) - case int32: - return c.toInt(int64(v)) - case int64: - return c.toInt(v) - case uint: - return c.toUint(uint64(v)) - case uint8: - return c.toUint(uint64(v)) - case uint16: - return c.toUint(uint64(v)) - case uint32: - return c.toUint(uint64(v)) - case uint64: - return c.toUint(v) - case uintptr: - return c.toUint(uint64(v)) - case float64: + } + + switch typ.Kind() { + case reflect.Bool: + return ctx.NewBool(val.Bool()) + + case reflect.String: + str := strings.ToValidUTF8(val.String(), string(utf8.RuneError)) + // TODO: here and above: allow to fail on invalid strings. + // if !utf8.ValidString(str) { + // return ctx.AddErrf("cannot convert result to string: invalid UTF-8") + // } + return &adt.String{Src: src, Str: str} + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := &adt.Num{Src: src, K: adt.IntKind} + n.X = *apd.New(val.Int(), 0) + return n + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n := &adt.Num{Src: src, K: adt.IntKind} + n.X.Coeff.SetUint64(val.Uint()) + return n + + case reflect.Float64: n := &adt.Num{Src: src, K: adt.FloatKind} - _, err := n.X.SetFloat64(v) - if err != nil { - return c.ctx.AddErr(errors.Promote(err, "invalid float")) + if _, err := n.X.SetFloat64(val.Float()); err != nil { + return ctx.AddErr(errors.Promote(err, "invalid float")) } return n - case float32: + + case reflect.Float32: n := &adt.Num{Src: src, K: adt.FloatKind} - // apd.Decimal has a SetFloat64 method, but no SetFloat32. 
- _, _, err := n.X.SetString(strconv.FormatFloat(float64(v), 'E', -1, 32)) - if err != nil { - return c.ctx.AddErr(errors.Promote(err, "invalid float")) + // NOTE: apd.Decimal has a SetFloat64 method, but no SetFloat32. + if _, _, err := n.X.SetString(strconv.FormatFloat(val.Float(), 'E', -1, 32)); err != nil { + return ctx.AddErr(errors.Promote(err, "invalid float")) } return n - case reflect.Value: - if v.CanInterface() { - return c.convertRec(nilIsTop, v.Interface()) + case reflect.Pointer, reflect.Interface: + return fromGoValue(ctx, nilIsTop, val.Elem()) + + case reflect.Struct: + // Grow the slices to match the number of fields in the Go struct, + // avoiding repeated slice growth in append calls below. + numFields := typ.NumField() + sl := &adt.StructLit{ + Src: src, + Decls: make([]adt.Decl, 0, numFields), + } + sl.Init(ctx) + v := &adt.Vertex{ + Arcs: make([]*adt.Vertex, 0, numFields), } - default: - value := reflect.ValueOf(v) - switch value.Kind() { - case reflect.Bool: - return &adt.Bool{Src: src, B: value.Bool()} - - case reflect.String: - str := value.String() - str, _ = unicode.UTF8.NewEncoder().String(str) - // TODO: here and above: allow to fail on invalid strings. 
- // if !utf8.ValidString(str) { - // return ctx.AddErrf("cannot convert result to string: invalid UTF-8") - // } - return &adt.String{Src: src, Str: str} - - case reflect.Int, reflect.Int8, reflect.Int16, - reflect.Int32, reflect.Int64: - return c.toInt(value.Int()) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return c.toUint(value.Uint()) - - case reflect.Float32, reflect.Float64: - return c.convertRec(nilIsTop, value.Float()) - - case reflect.Ptr: - if value.IsNil() { - if nilIsTop { - ident, _ := src.(*ast.Ident) - return &adt.Top{Src: ident} - } - return &adt.Null{Src: src} + for i := range typ.NumField() { + sf := typ.Field(i) + if sf.PkgPath != "" { + continue } - return c.convertRec(nilIsTop, value.Elem().Interface()) - - case reflect.Struct: - sl := &adt.StructLit{Src: c.setNextPos(ast.NewStruct())} - sl.Init(c.ctx) - v := &adt.Vertex{} - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - sf := t.Field(i) - if sf.PkgPath != "" { - continue - } - val := value.Field(i) - if !nilIsTop && isNil(val) { - continue - } - if tag, _ := sf.Tag.Lookup("json"); tag == "-" { - continue - } - if isOmitEmpty(&sf) && val.IsZero() { - continue - } - sub := c.convertRec(nilIsTop, val.Interface()) - if sub == nil { - // mimic behavior of encoding/json: skip fields of unsupported types - continue - } - if _, ok := sub.(*adt.Bottom); ok { - return sub - } - - // leave errors like we do during normal evaluation or do we - // want to return the error? - name := getName(&sf) - if name == "-" { - continue - } - if sf.Anonymous && name == "" { - arc, ok := sub.(*adt.Vertex) - if ok { - v.Arcs = append(v.Arcs, arc.Arcs...) 
- } - continue - } - - f := c.ctx.StringLabel(name) - c.createField(f, sub, sl) - v.Arcs = append(v.Arcs, c.ensureArcVertex(sub, f)) + val := val.Field(i) + if !nilIsTop && isNil(val) { + continue } - - env := c.ctx.Env(0) - if env == nil { - env = &adt.Environment{} + if tag, _ := sf.Tag.Lookup("json"); tag == "-" { + continue } - // There is no closedness or cycle info for Go structs, so we - // pass an empty CloseInfo. - v.AddStruct(sl, env, adt.CloseInfo{}) - v.SetValue(c.ctx, &adt.StructMarker{}) - v.ForceDone() - - return v - - case reflect.Map: - obj := &adt.StructLit{Src: c.setNextPos(ast.NewStruct())} - obj.Init(c.ctx) - v := &adt.Vertex{} - - t := value.Type() - switch key := t.Key(); key.Kind() { - default: - if !key.Implements(textMarshaler) { - return c.ctx.AddErrf("unsupported Go type for map key (%v)", key) - } - fallthrough - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, - reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64, reflect.Uintptr: - - iter := value.MapRange() - for iter.Next() { - k := iter.Key() - val := iter.Value() - // if isNil(val) { - // continue - // } - - sub := c.convertRec(nilIsTop, val.Interface()) - // mimic behavior of encoding/json: report error of - // unsupported type. 
- if sub == nil { - return c.ctx.AddErrf("unsupported Go type (%T)", val.Interface()) - } - if isBottom(sub) { - return sub - } - - s := fmt.Sprint(k) - f := c.ctx.StringLabel(s) - v.Arcs = append(v.Arcs, c.ensureArcVertex(sub, f)) - } - slices.SortFunc(v.Arcs, func(a, b *adt.Vertex) int { - return strings.Compare(a.Label.IdentString(c.ctx), b.Label.IdentString(c.ctx)) - }) - // Create all the adt/ast fields after sorting the arcs - for _, arc := range v.Arcs { - c.createField(arc.Label, arc, obj) - } + if isOmitEmpty(&sf) && val.IsZero() { + continue + } + sub := fromGoValue(ctx, nilIsTop, val) + if sub == nil { + // mimic behavior of encoding/json: skip fields of unsupported types + continue + } + if _, ok := sub.(*adt.Bottom); ok { + return sub } - env := c.ctx.Env(0) - if env == nil { - env = &adt.Environment{} + // leave errors like we do during normal evaluation or do we want to return the error? + name := getName(&sf) + if name == "-" { + continue + } + if sf.Anonymous && name == "" { + arc, ok := sub.(*adt.Vertex) + if ok { + v.Arcs = append(v.Arcs, arc.Arcs...) + } + continue } - v.AddStruct(obj, env, adt.CloseInfo{}) - v.SetValue(c.ctx, &adt.StructMarker{}) - v.ForceDone() - return v + f := ctx.StringLabel(name) + sl.Decls = append(sl.Decls, &adt.Field{Label: f, Value: sub}) + v.Arcs = append(v.Arcs, ensureArcVertex(ctx, env, sub, f)) + } - case reflect.Slice, reflect.Array: - list := &adt.ListLit{Src: ast.NewList()} - c.setNextPos(list.Src) + // There is no closedness or cycle info for Go structs, so we pass an empty CloseInfo. 
+ v.AddStruct(sl) + v.SetValue(ctx, &adt.StructMarker{}) + v.ForceDone() + return v - v := &adt.Vertex{} + case reflect.Map: + obj := &adt.StructLit{Src: src} + obj.Init(ctx) + v := &adt.Vertex{} - for i := 0; i < value.Len(); i++ { - val := value.Index(i) - x := c.convertRec(nilIsTop, val.Interface()) - if x == nil { - return c.ctx.AddErrf("unsupported Go type (%T)", - val.Interface()) + switch key := typ.Key(); key.Kind() { + default: + if !key.Implements(textMarshaler) { + return ctx.AddErrf("unsupported Go type for map key (%v)", key) + } + fallthrough + case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + // Note that we don't use [reflect.Value.Seq2]; see the note below for [reflect.Array]. + iter := val.MapRange() + for iter.Next() { + k, val := iter.Key(), iter.Value() + sub := fromGoValue(ctx, nilIsTop, val) + // mimic behavior of encoding/json: report error of unsupported type. 
+ if sub == nil { + return ctx.AddErrf("unsupported Go type (%T)", val.Interface()) } - if isBottom(x) { - return x + if isBottom(sub) { + return sub } - list.Elems = append(list.Elems, x) - f := adt.MakeIntLabel(adt.IntLabel, int64(i)) - v.Arcs = append(v.Arcs, c.ensureArcVertex(x, f)) - } - env := c.ctx.Env(0) - if env == nil { - env = &adt.Environment{} + s := fmt.Sprint(k) + f := ctx.StringLabel(s) + v.Arcs = append(v.Arcs, ensureArcVertex(ctx, env, sub, f)) + } + slices.SortFunc(v.Arcs, func(a, b *adt.Vertex) int { + return strings.Compare(a.Label.IdentString(ctx), b.Label.IdentString(ctx)) + }) + // Create all the adt/ast fields after sorting the arcs + for _, arc := range v.Arcs { + obj.Decls = append(obj.Decls, &adt.Field{Label: arc.Label, Value: arc}) } - v.AddConjunct(adt.MakeRootConjunct(env, list)) - v.SetValue(c.ctx, &adt.ListMarker{}) - v.ForceDone() + } - return v + v.AddStruct(obj) + v.SetValue(ctx, structMarker) + v.ForceDone() + return v + + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { // []byte + return &adt.Bytes{Src: src, B: val.Bytes()} + } + fallthrough + case reflect.Array: + // Grow the slices to match the number of fields in the Go struct, + // avoiding repeated slice growth in append calls below. + numElems := val.Len() + src, _ := src.(*ast.ListLit) + list := &adt.ListLit{ + Src: src, + Elems: make([]adt.Elem, 0, numElems), + } + v := &adt.Vertex{ + Arcs: make([]*adt.Vertex, 0, numElems), } - } - return nil -} -func (c *goConverter) ensureArcVertex(x adt.Value, l adt.Feature) *adt.Vertex { - env := c.ctx.Env(0) - if env == nil { - env = &adt.Environment{} - } - arc, ok := x.(*adt.Vertex) - if ok { - a := *arc - arc = &a - arc.Label = l - } else { - arc = &adt.Vertex{Label: l} - arc.AddConjunct(adt.MakeRootConjunct(env, x)) - arc.SetValue(c.ctx, x) - arc.ForceDone() - } - return arc -} + // Note that we don't use [reflect.Value.Seq2], + // as it allocates more per iteration, and we don't need the index value. 
+ // We can't use [reflect.Value.Seq] either, as that's just the indices. + // See the upstream bug report: https://go.dev/issue/76357 + for i := range numElems { + val := val.Index(i) + x := fromGoValue(ctx, nilIsTop, val) + if x == nil { + return ctx.AddErrf("unsupported Go type (%T)", val.Interface()) + } + if isBottom(x) { + return x + } + list.Elems = append(list.Elems, x) + f := adt.MakeIntLabel(adt.IntLabel, int64(i)) + v.Arcs = append(v.Arcs, ensureArcVertex(ctx, env, x, f)) + } -func (c *goConverter) createField(l adt.Feature, sub adt.Value, sl *adt.StructLit) { - src := sl.Src.(*ast.StructLit) - astField := &ast.Field{ - Label: ast.NewIdent(l.IdentString(c.ctx)), - Constraint: token.ILLEGAL, - } - if expr, ok := sub.Source().(ast.Expr); ok { - astField.Value = expr + v.AddConjunct(adt.MakeRootConjunct(env, list)) + v.SetValue(ctx, listMarker) + v.ForceDone() + return v } - c.setNextPos(astField.Label) - src.Elts = append(src.Elts, astField) - field := &adt.Field{Label: l, Value: sub, Src: astField} - sl.Decls = append(sl.Decls, field) + return nil } -func (c *goConverter) toInt(x int64) adt.Value { - n := &adt.Num{Src: c.ctx.Source(), K: adt.IntKind} - n.X = *apd.New(x, 0) - return n -} +// These are effectively singletons, so avoid allocating new ones. +var ( + structMarker = &adt.StructMarker{} + listMarker = &adt.ListMarker{} +) -func (c *goConverter) toUint(x uint64) adt.Value { - n := &adt.Num{Src: c.ctx.Source(), K: adt.IntKind} - n.X.Coeff.SetUint64(x) - return n +func fromGoBigInt(x *big.Int) apd.Decimal { + // Integers fitting in 64 bits is rather common. + // In that case, avoid the conversion to [apd.BigInt], which also allocates. + if x.IsInt64() { + var dec apd.Decimal + dec.SetInt64(x.Int64()) + return dec + } + return *apd.NewWithBigInt(new(apd.BigInt).SetMathBigInt(x), 0) } -func (c *goConverter) convertGoType(t reflect.Type) adt.Expr { - // TODO: this can be much more efficient. 
- // TODO: synchronize - return c.goTypeToValue(true, t) +func ensureArcVertex(ctx *adt.OpContext, env *adt.Environment, x adt.Value, l adt.Feature) *adt.Vertex { + if arc, ok := x.(*adt.Vertex); ok { + if arc.Label == l { + // We already have a vertex with the correct label; do not make a copy. + return arc + } + // We already have a vertex; copy it and adjust its label. + a := *arc + a.Label = l + return &a + } + // We know this is one vertex with exactly one conjunct, + // so allocate both together to reduce the runtime overhead. + var alloc struct { + arc adt.Vertex + conjs [1]adt.Conjunct + } + arc := &alloc.arc + arc.Conjuncts = alloc.conjs[:0] + + arc.Label = l + arc.AddConjunct(adt.MakeRootConjunct(env, x)) + arc.SetValue(ctx, x) + arc.ForceDone() + return arc } var ( - jsonMarshaler = reflect.TypeFor[json.Marshaler]() - textMarshaler = reflect.TypeFor[encoding.TextMarshaler]() - topSentinel = ast.NewIdent("_") + goError = reflect.TypeFor[error]() + typesInterface = reflect.TypeFor[types.Interface]() + jsonMarshaler = reflect.TypeFor[json.Marshaler]() + textMarshaler = reflect.TypeFor[encoding.TextMarshaler]() + astExpr = reflect.TypeFor[ast.Expr]() + astFile = reflect.TypeFor[*ast.File]() + bigInt = reflect.TypeFor[*big.Int]() + bigRat = reflect.TypeFor[*big.Rat]() + bigFloat = reflect.TypeFor[*big.Float]() + apdDecimal = reflect.TypeFor[*apd.Decimal]() + topSentinel = ast.NewIdent("_") ) -// goTypeToValue converts a Go Type to a value. -// -// TODO: if this value will always be unified with a concrete type in Go, then -// many of the fields may be omitted. -func (c *goConverter) goTypeToValue(allowNullDefault bool, t reflect.Type) adt.Expr { - if _, t, ok := c.ctx.LoadType(t); ok { - return t - } - - _, v := c.goTypeToValueRec(allowNullDefault, t) - if v == nil { - return c.ctx.AddErrf("unsupported Go type (%v)", t) +// implements is like t.Implements(ifaceType) but checks whether +// either t or reflect.PointerTo(t) implements the interface. 
+// It also returns false for the case where t is an interface type. +func implements(t, ifaceType reflect.Type) (needAddr, ok bool) { + switch { + case t.Kind() == reflect.Interface: + return false, false + case t.Implements(ifaceType): + return false, true + case reflect.PointerTo(t).Implements(ifaceType): + return true, true + default: + return false, false } - return v } -func (c *goConverter) goTypeToValueRec(allowNullDefault bool, t reflect.Type) (e ast.Expr, expr adt.Expr) { - if src, t, ok := c.ctx.LoadType(t); ok { +func fromGoType(ctx *adt.OpContext, allowNullDefault bool, t reflect.Type) (e ast.Expr, expr adt.Expr) { + if src, t, ok := ctx.LoadType(t); ok { return src, t } @@ -672,12 +602,12 @@ func (c *goConverter) goTypeToValueRec(allowNullDefault bool, t reflect.Type) (e } switch k := t.Kind(); k { - case reflect.Ptr: + case reflect.Pointer: elem := t.Elem() - for elem.Kind() == reflect.Ptr { + for elem.Kind() == reflect.Pointer { elem = elem.Elem() } - e, _ = c.goTypeToValueRec(false, elem) + e, _ = fromGoType(ctx, false, elem) if allowNullDefault { e = wrapOrNull(e) } @@ -716,15 +646,15 @@ func (c *goConverter) goTypeToValueRec(allowNullDefault bool, t reflect.Type) (e // TODO: dirty trick: set this to a temporary Vertex and then update the // arcs and conjuncts of this vertex below. This will allow circular // references. Maybe have a special kind of "hardlink" reference. 
- c.ctx.StoreType(t, obj, nil) + ctx.StoreType(t, obj, nil) - for i := 0; i < t.NumField(); i++ { + for i := range t.NumField() { f := t.Field(i) if f.PkgPath != "" { continue } _, ok := f.Tag.Lookup("cue") - elem, _ := c.goTypeToValueRec(!ok, f.Type) + elem, _ := fromGoType(ctx, !ok, f.Type) if isBad(elem) { continue // Ignore fields for unsupported types } @@ -737,7 +667,7 @@ func (c *goConverter) goTypeToValueRec(allowNullDefault bool, t reflect.Type) (e } if tag, ok := f.Tag.Lookup("cue"); ok { - v := parseTag(c.ctx, name, tag) + v := parseTag(ctx, name, tag) if isBad(v) { return v, nil } @@ -747,11 +677,10 @@ func (c *goConverter) goTypeToValueRec(allowNullDefault bool, t reflect.Type) (e // valid CUE name), make it a string and create a map to a new // name for references. - // The GO JSON decoder always allows a value to be undefined. + // The Go JSON decoder always allows a value to be undefined. d := &ast.Field{Label: ast.NewIdent(name), Value: elem} - c.setNextPos(d) if isOptional(&f) { - internal.SetConstraint(d, token.OPTION) + d.Constraint = token.OPTION } obj.Elts = append(obj.Elts, d) } @@ -765,18 +694,17 @@ func (c *goConverter) goTypeToValueRec(allowNullDefault bool, t reflect.Type) (e if t.Elem().Kind() == reflect.Uint8 { e = ast.NewIdent("__bytes") } else { - elem, _ := c.goTypeToValueRec(allowNullDefault, t.Elem()) + elem, _ := fromGoType(ctx, allowNullDefault, t.Elem()) if elem == nil { - b := c.ctx.AddErrf("unsupported Go type (%v)", t.Elem()) - return &ast.BadExpr{}, b + return &ast.BadExpr{}, ctx.AddErrf("unsupported Go type (%v)", t.Elem()) } if t.Kind() == reflect.Array { e = ast.NewCall( ast.NewSel(&ast.Ident{ Name: "list", - Node: ast.NewImport(nil, "list")}, - "Repeat"), + Node: ast.NewImport(nil, "list"), + }, "Repeat"), ast.NewList(elem), ast.NewLit(token.INT, strconv.Itoa(t.Len()))) } else { @@ -793,14 +721,12 @@ func (c *goConverter) goTypeToValueRec(allowNullDefault bool, t reflect.Type) (e reflect.Int32, reflect.Int64, 
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: default: - b := c.ctx.AddErrf("unsupported Go type for map key (%v)", key) - return &ast.BadExpr{}, b + return &ast.BadExpr{}, ctx.AddErrf("unsupported Go type for map key (%v)", key) } - v, x := c.goTypeToValueRec(allowNullDefault, t.Elem()) + v, x := fromGoType(ctx, allowNullDefault, t.Elem()) if v == nil { - b := c.ctx.AddErrf("unsupported Go type (%v)", t.Elem()) - return &ast.BadExpr{}, b + return &ast.BadExpr{}, ctx.AddErrf("unsupported Go type (%v)", t.Elem()) } if isBad(v) { return v, x @@ -817,21 +743,20 @@ func (c *goConverter) goTypeToValueRec(allowNullDefault bool, t reflect.Type) (e store: // TODO: store error if not nil? if e != nil { - c.setNextPos(e) f := &ast.File{Decls: []ast.Decl{&ast.EmbedDecl{Expr: e}}} astutil.Resolve(f, func(_ token.Pos, msg string, args ...interface{}) { - c.ctx.AddErrf(msg, args...) + ctx.AddErrf(msg, args...) }) var x adt.Expr - x2, err := compile.Expr(nil, c.ctx, pkgID(), e) + x2, err := compile.Expr(nil, ctx, pkgID(), e) if err != nil { b := &adt.Bottom{Err: err} - c.ctx.AddBottom(b) + ctx.AddBottom(b) x = b } else { x = x2.Expr() } - c.ctx.StoreType(t, e, x) + ctx.StoreType(t, e, x) return e, x } return e, nil diff --git a/vendor/cuelang.org/go/internal/core/debug/compact.go b/vendor/cuelang.org/go/internal/core/debug/compact.go index 6900c4ebac..8d9e18714b 100644 --- a/vendor/cuelang.org/go/internal/core/debug/compact.go +++ b/vendor/cuelang.org/go/internal/core/debug/compact.go @@ -27,32 +27,28 @@ import ( "cuelang.org/go/internal/core/adt" ) -type compactPrinter struct { - printer -} - -func (w *compactPrinter) string(s string) { - w.dst = append(w.dst, s...) 
-} - -func (w *compactPrinter) node(n adt.Node) { +func (w *printer) compactNode(n adt.Node) { switch x := n.(type) { case *adt.Vertex: if x.BaseValue == nil || (w.cfg.Raw && !x.IsData()) { i := 0 - x.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range x.LeafConjuncts() { if i > 0 { w.string(" & ") } i++ w.node(c.Elem()) - return true - }) + } return } switch v := x.BaseValue.(type) { case *adt.StructMarker: + if !w.pushVertex(x) { + return + } + defer w.popVertex() + w.string("{") for i, a := range x.Arcs { if i > 0 { @@ -80,6 +76,11 @@ func (w *compactPrinter) node(n adt.Node) { w.string("}") case *adt.ListMarker: + if !w.pushVertex(x) { + return + } + defer w.popVertex() + w.string("[") for i, a := range x.Arcs { if i > 0 { @@ -90,6 +91,8 @@ func (w *compactPrinter) node(n adt.Node) { w.string("]") case *adt.Vertex: + // Disjunction, structure shared, etc. + if v, ok := w.printShared(x); !ok { w.node(v) w.popVertex() @@ -162,7 +165,7 @@ func (w *compactPrinter) node(n adt.Node) { w.string(`_|_`) if x.Err != nil { w.string("(") - w.string(x.Err.Error()) + w.shortError(x.Err, false) w.string(")") } @@ -207,6 +210,9 @@ func (w *compactPrinter) node(n adt.Node) { case *adt.FieldReference: w.label(x.Label) + if x.Optional { + w.string("?") + } case *adt.ValueReference: w.label(x.Label) @@ -231,12 +237,18 @@ func (w *compactPrinter) node(n adt.Node) { w.node(x.X) w.string(".") w.label(x.Sel) + if x.Optional { + w.string("?") + } case *adt.IndexExpr: w.node(x.X) w.string("[") w.node(x.Index) w.string("]") + if x.Optional { + w.string("?") + } case *adt.SliceExpr: w.node(x.X) @@ -270,6 +282,10 @@ func (w *compactPrinter) node(n adt.Node) { w.node(x.Y) w.string(")") + case *adt.OpenExpr: + w.node(x.X) + w.string("...") + case *adt.CallExpr: w.node(x.Fun) w.string("(") @@ -345,6 +361,10 @@ func (w *compactPrinter) node(n adt.Node) { w.node(c) } w.node(adt.ToExpr(x.Value)) + if x.Fallback != nil { + w.string(" else ") + w.node(x.Fallback) + } case 
*adt.ForClause: w.string("for ") @@ -367,6 +387,17 @@ func (w *compactPrinter) node(n adt.Node) { w.node(x.Expr) w.string(" ") + case *adt.TryClause: + w.string("try ") + if x.Label != adt.InvalidLabel { + // Assignment form: try x = expr + w.ident(x.Label) + w.string(" = ") + w.node(x.Expr) + w.string(" ") + } + // Struct form has no Ident/Expr - body is in Comprehension.Value + case *adt.ValueClause: default: diff --git a/vendor/cuelang.org/go/internal/core/debug/debug.go b/vendor/cuelang.org/go/internal/core/debug/debug.go index 2ea586e13d..391061d285 100644 --- a/vendor/cuelang.org/go/internal/core/debug/debug.go +++ b/vendor/cuelang.org/go/internal/core/debug/debug.go @@ -21,6 +21,7 @@ package debug import ( "fmt" + "slices" "strconv" "strings" @@ -38,6 +39,11 @@ type Config struct { Cwd string Compact bool Raw bool + + // ExpandLetExpr causes the expression of let reference to be printed. + // Note that this may result in large outputs. Use with care. + // Only applies if Compact is false. + ExpandLetExpr bool } // AppendNode writes a string representation of the node to w. @@ -45,12 +51,7 @@ func AppendNode(dst []byte, i adt.StringIndexer, n adt.Node, config *Config) []b if config == nil { config = &Config{} } - p := printer{dst: dst, index: i, cfg: config} - if config.Compact { - p := compactPrinter{p} - p.node(n) - return p.dst - } + p := printer{dst: dst, index: i, cfg: config, compact: config.Compact} p.node(n) return p.dst } @@ -65,10 +66,11 @@ func NodeString(i adt.StringIndexer, n adt.Node, config *Config) string { } type printer struct { - dst []byte - index adt.StringIndexer - indent string - cfg *Config + dst []byte + index adt.StringIndexer + indent string + cfg *Config + compact bool // copied from config.Compact // keep track of vertices to avoid cycles. stack []*adt.Vertex @@ -80,8 +82,67 @@ type printer struct { // - auto } +// ReplaceArg implements the format.Printer interface. 
It wraps Vertex arguments +// with a formatter value, that holds a pointer to w. This allows the stack +// of processed vertices to be passed down, which in turn is used for cycle +// detection. +func (w *printer) ReplaceArg(arg any) (replacement any, replaced bool) { + var x adt.Node + var r adt.Runtime + switch v := arg.(type) { + case adt.Node: + x = v + case adt.Formatter: + x = v.X + r = v.R + case errors.Error: + // Wrap errors to ensure they are formatted with our printer, + // which enables cycle detection in nested error formatting. + return errorFormatter{p: w, err: v}, true + } + + switch x := x.(type) { + default: + return arg, false + case *adt.Vertex: + // We replace the formatter (or node) with our own formatter that is + // capable of detecting cycles. + return formatter{p: w, x: x, r: r}, true + } +} + +type formatter struct { + p *printer + x adt.Node + r adt.Runtime `` +} + +func (f formatter) String() string { + p := printer{ + dst: make([]byte, 0, 128), + index: f.r, + cfg: f.p.cfg, + compact: true, // Always compact for error arguments. + stack: f.p.stack, + } + p.node(f.x) + return string(p.dst) +} + +// errorFormatter wraps an error to ensure it is formatted with a printer +// that supports cycle detection. +type errorFormatter struct { + p *printer + err errors.Error +} + +func (f errorFormatter) String() string { + cfg := &errors.Config{Printer: f.p} + return errors.StringWithConfig(f.err, cfg) +} + func (w *printer) string(s string) { - if len(w.indent) > 0 { + if !w.compact && len(w.indent) > 0 { s = strings.Replace(s, "\n", "\n"+w.indent, -1) } w.dst = append(w.dst, s...) @@ -138,12 +199,9 @@ func (w *printer) printShared(v0 *adt.Vertex) (x *adt.Vertex, ok bool) { // but rather to the original arc that subsequently points to a // disjunct. 
v0 = v0.DerefDisjunct() - isCyclic := v0.IsCyclic s, ok := v0.BaseValue.(*adt.Vertex) v1 := v0.DerefValue() useReference := v0.IsShared && !v1.Internal() - isCyclic = isCyclic || v1.IsCyclic - _ = isCyclic // NOTE(debug): use this line instead of the following to expand shared // cases where it is safe to do so. // if useReference && isCyclic && ok && len(v.Arcs) > 0 { @@ -163,11 +221,11 @@ func (w *printer) printShared(v0 *adt.Vertex) (x *adt.Vertex, ok bool) { } func (w *printer) pushVertex(v *adt.Vertex) bool { - for _, x := range w.stack { - if x == v { - w.string("") - return false - } + if slices.Contains(w.stack, v) { + w.string("value at path '") + w.path(v) + w.string("'") + return false } w.stack = append(w.stack, v) return true @@ -177,21 +235,15 @@ func (w *printer) popVertex() { w.stack = w.stack[:len(w.stack)-1] } -func (w *printer) shortError(errs errors.Error) { - for { - msg, args := errs.Msg() - w.dst = fmt.Appendf(w.dst, msg, args...) - - err := errors.Unwrap(errs) - if err == nil { - break - } - - if errs, _ = err.(errors.Error); errs != nil { - w.string(err.Error()) - break - } - } +// TODO: always print path? We allow a choice for keeping the error diff at a +// minimum. 
+func (w *printer) shortError(errs errors.Error, omitPath bool) { + w.string(errors.StringWithConfig(errs, &errors.Config{ + Cwd: w.cfg.Cwd, + ToSlash: true, + OmitPath: omitPath, + Printer: w, + })) } func (w *printer) interpolation(x *adt.Interpolation) { @@ -235,6 +287,10 @@ func (w *printer) arg(n adt.Node) { } func (w *printer) node(n adt.Node) { + if w.compact { + w.compactNode(n) + return + } switch x := n.(type) { case *adt.Vertex: x, ok := w.printShared(x) @@ -271,10 +327,11 @@ func (w *printer) node(n adt.Node) { w.indent += "// " w.string("\n") w.dst = fmt.Appendf(w.dst, "[%v]", v.Code) - if !v.ChildError { + if !v.ChildError || len(x.Arcs) == 0 { msg := errors.Details(v.Err, &errors.Config{ Cwd: w.cfg.Cwd, ToSlash: true, + Printer: w, }) msg = strings.TrimSpace(msg) if msg != "" { @@ -436,7 +493,7 @@ func (w *printer) node(n adt.Node) { w.string(`_|_`) if x.Err != nil { w.string("(") - w.shortError(x.Err) + w.shortError(x.Err, true) w.string(")") } @@ -485,6 +542,9 @@ func (w *printer) node(n adt.Node) { w.string(";") w.label(x.Label) w.string(closeTuple) + if x.Optional { + w.string("?") + } case *adt.ValueReference: w.string(openTuple) @@ -516,17 +576,27 @@ func (w *printer) node(n adt.Node) { w.string(";let ") w.label(x.Label) w.string(closeTuple) + if w.cfg.ExpandLetExpr { + w.string("=>") + w.node(x.X) + } case *adt.SelectorExpr: w.node(x.X) w.string(".") w.label(x.Sel) + if x.Optional { + w.string("?") + } case *adt.IndexExpr: w.node(x.X) w.string("[") w.node(x.Index) w.string("]") + if x.Optional { + w.string("?") + } case *adt.SliceExpr: w.node(x.X) @@ -560,6 +630,10 @@ func (w *printer) node(n adt.Node) { w.node(x.Y) w.string(")") + case *adt.OpenExpr: + w.node(x.X) + w.string("...") + case *adt.CallExpr: w.node(x.Fun) w.string("(") @@ -641,6 +715,10 @@ func (w *printer) node(n adt.Node) { w.node(c) } w.node(adt.ToExpr(x.Value)) + if x.Fallback != nil { + w.string(" else ") + w.node(x.Fallback) + } case *adt.ForClause: w.string("for ") @@ 
-663,6 +741,16 @@ func (w *printer) node(n adt.Node) { w.node(x.Expr) w.string(" ") + case *adt.TryClause: + w.string("try ") + if x.Label != adt.InvalidLabel { + // Assignment form: try x = expr + w.ident(x.Label) + w.string(" = ") + w.node(x.Expr) + w.string(" ") + } + case *adt.ValueClause: default: diff --git a/vendor/cuelang.org/go/internal/core/dep/dep.go b/vendor/cuelang.org/go/internal/core/dep/dep.go index eb76a1cd52..640447a255 100644 --- a/vendor/cuelang.org/go/internal/core/dep/dep.go +++ b/vendor/cuelang.org/go/internal/core/dep/dep.go @@ -17,7 +17,6 @@ package dep import ( "cuelang.org/go/cue/errors" - "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" ) @@ -190,13 +189,15 @@ func Visit(cfg *Config, c *adt.OpContext, n *adt.Vertex, f VisitFunc) error { panic("nil context") } v := visitor{ - ctxt: c, - fn: f, - pkg: cfg.Pkg, - recurse: cfg.Descend, - all: cfg.Descend, - top: true, - cfgDynamic: cfg.Dynamic, + ctxt: c, + fn: f, + pkg: cfg.Pkg, + recurse: cfg.Descend, + all: cfg.Descend, + top: true, + cfgDynamic: cfg.Dynamic, + resolved: map[refEntry]bool{}, + visitedInternal: map[adt.Resolver]bool{}, } return v.visitReusingVisitor(n, true) } @@ -236,10 +237,9 @@ func (v *visitor) visit(n *adt.Vertex, top bool) (err error) { } }() - n.VisitLeafConjuncts(func(x adt.Conjunct) bool { + for x := range n.LeafConjuncts() { v.markExpr(x.Env, x.Elem()) - return true - }) + } return nil } @@ -269,6 +269,14 @@ type visitor struct { cfgDynamic bool marked marked + + // resolved dedups resolving references to prevent exponential blowup. + resolved map[refEntry]bool + + // visitedInternal tracks resolvers already processed by + // markInternalResolvers to prevent infinite recursion on recursive + // definitions like [string]: #c or (#R & {x: 1}).out. 
+ visitedInternal map[adt.Resolver]bool } type refEntry struct { @@ -299,6 +307,9 @@ func (c *visitor) markExpr(env *adt.Environment, expr adt.Elem) { case *adt.UnaryExpr: c.markExpr(env, x.X) + case *adt.OpenExpr: + c.markExpr(env, x.X) + case *adt.Interpolation: for i := 1; i < len(x.Parts); i += 2 { c.markExpr(env, x.Parts[i]) @@ -357,10 +368,18 @@ func (c *visitor) markExpr(env *adt.Environment, expr adt.Elem) { // markResolve resolves dependencies. func (c *visitor) markResolver(env *adt.Environment, r adt.Resolver) { + if c.resolved[refEntry{env, r}] { + // TODO: this seems to still not remove everything. Consider a + // different approach. + return + } + // Note: it is okay to pass an empty CloseInfo{} here as we assume that // all nodes are finalized already and we need neither closedness nor cycle // checks. ref, _ := c.ctxt.Resolve(adt.MakeConjunct(env, r, adt.CloseInfo{}), r) + c.resolved[refEntry{env, r}] = true + c.ctxt.Stats().ResolveDep++ // TODO: consider the case where an inlined composite literal does not // resolve, but has references. For instance, {a: k, ref}.b would result @@ -414,13 +433,7 @@ func (c *visitor) reportDependency(env *adt.Environment, ref adt.Resolver, v *ad reference = c.topRef } - inspect := false - - if c.ctxt.Version == internal.DevVersion { - inspect = v.IsDetached() || !v.MayAttach() - } else { - inspect = !v.Rooted() - } + inspect := v.IsDetached() || !v.MayAttach() if inspect { // TODO: there is currently no way to inspect where a non-rooted node @@ -489,9 +502,8 @@ func (c *visitor) reportDependency(env *adt.Environment, ref adt.Resolver, v *ad c.numRefs++ - if c.ctxt.Version == internal.DevVersion { - v.Finalize(c.ctxt) - } + // Note: we did not finalize in V2. + v.Unify(c.ctxt, adt.FinalizeWithoutTypoCheck) d := Dependency{ Node: v, @@ -539,12 +551,11 @@ func hasLetParent(v *adt.Vertex) bool { // markConjuncts transitively marks all reference of the current node. 
func (c *visitor) markConjuncts(v *adt.Vertex) { - v.VisitLeafConjuncts(func(x adt.Conjunct) bool { + for x := range v.LeafConjuncts() { // Use Elem instead of Expr to preserve the Comprehension to, in turn, // ensure an Environment is inserted for the Value clause. c.markExpr(x.Env, x.Elem()) - return true - }) + } } // markInternalResolvers marks dependencies for rootless nodes. As these @@ -552,15 +563,19 @@ func (c *visitor) markConjuncts(v *adt.Vertex) { // proactive. For selectors and indices this means we need to evaluate their // objects to see exactly what the selector or index refers to. func (c *visitor) markInternalResolvers(env *adt.Environment, r adt.Resolver, v *adt.Vertex) { + if c.visitedInternal[r] { + return + } + c.visitedInternal[r] = true + saved := c.all // recursive traversal already done by this function. // As lets have no path and we otherwise will not process them, we set // processing all to true. if c.marked != nil && hasLetParent(v) { - v.VisitLeafConjuncts(func(x adt.Conjunct) bool { + for x := range v.LeafConjuncts() { c.marked.markExpr(x.Expr()) - return true - }) + } } c.markConjuncts(v) @@ -590,7 +605,7 @@ func (c *visitor) evaluateInner(env *adt.Environment, x adt.Expr, r adt.Resolver return } // TODO(perf): one level of evaluation would suffice. - v.Finalize(c.ctxt) + v.Unify(c.ctxt, adt.FinalizeWithoutTypoCheck) saved := len(c.pathStack) c.pathStack = append(c.pathStack, refEntry{env, r}) @@ -651,6 +666,9 @@ func (c *visitor) markDecl(env *adt.Environment, d adt.Decl) { } func (c *visitor) markComprehension(env *adt.Environment, y *adt.Comprehension) { + // Save outer environment for else clause. + outerEnv := env + env = c.markClauses(env, y.Clauses) // Use "live" environments if we have them. This is important if @@ -675,6 +693,11 @@ func (c *visitor) markComprehension(env *adt.Environment, y *adt.Comprehension) } // TODO: consider using adt.EnvExpr and remove the above loop. 
c.markExpr(env, adt.ToExpr(y.Value)) + + // Mark else clause in outer environment. + if y.Fallback != nil { + c.markExpr(outerEnv, y.Fallback) + } } func (c *visitor) markClauses(env *adt.Environment, a []adt.Yielder) *adt.Environment { @@ -694,6 +717,14 @@ func (c *visitor) markClauses(env *adt.Environment, a []adt.Yielder) *adt.Enviro c.markExpr(env, x.Condition) // In dynamic mode, only continue if condition is true. + case *adt.TryClause: + if x.Expr != nil { + // Assignment form: try x = expr + c.markExpr(env, x.Expr) + env = &adt.Environment{Up: env, Vertex: empty} + } + // Struct form: no expression, body is in Comprehension.Value + case *adt.ValueClause: env = &adt.Environment{Up: env, Vertex: empty} } diff --git a/vendor/cuelang.org/go/internal/core/dep/mixed.go b/vendor/cuelang.org/go/internal/core/dep/mixed.go index 5ae089a928..e79a91490b 100644 --- a/vendor/cuelang.org/go/internal/core/dep/mixed.go +++ b/vendor/cuelang.org/go/internal/core/dep/mixed.go @@ -31,13 +31,12 @@ func (v *visitor) dynamic(n *adt.Vertex, top bool) { found := false // TODO: Consider if we should only visit the conjuncts of the disjunction // for dynamic mode. 
- n.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range n.LeafConjuncts() { if v.marked[c.Expr()] { found = true - return false + break } - return true - }) + } if !found { return @@ -70,10 +69,9 @@ func (m marked) markExpr(x adt.Expr) { case nil: case *adt.Vertex: - x.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range x.LeafConjuncts() { m.markExpr(c.Expr()) - return true - }) + } case *adt.BinaryExpr: if x.Op == adt.AndOp { @@ -136,4 +134,7 @@ func (m marked) markExpr(x adt.Expr) { func (m marked) markComprehension(y *adt.Comprehension) { m.markExpr(adt.ToExpr(y.Value)) + if y.Fallback != nil { + m.markExpr(y.Fallback) + } } diff --git a/vendor/cuelang.org/go/internal/core/export/adt.go b/vendor/cuelang.org/go/internal/core/export/adt.go index dbb4a9cbbf..77821ab75f 100644 --- a/vendor/cuelang.org/go/internal/core/export/adt.go +++ b/vendor/cuelang.org/go/internal/core/export/adt.go @@ -25,12 +25,11 @@ import ( "cuelang.org/go/cue/ast/astutil" "cuelang.org/go/cue/literal" "cuelang.org/go/cue/token" - "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" ) func (e *exporter) ident(x adt.Feature) *ast.Ident { - s := x.IdentString(e.ctx) + s := e.identString(x) if !ast.IsValidIdent(s) { panic(s + " is not a valid identifier") } @@ -55,7 +54,6 @@ func (e *exporter) adt(env *adt.Environment, expr adt.Elem) ast.Expr { // _, saved := e.pushFrame([]adt.Conjunct{adt.MakeConjunct(nil, x)}) // defer e.popFrame(saved) // s := e.frame(0).scope - s := &ast.StructLit{} // TODO: ensure e.node() is set in more cases. Right now it is not // always set in mergeValues, even in cases where it could be. 
Better @@ -231,6 +229,12 @@ func (e *exporter) adt(env *adt.Environment, expr adt.Elem) ast.Expr { X: e.innerExpr(env, x.X), } + case *adt.OpenExpr: + return &ast.PostfixExpr{ + X: e.innerExpr(env, x.X), + Op: token.ELLIPSIS, + } + case *adt.BinaryExpr: if x.Op == adt.AndOp || x.Op == adt.OrOp { return e.sortBinaryTree(env, x) @@ -278,12 +282,18 @@ func (e *exporter) adt(env *adt.Environment, expr adt.Elem) ast.Expr { return dummyTop } for _, c := range x.Clauses { - switch c.(type) { + switch c := c.(type) { case *adt.ForClause: env = &adt.Environment{Up: env, Vertex: empty} case *adt.IfClause: case *adt.LetClause: env = &adt.Environment{Up: env, Vertex: empty} + case *adt.TryClause: + if c.Expr != nil { + // Assignment form: needs new environment for binding + env = &adt.Environment{Up: env, Vertex: empty} + } + // Struct form: no new environment needed (like IfClause) case *adt.ValueClause: // Can occur in nested comprehenions. env = &adt.Environment{Up: env, Vertex: empty} @@ -391,6 +401,13 @@ func typeOrder(x adt.Node) int { var dummyTop = &ast.Ident{Name: "_"} +func wrapIfOptional(expr ast.Expr, isOptional bool) ast.Expr { + if isOptional { + return &ast.PostfixExpr{X: expr, Op: token.OPTION} + } + return expr +} + func (e *exporter) resolve(env *adt.Environment, r adt.Resolver) ast.Expr { if c := e.pivotter; c != nil { if alt := c.refExpr(r); alt != nil { @@ -407,7 +424,7 @@ func (e *exporter) resolve(env *adt.Environment, r adt.Resolver) ast.Expr { ident := ast.NewIdent(aliasFromLabel(f)) ident.Node = entry.field ident.Scope = entry.scope - return ident + return wrapIfOptional(ident, x.Optional) } } } @@ -434,7 +451,7 @@ func (e *exporter) resolve(env *adt.Environment, r adt.Resolver) ast.Expr { } } - return ident + return wrapIfOptional(ident, x.Optional) case *adt.ValueReference: name := x.Label.IdentString(e.ctx) @@ -502,16 +519,18 @@ func (e *exporter) resolve(env *adt.Environment, r adt.Resolver) ast.Expr { return e.resolveLet(env, x) case 
*adt.SelectorExpr: - return &ast.SelectorExpr{ + sel := &ast.SelectorExpr{ X: e.innerExpr(env, x.X), Sel: e.stringLabel(x.Sel), } + return wrapIfOptional(sel, x.Optional) case *adt.IndexExpr: - return &ast.IndexExpr{ + idx := &ast.IndexExpr{ X: e.innerExpr(env, x.X), Index: e.innerExpr(env, x.Index), } + return wrapIfOptional(idx, x.Optional) } panic("unreachable") } @@ -556,8 +575,7 @@ func (e *exporter) decl(env *adt.Environment, d adt.Decl) ast.Decl { case *adt.Field: e.setDocs(x) f := e.getFixedField(x) - - internal.SetConstraint(f, x.ArcType.Token()) + f.Constraint = x.ArcType.Token() e.setField(x.Label, f) f.Attrs = extractFieldAttrs(nil, x) @@ -625,8 +643,7 @@ func (e *exporter) decl(env *adt.Environment, d adt.Decl) ast.Decl { e.setDocs(x) srcKey := x.Key - f := &ast.Field{} - internal.SetConstraint(f, x.ArcType.Token()) + f := &ast.Field{Constraint: x.ArcType.Token()} v, _ := e.ctx.Evaluate(env, x.Key) @@ -679,16 +696,9 @@ func (e *exporter) copyMeta(dst, src ast.Node) { func filterDocs(a []*ast.CommentGroup) (out []*ast.CommentGroup) { out = append(out, a...) - k := 0 - for _, c := range a { - if !c.Doc { - continue - } - out[k] = c - k++ - } - out = out[:k] - return out + return slices.DeleteFunc(out, func(c *ast.CommentGroup) bool { + return !c.Doc + }) } func (e *exporter) setField(label adt.Feature, f *ast.Field) { @@ -735,6 +745,10 @@ func (e *exporter) elem(env *adt.Environment, d adt.Elem) ast.Expr { func (e *exporter) comprehension(env *adt.Environment, comp *adt.Comprehension) *ast.Comprehension { c := &ast.Comprehension{} + // Save outer environment for else clause (which doesn't have access to + // comprehension-internal bindings). 
+ outerEnv := env + for _, y := range comp.Clauses { switch x := y.(type) { case *adt.ForClause: @@ -777,6 +791,23 @@ func (e *exporter) comprehension(env *adt.Environment, comp *adt.Comprehension) e.addField(x.Label, nil, clause) + case *adt.TryClause: + clause := &ast.TryClause{} + if x.Label != adt.InvalidLabel { + // Assignment form: try x = expr + env = &adt.Environment{Up: env, Vertex: empty} + clause.Ident = e.ident(x.Label) + clause.Expr = e.innerExpr(env, x.Expr) + + _, saved := e.pushFrame(empty, nil) + defer e.popFrame(saved) + + e.addField(x.Label, nil, clause) + } + // Struct form has no Ident/Expr - body is in Comprehension.Value + e.copyMeta(clause, x.Src) + c.Clauses = append(c.Clauses, clause) + case *adt.ValueClause: // Can occur in nested comprehenions. env = &adt.Environment{Up: env, Vertex: empty} @@ -799,5 +830,14 @@ func (e *exporter) comprehension(env *adt.Environment, comp *adt.Comprehension) v = ast.NewStruct(ast.Embed(v)) } c.Value = v + + // Export fallback clause using outer environment. + if comp.Fallback != nil { + fallbackBody := e.expr(outerEnv, comp.Fallback) + if body, ok := fallbackBody.(*ast.StructLit); ok { + c.Fallback = &ast.FallbackClause{Body: body} + } + } + return c } diff --git a/vendor/cuelang.org/go/internal/core/export/export.go b/vendor/cuelang.org/go/internal/core/export/export.go index cdf9de03b6..b1913935a3 100644 --- a/vendor/cuelang.org/go/internal/core/export/export.go +++ b/vendor/cuelang.org/go/internal/core/export/export.go @@ -17,6 +17,7 @@ package export import ( "fmt" "math/rand/v2" + "slices" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/ast/astutil" @@ -71,6 +72,10 @@ type Profile struct { // InlineImports expands references to non-builtin packages. InlineImports bool + + // ExpandReferences causes all references to be expanded inline. This + // disables the ability to prevent billion laughs attacks, so use with care. 
+ ExpandReferences bool } var Simplified = &Profile{ @@ -192,10 +197,10 @@ func (e *exporter) toFile(v *adt.Vertex, x ast.Expr) *ast.File { // prevent the file comment from attaching to pkg when there is no pkg comment PackagePos: token.NoPos.WithRel(token.NewSection), } - v.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range v.LeafConjuncts() { f, _ := c.Source().(*ast.File) if f == nil { - return true + continue } if name := f.PackageName(); name != "" { @@ -214,18 +219,17 @@ func (e *exporter) toFile(v *adt.Vertex, x ast.Expr) *ast.File { ast.AddComment(fout, c) } } - return true - }) + } if pkgName != "" { pkg.Name = ast.NewIdent(pkgName) fout.Decls = append(fout.Decls, pkg) - ast.SetComments(pkg, internal.MergeDocs(pkg.Comments())) + ast.SetComments(pkg, mergeDocs(ast.Comments(pkg))) } else { - for _, c := range fout.Comments() { + for _, c := range ast.Comments(fout) { ast.AddComment(pkg, c) } - ast.SetComments(fout, internal.MergeDocs(pkg.Comments())) + ast.SetComments(fout, mergeDocs(ast.Comments(pkg))) } } @@ -243,6 +247,39 @@ func (e *exporter) toFile(v *adt.Vertex, x ast.Expr) *ast.File { return fout } +// mergeDocs merges multiple doc comments into one single doc comment. +func mergeDocs(comments []*ast.CommentGroup) []*ast.CommentGroup { + if len(comments) <= 1 || !hasDocComment(comments) { + return comments + } + + comments1 := make([]*ast.CommentGroup, 0, len(comments)) + comments1 = append(comments1, nil) + var docComment *ast.CommentGroup + for _, c := range comments { + switch { + case !c.Doc: + comments1 = append(comments1, c) + case docComment == nil: + docComment = c + default: + docComment.List = append(slices.Clip(docComment.List), &ast.Comment{Text: "//"}) + docComment.List = append(docComment.List, c.List...) 
+ } + } + comments1[0] = docComment + return comments1 +} + +func hasDocComment(comments []*ast.CommentGroup) bool { + for _, c := range comments { + if c.Doc { + return true + } + } + return false +} + // Vertex exports evaluated values (data mode). // It resolves incomplete references that point outside the current context. func Vertex(r adt.Runtime, pkgID string, n *adt.Vertex) (*ast.File, errors.Error) { @@ -383,7 +420,7 @@ func (e *exporter) initPivot(n *adt.Vertex) { switch { case e.cfg.SelfContained, e.cfg.InlineImports: // Explicitly enabled. - case n.Parent == nil, e.cfg.Fragment: + case n.Parent == nil, e.cfg.Fragment, e.cfg.ExpandReferences: return } e.initPivotter(n) @@ -404,18 +441,21 @@ func (e *exporter) finalize(n *adt.Vertex, v ast.Expr) (f *ast.File, err errors. return f, nil } +// markUsedFeatures walks x to record features in usedFeature, +// so that uniqueFeature can avoid generating colliding names. +// It may be called multiple times to accumulate features from different expressions. func (e *exporter) markUsedFeatures(x adt.Expr) { - e.usedFeature = make(map[adt.Feature]adt.Expr) - + if e.usedFeature == nil { + e.usedFeature = make(map[adt.Feature]adt.Expr) + } w := &walk.Visitor{} w.Before = func(n adt.Node) bool { switch x := n.(type) { case *adt.Vertex: if !x.IsData() { - x.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range x.LeafConjuncts() { w.Elem(c.Elem()) - return true - }) + } } case *adt.DynamicReference: @@ -564,15 +604,10 @@ func (e *exporter) markLetAlias(x *ast.LetClause) { // In value mode, lets are only used if there wasn't an error. 
func filterUnusedLets(s *ast.StructLit) { - k := 0 - for i, d := range s.Elts { - if let, ok := d.(*ast.LetClause); ok && let.Expr == nil { - continue - } - s.Elts[k] = s.Elts[i] - k++ - } - s.Elts = s.Elts[:k] + s.Elts = slices.DeleteFunc(s.Elts, func(d ast.Decl) bool { + let, ok := d.(*ast.LetClause) + return ok && let.Expr == nil + }) } // resolveLet actually parses the let expression. diff --git a/vendor/cuelang.org/go/internal/core/export/expr.go b/vendor/cuelang.org/go/internal/core/export/expr.go index 04a83f3bb4..afa5ebe4e9 100644 --- a/vendor/cuelang.org/go/internal/core/export/expr.go +++ b/vendor/cuelang.org/go/internal/core/export/expr.go @@ -17,11 +17,11 @@ package export import ( "cmp" "fmt" + "maps" "slices" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/token" - "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" ) @@ -80,13 +80,12 @@ func (e *exporter) expr(env *adt.Environment, v adt.Elem) (result ast.Expr) { } // Should this be the arcs label? a := []conjunct{} - x.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range x.LeafConjuncts() { if c, ok := c.Elem().(*adt.Comprehension); ok && !c.DidResolve() { - return true + continue } a = append(a, conjunct{c, 0}) - return true - }) + } return e.mergeValues(adt.InvalidLabel, x, a, x.Conjuncts...) @@ -177,10 +176,7 @@ func (x *exporter) mergeValues(label adt.Feature, src *adt.Vertex, a []conjunct, // Collect and order set of fields. - fields := []adt.Feature{} - for f := range e.fields { - fields = append(fields, f) - } + fields := slices.Collect(maps.Keys(e.fields)) // Sort fields in case features lists are missing to ensure // predictability. 
Also sort in reverse order, so that bugs @@ -267,7 +263,7 @@ func (x *exporter) mergeValues(label adt.Feature, src *adt.Vertex, a []conjunct, x.inDefinition-- } - internal.SetConstraint(d, field.arcType.Token()) + d.Constraint = field.arcType.Token() if x.cfg.ShowDocs { v := &adt.Vertex{Conjuncts: a} docs := extractDocs(v) @@ -297,15 +293,8 @@ func (x *exporter) mergeValues(label adt.Feature, src *adt.Vertex, a []conjunct, } func (e *conjuncts) wrapCloseIfNecessary(s *ast.StructLit, v *adt.Vertex) ast.Expr { - if !e.hasEllipsis && v != nil { - if v.ClosedNonRecursive { - // Eval V3 logic - return ast.NewCall(ast.NewIdent("close"), s) - } - if st, ok := v.BaseValue.(*adt.StructMarker); ok && st.NeedClose { - // Eval V2 logic - return ast.NewCall(ast.NewIdent("close"), s) - } + if !e.hasEllipsis && v != nil && v.ClosedNonRecursive { + return ast.NewCall(ast.NewIdent("close"), s) } return s } @@ -317,7 +306,7 @@ type conjuncts struct { values *adt.Vertex embed []ast.Expr conjuncts []ast.Expr - structs []*adt.StructInfo + structs []adt.StructInfo fields map[adt.Feature]field attrs []*ast.Attribute hasEllipsis bool @@ -388,7 +377,7 @@ func (e *conjuncts) addExpr(env *adt.Environment, src *adt.Vertex, x adt.Elem, i return } // Used for sorting. - e.structs = append(e.structs, &adt.StructInfo{StructLit: x, Env: env}) + e.structs = append(e.structs, adt.StructInfo{StructLit: x}) env = &adt.Environment{Up: env, Vertex: e.node()} @@ -433,10 +422,9 @@ func (e *conjuncts) addExpr(env *adt.Environment, src *adt.Vertex, x adt.Elem, i switch { default: - v.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range v.LeafConjuncts() { e.addExpr(c.Env, v, c.Elem(), false) - return true - }) + } case v.IsData(): e.structs = append(e.structs, v.Structs...) 
diff --git a/vendor/cuelang.org/go/internal/core/export/extract.go b/vendor/cuelang.org/go/internal/core/export/extract.go index e666a9b497..26f3d1b25c 100644 --- a/vendor/cuelang.org/go/internal/core/export/extract.go +++ b/vendor/cuelang.org/go/internal/core/export/extract.go @@ -15,8 +15,9 @@ package export import ( + "slices" + "cuelang.org/go/cue/ast" - "cuelang.org/go/cue/token" "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" ) @@ -36,20 +37,19 @@ func extractDocs(v *adt.Vertex) (docs []*ast.CommentGroup) { fields := []*ast.Field{} // Collect docs directly related to this Vertex. - v.VisitLeafConjuncts(func(x adt.Conjunct) bool { + for x := range v.LeafConjuncts() { // TODO: Is this still being used? if v, ok := x.Elem().(*adt.Vertex); ok { docs = append(docs, extractDocs(v)...) - return true } switch f := x.Field().Source().(type) { case *ast.Field: if hasShorthandValue(f) { - return true + continue } fields = append(fields, f) - for _, cg := range f.Comments() { + for _, cg := range ast.Comments(f) { if !containsDoc(docs, cg) && cg.Doc { docs = append(docs, cg) } @@ -59,34 +59,31 @@ func extractDocs(v *adt.Vertex) (docs []*ast.CommentGroup) { fdocs, _ := internal.FileComments(f) docs = append(docs, fdocs...) } - - return true - }) + } // Collect docs from parent scopes in collapsed fields. 
for p := v.Parent; p != nil; p = p.Parent { newFields := []*ast.Field{} - p.VisitLeafConjuncts(func(x adt.Conjunct) bool { + for x := range p.LeafConjuncts() { f, ok := x.Source().(*ast.Field) if !ok || !hasShorthandValue(f) { - return true + continue } nested := nestedField(f) for _, child := range fields { if nested == child { newFields = append(newFields, f) - for _, cg := range f.Comments() { + for _, cg := range ast.Comments(f) { if !containsDoc(docs, cg) && cg.Doc { docs = append(docs, cg) } } } } - return true - }) + } fields = newFields } @@ -114,10 +111,9 @@ func hasShorthandValue(f *ast.Field) bool { // nestedField returns the child field of a field shorthand. func nestedField(f *ast.Field) *ast.Field { s, _ := f.Value.(*ast.StructLit) - if s == nil || - len(s.Elts) != 1 || - s.Lbrace != token.NoPos || - s.Rbrace != token.NoPos { + if s == nil || len(s.Elts) != 1 || + s.Lbrace.IsValid() || + s.Rbrace.IsValid() { return nil } @@ -126,10 +122,8 @@ func nestedField(f *ast.Field) *ast.Field { } func containsDoc(a []*ast.CommentGroup, cg *ast.CommentGroup) bool { - for _, c := range a { - if c == cg { - return true - } + if slices.Contains(a, cg) { + return true } for _, c := range a { @@ -142,10 +136,9 @@ func containsDoc(a []*ast.CommentGroup, cg *ast.CommentGroup) bool { } func ExtractFieldAttrs(v *adt.Vertex) (attrs []*ast.Attribute) { - v.VisitLeafConjuncts(func(x adt.Conjunct) bool { + for x := range v.LeafConjuncts() { attrs = extractFieldAttrs(attrs, x.Field()) - return true - }) + } return attrs } diff --git a/vendor/cuelang.org/go/internal/core/export/label.go b/vendor/cuelang.org/go/internal/core/export/label.go index 5de459bdcf..0dca59757c 100644 --- a/vendor/cuelang.org/go/internal/core/export/label.go +++ b/vendor/cuelang.org/go/internal/core/export/label.go @@ -19,10 +19,8 @@ import ( "fmt" "io" "strconv" - "strings" "cuelang.org/go/cue/ast" - "cuelang.org/go/cue/literal" "cuelang.org/go/cue/token" "cuelang.org/go/internal/core/adt" ) @@ -37,16 
+35,8 @@ func (e *exporter) stringLabel(f adt.Feature) ast.Label { s := e.identString(f) return ast.NewIdent(s) - case adt.StringLabel: - s := e.ctx.IndexToString(int64(x)) - if f == 0 || !ast.IsValidIdent(s) || - strings.HasPrefix(s, "#") || strings.HasPrefix(s, "_") { - return ast.NewLit(token.STRING, literal.Label.Quote(s)) - } - fallthrough - default: - return ast.NewIdent(e.ctx.IndexToString(int64(x))) + return ast.NewStringLabel(e.ctx.IndexToString(int64(x))) } } diff --git a/vendor/cuelang.org/go/internal/core/export/self.go b/vendor/cuelang.org/go/internal/core/export/self.go index 11a633ce09..92df32c0a0 100644 --- a/vendor/cuelang.org/go/internal/core/export/self.go +++ b/vendor/cuelang.org/go/internal/core/export/self.go @@ -56,11 +56,21 @@ func (e *exporter) completePivot(f *ast.File) { if s == nil || f == nil { return } - for _, d := range s.deps { - if !d.isExternalRoot() { - continue + // Use a fixpoint loop because exporting one external's body (in + // addExternal) may set needTopLevel on deps earlier in the list. + for { + progress := false + for _, d := range s.deps { + if d.exported || !d.isExternalRoot() || !d.needTopLevel { + continue + } + d.exported = true + progress = true + s.addExternal(d) + } + if !progress { + break } - s.addExternal(d) } f.Decls = append(f.Decls, s.decls...) } @@ -81,6 +91,15 @@ type pivotter struct { refs []*refData refMap map[adt.Resolver]*refData + // inlining tracks vertices currently being inlined by refExpr to prevent + // infinite recursion on recursive definitions like [string]: #c. + inlining map[*adt.Vertex]bool + + // exporting is the dep currently being exported by addExternal, used to + // detect self-referential lets and generate inner definition references + // instead. 
+ exporting *depData + decls []ast.Decl } @@ -95,6 +114,7 @@ type depData struct { useCount int // Other reference using this vertex included bool needTopLevel bool + exported bool // already processed by addExternal in the fixpoint loop } // isExternalRoot reports whether d is an external node (a node referenced @@ -135,6 +155,15 @@ func (p *pivotter) linkDependencies(v *adt.Vertex) { } } + // Mark features used in dependency vertices so that generated let + // variable names (from makeParentPath/uniqueFeature) don't collide + // with field names in the exported external values. + for _, d := range p.deps { + if d.dstNode != nil { + p.x.markUsedFeatures(d.dstNode) + } + } + // Compute the paths for the parent nodes. for _, d := range p.deps { if d.parent == nil { @@ -282,6 +311,15 @@ func (p *pivotter) makeParentPath(d *depData) { func (p *pivotter) makeAlternativeReference(ref *refData, r adt.Resolver) ast.Expr { d := ref.dst + // If this reference points back to the dep we're currently exporting + // as a let binding, and the dep has a definition wrapper (len > 1), + // generate a reference to the inner definition label instead of the + // let identifier. This avoids self-referential lets, which CUE does + // not support, while definitions can be self-referential. + if d == p.exporting && d.parent == nil && len(d.path) > 1 { + return p.x.ident(d.path[len(d.path)-1]) + } + // Determine if the reference can be inline. var path []adt.Feature @@ -376,7 +414,9 @@ func relPathLength(r adt.Resolver) (length int, newRoot bool) { case adt.Resolver: r = x default: - panic("unreachable") + // A non-Resolver expression (e.g. BinaryExpr) is a valid + // end-of-chain; we simply cannot walk any further. + return length, false } } } @@ -410,11 +450,18 @@ func (p *pivotter) refExpr(r adt.Resolver) ast.Expr { // Don't simplify for errors to make the position of the error clearer. 
case !n.IsConcrete() && p.x.inExpression > 0: // Don't simplify an expression that is known will fail. + case p.inlining[n]: + // Prevent infinite recursion on recursive definitions. case dst.usageCount() == 1 && p.x.inExpression == 0: // Used only once. fallthrough case n.IsConcrete() && len(n.Arcs) == 0: // Simple scalar value. + if p.inlining == nil { + p.inlining = map[*adt.Vertex]bool{} + } + p.inlining[n] = true + defer delete(p.inlining, n) return p.x.expr(nil, n) } @@ -432,7 +479,10 @@ func (p *pivotter) addExternal(d *depData) { return } + saved := p.exporting + p.exporting = d expr := p.x.expr(nil, d.node()) + p.exporting = saved if len(d.path) > 1 { expr = ast.NewStruct(&ast.Field{ diff --git a/vendor/cuelang.org/go/internal/core/export/toposort.go b/vendor/cuelang.org/go/internal/core/export/toposort.go index 36b58bbdd3..1660e0ea4b 100644 --- a/vendor/cuelang.org/go/internal/core/export/toposort.go +++ b/vendor/cuelang.org/go/internal/core/export/toposort.go @@ -29,8 +29,16 @@ func VertexFeatures(c *adt.OpContext, v *adt.Vertex) []adt.Feature { return toposort.VertexFeatures(c, v) } -func extractFeatures(in []*adt.StructInfo) (a [][]adt.Feature) { - a = make([][]adt.Feature, 0, len(in)) +func extractFeatures(in []adt.StructInfo) (a [][]adt.Feature) { + // Calculate total entries accounting for repeats. + // Total occurrences = 1 + Repeats for each StructInfo. + totalEntries := 0 + for _, s := range in { + if len(s.Decls) > 0 { + totalEntries += 1 + s.Repeats + } + } + a = make([][]adt.Feature, 0, totalEntries) for _, s := range in { sorted := make([]adt.Feature, 0, len(s.Decls)) for _, e := range s.Decls { @@ -43,7 +51,11 @@ func extractFeatures(in []*adt.StructInfo) (a [][]adt.Feature) { // Lists with a single element may still be useful to distinguish // between known and unknown fields: unknown fields are sorted last. 
if len(sorted) > 0 { - a = append(a, sorted) + occurrences := 1 + s.Repeats + // Add this front (1 + Repeats) times to give it proper weight + for range occurrences { + a = append(a, sorted) + } } } return a diff --git a/vendor/cuelang.org/go/internal/core/export/value.go b/vendor/cuelang.org/go/internal/core/export/value.go index f476a51bcd..9a12702614 100644 --- a/vendor/cuelang.org/go/internal/core/export/value.go +++ b/vendor/cuelang.org/go/internal/core/export/value.go @@ -23,7 +23,6 @@ import ( "cuelang.org/go/cue/ast/astutil" "cuelang.org/go/cue/literal" "cuelang.org/go/cue/token" - "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" ) @@ -56,10 +55,9 @@ func (e *exporter) vertex(n *adt.Vertex) (result ast.Expr) { e.popFrame(saved) }() - n.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range n.LeafConjuncts() { e.markLets(c.Expr().Source(), s) - return true - }) + } switch x := n.BaseValue.(type) { case nil: @@ -103,14 +101,9 @@ func (e *exporter) vertex(n *adt.Vertex) (result ast.Expr) { } if result == nil { // fall back to expression mode - a := []adt.Conjunct{} - n.VisitLeafConjuncts(func(c adt.Conjunct) bool { - a = append(a, c) - return true - }) // Use stable sort to ensure that tie breaks (for instance if elements // are not associated with a position) are deterministic. - slices.SortStableFunc(a, cmpConjuncts) + a := slices.SortedStableFunc(n.LeafConjuncts(), cmpConjuncts) exprs := make([]ast.Expr, 0, len(a)) for _, c := range a { @@ -122,9 +115,7 @@ func (e *exporter) vertex(n *adt.Vertex) (result ast.Expr) { result = ast.NewBinExpr(token.AND, exprs...) } - if len(s.Elts) > 0 { - filterUnusedLets(s) - } + filterUnusedLets(s) if result != s && len(s.Elts) > 0 { // There are used let expressions within a non-struct. // For now we just fall back to the original expressions. @@ -228,6 +219,9 @@ func (e *exporter) value(n adt.Value, a ...adt.Conjunct) (result ast.Expr) { } result = ast.NewBinExpr(token.OR, a...) 
+ case *adt.NodeLink: + return e.value(x.Node, a...) + default: panic(fmt.Sprintf("unsupported type %T", x)) } @@ -242,7 +236,7 @@ func (e *exporter) bottom(n *adt.Bottom) *ast.BottomLit { if x := n.Err; x != nil { msg := x.Error() comment := &ast.Comment{Text: "// " + msg} - err.AddComment(&ast.CommentGroup{ + ast.AddComment(err, &ast.CommentGroup{ Line: true, Position: 2, List: []*ast.Comment{comment}, @@ -259,15 +253,13 @@ func (e *exporter) bool(n *adt.Bool) (b *ast.BasicLit) { return ast.NewBool(n.B) } -func extractBasic(a []adt.Conjunct) (lit *ast.BasicLit) { - adt.VisitConjuncts(a, func(c adt.Conjunct) bool { +func extractBasic(a []adt.Conjunct) *ast.BasicLit { + for c := range adt.ConjunctsSeq(a) { if b, ok := c.Source().(*ast.BasicLit); ok { - lit = &ast.BasicLit{Kind: b.Kind, Value: b.Value} - return false + return &ast.BasicLit{Kind: b.Kind, Value: b.Value} } - return true - }) - return lit + } + return nil } func (e *exporter) num(n *adt.Num, orig []adt.Conjunct) *ast.BasicLit { @@ -460,7 +452,7 @@ func (e *exporter) structComposite(v *adt.Vertex, attrs []*ast.Attribute) ast.Ex // This package typically does not create errors that did not result // from evaluation already. - internal.SetConstraint(f, arc.ArcType.Token()) + f.Constraint = arc.ArcType.Token() f.Value = e.vertex(arc.DerefValue()) diff --git a/vendor/cuelang.org/go/internal/core/format/printer.go b/vendor/cuelang.org/go/internal/core/format/printer.go new file mode 100644 index 0000000000..a907a73b01 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/format/printer.go @@ -0,0 +1,26 @@ +// Copyright 2025 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package format provides functionality for pretty-printing CUE values. +// These types need to be in a separate package to avoid import cycles. +package format + +// Printer is the interface used to print CUE values. The only implementation so +// far is the one in internal/core/debug. Note that most packages cannot +// directly import the debug package. +type Printer interface { + // ReplaceArg is a function that may be called to replace arguments to + // errors. This is mostly used for cycle detection. + ReplaceArg(x any) (r any, wasReplaced bool) +} diff --git a/vendor/cuelang.org/go/cue/ast/astutil/walk.go b/vendor/cuelang.org/go/internal/core/layer/layer.go similarity index 53% rename from vendor/cuelang.org/go/cue/ast/astutil/walk.go rename to vendor/cuelang.org/go/internal/core/layer/layer.go index fdb99fe649..2d04be7ee5 100644 --- a/vendor/cuelang.org/go/cue/ast/astutil/walk.go +++ b/vendor/cuelang.org/go/internal/core/layer/layer.go @@ -1,10 +1,10 @@ -// Copyright 2018 The CUE Authors +// Copyright 2025 CUE Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,14 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -package astutil +package layer -import "cuelang.org/go/cue/ast" +type Priority int8 -// A visitor's before method is invoked for each node encountered by Walk. -// If the result visitor w is true, Walk visits each of the children -// of node with the visitor w, followed by a call of w.After. -type visitor interface { - Before(node ast.Node) (w visitor) - After(node ast.Node) -} +// TODO: algorithm for handling disjunctions more intuitively with layering. +// For instance, how do we handle this case?: +// +// x: {} | *{ +// b: x: 1 +// c: x: 2 +// } +// // If y > x, does the 2 of b force the default of x to fail? Could be an option. +// y: { +// b: *{x: 2} | {} +// c: *{x: 3} | {} +// } diff --git a/vendor/cuelang.org/go/internal/core/runtime/build.go b/vendor/cuelang.org/go/internal/core/runtime/build.go index 5fed1ec274..860336a566 100644 --- a/vendor/cuelang.org/go/internal/core/runtime/build.go +++ b/vendor/cuelang.org/go/internal/core/runtime/build.go @@ -15,10 +15,10 @@ package runtime import ( + "strconv" "strings" "cuelang.org/go/cue/ast" - "cuelang.org/go/cue/ast/astutil" "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/stats" @@ -57,15 +57,12 @@ func (x *Runtime) Build(cfg *Config, b *build.Instance) (v *adt.Vertex, errs err // Build transitive dependencies. 
for _, file := range b.Files { - file.VisitImports(func(d *ast.ImportDecl) { - for _, s := range d.Specs { - errs = errors.Append(errs, x.buildSpec(cfg, b, s)) - } - }) + for s := range file.ImportSpecs() { + errs = errors.Append(errs, x.buildSpec(cfg, b, s)) + } } - err := x.ResolveFiles(b) - errs = errors.Append(errs, err) + errs = errors.Append(errs, b.ResolutionErr) var cc *compile.Config if cfg != nil { @@ -73,9 +70,9 @@ func (x *Runtime) Build(cfg *Config, b *build.Instance) (v *adt.Vertex, errs err } if cfg != nil && cfg.ImportPath != "" { b.ImportPath = cfg.ImportPath - b.PkgName = astutil.ImportPathName(b.ImportPath) + b.PkgName = ast.ParseImportPath(b.ImportPath).Qualifier } - v, err = compile.Files(cc, x, b.ID(), b.Files...) + v, err := compile.Instance(cc, x, b) errs = errors.Append(errs, err) errs = errors.Append(errs, x.InjectImplementations(b, v)) @@ -85,7 +82,7 @@ func (x *Runtime) Build(cfg *Config, b *build.Instance) (v *adt.Vertex, errs err b.Err = errs } - x.AddInst(b.ImportPath, v, b) + x.AddInst(v, b) return v, errs } @@ -121,20 +118,20 @@ func (r *Runtime) CompileFile(cfg *Config, file *ast.File) (*adt.Vertex, *build. 
} func (x *Runtime) buildSpec(cfg *Config, b *build.Instance, spec *ast.ImportSpec) (errs errors.Error) { - info, err := astutil.ParseImportSpec(spec) + path, err := strconv.Unquote(spec.Path.Value) if err != nil { return errors.Promote(err, "invalid import path") } - pkg := b.LookupImport(info.ID) + pkg := b.LookupImport(path) if pkg == nil { - if strings.Contains(info.ID, ".") { + if strings.Contains(path, ".") { return errors.Newf(spec.Pos(), "package %q imported but not defined in %s", - info.ID, b.ImportPath) - } else if x.index.builtinPaths[info.ID] == nil { + path, b.ImportPath) + } else if x.index.builtins == nil || x.index.builtins.importPaths[path] == nil { return errors.Newf(spec.Pos(), - "builtin package %q undefined", info.ID) + "builtin package %q undefined", path) } return nil } diff --git a/vendor/cuelang.org/go/internal/core/runtime/errors.go b/vendor/cuelang.org/go/internal/core/runtime/errors.go deleted file mode 100644 index b953a98421..0000000000 --- a/vendor/cuelang.org/go/internal/core/runtime/errors.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2020 CUE Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "cuelang.org/go/cue/ast" - "cuelang.org/go/cue/errors" - "cuelang.org/go/cue/token" -) - -var _ errors.Error = &nodeError{} - -// A nodeError is an error associated with processing an AST node. 
-type nodeError struct { - path []string // optional - n ast.Node - - errors.Message -} - -func (n *nodeError) Error() string { - return errors.String(n) -} - -func nodeErrorf(n ast.Node, format string, args ...interface{}) *nodeError { - return &nodeError{ - n: n, - Message: errors.NewMessagef(format, args...), - } -} - -func (e *nodeError) Position() token.Pos { - return e.n.Pos() -} - -func (e *nodeError) InputPositions() []token.Pos { return nil } - -func (e *nodeError) Path() []string { - return e.path -} diff --git a/vendor/cuelang.org/go/internal/core/runtime/extern.go b/vendor/cuelang.org/go/internal/core/runtime/extern.go index edb231cef3..b19658f427 100644 --- a/vendor/cuelang.org/go/internal/core/runtime/extern.go +++ b/vendor/cuelang.org/go/internal/core/runtime/extern.go @@ -74,10 +74,9 @@ func (r *Runtime) InjectImplementations(b *build.Instance, v *adt.Vertex) (errs d.errs = errors.Append(d.errs, d.addFile(f)) } - v.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range v.LeafConjuncts() { d.decorateConjunct(c.Elem(), v) - return true - }) + } return d.errs } @@ -149,23 +148,23 @@ loop: } fileAttr = a - attr := internal.ParseAttrBody(a.Pos(), body) + attr := internal.ParseAttrBody(pos, body) if attr.Err != nil { return "", pos, nil, attr.Err } k, err := attr.String(0) if err != nil { // Unreachable. 
- return "", pos, nil, errors.Newf(a.Pos(), "%s", err) + return "", pos, nil, errors.Newf(pos, "%s", err) } if k == "" { - return "", pos, nil, errors.Newf(a.Pos(), + return "", pos, nil, errors.Newf(pos, "interpreter name must be non-empty") } if kind != "" { - return "", pos, nil, errors.Newf(a.Pos(), + return "", pos, nil, errors.Newf(pos, "only one file-level extern attribute allowed per file") } @@ -292,6 +291,71 @@ func (d *externDecorator) markExternFieldAttr(kind string, decls []ast.Decl) (er return errs } +// ExtractFieldAttrsByKind finds all the attributes of the given kind +// in the given AST, parsing their bodies into [internal.Attr]. +func ExtractFieldAttrsByKind(file *ast.File, kind string) (attrsByField map[*ast.Field]*internal.Attr, errs errors.Error) { + k, _, decs, err := findExternFileAttr(file) + if err != nil || len(decs) == 0 || k != kind { + return nil, err + } + + var fieldStack []*ast.Field + + ast.Walk(&ast.File{Decls: decs}, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.Field: + fieldStack = append(fieldStack, n) + + case *ast.Attribute: + pos := n.Pos() + k, body := n.Split() + + // Support old-style and new-style extern attributes. 
+ if k != "extern" && k != kind { + break + } + + lastFieldIdx := len(fieldStack) - 1 + if lastFieldIdx < 0 { + errs = errors.Append(errs, errors.Newf(pos, "@%s attribute not associated with field", kind)) + return true + } + + f := fieldStack[lastFieldIdx] + + _, _, err := ast.LabelName(f.Label) + if err != nil { + b, _ := format.Node(f.Label) + errs = errors.Append(errs, errors.Newf(pos, "external attribute has non-concrete label %s", b)) + break + } + + if attrsByField == nil { + attrsByField = make(map[*ast.Field]*internal.Attr) + } + if _, found := attrsByField[f]; found { + errs = errors.Append(errs, errors.Newf(pos, "duplicate @%s attributes", k)) + break + } + + attrParsed := internal.ParseAttrBody(pos, body) + attrsByField[f] = &attrParsed + + return false + } + + return true + + }, func(n ast.Node) { + switch n.(type) { + case *ast.Field: + fieldStack = fieldStack[:len(fieldStack)-1] + } + }) + + return attrsByField, errs +} + func (d *externDecorator) decorateConjunct(e adt.Elem, scope *adt.Vertex) { w := walk.Visitor{Before: func(n adt.Node) bool { return d.processADTNode(n, scope) diff --git a/vendor/cuelang.org/go/internal/core/runtime/imports.go b/vendor/cuelang.org/go/internal/core/runtime/imports.go index 17d8705f13..7d0b6cfb35 100644 --- a/vendor/cuelang.org/go/internal/core/runtime/imports.go +++ b/vendor/cuelang.org/go/internal/core/runtime/imports.go @@ -20,77 +20,73 @@ import ( "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" - "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" ) type PackageFunc func(ctx adt.Runtime) (*adt.Vertex, errors.Error) -func RegisterBuiltin(importPath string, f PackageFunc) { - sharedIndex.RegisterBuiltin(importPath, f) +func RegisterBuiltin(inst *build.Instance, f PackageFunc) { + stdBuiltins.registerBuiltin(inst, f) } -func (x *index) RegisterBuiltin(importPath string, f PackageFunc) { - if x.builtinPaths == nil { - x.builtinPaths = map[string]PackageFunc{} - x.builtinShort = map[string]string{} 
+var stdBuiltins = &builtins{ + importPaths: make(map[string]*build.Instance), + instances: make(map[*build.Instance]func(*Runtime) (*adt.Vertex, error)), + shortNames: make(map[string]*build.Instance), +} + +// builtins defines a set of builtin packages (usually the CUE standard library). +type builtins struct { + importPaths map[string]*build.Instance + instances map[*build.Instance]func(*Runtime) (*adt.Vertex, error) + shortNames map[string]*build.Instance +} + +func (b *builtins) registerBuiltin(inst *build.Instance, f PackageFunc) { + b.importPaths[inst.ImportPath] = inst + b.instances[inst] = func(r *Runtime) (*adt.Vertex, error) { + return f(r) } - x.builtinPaths[importPath] = f - base := path.Base(importPath) - if _, ok := x.builtinShort[base]; ok { - importPath = "" // Don't allow ambiguous base paths. + base := path.Base(inst.ImportPath) + if _, ok := b.shortNames[base]; ok { + b.shortNames[base] = nil // Don't allow ambiguous base paths. + } else { + b.shortNames[base] = inst } - x.builtinShort[base] = importPath } -// We use a sync.OnceValue below so that cueexperiment.Init is only called -// the first time that the API is used, letting the user set $CUE_EXPERIMENT globally -// as part of their package init if they want to. -var SharedRuntime = sync.OnceValue(func() *Runtime { - r := &Runtime{index: sharedIndex} - // The version logic below is copied from [Runtime.Init]; - // consider refactoring to share the code if it gets any more complicated. - // - // TODO(mvdan,mpvl): Note that SharedRuntime follows the globally set evaluator version, - // which may be different than what was supplied via Go code for each context like - // via cuecontext.EvaluatorVersion(cuecontext.EvalV3). - // This does not cause issues between evalv2 and evalv3 as they use the same ADT, - // but future evaluator versions may not be compatible at that level. - // We should consider using one SharedRuntime per evaluator version, - // or getting rid of SharedRuntime altogether. 
- r.SetVersion(internal.DefaultVersion) - return r -}) - // BuiltinPackagePath converts a short-form builtin package identifier to its // full path or "" if this doesn't exist. -func (x *Runtime) BuiltinPackagePath(path string) string { - return x.index.shortBuiltinToPath(path) +func (x *Runtime) BuiltinPackagePath(ident string) string { + inst := x.BuiltinPackageInstance(ident) + if inst == nil { + return "" + } + return inst.ImportPath } -// sharedIndex is used for indexing builtins and any other labels common to -// all instances. -var sharedIndex = newIndex() +// BuiltinPackageInstance converts a short-form builtin package identifier to its +// build instance or nil if this doesn't exist. +func (x *Runtime) BuiltinPackageInstance(ident string) *build.Instance { + if x.index.builtins == nil { + return nil + } + return x.index.builtins.shortNames[ident] +} -// index maps conversions from label names to internal codes. -// -// All instances belonging to the same package should share this index. +// index caches the results of converting [build.Instance] +// values to ADT nodes, and also defines the namespace +// of builtin packages. type index struct { + builtins *builtins + // lock is used to guard imports-related maps. - // TODO: makes these per cuecontext. lock sync.RWMutex imports map[*adt.Vertex]*build.Instance - importsByPath map[string]*adt.Vertex importsByBuild map[*build.Instance]*adt.Vertex nextUniqueID uint64 - - // These are initialized during Go package initialization time and do not - // need to be guarded. 
- builtinPaths map[string]PackageFunc // Full path - builtinShort map[string]string // Commandline shorthand - - typeCache sync.Map // map[reflect.Type]evaluated + typeCache sync.Map // map[reflect.Type]evaluated } func (i *index) getNextUniqueID() uint64 { @@ -105,20 +101,12 @@ func (i *index) getNextUniqueID() uint64 { func newIndex() *index { i := &index{ imports: map[*adt.Vertex]*build.Instance{}, - importsByPath: map[string]*adt.Vertex{}, importsByBuild: map[*build.Instance]*adt.Vertex{}, } return i } -func (x *index) shortBuiltinToPath(id string) string { - if x == nil || x.builtinPaths == nil { - return "" - } - return x.builtinShort[id] -} - -func (r *Runtime) AddInst(path string, key *adt.Vertex, p *build.Instance) { +func (r *Runtime) AddInst(key *adt.Vertex, p *build.Instance) { r.index.lock.Lock() defer r.index.lock.Unlock() @@ -128,9 +116,6 @@ func (r *Runtime) AddInst(path string, key *adt.Vertex, p *build.Instance) { } x.imports[key] = p x.importsByBuild[p] = key - if path != "" { - x.importsByPath[path] = key - } } func (r *Runtime) GetInstanceFromNode(key *adt.Vertex) *build.Instance { @@ -147,33 +132,36 @@ func (r *Runtime) getNodeFromInstance(key *build.Instance) *adt.Vertex { return r.index.importsByBuild[key] } -func (r *Runtime) LoadImport(importPath string) *adt.Vertex { - r.index.lock.Lock() - defer r.index.lock.Unlock() - +// LoadBuiltin loads the builtin package with the given import path. 
+func (r *Runtime) LoadBuiltin(importPath string) *adt.Vertex { x := r.index - - key := x.importsByPath[importPath] - if key != nil { - return key + inst := x.builtins.importPaths[importPath] + if inst == nil { + return nil + } + if v := r.LoadInstance(inst); v != nil { + return v } + x.lock.Lock() + defer x.lock.Unlock() - if x.builtinPaths != nil { - if f := x.builtinPaths[importPath]; f != nil { - p, err := f(r) - if err != nil { - return adt.ToVertex(&adt.Bottom{Err: err}) - } - inst := &build.Instance{ - ImportPath: importPath, - PkgName: path.Base(importPath), - } - x.imports[p] = inst - x.importsByPath[importPath] = p - x.importsByBuild[inst] = p - return p - } + if v := x.importsByBuild[inst]; v != nil { + // Another goroutine got there first. + return v + } + v, err := x.builtins.instances[inst](r) + if err != nil { + // TODO why not just cache the error, or even just have the + // builtin builder return *adt.Bottom? + return adt.ToVertex(&adt.Bottom{Err: errors.Promote(err, "builtin")}) } + x.importsByBuild[inst] = v + x.imports[v] = inst + return v +} - return key +func (r *Runtime) LoadInstance(inst *build.Instance) *adt.Vertex { + r.index.lock.RLock() + defer r.index.lock.RUnlock() + return r.index.importsByBuild[inst] } diff --git a/vendor/cuelang.org/go/internal/core/runtime/resolve.go b/vendor/cuelang.org/go/internal/core/runtime/resolve.go deleted file mode 100644 index 3fe4d63436..0000000000 --- a/vendor/cuelang.org/go/internal/core/runtime/resolve.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2020 CUE Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "path" - "strconv" - - "cuelang.org/go/cue/ast" - "cuelang.org/go/cue/build" - "cuelang.org/go/cue/errors" -) - -// TODO(resolve): this is also done in compile, do we need both? -func (r *Runtime) ResolveFiles(p *build.Instance) (errs errors.Error) { - idx := r.index - - // Link top-level declarations. As top-level entries get unified, an entry - // may be linked to any top-level entry of any of the files. - allFields := map[string]ast.Node{} - for _, f := range p.Files { - if f.PackageName() == "" { - continue - } - for _, d := range f.Decls { - if f, ok := d.(*ast.Field); ok && f.Value != nil { - if ident, ok := f.Label.(*ast.Ident); ok { - allFields[ident.Name] = f.Value - } - } - } - } - for _, f := range p.Files { - err := resolveFile(idx, f, p, allFields) - errs = errors.Append(errs, err) - } - return errs -} - -func resolveFile( - idx *index, - f *ast.File, - p *build.Instance, - allFields map[string]ast.Node, -) errors.Error { - unresolved := map[string][]*ast.Ident{} - for _, u := range f.Unresolved { - unresolved[u.Name] = append(unresolved[u.Name], u) - } - fields := map[string]ast.Node{} - for _, d := range f.Decls { - if f, ok := d.(*ast.Field); ok && f.Value != nil { - if ident, ok := f.Label.(*ast.Ident); ok { - fields[ident.Name] = d - } - } - } - var errs errors.Error - - specs := []*ast.ImportSpec{} - - for _, spec := range f.Imports { - id, err := strconv.Unquote(spec.Path.Value) - if err != nil { - continue // quietly ignore the error - } - name := path.Base(id) - if imp := p.LookupImport(id); 
imp != nil { - name = imp.PkgName - } else if _, ok := idx.builtinPaths[id]; !ok { - errs = errors.Append(errs, - nodeErrorf(spec, "package %q not found", id)) - continue - } - if spec.Name != nil { - name = spec.Name.Name - } - if n, ok := fields[name]; ok { - errs = errors.Append(errs, nodeErrorf(spec, - "%s redeclared as imported package name\n"+ - "\tprevious declaration at %s", name, n.Pos())) - continue - } - fields[name] = spec - used := false - for _, u := range unresolved[name] { - used = true - u.Node = spec - } - if !used { - specs = append(specs, spec) - } - } - - // Verify each import is used. - if len(specs) > 0 { - // Find references to imports. This assumes that identifiers in labels - // are not resolved or that such errors are caught elsewhere. - ast.Walk(f, nil, func(n ast.Node) { - if x, ok := n.(*ast.Ident); ok { - // As we also visit labels, most nodes will be nil. - if x.Node == nil { - return - } - for i, s := range specs { - if s == x.Node { - specs[i] = nil - return - } - } - } - }) - - // Add errors for unused imports. - for _, spec := range specs { - if spec == nil { - continue - } - if spec.Name == nil { - errs = errors.Append(errs, nodeErrorf(spec, - "imported and not used: %s", spec.Path.Value)) - } else { - errs = errors.Append(errs, nodeErrorf(spec, - "imported and not used: %s as %s", spec.Path.Value, spec.Name)) - } - } - } - - k := 0 - for _, u := range f.Unresolved { - if u.Node != nil { - continue - } - if n, ok := allFields[u.Name]; ok { - u.Node = n - u.Scope = f - continue - } - f.Unresolved[k] = u - k++ - } - f.Unresolved = f.Unresolved[:k] - // TODO: also need to resolve types. 
- // if len(f.Unresolved) > 0 { - // n := f.Unresolved[0] - // return ctx.mkErr(newBase(n), "unresolved reference %s", n.Name) - // } - return errs -} diff --git a/vendor/cuelang.org/go/internal/core/runtime/runtime.go b/vendor/cuelang.org/go/internal/core/runtime/runtime.go index dc71dcb99b..0f18c7360f 100644 --- a/vendor/cuelang.org/go/internal/core/runtime/runtime.go +++ b/vendor/cuelang.org/go/internal/core/runtime/runtime.go @@ -32,8 +32,7 @@ type Runtime struct { // the kind in a file-level @extern(kind) attribute. interpreters map[string]Interpreter - version internal.EvaluatorVersion - simplifyValidators bool + version internal.EvaluatorVersion flags cuedebug.Config } @@ -44,15 +43,18 @@ func (r *Runtime) Settings() (internal.EvaluatorVersion, cuedebug.Config) { func (r *Runtime) ConfigureOpCtx(ctx *adt.OpContext) { ctx.Version = r.version - ctx.SimplifyValidators = r.simplifyValidators ctx.Config = r.flags } func (r *Runtime) SetBuildData(b *build.Instance, x interface{}) { + r.index.lock.Lock() + defer r.index.lock.Unlock() r.loaded[b] = x } func (r *Runtime) BuildData(b *build.Instance) (x interface{}, ok bool) { + r.index.lock.RLock() + defer r.index.lock.RUnlock() x, ok = r.loaded[b] return x, ok } @@ -81,15 +83,13 @@ func NewWithSettings(v internal.EvaluatorVersion, flags cuedebug.Config) *Runtim // before first use. func (r *Runtime) SetVersion(v internal.EvaluatorVersion) { switch v { - case internal.EvalV2, internal.EvalV3: + case internal.EvalV3: r.version = v case internal.EvalVersionUnset, internal.DefaultVersion: - cueexperiment.Init() - if cueexperiment.Flags.EvalV3 { - r.version = internal.EvalV3 - } else { - r.version = internal.EvalV2 - } + // TODO(evalv4): read from cueexperiment. + // cueexperiment.Init() + // if cueexperiment.Flags.EvalV3 { + r.version = internal.EvalV3 } } @@ -102,7 +102,6 @@ func (r *Runtime) SetDebugOptions(flags *cuedebug.Config) { // SetGlobalExperiments that apply to language evaluation. 
// It does not set the version. func (r *Runtime) SetGlobalExperiments(flags *cueexperiment.Config) { - r.simplifyValidators = !flags.KeepValidators // Do not set version as this is already set by NewWithSettings or // SetVersion. } @@ -117,11 +116,7 @@ func (r *Runtime) Init() { return } r.index = newIndex() - - // TODO: the builtin-specific instances will ultimately also not be - // shared by indexes. - r.index.builtinPaths = sharedIndex.builtinPaths - r.index.builtinShort = sharedIndex.builtinShort + r.index.builtins = stdBuiltins r.loaded = map[*build.Instance]interface{}{} diff --git a/vendor/cuelang.org/go/internal/core/subsume/structural.go b/vendor/cuelang.org/go/internal/core/subsume/structural.go index 0cc73c1254..c810f4bdcd 100644 --- a/vendor/cuelang.org/go/internal/core/subsume/structural.go +++ b/vendor/cuelang.org/go/internal/core/subsume/structural.go @@ -54,7 +54,7 @@ func (s *subsumer) node(env *adt.Environment, up int32) *adt.Vertex { for ; up != 0; up-- { env = env.Up } - return env.Vertex + return env.DerefVertex(s.ctx) } func (s *subsumer) structural(a, b adt.Conjunct) bool { diff --git a/vendor/cuelang.org/go/internal/core/subsume/subsume.go b/vendor/cuelang.org/go/internal/core/subsume/subsume.go index e2a6a7901c..0316bf2364 100644 --- a/vendor/cuelang.org/go/internal/core/subsume/subsume.go +++ b/vendor/cuelang.org/go/internal/core/subsume/subsume.go @@ -17,7 +17,6 @@ package subsume import ( "cuelang.org/go/cue/errors" - "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" ) @@ -114,6 +113,8 @@ func unifyValue(c *adt.OpContext, a, b adt.Value) adt.Value { return x } +var ErrInexact = errors.New("inexact subsumption") + func (s *subsumer) getError() (err errors.Error) { c := s.ctx // src := binSrc(token.NoPos, opUnify, gt, lt) @@ -132,7 +133,7 @@ func (s *subsumer) getError() (err errors.Error) { } err = s.errs if s.inexact { - err = errors.Wrap(err, internal.ErrInexact) + err = errors.Wrap(err, ErrInexact) } return err } diff --git 
a/vendor/cuelang.org/go/internal/core/subsume/value.go b/vendor/cuelang.org/go/internal/core/subsume/value.go index 3f13ea1d9a..f15f6efe86 100644 --- a/vendor/cuelang.org/go/internal/core/subsume/value.go +++ b/vendor/cuelang.org/go/internal/core/subsume/value.go @@ -124,7 +124,7 @@ func (s *subsumer) values(a, b adt.Value) (result bool) { case *adt.Num: y, ok := b.(*adt.Num) - return ok && x.K&y.K == y.K && test(s.ctx, x, adt.EqualOp, x, y) + return ok && x.K&y.K == y.K && adt.BinOpBool(s.ctx, x, adt.EqualOp, x, y) case *adt.String: y, ok := b.(*adt.String) @@ -264,40 +264,40 @@ func (s *subsumer) bound(x *adt.BoundValue, v adt.Value) bool { switch x.Op { case adt.GreaterThanOp: if y.Op == adt.GreaterEqualOp { - return test(ctx, x, adt.LessThanOp, xv, yv) + return adt.BinOpBool(ctx, x, adt.LessThanOp, xv, yv) } fallthrough case adt.GreaterEqualOp: if y.Op == adt.GreaterThanOp || y.Op == adt.GreaterEqualOp { - return test(ctx, x, adt.LessEqualOp, xv, yv) + return adt.BinOpBool(ctx, x, adt.LessEqualOp, xv, yv) } case adt.LessThanOp: if y.Op == adt.LessEqualOp { - return test(ctx, x, adt.GreaterThanOp, xv, yv) + return adt.BinOpBool(ctx, x, adt.GreaterThanOp, xv, yv) } fallthrough case adt.LessEqualOp: if y.Op == adt.LessThanOp || y.Op == adt.LessEqualOp { - return test(ctx, x, adt.GreaterEqualOp, xv, yv) + return adt.BinOpBool(ctx, x, adt.GreaterEqualOp, xv, yv) } case adt.NotEqualOp: switch y.Op { case adt.NotEqualOp: - return test(ctx, x, adt.EqualOp, xv, yv) + return adt.BinOpBool(ctx, x, adt.EqualOp, xv, yv) case adt.GreaterEqualOp: - return test(ctx, x, adt.LessThanOp, xv, yv) + return adt.BinOpBool(ctx, x, adt.LessThanOp, xv, yv) case adt.GreaterThanOp: - return test(ctx, x, adt.LessEqualOp, xv, yv) + return adt.BinOpBool(ctx, x, adt.LessEqualOp, xv, yv) case adt.LessThanOp: - return test(ctx, x, adt.GreaterEqualOp, xv, yv) + return adt.BinOpBool(ctx, x, adt.GreaterEqualOp, xv, yv) case adt.LessEqualOp: - return test(ctx, x, adt.GreaterThanOp, xv, yv) + return 
adt.BinOpBool(ctx, x, adt.GreaterThanOp, xv, yv) } case adt.MatchOp, adt.NotMatchOp: // these are just approximations if y.Op == x.Op { - return test(ctx, x, adt.EqualOp, xv, yv) + return adt.BinOpBool(ctx, x, adt.EqualOp, xv, yv) } default: @@ -306,13 +306,7 @@ func (s *subsumer) bound(x *adt.BoundValue, v adt.Value) bool { } case *adt.Num, *adt.String, *adt.Bool: - return test(ctx, x, x.Op, y, x.Value) + return adt.BinOpBool(ctx, x, x.Op, y, x.Value) } return false } - -func test(ctx *adt.OpContext, src adt.Node, op adt.Op, gt, lt adt.Value) bool { - x := adt.BinOp(ctx, src, op, gt, lt) - b, ok := x.(*adt.Bool) - return ok && b.B -} diff --git a/vendor/cuelang.org/go/internal/core/subsume/vertex.go b/vendor/cuelang.org/go/internal/core/subsume/vertex.go index a4ede21c2d..67531f3823 100644 --- a/vendor/cuelang.org/go/internal/core/subsume/vertex.go +++ b/vendor/cuelang.org/go/internal/core/subsume/vertex.go @@ -16,10 +16,9 @@ package subsume import ( "fmt" + "slices" - "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" - "cuelang.org/go/internal/core/export" ) // Notes: @@ -29,216 +28,6 @@ import ( // // TODO(perf): use merge sort where possible. func (s *subsumer) vertices(x, y *adt.Vertex) bool { - if s.ctx.Version == internal.DevVersion { - return s.verticesDev(x, y) - } - if x == y { - return true - } - if x.ArcType < y.ArcType { - return false - } - - if s.Defaults { - y = y.Default() - } - - if b := y.Bottom(); b != nil { - // If the value is incomplete, the error is not final. So either check - // structural equivalence or return an error. - return !b.IsIncomplete() - } - - ctx := s.ctx - - final := y.IsData() || s.Final - - switch v := x.BaseValue.(type) { - case *adt.Bottom: - return false - - case *adt.ListMarker: - if !y.IsList() { - s.errf("list does not subsume %v (type %s)", y, y.Kind()) - return false - } - if !s.listVertices(x, y) { - return false - } - // TODO: allow other arcs alongside list arc. 
- return true - - case *adt.StructMarker: - _, ok := y.BaseValue.(*adt.StructMarker) - if !ok { - return false - } - - case adt.Value: - if !s.values(v, y.Value()) { - return false - } - - // Embedded scalars could still have arcs. - if final { - return true - } - - default: - panic(fmt.Sprintf("unexpected type %T", v)) - } - - xClosed := x.IsClosedStruct() && !s.IgnoreClosedness - // TODO: this should not close for taking defaults. Do a more principled - // makeover of this package before making it public, though. - yClosed := s.Final || s.Defaults || - (y.IsClosedStruct() && !s.IgnoreClosedness) - - if xClosed && !yClosed && !final { - return false - } - - types := x.OptionalTypes() - if !final && !s.IgnoreOptional && types&(adt.HasPattern|adt.HasAdditional) != 0 { - // TODO: there are many cases where pattern constraints can be checked. - s.inexact = true - return false - } - - // All arcs in x must exist in y and its values must subsume. - xFeatures := export.VertexFeaturesUnsorted(x) - for _, f := range xFeatures { - if s.Final && !f.IsRegular() { - continue - } - - a := x.Lookup(f) - aOpt := false - if a == nil { - // x.f is optional - if s.IgnoreOptional { - continue - } - - a = &adt.Vertex{Label: f} - x.MatchAndInsert(ctx, a) - a.Finalize(ctx) - - // If field a is optional and has value top, neither the - // omission of the field nor the field defined with any value - // may cause unification to fail. - if a.Kind() == adt.TopKind { - continue - } - - aOpt = true - } else if a.IsConstraint() { - if s.IgnoreOptional { - continue - } - // If field a is optional and has value top, neither the - // omission of the field nor the field defined with any value - // may cause unification to fail. 
- if a.Kind() == adt.TopKind { - continue - } - aOpt = true - } - - b := y.Lookup(f) - if b == nil { - // y.f is optional - if !aOpt { - s.errf("required field is optional in subsumed value: %v", f) - return false - } - - // If f is undefined for y and if y is closed, the field is - // implicitly defined as _|_ and thus subsumed. Technically, this is - // even true if a is not optional, but in that case it means that y - // is invalid, so return false regardless - if !y.Accept(ctx, f) || y.IsData() || s.Final { - continue - } - - b = &adt.Vertex{Label: f} - y.MatchAndInsert(ctx, b) - b.Finalize(ctx) - } - - if s.values(a, b) { - continue - } - - s.missing = f - s.gt = a - s.lt = y - - s.errf("field %v not present in %v", f, y) - return false - } - - if xClosed && !yClosed && !s.Final { - s.errf("closed struct does not subsume open struct") - return false - } - - yFeatures := export.VertexFeaturesUnsorted(y) -outer: - for _, f := range yFeatures { - if s.Final && !f.IsRegular() { - continue - } - - for _, g := range xFeatures { - if g == f { - // already validated - continue outer - } - } - - b := y.Lookup(f) - if b == nil { - if s.IgnoreOptional || s.Final { - continue - } - - b = &adt.Vertex{Label: f} - y.MatchAndInsert(ctx, b) - } else if b.IsConstraint() { - if s.IgnoreOptional || s.Final { - continue - } - } - - if !x.Accept(ctx, f) { - if s.Profile.IgnoreClosedness { - continue - } - s.errf("field not allowed in closed struct: %v", f) - return false - } - - a := &adt.Vertex{Label: f} - x.MatchAndInsert(ctx, a) - if !a.HasConjuncts() { - // It is accepted and has no further constraints, so all good. - continue - } - - a.Finalize(ctx) - b.Finalize(ctx) - - if !s.vertices(a, b) { - return false - } - } - - return true -} - -// verticesDev replaces vertices with the implementation of the new evaluator. 
-func (s *subsumer) verticesDev(x, y *adt.Vertex) bool { if x == y { return true } @@ -435,7 +224,7 @@ outer: if apc == nil { return true } - if y.IsClosedList() || y.IsClosedList() || final { + if y.IsClosedStruct() || y.IsClosedList() || final { // This is a special case where know that any allowed optional field // in a must be bottom in y, which is strictly more specific. return true @@ -483,8 +272,8 @@ func (s *subsumer) listVertices(x, y *adt.Vertex) bool { return false } - xElems := x.Elems() - yElems := y.Elems() + xElems := slices.Collect(x.Elems()) + yElems := slices.Collect(y.Elems()) switch { case len(xElems) == len(yElems): diff --git a/vendor/cuelang.org/go/internal/core/toposort/graph.go b/vendor/cuelang.org/go/internal/core/toposort/graph.go index b2183f527d..082a262a84 100644 --- a/vendor/cuelang.org/go/internal/core/toposort/graph.go +++ b/vendor/cuelang.org/go/internal/core/toposort/graph.go @@ -16,6 +16,7 @@ package toposort import ( "cmp" + "maps" "slices" "cuelang.org/go/internal/core/adt" @@ -32,11 +33,10 @@ type Graph struct { type Node struct { Feature adt.Feature Outgoing Nodes - Incoming Nodes structMeta *structMeta // temporary state for calculating the Strongly Connected // Components of a graph. - sccNodeState *sccNodeState + sccNodeState sccNodeState position int } @@ -108,7 +108,6 @@ func (builder *GraphBuilder) AddEdge(from, to adt.Feature) { fromNode := builder.EnsureNode(from) toNode := builder.EnsureNode(to) fromNode.Outgoing = append(fromNode.Outgoing, toNode) - toNode.Incoming = append(toNode.Incoming, fromNode) } // Ensure that a node for this feature exists. 
This is necessary for @@ -123,11 +122,8 @@ func (builder *GraphBuilder) EnsureNode(feature adt.Feature) *Node { } func (builder *GraphBuilder) Build() *Graph { - nodesByFeature := builder.nodesByFeature - nodes := make(Nodes, 0, len(nodesByFeature)) - for _, node := range nodesByFeature { - nodes = append(nodes, node) - } + nodes := make(Nodes, 0, len(builder.nodesByFeature)) + nodes = slices.AppendSeq(nodes, maps.Values(builder.nodesByFeature)) return &Graph{nodes: nodes} } diff --git a/vendor/cuelang.org/go/internal/core/toposort/scc.go b/vendor/cuelang.org/go/internal/core/toposort/scc.go index 9f8f3ef707..8ff1cc43f3 100644 --- a/vendor/cuelang.org/go/internal/core/toposort/scc.go +++ b/vendor/cuelang.org/go/internal/core/toposort/scc.go @@ -39,11 +39,6 @@ type StronglyConnectedComponent struct { // The components returned are topologically sorted (forwards), and // form a DAG (this is the "condensation graph"). func (graph *Graph) StronglyConnectedComponents() []*StronglyConnectedComponent { - nodeStates := make([]sccNodeState, len(graph.nodes)) - for i, node := range graph.nodes { - node.sccNodeState = &nodeStates[i] - } - scc := &sccFinderState{} for _, node := range graph.nodes { if !node.sccNodeState.visited { @@ -51,8 +46,9 @@ func (graph *Graph) StronglyConnectedComponents() []*StronglyConnectedComponent } } + // sccNodeState is temporary; clear it for the next call. 
for _, node := range graph.nodes { - node.sccNodeState = nil + node.sccNodeState = sccNodeState{} } components := scc.components @@ -81,7 +77,7 @@ func (scc *sccFinderState) findSCC(cur *Node) { num := scc.counter scc.counter++ - curScc := cur.sccNodeState + curScc := &cur.sccNodeState curScc.lowLink = num curScc.index = num curScc.visited = true @@ -90,7 +86,7 @@ func (scc *sccFinderState) findSCC(cur *Node) { scc.stack = append(scc.stack, cur) for _, next := range cur.Outgoing { - nextScc := next.sccNodeState + nextScc := &next.sccNodeState if !nextScc.visited { scc.findSCC(next) curScc.lowLink = min(curScc.lowLink, nextScc.lowLink) @@ -112,7 +108,7 @@ func (scc *sccFinderState) findSCC(cur *Node) { for i := len(scc.stack) - 1; i >= 0; i-- { nodeN := scc.stack[i] - nodeNScc := nodeN.sccNodeState + nodeNScc := &nodeN.sccNodeState nodeNScc.onStack = false nodeNScc.component = component componentNodes = append(componentNodes, nodeN) diff --git a/vendor/cuelang.org/go/internal/core/toposort/vertex.go b/vendor/cuelang.org/go/internal/core/toposort/vertex.go index 74b6046dbb..62f7171249 100644 --- a/vendor/cuelang.org/go/internal/core/toposort/vertex.go +++ b/vendor/cuelang.org/go/internal/core/toposort/vertex.go @@ -145,7 +145,7 @@ import ( ) type structMeta struct { - structInfo *adt.StructInfo + structInfo adt.StructInfo pos token.Pos // Should this struct be considered to be part of an explicit @@ -154,10 +154,7 @@ type structMeta struct { } func (sMeta *structMeta) String() string { - var sl *adt.StructLit - if sMeta.structInfo != nil { - sl = sMeta.structInfo.StructLit - } + sl := sMeta.structInfo.StructLit return fmt.Sprintf("{%p sl:%p %v (explicit? %v)}", sMeta, sl, sMeta.pos, sMeta.isExplicit) } @@ -217,7 +214,7 @@ func (sm *structMeta) hasDynamic(dynFieldsMap map[*adt.DynamicField][]adt.Featur // we look at the vertex's conjuncts. 
If a conjunct is a binary // expression &, then we look up the structMeta for the arguments to // the binary expression, and mark them as explicit unification. -func analyseStructs(v *adt.Vertex, builder *GraphBuilder) []*structMeta { +func analyseStructs(v *adt.Vertex, builder *GraphBuilder) []structMeta { structInfos := v.Structs // Note that it's important that nodeToStructMetas avoids duplicate entries, // which cause significant slowness for some large configs. @@ -231,22 +228,21 @@ func analyseStructs(v *adt.Vertex, builder *GraphBuilder) []*structMeta { nodeToStructMetas[node] = m return m } - structMetas := make([]*structMeta, 0, len(structInfos)) + + structMetas := make([]structMeta, len(structInfos)) // Create all the structMetas and map to them from a StructInfo's // StructLit, and all its internal Decls. Initial attempt at // recording a position, which will be correct only for direct use // of literal structs in the calculation of vertex v. + metaIdx := 0 for _, s := range structInfos { sl := s.StructLit - sMeta := &structMeta{ - structInfo: s, - } - structMetas = append(structMetas, sMeta) + sMeta := &structMetas[metaIdx] + metaIdx++ + sMeta.structInfo = s + sMeta.pos = adt.Pos(sl) - if src := sl.Source(); src != nil { - sMeta.pos = src.Pos() - } structMetaMap(sl)[sMeta] = true for _, decl := range sl.Decls { structMetaMap(decl)[sMeta] = true @@ -258,22 +254,23 @@ func analyseStructs(v *adt.Vertex, builder *GraphBuilder) []*structMeta { // uncover the position of the earliest reference. 
for _, arc := range v.Arcs { builder.EnsureNode(arc.Label) - arc.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range arc.LeafConjuncts() { field := c.Field() debug("self arc conjunct field %p :: %T, expr %p :: %T (%v)\n", field, field, c.Expr(), c.Expr(), c.Expr().Source()) sMetas, found := nodeToStructMetas[field] if !found { - return true + continue } if src := field.Source(); src != nil { + pos := src.Pos() for sMeta := range sMetas { - sMeta.pos = src.Pos() + sMeta.pos = pos } } refs := c.CloseInfo.CycleInfo.Refs if refs == nil { - return true + continue } debug(" ref %p :: %T (%v)\n", refs.Ref, refs.Ref, refs.Ref.Source().Pos()) @@ -283,25 +280,22 @@ func analyseStructs(v *adt.Vertex, builder *GraphBuilder) []*structMeta { refs.Ref, refs.Ref, refs.Ref.Source().Pos()) } maps.Insert(structMetaMap(refs.Ref), maps.All(sMetas)) - if pos := refs.Ref.Source().Pos(); pos != token.NoPos { + if pos := refs.Ref.Source().Pos(); pos.IsValid() { for sMeta := range nodeToStructMetas[refs.Ref] { sMeta.pos = pos } } - - return true - }) + } } // Explore our own conjuncts, and the decls from our StructList, to // find explicit unifications, and mark structMetas accordingly. 
var worklist []adt.Expr - v.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range v.LeafConjuncts() { debug("self conjunct field %p :: %T, expr %p :: %T\n", c.Field(), c.Field(), c.Expr(), c.Expr()) worklist = append(worklist, c.Expr()) - return true - }) + } for _, si := range structInfos { for _, decl := range si.StructLit.Decls { if expr, ok := decl.(adt.Expr); ok { @@ -335,15 +329,14 @@ func analyseStructs(v *adt.Vertex, builder *GraphBuilder) []*structMeta { func dynamicFieldsFeatures(v *adt.Vertex) map[*adt.DynamicField][]adt.Feature { var m map[*adt.DynamicField][]adt.Feature for _, arc := range v.Arcs { - arc.VisitLeafConjuncts(func(c adt.Conjunct) bool { + for c := range arc.LeafConjuncts() { if dynField, ok := c.Field().(*adt.DynamicField); ok { if m == nil { m = make(map[*adt.DynamicField][]adt.Feature) } m[dynField] = append(m[dynField], arc.Label) } - return true - }) + } } return m } @@ -377,7 +370,7 @@ type vertexFeatures struct { dynFieldsMap map[*adt.DynamicField][]adt.Feature } -func (vf *vertexFeatures) compareStructMeta(a, b *structMeta) int { +func (vf *vertexFeatures) compareStructMeta(a, b structMeta) int { if c := a.pos.Compare(b.pos); c != 0 { return c } @@ -410,13 +403,16 @@ func VertexFeatures(ctx *adt.OpContext, v *adt.Vertex) []adt.Feature { var batches structMetaBatches var batch structMetaBatch - for _, root := range roots { - if len(batch) == 0 || - (batch[0].pos == root.pos && !root.hasDynamic(dynFieldsMap)) { - batch = append(batch, root) - } else { - batches.appendBatch(batch) - batch = structMetaBatch{root} + for i := range roots { + root := &roots[i] + for range 1 + root.structInfo.Repeats { + if len(batch) == 0 || + (batch[0].pos == root.pos && !root.hasDynamic(dynFieldsMap)) { + batch = append(batch, root) + } else { + batches.appendBatch(batch) + batch = structMetaBatch{root} + } } } batches.appendBatch(batch) @@ -485,7 +481,7 @@ func (vf *vertexFeatures) addEdges(previous []adt.Feature, sMeta *structMeta) [] // 
same field within the same structLit debug(" skipping 1\n") - } else if exists && !sMeta.isExplicit && sMeta.pos != token.NoPos && + } else if exists && !sMeta.isExplicit && sMeta.pos.IsValid() && node.structMeta != nil && node.structMeta.pos.Filename() == filename { // same field within the same file during implicit unification diff --git a/vendor/cuelang.org/go/internal/core/walk/walk.go b/vendor/cuelang.org/go/internal/core/walk/walk.go index fb4ac6bf3e..0dade2c7f9 100644 --- a/vendor/cuelang.org/go/internal/core/walk/walk.go +++ b/vendor/cuelang.org/go/internal/core/walk/walk.go @@ -123,6 +123,9 @@ func (w *Visitor) node(n adt.Node) { w.node(x.X) w.node(x.Y) + case *adt.OpenExpr: + w.node(x.X) + case *adt.CallExpr: w.node(x.Fun) for _, arg := range x.Args { @@ -164,6 +167,9 @@ func (w *Visitor) node(n adt.Node) { w.node(c) } w.node(adt.ToExpr(x.Value)) + if x.Fallback != nil { + w.node(x.Fallback) + } case *adt.ForClause: w.feature(x.Key, x) @@ -176,6 +182,14 @@ func (w *Visitor) node(n adt.Node) { w.feature(x.Label, x) w.node(x.Expr) + case *adt.TryClause: + if x.Expr != nil { + // Assignment form + w.feature(x.Label, x) + w.node(x.Expr) + } + // Struct form: body is in Comprehension.Value, walked separately + case *adt.ValueClause: default: diff --git a/vendor/cuelang.org/go/internal/cueconfig/config.go b/vendor/cuelang.org/go/internal/cueconfig/config.go index c2145e210e..b8ed40c1cb 100644 --- a/vendor/cuelang.org/go/internal/cueconfig/config.go +++ b/vendor/cuelang.org/go/internal/cueconfig/config.go @@ -31,8 +31,6 @@ type Logins struct { type RegistryLogin struct { // These fields mirror [oauth2.Token]. // We don't directly reference the type so we can be in control of our file format. - // Note that Expiry is a pointer, so omitempty can work as intended. 
- // TODO(mvdan): drop the pointer once we can use json's omitzero: https://go.dev/issue/45669 // Note that we store Expiry at rest as an absolute timestamp in UTC, // rather than the ExpiresIn field following the RFC's wire format, // a duration in seconds relative to the current time which is not useful at rest. @@ -43,7 +41,7 @@ type RegistryLogin struct { RefreshToken string `json:"refresh_token,omitempty"` - Expiry *time.Time `json:"expiry,omitempty"` + Expiry time.Time `json:"expiry,omitzero"` } func LoginConfigPath(getenv func(string) string) (string, error) { @@ -201,25 +199,19 @@ func RegistryOAuthConfig(host modresolve.Host) oauth2.Config { // changed between reading and writing the file. func TokenFromLogin(login RegistryLogin) *oauth2.Token { - tok := &oauth2.Token{ + return &oauth2.Token{ AccessToken: login.AccessToken, TokenType: login.TokenType, RefreshToken: login.RefreshToken, + Expiry: login.Expiry, } - if login.Expiry != nil { - tok.Expiry = *login.Expiry - } - return tok } func LoginFromToken(tok *oauth2.Token) RegistryLogin { - login := RegistryLogin{ + return RegistryLogin{ AccessToken: tok.AccessToken, TokenType: tok.TokenType, RefreshToken: tok.RefreshToken, + Expiry: tok.Expiry, } - if !tok.Expiry.IsZero() { - login.Expiry = &tok.Expiry - } - return login } diff --git a/vendor/cuelang.org/go/internal/cueexperiment/exp.go b/vendor/cuelang.org/go/internal/cueexperiment/exp.go index de145bbdb2..03047de98d 100644 --- a/vendor/cuelang.org/go/internal/cueexperiment/exp.go +++ b/vendor/cuelang.org/go/internal/cueexperiment/exp.go @@ -1,9 +1,10 @@ package cueexperiment import ( + "fmt" + "os" + "strings" "sync" - - "cuelang.org/go/internal/envflag" ) // Flags holds the set of global CUE_EXPERIMENT flags. It is initialized by Init. @@ -11,74 +12,68 @@ var Flags Config // Config holds the set of known CUE_EXPERIMENT flags. // -// When adding, deleting, or modifying entries below, -// update cmd/cue/cmd/help.go as well for `cue help environment`. 
+// Keep the fields sorted from oldest to newest based on when the experiment was added. type Config struct { - // EvalV3 enables the new CUE evaluator, addressing performance issues - // and bringing better algorithms for disjunctions, closedness, and cycles. - // - // This experiment was introduced in v0.9.0 (2024-06), - // and enabled by default in v0.13.0 (2025-05). - EvalV3 bool `envflag:"default:true"` - - // CmdReferencePkg requires referencing an imported tool package to declare tasks. - // Otherwise, declaring tasks via "$id" or "kind" string fields is allowed. - // - // This experiment was introduced in v0.13.0 (2025-05), - // and enabled by default in the upcoming v0.14 release. - CmdReferencePkg bool `envflag:"default:true"` - - // KeepValidators prevents validators from simplifying into concrete values, - // even if their concrete value could be derived, such as `>=1 & <=1` to `1`. - // See the proposal at https://cuelang.org/discussion/3775. - // - // This experiment is introduced in the upcoming v0.14 release, already on by default. - KeepValidators bool `envflag:"default:true"` + // The flags in this first section describe active experiments. + // Sort from oldest to newest based on when they were introduced as a `preview`. - // The flags below describe completed experiments; they can still be set + // The flags in this second section describe completed experiments; they can still be set // as long as the value aligns with the final behavior once the experiment finished. // Breaking users who set such a flag seems unnecessary, // and it simplifies using the same experiment flags across a range of CUE versions. + // Sort from oldest to newest based on when they were completed as `stable` or `withdrawn`. // Modules enables support for the modules and package management proposal // as described in https://cuelang.org/discussion/2939. 
- // - // This experiment was introduced in v0.8.0 (2024-03), - // enabled by default in v0.9.0 (2024-06), - // and deprecated in v0.11.0 (2024-11). - Modules bool `envflag:"deprecated,default:true"` + Modules bool `experiment:"preview:v0.8.0,default:v0.9.0,stable:v0.11.0"` // YAMLV3Decoder swaps the old internal/third_party/yaml decoder with the new // decoder implemented in internal/encoding/yaml on top of yaml.v3. - // - // This experiment was introduced in v0.9.0 (2024-06), already on by default, - // and was deprecated in v0.11.0 (2024-11). - YAMLV3Decoder bool `envflag:"deprecated,default:true"` + YAMLV3Decoder bool `experiment:"preview:v0.9.0,default:v0.9.0,stable:v0.11.0"` // DecodeInt64 changes [cuelang.org/go/cue.Value.Decode] to choose - // `int64` rather than `int` as the default type for CUE integer values + // 'int64' rather than 'int' as the default type for CUE integer values // to ensure consistency with 32-bit platforms. - // - // This experiment was introduced in v0.11.0 (2024-11), - // enabled by default in v0.12.0 (2025-01), - // and was deprecated in v0.13.0 (2025-05). - DecodeInt64 bool `envflag:"deprecated,default:true"` + DecodeInt64 bool `experiment:"preview:v0.11.0,default:v0.12.0,stable:v0.13.0"` // Embed enables support for embedded data files as described in // https://cuelang.org/discussion/3264. - // - // This experiment was introduced in v0.10.0 (2024-08), - // enabled by default in v0.12.0 (2025-01), - // and deprecated in the upcoming v0.14 release. - Embed bool `envflag:"deprecated,default:true"` + Embed bool `experiment:"preview:v0.10.0,default:v0.12.0,stable:v0.14.0"` // TopoSort enables topological sorting of struct fields. // Provide feedback via https://cuelang.org/issue/3558. - // - // This experiment was introduced in v0.11.0 (2024-11) - // enabled by default in v0.12.0 (2025-01), - // and deprecated in the upcoming v0.14 release. 
- TopoSort bool `envflag:"deprecated,default:true"` + TopoSort bool `experiment:"preview:v0.11.0,default:v0.12.0,stable:v0.14.0"` + + // EvalV3 enables the new CUE evaluator, addressing performance issues + // and bringing better algorithms for disjunctions, closedness, and cycles. + EvalV3 bool `experiment:"preview:v0.9.0,default:v0.13.0,stable:v0.15.0"` + + // KeepValidators prevents validators from simplifying into concrete values, + // even if their concrete value could be derived, such as '>=1 & <=1' to '1'. + // Proposal: https://cuelang.org/discussion/3775. + // Spec change: https://cuelang.org/cl/1217013 + // Spec change: https://cuelang.org/cl/1217014 + KeepValidators bool `experiment:"preview:v0.14.0,default:v0.14.0,stable:v0.15.0"` + + // CmdReferencePkg requires referencing an imported tool package to declare tasks. + // Otherwise, declaring tasks via "$id" or "kind" string fields is allowed. + CmdReferencePkg bool `experiment:"preview:v0.13.0,default:v0.14.0,stable:v0.16.0"` +} + +// initExperimentFlags initializes the experiment flags by processing both +// the experiment lifecycle and environment variable overrides. +func initExperimentFlags() error { + a := strings.Split(os.Getenv("CUE_EXPERIMENT"), ",") + experiments, err := parseEnvExperiments(a...) + if err != nil { + return err + } + + // First, set defaults based on experiment lifecycle + if err := parseConfig(&Flags, "", experiments); err != nil { + return fmt.Errorf("error in CUE_EXPERIMENT: %w", err) + } + return nil } // Init initializes Flags. 
Note: this isn't named "init" because we @@ -90,6 +85,4 @@ func Init() error { return initOnce() } -var initOnce = sync.OnceValue(func() error { - return envflag.Init(&Flags, "CUE_EXPERIMENT") -}) +var initOnce = sync.OnceValue(initExperimentFlags) diff --git a/vendor/cuelang.org/go/internal/cueexperiment/file.go b/vendor/cuelang.org/go/internal/cueexperiment/file.go index 16fa5bae10..12335a3aee 100644 --- a/vendor/cuelang.org/go/internal/cueexperiment/file.go +++ b/vendor/cuelang.org/go/internal/cueexperiment/file.go @@ -15,9 +15,13 @@ package cueexperiment import ( + "fmt" "maps" + "reflect" "slices" "strings" + + "cuelang.org/go/internal/mod/semver" ) // This contains experiments that are configured per file. @@ -27,9 +31,9 @@ import ( // a CUE file. When an experiment is first introduced, it is disabled by // default. // -// since: the version from when the experiment was introduced. -// accepted: the version from when it is permanently set to true. -// rejected: results in an error if the user attempts to use the flag. +// preview: the version from when the experiment was introduced. +// stable: the version from when it is permanently set to true. +// withdrawn: results in an error if the user attempts to use the flag. type File struct { // version is the module version of the file that was compiled. version string @@ -43,14 +47,47 @@ type File struct { // // TODO: we could later use it for enabling testing features, such as // testing-specific builtins. - Testing bool `experiment:"since:v0.13.0"` + Testing bool `experiment:"preview:v0.13.0"` + + // Accepted_ is for testing purposes only. It should be removed when an + // experiment is accepted and can be used to test this feature instead. + Accepted_ bool `experiment:"preview:v0.13.0,stable:v0.15.0"` // StructCmp enables comparison of structs. This also defines the == - // operator to be defined on all values. For instance, comparing `1` and + // operator to be defined on all values. 
For instance, comparing 1 and // "foo" will return false, whereas previously it would return an error. // - // Proposal was defined in https://cuelang.org/issue/2358. - StructCmp bool `experiment:"since:v0.14.0"` + // Proposal: https://cuelang.org/issue/2583 + // Spec change: https://cuelang.org/cl/1217013 + // Spec change: https://cuelang.org/cl/1217014 + StructCmp bool `experiment:"preview:v0.14.0,stable:v0.15.0"` + + // ExplicitOpen enables the postfix ... operator to explicitly open + // closed structs, allowing additional fields to be added. + // + // Proposal: https://cuelang.org/issue/4032 + // Spec change: https://cuelang.org/cl/1221642 + // Requires cue fix when upgrading + ExplicitOpen bool `experiment:"preview:v0.15.0"` + + // AliasV2 enables the use of 'self' identifier to refer to the + // enclosing struct and enables the postfix alias syntax (~X and ~(K,V)). + // The file where this experiment is enabled disallows the use of old prefix + // alias syntax (X=). + // + // Proposal: https://cuelang.org/issue/4014 + // Spec change: https://cuelang.org/cl/1222377 + // Requires cue fix when upgrading + AliasV2 bool `experiment:"preview:v0.15.0"` + + // Try enables the try clause and optional reference markers (?). + // The try clause allows conditional field inclusion based on whether + // optional references resolve. The ? marker on references (a?, a.b?, a[i]?) + // is only valid within a try context. + // + // Proposal: https://cuelang.org/issue/4019 + // Spec change: https://cuelang.org/cl/1231444 + Try bool `experiment:"preview:v0.16.0"` } // LanguageVersion returns the language version of the file or "" if no language @@ -76,3 +113,215 @@ func NewFile(version string, experiments ...string) (*File, error) { } return f, nil } + +// IsPreview returns true if the experiment exists and can be used +// for the given version. 
+func IsPreview(experiment, version string) bool { + return isPreview(experiment, version, File{}) +} + +func isPreview(experiment, version string, t any) bool { + expInfo := getExperimentInfoT(experiment, t) + if expInfo == nil { + return false + } + return expInfo.isValidForVersion(version) +} + +func (e *experimentInfo) isValidForVersion(version string) bool { + // Check if experiment is available for this version + if version != "" && e.Preview != "" { + if semver.Compare(version, e.Preview) < 0 { + return false + } + } + + // Check if experiment is rejected for this version + if e.Withdrawn != "" { + if version == "" || semver.Compare(version, e.Withdrawn) >= 0 { + return false + } + } + + return true +} + +// IsStable returns true if the experiment is stable (no longer +// experimental) for the given version. +func IsStable(experiment, version string) bool { + expInfo := getExperimentInfo(experiment) + if expInfo == nil { + return false + } + return expInfo.isStableForVersion(version) +} + +func (e *experimentInfo) isStableForVersion(version string) bool { + if e.Stable == "" { + return false + } + return version == "" || semver.Compare(version, e.Stable) >= 0 +} + +// CanApplyFix validates whether an experiment fix can be applied +// to a file with the given version and existing experiments. 
+func CanApplyFix(experiment, version, target string) error { + return canApplyExperimentFix(experiment, version, target, File{}) +} + +func canApplyExperimentFix(experiment, version, target string, t any) error { + expInfo := getExperimentInfoT(experiment, t) + if expInfo == nil { + return fmt.Errorf("unknown experiment %q", experiment) + } + + // Check if experiment is valid for this version + if !expInfo.isValidForVersion(target) { + if version != "" && expInfo.Preview != "" && + semver.Compare(target, expInfo.Preview) < 0 { + const msg = "experiment %q requires language version %s or later, have %s" + return fmt.Errorf(msg, experiment, expInfo.Preview, version) + } + + if expInfo.Withdrawn != "" { + if version == "" || semver.Compare(target, expInfo.Withdrawn) >= 0 { + const msg = "experiment %q is withdrawn in language version %s" + return fmt.Errorf(msg, experiment, expInfo.Withdrawn) + } + } + } + + // Check if experiment is already stable (cannot fix) + if expInfo.isStableForVersion(version) { + const msg = "experiment %q is already stable as of language version %s - cannot apply fix" + return fmt.Errorf(msg, experiment, expInfo.Stable) + } + + return nil +} + +// GetActive returns all experiments that are active (can be enabled) +// for the given version, but not yet accepted. 
+func GetActive(origVersion, targetVersion string) []string { + return getActiveExperiments(origVersion, targetVersion, File{}) +} + +func getActiveExperiments(origVersion, targetVersion string, t any) []string { + var active []string + + ft := reflect.TypeOf(t) + for i := 0; i < ft.NumField(); i++ { + field := ft.Field(i) + tagStr, ok := field.Tag.Lookup("experiment") + if !ok { + continue + } + name := strings.ToLower(field.Name) + expInfo := parseExperimentTag(tagStr) + + // Skip if not yet available for this version + if targetVersion != "" && expInfo.Preview != "" && semver.Compare(targetVersion, expInfo.Preview) < 0 { + continue + } + + // Skip if already stable + if expInfo.Stable != "" && (targetVersion == "" || semver.Compare(origVersion, expInfo.Stable) >= 0) { + continue + } + + // Skip if withdrawn + if expInfo.Withdrawn != "" { + continue + } + + active = append(active, name) + } + + slices.Sort(active) + return active +} + +// GetUpgradable returns all experiments that are stable +// (possibly in later versions), that can be upgraded from the current +// version (must be lower than stable) to the desired version. 
+func GetUpgradable(origVersion, targetVersion string) []string { + return getUpgradeExperiments(origVersion, targetVersion, File{}) +} + +func getUpgradeExperiments(origVersion, targetVersion string, t any) []string { + var accepted []string + if origVersion == "" { + panic("original version is empty") + } + + ft := reflect.TypeOf(t) + for i := 0; i < ft.NumField(); i++ { + field := ft.Field(i) + tagStr, ok := field.Tag.Lookup("experiment") + if !ok { + continue + } + name := strings.ToLower(field.Name) + expInfo := parseExperimentTag(tagStr) + + if expInfo.Stable != "" && + semver.Compare(targetVersion, expInfo.Preview) >= 0 && + semver.Compare(origVersion, expInfo.Stable) < 0 { + accepted = append(accepted, name) + } + } + + slices.Sort(accepted) + return accepted +} + +// ShouldRemoveAttribute returns true if the experiment attribute +// should be removed because the experiment is stable for the given version. +func ShouldRemoveAttribute(experiment, version string) bool { + return IsStable(experiment, version) +} + +// experimentInfo holds parsed experiment lifecycle information +type experimentInfo struct { + Preview string + Stable string + Withdrawn string +} + +// getExperimentInfo returns experiment lifecycle info for the given experiment name +func getExperimentInfo(experiment string) *experimentInfo { + return getExperimentInfoT(experiment, File{}) +} + +func getExperimentInfoT(experiment string, t any) *experimentInfo { + ft := reflect.TypeOf(t) + for i := 0; i < ft.NumField(); i++ { + field := ft.Field(i) + if strings.EqualFold(field.Name, experiment) { + if tagStr, ok := field.Tag.Lookup("experiment"); ok { + return parseExperimentTag(tagStr) + } + } + } + return nil +} + +// parseExperimentTag parses experiment tag string into experimentInfo +func parseExperimentTag(tagStr string) *experimentInfo { + info := &experimentInfo{} + for f := range strings.SplitSeq(tagStr, ",") { + key, rest, _ := strings.Cut(f, ":") + if !semver.IsValid(rest) { + 
panic(fmt.Sprintf("invalid semver in experiment tag %q: %q", key, rest)) + } + switch key { + case "preview": + info.Preview = rest + case "stable": + info.Stable = rest + case "withdrawn": + info.Withdrawn = rest + } + } + return info +} diff --git a/vendor/cuelang.org/go/internal/cueexperiment/parse.go b/vendor/cuelang.org/go/internal/cueexperiment/parse.go index 4cb1ce1b30..de5a5b93e6 100644 --- a/vendor/cuelang.org/go/internal/cueexperiment/parse.go +++ b/vendor/cuelang.org/go/internal/cueexperiment/parse.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "reflect" + "strconv" "strings" "cuelang.org/go/internal/mod/semver" @@ -31,22 +32,42 @@ func parseExperiments(x ...string) (m map[string]bool) { if m == nil { m = make(map[string]bool) } - for _, elem := range strings.Split(a, ",") { - m[strings.TrimSpace(elem)] = true + for elem := range strings.SplitSeq(a, ",") { + elem = strings.TrimSpace(elem) + m[elem] = true } } return m } +func parseEnvExperiments(x ...string) (m map[string]bool, err error) { + for _, name := range x { + if name == "" { + continue + } + if m == nil { + m = make(map[string]bool) + } + name, valueStr, _ := strings.Cut(name, "=") + if valueStr == "" { + m[name] = true + } else if val, err := strconv.ParseBool(valueStr); err == nil { + m[name] = val + } else { + return nil, fmt.Errorf("cannot parse CUE_EXPERIMENT: invalid value %q for experiment %q", valueStr, name) + } + } + return m, nil +} + // parseConfig initializes the fields in flags from the attached struct field -// tags as well as the contents of the given string, which is a comma-separated -// list of experiment names. +// tags as well as a set of experiment names. // -// version is the language version associated with th module of a file. An empty -// version string indicates the latest language version supported by the +// version is the language version associated with the module of a file. 
An +// empty version string indicates the latest language version supported by the // compiler. // -// experiments is a comma-separated list of experiment names. +// experiments is a map of experiment names. // // The struct field tag indicates the life cycle of the experiment, starting // with the version from when it was introduced, the version where it became @@ -59,36 +80,51 @@ func parseConfig[T any](flags *T, version string, experiments map[string]bool) e // Collect the field indices and set the default values. fv := reflect.ValueOf(flags).Elem() ft := fv.Type() -outer: for i := range ft.NumField() { field := ft.Field(i) if tagStr, ok := field.Tag.Lookup("experiment"); ok { name := strings.ToLower(field.Name) - for _, f := range strings.Split(tagStr, ",") { + explicitlyEnabled, hasExperiment := experiments[name] + explicitlyDisabled := hasExperiment && !explicitlyEnabled + for f := range strings.SplitSeq(tagStr, ",") { key, rest, _ := strings.Cut(f, ":") switch key { - case "since": + case "preview": switch { - case !experiments[name]: + case !explicitlyEnabled: + // Experiment not explicitly enabled, skip case version != "" && semver.Compare(version, rest) < 0: const msg = "cannot set experiment %q before version %s" errs = append(errs, fmt.Errorf(msg, name, rest)) - continue outer default: + // Experiment is explicitly enabled and version allows it fv.Field(i).Set(reflect.ValueOf(true)) } - case "accepted": + case "default": + if version == "" || semver.Compare(version, rest) >= 0 { + if !explicitlyDisabled { + fv.Field(i).Set(reflect.ValueOf(true)) + } + } + + case "stable": if version == "" || semver.Compare(version, rest) >= 0 { fv.Field(i).Set(reflect.ValueOf(true)) } + if explicitlyDisabled { + // We allow setting deprecated flags to their default + // value so that bold explorers will not be penalized + // for their experimentation. 
+ errs = append(errs, fmt.Errorf("cannot disable stable experiment %q", name)) + continue + } - case "rejected": + case "withdrawn": expired := (version == "" || semver.Compare(version, rest) >= 0) - if expired && experiments[name] { + if expired && explicitlyEnabled { const msg = "cannot set rejected experiment %q" errs = append(errs, fmt.Errorf(msg, name)) - continue outer } default: diff --git a/vendor/cuelang.org/go/internal/cueversion/version.go b/vendor/cuelang.org/go/internal/cueversion/version.go index 5740f9fa34..28b1f675e6 100644 --- a/vendor/cuelang.org/go/internal/cueversion/version.go +++ b/vendor/cuelang.org/go/internal/cueversion/version.go @@ -8,14 +8,13 @@ import ( "runtime/debug" "strings" "sync" - "time" ) // LanguageVersion returns the CUE language version. // This determines the latest version of CUE that // is accepted by the module. func LanguageVersion() string { - return "v0.14.1" + return "v0.16.0" } // ModuleVersion returns the version of the cuelang.org/go module as best as can @@ -41,45 +40,9 @@ var moduleVersionOnce = sync.OnceValue(func() string { // module name; it also happens when running the cue tests. return "(no-cue-module)" } - version := cueMod.Version - if version != "(devel)" { - return version - } - // A specific version was not provided by the buildInfo - // so attempt to make our own. - var vcsTime time.Time - var vcsRevision string - for _, s := range bi.Settings { - switch s.Key { - case "vcs.time": - // If the format is invalid, we'll print a zero timestamp. - vcsTime, _ = time.Parse(time.RFC3339Nano, s.Value) - case "vcs.revision": - vcsRevision = s.Value - // module.PseudoVersion recommends the revision to be a 12-byte - // commit hash prefix, which is what cmd/go uses as well. 
- if len(vcsRevision) > 12 { - vcsRevision = vcsRevision[:12] - } - } - } - if vcsRevision != "" { - version = pseudoVersion(vcsTime, vcsRevision) - } - return version + return cueMod.Version }) -const pseudoVersionTimestampFormat = "20060102150405" - -// pseudoVersion returns a Go-style pseudo-version, given a revision time, -// and revision identifier (usually a 12-byte commit hash prefix). -// -// This code was adapted directly from [golang.org/x/mod/module.PseudoVersion] (@v0.24.0) -// to avoid adding a dependency on that module from the core CUE packages. -func pseudoVersion(t time.Time, rev string) string { - return fmt.Sprintf("v0.0.0-%s-%s", t.UTC().Format(pseudoVersionTimestampFormat), rev) -} - func findCUEModule(bi *debug.BuildInfo) *debug.Module { if bi.Main.Path == cueModule { return &bi.Main diff --git a/vendor/cuelang.org/go/internal/encoding/encoder.go b/vendor/cuelang.org/go/internal/encoding/encoder.go index ac8f83ffa5..98a936c938 100644 --- a/vendor/cuelang.org/go/internal/encoding/encoder.go +++ b/vendor/cuelang.org/go/internal/encoding/encoder.go @@ -29,6 +29,7 @@ import ( "cuelang.org/go/cue/errors" "cuelang.org/go/cue/format" "cuelang.org/go/cue/token" + "cuelang.org/go/encoding/jsonschema" "cuelang.org/go/encoding/openapi" "cuelang.org/go/encoding/protobuf/jsonpb" "cuelang.org/go/encoding/protobuf/textproto" @@ -84,18 +85,21 @@ func NewEncoder(ctx *cue.Context, f *build.File, cfg *Config) (*Encoder, error) e.interpret = func(v cue.Value) (*ast.File, error) { return openapi.Generate(v, cfg) } + case build.JSONSchema: + // TODO: get encoding options + cfg := &jsonschema.GenerateConfig{} + e.interpret = func(v cue.Value) (*ast.File, error) { + expr, err := jsonschema.Generate(v, cfg) + if err != nil { + return nil, err + } + return internal.ToFile(expr, false), nil + } case build.ProtobufJSON: e.interpret = func(v cue.Value) (*ast.File, error) { - f := internal.ToFile(v.Syntax()) + f := internal.ToFile(v.Syntax(), false) return f, 
jsonpb.NewEncoder(v).RewriteFile(f) } - - // case build.JSONSchema: - // // TODO: get encoding options - // cfg := openapi.Config{} - // i.interpret = func(inst *cue.Instance) (*ast.File, error) { - // return jsonschmea.Generate(inst, cfg) - // } default: return nil, fmt.Errorf("unsupported interpretation %q", f.Interpretation) } @@ -143,7 +147,7 @@ func NewEncoder(ctx *cue.Context, f *build.File, cfg *Config) (*Encoder, error) // Casting an ast.Expr to an ast.File ensures that it always ends // with a newline. - f := internal.ToFile(n) + f := internal.ToFile(n, false) if e.cfg.PkgName != "" && f.PackageName() == "" { pkg := &ast.Package{ PackagePos: token.NoPos.WithRel(token.NewSection), @@ -250,40 +254,37 @@ func NewEncoder(ctx *cue.Context, f *build.File, cfg *Config) (*Encoder, error) } func (e *Encoder) EncodeFile(f *ast.File) error { - e.autoSimplify = false - return e.encodeFile(f, e.interpret) + if e.interpret == nil && e.encFile != nil { + // TODO it's not clear that it's actually desirable to turn + // off simplification in this case. This case generally arises + // when we're producing CUE code with `cue eval` and + // simplified results seem generally preferable. 
+ e.autoSimplify = false + return e.encFile(f) + } + e.autoSimplify = true + return e.Encode(e.ctx.BuildFile(f)) } func (e *Encoder) Encode(v cue.Value) error { e.autoSimplify = true - if err := v.Validate(cue.Concrete(e.concrete)); err != nil { - return err - } - if e.interpret != nil { - f, err := e.interpret(v) - if err != nil { + if e.interpret == nil { + if err := v.Validate(cue.Concrete(e.concrete)); err != nil { return err } - return e.encodeFile(f, nil) - } - if e.encValue != nil { return e.encValue(v) } - return e.encFile(internal.ToFile(v.Syntax())) -} - -func (e *Encoder) encodeFile(f *ast.File, interpret func(cue.Value) (*ast.File, error)) error { - if interpret == nil && e.encFile != nil { - return e.encFile(f) + if err := v.Validate(); err != nil { + return err } - e.autoSimplify = true - v := e.ctx.BuildFile(f) - if err := v.Err(); err != nil { + f, err := e.interpret(v) + if err != nil { return err } - if interpret != nil { - return e.Encode(v) + if e.encFile != nil { + return e.encFile(f) } + v = e.ctx.BuildFile(f) if err := v.Validate(cue.Concrete(e.concrete)); err != nil { return err } @@ -311,10 +312,18 @@ func writer(f *build.File, cfg *Config) (_ io.Writer, close func() error) { mode = os.O_WRONLY | os.O_CREATE | os.O_TRUNC } f, err := os.OpenFile(path, mode, 0o666) - if err != nil { - if errors.Is(err, fs.ErrExist) { + if errors.Is(err, fs.ErrExist) { + // If we failed because the file already existed, + // but the file in question is not regular, allow writing to it. + // This is done as a retry to avoid a Stat call before every OpenFile. 
+ stat, err2 := os.Stat(path) + if err2 == nil && !stat.Mode().IsRegular() { + f, err = os.OpenFile(path, os.O_WRONLY, 0o666) + } else { return errors.Wrapf(fs.ErrExist, token.NoPos, "error writing %q", path) } + } + if err != nil { return err } _, err = f.Write(b.Bytes()) diff --git a/vendor/cuelang.org/go/internal/encoding/encoding.go b/vendor/cuelang.org/go/internal/encoding/encoding.go index 6d312f32d8..ee502cc5d5 100644 --- a/vendor/cuelang.org/go/internal/encoding/encoding.go +++ b/vendor/cuelang.org/go/internal/encoding/encoding.go @@ -58,6 +58,7 @@ type Decoder struct { file *ast.File filename string // may change on iteration for some formats index int + size int // length of the source file if known; -1 otherwise err error } @@ -112,7 +113,7 @@ func (i *Decoder) File() *ast.File { if i.file != nil { return i.file } - return internal.ToFile(i.expr) + return internal.ToFile(i.expr, false) } func (i *Decoder) Err() error { @@ -139,7 +140,6 @@ type Config struct { PkgName string // package name for files to generate Force bool // overwrite existing files - Strict bool // strict mode for jsonschema (deprecated) Stream bool // potentially write more than one document per file AllErrors bool @@ -184,14 +184,15 @@ func NewDecoder(ctx *cue.Context, f *build.File, cfg *Config) *Decoder { if f.Source == nil && f.Filename == "-" { // TODO: should we allow this? 
r = cfg.Stdin + i.size = -1 } else { - rc, err := source.Open(f.Filename, f.Source) - i.closer = rc - i.err = err + r, i.size, i.err = source.Open(f.Filename, f.Source) + if c, ok := r.(io.Closer); ok { + i.closer = c + } if i.err != nil { return i } - r = rc } switch f.Interpretation { @@ -238,17 +239,22 @@ func NewDecoder(ctx *cue.Context, f *build.File, cfg *Config) *Decoder { path := f.Filename switch f.Encoding { case build.CUE: + b, err := source.ReadAllSize(r, i.size) + if err != nil { + i.err = err + break + } if cfg.ParseFile == nil { - i.file, i.err = parser.ParseFile(path, r, cfg.ParserConfig) + i.file, i.err = parser.ParseFile(path, b, cfg.ParserConfig) } else { - i.file, i.err = cfg.ParseFile(path, r, cfg.ParserConfig) + i.file, i.err = cfg.ParseFile(path, b, cfg.ParserConfig) } i.validate(i.file, f) if i.err == nil { i.doInterpret() } case build.JSON: - b, err := io.ReadAll(r) + b, err := source.ReadAllSize(r, i.size) if err != nil { i.err = err break @@ -261,7 +267,7 @@ func NewDecoder(ctx *cue.Context, f *build.File, cfg *Config) *Decoder { i.next = json.NewDecoder(nil, path, r).Extract i.Next() case build.YAML: - b, err := io.ReadAll(r) + b, err := source.ReadAllSize(r, i.size) i.err = err i.next = yaml.NewDecoder(path, b).Decode i.Next() @@ -277,11 +283,11 @@ func NewDecoder(ctx *cue.Context, f *build.File, cfg *Config) *Decoder { i.err = fmt.Errorf("xml requires a variant, such as: xml+koala") } case build.Text: - b, err := io.ReadAll(r) + b, err := source.ReadAllSize(r, i.size) i.err = err i.expr = ast.NewString(string(b)) case build.Binary: - b, err := io.ReadAll(r) + b, err := source.ReadAllSize(r, i.size) i.err = err s := literal.Bytes.WithTabIndent(1).Quote(string(b)) i.expr = ast.NewLit(token.STRING, s) @@ -292,7 +298,7 @@ func NewDecoder(ctx *cue.Context, f *build.File, cfg *Config) *Decoder { } i.file, i.err = protobuf.Extract(path, r, paths) case build.TextProto: - b, err := io.ReadAll(r) + b, err := source.ReadAllSize(r, i.size) i.err = 
err if err == nil { d := textproto.NewDecoder() @@ -318,8 +324,9 @@ func jsonSchemaFunc(cfg *Config, f *build.File) interpretFunc { // The strictKeywords and strictFeatures tags are // set by internal/filetypes from the strict tag when appropriate. - StrictKeywords: cfg.Strict || tags["strictKeywords"], - StrictFeatures: cfg.Strict || tags["strictFeatures"], + StrictKeywords: tags["strictKeywords"], + StrictFeatures: tags["strictFeatures"], + OpenOnlyWhenExplicit: tags["openOnlyWhenExplicit"], } file, err = jsonschema.Extract(v, cfg) // TODO: simplify currently erases file line info. Reintroduce after fix. @@ -337,8 +344,8 @@ func openAPIFunc(c *Config, f *build.File) interpretFunc { // Note: don't populate Strict (see more detailed // comment in jsonSchemaFunc) - StrictKeywords: c.Strict || tags["strictKeywords"], - StrictFeatures: c.Strict || tags["strictFeatures"], + StrictKeywords: tags["strictKeywords"], + StrictFeatures: tags["strictFeatures"], }) // TODO: simplify currently erases file line info. Reintroduce after fix. // file, err = simplify(file, err) @@ -463,7 +470,6 @@ func (v *validator) validate(n ast.Node) bool { case *ast.Field: check(n, i.Definitions, "definitions", internal.IsDefinition(x.Label)) check(n, i.Data, "regular fields", internal.IsRegularField(x)) - check(n, constraints, "optional fields", x.Optional != token.NoPos) _, _, err := ast.LabelName(x.Label) check(n, constraints, "optional fields", err != nil) diff --git a/vendor/cuelang.org/go/internal/encoding/json/patch.go b/vendor/cuelang.org/go/internal/encoding/json/patch.go new file mode 100644 index 0000000000..873f42206e --- /dev/null +++ b/vendor/cuelang.org/go/internal/encoding/json/patch.go @@ -0,0 +1,136 @@ +// Copyright 2026 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package json + +import ( + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/ast/astutil" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/token" +) + +// PatchExpr simplifies the AST parsed from JSON. +// TODO: some of the modifications are already done in format, but are +// a package deal of a more aggressive simplify. Other pieces of modification +// should probably be moved to format. +func PatchExpr(n ast.Node, patchPos func(n ast.Node)) { + type info struct { + reflow bool + } + stack := []info{{true}} + + afterFn := func(n ast.Node) { + switch n.(type) { + case *ast.ListLit, *ast.StructLit: + stack = stack[:len(stack)-1] + } + } + + var beforeFn func(n ast.Node) bool + + beforeFn = func(n ast.Node) bool { + if patchPos != nil { + patchPos(n) + } + + isLarge := n.End().Offset()-n.Pos().Offset() > 50 + descent := true + + switch x := n.(type) { + case *ast.ListLit: + reflow := true + if !isLarge { + for _, e := range x.Elts { + if hasSpaces(e) { + reflow = false + break + } + } + } + stack = append(stack, info{reflow}) + if reflow { + x.Lbrack = x.Lbrack.WithRel(token.NoRelPos) + x.Rbrack = x.Rbrack.WithRel(token.NoRelPos) + } + return true + + case *ast.StructLit: + reflow := true + if !isLarge { + for _, e := range x.Elts { + if f, ok := e.(*ast.Field); !ok || hasSpaces(f) || hasSpaces(f.Value) { + reflow = false + break + } + } + } + stack = append(stack, info{reflow}) + if reflow { + x.Lbrace = x.Lbrace.WithRel(token.NoRelPos) + x.Rbrace = x.Rbrace.WithRel(token.NoRelPos) + } + return true + + case *ast.Field: + // label is always a 
string for JSON. + s, ok := x.Label.(*ast.BasicLit) + if !ok || s.Kind != token.STRING { + break // should not happen: implies invalid JSON + } + + u, err := literal.Unquote(s.Value) + if err != nil { + break // should not happen: implies invalid JSON + } + + // TODO(legacy): remove checking for '_' prefix once hidden + // fields are removed. + if ast.StringLabelNeedsQuoting(u) { + break // keep string + } + + x.Label = ast.NewIdent(u) + astutil.CopyMeta(x.Label, s) + // Having removed the quote-marks, the ident start should be + // incremented by 1 so that the label content matches up with + // the raw json. + ast.SetPos(x.Label, x.Label.Pos().Add(1)) + + ast.Walk(x.Value, beforeFn, afterFn) + descent = false + + case *ast.BasicLit: + if x.Kind == token.STRING && len(x.Value) > 10 { + s, err := literal.Unquote(x.Value) + if err != nil { + break // should not happen: implies invalid JSON + } + + x.Value = literal.String.WithOptionalTabIndent(len(stack)).Quote(s) + } + } + + if stack[len(stack)-1].reflow { + ast.SetRelPos(n, token.NoRelPos) + } + return descent + } + + ast.Walk(n, beforeFn, afterFn) +} + +func hasSpaces(n ast.Node) bool { + return n.Pos().RelPos() > token.NoSpace +} diff --git a/vendor/cuelang.org/go/internal/encoding/yaml/decode.go b/vendor/cuelang.org/go/internal/encoding/yaml/decode.go index 82872405b9..af48e68fb9 100644 --- a/vendor/cuelang.org/go/internal/encoding/yaml/decode.go +++ b/vendor/cuelang.org/go/internal/encoding/yaml/decode.go @@ -8,16 +8,16 @@ import ( "io" "regexp" "slices" + "sort" "strconv" "strings" "sync" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/literal" "cuelang.org/go/cue/token" - "cuelang.org/go/internal" ) // TODO(mvdan): we should sanity check that the decoder always produces valid CUE, @@ -45,6 +45,7 @@ type decoder struct { // decodeErr is returned by any further calls to Decode when not nil. 
decodeErr error + src []byte tokFile *token.File tokLines []int @@ -57,13 +58,21 @@ type decoder struct { // extractingAliases ensures we don't loop forever when expanding YAML anchors. extractingAliases map[*yaml.Node]bool - // lastPos is the last YAML node position that we decoded, - // used for working out relative positions such as token.NewSection. - // This position can only increase, moving forward in the file. - lastPos token.Position + // lastOffset is byte offset from the last yaml.Node position that + // we decoded, used for working out relative positions such as + // token.NewSection. This offset can only increase, moving forward + // in the file. A value of -1 means no position has been recorded + // yet. + lastOffset int // forceNewline ensures that the next position will be on a new line. forceNewline bool + + // scopeEnd is the byte offset (exclusive) bounding the current + // node's extent in the source. Used to compute Rbrace positions + // for struct literals: the Rbrace is placed at offset scopeEnd-1 + // (typically the \n ending the last line before the next content). + scopeEnd int } // TODO(mvdan): this can be io.Reader really, except that token.Pos is offset-based, @@ -83,9 +92,14 @@ func NewDecoder(filename string, b []byte) *decoder { tokFile := token.NewFile(filename, 0, len(b)+1) tokFile.SetLinesForContent(b) return &decoder{ + src: b, tokFile: tokFile, tokLines: append(tokFile.Lines(), len(b)), yamlDecoder: *yaml.NewDecoder(bytes.NewReader(b)), + lastOffset: -1, + // TODO: for a streaming decoder we'll need to remove this + // dependency on knowing the length of the input ahead of time. 
+ scopeEnd: len(b), } } @@ -208,7 +222,7 @@ func (d *decoder) comments(src string) []*ast.Comment { return nil } var comments []*ast.Comment - for _, line := range strings.Split(src, "\n") { + for line := range strings.SplitSeq(src, "\n") { if line == "" { continue // yaml.v3 comments have a trailing newline at times } @@ -232,7 +246,7 @@ func (d *decoder) addHeadCommentsToPending(yn *yaml.Node) { // This will be wrong in some cases, moving empty lines, but is better than nothing. if len(d.pendingHeadComments) == 0 && len(comments) > 0 { c := comments[0] - if d.lastPos.IsValid() && (yn.Line-len(comments))-d.lastPos.Line >= 2 { + if d.lastOffset >= 0 && (yn.Line-len(comments))-d.offsetLine(d.lastOffset) >= 2 { c.Slash = c.Slash.WithRel(token.NewSection) } } @@ -285,41 +299,186 @@ func (d *decoder) posErrorf(yn *yaml.Node, format string, args ...any) error { return fmt.Errorf(d.tokFile.Name()+":"+strconv.Itoa(yn.Line)+": "+format, args...) } -// pos converts a YAML node position to a cue/ast position. -// Note that this method uses and updates the last position in lastPos, -// so it should be called on YAML nodes in increasing position order. -func (d *decoder) pos(yn *yaml.Node) token.Pos { - // Calculate the position's offset via the line and column numbers. - offset := d.tokLines[yn.Line-1] + (yn.Column - 1) +// yamlOffset converts a YAML node's line and column to a byte offset. +func (d *decoder) yamlOffset(yn *yaml.Node) int { + return d.tokLines[yn.Line-1] + (yn.Column - 1) +} + +// offsetLine returns a 1-indexed line number for the given byte +// offset. +func (d *decoder) offsetLine(offset int) int { + return sort.Search(len(d.tokLines), func(i int) bool { + return d.tokLines[i] > offset + }) +} + +// contentOffset returns the byte offset where a node's content +// starts, skipping past any YAML anchor prefix (&name). For +// flow-style nodes, it finds the opening delimiter (open). For +// block-style nodes, it skips past the anchor and newline. 
When the +// node has no anchor, it returns the node's position as-is. +func (d *decoder) contentOffset(yn *yaml.Node, open byte) int { + offset := d.yamlOffset(yn) + if yn.Anchor == "" { + return offset + } + + if yn.Style&yaml.FlowStyle != 0 { + for offset < len(d.src) && d.src[offset] != open { + offset++ + } + return offset + } + + // For block style, we want to be as greedy as possible. So once + // we're past the anchor, we want to stop right after the first + // newline, or at the first non-whitespace, whichever is sooner. + offset += 1 + len(yn.Anchor) // skip '&' and anchor name + newlineSeen := false + for ; offset < len(d.src); offset++ { + if newlineSeen { + return offset + } + switch d.src[offset] { + case ' ', '\t': + case '\n', '\r': + newlineSeen = true + default: + return offset + } + } + return offset +} + +// pos converts a byte offset to a cue/ast position. +// Note that this method uses and updates the last offset in lastOffset, +// so it should be called with increasing offsets. +func (d *decoder) pos(offset int) token.Pos { pos := d.tokFile.Pos(offset, token.NoRelPos) if d.forceNewline { d.forceNewline = false pos = pos.WithRel(token.Newline) - } else if d.lastPos.IsValid() { + } else if d.lastOffset >= 0 { + lastLine := d.offsetLine(d.lastOffset) + curLine := d.offsetLine(offset) switch { - case yn.Line-d.lastPos.Line >= 2: + case curLine-lastLine >= 2: pos = pos.WithRel(token.NewSection) - case yn.Line-d.lastPos.Line == 1: + case curLine-lastLine == 1: pos = pos.WithRel(token.Newline) - case yn.Column-d.lastPos.Column > 0: + case offset-d.lastOffset > 0: pos = pos.WithRel(token.Blank) default: pos = pos.WithRel(token.NoSpace) } - // If for any reason the node's position is before the last position, - // give up and return an empty position. Akin to: yn.Pos().Before(d.lastPos) + // If for any reason the offset is before the last offset, give + // up and return an empty position. 
// // TODO(mvdan): Brought over from the old decoder; when does this happen? // Can we get rid of those edge cases and this bit of logic? - if yn.Line < d.lastPos.Line || (yn.Line == d.lastPos.Line && yn.Column < d.lastPos.Column) { + if offset < d.lastOffset { return token.NoPos } } - d.lastPos = token.Position{Line: yn.Line, Column: yn.Column} + d.lastOffset = offset return pos } +// findClosing scans forward from start in the source bytes to find +// the first occurrence of close (typically '}' or ']') that is not +// inside a quoted string or comment. It returns the byte offset. +func (d *decoder) findClosing(start int, close byte) int { + for i := start; i < len(d.src); i++ { + switch d.src[i] { + case close: + return i + case '"': + // Skip double-quoted string. + for i++; i < len(d.src); i++ { + if d.src[i] == '\\' { + i++ // skip escaped character + } else if d.src[i] == '"' { + break + } + } + case '\'': + // Skip single-quoted string. + for i++; i < len(d.src); i++ { + if d.src[i] == '\'' { + if i+1 < len(d.src) && d.src[i+1] == '\'' { + i++ // skip '' escape + } else { + break + } + } + } + case '#': + // Skip comment to end of line. In YAML flow context, # + // starts a comment when preceded by whitespace (or at the + // start of the scan region). + if i == start || d.src[i-1] == ' ' || d.src[i-1] == '\t' { + for i++; i < len(d.src) && d.src[i] != '\n'; i++ { + } + } + } + } + return len(d.src) // shouldn't happen with valid YAML +} + +// isBlankLine returns true if the 0-indexed line contains only +// whitespace. +func (d *decoder) isBlankLine(lineIdx int) bool { + start := d.tokLines[lineIdx] + end := d.tokLines[lineIdx+1] + for i := start; i < end; i++ { + switch d.src[i] { + case ' ', '\t', '\n', '\r': + default: + return false + } + } + return true +} + +// isCommentLine returns true if the 0-indexed line is a comment-only +// line (optional leading whitespace followed by '#'). 
+func (d *decoder) isCommentLine(lineIdx int) bool { + start := d.tokLines[lineIdx] + end := d.tokLines[lineIdx+1] + for i := start; i < end; i++ { + switch c := d.src[i]; c { + case ' ', '\t': + default: + return c == '#' + } + } + return false +} + +// scopeEndBefore computes the scope end before the given YAML node, +// excluding any head comments and their surrounding blank lines from +// the scope. This ensures that comments belonging to the next sibling +// are not consumed by the current node's scope. +func (d *decoder) scopeEndBefore(yn *yaml.Node) int { + end := d.tokLines[yn.Line-1] + if yn.HeadComment == "" { + return end + } + // Walk backwards from the line before yn, skipping blank lines and + // then comment lines that belong to yn's head comments. + lineIdx := yn.Line - 2 // 0-indexed line just before yn + // Skip blank lines between comment and node. + for lineIdx >= 0 && d.isBlankLine(lineIdx) { + lineIdx-- + } + // Skip comment lines. + for lineIdx >= 0 && d.isCommentLine(lineIdx) { + lineIdx-- + } + return d.tokLines[lineIdx+1] +} + func (d *decoder) document(yn *yaml.Node) (ast.Expr, error) { if n := len(yn.Content); n != 1 { return nil, d.posErrorf(yn, "yaml document nodes are meant to have one content node but have %d", n) @@ -328,9 +487,23 @@ func (d *decoder) document(yn *yaml.Node) (ast.Expr, error) { } func (d *decoder) sequence(yn *yaml.Node) (ast.Expr, error) { + parentScopeEnd := d.scopeEnd // save before the loop modifies it list := &ast.ListLit{ - Lbrack: d.pos(yn).WithRel(token.Blank), + // Compute the bracket position directly without the side + // effects of d.pos. Like struct braces, brackets are a CUE + // concept with no YAML counterpart in block-style sequences. + // Use contentOffset to skip past any anchor prefix. + Lbrack: d.tokFile.Pos(d.contentOffset(yn, '['), token.Blank), + } + + // Unlike mappings which use d.label for keys, sequences extract + // elements directly. 
Advance lastOffset so that element relative + // positions are computed against the sequence node, not whatever + // came before it. + if ynOffset := d.yamlOffset(yn); ynOffset >= d.lastOffset { + d.lastOffset = ynOffset } + multiline := false if len(yn.Content) > 0 { multiline = yn.Line < yn.Content[len(yn.Content)-1].Line @@ -338,8 +511,14 @@ func (d *decoder) sequence(yn *yaml.Node) (ast.Expr, error) { // If a list is empty, or ends with a struct, the closing `]` is on the same line. closeSameLine := true - for _, c := range yn.Content { + for i, c := range yn.Content { d.forceNewline = multiline + // Set the scope end for the element we're about to extract. + if i+1 < len(yn.Content) { + d.scopeEnd = d.scopeEndBefore(yn.Content[i+1]) + } else { + d.scopeEnd = parentScopeEnd + } elem, err := d.extract(c) if err != nil { return nil, err @@ -348,14 +527,42 @@ func (d *decoder) sequence(yn *yaml.Node) (ast.Expr, error) { // A list of structs begins with `[{`, so let it end with `}]`. _, closeSameLine = elem.(*ast.StructLit) } - if multiline && !closeSameLine { - list.Rbrack = list.Rbrack.WithRel(token.Newline) + + if yn.Style&yaml.FlowStyle != 0 { + // Flow-style sequence: find the actual ']' in the source. + start := d.lastOffset + if len(yn.Content) == 0 { + start = list.Lbrack.Offset() + 1 + } + rbrackOff := d.findClosing(start, ']') + // Update lastOffset past the ']' so that any parent flow + // mapping's scan starts after this one's closing brace. + d.lastOffset = rbrackOff + 1 + list.Rbrack = d.tokFile.Pos(rbrackOff, token.Blank) + + } else if len(yn.Content) > 0 { + // In block-style, there are no explicit brackets, so we have to + // guess. We want to be as greedy as possible, so we go one byte + // before the end of our parent node. This intentionally + // includes whitespace after the end of this sequence but before + // the end of our parent. 
+ rel := token.Blank + if multiline && !closeSameLine { + rel = token.Newline + } + list.Rbrack = d.tokFile.Pos(parentScopeEnd-1, rel) + + } else { + list.Rbrack = list.Lbrack } return list, nil } func (d *decoder) mapping(yn *yaml.Node) (ast.Expr, error) { - strct := &ast.StructLit{} + parentScopeEnd := d.scopeEnd // save before insertMap modifies it + strct := &ast.StructLit{ + Lbrace: d.tokFile.Pos(d.contentOffset(yn, '{'), token.Blank), + } multiline := false if len(yn.Content) > 0 { multiline = yn.Line < yn.Content[len(yn.Content)-1].Line @@ -364,10 +571,31 @@ func (d *decoder) mapping(yn *yaml.Node) (ast.Expr, error) { if err := d.insertMap(yn, strct, multiline, false); err != nil { return nil, err } - // TODO(mvdan): moving these positions above insertMap breaks a few tests, why? - strct.Lbrace = d.pos(yn).WithRel(token.Blank) - if multiline { - strct.Rbrace = strct.Lbrace.WithRel(token.Newline) + + if yn.Style&yaml.FlowStyle != 0 { + // Flow-style mapping: find the actual '}' in the source. + // Start scanning from the last decoded position (which is + // past all children due to chaining), or from the '{' if empty. + start := d.lastOffset + if len(yn.Content) == 0 { + start = strct.Lbrace.Offset() + 1 + } + rbraceOff := d.findClosing(start, '}') + d.lastOffset = rbraceOff + 1 + strct.Rbrace = d.tokFile.Pos(rbraceOff, token.Blank) + + } else if len(yn.Content) > 0 { + // In block-style, there are no explicit braces, so we have to + // guess. We want to be as greedy as possible, so we go one byte + // before the end of our parent node. This intentionally + // includes whitespace after the end of this mapping but before + // the end of our parent. 
+ rel := token.Blank + if multiline { + rel = token.Newline + } + strct.Rbrace = d.tokFile.Pos(parentScopeEnd-1, rel) + } else { strct.Rbrace = strct.Lbrace } @@ -375,6 +603,7 @@ func (d *decoder) mapping(yn *yaml.Node) (ast.Expr, error) { } func (d *decoder) insertMap(yn *yaml.Node, m *ast.StructLit, multiline, mergeValues bool) error { + parentScopeEnd := d.scopeEnd l := len(yn.Content) outer: for i := 0; i < l; i += 2 { @@ -399,6 +628,13 @@ outer: d.addCommentsToNode(field, yk, 2) field.Label = label + // Set the scope end for the value we're about to extract. + if i+2 < l { + d.scopeEnd = d.scopeEndBefore(yn.Content[i+2]) + } else { + d.scopeEnd = parentScopeEnd + } + if mergeValues { key := labelStr(label) for _, decl := range m.Elts { @@ -445,7 +681,7 @@ func (d *decoder) merge(yn *yaml.Node, m *ast.StructLit, multiline bool) error { } func (d *decoder) label(yn *yaml.Node) (ast.Label, error) { - pos := d.pos(yn) + pos := d.pos(d.yamlOffset(yn)) var expr ast.Expr var err error @@ -469,23 +705,13 @@ func (d *decoder) label(yn *yaml.Node) (ast.Label, error) { switch expr := expr.(type) { case *ast.BasicLit: - if expr.Kind == token.STRING { - if ast.IsValidIdent(value) && !internal.IsDefOrHidden(value) { - return &ast.Ident{ - NamePos: pos, - Name: value, - }, nil - } - ast.SetPos(expr, pos) - return expr, nil + if expr.Kind != token.STRING { + // With incoming YAML like `Null: 1`, the key scalar is normalized to "null". 
+ value = expr.Value } - - return &ast.BasicLit{ - ValuePos: pos, - Kind: token.STRING, - Value: literal.Label.Quote(expr.Value), - }, nil - + label := ast.NewStringLabel(value) + ast.SetPos(label, pos) + return label, nil default: return nil, d.posErrorf(yn, "invalid label "+value) } @@ -520,17 +746,18 @@ func (d *decoder) scalar(yn *yaml.Node) (ast.Expr, error) { if yn.Style&yaml.TaggedStyle == 0 && tag == floatTag && rxAnyOctalYaml11().MatchString(yn.Value) { tag = strTag } + pos := d.pos(d.yamlOffset(yn)) switch tag { // TODO: use parse literal or parse expression instead. case timestampTag: return &ast.BasicLit{ - ValuePos: d.pos(yn), + ValuePos: pos, Kind: token.STRING, Value: literal.String.Quote(yn.Value), }, nil case strTag: return &ast.BasicLit{ - ValuePos: d.pos(yn), + ValuePos: pos, Kind: token.STRING, Value: literal.String.WithOptionalTabIndent(1).Quote(yn.Value), }, nil @@ -541,7 +768,7 @@ func (d *decoder) scalar(yn *yaml.Node) (ast.Expr, error) { return nil, d.posErrorf(yn, "!!binary value contains invalid base64 data") } return &ast.BasicLit{ - ValuePos: d.pos(yn), + ValuePos: pos, Kind: token.STRING, Value: literal.Bytes.Quote(string(data)), }, nil @@ -554,7 +781,7 @@ func (d *decoder) scalar(yn *yaml.Node) (ast.Expr, error) { t = true } lit := ast.NewBool(t) - lit.ValuePos = d.pos(yn) + lit.ValuePos = pos return lit, nil case intTag: @@ -573,7 +800,7 @@ func (d *decoder) scalar(yn *yaml.Node) (ast.Expr, error) { } else if !info.IsInt() { return nil, d.posErrorf(yn, "cannot decode %q as %s: not a literal number", value, tag) } - return d.makeNum(yn, value, token.INT), nil + return d.makeNum(pos, value, token.INT), nil case floatTag: value := yn.Value @@ -609,11 +836,11 @@ func (d *decoder) scalar(yn *yaml.Node) (ast.Expr, error) { } } } - return d.makeNum(yn, value, token.FLOAT), nil + return d.makeNum(pos, value, token.FLOAT), nil case nullTag: return &ast.BasicLit{ - ValuePos: d.pos(yn).WithRel(token.Blank), + ValuePos: pos.WithRel(token.Blank), 
Kind: token.NULL, Value: "null", }, nil @@ -622,16 +849,16 @@ func (d *decoder) scalar(yn *yaml.Node) (ast.Expr, error) { } } -func (d *decoder) makeNum(yn *yaml.Node, val string, kind token.Token) (expr ast.Expr) { +func (d *decoder) makeNum(pos token.Pos, val string, kind token.Token) (expr ast.Expr) { val, negative := strings.CutPrefix(val, "-") expr = &ast.BasicLit{ - ValuePos: d.pos(yn), + ValuePos: pos, Kind: kind, Value: val, } if negative { expr = &ast.UnaryExpr{ - OpPos: d.pos(yn), + OpPos: pos, Op: token.SUB, X: expr, } @@ -648,10 +875,42 @@ func (d *decoder) alias(yn *yaml.Node) (ast.Expr, error) { d.extractingAliases = make(map[*yaml.Node]bool) } d.extractingAliases[yn] = true - var node ast.Expr + + // Save and reset decoder state so the alias extraction doesn't + // interfere with the outer position tracking. The aliased node + // may be earlier in the source (the common case: define then use), + // which would leave lastOffset pointing past the aliased content + // and cause findClosing to find the wrong closing delimiter. + savedLastOffset := d.lastOffset + savedForceNewline := d.forceNewline + savedScopeEnd := d.scopeEnd + d.lastOffset = -1 // no position yet, so pos() produces valid positions for the alias's children + d.forceNewline = false + node, err := d.extract(yn.Alias) + + d.lastOffset = savedLastOffset + d.forceNewline = savedForceNewline + d.scopeEnd = savedScopeEnd delete(d.extractingAliases, yn) - return node, err + if err != nil { + return nil, err + } + + // For container types, override brace/bracket positions to reflect + // the alias reference site (*name), not the anchor definition site. + // The alias is where this value logically appears in the document. 
+ aliasStart := d.yamlOffset(yn) + aliasEnd := aliasStart + len(yn.Value) // *: 1 + len(name) - 1 + switch n := node.(type) { + case *ast.StructLit: + n.Lbrace = d.tokFile.Pos(aliasStart, token.Blank) + n.Rbrace = d.tokFile.Pos(aliasEnd, token.Blank) + case *ast.ListLit: + n.Lbrack = d.tokFile.Pos(aliasStart, token.Blank) + n.Rbrack = d.tokFile.Pos(aliasEnd, token.Blank) + } + return node, nil } func labelStr(l ast.Label) string { diff --git a/vendor/cuelang.org/go/internal/encoding/yaml/encode.go b/vendor/cuelang.org/go/internal/encoding/yaml/encode.go index be970c90e9..4e76ddb665 100644 --- a/vendor/cuelang.org/go/internal/encoding/yaml/encode.go +++ b/vendor/cuelang.org/go/internal/encoding/yaml/encode.go @@ -23,7 +23,7 @@ import ( "strings" "sync" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/errors" @@ -234,6 +234,28 @@ func encodeExprs(exprs []ast.Expr) (n *yaml.Node, err error) { return n, nil } +// extractYAMLTag looks for @yaml(,tag="...") attribute and returns the tag value. +// Returns an empty string if no @yaml attribute or no tag argument is found. +// Returns an error if the attribute is malformed. +func extractYAMLTag(attrs []*ast.Attribute) (string, error) { + for _, attr := range attrs { + key, body := attr.Split() + if key != "yaml" { + continue + } + parsed := internal.ParseAttrBody(attr.Pos(), body) + if parsed.Err != nil { + return "", parsed.Err + } + if val, found, err := parsed.Lookup(1, "tag"); err != nil { + return "", err + } else if found { + return val, nil + } + } + return "", nil +} + // encodeDecls converts a sequence of declarations to a value. If it encounters // an embedded value, it will return this expression. 
This is more relaxed for // structs than is currently allowed for CUE, but the expectation is that this @@ -267,8 +289,8 @@ func encodeDecls(decls []ast.Decl) (n *yaml.Node, err error) { if !internal.IsRegularField(x) { return nil, errors.Newf(x.TokenPos, "yaml: definition or hidden fields not allowed") } - if x.Optional != token.NoPos { - return nil, errors.Newf(x.Optional, "yaml: optional fields not allowed") + if x.Constraint != token.ILLEGAL { + return nil, errors.Newf(x.TokenPos, "yaml: optional fields not allowed") } if hasEmbed { return nil, errors.Newf(x.TokenPos, "yaml: embedding mixed with fields") @@ -289,6 +311,15 @@ func encodeDecls(decls []ast.Decl) (n *yaml.Node, err error) { if err != nil { return nil, err } + + yamlTag, err := extractYAMLTag(x.Attrs) + if err != nil { + return nil, err + } + if yamlTag != "" { + value.Tag = yamlTag + } + lastHead = label lastFoot = value addDocs(x, label, value) diff --git a/vendor/cuelang.org/go/internal/envflag/flag.go b/vendor/cuelang.org/go/internal/envflag/flag.go index b39abea0a1..18c3c86ef2 100644 --- a/vendor/cuelang.org/go/internal/envflag/flag.go +++ b/vendor/cuelang.org/go/internal/envflag/flag.go @@ -40,11 +40,11 @@ func Parse[T any](flags *T, env string) error { deprecated := make(map[string]bool) fv := reflect.ValueOf(flags).Elem() ft := fv.Type() - for i := 0; i < ft.NumField(); i++ { + for i := range ft.NumField() { field := ft.Field(i) name := strings.ToLower(field.Name) if tagStr, ok := field.Tag.Lookup("envflag"); ok { - for _, f := range strings.Split(tagStr, ",") { + for f := range strings.SplitSeq(tagStr, ",") { key, rest, hasRest := strings.Cut(f, ":") switch key { case "default": @@ -67,7 +67,7 @@ func Parse[T any](flags *T, env string) error { } var errs []error - for _, elem := range strings.Split(env, ",") { + for elem := range strings.SplitSeq(env, ",") { if elem == "" { // Allow empty elements such as `,somename=true` so that env vars // can be joined together like @@ -95,18 +95,18 @@ 
func Parse[T any](flags *T, env string) error { } } else if field.Kind() == reflect.Bool { // For bools, "somename" is short for "somename=true" or "somename=1". - // This mimicks how Go flags work, e.g. -knob is short for -knob=true. + // This mimics how Go flags work, e.g. -knob is short for -knob=true. val = true } else { // For any other type, a value must be specified. - // This mimicks how Go flags work, e.g. -output=path does not allow -output. + // This mimics how Go flags work, e.g. -output=path does not allow -output. errs = append(errs, fmt.Errorf("value needed for %s flag %q", field.Kind(), name)) continue } if deprecated[name] { // We allow setting deprecated flags to their default value so that - // bold explorers will not be penalised for their experimentation. + // bold explorers will not be penalized for their experimentation. if field.Interface() != val { errs = append(errs, fmt.Errorf("cannot change default value of deprecated flag %q", name)) } diff --git a/vendor/cuelang.org/go/internal/filetypes/fileinfo.dat b/vendor/cuelang.org/go/internal/filetypes/fileinfo.dat index 6af2904c3cfa2f722ad115c8480fd8df557129e2..b67f2b8ef1ba4a22764224b67648339ee7404f83 100644 GIT binary patch delta 20082 zcmYk^4cLhF*#_|EzVGMz)0503lO#!!BuSFVSCS;jWY$d9Ofs)NUK3`Lsbtcf$|RX& zhIvh9CbMR;)+95@Ofr+KNs=V*d0qeey3YGJcI-HJAJ2XM|Ie@Iad3LpiWOxmR+K&Q zKvX#E!IoWiR`z6NPgeG1WiM9VkX6p^5ubOya>g`EO{Esuqh8QPoDAp4u$)#~){AW@r zPOyj*5OL{Aa_Q`;Tsn(#X^FF2mB_81EqNUOeR1iFXnwO)nj>-`+T9WD?ud5xsK7a* z0^^w7sZ5%emWfB}^3drXQGt6PveP}H0{4gt++$w3cvMuVM~h1F7*MGm^=kA4dPMX< zG!I1ch$gv5G|4@t)X1%m)`-WXI^xl+R+?jaAeslF-5t?9VtT~%m|ZK)(OKd#^{mk8 z9x*)-+36lJJz{zsJ4-z7tg9Xq&lZoRXRF7^`g#IAVtOE&2cmhz^oZ%PtG?X2$5+MU zZ(kLU@eQOop$DRQAlls#%_E^lLXSfYq

u;<5Xj(CHotJrLRH9tk}Xdh}{29%~w^ z$BuKwBWk1`8yo8h^hoG|XdZ~>k~xPpdKA)QepB(Nx;plQACg9EyZKlCF(J`m3S;`r5>X$(-Y`XL=QysKs1jcdKA%P_hoYHUTwtV z?`_0m!sXIjObapo6J%Jv@^guKZMDr-7M=?FxbdX!m?;swRUM(Kmua@Q#dLWtyqTL;N`l+a^Z7rFKEF5>ayuZu^EuF_me4@C1ow7VmkM=3o@>5+Do=A!GxWBT=> z(>+S*fyhqxD5Xa!Jx*LN9>cq-$CU2kvAnx_jP9i;(4&+dh~|N49;NgsrN^FLa_inV zh{roOh{wd<(p*LlMDswjyCa%M89mDAaiq62557@6_TCse-J^^ii0pKaGJ2HJqfa04 zSl35AcHJZ%MSa!dwSIa6J<8~TXdZ~>QAUq4dbGVoZoS|Z@wlwNc#zZ%>&Uq%IQ%~kLll#Tc7xbcsx8QM1mIaEEyd|OYTM+H3)%>&UqD(F!`kG<#LxzjTzTu(MJu2ye$WHgDq(>z^ z`raiT>+e#J-6O=KQAv+VdbArUw_Z3>JTCvfckjt44`O`+GvCdsNW_k)7^QMUN_aEV@TL>W)^AHus9hkbBjm z(O5l!9#!-}G!I1csG>&|J!XuRTb~>&9*_Kwc(fWP&DHckG!I0(JED11)1#Um&yJJk zvirp2vHL=&dsNc{k)7^QO^<4NoVrgu?!I3=rad4Ys~%8~vE%gwdQ{T`(L4~%qnaMo z^w>9EZr%5X;_;6kipP`*(p*CiMDswjyCa%M4Lxe;add(-4}C~H_CFLl-J^yci0pKa z8hX^wqu)gF*f3E&_Dm9wvdQYPWs06aj~aR)ng^nJ)X<}b9_@cDw_fyP@wj5DcLuYOoOI!p_l?oo#xi0pKaI`pVRkHyo(quwLx(RR9c z44tkXjUUw$=uw9rh~|N49(CwZhaQhTDz`rMsCfM3G4W{qxHQ+&1JOJX?e2)?QA>|n zddz!Vnk#0C$IO|b(>-eGfyhqxsHI0OJxalmOcvL*29$TN&6Wk*za1V)@hs4YS(e@xj z--9~LlUpyICmvV+Ogwh~Oqwx&nFpfX9nm}x?e5&i@S1z{nlH_3=8H$i=R>D^a38}f z?t#co_sH#IOP&{x`U}*f-3#Jz=L_o5WRadgkBASdt<4<(vfoL9x z=7DH;=RStl+~fFiX&$yhJPxi1o$kSX46nEcB0Jq9w~q~2DIS|vs>i-xiAU8c^>|~o zoI%>&Uq5bf^V$MBka^nOK} z*S;bioz{j<_uxK;SKI@Uo$is_$Cj=Yj|RU{kM`@tW7s^`JH$?zCk?NzADX_zsv*CJP^$T(eBQD46nJzf>))vW}|q_-WWRFgZmg> zaSudxx<_swi+(R2qkpd+k8Kii2lufkw~rnCquhGHX7PA` zvv^GZlQd)gG7m)aKr|0TyF2$WyyhM!{v^%Aw}{7~EuqsrxR2o#_dsN)d*t@9fv=0l zYp<)v{;lFs^Jn#V^DlY=J&NgpXdZ~>!F?>s?PHzZlv^)-Q#`Kzt9b1Ft2AT&G7m)a zKr|0TyF2$WyyhN#wn_84ZQ{}St&Wy&V3B8xyQm? 
z(p>wFc+7bxbh-!kF}&g)i0pKa+&-4<7LPHz)nn$n;<4^s^%%cbPoPIBJrK#`xCbIT-6OY;Ek7(C zjXqS5jz`2}#1Zvq{$D+T9_92vG!I1c;64`R_OUttm0KrA#p9<(#iRYl(v11bJP^$T z(L50C?%c=lntLqzSeok|6OZ&*=yVV6V|c|q5ZUP-xqYnY6Y&`PiF(XBE*|TTtH*@T z^aOfT&;!vt5Y2=8Sd`nx4u2-M9&}PX{(VwB9{XIHF@Ko{qIn>i2cq4b`xstxk5ivZ z^WCS!T&S2c+~w;JmzeV^aOfT(gV>v5Y2=8 zSj2rS?Gi;eB3&Lu5J|6%Bao!~V{OL-W+sScf@mg)c7JYU5jV25U!v_B68Wizv}<8j z>nxnf@RqY6vesERlcg&QrFKo@qDWxUPQ?*q*4$kj2~FCfEF43dD%v2L4Wij_D~q_5 zrD>VozNlPno-9|J4i(ytIm~Pj%?8nI5bcxTUKVjLOBYvYd%a4vnOm9Fx(z2YyyZ5C ztaTesW@$;4+Kj6THnXeMWwR-!(XQ|BxXQ|E1 zy4sFu%xnC zYz}=zZR*tzHai-GV`x)D8$`1~G#hSc5jV87>p6P+mFK9F z(eSh^`aZi(IHr&(jRBkUnPn6C*UvFR1RBfJWsx}=j&~|QW5jQo7 z_DOJ4!&A56rj{+x((+xyyZ5W+wjzFIJc#%uT-0+?SoC1tJG%HRl%lJ z$8Zd7BHAFD4Wij_bBnpTrO$TM+m~IVHcwxpHk~?YJ7zVrK{Ojgvq3Z)?rt%6w{&SI zZEtX`+RVE)t92VrZ+OdXIKAPi+i-eID>|#qgPns-dY#&Ax-QsE=^BoqO-vg^vq3Z) z?r$;oxAbUNz5USZ)#g9ftIh0g+K!3MY!J-`(QFXSh8rBdH2r%j>aOjhyQ|Hy?pdwd zaE8NMZo?T4Pu+$yTzY2@wRy8gusPCGZ5s3nHoJOsy*KSgqUVXD#x8Wp*x7>!49GAYL@_7wxv=9vL%)A=@S z$Mj}4h-QOmHi%}!Z7$|EmoB?a+Zzs4oB0E?TDRdmhqv5@^BkVK4d=PE>UOmme|xZ* zJ4kI_8x(A&-Vu(WO%ZJn%?8nIxY5Pj=+a|%=qBe8BsqL8I%m&eH5X}bB zY`D|K-09L}sJ4$8sy4@mX0>j^sSa9B99&9-j^n~&~Pn}**GHoLzQ zj-gF4Z4k`{(QLTa#oX)CUc>eFYlf>$_q){Q@Lk%DNzQB#%?8nI5Y2|0UChlc9W+AQ zH;+)8-rvn?-G;Lr-f|nxc6jPGobA%J-&LFDcL$qp-&310-wQTvMulT&Q$ia=vq3Z) zZg(-ayA*$+C4bhex<_rEyGL!hjMjF{b7q5RHi%|}Xg1vMV(xe8^3mGf=w7v1aBo)Y zHk|PAmfLW`!&A56gqPNgQJV>4g3Yr(P@By^2sYEkg=1(_N*hG8K{Olgcrkap^!PZv z{jmGg=HvU+CcR(VG1Zw3qS+vt4Wij_%Zs_?r9}^D``8E6=EMV8t=n+U!&`2{IS)_W zhI3vz{6V$Z{$Q{<`oC(^Xne5QGa(#9n=;xUnhm1aaMO#q>7~6N(%Y|nNNsvfRGT9c zwH>pa*&vz?qS+vt4R^hmyIwkYlD2P|q&9sfXSHs_X%BC?4W~UkbsJ85>AJ~k)8a?L zrpFYu89OD|w0$@nLz{BiAes%L*>K;Bx$mV59@g8}OjDbmO;ek$k7zq4JhMSG8$`1~ zG#hSwF*m++Q{8Vjr{504co2@pDpA0s8p9;s&rjj;@W`k%p z-2LL*?$;;P+pkO2=7y)$=A);z9kZUXyF|BTwSd^XthdQNS|Jr``+%@4=WriwO*W`k%p-2LL*?zeEh-oEyE zwR!$|wduA%+cE2z4Wii~nhm1aaQBOIyWgq>+TP>^wORB+R_iw0{qUCCaQDMgx5@2( zbr-75LxZbGu*P6?*&iE7a!3m1=WzrM7eTi@EzrG#l=Icv_b2 
zeu(~O&7r^2_BVc|HvLv*wQj@R4{x~*qLFj=!w2|&vtgCmwEDkb(|fhr+`l^5w0|WW zLmTdXF?TH!6_lvpvNi-Yoet7COx!rH|Z?wJX zI<;B6E~|AL?tUn68}5F1>R&gx-LKwn)n>|XgUy2VYV*eWVDs3k;TYO*_lvpvNi-Yo zet7COr(V_D-@Q?7KHI1^^L{Vwn8ECON@DgsB{8#+nEeArG#f52_GGQu$Wzvu4W9bf&B#Bf&8|NLn-j08P18RHoBe+Z$GA;W;NMdcv+pU1nGK@c z9!KDlrcJ*sdixDq)aItw)#lji+KyQ-`}GDtf9N)dW`k&-1b07ttlQkVRoma(sy6-q zoYlGwcR#%4Hr)O2)NOLR-^M?yP3t#;O`pH0&4YglHXXKwV`vl62GMK~&4#;QlH2_j zZ`0e?drNIzd`oS5>E8@uJ9QgG`v;6@Hi%}!-4Cz1&6@4n-t29)S@O1C8nd2}bN9nr zZo}OVPu(WB`_YU!Y-Z~JGdhWBgNWPfmj$BPAes$#KfETlmyb

ZF0Ncs6A@4drz=A`46>ewl~-u*cXnWO+p()vq3Z)?tV#b_v`(A(EPtTsPCtTw$r)OO5zW`k%p zh-QOmHr)M^-0rvbLv3$NdIEufc!RX4-#(&7zOgX4^-> zX4c2y7}^xk2GMK~&4#;QlH2{xe5|)0bxdu(IHooWKGF7K+8~+@qS+vt4R^mJxBJx` z*Y*j=)h7Bht92Xhet64mxclL$+vIk?(Vwc#o==0#sS|3`{Ig(l@bhpCZHj4wXf}vu z!`&~*?S2DJ>Fqb2Qkz@8P@5B9Xgg*-`?^6i8$`1~G#l=INpANWep=hNpH`cJUuLy# z!`%;Wxea$eJawDg?)Tc4YSZ?N+RW(}))i@ci{e;* z!em)-R_iic{qUB{aP`Aem&vVu4NIhU&GZueS8Jq;OND05_R?T8yCNJzn^M{!nhm1a zaP>=at6x;9w;x@pHeXh%&B7{e$EasEh-QOmHi%}!)i24dezn!wKCxPDlA5g6ZMgd3 zEw|z7ho^3nTm8n=sLkG*U~{^T+O((*HiznlV`x)G8$`1~G#jpdNpAHUc(&gDwX@ac z)_Q7lvYxhM)H54Ivq3Z)M6==Qm*iHz5ns{v9bZwKLG`m*x8drCx7>!SAD+5RZuQ$- zUv1ibHQ4lTpf(d51e?z1hGS?`P8&qCK{Ok#eo1ciTXwGAzF{M^dAX6=^!=K)W7IPn zM6*FO8$`3=>X+nJzx7|!_Lhy+W_jbR)@``@;Vrk}>W8OplUw~7ou@W4&I>k6nyAf= zCc$RT1>qRlRL};|Y!JROj!n;K5RC@WXb_EtqhFF6 z{pvQ?^2yDWrs$%q)@eBU;Vq}(=!d6HlN}T&Xtw+G{&DJ+nbH8$`1~G#ie7NpAGp&|ceHU8OcFugYrOhNB zXKlx(XEumtgJ?F0X2a1h$&G&XuG97@*QrfOm#o%pIQro&x8dlAr*4xQ{l;}soBdsa zP1IFwT6GOJN4kY$Xj4NQM6*FO8;*WSZuA@6U2ngoyV~5|Lv2p?&~}b~2}eJPX2a1B zPs_5=57GaiIkKm=@9L>GLwaSkZo|DiYfM?ZPW+Mf31$k7iUaD)AvT$5YWX67xyW?6r=+0{ST%)Kof gLmQ5M2}eJPX2a1BPu-?upx%DmK(#5nJ-+1s0U;yUbpQYW delta 20082 zcmYk^4cLhF*#_|EzVGMz)0503lO#!!BuSDfpGlHrGHWJlCYe_suL(2BR5IyKWs*!X z!@MRllUXxaYm%8{CYedrBuSF@ysrO!UFUrq+m3Vh@!aSC|NMF$2WL;MSW&iOMcES% zM9HiNTXu~L3JOZ2(zvjoKx16yv9MfXA|Fs%VNc5n3N4lw6lt7wa8wk}9uQ55lG(eW zP}zf(Jy_X;l|5M5la;-)%Gupxbgo(*H;PcbJ!y&mb0*mkxdcq{RH138=Pb$R; z7I6Y1{xeA~ojsLHXHhOKadxW`x%IOpkK=zYE`1TrZN%PV&@n~HhI^82Ga1TUwx<^#t9#MgN%qtg;<5B>^%z-SPoPIk4@C1oG>@1bF+FzGms@xLs(AeE ztKu=ffix%dKr|0TyE~$JB=kt=aj1bb4?IUacApbE-6Np~B0Jq9p+`cGo(;ugO+)qA zajtkojnrdfV?BW$2|W49h-h<0~G^C+Z8 zAw4>uFU`x&7mqegL#KNb(gTs5?omjOLVC<^DjroAs7K3Y;xVw9depy2PoPI3JrK;pmfyhqxD5gg-J$hdu9&4{q zkDcwrBe_yNHeIDB(4&|hh~|N49>w%1rbn9&a_jjW#N*Pd#bf)`(p*9hMDswjyCa%M z2|Y^a(WRp_FYhQGZLbNP?omPyM0UDI2|Y^avEUlQ9_RrdQ9sgw?5uQJbwIj@o3RinoH?{XdZ}mcSQ3jrAH|}(yr26biH^?zdm%j zM=3oJ+36mo^eCmriR;B<_zmhYrJHyx@1`E3d+G`FD5VFYc_5lcDLqQ*v8Sipx_2+} 
zc&C?mOzbVqW%NKa4@A2=qIs0jql_L$dQ0=*8^vSqjiJ*$%IJZ}PWLFIM;Sf(^bwDB zebi&uP2y41S3O?qrzg;(j2?*QfoL9O^eCf8+gs$;3vLmQ%leDQj{eeIP7g%$K(xCf znnyW3%IVSdR%u>&t9Y~<5IWtXoF0hmbdPd+l+$D30P(23O+8u<6pz6J)uZ7z^aOg8 z(*w~w5Y3~U9_93y{tdbHiEoI( zpJC$h?lAF~{2ghoqz9sTAlls#&7+bYmGt=NJJLL4xOnUv9y;Blk{*ccbdO4URMMmG zUE;C+F7?os~(%brzg;(k{*cWfoL9;^r)mqyODD1g(Jn|^6!hs&hJZe6+IBm z1JUk|XdYGcsG`RWqojG&DDh~2Pv~@yDtaKY(>k^d2oR^z0(njVPefoOL}G>>X}RMX?xanf9N zpLjfWU+8p?YI-2D(>d*s`o$gVG9(Cxkc$#?Bdqh3jP8W}% z)77K#qj~~8>d*txJP^&J4n6A76zj&@(J~rK1)1S&r*+ZPwENusHF#@c_5lc zEj?=KvHwZAb-y{{@y|KpG4&}lM_9kIElXnNAu+qV#LNTH_8>&xgN{8V&3C5aaUczy z=7Epv!A|p#r|h&n2v2i@KOYvMX7dqf59Au;oin0X-D9)#$7 zP=|SP>&5fLI%>&Uq z5bf^V$MBka9A7TY!&ZpL!4;v?J-CnI756}7r+eh~u>mW^W7A6Y*!L^(s9L2SZ>-i6 z=#kI^(L4~%gZo&N+s8WoT5i4M*Wz*28u8e(Mw&5ynFpeIAeslF-JSawUUQG$uSoOS zSHz>!+R*79+{f^WdmysYJ#zcl(zW8z;5X{gew}y>Tc;jPf2SwVqmUkm=7DG)+{dEa zJ~r!ja_cj{6OYF?h)3I3r5W>=c_5kxqIn?N-MNq9HTPKXsx;SZ6pz^(L#KOiAHyr| zfyhqx$n9g%@5N*E@73e6P2#b3lX^V(M?HZaMf5;44@C3eJ{INnv4ek!ojs$JKuokG+4DX3SsafoL9x z=7DH;=RStl+@sGnXI^Bc&7+!G?M0UDIZXa9rmUuMWt{xrU7LVa?t4Fh) zdICL4=z(Y+h~~k4EXwU;vvi2lufk zw~rm#E4Lo_Px1KIKgDClK554MWgdv;foL9xc6aV$c+EXd?vv&b?}^9Z_d=(8a38}f z?t#co_sH#IgZ7KZ=Kbn%;C=C^J)j=j4(bW?D5D3Wc_5kx_pvCqk9GdH+FMkKq;fKxC(T*T0-{Pd`JwEtL|F@Ko{qIn>i z2cq4b`xstxk3}C#bKPU&ksb@3?!kQwueb*yJKZCT_wn z`;>SbITbqHgZmg>aSudxx<_sw8~lZMZ23Yx4xSc|x?hUNob8dGK#xj#AeslFd2k<# zxR0e>q6kN%%cBS)>9uhLl5~Hp?U=yK1kp?o%>>cz&y6hNMwa$Vw0%P&KlPAyEzD}2 zg)aZ$J1fwfW#IwV7F0 z+cAxq4Wii~nhm0T65P%rZfEJ~y4pVSY_<95?5x&pIG^Dyw?SmB+i*ThhtyM>t@VP< zp|7Y-z52msM}u$-ZE9$PXf}vu!woIshL(0cM{mFK9JRTwq1qg1sO{X*BJOAs&4xP~ zo|a`l^Mh!=&ZPa%)%K0&stx|uoBTq6(agv>rO8v)x(%l^d_b@4M~Tu^jnt;e*Md#w z#%eRNajNece(j^yYd;Ml=^K7%M)@?Yep}=hr?dyiK8a}{n$}Uu!`!5VObDFEo#^%9h z^2Om8+HhNoxUESv8*XcO>NX!;thXQ1Qf)qLsW!7Nk#_w2mVHl2%)X~2W;PPDPXf_w z5T#vyYAro;iMEewr8Y-fWwmC55ADHPvyrE)weKlBb(^7=s?8gh2AjjJ)u#Ss!Di>> z;TX4x3;cUZB5sc_i$r2(gDAI`FBU|%xuLDzepOqw`T7-VbMOjn$CQ?R@neT{8$`1~ 
zv`>Ot8$Q-;2DH=mP3_dC`;}R(+i-5fTW-U-4Nu*Mb6dLlO0{X)KG<}*N^M466>M5{ z49CzWq79I1GxyPjgZ_@VHZc>|`eY0A(;UtH*+=i1Jp1KVu zxpYlmwP|*9u<6=QZASMCHmz?B$IzybHi%|}Xg1vCV(xP3yj%756$8}fnE`6k`8I9G z^kz1QW`k%ph-SlWF6K6uF1t#F9GHgmqI?U>=r2GMK~%?8nI zxYNbl>C$AVwvQRAHpho%wQj?y4sW>)r#d`!8%}lUuy3i&wr>TSkM2~PhTjf0yT22T zp-nMu5X}bBY`E9O-0RYw!}a!ShO14tyVU0JUD}RG&TJ6P2GMK~&4!y@%*`$xG(y`q zk5HT5-_2^>hO-^savRQecJy$x! zaKgh|Zo>%=Pu+$SURpCoZ6=HfHqZV*Z8rZP*i0K2j-gE{Z4k`{(QLTm#oY1I0Z@=~-wdpZYZH`RT zcFcBWgJ?F0W`k%p-1TDadgMoc8e4Z8+_v>n5vBiysA> z?o-re?37^B_Tg|0ZOUnbXf}vu!+kI2zLzd|SZ`l5O>KTQO>Me9qV1UQ%m&eH5X}bB zY`F2o-1yR!k7#@2>1wlZdRFT;ocZvU+i>Q?Q@7#Fm)8D7Z6^LC*vy-uHd|%{o9T~- zV`x)B8$`1~G#hSxF}J?-#N&GV;WO3dlbLEW_X%ysoM$$OW`k%ph-SmRFXrBtmdw)j zakJFsSjB2GMM|`^CB4Z{d8seeLsV z^ZfH_bHf5{$E;^Ih-QOmHi%}!-7n7VeybK}dy^N`X3+~-t=n+-!&`2{-49RQCb#?5 zU8pvb7Y3X8FRIPf7lX}=#o-v*RMQ60Y!Jt9UiRxvf@q%vqS+wYC&AqhAL}-EZq@cTx2jG5 zKWDXW!`%;Wxea$eJawDg?zi#JYSa3SVAJO>YV+V^8Q@6?Oe)V^#&D0&iX5rt|=FPtao0w4o?vtGA8ONVZ?HMAFC0Uggf@s~gJ?F~{gT}7*Z)1e{l@py=H~rs zb9}$HW7abpM6*FO8$`3=?w90tzhUod`?mMhX25~0)@``^;Vrk}?uVytliU3^9Z;J# z{|YvJ530@hgTbcb2jLjn6w(IKY!JZF0L`ga4?_wEqN~MIWinwvU3% ztdGMnv?-zuqS+vt4R^mJxBH#>SZ_b-nA&`COl=l?qV2`BK{Ojgvq3Z)?tV#b_p3Rs z?GuixP4sD2>o(l|@Rr+f_rp`S$?bllKUJGOp9Y&#C)B3-XTj#+=iwOI6w?OLY!JNdIE@3k-0rtKNEnbR-Ek5R0i5bIA-bV_3R{YMF95OH_?l^D@v5KV@w zA71lciI*nwFR4fy6vpxsCNCAn@)IU~inJZ0p2;AZ45G;(nhaOJB)9slE7JBB#j*T^ z$+F_C)@8W*;VqZp>W8N;lUw~7mPqZI=_UHF)<_qZ3eB4BrNL%)ML32wrL;jb8$`3= z>X+nJzo=4gKe|$FzN}Q6g;m;)QO|4;%?8nI5Y2|GUy@t>YOA$SwiX!_^OOxeZr8JawDg>bJSR z+O+#>u<74GZ6-DdHl5E6$IzynHi%|}Xf|B^lHBUI>|DKl!$xZJawE0r`!#LHsAo2a zW`k%ph-SmpFUhTb>%XS$EgP%N^2S-M+i>;6TW-VE4^Q1DxB4|YPi zK{OhUeo1ch8`Mf~zqyru+1z%i+MK#n+p+1H4Wii~nhm1aaP&)Zqu<@FwS8x6wHbU_ zR_it#{qUCCaP-4dx5Fpa`sW!j3Qf>OR*LG}rW`k%ph-QOmHXQwu+~~KVy|%ZyN^MqNmDRcpM?bvfHXQx% z)NOL3U*isH^H_&qv-E1U*?D!aNjrsOXj4raM6*FO8;*WSZuBdKbg0+z^hTO$}`j%?8nIIQk{I(Qj}!z5SMMYIA#cwK?5g+d29r9Q`Dk4M#sb zEz3qfME^th$R66htB2YQ>6z8K4M#t`7}ZKm`NHeGKD 
z$IynOU&7H(qS+iRvoA-Ee)5#HJ?zVoqaQw?mkpv#Zc&?=w*;GI{nchy|6nuswr~t> eIQk_V{Un+VM?XAuo05Tg`*8!+rttRolK%%vCeJMZ diff --git a/vendor/cuelang.org/go/internal/filetypes/filetypes.go b/vendor/cuelang.org/go/internal/filetypes/filetypes.go index c468329289..e961559a90 100644 --- a/vendor/cuelang.org/go/internal/filetypes/filetypes.go +++ b/vendor/cuelang.org/go/internal/filetypes/filetypes.go @@ -23,6 +23,7 @@ import ( "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" "cuelang.org/go/internal/filetypes/internal" + cuepath "cuelang.org/go/pkg/path" ) // Mode indicate the base mode of operation and indicates a different set of @@ -75,39 +76,37 @@ func ParseArgs(args []string) (files []*build.File, err error) { sc := &scope{} for i, s := range args { - a := strings.Split(s, ":") + scope, file, found := cutScope(s) switch { - case len(a) == 1 || len(a[0]) == 1: // filename - if s == "" { + case !found: // just a filename, like "foo.yaml" + if file == "" { return nil, errors.Newf(token.NoPos, "empty file name") } - f, err := toFile(Input, sc, s) + f, err := toFile(Input, sc, file) if err != nil { return nil, err } files = append(files, f) hasFiles = true - case len(a) > 2 || a[0] == "": - return nil, errors.Newf(token.NoPos, - "unsupported file name %q: may not have ':'", s) - - case a[1] != "": + case scope == "": + return nil, errors.Newf(token.NoPos, "empty filetype prefix in %q", s) + case file != "": return nil, errors.Newf(token.NoPos, "cannot combine scope with file") - default: // scope + default: // just a scope, like "json:" switch { case i == len(args)-1: - qualifier = a[0] + qualifier = scope fallthrough case qualifier != "" && !hasFiles: return nil, errors.Newf(token.NoPos, "scoped qualifier %q without file", qualifier+":") } - sc, err = parseScope(a[0]) + sc, err = parseScope(scope) if err != nil { return nil, err } - qualifier = a[0] + qualifier = scope hasFiles = false } } @@ -134,6 +133,20 @@ func DefaultTagsForInterpretation(interp build.Interpretation, 
mode Mode) map[st return f.BoolTags } +func cutScope(s string) (scope, file string, found bool) { + if cuepath.IsAbs(s, cuepath.Windows) || cuepath.IsAbs(s, cuepath.Unix) { + // Absolute paths on Windows can begin with a volume name, like `C:\foo\bar`; + // do not confuse that for a scope prefix. + // Note that we use [cuepath.IsAbs] for consistent behavior across platforms. + // + // We also check for Unix, so that `/foo:colons.json` is treated + // as an absolute filename rather than a `/foo` scope prefix on `colons.json`. + } else if before, after, ok := strings.Cut(s, ":"); ok { + return before, after, true + } + return "", s, false // Just a filename +} + // ParseFile parses a single-argument file specifier, such as when a file is // passed to a command line argument. // @@ -141,15 +154,9 @@ func DefaultTagsForInterpretation(interp build.Interpretation, mode Mode) map[st // // cue eval -o yaml:foo.data func ParseFile(s string, mode Mode) (*build.File, error) { - scope := "" - file := s - - if p := strings.LastIndexByte(s, ':'); p >= 0 { - scope = s[:p] - file = s[p+1:] - if scope == "" { - return nil, errors.Newf(token.NoPos, "unsupported file name %q: may not have ':", s) - } + scope, file, found := cutScope(s) + if found && scope == "" { + return nil, errors.Newf(token.NoPos, "empty filetype prefix in %q", s) } if file == "" { @@ -189,7 +196,7 @@ func parseScope(scopeStr string) (*scope, error) { subsidiaryBool: make(map[string]bool), subsidiaryString: make(map[string]string), } - for _, tag := range strings.Split(scopeStr, "+") { + for tag := range strings.SplitSeq(scopeStr, "+") { tagName, tagVal, hasValue := strings.Cut(tag, "=") switch tagTypes[tagName] { case TagTopLevel: diff --git a/vendor/cuelang.org/go/internal/filetypes/tagtype_string.go b/vendor/cuelang.org/go/internal/filetypes/tagtype_string.go index 270d3516d1..18a0a1d9a9 100644 --- a/vendor/cuelang.org/go/internal/filetypes/tagtype_string.go +++ 
b/vendor/cuelang.org/go/internal/filetypes/tagtype_string.go @@ -19,8 +19,9 @@ const _TagType_name = "TagUnknownTagTopLevelTagSubsidiaryBoolTagSubsidiaryString var _TagType_index = [...]uint8{0, 10, 21, 38, 57} func (i TagType) String() string { - if i < 0 || i >= TagType(len(_TagType_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_TagType_index)-1 { return "TagType(" + strconv.FormatInt(int64(i), 10) + ")" } - return _TagType_name[_TagType_index[i]:_TagType_index[i+1]] + return _TagType_name[_TagType_index[idx]:_TagType_index[idx+1]] } diff --git a/vendor/cuelang.org/go/internal/filetypes/tofile.go b/vendor/cuelang.org/go/internal/filetypes/tofile.go index 64f07c6170..572d1ef760 100644 --- a/vendor/cuelang.org/go/internal/filetypes/tofile.go +++ b/vendor/cuelang.org/go/internal/filetypes/tofile.go @@ -18,7 +18,7 @@ import ( "cuelang.org/go/cue/build" ) -//go:generate go run -tags bootstrap ./generate.go +//go:generate go run -tags cuebootstrap ./generate.go func toFile(mode Mode, sc *scope, filename string) (*build.File, error) { return toFileGenerated(mode, sc, filename) diff --git a/vendor/cuelang.org/go/internal/filetypes/tofile_bootstrap.go b/vendor/cuelang.org/go/internal/filetypes/tofile_bootstrap.go index b127edf519..97dc156041 100644 --- a/vendor/cuelang.org/go/internal/filetypes/tofile_bootstrap.go +++ b/vendor/cuelang.org/go/internal/filetypes/tofile_bootstrap.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build bootstrap +//go:build cuebootstrap package filetypes diff --git a/vendor/cuelang.org/go/internal/filetypes/types.cue b/vendor/cuelang.org/go/internal/filetypes/types.cue index 0b2bea8f69..6d4ff3fcb0 100644 --- a/vendor/cuelang.org/go/internal/filetypes/types.cue +++ b/vendor/cuelang.org/go/internal/filetypes/types.cue @@ -316,7 +316,8 @@ interpretations: jsonschema: { strict: *false | bool strictKeywords: *strict | bool // TODO: enable strictFeatures by default? (see https://cuelang.org/issue/3923). - strictFeatures: *strict | bool + strictFeatures: *strict | bool + openOnlyWhenExplicit: *false | bool } } diff --git a/vendor/cuelang.org/go/internal/filetypes/types.go b/vendor/cuelang.org/go/internal/filetypes/types.go index f896d60b78..3aaab10e3d 100644 --- a/vendor/cuelang.org/go/internal/filetypes/types.go +++ b/vendor/cuelang.org/go/internal/filetypes/types.go @@ -18,7 +18,7 @@ import ( _ "embed" ) -//go:generate go run golang.org/x/tools/cmd/stringer -type=TagType -linecomment +//go:generate go tool stringer -type=TagType -linecomment type TagType int diff --git a/vendor/cuelang.org/go/internal/filetypes/types_gen.go b/vendor/cuelang.org/go/internal/filetypes/types_gen.go index 816a143063..8e75966f03 100644 --- a/vendor/cuelang.org/go/internal/filetypes/types_gen.go +++ b/vendor/cuelang.org/go/internal/filetypes/types_gen.go @@ -1,6 +1,6 @@ // Code generated by cuelang.org/go/pkg/gen. DO NOT EDIT. 
-//go:build !bootstrap +//go:build !cuebootstrap package filetypes @@ -31,31 +31,32 @@ var fromFileDataBytes []byte func init() { tagTypes = map[string]TagType{ - "auto": TagTopLevel, - "binary": TagTopLevel, - "code": TagTopLevel, - "cue": TagTopLevel, - "dag": TagTopLevel, - "data": TagTopLevel, - "go": TagTopLevel, - "graph": TagTopLevel, - "json": TagTopLevel, - "jsonl": TagTopLevel, - "jsonschema": TagTopLevel, - "koala": TagSubsidiaryBool, - "lang": TagSubsidiaryString, - "openapi": TagTopLevel, - "pb": TagTopLevel, - "proto": TagTopLevel, - "schema": TagTopLevel, - "strict": TagSubsidiaryBool, - "strictFeatures": TagSubsidiaryBool, - "strictKeywords": TagSubsidiaryBool, - "text": TagTopLevel, - "textproto": TagTopLevel, - "toml": TagTopLevel, - "xml": TagTopLevel, - "yaml": TagTopLevel, + "auto": TagTopLevel, + "binary": TagTopLevel, + "code": TagTopLevel, + "cue": TagTopLevel, + "dag": TagTopLevel, + "data": TagTopLevel, + "go": TagTopLevel, + "graph": TagTopLevel, + "json": TagTopLevel, + "jsonl": TagTopLevel, + "jsonschema": TagTopLevel, + "koala": TagSubsidiaryBool, + "lang": TagSubsidiaryString, + "openOnlyWhenExplicit": TagSubsidiaryBool, + "openapi": TagTopLevel, + "pb": TagTopLevel, + "proto": TagTopLevel, + "schema": TagTopLevel, + "strict": TagSubsidiaryBool, + "strictFeatures": TagSubsidiaryBool, + "strictKeywords": TagSubsidiaryBool, + "text": TagTopLevel, + "textproto": TagTopLevel, + "toml": TagTopLevel, + "xml": TagTopLevel, + "yaml": TagTopLevel, } } @@ -239,6 +240,8 @@ var subsidiaryBoolTagFuncs = []func(subsidiaryBoolTags) (subsidiaryBoolTags, err unifySubsidiaryBoolTags_0, unifySubsidiaryBoolTags_1, unifySubsidiaryBoolTags_2, + unifySubsidiaryBoolTags_3, + unifySubsidiaryBoolTags_4, } var subsidiaryTagFuncs = []func(subsidiaryTags) (subsidiaryTags, error){ @@ -266,16 +269,20 @@ func (t subsidiaryTags) marshalToMap() map[string]string { } type subsidiaryBoolTags struct { - koala opt.Opt[bool] - strict opt.Opt[bool] - strictFeatures 
opt.Opt[bool] - strictKeywords opt.Opt[bool] + koala opt.Opt[bool] + openOnlyWhenExplicit opt.Opt[bool] + strict opt.Opt[bool] + strictFeatures opt.Opt[bool] + strictKeywords opt.Opt[bool] } func (t *subsidiaryBoolTags) unmarshalFromMap(m map[string]bool) error { if x, ok := m["koala"]; ok { t.koala = opt.Some(x) } + if x, ok := m["openOnlyWhenExplicit"]; ok { + t.openOnlyWhenExplicit = opt.Some(x) + } if x, ok := m["strict"]; ok { t.strict = opt.Some(x) } @@ -292,6 +299,9 @@ func (t subsidiaryBoolTags) marshalToMap() map[string]bool { if t.koala.IsPresent() { m["koala"] = t.koala.Value() } + if t.openOnlyWhenExplicit.IsPresent() { + m["openOnlyWhenExplicit"] = t.openOnlyWhenExplicit.Value() + } if t.strict.IsPresent() { m["strict"] = t.strict.Value() } @@ -357,7 +367,44 @@ func unifySubsidiaryTags_1(t subsidiaryTags) (subsidiaryTags, error) { return r, nil } -// unifySubsidiaryBoolTags_2 unifies subsidiaryBoolTags values according to the following CUE logic: +// unifySubsidiaryBoolTags_3 unifies subsidiaryBoolTags values according to the following CUE logic: +// +// { +// { +// [string]: bool +// } +// koala: *false | bool +// strict: *false | bool +// strictKeywords: *strict | bool +// strictFeatures: *strict | bool +// openOnlyWhenExplicit: *false | bool +// } +func unifySubsidiaryBoolTags_3(t subsidiaryBoolTags) (subsidiaryBoolTags, error) { + var r subsidiaryBoolTags + r.koala = opt.Some(false) + if t.koala.IsPresent() { + r.koala = t.koala + } + r.openOnlyWhenExplicit = opt.Some(false) + if t.openOnlyWhenExplicit.IsPresent() { + r.openOnlyWhenExplicit = t.openOnlyWhenExplicit + } + r.strict = opt.Some(false) + if t.strict.IsPresent() { + r.strict = t.strict + } + r.strictFeatures = r.strict + if t.strictFeatures.IsPresent() { + r.strictFeatures = t.strictFeatures + } + r.strictKeywords = r.strict + if t.strictKeywords.IsPresent() { + r.strictKeywords = t.strictKeywords + } + return r, nil +} + +// unifySubsidiaryBoolTags_4 unifies subsidiaryBoolTags values 
according to the following CUE logic: // // { // { @@ -368,12 +415,15 @@ func unifySubsidiaryTags_1(t subsidiaryTags) (subsidiaryTags, error) { // strictKeywords: *strict | bool // strictFeatures: *strict | bool // } -func unifySubsidiaryBoolTags_2(t subsidiaryBoolTags) (subsidiaryBoolTags, error) { +func unifySubsidiaryBoolTags_4(t subsidiaryBoolTags) (subsidiaryBoolTags, error) { var r subsidiaryBoolTags r.koala = opt.Some(false) if t.koala.IsPresent() { r.koala = t.koala } + if t.openOnlyWhenExplicit.IsPresent() { + return subsidiaryBoolTags{}, fmt.Errorf("field %q not allowed", "openOnlyWhenExplicit") + } r.strict = opt.Some(false) if t.strict.IsPresent() { r.strict = t.strict @@ -403,6 +453,9 @@ func unifySubsidiaryBoolTags_0(t subsidiaryBoolTags) (subsidiaryBoolTags, error) if t.koala.IsPresent() { r.koala = t.koala } + if t.openOnlyWhenExplicit.IsPresent() { + return subsidiaryBoolTags{}, fmt.Errorf("field %q not allowed", "openOnlyWhenExplicit") + } if t.strict.IsPresent() { return subsidiaryBoolTags{}, fmt.Errorf("field %q not allowed", "strict") } @@ -421,15 +474,53 @@ func unifySubsidiaryBoolTags_0(t subsidiaryBoolTags) (subsidiaryBoolTags, error) // { // [string]: bool // } +// strict: *false | bool +// strictKeywords: *strict | bool +// strictFeatures: *strict | bool +// openOnlyWhenExplicit: *false | bool +// } +func unifySubsidiaryBoolTags_1(t subsidiaryBoolTags) (subsidiaryBoolTags, error) { + var r subsidiaryBoolTags + if t.koala.IsPresent() { + return subsidiaryBoolTags{}, fmt.Errorf("field %q not allowed", "koala") + } + r.openOnlyWhenExplicit = opt.Some(false) + if t.openOnlyWhenExplicit.IsPresent() { + r.openOnlyWhenExplicit = t.openOnlyWhenExplicit + } + r.strict = opt.Some(false) + if t.strict.IsPresent() { + r.strict = t.strict + } + r.strictFeatures = r.strict + if t.strictFeatures.IsPresent() { + r.strictFeatures = t.strictFeatures + } + r.strictKeywords = r.strict + if t.strictKeywords.IsPresent() { + r.strictKeywords = t.strictKeywords + 
} + return r, nil +} + +// unifySubsidiaryBoolTags_2 unifies subsidiaryBoolTags values according to the following CUE logic: +// +// { +// { +// [string]: bool +// } // strict: *false | bool // strictKeywords: *strict | bool // strictFeatures: *strict | bool // } -func unifySubsidiaryBoolTags_1(t subsidiaryBoolTags) (subsidiaryBoolTags, error) { +func unifySubsidiaryBoolTags_2(t subsidiaryBoolTags) (subsidiaryBoolTags, error) { var r subsidiaryBoolTags if t.koala.IsPresent() { return subsidiaryBoolTags{}, fmt.Errorf("field %q not allowed", "koala") } + if t.openOnlyWhenExplicit.IsPresent() { + return subsidiaryBoolTags{}, fmt.Errorf("field %q not allowed", "openOnlyWhenExplicit") + } r.strict = opt.Some(false) if t.strict.IsPresent() { r.strict = t.strict diff --git a/vendor/cuelang.org/go/internal/filetypes/types_gen.go.tmpl b/vendor/cuelang.org/go/internal/filetypes/types_gen.go.tmpl index 49d45951f5..7ab68ed016 100644 --- a/vendor/cuelang.org/go/internal/filetypes/types_gen.go.tmpl +++ b/vendor/cuelang.org/go/internal/filetypes/types_gen.go.tmpl @@ -1,6 +1,6 @@ // Code generated by cuelang.org/go/pkg/gen. DO NOT EDIT. -//go:build !bootstrap +//go:build !cuebootstrap package filetypes diff --git a/vendor/cuelang.org/go/internal/internal.go b/vendor/cuelang.org/go/internal/internal.go index 7b6d91c3b1..45316a7f8d 100644 --- a/vendor/cuelang.org/go/internal/internal.go +++ b/vendor/cuelang.org/go/internal/internal.go @@ -23,14 +23,12 @@ import ( "bufio" "fmt" "path/filepath" - "slices" "strings" + "unicode/utf8" "github.com/cockroachdb/apd/v3" "cuelang.org/go/cue/ast" - "cuelang.org/go/cue/ast/astutil" - "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" ) @@ -87,27 +85,9 @@ func (c Context) Sqrt(d, x *apd.Decimal) (apd.Condition, error) { return res, err } -// ErrIncomplete can be used by builtins to signal the evaluation was -// incomplete. 
-var ErrIncomplete = errors.New("incomplete value") - // BaseContext is used as CUE's default context for arbitrary-precision decimals. var BaseContext = Context{*apd.BaseContext.WithPrecision(34)} -// APIVersionSupported is the back version until which deprecated features -// are still supported. -var APIVersionSupported = Version(MinorSupported, PatchSupported) - -const ( - MinorCurrent = 5 - MinorSupported = 4 - PatchSupported = 0 -) - -func Version(minor, patch int) int { - return -1000 + 100*minor + patch -} - // EvaluatorVersion is declared here so it can be used everywhere without import cycles, // but the canonical documentation lives at [cuelang.org/go/cue/cuecontext.EvalVersion]. // @@ -137,62 +117,23 @@ const ( DevVersion = EvalV3 // TODO(mvdan): rename to EvalExperiment for consistency with cuecontext ) -// ListEllipsis reports the list type and remaining elements of a list. If we -// ever relax the usage of ellipsis, this function will likely change. Using -// this function will ensure keeping correct behavior or causing a compiler -// failure. -func ListEllipsis(n *ast.ListLit) (elts []ast.Expr, e *ast.Ellipsis) { - elts = n.Elts - if n := len(elts); n > 0 { - var ok bool - if e, ok = elts[n-1].(*ast.Ellipsis); ok { - elts = elts[:n-1] - } - } - return elts, e -} - -// Package finds the package declaration from the preamble of a file. -func Package(f *ast.File) *ast.Package { - for _, d := range f.Decls { +// Package finds the package declaration from the preamble of a file, +// returning it, and its index within the file's Decls. 
+func Package(f *ast.File) (*ast.Package, int) { + for i, d := range f.Decls { switch d := d.(type) { case *ast.CommentGroup: case *ast.Attribute: case *ast.Package: if d.Name == nil { // malformed package declaration - return nil + return nil, -1 } - return d + return d, i default: - return nil + return nil, -1 } } - return nil -} - -func SetPackage(f *ast.File, name string, overwrite bool) { - if pkg := Package(f); pkg != nil { - if !overwrite || pkg.Name.Name == name { - return - } - ident := ast.NewIdent(name) - astutil.CopyMeta(ident, pkg.Name) - return - } - - decls := make([]ast.Decl, len(f.Decls)+1) - k := 0 - for _, d := range f.Decls { - if _, ok := d.(*ast.CommentGroup); ok { - decls[k] = d - k++ - continue - } - break - } - decls[k] = &ast.Package{Name: ast.NewIdent(name)} - copy(decls[k+1:], f.Decls[k:]) - f.Decls = decls + return nil, -1 } // NewComment creates a new CommentGroup from the given text. @@ -217,7 +158,7 @@ func NewComment(isDoc bool, s string) *ast.CommentGroup { buf.WriteString("//") for scanner.Scan() { s := scanner.Text() - n := len([]rune(s)) + 1 + n := utf8.RuneCountInString(s) + 1 if count+n > maxRunesPerLine && count > 3 { cg.List = append(cg.List, &ast.Comment{Text: buf.String()}) count = 3 @@ -238,12 +179,12 @@ func NewComment(isDoc bool, s string) *ast.CommentGroup { func FileComments(f *ast.File) (docs, rest []*ast.CommentGroup) { hasPkg := false - if pkg := Package(f); pkg != nil { + if pkg, _ := Package(f); pkg != nil { hasPkg = true - docs = pkg.Comments() + docs = ast.Comments(pkg) } - for _, c := range f.Comments() { + for _, c := range ast.Comments(f) { if c.Doc { docs = append(docs, c) } else { @@ -260,49 +201,6 @@ func FileComments(f *ast.File) (docs, rest []*ast.CommentGroup) { return } -// MergeDocs merges multiple doc comments into one single doc comment. 
-func MergeDocs(comments []*ast.CommentGroup) []*ast.CommentGroup { - if len(comments) <= 1 || !hasDocComment(comments) { - return comments - } - - comments1 := make([]*ast.CommentGroup, 0, len(comments)) - comments1 = append(comments1, nil) - var docComment *ast.CommentGroup - for _, c := range comments { - switch { - case !c.Doc: - comments1 = append(comments1, c) - case docComment == nil: - docComment = c - default: - docComment.List = append(slices.Clip(docComment.List), &ast.Comment{Text: "//"}) - docComment.List = append(docComment.List, c.List...) - } - } - comments1[0] = docComment - return comments1 -} - -func hasDocComment(comments []*ast.CommentGroup) bool { - for _, c := range comments { - if c.Doc { - return true - } - } - return false -} - -func NewAttr(name, str string) *ast.Attribute { - buf := &strings.Builder{} - buf.WriteByte('@') - buf.WriteString(name) - buf.WriteByte('(') - buf.WriteString(str) - buf.WriteByte(')') - return &ast.Attribute{Text: buf.String()} -} - // ToExpr converts a node to an expression. If it is a file, it will return // it as a struct. If is an expression, it will return it as is. Otherwise // it panics. @@ -342,16 +240,27 @@ func ToExpr(n ast.Node) ast.Expr { // ToFile converts an expression to a file. // // Adjusts the spacing of x when needed. -func ToFile(n ast.Node) *ast.File { +// +// If preserveStructLit is true and n is a [*ast.StructLit], then n +// will be embedded within the returned [*ast.File] rather than only +// its elements being included in the returned File. This ensures that +// position information of the StructLit's braces is not lost. +func ToFile(n ast.Node, preserveStructLit bool) *ast.File { if n == nil { return nil } switch n := n.(type) { case *ast.StructLit: - f := &ast.File{Decls: n.Elts} - // Ensure that the comments attached to the struct literal are not lost. 
- ast.SetComments(f, ast.Comments(n)) - return f + if preserveStructLit { + ast.SetRelPos(n, token.NoSpace) + return &ast.File{Decls: []ast.Decl{&ast.EmbedDecl{Expr: n}}} + + } else { + f := &ast.File{Decls: n.Elts} + // Ensure that the comments attached to the struct literal are not lost. + ast.SetComments(f, ast.Comments(n)) + return f + } case ast.Expr: ast.SetRelPos(n, token.NoSpace) return &ast.File{Decls: []ast.Decl{&ast.EmbedDecl{Expr: n}}} @@ -403,77 +312,7 @@ func IsRegularField(f *ast.Field) bool { return true } -// ConstraintToken reports which constraint token (? or !) is associated -// with a field (if any), taking into account compatibility of deprecated -// fields. -func ConstraintToken(f *ast.Field) (t token.Token, ok bool) { - if f.Constraint != token.ILLEGAL { - return f.Constraint, true - } - if f.Optional != token.NoPos { - return token.OPTION, true - } - return f.Constraint, false -} - -// SetConstraints sets both the main and deprecated fields of f according to the -// given constraint token. -func SetConstraint(f *ast.Field, t token.Token) { - f.Constraint = t - if t == token.ILLEGAL { - f.Optional = token.NoPos - } else { - f.Optional = token.Blank.Pos() - } -} - -func EmbedStruct(s *ast.StructLit) *ast.EmbedDecl { - e := &ast.EmbedDecl{Expr: s} - if len(s.Elts) == 1 { - d := s.Elts[0] - astutil.CopyPosition(e, d) - ast.SetRelPos(d, token.NoSpace) - astutil.CopyComments(e, d) - ast.SetComments(d, nil) - if f, ok := d.(*ast.Field); ok { - ast.SetRelPos(f.Label, token.NoSpace) - } - } - s.Lbrace = token.Newline.Pos() - s.Rbrace = token.NoSpace.Pos() - return e -} - -// IsEllipsis reports whether the declaration can be represented as an ellipsis. -func IsEllipsis(x ast.Decl) bool { - // ... 
- if _, ok := x.(*ast.Ellipsis); ok { - return true - } - - // [string]: _ or [_]: _ - f, ok := x.(*ast.Field) - if !ok { - return false - } - v, ok := f.Value.(*ast.Ident) - if !ok || v.Name != "_" { - return false - } - l, ok := f.Label.(*ast.ListLit) - if !ok || len(l.Elts) != 1 { - return false - } - i, ok := l.Elts[0].(*ast.Ident) - if !ok { - return false - } - return i.Name == "string" || i.Name == "_" -} - // GenPath reports the directory in which to store generated files. func GenPath(root string) string { return filepath.Join(root, "cue.mod", "gen") } - -var ErrInexact = errors.New("inexact subsumption") diff --git a/vendor/cuelang.org/go/cue/builtin.go b/vendor/cuelang.org/go/internal/iterutil/iter.go similarity index 69% rename from vendor/cuelang.org/go/cue/builtin.go rename to vendor/cuelang.org/go/internal/iterutil/iter.go index 74aa56bbf7..991678b897 100644 --- a/vendor/cuelang.org/go/cue/builtin.go +++ b/vendor/cuelang.org/go/internal/iterutil/iter.go @@ -1,4 +1,4 @@ -// Copyright 2018 The CUE Authors +// Copyright 2025 CUE Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,20 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package cue +package iterutil -import ( - "cuelang.org/go/cue/token" - "cuelang.org/go/internal/core/adt" -) +import "iter" -func pos(n adt.Node) (p token.Pos) { - if n == nil { - return +func Count[E any](seq iter.Seq[E]) int { + n := 0 + for range seq { + n++ } - src := n.Source() - if src == nil { - return - } - return src.Pos() + return n } diff --git a/vendor/cuelang.org/go/internal/mod/modfiledata/modfile.go b/vendor/cuelang.org/go/internal/mod/modfiledata/modfile.go index bf4de1d333..b6cc3e7605 100644 --- a/vendor/cuelang.org/go/internal/mod/modfiledata/modfile.go +++ b/vendor/cuelang.org/go/internal/mod/modfiledata/modfile.go @@ -81,6 +81,8 @@ func (f *File) QualifiedModule() string { // Deprecated: this method is misnamed; use [File.ModuleRootPath] // instead. +// +//go:fix inline func (f *File) ModulePath() string { return f.ModuleRootPath() } diff --git a/vendor/cuelang.org/go/internal/mod/modimports/modimports.go b/vendor/cuelang.org/go/internal/mod/modimports/modimports.go index 1fc2dad7e2..402f071b19 100644 --- a/vendor/cuelang.org/go/internal/mod/modimports/modimports.go +++ b/vendor/cuelang.org/go/internal/mod/modimports/modimports.go @@ -25,14 +25,18 @@ type ModuleFile struct { // If there's an error, it might not a be CUE file. FilePath string - // Syntax includes only the portion of the file up to and including - // the imports. It will be nil if there was an error reading the file. - Syntax *ast.File + // Syntax (and SyntaxError) are the results from invoking + // [parser.ParseFile] + Syntax *ast.File + SyntaxError error } // AllImports returns a sorted list of all the package paths // imported by the module files produced by modFilesIter // in canonical form. +// +// If the modFilesIter yields an err then AllImports immediately stops +// and returns the accumulated package paths. 
func AllImports(modFilesIter iter.Seq2[ModuleFile, error]) ([]string, error) { pkgPaths := make(map[string]bool) for mf, err := range modFilesIter { @@ -40,7 +44,7 @@ func AllImports(modFilesIter iter.Seq2[ModuleFile, error]) ([]string, error) { return nil, fmt.Errorf("cannot read %q: %v", mf.FilePath, err) } // TODO look at build tags and omit files with "ignore" tags. - for _, imp := range mf.Syntax.Imports { + for imp := range mf.Syntax.ImportSpecs() { pkgPath, err := strconv.Unquote(imp.Path.Value) if err != nil { // TODO location formatting @@ -58,6 +62,9 @@ func AllImports(modFilesIter iter.Seq2[ModuleFile, error]) ([]string, error) { // inside the package with the given name at the given location. // If pkgQualifier is "*", files from all packages in the directory will be produced. // +// The iterator will yield an error if an I/O error is encountered +// when accessing the fsys. +// // TODO(mvdan): this should now be called InstanceFiles, to follow the naming from // https://cuelang.org/docs/concept/modules-packages-instances/#instances. func PackageFiles(fsys fs.FS, dir string, pkgQualifier string) iter.Seq2[ModuleFile, error] { @@ -206,17 +213,18 @@ func yieldAllModFiles(fsys fs.FS, fpath string, topDir bool, yield func(ModuleFi // at the given path if selectPackage returns true for the file's // package name. // +// yield is only invoked with a non-nil error if that error originates +// within fsys. In particular, errors from the parser are found via +// the [ModuleFile.SyntaxError] field. +// // It returns the yielded package name (if any) and reports whether // the iteration should continue. 
func yieldPackageFile(fsys fs.FS, fpath string, selectPackage func(pkgName string) bool, yield func(ModuleFile, error) bool) (pkgName string, cont bool) { - if !strings.HasSuffix(fpath, ".cue") { - return "", true - } pf := ModuleFile{ FilePath: fpath, } var syntax *ast.File - var err error + var syntaxErr error if cueFS, ok := fsys.(module.ReadCUEFS); ok { // The FS implementation supports reading CUE syntax directly. // A notable FS implementation that does this is the one @@ -225,15 +233,25 @@ func yieldPackageFile(fsys fs.FS, fpath string, selectPackage func(pkgName strin // TODO maybe we should make the options here match // the default parser options used by cue/load for better // cache behavior. - syntax, err = cueFS.ReadCUEFile(fpath, parser.NewConfig(parser.ImportsOnly)) - if err != nil && !errors.Is(err, errors.ErrUnsupported) { - return "", yield(pf, err) + syntax, syntaxErr = cueFS.ReadCUEFile(fpath, parser.NewConfig(parser.ImportsOnly)) + if syntax == nil { + if syntaxErr == nil { + // This file couldn't be read-and-converted to a CUE + // AST. Make no further attempts on this file. + return "", true + } else if !errors.Is(syntaxErr, errors.ErrUnsupported) { + return "", yield(pf, syntaxErr) + } } } if syntax == nil { // Either the FS doesn't implement [module.ReadCUEFS] // or the ReadCUEFile method returned ErrUnsupported, // so we need to acquire the syntax ourselves. + if !strings.HasSuffix(fpath, ".cue") { + // This fallback only supports reading `.cue` files. + return "", true + } f, err := fsys.Open(fpath) if err != nil { @@ -252,9 +270,9 @@ func yieldPackageFile(fsys fs.FS, fpath string, selectPackage func(pkgName strin } // Add a leading "./" so that a parse error filename is consistent // with the other error filenames created elsewhere in the codebase. 
- syntax, err = parser.ParseFile("./"+fpath, data, parser.ImportsOnly) - if err != nil { - return "", yield(pf, err) + syntax, syntaxErr = parser.ParseFile("./"+fpath, data, parser.ImportsOnly) + if syntax == nil { + return "", yield(pf, syntaxErr) } } @@ -262,6 +280,7 @@ func yieldPackageFile(fsys fs.FS, fpath string, selectPackage func(pkgName strin return "", true } pf.Syntax = syntax + pf.SyntaxError = syntaxErr return syntax.PackageName(), yield(pf, nil) } diff --git a/vendor/cuelang.org/go/internal/mod/modpkgload/import.go b/vendor/cuelang.org/go/internal/mod/modpkgload/import.go index 1b430ccffa..d1123cad33 100644 --- a/vendor/cuelang.org/go/internal/mod/modpkgload/import.go +++ b/vendor/cuelang.org/go/internal/mod/modpkgload/import.go @@ -301,11 +301,18 @@ func (pkgs *Packages) findLocalPackage(pkgPath string) ([]module.SourceLoc, erro } func isDirWithCUEFiles(loc module.SourceLoc) (bool, error) { + fsys := loc.FS + if cueFS, ok := fsys.(module.ReadCUEFS); ok { + result, err := cueFS.IsDirWithCUEFiles(loc.Dir) + if !errors.Is(err, errors.ErrUnsupported) { + return result, err + } + } // It would be nice if we could inspect the error returned from ReadDir to see // if it's failing because it's not a directory, but unfortunately that doesn't // seem to be something defined by the Go fs interface. // For now, catching fs.ErrNotExist seems to be enough. - entries, err := fs.ReadDir(loc.FS, loc.Dir) + entries, err := fs.ReadDir(fsys, loc.Dir) if err != nil { if errors.Is(err, fs.ErrNotExist) { return false, nil @@ -320,7 +327,7 @@ func isDirWithCUEFiles(loc module.SourceLoc) (bool, error) { // If the directory entry is a symlink, stat it to obtain the info for the // link target instead of the link itself. if ftype&fs.ModeSymlink != 0 { - info, err := fs.Stat(loc.FS, filepath.Join(loc.Dir, e.Name())) + info, err := fs.Stat(fsys, filepath.Join(loc.Dir, e.Name())) if err != nil { continue // Ignore broken symlinks. 
} diff --git a/vendor/cuelang.org/go/internal/mod/modpkgload/pkgload.go b/vendor/cuelang.org/go/internal/mod/modpkgload/pkgload.go index 7d01157b47..5d0d4abc39 100644 --- a/vendor/cuelang.org/go/internal/mod/modpkgload/pkgload.go +++ b/vendor/cuelang.org/go/internal/mod/modpkgload/pkgload.go @@ -5,6 +5,7 @@ import ( "fmt" "io/fs" "maps" + "path/filepath" "runtime" "slices" "strings" @@ -430,3 +431,37 @@ func IsStdlibPackage(pkgPath string) bool { firstElem, _, _ := strings.Cut(pkgPath, "/") return !strings.Contains(firstElem, ".") } + +// InsideCueMod reports whether absDir is inside a cue.mod directory, +// excluding the legacy directories cue.mod/{pkg,usr,gen}, +// which are still supported for placing package dependencies. +// +// For example, /foo/cue.mod and /foo/cue.mod/bar are inside cue.mod, +// but /foo, /foo/cue.modx, and /foo/cue.mod/pkg/example.com are not. +// +// absDir must be an absolute system path that is clean; see [filepath.Clean]. +func InsideCueMod(absDir string) bool { + lastPart := "" + for { + dir, base, found := cutLast(absDir, string(filepath.Separator)) + if base == "cue.mod" { + switch lastPart { + case "pkg", "usr", "gen": + return false + } + return true + } + if !found { + return false + } + absDir = dir + lastPart = base + } +} + +func cutLast(s, sep string) (before, after string, found bool) { + if i := strings.LastIndex(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return "", s, false +} diff --git a/vendor/cuelang.org/go/internal/mod/modrequirements/requirements.go b/vendor/cuelang.org/go/internal/mod/modrequirements/requirements.go index 66640d1344..bf0a1d00c9 100644 --- a/vendor/cuelang.org/go/internal/mod/modrequirements/requirements.go +++ b/vendor/cuelang.org/go/internal/mod/modrequirements/requirements.go @@ -315,7 +315,6 @@ func (rs *Requirements) readModGraph(ctx context.Context) (*ModuleGraph, error) } for _, m := range rs.rootModules { - m := m if !m.IsValid() { panic("root module version is invalid") } 
diff --git a/vendor/cuelang.org/go/internal/mod/modresolve/resolve.go b/vendor/cuelang.org/go/internal/mod/modresolve/resolve.go index 60253eb476..501d304cc3 100644 --- a/vendor/cuelang.org/go/internal/mod/modresolve/resolve.go +++ b/vendor/cuelang.org/go/internal/mod/modresolve/resolve.go @@ -288,8 +288,8 @@ func ParseCUERegistry(s string, catchAllDefault string) (LocationResolver, error cfg := config{ ModuleRegistries: make(map[string]*registryConfig), } - parts := strings.Split(s, ",") - for _, part := range parts { + parts := strings.SplitSeq(s, ",") + for part := range parts { key, val, ok := strings.Cut(part, "=") if !ok { if part == "" { diff --git a/vendor/cuelang.org/go/internal/mod/semver/semver.go b/vendor/cuelang.org/go/internal/mod/semver/semver.go index 0d33ac41cf..7450d2c6b2 100644 --- a/vendor/cuelang.org/go/internal/mod/semver/semver.go +++ b/vendor/cuelang.org/go/internal/mod/semver/semver.go @@ -295,20 +295,10 @@ func isNum(v string) bool { } func compareInt(x, y string) int { - if x == y { - return 0 - } - if len(x) < len(y) { - return -1 - } - if len(x) > len(y) { - return +1 - } - if x < y { - return -1 - } else { - return +1 + if c := cmp.Compare(len(x), len(y)); c != 0 { + return c } + return cmp.Compare(x, y) } func comparePrerelease(x, y string) int { @@ -352,18 +342,11 @@ func comparePrerelease(x, y string) int { } } if ix { - if len(dx) < len(dy) { - return -1 + if c := cmp.Compare(len(dx), len(dy)); c != 0 { + return c } - if len(dx) > len(dy) { - return +1 - } - } - if dx < dy { - return -1 - } else { - return +1 } + return cmp.Compare(dx, dy) } } if x == "" { diff --git a/vendor/cuelang.org/go/internal/pkg/builtin.go b/vendor/cuelang.org/go/internal/pkg/builtin.go index c842bb46af..d4d5ef0394 100644 --- a/vendor/cuelang.org/go/internal/pkg/builtin.go +++ b/vendor/cuelang.org/go/internal/pkg/builtin.go @@ -20,7 +20,6 @@ import ( "cuelang.org/go/cue/errors" "cuelang.org/go/cue/parser" - "cuelang.org/go/internal" 
"cuelang.org/go/internal/core/adt" "cuelang.org/go/internal/core/compile" "cuelang.org/go/internal/core/convert" @@ -131,15 +130,13 @@ func ToBuiltin(b *Builtin) *adt.Builtin { Package: b.Pkg, Name: b.Name, } - x.Func = func(call *adt.CallContext) (ret adt.Expr) { + x.Func = func(call adt.CallContext) (ret adt.Expr) { ctx := call.OpContext() - args := call.Args() // call, _ := ctx.Source().(*ast.CallExpr) c := &CallCtxt{ CallContext: call, ctx: ctx, - args: args, builtin: b, } defer func() { @@ -174,7 +171,7 @@ func ToBuiltin(b *Builtin) *adt.Builtin { return nil } } - return convert.GoValueToValue(ctx, c.Ret, true) + return convert.FromGoValue(ctx, c.Ret, true) } return x } @@ -202,7 +199,6 @@ func (x *Builtin) name(ctx *adt.OpContext) string { } func processErr(call *CallCtxt, errVal interface{}, ret adt.Expr) adt.Expr { - ctx := call.ctx switch err := errVal.(type) { case nil: case ValidationError: @@ -238,23 +234,17 @@ func processErr(call *CallCtxt, errVal interface{}, ret adt.Expr) adt.Expr { ret = wrapCallErr(call, &adt.Bottom{Err: err}) case error: - if call.Err == internal.ErrIncomplete { - err := ctx.NewErrf("incomplete value") - err.Code = adt.IncompleteError - ret = err - } else { - // TODO: store the underlying error explicitly - ret = wrapCallErr(call, &adt.Bottom{Err: errors.Promote(err, "")}) - } + // TODO: store the underlying error explicitly + ret = wrapCallErr(call, &adt.Bottom{Err: errors.Promote(err, "")}) case string, fmt.Stringer: // A string or a stringer likely used as a panic value. ret = wrapCallErr(call, &adt.Bottom{ - Err: errors.Newf(call.Pos(), "%s", err), + Err: errors.Newf(call.ctx.Pos(), "%s", err), }) default: // Some other value used when panicking; likely a bug. 
ret = wrapCallErr(call, &adt.Bottom{ - Err: errors.Newf(call.Pos(), "BUG: non-stringifiable %T", err), + Err: errors.Newf(call.ctx.Pos(), "BUG: non-stringifiable %T", err), }) } return ret diff --git a/vendor/cuelang.org/go/internal/pkg/context.go b/vendor/cuelang.org/go/internal/pkg/context.go index f5e8d21cad..0a3bf5c227 100644 --- a/vendor/cuelang.org/go/internal/pkg/context.go +++ b/vendor/cuelang.org/go/internal/pkg/context.go @@ -17,28 +17,22 @@ package pkg import ( "io" "math/big" + "math/bits" "github.com/cockroachdb/apd/v3" "cuelang.org/go/cue" - "cuelang.org/go/cue/token" "cuelang.org/go/internal/core/adt" "cuelang.org/go/internal/value" ) // CallCtxt is passed to builtin implementations that need to use a cue.Value. This is an internal type. Its interface may change. type CallCtxt struct { - *adt.CallContext + adt.CallContext ctx *adt.OpContext builtin *Builtin - Err interface{} - Ret interface{} - - args []adt.Value -} - -func (c *CallCtxt) Pos() token.Pos { - return c.ctx.Pos() + Err any + Ret any } func (c *CallCtxt) Name() string { @@ -62,7 +56,7 @@ func (c *CallCtxt) Schema(i int) Schema { // Value returns a finalized cue.Value for the ith argument. func (c *CallCtxt) Value(i int) cue.Value { - v := value.Make(c.ctx, c.args[i]) + v := value.Make(c.ctx, c.CallContext.Value(i)) if c.builtin.NonConcrete { // In case NonConcrete is false, the concreteness is already checked // at call time. 
We may want to use finalize semantics in both cases, @@ -78,7 +72,7 @@ func (c *CallCtxt) Value(i int) cue.Value { } func (c *CallCtxt) Struct(i int) Struct { - x := c.args[i] + x := c.CallContext.Value(i) if c.builtin.NonConcrete { x = adt.Default(x) } @@ -95,7 +89,7 @@ func (c *CallCtxt) Struct(i int) Struct { if b, ok := x.(*adt.Bottom); ok { err = &callError{b} } - c.invalidArgType(c.args[i], i, "struct", err) + c.invalidArgType(x, i, "struct", err) } else { err := c.ctx.NewErrf("non-concrete struct for argument %d", i) err.Code = adt.IncompleteError @@ -111,15 +105,21 @@ func (c *CallCtxt) Int32(i int) int32 { return int32(c.intValue(i, 32, "int32")) func (c *CallCtxt) Rune(i int) rune { return rune(c.intValue(i, 32, "rune")) } func (c *CallCtxt) Int64(i int) int64 { return c.intValue(i, 64, "int64") } -func (c *CallCtxt) intValue(i, bits int, typ string) int64 { - arg := c.args[i] +func (c *CallCtxt) intValue(i, bitLen int, typ string) int64 { + arg := c.CallContext.Value(i) + if num, _ := c.ctx.EvaluateKeepState(arg).(*adt.Num); num != nil { + // In the happy path, avoid converting to the public [cue.Value] API, which is wasteful. 
+ if n, err := num.X.Int64(); err == nil && bits.Len64(uint64(n)) <= bitLen { + return n + } + } x := value.Make(c.ctx, arg) n, err := x.Int(nil) if err != nil { c.invalidArgType(arg, i, typ, err) return 0 } - if n.BitLen() > bits { + if n.BitLen() > bitLen { c.errf(err, "int %s overflows %s in argument %d in call to %s", n, typ, i, c.Name()) } @@ -134,14 +134,23 @@ func (c *CallCtxt) Uint16(i int) uint16 { return uint16(c.uintValue(i, 16, "uint func (c *CallCtxt) Uint32(i int) uint32 { return uint32(c.uintValue(i, 32, "uint32")) } func (c *CallCtxt) Uint64(i int) uint64 { return c.uintValue(i, 64, "uint64") } -func (c *CallCtxt) uintValue(i, bits int, typ string) uint64 { - x := value.Make(c.ctx, c.args[i]) +func (c *CallCtxt) uintValue(i, bitLen int, typ string) uint64 { + arg := c.CallContext.Value(i) + if num, _ := c.ctx.EvaluateKeepState(arg).(*adt.Num); num != nil { + // In the happy path, avoid converting to the public [cue.Value] API, which is wasteful. + // Note that [apd.Decimal] has an Int64 method, but no Uint64 method, + // so any uint64 value that doesn't fit in an int64 skips the shortcut. + if n, err := num.X.Int64(); err == nil && n >= 0 && bits.Len64(uint64(n)) <= bitLen { + return uint64(n) + } + } + x := value.Make(c.ctx, arg) n, err := x.Int(nil) if err != nil || n.Sign() < 0 { - c.invalidArgType(c.args[i], i, typ, err) + c.invalidArgType(arg, i, typ, err) return 0 } - if n.BitLen() > bits { + if n.BitLen() > bitLen { c.errf(err, "int %s overflows %s in argument %d in call to %s", n, typ, i, c.Name()) } @@ -150,30 +159,47 @@ func (c *CallCtxt) uintValue(i, bits int, typ string) uint64 { } func (c *CallCtxt) Decimal(i int) *apd.Decimal { - x := value.Make(c.ctx, c.args[i]) + arg := c.CallContext.Value(i) + if num, _ := c.ctx.EvaluateKeepState(arg).(*adt.Num); num != nil { + // In the happy path, avoid converting to the public [cue.Value] API, which is wasteful. 
+ return &num.X + } + x := value.Make(c.ctx, arg) res, err := x.Decimal() if err != nil { - c.invalidArgType(c.args[i], i, "Decimal", err) + c.invalidArgType(arg, i, "Decimal", err) return nil } return res } func (c *CallCtxt) Float64(i int) float64 { - x := value.Make(c.ctx, c.args[i]) + arg := c.CallContext.Value(i) + if num, _ := c.ctx.EvaluateKeepState(arg).(*adt.Num); num != nil { + // In the happy path, avoid converting to the public [cue.Value] API, which is wasteful. + if f, err := num.X.Float64(); err == nil { + return f + } + } + x := value.Make(c.ctx, arg) res, err := x.Float64() if err != nil { - c.invalidArgType(c.args[i], i, "float64", err) + c.invalidArgType(arg, i, "float64", err) return 0 } return res } func (c *CallCtxt) BigInt(i int) *big.Int { - x := value.Make(c.ctx, c.args[i]) + arg := c.CallContext.Value(i) + if num, _ := c.ctx.EvaluateKeepState(arg).(*adt.Num); num != nil { + // In the happy path, avoid converting to the public [cue.Value] API, which is wasteful. + return num.BigInt(nil) + } + x := value.Make(c.ctx, arg) n, err := x.Int(nil) if err != nil { - c.invalidArgType(c.args[i], i, "int", err) + c.invalidArgType(arg, i, "int", err) return nil } return n @@ -182,11 +208,12 @@ func (c *CallCtxt) BigInt(i int) *big.Int { var ten = big.NewInt(10) func (c *CallCtxt) BigFloat(i int) *big.Float { - x := value.Make(c.ctx, c.args[i]) + arg := c.CallContext.Value(i) + x := value.Make(c.ctx, arg) var mant big.Int exp, err := x.MantExp(&mant) if err != nil { - c.invalidArgType(c.args[i], i, "float", err) + c.invalidArgType(arg, i, "float", err) return nil } f := &big.Float{} @@ -200,53 +227,68 @@ func (c *CallCtxt) BigFloat(i int) *big.Float { } func (c *CallCtxt) String(i int) string { - // TODO: use Evaluate instead. - x := value.Make(c.ctx, c.args[i]) + arg := c.CallContext.Value(i) + if str, _ := c.ctx.EvaluateKeepState(arg).(*adt.String); str != nil { + // In the happy path, avoid converting to the public [cue.Value] API, which is wasteful. 
+ return str.Str + } + x := value.Make(c.ctx, arg) v, err := x.String() if err != nil { - c.invalidArgType(c.args[i], i, "string", err) + c.invalidArgType(arg, i, "string", err) return "" } return v } func (c *CallCtxt) Bytes(i int) []byte { - x := value.Make(c.ctx, c.args[i]) + arg := c.CallContext.Value(i) + if bs, _ := c.ctx.EvaluateKeepState(arg).(*adt.Bytes); bs != nil { + // In the happy path, avoid converting to the public [cue.Value] API, which is wasteful. + return bs.B + } + x := value.Make(c.ctx, arg) v, err := x.Bytes() if err != nil { - c.invalidArgType(c.args[i], i, "bytes", err) + c.invalidArgType(arg, i, "bytes", err) return nil } return v } func (c *CallCtxt) Reader(i int) io.Reader { - x := value.Make(c.ctx, c.args[i]) + arg := c.CallContext.Value(i) + x := value.Make(c.ctx, arg) // TODO: optimize for string and bytes cases r, err := x.Reader() if err != nil { - c.invalidArgType(c.args[i], i, "bytes|string", err) + c.invalidArgType(arg, i, "bytes|string", err) return nil } return r } func (c *CallCtxt) Bool(i int) bool { - x := value.Make(c.ctx, c.args[i]) + arg := c.CallContext.Value(i) + if b, _ := c.ctx.EvaluateKeepState(arg).(*adt.Bool); b != nil { + // In the happy path, avoid converting to the public [cue.Value] API, which is wasteful. 
+ return b.B + } + x := value.Make(c.ctx, arg) b, err := x.Bool() if err != nil { - c.invalidArgType(c.args[i], i, "bool", err) + c.invalidArgType(arg, i, "bool", err) return false } return b } func (c *CallCtxt) List(i int) (a []cue.Value) { - arg := c.args[i] + arg := c.CallContext.Value(i) x := value.Make(c.ctx, arg) v, err := x.List() if err != nil { - c.invalidArgType(c.args[i], i, "list", err) + c.invalidArgType(arg, i, "list", err) return a } for v.Next() { @@ -264,17 +306,17 @@ func (c *CallCtxt) CueList(i int) List { } func (c *CallCtxt) Iter(i int) (a cue.Iterator) { - arg := c.args[i] + arg := c.CallContext.Value(i) x := value.Make(c.ctx, arg) v, err := x.List() if err != nil { - c.invalidArgType(c.args[i], i, "list", err) + c.invalidArgType(arg, i, "list", err) } return v } func (c *CallCtxt) getList(i int) *adt.Vertex { - x := c.args[i] + x := c.CallContext.Value(i) if c.builtin.NonConcrete { x = adt.Default(x) } @@ -296,7 +338,7 @@ func (c *CallCtxt) getList(i int) *adt.Vertex { if b, ok := x.(*adt.Bottom); ok { err = &callError{b} } - c.invalidArgType(c.args[i], i, "list", err) + c.invalidArgType(x, i, "list", err) } else { err := c.ctx.NewErrf("non-concrete list for argument %d", i) err.Code = adt.IncompleteError @@ -311,7 +353,8 @@ func (c *CallCtxt) DecimalList(i int) (a []*apd.Decimal) { return nil } - for j, w := range v.Elems() { + j := 0 + for w := range v.Elems() { w.Finalize(c.ctx) // defensive switch x := adt.Unwrap(adt.Default(w.Value())).(type) { case *adt.Num: @@ -339,6 +382,7 @@ func (c *CallCtxt) DecimalList(i int) (a []*apd.Decimal) { c.Err = &callError{err} return nil } + j++ } return a } @@ -349,7 +393,8 @@ func (c *CallCtxt) StringList(i int) (a []string) { return nil } - for j, w := range v.Elems() { + j := 0 + for w := range v.Elems() { w.Finalize(c.ctx) // defensive switch x := adt.Unwrap(adt.Default(w.Value())).(type) { case *adt.String: @@ -377,6 +422,7 @@ func (c *CallCtxt) StringList(i int) (a []string) { c.Err = 
&callError{err} return nil } + j++ } return a } diff --git a/vendor/cuelang.org/go/internal/pkg/errors.go b/vendor/cuelang.org/go/internal/pkg/errors.go index d374fb9f5c..4c06c2ae53 100644 --- a/vendor/cuelang.org/go/internal/pkg/errors.go +++ b/vendor/cuelang.org/go/internal/pkg/errors.go @@ -48,7 +48,7 @@ func (c *CallCtxt) errf(underlying error, format string, args ...interface{}) { case error: errs = errors.Promote(x, "") } - vErr := c.ctx.NewPosf(c.Pos(), format, args...) + vErr := c.ctx.NewPosf(c.ctx.Pos(), format, args...) c.Err = &callError{&adt.Bottom{Code: code, Err: errors.Wrap(vErr, errs)}} } diff --git a/vendor/cuelang.org/go/internal/pkg/register.go b/vendor/cuelang.org/go/internal/pkg/register.go index 847ecb75ce..0d10537d82 100644 --- a/vendor/cuelang.org/go/internal/pkg/register.go +++ b/vendor/cuelang.org/go/internal/pkg/register.go @@ -15,6 +15,9 @@ package pkg import ( + "path" + + "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" "cuelang.org/go/internal/core/adt" "cuelang.org/go/internal/core/eval" @@ -22,10 +25,14 @@ import ( ) func Register(importPath string, p *Package) { + inst := &build.Instance{ + ImportPath: importPath, + PkgName: path.Base(importPath), + } f := func(r adt.Runtime) (*adt.Vertex, errors.Error) { ctx := eval.NewContext(r, nil) return p.MustCompile(ctx, importPath), nil } - runtime.RegisterBuiltin(importPath, f) + runtime.RegisterBuiltin(inst, f) } diff --git a/vendor/cuelang.org/go/internal/pkg/types.go b/vendor/cuelang.org/go/internal/pkg/types.go index 801971bcce..63fc40923b 100644 --- a/vendor/cuelang.org/go/internal/pkg/types.go +++ b/vendor/cuelang.org/go/internal/pkg/types.go @@ -15,6 +15,8 @@ package pkg import ( + "iter" + "cuelang.org/go/cue" "cuelang.org/go/internal/core/adt" ) @@ -31,7 +33,7 @@ type List struct { } // Elems returns the elements of a list. 
-func (l *List) Elems() []*adt.Vertex { +func (l *List) Elems() iter.Seq[*adt.Vertex] { return l.node.Elems() } @@ -64,17 +66,15 @@ func (s *Struct) Len() int { // IsOpen reports whether s is open or has pattern constraints. func (s *Struct) IsOpen() bool { - if !s.node.IsClosedStruct() { + if s.node.IsOpenStruct() { return true } - // Technically this is not correct, but it is in the context of where - // it is used. + // Check for pattern constraints which indicate openness. if s.node.PatternConstraints != nil && len(s.node.PatternConstraints.Pairs) > 0 { return true } - // The equivalent code for the old implementation. - ot := s.node.OptionalTypes() - return ot&^adt.HasDynamic != 0 + // After removing OptionalTypes, we rely on other checks for openness. + return false } // NumConstraintFields reports the number of explicit optional and required diff --git a/vendor/cuelang.org/go/internal/source/source.go b/vendor/cuelang.org/go/internal/source/source.go index 8ba8c96084..6e43d48128 100644 --- a/vendor/cuelang.org/go/internal/source/source.go +++ b/vendor/cuelang.org/go/internal/source/source.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "io" + "math" "os" "strings" ) @@ -37,36 +38,72 @@ func ReadAll(filename string, src any) ([]byte, error) { return src, nil case *bytes.Buffer: // is io.Reader, but src is already available in []byte form - if src != nil { - return src.Bytes(), nil - } + return src.Bytes(), nil case io.Reader: - var buf bytes.Buffer - if _, err := io.Copy(&buf, src); err != nil { - return nil, err - } - return buf.Bytes(), nil + return io.ReadAll(src) } return nil, fmt.Errorf("invalid source type %T", src) } return os.ReadFile(filename) } -// Open creates a source reader for the given arguments. If src != nil, -// Open converts src to an io.Open if possible; otherwise it returns an -// error. If src == nil, Open returns the result of opening the file -// specified by filename. 
-func Open(filename string, src any) (io.ReadCloser, error) { +// ReadAllSize is like [io.ReadAll] while taking advantage of a size hint for the input reader, +// much like [os.ReadFile] does when reading regular files with a known size. +// When the size hint is negative, it simply uses [io.ReadAll]. +func ReadAllSize(r io.Reader, size int) ([]byte, error) { + if size >= 0 { + // We use a [bytes.Buffer] here, because the given size is a hint, + // and not guaranteed to be exactly correct. + // + // Before each read, [bytes.Buffer] ensures that the internal buffer + // has enough available capacity to read at least [bytes.MinRead] bytes. + // Many readers tend to signal EOF via a final (0, EOF) read, + // which then triggers growing the slice to accomodate [bytes.MinRead]. + buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + _, err := buf.ReadFrom(r) + return buf.Bytes(), err + } + return io.ReadAll(r) +} + +// Open creates a source reader for the given arguments. +// If src != nil, Open converts src to an [io.Reader] if possible; otherwise it returns an error. +// If src == nil, Open returns the result of opening the file specified by filename. +// +// The caller must check if the result is an [io.Closer], and if so, close it when done. +// The size of the opened reader is returned if possible, or -1 otherwise. 
+func Open(filename string, src any) (_ io.Reader, size int, _ error) { if src != nil { switch src := src.(type) { case string: - return io.NopCloser(strings.NewReader(src)), nil + return strings.NewReader(src), len(src), nil case []byte: - return io.NopCloser(bytes.NewReader(src)), nil + return bytes.NewReader(src), len(src), nil + case *os.File: + return fileWithSize(src) case io.Reader: - return io.NopCloser(src), nil + return src, -1, nil } - return nil, fmt.Errorf("invalid source type %T", src) + return nil, -1, fmt.Errorf("invalid source type %T", src) + } + f, err := os.Open(filename) + if err != nil { + return nil, -1, err + } + return fileWithSize(f) +} + +func fileWithSize(f *os.File) (io.Reader, int, error) { + // If we just opened a regular file, return its size too. + // If we can't get its size, such as non-regular files, don't give one. + stat, err := f.Stat() + if err != nil || !stat.Mode().IsRegular() { + return f, -1, nil + } + size := stat.Size() + // If the size would overflow an int, it won't fit in memory anyway. + if size > math.MaxInt { + return f, -1, nil } - return os.Open(filename) + return f, int(size), nil } diff --git a/vendor/cuelang.org/go/internal/task/task.go b/vendor/cuelang.org/go/internal/task/task.go index 78885d328c..a6890861bb 100644 --- a/vendor/cuelang.org/go/internal/task/task.go +++ b/vendor/cuelang.org/go/internal/task/task.go @@ -19,18 +19,22 @@ import ( "context" "io" "sync" + "sync/atomic" "cuelang.org/go/cue" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" - "cuelang.org/go/internal/core/adt" "cuelang.org/go/internal/value" + "cuelang.org/go/tools/flow" ) // A Context provides context for running a task. 
type Context struct { Context context.Context + TaskKey func(v cue.Value) (string, error) + + Root cue.Value Stdin io.Reader Stdout io.Writer Stderr io.Writer @@ -90,6 +94,130 @@ func (c *Context) addErr(v cue.Value, wrap error, format string, args ...interfa c.Err = errors.Append(c.Err, errors.Wrap(err, wrap)) } +// ErrLegacy is a sentinel error value that may be returned by a TaskKey +// function to indicate that the task is a legacy task. This will cause the +// configuration value to be passed to the RunnerFunc instead of an empty +// value. +var ErrLegacy error = errors.New("legacy task error") + +// NewTaskFunc creates a flow.TaskFunc that uses global settings from Context +// and a taskKey function to determine the kind of task to run. +func (c Context) TaskFunc(didWork *atomic.Bool) flow.TaskFunc { + return func(v cue.Value) (flow.Runner, error) { + kind, err := c.TaskKey(v) + var isLegacy bool + if err == ErrLegacy { + err = nil + isLegacy = true + } + if err != nil || kind == "" { + return nil, err + } + + didWork.Store(true) + + rf := Lookup(kind) + if rf == nil { + return nil, errors.Newf(v.Pos(), "runner of kind %q not found", kind) + } + + // Verify entry against template. + v = value.UnifyBuiltin(v, kind) + if err := v.Err(); err != nil { + err = v.Validate() + return nil, errors.Promote(err, "newTask") + } + + runner, err := rf(v) + if err != nil { + return nil, errors.Promote(err, "errors running task") + } + + if !isLegacy { + v = cue.Value{} + } + + return c.flowFunc(runner, v), nil + } +} + +// flowFunc takes a Runner and a schema v, which should only be defined for +// legacy task ids. +func (c Context) flowFunc(runner Runner, v cue.Value) flow.Runner { + wrapper := &flowRunner{c: c, runner: runner, v: v} + // If the runner declares it is a service, return a + // wrapper that implements both Runner and Service. 
+ if ce, ok := runner.(flow.Service); ok && ce.IsService() { + return &flowRunnerWithService{flowRunner: wrapper} + } + return wrapper +} + +// flowRunner wraps a task.Runner to implement flow.Runner. +type flowRunner struct { + c Context + runner Runner + v cue.Value +} + +func (r *flowRunner) Run(t *flow.Task, err error) error { + // Set task-specific values. + r.c.Context = t.Context() + r.c.Obj = t.Value() + if r.v.Exists() { + r.c.Obj = r.c.Obj.Unify(r.v) + } + value, runErr := r.runner.Run(&r.c) + if runErr != nil { + return runErr + } + if value != nil { + _ = t.Fill(value) + } + return nil +} + +// flowRunnerWithService wraps a flowRunner and also implements +// flow.Service. +type flowRunnerWithService struct { + *flowRunner +} + +func (r *flowRunnerWithService) IsService() bool { + return true +} + +// ForkRunLoop is used to serve an external event. It makes a copy of the +// configuration that results from the first phase and than patches the +// task at path to run the given runner, instead of the initialization phase. +func (c Context) ForkRunLoop(ctx context.Context, path cue.Path, v cue.Value, r Runner) *flow.Controller { + cfg := &flow.Config{ + Root: path, + InferTasks: true, + IgnoreConcrete: true, + RunInferredTasks: true, // Run inferred tasks since inputs are filled + } + + // Fill the root with the request data. The v value contains the filled + // serve task, including request fields. We need to ensure dependent tasks + // can access this data through references. + root := c.Root.FillPath(path, v) + + taskFunc := func(v cue.Value) (flow.Runner, error) { + // The node itself has a function to continue. 
+ if v.Path().String() == path.String() { + return c.flowFunc(r, cue.Value{}), nil + } + var didWork atomic.Bool + // if !didWork.Load() { + // return nil, fmt.Errorf("%v: no tasks found", cmdPath) + // } + return c.TaskFunc(&didWork)(v) + } + + return flow.New(cfg, root, taskFunc) +} + // taskError wraps some error values to retain position information about the // error. type taskError struct { @@ -114,12 +242,11 @@ func (t *taskError) Position() token.Pos { func (t *taskError) InputPositions() (a []token.Pos) { _, nx := value.ToInternal(t.v) - nx.VisitLeafConjuncts(func(x adt.Conjunct) bool { + for x := range nx.LeafConjuncts() { if src := x.Source(); src != nil { a = append(a, src.Pos()) } - return true - }) + } return a } @@ -138,6 +265,23 @@ type Runner interface { Run(ctx *Context) (results interface{}, err error) } +// Background indicates whether the task is running in the background after +// finishing. +var Background atomic.Bool + +// BackgroundTask must be used by a task to indicate that it is running in the +// background. +// TODO: this is a hack. We should have a better way to indicate this. Also, +// introduce mechanism to cancel and background tasks, detect when they are +// done, and collect errors. +// Maybe do something like put a sync.WaitGroup (or errgroup.Group) inside +// task.Context and have a way to add to it (perhaps just expose a Go method) +// and wait for it to complete (in practice you'd probably select on that +// finishing and os.Interrupt)? +func (c *Context) BackgroundTask() { + Background.Store(true) +} + // Register registers a task for cue commands. 
func Register(key string, f RunnerFunc) { runners.Store(key, f) diff --git a/vendor/cuelang.org/go/internal/tools.mod b/vendor/cuelang.org/go/internal/tools.mod index 085c3be665..536793b1e9 100644 --- a/vendor/cuelang.org/go/internal/tools.mod +++ b/vendor/cuelang.org/go/internal/tools.mod @@ -3,15 +3,15 @@ // TODO(mvdan): once we stabilize on this model, have CI ensure this module is tidy too. module test/tools -go 1.23.0 +go 1.25.0 tool honnef.co/go/tools/cmd/staticcheck require ( github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 // indirect - golang.org/x/mod v0.23.0 // indirect - golang.org/x/sync v0.11.0 // indirect - golang.org/x/tools v0.30.0 // indirect - honnef.co/go/tools v0.6.1 // indirect + golang.org/x/mod v0.31.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/tools v0.40.1-0.20260108161641-ca281cf95054 // indirect + honnef.co/go/tools v0.7.0 // indirect ) diff --git a/vendor/cuelang.org/go/internal/tools.sum b/vendor/cuelang.org/go/internal/tools.sum index 7843cd99aa..c7f4fa9acf 100644 --- a/vendor/cuelang.org/go/internal/tools.sum +++ b/vendor/cuelang.org/go/internal/tools.sum @@ -1,13 +1,12 @@ github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 h1:1P7xPZEwZMoBoz0Yze5Nx2/4pxj6nw9ZqHWXqP0iRgQ= golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/sync v0.11.0 
h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= -honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= -honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/tools v0.40.1-0.20260108161641-ca281cf95054 h1:CHVDrNHx9ZoOrNN9kKWYIbT5Rj+WF2rlwPkhbQQ5V4U= +golang.org/x/tools v0.40.1-0.20260108161641-ca281cf95054/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +honnef.co/go/tools v0.7.0 h1:w6WUp1VbkqPEgLz4rkBzH/CSU6HkoqNLp6GstyTx3lU= +honnef.co/go/tools v0.7.0/go.mod h1:pm29oPxeP3P82ISxZDgIYeOaf9ta6Pi0EWvCFoLG2vc= diff --git a/vendor/cuelang.org/go/internal/types/value.go b/vendor/cuelang.org/go/internal/types/value.go index 7b0301f773..c03fae3835 100644 --- a/vendor/cuelang.org/go/internal/types/value.go +++ b/vendor/cuelang.org/go/internal/types/value.go @@ -25,13 +25,5 @@ type Value struct { } type Interface interface { - Core(v *Value) -} - -func CastValue(t *Value, x interface{}) bool { - c, ok := x.(Interface) - if ok { - c.Core(t) - } - return ok + Core() Value } diff --git a/vendor/cuelang.org/go/internal/value/value.go b/vendor/cuelang.org/go/internal/value/value.go index d7db51503a..d00480765f 100644 --- a/vendor/cuelang.org/go/internal/value/value.go +++ b/vendor/cuelang.org/go/internal/value/value.go @@ -24,7 +24,6 @@ import ( "cuelang.org/go/internal/core/convert" "cuelang.org/go/internal/core/eval" "cuelang.org/go/internal/core/runtime" - 
"cuelang.org/go/internal/types" ) // Context returns the cue.Context of the given argument. @@ -64,14 +63,12 @@ func OpContext[Ctx *cue.Runtime | *cue.Context | cue.Value](c Ctx) *adt.OpContex } func ToInternal(v cue.Value) (*runtime.Runtime, *adt.Vertex) { - var t types.Value - v.Core(&t) + t := v.Core() return t.R, t.V } func Vertex(v cue.Value) *adt.Vertex { - var t types.Value - v.Core(&t) + t := v.Core() return t.V } @@ -83,13 +80,14 @@ func Make(ctx *adt.OpContext, v adt.Value) cue.Value { // UnifyBuiltin returns the given Value unified with the given builtin template. func UnifyBuiltin(v cue.Value, kind string) cue.Value { pkg, name, _ := strings.Cut(kind, ".") - s := runtime.SharedRuntime().LoadImport(pkg) + ctx := v.Context() + rt := (*runtime.Runtime)(ctx) + s := rt.LoadBuiltin(pkg) if s == nil { return v } - ctx := v.Context() - a := s.Lookup((*runtime.Runtime)(ctx).Label(name, false)) + a := s.Lookup(rt.Label(name, false)) if a == nil { return v } @@ -101,7 +99,7 @@ func FromGoValue(r *cue.Context, x interface{}, nilIsTop bool) cue.Value { rt := (*runtime.Runtime)(r) rt.Init() ctx := eval.NewContext(rt, nil) - v := convert.GoValueToValue(ctx, x, nilIsTop) + v := convert.FromGoValue(ctx, x, nilIsTop) n := adt.ToVertex(v) return r.Encode(n) } @@ -110,7 +108,7 @@ func FromGoType(r *cue.Context, x interface{}) cue.Value { rt := (*runtime.Runtime)(r) rt.Init() ctx := eval.NewContext(rt, nil) - expr, err := convert.GoTypeToExpr(ctx, x) + expr, err := convert.FromGoType(ctx, x) if err != nil { expr = &adt.Bottom{Err: err} } diff --git a/vendor/cuelang.org/go/mod/modfile/modfile.go b/vendor/cuelang.org/go/mod/modfile/modfile.go index ef16d149a1..f4e814440c 100644 --- a/vendor/cuelang.org/go/mod/modfile/modfile.go +++ b/vendor/cuelang.org/go/mod/modfile/modfile.go @@ -211,7 +211,7 @@ func ParseLegacy(modfile []byte, filename string) (*File, error) { }, nil } -// ParseNonStrict is like Parse but allows some laxity in the parsing: +// ParseNonStrict is like 
[Parse] but allows some laxity in the parsing: // - if a module path lacks a version, it's taken from the version. // - if a non-canonical version is used, it will be canonicalized. // diff --git a/vendor/cuelang.org/go/mod/modregistry/client.go b/vendor/cuelang.org/go/mod/modregistry/client.go index a0249f5b8a..3eef69fa57 100644 --- a/vendor/cuelang.org/go/mod/modregistry/client.go +++ b/vendor/cuelang.org/go/mod/modregistry/client.go @@ -136,12 +136,15 @@ func (src *Client) Mirror(ctx context.Context, dst *Client, mv module.Version) e // We've uploaded all the blobs referenced by the manifest; now // we can upload the manifest itself. if _, err := dstLoc.Registry.ResolveManifest(ctx, dstLoc.Repository, m.manifestDigest); err == nil { - return nil + // Manifest already exists, but we still need to check for referrers + return src.mirrorReferrers(ctx, dst, m.loc, dstLoc, m.manifestDigest) } if _, err := dstLoc.Registry.PushManifest(ctx, dstLoc.Repository, dstLoc.Tag, m.manifestContents, ocispec.MediaTypeImageManifest); err != nil { return nil } - return nil + + // Mirror any referrers that point to this manifest + return src.mirrorReferrers(ctx, dst, m.loc, dstLoc, m.manifestDigest) } func mirrorBlob(ctx context.Context, srcLoc, dstLoc RegistryLocation, desc ocispec.Descriptor) error { @@ -164,6 +167,72 @@ func mirrorBlob(ctx context.Context, srcLoc, dstLoc RegistryLocation, desc ocisp return nil } +// mirrorReferrers mirrors all referrers that point to the given manifest digest. 
+func (src *Client) mirrorReferrers(ctx context.Context, dst *Client, srcLoc, dstLoc RegistryLocation, manifestDigest ociregistry.Digest) error { + var g errgroup.Group + + // Iterate through all referrers that point to this manifest + for referrerDesc, err := range srcLoc.Registry.Referrers(ctx, srcLoc.Repository, manifestDigest, "") { + if err != nil { + return fmt.Errorf("failed to get referrers: %w", err) + } + + g.Go(func() error { + return src.mirrorReferrer(ctx, dst, srcLoc, dstLoc, referrerDesc) + }) + } + + return g.Wait() +} + +// mirrorReferrer mirrors a single referrer manifest and its associated blobs. +func (src *Client) mirrorReferrer(ctx context.Context, dst *Client, srcLoc, dstLoc RegistryLocation, referrerDesc ocispec.Descriptor) error { + // Check if the referrer manifest already exists in the destination + if _, err := dstLoc.Registry.ResolveManifest(ctx, dstLoc.Repository, referrerDesc.Digest); err == nil { + // Manifest already exists, nothing to do + return nil + } + + // Get the referrer manifest from source + r, err := srcLoc.Registry.GetManifest(ctx, srcLoc.Repository, referrerDesc.Digest) + if err != nil { + return fmt.Errorf("failed to get referrer manifest %s: %w", referrerDesc.Digest, err) + } + defer r.Close() + + manifestData, err := io.ReadAll(r) + if err != nil { + return fmt.Errorf("failed to read referrer manifest %s: %w", referrerDesc.Digest, err) + } + + // Parse the manifest to get the blobs it references + manifest, err := unmarshalManifest(manifestData, r.Descriptor().MediaType) + if err != nil { + return fmt.Errorf("failed to parse referrer manifest %s: %w", referrerDesc.Digest, err) + } + + // Mirror all blobs referenced by this referrer manifest (excluding Subject) + var g errgroup.Group + g.Go(func() error { + return mirrorBlob(ctx, srcLoc, dstLoc, manifest.Config) + }) + for _, desc := range manifest.Layers { + g.Go(func() error { + return mirrorBlob(ctx, srcLoc, dstLoc, desc) + }) + } + if err := g.Wait(); err != nil { 
+ return fmt.Errorf("failed to mirror blobs for referrer %s: %w", referrerDesc.Digest, err) + } + + // Push the referrer manifest itself (without a tag, just by content) + if _, err := dstLoc.Registry.PushManifest(ctx, dstLoc.Repository, "", manifestData, r.Descriptor().MediaType); err != nil { + return fmt.Errorf("failed to push referrer manifest %s: %w", referrerDesc.Digest, err) + } + + return nil +} + // GetModule returns the module instance for the given version. // It returns an error that satisfies [errors.Is]([ErrNotFound]) if the // module is not present in the store at this version. diff --git a/vendor/cuelang.org/go/mod/module/dirfs.go b/vendor/cuelang.org/go/mod/module/dirfs.go index c239635de7..fe66e830ac 100644 --- a/vendor/cuelang.org/go/mod/module/dirfs.go +++ b/vendor/cuelang.org/go/mod/module/dirfs.go @@ -16,7 +16,7 @@ type SourceLoc struct { Dir string } -// ReadCUE can be implemented by an [fs.FS] +// ReadCUEFS can be implemented by an [fs.FS] // to provide an optimized (cached) way of // reading and parsing CUE syntax. type ReadCUEFS interface { @@ -28,7 +28,20 @@ type ReadCUEFS interface { // If this method is implemented, but the implementation // does not support reading CUE files, // it should return [errors.ErrUnsupported]. + // + // This method may be called with paths which do not have a `.cue` + // suffix. If the implementation is unable to read-and-convert (as + // necessary) a path to a CUE AST, it should return (nil, nil). ReadCUEFile(path string, cfg parser.Config) (*ast.File, error) + + // IsDirWithCUEFiles reports whether the given path is a directory + // which contains files for which this implementation would attempt + // to read and parse, if its ReadCUEFile method were called. + // + // If this method is implemented, but the implementation does not + // support examining directories, it should return + // [errors.ErrUnsupported]. 
+ IsDirWithCUEFiles(path string) (bool, error) } // OSRootFS can be implemented by an [fs.FS] diff --git a/vendor/cuelang.org/go/mod/module/module.go b/vendor/cuelang.org/go/mod/module/module.go index 9914f569b0..4ebf9ff395 100644 --- a/vendor/cuelang.org/go/mod/module/module.go +++ b/vendor/cuelang.org/go/mod/module/module.go @@ -85,6 +85,7 @@ import ( "slices" "strings" + "cuelang.org/go/cue/ast" "cuelang.org/go/internal/mod/semver" ) @@ -133,7 +134,7 @@ func (m Version) BasePath() string { if m.IsLocal() { return m.path } - basePath, _, ok := SplitPathVersion(m.path) + basePath, _, ok := ast.SplitPackageVersion(m.path) if !ok { panic(fmt.Errorf("broken invariant: failed to split version in %q", m.path)) } @@ -183,7 +184,7 @@ func MustParseVersion(s string) Version { // The version must be canonical (i.e. it can't be // just a major version). func ParseVersion(s string) (Version, error) { - basePath, vers, ok := SplitPathVersion(s) + basePath, vers, ok := ast.SplitPackageVersion(s) if !ok { return Version{}, fmt.Errorf("invalid module path@version %q", s) } @@ -222,19 +223,19 @@ func NewVersion(path string, version string) (Version, error) { return Version{}, fmt.Errorf("version %q (of module %q) is not canonical", version, path) } maj := semver.Major(version) - _, vmaj, ok := SplitPathVersion(path) + _, vmaj, ok := ast.SplitPackageVersion(path) if ok && maj != vmaj { return Version{}, fmt.Errorf("mismatched major version suffix in %q (version %v)", path, version) } if !ok { fullPath := path + "@" + maj - if _, _, ok := SplitPathVersion(fullPath); !ok { + if _, _, ok := ast.SplitPackageVersion(fullPath); !ok { return Version{}, fmt.Errorf("cannot form version path from %q, version %v", path, version) } path = fullPath } default: - base, _, ok := SplitPathVersion(path) + base, _, ok := ast.SplitPackageVersion(path) if !ok { return Version{}, fmt.Errorf("path %q has no major version", path) } diff --git a/vendor/cuelang.org/go/mod/module/path.go 
b/vendor/cuelang.org/go/mod/module/path.go index 073a84c9bd..38a12f4330 100644 --- a/vendor/cuelang.org/go/mod/module/path.go +++ b/vendor/cuelang.org/go/mod/module/path.go @@ -40,7 +40,7 @@ func Check(path, version string) error { Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")}, } } - _, pathMajor, _ := SplitPathVersion(path) + _, pathMajor, _ := ast.SplitPackageVersion(path) if err := CheckPathMajor(version, pathMajor); err != nil { return &ModuleError{Path: path, Err: err} } @@ -105,10 +105,10 @@ func fileNameOK(r rune) bool { return unicode.IsLetter(r) } -// CheckPathWithoutVersion is like CheckPath except that +// CheckPathWithoutVersion is like [CheckPath] except that // it expects a module path without a major version. func CheckPathWithoutVersion(basePath string) (err error) { - if _, _, ok := SplitPathVersion(basePath); ok { + if _, _, ok := ast.SplitPackageVersion(basePath); ok { return fmt.Errorf("module path inappropriately contains version") } if err := checkPath(basePath, modulePath); err != nil { @@ -162,7 +162,7 @@ func CheckPath(mpath string) (err error) { } }() - basePath, vers, ok := SplitPathVersion(mpath) + basePath, vers, ok := ast.SplitPackageVersion(mpath) if ok { if semver.Major(vers) != vers { return fmt.Errorf("path can contain major version only") @@ -194,7 +194,7 @@ func CheckPath(mpath string) (err error) { // The element prefix up to the first dot must not be a reserved file name // on Windows, regardless of case (CON, com1, NuL, and so on). func CheckImportPath(path string) error { - parts := ParseImportPath(path) + parts := ast.ParseImportPath(path) if semver.Major(parts.Version) != parts.Version { return &InvalidPathError{ Kind: "import", @@ -389,6 +389,8 @@ var badWindowsNames = []string{ // SplitPathVersion("foo.com/bar@") returns ("foo.com/bar@", "", false). // // Deprecated: use [ast.SplitPackageVersion] instead. 
+// +//go:fix inline func SplitPathVersion(path string) (prefix, version string, ok bool) { return ast.SplitPackageVersion(path) } @@ -396,13 +398,17 @@ func SplitPathVersion(path string) (prefix, version string, ok bool) { // ImportPath holds the various components of an import path. // // Deprecated: use [ast.ImportPath] instead. +// +//go:fix inline type ImportPath = ast.ImportPath // ParseImportPath returns the various components of an import path. // It does not check the result for validity. // // Deprecated: use [ast.ParseImportPath] instead. -func ParseImportPath(p string) ImportPath { +// +//go:fix inline +func ParseImportPath(p string) ast.ImportPath { return ast.ParseImportPath(p) } diff --git a/vendor/cuelang.org/go/mod/module/versions.go b/vendor/cuelang.org/go/mod/module/versions.go index ef4551a596..5e13bef3b3 100644 --- a/vendor/cuelang.org/go/mod/module/versions.go +++ b/vendor/cuelang.org/go/mod/module/versions.go @@ -7,17 +7,14 @@ import ( // Versions implements mvs.Versions[Version]. type Versions struct{} -// New implements mvs.Versions[Version].Version. func (Versions) Version(v Version) string { return v.Version() } -// New implements mvs.Versions[Version].Path. func (Versions) Path(v Version) string { return v.Path() } -// New implements mvs.Versions[Version].New. func (Versions) New(p, v string) (Version, error) { return NewVersion(p, v) } diff --git a/vendor/cuelang.org/go/mod/modzip/zip.go b/vendor/cuelang.org/go/mod/modzip/zip.go index 0ee2f18be2..48d72bbced 100644 --- a/vendor/cuelang.org/go/mod/modzip/zip.go +++ b/vendor/cuelang.org/go/mod/modzip/zip.go @@ -74,7 +74,7 @@ const ( MaxLICENSE = 16 << 20 ) -// File provides an abstraction for a file in a directory, zip, or anything +// FileIO provides an abstraction for a file in a directory, zip, or anything // else that looks like a file - it knows how to open files represented // as a particular type without being a file itself. 
// @@ -785,8 +785,8 @@ func listFilesInDir(dir string) (files []dirFile, omitted []FileError, err error return filepath.SkipDir } - // Skip submodules (directories containing go.mod files). - if goModInfo, err := os.Lstat(filepath.Join(filePath, "go.mod")); err == nil && !goModInfo.IsDir() { + // Skip submodules (directories containing cue.mod). + if _, err := os.Lstat(filepath.Join(filePath, "cue.mod")); err == nil { omitted = append(omitted, FileError{Path: slashPath, Err: errSubmoduleDir}) return filepath.SkipDir } diff --git a/vendor/cuelang.org/go/pkg/encoding/openapi/openapi.cue b/vendor/cuelang.org/go/pkg/encoding/openapi/openapi.cue new file mode 100644 index 0000000000..4ca0f57042 --- /dev/null +++ b/vendor/cuelang.org/go/pkg/encoding/openapi/openapi.cue @@ -0,0 +1,56 @@ +// Copyright 2023 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapi + +// #Config represents options for generating OpenAPI. +#Config: { + // version is fixed to 3.0.0 for now. + version!: "3.0.0" + + info?: #Info + + // selfContained causes all non-expanded external references to be included + // in this document. + selfContained: bool | *false + + // expandReferences replaces references with actual objects when generating + // OpenAPI Schema. It is an error for an CUE value to refer to itself + // if this option is used. + expandReferences: bool | *false +} + +// #Info represents metadata about the API. 
+#Info: { + title!: string + version!: string + summary?: string + description?: string + termsOfService?: string + contact?: #Contact + license?: #License +} + +// #Contact represents contact information for the exposed API. +#Contact: { + name?: string + url?: string + email?: string +} + +// #License represents license information for the exposed API. +#License: { + name!: string + url?: string +} diff --git a/vendor/cuelang.org/go/pkg/encoding/openapi/openapi.go b/vendor/cuelang.org/go/pkg/encoding/openapi/openapi.go new file mode 100644 index 0000000000..692ad9e7b4 --- /dev/null +++ b/vendor/cuelang.org/go/pkg/encoding/openapi/openapi.go @@ -0,0 +1,94 @@ +// Copyright 2023 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package openapi provides OpenAPI encoding and decoding functionality. +// +// This is an EXPERIMENTAL API. +package openapi + +import ( + "cuelang.org/go/cue" + "cuelang.org/go/encoding/openapi" + "cuelang.org/go/internal/core/adt" + "cuelang.org/go/internal/pkg" + "cuelang.org/go/internal/value" +) + +var ( + selfContainedPath = cue.ParsePath("selfContained") + expandReferencesPath = cue.ParsePath("expandReferences") + infoPath = cue.ParsePath("info") +) + +// Marshal returns the OpenAPI encoding of schema for the given OpenAPI version. +// The optional config value can be used to make further adjustments. +// +// Experimental: this API may change. 
+// +// schema can have the following fields: +// +// #Config: { +// // version holds the OpenAPI version to use when marshaling. +// // Currently only "3.0.0" is supported. +// version!: "3.0.0" // currently "3.0.0" only + +// // selfContained causes all non-expanded external references +// // to be included// +// selfContained?: bool +// +// // expandReferences replaces references with actual objects when generating +// // OpenAPI Schema. It is an error for an CUE value to refer to itself +// // if this option is used. +// expandReferences?: bool +// +// // info specifies the info section of the OpenAPI document. To be a valid +// // OpenAPI document, it must include at least the title and version fields. +// info?: { +// title: string +// description: string +// version: string +// } +// } +func MarshalSchema(config cue.Value, schema pkg.Schema) (string, error) { + // TODO: implement a proper struct for schema. + + ctx := value.OpContext(schema) + return marshalSchema(ctx, config, schema) +} + +func marshalSchema(_ *adt.OpContext, config cue.Value, schema pkg.Schema) (string, error) { + selfContained, _ := config.LookupPath(selfContainedPath).Bool() + expandReferences, _ := config.LookupPath(expandReferencesPath).Bool() + + version, err := config.LookupPath(cue.ParsePath("version")).String() + if err != nil { + return "", err + } + + c := &openapi.Config{ + Version: version, + SelfContained: selfContained, + ExpandReferences: expandReferences, + } + + if info := config.LookupPath(infoPath); info.Exists() { + c.Info = info + } + + b, err := openapi.Gen(schema, c) + if err != nil { + return "", err + } + return string(b), err +} diff --git a/vendor/cuelang.org/go/pkg/encoding/openapi/pkg.go b/vendor/cuelang.org/go/pkg/encoding/openapi/pkg.go new file mode 100644 index 0000000000..32a74eac4e --- /dev/null +++ b/vendor/cuelang.org/go/pkg/encoding/openapi/pkg.go @@ -0,0 +1,58 @@ +// Code generated by cuelang.org/go/pkg/gen. DO NOT EDIT. 
+ +package openapi + +import ( + "cuelang.org/go/internal/core/adt" + "cuelang.org/go/internal/pkg" +) + +func init() { + pkg.Register("encoding/openapi", p) +} + +var _ = adt.TopKind // in case the adt package isn't used + +var p = &pkg.Package{ + Native: []*pkg.Builtin{{ + Name: "MarshalSchema", + Params: []pkg.Param{ + {Kind: adt.TopKind}, + {Kind: adt.TopKind}, + }, + Result: adt.StringKind, + NonConcrete: true, + Func: func(c *pkg.CallCtxt) { + config, schema := c.Value(0), c.Schema(1) + if c.Do() { + c.Ret, c.Err = marshalSchema(c.OpContext(), config, schema) + } + }, + }}, + CUE: `{ + #Config: { + version!: "3.0.0" + info?: #Info + selfContained: bool | *false + expandReferences: bool | *false + } + #Info: { + title!: string + version!: string + summary?: string + description?: string + termsOfService?: string + contact?: #Contact + license?: #License + } + #Contact: { + name?: string + url?: string + email?: string + } + #License: { + name!: string + url?: string + } +}`, +} diff --git a/vendor/cuelang.org/go/pkg/encoding/yaml/manual.go b/vendor/cuelang.org/go/pkg/encoding/yaml/manual.go index 5bf3db3963..bee74e9fae 100644 --- a/vendor/cuelang.org/go/pkg/encoding/yaml/manual.go +++ b/vendor/cuelang.org/go/pkg/encoding/yaml/manual.go @@ -31,7 +31,7 @@ func Marshal(v cue.Value) (string, error) { if err := v.Validate(cue.Concrete(true)); err != nil { return "", err } - n := v.Syntax(cue.Final(), cue.Concrete(true)) + n := v.Syntax(cue.Concrete(true)) b, err := cueyaml.Encode(n) return string(b), err } @@ -52,7 +52,7 @@ func MarshalStream(v cue.Value) (string, error) { if err := v.Validate(cue.Concrete(true)); err != nil { return "", err } - n := v.Syntax(cue.Final(), cue.Concrete(true)) + n := v.Syntax(cue.Concrete(true)) b, err := cueyaml.Encode(n) if err != nil { return "", err diff --git a/vendor/cuelang.org/go/pkg/list/list.go b/vendor/cuelang.org/go/pkg/list/list.go index 46ca0789b2..f2fadf83ac 100644 --- a/vendor/cuelang.org/go/pkg/list/list.go +++ 
b/vendor/cuelang.org/go/pkg/list/list.go @@ -24,8 +24,8 @@ import ( "cuelang.org/go/cue/token" "cuelang.org/go/internal/core/adt" "cuelang.org/go/internal/core/eval" + "cuelang.org/go/internal/iterutil" "cuelang.org/go/internal/pkg" - "cuelang.org/go/internal/types" "cuelang.org/go/internal/value" ) @@ -230,7 +230,7 @@ func Reverse(x []cue.Value) []cue.Value { // MinItems reports whether a has at least n items. func MinItems(list pkg.List, n int) (bool, error) { - count := len(list.Elems()) + count := iterutil.Count(list.Elems()) if count >= n { return true, nil } @@ -246,7 +246,7 @@ func MinItems(list pkg.List, n int) (bool, error) { // MaxItems reports whether a has at most n items. func MaxItems(list pkg.List, n int) (bool, error) { - count := len(list.Elems()) + count := iterutil.Count(list.Elems()) if count > n { return false, pkg.ValidationError{B: &adt.Bottom{ Code: adt.EvalError, @@ -270,8 +270,7 @@ func UniqueItems(a []cue.Value) (bool, error) { // - Sort the elements based on the hash value. // - Compare subsequent elements to see if they are equal. - var tv types.Value - a[0].Core(&tv) + tv := a[0].Core() ctx := eval.NewContext(tv.R, tv.V) posX, posY := 0, 0 diff --git a/vendor/cuelang.org/go/pkg/list/sort.go b/vendor/cuelang.org/go/pkg/list/sort.go index 1892fe92ea..917572a7bc 100644 --- a/vendor/cuelang.org/go/pkg/list/sort.go +++ b/vendor/cuelang.org/go/pkg/list/sort.go @@ -23,10 +23,8 @@ import ( "sort" "cuelang.org/go/cue" - "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" "cuelang.org/go/internal/core/eval" - "cuelang.org/go/internal/types" ) // valueSorter defines a sort.Interface; implemented in cue/builtinutil.go. @@ -56,48 +54,7 @@ func (s *valueSorter) Less(i, j int) bool { return false } - if s.ctx.Version == internal.DevVersion { - return s.lessNew(i, j) - } - - var x, y types.Value - s.a[i].Core(&x) - s.a[j].Core(&y) - - // Save the state of all relevant arcs and restore later for the - // next comparison. 
- saveCmp := *s.cmp - saveLess := *s.less - saveX := *s.x - saveY := *s.y - - s.x.InsertConjunctsFrom(x.V) - s.y.InsertConjunctsFrom(y.V) - - // TODO(perf): if we can determine that the comparator values for - // x and y are idempotent (no arcs and a basevalue being top or - // a struct or list marker), then we do not need to reevaluate the input. - // In that case, we can use the below code instead of the above two loops - // setting the conjuncts. This may improve performance significantly. - // - // s.x.BaseValue = x.V.BaseValue - // s.x.Arcs = x.V.Arcs - // s.y.BaseValue = y.V.BaseValue - // s.y.Arcs = y.V.Arcs - - s.less.Finalize(s.ctx) - isLess := s.ctx.BoolValue(s.less) - if b := s.less.Err(s.ctx); b != nil && s.err == nil { - s.err = b.Err - return true - } - - *s.less = saveLess - *s.cmp = saveCmp - *s.x = saveX - *s.y = saveY - - return isLess + return s.lessNew(i, j) } func (s *valueSorter) lessNew(i, j int) bool { @@ -115,9 +72,8 @@ func (s *valueSorter) lessNew(i, j int) bool { xa := getArc(ctx, n, "x") ya := getArc(ctx, n, "y") - var x, y types.Value - s.a[i].Core(&x) - s.a[j].Core(&y) + x := s.a[i].Core() + y := s.a[j].Core() xa.InsertConjunctsFrom(x.V) ya.InsertConjunctsFrom(y.V) @@ -151,8 +107,7 @@ func makeValueSorter(list []cue.Value, cmp cue.Value) (s valueSorter) { return valueSorter{err: v.Err()} } - var v types.Value - cmp.Core(&v) + v := cmp.Core() ctx := eval.NewContext(v.R, v.V) n := &adt.Vertex{ diff --git a/vendor/cuelang.org/go/pkg/math/bits/manual.go b/vendor/cuelang.org/go/pkg/math/bits/manual.go index 87d153f472..f029f94f07 100644 --- a/vendor/cuelang.org/go/pkg/math/bits/manual.go +++ b/vendor/cuelang.org/go/pkg/math/bits/manual.go @@ -25,18 +25,14 @@ import ( "math/bits" ) -// Lsh returns x shifted left by n bits. +// Lsh sets and returns x shifted left by n bits. func Lsh(x *big.Int, n uint) *big.Int { - var z big.Int - z.Lsh(x, n) - return &z + return x.Lsh(x, n) } -// Rsh returns x shifted right by n bits. 
+// Rsh sets and returns x shifted right by n bits. func Rsh(x *big.Int, n uint) *big.Int { - var z big.Int - z.Rsh(x, n) - return &z + return x.Rsh(x, n) } // At returns the value of the i'th bit of x. @@ -47,41 +43,31 @@ func At(x *big.Int, i uint) (uint, error) { return x.Bit(int(i)), nil } -// SetBit returns x with x's i'th bit set to b (0 or 1). That is, if b is 1 -// SetBit returns x with its i'th bit set; if b is 0 SetBit returns x with -// its i'th bit cleared. +// Set sets and returns x with x's i'th bit set to b (0 or 1). +// That is, if b is 1 Set returns x with its i'th bit set; +// if b is 0 Set returns x with its i'th bit cleared. func Set(x *big.Int, i int, bit uint) *big.Int { - var z big.Int - z.SetBit(x, i, bit) - return &z + return x.SetBit(x, i, bit) } -// And returns the bitwise and of a and b. +// And sets and returns a to the bitwise "and" of a and b. func And(a, b *big.Int) *big.Int { - var z big.Int - z.And(a, b) - return &z + return a.And(a, b) } -// Or returns the bitwise or of a and b (a | b in Go). +// Or sets and returns a to the bitwise "or" of a and b (a | b in Go). func Or(a, b *big.Int) *big.Int { - var z big.Int - z.Or(a, b) - return &z + return a.Or(a, b) } -// Xor returns the bitwise xor of a and b (a ^ b in Go). +// Xor sets and returns a to the bitwise xor of a and b (a ^ b in Go). func Xor(a, b *big.Int) *big.Int { - var z big.Int - z.Xor(a, b) - return &z + return a.Xor(a, b) } -// Clear returns the bitwise and not of a and b (a &^ b in Go). +// Clear sets and returns a to the bitwise "and not" of a and b (a &^ b in Go). func Clear(a, b *big.Int) *big.Int { - var z big.Int - z.AndNot(a, b) - return &z + return a.AndNot(a, b) } // OnesCount returns the number of one bits ("population count") in x. 
diff --git a/vendor/cuelang.org/go/pkg/net/ip.go b/vendor/cuelang.org/go/pkg/net/ip.go index 89c587be16..22e306c469 100644 --- a/vendor/cuelang.org/go/pkg/net/ip.go +++ b/vendor/cuelang.org/go/pkg/net/ip.go @@ -16,7 +16,9 @@ package net import ( + "errors" "fmt" + "math/big" "net/netip" "cuelang.org/go/cue" @@ -178,7 +180,7 @@ func InterfaceLocalMulticastIP(ip cue.Value) bool { return netGetIP(ip).IsInterfaceLocalMulticast() } -// LinkLocalMulticast reports whether ip is a link-local multicast address. +// LinkLocalMulticastIP reports whether ip is a link-local multicast address. func LinkLocalMulticastIP(ip cue.Value) bool { return netGetIP(ip).IsLinkLocalMulticast() } @@ -242,3 +244,145 @@ func IPString(ip cue.Value) (string, error) { } return ipdata.String(), nil } + +func netIPAdd(addr netip.Addr, offset *big.Int) (netip.Addr, error) { + i := big.NewInt(0).SetBytes(addr.AsSlice()) + i = i.Add(i, offset) + + if i.Sign() < 0 { + return netip.Addr{}, errors.New("IP address arithmetic resulted in out-of-range address (underflow)") + } + + b := i.Bytes() + size := addr.BitLen() / 8 + + if len(b) > size { + return netip.Addr{}, errors.New("IP address arithmetic resulted in out-of-range address (overflow)") + } + + if len(b) < size { + b = append(make([]byte, size-len(b), size), b...) + } + addr, _ = netip.AddrFromSlice(b) + return addr, nil +} + +// AddIP adds a numerical offset to a given IP address. +// The address can be provided as a string, byte array, or CIDR subnet notation. +// It returns the resulting IP address or CIDR subnet notation as a string. 
+func AddIP(ip cue.Value, offset *big.Int) (string, error) { + prefix, err := netGetIPCIDR(ip) + if err == nil { + addr, err := netIPAdd(prefix.Addr(), offset) + if err != nil { + return "", err + } + return netip.PrefixFrom(addr, prefix.Bits()).String(), nil + } + ipdata := netGetIP(ip) + if !ipdata.IsValid() { + return "", fmt.Errorf("invalid IP %q", ip) + } + addr, err := netIPAdd(ipdata, offset) + if err != nil { + return "", err + } + return addr.String(), nil +} + +// AddIPCIDR adds a numerical offset to a given CIDR subnet +// string, returning a CIDR string. +func AddIPCIDR(ip cue.Value, offset *big.Int) (string, error) { + prefix, err := netGetIPCIDR(ip) + if err != nil { + return "", err + } + shifted := big.NewInt(0).Lsh(offset, (uint)(prefix.Addr().BitLen()-prefix.Bits())) + addr, err := netIPAdd(prefix.Addr(), shifted) + if err != nil { + return "", err + } + return netip.PrefixFrom(addr, prefix.Bits()).String(), nil +} + +// ParsedCIDR holds the parsed components of a CIDR notation string. +type ParsedCIDR struct { + PrefixMask string `json:"prefix_mask"` + PrefixLen int `json:"prefix_len"` + PrefixAddr string `json:"prefix_addr"` + // BroadcastAddr is only set for IPv4 CIDRs. + BroadcastAddr string `json:"broadcast_addr,omitempty"` +} + +// ParseCIDR parses a CIDR notation string and returns its components: +// prefix_mask (e.g. "255.255.255.0"), prefix_len (e.g. 24), +// prefix_addr (e.g. "10.20.30.0"), and broadcast_addr (e.g. "10.20.30.255"). +// broadcast_addr is only set for IPv4 CIDRs. 
+func ParseCIDR(s string) (*ParsedCIDR, error) { + prefix, err := netip.ParsePrefix(s) + if err != nil { + return nil, err + } + + bits := prefix.Bits() + addr := prefix.Addr() + maskBytes := make([]byte, addr.BitLen()/8) + for i := range bits / 8 { + maskBytes[i] = 0xFF + } + if rem := bits % 8; rem > 0 { + maskBytes[bits/8] = ^byte(0xFF >> rem) + } + netmask, _ := netip.AddrFromSlice(maskBytes) + + networkAddr := prefix.Masked().Addr() + + result := &ParsedCIDR{ + PrefixMask: netmask.String(), + PrefixLen: bits, + PrefixAddr: networkAddr.String(), + } + + if addr.Is4() { + broadcastBytes := networkAddr.AsSlice() + for i := range broadcastBytes { + broadcastBytes[i] |= ^maskBytes[i] + } + broadcastAddr, _ := netip.AddrFromSlice(broadcastBytes) + result.BroadcastAddr = broadcastAddr.String() + } + + return result, nil +} + +// InCIDR reports whether an IP address is contained a CIDR subnet string. +func InCIDR(ip, cidr cue.Value) (bool, error) { + ipAddr := netGetIP(ip) + if !ipAddr.IsValid() { + return false, fmt.Errorf("invalid IP %q", ip) + } + + prefix, err := netGetIPCIDR(cidr) + if err != nil { + return false, err + } + + return prefix.Contains(ipAddr), nil +} + +// CompareIP compares two IP addresses and returns an integer: +// -1 if ip1 sorts before ip2, 0 if they are equal, and +1 if ip1 sorts after ip2. +// IPv4 addresses sort before IPv6 addresses. +// +// The addresses may be strings or lists of bytes. 
+func CompareIP(ip1, ip2 cue.Value) (int, error) { + addr1 := netGetIP(ip1) + if !addr1.IsValid() { + return 0, fmt.Errorf("invalid IP %q", ip1) + } + addr2 := netGetIP(ip2) + if !addr2.IsValid() { + return 0, fmt.Errorf("invalid IP %q", ip2) + } + return addr1.Compare(addr2), nil +} diff --git a/vendor/cuelang.org/go/pkg/net/pkg.go b/vendor/cuelang.org/go/pkg/net/pkg.go index 568236636b..71138afc1b 100644 --- a/vendor/cuelang.org/go/pkg/net/pkg.go +++ b/vendor/cuelang.org/go/pkg/net/pkg.go @@ -237,6 +237,70 @@ var p = &pkg.Package{ c.Ret, c.Err = IPString(ip) } }, + }, { + Name: "AddIP", + Params: []pkg.Param{ + {Kind: adt.TopKind}, + {Kind: adt.IntKind}, + }, + Result: adt.StringKind, + Func: func(c *pkg.CallCtxt) { + ip, offset := c.Value(0), c.BigInt(1) + if c.Do() { + c.Ret, c.Err = AddIP(ip, offset) + } + }, + }, { + Name: "AddIPCIDR", + Params: []pkg.Param{ + {Kind: adt.TopKind}, + {Kind: adt.IntKind}, + }, + Result: adt.StringKind, + Func: func(c *pkg.CallCtxt) { + ip, offset := c.Value(0), c.BigInt(1) + if c.Do() { + c.Ret, c.Err = AddIPCIDR(ip, offset) + } + }, + }, { + Name: "ParseCIDR", + Params: []pkg.Param{ + {Kind: adt.StringKind}, + }, + Result: adt.StructKind, + Func: func(c *pkg.CallCtxt) { + s := c.String(0) + if c.Do() { + c.Ret, c.Err = ParseCIDR(s) + } + }, + }, { + Name: "InCIDR", + Params: []pkg.Param{ + {Kind: adt.TopKind}, + {Kind: adt.TopKind}, + }, + Result: adt.BoolKind, + Func: func(c *pkg.CallCtxt) { + ip, cidr := c.Value(0), c.Value(1) + if c.Do() { + c.Ret, c.Err = InCIDR(ip, cidr) + } + }, + }, { + Name: "CompareIP", + Params: []pkg.Param{ + {Kind: adt.TopKind}, + {Kind: adt.TopKind}, + }, + Result: adt.IntKind, + Func: func(c *pkg.CallCtxt) { + ip1, ip2 := c.Value(0), c.Value(1) + if c.Do() { + c.Ret, c.Err = CompareIP(ip1, ip2) + } + }, }, { Name: "PathEscape", Params: []pkg.Param{ diff --git a/vendor/cuelang.org/go/pkg/net/url.go b/vendor/cuelang.org/go/pkg/net/url.go index 5981a54d02..4e1600b92c 100644 --- 
a/vendor/cuelang.org/go/pkg/net/url.go +++ b/vendor/cuelang.org/go/pkg/net/url.go @@ -57,7 +57,7 @@ func URL(s string) (bool, error) { return err == nil, err } -// URL validates that s is an absolute URL. +// AbsURL validates that s is an absolute URL. // Note: this does also allow non-ASCII characters. func AbsURL(s string) (bool, error) { u, err := url.Parse(s) diff --git a/vendor/cuelang.org/go/pkg/path/os.go b/vendor/cuelang.org/go/pkg/path/os.go index f55193977d..6eb640d104 100644 --- a/vendor/cuelang.org/go/pkg/path/os.go +++ b/vendor/cuelang.org/go/pkg/path/os.go @@ -41,7 +41,6 @@ type osInfo interface { splitList(path string) []string volumeNameLen(path string) int IsAbs(path string) (b bool) - HasPrefix(p, prefix string) bool join(elem []string) string sameWord(a, b string) bool } diff --git a/vendor/cuelang.org/go/pkg/path/path.go b/vendor/cuelang.org/go/pkg/path/path.go index 247dd7b3ff..a7cc3dd949 100644 --- a/vendor/cuelang.org/go/pkg/path/path.go +++ b/vendor/cuelang.org/go/pkg/path/path.go @@ -319,7 +319,7 @@ func Rel(basepath, targpath string, os OS) (string, error) { } buf := make([]byte, size) n := copy(buf, "..") - for i := 0; i < seps; i++ { + for range seps { buf[n] = x.Separator copy(buf[n+1:], "..") n += 3 diff --git a/vendor/cuelang.org/go/pkg/path/path_nix.go b/vendor/cuelang.org/go/pkg/path/path_nix.go index eb1b193afd..726f85eae7 100644 --- a/vendor/cuelang.org/go/pkg/path/path_nix.go +++ b/vendor/cuelang.org/go/pkg/path/path_nix.go @@ -44,14 +44,6 @@ func (o unixInfo) volumeNameLen(path string) int { return 0 } -// HasPrefix exists for historical compatibility and should not be used. -// -// Deprecated: HasPrefix does not respect path boundaries and -// does not ignore case when required. 
-func (o unixInfo) HasPrefix(p, prefix string) bool { - return strings.HasPrefix(p, prefix) -} - func (o unixInfo) splitList(path string) []string { if path == "" { return []string{} diff --git a/vendor/cuelang.org/go/pkg/path/path_p9.go b/vendor/cuelang.org/go/pkg/path/path_p9.go index f3379ea5f1..961539aa0d 100644 --- a/vendor/cuelang.org/go/pkg/path/path_p9.go +++ b/vendor/cuelang.org/go/pkg/path/path_p9.go @@ -42,14 +42,6 @@ func (o plan9Info) volumeNameLen(path string) int { return 0 } -// HasPrefix exists for historical compatibility and should not be used. -// -// Deprecated: HasPrefix does not respect path boundaries and -// does not ignore case when required. -func (o plan9Info) HasPrefix(p, prefix string) bool { - return strings.HasPrefix(p, prefix) -} - func (o plan9Info) splitList(path string) []string { if path == "" { return []string{} diff --git a/vendor/cuelang.org/go/pkg/path/path_win.go b/vendor/cuelang.org/go/pkg/path/path_win.go index 054ae28aa1..d854498d95 100644 --- a/vendor/cuelang.org/go/pkg/path/path_win.go +++ b/vendor/cuelang.org/go/pkg/path/path_win.go @@ -116,17 +116,6 @@ func (os windowsInfo) volumeNameLen(path string) int { return 0 } -// HasPrefix exists for historical compatibility and should not be used. -// -// Deprecated: HasPrefix does not respect path boundaries and -// does not ignore case when required. -func (os windowsInfo) HasPrefix(p, prefix string) bool { - if strings.HasPrefix(p, prefix) { - return true - } - return strings.HasPrefix(strings.ToLower(p), strings.ToLower(prefix)) -} - func (os windowsInfo) splitList(path string) []string { // The same implementation is used in LookPath in os/exec; // consider changing os/exec when changing this. 
diff --git a/vendor/cuelang.org/go/pkg/regexp/manual.go b/vendor/cuelang.org/go/pkg/regexp/manual.go index eb3c1d240d..a4e5825f33 100644 --- a/vendor/cuelang.org/go/pkg/regexp/manual.go +++ b/vendor/cuelang.org/go/pkg/regexp/manual.go @@ -92,8 +92,8 @@ func FindAll(pattern, s string, n int) ([]string, error) { return m, nil } -// FindAllNamedSubmatch is like FindAllSubmatch, but returns a list of maps -// with the named used in capturing groups. See FindNamedSubmatch for an +// FindAllNamedSubmatch is like [FindAllSubmatch], but returns a list of maps +// with the named used in capturing groups. See [FindNamedSubmatch] for an // example on how to use named groups. func FindAllNamedSubmatch(pattern, s string, n int) ([]map[string]string, error) { re, err := regexp.Compile(pattern) @@ -123,7 +123,7 @@ func FindAllNamedSubmatch(pattern, s string, n int) ([]map[string]string, error) var errNoNamedGroup = errors.New("no named groups") -// FindAllSubmatch is the 'All' version of FindSubmatch; it returns a list +// FindAllSubmatch is the 'All' version of [FindSubmatch]; it returns a list // of all successive matches of the expression, as defined by the 'All' // description in the package comment. // A return value of bottom indicates no match. @@ -139,7 +139,7 @@ func FindAllSubmatch(pattern, s string, n int) ([][]string, error) { return m, nil } -// FindNamedSubmatch is like FindSubmatch, but returns a map with the names used +// FindNamedSubmatch is like [FindSubmatch], but returns a map with the names used // in capturing groups. // // Example: diff --git a/vendor/cuelang.org/go/pkg/regexp/regexp.go b/vendor/cuelang.org/go/pkg/regexp/regexp.go index 5ee737b90d..8e87dd3197 100644 --- a/vendor/cuelang.org/go/pkg/regexp/regexp.go +++ b/vendor/cuelang.org/go/pkg/regexp/regexp.go @@ -24,7 +24,6 @@ import "regexp" // Match reports whether the string s // contains any match of the regular expression pattern. 
-// More complicated queries need to use Compile and the full Regexp interface. func Match(pattern string, s string) (matched bool, err error) { return regexp.MatchString(pattern, s) } diff --git a/vendor/cuelang.org/go/pkg/register.go b/vendor/cuelang.org/go/pkg/register.go index 484507b58a..592fcf9ab2 100644 --- a/vendor/cuelang.org/go/pkg/register.go +++ b/vendor/cuelang.org/go/pkg/register.go @@ -13,6 +13,7 @@ import ( _ "cuelang.org/go/pkg/encoding/csv" _ "cuelang.org/go/pkg/encoding/hex" _ "cuelang.org/go/pkg/encoding/json" + _ "cuelang.org/go/pkg/encoding/openapi" _ "cuelang.org/go/pkg/encoding/toml" _ "cuelang.org/go/pkg/encoding/yaml" _ "cuelang.org/go/pkg/html" diff --git a/vendor/cuelang.org/go/pkg/strconv/pkg.go b/vendor/cuelang.org/go/pkg/strconv/pkg.go index b698ead629..7292559d0d 100644 --- a/vendor/cuelang.org/go/pkg/strconv/pkg.go +++ b/vendor/cuelang.org/go/pkg/strconv/pkg.go @@ -63,6 +63,18 @@ var p = &pkg.Package{ c.Ret, c.Err = ParseFloat(s, bitSize) } }, + }, { + Name: "ParseNumber", + Params: []pkg.Param{ + {Kind: adt.StringKind}, + }, + Result: adt.NumberKind, + Func: func(c *pkg.CallCtxt) { + s := c.String(0) + if c.Do() { + c.Ret, c.Err = ParseNumber(s) + } + }, }, { Name: "IntSize", Const: "64", diff --git a/vendor/cuelang.org/go/pkg/strconv/strconv.go b/vendor/cuelang.org/go/pkg/strconv/strconv.go index 3ebb68e492..7563c6e837 100644 --- a/vendor/cuelang.org/go/pkg/strconv/strconv.go +++ b/vendor/cuelang.org/go/pkg/strconv/strconv.go @@ -21,6 +21,8 @@ package strconv import ( + "cuelang.org/go/cue/literal" + "cuelang.org/go/internal" "math/big" "strconv" ) @@ -65,41 +67,154 @@ func ParseFloat(s string, bitSize int) (float64, error) { return strconv.ParseFloat(s, bitSize) } +// ParseNumber interprets s using the full CUE number literal syntax and returns +// the resulting value as an arbitrary-precision decimal. 
It accepts decimal +// and non-decimal bases, underscores as separators, fractional syntax, and +// the decimal or binary multiplier suffixes defined by CUE (for example "1Ki" +// and "10M"). +// +// If s is not syntactically well-formed, ParseNumber returns a *strconv.NumError +// with Err containing detailed syntax information. Semantic errors, such as a +// multiplier that cannot be represented, are reported in the same way. +func ParseNumber(s string) (*internal.Decimal, error) { + var info literal.NumInfo + if err := literal.ParseNum(s, &info); err != nil { + return nil, &strconv.NumError{ + Func: "ParseNumber", + Num: s, + Err: err, + } + } + + var dec internal.Decimal + if err := info.Decimal(&dec); err != nil { + return nil, &strconv.NumError{ + Func: "ParseNumber", + Num: s, + Err: err, + } + } + return &dec, nil +} + // IntSize is the size in bits of an int or uint value. const IntSize = 64 -// ParseUint is like ParseInt but for unsigned numbers. -func ParseUint(s string, base int, bitSize int) (uint64, error) { - return strconv.ParseUint(s, base, bitSize) +// ParseUint is like [ParseInt] but for unsigned numbers. 
+func ParseUint(s string, base int, bitSize int) (*big.Int, error) { + if bitSize < 0 { + return nil, &strconv.NumError{ + Func: "ParseUint", + Num: s, + Err: strconv.ErrRange, + } + } + + // Parse the number using big.Int to handle arbitrary precision + i := new(big.Int) + i, ok := i.SetString(s, base) + if !ok { + return nil, &strconv.NumError{ + Func: "ParseUint", + Num: s, + Err: strconv.ErrSyntax, + } + } + + // Check if the value is negative (not allowed for unsigned) + if i.Sign() < 0 { + return nil, &strconv.NumError{ + Func: "ParseUint", + Num: s, + Err: strconv.ErrRange, + } + } + + // If bitSize is 0, return unlimited precision result + if bitSize == 0 { + return i, nil + } + + // Check if the value fits in the specified bit size + // For unsigned integers, the range is [0, 2^bitSize-1] + if i.BitLen() <= bitSize { + return i, nil + } + + return nil, &strconv.NumError{ + Func: "ParseUint", + Num: s, + Err: strconv.ErrRange, + } } // ParseInt interprets a string s in the given base (0, 2 to 36) and -// bit size (0 to 64) and returns the corresponding value i. +// bit size and returns the corresponding value i. // // If the base argument is 0, the true base is implied by the string's // prefix: 2 for "0b", 8 for "0" or "0o", 16 for "0x", and 10 otherwise. // Also, for argument base 0 only, underscore characters are permitted // as defined by the Go syntax for integer literals. // -// The bitSize argument specifies the integer type -// that the result must fit into. Bit sizes 0, 8, 16, 32, and 64 -// correspond to int, int8, int16, int32, and int64. -// If bitSize is below 0 or above 64, an error is returned. -// -// The errors that ParseInt returns have concrete type *NumError -// and include err.Num = s. 
If s is empty or contains invalid -// digits, err.Err = ErrSyntax and the returned value is 0; -// if the value corresponding to s cannot be represented by a -// signed integer of the given size, err.Err = ErrRange and the -// returned value is the maximum magnitude integer of the -// appropriate bitSize and sign. -func ParseInt(s string, base int, bitSize int) (i int64, err error) { - return strconv.ParseInt(s, base, bitSize) +// The bitSize argument specifies the integer type that the result must fit into. +// If bitSize is 0, the result is unconstrained (unlimited precision). +// If bitSize is positive, the result must fit in a signed integer of that many bits. +// If bitSize is negative, an error is returned. +func ParseInt(s string, base int, bitSize int) (*big.Int, error) { + if bitSize < 0 { + return nil, &strconv.NumError{ + Func: "ParseInt", + Num: s, + Err: strconv.ErrRange, + } + } + + // Parse the number using big.Int to handle arbitrary precision + i := new(big.Int) + i, ok := i.SetString(s, base) + if !ok { + return nil, &strconv.NumError{ + Func: "ParseInt", + Num: s, + Err: strconv.ErrSyntax, + } + } + + // If bitSize is 0, return unlimited precision result + if bitSize == 0 { + return i, nil + } + // Check if the value fits in the specified bit size + // For signed integers, the range is [-2^(bitSize-1), 2^(bitSize-1)-1] + bitLen := i.BitLen() + if bitLen <= bitSize-1 { + return i, nil + } + if i.Sign() < 0 && bitLen == bitSize { + // It might be all 1s; add one and see if it fits. + x := big.NewInt(1) + x.Add(i, x) + if x.BitLen() <= bitSize-1 { + return i, nil + } + } + return nil, &strconv.NumError{ + Func: "ParseInt", + Num: s, + Err: strconv.ErrRange, + } } // Atoi is equivalent to ParseInt(s, 10, 0), converted to type int. 
-func Atoi(s string) (int, error) { - return strconv.Atoi(s) +func Atoi(s string) (*big.Int, error) { + n, err := ParseInt(s, 10, 0) + if err == nil { + return n, nil + } + if nerr, ok := err.(*strconv.NumError); ok { + nerr.Func = "Atoi" + } + return nil, err } // FormatFloat converts the floating-point number f to a string, diff --git a/vendor/cuelang.org/go/pkg/strings/manual.go b/vendor/cuelang.org/go/pkg/strings/manual.go index 56b6f3d9c7..1e475b476c 100644 --- a/vendor/cuelang.org/go/pkg/strings/manual.go +++ b/vendor/cuelang.org/go/pkg/strings/manual.go @@ -29,6 +29,7 @@ import ( "fmt" "strings" "unicode" + "unicode/utf8" ) // ByteAt reports the ith byte of the underlying strings or byte. @@ -60,7 +61,7 @@ func MinRunes(s string, min int) bool { // TODO: CUE strings cannot be invalid UTF-8. In case this changes, we need // to use the following conversion to count properly: // s, _ = unicodeenc.UTF8.NewDecoder().String(s) - return len([]rune(s)) >= min + return utf8.RuneCountInString(s) >= min } // MaxRunes reports whether the number of runes (Unicode codepoints) in a string @@ -68,7 +69,7 @@ func MinRunes(s string, min int) bool { // except all strings for which this property holds func MaxRunes(s string, max int) bool { // See comment in MinRunes implementation. - return len([]rune(s)) <= max + return utf8.RuneCountInString(s) <= max } // ToTitle returns a copy of the string s with all Unicode letters that begin diff --git a/vendor/cuelang.org/go/pkg/strings/strings.go b/vendor/cuelang.org/go/pkg/strings/strings.go index fc10f68d64..3882e6e613 100644 --- a/vendor/cuelang.org/go/pkg/strings/strings.go +++ b/vendor/cuelang.org/go/pkg/strings/strings.go @@ -174,7 +174,7 @@ func Trim(s, cutset string) string { // TrimLeft returns a slice of the string s with all leading // Unicode code points contained in cutset removed. // -// To remove a prefix, use TrimPrefix instead. +// To remove a prefix, use [TrimPrefix] instead. 
func TrimLeft(s, cutset string) string { return strings.TrimLeft(s, cutset) } @@ -182,7 +182,7 @@ func TrimLeft(s, cutset string) string { // TrimRight returns a slice of the string s, with all trailing // Unicode code points contained in cutset removed. // -// To remove a suffix, use TrimSuffix instead. +// To remove a suffix, use [TrimSuffix] instead. func TrimRight(s, cutset string) string { return strings.TrimRight(s, cutset) } diff --git a/vendor/cuelang.org/go/pkg/tool/exec/exec.go b/vendor/cuelang.org/go/pkg/tool/exec/exec.go index e8a0d743fd..db6197e669 100644 --- a/vendor/cuelang.org/go/pkg/tool/exec/exec.go +++ b/vendor/cuelang.org/go/pkg/tool/exec/exec.go @@ -46,7 +46,7 @@ func (c *execCmd) Run(ctx *task.Context) (res interface{}, err error) { // TODO: set environment variables, if defined. stream := func(name string) (stream cue.Value, ok bool) { c := ctx.Obj.LookupPath(cue.ParsePath(name)) - if err := c.Null(); c.Err() != nil || err == nil { + if c.Err() != nil || c.IsNull() { return } return c, true diff --git a/vendor/cuelang.org/go/pkg/tool/file/file.cue b/vendor/cuelang.org/go/pkg/tool/file/file.cue index 7d4d75e0af..2fa119871d 100644 --- a/vendor/cuelang.org/go/pkg/tool/file/file.cue +++ b/vendor/cuelang.org/go/pkg/tool/file/file.cue @@ -67,6 +67,24 @@ Create: { contents: bytes | string } +// Symlink creates a symbolic link. +Symlink: { + $id: _id + _id: "tool/file.Symlink" + + // filename defines the name of the symlink. + // + // Relative names are taken relative to the current working directory. + // Slashes are converted to the native OS path separator. + filename: !="" + + // target names the target file or directory of the symlink. + // + // Relative names are taken relative to the current working directory. + // Slashes are converted to the native OS path separator. + target: !="" +} + // Glob returns a list of files. 
Glob: { $id: _id diff --git a/vendor/cuelang.org/go/pkg/tool/file/file.go b/vendor/cuelang.org/go/pkg/tool/file/file.go index a6b62689bb..85de97012b 100644 --- a/vendor/cuelang.org/go/pkg/tool/file/file.go +++ b/vendor/cuelang.org/go/pkg/tool/file/file.go @@ -29,6 +29,7 @@ func init() { task.Register("tool/file.Read", newReadCmd) task.Register("tool/file.Append", newAppendCmd) task.Register("tool/file.Create", newCreateCmd) + task.Register("tool/file.Symlink", newSymlinkCmd) task.Register("tool/file.Glob", newGlobCmd) task.Register("tool/file.Mkdir", newMkdirCmd) task.Register("tool/file.MkdirTemp", newMkdirTempCmd) @@ -38,6 +39,7 @@ func init() { func newReadCmd(v cue.Value) (task.Runner, error) { return &cmdRead{}, nil } func newAppendCmd(v cue.Value) (task.Runner, error) { return &cmdAppend{}, nil } func newCreateCmd(v cue.Value) (task.Runner, error) { return &cmdCreate{}, nil } +func newSymlinkCmd(v cue.Value) (task.Runner, error) { return &cmdSymlink{}, nil } func newGlobCmd(v cue.Value) (task.Runner, error) { return &cmdGlob{}, nil } func newMkdirCmd(v cue.Value) (task.Runner, error) { return &cmdMkdir{}, nil } func newMkdirTempCmd(v cue.Value) (task.Runner, error) { return &cmdMkdirTemp{}, nil } @@ -46,6 +48,7 @@ func newRemoveAllCmd(v cue.Value) (task.Runner, error) { return &cmdRemoveAll{}, type cmdRead struct{} type cmdAppend struct{} type cmdCreate struct{} +type cmdSymlink struct{} type cmdGlob struct{} type cmdMkdir struct{} type cmdMkdirTemp struct{} @@ -107,6 +110,18 @@ func (c *cmdCreate) Run(ctx *task.Context) (res interface{}, err error) { return nil, os.WriteFile(filename, b, os.FileMode(mode)) } +func (c *cmdSymlink) Run(ctx *task.Context) (res interface{}, err error) { + var ( + filename = filepath.FromSlash(ctx.String("filename")) + target = filepath.FromSlash(ctx.String("target")) + ) + if ctx.Err != nil { + return nil, ctx.Err + } + + return nil, os.Symlink(target, filename) +} + func (c *cmdGlob) Run(ctx *task.Context) (res interface{}, err 
error) { glob := ctx.String("glob") if ctx.Err != nil { diff --git a/vendor/cuelang.org/go/pkg/tool/file/pkg.go b/vendor/cuelang.org/go/pkg/tool/file/pkg.go index 530706277e..046dd55831 100644 --- a/vendor/cuelang.org/go/pkg/tool/file/pkg.go +++ b/vendor/cuelang.org/go/pkg/tool/file/pkg.go @@ -57,6 +57,24 @@ // contents: bytes | string // } // +// // Symlink creates a symbolic link. +// Symlink: { +// $id: _id +// _id: "tool/file.Symlink" +// +// // filename defines the name of the symlink. +// // +// // Relative names are taken relative to the current working directory. +// // Slashes are converted to the native OS path separator. +// filename: !="" +// +// // target names the target file or directory of the symlink. +// // +// // Relative names are taken relative to the current working directory. +// // Slashes are converted to the native OS path separator. +// target: !="" +// } +// // // Glob returns a list of files. // Glob: { // $id: _id @@ -167,6 +185,12 @@ var p = &pkg.Package{ permissions: int | *0o666 contents: bytes | string } + Symlink: { + $id: _id + _id: "tool/file.Symlink" + filename: !="" + target: !="" + } Glob: { $id: _id _id: "tool/file.Glob" diff --git a/vendor/cuelang.org/go/pkg/tool/http/http.cue b/vendor/cuelang.org/go/pkg/tool/http/http.cue index 49eb02cb37..d3c994d319 100644 --- a/vendor/cuelang.org/go/pkg/tool/http/http.cue +++ b/vendor/cuelang.org/go/pkg/tool/http/http.cue @@ -40,8 +40,8 @@ Do: { request: { body?: bytes | string - header: [string]: string | [...string] - trailer: [string]: string | [...string] + header: [string]: [string, ...string] + trailer: [string]: [string, ...string] } response: { status: string @@ -53,14 +53,155 @@ Do: { } } -// TODO: support serving once we have the cue serve command. -// Serve: { -// port: int +// Serve launches a task that listens on the given port and serves HTTP +// requests. (EXPERIMENTAL) // -// cert: string -// key: string +// Serve support HTTP multiplexing. 
Multiple tasks can be configured to be +// served from the same address. Serve will multiplex these different instances +// based on the serving path and, optionally, method. // -// handle: [Pattern=string]: Message & { -// pattern: Pattern -// } -// } +// For more details see the documentation of the routing parameters such as +// path and method. +Serve: { + $id: _id + _id: "tool/http.Serve" + + // listenAddr is the address to listen on (e.g., ":8080", "localhost:8000"). + // This field is required to avoid accidentally binding to privileged ports. + listenAddr!: string + + // routing configures the HTTP routes that are served. + // + // Routing is done based on path and methods (TODO: allow host as well) + // + // Literal (that is, non-wildcard) parts of a pattern match the + // corresponding parts of a request case-sensitively. + // + // If no method is given it matches every method. If routing.method is set to + // "GET", it matches both GET and HEAD requests. Otherwise, the method must + // match exactly. + // + // TODO: When no host is given, every host is matched. A pattern with a host + // matches URLs on that host only. + // + // A path can include wildcard segments of the form {NAME} or {NAME...}. For + // example, "/b/{bucket}/o/{objectname...}". The wildcard name must be a + // valid Go identifier. Wildcards must be full path segments: they must be + // preceded by a slash and followed by either a slash or the end of the + // string. For example, "/b_{bucket}" is not a valid pattern. + // + // Normally a wildcard matches only a single path segment, ending at the + // next literal slash (not %2F) in the request URL. But if "..." is + // present, then the wildcard matches the remainder of the URL path, + // including slashes. (Therefore it is invalid for a "..." wildcard to + // appear anywhere but at the end of a pattern.) The match for a wildcard + // can be obtained from request.pathValues with the wildcard's name. 
A + // trailing slash in a path acts as an anonymous "..." wildcard. + // + // The special wildcard {$} matches only the end of the URL. For example, + // the pattern "/{$}" matches only the path "/", whereas the pattern "/" + // matches every path. + // + // For matching, both pattern paths and incoming request paths are unescaped + // segment by segment. So, for example, the path "/a%2Fb/100%25" is treated + // as having two segments, "a/b" and "100%". The pattern "/a%2fb/" matches + // it, but the pattern "/a/b/" does not. + // + // + // Precedence + // + // If two or more patterns match a request, then the most specific pattern + // takes precedence. A pattern P1 is more specific than P2 if P1 matches a + // strict subset of P2’s requests; that is, if P2 matches all the requests + // of P1 and more. If neither is more specific, then the patterns conflict. + // There is one exception to this rule: if two patterns would otherwise + // conflict and one has a host while the other does not, then the pattern + // with the host takes precedence. If a pattern conflicts with another + // pattern that is already registered the task will panic. + // + // As an example of the general rule, "/images/thumbnails/" is more specific + // than "/images/", so both can be registered. The former matches paths + // beginning with "/images/thumbnails/" and the latter will match any other + // path in the "/images/" subtree. + // + // As another example, consider a route with path "/" and method "GET" versus + // a route with path "/index.html" and no method: both match a GET request + // for "/index.html", but the former matches all other GET and HEAD requests, + // while the latter matches any request for "/index.html" that uses a + // different method. These routes would conflict. + routing: { + // path sets the path to route to. It may include wildcard segments + // as described above. + path: *"/" | =~"^/" + + // method optionally sets the HTTP method to match (e.g. 
"GET" | + // "POST"). If not set, all methods are accepted. + method?: string + } + + // TODO: + // - schemes: string // e.g. "http" | "https" + // - TLS + + // request holds data about the incoming HTTP request. + // + // Fields marked [runtime] are populated automatically when a request is + // received. Users can add constraints to these fields to validate input, + // for example: `form: u!: [string]` to require a query parameter "u". + // + // The value field is for user-defined parsing of the request body. + request: { + // method is the HTTP method (GET, POST, PUT, etc.). [runtime] + method: string + + // url is the full request URL. [runtime] + url: string + + // body is the raw request body. [runtime] + body: *bytes | string + + // value can be set by the user to hold a parsed representation of + // the body. For example: `value: json.Unmarshal(body)` + value?: _ + + // pathValues contains values extracted from URL path wildcards. + // For example, with routing.path: "/users/{id}", a request to + // "/users/123" would have pathValues: {id: "123"}. [runtime] + pathValues: [string]: string + + // form contains the parsed form data, including both the URL + // query parameters and POST/PUT/PATCH form bodies. [runtime] + form: [string]: [string, ...string] + + // header contains the request headers. Each header key maps to a + // non-empty list of values, as HTTP allows multiple values per header. + // [runtime] + header: [string]: [string, ...string] + + // trailer contains the request trailers. Each trailer key maps to a + // non-empty list of values. [runtime] + trailer: [string]: [string, ...string] + } + + // response defines the HTTP response to send back to the client. All fields + // are optional and user-defined. + // + // Note: sub-tasks (e.g., exec.Run) cannot be nested within response fields. + // However, sibling tasks defined outside the Serve block can reference + // request data and their output can be used in the response. 
+ response: { + // statusCode sets the HTTP status code. If not set, 200 is used. + statusCode?: int + + // body is the response body to send. + body?: *bytes | string + + // header sets response headers. Each key can be set to either a single + // string value or a list of values for headers with multiple values. + header?: [string]: string | [string, ...string] + + // trailer sets response trailers. Each key can be set to either a single + // string value or a list of values. + trailer?: [string]: string | [string, ...string] + } +} diff --git a/vendor/cuelang.org/go/pkg/tool/http/http.go b/vendor/cuelang.org/go/pkg/tool/http/http.go index 303c32bc6d..292d1d64ac 100644 --- a/vendor/cuelang.org/go/pkg/tool/http/http.go +++ b/vendor/cuelang.org/go/pkg/tool/http/http.go @@ -29,6 +29,7 @@ import ( func init() { task.Register("tool/http.Do", newHTTPCmd) + task.Register("tool/http.Serve", newServeCmd) // For backwards compatibility. task.Register("http", newHTTPCmd) @@ -176,11 +177,27 @@ func parseHeaders(obj cue.Value, label string) (http.Header, error) { } h := http.Header{} for iter.Next() { - str, err := iter.Value().String() + key := iter.Selector().Unquoted() + val := iter.Value() + + // Handle single string value + if s, err := val.String(); err == nil { + h.Add(key, s) + continue + } + + // Each header value is a list of strings [string, ...string] + list, err := val.List() if err != nil { return nil, err } - h.Add(iter.Selector().Unquoted(), str) + for list.Next() { + str, err := list.Value().String() + if err != nil { + return nil, err + } + h.Add(key, str) + } } return h, nil } diff --git a/vendor/cuelang.org/go/pkg/tool/http/pkg.go b/vendor/cuelang.org/go/pkg/tool/http/pkg.go index 097e79e081..2dc141baa6 100644 --- a/vendor/cuelang.org/go/pkg/tool/http/pkg.go +++ b/vendor/cuelang.org/go/pkg/tool/http/pkg.go @@ -30,8 +30,8 @@ // // request: { // body?: bytes | string -// header: [string]: string | [...string] -// trailer: [string]: string | [...string] +// 
header: [string]: [string, ...string] +// trailer: [string]: [string, ...string] // } // response: { // status: string @@ -43,17 +43,158 @@ // } // } // -// // TODO: support serving once we have the cue serve command. -// // Serve: { -// // port: int +// // Serve launches a task that listens on the given port and serves HTTP +// // requests. (EXPERIMENTAL) // // -// // cert: string -// // key: string +// // Serve support HTTP multiplexing. Multiple tasks can be configured to be +// // served from the same address. Serve will multiplex these different instances +// // based on the serving path and, optionally, method. // // -// // handle: [Pattern=string]: Message & { -// // pattern: Pattern -// // } -// // } +// // For more details see the documentation of the routing parameters such as +// // path and method. +// Serve: { +// $id: _id +// _id: "tool/http.Serve" +// +// // listenAddr is the address to listen on (e.g., ":8080", "localhost:8000"). +// // This field is required to avoid accidentally binding to privileged ports. +// listenAddr!: string +// +// // routing configures the HTTP routes that are served. +// // +// // Routing is done based on path and methods (TODO: allow host as well) +// // +// // Literal (that is, non-wildcard) parts of a pattern match the +// // corresponding parts of a request case-sensitively. +// // +// // If no method is given it matches every method. If routing.method is set to +// // "GET", it matches both GET and HEAD requests. Otherwise, the method must +// // match exactly. +// // +// // TODO: When no host is given, every host is matched. A pattern with a host +// // matches URLs on that host only. +// // +// // A path can include wildcard segments of the form {NAME} or {NAME...}. For +// // example, "/b/{bucket}/o/{objectname...}". The wildcard name must be a +// // valid Go identifier. Wildcards must be full path segments: they must be +// // preceded by a slash and followed by either a slash or the end of the +// // string. 
For example, "/b_{bucket}" is not a valid pattern. +// // +// // Normally a wildcard matches only a single path segment, ending at the +// // next literal slash (not %2F) in the request URL. But if "..." is +// // present, then the wildcard matches the remainder of the URL path, +// // including slashes. (Therefore it is invalid for a "..." wildcard to +// // appear anywhere but at the end of a pattern.) The match for a wildcard +// // can be obtained from request.pathValues with the wildcard's name. A +// // trailing slash in a path acts as an anonymous "..." wildcard. +// // +// // The special wildcard {$} matches only the end of the URL. For example, +// // the pattern "/{$}" matches only the path "/", whereas the pattern "/" +// // matches every path. +// // +// // For matching, both pattern paths and incoming request paths are unescaped +// // segment by segment. So, for example, the path "/a%2Fb/100%25" is treated +// // as having two segments, "a/b" and "100%". The pattern "/a%2fb/" matches +// // it, but the pattern "/a/b/" does not. +// // +// // +// // Precedence +// // +// // If two or more patterns match a request, then the most specific pattern +// // takes precedence. A pattern P1 is more specific than P2 if P1 matches a +// // strict subset of P2’s requests; that is, if P2 matches all the requests +// // of P1 and more. If neither is more specific, then the patterns conflict. +// // There is one exception to this rule: if two patterns would otherwise +// // conflict and one has a host while the other does not, then the pattern +// // with the host takes precedence. If a pattern conflicts with another +// // pattern that is already registered the task will panic. +// // +// // As an example of the general rule, "/images/thumbnails/" is more specific +// // than "/images/", so both can be registered. The former matches paths +// // beginning with "/images/thumbnails/" and the latter will match any other +// // path in the "/images/" subtree. 
+// // +// // As another example, consider a route with path "/" and method "GET" versus +// // a route with path "/index.html" and no method: both match a GET request +// // for "/index.html", but the former matches all other GET and HEAD requests, +// // while the latter matches any request for "/index.html" that uses a +// // different method. These routes would conflict. +// routing: { +// // path sets the path to route to. It may include wildcard segments +// // as described above. +// path: *"/" | =~"^/" +// +// // method optionally sets the HTTP method to match (e.g. "GET" | +// // "POST"). If not set, all methods are accepted. +// method?: string +// } +// +// // TODO: +// // - schemes: string // e.g. "http" | "https" +// // - TLS +// +// // request holds data about the incoming HTTP request. +// // +// // Fields marked [runtime] are populated automatically when a request is +// // received. Users can add constraints to these fields to validate input, +// // for example: `form: u!: [string]` to require a query parameter "u". +// // +// // The value field is for user-defined parsing of the request body. +// request: { +// // method is the HTTP method (GET, POST, PUT, etc.). [runtime] +// method: string +// +// // url is the full request URL. [runtime] +// url: string +// +// // body is the raw request body. [runtime] +// body: *bytes | string +// +// // value can be set by the user to hold a parsed representation of +// // the body. For example: `value: json.Unmarshal(body)` +// value?: _ +// +// // pathValues contains values extracted from URL path wildcards. +// // For example, with routing.path: "/users/{id}", a request to +// // "/users/123" would have pathValues: {id: "123"}. [runtime] +// pathValues: [string]: string +// +// // form contains the parsed form data, including both the URL +// // query parameters and POST/PUT/PATCH form bodies. [runtime] +// form: [string]: [string, ...string] +// +// // header contains the request headers. 
Each header key maps to a +// // non-empty list of values, as HTTP allows multiple values per header. +// // [runtime] +// header: [string]: [string, ...string] +// +// // trailer contains the request trailers. Each trailer key maps to a +// // non-empty list of values. [runtime] +// trailer: [string]: [string, ...string] +// } +// +// // response defines the HTTP response to send back to the client. All fields +// // are optional and user-defined. +// // +// // Note: sub-tasks (e.g., exec.Run) cannot be nested within response fields. +// // However, sibling tasks defined outside the Serve block can reference +// // request data and their output can be used in the response. +// response: { +// // statusCode sets the HTTP status code. If not set, 200 is used. +// statusCode?: int +// +// // body is the response body to send. +// body?: *bytes | string +// +// // header sets response headers. Each key can be set to either a single +// // string value or a list of values for headers with multiple values. +// header?: [string]: string | [string, ...string] +// +// // trailer sets response trailers. Each key can be set to either a single +// // string value or a list of values. 
+// trailer?: [string]: string | [string, ...string] +// } +// } package http import ( @@ -86,8 +227,8 @@ var p = &pkg.Package{ } request: { body?: bytes | string - header: [string]: string | [...string] - trailer: [string]: string | [...string] + header: [string]: [string, ...string] + trailer: [string]: [string, ...string] } response: { status: string @@ -97,5 +238,30 @@ var p = &pkg.Package{ trailer: [string]: string | [...string] } } + Serve: { + $id: _id + _id: "tool/http.Serve" + listenAddr!: string + routing: { + path: *"/" | =~"^/" + method?: string + } + request: { + method: string + url: string + body: *bytes | string + value?: _ + pathValues: [string]: string + form: [string]: [string, ...string] + header: [string]: [string, ...string] + trailer: [string]: [string, ...string] + } + response: { + statusCode?: int + body?: *bytes | string + header?: [string]: string | [string, ...string] + trailer?: [string]: string | [string, ...string] + } + } }`, } diff --git a/vendor/cuelang.org/go/pkg/tool/http/serve.go b/vendor/cuelang.org/go/pkg/tool/http/serve.go new file mode 100644 index 0000000000..1b3bea82cb --- /dev/null +++ b/vendor/cuelang.org/go/pkg/tool/http/serve.go @@ -0,0 +1,256 @@ +// Copyright 2023 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package http + +import ( + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "regexp" + "sync" + + "cuelang.org/go/cue" + "cuelang.org/go/cue/errors" + "cuelang.org/go/internal/task" +) + +var ( + muxers = map[string]*http.ServeMux{} + // listeners stores net.Listener for each address to enable error checking + // before starting the serve goroutine. + listeners = map[string]net.Listener{} +) + +func newServeCmd(v cue.Value) (task.Runner, error) { + return &listenCmd{}, nil +} + +type listenCmd struct { + w http.ResponseWriter + body cue.Path +} + +// IsService indicates that http.Serve acts as a service. +// Other tasks can reference request fields (which are filled +// at runtime) without creating a dependency cycle error. +func (c *listenCmd) IsService() bool { + return true +} + +var m sync.Mutex + +var ( + listenPath = cue.ParsePath("listenAddr") + pathPath = cue.ParsePath("routing.path") + methodPath = cue.ParsePath("routing.method") + + requestPath = cue.ParsePath("request") + respBodyPath = cue.ParsePath("response.body") + respStatusCodePath = cue.ParsePath("response.statusCode") + responsePath = cue.ParsePath("response") +) + +// httpRequest represents the request data to fill into the CUE value. +type httpRequest struct { + Method string `json:"method"` + URL string `json:"url"` + Body []byte `json:"body"` + Form map[string][]string `json:"form"` + Header map[string][]string `json:"header"` + PathValues map[string]string `json:"pathValues"` +} + +func (c *listenCmd) Run(ctx *task.Context) (res interface{}, err error) { + v := ctx.Obj + addr, err := v.LookupPath(listenPath).String() + + if err != nil { + return nil, err + } + + m.Lock() + mux := muxers[addr] + if mux == nil { + // Create listener first to catch errors (e.g., port unavailable) + // before starting the serve goroutine. 
+ ln, err := net.Listen("tcp", addr) + if err != nil { + m.Unlock() + return nil, fmt.Errorf("cannot listen on %s: %w", addr, err) + } + + mux = http.NewServeMux() + muxers[addr] = mux + listeners[addr] = ln + + log.Printf("listening on %v\n", addr) + + // TODO: use Server at some point. + go http.Serve(ln, mux) + } + m.Unlock() + + url := "/" + if p := v.LookupPath(pathPath); p.Exists() { + url, err = p.String() + if err != nil { + return nil, err + } + } + + vars := extractPathVariables(url) + + if m := v.LookupPath(methodPath); m.Exists() { + method, err := m.String() + if err != nil { + return nil, err + } + url = fmt.Sprintf("%s %s", method, url) + } + + path := v.Path() + + log.Printf("adding handler for %v\n", url) + mux.HandleFunc(url, func(w http.ResponseWriter, req *http.Request) { + err := req.ParseForm() + if err != nil { + http.Error(w, fmt.Sprintf("cannot parse form: %v", err), http.StatusBadRequest) + return + } + + data, err := io.ReadAll(req.Body) + if err != nil { + http.Error(w, fmt.Sprintf("cannot read body: %v", err), http.StatusBadRequest) + return + } + + pathValues := make(map[string]string) + for _, variable := range vars { + if s := req.PathValue(variable); s != "" { + pathValues[variable] = s + } + } + + reqValue := v.FillPath(requestPath, httpRequest{ + Method: req.Method, + URL: req.URL.String(), + Body: data, + Form: req.Form, + Header: req.Header, + PathValues: pathValues, + }) + + handle := &serveCmd{w: w} + + c := req.Context() + controller := ctx.ForkRunLoop(c, path, reqValue, handle) + + if err := controller.Run(c); err != nil { + cwd, _ := os.Getwd() + details := errors.Details(err, &errors.Config{Cwd: cwd, ToSlash: true}) + // TODO: return JSON-formatted error response for consistency with + // successful responses (e.g. {"error": "...", "details": "..."}). 
+ http.Error(w, fmt.Sprintf("error handling request: %v", details), http.StatusInternalServerError) + return + } + }) + + ctx.BackgroundTask() + return nil, nil +} + +// variableRegex is a regular expression to find all instances of {variableName} in a path. +// It captures the content inside the braces. +var variableRegex = regexp.MustCompile(`\{([^{}\.]+)(\.\.\.)?\}`) + +// extractPathVariables parses a URL pattern string and returns a slice of the variable names. +// For example, given "/users/{userID}/posts/{postID}", it returns ["userID", "postID"]. +// The special pattern {$} (exact path match in http.ServeMux) is excluded. +func extractPathVariables(pattern string) []string { + matches := variableRegex.FindAllStringSubmatch(pattern, -1) + if matches == nil { + return nil + } + + var variables []string + for _, match := range matches { + // The first submatch (index 1) is the captured group, which is the variable name. + // Skip {$} which is a special http.ServeMux pattern for exact path matching. + if name := match[1]; name != "$" { + variables = append(variables, name) + } + } + return variables +} + +type serveCmd struct { + w http.ResponseWriter + body cue.Path +} + +// IsService indicates that http.Serve should not be reported as part +// of task cycles during request handling via ForkRunLoop. 
+func (c *serveCmd) IsService() bool { + return true +} + +func (c *serveCmd) Run(ctx *task.Context) (res interface{}, err error) { + v := ctx.Obj + + response := v.LookupPath(responsePath) + headers, err := parseHeaders(response, "header") + if err != nil { + http.Error(c.w, fmt.Sprintf("cannot parse headers: %v", err), http.StatusBadRequest) + return nil, err + } + trailers, err := parseHeaders(response, "trailer") + if err != nil { + http.Error(c.w, fmt.Sprintf("cannot parse trailers: %v", err), http.StatusBadRequest) + return nil, err + } + + v = v.LookupPath(respBodyPath) + + b, err := v.Bytes() + if err != nil { + http.Error(c.w, fmt.Sprintf("cannot encode response: %v", err), http.StatusBadRequest) + } + + for k, v := range headers { + for _, v := range v { + c.w.Header().Set(k, v) + } + } + + for k, v := range trailers { + for _, v := range v { + c.w.Header().Set(k, v) + } + } + + // Set status code if specified, otherwise defaults to 200 + if sc := ctx.Obj.LookupPath(respStatusCodePath); sc.Exists() { + if code, err := sc.Int64(); err == nil { + c.w.WriteHeader(int(code)) + } + } + + c.w.Write(b) + + return nil, nil +} diff --git a/vendor/cuelang.org/go/tools/flow/cycle.go b/vendor/cuelang.org/go/tools/flow/cycle.go new file mode 100644 index 0000000000..82da0de125 --- /dev/null +++ b/vendor/cuelang.org/go/tools/flow/cycle.go @@ -0,0 +1,116 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package flow + +import ( + "fmt" + "slices" + "strings" + + "cuelang.org/go/cue" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +// checkCycle checks for cyclic dependencies between tasks. +func checkCycle(a []*Task) errors.Error { + cc := cycleChecker{ + visited: make([]bool, len(a)), + stack: make([]*Task, 0, len(a)), + } + if slices.ContainsFunc(a, cc.isCyclic) { + + } + return cc.err +} + +type cycleChecker struct { + visited []bool + stack []*Task + err errors.Error +} + +func (cc *cycleChecker) isCyclic(t *Task) bool { + i := t.index + if !cc.visited[i] { + cc.visited[i] = true + cc.stack = append(cc.stack, t) + + for _, d := range t.depTasks { + if !cc.visited[d.index] && cc.isCyclic(d) { + return true + } else if cc.visited[d.index] { + cc.addCycleError(t) + return true + } + } + } + cc.stack = cc.stack[:len(cc.stack)-1] + cc.visited[i] = false + return false +} + +func (cc *cycleChecker) addCycleError(start *Task) { + // If any task in the cycle is marked as a service, don't report + // the error. This allows services like http.Serve to have bidirectional + // references with other tasks that read from request fields. + for _, t := range cc.stack { + if t.isService { + return + } + } + + err := &cycleError{} + + for _, t := range cc.stack { + err.path = append(err.path, t.v.Path()) + err.positions = append(err.positions, t.v.Pos()) + } + + cc.err = errors.Append(cc.err, err) +} + +type cycleError struct { + path []cue.Path + positions []token.Pos +} + +func (e *cycleError) Error() string { + msg, args := e.Msg() + return fmt.Sprintf(msg, args...) 
+} + +func (e *cycleError) Path() []string { return nil } + +func (e *cycleError) Msg() (format string, args []interface{}) { + w := &strings.Builder{} + for _, p := range e.path { + fmt.Fprintf(w, "\n\ttask %s refers to", p) + } + fmt.Fprintf(w, "\n\ttask %s", e.path[0]) + + return "cyclic task dependency:%v", []interface{}{w.String()} +} + +func (e *cycleError) Position() token.Pos { + if len(e.positions) == 0 { + return token.NoPos + } + return e.positions[0] +} + +func (e *cycleError) InputPositions() []token.Pos { + return e.positions +} diff --git a/vendor/cuelang.org/go/tools/flow/flow.go b/vendor/cuelang.org/go/tools/flow/flow.go new file mode 100644 index 0000000000..3d82778f6d --- /dev/null +++ b/vendor/cuelang.org/go/tools/flow/flow.go @@ -0,0 +1,528 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package flow provides a low-level workflow manager based on a CUE Instance. +// +// A Task defines an operational unit in a Workflow and corresponds to a struct +// in a CUE instance. This package does not define what a Task looks like in a +// CUE Instance. Instead, the user of this package must supply a TaskFunc that +// creates a Runner for cue.Values that are deemed to be a Task. +// +// Tasks may depend on other tasks. Cyclic dependencies are thereby not allowed. +// A Task A depends on another Task B if A, directly or indirectly, has a +// reference to any field of Task B, including its root. 
+package flow + +// TODO: Add hooks. This would allow UIs, for instance, to report on progress. +// +// - New(inst *cue.Instance, options ...Option) +// - AddTask(v cue.Value, r Runner) *Task +// - AddDependency(a, b *Task) +// - AddTaskGraph(root cue.Value, fn taskFunc) +// - AddSequence(list cue.Value, fn taskFunc) +// - Err() + +// TODO: +// Should we allow lists as a shorthand for a sequence of tasks? +// If so, how do we specify termination behavior? + +// TODO: +// Should we allow tasks to be a child of another task? Currently, the search +// for tasks end once a task root is found. +// +// Semantically it is somewhat unclear to do so: for instance, if an $after +// is used to refer to an explicit task dependency, it is logically +// indistinguishable whether this should be a subtask or is a dependency. +// Using higher-order constructs for analysis is generally undesirable. +// +// A possible solution would be to define specific "grouping tasks" whose sole +// purpose is to define sub tasks. The user of this package would then need +// to explicitly distinguish between tasks that are dependencies and tasks that +// are subtasks. + +// TODO: streaming tasks/ server applications +// +// Workflows are currently implemented for batch processing, for instance to +// implement shell scripting or other kinds of batch processing. +// +// This API has been designed, however, to also allow for streaming +// applications. For instance, a streaming Task could listen for Etcd changes +// or incoming HTTP requests and send updates each time an input changes. +// Downstream tasks could then alternate between a Waiting and Running state. +// +// Note that such streaming applications would also cause configurations to +// potentially not become increasingly more specific. Instead, a Task would +// replace its old result each time it is updated. This would require tracking +// of which conjunct was previously created by a task. 
+ +import ( + "context" + "fmt" + "slices" + "strings" + "sync/atomic" + + "cuelang.org/go/cue" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/stats" + "cuelang.org/go/internal/core/adt" + "cuelang.org/go/internal/core/convert" + "cuelang.org/go/internal/core/eval" + "cuelang.org/go/internal/value" +) + +var ( + // ErrAbort may be returned by a task to avoid processing downstream tasks. + // This can be used by control nodes to influence execution. + ErrAbort = errors.New("abort dependant tasks without failure") + + // TODO: ErrUpdate: update and run a dependency, but don't complete a + // dependency as more results may come. This is useful in server mode. +) + +// A TaskFunc creates a Runner for v if v defines a task or reports nil +// otherwise. It reports an error for illformed tasks. +// +// If TaskFunc returns a non-nil Runner the search for task within v stops. +// That is, subtasks are not supported. +type TaskFunc func(v cue.Value) (Runner, error) + +// A Runner executes a Task. +type Runner interface { + // Run runs a Task. If any of the tasks it depends on returned an error it + // is passed to this task. It reports an error upon failure. + // + // Any results to be returned can be set by calling Fill on the passed task. + // + // TODO: what is a good contract for receiving and passing errors and abort. + // + // If for a returned error x errors.Is(x, ErrAbort), all dependant tasks + // will not be run, without this being an error. + Run(t *Task, err error) error +} + +// Service is an optional interface that Runner implementations can +// provide to indicate that the task describes a service. Services are +// typically long-running tasks (like http.Serve) that produce values +// at runtime (e.g. requests) that other tasks may depend on. +// +// Services are treated specially in the dependency graph: +// 1. They are excluded from cycle detection filters. +// 2. 
Dependencies on services are treated as runtime dependencies, +// causing dependent tasks to be deferred until the service runs. +type Service interface { + IsService() bool +} + +// A RunnerFunc runs a Task. +type RunnerFunc func(t *Task) error + +func (f RunnerFunc) Run(t *Task, err error) error { + return f(t) +} + +// A Config defines options for interpreting an Instance as a Workflow. +type Config struct { + // Root limits the search for tasks to be within the path indicated to root. + // For the cue command, this is set to ["command"]. The default value is + // for all tasks to be root. + Root cue.Path + + // InferTasks allows tasks to be defined outside of the Root. Such tasks + // will only be included in the workflow if any of its fields is referenced + // by any of the tasks defined within Root. + // + // CAVEAT EMPTOR: this features is mostly provided for backwards + // compatibility with v0.2. A problem with this approach is that it will + // look for task structs within arbitrary data. So if not careful, there may + // be spurious matches. + InferTasks bool + + // IgnoreConcrete ignores references for which the values are already + // concrete and cannot change. + IgnoreConcrete bool + + // FindHiddenTasks allows tasks to be defined in hidden fields. + FindHiddenTasks bool + + // UpdateFunc is called whenever the information in the controller is + // updated. This includes directly after initialization. The task may be + // nil if this call is not the result of a task completing. + UpdateFunc func(c *Controller, t *Task) error + + // RunInferredTasks, when true, prevents inferred tasks from being deferred. + // This is used by ForkRunLoop where inferred tasks should run immediately + // because their inputs are already filled. + RunInferredTasks bool +} + +// A Controller defines a set of Tasks to be executed. 
+type Controller struct { + cfg Config + isTask TaskFunc + + inst cue.Value + valueSeqNum int64 + + env *adt.Environment + + conjuncts []adt.Conjunct + conjunctSeq int64 + + taskCh chan *Task + + opCtx *adt.OpContext + context context.Context + cancelFunc context.CancelFunc + + // taskStats tracks counters for auxiliary operations done by tasks. It does + // not include the CUE operations done by the Controller on behalf of tasks, + // which is likely going to tbe the bulk of the operations. + taskStats stats.Counts + + done atomic.Bool + + // keys maps task keys to their index. This allows a recreation of the + // Instance while retaining the original task indices. + // + // TODO: do instance updating in place to allow for more efficient + // processing. + keys map[string]*Task + tasks []*Task + + // Only used during task initialization. + nodes map[*adt.Vertex]*Task + + errs errors.Error +} + +// Stats reports statistics on the total number of CUE operations used. +// +// This is an experimental method and the API is likely to change. The +// Counts.String method will likely stay and is the safest way to use this API. +// +// This currently should only be called after completion or within a call to +// UpdateFunc. +func (c *Controller) Stats() (counts stats.Counts) { + counts = *c.opCtx.Stats() + counts.Add(c.taskStats) + return counts +} + +// Tasks reports the tasks that are currently registered with the controller. +// +// This may currently only be called before Run is called or from within +// a call to UpdateFunc. Task pointers returned by this call are not guaranteed +// to be the same between successive calls to this method. +func (c *Controller) Tasks() []*Task { + return c.tasks +} + +func (c *Controller) cancel() { + if c.cancelFunc != nil { + c.cancelFunc() + } +} + +func (c *Controller) addErr(err error, msg string) { + c.errs = errors.Append(c.errs, errors.Promote(err, msg)) +} + +// New creates a Controller for a given Instance and TaskFunc. 
+// +// The instance value can either be a *cue.Instance or a cue.Value. +func New(cfg *Config, inst cue.InstanceOrValue, f TaskFunc) *Controller { + v := inst.Value() + ctx := eval.NewContext(value.ToInternal(v)) + + c := &Controller{ + isTask: f, + inst: v, + opCtx: ctx, + + taskCh: make(chan *Task), + keys: map[string]*Task{}, + } + + if cfg != nil { + c.cfg = *cfg + } + c.initTasks(true) + return c + +} + +// Run runs the tasks of a workflow until completion. +func (c *Controller) Run(ctx context.Context) error { + c.context, c.cancelFunc = context.WithCancel(ctx) + defer c.cancelFunc() + + c.runLoop() + + // NOTE: track state here as runLoop might add more tasks to the flow + // during the execution so checking current tasks state may not be + // accurate enough to determine that the flow is terminated. + // This is used to determine if the controller value can be retrieved. + // When the controller value is safe to be read concurrently this tracking + // can be removed. + c.done.Store(true) + + return c.errs +} + +// Value returns the value managed by the controller. +// +// It is safe to use the value only after [Controller.Run] has returned. +// It panics if the flow is running. +func (c *Controller) Value() cue.Value { + if !c.done.Load() { + panic("can't retrieve value before flow has terminated") + } + return c.inst +} + +// We need to escape quotes in the path, per +// https://mermaid-js.github.io/mermaid/#/flowchart?id=entity-codes-to-escape-characters +// This also requires that we escape the quoting character #. +var mermaidQuote = strings.NewReplacer("#", "#35;", `"`, "#quot;") + +// mermaidGraph generates a mermaid graph of the current state. This can be +// pasted into https://mermaid-js.github.io/mermaid-live-editor/ for +// visualization. 
+func mermaidGraph(c *Controller) string { + w := &strings.Builder{} + fmt.Fprintln(w, "graph TD") + for i, t := range c.Tasks() { + path := mermaidQuote.Replace(t.Path().String()) + fmt.Fprintf(w, " t%d(\"%s [%s]\")\n", i, path, t.State()) + for _, t := range t.Dependencies() { + fmt.Fprintf(w, " t%d-->t%d\n", i, t.Index()) + } + } + return w.String() +} + +// A State indicates the state of a Task. +// +// The following state diagram indicates the possible state transitions: +// +// Ready +// ↗︎ ↘︎ +// Waiting ← Running +// ↘︎ ↙︎ +// Terminated +// +// A Task may move from Waiting to Terminating if one of +// the tasks on which it depends fails. +// +// NOTE: transitions from Running to Waiting are currently not supported. In +// the future this may be possible if a task depends on continuously running +// tasks that send updates. +type State int + +//go:generate go tool stringer -type=State + +const ( + // Waiting indicates a task is blocked on input from another task. + // + // NOTE: although this is currently not implemented, a task could + // theoretically move from the Running to Waiting state. + Waiting State = iota + + // Ready means a tasks is ready to run, but currently not running. + Ready + + // Running indicates a goroutine is currently active for a task and that + // it is not Waiting. + Running + + // Terminated means a task has stopped running either because it terminated + // while Running or was aborted by task on which it depends. The error + // value of a Task indicates the reason for the termination. + Terminated +) + +// A Task contains the context for a single task execution. +// Tasks may be run concurrently. 
+type Task struct { + // Static + c *Controller + ctxt *adt.OpContext + r Runner + + index int + path cue.Path + key string + labels []adt.Feature + isService bool // if true, this task is a service (excluded from cycles) + deferred bool // if true, this task is skipped at runtime (discovered from Service) + runtimeDeps []*Task // dependencies on Service tasks + + // Dynamic + update adt.Expr + deps map[*Task]bool + pathDeps map[string][]*Task + + conjunctSeq int64 + valueSeq int64 + v cue.Value + err errors.Error + state State + depTasks []*Task + + stats stats.Counts +} + +// Stats reports statistics on the number of CUE operations used to complete +// this task. +// +// This is an experimental method and the API is likely to change. +// +// It only shows numbers upon completion. This may change in the future. +func (t *Task) Stats() stats.Counts { + return t.stats +} + +// Context reports the Controller's Context. +func (t *Task) Context() context.Context { + return t.c.context +} + +// Path reports the path of Task within the Instance in which it is defined. +// The Path is always valid. +func (t *Task) Path() cue.Path { + return t.path +} + +// Index reports the sequence number of the Task. This will not change over +// time. +func (t *Task) Index() int { + return t.index +} + +func (t *Task) done() bool { + return t.state > Running +} + +func (t *Task) isReady() bool { + for _, d := range t.depTasks { + if !d.done() { + return false + } + } + return true +} + +func (t *Task) vertex() *adt.Vertex { + _, x := value.ToInternal(t.v) + return x +} + +func (t *Task) addDep(path string, dep *Task) { + if dep == nil || dep == t { + return + } + // Skip dependencies on tasks that are services. + // These are typically long-running tasks (like http.Serve) that fill + // their values at runtime, so waiting for them would cause deadlock. + if dep.isService { + t.runtimeDeps = append(t.runtimeDeps, dep) + return + } + // Skip dependencies on deferred tasks. 
These tasks won't run at startup + // and are expected to run later via ForkRunLoop. + if dep.deferred { + return + } + + if t.deps == nil { + t.deps = map[*Task]bool{} + t.pathDeps = map[string][]*Task{} + } + + // Add the dependencies for a given path to the controller. We could compute + // this again later, but this ensures there will be no discrepancies. + a := t.pathDeps[path] + found := slices.Contains(a, dep) + if !found { + t.pathDeps[path] = append(a, dep) + + } + + if !t.deps[dep] { + t.deps[dep] = true + t.depTasks = append(t.depTasks, dep) + } +} + +// Fill fills in values of the Controller's configuration for the current task. +// The changes take effect after the task completes. +// +// This method may currently only be called by the runner. +func (t *Task) Fill(x interface{}) error { + expr := convert.FromGoValue(t.ctxt, x, true) + if t.update == nil { + t.update = expr + return nil + } + t.update = &adt.BinaryExpr{ + Op: adt.AndOp, + X: t.update, + Y: expr, + } + return nil +} + +// Value reports the latest value of this task. +// +// This method may currently only be called before Run is called or after a +// Task completed, or from within a call to UpdateFunc. +func (t *Task) Value() cue.Value { + // TODO: synchronize + return t.v +} + +// Dependencies reports the Tasks t depends on. +// +// This method may currently only be called before Run is called or after a +// Task completed, or from within a call to UpdateFunc. +func (t *Task) Dependencies() []*Task { + // TODO: add synchronization. + return t.depTasks +} + +// PathDependencies reports the dependencies found for a value at the given +// path. +// +// This may currently only be called before Run is called or from within +// a call to UpdateFunc. +func (t *Task) PathDependencies(p cue.Path) []*Task { + return t.pathDeps[p.String()] +} + +// Err returns the error of a completed Task. 
+// +// This method may currently only be called before Run is called, after a +// Task completed, or from within a call to UpdateFunc. +func (t *Task) Err() error { + return t.err +} + +// State is the current state of the Task. +// +// This method may currently only be called before Run is called or after a +// Task completed, or from within a call to UpdateFunc. +func (t *Task) State() State { + return t.state +} diff --git a/vendor/cuelang.org/go/tools/flow/run.go b/vendor/cuelang.org/go/tools/flow/run.go new file mode 100644 index 0000000000..6b936ae7d0 --- /dev/null +++ b/vendor/cuelang.org/go/tools/flow/run.go @@ -0,0 +1,259 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flow + +// This file contains logic for running tasks. +// +// This implementation anticipates that workflows can also be used for defining +// servers, not just batch scripts. In the future, tasks may be long running and +// provide streams of results. +// +// The implementation starts a goroutine for each user-defined task, instead of +// having a fixed pool of workers. The main reason for this is that tasks are +// inherently heterogeneous and may be blocking on top of that. Also, in the +// future tasks may be long running, as discussed above. 
+ +import ( + "fmt" + "os" + "slices" + + "cuelang.org/go/cue/errors" + "cuelang.org/go/internal/core/adt" + "cuelang.org/go/internal/core/eval" + "cuelang.org/go/internal/cuedebug" + "cuelang.org/go/internal/value" +) + +func (c *Controller) runLoop() { + _, root := value.ToInternal(c.inst) + + // Copy the initial conjuncts. + rootConjuncts := slices.Collect(root.LeafConjuncts()) + n := len(rootConjuncts) + c.conjuncts = make([]adt.Conjunct, n, n+len(c.tasks)) + copy(c.conjuncts, rootConjuncts) + + c.markReady(nil) + + for c.errs == nil { + // Dispatch all unblocked tasks to workers. Only update + // the configuration when all have been dispatched. + + waiting := false + running := false + + // Mark tasks as Ready. + for _, t := range c.tasks { + // Skip deferred tasks - they're expected to run later + // via ForkRunLoop when their inputs become available. + if t.deferred { + continue + } + switch t.state { + case Waiting: + waiting = true + + case Ready: + t.state = Running + c.updateTaskValue(t) + + // If the task's path no longer resolves in the configuration + // (e.g. because a conditional guard was eliminated as the + // config became more concrete), skip this task. + if !t.v.Exists() { + t.state = Terminated + continue + } + + running = true + t.ctxt = eval.NewContext(value.ToInternal(t.v)) + + go func(t *Task) { + if err := t.r.Run(t, nil); err != nil { + t.err = errors.Promote(err, "task failed") + } + + t.c.taskCh <- t + }(t) + + case Running: + running = true + + case Terminated: + } + } + + if !running { + if waiting { + // Should not happen ever, as cycle detection should have caught + // this. But keep this around as a defensive measure. 
+ c.addErr(errors.New("deadlock"), "run loop") + } + break + } + + select { + case <-c.context.Done(): + return + + case t := <-c.taskCh: + t.state = Terminated + + taskStats := *t.ctxt.Stats() + t.stats.Add(taskStats) + c.taskStats.Add(taskStats) + + start := *c.opCtx.Stats() + + switch t.err { + case nil: + c.updateTaskResults(t) + + case ErrAbort: + // TODO: do something cleverer. + fallthrough + + default: + c.addErr(t.err, "task failure") + return + } + + // Recompute the configuration, if necessary. + if c.updateValue() { + // initTasks was already called in New to catch initialization + // errors earlier and add stats. + c.initTasks(false) + } + + c.updateTaskValue(t) + + t.stats.Add(c.opCtx.Stats().Since(start)) + + c.markReady(t) + } + } +} + +func (c *Controller) markReady(t *Task) { + for _, x := range c.tasks { + if x.state == Waiting && x.isReady() { + x.state = Ready + } + } + + cuedebug.Init() + if cuedebug.Flags.ToolsFlow { + fmt.Fprintln(os.Stderr, "tools/flow task dependency graph:") + fmt.Fprintln(os.Stderr, "```mermaid") + fmt.Fprint(os.Stderr, mermaidGraph(c)) + fmt.Fprintln(os.Stderr, "```") + } + + if c.cfg.UpdateFunc != nil { + if err := c.cfg.UpdateFunc(c, t); err != nil { + c.addErr(err, "task completed") + c.cancel() + return + } + } +} + +// updateValue recomputes the workflow configuration if it is out of date. It +// reports whether the values were updated. +func (c *Controller) updateValue() bool { + + if c.valueSeqNum == c.conjunctSeq { + return false + } + + // TODO: incrementally update results. Currently, the entire tree is + // recomputed on every update. This should not be necessary with the right + // notification structure in place. + + v := &adt.Vertex{Conjuncts: c.conjuncts} + v.Finalize(c.opCtx) + + c.inst = value.Make(c.opCtx, v) + c.valueSeqNum = c.conjunctSeq + return true +} + +// updateTaskValue updates the value of the task in the configuration if it is +// out of date. 
+func (c *Controller) updateTaskValue(t *Task) { + required := t.conjunctSeq + for _, dep := range t.depTasks { + if dep.conjunctSeq > required { + required = dep.conjunctSeq + } + } + + if t.valueSeq == required { + return + } + + if c.valueSeqNum < required { + c.updateValue() + } + + t.v = c.inst.LookupPath(t.path) + t.valueSeq = required +} + +// updateTaskResults updates the result status of the task and adds any result +// values to the overall configuration. +func (c *Controller) updateTaskResults(t *Task) bool { + if t.update == nil { + return false + } + + expr := t.update + for _, label := range slices.Backward(t.labels) { + switch label.Typ() { + case adt.StringLabel, adt.HiddenLabel, adt.DefinitionLabel, adt.HiddenDefinitionLabel, adt.LetLabel: + expr = &adt.StructLit{ + Decls: []adt.Decl{ + &adt.Field{ + Label: label, + Value: expr, + }, + }, + } + case adt.IntLabel: + i := label.Index() + list := &adt.ListLit{} + any := &adt.Top{} + // TODO(perf): make this a constant thing. This will be possible with the query extension. + for range i { + list.Elems = append(list.Elems, any) + } + list.Elems = append(list.Elems, expr, &adt.Ellipsis{}) + expr = list + default: + panic(fmt.Errorf("unexpected label type %v", label.Typ())) + } + } + + t.update = nil + + // TODO: replace rather than add conjunct if this task already added a + // conjunct before. This will allow for serving applications. + c.conjuncts = append(c.conjuncts, adt.MakeRootConjunct(c.env, expr)) + c.conjunctSeq++ + t.conjunctSeq = c.conjunctSeq + + return true +} diff --git a/vendor/cuelang.org/go/tools/flow/state_string.go b/vendor/cuelang.org/go/tools/flow/state_string.go new file mode 100644 index 0000000000..3f85735cb7 --- /dev/null +++ b/vendor/cuelang.org/go/tools/flow/state_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=State"; DO NOT EDIT. 
+ +package flow + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Waiting-0] + _ = x[Ready-1] + _ = x[Running-2] + _ = x[Terminated-3] +} + +const _State_name = "WaitingReadyRunningTerminated" + +var _State_index = [...]uint8{0, 7, 12, 19, 29} + +func (i State) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_State_index)-1 { + return "State(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _State_name[_State_index[idx]:_State_index[idx+1]] +} diff --git a/vendor/cuelang.org/go/tools/flow/tasks.go b/vendor/cuelang.org/go/tools/flow/tasks.go new file mode 100644 index 0000000000..a6e74ef410 --- /dev/null +++ b/vendor/cuelang.org/go/tools/flow/tasks.go @@ -0,0 +1,336 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flow + +// This file contains functionality for identifying tasks in the configuration +// and annotating the dependencies between them. + +import ( + "cuelang.org/go/cue" + "cuelang.org/go/cue/errors" + "cuelang.org/go/internal/core/adt" + "cuelang.org/go/internal/core/dep" + "cuelang.org/go/internal/value" +) + +// initTasks takes the current configuration and adds tasks to the list of +// tasks. 
It can be run multiple times on increasingly more concrete +// configurations to add more tasks, whereby the task pointers of previously +// found tasks are preserved. +func (c *Controller) initTasks(addStats bool) { + // Clear previous cache. + c.nodes = map[*adt.Vertex]*Task{} + + v := c.inst.LookupPath(c.cfg.Root) + if err := v.Err(); err != nil { + c.addErr(err, "invalid root") + c.cancel() + return + } + + // Mark any task that is located under the root. + c.findRootTasks(v) + + // Mark any tasks that are implied by dependencies. + // Note that the list of tasks may grow as this loop progresses. + for i := 0; i < len(c.tasks); i++ { + t := c.tasks[i] + start := *c.opCtx.Stats() + c.markTaskDependencies(t, t.vertex()) + if addStats { + t.stats.Add(c.opCtx.Stats().Since(start)) + } + } + + // Calculate deferred tasks (those waiting for runtime inputs). + // Only needed if we're not already running inferred tasks. + if !c.cfg.RunInferredTasks { + changed := true + for changed { + changed = false + for _, t := range c.tasks { + if t.deferred { + continue + } + + // Defer if task has runtime dependencies + shouldDefer := len(t.runtimeDeps) > 0 + + // Or if task depends on a deferred task (propagation) + if !shouldDefer { + for d := range t.deps { + if d.deferred { + shouldDefer = true + break + } + } + } + + // Service tasks (like Serve) are never deferred + if shouldDefer && !t.isService { + t.deferred = true + changed = true + } + } + } + + // Prune dependencies: Service tasks shouldn't wait for deferred tasks + for _, t := range c.tasks { + if t.isService { + var kept []*Task + for _, d := range t.depTasks { + if !d.deferred { + kept = append(kept, d) + } + } + t.depTasks = kept + } + } + } + + // Check if there are cycles in the task dependencies. + if err := checkCycle(c.tasks); err != nil { + c.addErr(err, "cyclic task") + } + + if c.errs != nil { + c.cancel() + } +} + +// findRootTasks finds tasks under the root. 
+func (c *Controller) findRootTasks(v cue.Value) { + t := c.getTask(nil, v) + + if t != nil { + return + } + + opts := []cue.Option{} + + if c.cfg.FindHiddenTasks { + opts = append(opts, cue.Hidden(true), cue.Definitions(false)) + } + + for iter, _ := v.Fields(opts...); iter.Next(); { + c.findRootTasks(iter.Value()) + } + for iter, _ := v.List(); iter.Next(); { + c.findRootTasks(iter.Value()) + } + +} + +// This file contains the functionality to locate and record the tasks of +// a configuration. It: +// - create Task struct for each node that is a task +// - associate nodes in a configuration with a Task, if applicable. +// The node-to-task map is used to determine task dependencies. + +// getTask finds and marks tasks that are descendents of v. +func (c *Controller) getTask(scope *Task, v cue.Value) *Task { + // Look up cached node. + _, w := value.ToInternal(v) + if t, ok := c.nodes[w]; ok { + return t + } + + if err := w.Err(c.opCtx); err != nil && err.Permanent { + c.addErr(err.Err, "invalid task") + return nil + } + + // Look up cached task from previous evaluation. + p := v.Path() + key := p.String() + + t := c.keys[key] + + if t == nil { + r, err := c.isTask(v) + + var errs errors.Error + if err != nil { + if !c.inRoot(w) { + // Must be in InferTask mode. In this case we ignore the error. + r = nil + } else { + c.addErr(err, "invalid task") + errs = errors.Promote(err, "create task") + } + } + + if r != nil { + index := len(c.tasks) + t = &Task{ + v: v, + c: c, + r: r, + path: p, + labels: w.Path(), + key: key, + index: index, + err: errs, + valueSeq: -1, // Ensure first updateTaskValue call updates the value + } + // If the runner declares it is a service, mark the task accordingly. + if ce, ok := r.(Service); ok && ce.IsService() { + t.isService = true + } + c.tasks = append(c.tasks, t) + c.keys[key] = t + } + } + + // Process nodes of task for this evaluation. 
+ if t != nil { + scope = t + if t.state <= Ready { + // Don't set the value if the task is currently running as this may + // result in all kinds of inconsistency issues. + t.v = v + } + + c.tagChildren(w, t) + } + + c.nodes[w] = scope + + return t +} + +func (c *Controller) tagChildren(n *adt.Vertex, t *Task) { + for _, a := range n.Arcs { + c.nodes[a] = t + c.tagChildren(a, t) + } +} + +// findImpliedTask determines the task of corresponding to node n, if any. If n +// is not already associated with a task, it tries to determine whether n is +// part of a task by checking if any of the parent nodes is a task. +// +// TODO: it is actually more accurate to check for tasks from top down. TODO: +// What should be done if a subtasks is referenced that is embedded in another +// task. Should the surrounding tasks be added as well? +func (c *Controller) findImpliedTask(d dep.Dependency) *Task { + // Ignore references into packages. Fill will fundamentally not work for + // packages, and packages cannot point back to the main package as cycles + // are not allowed. + if d.Import() != nil { + return nil + } + + n := d.Node + + // This Finalize should not be necessary, as the input to dep is already + // finalized. However, cue cmd uses some legacy instance stitching code + // where some of the backlink Environments are not properly initialized. + // Finalizing should patch those up at the expense of doing some duplicate + // work. The plan is to replace `cue cmd` with a much more clean + // implementation (probably a separate tool called `cuerun`) where this + // issue is fixed. For now we leave this patch. + // + // Note that this issue predates package flow, but that it just surfaced in + // flow and having a different evaluation order. + // + // Note: this call is cheap if n is already Finalized. 
+ n.Finalize(c.opCtx) + + for ; n != nil; n = n.Parent { + if c.cfg.IgnoreConcrete && n.IsConcrete() { + if k := n.BaseValue.Kind(); k != adt.StructKind && k != adt.ListKind { + return nil + } + } + + t, ok := c.nodes[n] + if ok || !c.cfg.InferTasks { + return t + } + + if !d.IsRoot() { + v := value.Make(c.opCtx, n) + + if t := c.getTask(nil, v); t != nil { + return t + } + } + } + + return nil +} + +// markTaskDependencies traces through all conjuncts of a Task and marks +// any dependencies on other tasks. +// +// The dependencies for a node by traversing the nodes of a task and then +// traversing the dependencies of the conjuncts. +// +// This terminates because: +// +// - traversing all nodes of all tasks is guaranteed finite (CUE does not +// evaluate to infinite structure). +// +// - traversing conjuncts of all nodes is finite, as the input syntax is +// inherently finite. +// +// - as regular nodes are traversed recursively they are marked with a cycle +// marker to detect cycles, ensuring a finite traversal as well. +func (c *Controller) markTaskDependencies(t *Task, n *adt.Vertex) { + cfg := &dep.Config{ + Dynamic: true, + } + dep.Visit(cfg, c.opCtx, n, func(d dep.Dependency) error { + depTask := c.findImpliedTask(d) + if depTask != nil { + if depTask != cycleMarker { + v := value.Make(c.opCtx, d.Node) + t.addDep(v.Path().String(), depTask) + } + return nil + } + + // If this points to a non-task node, it may itself point to a task. + // Handling this allows for dynamic references. For instance, such a + // value may reference the result value of a task, or even create + // new tasks based on the result of another task. 
+ if d.Import() == nil { + if c.nodes[d.Node] == cycleMarker { + return nil + } + c.nodes[d.Node] = cycleMarker + d.Recurse() + c.nodes[d.Node] = nil + } + return nil + }) +} + +func (c *Controller) inRoot(n *adt.Vertex) bool { + path := value.Make(c.opCtx, n).Path().Selectors() + root := c.cfg.Root.Selectors() + if len(path) < len(root) { + return false + } + for i, sel := range root { + if path[i] != sel { + return false + } + } + return true +} + +var cycleMarker = &Task{} diff --git a/vendor/cyphar.com/go-pathrs/.golangci.yml b/vendor/cyphar.com/go-pathrs/.golangci.yml index 2778a3268e..a28cbd2a4f 100644 --- a/vendor/cyphar.com/go-pathrs/.golangci.yml +++ b/vendor/cyphar.com/go-pathrs/.golangci.yml @@ -1,8 +1,8 @@ # SPDX-License-Identifier: MPL-2.0 # # libpathrs: safe path resolution on Linux -# Copyright (C) 2019-2025 Aleksa Sarai # Copyright (C) 2019-2025 SUSE LLC +# Copyright (C) 2026 Aleksa Sarai # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this diff --git a/vendor/cyphar.com/go-pathrs/doc.go b/vendor/cyphar.com/go-pathrs/doc.go index a7ee4bc487..c3b4eedd0f 100644 --- a/vendor/cyphar.com/go-pathrs/doc.go +++ b/vendor/cyphar.com/go-pathrs/doc.go @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MPL-2.0 /* * libpathrs: safe path resolution on Linux - * Copyright (C) 2019-2025 Aleksa Sarai * Copyright (C) 2019-2025 SUSE LLC + * Copyright (C) 2026 Aleksa Sarai * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this diff --git a/vendor/cyphar.com/go-pathrs/handle_linux.go b/vendor/cyphar.com/go-pathrs/handle_linux.go index 3221ef6738..6ed0b7af7a 100644 --- a/vendor/cyphar.com/go-pathrs/handle_linux.go +++ b/vendor/cyphar.com/go-pathrs/handle_linux.go @@ -3,8 +3,8 @@ // SPDX-License-Identifier: MPL-2.0 /* * libpathrs: safe path resolution on Linux - * Copyright (C) 2019-2025 Aleksa Sarai * Copyright (C) 2019-2025 SUSE LLC + * Copyright (C) 2026 Aleksa Sarai * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this @@ -30,11 +30,9 @@ import ( // you can try to use [Root.Open] or [Root.OpenFile]. // // It is critical that perform all relevant operations through this [Handle] -// (rather than fetching the file descriptor yourself with [Handle.IntoRaw]), +// (rather than fetching the underlying [os.File] yourself with [Handle.IntoFile]), // because the security properties of libpathrs depend on users doing all // relevant filesystem operations through libpathrs. -// -// [os.File]: https://pkg.go.dev/os#File type Handle struct { inner *os.File } @@ -43,7 +41,7 @@ type Handle struct { // handle will be copied by this method, so the original handle should still be // freed by the caller. // -// This is effectively the inverse operation of [Handle.IntoRaw], and is used +// This is effectively the inverse operation of [Handle.IntoFile], and is used // for "deserialising" pathrs root handles. func HandleFromFile(file *os.File) (*Handle, error) { newFile, err := fdutils.DupFile(file) @@ -92,8 +90,6 @@ func (h *Handle) OpenFile(flags int) (*os.File, error) { // calling [Handle.Close] will also close any copies of the returned [os.File]. // If you want to get an independent copy, use [Handle.Clone] followed by // [Handle.IntoFile] on the cloned [Handle]. 
-// -// [os.File]: https://pkg.go.dev/os#File func (h *Handle) IntoFile() *os.File { // TODO: Figure out if we really don't want to make a copy. // TODO: We almost certainly want to clear r.inner here, but we can't do diff --git a/vendor/cyphar.com/go-pathrs/internal/fdutils/fd_linux.go b/vendor/cyphar.com/go-pathrs/internal/fdutils/fd_linux.go index 41aea3e4b3..418b298149 100644 --- a/vendor/cyphar.com/go-pathrs/internal/fdutils/fd_linux.go +++ b/vendor/cyphar.com/go-pathrs/internal/fdutils/fd_linux.go @@ -3,8 +3,8 @@ // SPDX-License-Identifier: MPL-2.0 /* * libpathrs: safe path resolution on Linux - * Copyright (C) 2019-2025 Aleksa Sarai * Copyright (C) 2019-2025 SUSE LLC + * Copyright (C) 2026 Aleksa Sarai * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this diff --git a/vendor/cyphar.com/go-pathrs/internal/libpathrs/error_unix.go b/vendor/cyphar.com/go-pathrs/internal/libpathrs/error_unix.go index c9f416de01..8f610ca564 100644 --- a/vendor/cyphar.com/go-pathrs/internal/libpathrs/error_unix.go +++ b/vendor/cyphar.com/go-pathrs/internal/libpathrs/error_unix.go @@ -5,8 +5,8 @@ // SPDX-License-Identifier: MPL-2.0 /* * libpathrs: safe path resolution on Linux - * Copyright (C) 2019-2025 Aleksa Sarai * Copyright (C) 2019-2025 SUSE LLC + * Copyright (C) 2026 Aleksa Sarai * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this diff --git a/vendor/cyphar.com/go-pathrs/internal/libpathrs/libpathrs_linux.go b/vendor/cyphar.com/go-pathrs/internal/libpathrs/libpathrs_linux.go index c07b80e307..d54497a5b7 100644 --- a/vendor/cyphar.com/go-pathrs/internal/libpathrs/libpathrs_linux.go +++ b/vendor/cyphar.com/go-pathrs/internal/libpathrs/libpathrs_linux.go @@ -3,8 +3,8 @@ // SPDX-License-Identifier: MPL-2.0 /* * libpathrs: safe path resolution on Linux - * Copyright (C) 2019-2025 Aleksa Sarai * Copyright (C) 2019-2025 SUSE LLC + * Copyright (C) 2026 Aleksa Sarai * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this @@ -100,7 +100,7 @@ func InRootReadlink(rootFd uintptr, path string) (string, error) { size := 128 for { linkBuf := make([]byte, size) - n := C.pathrs_inroot_readlink(C.int(rootFd), cPath, C.cast_ptr(unsafe.Pointer(&linkBuf[0])), C.ulong(len(linkBuf))) + n := C.pathrs_inroot_readlink(C.int(rootFd), cPath, C.cast_ptr(unsafe.Pointer(&linkBuf[0])), C.size_t(len(linkBuf))) switch { case int(n) < C.__PATHRS_MAX_ERR_VALUE: return "", fetchError(n) @@ -301,7 +301,7 @@ func ProcReadlinkat(procRootFd int, base ProcBase, path string) (string, error) linkBuf := make([]byte, size) n := C.pathrs_proc_readlinkat( C.int(procRootFd), cBase, cPath, - C.cast_ptr(unsafe.Pointer(&linkBuf[0])), C.ulong(len(linkBuf))) + C.cast_ptr(unsafe.Pointer(&linkBuf[0])), C.size_t(len(linkBuf))) switch { case int(n) < C.__PATHRS_MAX_ERR_VALUE: return "", fetchError(n) diff --git a/vendor/cyphar.com/go-pathrs/procfs/procfs_linux.go b/vendor/cyphar.com/go-pathrs/procfs/procfs_linux.go index 5533c427cb..915e9ccdb5 100644 --- a/vendor/cyphar.com/go-pathrs/procfs/procfs_linux.go +++ b/vendor/cyphar.com/go-pathrs/procfs/procfs_linux.go @@ -3,8 +3,8 @@ // SPDX-License-Identifier: MPL-2.0 /* * libpathrs: safe path resolution on Linux - * Copyright (C) 2019-2025 Aleksa Sarai * Copyright (C) 
2019-2025 SUSE LLC + * Copyright (C) 2026 Aleksa Sarai * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this @@ -56,16 +56,15 @@ var ( // *before* you call wait(2)or any equivalent method that could reap // zombies). func ProcPid(pid int) ProcBase { - if pid < 0 || pid >= 1<<31 { + if pid < 0 || uint64(pid) >= 1<<31 { panic("invalid ProcBasePid value") // TODO: should this be an error? } - return ProcBase{inner: libpathrs.ProcPid(uint32(pid))} + pid32 := uint32(pid) //nolint:gosec // G115 false positive + return ProcBase{inner: libpathrs.ProcPid(pid32)} } // ThreadCloser is a callback that needs to be called when you are done // operating on an [os.File] fetched using [Handle.OpenThreadSelf]. -// -// [os.File]: https://pkg.go.dev/os#File type ThreadCloser func() // Handle is a wrapper around an *os.File handle to "/proc", which can be @@ -181,8 +180,6 @@ func (proc *Handle) OpenRoot(path string, flags int) (*os.File, error) { // Unlike [Handle.OpenThreadSelf], this method does not involve locking // the goroutine to the current OS thread and so is simpler to use and // theoretically has slightly less overhead. -// -// [runtime.LockOSThread]: https://pkg.go.dev/runtime#LockOSThread func (proc *Handle) OpenSelf(path string, flags int) (*os.File, error) { file, closer, err := proc.open(ProcSelf, path, flags) if closer != nil { @@ -228,10 +225,6 @@ func (proc *Handle) OpenPid(pid int, path string, flags int) (*os.File, error) { // callback MUST be called AFTER you have finished using the returned // [os.File]. This callback is completely separate to [os.File.Close], so it // must be called regardless of how you close the handle. 
-// -// [runtime.LockOSThread]: https://pkg.go.dev/runtime#LockOSThread -// [os.File]: https://pkg.go.dev/os#File -// [os.File.Close]: https://pkg.go.dev/os#File.Close func (proc *Handle) OpenThreadSelf(path string, flags int) (*os.File, ThreadCloser, error) { return proc.open(ProcThreadSelf, path, flags) } diff --git a/vendor/cyphar.com/go-pathrs/root_linux.go b/vendor/cyphar.com/go-pathrs/root_linux.go index edc9e4c87f..5bc2e90717 100644 --- a/vendor/cyphar.com/go-pathrs/root_linux.go +++ b/vendor/cyphar.com/go-pathrs/root_linux.go @@ -3,8 +3,8 @@ // SPDX-License-Identifier: MPL-2.0 /* * libpathrs: safe path resolution on Linux - * Copyright (C) 2019-2025 Aleksa Sarai * Copyright (C) 2019-2025 SUSE LLC + * Copyright (C) 2026 Aleksa Sarai * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this @@ -54,8 +54,6 @@ func OpenRoot(path string) (*Root, error) { // still be closed by the caller. // // This is effectively the inverse operation of [Root.IntoFile]. -// -// [os.File]: https://pkg.go.dev/os#File func RootFromFile(file *os.File) (*Root, error) { newFile, err := fdutils.DupFile(file) if err != nil { @@ -109,8 +107,6 @@ func (r *Root) ResolveNoFollow(path string) (*Handle, error) { // ergonomic to use. // // This is effectively equivalent to [os.Open]. -// -// [os.Open]: https://pkg.go.dev/os#Open func (r *Root) Open(path string) (*os.File, error) { return r.OpenFile(path, os.O_RDONLY) } @@ -127,8 +123,6 @@ func (r *Root) Open(path string) (*os.File, error) { // // This is effectively equivalent to [os.OpenFile], except that os.O_CREAT is // not supported. 
-// -// [os.OpenFile]: https://pkg.go.dev/os#OpenFile func (r *Root) OpenFile(path string, flags int) (*os.File, error) { return fdutils.WithFileFd(r.inner, func(rootFd uintptr) (*os.File, error) { fd, err := libpathrs.InRootOpen(rootFd, path, flags) @@ -145,8 +139,6 @@ func (r *Root) OpenFile(path string, flags int) (*os.File, error) { // // Unlike [os.Create], if the file already exists an error is created rather // than the file being opened and truncated. -// -// [os.Create]: https://pkg.go.dev/os#Create func (r *Root) Create(path string, flags int, mode os.FileMode) (*os.File, error) { unixMode, err := toUnixMode(mode, false) if err != nil { @@ -194,8 +186,6 @@ func (r *Root) RemoveFile(path string) error { // directory tree. // // This is effectively equivalent to [os.Remove]. -// -// [os.Remove]: https://pkg.go.dev/os#Remove func (r *Root) Remove(path string) error { // In order to match os.Remove's implementation we need to also do both // syscalls unconditionally and adjust the error based on whether @@ -219,8 +209,6 @@ func (r *Root) Remove(path string) error { // RemoveAll recursively deletes a path and all of its children. // // This is effectively equivalent to [os.RemoveAll]. -// -// [os.RemoveAll]: https://pkg.go.dev/os#RemoveAll func (r *Root) RemoveAll(path string) error { _, err := fdutils.WithFileFd(r.inner, func(rootFd uintptr) (struct{}, error) { err := libpathrs.InRootRemoveAll(rootFd, path) @@ -233,8 +221,6 @@ func (r *Root) RemoveAll(path string) error { // mode is used for the new directory (the process's umask applies). // // This is effectively equivalent to [os.Mkdir]. -// -// [os.Mkdir]: https://pkg.go.dev/os#Mkdir func (r *Root) Mkdir(path string, mode os.FileMode) error { unixMode, err := toUnixMode(mode, false) if err != nil { @@ -253,8 +239,6 @@ func (r *Root) Mkdir(path string, mode os.FileMode) error { // directories created by this function (the process's umask applies). // // This is effectively equivalent to [os.MkdirAll]. 
-// -// [os.MkdirAll]: https://pkg.go.dev/os#MkdirAll func (r *Root) MkdirAll(path string, mode os.FileMode) (*Handle, error) { unixMode, err := toUnixMode(mode, false) if err != nil { @@ -278,9 +262,7 @@ func (r *Root) MkdirAll(path string, mode os.FileMode) (*Handle, error) { // directory tree. The provided mode is used for the new directory (the // process's umask applies). // -// This is effectively equivalent to [unix.Mknod]. -// -// [unix.Mknod]: https://pkg.go.dev/golang.org/x/sys/unix#Mknod +// This is effectively equivalent to [golang.org/x/sys/unix.Mknod]. func (r *Root) Mknod(path string, mode os.FileMode, dev uint64) error { unixMode, err := toUnixMode(mode, true) if err != nil { @@ -298,8 +280,6 @@ func (r *Root) Mknod(path string, mode os.FileMode, dev uint64) error { // created at path and is a link to target. // // This is effectively equivalent to [os.Symlink]. -// -// [os.Symlink]: https://pkg.go.dev/os#Symlink func (r *Root) Symlink(path, target string) error { _, err := fdutils.WithFileFd(r.inner, func(rootFd uintptr) (struct{}, error) { err := libpathrs.InRootSymlink(rootFd, path, target) @@ -314,8 +294,6 @@ func (r *Root) Symlink(path, target string) error { // host). // // This is effectively equivalent to [os.Link]. -// -// [os.Link]: https://pkg.go.dev/os#Link func (r *Root) Hardlink(path, target string) error { _, err := fdutils.WithFileFd(r.inner, func(rootFd uintptr) (struct{}, error) { err := libpathrs.InRootHardlink(rootFd, path, target) @@ -327,8 +305,6 @@ func (r *Root) Hardlink(path, target string) error { // Readlink returns the target of a symlink with a [Root]'s directory tree. // // This is effectively equivalent to [os.Readlink]. 
-// -// [os.Readlink]: https://pkg.go.dev/os#Readlink func (r *Root) Readlink(path string) (string, error) { return fdutils.WithFileFd(r.inner, func(rootFd uintptr) (string, error) { return libpathrs.InRootReadlink(rootFd, path) @@ -345,8 +321,6 @@ func (r *Root) Readlink(path string) (string, error) { // calling [Root.Close] will also close any copies of the returned [os.File]. // If you want to get an independent copy, use [Root.Clone] followed by // [Root.IntoFile] on the cloned [Root]. -// -// [os.File]: https://pkg.go.dev/os#File func (r *Root) IntoFile() *os.File { // TODO: Figure out if we really don't want to make a copy. // TODO: We almost certainly want to clear r.inner here, but we can't do diff --git a/vendor/cyphar.com/go-pathrs/utils_linux.go b/vendor/cyphar.com/go-pathrs/utils_linux.go index 2208d608f8..b4e7e08e7d 100644 --- a/vendor/cyphar.com/go-pathrs/utils_linux.go +++ b/vendor/cyphar.com/go-pathrs/utils_linux.go @@ -3,8 +3,8 @@ // SPDX-License-Identifier: MPL-2.0 /* * libpathrs: safe path resolution on Linux - * Copyright (C) 2019-2025 Aleksa Sarai * Copyright (C) 2019-2025 SUSE LLC + * Copyright (C) 2026 Aleksa Sarai * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/aliyuncli.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/aliyuncli.go new file mode 100644 index 0000000000..3165c474be --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/aliyuncli.go @@ -0,0 +1,338 @@ +package provider + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/exec" + "regexp" + "strings" + "sync" + "time" +) + +const ( + envProfileName = "ALIBABA_CLOUD_PROFILE" +) + +type profileWrapper struct { + cp Profile + conf *Configuration + + stsEndpoint string + client *http.Client + logger Logger + + getProviderForUnknownMode func(p Profile) (CredentialsProvider, error) +} + +type CLIConfigProvider struct { + profile *profileWrapper + logger Logger + + pr CredentialsProvider + lock sync.Mutex +} + +type CLIConfigProviderOptions struct { + ConfigPath string + ProfileName string + STSEndpoint string + Logger Logger + + GetProviderForUnknownMode func(p Profile) (CredentialsProvider, error) + + conf *Configuration +} + +func NewCLIConfigProvider(opts CLIConfigProviderOptions) (*CLIConfigProvider, error) { + opts.applyDefaults() + logger := opts.Logger + + conf, profile, err := loadProfile(opts.ConfigPath, opts.ProfileName, opts.conf) + if err != nil { + return nil, NewNotEnableError(fmt.Errorf("load profile: %w", err)) + } + if err := profile.validate(); err != nil { + return nil, NewNotEnableError(fmt.Errorf("validate profile: %w", err)) + } + logger.Debug(fmt.Sprintf("use profile name: %s", profile.Name)) + c := &CLIConfigProvider{ + profile: &profileWrapper{ + cp: profile, + conf: conf, + stsEndpoint: opts.STSEndpoint, + client: &http.Client{ + Timeout: time.Second * 30, + }, + logger: logger, + getProviderForUnknownMode: opts.GetProviderForUnknownMode, + }, + logger: logger, + } + return c, 
nil +} + +func loadProfile(path string, name string, conf *Configuration) (*Configuration, Profile, error) { + var p Profile + var err error + + if conf == nil { + conf, err = loadConfiguration(path) + if err != nil { + return nil, p, fmt.Errorf("init config: %w", err) + } + } + if name == "" { + name = conf.CurrentProfile + } + p, ok := conf.getProfile(name) + if !ok { + return nil, p, fmt.Errorf("unknown profile %s", name) + } + return conf, p, nil +} + +func (c *CLIConfigProvider) Credentials(ctx context.Context) (*Credentials, error) { + p, err := c.getAndUpdateProvider() + if err != nil { + return nil, err + } + return p.Credentials(ctx) +} + +func (c *CLIConfigProvider) getAndUpdateProvider() (CredentialsProvider, error) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.pr != nil { + return c.pr, nil + } + + pr, err := c.profile.getProvider() + if err != nil { + return nil, err + } + c.pr = pr + + return pr, nil +} + +func (p *profileWrapper) getProvider() (CredentialsProvider, error) { + cp := p.cp + + switch cp.Mode { + case AK: + p.logger.Debug(fmt.Sprintf("using %s mode", cp.Mode)) + return p.getCredentialsByAK() + case StsToken: + p.logger.Debug(fmt.Sprintf("using %s mode", cp.Mode)) + return p.getCredentialsBySts() + case RamRoleArn: + p.logger.Debug(fmt.Sprintf("using %s mode", cp.Mode)) + return p.getCredentialsByRoleArn() + case EcsRamRole: + p.logger.Debug(fmt.Sprintf("using %s mode", cp.Mode)) + return p.getCredentialsByEcsRamRole() + case RamRoleArnWithEcs: + p.logger.Debug(fmt.Sprintf("using %s mode", cp.Mode)) + return p.getCredentialsByRamRoleArnWithEcs() + case ChainableRamRoleArn: + p.logger.Debug(fmt.Sprintf("using %s mode", cp.Mode)) + return p.getCredentialsByChainableRamRoleArn() + case External: + p.logger.Debug(fmt.Sprintf("using %s mode", cp.Mode)) + return p.getCredentialsByExternal() + case CredentialsURI: + p.logger.Debug(fmt.Sprintf("using %s mode", cp.Mode)) + return p.getCredentialsByCredentialsURI() + default: + if 
p.getProviderForUnknownMode != nil { + return p.getProviderForUnknownMode(cp) + } + return nil, fmt.Errorf("unexcepted credentials mode: %s", cp.Mode) + } +} + +func (p *profileWrapper) getCredentialsByAK() (CredentialsProvider, error) { + cp := p.cp + + return NewAccessKeyProvider(cp.AccessKeyId, cp.AccessKeySecret), nil +} + +func (p *profileWrapper) getCredentialsBySts() (CredentialsProvider, error) { + cp := p.cp + + return NewSTSTokenProvider(cp.AccessKeyId, cp.AccessKeySecret, cp.StsToken), nil +} + +func (p *profileWrapper) getCredentialsByRoleArn() (CredentialsProvider, error) { + cp := p.cp + + preP, _ := p.getCredentialsByAK() + if cp.StsToken != "" { + preP, _ = p.getCredentialsBySts() + } + + return p.getCredentialsByRoleArnWithPro(preP) +} + +func (p *profileWrapper) getCredentialsByRoleArnWithPro(preP CredentialsProvider) (CredentialsProvider, error) { + cp := p.cp + + credP := NewRoleArnProvider(preP, cp.RamRoleArn, RoleArnProviderOptions{ + STSEndpoint: p.stsEndpoint, + SessionName: cp.RoleSessionName, + Logger: p.logger, + ExternalId: cp.ExternalId, + }) + return credP, nil +} + +func (p *profileWrapper) getCredentialsByEcsRamRole() (CredentialsProvider, error) { + cp := p.cp + + credP := NewECSMetadataProvider(ECSMetadataProviderOptions{ + RoleName: cp.RamRoleName, + Logger: p.logger, + }) + return credP, nil +} + +//func (p *profileWrapper) getCredentialsByPrivateKey() (credentials.Credential, error) { +// +//} + +func (p *profileWrapper) getCredentialsByRamRoleArnWithEcs() (CredentialsProvider, error) { + preP, err := p.getCredentialsByEcsRamRole() + if err != nil { + return nil, err + } + return p.getCredentialsByRoleArnWithPro(preP) +} + +func (p *profileWrapper) getCredentialsByChainableRamRoleArn() (CredentialsProvider, error) { + cp := p.cp + profileName := cp.SourceProfile + + p.logger.Debug(fmt.Sprintf("get credentials from source profile %s", profileName)) + source, loaded := p.conf.getProfile(profileName) + if !loaded { + return nil, 
fmt.Errorf("can not load the source profile: " + profileName) + } + + newP := p.clone(source) + preP, err := newP.getProvider() + if err != nil { + return nil, err + } + + p.logger.Debug(fmt.Sprintf("using role arn by current profile %s", cp.Name)) + return p.getCredentialsByRoleArnWithPro(preP) +} + +func (p *profileWrapper) getCredentialsByExternal() (CredentialsProvider, error) { + cp := p.cp + args := strings.Fields(cp.ProcessCommand) + cmd := exec.Command(args[0], args[1:]...) // #nosec G204 + p.logger.Debug(fmt.Sprintf("running external program: %s", cp.ProcessCommand)) + + genmsg := func(buf []byte, err error) string { + message := fmt.Sprintf(`run external program to get credentials faild: + command: %s + output: %s + error: %s`, + cp.ProcessCommand, string(buf), err.Error()) + return message + } + + stdout := bytes.Buffer{} + stderr := bytes.Buffer{} + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + allOutput := stderr.String() + "\n" + stdout.String() + if err != nil { + message := genmsg([]byte(allOutput), err) + return nil, errors.New(message) + } + + buf := stdout.Bytes() + var newCP Profile + err = json.Unmarshal(buf, &newCP) + if err != nil { + if pp := tryToParseProfileFromOutput(string(buf)); pp != nil { + newCP = *pp + } else { + message := genmsg([]byte(allOutput), err) + return nil, errors.New(message) + } + } + + p.logger.Debug(fmt.Sprintf("using profile from output of external program")) + newP := p.clone(newCP) + return newP.getProvider() +} + +func (w *profileWrapper) clone(newCP Profile) profileWrapper { + return profileWrapper{ + client: w.client, + conf: w.conf, + cp: newCP, + getProviderForUnknownMode: w.getProviderForUnknownMode, + logger: w.logger, + stsEndpoint: w.stsEndpoint, + } +} + +var regexpCredJSON = regexp.MustCompile(`{[^}]+"mode":[^}]+}`) + +func tryToParseProfileFromOutput(output string) *Profile { + ret := regexpCredJSON.FindAllString(output, 1) + if len(ret) < 1 { + return nil + } + credJSON := ret[0] + 
var p Profile + if err := json.Unmarshal([]byte(credJSON), &p); err == nil && p.Mode != "" { + return &p + } + return nil +} + +func (p *profileWrapper) getCredentialsByCredentialsURI() (CredentialsProvider, error) { + cp := p.cp + uri := cp.CredentialsURI + if uri == "" { + uri = os.Getenv(envCredentialsURI) + } + if uri == "" { + return nil, fmt.Errorf("invalid credentials uri") + } + p.logger.Debug(fmt.Sprintf("get credentials from uri %s", uri)) + + newPr := NewURIProvider(uri, URIProviderOptions{}) + return newPr, nil +} + +func (c *CLIConfigProvider) ProfileName() string { + return c.profile.cp.Name +} + +func (o *CLIConfigProviderOptions) applyDefaults() { + if o.ConfigPath == "" { + o.ConfigPath = getDefaultConfigPath() + } + if o.ProfileName == "" { + if v := os.Getenv(envProfileName); v != "" { + o.ProfileName = v + } + } + if o.Logger == nil { + o.Logger = DefaultLogger + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/aliyuncli_configuration.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/aliyuncli_configuration.go new file mode 100644 index 0000000000..5f69a414c2 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/aliyuncli_configuration.go @@ -0,0 +1,96 @@ +// Copyright (c) 2009-present, Alibaba Cloud All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package provider + +import ( + "encoding/json" + "fmt" + "os" + "path" + "path/filepath" +) + +const ( + defaultCLIConfigDir = "~/.aliyun" + cliConfigFileName = "config.json" + defaultCLIConfigProfileName = "default" +) + +type Configuration struct { + CurrentProfile string `json:"current"` + Profiles []Profile `json:"profiles"` + MetaPath string `json:"meta_path"` + //Plugins []Plugin `json:"plugin"` +} + +func newConfiguration() *Configuration { + return &Configuration{ + CurrentProfile: defaultCLIConfigProfileName, + Profiles: []Profile{ + newProfile(defaultCLIConfigProfileName), + }, + } +} + +func (c *Configuration) getProfile(pn string) (Profile, bool) { + for _, p := range c.Profiles { + if p.Name == pn { + return p, true + } + } + return Profile{Name: pn}, false +} + +func getDefaultConfigPath() string { + dir, err := expandPath(defaultCLIConfigDir) + if err != nil { + dir = defaultCLIConfigDir + } + return path.Join(dir, cliConfigFileName) +} + +func loadConfiguration(inputPath string) (conf *Configuration, err error) { + _, statErr := os.Stat(inputPath) + if os.IsNotExist(statErr) { + conf = newConfiguration() + return + } + + bytes, err := os.ReadFile(path.Clean(inputPath)) + if err != nil { + err = fmt.Errorf("reading config from '%s' failed %v", inputPath, err) + return + } + + conf, err = newConfigFromBytes(bytes) + return +} + +func newConfigFromBytes(bytes []byte) (conf *Configuration, err error) { + conf = newConfiguration() + err = json.Unmarshal(bytes, conf) + return +} + +func expandPath(path string) (string, error) { + if len(path) > 0 && path[0] == '~' { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + path = filepath.Join(home, path[1:]) + } + return path, nil +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/aliyuncli_profile.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/aliyuncli_profile.go new file mode 100644 index 
0000000000..2304d5955c --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/aliyuncli_profile.go @@ -0,0 +1,149 @@ +// Copyright (c) 2009-present, Alibaba Cloud All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package provider + +import ( + "fmt" +) + +type AuthenticateMode string + +const ( + AK = AuthenticateMode("AK") + StsToken = AuthenticateMode("StsToken") + RamRoleArn = AuthenticateMode("RamRoleArn") + EcsRamRole = AuthenticateMode("EcsRamRole") + RsaKeyPair = AuthenticateMode("RsaKeyPair") + RamRoleArnWithEcs = AuthenticateMode("RamRoleArnWithRoleName") + ChainableRamRoleArn = AuthenticateMode("ChainableRamRoleArn") + External = AuthenticateMode("External") + CredentialsURI = AuthenticateMode("CredentialsURI") +) + +type Profile struct { + Name string `json:"name"` + Mode AuthenticateMode `json:"mode"` + AccessKeyId string `json:"access_key_id,omitempty"` + AccessKeySecret string `json:"access_key_secret,omitempty"` + StsToken string `json:"sts_token,omitempty"` + StsRegion string `json:"sts_region,omitempty"` + RamRoleName string `json:"ram_role_name,omitempty"` + RamRoleArn string `json:"ram_role_arn,omitempty"` + RoleSessionName string `json:"ram_session_name,omitempty"` + ExternalId string `json:"external_id,omitempty"` + SourceProfile string `json:"source_profile,omitempty"` + PrivateKey string `json:"private_key,omitempty"` + KeyPairName string 
`json:"key_pair_name,omitempty"` + ExpiredSeconds int `json:"expired_seconds,omitempty"` + Verified string `json:"verified,omitempty"` + RegionId string `json:"region_id,omitempty"` + OutputFormat string `json:"output_format,omitempty"` + Language string `json:"language,omitempty"` + Site string `json:"site,omitempty"` + ReadTimeout int `json:"retry_timeout,omitempty"` + ConnectTimeout int `json:"connect_timeout,omitempty"` + RetryCount int `json:"retry_count,omitempty"` + ProcessCommand string `json:"process_command,omitempty"` + CredentialsURI string `json:"credentials_uri,omitempty"` + OIDCProviderARN string `json:"oidc_provider_arn,omitempty"` + OIDCTokenFile string `json:"oidc_token_file,omitempty"` + CloudSSOSignInUrl string `json:"cloud_sso_sign_in_url,omitempty"` + AccessToken string `json:"access_token,omitempty"` // for CloudSSO, read only + CloudSSOAccessTokenExpire int64 `json:"cloud_sso_access_token_expire,omitempty"` // for CloudSSO, read only + StsExpiration int64 `json:"sts_expiration,omitempty"` // for CloudSSO, read only + CloudSSOAccessConfig string `json:"cloud_sso_access_config,omitempty"` // for CloudSSO + CloudSSOAccountId string `json:"cloud_sso_account_id,omitempty"` // for CloudSSO, read only + parent *Configuration //`json:"-"` +} + +func newProfile(name string) Profile { + return Profile{ + Name: name, + Mode: "", + OutputFormat: "json", + Language: "en", + } +} + +func (cp *Profile) validate() error { + if cp.Mode == "" { + return fmt.Errorf("profile %s is not configure yet, run `aliyun configure --profile %s` first", cp.Name, cp.Name) + } + + switch cp.Mode { + case AK: + return cp.validateAK() + case StsToken: + err := cp.validateAK() + if err != nil { + return err + } + if cp.StsToken == "" { + return fmt.Errorf("invalid sts_token") + } + case RamRoleArn: + err := cp.validateAK() + if err != nil { + return err + } + if cp.RamRoleArn == "" { + return fmt.Errorf("invalid ram_role_arn") + } + if cp.RoleSessionName == "" { + return 
fmt.Errorf("invalid role_session_name") + } + case EcsRamRole, RamRoleArnWithEcs: + case RsaKeyPair: + if cp.PrivateKey == "" { + return fmt.Errorf("invalid private_key") + } + if cp.KeyPairName == "" { + return fmt.Errorf("invalid key_pair_name") + } + case External: + if cp.ProcessCommand == "" { + return fmt.Errorf("invalid process_command") + } + case CredentialsURI: + if cp.CredentialsURI == "" { + return fmt.Errorf("invalid credentials_uri") + } + case ChainableRamRoleArn: + if cp.SourceProfile == "" { + return fmt.Errorf("invalid source_profile") + } + if cp.RamRoleArn == "" { + return fmt.Errorf("invalid ram_role_arn") + } + if cp.RoleSessionName == "" { + return fmt.Errorf("invalid role_session_name") + } + } + return nil +} + +func (cp *Profile) getParent() *Configuration { + return cp.parent +} + +func (cp *Profile) validateAK() error { + if len(cp.AccessKeyId) == 0 { + return fmt.Errorf("invalid access_key_id: %s", cp.AccessKeyId) + } + if len(cp.AccessKeySecret) == 0 { + return fmt.Errorf("invaild access_key_secret: %s", cp.AccessKeySecret) + } + return nil +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/chain_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/chain_provider.go index 92cfcbe4cc..4a754b949e 100644 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/chain_provider.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/chain_provider.go @@ -162,26 +162,41 @@ type DefaultChainProviderOptions struct { func NewDefaultChainProvider(opts DefaultChainProviderOptions) *ChainProvider { opts.applyDefaults() - p := NewChainProviderWithOptions( - []CredentialsProvider{ - NewEnvProvider(EnvProviderOptions{}), - NewOIDCProvider(OIDCProviderOptions{ - STSEndpoint: opts.STSEndpoint, - ExpiryWindow: opts.ExpiryWindow, - RefreshPeriod: opts.RefreshPeriod, - Logger: opts.Logger, - }), - 
NewEncryptedFileProvider(EncryptedFileProviderOptions{ - ExpiryWindow: opts.ExpiryWindow, - RefreshPeriod: opts.RefreshPeriod, - Logger: opts.Logger, - }), - NewECSMetadataProvider(ECSMetadataProviderOptions{ - ExpiryWindow: opts.ExpiryWindow, - RefreshPeriod: opts.RefreshPeriod, - Logger: opts.Logger, - }), - }, + providers := []CredentialsProvider{ + NewEnvProvider(EnvProviderOptions{}), + NewOIDCProvider(OIDCProviderOptions{ + STSEndpoint: opts.STSEndpoint, + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + }), + NewEncryptedFileProvider(EncryptedFileProviderOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + }), + } + + if p, err := NewCLIConfigProvider(CLIConfigProviderOptions{ + STSEndpoint: opts.STSEndpoint, + Logger: opts.Logger, + }); err == nil { + providers = append(providers, p) + } + if p, err := NewIniConfigProvider(INIConfigProviderOptions{ + STSEndpoint: opts.STSEndpoint, + Logger: opts.Logger, + }); err == nil { + providers = append(providers, p) + } + + providers = append(providers, NewECSMetadataProvider(ECSMetadataProviderOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + })) + + p := NewChainProviderWithOptions(providers, ChainProviderOptions{ EnableRuntimeSwitch: opts.EnableRuntimeSwitch, RuntimeSwitchCacheDuration: opts.RuntimeSwitchCacheDuration, diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/credentials.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/credentials.go index 778458d7b8..598a5aa36f 100644 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/credentials.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/credentials.go @@ -7,6 +7,8 @@ type Credentials struct { AccessKeySecret string SecurityToken string Expiration time.Time + + 
nextRefresh time.Time } func (c *Credentials) DeepCopy() *Credentials { @@ -18,5 +20,28 @@ func (c *Credentials) DeepCopy() *Credentials { AccessKeySecret: c.AccessKeySecret, SecurityToken: c.SecurityToken, Expiration: c.Expiration, + nextRefresh: c.nextRefresh, + } +} + +func (c *Credentials) expired(now time.Time, expiryDelta time.Duration) bool { + exp := c.Expiration + if exp.IsZero() { + return false + } + if expiryDelta > 0 { + exp = exp.Add(-expiryDelta) + } + + return exp.Before(now) || exp.Equal(now) +} + +func (c *Credentials) shouldRefresh(now time.Time) bool { + if c.expired(now, 0) { + return true + } + if c.nextRefresh.IsZero() { + return false } + return c.nextRefresh.Before(now) || c.nextRefresh.Equal(now) } diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ecsmetadata_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ecsmetadata_provider.go index 4c7712b818..d6f853da86 100644 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ecsmetadata_provider.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ecsmetadata_provider.go @@ -2,16 +2,23 @@ package provider import ( "context" - "encoding/json" + "errors" "fmt" "net/http" + "os" + "strconv" "strings" "time" + + "github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata" ) const ( defaultExpiryWindow = time.Minute * 30 defaultECSMetadataServerEndpoint = "http://100.100.100.200" + envECSMetadataServerEndpoint = "ALIBABA_CLOUD_IMDS_ENDPOINT" + envIMDSV2Disabled = "ALIBABA_CLOUD_IMDSV2_DISABLED" + envIMDSRoleName = "ALIBABA_CLOUD_ECS_METADATA" defaultECSMetadataTokenTTLSeconds = 3600 defaultClientTimeout = time.Second * 30 ) @@ -21,11 +28,12 @@ type ECSMetadataProvider struct { endpoint string roleName string + disableToken bool metadataToken string metadataTokenTTLSeconds int metadataTokenExp time.Time - client *commonHttpClient + client 
*ecsmetadata.Client Logger Logger } @@ -36,6 +44,7 @@ type ECSMetadataProviderOptions struct { RoleName string MetadataTokenTTLSeconds int + DisableToken bool ExpiryWindow time.Duration RefreshPeriod time.Duration @@ -44,15 +53,26 @@ type ECSMetadataProviderOptions struct { func NewECSMetadataProvider(opts ECSMetadataProviderOptions) *ECSMetadataProvider { opts.applyDefaults() + cops := opts.toClientOptions() + wrapper := func(rt http.RoundTripper) http.RoundTripper { + w := &commonHttpClient{ + delegatedRoundTripper: rt, + logger: opts.Logger, + } + return w + } + cops.TransportWrappers = append(cops.TransportWrappers, wrapper) + client, err := ecsmetadata.NewClient(cops) + if err != nil { + client, _ = ecsmetadata.NewClient(ecsmetadata.ClientOptions{ + TransportWrappers: []ecsmetadata.TransportWrapper{wrapper}, + }) + } - client := newCommonHttpClient(opts.Transport, opts.Timeout) - client.logger = opts.Logger e := &ECSMetadataProvider{ - endpoint: opts.Endpoint, - roleName: opts.RoleName, - metadataTokenTTLSeconds: opts.MetadataTokenTTLSeconds, - client: client, - Logger: opts.Logger, + roleName: opts.RoleName, + client: client, + Logger: opts.Logger, } e.u = NewUpdater(e.getCredentials, UpdaterOptions{ ExpiryWindow: opts.ExpiryWindow, @@ -83,87 +103,43 @@ type ecsMetadataStsResponse struct { } func (e *ECSMetadataProvider) getCredentials(ctx context.Context) (*Credentials, error) { - roleName, err := e.getRoleName(ctx) + roleName, err := e.GetRoleName(ctx) if err != nil { if e, ok := err.(*httpError); ok && e.code == 404 { return nil, NewNotEnableError(fmt.Errorf("get role name from ecs metadata failed: %w", err)) } + var httperr *ecsmetadata.HTTPError + if errors.As(err, &httperr) && httperr.StatusCode == 404 { + return nil, NewNotEnableError(fmt.Errorf("get role name from ecs metadata failed: %w", err)) + } + return nil, err } - path := fmt.Sprintf("/latest/meta-data/ram/security-credentials/%s", roleName) - data, err := e.getMedataDataWithToken(ctx, 
http.MethodGet, path) + obj, err := e.client.GetRoleCredentials(ctx, roleName) if err != nil { return nil, err } - - var obj ecsMetadataStsResponse - if err := json.Unmarshal([]byte(data), &obj); err != nil { - return nil, fmt.Errorf("parse credentials failed: %w", err) - } - if obj.AccessKeyId == "" || obj.AccessKeySecret == "" || - obj.SecurityToken == "" || obj.Expiration == "" { - return nil, fmt.Errorf("parse credentials got unexpected data: %s", - strings.ReplaceAll(data, "\n", " ")) + if obj.AccessKeyId == "" || obj.AccessKeySecret == "" { + return nil, fmt.Errorf("parse credentials got unexpected data: %+v", *obj) } - exp, err := time.Parse("2006-01-02T15:04:05Z", obj.Expiration) - if err != nil { - return nil, fmt.Errorf("parse Expiration (%s) failed: %w", obj.Expiration, err) - } return &Credentials{ AccessKeyId: obj.AccessKeyId, AccessKeySecret: obj.AccessKeySecret, SecurityToken: obj.SecurityToken, - Expiration: exp, + Expiration: obj.Expiration, }, nil } -func (e *ECSMetadataProvider) getRoleName(ctx context.Context) (string, error) { +func (e *ECSMetadataProvider) GetRoleName(ctx context.Context) (string, error) { if e.roleName != "" { return e.roleName, nil } - name, err := e.getMedataDataWithToken(ctx, http.MethodGet, "/latest/meta-data/ram/security-credentials/") - if err != nil { - return "", err - } - return strings.TrimSpace(name), nil -} - -func (e *ECSMetadataProvider) getMedataToken(ctx context.Context) (string, error) { - if !e.metadataTokenExp.Before(time.Now()) { - return e.metadataToken, nil - } - - e.logger().Debug("start to get metadata token") - h := http.Header{} - h.Set("X-aliyun-ecs-metadata-token-ttl-seconds", fmt.Sprintf("%d", e.metadataTokenTTLSeconds)) - body, err := e.getMedataData(ctx, http.MethodPut, "/latest/api/token", h) - if err != nil { - return "", fmt.Errorf("get metadata token failed: %w", err) - } - - e.metadataToken = strings.TrimSpace(body) - e.metadataTokenExp = 
time.Now().Add(time.Duration(float64(e.metadataTokenTTLSeconds)*0.8) * time.Second) - - return body, nil -} - -func (e *ECSMetadataProvider) getMedataDataWithToken(ctx context.Context, method, path string) (string, error) { - token, err := e.getMedataToken(ctx) - if err != nil { - if e, ok := err.(*httpError); !(ok && e.code == 404) { - return "", err - } - } - h := http.Header{} - if token != "" { - h.Set("X-aliyun-ecs-metadata-token", token) - } - return e.getMedataData(ctx, method, path, h) + return e.client.GetRoleName(ctx) } -func (e *ECSMetadataProvider) getMedataData(ctx context.Context, method, path string, header http.Header) (string, error) { - url := fmt.Sprintf("%s%s", e.endpoint, path) - return e.client.send(ctx, method, url, header, nil) +func (e *ECSMetadataProvider) GetMedataDataWithToken(ctx context.Context, method, path string) (string, error) { + data, err := e.client.GetMetaData(ctx, method, path) + return string(data), err } func (e *ECSMetadataProvider) logger() Logger { @@ -182,13 +158,29 @@ func (o *ECSMetadataProviderOptions) applyDefaults() { o.Transport = ts } if o.Endpoint == "" { - o.Endpoint = defaultECSMetadataServerEndpoint + if v := os.Getenv(envECSMetadataServerEndpoint); v != "" { + o.Endpoint = v + } else { + o.Endpoint = defaultECSMetadataServerEndpoint + } } else { o.Endpoint = strings.TrimRight(o.Endpoint, "/") } + if !o.DisableToken { + if v := os.Getenv(envIMDSV2Disabled); v != "" { + if b, err := strconv.ParseBool(v); err == nil && b { + o.DisableToken = true + } + } + } if o.MetadataTokenTTLSeconds == 0 { o.MetadataTokenTTLSeconds = defaultECSMetadataTokenTTLSeconds } + if o.RoleName == "" { + if v := os.Getenv(envIMDSRoleName); v != "" { + o.RoleName = v + } + } if o.ExpiryWindow == 0 { o.ExpiryWindow = defaultExpiryWindow } @@ -196,3 +188,17 @@ func (o *ECSMetadataProviderOptions) applyDefaults() { o.Logger = defaultLog } } + +func (o *ECSMetadataProviderOptions) toClientOptions() ecsmetadata.ClientOptions { + return 
ecsmetadata.ClientOptions{ + Endpoint: o.Endpoint, + RoleName: o.RoleName, + DisableIMDSV2: o.DisableToken, + TokenTTLSeconds: o.MetadataTokenTTLSeconds, + TransportWrappers: nil, + Timeout: o.Timeout, + NowFunc: nil, + DisableRetry: false, + RetryOptions: nil, + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env.go index 7210140dda..f3a354816e 100644 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env.go @@ -24,6 +24,7 @@ const ( envRoleSessionName = envNewSdkRoleSessionName envCredentialsURI = envNewSdkCredentialsURI // #nosec G101 + envINIConfigFile = envNewSdkCredentialFile // #nosec G101 ) // https://github.com/aliyun/alibaba-cloud-sdk-go/tree/master/sdk/auth diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env_provider.go index 33783eda26..75a35dd3b4 100644 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env_provider.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env_provider.go @@ -26,9 +26,14 @@ type EnvProviderOptions struct { EnvOIDCProviderArn string EnvOIDCTokenFile string - EnvCredentialsURI string + EnvCredentialsURI string + EnvConfigFile string + EnvConfigSectionName string - stsEndpoint string + EnvIMDSRoleName string + + STSEndpoint string + Logger Logger } func NewEnvProvider(opts EnvProviderOptions) *EnvProvider { @@ -67,38 +72,76 @@ func (e *EnvProvider) getProvider(opts EnvProviderOptions) CredentialsProvider { oidcProviderArn := os.Getenv(opts.EnvOIDCProviderArn) oidcTokenFile := os.Getenv(opts.EnvOIDCTokenFile) credentialsURI := os.Getenv(opts.EnvCredentialsURI) + 
iniConfigPath := os.Getenv(opts.EnvConfigFile) + iniConfigSectionName := os.Getenv(opts.EnvConfigSectionName) + imdsRoleName := os.Getenv(opts.EnvIMDSRoleName) switch { case accessKeyId != "" && accessKeySecret != "" && securityToken != "": - return NewSTSTokenProvider( + cp := NewSTSTokenProvider( os.Getenv(opts.EnvAccessKeyId), os.Getenv(opts.EnvAccessKeySecret), os.Getenv(opts.EnvSecurityToken), ) + if roleArn == "" { + return cp + } + return NewRoleArnProvider(cp, roleArn, RoleArnProviderOptions{ + STSEndpoint: opts.STSEndpoint, + Logger: opts.Logger, + }) + + case accessKeyId != "" && accessKeySecret != "": + cp := NewAccessKeyProvider( + os.Getenv(opts.EnvAccessKeyId), + os.Getenv(opts.EnvAccessKeySecret), + ) + if roleArn == "" { + return cp + } + return NewRoleArnProvider(cp, roleArn, RoleArnProviderOptions{ + STSEndpoint: opts.STSEndpoint, + Logger: opts.Logger, + }) case roleArn != "" && oidcProviderArn != "" && oidcTokenFile != "": return NewOIDCProvider(OIDCProviderOptions{ - RoleArn: os.Getenv(opts.EnvRoleArn), - OIDCProviderArn: os.Getenv(opts.EnvOIDCProviderArn), - OIDCTokenFile: os.Getenv(opts.EnvOIDCTokenFile), - STSEndpoint: opts.stsEndpoint, + RoleArn: roleArn, + OIDCProviderArn: oidcProviderArn, + OIDCTokenFile: oidcTokenFile, + STSEndpoint: opts.STSEndpoint, + Logger: opts.Logger, }) case credentialsURI != "": - return NewURIProvider(credentialsURI, URIProviderOptions{}) + return NewURIProvider(credentialsURI, URIProviderOptions{ + Logger: opts.Logger, + }) - case accessKeyId != "" && accessKeySecret != "": - return NewAccessKeyProvider( - os.Getenv(opts.EnvAccessKeyId), - os.Getenv(opts.EnvAccessKeySecret), - ) + case imdsRoleName != "": + return NewECSMetadataProvider(ECSMetadataProviderOptions{ + RoleName: imdsRoleName, + Logger: opts.Logger, + }) - default: - return &errorProvider{ - err: NewNoAvailableProviderError( - errors.New("no validated credentials were found in environment variables")), + case iniConfigPath != "": + cp, err := 
NewIniConfigProvider(INIConfigProviderOptions{ + ConfigPath: iniConfigPath, + SectionName: iniConfigSectionName, + STSEndpoint: opts.STSEndpoint, + Logger: opts.Logger, + }) + if err != nil { + opts.Logger.Debug(err.Error()) + } else { + return cp } } + + return &errorProvider{ + err: NewNoAvailableProviderError( + errors.New("no validated credentials were found in environment variables")), + } } func (o *EnvProviderOptions) applyDefaults() { @@ -125,4 +168,18 @@ func (o *EnvProviderOptions) applyDefaults() { if o.EnvCredentialsURI == "" { o.EnvCredentialsURI = envCredentialsURI } + if o.EnvConfigFile == "" { + o.EnvConfigFile = envINIConfigFile + } + if o.EnvConfigSectionName == "" { + o.EnvConfigSectionName = envProfileName + } + + if o.EnvIMDSRoleName == "" { + o.EnvIMDSRoleName = envIMDSRoleName + } + + if o.Logger == nil { + o.Logger = DefaultLogger + } } diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/file_cache.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/file_cache.go new file mode 100644 index 0000000000..916cb0b96b --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/file_cache.go @@ -0,0 +1,143 @@ +package provider + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "path" + "strings" + "time" +) + +type FileCacheProvider struct { + cacheDir string + cp CredentialsProvider + + expiryWindow time.Duration + logger Logger + logPrefix string + + nowFunc func() time.Time +} + +type FileCacheProviderOptions struct { + ExpiryWindow time.Duration + Logger Logger + LogPrefix string +} + +func NewFileCacheProvider(cacheDir string, cp CredentialsProvider, opts FileCacheProviderOptions) *FileCacheProvider { + opts.applyDefaults() + + p := &FileCacheProvider{ + cp: cp, + cacheDir: cacheDir, + expiryWindow: opts.ExpiryWindow, + logger: opts.Logger, + logPrefix: opts.LogPrefix, + } + return p +} + +func (f 
*FileCacheProvider) Credentials(ctx context.Context) (*Credentials, error) { + cred, err := f.getFromCache() + + if err != nil || cred == nil || cred.expired(f.now(), f.expiryWindow) { + f.logger.Debug(fmt.Sprintf("%s cache file not exist or expired", f.logPrefix)) + cred, err := f.getCredentials(ctx) + if err != nil { + return nil, err + } + + _ = f.saveCredentials(cred) + return cred, err + } + + f.logger.Debug(fmt.Sprintf("%s use data from cache", f.logPrefix)) + return cred, nil +} + +func (f *FileCacheProvider) saveCredentials(cred *Credentials) error { + if err := f.ensureCacheDir(); err != nil { + f.logger.Error(err, fmt.Sprintf("%s ensure cache dir %s failed: %s", f.logPrefix, f.cacheDir, err)) + return nil + } + + p := f.cacheFilePath() + data, err := json.Marshal(cred) + if err != nil { + f.logger.Error(err, fmt.Sprintf("%s marshal credentials failed: %s", f.logPrefix, err)) + return err + } + + encoded := base64.StdEncoding.EncodeToString(data) + if err := os.WriteFile(p, []byte(encoded), 0600); err != nil { + f.logger.Error(err, fmt.Sprintf("%s write cache file %s failed: %s", f.logPrefix, p, err)) + return err + } + + return nil +} + +func (f *FileCacheProvider) getCredentials(ctx context.Context) (*Credentials, error) { + cred, err := f.cp.Credentials(ctx) + if err != nil { + f.logger.Error(err, fmt.Sprintf("%s get credentials faild: %s", f.logPrefix, err)) + return nil, err + } + return cred, err +} + +func (f *FileCacheProvider) getFromCache() (*Credentials, error) { + p := f.cacheFilePath() + data, err := os.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + return nil, err + } + f.logger.Error(err, fmt.Sprintf("%s read cache file %s failed: %s", f.logPrefix, p, err)) + return nil, err + } + + rawJson, err := base64.StdEncoding.DecodeString(strings.TrimSpace(string(data))) + if err != nil { + f.logger.Error(err, fmt.Sprintf("%s decode cache file %s failed: %s", f.logPrefix, p, err)) + return nil, err + } + + var cred Credentials + if err := 
json.Unmarshal(rawJson, &cred); err != nil { + f.logger.Error(err, fmt.Sprintf("%s unmarshal cache file %s failed: %s", f.logPrefix, p, err)) + return nil, err + } + return &cred, err +} + +func (f *FileCacheProvider) ensureCacheDir() error { + return os.MkdirAll(f.cacheDir, 0750) +} + +func (f *FileCacheProvider) cacheFilePath() string { + return path.Join(f.cacheDir, "cred") +} + +func (f *FileCacheProvider) now() time.Time { + if f.nowFunc == nil { + return time.Now() + } + return f.nowFunc() +} + +func (f *FileCacheProviderOptions) applyDefaults() { + if f.ExpiryWindow == 0 { + f.ExpiryWindow = defaultExpiryWindowForAssumeRole + } + if f.Logger == nil { + f.Logger = defaultLog + } + if f.LogPrefix == "" { + f.LogPrefix = "[FileCacheProvider]" + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/function_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/function_provider.go index 08d145e449..5df5c75797 100644 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/function_provider.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/function_provider.go @@ -3,16 +3,43 @@ package provider import ( "context" "errors" + "time" ) type FunctionProvider struct { getCredentials func(ctx context.Context) (*Credentials, error) + + u *Updater +} + +type FunctionProviderOptions struct { + RefreshPeriod time.Duration + ExpiryWindow time.Duration + Logger Logger + LogPrefix string } func NewFunctionProvider(getCredentials func(ctx context.Context) (*Credentials, error)) *FunctionProvider { - return &FunctionProvider{ + return NewFunctionProviderWithOptions(getCredentials, FunctionProviderOptions{}) +} + +func NewFunctionProviderWithOptions(getCredentials func(ctx context.Context) (*Credentials, error), + opts FunctionProviderOptions) *FunctionProvider { + opts.applyDefaults() + + f := &FunctionProvider{ getCredentials: 
getCredentials, } + + f.u = NewUpdater(f.getCredentials, UpdaterOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + LogPrefix: opts.LogPrefix, + }) + f.u.Start(context.TODO()) + + return f } func (f *FunctionProvider) Credentials(ctx context.Context) (*Credentials, error) { @@ -20,9 +47,25 @@ func (f *FunctionProvider) Credentials(ctx context.Context) (*Credentials, error return nil, NewNotEnableError(errors.New("getCredentials function is nil")) } - cred, err := f.getCredentials(ctx) - if err != nil { - return nil, err + return f.u.Credentials(ctx) +} + +func (f *FunctionProvider) Stop(ctx context.Context) { + f.u.Stop(ctx) +} + +func (f *FunctionProvider) setNowFunc(t func() time.Time) { + f.u.nowFunc = t +} + +func (f *FunctionProviderOptions) applyDefaults() { + if f.ExpiryWindow == 0 { + f.ExpiryWindow = time.Minute * 1 + } + if f.Logger == nil { + f.Logger = defaultLog + } + if f.LogPrefix == "" { + f.LogPrefix = "[FunctionProvider]" } - return cred.DeepCopy(), nil } diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/http.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/http.go index 8bf61a4edb..30b94e1480 100644 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/http.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/http.go @@ -22,6 +22,9 @@ type httpError struct { type commonHttpClient struct { client httpClient logger Logger + + delegatedRoundTripper http.RoundTripper + replaceTransport http.RoundTripper } func newCommonHttpClient(transport http.RoundTripper, timeout time.Duration) *commonHttpClient { @@ -32,6 +35,32 @@ func newCommonHttpClient(transport http.RoundTripper, timeout time.Duration) *co return &commonHttpClient{client: client} } +func (c *commonHttpClient) RoundTrip(req *http.Request) (*http.Response, error) { + req.Header.Set("User-Agent", 
UserAgent) + if debugMode { + for _, item := range genDebugReqMessages(req) { + c.getLogger().Debug(item) + } + } + ts := c.delegatedRoundTripper + if c.replaceTransport != nil { + ts = c.replaceTransport + } + + resp, err := ts.RoundTrip(req) + if err != nil { + return nil, err + } + + if debugMode { + for _, item := range genDebugRespMessages(resp) { + c.getLogger().Debug(item) + } + } + + return resp, nil +} + func (c *commonHttpClient) send(ctx context.Context, method, url string, header http.Header, body io.Reader) (string, error) { req, err := http.NewRequest(method, url, body) if err != nil { diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ini.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ini.go new file mode 100644 index 0000000000..59455dd306 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ini.go @@ -0,0 +1,134 @@ +package provider + +import ( + "fmt" + "regexp" + "strings" +) + +type iniConfig struct { + items []iniConfigItem +} + +type iniConfigItem struct { + section string + config map[string]iniConfigItemValue +} + +type iniConfigItemValue string + +var ( + reIniConfigItem = regexp.MustCompile(`(?m)\s*(?:\[([^]]+)\]([^[]+))`) + reIniConfigLineSuffixComment = regexp.MustCompile(`(?m)[#;]+.+$`) +) + +func parseIniConfig(input []byte) iniConfig { + var config iniConfig + var tidyInput strings.Builder + lines := strings.Split(string(input), "\n") + for _, line := range lines { + line := strings.TrimSpace(line) + if line == "" || + strings.HasPrefix(line, ";") || + strings.HasPrefix(line, "#") { + continue + } + line = reIniConfigLineSuffixComment.ReplaceAllLiteralString(line, "") + tidyInput.WriteString(line + "\n") + } + multipleParts := reIniConfigItem.FindAllStringSubmatch(tidyInput.String(), -1) + if len(multipleParts) == 0 { + return config + } + + for _, parts := range multipleParts { + name := 
strings.TrimSpace(parts[1]) + item := iniConfigItem{ + section: name, + config: map[string]iniConfigItemValue{}, + } + for _, line := range strings.Split(parts[2], "\n") { + line := strings.TrimSpace(line) + kv := strings.SplitN(line, "=", 2) + if len(kv) != 2 { + continue + } + k, v := strings.TrimSpace(kv[0]), strings.Trim(strings.TrimSpace(kv[1]), `'"`) + item.config[k] = iniConfigItemValue(v) + } + config.items = append(config.items, item) + } + + return config +} + +func (c iniConfigItem) Type() string { + if len(c.config) == 0 { + return "" + } + return c.config["type"].String() +} + +func (v iniConfigItemValue) String() string { + return string(v) +} + +func (c iniConfig) toConfiguration(sectionName string) (*Configuration, error) { + inicfg := c.getSection(sectionName) + if inicfg.section == "" { + return nil, fmt.Errorf("section %s is not found", sectionName) + } + + pf := inicfg.toProfile() + if err := pf.validate(); err != nil { + return nil, err + } + cfg := &Configuration{ + CurrentProfile: sectionName, + Profiles: []Profile{pf}, + MetaPath: "", + } + return cfg, nil +} + +func (c iniConfig) getSection(name string) iniConfigItem { + for _, item := range c.items { + if item.section == name { + return item + } + } + return iniConfigItem{} +} + +func (c iniConfigItem) toProfile() Profile { + pf := Profile{ + Name: c.section, + Mode: "", + AccessKeyId: c.config["access_key_id"].String(), + AccessKeySecret: c.config["access_key_secret"].String(), + StsToken: c.config["sts_token"].String(), + StsRegion: c.config["sts_region"].String(), + RamRoleName: c.config["role_name"].String(), + RamRoleArn: c.config["role_arn"].String(), + RoleSessionName: c.config["role_session_name"].String(), + ProcessCommand: c.config["process_command"].String(), + CredentialsURI: c.config["credentials_uri"].String(), + } + + switch c.Type() { + case "access_key": + pf.Mode = AK + case "sts": + pf.Mode = StsToken + case "ram_role_arn": + pf.Mode = RamRoleArn + case 
"credentials_uri": + pf.Mode = CredentialsURI + case "ecs_ram_role": + pf.Mode = EcsRamRole + case "external": + pf.Mode = External + } + + return pf +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ini_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ini_provider.go new file mode 100644 index 0000000000..3826138b42 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ini_provider.go @@ -0,0 +1,80 @@ +package provider + +import ( + "context" + "fmt" + "os" + "path" +) + +const ( + defaultINIConfigDir = "~/.alibabacloud" + iniConfigFileName = "credentials" + defaultINIConfigSectionName = "default" +) + +type INIConfigProvider struct { + cp *CLIConfigProvider +} + +type INIConfigProviderOptions struct { + ConfigPath string + SectionName string + STSEndpoint string + Logger Logger +} + +func NewIniConfigProvider(opts INIConfigProviderOptions) (*INIConfigProvider, error) { + opts.applyDefaults() + + data, err := os.ReadFile(opts.ConfigPath) + if err != nil { + return nil, NewNotEnableError(err) + } + cf, err := parseIniConfig(data).toConfiguration(opts.SectionName) + if err != nil { + return nil, NewNotEnableError( + fmt.Errorf("parse config from %s: %w", opts.ConfigPath, err)) + } + + cp, err := NewCLIConfigProvider(CLIConfigProviderOptions{ + ConfigPath: "", + ProfileName: opts.SectionName, + STSEndpoint: opts.STSEndpoint, + Logger: opts.Logger, + conf: cf, + }) + if err != nil { + return nil, NewNotEnableError(err) + } + + return &INIConfigProvider{cp: cp}, nil +} + +func (i *INIConfigProvider) Credentials(ctx context.Context) (*Credentials, error) { + return i.cp.Credentials(ctx) +} + +func getDefaultINIConfigPath() string { + dir, err := expandPath(defaultINIConfigDir) + if err != nil { + dir = defaultINIConfigDir + } + return path.Join(dir, iniConfigFileName) +} + +func (o *INIConfigProviderOptions) applyDefaults() { + if 
o.ConfigPath == "" { + o.ConfigPath = getDefaultINIConfigPath() + } + if o.SectionName == "" { + if v := os.Getenv(envProfileName); v != "" { + o.SectionName = v + } else { + o.SectionName = defaultINIConfigSectionName + } + } + if o.Logger == nil { + o.Logger = DefaultLogger + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/log.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/log.go index 0b8039806d..768261e37c 100644 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/log.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/log.go @@ -14,7 +14,8 @@ type Logger interface { Error(err error, msg string) } -var defaultLog Logger = defaultLogger{} +var defaultLog Logger = &defaultLogger{} +var DefaultLogger = defaultLog.(*defaultLogger) func init() { debugEnv := strings.Split(strings.ToLower(os.Getenv("DEBUG")), ",") @@ -27,9 +28,21 @@ func init() { } type defaultLogger struct { + silentInfo bool +} + +func (d *defaultLogger) SetSilentInfo(v bool) { + d.silentInfo = v +} + +func (d defaultLogger) DebugMode() bool { + return debugMode } func (d defaultLogger) Info(msg string) { + if d.silentInfo && !debugMode { + return + } log.Print(msg) } diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/oidc_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/oidc_provider.go index 62ffc7c70a..21b595cd35 100644 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/oidc_provider.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/oidc_provider.go @@ -41,6 +41,7 @@ type OIDCProvider struct { roleArn string oidcProviderArn string oidcTokenFile string + oidcToken string Logger Logger } @@ -59,6 +60,7 @@ type OIDCProviderOptions struct { EnvOIDCProviderArn string OIDCTokenFile string 
EnvOIDCTokenFile string + OIDCToken string Timeout time.Duration Transport http.RoundTripper @@ -97,6 +99,7 @@ func NewOIDCProvider(opts OIDCProviderOptions) *OIDCProvider { roleArn: opts.getRoleArn(), oidcProviderArn: opts.getOIDCProviderArn(), oidcTokenFile: opts.getOIDCTokenFile(), + oidcToken: opts.OIDCToken, Logger: opts.Logger, } if opts.TokenDuration >= time.Second*900 { @@ -127,16 +130,21 @@ func (o *OIDCProvider) getCredentials(ctx context.Context) (*Credentials, error) roleArn := o.roleArn oidcProviderArn := o.oidcProviderArn tokenFile := o.oidcTokenFile - if roleArn == "" || oidcProviderArn == "" || tokenFile == "" { + tokenData := o.oidcToken + + if roleArn == "" || oidcProviderArn == "" || (tokenFile == "" && tokenData == "") { return nil, NewNotEnableError(errors.New("roleArn, oidcProviderArn or oidcTokenFile is empty")) } - tokenData, err := os.ReadFile(tokenFile) - if err != nil { - return nil, err + if tokenFile != "" { + if data, err := os.ReadFile(tokenFile); err != nil { + return nil, fmt.Errorf("read file %s: %w", tokenFile, err) + } else { + tokenData = string(data) + } } - token := string(tokenData) - return o.assumeRoleWithOIDC(ctx, roleArn, oidcProviderArn, token) + + return o.assumeRoleWithOIDC(ctx, roleArn, oidcProviderArn, tokenData) } type oidcResponse struct { @@ -204,21 +212,24 @@ func (o *OIDCProvider) assumeRoleWithOIDC(ctx context.Context, roleArn, oidcProv data, err := io.ReadAll(resp.Body) if err != nil { - return nil, err + return nil, fmt.Errorf("read body failed: %w", err) } var obj oidcResponse if err := json.Unmarshal(data, &obj); err != nil { - return nil, err + return nil, fmt.Errorf("parse AssumeRoleWithOIDC body failed (%s), got unexpected body (%s): %s", + err, resp.Status, strings.ReplaceAll(string(data), "\n", " ")) } - if obj.Credentials == nil || obj.Credentials.AccessKeySecret == "" { - return nil, fmt.Errorf("call AssumeRoleWithOIDC failed, got unexpected body: %s", - strings.ReplaceAll(string(data), "\n", " ")) + 
if obj.Credentials == nil || obj.Credentials.AccessKeySecret == "" || + obj.Credentials.AccessKeyId == "" || obj.Credentials.SecurityToken == "" || + obj.Credentials.Expiration == "" { + return nil, fmt.Errorf("call AssumeRoleWithOIDC failed (%s), got unexpected body: %s", + resp.Status, strings.ReplaceAll(string(data), "\n", " ")) } exp, err := time.Parse("2006-01-02T15:04:05Z", obj.Credentials.Expiration) if err != nil { - return nil, err + return nil, fmt.Errorf("parse Expiration %q failed: %w", obj.Credentials.Expiration, err) } return &Credentials{ AccessKeyId: obj.Credentials.AccessKeyId, diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/remote_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/remote_provider.go new file mode 100644 index 0000000000..d9dc4e16b9 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/remote_provider.go @@ -0,0 +1,77 @@ +package provider + +import ( + "context" + "fmt" + "time" +) + +type RemoteProvider struct { + u *Updater + + getRawData func(context.Context) ([]byte, error) + decoder func(ctx context.Context, data []byte) (*Credentials, error) +} + +type RemoteProviderOptions struct { + RefreshPeriod time.Duration + ExpiryWindow time.Duration + Logger Logger + LogPrefix string +} + +func NewRemoteProvider(getRawData func(ctx context.Context) ([]byte, error), + decoder func(ctx context.Context, data []byte) (*Credentials, error), + opts RemoteProviderOptions) *RemoteProvider { + opts.applyDefaults() + + e := &RemoteProvider{ + getRawData: getRawData, + decoder: decoder, + } + e.u = NewUpdater(e.getCredentials, UpdaterOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + LogPrefix: opts.LogPrefix, + }) + e.u.Start(context.TODO()) + + return e +} + +func (f *RemoteProvider) Credentials(ctx context.Context) (*Credentials, error) { + return 
f.u.Credentials(ctx) +} + +func (f *RemoteProvider) Stop(ctx context.Context) { + f.u.Stop(ctx) +} + +func (f *RemoteProvider) getCredentials(ctx context.Context) (*Credentials, error) { + if f.getRawData == nil { + return nil, NewNotEnableError(fmt.Errorf("getRawData function is nil")) + } + data, err := f.getRawData(ctx) + if err != nil { + return nil, fmt.Errorf("get raw data from remote failed: %w", err) + } + + cred, err := f.decoder(ctx, data) + if err != nil { + return nil, fmt.Errorf("decode data from remote failed: %w", err) + } + return cred, nil +} + +func (f *RemoteProviderOptions) applyDefaults() { + if f.ExpiryWindow == 0 { + f.ExpiryWindow = defaultExpiryWindow + } + if f.Logger == nil { + f.Logger = defaultLog + } + if f.LogPrefix == "" { + f.LogPrefix = "[RemoteProvider]" + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/req.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/req.go index 3d2f0b3c52..b586523235 100644 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/req.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/req.go @@ -158,11 +158,29 @@ func genDebugReqMessages(req *http.Request) []string { ret = append(ret, fmt.Sprintf("%s %s", req.Method, req.URL.String())) ret = append(ret, "Request Headers:") for k, vs := range req.Header { + if isTokenHeader(k) { + vs = []string{"******"} + } ret = append(ret, fmt.Sprintf(" %s: %s", k, strings.Join(vs, ", "))) } return ret } +var tokenHeaders = []string{ + "authorization", + "token", +} + +func isTokenHeader(k string) bool { + k = strings.ToLower(k) + for _, target := range tokenHeaders { + if k == target { + return true + } + } + return false +} + func genDebugRespMessages(resp *http.Response) []string { var ret []string ret = append(ret, fmt.Sprintf("Response Status: %s", resp.Status)) diff --git 
a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/rolearn_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/rolearn_provider.go index 7b55ebc9b2..ee9b3b4169 100644 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/rolearn_provider.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/rolearn_provider.go @@ -168,21 +168,24 @@ func (r *RoleArnProvider) assumeRole(ctx context.Context, roleArn string) (*Cred data, err := io.ReadAll(resp.Body) if err != nil { - return nil, err + return nil, fmt.Errorf("read body failed: %w", err) } var obj roleArnResponse if err := json.Unmarshal(data, &obj); err != nil { - return nil, err + return nil, fmt.Errorf("parse AssumeRole body failed (%s), got unexpected body (%s): %s", + err, resp.Status, strings.ReplaceAll(string(data), "\n", " ")) } - if obj.Credentials == nil || obj.Credentials.AccessKeySecret == "" { - return nil, fmt.Errorf("call AssumeRole failed, got unexpected body: %s", - strings.ReplaceAll(string(data), "\n", " ")) + if obj.Credentials == nil || obj.Credentials.AccessKeySecret == "" || + obj.Credentials.AccessKeyId == "" || obj.Credentials.SecurityToken == "" || + obj.Credentials.Expiration == "" { + return nil, fmt.Errorf("call AssumeRole failed (%s), got unexpected body: %s", + resp.Status, strings.ReplaceAll(string(data), "\n", " ")) } exp, err := time.Parse("2006-01-02T15:04:05Z", obj.Credentials.Expiration) if err != nil { - return nil, err + return nil, fmt.Errorf("parse Expiration %q failed: %w", obj.Credentials.Expiration, err) } return &Credentials{ AccessKeyId: obj.Credentials.AccessKeyId, diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/updater.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/updater.go index 705f436dd6..a3248acfc5 100644 --- 
a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/updater.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/updater.go @@ -3,6 +3,7 @@ package provider import ( "context" "fmt" + "math/rand" "sync" "time" ) @@ -98,8 +99,8 @@ loop: } func (u *Updater) Credentials(ctx context.Context) (*Credentials, error) { - if u.Expired() { - if err := u.refreshCred(ctx); err != nil { + if u.shouldRefresh() { + if err := u.refreshCred(ctx); err != nil && u.Expired() { return nil, err } } @@ -111,7 +112,8 @@ func (u *Updater) Credentials(ctx context.Context) (*Credentials, error) { func (u *Updater) refreshCredForLoop(ctx context.Context) { exp := u.expiration() - if !u.expired(u.expiryWindowForRefreshLoop) { + if !(u.expired(u.expiryWindow+u.expiryWindowForRefreshLoop) || + u.shouldRefresh()) { return } @@ -154,10 +156,21 @@ func (u *Updater) setCred(cred *Credentials) { defer u.lockForCred.Unlock() newCred := cred.DeepCopy() - newCred.Expiration = newCred.Expiration.Round(0) - if u.expiryWindow > 0 { - newCred.Expiration = newCred.Expiration.Add(-u.expiryWindow) + + if !newCred.Expiration.IsZero() { + newCred.Expiration = newCred.Expiration.Round(0) + if u.expiryWindow > 0 { + window := u.expiryWindow + window += time.Second * time.Duration(rand.Int63n(60)) + newCred.nextRefresh = newCred.Expiration.Add(-window) + } else { + window := newCred.Expiration.Sub(u.now()) + window = time.Duration(float64(window) * 0.2) + window += time.Second * time.Duration(rand.Int63n(60)) + newCred.nextRefresh = newCred.Expiration.Add(-window) + } } + u.cred = newCred } @@ -172,13 +185,20 @@ func (u *Updater) Expired() bool { return u.expired(0) } +func (u *Updater) shouldRefresh() bool { + if u.expired(0) { + return true + } + return u.getCred().shouldRefresh(u.now()) +} + func (u *Updater) expired(expiryDelta time.Duration) bool { exp := u.expiration() if expiryDelta > 0 { exp = exp.Add(-expiryDelta) } - return 
exp.Before(u.now()) + return exp.Before(u.now()) || exp.Equal(u.now()) } func (u *Updater) expiration() time.Time { diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v1sdk.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v1sdk.go index fd2981cc0e..d499064f09 100644 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v1sdk.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v1sdk.go @@ -20,6 +20,10 @@ type SignerForV1SDKOptions struct { func NewSignerForV1SDK(p CredentialsProvider, opts SignerForV1SDKOptions) *SignerForV1SDK { opts.applyDefaults() + if _, ok := p.(*SemaphoreProvider); !ok { + p = NewSemaphoreProvider(p, SemaphoreProviderOptions{MaxWeight: 1}) + } + return &SignerForV1SDK{ p: p, Logger: opts.Logger, diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v2sdk.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v2sdk.go index 713695993f..4e87675b63 100644 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v2sdk.go +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v2sdk.go @@ -3,6 +3,8 @@ package provider import ( "context" "time" + + "github.com/aliyun/credentials-go/credentials" ) var defaultTimeout = time.Minute * 10 @@ -21,6 +23,10 @@ type CredentialForV2SDKOptions struct { func NewCredentialForV2SDK(p CredentialsProvider, opts CredentialForV2SDKOptions) *CredentialForV2SDK { opts.applyDefaults() + if _, ok := p.(*SemaphoreProvider); !ok { + p = NewSemaphoreProvider(p, SemaphoreProviderOptions{MaxWeight: 1}) + } + return &CredentialForV2SDK{ p: p, Logger: opts.Logger, @@ -28,38 +34,49 @@ func NewCredentialForV2SDK(p CredentialsProvider, opts CredentialForV2SDKOptions } } -func (c *CredentialForV2SDK) GetAccessKeyId() (*string, error) { +func (c *CredentialForV2SDK) 
GetCredential() (*credentials.CredentialModel, error) { timeoutCtx, cancel := context.WithTimeout(context.Background(), c.credentialRetrievalTimeout) defer cancel() cred, err := c.p.Credentials(timeoutCtx) if err != nil { return nil, err } - return stringPointer(cred.AccessKeyId), nil + return &credentials.CredentialModel{ + AccessKeyId: stringPointer(cred.AccessKeyId), + AccessKeySecret: stringPointer(cred.AccessKeySecret), + SecurityToken: stringPointer(cred.SecurityToken), + BearerToken: nil, + Type: nil, + ProviderName: nil, + }, nil +} + +func (c *CredentialForV2SDK) GetAccessKeyId() (*string, error) { + cred, err := c.GetCredential() + if err != nil { + return nil, err + } + return cred.AccessKeyId, nil } func (c *CredentialForV2SDK) GetAccessKeySecret() (*string, error) { - timeoutCtx, cancel := context.WithTimeout(context.Background(), c.credentialRetrievalTimeout) - defer cancel() - cred, err := c.p.Credentials(timeoutCtx) + cred, err := c.GetCredential() if err != nil { return nil, err } - return stringPointer(cred.AccessKeySecret), nil + return cred.AccessKeySecret, nil } func (c *CredentialForV2SDK) GetSecurityToken() (*string, error) { - timeoutCtx, cancel := context.WithTimeout(context.Background(), c.credentialRetrievalTimeout) - defer cancel() - cred, err := c.p.Credentials(timeoutCtx) + cred, err := c.GetCredential() if err != nil { return nil, err } - return stringPointer(cred.SecurityToken), nil + return cred.SecurityToken, nil } func (c *CredentialForV2SDK) GetBearerToken() *string { - return stringPointer("") + return nil } func (c *CredentialForV2SDK) GetType() *string { @@ -83,5 +100,8 @@ func (o *CredentialForV2SDKOptions) applyDefaults() { } func stringPointer(s string) *string { + if s == "" { + return nil + } return &s } diff --git a/vendor/github.com/oklog/ulid/LICENSE b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/LICENSE similarity index 100% rename from vendor/github.com/oklog/ulid/LICENSE rename to 
vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/LICENSE diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/base.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/base.go new file mode 100644 index 0000000000..ca86eefa9e --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/base.go @@ -0,0 +1,33 @@ +package ecsmetadata + +import ( + "context" +) + +func (c *Client) GetRegionId(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/region-id") +} + +func (c *Client) GetZoneId(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/zone-id") +} + +func (c *Client) GetOwnerAccountId(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/owner-account-id") +} + +func (c *Client) GetHostname(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/hostname") +} + +func (c *Client) GetSourceAddress(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/source-address") +} + +func (c *Client) GetSourceAddressList(ctx context.Context) ([]string, error) { + data, err := c.GetSourceAddress(ctx) + if err != nil { + return nil, err + } + return parsePathNames(data), nil +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/client.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/client.go new file mode 100644 index 0000000000..ba243341e0 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/client.go @@ -0,0 +1,296 @@ +package ecsmetadata + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "os" + "path" + "runtime" + "strconv" + "strings" + "time" +) + +// https://help.aliyun.com/zh/ecs/user-guide/view-instance-metadata + +const ( + DefaultEndpoint = "http://100.100.100.200" + 
EnvEndpoint = "ALIBABA_CLOUD_IMDS_ENDPOINT" + EnvIMDSV2Disabled = "ALIBABA_CLOUD_IMDSV2_DISABLED" + EnvIMDSRoleName = "ALIBABA_CLOUD_ECS_METADATA" + defaultTokenTTLSeconds = 18000 // 5hours + minTokenTTLSeconds = 1 + maxTokenTTLSeconds = 21600 + defaultClientTimeout = time.Second * 30 +) + +var DefaultClient, _ = NewClient(ClientOptions{}) +var UserAgent = "" + +type Client struct { + httpClient *http.Client + + endpoint string + roleName string + disableIMDSV2 bool + tokenTTLSeconds int + + metadataToken string + metadataTokenExp time.Time + + nowFunc func() time.Time + + disableRetry bool + retryOptions RetryOptions + + userAgent string +} + +type TransportWrapper func(rt http.RoundTripper) http.RoundTripper + +type ClientOptions struct { + // default: DefaultEndpoint + Endpoint string + // ram role of ecs instance + RoleName string + DisableIMDSV2 bool + // default: 18000, 5 hours + TokenTTLSeconds int + + TransportWrappers []TransportWrapper + transport http.RoundTripper + // default: 30 seconds + Timeout time.Duration + // default: time.Now + NowFunc func() time.Time + + DisableRetry bool + // default: DefaultRetryOptions() + RetryOptions *RetryOptions + + UserAgent string +} + +func init() { + name := path.Base(os.Args[0]) + UserAgent = fmt.Sprintf("%s %s/%s %s", name, runtime.GOOS, runtime.GOARCH, runtime.Version()) +} + +func NewClient(opts ClientOptions) (*Client, error) { + if err := opts.prepare(); err != nil { + return nil, err + } + httpClient := &http.Client{ + Transport: opts.transport, + Timeout: opts.Timeout, + } + return &Client{ + httpClient: httpClient, + endpoint: opts.Endpoint, + roleName: opts.RoleName, + disableIMDSV2: opts.DisableIMDSV2, + tokenTTLSeconds: opts.TokenTTLSeconds, + nowFunc: opts.NowFunc, + retryOptions: *opts.RetryOptions, + disableRetry: opts.DisableRetry, + userAgent: opts.UserAgent, + }, nil +} + +func (c *Client) GetMetaData(ctx context.Context, method, path string) ([]byte, error) { + return c.getMetaDataWithRetry(ctx, 
method, path) +} + +func (c *Client) getToken(ctx context.Context) (string, error) { + if c.disableIMDSV2 { + return "", nil + } + + now := c.getNow() + if c.metadataToken != "" && !c.tokenExpired(now) { + return c.metadataToken, nil + } + + h := http.Header{} + h.Set("X-aliyun-ecs-metadata-token-ttl-seconds", fmt.Sprintf("%d", c.tokenTTLSeconds)) + body, err := c.send(ctx, http.MethodPut, "/latest/api/token", h) + if err != nil { + return "", fmt.Errorf("get token failed: %w", err) + } + + c.metadataToken = strings.TrimSpace(string(body)) + c.metadataTokenExp = now. + Add(time.Duration(float64(c.tokenTTLSeconds)*0.8) * time.Second). + Add(-time.Minute) + + return c.metadataToken, nil +} + +func (c *Client) getMetaDataWithRetry(ctx context.Context, method, path string) ([]byte, error) { + if c.disableRetry { + return c.getMetaData(ctx, method, path) + } + + var data []byte + var err error + lastErr := retryWithOptions(ctx, func(ctx context.Context) error { + data, err = c.getMetaData(ctx, method, path) + if err != nil { + if !isRetryable(err) { + return newNoRetryError(err) + } + } + return err + }, c.retryOptions) + + return data, lastErr +} + +func (c *Client) getMetaData(ctx context.Context, method, path string) ([]byte, error) { + token, err := c.getToken(ctx) + if err != nil { + var httpErr *HTTPError + if errors.As(err, &httpErr) && + (httpErr.StatusCode == http.StatusNotFound || + httpErr.StatusCode == http.StatusForbidden) { + // ignore 404 and 403 error + } else { + return nil, err + } + } + + h := http.Header{} + if token != "" { + h.Set("X-aliyun-ecs-metadata-token", token) + } + return c.send(ctx, method, path, h) +} + +func (c *Client) getTidyStringData(ctx context.Context, path string) (string, error) { + data, err := c.getRawStringData(ctx, path) + if err != nil { + return "", err + } + return strings.TrimSpace(data), nil +} + +func (c *Client) getRawStringData(ctx context.Context, path string) (string, error) { + data, err := c.GetMetaData(ctx, 
http.MethodGet, path) + if err != nil { + return "", err + } + return string(data), nil +} + +func (c *Client) getRawData(ctx context.Context, path string) ([]byte, error) { + return c.GetMetaData(ctx, http.MethodGet, path) +} + +func (c *Client) send(ctx context.Context, method, path string, header http.Header) ([]byte, error) { + url := c.getURL(path) + req, err := http.NewRequestWithContext(ctx, method, url, nil) + if err != nil { + return nil, fmt.Errorf("create request failed: %w", err) + } + + for k, v := range header { + req.Header.Set(k, v[0]) + } + req.Header.Set("User-Agent", c.userAgent) + resp, err := c.httpClient.Do(req) + if err != nil { + err2 := fmt.Errorf("do request failed: %w", err) + return nil, newHTTPError(err2, url, resp, nil) + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + err2 := fmt.Errorf("read body failed: %w", err) + return nil, newHTTPError(err2, url, resp, nil) + } + if resp.StatusCode != http.StatusOK { + err2 := fmt.Errorf("status code of respose is not 200: %s", resp.Status) + return nil, newHTTPError(err2, url, resp, body) + } + return body, nil +} + +func (c *Client) tokenExpired(now time.Time) bool { + if c.metadataTokenExp.IsZero() { + return true + } + return c.metadataTokenExp.Before(now) +} + +func (c *Client) getNow() time.Time { + if c.nowFunc != nil { + return c.nowFunc() + } + return time.Now() +} + +func (o *ClientOptions) prepare() error { + if o.Timeout <= 0 { + o.Timeout = defaultClientTimeout + } + if o.transport == nil { + ts := http.DefaultTransport.(*http.Transport).Clone() + o.transport = ts + } + if len(o.TransportWrappers) > 0 { + for _, tw := range o.TransportWrappers { + o.transport = tw(o.transport) + } + } + if o.Endpoint == "" { + if v := os.Getenv(EnvEndpoint); v != "" { + o.Endpoint = v + } else { + o.Endpoint = DefaultEndpoint + } + } else { + o.Endpoint = strings.TrimRight(o.Endpoint, "/") + } + if !o.DisableIMDSV2 { + if v := os.Getenv(EnvIMDSV2Disabled); v != 
"" { + if b, err := strconv.ParseBool(v); err == nil && b { + o.DisableIMDSV2 = true + } + } + } + if o.TokenTTLSeconds == 0 { + o.TokenTTLSeconds = defaultTokenTTLSeconds + } + if o.TokenTTLSeconds < minTokenTTLSeconds || o.TokenTTLSeconds > maxTokenTTLSeconds { + return fmt.Errorf("invalid TokenTTLSeconds: %d", o.TokenTTLSeconds) + } + if o.RoleName == "" { + if v := os.Getenv(EnvIMDSRoleName); v != "" { + o.RoleName = v + } + } + + if !o.DisableRetry { + if o.RetryOptions == nil { + o.RetryOptions = DefaultRetryOptions() + } + } + if o.UserAgent == "" { + o.UserAgent = UserAgent + } + + return nil +} + +func (c *Client) SetEndpoint(endpoint string) *Client { + c.endpoint = strings.TrimRight(endpoint, "/") + return c +} + +func (c *Client) getURL(path string) string { + return fmt.Sprintf("%s%s", c.endpoint, path) +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/disk.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/disk.go new file mode 100644 index 0000000000..912758b33f --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/disk.go @@ -0,0 +1,24 @@ +package ecsmetadata + +import ( + "context" + "fmt" +) + +func (c *Client) GetDisks(ctx context.Context) ([]string, error) { + data, err := c.getTidyStringData(ctx, "/latest/meta-data/disks/") + if err != nil { + return nil, err + } + return parsePathNames(data), nil +} + +func (c *Client) GetDiskId(ctx context.Context, diskSerial string) (string, error) { + return c.getTidyStringData(ctx, + fmt.Sprintf("/latest/meta-data/disks/%s/id", diskSerial)) +} + +func (c *Client) GetDiskName(ctx context.Context, diskSerial string) (string, error) { + return c.getTidyStringData(ctx, + fmt.Sprintf("/latest/meta-data/disks/%s/name", diskSerial)) +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/dynamic.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/dynamic.go new file 
mode 100644 index 0000000000..5ab2a4c0d5 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/dynamic.go @@ -0,0 +1,50 @@ +package ecsmetadata + +import ( + "context" + "encoding/json" + "net/url" +) + +// https://help.aliyun.com/zh/ecs/user-guide/use-instance-identities + +type Document struct { + AccountId string `json:"account-id"` // always is empty? + OwnerAccountId string `json:"owner-account-id"` + InstanceId string `json:"instance-id"` + Mac string `json:"mac"` + RegionId string `json:"region-id"` + SerialNumber string `json:"serial-number"` + ZoneId string `json:"zone-id"` + InstanceType string `json:"instance-type"` + ImageId string `json:"image-id"` + PrivateIp string `json:"private-ip"` +} + +func (c *Client) GetDocument(ctx context.Context) (*Document, error) { + data, err := c.GetRawDocument(ctx) + if err != nil { + return nil, err + } + + var doc Document + if err := json.Unmarshal([]byte(data), &doc); err != nil { + return nil, err + } + return &doc, nil +} + +func (c *Client) GetRawDocument(ctx context.Context) (string, error) { + return c.getRawStringData(ctx, "/latest/dynamic/instance-identity/document") +} + +func (c *Client) NewDocumentPKCS7Signature(ctx context.Context, audience string) (string, error) { + path := "/latest/dynamic/instance-identity/pkcs7" + if audience != "" { + val := url.Values{} + val.Set("audience", audience) + path = path + "?" 
+ val.Encode() + } + + return c.getRawStringData(ctx, path) +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/errors.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/errors.go new file mode 100644 index 0000000000..56b8f9dc3a --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/errors.go @@ -0,0 +1,72 @@ +package ecsmetadata + +import ( + "fmt" + "net/http" + "strings" +) + +type HTTPError struct { + URL string + StatusCode int + Header http.Header + Body string + + Err error + Message string +} + +type noRetryError struct { + err error +} + +func newHTTPError(err error, url string, resp *http.Response, body []byte) *HTTPError { + var newBody string + if len(body) > 0 { + newBody = strings.ReplaceAll(string(body), "\n", " ") + newBody = strings.ReplaceAll(newBody, "\r", " ") + newBody = strings.TrimSpace(newBody) + newBody = truncateStr(newBody, 80) + } + + herr := &HTTPError{ + URL: url, + Body: newBody, + Err: err, + Message: err.Error(), + } + if resp != nil { + herr.StatusCode = resp.StatusCode + herr.Header = resp.Header + } + return herr +} + +func newNoRetryError(err error) *noRetryError { + return &noRetryError{err: err} +} + +func (e HTTPError) Error() string { + return fmt.Sprintf("%s. send request to %s failed, status code: %d, body: %s", + e.Message, e.URL, e.StatusCode, e.Body) +} + +func (e HTTPError) Unwrap() error { + return e.Err +} + +func (e noRetryError) Error() string { + return e.err.Error() +} + +func (e noRetryError) Unwrap() error { + return e.err +} + +func truncateStr(raw string, maxLen int) string { + currLen := len(raw) + if currLen <= maxLen { + return raw + } + return raw[:maxLen] + "..." 
+} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/instance.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/instance.go new file mode 100644 index 0000000000..bde458b07f --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/instance.go @@ -0,0 +1,35 @@ +package ecsmetadata + +import ( + "context" + "time" +) + +func (c *Client) GetInstanceType(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/instance/instance-type") +} + +func (c *Client) GetInstanceName(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/instance/instance-name") +} + +func (c *Client) GetInstanceId(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/instance-id") +} + +func (c *Client) GetImageId(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/image-id") +} + +func (c *Client) GetSerialNumber(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/serial-number") +} + +func (c *Client) GetSpotTerminationTime(ctx context.Context) (time.Time, error) { + data, err := c.getTidyStringData(ctx, + "/latest/meta-data/instance/spot/termination-time") + if err != nil { + return time.Time{}, err + } + return time.Parse(time.RFC3339, data) +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/interfaces.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/interfaces.go new file mode 100644 index 0000000000..d51da8217d --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/interfaces.go @@ -0,0 +1,117 @@ +package ecsmetadata + +import ( + "context" + "fmt" +) + +func (c *Client) GetMacs(ctx context.Context) ([]string, error) { + data, err := c.getTidyStringData(ctx, "/latest/meta-data/network/interfaces/macs/") + if err != nil 
{ + return nil, err + } + return parsePathNames(data), nil +} + +func (c *Client) GetInterfaceIdByMac(ctx context.Context, mac string) (string, error) { + return c.getTidyStringData(ctx, + fmt.Sprintf( + "/latest/meta-data/network/interfaces/macs/%s/network-interface-id", + mac)) +} + +func (c *Client) GetNetMaskByMac(ctx context.Context, mac string) (string, error) { + return c.getTidyStringData(ctx, + fmt.Sprintf( + "/latest/meta-data/network/interfaces/macs/%s/netmask", + mac)) +} + +func (c *Client) GetVSwitchCidrBlockIdByMac(ctx context.Context, mac string) (string, error) { + return c.getTidyStringData(ctx, + fmt.Sprintf( + "/latest/meta-data/network/interfaces/macs/%s/vswitch-cidr-block", + mac)) +} + +func (c *Client) GetPrivateIPV4sByMac(ctx context.Context, mac string) ([]string, error) { + data, err := c.getRawData(ctx, + fmt.Sprintf( + "/latest/meta-data/network/interfaces/macs/%s/private-ipv4s", + mac)) + if err != nil { + return nil, err + } + return parseJSONStringArray(data) +} + +func (c *Client) GetVpcIPV6CidrBlocksByMac(ctx context.Context, mac string) ([]string, error) { + data, err := c.getRawData(ctx, + fmt.Sprintf( + "/latest/meta-data/network/interfaces/macs/%s/vpc-ipv6-cidr-blocks", + mac)) + if err != nil { + return nil, err + } + return parseJSONStringArray(data) +} + +func (c *Client) GetVSwitchIdByMac(ctx context.Context, mac string) (string, error) { + return c.getTidyStringData(ctx, + fmt.Sprintf( + "/latest/meta-data/network/interfaces/macs/%s/vswitch-id", + mac)) +} + +func (c *Client) GetVpcIdByMac(ctx context.Context, mac string) (string, error) { + return c.getTidyStringData(ctx, + fmt.Sprintf( + "/latest/meta-data/network/interfaces/macs/%s/vpc-id", + mac)) +} + +func (c *Client) GetPrimaryIPAddressByMac(ctx context.Context, mac string) (string, error) { + return c.getTidyStringData(ctx, + fmt.Sprintf( + "/latest/meta-data/network/interfaces/macs/%s/primary-ip-address", + mac)) +} + +func (c *Client) GetGatewayByMac(ctx 
context.Context, mac string) (string, error) { + return c.getTidyStringData(ctx, + fmt.Sprintf( + "/latest/meta-data/network/interfaces/macs/%s/gateway", + mac)) +} + +func (c *Client) GetIPV6sByMac(ctx context.Context, mac string) ([]string, error) { + data, err := c.getRawData(ctx, + fmt.Sprintf( + "/latest/meta-data/network/interfaces/macs/%s/ipv6s", + mac)) + if err != nil { + return nil, err + } + return parseJSONStringArray(data) +} + +func (c *Client) GetIPV6GatewayByMac(ctx context.Context, mac string) (string, error) { + return c.getTidyStringData(ctx, + fmt.Sprintf( + "/latest/meta-data/network/interfaces/macs/%s/ipv6-gateway", + mac)) +} + +func (c *Client) GetVSwitchIPV6CidrBlockByMac(ctx context.Context, mac string) (string, error) { + return c.getTidyStringData(ctx, + fmt.Sprintf( + "/latest/meta-data/network/interfaces/macs/%s/vswitch-ipv6-cidr-block", + mac)) +} + +func (c *Client) GetIPV4PrefixesByMac(ctx context.Context, mac string) (string, error) { + return c.getTidyStringData(ctx, + fmt.Sprintf( + "/latest/meta-data/network/interfaces/macs/%s/ipv4-prefixes", + mac)) +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/net.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/net.go new file mode 100644 index 0000000000..ee397cbc76 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/net.go @@ -0,0 +1,75 @@ +package ecsmetadata + +import ( + "context" +) + +func (c *Client) GetVpcId(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/vpc-id") +} + +// GetVpcCidrBlockId deprecated, use GetVpcCidrBlock instead +func (c *Client) GetVpcCidrBlockId(ctx context.Context) (string, error) { + return c.GetVpcCidrBlock(ctx) +} + +func (c *Client) GetVpcCidrBlock(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/vpc-cidr-block") +} + +func (c *Client) GetVSwitchId(ctx context.Context) 
(string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/vswitch-id") +} + +// GetVSwitchCidrBlockId deprecated, use GetVSwitchCidrBlock instead +func (c *Client) GetVSwitchCidrBlockId(ctx context.Context) (string, error) { + return c.GetVSwitchCidrBlock(ctx) +} + +func (c *Client) GetVSwitchCidrBlock(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/vswitch-cidr-block") +} + +func (c *Client) GetPrivateIPV4(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/private-ipv4") +} + +func (c *Client) GetPublicIPV4(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/public-ipv4") +} + +func (c *Client) GetEIPV4(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/eipv4") +} + +func (c *Client) GetNetworkType(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/network-type") +} + +func (c *Client) GetMac(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/mac") +} + +func (c *Client) GetDNSNameServers(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/dns-conf/nameservers") +} + +func (c *Client) GetDNSNameServersList(ctx context.Context) ([]string, error) { + data, err := c.GetDNSNameServers(ctx) + if err != nil { + return nil, err + } + return parsePathNames(data), nil +} + +func (c *Client) GetNTPServers(ctx context.Context) (string, error) { + return c.getTidyStringData(ctx, "/latest/meta-data/ntp-conf/ntp-servers") +} + +func (c *Client) GetNTPServersList(ctx context.Context) ([]string, error) { + data, err := c.GetNTPServers(ctx) + if err != nil { + return nil, err + } + return parsePathNames(data), nil +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/ram.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/ram.go 
new file mode 100644 index 0000000000..6b4b9098fd --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/ram.go @@ -0,0 +1,65 @@ +package ecsmetadata + +import ( + "context" + "encoding/json" + "fmt" + "time" +) + +type rawCredentials struct { + AccessKeyId string `json:"AccessKeyId"` + AccessKeySecret string `json:"AccessKeySecret"` + SecurityToken string `json:"SecurityToken"` + Expiration string `json:"Expiration"` + LastUpdated string `json:"LastUpdated"` + Code string `json:"Code"` +} + +type RoleCredentials struct { + AccessKeyId string + AccessKeySecret string + SecurityToken string + Expiration time.Time + LastUpdated time.Time + Code string +} + +func (c *Client) GetRoleName(ctx context.Context) (string, error) { + if c.roleName != "" { + return c.roleName, nil + } + return c.getTidyStringData(ctx, "/latest/meta-data/ram/security-credentials/") +} + +func (c *Client) GetRawRoleCredentials(ctx context.Context, roleName string) (string, error) { + data, err := c.getRawStringData(ctx, "/latest/meta-data/ram/security-credentials/"+roleName) + return data, err +} + +func (c *Client) GetRoleCredentials(ctx context.Context, roleName string) (*RoleCredentials, error) { + data, err := c.GetRawRoleCredentials(ctx, roleName) + if err != nil { + return nil, err + } + + const format = time.RFC3339 + var raw rawCredentials + if err := json.Unmarshal([]byte(data), &raw); err != nil { + return nil, fmt.Errorf("parse credentials failed: %w", err) + } + exp, err := time.Parse(format, raw.Expiration) + if err != nil { + return nil, fmt.Errorf("parse Expiration (%s) failed: %w", raw.Expiration, err) + } + last, _ := time.Parse(format, raw.LastUpdated) + + return &RoleCredentials{ + AccessKeyId: raw.AccessKeyId, + AccessKeySecret: raw.AccessKeySecret, + SecurityToken: raw.SecurityToken, + Expiration: exp, + LastUpdated: last, + Code: raw.Code, + }, nil +} diff --git 
a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/retry.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/retry.go new file mode 100644 index 0000000000..a9f1072d86 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/retry.go @@ -0,0 +1,78 @@ +package ecsmetadata + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" +) + +const defaultMaxRetryTimes = 3 + +type RetryOptions struct { + MaxRetryTimes int + RetryDelayFunc func(n int) time.Duration +} + +func DefaultRetryOptions() *RetryOptions { + return &RetryOptions{ + MaxRetryTimes: defaultMaxRetryTimes, + RetryDelayFunc: func(n int) time.Duration { + return time.Duration(n) * time.Second + }, + } +} + +func retryWithOptions(ctx context.Context, fn func(ctx context.Context) error, opts RetryOptions) error { + if opts.MaxRetryTimes <= 0 { + return fn(ctx) + } + + var lastErr error +retry: + for i := 0; i <= opts.MaxRetryTimes; i++ { + lastErr = fn(ctx) + if lastErr == nil { + return nil + } + var nerr *noRetryError + if errors.As(lastErr, &nerr) { + return nerr.err + } + + if opts.RetryDelayFunc != nil && i < opts.MaxRetryTimes { + delay := opts.RetryDelayFunc(i + 1) + if delay > 0 { + select { + case <-ctx.Done(): + lastErr = ctx.Err() + break retry + case <-time.After(delay): + } + } + } + } + + return fmt.Errorf("retry failed after %d attempts: %w", opts.MaxRetryTimes, lastErr) +} + +func isRetryable(err error) bool { + if err == nil { + return false + } + + if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { + return false + } + + var httperr *HTTPError + if errors.As(err, &httperr) { + if httperr.StatusCode == http.StatusNotFound || + httperr.StatusCode == http.StatusBadRequest { + return false + } + } + + return true +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/test.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/test.go 
new file mode 100644 index 0000000000..a25698331e --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/test.go @@ -0,0 +1,24 @@ +package ecsmetadata + +import ( + "io" + "net/http" + "strings" +) + +type MockWrapper struct { + Mock func(path string) (statusCode int, body string, err error) +} + +func (m *MockWrapper) RoundTrip(req *http.Request) (*http.Response, error) { + path := req.URL.RequestURI() + code, body, err := m.Mock(path) + if err != nil { + return nil, err + } + return &http.Response{ + Status: http.StatusText(code), + StatusCode: code, + Body: io.NopCloser(strings.NewReader(body)), + }, nil +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/userdata.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/userdata.go new file mode 100644 index 0000000000..db75cae95d --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/userdata.go @@ -0,0 +1,11 @@ +package ecsmetadata + +import ( + "context" +) + +// https://help.aliyun.com/zh/ecs/user-guide/customize-the-initialization-configuration-for-an-instance + +func (c *Client) GetUserData(ctx context.Context) (string, error) { + return c.getRawStringData(ctx, "/latest/user-data") +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/utils.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/utils.go new file mode 100644 index 0000000000..385e86993d --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/ecsmetadata/utils.go @@ -0,0 +1,28 @@ +package ecsmetadata + +import ( + "encoding/json" + "strings" +) + +func parsePathNames(data string) []string { + var ret []string + parts := strings.Split(data, "\n") + for _, part := range parts { + part = strings.TrimSpace(part) + part = strings.TrimRight(part, "/") + if part == "" { + continue + } + ret = append(ret, part) + } + return ret +} + +func 
parseJSONStringArray(data []byte) ([]string, error) { + var ret []string + if err := json.Unmarshal(data, &ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md index b11eb07884..97434ea7f7 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -160,7 +160,7 @@ if (err == nil) { ```Go certificatePath := "./example-app.pfx" -certData, err := ioutil.ReadFile(certificatePath) +certData, err := os.ReadFile(certificatePath) if err != nil { return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) } diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go index 9daa4b58b8..f040e2ac6b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -27,7 +27,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -116,7 +116,7 @@ func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConf } s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) + body := io.NopCloser(strings.NewReader(s)) req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) if err != nil { @@ -131,7 +131,7 @@ func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConf } defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) + rb, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) } @@ -175,7 +175,7 @@ func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code } s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) + body := 
io.NopCloser(strings.NewReader(s)) req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) if err != nil { @@ -190,7 +190,7 @@ func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code } defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) + rb, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) } diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go index 2a974a39b3..fb54a43235 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -20,7 +20,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "path/filepath" @@ -62,7 +61,7 @@ func SaveToken(path string, mode os.FileMode, token Token) error { return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) } - newFile, err := ioutil.TempFile(dir, "token") + newFile, err := os.CreateTemp(dir, "token") if err != nil { return fmt.Errorf("failed to create the temp file to write the token: %v", err) } diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 2a24ab80cf..67baecd83f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -25,7 +25,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "net/http" "net/url" @@ -1061,7 +1060,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource } else if msiSecret.clientResourceID != "" { data.Set("msi_res_id", msiSecret.clientResourceID) } - req.Body = ioutil.NopCloser(strings.NewReader(data.Encode())) + req.Body = io.NopCloser(strings.NewReader(data.Encode())) req.Header.Set("Content-Type", "application/x-www-form-urlencoded") break case 
msiTypeIMDS: @@ -1096,7 +1095,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource } s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) + body := io.NopCloser(strings.NewReader(s)) req.ContentLength = int64(len(s)) req.Header.Set(contentType, mimeTypeFormPost) req.Body = body @@ -1113,7 +1112,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter}) defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) + rb, err := io.ReadAll(resp.Body) if resp.StatusCode != http.StatusOK { if err != nil { @@ -1235,7 +1234,7 @@ func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http for attempt < maxAttempts { if resp != nil && resp.Body != nil { - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() } resp, err = sender.Do(req) diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go index 2af5030a1c..c58d7b7b8d 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go @@ -190,7 +190,7 @@ func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) } } - return string(cr.Bytes()), nil + return cr.String(), nil } func getCanonicalizedAccountName(accountName string) string { @@ -289,7 +289,7 @@ func buildCanonicalizedHeader(headers http.Header) string { ch.WriteRune('\n') } - return strings.TrimSuffix(string(ch.Bytes()), "\n") + return strings.TrimSuffix(ch.String(), "\n") } func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string { diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index 
45575eedbf..f119b11d48 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -19,7 +19,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -318,11 +318,11 @@ func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { if err == nil && resp.Body != nil { // copy the body and close it so callers don't have to defer resp.Body.Close() - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { return resp, err } - resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + resp.Body = io.NopCloser(bytes.NewReader(b)) } return resp, err } @@ -459,12 +459,12 @@ func (pt *pollingTrackerBase) updateRawBody() error { pt.rawBody = map[string]interface{}{} if pt.resp.ContentLength != 0 { defer pt.resp.Body.Close() - b, err := ioutil.ReadAll(pt.resp.Body) + b, err := io.ReadAll(pt.resp.Body) if err != nil { return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") } // put the body back so it's available to other callers - pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + pt.resp.Body = io.NopCloser(bytes.NewReader(b)) // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty if len(b) == 0 { return nil @@ -516,11 +516,11 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() { re := respErr{} defer pt.resp.Body.Close() var b []byte - if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { + if b, err = io.ReadAll(pt.resp.Body); err != nil { goto Default } // put the body back so it's available to other callers - pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + pt.resp.Body = io.NopCloser(bytes.NewReader(b)) if len(b) == 0 { goto Default } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go index e97589dcdc..25697b3c85 
100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go @@ -21,7 +21,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "log" "os" "strings" @@ -325,7 +325,7 @@ func GetSettingsFromFile() (FileSettings, error) { return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set") } - contents, err := ioutil.ReadFile(fileLocation) + contents, err := os.ReadFile(fileLocation) if err != nil { return s, err } @@ -488,7 +488,7 @@ func decode(b []byte) ([]byte, error) { } return []byte(string(utf16.Decode(u16))), nil } - return ioutil.ReadAll(reader) + return io.ReadAll(reader) } func (settings FileSettings) getResourceForToken(baseURI string) (string, error) { @@ -636,7 +636,7 @@ func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincip if err != nil { return nil, err } - certData, err := ioutil.ReadFile(ccc.CertificatePath) + certData, err := os.ReadFile(ccc.CertificatePath) if err != nil { return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) } @@ -653,7 +653,7 @@ func (ccc ClientCertificateConfig) MultiTenantServicePrincipalToken() (*adal.Mul if err != nil { return nil, err } - certData, err := ioutil.ReadFile(ccc.CertificatePath) + certData, err := os.ReadFile(ccc.CertificatePath) if err != nil { return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go index 868345db68..09c870809b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -20,7 +20,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "regexp" "strconv" @@ -333,7 +333,7 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { // 
Copy and replace the Body in case it does not contain an error object. // This will leave the Body available to the caller. b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e) - resp.Body = ioutil.NopCloser(&b) + resp.Body = io.NopCloser(&b) if decodeErr != nil { return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, decodeErr) } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go index f45c3a516d..16f3be38cb 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go @@ -18,7 +18,6 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" @@ -66,7 +65,7 @@ func ProfilePath() (string, error) { // LoadProfile restores a Profile object from a file located at 'path'. func LoadProfile(path string) (result Profile, err error) { var contents []byte - contents, err = ioutil.ReadFile(path) + contents, err = os.ReadFile(path) if err != nil { err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) return diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go index 486619111c..056f6c342f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go @@ -47,7 +47,7 @@ type Token struct { const accessTokensJSON = "accessTokens.json" -// ToADALToken converts an Azure CLI `Token`` to an `adal.Token`` +// ToADALToken converts an Azure CLI `Token“ to an `adal.Token“ func (t Token) ToADALToken() (converted adal.Token, err error) { tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn) if err != nil { diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go 
b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index b0a53769f2..4684291a88 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -17,7 +17,6 @@ package azure import ( "encoding/json" "fmt" - "io/ioutil" "os" "strings" ) @@ -315,7 +314,7 @@ func EnvironmentFromName(name string) (Environment, error) { // This function is particularly useful in the Hybrid Cloud model, where one must define their own // endpoints. func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { - fileContents, err := ioutil.ReadFile(location) + fileContents, err := os.ReadFile(location) if err != nil { return } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go index 507f9e95cf..f436a4512a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go @@ -3,7 +3,7 @@ package azure import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "strings" @@ -236,7 +236,7 @@ func retrieveMetadataEnvironment(endpoint string) (environment environmentMetada return environment, err } defer response.Body.Close() - jsonResponse, err := ioutil.ReadAll(response.Body) + jsonResponse, err := io.ReadAll(response.Body) if err != nil { return environment, err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go index bb5f9396e9..b2f2357e76 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -20,7 +20,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" "net/http" "strings" @@ -106,14 +105,14 @@ func (li LoggingInspector) WithInspection() PrepareDecorator { defer r.Body.Close() - r.Body = 
ioutil.NopCloser(io.TeeReader(r.Body, &body)) + r.Body = io.NopCloser(io.TeeReader(r.Body, &body)) if err := r.Write(&b); err != nil { return nil, fmt.Errorf("Failed to write response: %v", err) } li.Logger.Printf(requestFormat, b.String()) - r.Body = ioutil.NopCloser(&body) + r.Body = io.NopCloser(&body) return p.Prepare(r) }) } @@ -129,14 +128,14 @@ func (li LoggingInspector) ByInspecting() RespondDecorator { return ResponderFunc(func(resp *http.Response) error { var body, b bytes.Buffer defer resp.Body.Close() - resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) + resp.Body = io.NopCloser(io.TeeReader(resp.Body, &body)) if err := resp.Write(&b); err != nil { return fmt.Errorf("Failed to write response: %v", err) } li.Logger.Printf(responseFormat, b.String()) - resp.Body = ioutil.NopCloser(&body) + resp.Body = io.NopCloser(&body) return r.Respond(resp) }) } diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go index 4e05432071..c879c200a7 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -1,3 +1,4 @@ +//go:build modhack // +build modhack package date diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go index 121a66fa88..f6de8c5e93 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -21,7 +21,6 @@ import ( "encoding/xml" "fmt" "io" - "io/ioutil" "mime/multipart" "net/http" "net/url" @@ -268,7 +267,7 @@ func WithBytes(input *[]byte) PrepareDecorator { } r.ContentLength = int64(len(*input)) - r.Body = ioutil.NopCloser(bytes.NewReader(*input)) + r.Body = io.NopCloser(bytes.NewReader(*input)) } return r, err }) @@ -296,7 +295,7 @@ func WithFormData(v url.Values) PrepareDecorator { 
setHeader(r, http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost) r.ContentLength = int64(len(s)) - r.Body = ioutil.NopCloser(strings.NewReader(s)) + r.Body = io.NopCloser(strings.NewReader(s)) } return r, err }) @@ -331,7 +330,7 @@ func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDec return r, err } setHeader(r, http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) - r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + r.Body = io.NopCloser(bytes.NewReader(body.Bytes())) r.ContentLength = int64(body.Len()) return r, err } @@ -346,11 +345,11 @@ func WithFile(f io.ReadCloser) PrepareDecorator { return PreparerFunc(func(r *http.Request) (*http.Request, error) { r, err := p.Prepare(r) if err == nil { - b, err := ioutil.ReadAll(f) + b, err := io.ReadAll(f) if err != nil { return r, err } - r.Body = ioutil.NopCloser(bytes.NewReader(b)) + r.Body = io.NopCloser(bytes.NewReader(b)) r.ContentLength = int64(len(b)) } return r, err @@ -396,7 +395,7 @@ func WithString(v string) PrepareDecorator { r, err := p.Prepare(r) if err == nil { r.ContentLength = int64(len(v)) - r.Body = ioutil.NopCloser(strings.NewReader(v)) + r.Body = io.NopCloser(strings.NewReader(v)) } return r, err }) @@ -413,7 +412,7 @@ func WithJSON(v interface{}) PrepareDecorator { b, err := json.Marshal(v) if err == nil { r.ContentLength = int64(len(b)) - r.Body = ioutil.NopCloser(bytes.NewReader(b)) + r.Body = io.NopCloser(bytes.NewReader(b)) } } return r, err @@ -436,7 +435,7 @@ func WithXML(v interface{}) PrepareDecorator { r.ContentLength = int64(len(bytesWithHeader)) setHeader(r, headerContentLength, fmt.Sprintf("%d", len(bytesWithHeader))) - r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) + r.Body = io.NopCloser(bytes.NewReader(bytesWithHeader)) } } return r, err diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go index 349e1963a2..69d4b2b67b 100644 --- 
a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -20,7 +20,6 @@ import ( "encoding/xml" "fmt" "io" - "io/ioutil" "net/http" "strings" ) @@ -111,7 +110,7 @@ func ByDiscardingBody() RespondDecorator { return ResponderFunc(func(resp *http.Response) error { err := r.Respond(resp) if err == nil && resp != nil && resp.Body != nil { - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + if _, err := io.Copy(io.Discard, resp.Body); err != nil { return fmt.Errorf("Error discarding the response body: %v", err) } } @@ -160,7 +159,7 @@ func ByUnmarshallingBytes(v *[]byte) RespondDecorator { return ResponderFunc(func(resp *http.Response) error { err := r.Respond(resp) if err == nil { - bytes, errInner := ioutil.ReadAll(resp.Body) + bytes, errInner := io.ReadAll(resp.Body) if errInner != nil { err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) } else { @@ -179,7 +178,7 @@ func ByUnmarshallingJSON(v interface{}) RespondDecorator { return ResponderFunc(func(resp *http.Response) error { err := r.Respond(resp) if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) + b, errInner := io.ReadAll(resp.Body) // Some responses might include a BOM, remove for successful unmarshalling b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) if errInner != nil { @@ -203,7 +202,7 @@ func ByUnmarshallingXML(v interface{}) RespondDecorator { return ResponderFunc(func(resp *http.Response) error { err := r.Respond(resp) if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) + b, errInner := io.ReadAll(resp.Body) if errInner != nil { err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) } else { @@ -232,9 +231,9 @@ func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { resp.Status) if resp.Body != nil { defer resp.Body.Close() - b, _ := ioutil.ReadAll(resp.Body) + b, _ := io.ReadAll(resp.Body) derr.ServiceError = b - resp.Body = 
ioutil.NopCloser(bytes.NewReader(b)) + resp.Body = io.NopCloser(bytes.NewReader(b)) } err = derr } diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go index fa11dbed79..7634b0f570 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go @@ -17,7 +17,6 @@ package autorest import ( "bytes" "io" - "io/ioutil" "net/http" ) @@ -41,12 +40,12 @@ func (rr *RetriableRequest) prepareFromByteReader() (err error) { return err } } else { - b, err = ioutil.ReadAll(rr.req.Body) + b, err = io.ReadAll(rr.req.Body) if err != nil { return err } } rr.br = bytes.NewReader(b) - rr.req.Body = ioutil.NopCloser(rr.br) + rr.req.Body = io.NopCloser(rr.br) return err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go index 4c87030e81..8340bda408 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go @@ -19,7 +19,7 @@ package autorest import ( "bytes" - "io/ioutil" + "io" "net/http" ) @@ -33,10 +33,10 @@ type RetriableRequest struct { func (rr *RetriableRequest) Prepare() (err error) { // preserve the request body; this is to support retry logic as // the underlying transport will always close the reqeust body - if rr.req.Body != nil { + if rr.req.Body != nil && rr.req.Body != http.NoBody { if rr.br != nil { _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) - rr.req.Body = ioutil.NopCloser(rr.br) + rr.req.Body = io.NopCloser(rr.br) } if err != nil { return err diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go index 05847c08ba..e36d4b0465 100644 --- 
a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go @@ -20,7 +20,6 @@ package autorest import ( "bytes" "io" - "io/ioutil" "net/http" ) @@ -35,12 +34,12 @@ type RetriableRequest struct { func (rr *RetriableRequest) Prepare() (err error) { // preserve the request body; this is to support retry logic as // the underlying transport will always close the reqeust body - if rr.req.Body != nil { + if rr.req.Body != nil && rr.req.Body != http.NoBody { if rr.rc != nil { rr.req.Body = rr.rc } else if rr.br != nil { _, err = rr.br.Seek(0, io.SeekStart) - rr.req.Body = ioutil.NopCloser(rr.br) + rr.req.Body = io.NopCloser(rr.br) } if err != nil { return err diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go index d35b3850ab..8c5eb5dbe5 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -20,7 +20,6 @@ import ( "encoding/xml" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -217,7 +216,7 @@ func IsTemporaryNetworkError(err error) bool { // DrainResponseBody reads the response body then closes it. 
func DrainResponseBody(resp *http.Response) error { if resp != nil && resp.Body != nil { - _, err := io.Copy(ioutil.Discard, resp.Body) + _, err := io.Copy(io.Discard, resp.Body) resp.Body.Close() return err } diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go index 0aa27680db..be6aa9ee8b 100644 --- a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go @@ -1,3 +1,4 @@ +//go:build modhack // +build modhack package logger diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go index 2f5d8cc1a1..e70dc3dc29 100644 --- a/vendor/github.com/Azure/go-autorest/logger/logger.go +++ b/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -18,7 +18,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -182,7 +181,7 @@ var Instance Writer // default log level var logLevel = LogNone -// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. +// Level returns the value specified in AZURE_GO_SDK_LOG_LEVEL. // If no value was specified the default value is LogNone. // Custom loggers can call this to retrieve the configured log level. 
func Level() LevelType { @@ -275,7 +274,7 @@ func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { } if fl.shouldLogBody(req.Header, req.Body) { // dump body - body, err := ioutil.ReadAll(req.Body) + body, err := io.ReadAll(req.Body) if err == nil { fmt.Fprintln(b, string(filter.processBody(body))) if nc, ok := req.Body.(io.Seeker); ok { @@ -283,7 +282,7 @@ func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { nc.Seek(0, io.SeekStart) } else { // recreate the body - req.Body = ioutil.NopCloser(bytes.NewReader(body)) + req.Body = io.NopCloser(bytes.NewReader(body)) } } else { fmt.Fprintf(b, "failed to read body: %v\n", err) @@ -310,10 +309,10 @@ func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { if fl.shouldLogBody(resp.Header, resp.Body) { // dump body defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err == nil { fmt.Fprintln(b, string(filter.processBody(body))) - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + resp.Body = io.NopCloser(bytes.NewReader(body)) } else { fmt.Fprintf(b, "failed to read body: %v\n", err) } diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go index e163975cd4..2136925751 100644 --- a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go @@ -1,3 +1,4 @@ +//go:build modhack // +build modhack package tracing diff --git a/vendor/github.com/alibabacloud-go/debug/debug/assert.go b/vendor/github.com/alibabacloud-go/debug/debug/assert.go deleted file mode 100644 index 6fca15a63c..0000000000 --- a/vendor/github.com/alibabacloud-go/debug/debug/assert.go +++ /dev/null @@ -1,12 +0,0 @@ -package debug - -import ( - "reflect" - "testing" -) - -func assertEqual(t *testing.T, a, b interface{}) { - if !reflect.DeepEqual(a, b) { - t.Errorf("%v != %v", a, b) - } -} diff --git 
a/vendor/github.com/alibabacloud-go/debug/debug/debug.go b/vendor/github.com/alibabacloud-go/debug/debug/debug.go index c977cb8c3d..c9c8b54225 100644 --- a/vendor/github.com/alibabacloud-go/debug/debug/debug.go +++ b/vendor/github.com/alibabacloud-go/debug/debug/debug.go @@ -1,3 +1,22 @@ +// Package debug is a library to display debug info that control by enviroment variable DEBUG +// +// # Example +// +// package main +// // import the package +// import "github.com/alibabacloud-go/debug/debug" +// +// // init a debug method +// var d = debug.Init("sdk") +// +// func main() { +// // try `go run demo.go` +// // and `DEBUG=sdk go run demo.go` +// d("this debug information just print when DEBUG environment variable was set") +// } +// +// When you run application with `DEBUG=sdk go run main.go`, it will display logs. Otherwise +// it do nothing package debug import ( @@ -6,6 +25,8 @@ import ( "strings" ) +// Debug is a method that display logs, it is useful for developer to trace program running +// details when troubleshooting type Debug func(format string, v ...interface{}) var hookGetEnv = func() string { @@ -16,6 +37,7 @@ var hookPrint = func(input string) { fmt.Println(input) } +// Init returns a debug method that based the enviroment variable DEBUG value func Init(flag string) Debug { enable := false diff --git a/vendor/github.com/alibabacloud-go/openapi-util/service/service.go b/vendor/github.com/alibabacloud-go/openapi-util/service/service.go index 245eeccb08..9dbb68bdeb 100644 --- a/vendor/github.com/alibabacloud-go/openapi-util/service/service.go +++ b/vendor/github.com/alibabacloud-go/openapi-util/service/service.go @@ -30,7 +30,7 @@ import ( "strings" "time" - util "github.com/alibabacloud-go/tea-utils/service" + util "github.com/alibabacloud-go/tea-utils/v2/service" "github.com/alibabacloud-go/tea/tea" "github.com/tjfoc/gmsm/sm3" ) @@ -633,3 +633,165 @@ func shaHmac1(source, secret string) []byte { hmac.Write([]byte(source)) return hmac.Sum(nil) } + 
+func getTimeLeft(rateLimit *string) (_result *int64) { + if rateLimit != nil { + pairs := strings.Split(tea.StringValue(rateLimit), ",") + for _, pair := range pairs { + kv := strings.Split(pair, ":") + if len(kv) == 2 { + key, value := kv[0], kv[1] + if key == "TimeLeft" { + timeLeftValue, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return nil + } + return tea.Int64(timeLeftValue) + } + } + } + } + return nil +} + +/** + * Get throttling param + * @param the response headers + * @return time left + */ +func GetThrottlingTimeLeft(headers map[string]*string) (_result *int64) { + rateLimitForUserApi := headers["x-ratelimit-user-api"] + rateLimitForUser := headers["x-ratelimit-user"] + timeLeftForUserApi := getTimeLeft(rateLimitForUserApi) + timeLeftForUser := getTimeLeft(rateLimitForUser) + if tea.Int64Value(timeLeftForUserApi) > tea.Int64Value(timeLeftForUser) { + return timeLeftForUserApi + } else { + return timeLeftForUser + } +} + +/** + * Parse map with flat style + * @param object the object + * @return the object + */ +func MapToFlatStyle(object interface{}) interface{} { + if object == nil { + return object + } + + val := reflect.ValueOf(object) + if !val.IsValid() { + return object + } + + // Handle pointer types + if val.Kind() == reflect.Ptr { + if val.IsNil() { + return object + } + val = val.Elem() + } + + // Handle slice/array (List) + if val.Kind() == reflect.Slice || val.Kind() == reflect.Array { + result := make([]interface{}, val.Len()) + for i := 0; i < val.Len(); i++ { + result[i] = MapToFlatStyle(val.Index(i).Interface()) + } + return result + } + + // Handle struct (TeaModel equivalent) + if val.Kind() == reflect.Struct { + // Create a pointer to the struct so we can modify it + if reflect.TypeOf(object).Kind() == reflect.Ptr { + // Already a pointer + val = reflect.ValueOf(object).Elem() + } else { + // Make a copy and work with pointer + ptrVal := reflect.New(val.Type()) + ptrVal.Elem().Set(val) + val = ptrVal.Elem() + object = 
ptrVal.Interface() + } + + valType := val.Type() + for i := 0; i < val.NumField(); i++ { + field := val.Field(i) + fieldType := valType.Field(i) + + // Skip unexported fields + if !field.CanSet() { + continue + } + + fieldValue := field.Interface() + + // Check if field is a map + if field.Kind() == reflect.Map { + flatMap := make(map[string]interface{}) + iter := field.MapRange() + for iter.Next() { + key := iter.Key() + value := iter.Value() + keyStr := fmt.Sprintf("%v", key.Interface()) + flatKey := fmt.Sprintf("#%d#%s", len(keyStr), keyStr) + flatMap[flatKey] = MapToFlatStyle(value.Interface()) + } + + // Set the flattened map back to the field + newMapValue := reflect.MakeMap(field.Type()) + for k, v := range flatMap { + keyVal := reflect.ValueOf(k) + valVal := reflect.ValueOf(v) + if valVal.IsValid() && valVal.Type().AssignableTo(field.Type().Elem()) { + newMapValue.SetMapIndex(keyVal, valVal) + } else if valVal.IsValid() { + // Try to convert the value + if field.Type().Elem().Kind() == reflect.Interface { + newMapValue.SetMapIndex(keyVal, valVal) + } + } + } + if newMapValue.Len() > 0 { + field.Set(newMapValue) + } + } else { + // Recursively process other fields + processed := MapToFlatStyle(fieldValue) + if processed != nil && field.CanSet() { + processedVal := reflect.ValueOf(processed) + if processedVal.IsValid() { + // Only set if types are compatible + if processedVal.Type().AssignableTo(fieldType.Type) { + field.Set(processedVal) + } else if fieldType.Type.Kind() == reflect.Interface { + field.Set(processedVal) + } else if processedVal.Type().ConvertibleTo(fieldType.Type) { + field.Set(processedVal.Convert(fieldType.Type)) + } + } + } + } + } + return object + } + + // Handle map + if val.Kind() == reflect.Map { + flatMap := make(map[string]interface{}) + iter := val.MapRange() + for iter.Next() { + key := iter.Key() + value := iter.Value() + keyStr := fmt.Sprintf("%v", key.Interface()) + flatKey := fmt.Sprintf("#%d#%s", len(keyStr), keyStr) + 
flatMap[flatKey] = MapToFlatStyle(value.Interface()) + } + return flatMap + } + + return object +} diff --git a/vendor/github.com/alibabacloud-go/tea-utils/v2/LICENSE b/vendor/github.com/alibabacloud-go/tea-utils/v2/LICENSE new file mode 100644 index 0000000000..0c44dcefe3 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea-utils/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/alibabacloud-go/tea-utils/v2/service/service.go b/vendor/github.com/alibabacloud-go/tea-utils/v2/service/service.go new file mode 100644 index 0000000000..1fb0ce03b4 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea-utils/v2/service/service.go @@ -0,0 +1,694 @@ +package service + +import ( + "bufio" + "bytes" + "crypto/md5" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/url" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/alibabacloud-go/tea/tea" +) + +var defaultUserAgent = fmt.Sprintf("AlibabaCloud (%s; %s) Golang/%s Core/%s TeaDSL/1", runtime.GOOS, runtime.GOARCH, strings.Trim(runtime.Version(), "go"), "0.01") + +type ExtendsParameters struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty"` + Queries map[string]*string `json:"queries,omitempty" xml:"queries,omitempty"` +} + +func (s ExtendsParameters) String() string { + return tea.Prettify(s) +} + +func (s ExtendsParameters) GoString() string { + return s.String() +} + +func (s *ExtendsParameters) SetHeaders(v map[string]*string) *ExtendsParameters { + s.Headers = v + return s +} + +func (s *ExtendsParameters) SetQueries(v map[string]*string) *ExtendsParameters { + s.Queries = v + return s +} + +type RuntimeOptions struct { + 
IdleTimeout *int `json:"idleTimeout" xml:"idleTimeout"` + Autoretry *bool `json:"autoretry" xml:"autoretry"` + IgnoreSSL *bool `json:"ignoreSSL" xml:"ignoreSSL"` + Key *string `json:"key,omitempty" xml:"key,omitempty"` + Cert *string `json:"cert,omitempty" xml:"cert,omitempty"` + Ca *string `json:"ca,omitempty" xml:"ca,omitempty"` + MaxAttempts *int `json:"maxAttempts" xml:"maxAttempts"` + BackoffPolicy *string `json:"backoffPolicy" xml:"backoffPolicy"` + BackoffPeriod *int `json:"backoffPeriod" xml:"backoffPeriod"` + ReadTimeout *int `json:"readTimeout" xml:"readTimeout"` + ConnectTimeout *int `json:"connectTimeout" xml:"connectTimeout"` + LocalAddr *string `json:"localAddr" xml:"localAddr"` + HttpProxy *string `json:"httpProxy" xml:"httpProxy"` + HttpsProxy *string `json:"httpsProxy" xml:"httpsProxy"` + NoProxy *string `json:"noProxy" xml:"noProxy"` + MaxIdleConns *int `json:"maxIdleConns" xml:"maxIdleConns"` + Socks5Proxy *string `json:"socks5Proxy" xml:"socks5Proxy"` + Socks5NetWork *string `json:"socks5NetWork" xml:"socks5NetWork"` + KeepAlive *bool `json:"keepAlive" xml:"keepAlive"` + ExtendsParameters *ExtendsParameters `json:"extendsParameters,omitempty" xml:"extendsParameters,omitempty"` + + // WebSocket Specific Configuration + WebSocketPingInterval *int `json:"webSocketPingInterval" xml:"webSocketPingInterval"` // Ping 间隔(毫秒) + WebSocketPongTimeout *int `json:"webSocketPongTimeout" xml:"webSocketPongTimeout"` // Pong 超时(毫秒) + WebSocketEnableReconnect *bool `json:"webSocketEnableReconnect" xml:"webSocketEnableReconnect"` // 是否启用自动重连 + WebSocketReconnectInterval *int `json:"webSocketReconnectInterval" xml:"webSocketReconnectInterval"` // 重连间隔(毫秒) + WebSocketMaxReconnectTimes *int `json:"webSocketMaxReconnectTimes" xml:"webSocketMaxReconnectTimes"` // 最大重连次数 + WebSocketWriteTimeout *int `json:"webSocketWriteTimeout" xml:"webSocketWriteTimeout"` // 写入超时(毫秒) + WebSocketHandshakeTimeout *int `json:"webSocketHandshakeTimeout" xml:"webSocketHandshakeTimeout"` // 
握手超时(毫秒) + WebSocketHandler interface{} `json:"-" xml:"-"` // WebSocket 消息处理器(不序列化),应使用 dara.WebSocketHandler 类型 +} + +var processStartTime int64 = time.Now().UnixNano() / 1e6 +var seqId int64 = 0 + +type SSEEvent struct { + ID *string + Event *string + Data *string + Retry *int +} + +func parseEvent(eventLines []string) (SSEEvent, error) { + var event SSEEvent + + for _, line := range eventLines { + if strings.HasPrefix(line, "data:") { + event.Data = tea.String(tea.StringValue(event.Data) + strings.TrimPrefix(line, "data:") + "\n") + } else if strings.HasPrefix(line, "id:") { + id := strings.TrimPrefix(line, "id:") + event.ID = tea.String(strings.Trim(id, " ")) + } else if strings.HasPrefix(line, "event:") { + eventName := strings.TrimPrefix(line, "event:") + event.Event = tea.String(strings.Trim(eventName, " ")) + } else if strings.HasPrefix(line, "retry:") { + trimmedLine := strings.TrimPrefix(line, "retry:") + trimmedLine = strings.Trim(trimmedLine, " ") + retryValue, _err := strconv.Atoi(trimmedLine) + if _err != nil { + return event, fmt.Errorf("retry %v is not a int", trimmedLine) + } + event.Retry = tea.Int(retryValue) + } + } + data := strings.TrimRight(tea.StringValue(event.Data), "\n") + event.Data = tea.String(strings.Trim(data, " ")) + return event, nil +} + +func getGID() uint64 { + // https://blog.sgmansfield.com/2015/12/goroutine-ids/ + b := make([]byte, 64) + b = b[:runtime.Stack(b, false)] + b = bytes.TrimPrefix(b, []byte("goroutine ")) + b = b[:bytes.IndexByte(b, ' ')] + n, _ := strconv.ParseUint(string(b), 10, 64) + return n +} + +func (s RuntimeOptions) String() string { + return tea.Prettify(s) +} + +func (s RuntimeOptions) GoString() string { + return s.String() +} + +func (s *RuntimeOptions) SetAutoretry(v bool) *RuntimeOptions { + s.Autoretry = &v + return s +} + +func (s *RuntimeOptions) SetIgnoreSSL(v bool) *RuntimeOptions { + s.IgnoreSSL = &v + return s +} + +func (s *RuntimeOptions) SetKey(v string) *RuntimeOptions { + s.Key = &v + 
return s +} + +func (s *RuntimeOptions) SetCert(v string) *RuntimeOptions { + s.Cert = &v + return s +} + +func (s *RuntimeOptions) SetCa(v string) *RuntimeOptions { + s.Ca = &v + return s +} + +func (s *RuntimeOptions) SetMaxAttempts(v int) *RuntimeOptions { + s.MaxAttempts = &v + return s +} + +func (s *RuntimeOptions) SetBackoffPolicy(v string) *RuntimeOptions { + s.BackoffPolicy = &v + return s +} + +func (s *RuntimeOptions) SetBackoffPeriod(v int) *RuntimeOptions { + s.BackoffPeriod = &v + return s +} + +func (s *RuntimeOptions) SetReadTimeout(v int) *RuntimeOptions { + s.ReadTimeout = &v + return s +} + +func (s *RuntimeOptions) SetConnectTimeout(v int) *RuntimeOptions { + s.ConnectTimeout = &v + return s +} + +func (s *RuntimeOptions) SetIdleTimeout(v int) *RuntimeOptions { + s.IdleTimeout = &v + return s +} +func (s *RuntimeOptions) SetHttpProxy(v string) *RuntimeOptions { + s.HttpProxy = &v + return s +} + +func (s *RuntimeOptions) SetHttpsProxy(v string) *RuntimeOptions { + s.HttpsProxy = &v + return s +} + +func (s *RuntimeOptions) SetNoProxy(v string) *RuntimeOptions { + s.NoProxy = &v + return s +} + +func (s *RuntimeOptions) SetMaxIdleConns(v int) *RuntimeOptions { + s.MaxIdleConns = &v + return s +} + +func (s *RuntimeOptions) SetLocalAddr(v string) *RuntimeOptions { + s.LocalAddr = &v + return s +} + +func (s *RuntimeOptions) SetSocks5Proxy(v string) *RuntimeOptions { + s.Socks5Proxy = &v + return s +} + +func (s *RuntimeOptions) SetSocks5NetWork(v string) *RuntimeOptions { + s.Socks5NetWork = &v + return s +} + +func (s *RuntimeOptions) SetKeepAlive(v bool) *RuntimeOptions { + s.KeepAlive = &v + return s +} + +func (s *RuntimeOptions) SetExtendsParameters(v *ExtendsParameters) *RuntimeOptions { + s.ExtendsParameters = v + return s +} + +func (s *RuntimeOptions) SetWebSocketPingInterval(v int) *RuntimeOptions { + s.WebSocketPingInterval = &v + return s +} + +func (s *RuntimeOptions) SetWebSocketPongTimeout(v int) *RuntimeOptions { + 
s.WebSocketPongTimeout = &v + return s +} + +func (s *RuntimeOptions) SetWebSocketEnableReconnect(v bool) *RuntimeOptions { + s.WebSocketEnableReconnect = &v + return s +} + +func (s *RuntimeOptions) SetWebSocketReconnectInterval(v int) *RuntimeOptions { + s.WebSocketReconnectInterval = &v + return s +} + +func (s *RuntimeOptions) SetWebSocketMaxReconnectTimes(v int) *RuntimeOptions { + s.WebSocketMaxReconnectTimes = &v + return s +} + +func (s *RuntimeOptions) SetWebSocketWriteTimeout(v int) *RuntimeOptions { + s.WebSocketWriteTimeout = &v + return s +} + +func (s *RuntimeOptions) SetWebSocketHandshakeTimeout(v int) *RuntimeOptions { + s.WebSocketHandshakeTimeout = &v + return s +} + +func (s *RuntimeOptions) SetWebSocketHandler(v interface{}) *RuntimeOptions { + s.WebSocketHandler = v + return s +} + +func ReadAsString(body io.Reader) (*string, error) { + byt, err := ioutil.ReadAll(body) + if err != nil { + return tea.String(""), err + } + r, ok := body.(io.ReadCloser) + if ok { + r.Close() + } + return tea.String(string(byt)), nil +} + +func StringifyMapValue(a map[string]interface{}) map[string]*string { + res := make(map[string]*string) + for key, value := range a { + if value != nil { + res[key] = ToJSONString(value) + } + } + return res +} + +func AnyifyMapValue(a map[string]*string) map[string]interface{} { + res := make(map[string]interface{}) + for key, value := range a { + res[key] = tea.StringValue(value) + } + return res +} + +func ReadAsBytes(body io.Reader) ([]byte, error) { + byt, err := ioutil.ReadAll(body) + if err != nil { + return nil, err + } + r, ok := body.(io.ReadCloser) + if ok { + r.Close() + } + return byt, nil +} + +func DefaultString(reaStr, defaultStr *string) *string { + if reaStr == nil { + return defaultStr + } + return reaStr +} + +func ToJSONString(a interface{}) *string { + switch v := a.(type) { + case *string: + return v + case string: + return tea.String(v) + case []byte: + return tea.String(string(v)) + case io.Reader: + byt, 
err := ioutil.ReadAll(v) + if err != nil { + return nil + } + return tea.String(string(byt)) + } + byt := bytes.NewBuffer([]byte{}) + jsonEncoder := json.NewEncoder(byt) + jsonEncoder.SetEscapeHTML(false) + if err := jsonEncoder.Encode(a); err != nil { + return nil + } + return tea.String(string(bytes.TrimSpace(byt.Bytes()))) +} + +func DefaultNumber(reaNum, defaultNum *int) *int { + if reaNum == nil { + return defaultNum + } + return reaNum +} + +func ReadAsJSON(body io.Reader) (result interface{}, err error) { + byt, err := ioutil.ReadAll(body) + if err != nil { + return + } + if string(byt) == "" { + return + } + r, ok := body.(io.ReadCloser) + if ok { + r.Close() + } + d := json.NewDecoder(bytes.NewReader(byt)) + d.UseNumber() + err = d.Decode(&result) + return +} + +func GetNonce() *string { + routineId := getGID() + currentTime := time.Now().UnixNano() / 1e6 + seq := atomic.AddInt64(&seqId, 1) + randNum := rand.Int63() + msg := fmt.Sprintf("%d-%d-%d-%d-%d", processStartTime, routineId, currentTime, seq, randNum) + h := md5.New() + h.Write([]byte(msg)) + ret := hex.EncodeToString(h.Sum(nil)) + return &ret +} + +func Empty(val *string) *bool { + return tea.Bool(val == nil || tea.StringValue(val) == "") +} + +func ValidateModel(a interface{}) error { + if a == nil { + return nil + } + err := tea.Validate(a) + return err +} + +func EqualString(val1, val2 *string) *bool { + return tea.Bool(tea.StringValue(val1) == tea.StringValue(val2)) +} + +func EqualNumber(val1, val2 *int) *bool { + return tea.Bool(tea.IntValue(val1) == tea.IntValue(val2)) +} + +func IsUnset(val interface{}) *bool { + if val == nil { + return tea.Bool(true) + } + + v := reflect.ValueOf(val) + if v.Kind() == reflect.Ptr || v.Kind() == reflect.Slice || v.Kind() == reflect.Map { + return tea.Bool(v.IsNil()) + } + + valType := reflect.TypeOf(val) + valZero := reflect.Zero(valType) + return tea.Bool(valZero == v) +} + +func ToBytes(a *string) []byte { + return []byte(tea.StringValue(a)) +} + +func 
AssertAsMap(a interface{}) (_result map[string]interface{}, _err error) { + r := reflect.ValueOf(a) + if r.Kind().String() != "map" { + return nil, errors.New(fmt.Sprintf("%v is not a map[string]interface{}", a)) + } + + res := make(map[string]interface{}) + tmp := r.MapKeys() + for _, key := range tmp { + res[key.String()] = r.MapIndex(key).Interface() + } + + return res, nil +} + +func AssertAsNumber(a interface{}) (_result *int, _err error) { + res := 0 + switch a.(type) { + case int: + tmp := a.(int) + res = tmp + case *int: + tmp := a.(*int) + res = tea.IntValue(tmp) + default: + return nil, errors.New(fmt.Sprintf("%v is not a int", a)) + } + + return tea.Int(res), nil +} + +/** + * Assert a value, if it is a integer, return it, otherwise throws + * @return the integer value + */ +func AssertAsInteger(value interface{}) (_result *int, _err error) { + res := 0 + switch value.(type) { + case int: + tmp := value.(int) + res = tmp + case *int: + tmp := value.(*int) + res = tea.IntValue(tmp) + default: + return nil, errors.New(fmt.Sprintf("%v is not a int", value)) + } + + return tea.Int(res), nil +} + +func AssertAsBoolean(a interface{}) (_result *bool, _err error) { + res := false + switch a.(type) { + case bool: + tmp := a.(bool) + res = tmp + case *bool: + tmp := a.(*bool) + res = tea.BoolValue(tmp) + default: + return nil, errors.New(fmt.Sprintf("%v is not a bool", a)) + } + + return tea.Bool(res), nil +} + +func AssertAsString(a interface{}) (_result *string, _err error) { + res := "" + switch a.(type) { + case string: + tmp := a.(string) + res = tmp + case *string: + tmp := a.(*string) + res = tea.StringValue(tmp) + default: + return nil, errors.New(fmt.Sprintf("%v is not a string", a)) + } + + return tea.String(res), nil +} + +func AssertAsBytes(a interface{}) (_result []byte, _err error) { + res, ok := a.([]byte) + if !ok { + return nil, errors.New(fmt.Sprintf("%v is not a []byte", a)) + } + return res, nil +} + +func AssertAsReadable(a interface{}) 
(_result io.Reader, _err error) { + res, ok := a.(io.Reader) + if !ok { + return nil, errors.New(fmt.Sprintf("%v is not a reader", a)) + } + return res, nil +} + +func AssertAsArray(a interface{}) (_result []interface{}, _err error) { + r := reflect.ValueOf(a) + if r.Kind().String() != "array" && r.Kind().String() != "slice" { + return nil, errors.New(fmt.Sprintf("%v is not a []interface{}", a)) + } + aLen := r.Len() + res := make([]interface{}, 0) + for i := 0; i < aLen; i++ { + res = append(res, r.Index(i).Interface()) + } + return res, nil +} + +func ParseJSON(a *string) interface{} { + mapTmp := make(map[string]interface{}) + d := json.NewDecoder(bytes.NewReader([]byte(tea.StringValue(a)))) + d.UseNumber() + err := d.Decode(&mapTmp) + if err == nil { + return mapTmp + } + + sliceTmp := make([]interface{}, 0) + d = json.NewDecoder(bytes.NewReader([]byte(tea.StringValue(a)))) + d.UseNumber() + err = d.Decode(&sliceTmp) + if err == nil { + return sliceTmp + } + + if num, err := strconv.Atoi(tea.StringValue(a)); err == nil { + return num + } + + if ok, err := strconv.ParseBool(tea.StringValue(a)); err == nil { + return ok + } + + if floa64tVal, err := strconv.ParseFloat(tea.StringValue(a), 64); err == nil { + return floa64tVal + } + return nil +} + +func ToString(a []byte) *string { + return tea.String(string(a)) +} + +func ToMap(in interface{}) map[string]interface{} { + if in == nil { + return nil + } + res := tea.ToMap(in) + return res +} + +func ToFormString(a map[string]interface{}) *string { + if a == nil { + return tea.String("") + } + res := "" + urlEncoder := url.Values{} + for key, value := range a { + v := fmt.Sprintf("%v", value) + urlEncoder.Add(key, v) + } + res = urlEncoder.Encode() + return tea.String(res) +} + +func GetDateUTCString() *string { + return tea.String(time.Now().UTC().Format(http.TimeFormat)) +} + +func GetUserAgent(userAgent *string) *string { + if userAgent != nil && tea.StringValue(userAgent) != "" { + return 
tea.String(defaultUserAgent + " " + tea.StringValue(userAgent)) + } + return tea.String(defaultUserAgent) +} + +func Is2xx(code *int) *bool { + tmp := tea.IntValue(code) + return tea.Bool(tmp >= 200 && tmp < 300) +} + +func Is3xx(code *int) *bool { + tmp := tea.IntValue(code) + return tea.Bool(tmp >= 300 && tmp < 400) +} + +func Is4xx(code *int) *bool { + tmp := tea.IntValue(code) + return tea.Bool(tmp >= 400 && tmp < 500) +} + +func Is5xx(code *int) *bool { + tmp := tea.IntValue(code) + return tea.Bool(tmp >= 500 && tmp < 600) +} + +func Sleep(millisecond *int) error { + ms := tea.IntValue(millisecond) + time.Sleep(time.Duration(ms) * time.Millisecond) + return nil +} + +func ToArray(in interface{}) []map[string]interface{} { + if tea.BoolValue(IsUnset(in)) { + return nil + } + + tmp := make([]map[string]interface{}, 0) + byt, _ := json.Marshal(in) + d := json.NewDecoder(bytes.NewReader(byt)) + d.UseNumber() + err := d.Decode(&tmp) + if err != nil { + return nil + } + return tmp +} + +func ReadAsSSE(body io.ReadCloser) (<-chan SSEEvent, <-chan error) { + eventChannel := make(chan SSEEvent) + errorChannel := make(chan error) + + go func() { + defer body.Close() + defer close(eventChannel) + + reader := bufio.NewReader(body) + var eventLines []string + + for { + line, err := reader.ReadString('\n') + if err == io.EOF { + break + } + if err != nil { + errorChannel <- err + } + + line = strings.TrimRight(line, "\n") + if line == "" { + if len(eventLines) > 0 { + event, err := parseEvent(eventLines) + if err != nil { + errorChannel <- err + } + eventChannel <- event + eventLines = []string{} + } + continue + } + eventLines = append(eventLines, line) + } + }() + return eventChannel, errorChannel +} + +func GetHostName() *string { + hostname, err := os.Hostname() + if err != nil { + return tea.String("") + } + return tea.String(hostname) +} diff --git a/vendor/github.com/alibabacloud-go/tea/tea/json_parser.go b/vendor/github.com/alibabacloud-go/tea/tea/json_parser.go 
index b3f202243d..a61df8da4c 100644 --- a/vendor/github.com/alibabacloud-go/tea/tea/json_parser.go +++ b/vendor/github.com/alibabacloud-go/tea/tea/json_parser.go @@ -13,12 +13,12 @@ import ( "github.com/modern-go/reflect2" ) +var jsonParser jsoniter.API + const maxUint = ^uint(0) const maxInt = int(maxUint >> 1) const minInt = -maxInt - 1 -var jsonParser jsoniter.API - func init() { jsonParser = jsoniter.Config{ EscapeHTML: true, diff --git a/vendor/github.com/alibabacloud-go/tea/tea/tea.go b/vendor/github.com/alibabacloud-go/tea/tea/tea.go index c984caf821..30e06e1650 100644 --- a/vendor/github.com/alibabacloud-go/tea/tea/tea.go +++ b/vendor/github.com/alibabacloud-go/tea/tea/tea.go @@ -31,7 +31,28 @@ import ( var debugLog = debug.Init("tea") -var hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) { +type HttpRequest interface { +} + +type HttpResponse interface { +} + +type HttpClient interface { + Call(request *http.Request, transport *http.Transport) (response *http.Response, err error) +} + +type teaClient struct { + sync.Mutex + httpClient *http.Client + ifInit bool +} + +func (client *teaClient) Call(request *http.Request, transport *http.Transport) (response *http.Response, err error) { + response, err = client.httpClient.Do(request) + return +} + +var hookDo = func(fn func(req *http.Request, transport *http.Transport) (*http.Response, error)) func(req *http.Request, transport *http.Transport) (*http.Response, error) { return fn } @@ -97,12 +118,7 @@ type RuntimeObject struct { Listener utils.ProgressListener `json:"listener" xml:"listener"` Tracker *utils.ReaderTracker `json:"tracker" xml:"tracker"` Logger *utils.Logger `json:"logger" xml:"logger"` -} - -type teaClient struct { - sync.Mutex - httpClient *http.Client - ifInit bool + HttpClient } var clientPool = &sync.Map{} @@ -143,6 +159,9 @@ func NewRuntimeObject(runtime map[string]interface{}) *RuntimeObject { if runtime["logger"] != nil { 
runtimeObject.Logger = runtime["logger"].(*utils.Logger) } + if runtime["httpClient"] != nil { + runtimeObject.HttpClient = runtime["httpClient"].(HttpClient) + } return runtimeObject } @@ -218,8 +237,11 @@ func NewSDKError(obj map[string]interface{}) *SDKError { } } } - byt, _ := json.Marshal(data) - err.Data = String(string(byt)) + byt := bytes.NewBuffer([]byte{}) + jsonEncoder := json.NewEncoder(byt) + jsonEncoder.SetEscapeHTML(false) + jsonEncoder.Encode(data) + err.Data = String(string(bytes.TrimSpace(byt.Bytes()))) } if statusCode, ok := obj["statusCode"].(int); ok { @@ -348,18 +370,27 @@ func DoRequest(request *Request, requestRuntime map[string]interface{}) (respons } httpRequest.Host = StringValue(request.Domain) - client := getTeaClient(runtimeObject.getClientTag(StringValue(request.Domain))) - client.Lock() - if !client.ifInit { - trans, err := getHttpTransport(request, runtimeObject) - if err != nil { - return nil, err + var client HttpClient + if runtimeObject.HttpClient == nil { + client = getTeaClient(runtimeObject.getClientTag(StringValue(request.Domain))) + } else { + client = runtimeObject.HttpClient + } + + trans, err := getHttpTransport(request, runtimeObject) + if err != nil { + return + } + if defaultClient, ok := client.(*teaClient); ok { + defaultClient.Lock() + if !defaultClient.ifInit || defaultClient.httpClient.Transport == nil { + defaultClient.httpClient.Transport = trans } - client.httpClient.Timeout = time.Duration(IntValue(runtimeObject.ReadTimeout)) * time.Millisecond - client.httpClient.Transport = trans - client.ifInit = true + defaultClient.httpClient.Timeout = time.Duration(IntValue(runtimeObject.ConnectTimeout)+IntValue(runtimeObject.ReadTimeout)) * time.Millisecond + defaultClient.ifInit = true + defaultClient.Unlock() } - client.Unlock() + for key, value := range request.Headers { if value == nil || key == "content-length" { continue @@ -381,7 +412,7 @@ func DoRequest(request *Request, requestRuntime map[string]interface{}) 
(respons putMsgToMap(fieldMap, httpRequest) startTime := time.Now() fieldMap["{start_time}"] = startTime.Format("2006-01-02 15:04:05") - res, err := hookDo(client.httpClient.Do)(httpRequest) + res, err := hookDo(client.Call)(httpRequest, trans) fieldMap["{cost}"] = time.Since(startTime).String() completedBytes := int64(0) if runtimeObject.Tracker != nil { @@ -411,6 +442,7 @@ func DoRequest(request *Request, requestRuntime map[string]interface{}) (respons func getHttpTransport(req *Request, runtime *RuntimeObject) (*http.Transport, error) { trans := new(http.Transport) + trans.ResponseHeaderTimeout = time.Duration(IntValue(runtime.ReadTimeout)) * time.Millisecond httpProxy, err := getHttpProxy(StringValue(req.Protocol), StringValue(req.Domain), runtime) if err != nil { return nil, err @@ -464,7 +496,7 @@ func getHttpTransport(req *Request, runtime *RuntimeObject) (*http.Transport, er Password: password, } } - dialer, err := proxy.SOCKS5(strings.ToLower(StringValue(runtime.Socks5NetWork)), socks5Proxy.String(), auth, + dialer, err := proxy.SOCKS5(strings.ToLower(StringValue(runtime.Socks5NetWork)), socks5Proxy.Host, auth, &net.Dialer{ Timeout: time.Duration(IntValue(runtime.ConnectTimeout)) * time.Millisecond, DualStack: true, @@ -571,7 +603,7 @@ func getSocks5Proxy(runtime *RuntimeObject) (proxy *url.URL, err error) { func getLocalAddr(localAddr string) (addr *net.TCPAddr) { if localAddr != "" { addr = &net.TCPAddr{ - IP: []byte(localAddr), + IP: net.ParseIP(localAddr), } } return addr @@ -579,20 +611,18 @@ func getLocalAddr(localAddr string) (addr *net.TCPAddr) { func setDialContext(runtime *RuntimeObject) func(cxt context.Context, net, addr string) (c net.Conn, err error) { return func(ctx context.Context, network, address string) (net.Conn, error) { + timeout := time.Duration(IntValue(runtime.ConnectTimeout)) * time.Millisecond + dialer := &net.Dialer{ + Timeout: timeout, + Resolver: &net.Resolver{ + PreferGo: false, + }, + DualStack: true, + } if 
runtime.LocalAddr != nil && StringValue(runtime.LocalAddr) != "" { - netAddr := &net.TCPAddr{ - IP: []byte(StringValue(runtime.LocalAddr)), - } - return (&net.Dialer{ - Timeout: time.Duration(IntValue(runtime.ConnectTimeout)) * time.Second, - DualStack: true, - LocalAddr: netAddr, - }).DialContext(ctx, network, address) + dialer.LocalAddr = getLocalAddr(StringValue(runtime.LocalAddr)) } - return (&net.Dialer{ - Timeout: time.Duration(IntValue(runtime.ConnectTimeout)) * time.Second, - DualStack: true, - }).DialContext(ctx, network, address) + return dialer.DialContext(ctx, network, address) } } @@ -1162,6 +1192,11 @@ func ToInt(a *int32) *int { return Int(int(Int32Value(a))) } +func ForceInt(a interface{}) int { + num, _ := a.(int) + return num +} + func ToInt32(a *int) *int32 { return Int32(int32(IntValue(a))) } diff --git a/vendor/github.com/alibabacloud-go/tea/utils/logger.go b/vendor/github.com/alibabacloud-go/tea/utils/logger.go index 0513668876..b15560958d 100644 --- a/vendor/github.com/alibabacloud-go/tea/utils/logger.go +++ b/vendor/github.com/alibabacloud-go/tea/utils/logger.go @@ -7,6 +7,10 @@ import ( "time" ) +var defaultLoggerTemplate = `{time} {channel}: "{method} {uri} HTTP/{version}" {code} {cost} {hostname}` +var loggerParam = []string{"{time}", "{start_time}", "{ts}", "{channel}", "{pid}", "{host}", "{method}", "{uri}", "{version}", "{target}", "{hostname}", "{code}", "{error}", "{req_headers}", "{res_body}", "{res_headers}", "{cost}"} +var logChannel string + type Logger struct { *log.Logger formatTemplate string @@ -14,10 +18,6 @@ type Logger struct { lastLogMsg string } -var defaultLoggerTemplate = `{time} {channel}: "{method} {uri} HTTP/{version}" {code} {cost} {hostname}` -var loggerParam = []string{"{time}", "{start_time}", "{ts}", "{channel}", "{pid}", "{host}", "{method}", "{uri}", "{version}", "{target}", "{hostname}", "{code}", "{error}", "{req_headers}", "{res_body}", "{res_headers}", "{cost}"} -var logChannel string - func 
InitLogMsg(fieldMap map[string]string) { for _, value := range loggerParam { fieldMap[value] = "" diff --git a/vendor/github.com/aliyun/credentials-go/.gitignore b/vendor/github.com/aliyun/credentials-go/.gitignore new file mode 100644 index 0000000000..3a7a58492e --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +coverage/ +coverage.txt +.idea \ No newline at end of file diff --git a/vendor/github.com/aliyun/credentials-go/.scrutinizer.yml b/vendor/github.com/aliyun/credentials-go/.scrutinizer.yml new file mode 100644 index 0000000000..5f1560f802 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/.scrutinizer.yml @@ -0,0 +1,13 @@ +build: + nodes: + analysis: + tests: + override: + - go-scrutinizer-run +filter: + excluded_paths: + - integration/ + dependency_paths: + - vendor/ +tools: + external_code_coverage: true diff --git a/vendor/github.com/aliyun/credentials-go/CONTRIBUTING.md b/vendor/github.com/aliyun/credentials-go/CONTRIBUTING.md new file mode 100644 index 0000000000..6c85e1e5dc --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/CONTRIBUTING.md @@ -0,0 +1,14 @@ +# Contributing to the Alibaba Cloud Credentials for Go + +We work hard to provide a high-quality and useful Credential for Alibaba Cloud, and we greatly value feedback and contributions from our community. Please submit your [issues][issues] or [pull requests][pull-requests] through GitHub. + +## Tips + +- The Credential is released under the [Apache license][license]. Any code you submit will be released under that license. For substantial contributions, we may ask you to sign a [Alibaba Documentation Corporate Contributor License Agreement (CLA)][cla]. +- We maintain a high percentage of code coverage in our unit tests. If you make changes to the code, please add, update, and/or remove tests as appropriate. 
+- If your code does not conform to the standards, does not include adequate tests, we may ask you to update your pull requests before we accept them. We also reserve the right to deny any pull requests that do not align with our standards or goals. + +[issues]: https://github.com/aliyun/credentials-go/issues +[pull-requests]: https://github.com/aliyun/credentials-go/pulls +[license]: http://www.apache.org/licenses/LICENSE-2.0 +[cla]: https://alibaba-cla-2018.oss-cn-beijing.aliyuncs.com/Alibaba_Documentation_Open_Source_Corporate_CLA.pdf diff --git a/vendor/github.com/aliyun/credentials-go/README-CN.md b/vendor/github.com/aliyun/credentials-go/README-CN.md new file mode 100644 index 0000000000..06a91c59b5 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/README-CN.md @@ -0,0 +1,492 @@ +[English](README.md) | 简体中文 + +# Alibaba Cloud Credentials for Go + +[![Latest Stable Version](https://badge.fury.io/gh/aliyun%2Fcredentials-go.svg)](https://badge.fury.io/gh/aliyun%2Fcredentials-go) +[![Go](https://github.com/aliyun/credentials-go/actions/workflows/go.yml/badge.svg)](https://github.com/aliyun/credentials-go/actions/workflows/go.yml) +[![codecov](https://codecov.io/gh/aliyun/credentials-go/branch/master/graph/badge.svg)](https://codecov.io/gh/aliyun/credentials-go) +[![License](https://poser.pugx.org/alibabacloud/credentials/license)](https://packagist.org/packages/alibabacloud/credentials) +[![Go Report Card](https://goreportcard.com/badge/github.com/aliyun/credentials-go)](https://goreportcard.com/report/github.com/aliyun/credentials-go) +[![Scrutinizer Code Quality](https://scrutinizer-ci.com/g/aliyun/credentials-go/badges/quality-score.png?b=master)](https://scrutinizer-ci.com/g/aliyun/credentials-go/?branch=master) + +![Alibaba Cloud Logo](https://aliyunsdk-pages.alicdn.com/icons/AlibabaCloud.svg) + +Alibaba Cloud Credentials for Go 是帮助 GO 开发者管理凭据的工具。 + +本文将介绍如何获取和使用 Alibaba Cloud Credentials for Go。 + +## 要求 + +- 请确保你的系统安装了 1.12.x 或更新版本的 Go 环境。 + +## 
安装 + +使用 `go get` 下载安装 + +```sh +go get -u github.com/aliyun/credentials-go +``` + +## 快速使用 + +在您开始之前,您需要注册阿里云帐户并获取您的[凭证](https://usercenter.console.aliyun.com/#/manage/ak)。 + +### 凭证类型 + +#### 使用默认凭据链 +当您在初始化凭据客户端不传入任何参数时,Credentials工具会使用默认凭据链方式初始化客户端。默认凭据的读取逻辑请参见[默认凭据链](#默认凭证提供程序链)。 + +```go +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main(){ + provider, err := credentials.NewCredential(nil) + if err != nil { + return + } + credential, err := provider.GetCredential() + if err != nil { + return + } + accessKeyId := credential.AccessKeyId + accessSecret := credential.AccessKeySecret + securityToken := credential.SecurityToken + credentialType := credential.Type + fmt.Println(accessKeyId, accessKeySecret, securityToken, credentialType) +} +``` + +#### AccessKey + +通过[用户信息管理][ak]设置 access_key,它们具有该账户完全的权限,请妥善保管。有时出于安全考虑,您不能把具有完全访问权限的主账户 AccessKey 交于一个项目的开发者使用,您可以[创建RAM子账户][ram]并为子账户[授权][permissions],使用RAM子用户的 AccessKey 来进行API调用。 + +```go +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main(){ + config := new(credentials.Config). + // 设置凭证类型 + SetType("access_key"). + // 用户 AccessKey Id + SetAccessKeyId("AccessKeyId"). + // 用户 AccessKey Secret + SetAccessKeySecret("AccessKeySecret") + + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + credential, err := provider.GetCredential() + if err != nil { + return + } + accessKeyId := credential.AccessKeyId + accessSecret := credential.AccessKeySecret + credentialType := credential.Type + fmt.Println(accessKeyId, accessSecret, credentialType) +} +``` + +#### STS + +通过安全令牌服务(Security Token Service,简称 STS),申请临时安全凭证(Temporary Security Credentials,简称 TSC),创建临时安全凭证。 + +```go +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main() { + config := new(credentials.Config). + // 设置凭证类型 + SetType("sts"). + // 临时用户 AccessKey Id + SetAccessKeyId("AccessKeyId"). 
+ // 临时用户 AccessKey Secret + SetAccessKeySecret("AccessKeySecret"). + // 临时用户 Security Token + SetSecurityToken("SecurityToken") + + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + + credential, err := provider.GetCredential() + if err != nil { + return + } + accessKeyId := credential.AccessKeyId + accessSecret := credential.AccessKeySecret + securityToken := credential.SecurityToken + credentialType := credential.Type + fmt.Println(accessKeyId, accessSecret, securityToken, credentialType) +} +``` + +#### AssumeRoleWithOIDC + +在容器服务 Kubernetes 版中设置了Worker节点RAM角色后,对应节点内的Pod中的应用也就可以像ECS上部署的应用一样,通过元数据服务(Meta Data Server)获取关联角色的STS Token。但如果容器集群上部署的是不可信的应用(比如部署您的客户提交的应用,代码也没有对您开放),您可能并不希望它们能通过元数据服务获取Worker节点关联实例RAM角色的STS Token。为了避免影响云上资源的安全,同时又能让这些不可信的应用安全地获取所需的 STS Token,实现应用级别的权限最小化,您可以使用RRSA(RAM Roles for Service Account)功能。阿里云容器集群会为不同的应用Pod创建和挂载相应的服务账户OIDC Token文件,并将相关配置信息注入到环境变量中,Credentials工具通过获取环境变量的配置信息,调用STS服务的AssumeRoleWithOIDC - OIDC角色SSO时获取扮演角色的临时身份凭证接口换取绑定角色的STS Token。详情请参见[通过RRSA配置ServiceAccount的RAM权限实现Pod权限隔离](https://help.aliyun.com/zh/ack/ack-managed-and-ack-dedicated/user-guide/use-rrsa-to-authorize-pods-to-access-different-cloud-services#task-2142941)。 + +``` go +package main + +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main() { + config := new(credentials.Config). + // 设置凭证类型 + SetType("oidc_role_arn"). + // OIDC提供商ARN,可以通过环境变量ALIBABA_CLOUD_OIDC_PROVIDER_ARN设置OidcProviderArn + SetOIDCProviderArn("OIDCProviderArn"). + // OIDC Token文件路径,可以通过环境变量ALIBABA_CLOUD_OIDC_TOKEN_FILE设置OidcTokenFilePath + SetOIDCTokenFilePath("OIDCTokenFilePath"). + // RAM角色名称ARN,可以通过环境变量ALIBABA_CLOUD_ROLE_ARN设置RoleArn + SetRoleArn("RoleArn"). + // 角色会话名称,可以通过环境变量ALIBABA_CLOUD_ROLE_SESSION_NAME设置RoleSessionName + SetRoleSessionName("RoleSessionName"). + // 设置更小的权限策略,非必填。示例值:{"Statement": [{"Action": ["*"],"Effect": "Allow","Resource": ["*"]}],"Version":"1"} + SetPolicy("Policy"). 
+ // 设置session过期时间,非必填。 + SetRoleSessionExpiration(3600). + // 非必填,默认为sts.aliyuncs.com,建议使用Region化的STS域名,选择地理位置更接近的Region可以保证网络连通性,Region对应的域名请参考:https://api.aliyun.com/product/Sts + SetSTSEndpoint("sts.cn-hangzhou.aliyuncs.com") + + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + + credential, err := provider.GetCredential() + if err != nil { + return + } + accessKeyId := credential.AccessKeyId + accessSecret := credential.AccessKeySecret + securityToken := credential.SecurityToken + credentialType := credential.Type + + fmt.Println(accessKeyId, accessKeySecret, securityToken, credentialType) +} +``` + +#### RamRoleArn + +通过指定[RAM角色][RAM Role],让凭证自动申请维护 STS Token。你可以通过为 `Policy` 赋值来限制获取到的 STS Token 的权限。 + +```go +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main(){ + config := new(credentials.Config). + // 设置凭证类型 + SetType("ram_role_arn"). + // 用户 AccessKey Id + SetAccessKeyId("AccessKeyId"). + // 用户 AccessKey Secret + SetAccessKeySecret("AccessKeySecret"). + // 要扮演的RAM角色ARN,示例值:acs:ram::123456789012****:role/adminrole,可以通过环境变量ALIBABA_CLOUD_ROLE_ARN设置RoleArn + SetRoleArn("RoleArn"). + // 角色会话名称,可以通过环境变量ALIBABA_CLOUD_ROLE_SESSION_NAME设置RoleSessionName + SetRoleSessionName("RoleSessionName"). + // 设置更小的权限策略,非必填。示例值:{"Statement": [{"Action": ["*"],"Effect": "Allow","Resource": ["*"]}],"Version":"1"} + SetPolicy("Policy"). + // 设置session过期时间,非必填。 + SetRoleSessionExpiration(3600). + // 非必填,角色外部 ID,该参数为外部提供的用于表示角色的参数信息,主要功能是防止混淆代理人问题。更多信息请参考:https://help.aliyun.com/zh/ram/use-cases/use-externalid-to-prevent-the-confused-deputy-problem + SetExternalId("ExternalId"). 
+ // 非必填,默认为sts.aliyuncs.com,建议使用Region化的STS域名,选择地理位置更接近的Region可以保证网络连通性,Region对应的域名请参考:https://api.aliyun.com/product/Sts + SetSTSEndpoint("sts.cn-hangzhou.aliyuncs.com") + + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + credential, err := provider.GetCredential() + if err != nil { + return + } + accessKeyId := credential.AccessKeyId + accessSecret := credential.AccessKeySecret + securityToken := credential.SecurityToken + credentialType := credential.Type + + fmt.Println(accessKeyId, accessKeySecret, securityToken, credentialType) +} +``` + +#### Credentials URI + +通过指定提供凭证的自定义网络服务地址,让凭证自动申请维护 STS Token。 + +```go +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main(){ + config := new(credentials.Config). + // 设置凭证类型 + SetType("credentials_uri"). + // 凭证的 URI,格式为http://local_or_remote_uri/,可以通过环境变量ALIBABA_CLOUD_CREDENTIALS_URI设置CredentialsUri + SetURL("http://127.0.0.1") + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + + credential, err := provider.GetCredential() + if err != nil { + return + } + + accessKeyId := credential.AccessKeyId + accessSecret := credential.AccessKeySecret + securityToken := credential.SecurityToken + credentialType := credential.Type + + fmt.Println(accessKeyId, accessKeySecret, securityToken, credentialType) +} +``` + +#### EcsRamRole + +Credentials工具会自动获取ECS实例绑定的RAM角色,调用ECS的元数据服务(Meta Data Server)换取STS Token,完成凭据客户端初始化。ECI实例,容器服务 Kubernetes 版的Worker节点均支持绑定实例RAM角色。 + +```go +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main(){ + config := new(credentials.Config). + // 设置凭证类型 + SetType("ecs_ram_role"). + // 选填,该ECS角色的角色名称,不填会自动获取,但是建议加上以减少请求次数,可以通过环境变量 ALIBABA_CLOUD_ECS_METADATA 设置 RoleName + SetRoleName("RoleName"). 
+ // 选填,推荐设置为 true,关闭 IMDS V1 的兜底能力,默认使用 IMDS V2(安全加固)。也可以通过环境变量 ALIBABA_CLOUD_IMDSV1_DISABLED 设置 + SetDisableIMDSv1(true) + + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + + credential, err := provider.GetCredential() + if err != nil { + return + } + accessKeyId := credential.AccessKeyId + accessSecret := credential.AccessKeySecret + securityToken := credential.SecurityToken + credentialType := credential.Type + + fmt.Println(accessKeyId, accessKeySecret, securityToken, credentialType) +} +``` + +#### Bearer Token + +目前只有云呼叫中心 CCC 这款产品支持 Bearer Token 的凭据初始化方式。 + +```go +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main(){ + config := new(credentials.Config). + // 设置凭证类型 + SetType("bearer"). + // 填入您的Bearer Token + SetBearerToken("BearerToken") + + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + + credential, err := provider.GetCredential() + if err != nil { + return + } + + bearerToken := credential.BearerToken + credentialType := credential.Type + fmt.Println(bearerToken, credentialType) +} +``` + +### 凭证提供程序链 + +当开发环境与生产环境使用的凭据类型不一致时,常见做法是在代码中获取当前环境信息,编写获取不同凭据的分支代码。借助Credentials工具的默认凭据链,您可以用同一套代码,通过程序之外的配置来控制不同环境下的凭据获取方式。当您使用 `NewCredential()` 初始化凭据客户端,且不传入任何参数时,阿里云SDK将会尝试按照如下顺序查找相关凭据信息。 + +#### 1. 使用环境变量 + +Credentials工具会优先在环境变量中获取凭据信息。 + +- 如果系统环境变量 `ALIBABA_CLOUD_ACCESS_KEY_ID`(密钥Key) 和 `ALIBABA_CLOUD_ACCESS_KEY_SECRET`(密钥Value) 不为空,Credentials工具会优先使用它们作为默认凭据。 + +- 如果系统环境变量 `ALIBABA_CLOUD_ACCESS_KEY_ID`(密钥Key)、`ALIBABA_CLOUD_ACCESS_KEY_SECRET`(密钥Value)、`ALIBABA_CLOUD_SECURITY_TOKEN`(Token)均不为空,Credentials工具会优先使用STS Token作为默认凭据。 + +### 2. 
使用OIDC RAM角色 +若不存在优先级更高的凭据信息,Credentials工具会在环境变量中获取如下内容: + +`ALIBABA_CLOUD_ROLE_ARN`:RAM角色名称ARN; + +`ALIBABA_CLOUD_OIDC_PROVIDER_ARN`:OIDC提供商ARN; + +`ALIBABA_CLOUD_OIDC_TOKEN_FILE`:OIDC Token文件路径; + +若以上三个环境变量都已设置内容,Credentials将会使用变量内容调用STS服务的[AssumeRoleWithOIDC - OIDC角色SSO时获取扮演角色的临时身份凭证](https://help.aliyun.com/zh/ram/developer-reference/api-sts-2015-04-01-assumerolewithoidc)接口换取STS Token作为默认凭据。 + +### 3. 使用 Aliyun CLI 工具的 config.json 配置文件 + +若不存在优先级更高的凭据信息,Credentials工具会优先在如下位置查找 `config.json` 文件是否存在: +Linux系统:`~/.aliyun/config.json` +Windows系统: `C:\Users\USER_NAME\.aliyun\config.json` +如果文件存在,程序将会使用配置文件中 `current` 指定的凭据信息初始化凭据客户端。当然,您也可以通过环境变量 `ALIBABA_CLOUD_PROFILE` 来指定凭据信息,例如设置 `ALIBABA_CLOUD_PROFILE` 的值为 `AK`。 + +在config.json配置文件中每个module的值代表了不同的凭据信息获取方式: + +- AK:使用用户的Access Key作为凭据信息; +- RamRoleArn:使用RAM角色的ARN来获取凭据信息; +- EcsRamRole:利用ECS绑定的RAM角色来获取凭据信息; +- OIDC:通过OIDC ARN和OIDC Token来获取凭据信息; +- ChainableRamRoleArn:采用角色链的方式,通过指定JSON文件中的其他凭据,以重新获取新的凭据信息。 + +配置示例信息如下: + +```json +{ + "current": "AK", + "profiles": [ + { + "name": "AK", + "mode": "AK", + "access_key_id": "access_key_id", + "access_key_secret": "access_key_secret" + }, + { + "name": "RamRoleArn", + "mode": "RamRoleArn", + "access_key_id": "access_key_id", + "access_key_secret": "access_key_secret", + "ram_role_arn": "ram_role_arn", + "ram_session_name": "ram_session_name", + "expired_seconds": 3600, + "sts_region": "cn-hangzhou" + }, + { + "name": "EcsRamRole", + "mode": "EcsRamRole", + "ram_role_name": "ram_role_name" + }, + { + "name": "OIDC", + "mode": "OIDC", + "ram_role_arn": "ram_role_arn", + "oidc_token_file": "path/to/oidc/file", + "oidc_provider_arn": "oidc_provider_arn", + "ram_session_name": "ram_session_name", + "expired_seconds": 3600, + "sts_region": "cn-hangzhou" + }, + { + "name": "ChainableRamRoleArn", + "mode": "ChainableRamRoleArn", + "source_profile": "AK", + "ram_role_arn": "ram_role_arn", + "ram_session_name": "ram_session_name", + "expired_seconds": 3600, + "sts_region": 
"cn-hangzhou" + } + ] +} +``` + +### 4. 使用配置文件 +> +> 如果用户主目录存在默认文件 `~/.alibabacloud/credentials` (Windows 为 `C:\Users\USER_NAME\.alibabacloud\credentials`),程序会自动创建指定类型和名称的凭证。您也可通过环境变量 `ALIBABA_CLOUD_CREDENTIALS_FILE` 指定配置文件路径。如果文件存在,程序将会使用配置文件中 default 指定的凭据信息初始化凭据客户端。当然,您也可以通过环境变量 `ALIBABA_CLOUD_PROFILE` 来指定凭据信息,例如设置 `ALIBABA_CLOUD_PROFILE` 的值为 `client1`。 + +配置示例信息如下: + +```ini +[default] +type = access_key # 认证方式为 access_key +access_key_id = foo # Key +access_key_secret = bar # Secret + +[project1] +type = ecs_ram_role # 认证方式为 ecs_ram_role +role_name = EcsRamRoleTest # Role Name,非必填,不填则自动获取,建议设置,可以减少网络请求。 + +[project2] +type = ram_role_arn # 认证方式为 ram_role_arn +access_key_id = foo +access_key_secret = bar +role_arn = role_arn +role_session_name = session_name + +[project3] +type=oidc_role_arn # 认证方式为 oidc_role_arn +oidc_provider_arn=oidc_provider_arn +oidc_token_file_path=oidc_token_file_path +role_arn=role_arn +role_session_name=session_name +``` + +### 5. 使用 ECS 实例RAM角色 + +如果定义了环境变量 `ALIBABA_CLOUD_ECS_METADATA` 且不为空,程序会将该环境变量的值作为角色名称,请求 `http://100.100.100.200/latest/meta-data/ram/security-credentials/` 获取临时安全凭证作为默认凭证。 + +### 6. 使用外部服务 Credentials URI + +若不存在优先级更高的凭据信息,Credentials工具会在环境变量中获取ALIBABA_CLOUD_CREDENTIALS_URI,若存在,程序将请求该URI地址,获取临时安全凭证作为默认凭据信息。 + +外部服务响应结构应如下: + +```json +{ + "Code": "Success", + "AccessKeyId": "AccessKeyId", + "AccessKeySecret": "AccessKeySecret", + "SecurityToken": "SecurityToken", + "Expiration": "2024-10-26T03:46:38Z" +} +``` + +## 许可证 + +[Apache-2.0](/LICENSE) + +Copyright (c) 2009-present, Alibaba Cloud All rights reserved. 
+ +[ak]: https://usercenter.console.aliyun.com/#/manage/ak +[ram]: https://ram.console.aliyun.com/users +[permissions]: https://ram.console.aliyun.com/permissions +[RAM Role]: https://ram.console.aliyun.com/#/role/list diff --git a/vendor/github.com/aliyun/credentials-go/README.md b/vendor/github.com/aliyun/credentials-go/README.md new file mode 100644 index 0000000000..2395a227c8 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/README.md @@ -0,0 +1,503 @@ +English | [简体中文](README-CN.md) + +# Alibaba Cloud Credentials for Go + +[![Latest Stable Version](https://badge.fury.io/gh/aliyun%2Fcredentials-go.svg)](https://badge.fury.io/gh/aliyun%2Fcredentials-go) +[![Go](https://github.com/aliyun/credentials-go/actions/workflows/go.yml/badge.svg)](https://github.com/aliyun/credentials-go/actions/workflows/go.yml) +[![codecov](https://codecov.io/gh/aliyun/credentials-go/branch/master/graph/badge.svg)](https://codecov.io/gh/aliyun/credentials-go) +[![License](https://poser.pugx.org/alibabacloud/credentials/license)](https://packagist.org/packages/alibabacloud/credentials) +[![Go Report Card](https://goreportcard.com/badge/github.com/aliyun/credentials-go)](https://goreportcard.com/report/github.com/aliyun/credentials-go) +[![Scrutinizer Code Quality](https://scrutinizer-ci.com/g/aliyun/credentials-go/badges/quality-score.png?b=master)](https://scrutinizer-ci.com/g/aliyun/credentials-go/?branch=master) + +![Alibaba Cloud Logo](https://aliyunsdk-pages.alicdn.com/icons/AlibabaCloud.svg) + +Alibaba Cloud Credentials for Go is a tool for Go developers to manage credentials. + +This document introduces how to obtain and use Alibaba Cloud Credentials for Go. + +## Requirements + +- It's necessary for you to make sure your system has installed a Go environment which is 1.12.x or newer. 
+ +## Installation + +Use `go get` to install SDK: + +```sh +go get -u github.com/aliyun/credentials-go +``` + +## Quick Examples + +Before you begin, you need to sign up for an Alibaba Cloud account and retrieve your [Credentials](https://usercenter.console.aliyun.com/#/manage/ak). + +### Credential Type + +#### Default credential provider chain + +If you do not specify a method to initialize a Credentials client, the default credential provider chain is used. For more information, see the Default credential provider chain section of this topic. + +```go +package main + +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main() { + provider, err := credentials.NewCredential(nil) + if err != nil { + return + } + credential, err := provider.GetCredential() + if err != nil { + return + } + accessKeyId := credential.AccessKeyId + accessSecret := credential.AccessKeySecret + securityToken := credential.SecurityToken + credentialType := credential.Type + fmt.Println(*accessKeyId, *accessSecret, *securityToken, *credentialType) +} +``` + +#### AccessKey + +Set up the access_key credential through [User Information Management][ak]; it has full authority over the account, so please keep it safe. Sometimes, for security reasons, you cannot hand over a primary account AccessKey with full access to the developer of a project. You may create a sub-account ([RAM Sub-account][ram]), grant it [authorization][permissions], and use the AccessKey of the RAM Sub-account. + +```go +package main + +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main() { + config := new(credentials.Config). + // Which type of credential you want + SetType("access_key"). + // AccessKeyId of your account + SetAccessKeyId("AccessKeyId"). 
+ // AccessKeySecret of your account + SetAccessKeySecret("AccessKeySecret") + + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + + credential, err := provider.GetCredential() + if err != nil { + return + } + + accessKeyId := credential.AccessKeyId + accessKeySecret := credential.AccessKeySecret + credentialType := credential.Type + fmt.Println(*accessKeyId, *accessKeySecret, *credentialType) +} +``` + +#### STS + +Create a temporary security credential by applying Temporary Security Credentials (TSC) through the Security Token Service (STS). + +```go +package main + +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main() { + config := new(credentials.Config). + // Which type of credential you want + SetType("sts"). + // AccessKeyId of your account + SetAccessKeyId("AccessKeyId"). + // AccessKeySecret of your account + SetAccessKeySecret("AccessKeySecret"). + // Temporary Security Token + SetSecurityToken("SecurityToken") + + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + + credential, err := provider.GetCredential() + if err != nil { + return + } + + accessKeyId := credential.AccessKeyId + accessKeySecret := credential.AccessKeySecret + securityToken := credential.SecurityToken + credentialType := credential.Type + + fmt.Println(*accessKeyId, *accessKeySecret, *securityToken, *credentialType) +} +``` + +#### AssumeRoleWithOIDC + +After you attach a RAM role to a worker node in an Container Service for Kubernetes, applications in the pods on the worker node can use the metadata server to obtain an STS token the same way in which applications on ECS instances do. However, if an untrusted application is deployed on the worker node, such as an application that is submitted by your customer and whose code is unavailable to you, you may not want the application to use the metadata server to obtain an STS token of the RAM role attached to the worker node. 
To ensure the security of cloud resources and enable untrusted applications to securely obtain required STS tokens, you can use the RAM Roles for Service Accounts (RRSA) feature to grant minimum necessary permissions to an application. In this case, the ACK cluster creates a service account OpenID Connect (OIDC) token file, associates the token file with a pod, and then injects relevant environment variables into the pod. Then, the Credentials tool uses the environment variables to call the AssumeRoleWithOIDC operation of STS and obtains an STS token of the RAM role. For more information about the RRSA feature, see [Use RRSA to authorize different pods to access different cloud services](https://www.alibabacloud.com/help/en/ack/ack-managed-and-ack-dedicated/user-guide/use-rrsa-to-authorize-pods-to-access-different-cloud-services#task-2142941). + +``` go +package main + +import ( + "fmt" + "net/http" + + "github.com/aliyun/credentials-go/credentials" +) + +func main() { + config := new(credentials.Config). + SetType("oidc_role_arn"). + // Specify the ARN of the OIDC IdP by specifying the ALIBABA_CLOUD_OIDC_PROVIDER_ARN environment variable. + SetOIDCProviderArn("OIDCProviderArn"). + // Specify the path of the OIDC token file by specifying the ALIBABA_CLOUD_OIDC_TOKEN_FILE environment variable. + SetOIDCTokenFilePath("OIDCTokenFilePath"). + // Specify the ARN of the RAM role by specifying the ALIBABA_CLOUD_ROLE_ARN environment variable. + SetRoleArn("RoleArn"). + // Specify the role session name by specifying the ALIBABA_CLOUD_ROLE_SESSION_NAME environment variable. + SetRoleSessionName("RoleSessionName"). + // Optional. Specify limited permissions for the RAM role. Example: {"Statement": [{"Action": ["*"],"Effect": "Allow","Resource": ["*"]}],"Version":"1"}. + SetPolicy("Policy"). + // Optional. Specify the validity period of the session. + SetRoleSessionExpiration(3600). + // Optional. The default value is sts.aliyuncs.com. 
It is recommended to use a regionalized STS domain name. Selecting a region that is geographically closer can ensure network connectivity. For the domain name corresponding to the region, please refer to: https://api.alibabacloud.com/product/Sts + SetSTSEndpoint("sts.cn-hangzhou.aliyuncs.com") + + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + + credential, err := provider.GetCredential() + if err != nil { + return + } + + accessKeyId := credential.AccessKeyId + accessKeySecret := credential.AccessKeySecret + securityToken := credential.SecurityToken + credentialType := credential.Type + + fmt.Println(accessKeyId, accessKeySecret, securityToken, credentialType) +} +``` + +#### RamRoleArn + +By specifying [RAM Role][RAM Role], the credential will be able to automatically request maintenance of STS Token. If you want to limit the permissions([How to make a policy][policy]) of STS Token, you can assign value for `Policy`. + +```go +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main(){ + config := new(credentials.Config). + // Which type of credential you want + SetType("ram_role_arn"). + // AccessKeyId of your account + SetAccessKeyId("AccessKeyId"). + // AccessKeySecret of your account + SetAccessKeySecret("AccessKeySecret"). + // Specify the ARN of the RAM role to be assumed. Example: acs:ram::123456789012****:role/adminrole. + SetRoleArn("RoleArn"). + // Specify the name of the role session. + SetRoleSessionName("RoleSessionName"). + // Optional. Specify limited permissions for the RAM role. Example: {"Statement": [{"Action": ["*"],"Effect": "Allow","Resource": ["*"]}],"Version":"1"}. + SetPolicy("Policy"). + // Optional. Specify the expiration of the session + SetRoleSessionExpiration(3600). + // Optional, role external ID, this parameter is the parameter information provided externally to represent the role, and its main function is to prevent the confused deputy problem. 
For more information, please refer to: https://www.alibabacloud.com/help/en/ram/use-cases/use-externalid-to-prevent-the-confused-deputy-problem + SetExternalId("ExternalId"). + // Optional. The default value is sts.aliyuncs.com. It is recommended to use a regionalized STS domain name. Selecting a region that is geographically closer can ensure network connectivity. For the domain name corresponding to the region, please refer to: https://api.alibabacloud.com/product/Sts + SetSTSEndpoint("sts.cn-hangzhou.aliyuncs.com") + + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + credential, err := provider.GetCredential() + if err != nil { + return + } + + accessKeyId := credential.AccessKeyId + accessKeySecret := credential.AccessKeySecret + securityToken := credential.SecurityToken + credentialType := credential.Type + + fmt.Println(accessKeyId, accessKeySecret, securityToken, credentialType) +} +``` + +#### Credentials URI + +By specifying the url, the credential will be able to automatically request maintenance of STS Token. + +```go +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main(){ + config := new(credentials.Config). + SetType("credentials_uri"). + // Format: http url. `credentialsURI` can be replaced by setting environment variable: ALIBABA_CLOUD_CREDENTIALS_URI + SetURL("http://127.0.0.1") + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + + credential, err := provider.GetCredential() + if err != nil { + return + } + + accessKeyId := credential.AccessKeyId + accessKeySecret := credential.AccessKeySecret + securityToken := credential.SecurityToken + credentialType := credential.Type + + fmt.Println(accessKeyId, accessKeySecret, securityToken, credentialType) +} +``` + +#### EcsRamRole + +The Credentials tool automatically obtains the RAM role attached to an ECS instance and uses the metadata server of ECS to obtain an STS token. 
The STS token is then used to initialize a Credentials client. You can also attach a RAM role to an elastic container instance or a worker node in an Alibaba Cloud Container Service for Kubernetes (ACK) cluster. + +```go +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main(){ + config := new(credentials.Config). + // Which type of credential you want + SetType("ecs_ram_role"). + // Optional. Specify the name of the RAM role of the ECS instance. If you do not specify this parameter, its value is automatically obtained. To reduce the number of requests, we recommend that you specify this parameter. + SetRoleName("RoleName"). + // `DisableIMDSv1` is optional and is recommended to be turned on. It can be replaced by setting environment variable: ALIBABA_CLOUD_IMDSV1_DISABLED + SetDisableIMDSv1(true) + + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + + credential, err := provider.GetCredential() + if err != nil { + return + } + accessKeyId := credential.AccessKeyId + accessKeySecret := credential.AccessKeySecret + securityToken := credential.SecurityToken + credentialType := credential.Type + + fmt.Println(accessKeyId, accessKeySecret, securityToken, credentialType) +} +``` + +#### Bearer Token + +If credential is required by the Cloud Call Centre (CCC), please apply for Bearer Token maintenance by yourself. + +```go +import ( + "fmt" + + "github.com/aliyun/credentials-go/credentials" +) + +func main(){ + config := new(credentials.Config). + // Which type of credential you want + SetType("bearer"). 
+ // BearerToken of your account + SetBearerToken("BearerToken") + + provider, err := credentials.NewCredential(config) + if err != nil { + return + } + + credential, err := provider.GetCredential() + if err != nil { + return + } + + bearerToken := credential.BearerToken + credentialType := credential.Type + fmt.Println(bearerToken, credentialType) +} +``` + +### Credential Provider Chain + +If you want to use different types of credentials in the development and production environments of your application, you generally need to obtain the environment information from the code and write code branches to obtain different credentials for the development and production environments. The default credential provider chain of the Credentials tool allows you to use the same code to obtain credentials for different environments based on configurations independent of the application. If you call `NewCredential()` with nil, it will use provider chain to get credential for you. + +### 1. Environmental certificate + +Look for environment credentials in environment variable. +- If the `ALIBABA_CLOUD_ACCESS_KEY_ID` and `ALIBABA_CLOUD_ACCESS_KEY_SECRET` environment variables are defined and are not empty, the program will use them to create default credentials. +- If the `ALIBABA_CLOUD_ACCESS_KEY_ID`, `ALIBABA_CLOUD_ACCESS_KEY_SECRET` and `ALIBABA_CLOUD_SECURITY_TOKEN` environment variables are defined and are not empty, the program will use them to create temporary security credentials(STS). Note: This token has an expiration time, it is recommended to use it in a temporary environment. + +### 2. The RAM role of an OIDC IdP + +If no credentials are found in the previous step, the Credentials tool obtains the values of the following environment variables: + +`ALIBABA_CLOUD_ROLE_ARN`: the ARN of the RAM role. + +`ALIBABA_CLOUD_OIDC_PROVIDER_ARN`: the ARN of the OIDC IdP. + +`ALIBABA_CLOUD_OIDC_TOKEN_FILE`: the path of the OIDC token file. 
+ +If the preceding three environment variables are specified, the Credentials tool uses the environment variables to call the [AssumeRoleWithOIDC](https://www.alibabacloud.com/help/en/ram/developer-reference/api-sts-2015-04-01-assumerolewithoidc) operation of STS to obtain an STS token as the default credential. + +### 3. Using the config.json Configuration File of Aliyun CLI Tool +If there is no higher-priority credential information, the Credentials tool will first check the following locations to see if the config.json file exists: + +Linux system: `~/.aliyun/config.json` +Windows system: `C:\Users\USER_NAME\.aliyun\config.json` +If the file exists, the program will use the credential information specified by `current` in the configuration file to initialize the credentials client. Of course, you can also use the environment variable `ALIBABA_CLOUD_PROFILE` to specify the credential information, for example by setting the value of `ALIBABA_CLOUD_PROFILE` to `AK`. + +In the config.json configuration file, the value of each module represents different ways to obtain credential information: + +- AK: Use the Access Key of the user as credential information; +- RamRoleArn: Use the ARN of the RAM role to obtain credential information; +- EcsRamRole: Use the RAM role bound to the ECS to obtain credential information; +- OIDC: Obtain credential information through OIDC ARN and OIDC Token; +- ChainableRamRoleArn: Use the role chaining method to obtain new credential information by specifying other credentials in the JSON file. 
+ +The configuration example information is as follows: + +```json +{ + "current": "AK", + "profiles": [ + { + "name": "AK", + "mode": "AK", + "access_key_id": "access_key_id", + "access_key_secret": "access_key_secret" + }, + { + "name": "RamRoleArn", + "mode": "RamRoleArn", + "access_key_id": "access_key_id", + "access_key_secret": "access_key_secret", + "ram_role_arn": "ram_role_arn", + "ram_session_name": "ram_session_name", + "expired_seconds": 3600, + "sts_region": "cn-hangzhou" + }, + { + "name": "EcsRamRole", + "mode": "EcsRamRole", + "ram_role_name": "ram_role_name" + }, + { + "name": "OIDC", + "mode": "OIDC", + "ram_role_arn": "ram_role_arn", + "oidc_token_file": "path/to/oidc/file", + "oidc_provider_arn": "oidc_provider_arn", + "ram_session_name": "ram_session_name", + "expired_seconds": 3600, + "sts_region": "cn-hangzhou" + }, + { + "name": "ChainableRamRoleArn", + "mode": "ChainableRamRoleArn", + "source_profile": "AK", + "ram_role_arn": "ram_role_arn", + "ram_session_name": "ram_session_name", + "expired_seconds": 3600, + "sts_region": "cn-hangzhou" + } + ] +} +``` + +### 4. Configuration file +> +> If the user's home directory has the default file `~/.alibabacloud/credentials` (Windows is `C:\Users\USER_NAME\.alibabacloud\credentials`), the program will automatically create credentials with the specified type and name. You can also specify the configuration file path by configuring the `ALIBABA_CLOUD_CREDENTIALS_FILE` environment variable. If the configuration file exists, the application initializes a Credentials client by using the credential information that is specified by default in the configuration file. You can also configure the `ALIBABA_CLOUD_PROFILE` environment variable to modify the default credential information that is read. 
+ +Configuration example: +```ini +[default] +type = access_key # Authentication method is access_key +access_key_id = foo # Key +access_key_secret = bar # Secret + +[project1] +type = ecs_ram_role # Authentication method is ecs_ram_role +role_name = EcsRamRoleTest # Role name, optional. It will be retrieved automatically if not set. It is highly recommended to set it up to reduce requests. + +[project2] +type = ram_role_arn # Authentication method is ram_role_arn +access_key_id = foo +access_key_secret = bar +role_arn = role_arn +role_session_name = session_name + +[project3] +type=oidc_role_arn # Authentication method is oidc_role_arn +oidc_provider_arn=oidc_provider_arn +oidc_token_file_path=oidc_token_file_path +role_arn=role_arn +role_session_name=session_name +``` + +### 5. Instance RAM role + +If the environment variable `ALIBABA_CLOUD_ECS_METADATA` is defined and not empty, the program will take the value of the environment variable as the role name and request `http://100.100.100.200/latest/meta-data/ram/security-credentials/` to get temporary security credentials, which are used as the default credentials. + +### 6. Using External Service Credentials URI + +If there is no higher-priority credential information, the Credentials tool will obtain the `ALIBABA_CLOUD_CREDENTIALS_URI` from the environment variables. If it exists, the program will request the URI address to obtain temporary security credentials as the default credential information. + +The external service response structure should be as follows: + +```json +{ + "Code": "Success", + "AccessKeyId": "AccessKeyId", + "AccessKeySecret": "AccessKeySecret", + "SecurityToken": "SecurityToken", + "Expiration": "2024-10-26T03:46:38Z" +} +``` + +## License + +[Apache-2.0](/LICENSE) + +Copyright (c) 2009-present, Alibaba Cloud All rights reserved. 
+ +[ak]: https://usercenter.console.aliyun.com/#/manage/ak +[ram]: https://ram.console.aliyun.com/users +[policy]: https://www.alibabacloud.com/help/doc-detail/28664.htm?spm=a2c63.p38356.a3.3.27a63b01khWgdh +[permissions]: https://ram.console.aliyun.com/permissions +[RAM Role]: https://ram.console.aliyun.com/#/role/list diff --git a/vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go deleted file mode 100644 index 78530e6893..0000000000 --- a/vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go +++ /dev/null @@ -1,50 +0,0 @@ -package credentials - -import "github.com/alibabacloud-go/tea/tea" - -// AccessKeyCredential is a kind of credential -type AccessKeyCredential struct { - AccessKeyId string - AccessKeySecret string -} - -func newAccessKeyCredential(accessKeyId, accessKeySecret string) *AccessKeyCredential { - return &AccessKeyCredential{ - AccessKeyId: accessKeyId, - AccessKeySecret: accessKeySecret, - } -} - -func (s *AccessKeyCredential) GetCredential() (*CredentialModel, error) { - credential := &CredentialModel{ - AccessKeyId: tea.String(s.AccessKeyId), - AccessKeySecret: tea.String(s.AccessKeySecret), - Type: tea.String("access_key"), - } - return credential, nil -} - -// GetAccessKeyId reutrns AccessKeyCreential's AccessKeyId -func (a *AccessKeyCredential) GetAccessKeyId() (*string, error) { - return tea.String(a.AccessKeyId), nil -} - -// GetAccessSecret reutrns AccessKeyCreential's AccessKeySecret -func (a *AccessKeyCredential) GetAccessKeySecret() (*string, error) { - return tea.String(a.AccessKeySecret), nil -} - -// GetSecurityToken is useless for AccessKeyCreential -func (a *AccessKeyCredential) GetSecurityToken() (*string, error) { - return tea.String(""), nil -} - -// GetBearerToken is useless for AccessKeyCreential -func (a *AccessKeyCredential) GetBearerToken() *string { - return tea.String("") -} - -// GetType reutrns 
AccessKeyCreential's type -func (a *AccessKeyCredential) GetType() *string { - return tea.String("access_key") -} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go index 9df4d32026..fc253b9052 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go @@ -16,8 +16,9 @@ func newBearerTokenCredential(token string) *BearerTokenCredential { func (s *BearerTokenCredential) GetCredential() (*CredentialModel, error) { credential := &CredentialModel{ - BearerToken: tea.String(s.BearerToken), - Type: tea.String("bearer"), + BearerToken: tea.String(s.BearerToken), + Type: tea.String("bearer"), + ProviderName: tea.String("bearer"), } return credential, nil } diff --git a/vendor/github.com/aliyun/credentials-go/credentials/credential.go b/vendor/github.com/aliyun/credentials-go/credentials/credential.go index 2603dc0c7d..908359e9af 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/credential.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/credential.go @@ -12,9 +12,10 @@ import ( "github.com/alibabacloud-go/debug/debug" "github.com/alibabacloud-go/tea/tea" + "github.com/aliyun/credentials-go/credentials/internal/utils" + "github.com/aliyun/credentials-go/credentials/providers" "github.com/aliyun/credentials-go/credentials/request" "github.com/aliyun/credentials-go/credentials/response" - "github.com/aliyun/credentials-go/credentials/utils" ) var debuglog = debug.Init("credential") @@ -25,8 +26,11 @@ var hookParse = func(err error) error { // Credential is an interface for getting actual credential type Credential interface { + // Deprecated: GetAccessKeyId is deprecated, use GetCredential instead of. GetAccessKeyId() (*string, error) + // Deprecated: GetAccessKeySecret is deprecated, use GetCredential instead of. 
GetAccessKeySecret() (*string, error) + // Deprecated: GetSecurityToken is deprecated, use GetCredential instead of. GetSecurityToken() (*string, error) GetBearerToken() *string GetType() *string @@ -35,29 +39,50 @@ type Credential interface { // Config is important when call NewCredential type Config struct { - Type *string `json:"type"` - AccessKeyId *string `json:"access_key_id"` - AccessKeySecret *string `json:"access_key_secret"` - OIDCProviderArn *string `json:"oidc_provider_arn"` - OIDCTokenFilePath *string `json:"oidc_token"` - RoleArn *string `json:"role_arn"` - RoleSessionName *string `json:"role_session_name"` - PublicKeyId *string `json:"public_key_id"` - RoleName *string `json:"role_name"` - SessionExpiration *int `json:"session_expiration"` - PrivateKeyFile *string `json:"private_key_file"` - BearerToken *string `json:"bearer_token"` - SecurityToken *string `json:"security_token"` - RoleSessionExpiration *int `json:"role_session_expiratioon"` - Policy *string `json:"policy"` - Host *string `json:"host"` - Timeout *int `json:"timeout"` - ConnectTimeout *int `json:"connect_timeout"` - Proxy *string `json:"proxy"` - InAdvanceScale *float64 `json:"inAdvanceScale"` - Url *string `json:"url"` - STSEndpoint *string `json:"sts_endpoint"` - ExternalId *string `json:"external_id"` + // Credential type, including access_key, sts, bearer, ecs_ram_role, ram_role_arn, rsa_key_pair, oidc_role_arn, credentials_uri + Type *string `json:"type"` + AccessKeyId *string `json:"access_key_id"` + AccessKeySecret *string `json:"access_key_secret"` + SecurityToken *string `json:"security_token"` + BearerToken *string `json:"bearer_token"` + + // Used when the type is ram_role_arn or oidc_role_arn + OIDCProviderArn *string `json:"oidc_provider_arn"` + OIDCTokenFilePath *string `json:"oidc_token"` + RoleArn *string `json:"role_arn"` + RoleSessionName *string `json:"role_session_name"` + RoleSessionExpiration *int `json:"role_session_expiration"` + Policy *string `json:"policy"` 
+ ExternalId *string `json:"external_id"` + STSEndpoint *string `json:"sts_endpoint"` + + // Used when the type is ecs_ram_role + RoleName *string `json:"role_name"` + // Deprecated + EnableIMDSv2 *bool `json:"enable_imds_v2"` + DisableIMDSv1 *bool `json:"disable_imds_v1"` + // Deprecated + MetadataTokenDuration *int `json:"metadata_token_duration"` + + // Used when the type is credentials_uri + Url *string `json:"url"` + + // Deprecated + // Used when the type is rsa_key_pair + SessionExpiration *int `json:"session_expiration"` + PublicKeyId *string `json:"public_key_id"` + PrivateKeyFile *string `json:"private_key_file"` + Host *string `json:"host"` + + // Read timeout, in milliseconds. + // The default value for ecs_ram_role is 1000ms, the default value for ram_role_arn is 5000ms, and the default value for oidc_role_arn is 5000ms. + Timeout *int `json:"timeout"` + // Connection timeout, in milliseconds. + // The default value for ecs_ram_role is 1000ms, the default value for ram_role_arn is 10000ms, and the default value for oidc_role_arn is 10000ms. + ConnectTimeout *int `json:"connect_timeout"` + + Proxy *string `json:"proxy"` + InAdvanceScale *float64 `json:"inAdvanceScale"` } func (s Config) String() string { @@ -103,6 +128,21 @@ func (s *Config) SetRoleName(v string) *Config { return s } +func (s *Config) SetEnableIMDSv2(v bool) *Config { + s.EnableIMDSv2 = &v + return s +} + +func (s *Config) SetDisableIMDSv1(v bool) *Config { + s.DisableIMDSv1 = &v + return s +} + +func (s *Config) SetMetadataTokenDuration(v int) *Config { + s.MetadataTokenDuration = &v + return s +} + func (s *Config) SetSessionExpiration(v int) *Config { s.SessionExpiration = &v return s @@ -176,75 +216,125 @@ func (s *Config) SetSTSEndpoint(v string) *Config { return s } +func (s *Config) SetExternalId(v string) *Config { + s.ExternalId = &v + return s +} + // NewCredential return a credential according to the type in config. 
-// if config is nil, the function will use default provider chain to get credential. +// if config is nil, the function will use default provider chain to get credentials. // please see README.md for detail. func NewCredential(config *Config) (credential Credential, err error) { if config == nil { - config, err = defaultChain.resolve() - if err != nil { - return - } - return NewCredential(config) + provider := providers.NewDefaultCredentialsProvider() + credential = FromCredentialsProvider("default", provider) + return } switch tea.StringValue(config.Type) { case "credentials_uri": - credential = newURLCredential(tea.StringValue(config.Url)) - case "oidc_role_arn": - err = checkoutAssumeRamoidc(config) + provider, err := providers.NewURLCredentialsProviderBuilder(). + WithUrl(tea.StringValue(config.Url)). + WithHttpOptions(&providers.HttpOptions{ + Proxy: tea.StringValue(config.Proxy), + ReadTimeout: tea.IntValue(config.Timeout), + ConnectTimeout: tea.IntValue(config.ConnectTimeout), + }). + Build() + if err != nil { - return + return nil, err } - runtime := &utils.Runtime{ - Host: tea.StringValue(config.Host), - Proxy: tea.StringValue(config.Proxy), - ReadTimeout: tea.IntValue(config.Timeout), - ConnectTimeout: tea.IntValue(config.ConnectTimeout), - STSEndpoint: tea.StringValue(config.STSEndpoint), + credential = FromCredentialsProvider("credentials_uri", provider) + case "oidc_role_arn": + provider, err := providers.NewOIDCCredentialsProviderBuilder(). + WithRoleArn(tea.StringValue(config.RoleArn)). + WithOIDCTokenFilePath(tea.StringValue(config.OIDCTokenFilePath)). + WithOIDCProviderARN(tea.StringValue(config.OIDCProviderArn)). + WithDurationSeconds(tea.IntValue(config.RoleSessionExpiration)). + WithPolicy(tea.StringValue(config.Policy)). + WithRoleSessionName(tea.StringValue(config.RoleSessionName)). + WithSTSEndpoint(tea.StringValue(config.STSEndpoint)). 
+ WithHttpOptions(&providers.HttpOptions{ + Proxy: tea.StringValue(config.Proxy), + ReadTimeout: tea.IntValue(config.Timeout), + ConnectTimeout: tea.IntValue(config.ConnectTimeout), + }). + Build() + + if err != nil { + return nil, err } - credential = newOIDCRoleArnCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.RoleArn), tea.StringValue(config.OIDCProviderArn), tea.StringValue(config.OIDCTokenFilePath), tea.StringValue(config.RoleSessionName), tea.StringValue(config.Policy), tea.IntValue(config.RoleSessionExpiration), runtime) + credential = FromCredentialsProvider("oidc_role_arn", provider) case "access_key": - err = checkAccessKey(config) + provider, err := providers.NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(tea.StringValue(config.AccessKeyId)). + WithAccessKeySecret(tea.StringValue(config.AccessKeySecret)). + Build() if err != nil { - return + return nil, err } - credential = newAccessKeyCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret)) + + credential = FromCredentialsProvider("access_key", provider) case "sts": - err = checkSTS(config) + provider, err := providers.NewStaticSTSCredentialsProviderBuilder(). + WithAccessKeyId(tea.StringValue(config.AccessKeyId)). + WithAccessKeySecret(tea.StringValue(config.AccessKeySecret)). + WithSecurityToken(tea.StringValue(config.SecurityToken)). 
+ Build() if err != nil { - return + return nil, err } - credential = newStsTokenCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.SecurityToken)) + + credential = FromCredentialsProvider("sts", provider) case "ecs_ram_role": - checkEcsRAMRole(config) - runtime := &utils.Runtime{ - Host: tea.StringValue(config.Host), - Proxy: tea.StringValue(config.Proxy), - ReadTimeout: tea.IntValue(config.Timeout), - ConnectTimeout: tea.IntValue(config.ConnectTimeout), + provider, err := providers.NewECSRAMRoleCredentialsProviderBuilder(). + WithRoleName(tea.StringValue(config.RoleName)). + WithDisableIMDSv1(tea.BoolValue(config.DisableIMDSv1)). + Build() + + if err != nil { + return nil, err } - credential = newEcsRAMRoleCredential(tea.StringValue(config.RoleName), tea.Float64Value(config.InAdvanceScale), runtime) + + credential = FromCredentialsProvider("ecs_ram_role", provider) case "ram_role_arn": - err = checkRAMRoleArn(config) + var credentialsProvider providers.CredentialsProvider + if config.SecurityToken != nil && *config.SecurityToken != "" { + credentialsProvider, err = providers.NewStaticSTSCredentialsProviderBuilder(). + WithAccessKeyId(tea.StringValue(config.AccessKeyId)). + WithAccessKeySecret(tea.StringValue(config.AccessKeySecret)). + WithSecurityToken(tea.StringValue(config.SecurityToken)). + Build() + } else { + credentialsProvider, err = providers.NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(tea.StringValue(config.AccessKeyId)). + WithAccessKeySecret(tea.StringValue(config.AccessKeySecret)). + Build() + } + if err != nil { - return + return nil, err } - runtime := &utils.Runtime{ - Host: tea.StringValue(config.Host), - Proxy: tea.StringValue(config.Proxy), - ReadTimeout: tea.IntValue(config.Timeout), - ConnectTimeout: tea.IntValue(config.ConnectTimeout), - STSEndpoint: tea.StringValue(config.STSEndpoint), + + provider, err := providers.NewRAMRoleARNCredentialsProviderBuilder(). 
+ WithCredentialsProvider(credentialsProvider). + WithRoleArn(tea.StringValue(config.RoleArn)). + WithRoleSessionName(tea.StringValue(config.RoleSessionName)). + WithPolicy(tea.StringValue(config.Policy)). + WithDurationSeconds(tea.IntValue(config.RoleSessionExpiration)). + WithExternalId(tea.StringValue(config.ExternalId)). + WithStsEndpoint(tea.StringValue(config.STSEndpoint)). + WithHttpOptions(&providers.HttpOptions{ + Proxy: tea.StringValue(config.Proxy), + ReadTimeout: tea.IntValue(config.Timeout), + ConnectTimeout: tea.IntValue(config.ConnectTimeout), + }). + Build() + if err != nil { + return nil, err } - credential = newRAMRoleArnWithExternalIdCredential( - tea.StringValue(config.AccessKeyId), - tea.StringValue(config.AccessKeySecret), - tea.StringValue(config.RoleArn), - tea.StringValue(config.RoleSessionName), - tea.StringValue(config.Policy), - tea.IntValue(config.RoleSessionExpiration), - tea.StringValue(config.ExternalId), - runtime) + + credential = FromCredentialsProvider("ram_role_arn", provider) case "rsa_key_pair": err = checkRSAKeyPair(config) if err != nil { @@ -271,7 +361,11 @@ func NewCredential(config *Config) (credential Credential, err error) { ConnectTimeout: tea.IntValue(config.ConnectTimeout), STSEndpoint: tea.StringValue(config.STSEndpoint), } - credential = newRsaKeyPairCredential(privateKey, tea.StringValue(config.PublicKeyId), tea.IntValue(config.SessionExpiration), runtime) + credential = newRsaKeyPairCredential( + privateKey, + tea.StringValue(config.PublicKeyId), + tea.IntValue(config.SessionExpiration), + runtime) case "bearer": if tea.StringValue(config.BearerToken) == "" { err = errors.New("BearerToken cannot be empty") @@ -279,7 +373,7 @@ func NewCredential(config *Config) (credential Credential, err error) { } credential = newBearerTokenCredential(tea.StringValue(config.BearerToken)) default: - err = errors.New("Invalid type option, support: access_key, sts, ecs_ram_role, ram_role_arn, rsa_key_pair") + err = 
errors.New("invalid type option, support: access_key, sts, bearer, ecs_ram_role, ram_role_arn, rsa_key_pair, oidc_role_arn, credentials_uri") return } return credential, nil @@ -297,70 +391,6 @@ func checkRSAKeyPair(config *Config) (err error) { return } -func checkoutAssumeRamoidc(config *Config) (err error) { - if tea.StringValue(config.RoleArn) == "" { - err = errors.New("RoleArn cannot be empty") - return - } - if tea.StringValue(config.OIDCProviderArn) == "" { - err = errors.New("OIDCProviderArn cannot be empty") - return - } - return -} - -func checkRAMRoleArn(config *Config) (err error) { - if tea.StringValue(config.AccessKeySecret) == "" { - err = errors.New("AccessKeySecret cannot be empty") - return - } - if tea.StringValue(config.RoleArn) == "" { - err = errors.New("RoleArn cannot be empty") - return - } - if tea.StringValue(config.RoleSessionName) == "" { - err = errors.New("RoleSessionName cannot be empty") - return - } - if tea.StringValue(config.AccessKeyId) == "" { - err = errors.New("AccessKeyId cannot be empty") - return - } - return -} - -func checkEcsRAMRole(config *Config) (err error) { - return -} - -func checkSTS(config *Config) (err error) { - if tea.StringValue(config.AccessKeyId) == "" { - err = errors.New("AccessKeyId cannot be empty") - return - } - if tea.StringValue(config.AccessKeySecret) == "" { - err = errors.New("AccessKeySecret cannot be empty") - return - } - if tea.StringValue(config.SecurityToken) == "" { - err = errors.New("SecurityToken cannot be empty") - return - } - return -} - -func checkAccessKey(config *Config) (err error) { - if tea.StringValue(config.AccessKeyId) == "" { - err = errors.New("AccessKeyId cannot be empty") - return - } - if tea.StringValue(config.AccessKeySecret) == "" { - err = errors.New("AccessKeySecret cannot be empty") - return - } - return -} - func doAction(request *request.CommonRequest, runtime *utils.Runtime) (content []byte, err error) { var urlEncoded string if request.BodyParams != nil { @@ 
-390,12 +420,12 @@ func doAction(request *request.CommonRequest, runtime *utils.Runtime) (content [ return } } - trans := &http.Transport{} + transport := &http.Transport{} if proxy != nil && runtime.Proxy != "" { - trans.Proxy = http.ProxyURL(proxy) + transport.Proxy = http.ProxyURL(proxy) } - trans.DialContext = utils.Timeout(time.Duration(runtime.ConnectTimeout) * time.Second) - httpClient.Transport = trans + transport.DialContext = utils.Timeout(time.Duration(runtime.ConnectTimeout) * time.Second) + httpClient.Transport = transport httpResponse, err := hookDo(httpClient.Do)(httpRequest) if err != nil { return @@ -418,3 +448,71 @@ func doAction(request *request.CommonRequest, runtime *utils.Runtime) (content [ } return resp.GetHTTPContentBytes(), nil } + +type credentialsProviderWrap struct { + typeName string + provider providers.CredentialsProvider +} + +// Deprecated: use GetCredential() instead of +func (cp *credentialsProviderWrap) GetAccessKeyId() (accessKeyId *string, err error) { + cc, err := cp.provider.GetCredentials() + if err != nil { + return + } + accessKeyId = &cc.AccessKeyId + return +} + +// Deprecated: use GetCredential() instead of +func (cp *credentialsProviderWrap) GetAccessKeySecret() (accessKeySecret *string, err error) { + cc, err := cp.provider.GetCredentials() + if err != nil { + return + } + accessKeySecret = &cc.AccessKeySecret + return +} + +// Deprecated: use GetCredential() instead of +func (cp *credentialsProviderWrap) GetSecurityToken() (securityToken *string, err error) { + cc, err := cp.provider.GetCredentials() + if err != nil { + return + } + securityToken = &cc.SecurityToken + return +} + +// Deprecated: don't use it +func (cp *credentialsProviderWrap) GetBearerToken() (bearerToken *string) { + return tea.String("") +} + +// Get credentials +func (cp *credentialsProviderWrap) GetCredential() (cm *CredentialModel, err error) { + c, err := cp.provider.GetCredentials() + if err != nil { + return + } + + cm = &CredentialModel{ + 
AccessKeyId: &c.AccessKeyId, + AccessKeySecret: &c.AccessKeySecret, + SecurityToken: &c.SecurityToken, + Type: &cp.typeName, + ProviderName: &c.ProviderName, + } + return +} + +func (cp *credentialsProviderWrap) GetType() *string { + return &cp.typeName +} + +func FromCredentialsProvider(typeName string, cp providers.CredentialsProvider) Credential { + return &credentialsProviderWrap{ + typeName: typeName, + provider: cp, + } +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/credential_model.go b/vendor/github.com/aliyun/credentials-go/credentials/credential_model.go index 7b46c30881..b145feec30 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/credential_model.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/credential_model.go @@ -13,7 +13,17 @@ type CredentialModel struct { // bearer token BearerToken *string `json:"bearerToken,omitempty" xml:"bearerToken,omitempty"` // type + // + // example: + // + // access_key Type *string `json:"type,omitempty" xml:"type,omitempty"` + // provider name + // + // example: + // + // cli_profile/static_ak + ProviderName *string `json:"providerName,omitempty" xml:"providerName,omitempty"` } func (s CredentialModel) String() string { @@ -48,3 +58,8 @@ func (s *CredentialModel) SetType(v string) *CredentialModel { s.Type = &v return s } + +func (s *CredentialModel) SetProviderName(v string) *CredentialModel { + s.ProviderName = &v + return s +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/doc.go b/vendor/github.com/aliyun/credentials-go/credentials/doc.go new file mode 100644 index 0000000000..bfd59e308b --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/doc.go @@ -0,0 +1,2 @@ +// Package credentials is an alibaba cloud official credentials provider implementation +package credentials diff --git a/vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role.go 
b/vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role_credentials_provider.go similarity index 54% rename from vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role.go rename to vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role_credentials_provider.go index d86360fc5d..5a54d7b504 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role_credentials_provider.go @@ -3,21 +3,29 @@ package credentials import ( "encoding/json" "fmt" + "strconv" "time" "github.com/alibabacloud-go/tea/tea" + "github.com/aliyun/credentials-go/credentials/internal/utils" "github.com/aliyun/credentials-go/credentials/request" - "github.com/aliyun/credentials-go/credentials/utils" ) var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/" +var securityCredTokenURL = "http://100.100.100.200/latest/api/token" -// EcsRAMRoleCredential is a kind of credential -type EcsRAMRoleCredential struct { +const defaultMetadataTokenDuration = int(21600) + +// ECSRAMRoleCredentialsProvider is a kind of credentials provider +type ECSRAMRoleCredentialsProvider struct { *credentialUpdater - RoleName string - sessionCredential *sessionCredential - runtime *utils.Runtime + RoleName string + EnableIMDSv2 bool + MetadataTokenDuration int + sessionCredential *sessionCredential + runtime *utils.Runtime + metadataToken string + staleTime int64 } type ecsRAMRoleResponse struct { @@ -28,86 +36,85 @@ type ecsRAMRoleResponse struct { Expiration string `json:"Expiration" xml:"Expiration"` } -func newEcsRAMRoleCredential(roleName string, inAdvanceScale float64, runtime *utils.Runtime) *EcsRAMRoleCredential { +func newEcsRAMRoleCredentialWithEnableIMDSv2(roleName string, enableIMDSv2 bool, metadataTokenDuration int, inAdvanceScale float64, runtime *utils.Runtime) *ECSRAMRoleCredentialsProvider { credentialUpdater := new(credentialUpdater) if inAdvanceScale < 1 
&& inAdvanceScale > 0 { credentialUpdater.inAdvanceScale = inAdvanceScale } - return &EcsRAMRoleCredential{ - RoleName: roleName, - credentialUpdater: credentialUpdater, - runtime: runtime, + return &ECSRAMRoleCredentialsProvider{ + RoleName: roleName, + EnableIMDSv2: enableIMDSv2, + MetadataTokenDuration: metadataTokenDuration, + credentialUpdater: credentialUpdater, + runtime: runtime, } } -func (e *EcsRAMRoleCredential) GetCredential() (*CredentialModel, error) { +func (e *ECSRAMRoleCredentialsProvider) GetCredential() (credentials *CredentialModel, err error) { if e.sessionCredential == nil || e.needUpdateCredential() { - err := e.updateCredential() + err = e.updateCredential() if err != nil { - return nil, err + if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { + // 虽然有错误,但是已有的 credentials 还有效 + } else { + return + } } } - credential := &CredentialModel{ + + credentials = &CredentialModel{ AccessKeyId: tea.String(e.sessionCredential.AccessKeyId), AccessKeySecret: tea.String(e.sessionCredential.AccessKeySecret), SecurityToken: tea.String(e.sessionCredential.SecurityToken), Type: tea.String("ecs_ram_role"), } - return credential, nil + + return } // GetAccessKeyId reutrns EcsRAMRoleCredential's AccessKeyId // if AccessKeyId is not exist or out of date, the function will update it. 
-func (e *EcsRAMRoleCredential) GetAccessKeyId() (*string, error) { - if e.sessionCredential == nil || e.needUpdateCredential() { - err := e.updateCredential() - if err != nil { - if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { - return &e.sessionCredential.AccessKeyId, nil - } - return tea.String(""), err - } +func (e *ECSRAMRoleCredentialsProvider) GetAccessKeyId() (accessKeyId *string, err error) { + c, err := e.GetCredential() + if err != nil { + return } - return tea.String(e.sessionCredential.AccessKeyId), nil + + accessKeyId = c.AccessKeyId + return } // GetAccessSecret reutrns EcsRAMRoleCredential's AccessKeySecret // if AccessKeySecret is not exist or out of date, the function will update it. -func (e *EcsRAMRoleCredential) GetAccessKeySecret() (*string, error) { - if e.sessionCredential == nil || e.needUpdateCredential() { - err := e.updateCredential() - if err != nil { - if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { - return &e.sessionCredential.AccessKeySecret, nil - } - return tea.String(""), err - } +func (e *ECSRAMRoleCredentialsProvider) GetAccessKeySecret() (accessKeySecret *string, err error) { + c, err := e.GetCredential() + if err != nil { + return } - return tea.String(e.sessionCredential.AccessKeySecret), nil + + accessKeySecret = c.AccessKeySecret + return } // GetSecurityToken reutrns EcsRAMRoleCredential's SecurityToken // if SecurityToken is not exist or out of date, the function will update it. 
-func (e *EcsRAMRoleCredential) GetSecurityToken() (*string, error) { - if e.sessionCredential == nil || e.needUpdateCredential() { - err := e.updateCredential() - if err != nil { - if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { - return &e.sessionCredential.SecurityToken, nil - } - return tea.String(""), err - } +func (e *ECSRAMRoleCredentialsProvider) GetSecurityToken() (securityToken *string, err error) { + c, err := e.GetCredential() + if err != nil { + return } - return tea.String(e.sessionCredential.SecurityToken), nil + + securityToken = c.SecurityToken + return } // GetBearerToken is useless for EcsRAMRoleCredential -func (e *EcsRAMRoleCredential) GetBearerToken() *string { +func (e *ECSRAMRoleCredentialsProvider) GetBearerToken() *string { return tea.String("") } // GetType reutrns EcsRAMRoleCredential's type -func (e *EcsRAMRoleCredential) GetType() *string { +func (e *ECSRAMRoleCredentialsProvider) GetType() *string { return tea.String("ecs_ram_role") } @@ -123,7 +130,27 @@ func getRoleName() (string, error) { return string(content), nil } -func (e *EcsRAMRoleCredential) updateCredential() (err error) { +func (e *ECSRAMRoleCredentialsProvider) getMetadataToken() (err error) { + if e.needToRefresh() { + if e.MetadataTokenDuration <= 0 { + e.MetadataTokenDuration = defaultMetadataTokenDuration + } + tmpTime := time.Now().Unix() + int64(e.MetadataTokenDuration*1000) + request := request.NewCommonRequest() + request.URL = securityCredTokenURL + request.Method = "PUT" + request.Headers["X-aliyun-ecs-metadata-token-ttl-seconds"] = strconv.Itoa(e.MetadataTokenDuration) + content, err := doAction(request, e.runtime) + if err != nil { + return err + } + e.staleTime = tmpTime + e.metadataToken = string(content) + } + return +} + +func (e *ECSRAMRoleCredentialsProvider) updateCredential() (err error) { if e.runtime == nil { e.runtime = new(utils.Runtime) } @@ -134,6 +161,13 @@ func (e *EcsRAMRoleCredential) updateCredential() 
(err error) { return fmt.Errorf("refresh Ecs sts token err: %s", err.Error()) } } + if e.EnableIMDSv2 { + err = e.getMetadataToken() + if err != nil { + return fmt.Errorf("failed to get token from ECS Metadata Service: %s", err.Error()) + } + request.Headers["X-aliyun-ecs-metadata-token"] = e.metadataToken + } request.URL = securityCredURL + e.RoleName request.Method = "GET" content, err := doAction(request, e.runtime) @@ -163,3 +197,8 @@ func (e *EcsRAMRoleCredential) updateCredential() (err error) { return } + +func (e *ECSRAMRoleCredentialsProvider) needToRefresh() (needToRefresh bool) { + needToRefresh = time.Now().Unix() >= e.staleTime + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/env_provider.go b/vendor/github.com/aliyun/credentials-go/credentials/env_provider.go index 89df42f8c3..c09577284d 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/env_provider.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/env_provider.go @@ -23,7 +23,7 @@ func newEnvProvider() Provider { return &envProvider{} } -func (p *envProvider) resolve() (*Config, error) { +func (p *envProvider) resolve() (config *Config, err error) { accessKeyId, ok1 := os.LookupEnv(EnvVarAccessKeyIdNew) if !ok1 || accessKeyId == "" { accessKeyId, ok1 = os.LookupEnv(EnvVarAccessKeyId) @@ -38,10 +38,24 @@ func (p *envProvider) resolve() (*Config, error) { if accessKeySecret == "" { return nil, errors.New(EnvVarAccessKeySecret + " cannot be empty") } - config := &Config{ + + securityToken := os.Getenv("ALIBABA_CLOUD_SECURITY_TOKEN") + + if securityToken != "" { + config = &Config{ + Type: tea.String("sts"), + AccessKeyId: tea.String(accessKeyId), + AccessKeySecret: tea.String(accessKeySecret), + SecurityToken: tea.String(securityToken), + } + return + } + + config = &Config{ Type: tea.String("access_key"), AccessKeyId: tea.String(accessKeyId), AccessKeySecret: tea.String(accessKeySecret), } - return config, nil + + return } diff --git 
a/vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go b/vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go index 7e2ea07bb7..c82091dfda 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go @@ -2,6 +2,7 @@ package credentials import ( "os" + "strings" "github.com/alibabacloud-go/tea/tea" ) @@ -19,10 +20,12 @@ func (p *instanceCredentialsProvider) resolve() (*Config, error) { if !ok { return nil, nil } + enableIMDSv2, _ := os.LookupEnv(ENVEcsMetadataIMDSv2Enable) config := &Config{ - Type: tea.String("ecs_ram_role"), - RoleName: tea.String(roleName), + Type: tea.String("ecs_ram_role"), + RoleName: tea.String(roleName), + EnableIMDSv2: tea.Bool(strings.ToLower(enableIMDSv2) == "true"), } return config, nil } diff --git a/vendor/github.com/aliyun/credentials-go/credentials/internal/http/http.go b/vendor/github.com/aliyun/credentials-go/credentials/internal/http/http.go new file mode 100644 index 0000000000..88e5fae05d --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/internal/http/http.go @@ -0,0 +1,151 @@ +package http + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "runtime" + "strings" + "time" + + "github.com/alibabacloud-go/debug/debug" + credentials_go "github.com/aliyun/credentials-go" + "github.com/aliyun/credentials-go/credentials/internal/utils" +) + +var defaultUserAgent = fmt.Sprintf("AlibabaCloud (%s; %s) Golang/%s Credentials/%s TeaDSL/1", runtime.GOOS, runtime.GOARCH, strings.Trim(runtime.Version(), "go"), credentials_go.PACKAGE_VERSION) + +type Request struct { + Method string // http request method + URL string // http url + Protocol string // http or https + Host string // http host + ReadTimeout time.Duration + ConnectTimeout time.Duration + Proxy string // http proxy + Form map[string]string // http form + Body []byte // 
request body for JSON or stream + Path string + Queries map[string]string + Headers map[string]string +} + +func (req *Request) BuildRequestURL() string { + httpUrl := fmt.Sprintf("%s://%s%s", req.Protocol, req.Host, req.Path) + if req.URL != "" { + httpUrl = req.URL + } + + querystring := utils.GetURLFormedMap(req.Queries) + if querystring != "" { + httpUrl = httpUrl + "?" + querystring + } + + return fmt.Sprintf("%s %s", req.Method, httpUrl) +} + +type Response struct { + StatusCode int + Headers map[string]string + Body []byte +} + +var newRequest = http.NewRequest + +type do func(req *http.Request) (*http.Response, error) + +var hookDo = func(fn do) do { + return fn +} + +var debuglog = debug.Init("credential") + +func Do(req *Request) (res *Response, err error) { + querystring := utils.GetURLFormedMap(req.Queries) + // do request + httpUrl := fmt.Sprintf("%s://%s%s?%s", req.Protocol, req.Host, req.Path, querystring) + if req.URL != "" { + httpUrl = req.URL + } + + var body io.Reader + if req.Method == "GET" { + body = strings.NewReader("") + } else if req.Body != nil { + body = bytes.NewReader(req.Body) + } else { + body = strings.NewReader(utils.GetURLFormedMap(req.Form)) + } + + httpRequest, err := newRequest(req.Method, httpUrl, body) + if err != nil { + return + } + + httpRequest.Header["User-Agent"] = []string{defaultUserAgent} + + if req.Form != nil { + httpRequest.Header["Content-Type"] = []string{"application/x-www-form-urlencoded"} + } + + for key, value := range req.Headers { + if value != "" { + debuglog("> %s: %s", key, value) + httpRequest.Header.Set(key, value) + } + } + + httpClient := &http.Client{} + + if req.ReadTimeout != 0 { + httpClient.Timeout = req.ReadTimeout + req.ConnectTimeout + } + + transport := http.DefaultTransport.(*http.Transport).Clone() + if req.Proxy != "" { + var proxy *url.URL + proxy, err = url.Parse(req.Proxy) + if err != nil { + return + } + transport.Proxy = http.ProxyURL(proxy) + } + + if req.ConnectTimeout != 0 { + 
transport.DialContext = func(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{ + Timeout: req.ConnectTimeout, + DualStack: true, + }).DialContext(ctx, network, address) + } + } + + httpClient.Transport = transport + + httpResponse, err := hookDo(httpClient.Do)(httpRequest) + if err != nil { + return + } + + defer httpResponse.Body.Close() + + responseBody, err := ioutil.ReadAll(httpResponse.Body) + if err != nil { + return + } + res = &Response{ + StatusCode: httpResponse.StatusCode, + Headers: make(map[string]string), + Body: responseBody, + } + for key, v := range httpResponse.Header { + res.Headers[key] = v[0] + } + + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/path.go b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/path.go new file mode 100644 index 0000000000..a94088c6b5 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/path.go @@ -0,0 +1,18 @@ +package utils + +import ( + "os" + "runtime" +) + +var getOS = func() string { + return runtime.GOOS +} + +func GetHomePath() string { + if getOS() == "windows" { + return os.Getenv("USERPROFILE") + } + + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/runtime.go b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/runtime.go new file mode 100644 index 0000000000..432395cf4a --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/runtime.go @@ -0,0 +1,36 @@ +package utils + +import ( + "context" + "net" + "time" +) + +// Runtime is for setting timeout, proxy and host +type Runtime struct { + ReadTimeout int + ConnectTimeout int + Proxy string + Host string + STSEndpoint string +} + +// NewRuntime returns a Runtime +func NewRuntime(readTimeout, connectTimeout int, proxy string, host string) *Runtime { + return &Runtime{ + ReadTimeout: readTimeout, + ConnectTimeout: 
connectTimeout, + Proxy: proxy, + Host: host, + } +} + +// Timeout is for connect Timeout +func Timeout(connectTimeout time.Duration) func(cxt context.Context, net, addr string) (c net.Conn, err error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{ + Timeout: connectTimeout, + DualStack: true, + }).DialContext(ctx, network, address) + } +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/utils.go b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/utils.go new file mode 100644 index 0000000000..fffee1eda0 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/utils.go @@ -0,0 +1,204 @@ +package utils + +import ( + "bytes" + "crypto" + "crypto/hmac" + "crypto/md5" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + mathrand "math/rand" + "net/url" + "os" + "runtime" + "strconv" + "sync/atomic" + "time" +) + +type uuid [16]byte + +const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +var hookRead = func(fn func(p []byte) (n int, err error)) func(p []byte) (n int, err error) { + return fn +} + +var hookRSA = func(fn func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)) func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) { + return fn +} + +// GetUUID returns a uuid +func GetUUID() (uuidHex string) { + uuid := newUUID() + uuidHex = hex.EncodeToString(uuid[:]) + return +} + +// RandStringBytes returns a rand string +func RandStringBytes(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[mathrand.Intn(len(letterBytes))] + } + return string(b) +} + +// ShaHmac1 return a string which has been hashed +func ShaHmac1(source, secret string) string { + key := []byte(secret) + hmac := hmac.New(sha1.New, key) + hmac.Write([]byte(source)) + 
signedBytes := hmac.Sum(nil) + signedString := base64.StdEncoding.EncodeToString(signedBytes) + return signedString +} + +// Sha256WithRsa return a string which has been hashed with Rsa +func Sha256WithRsa(source, secret string) string { + decodeString, err := base64.StdEncoding.DecodeString(secret) + if err != nil { + panic(err) + } + private, err := x509.ParsePKCS8PrivateKey(decodeString) + if err != nil { + panic(err) + } + + h := crypto.Hash.New(crypto.SHA256) + h.Write([]byte(source)) + hashed := h.Sum(nil) + signature, err := hookRSA(rsa.SignPKCS1v15)(rand.Reader, private.(*rsa.PrivateKey), + crypto.SHA256, hashed) + if err != nil { + panic(err) + } + + return base64.StdEncoding.EncodeToString(signature) +} + +// GetMD5Base64 returns a string which has been base64 +func GetMD5Base64(bytes []byte) (base64Value string) { + md5Ctx := md5.New() + md5Ctx.Write(bytes) + md5Value := md5Ctx.Sum(nil) + base64Value = base64.StdEncoding.EncodeToString(md5Value) + return +} + +// GetTimeInFormatISO8601 returns a time string +func GetTimeInFormatISO8601() (timeStr string) { + gmt := time.FixedZone("GMT", 0) + + return time.Now().In(gmt).Format("2006-01-02T15:04:05Z") +} + +// GetURLFormedMap returns a url encoded string +func GetURLFormedMap(source map[string]string) (urlEncoded string) { + urlEncoder := url.Values{} + for key, value := range source { + urlEncoder.Add(key, value) + } + urlEncoded = urlEncoder.Encode() + return +} + +func newUUID() uuid { + ns := uuid{} + safeRandom(ns[:]) + u := newFromHash(md5.New(), ns, RandStringBytes(16)) + u[6] = (u[6] & 0x0f) | (byte(2) << 4) + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + + return u +} + +func newFromHash(h hash.Hash, ns uuid, name string) uuid { + u := uuid{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} + +func safeRandom(dest []byte) { + if _, err := hookRead(rand.Read)(dest); err != nil { + panic(err) + } +} + +func (u uuid) String() string { + buf := make([]byte, 36) + + 
hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +var processStartTime int64 = time.Now().UnixNano() / 1e6 +var seqId int64 = 0 + +func getGID() uint64 { + // https://blog.sgmansfield.com/2015/12/goroutine-ids/ + b := make([]byte, 64) + b = b[:runtime.Stack(b, false)] + b = bytes.TrimPrefix(b, []byte("goroutine ")) + b = b[:bytes.IndexByte(b, ' ')] + n, _ := strconv.ParseUint(string(b), 10, 64) + return n +} + +func GetNonce() (uuidHex string) { + routineId := getGID() + currentTime := time.Now().UnixNano() / 1e6 + seq := atomic.AddInt64(&seqId, 1) + randNum := mathrand.Int63() + msg := fmt.Sprintf("%d-%d-%d-%d-%d", processStartTime, routineId, currentTime, seq, randNum) + h := md5.New() + h.Write([]byte(msg)) + return hex.EncodeToString(h.Sum(nil)) +} + +// Get first non-empty value +func GetDefaultString(values ...string) string { + for _, v := range values { + if v != "" { + return v + } + } + + return "" +} + +// set back the memoried enviroment variables +type Rollback func() + +func Memory(keys ...string) Rollback { + // remenber enviroment variables + m := make(map[string]string) + for _, key := range keys { + m[key] = os.Getenv(key) + } + + return func() { + for _, key := range keys { + os.Setenv(key, m[key]) + } + } +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go deleted file mode 100644 index de0acc7f87..0000000000 --- a/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go +++ /dev/null @@ -1,195 +0,0 @@ -package credentials - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strconv" - "time" - - "github.com/alibabacloud-go/tea/tea" - "github.com/aliyun/credentials-go/credentials/request" - 
"github.com/aliyun/credentials-go/credentials/utils" -) - -const defaultOIDCDurationSeconds = 3600 - -// OIDCCredential is a kind of credentials -type OIDCCredential struct { - *credentialUpdater - AccessKeyId string - AccessKeySecret string - RoleArn string - OIDCProviderArn string - OIDCTokenFilePath string - Policy string - RoleSessionName string - RoleSessionExpiration int - sessionCredential *sessionCredential - runtime *utils.Runtime -} - -type OIDCResponse struct { - Credentials *credentialsInResponse `json:"Credentials" xml:"Credentials"` -} - -type OIDCcredentialsInResponse struct { - AccessKeyId string `json:"AccessKeyId" xml:"AccessKeyId"` - AccessKeySecret string `json:"AccessKeySecret" xml:"AccessKeySecret"` - SecurityToken string `json:"SecurityToken" xml:"SecurityToken"` - Expiration string `json:"Expiration" xml:"Expiration"` -} - -func newOIDCRoleArnCredential(accessKeyId, accessKeySecret, roleArn, OIDCProviderArn, OIDCTokenFilePath, RoleSessionName, policy string, RoleSessionExpiration int, runtime *utils.Runtime) *OIDCCredential { - return &OIDCCredential{ - AccessKeyId: accessKeyId, - AccessKeySecret: accessKeySecret, - RoleArn: roleArn, - OIDCProviderArn: OIDCProviderArn, - OIDCTokenFilePath: OIDCTokenFilePath, - RoleSessionName: RoleSessionName, - Policy: policy, - RoleSessionExpiration: RoleSessionExpiration, - credentialUpdater: new(credentialUpdater), - runtime: runtime, - } -} - -func (e *OIDCCredential) GetCredential() (*CredentialModel, error) { - if e.sessionCredential == nil || e.needUpdateCredential() { - err := e.updateCredential() - if err != nil { - return nil, err - } - } - credential := &CredentialModel{ - AccessKeyId: tea.String(e.sessionCredential.AccessKeyId), - AccessKeySecret: tea.String(e.sessionCredential.AccessKeySecret), - SecurityToken: tea.String(e.sessionCredential.SecurityToken), - Type: tea.String("oidc_role_arn"), - } - return credential, nil -} - -// GetAccessKeyId reutrns OIDCCredential's AccessKeyId -// if 
AccessKeyId is not exist or out of date, the function will update it. -func (r *OIDCCredential) GetAccessKeyId() (*string, error) { - if r.sessionCredential == nil || r.needUpdateCredential() { - err := r.updateCredential() - if err != nil { - return tea.String(""), err - } - } - return tea.String(r.sessionCredential.AccessKeyId), nil -} - -// GetAccessSecret reutrns OIDCCredential's AccessKeySecret -// if AccessKeySecret is not exist or out of date, the function will update it. -func (r *OIDCCredential) GetAccessKeySecret() (*string, error) { - if r.sessionCredential == nil || r.needUpdateCredential() { - err := r.updateCredential() - if err != nil { - return tea.String(""), err - } - } - return tea.String(r.sessionCredential.AccessKeySecret), nil -} - -// GetSecurityToken reutrns OIDCCredential's SecurityToken -// if SecurityToken is not exist or out of date, the function will update it. -func (r *OIDCCredential) GetSecurityToken() (*string, error) { - if r.sessionCredential == nil || r.needUpdateCredential() { - err := r.updateCredential() - if err != nil { - return tea.String(""), err - } - } - return tea.String(r.sessionCredential.SecurityToken), nil -} - -// GetBearerToken is useless OIDCCredential -func (r *OIDCCredential) GetBearerToken() *string { - return tea.String("") -} - -// GetType reutrns OIDCCredential's type -func (r *OIDCCredential) GetType() *string { - return tea.String("oidc_role_arn") -} - -func (r *OIDCCredential) GetOIDCToken(OIDCTokenFilePath string) *string { - tokenPath := OIDCTokenFilePath - _, err := os.Stat(tokenPath) - if os.IsNotExist(err) { - tokenPath = os.Getenv("ALIBABA_CLOUD_OIDC_TOKEN_FILE") - if tokenPath == "" { - return nil - } - } - byt, err := ioutil.ReadFile(tokenPath) - if err != nil { - return nil - } - return tea.String(string(byt)) -} - -func (r *OIDCCredential) updateCredential() (err error) { - if r.runtime == nil { - r.runtime = new(utils.Runtime) - } - request := request.NewCommonRequest() - request.Domain = 
"sts.aliyuncs.com" - if r.runtime.STSEndpoint != "" { - request.Domain = r.runtime.STSEndpoint - } - request.Scheme = "HTTPS" - request.Method = "POST" - request.QueryParams["Timestamp"] = utils.GetTimeInFormatISO8601() - request.QueryParams["Action"] = "AssumeRoleWithOIDC" - request.QueryParams["Format"] = "JSON" - request.BodyParams["RoleArn"] = r.RoleArn - request.BodyParams["OIDCProviderArn"] = r.OIDCProviderArn - token := r.GetOIDCToken(r.OIDCTokenFilePath) - request.BodyParams["OIDCToken"] = tea.StringValue(token) - if r.Policy != "" { - request.QueryParams["Policy"] = r.Policy - } - if r.RoleSessionExpiration > 0 { - request.QueryParams["DurationSeconds"] = strconv.Itoa(r.RoleSessionExpiration) - } - request.QueryParams["RoleSessionName"] = r.RoleSessionName - request.QueryParams["Version"] = "2015-04-01" - request.QueryParams["SignatureNonce"] = utils.GetUUID() - request.Headers["Host"] = request.Domain - request.Headers["Accept-Encoding"] = "identity" - request.Headers["content-type"] = "application/x-www-form-urlencoded" - request.URL = request.BuildURL() - content, err := doAction(request, r.runtime) - if err != nil { - return fmt.Errorf("refresh RoleArn sts token err: %s", err.Error()) - } - var resp *OIDCResponse - err = json.Unmarshal(content, &resp) - if err != nil { - return fmt.Errorf("refresh RoleArn sts token err: Json.Unmarshal fail: %s", err.Error()) - } - if resp == nil || resp.Credentials == nil { - return fmt.Errorf("refresh RoleArn sts token err: Credentials is empty") - } - respCredentials := resp.Credentials - if respCredentials.AccessKeyId == "" || respCredentials.AccessKeySecret == "" || respCredentials.SecurityToken == "" || respCredentials.Expiration == "" { - return fmt.Errorf("refresh RoleArn sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", respCredentials.AccessKeyId, respCredentials.AccessKeySecret, respCredentials.SecurityToken, respCredentials.Expiration) - } - - expirationTime, err := 
time.Parse("2006-01-02T15:04:05Z", respCredentials.Expiration) - r.lastUpdateTimestamp = time.Now().Unix() - r.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix()) - r.sessionCredential = &sessionCredential{ - AccessKeyId: respCredentials.AccessKeyId, - AccessKeySecret: respCredentials.AccessKeySecret, - SecurityToken: respCredentials.SecurityToken, - } - - return -} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/oidc_token b/vendor/github.com/aliyun/credentials-go/credentials/oidc_token deleted file mode 100644 index 653e068dfd..0000000000 --- a/vendor/github.com/aliyun/credentials-go/credentials/oidc_token +++ /dev/null @@ -1 +0,0 @@ -test_long_oidc_token_eyJhbGciOiJSUzI1NiIsImtpZCI6ImFQaXlpNEVGSU8wWnlGcFh1V0psQUNWbklZVlJsUkNmM2tlSzNMUlhWT1UifQ.eyJhdWQiOlsic3RzLmFsaXl1bmNzLmNvbSJdLCJleHAiOjE2NDUxMTk3ODAsImlhdCI6MTY0NTA4Mzc4MCwiaXNzIjoiaHR0cHM6Ly9vaWRjLWFjay1jbi1oYW5nemhvdS5vc3MtY24taGFuZ3pob3UtaW50ZXJuYWwuYWxpeXVuY3MuY29tL2NmMWQ4ZGIwMjM0ZDk0YzEyOGFiZDM3MTc4NWJjOWQxNSIsImt1YmVybmV0ZXMuaW8iOnsibmFtZXNwYWNlIjoidGVzdC1ycnNhIiwicG9kIjp7Im5hbWUiOiJydW4tYXMtcm9vdCIsInVpZCI6ImIzMGI0MGY2LWNiZTAtNGY0Yy1hZGYyLWM1OGQ4ZmExZTAxMCJ9LCJzZXJ2aWNlYWNjb3VudCI6eyJuYW1lIjoidXNlcjEiLCJ1aWQiOiJiZTEyMzdjYS01MTY4LTQyMzYtYWUyMC00NDM1YjhmMGI4YzAifX0sIm5iZiI6MTY0NTA4Mzc4MCwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OnRlc3QtcnJzYTp1c2VyMSJ9.XGP-wgLj-iMiAHjLe0lZLh7y48Qsj9HzsEbNh706WwerBoxnssdsyGFb9lzd2FyM8CssbAOCstr7OuAMWNdJmDZgpiOGGSbQ-KXXmbfnIS4ix-V3pQF6LVBFr7xJlj20J6YY89um3rv_04t0iCGxKWs2ZMUyU1FbZpIPRep24LVKbUz1saiiVGgDBTIZdHA13Z-jUvYAnsxK_Kj5tc1K-IuQQU0IwSKJh5OShMcdPugMV5LwTL3ogCikfB7yljq5vclBhCeF2lXLIibvwF711TOhuJ5lMlh-a2KkIgwBHhANg_U9k4Mt_VadctfUGc4hxlSbBD0w9o9mDGKwgGmW5Q \ No newline at end of file diff --git a/vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go b/vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go index de02c3dc43..ff89646011 100644 --- 
a/vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go @@ -4,10 +4,10 @@ import ( "errors" "fmt" "os" - "runtime" "strings" "github.com/alibabacloud-go/tea/tea" + "github.com/aliyun/credentials-go/credentials/internal/utils" ini "gopkg.in/ini.v1" ) @@ -17,10 +17,6 @@ type profileProvider struct { var providerProfile = newProfileProvider() -var hookOS = func(goos string) string { - return goos -} - var hookState = func(info os.FileInfo, err error) (os.FileInfo, error) { return info, err } @@ -100,21 +96,21 @@ func (p *profileProvider) resolve() (*Config, error) { } return config, nil default: - return nil, errors.New("Invalid type option, support: access_key, sts, ecs_ram_role, ram_role_arn, rsa_key_pair") + return nil, errors.New("invalid type option, support: access_key, sts, ecs_ram_role, ram_role_arn, rsa_key_pair") } } func getRSAKeyPair(section *ini.Section) (*Config, error) { publicKeyId, err := section.GetKey("public_key_id") if err != nil { - return nil, errors.New("Missing required public_key_id option in profile for rsa_key_pair") + return nil, errors.New("missing required public_key_id option in profile for rsa_key_pair") } if publicKeyId.String() == "" { return nil, errors.New("public_key_id cannot be empty") } privateKeyFile, err := section.GetKey("private_key_file") if err != nil { - return nil, errors.New("Missing required private_key_file option in profile for rsa_key_pair") + return nil, errors.New("missing required private_key_file option in profile for rsa_key_pair") } if privateKeyFile.String() == "" { return nil, errors.New("private_key_file cannot be empty") @@ -143,28 +139,28 @@ func getRSAKeyPair(section *ini.Section) (*Config, error) { func getRAMRoleArn(section *ini.Section) (*Config, error) { accessKeyId, err := section.GetKey("access_key_id") if err != nil { - return nil, errors.New("Missing required access_key_id option in profile for 
ram_role_arn") + return nil, errors.New("missing required access_key_id option in profile for ram_role_arn") } if accessKeyId.String() == "" { return nil, errors.New("access_key_id cannot be empty") } accessKeySecret, err := section.GetKey("access_key_secret") if err != nil { - return nil, errors.New("Missing required access_key_secret option in profile for ram_role_arn") + return nil, errors.New("missing required access_key_secret option in profile for ram_role_arn") } if accessKeySecret.String() == "" { return nil, errors.New("access_key_secret cannot be empty") } roleArn, err := section.GetKey("role_arn") if err != nil { - return nil, errors.New("Missing required role_arn option in profile for ram_role_arn") + return nil, errors.New("missing required role_arn option in profile for ram_role_arn") } if roleArn.String() == "" { return nil, errors.New("role_arn cannot be empty") } roleSessionName, err := section.GetKey("role_session_name") if err != nil { - return nil, errors.New("Missing required role_session_name option in profile for ram_role_arn") + return nil, errors.New("missing required role_session_name option in profile for ram_role_arn") } if roleSessionName.String() == "" { return nil, errors.New("role_session_name cannot be empty") @@ -210,7 +206,7 @@ func getEcsRAMRole(section *ini.Section) (*Config, error) { func getBearerToken(section *ini.Section) (*Config, error) { bearerToken, err := section.GetKey("bearer_token") if err != nil { - return nil, errors.New("Missing required bearer_token option in profile for bearer") + return nil, errors.New("missing required bearer_token option in profile for bearer") } if bearerToken.String() == "" { return nil, errors.New("bearer_token cannot be empty") @@ -225,21 +221,21 @@ func getBearerToken(section *ini.Section) (*Config, error) { func getSTS(section *ini.Section) (*Config, error) { accesskeyid, err := section.GetKey("access_key_id") if err != nil { - return nil, errors.New("Missing required access_key_id 
option in profile for sts") + return nil, errors.New("missing required access_key_id option in profile for sts") } if accesskeyid.String() == "" { return nil, errors.New("access_key_id cannot be empty") } accessKeySecret, err := section.GetKey("access_key_secret") if err != nil { - return nil, errors.New("Missing required access_key_secret option in profile for sts") + return nil, errors.New("missing required access_key_secret option in profile for sts") } if accessKeySecret.String() == "" { return nil, errors.New("access_key_secret cannot be empty") } securityToken, err := section.GetKey("security_token") if err != nil { - return nil, errors.New("Missing required security_token option in profile for sts") + return nil, errors.New("missing required security_token option in profile for sts") } if securityToken.String() == "" { return nil, errors.New("security_token cannot be empty") @@ -256,14 +252,14 @@ func getSTS(section *ini.Section) (*Config, error) { func getAccessKey(section *ini.Section) (*Config, error) { accesskeyid, err := section.GetKey("access_key_id") if err != nil { - return nil, errors.New("Missing required access_key_id option in profile for access_key") + return nil, errors.New("missing required access_key_id option in profile for access_key") } if accesskeyid.String() == "" { return nil, errors.New("access_key_id cannot be empty") } accessKeySecret, err := section.GetKey("access_key_secret") if err != nil { - return nil, errors.New("Missing required access_key_secret option in profile for access_key") + return nil, errors.New("missing required access_key_secret option in profile for access_key") } if accessKeySecret.String() == "" { return nil, errors.New("access_key_secret cannot be empty") @@ -289,30 +285,15 @@ func getType(path, profile string) (*ini.Key, *ini.Section, error) { value, err := section.GetKey("type") if err != nil { - return nil, nil, errors.New("Missing required type option " + err.Error()) + return nil, nil, errors.New("missing 
required type option " + err.Error()) } return value, section, nil } -func getHomePath() string { - if hookOS(runtime.GOOS) == "windows" { - path, ok := os.LookupEnv("USERPROFILE") - if !ok { - return "" - } - return path - } - path, ok := os.LookupEnv("HOME") - if !ok { - return "" - } - return path -} - func checkDefaultPath() (path string, err error) { - path = getHomePath() + path = utils.GetHomePath() if path == "" { - return "", errors.New("The default credential file path is invalid") + return "", errors.New("the default credential file path is invalid") } path = strings.Replace("~/.alibabacloud/credentials", "~", path, 1) _, err = hookState(os.Stat(path)) @@ -333,14 +314,14 @@ func setRuntimeToConfig(config *Config, section *ini.Section) error { if rawConnectTimeout != nil { connectTimeout, err := rawConnectTimeout.Int() if err != nil { - return fmt.Errorf("Please set connect_timeout with an int value") + return fmt.Errorf("please set connect_timeout with an int value") } config.ConnectTimeout = tea.Int(connectTimeout) } if rawTimeout != nil { timeout, err := rawTimeout.Int() if err != nil { - return fmt.Errorf("Please set timeout with an int value") + return fmt.Errorf("please set timeout with an int value") } config.Timeout = tea.Int(timeout) } diff --git a/vendor/github.com/aliyun/credentials-go/credentials/provider.go b/vendor/github.com/aliyun/credentials-go/credentials/provider.go index fe813db330..506e110b95 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/provider.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/provider.go @@ -1,14 +1,15 @@ package credentials -//Environmental virables that may be used by the provider +// Environmental virables that may be used by the provider const ( - ENVCredentialFile = "ALIBABA_CLOUD_CREDENTIALS_FILE" - ENVEcsMetadata = "ALIBABA_CLOUD_ECS_METADATA" - PATHCredentialFile = "~/.alibabacloud/credentials" - ENVRoleArn = "ALIBABA_CLOUD_ROLE_ARN" - ENVOIDCProviderArn = 
"ALIBABA_CLOUD_OIDC_PROVIDER_ARN" - ENVOIDCTokenFile = "ALIBABA_CLOUD_OIDC_TOKEN_FILE" - ENVRoleSessionName = "ALIBABA_CLOUD_ROLE_SESSION_NAME" + ENVCredentialFile = "ALIBABA_CLOUD_CREDENTIALS_FILE" + ENVEcsMetadata = "ALIBABA_CLOUD_ECS_METADATA" + ENVEcsMetadataIMDSv2Enable = "ALIBABA_CLOUD_ECS_IMDSV2_ENABLE" + PATHCredentialFile = "~/.alibabacloud/credentials" + ENVRoleArn = "ALIBABA_CLOUD_ROLE_ARN" + ENVOIDCProviderArn = "ALIBABA_CLOUD_OIDC_PROVIDER_ARN" + ENVOIDCTokenFile = "ALIBABA_CLOUD_OIDC_TOKEN_FILE" + ENVRoleSessionName = "ALIBABA_CLOUD_ROLE_SESSION_NAME" ) // Provider will be implemented When you want to customize the provider. diff --git a/vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go b/vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go index a694d5cb58..e43388612d 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go @@ -27,6 +27,6 @@ func (p *providerChain) resolve() (*Config, error) { } return config, err } - return nil, errors.New("No credential found") + return nil, errors.New("no credential found") } diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/cli_profile.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/cli_profile.go new file mode 100644 index 0000000000..fc5e378f65 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/cli_profile.go @@ -0,0 +1,506 @@ +package providers + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + "strings" + "sync" + "time" + + "github.com/aliyun/credentials-go/credentials/internal/utils" +) + +type CLIProfileCredentialsProvider struct { + profileFile string + profileName string + innerProvider CredentialsProvider + // 文件锁,用于并发安全 + fileMutex sync.RWMutex +} + +type CLIProfileCredentialsProviderBuilder struct { + provider *CLIProfileCredentialsProvider +} + 
+func (b *CLIProfileCredentialsProviderBuilder) WithProfileFile(profileFile string) *CLIProfileCredentialsProviderBuilder { + b.provider.profileFile = profileFile + return b +} + +func (b *CLIProfileCredentialsProviderBuilder) WithProfileName(profileName string) *CLIProfileCredentialsProviderBuilder { + b.provider.profileName = profileName + return b +} + +func (b *CLIProfileCredentialsProviderBuilder) Build() (provider *CLIProfileCredentialsProvider, err error) { + // 优先级: + // 1. 使用显示指定的 profileFile + // 2. 使用环境变量(ALIBABA_CLOUD_CONFIG_FILE)指定的 profileFile + // 3. 兜底使用 path.Join(homeDir, ".aliyun/config") 作为 profileFile + if b.provider.profileFile == "" { + b.provider.profileFile = os.Getenv("ALIBABA_CLOUD_CONFIG_FILE") + } + // 优先级: + // 1. 使用显示指定的 profileName + // 2. 使用环境变量(ALIBABA_CLOUD_PROFILE)制定的 profileName + // 3. 使用 CLI 配置中的当前 profileName + if b.provider.profileName == "" { + b.provider.profileName = os.Getenv("ALIBABA_CLOUD_PROFILE") + } + + if strings.ToLower(os.Getenv("ALIBABA_CLOUD_CLI_PROFILE_DISABLED")) == "true" { + err = errors.New("the CLI profile is disabled") + return + } + + provider = b.provider + return +} + +func NewCLIProfileCredentialsProviderBuilder() *CLIProfileCredentialsProviderBuilder { + return &CLIProfileCredentialsProviderBuilder{ + provider: &CLIProfileCredentialsProvider{}, + } +} + +type profile struct { + Name string `json:"name"` + Mode string `json:"mode"` + AccessKeyID string `json:"access_key_id"` + AccessKeySecret string `json:"access_key_secret"` + SecurityToken string `json:"sts_token"` + RegionID string `json:"region_id"` + RoleArn string `json:"ram_role_arn"` + RoleSessionName string `json:"ram_session_name"` + DurationSeconds int `json:"expired_seconds"` + StsRegion string `json:"sts_region"` + EnableVpc bool `json:"enable_vpc"` + SourceProfile string `json:"source_profile"` + RoleName string `json:"ram_role_name"` + OIDCTokenFile string `json:"oidc_token_file"` + OIDCProviderARN string `json:"oidc_provider_arn"` + 
Policy string `json:"policy"` + ExternalId string `json:"external_id"` + SignInUrl string `json:"cloud_sso_sign_in_url"` + AccountId string `json:"cloud_sso_account_id"` + AccessConfig string `json:"cloud_sso_access_config"` + AccessToken string `json:"access_token"` + AccessTokenExpire int64 `json:"cloud_sso_access_token_expire"` + OauthSiteType string `json:"oauth_site_type"` + OauthRefreshToken string `json:"oauth_refresh_token"` + OauthAccessToken string `json:"oauth_access_token"` + OauthAccessTokenExpire int64 `json:"oauth_access_token_expire"` + StsExpire int64 `json:"sts_expiration"` + ProcessCommand string `json:"process_command"` +} + +type configuration struct { + Current string `json:"current"` + Profiles []*profile `json:"profiles"` +} + +func newConfigurationFromPath(cfgPath string) (conf *configuration, err error) { + bytes, err := ioutil.ReadFile(cfgPath) + if err != nil { + err = fmt.Errorf("reading aliyun cli config from '%s' failed %v", cfgPath, err) + return + } + + conf = &configuration{} + + err = json.Unmarshal(bytes, conf) + if err != nil { + err = fmt.Errorf("unmarshal aliyun cli config from '%s' failed: %s", cfgPath, string(bytes)) + return + } + + if conf.Profiles == nil || len(conf.Profiles) == 0 { + err = fmt.Errorf("no any configured profiles in '%s'", cfgPath) + return + } + + return +} + +func (conf *configuration) getProfile(name string) (profile *profile, err error) { + for _, p := range conf.Profiles { + if p.Name == name { + profile = p + return + } + } + + err = fmt.Errorf("unable to get profile with '%s'", name) + return +} + +var oauthBaseUrlMap = map[string]string{ + "CN": "https://oauth.aliyun.com", + "INTL": "https://oauth.alibabacloud.com", +} + +var oauthClientMap = map[string]string{ + "CN": "4038181954557748008", + "INTL": "4103531455503354461", +} + +func (provider *CLIProfileCredentialsProvider) getCredentialsProvider(conf *configuration, profileName string) (credentialsProvider CredentialsProvider, err error) { + p, 
err := conf.getProfile(profileName) + if err != nil { + return + } + + switch p.Mode { + case "AK": + credentialsProvider, err = NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(p.AccessKeyID). + WithAccessKeySecret(p.AccessKeySecret). + Build() + case "StsToken": + credentialsProvider, err = NewStaticSTSCredentialsProviderBuilder(). + WithAccessKeyId(p.AccessKeyID). + WithAccessKeySecret(p.AccessKeySecret). + WithSecurityToken(p.SecurityToken). + Build() + case "RamRoleArn": + previousProvider, err1 := NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(p.AccessKeyID). + WithAccessKeySecret(p.AccessKeySecret). + Build() + if err1 != nil { + return nil, err1 + } + + credentialsProvider, err = NewRAMRoleARNCredentialsProviderBuilder(). + WithCredentialsProvider(previousProvider). + WithRoleArn(p.RoleArn). + WithRoleSessionName(p.RoleSessionName). + WithDurationSeconds(p.DurationSeconds). + WithStsRegionId(p.StsRegion). + WithEnableVpc(p.EnableVpc). + WithPolicy(p.Policy). + WithExternalId(p.ExternalId). + Build() + case "EcsRamRole": + credentialsProvider, err = NewECSRAMRoleCredentialsProviderBuilder().WithRoleName(p.RoleName).Build() + case "OIDC": + credentialsProvider, err = NewOIDCCredentialsProviderBuilder(). + WithOIDCTokenFilePath(p.OIDCTokenFile). + WithOIDCProviderARN(p.OIDCProviderARN). + WithRoleArn(p.RoleArn). + WithStsRegionId(p.StsRegion). + WithEnableVpc(p.EnableVpc). + WithDurationSeconds(p.DurationSeconds). + WithRoleSessionName(p.RoleSessionName). + WithPolicy(p.Policy). + Build() + case "ChainableRamRoleArn": + previousProvider, err1 := provider.getCredentialsProvider(conf, p.SourceProfile) + if err1 != nil { + err = fmt.Errorf("get source profile failed: %s", err1.Error()) + return + } + credentialsProvider, err = NewRAMRoleARNCredentialsProviderBuilder(). + WithCredentialsProvider(previousProvider). + WithRoleArn(p.RoleArn). + WithRoleSessionName(p.RoleSessionName). + WithDurationSeconds(p.DurationSeconds). 
+ WithStsRegionId(p.StsRegion). + WithEnableVpc(p.EnableVpc). + WithPolicy(p.Policy). + WithExternalId(p.ExternalId). + Build() + case "CloudSSO": + credentialsProvider, err = NewCloudSSOCredentialsProviderBuilder(). + WithSignInUrl(p.SignInUrl). + WithAccountId(p.AccountId). + WithAccessConfig(p.AccessConfig). + WithAccessToken(p.AccessToken). + WithAccessTokenExpire(p.AccessTokenExpire). + Build() + case "OAuth": + siteType := strings.ToUpper(p.OauthSiteType) + signInUrl := oauthBaseUrlMap[siteType] + if signInUrl == "" { + err = fmt.Errorf("invalid site type, support CN or INTL") + return + } + clientId := oauthClientMap[siteType] + + credentialsProvider, err = NewOAuthCredentialsProviderBuilder(). + WithSignInUrl(signInUrl). + WithClientId(clientId). + WithRefreshToken(p.OauthRefreshToken). + WithAccessToken(p.OauthAccessToken). + WithAccessTokenExpire(p.OauthAccessTokenExpire). + WithTokenUpdateCallback(provider.getOAuthTokenUpdateCallback()). + Build() + case "External": + credentialsProvider, err = NewExternalCredentialsProviderBuilder(). + WithProcessCommand(p.ProcessCommand). + WithCredentialUpdateCallback(provider.getExternalCredentialUpdateCallback()). 
+ Build() + default: + err = fmt.Errorf("unsupported profile mode '%s'", p.Mode) + } + + return +} + +// 默认设置为 GetHomePath,测试时便于 mock +var getHomePath = utils.GetHomePath + +func (provider *CLIProfileCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.innerProvider == nil { + cfgPath := provider.profileFile + if cfgPath == "" { + homeDir := getHomePath() + if homeDir == "" { + err = fmt.Errorf("cannot found home dir") + return + } + + cfgPath = path.Join(homeDir, ".aliyun/config.json") + provider.profileFile = cfgPath + } + + conf, err1 := newConfigurationFromPath(cfgPath) + if err1 != nil { + err = err1 + return + } + + if provider.profileName == "" { + provider.profileName = conf.Current + } + + provider.innerProvider, err = provider.getCredentialsProvider(conf, provider.profileName) + if err != nil { + return + } + } + + innerCC, err := provider.innerProvider.GetCredentials() + if err != nil { + return + } + + providerName := innerCC.ProviderName + if providerName == "" { + providerName = provider.innerProvider.GetProviderName() + } + + cc = &Credentials{ + AccessKeyId: innerCC.AccessKeyId, + AccessKeySecret: innerCC.AccessKeySecret, + SecurityToken: innerCC.SecurityToken, + ProviderName: fmt.Sprintf("%s/%s", provider.GetProviderName(), providerName), + } + + return +} + +func (provider *CLIProfileCredentialsProvider) GetProviderName() string { + return "cli_profile" +} + +// findSourceOAuthProfile 递归查找 OAuth source profile +func (conf *configuration) findSourceOAuthProfile(profileName string) (*profile, error) { + profile, err := conf.getProfile(profileName) + if err != nil { + return nil, fmt.Errorf("unable to get profile with name '%s' from cli credentials file: %v", profileName, err) + } + + if profile.Mode == "OAuth" { + return profile, nil + } + + if profile.SourceProfile != "" { + return conf.findSourceOAuthProfile(profile.SourceProfile) + } + + return nil, fmt.Errorf("unable to get OAuth profile with name '%s' from cli 
credentials file", profileName) +} + +// updateOAuthTokens 更新OAuth令牌并写回配置文件 +func (provider *CLIProfileCredentialsProvider) updateOAuthTokens(refreshToken, accessToken, accessKey, secret, securityToken string, accessTokenExpire, stsExpire int64) error { + provider.fileMutex.Lock() + defer provider.fileMutex.Unlock() + + cfgPath := provider.profileFile + conf, err := newConfigurationFromPath(cfgPath) + if err != nil { + return fmt.Errorf("failed to read config file: %v", err) + } + + profileName := provider.profileName + if profileName == "" { + profileName = conf.Current + } + if profileName == "" { + return fmt.Errorf("unable to get profile to update") + } + + // 递归查找真正的 OAuth source profile + sourceProfile, err := conf.findSourceOAuthProfile(profileName) + if err != nil { + return fmt.Errorf("failed to find OAuth source profile: %v", err) + } + + // update OAuth tokens + sourceProfile.OauthRefreshToken = refreshToken + sourceProfile.OauthAccessToken = accessToken + sourceProfile.OauthAccessTokenExpire = accessTokenExpire + // update STS credentials + sourceProfile.AccessKeyID = accessKey + sourceProfile.AccessKeySecret = secret + sourceProfile.SecurityToken = securityToken + sourceProfile.StsExpire = stsExpire + + // write back with file lock + return provider.writeConfigurationToFileWithLock(cfgPath, conf) +} + +// writeConfigurationToFile 将配置写入文件,使用原子写入确保数据完整性 +func (provider *CLIProfileCredentialsProvider) writeConfigurationToFile(cfgPath string, conf *configuration) error { + // 获取原文件权限(如果存在) + fileMode := os.FileMode(0644) + if stat, err := os.Stat(cfgPath); err == nil { + fileMode = stat.Mode() + } + + // 创建唯一临时文件 + tempFile := cfgPath + ".tmp-" + strconv.FormatInt(time.Now().UnixNano(), 10) + + // 写入临时文件 + err := provider.writeConfigFile(tempFile, fileMode, conf) + if err != nil { + return fmt.Errorf("failed to write temp file: %v", err) + } + + // 原子性重命名,确保文件完整性 + err = os.Rename(tempFile, cfgPath) + if err != nil { + // 清理临时文件 + os.Remove(tempFile) + 
return fmt.Errorf("failed to rename temp file: %v", err) + } + + return nil +} + +// writeConfigFile 写入配置文件 +func (provider *CLIProfileCredentialsProvider) writeConfigFile(filename string, fileMode os.FileMode, conf *configuration) error { + f, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode) + if err != nil { + return fmt.Errorf("failed to create config file: %w", err) + } + + defer func() { + closeErr := f.Close() + if err == nil && closeErr != nil { + err = fmt.Errorf("failed to close config file: %w", closeErr) + } + }() + + encoder := json.NewEncoder(f) + encoder.SetIndent("", " ") + + if err = encoder.Encode(conf); err != nil { + return fmt.Errorf("failed to serialize config: %w", err) + } + + return nil +} + +// writeConfigurationToFileWithLock 使用操作系统级别的文件锁写入配置文件 +func (provider *CLIProfileCredentialsProvider) writeConfigurationToFileWithLock(cfgPath string, conf *configuration) error { + // 获取原文件权限(如果存在) + fileMode := os.FileMode(0644) + if stat, err := os.Stat(cfgPath); err == nil { + fileMode = stat.Mode() + } + + // 打开文件用于锁定 + file, err := os.OpenFile(cfgPath, os.O_RDWR|os.O_CREATE, fileMode) + if err != nil { + return fmt.Errorf("failed to open config file: %v", err) + } + + // 获取独占锁(阻塞其他进程) + err = lockFile(int(file.Fd())) + if err != nil { + file.Close() + return fmt.Errorf("failed to acquire file lock: %v", err) + } + + // 创建唯一临时文件 + tempFile := cfgPath + ".tmp-" + strconv.FormatInt(time.Now().UnixNano(), 10) + err = provider.writeConfigFile(tempFile, fileMode, conf) + if err != nil { + unlockFile(int(file.Fd())) + file.Close() + return fmt.Errorf("failed to write temp file: %v", err) + } + + // 关闭并解锁原文件,以便在Windows上可以重命名 + unlockFile(int(file.Fd())) + file.Close() + + // 原子性重命名 + err = os.Rename(tempFile, cfgPath) + if err != nil { + os.Remove(tempFile) + return fmt.Errorf("failed to rename temp file: %v", err) + } + + return nil +} + +// getOAuthTokenUpdateCallback 获取OAuth令牌更新回调函数 +func (provider 
*CLIProfileCredentialsProvider) getOAuthTokenUpdateCallback() OAuthTokenUpdateCallback { + return func(refreshToken, accessToken, accessKey, secret, securityToken string, accessTokenExpire, stsExpire int64) error { + return provider.updateOAuthTokens(refreshToken, accessToken, accessKey, secret, securityToken, accessTokenExpire, stsExpire) + } +} + +// getExternalCredentialUpdateCallback 获取External凭证更新回调函数 +func (provider *CLIProfileCredentialsProvider) getExternalCredentialUpdateCallback() ExternalCredentialUpdateCallback { + return func(accessKeyId, accessKeySecret, securityToken string, expiration int64) error { + return provider.updateExternalCredentials(accessKeyId, accessKeySecret, securityToken, expiration) + } +} + +// updateExternalCredentials 更新External凭证并写回配置文件 +func (provider *CLIProfileCredentialsProvider) updateExternalCredentials(accessKeyId, accessKeySecret, securityToken string, expiration int64) error { + provider.fileMutex.Lock() + defer provider.fileMutex.Unlock() + + cfgPath := provider.profileFile + conf, err := newConfigurationFromPath(cfgPath) + if err != nil { + return fmt.Errorf("failed to read config file: %v", err) + } + + profileName := provider.profileName + profile, err := conf.getProfile(profileName) + if err != nil { + return fmt.Errorf("failed to get profile %s: %v", profileName, err) + } + + // update + profile.AccessKeyID = accessKeyId + profile.AccessKeySecret = accessKeySecret + profile.SecurityToken = securityToken + if expiration > 0 { + profile.StsExpire = expiration + } + + // write back with file lock + return provider.writeConfigurationToFileWithLock(cfgPath, conf) +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/cloud_sso.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/cloud_sso.go new file mode 100644 index 0000000000..7bc29b243a --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/cloud_sso.go @@ -0,0 +1,216 @@ +package providers + +import ( + 
"encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "time" + + httputil "github.com/aliyun/credentials-go/credentials/internal/http" +) + +type CloudSSOCredentialsProvider struct { + signInUrl string + accountId string + accessConfig string + accessToken string + accessTokenExpire int64 + + lastUpdateTimestamp int64 + expirationTimestamp int64 + sessionCredentials *sessionCredentials + // for http options + httpOptions *HttpOptions +} + +type CloudSSOCredentialsProviderBuilder struct { + provider *CloudSSOCredentialsProvider +} + +type cloudCredentialOptions struct { + AccountId string `json:"AccountId"` + AccessConfigurationId string `json:"AccessConfigurationId"` +} + +type cloudCredentials struct { + AccessKeyId string `json:"AccessKeyId"` + AccessKeySecret string `json:"AccessKeySecret"` + SecurityToken string `json:"SecurityToken"` + Expiration string `json:"Expiration"` +} + +type cloudCredentialResponse struct { + CloudCredential *cloudCredentials `json:"CloudCredential"` + RequestId string `json:"RequestId"` +} + +func NewCloudSSOCredentialsProviderBuilder() *CloudSSOCredentialsProviderBuilder { + return &CloudSSOCredentialsProviderBuilder{ + provider: &CloudSSOCredentialsProvider{}, + } +} + +func (b *CloudSSOCredentialsProviderBuilder) WithSignInUrl(signInUrl string) *CloudSSOCredentialsProviderBuilder { + b.provider.signInUrl = signInUrl + return b +} + +func (b *CloudSSOCredentialsProviderBuilder) WithAccountId(accountId string) *CloudSSOCredentialsProviderBuilder { + b.provider.accountId = accountId + return b +} + +func (b *CloudSSOCredentialsProviderBuilder) WithAccessConfig(accessConfig string) *CloudSSOCredentialsProviderBuilder { + b.provider.accessConfig = accessConfig + return b +} + +func (b *CloudSSOCredentialsProviderBuilder) WithAccessToken(accessToken string) *CloudSSOCredentialsProviderBuilder { + b.provider.accessToken = accessToken + return b +} + +func (b *CloudSSOCredentialsProviderBuilder) 
WithAccessTokenExpire(accessTokenExpire int64) *CloudSSOCredentialsProviderBuilder { + b.provider.accessTokenExpire = accessTokenExpire + return b +} + +func (b *CloudSSOCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *CloudSSOCredentialsProviderBuilder { + b.provider.httpOptions = httpOptions + return b +} + +func (b *CloudSSOCredentialsProviderBuilder) Build() (provider *CloudSSOCredentialsProvider, err error) { + if b.provider.accessToken == "" || b.provider.accessTokenExpire == 0 || b.provider.accessTokenExpire-time.Now().Unix() <= 0 { + err = errors.New("CloudSSO access token is empty or expired, please re-login with cli") + return + } + + if b.provider.signInUrl == "" || b.provider.accountId == "" || b.provider.accessConfig == "" { + err = errors.New("CloudSSO sign in url or account id or access config is empty") + return + } + + provider = b.provider + return +} + +func (provider *CloudSSOCredentialsProvider) getCredentials() (session *sessionCredentials, err error) { + url, err := url.Parse(provider.signInUrl) + if err != nil { + return nil, err + } + + req := &httputil.Request{ + Method: "POST", + Protocol: url.Scheme, + Host: url.Host, + Path: "/cloud-credentials", + Headers: map[string]string{}, + } + + connectTimeout := 5 * time.Second + readTimeout := 10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + body := cloudCredentialOptions{ + AccountId: provider.accountId, + AccessConfigurationId: provider.accessConfig, + } + + bodyBytes, err := 
json.Marshal(body) + if err != nil { + return nil, fmt.Errorf("failed to marshal options: %w", err) + } + + req.Body = bodyBytes + + // set headers + req.Headers["Accept"] = "application/json" + req.Headers["Content-Type"] = "application/json" + req.Headers["Authorization"] = fmt.Sprintf("Bearer %s", provider.accessToken) + res, err := httpDo(req) + if err != nil { + return + } + + if res.StatusCode != http.StatusOK { + message := "get session token from sso failed: " + err = errors.New(message + string(res.Body)) + return + } + var data cloudCredentialResponse + err = json.Unmarshal(res.Body, &data) + if err != nil { + err = fmt.Errorf("get session token from sso failed, json.Unmarshal fail: %s", err.Error()) + return + } + if data.CloudCredential == nil { + err = fmt.Errorf("get session token from sso failed, fail to get credentials") + return + } + + if data.CloudCredential.AccessKeyId == "" || data.CloudCredential.AccessKeySecret == "" || data.CloudCredential.SecurityToken == "" { + err = fmt.Errorf("refresh session token err, fail to get credentials") + return + } + + session = &sessionCredentials{ + AccessKeyId: data.CloudCredential.AccessKeyId, + AccessKeySecret: data.CloudCredential.AccessKeySecret, + SecurityToken: data.CloudCredential.SecurityToken, + Expiration: data.CloudCredential.Expiration, + } + return +} + +func (provider *CloudSSOCredentialsProvider) needUpdateCredential() (result bool) { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *CloudSSOCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.sessionCredentials == nil || provider.needUpdateCredential() { + sessionCredentials, err1 := provider.getCredentials() + if err1 != nil { + return nil, err1 + } + + provider.sessionCredentials = sessionCredentials + expirationTime, err2 := time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration) + if err2 != nil { + 
return nil, err2 + } + + provider.lastUpdateTimestamp = time.Now().Unix() + provider.expirationTimestamp = expirationTime.Unix() + } + + cc = &Credentials{ + AccessKeyId: provider.sessionCredentials.AccessKeyId, + AccessKeySecret: provider.sessionCredentials.AccessKeySecret, + SecurityToken: provider.sessionCredentials.SecurityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *CloudSSOCredentialsProvider) GetProviderName() string { + return "cloud_sso" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/credentials.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/credentials.go new file mode 100644 index 0000000000..26592fd226 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/credentials.go @@ -0,0 +1,22 @@ +package providers + +// 下一版本 Credentials 包 +// - 分离 bearer token +// - 从 config 传递迁移到真正的 credentials provider 模式 +// - 删除 GetAccessKeyId()/GetAccessKeySecret()/GetSecurityToken() 方法,只保留 GetCredentials() + +// The credentials struct +type Credentials struct { + AccessKeyId string + AccessKeySecret string + SecurityToken string + ProviderName string +} + +// The credentials provider interface, return credentials and provider name +type CredentialsProvider interface { + // Get credentials + GetCredentials() (*Credentials, error) + // Get credentials provider name + GetProviderName() string +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/default.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/default.go new file mode 100644 index 0000000000..597625f6f5 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/default.go @@ -0,0 +1,113 @@ +package providers + +import ( + "fmt" + "os" + "strings" +) + +type DefaultCredentialsProvider struct { + providerChain []CredentialsProvider + lastUsedProvider CredentialsProvider +} + +func NewDefaultCredentialsProvider() (provider 
*DefaultCredentialsProvider) { + providers := []CredentialsProvider{} + + // Add static ak or sts credentials provider + envProvider, err := NewEnvironmentVariableCredentialsProviderBuilder().Build() + if err == nil { + providers = append(providers, envProvider) + } + + // oidc check + oidcProvider, err := NewOIDCCredentialsProviderBuilder().Build() + if err == nil { + providers = append(providers, oidcProvider) + } + + // cli credentials provider + cliProfileProvider, err := NewCLIProfileCredentialsProviderBuilder().Build() + if err == nil { + providers = append(providers, cliProfileProvider) + } + + // profile credentials provider + profileProvider, err := NewProfileCredentialsProviderBuilder().Build() + if err == nil { + providers = append(providers, profileProvider) + } + + // Add IMDS + ecsRamRoleProvider, err := NewECSRAMRoleCredentialsProviderBuilder().Build() + if err == nil { + providers = append(providers, ecsRamRoleProvider) + } + + // credentials uri + if os.Getenv("ALIBABA_CLOUD_CREDENTIALS_URI") != "" { + credentialsUriProvider, err := NewURLCredentialsProviderBuilder().Build() + if err == nil { + providers = append(providers, credentialsUriProvider) + } + } + + return &DefaultCredentialsProvider{ + providerChain: providers, + } +} + +func (provider *DefaultCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.lastUsedProvider != nil { + inner, err1 := provider.lastUsedProvider.GetCredentials() + if err1 != nil { + err = err1 + return + } + + providerName := inner.ProviderName + if providerName == "" { + providerName = provider.lastUsedProvider.GetProviderName() + } + + cc = &Credentials{ + AccessKeyId: inner.AccessKeyId, + AccessKeySecret: inner.AccessKeySecret, + SecurityToken: inner.SecurityToken, + ProviderName: fmt.Sprintf("%s/%s", provider.GetProviderName(), providerName), + } + return + } + + errors := []string{} + for _, p := range provider.providerChain { + provider.lastUsedProvider = p + inner, errInLoop := 
p.GetCredentials() + if errInLoop != nil { + errors = append(errors, errInLoop.Error()) + // 如果有错误,进入下一个获取过程 + continue + } + + if inner != nil { + providerName := inner.ProviderName + if providerName == "" { + providerName = p.GetProviderName() + } + cc = &Credentials{ + AccessKeyId: inner.AccessKeyId, + AccessKeySecret: inner.AccessKeySecret, + SecurityToken: inner.SecurityToken, + ProviderName: fmt.Sprintf("%s/%s", provider.GetProviderName(), providerName), + } + return + } + } + + err = fmt.Errorf("unable to get credentials from any of the providers in the chain: %s", strings.Join(errors, ", ")) + return +} + +func (provider *DefaultCredentialsProvider) GetProviderName() string { + return "default" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/ecs_ram_role.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/ecs_ram_role.go new file mode 100644 index 0000000000..9a917b2bf0 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/ecs_ram_role.go @@ -0,0 +1,283 @@ +package providers + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "strconv" + "strings" + "time" + + httputil "github.com/aliyun/credentials-go/credentials/internal/http" +) + +type ECSRAMRoleCredentialsProvider struct { + roleName string + disableIMDSv1 bool + // for sts + session *sessionCredentials + expirationTimestamp int64 + // for http options + httpOptions *HttpOptions +} + +type ECSRAMRoleCredentialsProviderBuilder struct { + provider *ECSRAMRoleCredentialsProvider +} + +func NewECSRAMRoleCredentialsProviderBuilder() *ECSRAMRoleCredentialsProviderBuilder { + return &ECSRAMRoleCredentialsProviderBuilder{ + provider: &ECSRAMRoleCredentialsProvider{}, + } +} + +func (builder *ECSRAMRoleCredentialsProviderBuilder) WithRoleName(roleName string) *ECSRAMRoleCredentialsProviderBuilder { + builder.provider.roleName = roleName + return builder +} + +func (builder *ECSRAMRoleCredentialsProviderBuilder) 
WithDisableIMDSv1(disableIMDSv1 bool) *ECSRAMRoleCredentialsProviderBuilder { + builder.provider.disableIMDSv1 = disableIMDSv1 + return builder +} + +func (builder *ECSRAMRoleCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *ECSRAMRoleCredentialsProviderBuilder { + builder.provider.httpOptions = httpOptions + return builder +} + +const defaultMetadataTokenDuration = 21600 // 6 hours + +func (builder *ECSRAMRoleCredentialsProviderBuilder) Build() (provider *ECSRAMRoleCredentialsProvider, err error) { + + if strings.ToLower(os.Getenv("ALIBABA_CLOUD_ECS_METADATA_DISABLED")) == "true" { + err = errors.New("IMDS credentials is disabled") + return + } + + // 设置 roleName 默认值 + if builder.provider.roleName == "" { + builder.provider.roleName = os.Getenv("ALIBABA_CLOUD_ECS_METADATA") + } + + if !builder.provider.disableIMDSv1 { + builder.provider.disableIMDSv1 = strings.ToLower(os.Getenv("ALIBABA_CLOUD_IMDSV1_DISABLED")) == "true" + } + + provider = builder.provider + return +} + +type ecsRAMRoleResponse struct { + Code *string `json:"Code"` + AccessKeyId *string `json:"AccessKeyId"` + AccessKeySecret *string `json:"AccessKeySecret"` + SecurityToken *string `json:"SecurityToken"` + LastUpdated *string `json:"LastUpdated"` + Expiration *string `json:"Expiration"` +} + +func (provider *ECSRAMRoleCredentialsProvider) needUpdateCredential() bool { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *ECSRAMRoleCredentialsProvider) getRoleName() (roleName string, err error) { + req := &httputil.Request{ + Method: "GET", + Protocol: "http", + Host: "100.100.100.200", + Path: "/latest/meta-data/ram/security-credentials/", + Headers: map[string]string{}, + } + + connectTimeout := 1 * time.Second + readTimeout := 1 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = 
time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + metadataToken, err := provider.getMetadataToken() + if err != nil { + return "", err + } + if metadataToken != "" { + req.Headers["x-aliyun-ecs-metadata-token"] = metadataToken + } + + res, err := httpDo(req) + if err != nil { + err = fmt.Errorf("get role name failed: %s", err.Error()) + return + } + + if res.StatusCode != 200 { + err = fmt.Errorf("get role name failed: %s %d", req.BuildRequestURL(), res.StatusCode) + return + } + + roleName = strings.TrimSpace(string(res.Body)) + return +} + +func (provider *ECSRAMRoleCredentialsProvider) getCredentials() (session *sessionCredentials, err error) { + roleName := provider.roleName + if roleName == "" { + roleName, err = provider.getRoleName() + if err != nil { + return + } + } + + req := &httputil.Request{ + Method: "GET", + Protocol: "http", + Host: "100.100.100.200", + Path: "/latest/meta-data/ram/security-credentials/" + roleName, + Headers: map[string]string{}, + } + + connectTimeout := 1 * time.Second + readTimeout := 1 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + metadataToken, err := 
provider.getMetadataToken() + if err != nil { + return nil, err + } + if metadataToken != "" { + req.Headers["x-aliyun-ecs-metadata-token"] = metadataToken + } + + res, err := httpDo(req) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err: %s", err.Error()) + return + } + + if res.StatusCode != 200 { + err = fmt.Errorf("refresh Ecs sts token err, httpStatus: %d, message = %s", res.StatusCode, string(res.Body)) + return + } + + var data ecsRAMRoleResponse + err = json.Unmarshal(res.Body, &data) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err, json.Unmarshal fail: %s", err.Error()) + return + } + + if data.AccessKeyId == nil || data.AccessKeySecret == nil || data.SecurityToken == nil { + err = fmt.Errorf("refresh Ecs sts token err, fail to get credentials") + return + } + + if *data.Code != "Success" { + err = fmt.Errorf("refresh Ecs sts token err, Code is not Success") + return + } + + session = &sessionCredentials{ + AccessKeyId: *data.AccessKeyId, + AccessKeySecret: *data.AccessKeySecret, + SecurityToken: *data.SecurityToken, + Expiration: *data.Expiration, + } + return +} + +func (provider *ECSRAMRoleCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.session == nil || provider.needUpdateCredential() { + session, err1 := provider.getCredentials() + if err1 != nil { + return nil, err1 + } + + provider.session = session + expirationTime, err2 := time.Parse("2006-01-02T15:04:05Z", session.Expiration) + if err2 != nil { + return nil, err2 + } + provider.expirationTimestamp = expirationTime.Unix() + } + + cc = &Credentials{ + AccessKeyId: provider.session.AccessKeyId, + AccessKeySecret: provider.session.AccessKeySecret, + SecurityToken: provider.session.SecurityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *ECSRAMRoleCredentialsProvider) GetProviderName() string { + return "ecs_ram_role" +} + +func (provider *ECSRAMRoleCredentialsProvider) getMetadataToken() 
(metadataToken string, err error) { + // PUT http://100.100.100.200/latest/api/token + req := &httputil.Request{ + Method: "PUT", + Protocol: "http", + Host: "100.100.100.200", + Path: "/latest/api/token", + Headers: map[string]string{ + "X-aliyun-ecs-metadata-token-ttl-seconds": strconv.Itoa(defaultMetadataTokenDuration), + }, + } + + connectTimeout := 1 * time.Second + readTimeout := 1 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + res, _err := httpDo(req) + if _err != nil { + if provider.disableIMDSv1 { + err = fmt.Errorf("get metadata token failed: %s", _err.Error()) + } + return + } + if res.StatusCode != 200 { + if provider.disableIMDSv1 { + err = fmt.Errorf("refresh Ecs sts token err, httpStatus: %d, message = %s", res.StatusCode, string(res.Body)) + } + return + } + metadataToken = string(res.Body) + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/env.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/env.go new file mode 100644 index 0000000000..27fe33b9e6 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/env.go @@ -0,0 +1,55 @@ +package providers + +import ( + "fmt" + "os" +) + +type EnvironmentVariableCredentialsProvider struct { +} + +type EnvironmentVariableCredentialsProviderBuilder struct { + provider *EnvironmentVariableCredentialsProvider +} + +func NewEnvironmentVariableCredentialsProviderBuilder() *EnvironmentVariableCredentialsProviderBuilder { + return 
&EnvironmentVariableCredentialsProviderBuilder{ + provider: &EnvironmentVariableCredentialsProvider{}, + } +} + +func (builder *EnvironmentVariableCredentialsProviderBuilder) Build() (provider *EnvironmentVariableCredentialsProvider, err error) { + provider = builder.provider + return +} + +func (provider *EnvironmentVariableCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + accessKeyId := os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_ID") + + if accessKeyId == "" { + err = fmt.Errorf("unable to get credentials from enviroment variables, Access key ID must be specified via environment variable (ALIBABA_CLOUD_ACCESS_KEY_ID)") + return + } + + accessKeySecret := os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_SECRET") + + if accessKeySecret == "" { + err = fmt.Errorf("unable to get credentials from enviroment variables, Access key secret must be specified via environment variable (ALIBABA_CLOUD_ACCESS_KEY_SECRET)") + return + } + + securityToken := os.Getenv("ALIBABA_CLOUD_SECURITY_TOKEN") + + cc = &Credentials{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + SecurityToken: securityToken, + ProviderName: provider.GetProviderName(), + } + + return +} + +func (provider *EnvironmentVariableCredentialsProvider) GetProviderName() string { + return "env" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/external.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/external.go new file mode 100644 index 0000000000..07e3529352 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/external.go @@ -0,0 +1,253 @@ +package providers + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "strings" + "sync" + "time" +) + +type ExternalOptions struct { + // Timeout, in milliseconds. 
+ Timeout int +} + +// ExternalCredentialUpdateCallback 定义External凭证更新回调函数类型 +type ExternalCredentialUpdateCallback func(accessKeyId, accessKeySecret, securityToken string, expiration int64) error + +type externalCredentialResponse struct { + Mode string `json:"mode"` + AccessKeyId string `json:"access_key_id"` + AccessKeySecret string `json:"access_key_secret"` + SecurityToken string `json:"sts_token"` + Expiration string `json:"expiration,omitempty"` +} + +type ExternalCredentialsProvider struct { + processCommand string + options *ExternalOptions + + lastUpdateTimestamp int64 + expirationTimestamp int64 + sessionCredentials *sessionCredentials + // External credential call back + credentialUpdateCallback ExternalCredentialUpdateCallback + // 互斥锁,用于并发安全 + mu sync.RWMutex +} + +type ExternalCredentialsProviderBuilder struct { + provider *ExternalCredentialsProvider +} + +func NewExternalCredentialsProviderBuilder() *ExternalCredentialsProviderBuilder { + return &ExternalCredentialsProviderBuilder{ + provider: &ExternalCredentialsProvider{}, + } +} + +func (b *ExternalCredentialsProviderBuilder) WithProcessCommand(processCommand string) *ExternalCredentialsProviderBuilder { + b.provider.processCommand = processCommand + return b +} + +func (b *ExternalCredentialsProviderBuilder) WithExternalOptions(options *ExternalOptions) *ExternalCredentialsProviderBuilder { + b.provider.options = options + return b +} + +func (b *ExternalCredentialsProviderBuilder) WithCredentialUpdateCallback(callback ExternalCredentialUpdateCallback) *ExternalCredentialsProviderBuilder { + b.provider.credentialUpdateCallback = callback + return b +} + +func (b *ExternalCredentialsProviderBuilder) Build() (provider *ExternalCredentialsProvider, err error) { + if b.provider.processCommand == "" { + err = errors.New("process_command is empty") + return + } + + provider = b.provider + return +} + +func (provider *ExternalCredentialsProvider) getCredentials() (session *sessionCredentials, err 
error) { + args := strings.Fields(provider.processCommand) + if len(args) == 0 { + err = errors.New("process_command is empty") + return + } + + // 确保 options 不为 nil,并设置默认超时时间 + timeout := 60 * 1000 // 默认 60 秒 + if provider.options != nil && provider.options.Timeout > 0 { + timeout = provider.options.Timeout + } + + var cancelFunc func() + ctx, cancelFunc := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Millisecond) + defer cancelFunc() + + cmd := exec.CommandContext(ctx, args[0], args[1:]...) + cmd.Env = os.Environ() + + // 创建一个buffer来捕获标准输出 + var stdoutBuf bytes.Buffer + cmd.Stdout = &stdoutBuf + + // 创建一个buffer来捕获标准错误输出 + var stderrBuf bytes.Buffer + cmd.Stderr = &stderrBuf + + // Start the command + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("failed to execute external command: %w\nstderr: %s", err, stderrBuf.String()) + } + + done := make(chan error, 1) + go func() { + done <- cmd.Wait() + }() + + select { + case <-ctx.Done(): + // 超时了,context 会自动终止命令 + <-done + return nil, fmt.Errorf("command process timed out after %d milliseconds", timeout) + case execError := <-done: + if execError != nil { + // 检查是否是超时导致的错误 + if errors.Is(execError, context.DeadlineExceeded) { + return nil, fmt.Errorf("command process timed out after %d milliseconds", timeout) + } + return nil, fmt.Errorf("failed to execute external command: %w\nstderr: %s", execError, stderrBuf.String()) + } + } + + // 只解析标准输出 + buf := stdoutBuf.Bytes() + + // 解析得到凭证响应 + var resp externalCredentialResponse + err = json.Unmarshal(buf, &resp) + if err != nil { + fmt.Println(provider.processCommand) + fmt.Println(string(buf)) + return nil, fmt.Errorf("failed to parse external command output: %w", err) + } + + // 验证返回的凭证数据 + if resp.AccessKeyId == "" || resp.AccessKeySecret == "" { + return nil, fmt.Errorf("invalid credential response: access_key_id or access_key_secret is empty") + } + + // 根据 mode 验证 SecurityToken + if resp.Mode == "StsToken" && resp.SecurityToken 
== "" { + return nil, fmt.Errorf("invalid StsToken credential response: sts_token is empty") + } + + session = &sessionCredentials{ + AccessKeyId: resp.AccessKeyId, + AccessKeySecret: resp.AccessKeySecret, + SecurityToken: resp.SecurityToken, + Expiration: resp.Expiration, + } + + return +} + +func (provider *ExternalCredentialsProvider) needUpdateCredential() (result bool) { + provider.mu.RLock() + defer provider.mu.RUnlock() + + // 如果没有缓存凭证,需要更新 + if provider.sessionCredentials == nil { + return true + } + + // 如果没有过期时间,每次都更新(因为外部命令可能返回动态凭证) + if provider.expirationTimestamp == 0 { + return true + } + + // 如果凭证即将过期(提前180秒),需要更新 + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *ExternalCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + // 先检查是否需要更新(使用读锁) + provider.mu.RLock() + needUpdate := provider.sessionCredentials == nil || + provider.expirationTimestamp == 0 || + provider.expirationTimestamp-time.Now().Unix() <= 180 + provider.mu.RUnlock() + + if needUpdate { + // 获取新凭证(在锁外执行,避免阻塞其他 goroutine) + sessionCredentials, err1 := provider.getCredentials() + if err1 != nil { + return nil, err1 + } + + // 使用写锁更新共享状态 + provider.mu.Lock() + // 双重检查,避免多个 goroutine 同时更新 + if provider.sessionCredentials == nil || + provider.expirationTimestamp == 0 || + provider.expirationTimestamp-time.Now().Unix() <= 180 { + provider.sessionCredentials = sessionCredentials + + // 如果返回了过期时间,解析并缓存 + if sessionCredentials.Expiration != "" { + expirationTime, err2 := time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration) + if err2 != nil { + // 如果解析失败,不设置过期时间,下次调用时重新获取 + provider.expirationTimestamp = 0 + } else { + provider.lastUpdateTimestamp = time.Now().Unix() + provider.expirationTimestamp = expirationTime.Unix() + } + } else { + // 没有过期时间,下次调用时重新获取 + provider.expirationTimestamp = 0 + } + } + expirationTimestamp := provider.expirationTimestamp + sessionCredentials = provider.sessionCredentials + provider.mu.Unlock() + + 
// 如果设置了回调函数,则调用回调函数写回配置文件(在锁外执行) + if provider.credentialUpdateCallback != nil { + err1 := provider.credentialUpdateCallback( + sessionCredentials.AccessKeyId, + sessionCredentials.AccessKeySecret, + sessionCredentials.SecurityToken, + expirationTimestamp, + ) + if err1 != nil { + fmt.Printf("Warning: failed to update external credentials in config file: %v\n", err1) + } + } + } + + // 使用读锁读取凭证 + provider.mu.RLock() + cc = &Credentials{ + AccessKeyId: provider.sessionCredentials.AccessKeyId, + AccessKeySecret: provider.sessionCredentials.AccessKeySecret, + SecurityToken: provider.sessionCredentials.SecurityToken, + ProviderName: provider.GetProviderName(), + } + provider.mu.RUnlock() + return +} + +func (provider *ExternalCredentialsProvider) GetProviderName() string { + return "external" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/hook.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/hook.go new file mode 100644 index 0000000000..6839abd3e5 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/hook.go @@ -0,0 +1,7 @@ +package providers + +import ( + httputil "github.com/aliyun/credentials-go/credentials/internal/http" +) + +var httpDo = httputil.Do diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/lock_unix.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/lock_unix.go new file mode 100644 index 0000000000..f72c69eae9 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/lock_unix.go @@ -0,0 +1,17 @@ +// +build !windows + +package providers + +import ( + "syscall" +) + +// lockFile acquires an exclusive lock on the file descriptor +func lockFile(fd int) error { + return syscall.Flock(fd, syscall.LOCK_EX) +} + +// unlockFile releases the lock on the file descriptor +func unlockFile(fd int) error { + return syscall.Flock(fd, syscall.LOCK_UN) +} diff --git 
a/vendor/github.com/aliyun/credentials-go/credentials/providers/lock_windows.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/lock_windows.go new file mode 100644 index 0000000000..77ce5ac085 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/lock_windows.go @@ -0,0 +1,59 @@ +// +build windows + +package providers + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + // LOCKFILE_EXCLUSIVE_LOCK - request exclusive lock + lockfileExclusiveLock = 0x00000002 +) + +// lockFile acquires an exclusive lock on the file using Windows LockFileEx +func lockFile(fd int) error { + // LockFileEx parameters: + // - hFile: file handle + // - dwFlags: LOCKFILE_EXCLUSIVE_LOCK for exclusive lock + // - dwReserved: must be 0 + // - nNumberOfBytesToLockLow: low-order 32 bits of lock range (1 byte is enough) + // - nNumberOfBytesToLockHigh: high-order 32 bits of lock range + // - lpOverlapped: pointer to OVERLAPPED structure + var overlapped syscall.Overlapped + r1, _, err := procLockFileEx.Call( + uintptr(fd), + uintptr(lockfileExclusiveLock), + 0, + 1, + 0, + uintptr(unsafe.Pointer(&overlapped)), + ) + if r1 == 0 { + return err + } + return nil +} + +// unlockFile releases the lock on the file using Windows UnlockFileEx +func unlockFile(fd int) error { + var overlapped syscall.Overlapped + r1, _, err := procUnlockFileEx.Call( + uintptr(fd), + 0, + 1, + 0, + uintptr(unsafe.Pointer(&overlapped)), + ) + if r1 == 0 { + return err + } + return nil +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/oauth.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/oauth.go new file mode 100644 index 0000000000..9b8e8bc32c --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/oauth.go @@ -0,0 +1,290 @@ +package 
providers + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "time" + + httputil "github.com/aliyun/credentials-go/credentials/internal/http" + "github.com/aliyun/credentials-go/credentials/internal/utils" +) + +// OAuthTokenUpdateCallback 定义OAuth令牌更新回调函数类型 +type OAuthTokenUpdateCallback func(refreshToken, accessToken, accessKey, secret, securityToken string, accessTokenExpire, stsExpire int64) error + +type oauthCredentialResponse struct { + AccessKeyId string `json:"accessKeyId"` + AccessKeySecret string `json:"accessKeySecret"` + SecurityToken string `json:"securityToken"` + Expiration string `json:"expiration"` + RequestId string `json:"requestId"` +} + +type oauthRefreshTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int64 `json:"expires_in"` + TokenType string `json:"token_type"` +} + +type OAuthCredentialsProvider struct { + clientId string + signInUrl string + refreshToken string + accessToken string + accessTokenExpire int64 + + lastUpdateTimestamp int64 + expirationTimestamp int64 + sessionCredentials *sessionCredentials + // for http options + httpOptions *HttpOptions + // OAuth token call back + tokenUpdateCallback OAuthTokenUpdateCallback +} + +type OAuthCredentialsProviderBuilder struct { + provider *OAuthCredentialsProvider +} + +func NewOAuthCredentialsProviderBuilder() *OAuthCredentialsProviderBuilder { + return &OAuthCredentialsProviderBuilder{ + provider: &OAuthCredentialsProvider{}, + } +} + +func (b *OAuthCredentialsProviderBuilder) WithClientId(clientId string) *OAuthCredentialsProviderBuilder { + b.provider.clientId = clientId + return b +} + +func (b *OAuthCredentialsProviderBuilder) WithSignInUrl(signInUrl string) *OAuthCredentialsProviderBuilder { + b.provider.signInUrl = signInUrl + return b +} + +func (b *OAuthCredentialsProviderBuilder) WithRefreshToken(refreshToken string) *OAuthCredentialsProviderBuilder { + b.provider.refreshToken = 
refreshToken + return b +} + +func (b *OAuthCredentialsProviderBuilder) WithAccessToken(accessToken string) *OAuthCredentialsProviderBuilder { + b.provider.accessToken = accessToken + return b +} + +func (b *OAuthCredentialsProviderBuilder) WithAccessTokenExpire(accessTokenExpire int64) *OAuthCredentialsProviderBuilder { + b.provider.accessTokenExpire = accessTokenExpire + return b +} + +func (b *OAuthCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *OAuthCredentialsProviderBuilder { + b.provider.httpOptions = httpOptions + return b +} + +func (b *OAuthCredentialsProviderBuilder) WithTokenUpdateCallback(callback OAuthTokenUpdateCallback) *OAuthCredentialsProviderBuilder { + b.provider.tokenUpdateCallback = callback + return b +} + +func (b *OAuthCredentialsProviderBuilder) Build() (provider *OAuthCredentialsProvider, err error) { + if b.provider.clientId == "" { + err = errors.New("the ClientId is empty") + return + } + + if b.provider.signInUrl == "" { + err = errors.New("the url for sign-in is empty") + return + } + + provider = b.provider + return +} + +func (provider *OAuthCredentialsProvider) getCredentials() (session *sessionCredentials, err error) { + + // 仅在 refreshToken 存在时尝试刷新 accessToken + // 若 refreshToken 不存在,则直接使用当前 accessToken 去交换 accessKeyId,由服务端判断是否有效 + if provider.refreshToken != "" && (provider.accessToken == "" || provider.accessTokenExpire == 0 || provider.accessTokenExpire-time.Now().Unix() <= 1200) { + err = provider.tryRefreshOauthToken() + if err != nil { + return nil, err + } + } + + url, err := url.Parse(provider.signInUrl) + if err != nil { + return nil, err + } + + req := &httputil.Request{ + Method: "POST", + Protocol: url.Scheme, + Host: url.Host, + Path: "/v1/exchange", + Headers: map[string]string{}, + } + + connectTimeout := 5 * time.Second + readTimeout := 10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = 
time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + // set headers + req.Headers["Content-Type"] = "application/json" + req.Headers["Authorization"] = fmt.Sprintf("Bearer %s", provider.accessToken) + res, err := httpDo(req) + if err != nil { + return + } + + if res.StatusCode != http.StatusOK { + message := "get session token from OAuth failed: " + err = errors.New(message + string(res.Body)) + return + } + var data oauthCredentialResponse + err = json.Unmarshal(res.Body, &data) + if err != nil { + err = fmt.Errorf("get session token from OAuth failed, json.Unmarshal fail: %s", err.Error()) + return + } + + if data.AccessKeyId == "" || data.AccessKeySecret == "" || data.SecurityToken == "" { + err = fmt.Errorf("refresh session token err, fail to get credentials from OAuth: " + string(res.Body)) + return + } + + session = &sessionCredentials{ + AccessKeyId: data.AccessKeyId, + AccessKeySecret: data.AccessKeySecret, + SecurityToken: data.SecurityToken, + Expiration: data.Expiration, + } + return +} + +func (provider *OAuthCredentialsProvider) tryRefreshOauthToken() (err error) { + refreshToken := provider.refreshToken + clientId := provider.clientId + + url, err := url.Parse(provider.signInUrl) + if err != nil { + return + } + + req := &httputil.Request{ + Method: "POST", + Protocol: url.Scheme, + Host: url.Host, + Path: "/v1/token", + Headers: map[string]string{}, + } + + connectTimeout := 5 * time.Second + readTimeout := 10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * 
time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + bodyForm := make(map[string]string) + bodyForm["grant_type"] = "refresh_token" + bodyForm["refresh_token"] = refreshToken + bodyForm["client_id"] = clientId + bodyForm["Timestamp"] = utils.GetTimeInFormatISO8601() + req.Form = bodyForm + + req.Headers["Content-Type"] = "application/x-www-form-urlencoded" + resp, err := httpDo(req) + if err != nil { + return + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("failed to refresh token, status code: %d", resp.StatusCode) + } + var tokenResp oauthRefreshTokenResponse + err = json.Unmarshal(resp.Body, &tokenResp) + if err != nil { + err = fmt.Errorf("get refresh token from OAuth failed, json.Unmarshal fail: %s", err.Error()) + return + } + if tokenResp.RefreshToken == "" || tokenResp.AccessToken == "" { + err = fmt.Errorf("failed to refresh token from OAuth: " + string(resp.Body)) + return + } + provider.accessToken = tokenResp.AccessToken + provider.refreshToken = tokenResp.RefreshToken + provider.accessTokenExpire = time.Now().Unix() + tokenResp.ExpiresIn + + return nil +} + +func (provider *OAuthCredentialsProvider) needUpdateCredential() (result bool) { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *OAuthCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.sessionCredentials == nil || provider.needUpdateCredential() { + sessionCredentials, err1 := provider.getCredentials() + if err1 != nil { + return nil, err1 + } + + provider.sessionCredentials = sessionCredentials + expirationTime, err2 := 
time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration) + if err2 != nil { + return nil, err2 + } + + provider.lastUpdateTimestamp = time.Now().Unix() + provider.expirationTimestamp = expirationTime.Unix() + + // 如果设置了回调函数,则调用回调函数写回配置文件 + if provider.tokenUpdateCallback != nil { + err1 := provider.tokenUpdateCallback(provider.refreshToken, provider.accessToken, sessionCredentials.AccessKeyId, sessionCredentials.AccessKeySecret, sessionCredentials.SecurityToken, provider.accessTokenExpire, provider.expirationTimestamp) + if err1 != nil { + fmt.Printf("Warning: failed to update OAuth tokens in config file: %v\n", err) + } + } + } + + cc = &Credentials{ + AccessKeyId: provider.sessionCredentials.AccessKeyId, + AccessKeySecret: provider.sessionCredentials.AccessKeySecret, + SecurityToken: provider.sessionCredentials.SecurityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *OAuthCredentialsProvider) GetProviderName() string { + return "oauth" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/oidc.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/oidc.go new file mode 100644 index 0000000000..ae7194c24e --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/oidc.go @@ -0,0 +1,278 @@ +package providers + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "strconv" + "strings" + "time" + + httputil "github.com/aliyun/credentials-go/credentials/internal/http" + "github.com/aliyun/credentials-go/credentials/internal/utils" +) + +type OIDCCredentialsProvider struct { + oidcProviderARN string + oidcTokenFilePath string + roleArn string + roleSessionName string + durationSeconds int + policy string + // for sts endpoint + stsRegionId string + enableVpc bool + stsEndpoint string + + lastUpdateTimestamp int64 + expirationTimestamp int64 + sessionCredentials *sessionCredentials + // for http options + httpOptions *HttpOptions +} + 
+type OIDCCredentialsProviderBuilder struct { + provider *OIDCCredentialsProvider +} + +func NewOIDCCredentialsProviderBuilder() *OIDCCredentialsProviderBuilder { + return &OIDCCredentialsProviderBuilder{ + provider: &OIDCCredentialsProvider{}, + } +} + +func (b *OIDCCredentialsProviderBuilder) WithOIDCProviderARN(oidcProviderArn string) *OIDCCredentialsProviderBuilder { + b.provider.oidcProviderARN = oidcProviderArn + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithOIDCTokenFilePath(oidcTokenFilePath string) *OIDCCredentialsProviderBuilder { + b.provider.oidcTokenFilePath = oidcTokenFilePath + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithRoleArn(roleArn string) *OIDCCredentialsProviderBuilder { + b.provider.roleArn = roleArn + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithRoleSessionName(roleSessionName string) *OIDCCredentialsProviderBuilder { + b.provider.roleSessionName = roleSessionName + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithDurationSeconds(durationSeconds int) *OIDCCredentialsProviderBuilder { + b.provider.durationSeconds = durationSeconds + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithStsRegionId(regionId string) *OIDCCredentialsProviderBuilder { + b.provider.stsRegionId = regionId + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithEnableVpc(enableVpc bool) *OIDCCredentialsProviderBuilder { + b.provider.enableVpc = enableVpc + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithPolicy(policy string) *OIDCCredentialsProviderBuilder { + b.provider.policy = policy + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithSTSEndpoint(stsEndpoint string) *OIDCCredentialsProviderBuilder { + b.provider.stsEndpoint = stsEndpoint + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *OIDCCredentialsProviderBuilder { + b.provider.httpOptions = httpOptions + return b +} + +func (b *OIDCCredentialsProviderBuilder) Build() 
(provider *OIDCCredentialsProvider, err error) { + if b.provider.roleSessionName == "" { + b.provider.roleSessionName = "credentials-go-" + strconv.FormatInt(time.Now().UnixNano()/1000, 10) + } + + if b.provider.oidcTokenFilePath == "" { + b.provider.oidcTokenFilePath = os.Getenv("ALIBABA_CLOUD_OIDC_TOKEN_FILE") + } + + if b.provider.oidcTokenFilePath == "" { + err = errors.New("the OIDCTokenFilePath is empty") + return + } + + if b.provider.oidcProviderARN == "" { + b.provider.oidcProviderARN = os.Getenv("ALIBABA_CLOUD_OIDC_PROVIDER_ARN") + } + + if b.provider.oidcProviderARN == "" { + err = errors.New("the OIDCProviderARN is empty") + return + } + + if b.provider.roleArn == "" { + b.provider.roleArn = os.Getenv("ALIBABA_CLOUD_ROLE_ARN") + } + + if b.provider.roleArn == "" { + err = errors.New("the RoleArn is empty") + return + } + + if b.provider.durationSeconds == 0 { + b.provider.durationSeconds = 3600 + } + + if b.provider.durationSeconds < 900 { + err = errors.New("the Assume Role session duration should be in the range of 15min - max duration seconds") + } + + if b.provider.stsEndpoint == "" { + if !b.provider.enableVpc { + b.provider.enableVpc = strings.ToLower(os.Getenv("ALIBABA_CLOUD_VPC_ENDPOINT_ENABLED")) == "true" + } + prefix := "sts" + if b.provider.enableVpc { + prefix = "sts-vpc" + } + if b.provider.stsRegionId != "" { + b.provider.stsEndpoint = fmt.Sprintf("%s.%s.aliyuncs.com", prefix, b.provider.stsRegionId) + } else if region := os.Getenv("ALIBABA_CLOUD_STS_REGION"); region != "" { + b.provider.stsEndpoint = fmt.Sprintf("%s.%s.aliyuncs.com", prefix, region) + } else { + b.provider.stsEndpoint = "sts.aliyuncs.com" + } + } + + provider = b.provider + return +} + +func (provider *OIDCCredentialsProvider) getCredentials() (session *sessionCredentials, err error) { + req := &httputil.Request{ + Method: "POST", + Protocol: "https", + Host: provider.stsEndpoint, + Headers: map[string]string{}, + } + + connectTimeout := 5 * time.Second + readTimeout := 
10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + queries := make(map[string]string) + queries["Version"] = "2015-04-01" + queries["Action"] = "AssumeRoleWithOIDC" + queries["Format"] = "JSON" + queries["Timestamp"] = utils.GetTimeInFormatISO8601() + req.Queries = queries + + bodyForm := make(map[string]string) + bodyForm["RoleArn"] = provider.roleArn + bodyForm["OIDCProviderArn"] = provider.oidcProviderARN + token, err := ioutil.ReadFile(provider.oidcTokenFilePath) + if err != nil { + return + } + + bodyForm["OIDCToken"] = string(token) + if provider.policy != "" { + bodyForm["Policy"] = provider.policy + } + + bodyForm["RoleSessionName"] = provider.roleSessionName + bodyForm["DurationSeconds"] = strconv.Itoa(provider.durationSeconds) + req.Form = bodyForm + + // set headers + req.Headers["Accept-Encoding"] = "identity" + res, err := httpDo(req) + if err != nil { + return + } + + if res.StatusCode != http.StatusOK { + message := "get session token failed: " + err = errors.New(message + string(res.Body)) + return + } + var data assumeRoleResponse + err = json.Unmarshal(res.Body, &data) + if err != nil { + err = fmt.Errorf("get oidc sts token err, json.Unmarshal fail: %s", err.Error()) + return + } + if data.Credentials == nil { + err = fmt.Errorf("get oidc sts token err, fail to get credentials") + return + } + + if data.Credentials.AccessKeyId == nil || data.Credentials.AccessKeySecret == nil || data.Credentials.SecurityToken == nil { + err = fmt.Errorf("refresh RoleArn sts token 
err, fail to get credentials") + return + } + + session = &sessionCredentials{ + AccessKeyId: *data.Credentials.AccessKeyId, + AccessKeySecret: *data.Credentials.AccessKeySecret, + SecurityToken: *data.Credentials.SecurityToken, + Expiration: *data.Credentials.Expiration, + } + return +} + +func (provider *OIDCCredentialsProvider) needUpdateCredential() (result bool) { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *OIDCCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.sessionCredentials == nil || provider.needUpdateCredential() { + sessionCredentials, err1 := provider.getCredentials() + if err1 != nil { + return nil, err1 + } + + provider.sessionCredentials = sessionCredentials + expirationTime, err2 := time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration) + if err2 != nil { + return nil, err2 + } + + provider.lastUpdateTimestamp = time.Now().Unix() + provider.expirationTimestamp = expirationTime.Unix() + } + + cc = &Credentials{ + AccessKeyId: provider.sessionCredentials.AccessKeyId, + AccessKeySecret: provider.sessionCredentials.AccessKeySecret, + SecurityToken: provider.sessionCredentials.SecurityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *OIDCCredentialsProvider) GetProviderName() string { + return "oidc_role_arn" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/profile.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/profile.go new file mode 100644 index 0000000000..c26548e3eb --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/profile.go @@ -0,0 +1,169 @@ +package providers + +import ( + "errors" + "fmt" + "os" + "path" + + "github.com/aliyun/credentials-go/credentials/internal/utils" + "gopkg.in/ini.v1" +) + +type ProfileCredentialsProvider struct { + profileName string + innerProvider 
CredentialsProvider +} + +type ProfileCredentialsProviderBuilder struct { + provider *ProfileCredentialsProvider +} + +func NewProfileCredentialsProviderBuilder() (builder *ProfileCredentialsProviderBuilder) { + return &ProfileCredentialsProviderBuilder{ + provider: &ProfileCredentialsProvider{}, + } +} + +func (b *ProfileCredentialsProviderBuilder) WithProfileName(profileName string) *ProfileCredentialsProviderBuilder { + b.provider.profileName = profileName + return b +} + +func (b *ProfileCredentialsProviderBuilder) Build() (provider *ProfileCredentialsProvider, err error) { + // 优先级: + // 1. 使用显示指定的 profileName + // 2. 使用环境变量(ALIBABA_CLOUD_PROFILE)指定的 profileName + // 3. 兜底使用 default 作为 profileName + b.provider.profileName = utils.GetDefaultString(b.provider.profileName, os.Getenv("ALIBABA_CLOUD_PROFILE"), "default") + + provider = b.provider + return +} + +func (provider *ProfileCredentialsProvider) getCredentialsProvider(ini *ini.File) (credentialsProvider CredentialsProvider, err error) { + section, err := ini.GetSection(provider.profileName) + if err != nil { + err = errors.New("ERROR: Can not load section" + err.Error()) + return + } + + value, err := section.GetKey("type") + if err != nil { + err = errors.New("ERROR: Can not find credential type" + err.Error()) + return + } + + switch value.String() { + case "access_key": + value1, err1 := section.GetKey("access_key_id") + value2, err2 := section.GetKey("access_key_secret") + if err1 != nil || err2 != nil { + err = errors.New("ERROR: Failed to get value") + return + } + + if value1.String() == "" || value2.String() == "" { + err = errors.New("ERROR: Value can't be empty") + return + } + + credentialsProvider, err = NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(value1.String()). + WithAccessKeySecret(value2.String()). 
+ Build() + case "ecs_ram_role": + value1, err1 := section.GetKey("role_name") + if err1 != nil { + err = errors.New("ERROR: Failed to get value") + return + } + credentialsProvider, err = NewECSRAMRoleCredentialsProviderBuilder().WithRoleName(value1.String()).Build() + case "ram_role_arn": + value1, err1 := section.GetKey("access_key_id") + value2, err2 := section.GetKey("access_key_secret") + value3, err3 := section.GetKey("role_arn") + value4, err4 := section.GetKey("role_session_name") + if err1 != nil || err2 != nil || err3 != nil || err4 != nil { + err = errors.New("ERROR: Failed to get value") + return + } + if value1.String() == "" || value2.String() == "" || value3.String() == "" || value4.String() == "" { + err = errors.New("ERROR: Value can't be empty") + return + } + previous, err5 := NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(value1.String()). + WithAccessKeySecret(value2.String()). + Build() + if err5 != nil { + err = errors.New("get previous credentials provider failed") + return + } + rawPolicy, _ := section.GetKey("policy") + policy := "" + if rawPolicy != nil { + policy = rawPolicy.String() + } + + credentialsProvider, err = NewRAMRoleARNCredentialsProviderBuilder(). + WithCredentialsProvider(previous). + WithRoleArn(value3.String()). + WithRoleSessionName(value4.String()). + WithPolicy(policy). + WithDurationSeconds(3600). 
+ Build() + default: + err = errors.New("ERROR: Failed to get credential") + } + return +} + +func (provider *ProfileCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.innerProvider == nil { + sharedCfgPath := os.Getenv("ALIBABA_CLOUD_CREDENTIALS_FILE") + if sharedCfgPath == "" { + homeDir := getHomePath() + if homeDir == "" { + err = fmt.Errorf("cannot found home dir") + return + } + + sharedCfgPath = path.Join(homeDir, ".alibabacloud/credentials") + } + + ini, err1 := ini.Load(sharedCfgPath) + if err1 != nil { + err = errors.New("ERROR: Can not open file" + err1.Error()) + return + } + + provider.innerProvider, err = provider.getCredentialsProvider(ini) + if err != nil { + return + } + } + + innerCC, err := provider.innerProvider.GetCredentials() + if err != nil { + return + } + + providerName := innerCC.ProviderName + if providerName == "" { + providerName = provider.innerProvider.GetProviderName() + } + + cc = &Credentials{ + AccessKeyId: innerCC.AccessKeyId, + AccessKeySecret: innerCC.AccessKeySecret, + SecurityToken: innerCC.SecurityToken, + ProviderName: fmt.Sprintf("%s/%s", provider.GetProviderName(), providerName), + } + + return +} + +func (provider *ProfileCredentialsProvider) GetProviderName() string { + return "profile" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/ram_role_arn.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/ram_role_arn.go new file mode 100644 index 0000000000..969e271ecb --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/ram_role_arn.go @@ -0,0 +1,375 @@ +package providers + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + httputil "github.com/aliyun/credentials-go/credentials/internal/http" + "github.com/aliyun/credentials-go/credentials/internal/utils" +) + +type assumedRoleUser struct { +} + +type credentials struct { + SecurityToken *string 
`json:"SecurityToken"` + Expiration *string `json:"Expiration"` + AccessKeySecret *string `json:"AccessKeySecret"` + AccessKeyId *string `json:"AccessKeyId"` +} + +type assumeRoleResponse struct { + RequestID *string `json:"RequestId"` + AssumedRoleUser *assumedRoleUser `json:"AssumedRoleUser"` + Credentials *credentials `json:"Credentials"` +} + +type sessionCredentials struct { + AccessKeyId string + AccessKeySecret string + SecurityToken string + Expiration string +} + +type HttpOptions struct { + Proxy string + // Connection timeout, in milliseconds. + ConnectTimeout int + // Read timeout, in milliseconds. + ReadTimeout int +} + +type RAMRoleARNCredentialsProvider struct { + // for previous credentials + accessKeyId string + accessKeySecret string + securityToken string + credentialsProvider CredentialsProvider + + roleArn string + roleSessionName string + durationSeconds int + policy string + externalId string + // for sts endpoint + stsRegionId string + enableVpc bool + stsEndpoint string + // for http options + httpOptions *HttpOptions + // inner + expirationTimestamp int64 + lastUpdateTimestamp int64 + previousProviderName string + sessionCredentials *sessionCredentials +} + +type RAMRoleARNCredentialsProviderBuilder struct { + provider *RAMRoleARNCredentialsProvider +} + +func NewRAMRoleARNCredentialsProviderBuilder() *RAMRoleARNCredentialsProviderBuilder { + return &RAMRoleARNCredentialsProviderBuilder{ + provider: &RAMRoleARNCredentialsProvider{}, + } +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithAccessKeyId(accessKeyId string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.accessKeyId = accessKeyId + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithAccessKeySecret(accessKeySecret string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.accessKeySecret = accessKeySecret + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithSecurityToken(securityToken string) 
*RAMRoleARNCredentialsProviderBuilder { + builder.provider.securityToken = securityToken + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithCredentialsProvider(credentialsProvider CredentialsProvider) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.credentialsProvider = credentialsProvider + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithRoleArn(roleArn string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.roleArn = roleArn + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithStsRegionId(regionId string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.stsRegionId = regionId + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithEnableVpc(enableVpc bool) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.enableVpc = enableVpc + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithStsEndpoint(endpoint string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.stsEndpoint = endpoint + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithRoleSessionName(roleSessionName string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.roleSessionName = roleSessionName + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithPolicy(policy string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.policy = policy + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithExternalId(externalId string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.externalId = externalId + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithDurationSeconds(durationSeconds int) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.durationSeconds = durationSeconds + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) 
*RAMRoleARNCredentialsProviderBuilder { + builder.provider.httpOptions = httpOptions + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) Build() (provider *RAMRoleARNCredentialsProvider, err error) { + if builder.provider.credentialsProvider == nil { + if builder.provider.accessKeyId != "" && builder.provider.accessKeySecret != "" && builder.provider.securityToken != "" { + builder.provider.credentialsProvider, err = NewStaticSTSCredentialsProviderBuilder(). + WithAccessKeyId(builder.provider.accessKeyId). + WithAccessKeySecret(builder.provider.accessKeySecret). + WithSecurityToken(builder.provider.securityToken). + Build() + if err != nil { + return + } + } else if builder.provider.accessKeyId != "" && builder.provider.accessKeySecret != "" { + builder.provider.credentialsProvider, err = NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(builder.provider.accessKeyId). + WithAccessKeySecret(builder.provider.accessKeySecret). + Build() + if err != nil { + return + } + } else { + err = errors.New("must specify a previous credentials provider to assume role") + return + } + } + + if builder.provider.roleArn == "" { + if roleArn := os.Getenv("ALIBABA_CLOUD_ROLE_ARN"); roleArn != "" { + builder.provider.roleArn = roleArn + } else { + err = errors.New("the RoleArn is empty") + return + } + } + + if builder.provider.roleSessionName == "" { + if roleSessionName := os.Getenv("ALIBABA_CLOUD_ROLE_SESSION_NAME"); roleSessionName != "" { + builder.provider.roleSessionName = roleSessionName + } else { + builder.provider.roleSessionName = "credentials-go-" + strconv.FormatInt(time.Now().UnixNano()/1000, 10) + } + } + + // duration seconds + if builder.provider.durationSeconds == 0 { + // default to 3600 + builder.provider.durationSeconds = 3600 + } + + if builder.provider.durationSeconds < 900 { + err = errors.New("session duration should be in the range of 900s - max session duration") + return + } + + // sts endpoint + if 
builder.provider.stsEndpoint == "" { + if !builder.provider.enableVpc { + builder.provider.enableVpc = strings.ToLower(os.Getenv("ALIBABA_CLOUD_VPC_ENDPOINT_ENABLED")) == "true" + } + prefix := "sts" + if builder.provider.enableVpc { + prefix = "sts-vpc" + } + if builder.provider.stsRegionId != "" { + builder.provider.stsEndpoint = fmt.Sprintf("%s.%s.aliyuncs.com", prefix, builder.provider.stsRegionId) + } else if region := os.Getenv("ALIBABA_CLOUD_STS_REGION"); region != "" { + builder.provider.stsEndpoint = fmt.Sprintf("%s.%s.aliyuncs.com", prefix, region) + } else { + builder.provider.stsEndpoint = "sts.aliyuncs.com" + } + } + + provider = builder.provider + return +} + +func (provider *RAMRoleARNCredentialsProvider) getCredentials(cc *Credentials) (session *sessionCredentials, err error) { + method := "POST" + req := &httputil.Request{ + Method: method, + Protocol: "https", + Host: provider.stsEndpoint, + Headers: map[string]string{}, + } + + queries := make(map[string]string) + queries["Version"] = "2015-04-01" + queries["Action"] = "AssumeRole" + queries["Format"] = "JSON" + queries["Timestamp"] = utils.GetTimeInFormatISO8601() + queries["SignatureMethod"] = "HMAC-SHA1" + queries["SignatureVersion"] = "1.0" + queries["SignatureNonce"] = utils.GetNonce() + queries["AccessKeyId"] = cc.AccessKeyId + + if cc.SecurityToken != "" { + queries["SecurityToken"] = cc.SecurityToken + } + + bodyForm := make(map[string]string) + bodyForm["RoleArn"] = provider.roleArn + if provider.policy != "" { + bodyForm["Policy"] = provider.policy + } + if provider.externalId != "" { + bodyForm["ExternalId"] = provider.externalId + } + bodyForm["RoleSessionName"] = provider.roleSessionName + bodyForm["DurationSeconds"] = strconv.Itoa(provider.durationSeconds) + req.Form = bodyForm + + // caculate signature + signParams := make(map[string]string) + for key, value := range queries { + signParams[key] = value + } + for key, value := range bodyForm { + signParams[key] = value + } + + 
stringToSign := utils.GetURLFormedMap(signParams) + stringToSign = strings.Replace(stringToSign, "+", "%20", -1) + stringToSign = strings.Replace(stringToSign, "*", "%2A", -1) + stringToSign = strings.Replace(stringToSign, "%7E", "~", -1) + stringToSign = url.QueryEscape(stringToSign) + stringToSign = method + "&%2F&" + stringToSign + secret := cc.AccessKeySecret + "&" + queries["Signature"] = utils.ShaHmac1(stringToSign, secret) + + req.Queries = queries + + // set headers + req.Headers["Accept-Encoding"] = "identity" + req.Headers["Content-Type"] = "application/x-www-form-urlencoded" + req.Headers["x-acs-credentials-provider"] = cc.ProviderName + + connectTimeout := 5 * time.Second + readTimeout := 10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + res, err := httpDo(req) + if err != nil { + return + } + + if res.StatusCode != http.StatusOK { + err = errors.New("refresh session token failed: " + string(res.Body)) + return + } + var data assumeRoleResponse + err = json.Unmarshal(res.Body, &data) + if err != nil { + err = fmt.Errorf("refresh RoleArn sts token err, json.Unmarshal fail: %s", err.Error()) + return + } + if data.Credentials == nil { + err = fmt.Errorf("refresh RoleArn sts token err, fail to get credentials") + return + } + + if data.Credentials.AccessKeyId == nil || data.Credentials.AccessKeySecret == nil || data.Credentials.SecurityToken == nil { + err = fmt.Errorf("refresh RoleArn sts token err, fail to get credentials") + return + } + + session = &sessionCredentials{ 
+ AccessKeyId: *data.Credentials.AccessKeyId, + AccessKeySecret: *data.Credentials.AccessKeySecret, + SecurityToken: *data.Credentials.SecurityToken, + Expiration: *data.Credentials.Expiration, + } + return +} + +func (provider *RAMRoleARNCredentialsProvider) needUpdateCredential() (result bool) { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *RAMRoleARNCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.sessionCredentials == nil || provider.needUpdateCredential() { + // 获取前置凭证 + previousCredentials, err1 := provider.credentialsProvider.GetCredentials() + if err1 != nil { + return nil, err1 + } + sessionCredentials, err2 := provider.getCredentials(previousCredentials) + if err2 != nil { + return nil, err2 + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration) + if err != nil { + return nil, err + } + + provider.expirationTimestamp = expirationTime.Unix() + provider.lastUpdateTimestamp = time.Now().Unix() + provider.previousProviderName = previousCredentials.ProviderName + provider.sessionCredentials = sessionCredentials + } + + cc = &Credentials{ + AccessKeyId: provider.sessionCredentials.AccessKeyId, + AccessKeySecret: provider.sessionCredentials.AccessKeySecret, + SecurityToken: provider.sessionCredentials.SecurityToken, + ProviderName: fmt.Sprintf("%s/%s", provider.GetProviderName(), provider.previousProviderName), + } + return +} + +func (provider *RAMRoleARNCredentialsProvider) GetProviderName() string { + return "ram_role_arn" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/static_ak.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/static_ak.go new file mode 100644 index 0000000000..bd3660cccc --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/static_ak.go @@ -0,0 +1,67 @@ +package providers + +import ( + 
"errors" + "os" +) + +type StaticAKCredentialsProvider struct { + accessKeyId string + accessKeySecret string +} + +type StaticAKCredentialsProviderBuilder struct { + provider *StaticAKCredentialsProvider +} + +func NewStaticAKCredentialsProviderBuilder() *StaticAKCredentialsProviderBuilder { + return &StaticAKCredentialsProviderBuilder{ + provider: &StaticAKCredentialsProvider{}, + } +} + +func (builder *StaticAKCredentialsProviderBuilder) WithAccessKeyId(accessKeyId string) *StaticAKCredentialsProviderBuilder { + builder.provider.accessKeyId = accessKeyId + return builder +} + +func (builder *StaticAKCredentialsProviderBuilder) WithAccessKeySecret(accessKeySecret string) *StaticAKCredentialsProviderBuilder { + builder.provider.accessKeySecret = accessKeySecret + return builder +} + +func (builder *StaticAKCredentialsProviderBuilder) Build() (provider *StaticAKCredentialsProvider, err error) { + if builder.provider.accessKeyId == "" { + builder.provider.accessKeyId = os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_ID") + } + + if builder.provider.accessKeyId == "" { + err = errors.New("the access key id is empty") + return + } + + if builder.provider.accessKeySecret == "" { + builder.provider.accessKeySecret = os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_SECRET") + } + + if builder.provider.accessKeySecret == "" { + err = errors.New("the access key secret is empty") + return + } + + provider = builder.provider + return +} + +func (provider *StaticAKCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + cc = &Credentials{ + AccessKeyId: provider.accessKeyId, + AccessKeySecret: provider.accessKeySecret, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *StaticAKCredentialsProvider) GetProviderName() string { + return "static_ak" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/static_sts.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/static_sts.go new file mode 100644 index 
0000000000..ad5715187e --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/static_sts.go @@ -0,0 +1,83 @@ +package providers + +import ( + "errors" + "os" +) + +type StaticSTSCredentialsProvider struct { + accessKeyId string + accessKeySecret string + securityToken string +} + +type StaticSTSCredentialsProviderBuilder struct { + provider *StaticSTSCredentialsProvider +} + +func NewStaticSTSCredentialsProviderBuilder() *StaticSTSCredentialsProviderBuilder { + return &StaticSTSCredentialsProviderBuilder{ + provider: &StaticSTSCredentialsProvider{}, + } +} + +func (builder *StaticSTSCredentialsProviderBuilder) WithAccessKeyId(accessKeyId string) *StaticSTSCredentialsProviderBuilder { + builder.provider.accessKeyId = accessKeyId + return builder +} + +func (builder *StaticSTSCredentialsProviderBuilder) WithAccessKeySecret(accessKeySecret string) *StaticSTSCredentialsProviderBuilder { + builder.provider.accessKeySecret = accessKeySecret + return builder +} + +func (builder *StaticSTSCredentialsProviderBuilder) WithSecurityToken(securityToken string) *StaticSTSCredentialsProviderBuilder { + builder.provider.securityToken = securityToken + return builder +} + +func (builder *StaticSTSCredentialsProviderBuilder) Build() (provider *StaticSTSCredentialsProvider, err error) { + if builder.provider.accessKeyId == "" { + builder.provider.accessKeyId = os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_ID") + } + + if builder.provider.accessKeyId == "" { + err = errors.New("the access key id is empty") + return + } + + if builder.provider.accessKeySecret == "" { + builder.provider.accessKeySecret = os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_SECRET") + } + + if builder.provider.accessKeySecret == "" { + err = errors.New("the access key secret is empty") + return + } + + if builder.provider.securityToken == "" { + builder.provider.securityToken = os.Getenv("ALIBABA_CLOUD_SECURITY_TOKEN") + } + + if builder.provider.securityToken == "" { + err = errors.New("the security 
token is empty") + return + } + + provider = builder.provider + return +} + +func (provider *StaticSTSCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + cc = &Credentials{ + AccessKeyId: provider.accessKeyId, + AccessKeySecret: provider.accessKeySecret, + SecurityToken: provider.securityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *StaticSTSCredentialsProvider) GetProviderName() string { + return "static_sts" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/uri.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/uri.go new file mode 100644 index 0000000000..ccd877d16c --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/uri.go @@ -0,0 +1,152 @@ +package providers + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "time" + + httputil "github.com/aliyun/credentials-go/credentials/internal/http" +) + +type URLCredentialsProvider struct { + url string + // for sts + sessionCredentials *sessionCredentials + // for http options + httpOptions *HttpOptions + // inner + expirationTimestamp int64 +} + +type URLCredentialsProviderBuilder struct { + provider *URLCredentialsProvider +} + +func NewURLCredentialsProviderBuilder() *URLCredentialsProviderBuilder { + return &URLCredentialsProviderBuilder{ + provider: &URLCredentialsProvider{}, + } +} + +func (builder *URLCredentialsProviderBuilder) WithUrl(url string) *URLCredentialsProviderBuilder { + builder.provider.url = url + return builder +} + +func (builder *URLCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *URLCredentialsProviderBuilder { + builder.provider.httpOptions = httpOptions + return builder +} + +func (builder *URLCredentialsProviderBuilder) Build() (provider *URLCredentialsProvider, err error) { + + if builder.provider.url == "" { + builder.provider.url = os.Getenv("ALIBABA_CLOUD_CREDENTIALS_URI") + } + + if builder.provider.url == "" { + 
err = errors.New("the url is empty") + return + } + + provider = builder.provider + return +} + +type urlResponse struct { + AccessKeyId *string `json:"AccessKeyId"` + AccessKeySecret *string `json:"AccessKeySecret"` + SecurityToken *string `json:"SecurityToken"` + Expiration *string `json:"Expiration"` +} + +func (provider *URLCredentialsProvider) getCredentials() (session *sessionCredentials, err error) { + req := &httputil.Request{ + Method: "GET", + URL: provider.url, + } + + connectTimeout := 5 * time.Second + readTimeout := 10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + res, err := httpDo(req) + if err != nil { + return + } + + if res.StatusCode != http.StatusOK { + err = fmt.Errorf("get credentials from %s failed: %s", req.BuildRequestURL(), string(res.Body)) + return + } + + var resp urlResponse + err = json.Unmarshal(res.Body, &resp) + if err != nil { + err = fmt.Errorf("get credentials from %s failed with error, json unmarshal fail: %s", req.BuildRequestURL(), err.Error()) + return + } + + if resp.AccessKeyId == nil || resp.AccessKeySecret == nil || resp.SecurityToken == nil || resp.Expiration == nil { + err = fmt.Errorf("refresh credentials from %s failed: %s", req.BuildRequestURL(), string(res.Body)) + return + } + + session = &sessionCredentials{ + AccessKeyId: *resp.AccessKeyId, + AccessKeySecret: *resp.AccessKeySecret, + SecurityToken: *resp.SecurityToken, + Expiration: *resp.Expiration, + } + return +} + +func (provider *URLCredentialsProvider) 
needUpdateCredential() (result bool) { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *URLCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.sessionCredentials == nil || provider.needUpdateCredential() { + sessionCredentials, err1 := provider.getCredentials() + if err1 != nil { + return nil, err1 + } + + provider.sessionCredentials = sessionCredentials + expirationTime, err2 := time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration) + if err2 != nil { + return nil, err2 + } + provider.expirationTimestamp = expirationTime.Unix() + } + + cc = &Credentials{ + AccessKeyId: provider.sessionCredentials.AccessKeyId, + AccessKeySecret: provider.sessionCredentials.AccessKeySecret, + SecurityToken: provider.sessionCredentials.SecurityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *URLCredentialsProvider) GetProviderName() string { + return "credential_uri" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/sts_role_arn_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/ram_role_arn_credentials_provider.go similarity index 70% rename from vendor/github.com/aliyun/credentials-go/credentials/sts_role_arn_credential.go rename to vendor/github.com/aliyun/credentials-go/credentials/ram_role_arn_credentials_provider.go index 3ddf32fa21..71ae004013 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/sts_role_arn_credential.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/ram_role_arn_credentials_provider.go @@ -8,17 +8,18 @@ import ( "time" "github.com/alibabacloud-go/tea/tea" + "github.com/aliyun/credentials-go/credentials/internal/utils" "github.com/aliyun/credentials-go/credentials/request" - "github.com/aliyun/credentials-go/credentials/utils" ) const defaultDurationSeconds = 3600 -// RAMRoleArnCredential is a kind of credentials -type 
RAMRoleArnCredential struct { +// RAMRoleArnCredentialsProvider is a kind of credentials +type RAMRoleArnCredentialsProvider struct { *credentialUpdater AccessKeyId string AccessKeySecret string + SecurityToken string RoleArn string RoleSessionName string RoleSessionExpiration int @@ -39,8 +40,23 @@ type credentialsInResponse struct { Expiration string `json:"Expiration" xml:"Expiration"` } -func newRAMRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string, roleSessionExpiration int, runtime *utils.Runtime) *RAMRoleArnCredential { - return &RAMRoleArnCredential{ +func newRAMRoleArnl(accessKeyId, accessKeySecret, securityToken, roleArn, roleSessionName, policy string, roleSessionExpiration int, externalId string, runtime *utils.Runtime) *RAMRoleArnCredentialsProvider { + return &RAMRoleArnCredentialsProvider{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + SecurityToken: securityToken, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + RoleSessionExpiration: roleSessionExpiration, + Policy: policy, + ExternalId: externalId, + credentialUpdater: new(credentialUpdater), + runtime: runtime, + } +} + +func newRAMRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string, roleSessionExpiration int, runtime *utils.Runtime) *RAMRoleArnCredentialsProvider { + return &RAMRoleArnCredentialsProvider{ AccessKeyId: accessKeyId, AccessKeySecret: accessKeySecret, RoleArn: roleArn, @@ -52,8 +68,8 @@ func newRAMRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionN } } -func newRAMRoleArnWithExternalIdCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string, roleSessionExpiration int, externalId string, runtime *utils.Runtime) *RAMRoleArnCredential { - return &RAMRoleArnCredential{ +func newRAMRoleArnWithExternalIdCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string, roleSessionExpiration int, externalId string, runtime 
*utils.Runtime) *RAMRoleArnCredentialsProvider { + return &RAMRoleArnCredentialsProvider{ AccessKeyId: accessKeyId, AccessKeySecret: accessKeySecret, RoleArn: roleArn, @@ -66,7 +82,7 @@ func newRAMRoleArnWithExternalIdCredential(accessKeyId, accessKeySecret, roleArn } } -func (e *RAMRoleArnCredential) GetCredential() (*CredentialModel, error) { +func (e *RAMRoleArnCredentialsProvider) GetCredential() (*CredentialModel, error) { if e.sessionCredential == nil || e.needUpdateCredential() { err := e.updateCredential() if err != nil { @@ -82,53 +98,53 @@ func (e *RAMRoleArnCredential) GetCredential() (*CredentialModel, error) { return credential, nil } -// GetAccessKeyId reutrns RamRoleArnCredential's AccessKeyId +// GetAccessKeyId reutrns RAMRoleArnCredentialsProvider's AccessKeyId // if AccessKeyId is not exist or out of date, the function will update it. -func (r *RAMRoleArnCredential) GetAccessKeyId() (*string, error) { - if r.sessionCredential == nil || r.needUpdateCredential() { - err := r.updateCredential() - if err != nil { - return tea.String(""), err - } +func (r *RAMRoleArnCredentialsProvider) GetAccessKeyId() (accessKeyId *string, err error) { + c, err := r.GetCredential() + if err != nil { + return } - return tea.String(r.sessionCredential.AccessKeyId), nil + + accessKeyId = c.AccessKeyId + return } -// GetAccessSecret reutrns RamRoleArnCredential's AccessKeySecret +// GetAccessSecret reutrns RAMRoleArnCredentialsProvider's AccessKeySecret // if AccessKeySecret is not exist or out of date, the function will update it. 
-func (r *RAMRoleArnCredential) GetAccessKeySecret() (*string, error) { - if r.sessionCredential == nil || r.needUpdateCredential() { - err := r.updateCredential() - if err != nil { - return tea.String(""), err - } +func (r *RAMRoleArnCredentialsProvider) GetAccessKeySecret() (accessKeySecret *string, err error) { + c, err := r.GetCredential() + if err != nil { + return } - return tea.String(r.sessionCredential.AccessKeySecret), nil + + accessKeySecret = c.AccessKeySecret + return } -// GetSecurityToken reutrns RamRoleArnCredential's SecurityToken +// GetSecurityToken reutrns RAMRoleArnCredentialsProvider's SecurityToken // if SecurityToken is not exist or out of date, the function will update it. -func (r *RAMRoleArnCredential) GetSecurityToken() (*string, error) { - if r.sessionCredential == nil || r.needUpdateCredential() { - err := r.updateCredential() - if err != nil { - return tea.String(""), err - } +func (r *RAMRoleArnCredentialsProvider) GetSecurityToken() (securityToken *string, err error) { + c, err := r.GetCredential() + if err != nil { + return } - return tea.String(r.sessionCredential.SecurityToken), nil + + securityToken = c.SecurityToken + return } -// GetBearerToken is useless RamRoleArnCredential -func (r *RAMRoleArnCredential) GetBearerToken() *string { +// GetBearerToken is useless RAMRoleArnCredentialsProvider +func (r *RAMRoleArnCredentialsProvider) GetBearerToken() *string { return tea.String("") } -// GetType reutrns RamRoleArnCredential's type -func (r *RAMRoleArnCredential) GetType() *string { +// GetType reutrns RAMRoleArnCredentialsProvider's type +func (r *RAMRoleArnCredentialsProvider) GetType() *string { return tea.String("ram_role_arn") } -func (r *RAMRoleArnCredential) updateCredential() (err error) { +func (r *RAMRoleArnCredentialsProvider) updateCredential() (err error) { if r.runtime == nil { r.runtime = new(utils.Runtime) } @@ -140,6 +156,9 @@ func (r *RAMRoleArnCredential) updateCredential() (err error) { request.Scheme = 
"HTTPS" request.Method = "GET" request.QueryParams["AccessKeyId"] = r.AccessKeyId + if r.SecurityToken != "" { + request.QueryParams["SecurityToken"] = r.SecurityToken + } request.QueryParams["Action"] = "AssumeRole" request.QueryParams["Format"] = "JSON" if r.RoleSessionExpiration > 0 { diff --git a/vendor/github.com/aliyun/credentials-go/credentials/request/doc.go b/vendor/github.com/aliyun/credentials-go/credentials/request/doc.go new file mode 100644 index 0000000000..7ec0952bda --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/request/doc.go @@ -0,0 +1,3 @@ +// Package request is used for internal. +// You should not depend on it directly, breaking changes can and will be introducted to it. +package request diff --git a/vendor/github.com/aliyun/credentials-go/credentials/response/doc.go b/vendor/github.com/aliyun/credentials-go/credentials/response/doc.go new file mode 100644 index 0000000000..2279570ea2 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/response/doc.go @@ -0,0 +1,3 @@ +// Package request is used for internal. +// You should not depend on it directly, breaking changes can and will be introducted to it. 
+package response diff --git a/vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credentials_provider.go similarity index 82% rename from vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credential.go rename to vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credentials_provider.go index 82988eca07..1d86f296c4 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credential.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credentials_provider.go @@ -8,12 +8,13 @@ import ( "time" "github.com/alibabacloud-go/tea/tea" + "github.com/aliyun/credentials-go/credentials/internal/utils" "github.com/aliyun/credentials-go/credentials/request" - "github.com/aliyun/credentials-go/credentials/utils" ) -// RsaKeyPairCredential is a kind of credentials -type RsaKeyPairCredential struct { +// Deprecated: no more recommend to use it +// RsaKeyPairCredentialsProvider is a kind of credentials provider +type RsaKeyPairCredentialsProvider struct { *credentialUpdater PrivateKey string PublicKeyId string @@ -32,8 +33,8 @@ type sessionAccessKey struct { Expiration string `json:"Expiration" xml:"Expiration"` } -func newRsaKeyPairCredential(privateKey, publicKeyId string, sessionExpiration int, runtime *utils.Runtime) *RsaKeyPairCredential { - return &RsaKeyPairCredential{ +func newRsaKeyPairCredential(privateKey, publicKeyId string, sessionExpiration int, runtime *utils.Runtime) *RsaKeyPairCredentialsProvider { + return &RsaKeyPairCredentialsProvider{ PrivateKey: privateKey, PublicKeyId: publicKeyId, SessionExpiration: sessionExpiration, @@ -42,7 +43,7 @@ func newRsaKeyPairCredential(privateKey, publicKeyId string, sessionExpiration i } } -func (e *RsaKeyPairCredential) GetCredential() (*CredentialModel, error) { +func (e *RsaKeyPairCredentialsProvider) GetCredential() (*CredentialModel, error) { if 
e.sessionCredential == nil || e.needUpdateCredential() { err := e.updateCredential() if err != nil { @@ -60,44 +61,42 @@ func (e *RsaKeyPairCredential) GetCredential() (*CredentialModel, error) { // GetAccessKeyId reutrns RsaKeyPairCredential's AccessKeyId // if AccessKeyId is not exist or out of date, the function will update it. -func (r *RsaKeyPairCredential) GetAccessKeyId() (*string, error) { - if r.sessionCredential == nil || r.needUpdateCredential() { - err := r.updateCredential() - if err != nil { - return tea.String(""), err - } +func (r *RsaKeyPairCredentialsProvider) GetAccessKeyId() (accessKeyId *string, err error) { + c, err := r.GetCredential() + if err != nil { + return } - return tea.String(r.sessionCredential.AccessKeyId), nil + accessKeyId = c.AccessKeyId + return } // GetAccessSecret reutrns RsaKeyPairCredential's AccessKeySecret // if AccessKeySecret is not exist or out of date, the function will update it. -func (r *RsaKeyPairCredential) GetAccessKeySecret() (*string, error) { - if r.sessionCredential == nil || r.needUpdateCredential() { - err := r.updateCredential() - if err != nil { - return tea.String(""), err - } +func (r *RsaKeyPairCredentialsProvider) GetAccessKeySecret() (accessKeySecret *string, err error) { + c, err := r.GetCredential() + if err != nil { + return } - return tea.String(r.sessionCredential.AccessKeySecret), nil + accessKeySecret = c.AccessKeySecret + return } // GetSecurityToken is useless RsaKeyPairCredential -func (r *RsaKeyPairCredential) GetSecurityToken() (*string, error) { +func (r *RsaKeyPairCredentialsProvider) GetSecurityToken() (*string, error) { return tea.String(""), nil } // GetBearerToken is useless for RsaKeyPairCredential -func (r *RsaKeyPairCredential) GetBearerToken() *string { +func (r *RsaKeyPairCredentialsProvider) GetBearerToken() *string { return tea.String("") } // GetType reutrns RsaKeyPairCredential's type -func (r *RsaKeyPairCredential) GetType() *string { +func (r 
*RsaKeyPairCredentialsProvider) GetType() *string { return tea.String("rsa_key_pair") } -func (r *RsaKeyPairCredential) updateCredential() (err error) { +func (r *RsaKeyPairCredentialsProvider) updateCredential() (err error) { if r.runtime == nil { r.runtime = new(utils.Runtime) } diff --git a/vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go deleted file mode 100644 index 5a9973f21a..0000000000 --- a/vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go +++ /dev/null @@ -1,53 +0,0 @@ -package credentials - -import "github.com/alibabacloud-go/tea/tea" - -// StsTokenCredential is a kind of credentials -type StsTokenCredential struct { - AccessKeyId string - AccessKeySecret string - SecurityToken string -} - -func newStsTokenCredential(accessKeyId, accessKeySecret, securityToken string) *StsTokenCredential { - return &StsTokenCredential{ - AccessKeyId: accessKeyId, - AccessKeySecret: accessKeySecret, - SecurityToken: securityToken, - } -} - -func (s *StsTokenCredential) GetCredential() (*CredentialModel, error) { - credential := &CredentialModel{ - AccessKeyId: tea.String(s.AccessKeyId), - AccessKeySecret: tea.String(s.AccessKeySecret), - SecurityToken: tea.String(s.SecurityToken), - Type: tea.String("sts"), - } - return credential, nil -} - -// GetAccessKeyId reutrns StsTokenCredential's AccessKeyId -func (s *StsTokenCredential) GetAccessKeyId() (*string, error) { - return tea.String(s.AccessKeyId), nil -} - -// GetAccessSecret reutrns StsTokenCredential's AccessKeySecret -func (s *StsTokenCredential) GetAccessKeySecret() (*string, error) { - return tea.String(s.AccessKeySecret), nil -} - -// GetSecurityToken reutrns StsTokenCredential's SecurityToken -func (s *StsTokenCredential) GetSecurityToken() (*string, error) { - return tea.String(s.SecurityToken), nil -} - -// GetBearerToken is useless StsTokenCredential -func (s *StsTokenCredential) GetBearerToken() 
*string { - return tea.String("") -} - -// GetType reutrns StsTokenCredential's type -func (s *StsTokenCredential) GetType() *string { - return tea.String("sts") -} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/uri_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/uri_credential.go index d03006c9cd..56da596588 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/uri_credential.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/uri_credential.go @@ -7,12 +7,12 @@ import ( "time" "github.com/alibabacloud-go/tea/tea" + "github.com/aliyun/credentials-go/credentials/internal/utils" "github.com/aliyun/credentials-go/credentials/request" - "github.com/aliyun/credentials-go/credentials/utils" ) // URLCredential is a kind of credential -type URLCredential struct { +type URLCredentialsProvider struct { URL string *credentialUpdater *sessionCredential @@ -26,18 +26,18 @@ type URLResponse struct { Expiration string `json:"Expiration" xml:"Expiration"` } -func newURLCredential(URL string) *URLCredential { +func newURLCredential(URL string) *URLCredentialsProvider { credentialUpdater := new(credentialUpdater) if URL == "" { URL = os.Getenv("ALIBABA_CLOUD_CREDENTIALS_URI") } - return &URLCredential{ + return &URLCredentialsProvider{ URL: URL, credentialUpdater: credentialUpdater, } } -func (e *URLCredential) GetCredential() (*CredentialModel, error) { +func (e *URLCredentialsProvider) GetCredential() (*CredentialModel, error) { if e.sessionCredential == nil || e.needUpdateCredential() { err := e.updateCredential() if err != nil { @@ -55,60 +55,48 @@ func (e *URLCredential) GetCredential() (*CredentialModel, error) { // GetAccessKeyId reutrns URLCredential's AccessKeyId // if AccessKeyId is not exist or out of date, the function will update it. 
-func (e *URLCredential) GetAccessKeyId() (*string, error) { - if e.sessionCredential == nil || e.needUpdateCredential() { - err := e.updateCredential() - if err != nil { - if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { - return &e.sessionCredential.AccessKeyId, nil - } - return tea.String(""), err - } +func (e *URLCredentialsProvider) GetAccessKeyId() (accessKeyId *string, err error) { + c, err := e.GetCredential() + if err != nil { + return } - return tea.String(e.sessionCredential.AccessKeyId), nil + accessKeyId = c.AccessKeyId + return } // GetAccessSecret reutrns URLCredential's AccessKeySecret // if AccessKeySecret is not exist or out of date, the function will update it. -func (e *URLCredential) GetAccessKeySecret() (*string, error) { - if e.sessionCredential == nil || e.needUpdateCredential() { - err := e.updateCredential() - if err != nil { - if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { - return &e.sessionCredential.AccessKeySecret, nil - } - return tea.String(""), err - } +func (e *URLCredentialsProvider) GetAccessKeySecret() (accessKeySecret *string, err error) { + c, err := e.GetCredential() + if err != nil { + return } - return tea.String(e.sessionCredential.AccessKeySecret), nil + accessKeySecret = c.AccessKeySecret + return } // GetSecurityToken reutrns URLCredential's SecurityToken // if SecurityToken is not exist or out of date, the function will update it. 
-func (e *URLCredential) GetSecurityToken() (*string, error) { - if e.sessionCredential == nil || e.needUpdateCredential() { - err := e.updateCredential() - if err != nil { - if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { - return &e.sessionCredential.SecurityToken, nil - } - return tea.String(""), err - } +func (e *URLCredentialsProvider) GetSecurityToken() (securityToken *string, err error) { + c, err := e.GetCredential() + if err != nil { + return } - return tea.String(e.sessionCredential.SecurityToken), nil + securityToken = c.SecurityToken + return } // GetBearerToken is useless for URLCredential -func (e *URLCredential) GetBearerToken() *string { +func (e *URLCredentialsProvider) GetBearerToken() *string { return tea.String("") } // GetType reutrns URLCredential's type -func (e *URLCredential) GetType() *string { +func (e *URLCredentialsProvider) GetType() *string { return tea.String("credential_uri") } -func (e *URLCredential) updateCredential() (err error) { +func (e *URLCredentialsProvider) updateCredential() (err error) { if e.runtime == nil { e.runtime = new(utils.Runtime) } @@ -117,15 +105,15 @@ func (e *URLCredential) updateCredential() (err error) { request.Method = "GET" content, err := doAction(request, e.runtime) if err != nil { - return fmt.Errorf("refresh Ecs sts token err: %s", err.Error()) + return fmt.Errorf("get credentials from %s failed with error: %s", e.URL, err.Error()) } var resp *URLResponse err = json.Unmarshal(content, &resp) if err != nil { - return fmt.Errorf("refresh Ecs sts token err: Json Unmarshal fail: %s", err.Error()) + return fmt.Errorf("get credentials from %s failed with error, json unmarshal fail: %s", e.URL, err.Error()) } if resp.AccessKeyId == "" || resp.AccessKeySecret == "" || resp.SecurityToken == "" || resp.Expiration == "" { - return fmt.Errorf("refresh Ecs sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", resp.AccessKeyId, 
resp.AccessKeySecret, resp.SecurityToken, resp.Expiration) + return fmt.Errorf("get credentials failed: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", resp.AccessKeyId, resp.AccessKeySecret, resp.SecurityToken, resp.Expiration) } expirationTime, err := time.Parse("2006-01-02T15:04:05Z", resp.Expiration) diff --git a/vendor/github.com/aliyun/credentials-go/credentials/utils/doc.go b/vendor/github.com/aliyun/credentials-go/credentials/utils/doc.go new file mode 100644 index 0000000000..d8566ff789 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/utils/doc.go @@ -0,0 +1,3 @@ +// Package request is used for internal. +// You should not depend on it directly, breaking changes can and will be introducted to it. +package utils diff --git a/vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go b/vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go index 432395cf4a..43830cdb5a 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go @@ -7,6 +7,7 @@ import ( ) // Runtime is for setting timeout, proxy and host +// Deprecated: it was used for internal type Runtime struct { ReadTimeout int ConnectTimeout int @@ -16,6 +17,7 @@ type Runtime struct { } // NewRuntime returns a Runtime +// Deprecated: it was used for internal func NewRuntime(readTimeout, connectTimeout int, proxy string, host string) *Runtime { return &Runtime{ ReadTimeout: readTimeout, @@ -26,6 +28,7 @@ func NewRuntime(readTimeout, connectTimeout int, proxy string, host string) *Run } // Timeout is for connect Timeout +// Deprecated: it was used for internal func Timeout(connectTimeout time.Duration) func(cxt context.Context, net, addr string) (c net.Conn, err error) { return func(ctx context.Context, network, address string) (net.Conn, error) { return (&net.Dialer{ diff --git 
a/vendor/github.com/aliyun/credentials-go/credentials/utils/utils.go b/vendor/github.com/aliyun/credentials-go/credentials/utils/utils.go index 7468407fbc..66457c3f37 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/utils/utils.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/utils/utils.go @@ -30,6 +30,7 @@ var hookRSA = func(fn func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Has } // GetUUID returns a uuid +// Deprecated: it was used for internal func GetUUID() (uuidHex string) { uuid := newUUID() uuidHex = hex.EncodeToString(uuid[:]) @@ -46,6 +47,7 @@ func RandStringBytes(n int) string { } // ShaHmac1 return a string which has been hashed +// Deprecated: it was used for internal func ShaHmac1(source, secret string) string { key := []byte(secret) hmac := hmac.New(sha1.New, key) @@ -56,6 +58,7 @@ func ShaHmac1(source, secret string) string { } // Sha256WithRsa return a string which has been hashed with Rsa +// Deprecated: it was used for internal func Sha256WithRsa(source, secret string) string { decodeString, err := base64.StdEncoding.DecodeString(secret) if err != nil { @@ -79,6 +82,7 @@ func Sha256WithRsa(source, secret string) string { } // GetMD5Base64 returns a string which has been base64 +// Deprecated: it was used for internal func GetMD5Base64(bytes []byte) (base64Value string) { md5Ctx := md5.New() md5Ctx.Write(bytes) @@ -88,6 +92,7 @@ func GetMD5Base64(bytes []byte) (base64Value string) { } // GetTimeInFormatISO8601 returns a time string +// Deprecated: it was used for internal func GetTimeInFormatISO8601() (timeStr string) { gmt := time.FixedZone("GMT", 0) @@ -95,6 +100,7 @@ func GetTimeInFormatISO8601() (timeStr string) { } // GetURLFormedMap returns a url encoded string +// Deprecated: it was used for internal func GetURLFormedMap(source map[string]string) (urlEncoded string) { urlEncoder := url.Values{} for key, value := range source { diff --git a/vendor/github.com/aliyun/credentials-go/doc.go 
b/vendor/github.com/aliyun/credentials-go/doc.go new file mode 100644 index 0000000000..5cddf98e8b --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/doc.go @@ -0,0 +1,4 @@ +// Package credentials-go +package credentials_go + +const PACKAGE_VERSION = "1.4.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index c9d0bdc4cd..57bfbfb694 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.41.0" +const goModuleVersion = "1.41.5" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go index 3603447911..39efd848cd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go @@ -3,7 +3,7 @@ package query import ( "context" "fmt" - "io/ioutil" + "io" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" @@ -52,7 +52,7 @@ func (m *asGetRequest) HandleSerialize( delim = "&" } - b, err := ioutil.ReadAll(stream) + b, err := io.ReadAll(stream) if err != nil { return out, metadata, fmt.Errorf("unable to get request body %w", err) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go index 5549922ab8..52acb62f91 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -6,6 +6,7 @@ import ( "fmt" "strconv" "strings" + "sync/atomic" "time" internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" @@ -43,6 +44,10 @@ type Attempt struct { // A Meter instance for recording retry-related 
metrics. OperationMeter metrics.Meter + // Initial clock skew that would have been saved from a previous operation + // call. + ClientSkew *atomic.Int64 + retryer aws.RetryerV2 requestCloner RequestCloner } @@ -82,8 +87,12 @@ func (r Attempt) logf(logger logging.Logger, classification logging.Classificati func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) ( out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error, ) { - var attemptNum int var attemptClockSkew time.Duration + if r.ClientSkew != nil { + attemptClockSkew = time.Duration(r.ClientSkew.Load()) + } + + var attemptNum int var attemptResults AttemptResults maxAttempts := r.retryer.MaxAttempts() @@ -99,6 +108,8 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn attemptInput := in attemptInput.Request = r.requestCloner(attemptInput.Request) + ctx = internalcontext.SetAttemptSkewContext(ctx, attemptClockSkew) + // Record the metadata for the for attempt being started. 
attemptCtx := setRetryMetadata(ctx, retryMetadata{ AttemptNum: attemptNum, @@ -107,9 +118,6 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn AttemptClockSkew: attemptClockSkew, }) - // Setting clock skew to be used on other context (like signing) - ctx = internalcontext.SetAttemptSkewContext(ctx, attemptClockSkew) - var attemptResult AttemptResult attemptCtx, span := tracing.StartSpan(attemptCtx, "Attempt", func(o *tracing.SpanOptions) { @@ -149,6 +157,14 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn } } + // this guarantees we are staying on top of the persistent skew value + // (either to apply it or to heal it back if the clocks realign) + if r.ClientSkew != nil { + if resultSkew, ok := awsmiddle.GetAttemptSkew(metadata); ok { + r.ClientSkew.Store(resultSkew.Nanoseconds()) + } + } + addAttemptResults(&metadata, attemptResults) return out, metadata, err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go index c7ef0acc4d..49cc31205c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go @@ -300,6 +300,17 @@ func limitedRedirect(r *http.Request, via []*http.Request) error { switch resp.StatusCode { case 307, 308: // Only allow 307 and 308 redirects as they preserve the method. + + // If redirecting to a different host, remove X-Amz-Security-Token header + // to prevent credentials from being sent to a different host, similar to + // how Authorization header is handled by the HTTP client. 
+ if len(via) > 0 { + lastRequest := via[len(via)-1] + if lastRequest.URL.Host != r.URL.Host { + r.Header.Del("X-Amz-Security-Token") + } + } + return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 0d9b296e1d..404561eede 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,38 @@ +# v1.32.13 (2026-03-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.12 (2026-03-13) + +* **Bug Fix**: Replace usages of the old ioutil/ package throughout the SDK. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.11 (2026-03-03) + +* **Bug Fix**: Modernize non codegen files with go fix +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.10 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.9 (2026-02-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.8 (2026-02-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.7 (2026-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.6 (2025-12-16) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.32.5 (2025-12-09) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go index caa20a158a..498a668a30 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -103,7 +103,7 @@ var defaultAWSConfigResolvers = []awsConfigResolver{ // // General the Config type will use type assertion against the Provider interfaces // to extract specific data from the Config. 
-type Config interface{} +type Config any // A loader is used to load external configuration data and returns it as // a generic Config type. @@ -170,8 +170,8 @@ func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigRes // ResolveConfig calls the provide function passing slice of configuration sources. // This implements the aws.ConfigResolver interface. -func (cs configs) ResolveConfig(f func(configs []interface{}) error) error { - var cfgs []interface{} +func (cs configs) ResolveConfig(f func(configs []any) error) error { + var cfgs []any for i := range cs { cfgs = append(cfgs, cs[i]) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index bb9399d5f3..80aee928f6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.32.5" +const goModuleVersion = "1.32.13" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go index 18b9b5ad20..5531249710 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go @@ -130,7 +130,7 @@ type IgnoreConfiguredEndpointsProvider interface { // GetIgnoreConfiguredEndpoints is used in knowing when to disable configured // endpoints feature. 
-func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { +func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []any) (value bool, found bool, err error) { for _, cfg := range configs { if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go index 92a16d718d..a71c105d96 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go @@ -5,7 +5,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "io" "net/http" "os" @@ -21,7 +21,7 @@ import ( // This should be used as the first resolver in the slice of resolvers when // resolving external configuration. func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error { - var sources []interface{} + var sources []any for _, s := range cfgs { sources = append(sources, s) } @@ -69,7 +69,7 @@ func resolveCustomCABundle(ctx context.Context, cfg *aws.Config, cfgs configs) e tr.TLSClientConfig.RootCAs = x509.NewCertPool() } - b, err := ioutil.ReadAll(pemCerts) + b, err := io.ReadAll(pemCerts) if err != nil { appendErr = fmt.Errorf("failed to read custom CA bundle PEM file") } @@ -106,9 +106,9 @@ func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error } func resolveBaseEndpoint(ctx context.Context, cfg *aws.Config, configs configs) error { - var downcastCfgSources []interface{} + var downcastCfgSources []any for _, cs := range configs { - downcastCfgSources = append(downcastCfgSources, interface{}(cs)) + downcastCfgSources = append(downcastCfgSources, any(cs)) } if val, found, err := GetIgnoreConfiguredEndpoints(ctx, downcastCfgSources); found && val && err == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go 
b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go index de83985999..4f8c324e0d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go @@ -3,7 +3,6 @@ package config import ( "context" "fmt" - "io/ioutil" "net" "net/url" "os" @@ -346,7 +345,7 @@ func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToke options.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) { var contents []byte var err error - if contents, err = ioutil.ReadFile(authFilePath); err != nil { + if contents, err = os.ReadFile(authFilePath); err != nil { return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err) } return string(contents), nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go index 5a0fea2220..44c616fd57 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -502,7 +501,7 @@ func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error return nil, false, nil } - b, err := ioutil.ReadFile(c.CustomCABundle) + b, err := os.ReadFile(c.CustomCABundle) if err != nil { return nil, false, err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index bf3fa5a1f1..e0af6364ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,37 @@ +# v1.19.13 (2026-03-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.12 (2026-03-13) + +* **Bug Fix**: Replace usages of the old ioutil/ package throughout the SDK. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.11 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.10 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.9 (2026-02-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.8 (2026-02-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.7 (2026-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.6 (2025-12-16) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.19.5 (2025-12-09) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index f539719414..450279760e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.19.5" +const goModuleVersion = "1.19.13" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go index 46ae2f9231..1fb6b2f0da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -145,7 +144,7 @@ func getTokenFieldRFC3339(v interface{}, value **rfc3339) error { } func loadCachedToken(filename string) (token, error) { - fileBytes, err := ioutil.ReadFile(filename) + fileBytes, err := 
os.ReadFile(filename) if err != nil { return token{}, fmt.Errorf("failed to read cached SSO token file, %w", err) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go index 5f4286dda4..e3d4a3cd4c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go @@ -3,7 +3,7 @@ package stscreds import ( "context" "fmt" - "io/ioutil" + "os" "strconv" "strings" "time" @@ -80,7 +80,7 @@ type IdentityTokenFile string // GetIdentityToken retrieves the JWT token from the file and returns the contents as a []byte func (j IdentityTokenFile) GetIdentityToken() ([]byte, error) { - b, err := ioutil.ReadFile(string(j)) + b, err := os.ReadFile(string(j)) if err != nil { return nil, fmt.Errorf("unable to read file at %s: %v", string(j), err) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index 5b9f6cc16a..829592ace2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,26 @@ +# v1.18.21 (2026-03-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.20 (2026-03-13) + +* **Bug Fix**: Replace usages of the old ioutil/ package throughout the SDK. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.19 (2026-03-03) + +* **Bug Fix**: Modernize non codegen files with go fix +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.18 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.17 (2026-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.18.16 (2025-12-08) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go index 75edc4e9d6..a1da93d9d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go @@ -226,10 +226,10 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { } func (c *Client) invokeOperation( - ctx context.Context, opID string, params interface{}, optFns []func(*Options), + ctx context.Context, opID string, params any, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, ) ( - result interface{}, metadata middleware.Metadata, err error, + result any, metadata middleware.Metadata, err error, ) { stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go index af58b6bb10..4f8775b2a9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go @@ -61,7 +61,7 @@ func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error buildGetDynamicDataOutput) } -func buildGetDynamicDataPath(params 
interface{}) (string, error) { +func buildGetDynamicDataPath(params any) (string, error) { p, ok := params.(*GetDynamicDataInput) if !ok { return "", fmt.Errorf("unknown parameter type %T", params) @@ -70,7 +70,7 @@ func buildGetDynamicDataPath(params interface{}) (string, error) { return appendURIPath(getDynamicDataPath, p.Path), nil } -func buildGetDynamicDataOutput(resp *smithyhttp.Response) (interface{}, error) { +func buildGetDynamicDataOutput(resp *smithyhttp.Response) (any, error) { return &GetDynamicDataOutput{ Content: resp.Body, }, nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go index 5111cc90ca..1ce70cb55c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go @@ -59,11 +59,11 @@ func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error { ) } -func buildGetIAMInfoPath(params interface{}) (string, error) { +func buildGetIAMInfoPath(params any) (string, error) { return getIAMInfoPath, nil } -func buildGetIAMInfoOutput(resp *smithyhttp.Response) (v interface{}, err error) { +func buildGetIAMInfoOutput(resp *smithyhttp.Response) (v any, err error) { defer func() { closeErr := resp.Body.Close() if err == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go index dc8c09edf0..5c454c75da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go @@ -60,11 +60,11 @@ func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options O ) } -func buildGetInstanceIdentityDocumentPath(params interface{}) (string, error) { 
+func buildGetInstanceIdentityDocumentPath(params any) (string, error) { return getInstanceIdentityDocumentPath, nil } -func buildGetInstanceIdentityDocumentOutput(resp *smithyhttp.Response) (v interface{}, err error) { +func buildGetInstanceIdentityDocumentOutput(resp *smithyhttp.Response) (v any, err error) { defer func() { closeErr := resp.Body.Close() if err == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go index 869bfc9feb..4a9f7542c7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go @@ -61,7 +61,7 @@ func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error { buildGetMetadataOutput) } -func buildGetMetadataPath(params interface{}) (string, error) { +func buildGetMetadataPath(params any) (string, error) { p, ok := params.(*GetMetadataInput) if !ok { return "", fmt.Errorf("unknown parameter type %T", params) @@ -70,7 +70,7 @@ func buildGetMetadataPath(params interface{}) (string, error) { return appendURIPath(getMetadataPath, p.Path), nil } -func buildGetMetadataOutput(resp *smithyhttp.Response) (interface{}, error) { +func buildGetMetadataOutput(resp *smithyhttp.Response) (any, error) { return &GetMetadataOutput{ Content: resp.Body, }, nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go index 8c0572bb5c..3171b45262 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go @@ -51,7 +51,7 @@ func addGetRegionMiddleware(stack *middleware.Stack, options Options) error { ) } -func buildGetRegionOutput(resp *smithyhttp.Response) (interface{}, error) { +func buildGetRegionOutput(resp 
*smithyhttp.Response) (any, error) { out, err := buildGetInstanceIdentityDocumentOutput(resp) if err != nil { return nil, err diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go index 1f9ee97a5b..1d33081b1e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go @@ -64,11 +64,11 @@ func addGetTokenMiddleware(stack *middleware.Stack, options Options) error { return nil } -func buildGetTokenPath(interface{}) (string, error) { +func buildGetTokenPath(any) (string, error) { return getTokenPath, nil } -func buildGetTokenOutput(resp *smithyhttp.Response) (v interface{}, err error) { +func buildGetTokenOutput(resp *smithyhttp.Response) (v any, err error) { defer func() { closeErr := resp.Body.Close() if err == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go index 8903697244..abda6eb032 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go @@ -50,11 +50,11 @@ func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error { buildGetUserDataOutput) } -func buildGetUserDataPath(params interface{}) (string, error) { +func buildGetUserDataPath(params any) (string, error) { return getUserDataPath, nil } -func buildGetUserDataOutput(resp *smithyhttp.Response) (interface{}, error) { +func buildGetUserDataOutput(resp *smithyhttp.Response) (any, error) { return &GetUserDataOutput{ Content: resp.Body, }, nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index 630ccb34d1..52c3d3923d 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.16" +const goModuleVersion = "1.18.21" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go index 90cf4aeb3d..0585f144d0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "fmt" - "io/ioutil" + "io" "net/url" "path" "time" @@ -18,8 +18,8 @@ import ( func addAPIRequestMiddleware(stack *middleware.Stack, options Options, operation string, - getPath func(interface{}) (string, error), - getOutput func(*smithyhttp.Response) (interface{}, error), + getPath func(any) (string, error), + getOutput func(*smithyhttp.Response) (any, error), ) (err error) { err = addRequestMiddleware(stack, options, "GET", operation, getPath, getOutput) if err != nil { @@ -46,8 +46,8 @@ func addRequestMiddleware(stack *middleware.Stack, options Options, method string, operation string, - getPath func(interface{}) (string, error), - getOutput func(*smithyhttp.Response) (interface{}, error), + getPath func(any) (string, error), + getOutput func(*smithyhttp.Response) (any, error), ) (err error) { err = awsmiddleware.AddSDKAgentKey(awsmiddleware.FeatureMetadata, "ec2-imds")(stack) if err != nil { @@ -120,7 +120,7 @@ func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { } type serializeRequest struct { - GetPath func(interface{}) (string, error) + GetPath func(any) (string, error) Method string } @@ -150,7 +150,7 @@ func (m *serializeRequest) HandleSerialize( } type deserializeResponse struct { - GetOutput func(*smithyhttp.Response) 
(interface{}, error) + GetOutput func(*smithyhttp.Response) (any, error) } func (*deserializeResponse) ID() string { @@ -176,11 +176,11 @@ func (m *deserializeResponse) HandleDeserialize( // read the full body so that any operation timeouts cleanup will not race // the body being read. - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return out, metadata, fmt.Errorf("read response body failed, %w", err) } - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + resp.Body = io.NopCloser(bytes.NewReader(body)) // Anything that's not 200 |< 300 is error if resp.StatusCode < 200 || resp.StatusCode >= 300 { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index 6ffdf06108..1def5e2d9f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,25 @@ +# v1.4.21 (2026-03-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.20 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.19 (2026-03-03) + +* **Bug Fix**: Modernize non codegen files with go fix +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.18 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.17 (2026-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.4.16 (2025-12-08) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go index cd4d19b898..bd731e5f58 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go @@ -14,7 +14,7 @@ type EnableEndpointDiscoveryProvider interface { // ResolveEnableEndpointDiscovery extracts the first instance of a EnableEndpointDiscoveryProvider from the config slice. // Additionally returns a aws.EndpointDiscoveryEnableState to indicate if the value was found in provided configs, // and error if one is encountered. -func ResolveEnableEndpointDiscovery(ctx context.Context, configs []interface{}) (value aws.EndpointDiscoveryEnableState, found bool, err error) { +func ResolveEnableEndpointDiscovery(ctx context.Context, configs []any) (value aws.EndpointDiscoveryEnableState, found bool, err error) { for _, cfg := range configs { if p, ok := cfg.(EnableEndpointDiscoveryProvider); ok { value, found, err = p.GetEnableEndpointDiscovery(ctx) @@ -33,7 +33,7 @@ type UseDualStackEndpointProvider interface { // ResolveUseDualStackEndpoint extracts the first instance of a UseDualStackEndpoint from the config slice. // Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. -func ResolveUseDualStackEndpoint(ctx context.Context, configs []interface{}) (value aws.DualStackEndpointState, found bool, err error) { +func ResolveUseDualStackEndpoint(ctx context.Context, configs []any) (value aws.DualStackEndpointState, found bool, err error) { for _, cfg := range configs { if p, ok := cfg.(UseDualStackEndpointProvider); ok { value, found, err = p.GetUseDualStackEndpoint(ctx) @@ -52,7 +52,7 @@ type UseFIPSEndpointProvider interface { // ResolveUseFIPSEndpoint extracts the first instance of a UseFIPSEndpointProvider from the config slice. // Additionally, returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. 
-func ResolveUseFIPSEndpoint(ctx context.Context, configs []interface{}) (value aws.FIPSEndpointState, found bool, err error) { +func ResolveUseFIPSEndpoint(ctx context.Context, configs []any) (value aws.FIPSEndpointState, found bool, err error) { for _, cfg := range configs { if p, ok := cfg.(UseFIPSEndpointProvider); ok { value, found, err = p.GetUseFIPSEndpoint(ctx) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go index e7835f8524..1ebe8f4a8a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go @@ -26,7 +26,7 @@ type IgnoreConfiguredEndpointsProvider interface { // Currently duplicated from github.com/aws/aws-sdk-go-v2/config because // service packages cannot import github.com/aws/aws-sdk-go-v2/config // due to result import cycle error. -func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { +func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []any) (value bool, found bool, err error) { for _, cfg := range configs { if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) @@ -40,7 +40,7 @@ func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (v // ResolveServiceBaseEndpoint is used to retrieve service endpoints from configured sources // while allowing for configured endpoints to be disabled -func ResolveServiceBaseEndpoint(ctx context.Context, sdkID string, configs []interface{}) (value string, found bool, err error) { +func ResolveServiceBaseEndpoint(ctx context.Context, sdkID string, configs []any) (value string, found bool, err error) { if val, found, _ := GetIgnoreConfiguredEndpoints(ctx, configs); found && val { return "", false, nil } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index 72df7b81bf..548da96016 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.4.16" +const goModuleVersion = "1.4.21" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index c789264d2b..fb9ebb1fa6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -152,7 +152,7 @@ "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", "regions" : { "eusc-de-east-1" : { - "description" : "EU (Germany)" + "description" : "AWS European Sovereign Cloud (Germany)" } } }, { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index b2d3477911..a2a1c183ff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,25 @@ +# v2.7.21 (2026-03-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.20 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.19 (2026-03-03) + +* **Bug Fix**: Modernize non codegen files with go fix +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.18 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# 
v2.7.17 (2026-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.7.16 (2025-12-08) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go index 32251a7e3c..b425031cb3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go @@ -101,7 +101,7 @@ func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, region = opts.ResolvedRegion } - for i := 0; i < len(ps); i++ { + for i := range ps { if !ps[i].canResolveEndpoint(region, opts) { continue } @@ -290,8 +290,8 @@ func getByPriority(s []string, p []string, def string) string { return def } - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { + for i := range p { + for j := range s { if s[j] == p[i] { return s[j] } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 1a524ae2e5..03a0b8c038 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.7.16" +const goModuleVersion = "2.7.21" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index 4791d328c0..fdf434a5eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,12 @@ +# v1.8.6 (2026-03-13) + +* **Bug Fix**: Replace usages of the old ioutil/ package throughout the SDK. 
+ +# v1.8.5 (2026-03-03) + +* **Bug Fix**: Modernize non codegen files with go fix +* **Dependency Update**: Bump minimum Go version to 1.24 + # v1.8.4 (2025-10-16) * **Dependency Update**: Bump minimum Go version to 1.23. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index f94970e774..1dc2e12aa8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.8.4" +const goModuleVersion = "1.8.6" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go deleted file mode 100644 index 8e24a3f0a4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go +++ /dev/null @@ -1,42 +0,0 @@ -package middleware - -import ( - "context" - "sync/atomic" - "time" - - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/smithy-go/middleware" -) - -// AddTimeOffsetMiddleware sets a value representing clock skew on the request context. -// This can be read by other operations (such as signing) to correct the date value they send -// on the request -type AddTimeOffsetMiddleware struct { - Offset *atomic.Int64 -} - -// ID the identifier for AddTimeOffsetMiddleware -func (m *AddTimeOffsetMiddleware) ID() string { return "AddTimeOffsetMiddleware" } - -// HandleBuild sets a value for attemptSkew on the request context if one is set on the client. 
-func (m AddTimeOffsetMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - if m.Offset != nil { - offset := time.Duration(m.Offset.Load()) - ctx = internalcontext.SetAttemptSkewContext(ctx, offset) - } - return next.HandleBuild(ctx, in) -} - -// HandleDeserialize gets the clock skew context from the context, and if set, sets it on the pointer -// held by AddTimeOffsetMiddleware -func (m *AddTimeOffsetMiddleware) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - if v := internalcontext.GetAttemptSkewContext(ctx); v != 0 { - m.Offset.Store(v.Nanoseconds()) - } - return next.HandleDeserialize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md index 41abdb8aa6..a9ca85f2c7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md @@ -1,3 +1,167 @@ +# v1.56.2 (2026-03-26) + +* **Bug Fix**: Fix a bug where a recorded clock skew could persist on the client even if the client and server clock ended up realigning. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.56.1 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.56.0 (2026-03-12) + +* **Feature**: Add Chainguard to PTC upstreamRegistry enum + +# v1.55.4 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.3 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.2 (2026-02-16) + +* **Documentation**: Adds support for enabling blob mounting, and removes support for Clair based image scanning + +# v1.55.1 (2026-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.0 (2025-12-18) + +* **Feature**: Adds support for ECR Create On Push + +# v1.54.4 (2025-12-09) + +* No change notes available for this release. + +# v1.54.3 (2025-12-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.54.2 (2025-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.24.0. Notably this version of the library reduces the allocation footprint of the middleware system. We observe a ~10% reduction in allocations per SDK call with this change. + +# v1.54.1 (2025-11-25) + +* **Bug Fix**: Add error check for endpoint param binding during auth scheme resolution to fix panic reported in #3234 + +# v1.54.0 (2025-11-21) + +* **Feature**: Add support for ECR managed signing + +# v1.53.1 (2025-11-19.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.53.0 (2025-11-19) + +* **Feature**: Add support for ECR archival storage class and Inspector org policy for scanning + +# v1.52.0 (2025-11-13) + +* **Feature**: Add Amazon ECR FIPS PrivateLink endpoint support + +# v1.51.4 (2025-11-12) + +* **Bug Fix**: Further reduce allocation overhead when the metrics system isn't in-use. 
+* **Bug Fix**: Reduce allocation overhead when the client doesn't have any HTTP interceptors configured. +* **Bug Fix**: Remove blank trace spans towards the beginning of the request that added no additional information. This conveys a slight reduction in overall allocations. + +# v1.51.3 (2025-11-11) + +* **Bug Fix**: Return validation error if input region is not a valid host label. + +# v1.51.2 (2025-11-04) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system. + +# v1.51.1 (2025-10-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.51.0 (2025-10-23) + +* **Feature**: Update endpoint ruleset parameters casing +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.50.7 (2025-10-22) + +* No change notes available for this release. + +# v1.50.6 (2025-10-16) + +* **Dependency Update**: Bump minimum Go version to 1.23. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.50.5 (2025-09-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.50.4 (2025-09-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.50.3 (2025-09-10) + +* No change notes available for this release. + +# v1.50.2 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.50.1 (2025-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.50.0 (2025-08-28) + +* **Feature**: Remove incorrect endpoint tests + +# v1.49.3 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.49.2 (2025-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.49.1 (2025-08-20) + +* **Bug Fix**: Remove unused deserialization code. 
+ +# v1.49.0 (2025-08-11) + +* **Feature**: Add support for configuring per-service Options via callback on global config. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.48.0 (2025-08-04) + +* **Feature**: Support configurable auth scheme preferences in service clients via AWS_AUTH_SCHEME_PREFERENCE in the environment, auth_scheme_preference in the config file, and through in-code settings on LoadDefaultConfig and client constructor methods. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.47.1 (2025-07-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.47.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.46.0 (2025-07-22) + +* **Feature**: Add support for Image Tag Mutability Exception feature, allowing repositories to define wildcard-based patterns that override the default image tag mutability settings. + +# v1.45.2 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.45.1 (2025-06-17) * **Dependency Update**: Update to smithy-go v1.22.4. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go index 3d7c13163c..04c1984061 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go @@ -15,9 +15,7 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/metrics" @@ -65,7 +63,12 @@ func timeOperationMetric[T any]( ctx context.Context, metric string, fn func() (T, error), opts ...metrics.RecordMetricOption, ) (T, error) { - instr := getOperationMetrics(ctx).histogramFor(metric) + mm := getOperationMetrics(ctx) + if mm == nil { // not using the metrics system + return fn() + } + + instr := mm.histogramFor(metric) opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) start := time.Now() @@ -78,7 +81,12 @@ func timeOperationMetric[T any]( } func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { - instr := getOperationMetrics(ctx).histogramFor(metric) + mm := getOperationMetrics(ctx) + if mm == nil { // not using the metrics system + return func() {} + } + + instr := mm.histogramFor(metric) opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) 
var ended bool @@ -106,6 +114,12 @@ func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { type operationMetricsKey struct{} func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + if _, ok := mp.(metrics.NopMeterProvider); ok { + // not using the metrics system - setting up the metrics context is a memory-intensive operation + // so we should skip it in this case + return parent, nil + } + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/ecr") om := &operationMetrics{} @@ -153,7 +167,10 @@ func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Hi } func getOperationMetrics(ctx context.Context) *operationMetrics { - return ctx.Value(operationMetricsKey{}).(*operationMetrics) + if v := ctx.Value(operationMetricsKey{}); v != nil { + return v.(*operationMetrics) + } + return nil } func operationTracer(p tracing.TracerProvider) tracing.Tracer { @@ -420,24 +437,33 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AuthSchemePreference: cfg.AuthSchemePreference, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) resolveAWSRetryMode(cfg, &opts) resolveAWSEndpointResolver(cfg, &opts) + resolveInterceptors(cfg, &opts) resolveUseDualStackEndpoint(cfg, &opts) resolveUseFIPSEndpoint(cfg, &opts) resolveBaseEndpoint(cfg, &opts) - return New(opts, optFns...) 
+ return New(opts, func(o *Options) { + for _, opt := range cfg.ServiceOptions { + opt(ServiceID, o) + } + for _, opt := range optFns { + opt(o) + } + }) } func resolveHTTPClient(o *Options) { @@ -551,6 +577,10 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) } +func resolveInterceptors(cfg aws.Config, o *Options) { + o.Interceptors = cfg.Interceptors.Copy() +} + func addClientUserAgent(stack *middleware.Stack, options Options) error { ua, err := getOrAddRequestUserAgent(stack) if err != nil { @@ -680,10 +710,11 @@ func addIsPaginatorUserAgent(o *Options) { }) } -func addRetry(stack *middleware.Stack, o Options) error { +func addRetry(stack *middleware.Stack, o Options, c *Client) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ecr") + m.ClientSkew = c.timeOffset }) if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { return err @@ -724,25 +755,6 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } -func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} func initializeTimeOffsetResolver(c *Client) { c.timeOffset = 
new(atomic.Int64) } @@ -857,88 +869,62 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { }, "ResolveEndpointV2", middleware.After) } -type spanInitializeStart struct { -} - -func (*spanInitializeStart) ID() string { - return "spanInitializeStart" -} - -func (m *spanInitializeStart) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "Initialize") - - return next.HandleInitialize(ctx, in) -} - -type spanInitializeEnd struct { -} - -func (*spanInitializeEnd) ID() string { - return "spanInitializeEnd" -} - -func (m *spanInitializeEnd) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleInitialize(ctx, in) -} - -type spanBuildRequestStart struct { -} - -func (*spanBuildRequestStart) ID() string { - return "spanBuildRequestStart" -} - -func (m *spanBuildRequestStart) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - middleware.SerializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "BuildRequest") - - return next.HandleSerialize(ctx, in) -} - -type spanBuildRequestEnd struct { -} - -func (*spanBuildRequestEnd) ID() string { - return "spanBuildRequestEnd" -} - -func (m *spanBuildRequestEnd) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - middleware.BuildOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleBuild(ctx, in) -} - -func addSpanInitializeStart(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) -} - -func addSpanInitializeEnd(stack 
*middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) -} - -func addSpanBuildRequestStart(stack *middleware.Stack) error { - return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) -} +func addInterceptBeforeRetryLoop(stack *middleware.Stack, opts Options) error { + return stack.Finalize.Insert(&smithyhttp.InterceptBeforeRetryLoop{ + Interceptors: opts.Interceptors.BeforeRetryLoop, + }, "Retry", middleware.Before) +} + +func addInterceptAttempt(stack *middleware.Stack, opts Options) error { + return stack.Finalize.Insert(&smithyhttp.InterceptAttempt{ + BeforeAttempt: opts.Interceptors.BeforeAttempt, + AfterAttempt: opts.Interceptors.AfterAttempt, + }, "Retry", middleware.After) +} + +func addInterceptors(stack *middleware.Stack, opts Options) error { + // middlewares are expensive, don't add all of these interceptor ones unless the caller + // actually has at least one interceptor configured + // + // at the moment it's all-or-nothing because some of the middlewares here are responsible for + // setting fields in the interceptor context for future ones + if len(opts.Interceptors.BeforeExecution) == 0 && + len(opts.Interceptors.BeforeSerialization) == 0 && len(opts.Interceptors.AfterSerialization) == 0 && + len(opts.Interceptors.BeforeRetryLoop) == 0 && + len(opts.Interceptors.BeforeAttempt) == 0 && + len(opts.Interceptors.BeforeSigning) == 0 && len(opts.Interceptors.AfterSigning) == 0 && + len(opts.Interceptors.BeforeTransmit) == 0 && len(opts.Interceptors.AfterTransmit) == 0 && + len(opts.Interceptors.BeforeDeserialization) == 0 && len(opts.Interceptors.AfterDeserialization) == 0 && + len(opts.Interceptors.AfterAttempt) == 0 && len(opts.Interceptors.AfterExecution) == 0 { + return nil + } -func addSpanBuildRequestEnd(stack *middleware.Stack) error { - return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) + return errors.Join( + stack.Initialize.Add(&smithyhttp.InterceptExecution{ + 
BeforeExecution: opts.Interceptors.BeforeExecution, + AfterExecution: opts.Interceptors.AfterExecution, + }, middleware.Before), + stack.Serialize.Insert(&smithyhttp.InterceptBeforeSerialization{ + Interceptors: opts.Interceptors.BeforeSerialization, + }, "OperationSerializer", middleware.Before), + stack.Serialize.Insert(&smithyhttp.InterceptAfterSerialization{ + Interceptors: opts.Interceptors.AfterSerialization, + }, "OperationSerializer", middleware.After), + stack.Finalize.Insert(&smithyhttp.InterceptBeforeSigning{ + Interceptors: opts.Interceptors.BeforeSigning, + }, "Signing", middleware.Before), + stack.Finalize.Insert(&smithyhttp.InterceptAfterSigning{ + Interceptors: opts.Interceptors.AfterSigning, + }, "Signing", middleware.After), + stack.Deserialize.Add(&smithyhttp.InterceptTransmit{ + BeforeTransmit: opts.Interceptors.BeforeTransmit, + AfterTransmit: opts.Interceptors.AfterTransmit, + }, middleware.After), + stack.Deserialize.Insert(&smithyhttp.InterceptBeforeDeserialization{ + Interceptors: opts.Interceptors.BeforeDeserialization, + }, "OperationDeserializer", middleware.After), // (deserialize stack is called in reverse) + stack.Deserialize.Insert(&smithyhttp.InterceptAfterDeserialization{ + Interceptors: opts.Interceptors.AfterDeserialization, + }, "OperationDeserializer", middleware.Before), + ) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go index 6c3e55194c..6bbb891528 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go @@ -104,7 +104,7 @@ func (c *Client) addOperationBatchCheckLayerAvailabilityMiddlewares(stack *middl if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, 
c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -128,9 +128,6 @@ func (c *Client) addOperationBatchCheckLayerAvailabilityMiddlewares(stack *middl if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -158,16 +155,13 @@ func (c *Client) addOperationBatchCheckLayerAvailabilityMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go index 239a497691..14b655c78d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go @@ -106,7 +106,7 @@ func (c *Client) addOperationBatchDeleteImageMiddlewares(stack *middleware.Stack if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -130,9 +130,6 @@ func (c *Client) addOperationBatchDeleteImageMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, 
c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -160,16 +157,13 @@ func (c *Client) addOperationBatchDeleteImageMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go index 8548e4a11d..85e21082f1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go @@ -107,7 +107,7 @@ func (c *Client) addOperationBatchGetImageMiddlewares(stack *middleware.Stack, o if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -131,9 +131,6 @@ func (c *Client) addOperationBatchGetImageMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -161,16 +158,13 @@ func (c *Client) addOperationBatchGetImageMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = 
addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go index 44709d157b..bbea8f3e8c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go @@ -85,7 +85,7 @@ func (c *Client) addOperationBatchGetRepositoryScanningConfigurationMiddlewares( if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -109,9 +109,6 @@ func (c *Client) addOperationBatchGetRepositoryScanningConfigurationMiddlewares( if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -139,16 +136,13 @@ func (c *Client) addOperationBatchGetRepositoryScanningConfigurationMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = 
addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go index 78eddc3380..1c4e06f821 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go @@ -114,7 +114,7 @@ func (c *Client) addOperationCompleteLayerUploadMiddlewares(stack *middleware.St if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -138,9 +138,6 @@ func (c *Client) addOperationCompleteLayerUploadMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -168,16 +165,13 @@ func (c *Client) addOperationCompleteLayerUploadMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go index e4c95171a4..701b479112 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go @@ -160,7 +160,7 @@ func (c *Client) addOperationCreatePullThroughCacheRuleMiddlewares(stack *middle if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -184,9 +184,6 @@ func (c *Client) addOperationCreatePullThroughCacheRuleMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -214,16 +211,13 @@ func (c *Client) addOperationCreatePullThroughCacheRuleMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go index b0f280577a..f2cccc2d12 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go @@ -46,6 +46,10 @@ type CreateRepositoryInput struct { // contents of your repository are encrypted at rest. EncryptionConfiguration *types.EncryptionConfiguration + // The imageScanningConfiguration parameter is being deprecated, in favor of + // specifying the image scanning configuration at the registry level. For more + // information, see PutRegistryScanningConfiguration . + // // The image scanning configuration for the repository. This determines whether // images are scanned for known vulnerabilities after being pushed to the // repository. @@ -57,6 +61,10 @@ type CreateRepositoryInput struct { // will be immutable which will prevent them from being overwritten. ImageTagMutability types.ImageTagMutability + // A list of filters that specify which image tags should be excluded from the + // repository's image tag mutability setting. + ImageTagMutabilityExclusionFilters []types.ImageTagMutabilityExclusionFilter + // The Amazon Web Services account ID associated with the registry to create the // repository. If you do not specify a registry, the default registry is assumed. 
RegistryId *string @@ -115,7 +123,7 @@ func (c *Client) addOperationCreateRepositoryMiddlewares(stack *middleware.Stack if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -139,9 +147,6 @@ func (c *Client) addOperationCreateRepositoryMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -169,16 +174,13 @@ func (c *Client) addOperationCreateRepositoryMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepositoryCreationTemplate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepositoryCreationTemplate.go index 5af5ec0bb4..d13b460675 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepositoryCreationTemplate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepositoryCreationTemplate.go @@ -35,8 +35,8 @@ func (c *Client) CreateRepositoryCreationTemplate(ctx context.Context, params *C type CreateRepositoryCreationTemplateInput struct { // A list of enumerable strings representing the Amazon ECR repository 
creation - // scenarios that this template will apply towards. The two supported scenarios are - // PULL_THROUGH_CACHE and REPLICATION + // scenarios that this template will apply towards. The supported scenarios are + // PULL_THROUGH_CACHE , REPLICATION , and CREATE_ON_PUSH // // This member is required. AppliedFor []types.RCTAppliedFor @@ -77,6 +77,10 @@ type CreateRepositoryCreationTemplateInput struct { // will be immutable which will prevent them from being overwritten. ImageTagMutability types.ImageTagMutability + // A list of filters that specify which image tags should be excluded from the + // repository creation template's image tag mutability setting. + ImageTagMutabilityExclusionFilters []types.ImageTagMutabilityExclusionFilter + // The lifecycle policy to use for repositories created using the template. LifecyclePolicy *string @@ -142,7 +146,7 @@ func (c *Client) addOperationCreateRepositoryCreationTemplateMiddlewares(stack * if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -166,9 +170,6 @@ func (c *Client) addOperationCreateRepositoryCreationTemplateMiddlewares(stack * if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -196,16 +197,13 @@ func (c *Client) addOperationCreateRepositoryCreationTemplateMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = 
addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go index cbea5a7789..a1a4a539ee 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go @@ -96,7 +96,7 @@ func (c *Client) addOperationDeleteLifecyclePolicyMiddlewares(stack *middleware. if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -120,9 +120,6 @@ func (c *Client) addOperationDeleteLifecyclePolicyMiddlewares(stack *middleware. if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -150,16 +147,13 @@ func (c *Client) addOperationDeleteLifecyclePolicyMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go index 5677192fc4..1dce858ddd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go @@ -107,7 +107,7 @@ func (c *Client) addOperationDeletePullThroughCacheRuleMiddlewares(stack *middle if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -131,9 +131,6 @@ func (c *Client) addOperationDeletePullThroughCacheRuleMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -161,16 +158,13 @@ func (c *Client) addOperationDeletePullThroughCacheRuleMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil 
{ return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go index 4e70c924f5..643bc6f252 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go @@ -78,7 +78,7 @@ func (c *Client) addOperationDeleteRegistryPolicyMiddlewares(stack *middleware.S if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -102,9 +102,6 @@ func (c *Client) addOperationDeleteRegistryPolicyMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -129,16 +126,13 @@ func (c *Client) addOperationDeleteRegistryPolicyMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go index c0084e8783..cf0053cc88 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go @@ -93,7 +93,7 @@ func (c *Client) addOperationDeleteRepositoryMiddlewares(stack *middleware.Stack if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -117,9 +117,6 @@ func (c *Client) addOperationDeleteRepositoryMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -147,16 +144,13 @@ func (c *Client) addOperationDeleteRepositoryMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryCreationTemplate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryCreationTemplate.go index 45384c6a01..dd6703046d 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryCreationTemplate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryCreationTemplate.go @@ -86,7 +86,7 @@ func (c *Client) addOperationDeleteRepositoryCreationTemplateMiddlewares(stack * if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -110,9 +110,6 @@ func (c *Client) addOperationDeleteRepositoryCreationTemplateMiddlewares(stack * if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -140,16 +137,13 @@ func (c *Client) addOperationDeleteRepositoryCreationTemplateMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go index 2aab4fa0cd..ec6f2092b5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go @@ -93,7 +93,7 @@ func (c *Client) addOperationDeleteRepositoryPolicyMiddlewares(stack 
*middleware if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -117,9 +117,6 @@ func (c *Client) addOperationDeleteRepositoryPolicyMiddlewares(stack *middleware if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -147,16 +144,13 @@ func (c *Client) addOperationDeleteRepositoryPolicyMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteSigningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteSigningConfiguration.go new file mode 100644 index 0000000000..c3cf2da333 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteSigningConfiguration.go @@ -0,0 +1,155 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the registry's signing configuration. 
Images pushed after deletion of +// the signing configuration will no longer be automatically signed. +// +// For more information, see [Managed signing] in the Amazon Elastic Container Registry User Guide. +// +// Deleting the signing configuration does not affect existing image signatures. +// +// [Managed signing]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/managed-signing.html +func (c *Client) DeleteSigningConfiguration(ctx context.Context, params *DeleteSigningConfigurationInput, optFns ...func(*Options)) (*DeleteSigningConfigurationOutput, error) { + if params == nil { + params = &DeleteSigningConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteSigningConfiguration", params, optFns, c.addOperationDeleteSigningConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteSigningConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteSigningConfigurationInput struct { + noSmithyDocumentSerde +} + +type DeleteSigningConfigurationOutput struct { + + // The Amazon Web Services account ID associated with the registry. + RegistryId *string + + // The registry's deleted signing configuration. + SigningConfiguration *types.SigningConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteSigningConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteSigningConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteSigningConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteSigningConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options, c); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err 
!= nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteSigningConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptors(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteSigningConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteSigningConfiguration", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeregisterPullTimeUpdateExclusion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeregisterPullTimeUpdateExclusion.go new file mode 100644 index 0000000000..e498668256 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeregisterPullTimeUpdateExclusion.go @@ -0,0 +1,156 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes a principal from the pull time update exclusion list for a registry. +// Once removed, Amazon ECR will resume updating the pull time if the specified +// principal pulls an image. 
+func (c *Client) DeregisterPullTimeUpdateExclusion(ctx context.Context, params *DeregisterPullTimeUpdateExclusionInput, optFns ...func(*Options)) (*DeregisterPullTimeUpdateExclusionOutput, error) { + if params == nil { + params = &DeregisterPullTimeUpdateExclusionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeregisterPullTimeUpdateExclusion", params, optFns, c.addOperationDeregisterPullTimeUpdateExclusionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeregisterPullTimeUpdateExclusionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeregisterPullTimeUpdateExclusionInput struct { + + // The ARN of the IAM principal to remove from the pull time update exclusion list. + // + // This member is required. + PrincipalArn *string + + noSmithyDocumentSerde +} + +type DeregisterPullTimeUpdateExclusionOutput struct { + + // The ARN of the IAM principal that was removed from the pull time update + // exclusion list. + PrincipalArn *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeregisterPullTimeUpdateExclusionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeregisterPullTimeUpdateExclusion{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeregisterPullTimeUpdateExclusion{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeregisterPullTimeUpdateExclusion"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options, c); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeregisterPullTimeUpdateExclusionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeregisterPullTimeUpdateExclusion(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptors(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeregisterPullTimeUpdateExclusion(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeregisterPullTimeUpdateExclusion", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go index 595e5f314e..3b925e3d29 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go @@ -97,7 +97,7 @@ func (c *Client) addOperationDescribeImageReplicationStatusMiddlewares(stack *mi if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = 
addRawResponseToMetadata(stack); err != nil { @@ -121,9 +121,6 @@ func (c *Client) addOperationDescribeImageReplicationStatusMiddlewares(stack *mi if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -151,16 +148,13 @@ func (c *Client) addOperationDescribeImageReplicationStatusMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go index 2cd1025c1f..6cf19b5512 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go @@ -130,7 +130,7 @@ func (c *Client) addOperationDescribeImageScanFindingsMiddlewares(stack *middlew if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -154,9 +154,6 @@ func (c *Client) addOperationDescribeImageScanFindingsMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, 
c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -184,16 +181,13 @@ func (c *Client) addOperationDescribeImageScanFindingsMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageSigningStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageSigningStatus.go new file mode 100644 index 0000000000..94685e2800 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageSigningStatus.go @@ -0,0 +1,180 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the signing status for a specified image. If the image matched signing +// rules that reference different signing profiles, a status is returned for each +// profile. +// +// For more information, see [Managed signing] in the Amazon Elastic Container Registry User Guide. 
+// +// [Managed signing]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/managed-signing.html +func (c *Client) DescribeImageSigningStatus(ctx context.Context, params *DescribeImageSigningStatusInput, optFns ...func(*Options)) (*DescribeImageSigningStatusOutput, error) { + if params == nil { + params = &DescribeImageSigningStatusInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeImageSigningStatus", params, optFns, c.addOperationDescribeImageSigningStatusMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeImageSigningStatusOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeImageSigningStatusInput struct { + + // An object containing identifying information for an image. + // + // This member is required. + ImageId *types.ImageIdentifier + + // The name of the repository that contains the image. + // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type DescribeImageSigningStatusOutput struct { + + // An object with identifying information for the image. + ImageId *types.ImageIdentifier + + // The Amazon Web Services account ID associated with the registry. + RegistryId *string + + // The name of the repository. + RepositoryName *string + + // A list of signing statuses for the specified image. Each status corresponds to + // a signing profile. + SigningStatuses []types.ImageSigningStatus + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeImageSigningStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeImageSigningStatus{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeImageSigningStatus{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeImageSigningStatus"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options, c); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err 
!= nil { + return err + } + if err = addOpDescribeImageSigningStatusValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImageSigningStatus(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptors(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeImageSigningStatus(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeImageSigningStatus", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go index 07698b6252..5aba8f10c5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go @@ -127,7 +127,7 @@ func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -151,9 +151,6 @@ func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, if 
err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -181,16 +178,13 @@ func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go index 871d5b4718..4f77eb6cf4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go @@ -111,7 +111,7 @@ func (c *Client) addOperationDescribePullThroughCacheRulesMiddlewares(stack *mid if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -135,9 +135,6 @@ func (c *Client) addOperationDescribePullThroughCacheRulesMiddlewares(stack *mid if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -162,16 +159,13 @@ func (c 
*Client) addOperationDescribePullThroughCacheRulesMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go index 42a63bcd7a..50f47a9e92 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go @@ -80,7 +80,7 @@ func (c *Client) addOperationDescribeRegistryMiddlewares(stack *middleware.Stack if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -104,9 +104,6 @@ func (c *Client) addOperationDescribeRegistryMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -131,16 +128,13 @@ func (c *Client) addOperationDescribeRegistryMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = 
addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go index ea17aecc16..0b282d6d69 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go @@ -114,7 +114,7 @@ func (c *Client) addOperationDescribeRepositoriesMiddlewares(stack *middleware.S if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -138,9 +138,6 @@ func (c *Client) addOperationDescribeRepositoriesMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -165,16 +162,13 @@ func (c *Client) addOperationDescribeRepositoriesMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, 
options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositoryCreationTemplates.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositoryCreationTemplates.go index 3e5e401717..fc71ca5e16 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositoryCreationTemplates.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositoryCreationTemplates.go @@ -114,7 +114,7 @@ func (c *Client) addOperationDescribeRepositoryCreationTemplatesMiddlewares(stac if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -138,9 +138,6 @@ func (c *Client) addOperationDescribeRepositoryCreationTemplatesMiddlewares(stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -165,16 +162,13 @@ func (c *Client) addOperationDescribeRepositoryCreationTemplatesMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAccountSetting.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAccountSetting.go index 
581b1f2141..837e338e99 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAccountSetting.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAccountSetting.go @@ -28,8 +28,8 @@ func (c *Client) GetAccountSetting(ctx context.Context, params *GetAccountSettin type GetAccountSettingInput struct { - // The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or - // REGISTRY_POLICY_SCOPE . + // The name of the account setting, such as BASIC_SCAN_TYPE_VERSION , + // REGISTRY_POLICY_SCOPE , or BLOB_MOUNTING . // // This member is required. Name *string @@ -42,9 +42,9 @@ type GetAccountSettingOutput struct { // Retrieves the name of the account setting. Name *string - // The setting value for the setting name. The following are valid values for the - // basic scan type being used: AWS_NATIVE or CLAIR . The following are valid values - // for the registry policy scope being used: V1 or V2 . + // The setting value for the setting name. Valid value for basic scan type: + // AWS_NATIVE . Valid values for registry policy scope: V1 or V2 . Valid values for + // blob mounting: ENABLED or DISABLED . Value *string // Metadata pertaining to the operation's result. 
@@ -87,7 +87,7 @@ func (c *Client) addOperationGetAccountSettingMiddlewares(stack *middleware.Stac if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -111,9 +111,6 @@ func (c *Client) addOperationGetAccountSettingMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -141,16 +138,13 @@ func (c *Client) addOperationGetAccountSettingMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go index af6d9aeef5..158c02f8db 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go @@ -101,7 +101,7 @@ func (c *Client) addOperationGetAuthorizationTokenMiddlewares(stack *middleware. 
if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -125,9 +125,6 @@ func (c *Client) addOperationGetAuthorizationTokenMiddlewares(stack *middleware. if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -152,16 +149,13 @@ func (c *Client) addOperationGetAuthorizationTokenMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go index 32b64e13ce..ae5cef8c4b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go @@ -102,7 +102,7 @@ func (c *Client) addOperationGetDownloadUrlForLayerMiddlewares(stack *middleware if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -126,9 +126,6 @@ func (c *Client) 
addOperationGetDownloadUrlForLayerMiddlewares(stack *middleware if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -156,16 +153,13 @@ func (c *Client) addOperationGetDownloadUrlForLayerMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go index 7ca491b17f..65d5218090 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go @@ -96,7 +96,7 @@ func (c *Client) addOperationGetLifecyclePolicyMiddlewares(stack *middleware.Sta if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -120,9 +120,6 @@ func (c *Client) addOperationGetLifecyclePolicyMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ 
-150,16 +147,13 @@ func (c *Client) addOperationGetLifecyclePolicyMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go index 7067e11fd4..fc02f33e8a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go @@ -139,7 +139,7 @@ func (c *Client) addOperationGetLifecyclePolicyPreviewMiddlewares(stack *middlew if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -163,9 +163,6 @@ func (c *Client) addOperationGetLifecyclePolicyPreviewMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -193,16 +190,13 @@ func (c *Client) addOperationGetLifecyclePolicyPreviewMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = 
addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go index 98a7305b56..83f88f9380 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go @@ -78,7 +78,7 @@ func (c *Client) addOperationGetRegistryPolicyMiddlewares(stack *middleware.Stac if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -102,9 +102,6 @@ func (c *Client) addOperationGetRegistryPolicyMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -129,16 +126,13 @@ func (c *Client) addOperationGetRegistryPolicyMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil 
{ + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go index 242a3b1906..d11c6a3dfc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go @@ -79,7 +79,7 @@ func (c *Client) addOperationGetRegistryScanningConfigurationMiddlewares(stack * if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -103,9 +103,6 @@ func (c *Client) addOperationGetRegistryScanningConfigurationMiddlewares(stack * if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -130,16 +127,13 @@ func (c *Client) addOperationGetRegistryScanningConfigurationMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go index 7cb52dcdfb..021fe4ecc7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go @@ -92,7 +92,7 @@ func (c *Client) addOperationGetRepositoryPolicyMiddlewares(stack *middleware.St if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -116,9 +116,6 @@ func (c *Client) addOperationGetRepositoryPolicyMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -146,16 +143,13 @@ func (c *Client) addOperationGetRepositoryPolicyMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetSigningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetSigningConfiguration.go new file mode 100644 index 0000000000..6ff0688597 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetSigningConfiguration.go @@ -0,0 +1,153 @@ +// Code generated by smithy-go-codegen 
DO NOT EDIT. + +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the registry's signing configuration, which defines rules for +// automatically signing images using Amazon Web Services Signer. +// +// For more information, see [Managed signing] in the Amazon Elastic Container Registry User Guide. +// +// [Managed signing]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/managed-signing.html +func (c *Client) GetSigningConfiguration(ctx context.Context, params *GetSigningConfigurationInput, optFns ...func(*Options)) (*GetSigningConfigurationOutput, error) { + if params == nil { + params = &GetSigningConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetSigningConfiguration", params, optFns, c.addOperationGetSigningConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetSigningConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetSigningConfigurationInput struct { + noSmithyDocumentSerde +} + +type GetSigningConfigurationOutput struct { + + // The Amazon Web Services account ID associated with the registry. + RegistryId *string + + // The registry's signing configuration. + SigningConfiguration *types.SigningConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetSigningConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetSigningConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetSigningConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetSigningConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options, c); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + 
return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSigningConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptors(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetSigningConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetSigningConfiguration", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go index b2bf031817..6e575a7c64 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go @@ -98,7 +98,7 @@ func (c *Client) addOperationInitiateLayerUploadMiddlewares(stack *middleware.St if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -122,9 +122,6 @@ func (c *Client) addOperationInitiateLayerUploadMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = 
addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -152,16 +149,13 @@ func (c *Client) addOperationInitiateLayerUploadMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImageReferrers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImageReferrers.go new file mode 100644 index 0000000000..d28e0cae24 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImageReferrers.go @@ -0,0 +1,197 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the artifacts associated with a specified subject image. +// +// The IAM principal invoking this operation must have the ecr:BatchGetImage +// permission. 
+func (c *Client) ListImageReferrers(ctx context.Context, params *ListImageReferrersInput, optFns ...func(*Options)) (*ListImageReferrersOutput, error) { + if params == nil { + params = &ListImageReferrersInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListImageReferrers", params, optFns, c.addOperationListImageReferrersMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListImageReferrersOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListImageReferrersInput struct { + + // The name of the repository that contains the subject image. + // + // This member is required. + RepositoryName *string + + // An object containing the image digest of the subject image for which to + // retrieve associated artifacts. + // + // This member is required. + SubjectId *types.SubjectIdentifier + + // The filter key and value with which to filter your ListImageReferrers results. + // If no filter is specified, only artifacts with ACTIVE status are returned. + Filter *types.ListImageReferrersFilter + + // The maximum number of image referrer results returned by ListImageReferrers in + // paginated output. When this parameter is used, ListImageReferrers only returns + // maxResults results in a single page along with a nextToken response element. + // The remaining results of the initial request can be seen by sending another + // ListImageReferrers request with the returned nextToken value. This value can be + // between 1 and 50. If this parameter is not used, then ListImageReferrers + // returns up to 50 results and a nextToken value, if applicable. + MaxResults *int32 + + // The nextToken value returned from a previous paginated ListImageReferrers + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. 
+ // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to list image referrers. If you do not specify a + // registry, the default registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type ListImageReferrersOutput struct { + + // The nextToken value to include in a future ListImageReferrers request. When the + // results of a ListImageReferrers request exceed maxResults , this value can be + // used to retrieve the next page of results. This value is null when there are no + // more results to return. + NextToken *string + + // The list of artifacts associated with the subject image. + Referrers []types.ImageReferrer + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListImageReferrersMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListImageReferrers{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListImageReferrers{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListImageReferrers"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = 
addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options, c); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpListImageReferrersValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListImageReferrers(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptors(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListImageReferrers(region string) *awsmiddleware.RegisterServiceMetadata { + return 
&awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListImageReferrers", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go index 8f56a7b82d..e56e17b943 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go @@ -120,7 +120,7 @@ func (c *Client) addOperationListImagesMiddlewares(stack *middleware.Stack, opti if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -144,9 +144,6 @@ func (c *Client) addOperationListImagesMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -174,16 +171,13 @@ func (c *Client) addOperationListImagesMiddlewares(stack *middleware.Stack, opti if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListPullTimeUpdateExclusions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListPullTimeUpdateExclusions.go new file mode 
100644 index 0000000000..d6a224c6c4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListPullTimeUpdateExclusions.go @@ -0,0 +1,173 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the IAM principals that are excluded from having their image pull times +// recorded. +func (c *Client) ListPullTimeUpdateExclusions(ctx context.Context, params *ListPullTimeUpdateExclusionsInput, optFns ...func(*Options)) (*ListPullTimeUpdateExclusionsOutput, error) { + if params == nil { + params = &ListPullTimeUpdateExclusionsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListPullTimeUpdateExclusions", params, optFns, c.addOperationListPullTimeUpdateExclusionsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListPullTimeUpdateExclusionsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListPullTimeUpdateExclusionsInput struct { + + // The maximum number of pull time update exclusion results returned by + // ListPullTimeUpdateExclusions in paginated output. When this parameter is used, + // ListPullTimeUpdateExclusions only returns maxResults results in a single page + // along with a nextToken response element. The remaining results of the initial + // request can be seen by sending another ListPullTimeUpdateExclusions request + // with the returned nextToken value. This value can be between 1 and 1000. If + // this parameter is not used, then ListPullTimeUpdateExclusions returns up to 100 + // results and a nextToken value, if applicable. + MaxResults *int32 + + // The nextToken value returned from a previous paginated + // ListPullTimeUpdateExclusions request where maxResults was used and the results + // exceeded the value of that parameter. 
Pagination continues from the end of the + // previous results that returned the nextToken value. This value is null when + // there are no more results to return. + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string + + noSmithyDocumentSerde +} + +type ListPullTimeUpdateExclusionsOutput struct { + + // The nextToken value to include in a future ListPullTimeUpdateExclusions + // request. When the results of a ListPullTimeUpdateExclusions request exceed + // maxResults , this value can be used to retrieve the next page of results. This + // value is null when there are no more results to return. + NextToken *string + + // The list of IAM principal ARNs that are excluded from having their image pull + // times recorded. + PullTimeUpdateExclusions []string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListPullTimeUpdateExclusionsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListPullTimeUpdateExclusions{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListPullTimeUpdateExclusions{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListPullTimeUpdateExclusions"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + 
return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options, c); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListPullTimeUpdateExclusions(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptors(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListPullTimeUpdateExclusions(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: 
ServiceID, + OperationName: "ListPullTimeUpdateExclusions", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go index 1b88b19148..1317ab19dc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go @@ -83,7 +83,7 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -107,9 +107,6 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -137,16 +134,13 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutAccountSetting.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutAccountSetting.go index 2c673b0391..f465eb5e28 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutAccountSetting.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutAccountSetting.go @@ -28,15 +28,15 @@ func (c *Client) PutAccountSetting(ctx context.Context, params *PutAccountSettin type PutAccountSettingInput struct { - // The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or - // REGISTRY_POLICY_SCOPE . + // The name of the account setting, such as BASIC_SCAN_TYPE_VERSION , + // REGISTRY_POLICY_SCOPE , or BLOB_MOUNTING . // // This member is required. Name *string - // Setting value that is specified. The following are valid values for the basic - // scan type being used: AWS_NATIVE or CLAIR . The following are valid values for - // the registry policy scope being used: V1 or V2 . + // Setting value that is specified. Valid value for basic scan type: AWS_NATIVE . + // Valid values for registry policy scope: V1 or V2 . Valid values for blob + // mounting: ENABLED or DISABLED . // // This member is required. 
Value *string @@ -92,7 +92,7 @@ func (c *Client) addOperationPutAccountSettingMiddlewares(stack *middleware.Stac if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -116,9 +116,6 @@ func (c *Client) addOperationPutAccountSettingMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -146,16 +143,13 @@ func (c *Client) addOperationPutAccountSettingMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go index d3847a108d..c2eb4254c5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go @@ -55,9 +55,7 @@ type PutImageInput struct { // the request. ImageManifestMediaType *string - // The tag to associate with the image. This parameter is required for images that - // use the Docker Image Manifest V2 Schema 2 or Open Container Initiative (OCI) - // formats. + // The tag to associate with the image. 
This parameter is optional. ImageTag *string // The Amazon Web Services account ID associated with the registry that contains @@ -113,7 +111,7 @@ func (c *Client) addOperationPutImageMiddlewares(stack *middleware.Stack, option if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -137,9 +135,6 @@ func (c *Client) addOperationPutImageMiddlewares(stack *middleware.Stack, option if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -167,16 +162,13 @@ func (c *Client) addOperationPutImageMiddlewares(stack *middleware.Stack, option if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go index fc448205a9..1a5c316582 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go @@ -105,7 +105,7 @@ func (c *Client) addOperationPutImageScanningConfigurationMiddlewares(stack *mid if err = 
addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -129,9 +129,6 @@ func (c *Client) addOperationPutImageScanningConfigurationMiddlewares(stack *mid if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -159,16 +156,13 @@ func (c *Client) addOperationPutImageScanningConfigurationMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go index f5149a1878..da69849a05 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go @@ -44,6 +44,10 @@ type PutImageTagMutabilityInput struct { // This member is required. RepositoryName *string + // A list of filters that specify which image tags should be excluded from the + // image tag mutability setting being applied. 
+ ImageTagMutabilityExclusionFilters []types.ImageTagMutabilityExclusionFilter + // The Amazon Web Services account ID associated with the registry that contains // the repository in which to update the image tag mutability settings. If you do // not specify a registry, the default registry is assumed. @@ -57,6 +61,10 @@ type PutImageTagMutabilityOutput struct { // The image tag mutability setting for the repository. ImageTagMutability types.ImageTagMutability + // The list of filters that specify which image tags are excluded from the + // repository's image tag mutability setting. + ImageTagMutabilityExclusionFilters []types.ImageTagMutabilityExclusionFilter + // The registry ID associated with the request. RegistryId *string @@ -103,7 +111,7 @@ func (c *Client) addOperationPutImageTagMutabilityMiddlewares(stack *middleware. if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -127,9 +135,6 @@ func (c *Client) addOperationPutImageTagMutabilityMiddlewares(stack *middleware. if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -157,16 +162,13 @@ func (c *Client) addOperationPutImageTagMutabilityMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go index 2b1db900a4..ddf006af31 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go @@ -100,7 +100,7 @@ func (c *Client) addOperationPutLifecyclePolicyMiddlewares(stack *middleware.Sta if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -124,9 +124,6 @@ func (c *Client) addOperationPutLifecyclePolicyMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -154,16 +151,13 @@ func (c *Client) addOperationPutLifecyclePolicyMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = 
addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go index 5572b91fe0..84c8c73808 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go @@ -94,7 +94,7 @@ func (c *Client) addOperationPutRegistryPolicyMiddlewares(stack *middleware.Stac if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -118,9 +118,6 @@ func (c *Client) addOperationPutRegistryPolicyMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -148,16 +145,13 @@ func (c *Client) addOperationPutRegistryPolicyMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go index ca11412d0a..f71f02cc30 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go @@ -96,7 +96,7 @@ func (c *Client) addOperationPutRegistryScanningConfigurationMiddlewares(stack * if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -120,9 +120,6 @@ func (c *Client) addOperationPutRegistryScanningConfigurationMiddlewares(stack * if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -150,16 +147,13 @@ func (c *Client) addOperationPutRegistryScanningConfigurationMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go index 1203765be0..49cdd7f8a8 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go @@ -94,7 +94,7 @@ func (c *Client) addOperationPutReplicationConfigurationMiddlewares(stack *middl if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -118,9 +118,6 @@ func (c *Client) addOperationPutReplicationConfigurationMiddlewares(stack *middl if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -148,16 +145,13 @@ func (c *Client) addOperationPutReplicationConfigurationMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutSigningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutSigningConfiguration.go new file mode 100644 index 0000000000..b7a2e3b438 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutSigningConfiguration.go @@ -0,0 +1,163 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates or updates the registry's signing configuration, which defines rules +// for automatically signing images with Amazon Web Services Signer. +// +// For more information, see [Managed signing] in the Amazon Elastic Container Registry User Guide. +// +// To successfully generate a signature, the IAM principal pushing images must +// have permission to sign payloads with the Amazon Web Services Signer signing +// profile referenced in the signing configuration. +// +// [Managed signing]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/managed-signing.html +func (c *Client) PutSigningConfiguration(ctx context.Context, params *PutSigningConfigurationInput, optFns ...func(*Options)) (*PutSigningConfigurationOutput, error) { + if params == nil { + params = &PutSigningConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutSigningConfiguration", params, optFns, c.addOperationPutSigningConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutSigningConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutSigningConfigurationInput struct { + + // The signing configuration to assign to the registry. + // + // This member is required. + SigningConfiguration *types.SigningConfiguration + + noSmithyDocumentSerde +} + +type PutSigningConfigurationOutput struct { + + // The registry's updated signing configuration. + SigningConfiguration *types.SigningConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutSigningConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutSigningConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutSigningConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutSigningConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options, c); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + 
return err + } + if err = addOpPutSigningConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutSigningConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptors(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutSigningConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutSigningConfiguration", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_RegisterPullTimeUpdateExclusion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_RegisterPullTimeUpdateExclusion.go new file mode 100644 index 0000000000..51ea69707f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_RegisterPullTimeUpdateExclusion.go @@ -0,0 +1,161 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Adds an IAM principal to the pull time update exclusion list for a registry. +// Amazon ECR will not record the pull time if an excluded principal pulls an +// image. 
+func (c *Client) RegisterPullTimeUpdateExclusion(ctx context.Context, params *RegisterPullTimeUpdateExclusionInput, optFns ...func(*Options)) (*RegisterPullTimeUpdateExclusionOutput, error) { + if params == nil { + params = &RegisterPullTimeUpdateExclusionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RegisterPullTimeUpdateExclusion", params, optFns, c.addOperationRegisterPullTimeUpdateExclusionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RegisterPullTimeUpdateExclusionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RegisterPullTimeUpdateExclusionInput struct { + + // The ARN of the IAM principal to exclude from having image pull times recorded. + // + // This member is required. + PrincipalArn *string + + noSmithyDocumentSerde +} + +type RegisterPullTimeUpdateExclusionOutput struct { + + // The date and time, expressed in standard JavaScript date format, when the + // exclusion was created. + CreatedAt *time.Time + + // The ARN of the IAM principal that was added to the pull time update exclusion + // list. + PrincipalArn *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRegisterPullTimeUpdateExclusionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpRegisterPullTimeUpdateExclusion{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpRegisterPullTimeUpdateExclusion{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "RegisterPullTimeUpdateExclusion"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options, c); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpRegisterPullTimeUpdateExclusionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterPullTimeUpdateExclusion(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptors(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRegisterPullTimeUpdateExclusion(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RegisterPullTimeUpdateExclusion", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go index 69880d99fb..688ba80635 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go @@ -109,7 +109,7 @@ func (c *Client) addOperationSetRepositoryPolicyMiddlewares(stack *middleware.St if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -133,9 +133,6 @@ 
func (c *Client) addOperationSetRepositoryPolicyMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -163,16 +160,13 @@ func (c *Client) addOperationSetRepositoryPolicyMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go index e32329bad5..a1142cb72f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go @@ -109,7 +109,7 @@ func (c *Client) addOperationStartImageScanMiddlewares(stack *middleware.Stack, if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -133,9 +133,6 @@ func (c *Client) addOperationStartImageScanMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ 
-163,16 +160,13 @@ func (c *Client) addOperationStartImageScanMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go index 9c8a8783d2..3e83497130 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go @@ -102,7 +102,7 @@ func (c *Client) addOperationStartLifecyclePolicyPreviewMiddlewares(stack *middl if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -126,9 +126,6 @@ func (c *Client) addOperationStartLifecyclePolicyPreviewMiddlewares(stack *middl if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -156,16 +153,13 @@ func (c *Client) addOperationStartLifecyclePolicyPreviewMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = 
addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go index 2637c4ff61..bc4c8c1ef8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go @@ -87,7 +87,7 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -111,9 +111,6 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -141,16 +138,13 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = 
addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go index 3f74cb345f..2d15f2a89c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go @@ -83,7 +83,7 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -107,9 +107,6 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -137,16 +134,13 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdateImageStorageClass.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdateImageStorageClass.go new file mode 100644 index 0000000000..c498884574 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdateImageStorageClass.go @@ -0,0 +1,181 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Transitions an image between storage classes. You can transition images from +// Amazon ECR standard storage class to Amazon ECR archival storage class for +// long-term storage, or restore archived images back to Amazon ECR standard. +func (c *Client) UpdateImageStorageClass(ctx context.Context, params *UpdateImageStorageClassInput, optFns ...func(*Options)) (*UpdateImageStorageClassOutput, error) { + if params == nil { + params = &UpdateImageStorageClassInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateImageStorageClass", params, optFns, c.addOperationUpdateImageStorageClassMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateImageStorageClassOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateImageStorageClassInput struct { + + // An object with identifying information for an image in an Amazon ECR repository. + // + // This member is required. + ImageId *types.ImageIdentifier + + // The name of the repository that contains the image to transition. + // + // This member is required. + RepositoryName *string + + // The target storage class for the image. + // + // This member is required. + TargetStorageClass types.TargetStorageClass + + // The Amazon Web Services account ID associated with the registry that contains + // the image to transition. If you do not specify a registry, the default registry + // is assumed. 
+ RegistryId *string + + noSmithyDocumentSerde +} + +type UpdateImageStorageClassOutput struct { + + // An object with identifying information for an image in an Amazon ECR repository. + ImageId *types.ImageIdentifier + + // The current status of the image after the call to UpdateImageStorageClass is + // complete. Valid values are ACTIVE , ARCHIVED , and ACTIVATING . + ImageStatus types.ImageStatus + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateImageStorageClassMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateImageStorageClass{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateImageStorageClass{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateImageStorageClass"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options, c); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = 
addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateImageStorageClassValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateImageStorageClass(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptors(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateImageStorageClass(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateImageStorageClass", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdatePullThroughCacheRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdatePullThroughCacheRule.go 
index 525a4107a2..86dec381fb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdatePullThroughCacheRule.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdatePullThroughCacheRule.go @@ -113,7 +113,7 @@ func (c *Client) addOperationUpdatePullThroughCacheRuleMiddlewares(stack *middle if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -137,9 +137,6 @@ func (c *Client) addOperationUpdatePullThroughCacheRuleMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -167,16 +164,13 @@ func (c *Client) addOperationUpdatePullThroughCacheRuleMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdateRepositoryCreationTemplate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdateRepositoryCreationTemplate.go index 302e6fc8e7..8a02fef847 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdateRepositoryCreationTemplate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdateRepositoryCreationTemplate.go @@ -42,8 +42,8 @@ type 
UpdateRepositoryCreationTemplateInput struct { Prefix *string // Updates the list of enumerable strings representing the Amazon ECR repository - // creation scenarios that this template will apply towards. The two supported - // scenarios are PULL_THROUGH_CACHE and REPLICATION + // creation scenarios that this template will apply towards. The supported + // scenarios are PULL_THROUGH_CACHE , REPLICATION , and CREATE_ON_PUSH AppliedFor []types.RCTAppliedFor // The ARN of the role to be assumed by Amazon ECR. This role must be in the same @@ -65,6 +65,10 @@ type UpdateRepositoryCreationTemplateInput struct { // repository will be immutable which will prevent them from being overwritten. ImageTagMutability types.ImageTagMutability + // A list of filters that specify which image tags should be excluded from the + // repository creation template's image tag mutability setting. + ImageTagMutabilityExclusionFilters []types.ImageTagMutabilityExclusionFilter + // Updates the lifecycle policy associated with the specified repository creation // template. 
LifecyclePolicy *string @@ -131,7 +135,7 @@ func (c *Client) addOperationUpdateRepositoryCreationTemplateMiddlewares(stack * if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -155,9 +159,6 @@ func (c *Client) addOperationUpdateRepositoryCreationTemplateMiddlewares(stack * if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -185,16 +186,13 @@ func (c *Client) addOperationUpdateRepositoryCreationTemplateMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go index 3f29d65939..e4e3b90baf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go @@ -124,7 +124,7 @@ func (c *Client) addOperationUploadLayerPartMiddlewares(stack *middleware.Stack, if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if 
err = addRawResponseToMetadata(stack); err != nil { @@ -148,9 +148,6 @@ func (c *Client) addOperationUploadLayerPartMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -178,16 +175,13 @@ func (c *Client) addOperationUploadLayerPartMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ValidatePullThroughCacheRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ValidatePullThroughCacheRule.go index e7c8f78236..d62d8152ce 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ValidatePullThroughCacheRule.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ValidatePullThroughCacheRule.go @@ -116,7 +116,7 @@ func (c *Client) addOperationValidatePullThroughCacheRuleMiddlewares(stack *midd if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -140,9 +140,6 @@ func (c *Client) addOperationValidatePullThroughCacheRuleMiddlewares(stack *midd if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = 
addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -170,16 +167,13 @@ func (c *Client) addOperationValidatePullThroughCacheRuleMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/auth.go index a542aa8f1a..53b47ee330 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/auth.go @@ -12,10 +12,13 @@ import ( "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" + "slices" + "strings" ) -func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) error { params.Region = options.Region + return nil } type setLegacyContextSigningOptionsMiddleware struct { @@ -92,14 +95,16 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) (*AuthResolverParameters, error) { params := &AuthResolverParameters{ Operation: operation, } - 
bindAuthParamsRegion(ctx, params, input, options) + if err := bindAuthParamsRegion(ctx, params, input, options); err != nil { + return nil, err + } - return params + return params, nil } // AuthSchemeResolver returns a set of possible authentication options for an @@ -150,7 +155,10 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") defer span.End() - params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) + params, err := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) + if err != nil { + return out, metadata, fmt.Errorf("bind auth scheme params: %w", err) + } options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) @@ -169,7 +177,8 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid } func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - for _, option := range options { + sorted := sortAuthOptions(options, m.options.AuthSchemePreference) + for _, option := range sorted { if option.SchemeID == smithyauth.SchemeIDAnonymous { return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true } @@ -188,6 +197,29 @@ func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) return nil, false } +func sortAuthOptions(options []*smithyauth.Option, preferred []string) []*smithyauth.Option { + byPriority := make([]*smithyauth.Option, 0, len(options)) + for _, prefName := range preferred { + for _, option := range options { + optName := option.SchemeID + if parts := strings.Split(option.SchemeID, "#"); len(parts) == 2 { + optName = parts[1] + } + if prefName == optName { + byPriority = append(byPriority, option) + } + } + } + for _, option := range options { + if !slices.ContainsFunc(byPriority, func(o 
*smithyauth.Option) bool { + return o.SchemeID == option.SchemeID + }) { + byPriority = append(byPriority, option) + } + } + return byPriority +} + type resolvedAuthSchemeKey struct{} type resolvedAuthScheme struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go index dc6758fc5b..16ffb5c57c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go @@ -19,17 +19,8 @@ import ( "io" "math" "strings" - "time" ) -func deserializeS3Expires(v string) (*time.Time, error) { - t, err := smithytime.ParseHTTPDate(v) - if err != nil { - return nil, nil - } - return &t, nil -} - type awsAwsjson11_deserializeOpBatchCheckLayerAvailability struct { } @@ -1755,14 +1746,14 @@ func awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response *smithyhttp. } } -type awsAwsjson11_deserializeOpDescribeImageReplicationStatus struct { +type awsAwsjson11_deserializeOpDeleteSigningConfiguration struct { } -func (*awsAwsjson11_deserializeOpDescribeImageReplicationStatus) ID() string { +func (*awsAwsjson11_deserializeOpDeleteSigningConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeImageReplicationStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDeleteSigningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -1780,9 +1771,9 @@ func (m *awsAwsjson11_deserializeOpDescribeImageReplicationStatus) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsAwsjson11_deserializeOpErrorDescribeImageReplicationStatus(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteSigningConfiguration(response, &metadata) } - output := &DescribeImageReplicationStatusOutput{} + output := &DeleteSigningConfigurationOutput{} out.Result = output var buff [1024]byte @@ -1802,7 +1793,7 @@ func (m *awsAwsjson11_deserializeOpDescribeImageReplicationStatus) HandleDeseria return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeImageReplicationStatusOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDeleteSigningConfigurationOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1816,7 +1807,7 @@ func (m *awsAwsjson11_deserializeOpDescribeImageReplicationStatus) HandleDeseria return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeImageReplicationStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDeleteSigningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -1853,18 +1844,12 @@ func awsAwsjson11_deserializeOpErrorDescribeImageReplicationStatus(response *smi errorMessage = bodyInfo.Message } switch { - case strings.EqualFold("ImageNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) - - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - case strings.EqualFold("ServerException", errorCode): return 
awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("SigningConfigurationNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorSigningConfigurationNotFoundException(response, errorBody) + case strings.EqualFold("ValidationException", errorCode): return awsAwsjson11_deserializeErrorValidationException(response, errorBody) @@ -1878,14 +1863,14 @@ func awsAwsjson11_deserializeOpErrorDescribeImageReplicationStatus(response *smi } } -type awsAwsjson11_deserializeOpDescribeImages struct { +type awsAwsjson11_deserializeOpDeregisterPullTimeUpdateExclusion struct { } -func (*awsAwsjson11_deserializeOpDescribeImages) ID() string { +func (*awsAwsjson11_deserializeOpDeregisterPullTimeUpdateExclusion) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDeregisterPullTimeUpdateExclusion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -1903,9 +1888,9 @@ func (m *awsAwsjson11_deserializeOpDescribeImages) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeImages(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDeregisterPullTimeUpdateExclusion(response, &metadata) } - output := &DescribeImagesOutput{} + output := &DeregisterPullTimeUpdateExclusionOutput{} out.Result = output var buff [1024]byte @@ -1925,7 +1910,7 @@ func (m *awsAwsjson11_deserializeOpDescribeImages) HandleDeserialize(ctx context return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeImagesOutput(&output, shape) + err = 
awsAwsjson11_deserializeOpDocumentDeregisterPullTimeUpdateExclusionOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1939,7 +1924,7 @@ func (m *awsAwsjson11_deserializeOpDescribeImages) HandleDeserialize(ctx context return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDeregisterPullTimeUpdateExclusion(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -1976,18 +1961,21 @@ func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response errorMessage = bodyInfo.Message } switch { - case strings.EqualFold("ImageNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) + case strings.EqualFold("ExclusionNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorExclusionNotFoundException(response, errorBody) case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -1998,14 +1986,14 
@@ func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response } } -type awsAwsjson11_deserializeOpDescribeImageScanFindings struct { +type awsAwsjson11_deserializeOpDescribeImageReplicationStatus struct { } -func (*awsAwsjson11_deserializeOpDescribeImageScanFindings) ID() string { +func (*awsAwsjson11_deserializeOpDescribeImageReplicationStatus) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeImageScanFindings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeImageReplicationStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -2023,9 +2011,9 @@ func (m *awsAwsjson11_deserializeOpDescribeImageScanFindings) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeImageScanFindings(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeImageReplicationStatus(response, &metadata) } - output := &DescribeImageScanFindingsOutput{} + output := &DescribeImageReplicationStatusOutput{} out.Result = output var buff [1024]byte @@ -2045,7 +2033,7 @@ func (m *awsAwsjson11_deserializeOpDescribeImageScanFindings) HandleDeserialize( return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeImageScanFindingsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeImageReplicationStatusOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2059,7 +2047,7 @@ func (m *awsAwsjson11_deserializeOpDescribeImageScanFindings) HandleDeserialize( return out, metadata, err } -func 
awsAwsjson11_deserializeOpErrorDescribeImageScanFindings(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeImageReplicationStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -2105,9 +2093,6 @@ func awsAwsjson11_deserializeOpErrorDescribeImageScanFindings(response *smithyht case strings.EqualFold("RepositoryNotFoundException", errorCode): return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - case strings.EqualFold("ScanNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorScanNotFoundException(response, errorBody) - case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) @@ -2124,14 +2109,14 @@ func awsAwsjson11_deserializeOpErrorDescribeImageScanFindings(response *smithyht } } -type awsAwsjson11_deserializeOpDescribePullThroughCacheRules struct { +type awsAwsjson11_deserializeOpDescribeImages struct { } -func (*awsAwsjson11_deserializeOpDescribePullThroughCacheRules) ID() string { +func (*awsAwsjson11_deserializeOpDescribeImages) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribePullThroughCacheRules) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -2149,9 +2134,9 @@ func (m *awsAwsjson11_deserializeOpDescribePullThroughCacheRules) HandleDeserial } if response.StatusCode < 
200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribePullThroughCacheRules(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeImages(response, &metadata) } - output := &DescribePullThroughCacheRulesOutput{} + output := &DescribeImagesOutput{} out.Result = output var buff [1024]byte @@ -2171,7 +2156,7 @@ func (m *awsAwsjson11_deserializeOpDescribePullThroughCacheRules) HandleDeserial return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribePullThroughCacheRulesOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeImagesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2185,7 +2170,7 @@ func (m *awsAwsjson11_deserializeOpDescribePullThroughCacheRules) HandleDeserial return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribePullThroughCacheRules(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -2222,18 +2207,18 @@ func awsAwsjson11_deserializeOpErrorDescribePullThroughCacheRules(response *smit errorMessage = bodyInfo.Message } switch { + case strings.EqualFold("ImageNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) + case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("PullThroughCacheRuleNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorPullThroughCacheRuleNotFoundException(response, errorBody) + case strings.EqualFold("RepositoryNotFoundException", 
errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -2244,14 +2229,14 @@ func awsAwsjson11_deserializeOpErrorDescribePullThroughCacheRules(response *smit } } -type awsAwsjson11_deserializeOpDescribeRegistry struct { +type awsAwsjson11_deserializeOpDescribeImageScanFindings struct { } -func (*awsAwsjson11_deserializeOpDescribeRegistry) ID() string { +func (*awsAwsjson11_deserializeOpDescribeImageScanFindings) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeRegistry) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeImageScanFindings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -2269,9 +2254,9 @@ func (m *awsAwsjson11_deserializeOpDescribeRegistry) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeRegistry(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeImageScanFindings(response, &metadata) } - output := &DescribeRegistryOutput{} + output := &DescribeImageScanFindingsOutput{} out.Result = output var buff [1024]byte @@ -2291,7 +2276,7 @@ func (m *awsAwsjson11_deserializeOpDescribeRegistry) HandleDeserialize(ctx conte return out, metadata, err } - err = 
awsAwsjson11_deserializeOpDocumentDescribeRegistryOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeImageScanFindingsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2305,7 +2290,7 @@ func (m *awsAwsjson11_deserializeOpDescribeRegistry) HandleDeserialize(ctx conte return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeRegistry(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeImageScanFindings(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -2342,9 +2327,18 @@ func awsAwsjson11_deserializeOpErrorDescribeRegistry(response *smithyhttp.Respon errorMessage = bodyInfo.Message } switch { + case strings.EqualFold("ImageNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) + case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ScanNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorScanNotFoundException(response, errorBody) + case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) @@ -2361,14 +2355,14 @@ func awsAwsjson11_deserializeOpErrorDescribeRegistry(response *smithyhttp.Respon } } -type awsAwsjson11_deserializeOpDescribeRepositories struct { +type awsAwsjson11_deserializeOpDescribeImageSigningStatus struct { } -func (*awsAwsjson11_deserializeOpDescribeRepositories) ID() string { 
+func (*awsAwsjson11_deserializeOpDescribeImageSigningStatus) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeRepositories) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeImageSigningStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -2386,9 +2380,9 @@ func (m *awsAwsjson11_deserializeOpDescribeRepositories) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeRepositories(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeImageSigningStatus(response, &metadata) } - output := &DescribeRepositoriesOutput{} + output := &DescribeImageSigningStatusOutput{} out.Result = output var buff [1024]byte @@ -2408,7 +2402,7 @@ func (m *awsAwsjson11_deserializeOpDescribeRepositories) HandleDeserialize(ctx c return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeRepositoriesOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeImageSigningStatusOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2422,7 +2416,7 @@ func (m *awsAwsjson11_deserializeOpDescribeRepositories) HandleDeserialize(ctx c return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeImageSigningStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: 
fmt.Errorf("failed to copy error response body, %w", err)} @@ -2459,6 +2453,9 @@ func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Re errorMessage = bodyInfo.Message } switch { + case strings.EqualFold("ImageNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) + case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) @@ -2468,6 +2465,9 @@ func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Re case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -2478,14 +2478,14 @@ func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Re } } -type awsAwsjson11_deserializeOpDescribeRepositoryCreationTemplates struct { +type awsAwsjson11_deserializeOpDescribePullThroughCacheRules struct { } -func (*awsAwsjson11_deserializeOpDescribeRepositoryCreationTemplates) ID() string { +func (*awsAwsjson11_deserializeOpDescribePullThroughCacheRules) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeRepositoryCreationTemplates) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribePullThroughCacheRules) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -2503,9 +2503,9 @@ func (m *awsAwsjson11_deserializeOpDescribeRepositoryCreationTemplates) HandleDe } if 
response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeRepositoryCreationTemplates(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribePullThroughCacheRules(response, &metadata) } - output := &DescribeRepositoryCreationTemplatesOutput{} + output := &DescribePullThroughCacheRulesOutput{} out.Result = output var buff [1024]byte @@ -2525,7 +2525,7 @@ func (m *awsAwsjson11_deserializeOpDescribeRepositoryCreationTemplates) HandleDe return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeRepositoryCreationTemplatesOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribePullThroughCacheRulesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2539,7 +2539,7 @@ func (m *awsAwsjson11_deserializeOpDescribeRepositoryCreationTemplates) HandleDe return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeRepositoryCreationTemplates(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribePullThroughCacheRules(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -2579,6 +2579,9 @@ func awsAwsjson11_deserializeOpErrorDescribeRepositoryCreationTemplates(response case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + case strings.EqualFold("PullThroughCacheRuleNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorPullThroughCacheRuleNotFoundException(response, errorBody) + case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) @@ -2595,14 
+2598,14 @@ func awsAwsjson11_deserializeOpErrorDescribeRepositoryCreationTemplates(response } } -type awsAwsjson11_deserializeOpGetAccountSetting struct { +type awsAwsjson11_deserializeOpDescribeRegistry struct { } -func (*awsAwsjson11_deserializeOpGetAccountSetting) ID() string { +func (*awsAwsjson11_deserializeOpDescribeRegistry) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpGetAccountSetting) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeRegistry) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -2620,9 +2623,9 @@ func (m *awsAwsjson11_deserializeOpGetAccountSetting) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetAccountSetting(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeRegistry(response, &metadata) } - output := &GetAccountSettingOutput{} + output := &DescribeRegistryOutput{} out.Result = output var buff [1024]byte @@ -2642,7 +2645,7 @@ func (m *awsAwsjson11_deserializeOpGetAccountSetting) HandleDeserialize(ctx cont return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentGetAccountSettingOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeRegistryOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2656,7 +2659,7 @@ func (m *awsAwsjson11_deserializeOpGetAccountSetting) HandleDeserialize(ctx cont return out, metadata, err } -func awsAwsjson11_deserializeOpErrorGetAccountSetting(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeRegistry(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -2712,14 +2715,14 @@ func awsAwsjson11_deserializeOpErrorGetAccountSetting(response *smithyhttp.Respo } } -type awsAwsjson11_deserializeOpGetAuthorizationToken struct { +type awsAwsjson11_deserializeOpDescribeRepositories struct { } -func (*awsAwsjson11_deserializeOpGetAuthorizationToken) ID() string { +func (*awsAwsjson11_deserializeOpDescribeRepositories) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpGetAuthorizationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeRepositories) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -2737,9 +2740,9 @@ func (m *awsAwsjson11_deserializeOpGetAuthorizationToken) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeRepositories(response, &metadata) } - output := &GetAuthorizationTokenOutput{} + output := &DescribeRepositoriesOutput{} out.Result = output var buff [1024]byte @@ -2759,7 +2762,7 @@ func (m *awsAwsjson11_deserializeOpGetAuthorizationToken) HandleDeserialize(ctx return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentGetAuthorizationTokenOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeRepositoriesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ 
-2773,7 +2776,7 @@ func (m *awsAwsjson11_deserializeOpGetAuthorizationToken) HandleDeserialize(ctx return out, metadata, err } -func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -2813,6 +2816,9 @@ func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.R case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) @@ -2826,14 +2832,14 @@ func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.R } } -type awsAwsjson11_deserializeOpGetDownloadUrlForLayer struct { +type awsAwsjson11_deserializeOpDescribeRepositoryCreationTemplates struct { } -func (*awsAwsjson11_deserializeOpGetDownloadUrlForLayer) ID() string { +func (*awsAwsjson11_deserializeOpDescribeRepositoryCreationTemplates) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpGetDownloadUrlForLayer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeRepositoryCreationTemplates) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, 
err = next.HandleDeserialize(ctx, in) @@ -2851,9 +2857,9 @@ func (m *awsAwsjson11_deserializeOpGetDownloadUrlForLayer) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetDownloadUrlForLayer(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeRepositoryCreationTemplates(response, &metadata) } - output := &GetDownloadUrlForLayerOutput{} + output := &DescribeRepositoryCreationTemplatesOutput{} out.Result = output var buff [1024]byte @@ -2873,7 +2879,7 @@ func (m *awsAwsjson11_deserializeOpGetDownloadUrlForLayer) HandleDeserialize(ctx return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentGetDownloadUrlForLayerOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeRepositoryCreationTemplatesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2887,7 +2893,7 @@ func (m *awsAwsjson11_deserializeOpGetDownloadUrlForLayer) HandleDeserialize(ctx return out, metadata, err } -func awsAwsjson11_deserializeOpErrorGetDownloadUrlForLayer(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeRepositoryCreationTemplates(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -2927,20 +2933,11 @@ func awsAwsjson11_deserializeOpErrorGetDownloadUrlForLayer(response *smithyhttp. 
case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("LayerInaccessibleException", errorCode): - return awsAwsjson11_deserializeErrorLayerInaccessibleException(response, errorBody) - - case strings.EqualFold("LayersNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorLayersNotFoundException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) - case strings.EqualFold("UnableToGetUpstreamLayerException", errorCode): - return awsAwsjson11_deserializeErrorUnableToGetUpstreamLayerException(response, errorBody) + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -2952,14 +2949,14 @@ func awsAwsjson11_deserializeOpErrorGetDownloadUrlForLayer(response *smithyhttp. 
} } -type awsAwsjson11_deserializeOpGetLifecyclePolicy struct { +type awsAwsjson11_deserializeOpGetAccountSetting struct { } -func (*awsAwsjson11_deserializeOpGetLifecyclePolicy) ID() string { +func (*awsAwsjson11_deserializeOpGetAccountSetting) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpGetLifecyclePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpGetAccountSetting) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -2977,9 +2974,9 @@ func (m *awsAwsjson11_deserializeOpGetLifecyclePolicy) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetLifecyclePolicy(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorGetAccountSetting(response, &metadata) } - output := &GetLifecyclePolicyOutput{} + output := &GetAccountSettingOutput{} out.Result = output var buff [1024]byte @@ -2999,7 +2996,7 @@ func (m *awsAwsjson11_deserializeOpGetLifecyclePolicy) HandleDeserialize(ctx con return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentGetLifecyclePolicyOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentGetAccountSettingOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3013,7 +3010,7 @@ func (m *awsAwsjson11_deserializeOpGetLifecyclePolicy) HandleDeserialize(ctx con return out, metadata, err } -func awsAwsjson11_deserializeOpErrorGetLifecyclePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorGetAccountSetting(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer 
bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3053,12 +3050,6 @@ func awsAwsjson11_deserializeOpErrorGetLifecyclePolicy(response *smithyhttp.Resp case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("LifecyclePolicyNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorLifecyclePolicyNotFoundException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) @@ -3075,14 +3066,14 @@ func awsAwsjson11_deserializeOpErrorGetLifecyclePolicy(response *smithyhttp.Resp } } -type awsAwsjson11_deserializeOpGetLifecyclePolicyPreview struct { +type awsAwsjson11_deserializeOpGetAuthorizationToken struct { } -func (*awsAwsjson11_deserializeOpGetLifecyclePolicyPreview) ID() string { +func (*awsAwsjson11_deserializeOpGetAuthorizationToken) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpGetLifecyclePolicyPreview) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpGetAuthorizationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3100,9 +3091,9 @@ func (m *awsAwsjson11_deserializeOpGetLifecyclePolicyPreview) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsAwsjson11_deserializeOpErrorGetLifecyclePolicyPreview(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response, &metadata) } - output := &GetLifecyclePolicyPreviewOutput{} + output := &GetAuthorizationTokenOutput{} out.Result = output var buff [1024]byte @@ -3122,7 +3113,7 @@ func (m *awsAwsjson11_deserializeOpGetLifecyclePolicyPreview) HandleDeserialize( return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentGetLifecyclePolicyPreviewOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentGetAuthorizationTokenOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3136,7 +3127,7 @@ func (m *awsAwsjson11_deserializeOpGetLifecyclePolicyPreview) HandleDeserialize( return out, metadata, err } -func awsAwsjson11_deserializeOpErrorGetLifecyclePolicyPreview(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3176,18 +3167,9 @@ func awsAwsjson11_deserializeOpErrorGetLifecyclePolicyPreview(response *smithyht case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("LifecyclePolicyPreviewNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorLifecyclePolicyPreviewNotFoundException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) - case 
strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -3198,14 +3180,14 @@ func awsAwsjson11_deserializeOpErrorGetLifecyclePolicyPreview(response *smithyht } } -type awsAwsjson11_deserializeOpGetRegistryPolicy struct { +type awsAwsjson11_deserializeOpGetDownloadUrlForLayer struct { } -func (*awsAwsjson11_deserializeOpGetRegistryPolicy) ID() string { +func (*awsAwsjson11_deserializeOpGetDownloadUrlForLayer) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpGetRegistryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpGetDownloadUrlForLayer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3223,9 +3205,9 @@ func (m *awsAwsjson11_deserializeOpGetRegistryPolicy) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetRegistryPolicy(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorGetDownloadUrlForLayer(response, &metadata) } - output := &GetRegistryPolicyOutput{} + output := &GetDownloadUrlForLayerOutput{} out.Result = output var buff [1024]byte @@ -3245,7 +3227,7 @@ func (m *awsAwsjson11_deserializeOpGetRegistryPolicy) HandleDeserialize(ctx cont return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentGetRegistryPolicyOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentGetDownloadUrlForLayerOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3259,7 +3241,7 @@ func (m 
*awsAwsjson11_deserializeOpGetRegistryPolicy) HandleDeserialize(ctx cont return out, metadata, err } -func awsAwsjson11_deserializeOpErrorGetRegistryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorGetDownloadUrlForLayer(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3299,14 +3281,20 @@ func awsAwsjson11_deserializeOpErrorGetRegistryPolicy(response *smithyhttp.Respo case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("RegistryPolicyNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRegistryPolicyNotFoundException(response, errorBody) + case strings.EqualFold("LayerInaccessibleException", errorCode): + return awsAwsjson11_deserializeErrorLayerInaccessibleException(response, errorBody) + + case strings.EqualFold("LayersNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorLayersNotFoundException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + case strings.EqualFold("UnableToGetUpstreamLayerException", errorCode): + return awsAwsjson11_deserializeErrorUnableToGetUpstreamLayerException(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -3318,14 +3306,14 @@ func awsAwsjson11_deserializeOpErrorGetRegistryPolicy(response 
*smithyhttp.Respo } } -type awsAwsjson11_deserializeOpGetRegistryScanningConfiguration struct { +type awsAwsjson11_deserializeOpGetLifecyclePolicy struct { } -func (*awsAwsjson11_deserializeOpGetRegistryScanningConfiguration) ID() string { +func (*awsAwsjson11_deserializeOpGetLifecyclePolicy) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpGetRegistryScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpGetLifecyclePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3343,9 +3331,9 @@ func (m *awsAwsjson11_deserializeOpGetRegistryScanningConfiguration) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetRegistryScanningConfiguration(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorGetLifecyclePolicy(response, &metadata) } - output := &GetRegistryScanningConfigurationOutput{} + output := &GetLifecyclePolicyOutput{} out.Result = output var buff [1024]byte @@ -3365,7 +3353,7 @@ func (m *awsAwsjson11_deserializeOpGetRegistryScanningConfiguration) HandleDeser return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentGetRegistryScanningConfigurationOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentGetLifecyclePolicyOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3379,7 +3367,7 @@ func (m *awsAwsjson11_deserializeOpGetRegistryScanningConfiguration) HandleDeser return out, metadata, err } -func awsAwsjson11_deserializeOpErrorGetRegistryScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsAwsjson11_deserializeOpErrorGetLifecyclePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3419,6 +3407,12 @@ func awsAwsjson11_deserializeOpErrorGetRegistryScanningConfiguration(response *s case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + case strings.EqualFold("LifecyclePolicyNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorLifecyclePolicyNotFoundException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) @@ -3435,14 +3429,14 @@ func awsAwsjson11_deserializeOpErrorGetRegistryScanningConfiguration(response *s } } -type awsAwsjson11_deserializeOpGetRepositoryPolicy struct { +type awsAwsjson11_deserializeOpGetLifecyclePolicyPreview struct { } -func (*awsAwsjson11_deserializeOpGetRepositoryPolicy) ID() string { +func (*awsAwsjson11_deserializeOpGetLifecyclePolicyPreview) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpGetRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpGetLifecyclePolicyPreview) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3460,9 +3454,9 @@ func (m *awsAwsjson11_deserializeOpGetRepositoryPolicy) 
HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorGetLifecyclePolicyPreview(response, &metadata) } - output := &GetRepositoryPolicyOutput{} + output := &GetLifecyclePolicyPreviewOutput{} out.Result = output var buff [1024]byte @@ -3482,7 +3476,7 @@ func (m *awsAwsjson11_deserializeOpGetRepositoryPolicy) HandleDeserialize(ctx co return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentGetLifecyclePolicyPreviewOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3496,7 +3490,7 @@ func (m *awsAwsjson11_deserializeOpGetRepositoryPolicy) HandleDeserialize(ctx co return out, metadata, err } -func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorGetLifecyclePolicyPreview(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3536,15 +3530,18 @@ func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Res case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + case strings.EqualFold("LifecyclePolicyPreviewNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorLifecyclePolicyPreviewNotFoundException(response, errorBody) + case strings.EqualFold("RepositoryNotFoundException", errorCode): return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - case 
strings.EqualFold("RepositoryPolicyNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryPolicyNotFoundException(response, errorBody) - case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -3555,14 +3552,14 @@ func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Res } } -type awsAwsjson11_deserializeOpInitiateLayerUpload struct { +type awsAwsjson11_deserializeOpGetRegistryPolicy struct { } -func (*awsAwsjson11_deserializeOpInitiateLayerUpload) ID() string { +func (*awsAwsjson11_deserializeOpGetRegistryPolicy) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpInitiateLayerUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpGetRegistryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3580,9 +3577,9 @@ func (m *awsAwsjson11_deserializeOpInitiateLayerUpload) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorGetRegistryPolicy(response, &metadata) } - output := &InitiateLayerUploadOutput{} + output := &GetRegistryPolicyOutput{} out.Result = output var buff [1024]byte @@ -3602,7 +3599,7 @@ func (m *awsAwsjson11_deserializeOpInitiateLayerUpload) HandleDeserialize(ctx co return out, metadata, err } - err = 
awsAwsjson11_deserializeOpDocumentInitiateLayerUploadOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentGetRegistryPolicyOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3616,7 +3613,7 @@ func (m *awsAwsjson11_deserializeOpInitiateLayerUpload) HandleDeserialize(ctx co return out, metadata, err } -func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorGetRegistryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3656,15 +3653,15 @@ func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Res case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("KmsException", errorCode): - return awsAwsjson11_deserializeErrorKmsException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + case strings.EqualFold("RegistryPolicyNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRegistryPolicyNotFoundException(response, errorBody) case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -3675,14 +3672,14 @@ func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Res } } -type awsAwsjson11_deserializeOpListImages struct { 
+type awsAwsjson11_deserializeOpGetRegistryScanningConfiguration struct { } -func (*awsAwsjson11_deserializeOpListImages) ID() string { +func (*awsAwsjson11_deserializeOpGetRegistryScanningConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpGetRegistryScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3700,9 +3697,9 @@ func (m *awsAwsjson11_deserializeOpListImages) HandleDeserialize(ctx context.Con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListImages(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorGetRegistryScanningConfiguration(response, &metadata) } - output := &ListImagesOutput{} + output := &GetRegistryScanningConfigurationOutput{} out.Result = output var buff [1024]byte @@ -3722,7 +3719,7 @@ func (m *awsAwsjson11_deserializeOpListImages) HandleDeserialize(ctx context.Con return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListImagesOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentGetRegistryScanningConfigurationOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3736,7 +3733,7 @@ func (m *awsAwsjson11_deserializeOpListImages) HandleDeserialize(ctx context.Con return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorGetRegistryScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer 
bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3776,12 +3773,12 @@ func awsAwsjson11_deserializeOpErrorListImages(response *smithyhttp.Response, me case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -3792,14 +3789,14 @@ func awsAwsjson11_deserializeOpErrorListImages(response *smithyhttp.Response, me } } -type awsAwsjson11_deserializeOpListTagsForResource struct { +type awsAwsjson11_deserializeOpGetRepositoryPolicy struct { } -func (*awsAwsjson11_deserializeOpListTagsForResource) ID() string { +func (*awsAwsjson11_deserializeOpGetRepositoryPolicy) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpGetRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3817,9 +3814,9 @@ func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsAwsjson11_deserializeOpErrorListTagsForResource(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response, &metadata) } - output := &ListTagsForResourceOutput{} + output := &GetRepositoryPolicyOutput{} out.Result = output var buff [1024]byte @@ -3839,7 +3836,7 @@ func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx co return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3853,7 +3850,7 @@ func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx co return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3896,6 +3893,9 @@ func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Res case strings.EqualFold("RepositoryNotFoundException", errorCode): return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + case strings.EqualFold("RepositoryPolicyNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryPolicyNotFoundException(response, errorBody) + case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) @@ -3909,14 +3909,14 @@ func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Res } } -type awsAwsjson11_deserializeOpPutAccountSetting struct { +type 
awsAwsjson11_deserializeOpGetSigningConfiguration struct { } -func (*awsAwsjson11_deserializeOpPutAccountSetting) ID() string { +func (*awsAwsjson11_deserializeOpGetSigningConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpPutAccountSetting) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpGetSigningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3934,9 +3934,9 @@ func (m *awsAwsjson11_deserializeOpPutAccountSetting) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorPutAccountSetting(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorGetSigningConfiguration(response, &metadata) } - output := &PutAccountSettingOutput{} + output := &GetSigningConfigurationOutput{} out.Result = output var buff [1024]byte @@ -3956,7 +3956,7 @@ func (m *awsAwsjson11_deserializeOpPutAccountSetting) HandleDeserialize(ctx cont return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentPutAccountSettingOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentGetSigningConfigurationOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3970,7 +3970,7 @@ func (m *awsAwsjson11_deserializeOpPutAccountSetting) HandleDeserialize(ctx cont return out, metadata, err } -func awsAwsjson11_deserializeOpErrorPutAccountSetting(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorGetSigningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := 
io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4010,12 +4010,12 @@ func awsAwsjson11_deserializeOpErrorPutAccountSetting(response *smithyhttp.Respo case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("LimitExceededException", errorCode): - return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) - case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("SigningConfigurationNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorSigningConfigurationNotFoundException(response, errorBody) + case strings.EqualFold("ValidationException", errorCode): return awsAwsjson11_deserializeErrorValidationException(response, errorBody) @@ -4029,14 +4029,14 @@ func awsAwsjson11_deserializeOpErrorPutAccountSetting(response *smithyhttp.Respo } } -type awsAwsjson11_deserializeOpPutImage struct { +type awsAwsjson11_deserializeOpInitiateLayerUpload struct { } -func (*awsAwsjson11_deserializeOpPutImage) ID() string { +func (*awsAwsjson11_deserializeOpInitiateLayerUpload) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpPutImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpInitiateLayerUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4054,9 +4054,9 @@ func (m *awsAwsjson11_deserializeOpPutImage) HandleDeserialize(ctx context.Conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return 
out, metadata, awsAwsjson11_deserializeOpErrorPutImage(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response, &metadata) } - output := &PutImageOutput{} + output := &InitiateLayerUploadOutput{} out.Result = output var buff [1024]byte @@ -4076,7 +4076,7 @@ func (m *awsAwsjson11_deserializeOpPutImage) HandleDeserialize(ctx context.Conte return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentPutImageOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentInitiateLayerUploadOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4090,7 +4090,7 @@ func (m *awsAwsjson11_deserializeOpPutImage) HandleDeserialize(ctx context.Conte return out, metadata, err } -func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4127,30 +4127,12 @@ func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, meta errorMessage = bodyInfo.Message } switch { - case strings.EqualFold("ImageAlreadyExistsException", errorCode): - return awsAwsjson11_deserializeErrorImageAlreadyExistsException(response, errorBody) - - case strings.EqualFold("ImageDigestDoesNotMatchException", errorCode): - return awsAwsjson11_deserializeErrorImageDigestDoesNotMatchException(response, errorBody) - - case strings.EqualFold("ImageTagAlreadyExistsException", errorCode): - return awsAwsjson11_deserializeErrorImageTagAlreadyExistsException(response, errorBody) - case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) case 
strings.EqualFold("KmsException", errorCode): return awsAwsjson11_deserializeErrorKmsException(response, errorBody) - case strings.EqualFold("LayersNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorLayersNotFoundException(response, errorBody) - - case strings.EqualFold("LimitExceededException", errorCode): - return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) - - case strings.EqualFold("ReferencedImagesNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorReferencedImagesNotFoundException(response, errorBody) - case strings.EqualFold("RepositoryNotFoundException", errorCode): return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) @@ -4167,14 +4149,14 @@ func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, meta } } -type awsAwsjson11_deserializeOpPutImageScanningConfiguration struct { +type awsAwsjson11_deserializeOpListImageReferrers struct { } -func (*awsAwsjson11_deserializeOpPutImageScanningConfiguration) ID() string { +func (*awsAwsjson11_deserializeOpListImageReferrers) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpPutImageScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListImageReferrers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4192,9 +4174,9 @@ func (m *awsAwsjson11_deserializeOpPutImageScanningConfiguration) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorPutImageScanningConfiguration(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListImageReferrers(response, &metadata) } - 
output := &PutImageScanningConfigurationOutput{} + output := &ListImageReferrersOutput{} out.Result = output var buff [1024]byte @@ -4214,7 +4196,7 @@ func (m *awsAwsjson11_deserializeOpPutImageScanningConfiguration) HandleDeserial return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentPutImageScanningConfigurationOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListImageReferrersOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4228,7 +4210,7 @@ func (m *awsAwsjson11_deserializeOpPutImageScanningConfiguration) HandleDeserial return out, metadata, err } -func awsAwsjson11_deserializeOpErrorPutImageScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListImageReferrers(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4287,14 +4269,14 @@ func awsAwsjson11_deserializeOpErrorPutImageScanningConfiguration(response *smit } } -type awsAwsjson11_deserializeOpPutImageTagMutability struct { +type awsAwsjson11_deserializeOpListImages struct { } -func (*awsAwsjson11_deserializeOpPutImageTagMutability) ID() string { +func (*awsAwsjson11_deserializeOpListImages) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpPutImageTagMutability) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4312,9 +4294,9 @@ func (m 
*awsAwsjson11_deserializeOpPutImageTagMutability) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorPutImageTagMutability(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListImages(response, &metadata) } - output := &PutImageTagMutabilityOutput{} + output := &ListImagesOutput{} out.Result = output var buff [1024]byte @@ -4334,7 +4316,7 @@ func (m *awsAwsjson11_deserializeOpPutImageTagMutability) HandleDeserialize(ctx return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentPutImageTagMutabilityOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListImagesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4348,7 +4330,7 @@ func (m *awsAwsjson11_deserializeOpPutImageTagMutability) HandleDeserialize(ctx return out, metadata, err } -func awsAwsjson11_deserializeOpErrorPutImageTagMutability(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4404,14 +4386,14 @@ func awsAwsjson11_deserializeOpErrorPutImageTagMutability(response *smithyhttp.R } } -type awsAwsjson11_deserializeOpPutLifecyclePolicy struct { +type awsAwsjson11_deserializeOpListPullTimeUpdateExclusions struct { } -func (*awsAwsjson11_deserializeOpPutLifecyclePolicy) ID() string { +func (*awsAwsjson11_deserializeOpListPullTimeUpdateExclusions) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpPutLifecyclePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m 
*awsAwsjson11_deserializeOpListPullTimeUpdateExclusions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4429,9 +4411,9 @@ func (m *awsAwsjson11_deserializeOpPutLifecyclePolicy) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorPutLifecyclePolicy(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListPullTimeUpdateExclusions(response, &metadata) } - output := &PutLifecyclePolicyOutput{} + output := &ListPullTimeUpdateExclusionsOutput{} out.Result = output var buff [1024]byte @@ -4451,7 +4433,7 @@ func (m *awsAwsjson11_deserializeOpPutLifecyclePolicy) HandleDeserialize(ctx con return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentPutLifecyclePolicyOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListPullTimeUpdateExclusionsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4465,7 +4447,7 @@ func (m *awsAwsjson11_deserializeOpPutLifecyclePolicy) HandleDeserialize(ctx con return out, metadata, err } -func awsAwsjson11_deserializeOpErrorPutLifecyclePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListPullTimeUpdateExclusions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4505,8 +4487,8 @@ func awsAwsjson11_deserializeOpErrorPutLifecyclePolicy(response *smithyhttp.Resp case strings.EqualFold("InvalidParameterException", errorCode): return 
awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) @@ -4524,14 +4506,14 @@ func awsAwsjson11_deserializeOpErrorPutLifecyclePolicy(response *smithyhttp.Resp } } -type awsAwsjson11_deserializeOpPutRegistryPolicy struct { +type awsAwsjson11_deserializeOpListTagsForResource struct { } -func (*awsAwsjson11_deserializeOpPutRegistryPolicy) ID() string { +func (*awsAwsjson11_deserializeOpListTagsForResource) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpPutRegistryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4549,9 +4531,9 @@ func (m *awsAwsjson11_deserializeOpPutRegistryPolicy) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorPutRegistryPolicy(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListTagsForResource(response, &metadata) } - output := &PutRegistryPolicyOutput{} + output := &ListTagsForResourceOutput{} out.Result = output var buff [1024]byte @@ -4571,7 +4553,7 @@ func (m *awsAwsjson11_deserializeOpPutRegistryPolicy) HandleDeserialize(ctx cont return out, metadata, err } - err = 
awsAwsjson11_deserializeOpDocumentPutRegistryPolicyOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4585,7 +4567,7 @@ func (m *awsAwsjson11_deserializeOpPutRegistryPolicy) HandleDeserialize(ctx cont return out, metadata, err } -func awsAwsjson11_deserializeOpErrorPutRegistryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4625,12 +4607,12 @@ func awsAwsjson11_deserializeOpErrorPutRegistryPolicy(response *smithyhttp.Respo case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -4641,14 +4623,14 @@ func awsAwsjson11_deserializeOpErrorPutRegistryPolicy(response *smithyhttp.Respo } } -type awsAwsjson11_deserializeOpPutRegistryScanningConfiguration struct { +type awsAwsjson11_deserializeOpPutAccountSetting struct { } -func (*awsAwsjson11_deserializeOpPutRegistryScanningConfiguration) ID() string { +func (*awsAwsjson11_deserializeOpPutAccountSetting) ID() string { return "OperationDeserializer" } -func (m 
*awsAwsjson11_deserializeOpPutRegistryScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpPutAccountSetting) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4666,9 +4648,9 @@ func (m *awsAwsjson11_deserializeOpPutRegistryScanningConfiguration) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorPutRegistryScanningConfiguration(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorPutAccountSetting(response, &metadata) } - output := &PutRegistryScanningConfigurationOutput{} + output := &PutAccountSettingOutput{} out.Result = output var buff [1024]byte @@ -4688,7 +4670,7 @@ func (m *awsAwsjson11_deserializeOpPutRegistryScanningConfiguration) HandleDeser return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentPutRegistryScanningConfigurationOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentPutAccountSettingOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4702,7 +4684,7 @@ func (m *awsAwsjson11_deserializeOpPutRegistryScanningConfiguration) HandleDeser return out, metadata, err } -func awsAwsjson11_deserializeOpErrorPutRegistryScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorPutAccountSetting(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4742,6 +4724,9 @@ func 
awsAwsjson11_deserializeOpErrorPutRegistryScanningConfiguration(response *s case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) @@ -4758,14 +4743,14 @@ func awsAwsjson11_deserializeOpErrorPutRegistryScanningConfiguration(response *s } } -type awsAwsjson11_deserializeOpPutReplicationConfiguration struct { +type awsAwsjson11_deserializeOpPutImage struct { } -func (*awsAwsjson11_deserializeOpPutReplicationConfiguration) ID() string { +func (*awsAwsjson11_deserializeOpPutImage) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpPutReplicationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpPutImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4783,9 +4768,9 @@ func (m *awsAwsjson11_deserializeOpPutReplicationConfiguration) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorPutReplicationConfiguration(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorPutImage(response, &metadata) } - output := &PutReplicationConfigurationOutput{} + output := &PutImageOutput{} out.Result = output var buff [1024]byte @@ -4805,7 +4790,7 @@ func (m *awsAwsjson11_deserializeOpPutReplicationConfiguration) HandleDeserializ return out, metadata, err } - err = 
awsAwsjson11_deserializeOpDocumentPutReplicationConfigurationOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentPutImageOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4819,7 +4804,7 @@ func (m *awsAwsjson11_deserializeOpPutReplicationConfiguration) HandleDeserializ return out, metadata, err } -func awsAwsjson11_deserializeOpErrorPutReplicationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4856,33 +4841,54 @@ func awsAwsjson11_deserializeOpErrorPutReplicationConfiguration(response *smithy errorMessage = bodyInfo.Message } switch { + case strings.EqualFold("ImageAlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorImageAlreadyExistsException(response, errorBody) + + case strings.EqualFold("ImageDigestDoesNotMatchException", errorCode): + return awsAwsjson11_deserializeErrorImageDigestDoesNotMatchException(response, errorBody) + + case strings.EqualFold("ImageTagAlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorImageTagAlreadyExistsException(response, errorBody) + case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("KmsException", errorCode): + return awsAwsjson11_deserializeErrorKmsException(response, errorBody) - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + case 
strings.EqualFold("LayersNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorLayersNotFoundException(response, errorBody) - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ReferencedImagesNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorReferencedImagesNotFoundException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } return genericError } } -type awsAwsjson11_deserializeOpSetRepositoryPolicy struct { +type awsAwsjson11_deserializeOpPutImageScanningConfiguration struct { } -func (*awsAwsjson11_deserializeOpSetRepositoryPolicy) ID() string { +func (*awsAwsjson11_deserializeOpPutImageScanningConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpSetRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpPutImageScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4900,9 +4906,9 @@ func (m *awsAwsjson11_deserializeOpSetRepositoryPolicy) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorPutImageScanningConfiguration(response, &metadata) } - output := &SetRepositoryPolicyOutput{} + output := &PutImageScanningConfigurationOutput{} out.Result = output var buff [1024]byte @@ -4922,7 +4928,7 @@ func (m *awsAwsjson11_deserializeOpSetRepositoryPolicy) HandleDeserialize(ctx co return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentSetRepositoryPolicyOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentPutImageScanningConfigurationOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4936,7 +4942,7 @@ func (m *awsAwsjson11_deserializeOpSetRepositoryPolicy) HandleDeserialize(ctx co return out, metadata, err } -func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorPutImageScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4982,6 +4988,9 @@ func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Res case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -4992,14 +5001,14 @@ func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Res } } -type awsAwsjson11_deserializeOpStartImageScan struct { +type awsAwsjson11_deserializeOpPutImageTagMutability struct { } -func (*awsAwsjson11_deserializeOpStartImageScan) 
ID() string { +func (*awsAwsjson11_deserializeOpPutImageTagMutability) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpStartImageScan) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpPutImageTagMutability) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -5017,9 +5026,9 @@ func (m *awsAwsjson11_deserializeOpStartImageScan) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorStartImageScan(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorPutImageTagMutability(response, &metadata) } - output := &StartImageScanOutput{} + output := &PutImageTagMutabilityOutput{} out.Result = output var buff [1024]byte @@ -5039,7 +5048,7 @@ func (m *awsAwsjson11_deserializeOpStartImageScan) HandleDeserialize(ctx context return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentStartImageScanOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentPutImageTagMutabilityOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5053,7 +5062,7 @@ func (m *awsAwsjson11_deserializeOpStartImageScan) HandleDeserialize(ctx context return out, metadata, err } -func awsAwsjson11_deserializeOpErrorStartImageScan(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorPutImageTagMutability(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} 
@@ -5090,27 +5099,15 @@ func awsAwsjson11_deserializeOpErrorStartImageScan(response *smithyhttp.Response errorMessage = bodyInfo.Message } switch { - case strings.EqualFold("ImageNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) - case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("LimitExceededException", errorCode): - return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) - case strings.EqualFold("RepositoryNotFoundException", errorCode): return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) - case strings.EqualFold("UnsupportedImageTypeException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedImageTypeException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -5121,14 +5118,14 @@ func awsAwsjson11_deserializeOpErrorStartImageScan(response *smithyhttp.Response } } -type awsAwsjson11_deserializeOpStartLifecyclePolicyPreview struct { +type awsAwsjson11_deserializeOpPutLifecyclePolicy struct { } -func (*awsAwsjson11_deserializeOpStartLifecyclePolicyPreview) ID() string { +func (*awsAwsjson11_deserializeOpPutLifecyclePolicy) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpStartLifecyclePolicyPreview) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpPutLifecyclePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -5146,9 +5143,9 @@ func (m *awsAwsjson11_deserializeOpStartLifecyclePolicyPreview) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorStartLifecyclePolicyPreview(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorPutLifecyclePolicy(response, &metadata) } - output := &StartLifecyclePolicyPreviewOutput{} + output := &PutLifecyclePolicyOutput{} out.Result = output var buff [1024]byte @@ -5168,7 +5165,7 @@ func (m *awsAwsjson11_deserializeOpStartLifecyclePolicyPreview) HandleDeserializ return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentStartLifecyclePolicyPreviewOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentPutLifecyclePolicyOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5182,7 +5179,7 @@ func (m *awsAwsjson11_deserializeOpStartLifecyclePolicyPreview) HandleDeserializ return out, metadata, err } -func awsAwsjson11_deserializeOpErrorStartLifecyclePolicyPreview(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorPutLifecyclePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -5222,12 +5219,6 @@ func awsAwsjson11_deserializeOpErrorStartLifecyclePolicyPreview(response *smithy case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("LifecyclePolicyNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorLifecyclePolicyNotFoundException(response, 
errorBody) - - case strings.EqualFold("LifecyclePolicyPreviewInProgressException", errorCode): - return awsAwsjson11_deserializeErrorLifecyclePolicyPreviewInProgressException(response, errorBody) - case strings.EqualFold("RepositoryNotFoundException", errorCode): return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) @@ -5247,14 +5238,14 @@ func awsAwsjson11_deserializeOpErrorStartLifecyclePolicyPreview(response *smithy } } -type awsAwsjson11_deserializeOpTagResource struct { +type awsAwsjson11_deserializeOpPutRegistryPolicy struct { } -func (*awsAwsjson11_deserializeOpTagResource) ID() string { +func (*awsAwsjson11_deserializeOpPutRegistryPolicy) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpPutRegistryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -5272,9 +5263,9 @@ func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorPutRegistryPolicy(response, &metadata) } - output := &TagResourceOutput{} + output := &PutRegistryPolicyOutput{} out.Result = output var buff [1024]byte @@ -5294,7 +5285,7 @@ func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Co return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentTagResourceOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentPutRegistryPolicyOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, 
ringBuffer) @@ -5308,7 +5299,7 @@ func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorPutRegistryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -5348,17 +5339,11 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("InvalidTagParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) - case strings.EqualFold("TooManyTagsException", errorCode): - return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -5370,14 +5355,14 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m } } -type awsAwsjson11_deserializeOpUntagResource struct { +type awsAwsjson11_deserializeOpPutRegistryScanningConfiguration struct { } -func (*awsAwsjson11_deserializeOpUntagResource) ID() string { +func (*awsAwsjson11_deserializeOpPutRegistryScanningConfiguration) ID() string { return 
"OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpPutRegistryScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -5395,9 +5380,9 @@ func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorPutRegistryScanningConfiguration(response, &metadata) } - output := &UntagResourceOutput{} + output := &PutRegistryScanningConfigurationOutput{} out.Result = output var buff [1024]byte @@ -5417,7 +5402,7 @@ func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context. return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentUntagResourceOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentPutRegistryScanningConfigurationOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5431,7 +5416,7 @@ func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context. 
return out, metadata, err } -func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorPutRegistryScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -5468,20 +5453,17 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, errorMessage = bodyInfo.Message } switch { + case strings.EqualFold("BlockedByOrganizationPolicyException", errorCode): + return awsAwsjson11_deserializeErrorBlockedByOrganizationPolicyException(response, errorBody) + case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("InvalidTagParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) - case strings.EqualFold("TooManyTagsException", errorCode): - return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -5493,14 +5475,14 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, } } -type awsAwsjson11_deserializeOpUpdatePullThroughCacheRule struct { +type awsAwsjson11_deserializeOpPutReplicationConfiguration struct { } -func 
(*awsAwsjson11_deserializeOpUpdatePullThroughCacheRule) ID() string { +func (*awsAwsjson11_deserializeOpPutReplicationConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpUpdatePullThroughCacheRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpPutReplicationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -5518,9 +5500,9 @@ func (m *awsAwsjson11_deserializeOpUpdatePullThroughCacheRule) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorUpdatePullThroughCacheRule(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorPutReplicationConfiguration(response, &metadata) } - output := &UpdatePullThroughCacheRuleOutput{} + output := &PutReplicationConfigurationOutput{} out.Result = output var buff [1024]byte @@ -5540,7 +5522,7 @@ func (m *awsAwsjson11_deserializeOpUpdatePullThroughCacheRule) HandleDeserialize return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentUpdatePullThroughCacheRuleOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentPutReplicationConfigurationOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5554,7 +5536,7 @@ func (m *awsAwsjson11_deserializeOpUpdatePullThroughCacheRule) HandleDeserialize return out, metadata, err } -func awsAwsjson11_deserializeOpErrorUpdatePullThroughCacheRule(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorPutReplicationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := 
io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -5594,21 +5576,9 @@ func awsAwsjson11_deserializeOpErrorUpdatePullThroughCacheRule(response *smithyh case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("PullThroughCacheRuleNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorPullThroughCacheRuleNotFoundException(response, errorBody) - - case strings.EqualFold("SecretNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorSecretNotFoundException(response, errorBody) - case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) - case strings.EqualFold("UnableToAccessSecretException", errorCode): - return awsAwsjson11_deserializeErrorUnableToAccessSecretException(response, errorBody) - - case strings.EqualFold("UnableToDecryptSecretValueException", errorCode): - return awsAwsjson11_deserializeErrorUnableToDecryptSecretValueException(response, errorBody) - case strings.EqualFold("ValidationException", errorCode): return awsAwsjson11_deserializeErrorValidationException(response, errorBody) @@ -5622,14 +5592,14 @@ func awsAwsjson11_deserializeOpErrorUpdatePullThroughCacheRule(response *smithyh } } -type awsAwsjson11_deserializeOpUpdateRepositoryCreationTemplate struct { +type awsAwsjson11_deserializeOpPutSigningConfiguration struct { } -func (*awsAwsjson11_deserializeOpUpdateRepositoryCreationTemplate) ID() string { +func (*awsAwsjson11_deserializeOpPutSigningConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpUpdateRepositoryCreationTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m 
*awsAwsjson11_deserializeOpPutSigningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -5647,9 +5617,9 @@ func (m *awsAwsjson11_deserializeOpUpdateRepositoryCreationTemplate) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorUpdateRepositoryCreationTemplate(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorPutSigningConfiguration(response, &metadata) } - output := &UpdateRepositoryCreationTemplateOutput{} + output := &PutSigningConfigurationOutput{} out.Result = output var buff [1024]byte @@ -5669,7 +5639,7 @@ func (m *awsAwsjson11_deserializeOpUpdateRepositoryCreationTemplate) HandleDeser return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentUpdateRepositoryCreationTemplateOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentPutSigningConfigurationOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5683,7 +5653,7 @@ func (m *awsAwsjson11_deserializeOpUpdateRepositoryCreationTemplate) HandleDeser return out, metadata, err } -func awsAwsjson11_deserializeOpErrorUpdateRepositoryCreationTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorPutSigningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -5726,9 +5696,6 @@ func awsAwsjson11_deserializeOpErrorUpdateRepositoryCreationTemplate(response *s case strings.EqualFold("ServerException", errorCode): return 
awsAwsjson11_deserializeErrorServerException(response, errorBody) - case strings.EqualFold("TemplateNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorTemplateNotFoundException(response, errorBody) - case strings.EqualFold("ValidationException", errorCode): return awsAwsjson11_deserializeErrorValidationException(response, errorBody) @@ -5742,14 +5709,14 @@ func awsAwsjson11_deserializeOpErrorUpdateRepositoryCreationTemplate(response *s } } -type awsAwsjson11_deserializeOpUploadLayerPart struct { +type awsAwsjson11_deserializeOpRegisterPullTimeUpdateExclusion struct { } -func (*awsAwsjson11_deserializeOpUploadLayerPart) ID() string { +func (*awsAwsjson11_deserializeOpRegisterPullTimeUpdateExclusion) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpUploadLayerPart) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpRegisterPullTimeUpdateExclusion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -5767,9 +5734,9 @@ func (m *awsAwsjson11_deserializeOpUploadLayerPart) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorUploadLayerPart(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorRegisterPullTimeUpdateExclusion(response, &metadata) } - output := &UploadLayerPartOutput{} + output := &RegisterPullTimeUpdateExclusionOutput{} out.Result = output var buff [1024]byte @@ -5789,7 +5756,7 @@ func (m *awsAwsjson11_deserializeOpUploadLayerPart) HandleDeserialize(ctx contex return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentUploadLayerPartOutput(&output, shape) + err = 
awsAwsjson11_deserializeOpDocumentRegisterPullTimeUpdateExclusionOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5803,7 +5770,7 @@ func (m *awsAwsjson11_deserializeOpUploadLayerPart) HandleDeserialize(ctx contex return out, metadata, err } -func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorRegisterPullTimeUpdateExclusion(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -5840,26 +5807,20 @@ func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Respons errorMessage = bodyInfo.Message } switch { - case strings.EqualFold("InvalidLayerPartException", errorCode): - return awsAwsjson11_deserializeErrorInvalidLayerPartException(response, errorBody) + case strings.EqualFold("ExclusionAlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorExclusionAlreadyExistsException(response, errorBody) case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("KmsException", errorCode): - return awsAwsjson11_deserializeErrorKmsException(response, errorBody) - case strings.EqualFold("LimitExceededException", errorCode): return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) - case strings.EqualFold("UploadNotFoundException", errorCode): - return 
awsAwsjson11_deserializeErrorUploadNotFoundException(response, errorBody) + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -5871,14 +5832,14 @@ func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Respons } } -type awsAwsjson11_deserializeOpValidatePullThroughCacheRule struct { +type awsAwsjson11_deserializeOpSetRepositoryPolicy struct { } -func (*awsAwsjson11_deserializeOpValidatePullThroughCacheRule) ID() string { +func (*awsAwsjson11_deserializeOpSetRepositoryPolicy) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpValidatePullThroughCacheRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpSetRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -5896,9 +5857,9 @@ func (m *awsAwsjson11_deserializeOpValidatePullThroughCacheRule) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorValidatePullThroughCacheRule(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response, &metadata) } - output := &ValidatePullThroughCacheRuleOutput{} + output := &SetRepositoryPolicyOutput{} out.Result = output var buff [1024]byte @@ -5918,7 +5879,7 @@ func (m *awsAwsjson11_deserializeOpValidatePullThroughCacheRule) HandleDeseriali return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentValidatePullThroughCacheRuleOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentSetRepositoryPolicyOutput(&output, shape) if err != nil { var snapshot 
bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5932,7 +5893,7 @@ func (m *awsAwsjson11_deserializeOpValidatePullThroughCacheRule) HandleDeseriali return out, metadata, err } -func awsAwsjson11_deserializeOpErrorValidatePullThroughCacheRule(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -5972,15 +5933,12 @@ func awsAwsjson11_deserializeOpErrorValidatePullThroughCacheRule(response *smith case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - case strings.EqualFold("PullThroughCacheRuleNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorPullThroughCacheRuleNotFoundException(response, errorBody) + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -5991,11 +5949,40 @@ func awsAwsjson11_deserializeOpErrorValidatePullThroughCacheRule(response *smith } } -func awsAwsjson11_deserializeErrorEmptyUploadException(response *smithyhttp.Response, errorBody *bytes.Reader) error { +type awsAwsjson11_deserializeOpStartImageScan struct { +} + +func (*awsAwsjson11_deserializeOpStartImageScan) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpStartImageScan) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorStartImageScan(response, &metadata) + } + output := &StartImageScanOutput{} + out.Result = output + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) + body := io.TeeReader(response.Body, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() var shape interface{} @@ -6006,12 +5993,10 @@ func awsAwsjson11_deserializeErrorEmptyUploadException(response *smithyhttp.Resp Err: fmt.Errorf("failed to decode response body, %w", err), Snapshot: snapshot.Bytes(), } - return err + return out, metadata, err } - output := &types.EmptyUploadException{} - err := awsAwsjson11_deserializeDocumentEmptyUploadException(&output, shape) - + err = awsAwsjson11_deserializeOpDocumentStartImageScanOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -6019,20 +6004,1226 @@ func awsAwsjson11_deserializeErrorEmptyUploadException(response *smithyhttp.Resp Err: fmt.Errorf("failed to decode response body, %w", err), Snapshot: snapshot.Bytes(), } - return err + return out, metadata, err } - errorBody.Seek(0, io.SeekStart) - return output + return out, metadata, err } -func 
awsAwsjson11_deserializeErrorImageAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() +func awsAwsjson11_deserializeOpErrorStartImageScan(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ImageArchivedException", errorCode): + return awsAwsjson11_deserializeErrorImageArchivedException(response, errorBody) + + case strings.EqualFold("ImageNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + 
return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnsupportedImageTypeException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedImageTypeException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpStartLifecyclePolicyPreview struct { +} + +func (*awsAwsjson11_deserializeOpStartLifecyclePolicyPreview) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpStartLifecyclePolicyPreview) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorStartLifecyclePolicyPreview(response, &metadata) + } + output := &StartLifecyclePolicyPreviewOutput{} + out.Result = output + + var buff [1024]byte + 
ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentStartLifecyclePolicyPreviewOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorStartLifecyclePolicyPreview(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { 
+ errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("LifecyclePolicyNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorLifecyclePolicyNotFoundException(response, errorBody) + + case strings.EqualFold("LifecyclePolicyPreviewInProgressException", errorCode): + return awsAwsjson11_deserializeErrorLifecyclePolicyPreviewInProgressException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpTagResource struct { +} + +func (*awsAwsjson11_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if 
response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentTagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + 
return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("InvalidTagParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("TooManyTagsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUntagResource struct { +} + +func (*awsAwsjson11_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", 
out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUntagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + 
Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("InvalidTagParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("TooManyTagsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateImageStorageClass struct { +} + +func (*awsAwsjson11_deserializeOpUpdateImageStorageClass) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateImageStorageClass) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateImageStorageClass(response, &metadata) + } + output := &UpdateImageStorageClassOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateImageStorageClassOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateImageStorageClass(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ImageNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) + + case strings.EqualFold("ImageStorageClassUpdateNotSupportedException", errorCode): + return awsAwsjson11_deserializeErrorImageStorageClassUpdateNotSupportedException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdatePullThroughCacheRule struct { +} + +func (*awsAwsjson11_deserializeOpUpdatePullThroughCacheRule) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdatePullThroughCacheRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, 
err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdatePullThroughCacheRule(response, &metadata) + } + output := &UpdatePullThroughCacheRuleOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdatePullThroughCacheRuleOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdatePullThroughCacheRule(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + 
var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("PullThroughCacheRuleNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorPullThroughCacheRuleNotFoundException(response, errorBody) + + case strings.EqualFold("SecretNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorSecretNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnableToAccessSecretException", errorCode): + return awsAwsjson11_deserializeErrorUnableToAccessSecretException(response, errorBody) + + case strings.EqualFold("UnableToDecryptSecretValueException", errorCode): + return awsAwsjson11_deserializeErrorUnableToDecryptSecretValueException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateRepositoryCreationTemplate struct { +} + +func 
(*awsAwsjson11_deserializeOpUpdateRepositoryCreationTemplate) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateRepositoryCreationTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateRepositoryCreationTemplate(response, &metadata) + } + output := &UpdateRepositoryCreationTemplateOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateRepositoryCreationTemplateOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func 
awsAwsjson11_deserializeOpErrorUpdateRepositoryCreationTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("TemplateNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorTemplateNotFoundException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUploadLayerPart struct { +} + +func 
(*awsAwsjson11_deserializeOpUploadLayerPart) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUploadLayerPart) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUploadLayerPart(response, &metadata) + } + output := &UploadLayerPartOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUploadLayerPartOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InvalidLayerPartException", errorCode): + return awsAwsjson11_deserializeErrorInvalidLayerPartException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("KmsException", errorCode): + return awsAwsjson11_deserializeErrorKmsException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case 
strings.EqualFold("UploadNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorUploadNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpValidatePullThroughCacheRule struct { +} + +func (*awsAwsjson11_deserializeOpValidatePullThroughCacheRule) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpValidatePullThroughCacheRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorValidatePullThroughCacheRule(response, &metadata) + } + output := &ValidatePullThroughCacheRuleOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = 
awsAwsjson11_deserializeOpDocumentValidatePullThroughCacheRuleOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorValidatePullThroughCacheRule(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("PullThroughCacheRuleNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorPullThroughCacheRuleNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return 
awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsjson11_deserializeErrorBlockedByOrganizationPolicyException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.BlockedByOrganizationPolicyException{} + err := awsAwsjson11_deserializeDocumentBlockedByOrganizationPolicyException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorEmptyUploadException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + 
+ output := &types.EmptyUploadException{} + err := awsAwsjson11_deserializeDocumentEmptyUploadException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorExclusionAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ExclusionAlreadyExistsException{} + err := awsAwsjson11_deserializeDocumentExclusionAlreadyExistsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorExclusionNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", 
err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ExclusionNotFoundException{} + err := awsAwsjson11_deserializeDocumentExclusionNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorImageAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() var shape interface{} if err := decoder.Decode(&shape); err != nil && err != io.EOF { var snapshot bytes.Buffer @@ -6061,6 +7252,41 @@ func awsAwsjson11_deserializeErrorImageAlreadyExistsException(response *smithyht return output } +func awsAwsjson11_deserializeErrorImageArchivedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ImageArchivedException{} + err := awsAwsjson11_deserializeDocumentImageArchivedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, 
io.SeekStart) + return output +} + func awsAwsjson11_deserializeErrorImageDigestDoesNotMatchException(response *smithyhttp.Response, errorBody *bytes.Reader) error { var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) @@ -6131,6 +7357,41 @@ func awsAwsjson11_deserializeErrorImageNotFoundException(response *smithyhttp.Re return output } +func awsAwsjson11_deserializeErrorImageStorageClassUpdateNotSupportedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ImageStorageClassUpdateNotSupportedException{} + err := awsAwsjson11_deserializeDocumentImageStorageClassUpdateNotSupportedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + func awsAwsjson11_deserializeErrorImageTagAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) @@ -7006,6 +8267,41 @@ func awsAwsjson11_deserializeErrorServerException(response *smithyhttp.Response, return output } +func awsAwsjson11_deserializeErrorSigningConfigurationNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, 
ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.SigningConfigurationNotFoundException{} + err := awsAwsjson11_deserializeDocumentSigningConfigurationNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + func awsAwsjson11_deserializeErrorTemplateAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) @@ -7391,6 +8687,42 @@ func awsAwsjson11_deserializeErrorValidationException(response *smithyhttp.Respo return output } +func awsAwsjson11_deserializeDocumentAnnotations(v *map[string]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]string + if *v == nil { + mv = map[string]string{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + parsedVal = jtv + } + mv[key] = parsedVal + + } + *v = mv + return nil +} + func awsAwsjson11_deserializeDocumentAttribute(v **types.Attribute, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ 
-7708,6 +9040,46 @@ func awsAwsjson11_deserializeDocumentAwsEcrContainerImageDetails(v **types.AwsEc return nil } +func awsAwsjson11_deserializeDocumentBlockedByOrganizationPolicyException(v **types.BlockedByOrganizationPolicyException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BlockedByOrganizationPolicyException + if *v == nil { + sv = &types.BlockedByOrganizationPolicyException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentCvssScore(v **types.CvssScore, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -8379,24 +9751,104 @@ func awsAwsjson11_deserializeDocumentEnhancedImageScanFindingList(v *[]types.Enh return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.EnhancedImageScanFinding + var cv []types.EnhancedImageScanFinding + if *v == nil { + cv = []types.EnhancedImageScanFinding{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.EnhancedImageScanFinding + destAddr := &col + if err := awsAwsjson11_deserializeDocumentEnhancedImageScanFinding(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentExclusionAlreadyExistsException(v **types.ExclusionAlreadyExistsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return 
nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExclusionAlreadyExistsException + if *v == nil { + sv = &types.ExclusionAlreadyExistsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentExclusionNotFoundException(v **types.ExclusionNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExclusionNotFoundException if *v == nil { - cv = []types.EnhancedImageScanFinding{} + sv = &types.ExclusionNotFoundException{} } else { - cv = *v + sv = *v } - for _, value := range shape { - var col types.EnhancedImageScanFinding - destAddr := &col - if err := awsAwsjson11_deserializeDocumentEnhancedImageScanFinding(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + } } - *v = cv + *v = sv return nil } @@ -8552,6 +10004,46 @@ func awsAwsjson11_deserializeDocumentImageAlreadyExistsException(v **types.Image return nil } +func awsAwsjson11_deserializeDocumentImageArchivedException(v **types.ImageArchivedException, value interface{}) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ImageArchivedException + if *v == nil { + sv = &types.ImageArchivedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentImageDetail(v **types.ImageDetail, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -8640,11 +10132,52 @@ func awsAwsjson11_deserializeDocumentImageDetail(v **types.ImageDetail, value in sv.ImageSizeInBytes = ptr.Int64(i64) } + case "imageStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageStatus to be of type string, got %T instead", value) + } + sv.ImageStatus = types.ImageStatus(jtv) + } + case "imageTags": if err := awsAwsjson11_deserializeDocumentImageTagList(&sv.ImageTags, value); err != nil { return err } + case "lastActivatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastActivatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected LastActivatedAtTimestamp to be a JSON Number, got %T instead", value) + + } + } + + case "lastArchivedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastArchivedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected LastArchivedAtTimestamp to be a JSON Number, got %T instead", 
value) + + } + } + case "lastRecordedPullTime": if value != nil { switch jtv := value.(type) { @@ -8679,6 +10212,15 @@ func awsAwsjson11_deserializeDocumentImageDetail(v **types.ImageDetail, value in sv.RepositoryName = ptr.String(jtv) } + case "subjectManifestDigest": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) + } + sv.SubjectManifestDigest = ptr.String(jtv) + } + default: _, _ = key, value @@ -8912,28 +10454,187 @@ func awsAwsjson11_deserializeDocumentImageIdentifierList(v *[]types.ImageIdentif return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.ImageIdentifier - if *v == nil { - cv = []types.ImageIdentifier{} - } else { - cv = *v - } + var cv []types.ImageIdentifier + if *v == nil { + cv = []types.ImageIdentifier{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ImageIdentifier + destAddr := &col + if err := awsAwsjson11_deserializeDocumentImageIdentifier(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentImageList(v *[]types.Image, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Image + if *v == nil { + cv = []types.Image{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Image + destAddr := &col + if err := awsAwsjson11_deserializeDocumentImage(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentImageNotFoundException(v **types.ImageNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value 
== nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ImageNotFoundException + if *v == nil { + sv = &types.ImageNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentImageReferrer(v **types.ImageReferrer, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ImageReferrer + if *v == nil { + sv = &types.ImageReferrer{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "annotations": + if err := awsAwsjson11_deserializeDocumentAnnotations(&sv.Annotations, value); err != nil { + return err + } + + case "artifactStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArtifactStatus to be of type string, got %T instead", value) + } + sv.ArtifactStatus = types.ArtifactStatus(jtv) + } + + case "artifactType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArtifactType to be of type string, got %T instead", value) + } + sv.ArtifactType = ptr.String(jtv) + } + + case "digest": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) + } + sv.Digest = ptr.String(jtv) + } + + case "mediaType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MediaType 
to be of type string, got %T instead", value) + } + sv.MediaType = ptr.String(jtv) + } + + case "size": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ImageSizeInBytes to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Size = ptr.Int64(i64) + } + + default: + _, _ = key, value - for _, value := range shape { - var col types.ImageIdentifier - destAddr := &col - if err := awsAwsjson11_deserializeDocumentImageIdentifier(&destAddr, value); err != nil { - return err } - col = *destAddr - cv = append(cv, col) - } - *v = cv + *v = sv return nil } -func awsAwsjson11_deserializeDocumentImageList(v *[]types.Image, value interface{}) error { +func awsAwsjson11_deserializeDocumentImageReferrerList(v *[]types.ImageReferrer, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -8946,17 +10647,17 @@ func awsAwsjson11_deserializeDocumentImageList(v *[]types.Image, value interface return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.Image + var cv []types.ImageReferrer if *v == nil { - cv = []types.Image{} + cv = []types.ImageReferrer{} } else { cv = *v } for _, value := range shape { - var col types.Image + var col types.ImageReferrer destAddr := &col - if err := awsAwsjson11_deserializeDocumentImage(&destAddr, value); err != nil { + if err := awsAwsjson11_deserializeDocumentImageReferrer(&destAddr, value); err != nil { return err } col = *destAddr @@ -8967,46 +10668,6 @@ func awsAwsjson11_deserializeDocumentImageList(v *[]types.Image, value interface return nil } -func awsAwsjson11_deserializeDocumentImageNotFoundException(v **types.ImageNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var 
sv *types.ImageNotFoundException - if *v == nil { - sv = &types.ImageNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - func awsAwsjson11_deserializeDocumentImageReplicationStatus(v **types.ImageReplicationStatus, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -9449,6 +11110,147 @@ func awsAwsjson11_deserializeDocumentImageScanStatus(v **types.ImageScanStatus, return nil } +func awsAwsjson11_deserializeDocumentImageSigningStatus(v **types.ImageSigningStatus, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ImageSigningStatus + if *v == nil { + sv = &types.ImageSigningStatus{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failureCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SigningStatusFailureCode to be of type string, got %T instead", value) + } + sv.FailureCode = ptr.String(jtv) + } + + case "failureReason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SigningStatusFailureReason to be of type string, got %T instead", value) + } + sv.FailureReason = ptr.String(jtv) + } + + case "signingProfileArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SigningProfileArn to be of type string, got %T instead", value) + } + sv.SigningProfileArn = ptr.String(jtv) + } + + case "status": + if value != nil { + jtv, 
ok := value.(string) + if !ok { + return fmt.Errorf("expected SigningStatus to be of type string, got %T instead", value) + } + sv.Status = types.SigningStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentImageSigningStatusList(v *[]types.ImageSigningStatus, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ImageSigningStatus + if *v == nil { + cv = []types.ImageSigningStatus{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ImageSigningStatus + destAddr := &col + if err := awsAwsjson11_deserializeDocumentImageSigningStatus(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentImageStorageClassUpdateNotSupportedException(v **types.ImageStorageClassUpdateNotSupportedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ImageStorageClassUpdateNotSupportedException + if *v == nil { + sv = &types.ImageStorageClassUpdateNotSupportedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentImageTagAlreadyExistsException(v **types.ImageTagAlreadyExistsException, value interface{}) 
error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -9525,6 +11327,89 @@ func awsAwsjson11_deserializeDocumentImageTagList(v *[]string, value interface{} return nil } +func awsAwsjson11_deserializeDocumentImageTagMutabilityExclusionFilter(v **types.ImageTagMutabilityExclusionFilter, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ImageTagMutabilityExclusionFilter + if *v == nil { + sv = &types.ImageTagMutabilityExclusionFilter{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "filter": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageTagMutabilityExclusionFilterValue to be of type string, got %T instead", value) + } + sv.Filter = ptr.String(jtv) + } + + case "filterType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageTagMutabilityExclusionFilterType to be of type string, got %T instead", value) + } + sv.FilterType = types.ImageTagMutabilityExclusionFilterType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentImageTagMutabilityExclusionFilters(v *[]types.ImageTagMutabilityExclusionFilter, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ImageTagMutabilityExclusionFilter + if *v == nil { + cv = []types.ImageTagMutabilityExclusionFilter{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ImageTagMutabilityExclusionFilter + destAddr := &col + if err := 
awsAwsjson11_deserializeDocumentImageTagMutabilityExclusionFilter(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsAwsjson11_deserializeDocumentImageTagsList(v *[]string, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -10357,6 +12242,15 @@ func awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewResult(v **types.Life return err } + case "storageClass": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LifecyclePolicyStorageClass to be of type string, got %T instead", value) + } + sv.StorageClass = types.LifecyclePolicyStorageClass(jtv) + } + default: _, _ = key, value @@ -10435,6 +12329,11 @@ func awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewSummary(v **types.Lif sv.ExpiringImageTotalCount = ptr.Int32(int32(i64)) } + case "transitioningImageTotalCounts": + if err := awsAwsjson11_deserializeDocumentTransitioningImageTotalCounts(&sv.TransitioningImageTotalCounts, value); err != nil { + return err + } + default: _, _ = key, value @@ -10466,6 +12365,15 @@ func awsAwsjson11_deserializeDocumentLifecyclePolicyRuleAction(v **types.Lifecyc for key, value := range shape { switch key { + case "targetStorageClass": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LifecyclePolicyTargetStorageClass to be of type string, got %T instead", value) + } + sv.TargetStorageClass = types.LifecyclePolicyTargetStorageClass(jtv) + } + case "type": if value != nil { jtv, ok := value.(string) @@ -10883,6 +12791,42 @@ func awsAwsjson11_deserializeDocumentPullThroughCacheRuleNotFoundException(v **t return nil } +func awsAwsjson11_deserializeDocumentPullTimeUpdateExclusionList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + 
return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrincipalArn to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsAwsjson11_deserializeDocumentRCTAppliedForList(v *[]types.RCTAppliedFor, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -10950,13 +12894,219 @@ func awsAwsjson11_deserializeDocumentRecommendation(v **types.Recommendation, va sv.Text = ptr.String(jtv) } - case "url": + case "url": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Url to be of type string, got %T instead", value) + } + sv.Url = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentReferencedImagesNotFoundException(v **types.ReferencedImagesNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ReferencedImagesNotFoundException + if *v == nil { + sv = &types.ReferencedImagesNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentReferenceUrlsList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of 
type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Url to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentRegistryPolicyNotFoundException(v **types.RegistryPolicyNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RegistryPolicyNotFoundException + if *v == nil { + sv = &types.RegistryPolicyNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentRegistryScanningConfiguration(v **types.RegistryScanningConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RegistryScanningConfiguration + if *v == nil { + sv = &types.RegistryScanningConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "rules": + if err := 
awsAwsjson11_deserializeDocumentRegistryScanningRuleList(&sv.Rules, value); err != nil { + return err + } + + case "scanType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ScanType to be of type string, got %T instead", value) + } + sv.ScanType = types.ScanType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentRegistryScanningRule(v **types.RegistryScanningRule, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RegistryScanningRule + if *v == nil { + sv = &types.RegistryScanningRule{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "repositoryFilters": + if err := awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(&sv.RepositoryFilters, value); err != nil { + return err + } + + case "scanFrequency": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) + return fmt.Errorf("expected ScanFrequency to be of type string, got %T instead", value) } - sv.Url = ptr.String(jtv) + sv.ScanFrequency = types.ScanFrequency(jtv) } default: @@ -10968,7 +13118,7 @@ func awsAwsjson11_deserializeDocumentRecommendation(v **types.Recommendation, va return nil } -func awsAwsjson11_deserializeDocumentReferencedImagesNotFoundException(v **types.ReferencedImagesNotFoundException, value interface{}) error { +func awsAwsjson11_deserializeDocumentRegistryScanningRuleList(v *[]types.RegistryScanningRule, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -10976,39 +13126,33 @@ func awsAwsjson11_deserializeDocumentReferencedImagesNotFoundException(v **types return nil } - shape, ok := 
value.(map[string]interface{}) + shape, ok := value.([]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.ReferencedImagesNotFoundException + var cv []types.RegistryScanningRule if *v == nil { - sv = &types.ReferencedImagesNotFoundException{} + cv = []types.RegistryScanningRule{} } else { - sv = *v + cv = *v } - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - + for _, value := range shape { + var col types.RegistryScanningRule + destAddr := &col + if err := awsAwsjson11_deserializeDocumentRegistryScanningRule(&destAddr, value); err != nil { + return err } + col = *destAddr + cv = append(cv, col) + } - *v = sv + *v = cv return nil } -func awsAwsjson11_deserializeDocumentReferenceUrlsList(v *[]string, value interface{}) error { +func awsAwsjson11_deserializeDocumentRelatedVulnerabilitiesList(v *[]string, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11033,7 +13177,7 @@ func awsAwsjson11_deserializeDocumentReferenceUrlsList(v *[]string, value interf if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) + return fmt.Errorf("expected RelatedVulnerability to be of type string, got %T instead", value) } col = jtv } @@ -11044,7 +13188,7 @@ func awsAwsjson11_deserializeDocumentReferenceUrlsList(v *[]string, value interf return nil } -func awsAwsjson11_deserializeDocumentRegistryPolicyNotFoundException(v **types.RegistryPolicyNotFoundException, value interface{}) error { +func awsAwsjson11_deserializeDocumentRemediation(v **types.Remediation, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11057,22 +13201,18 
@@ func awsAwsjson11_deserializeDocumentRegistryPolicyNotFoundException(v **types.R return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.RegistryPolicyNotFoundException + var sv *types.Remediation if *v == nil { - sv = &types.RegistryPolicyNotFoundException{} + sv = &types.Remediation{} } else { sv = *v } for key, value := range shape { switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) + case "recommendation": + if err := awsAwsjson11_deserializeDocumentRecommendation(&sv.Recommendation, value); err != nil { + return err } default: @@ -11084,7 +13224,7 @@ func awsAwsjson11_deserializeDocumentRegistryPolicyNotFoundException(v **types.R return nil } -func awsAwsjson11_deserializeDocumentRegistryScanningConfiguration(v **types.RegistryScanningConfiguration, value interface{}) error { +func awsAwsjson11_deserializeDocumentReplicationConfiguration(v **types.ReplicationConfiguration, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11097,9 +13237,9 @@ func awsAwsjson11_deserializeDocumentRegistryScanningConfiguration(v **types.Reg return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.RegistryScanningConfiguration + var sv *types.ReplicationConfiguration if *v == nil { - sv = &types.RegistryScanningConfiguration{} + sv = &types.ReplicationConfiguration{} } else { sv = *v } @@ -11107,19 +13247,10 @@ func awsAwsjson11_deserializeDocumentRegistryScanningConfiguration(v **types.Reg for key, value := range shape { switch key { case "rules": - if err := awsAwsjson11_deserializeDocumentRegistryScanningRuleList(&sv.Rules, value); err != nil { + if err := awsAwsjson11_deserializeDocumentReplicationRuleList(&sv.Rules, value); err != nil { return err } - case "scanType": - if value != nil { - jtv, ok := value.(string) - if !ok 
{ - return fmt.Errorf("expected ScanType to be of type string, got %T instead", value) - } - sv.ScanType = types.ScanType(jtv) - } - default: _, _ = key, value @@ -11129,7 +13260,7 @@ func awsAwsjson11_deserializeDocumentRegistryScanningConfiguration(v **types.Reg return nil } -func awsAwsjson11_deserializeDocumentRegistryScanningRule(v **types.RegistryScanningRule, value interface{}) error { +func awsAwsjson11_deserializeDocumentReplicationDestination(v **types.ReplicationDestination, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11142,27 +13273,31 @@ func awsAwsjson11_deserializeDocumentRegistryScanningRule(v **types.RegistryScan return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.RegistryScanningRule + var sv *types.ReplicationDestination if *v == nil { - sv = &types.RegistryScanningRule{} + sv = &types.ReplicationDestination{} } else { sv = *v } for key, value := range shape { switch key { - case "repositoryFilters": - if err := awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(&sv.RepositoryFilters, value); err != nil { - return err + case "region": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Region to be of type string, got %T instead", value) + } + sv.Region = ptr.String(jtv) } - case "scanFrequency": + case "registryId": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected ScanFrequency to be of type string, got %T instead", value) + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) } - sv.ScanFrequency = types.ScanFrequency(jtv) + sv.RegistryId = ptr.String(jtv) } default: @@ -11174,7 +13309,7 @@ func awsAwsjson11_deserializeDocumentRegistryScanningRule(v **types.RegistryScan return nil } -func awsAwsjson11_deserializeDocumentRegistryScanningRuleList(v *[]types.RegistryScanningRule, value interface{}) error { +func 
awsAwsjson11_deserializeDocumentReplicationDestinationList(v *[]types.ReplicationDestination, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11187,17 +13322,17 @@ func awsAwsjson11_deserializeDocumentRegistryScanningRuleList(v *[]types.Registr return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.RegistryScanningRule + var cv []types.ReplicationDestination if *v == nil { - cv = []types.RegistryScanningRule{} + cv = []types.ReplicationDestination{} } else { cv = *v } for _, value := range shape { - var col types.RegistryScanningRule + var col types.ReplicationDestination destAddr := &col - if err := awsAwsjson11_deserializeDocumentRegistryScanningRule(&destAddr, value); err != nil { + if err := awsAwsjson11_deserializeDocumentReplicationDestination(&destAddr, value); err != nil { return err } col = *destAddr @@ -11208,7 +13343,48 @@ func awsAwsjson11_deserializeDocumentRegistryScanningRuleList(v *[]types.Registr return nil } -func awsAwsjson11_deserializeDocumentRelatedVulnerabilitiesList(v *[]string, value interface{}) error { +func awsAwsjson11_deserializeDocumentReplicationRule(v **types.ReplicationRule, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ReplicationRule + if *v == nil { + sv = &types.ReplicationRule{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "destinations": + if err := awsAwsjson11_deserializeDocumentReplicationDestinationList(&sv.Destinations, value); err != nil { + return err + } + + case "repositoryFilters": + if err := awsAwsjson11_deserializeDocumentRepositoryFilterList(&sv.RepositoryFilters, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeDocumentReplicationRuleList(v *[]types.ReplicationRule, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11221,22 +13397,20 @@ func awsAwsjson11_deserializeDocumentRelatedVulnerabilitiesList(v *[]string, val return fmt.Errorf("unexpected JSON type %v", value) } - var cv []string + var cv []types.ReplicationRule if *v == nil { - cv = []string{} + cv = []types.ReplicationRule{} } else { cv = *v } for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RelatedVulnerability to be of type string, got %T instead", value) - } - col = jtv + var col types.ReplicationRule + destAddr := &col + if err := awsAwsjson11_deserializeDocumentReplicationRule(&destAddr, value); err != nil { + return err } + col = *destAddr cv = append(cv, col) } @@ -11244,7 +13418,7 @@ func awsAwsjson11_deserializeDocumentRelatedVulnerabilitiesList(v *[]string, val return nil } -func awsAwsjson11_deserializeDocumentRemediation(v **types.Remediation, value interface{}) error { +func awsAwsjson11_deserializeDocumentRepository(v **types.Repository, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11257,20 +13431,91 @@ func awsAwsjson11_deserializeDocumentRemediation(v **types.Remediation, value in return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.Remediation + var sv *types.Repository if *v == nil { - sv = &types.Remediation{} + sv = &types.Repository{} } else { sv = *v } for key, value := range shape { switch key { - case "recommendation": - if err := awsAwsjson11_deserializeDocumentRecommendation(&sv.Recommendation, value); err != nil { + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return 
fmt.Errorf("expected CreationTimestamp to be a JSON Number, got %T instead", value) + + } + } + + case "encryptionConfiguration": + if err := awsAwsjson11_deserializeDocumentEncryptionConfiguration(&sv.EncryptionConfiguration, value); err != nil { + return err + } + + case "imageScanningConfiguration": + if err := awsAwsjson11_deserializeDocumentImageScanningConfiguration(&sv.ImageScanningConfiguration, value); err != nil { + return err + } + + case "imageTagMutability": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageTagMutability to be of type string, got %T instead", value) + } + sv.ImageTagMutability = types.ImageTagMutability(jtv) + } + + case "imageTagMutabilityExclusionFilters": + if err := awsAwsjson11_deserializeDocumentImageTagMutabilityExclusionFilters(&sv.ImageTagMutabilityExclusionFilters, value); err != nil { return err } + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.RepositoryArn = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + case "repositoryUri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Url to be of type string, got %T instead", value) + } + sv.RepositoryUri = ptr.String(jtv) + } + default: _, _ = key, value @@ -11280,7 +13525,7 @@ func awsAwsjson11_deserializeDocumentRemediation(v **types.Remediation, value in return nil } -func awsAwsjson11_deserializeDocumentReplicationConfiguration(v **types.ReplicationConfiguration, value 
interface{}) error { +func awsAwsjson11_deserializeDocumentRepositoryAlreadyExistsException(v **types.RepositoryAlreadyExistsException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11293,18 +13538,22 @@ func awsAwsjson11_deserializeDocumentReplicationConfiguration(v **types.Replicat return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.ReplicationConfiguration + var sv *types.RepositoryAlreadyExistsException if *v == nil { - sv = &types.ReplicationConfiguration{} + sv = &types.RepositoryAlreadyExistsException{} } else { sv = *v } for key, value := range shape { switch key { - case "rules": - if err := awsAwsjson11_deserializeDocumentReplicationRuleList(&sv.Rules, value); err != nil { - return err + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) } default: @@ -11316,7 +13565,7 @@ func awsAwsjson11_deserializeDocumentReplicationConfiguration(v **types.Replicat return nil } -func awsAwsjson11_deserializeDocumentReplicationDestination(v **types.ReplicationDestination, value interface{}) error { +func awsAwsjson11_deserializeDocumentRepositoryCreationTemplate(v **types.RepositoryCreationTemplate, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11329,31 +13578,119 @@ func awsAwsjson11_deserializeDocumentReplicationDestination(v **types.Replicatio return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.ReplicationDestination + var sv *types.RepositoryCreationTemplate if *v == nil { - sv = &types.ReplicationDestination{} + sv = &types.RepositoryCreationTemplate{} } else { sv = *v } for key, value := range shape { switch key { - case "region": + case "appliedFor": + if err := awsAwsjson11_deserializeDocumentRCTAppliedForList(&sv.AppliedFor, value); err != nil { + return err + } + + 
case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "customRoleArn": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Region to be of type string, got %T instead", value) + return fmt.Errorf("expected CustomRoleArn to be of type string, got %T instead", value) } - sv.Region = ptr.String(jtv) + sv.CustomRoleArn = ptr.String(jtv) } - case "registryId": + case "description": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + return fmt.Errorf("expected RepositoryTemplateDescription to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "encryptionConfiguration": + if err := awsAwsjson11_deserializeDocumentEncryptionConfigurationForRepositoryCreationTemplate(&sv.EncryptionConfiguration, value); err != nil { + return err + } + + case "imageTagMutability": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageTagMutability to be of type string, got %T instead", value) + } + sv.ImageTagMutability = types.ImageTagMutability(jtv) + } + + case "imageTagMutabilityExclusionFilters": + if err := awsAwsjson11_deserializeDocumentImageTagMutabilityExclusionFilters(&sv.ImageTagMutabilityExclusionFilters, value); err != nil { + return err + } + + case "lifecyclePolicy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LifecyclePolicyTextForRepositoryCreationTemplate to be of type string, got %T instead", value) + } + sv.LifecyclePolicy = ptr.String(jtv) + } + + case "prefix": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Prefix 
to be of type string, got %T instead", value) + } + sv.Prefix = ptr.String(jtv) + } + + case "repositoryPolicy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) + } + sv.RepositoryPolicy = ptr.String(jtv) + } + + case "resourceTags": + if err := awsAwsjson11_deserializeDocumentTagList(&sv.ResourceTags, value); err != nil { + return err + } + + case "updatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.UpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + } - sv.RegistryId = ptr.String(jtv) } default: @@ -11365,7 +13702,7 @@ func awsAwsjson11_deserializeDocumentReplicationDestination(v **types.Replicatio return nil } -func awsAwsjson11_deserializeDocumentReplicationDestinationList(v *[]types.ReplicationDestination, value interface{}) error { +func awsAwsjson11_deserializeDocumentRepositoryCreationTemplateList(v *[]types.RepositoryCreationTemplate, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11378,17 +13715,17 @@ func awsAwsjson11_deserializeDocumentReplicationDestinationList(v *[]types.Repli return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.ReplicationDestination + var cv []types.RepositoryCreationTemplate if *v == nil { - cv = []types.ReplicationDestination{} + cv = []types.RepositoryCreationTemplate{} } else { cv = *v } for _, value := range shape { - var col types.ReplicationDestination + var col types.RepositoryCreationTemplate destAddr := &col - if err := awsAwsjson11_deserializeDocumentReplicationDestination(&destAddr, value); err != nil { + if err := awsAwsjson11_deserializeDocumentRepositoryCreationTemplate(&destAddr, value); err != nil { return err } col = *destAddr @@ -11399,7 +13736,7 
@@ func awsAwsjson11_deserializeDocumentReplicationDestinationList(v *[]types.Repli return nil } -func awsAwsjson11_deserializeDocumentReplicationRule(v **types.ReplicationRule, value interface{}) error { +func awsAwsjson11_deserializeDocumentRepositoryFilter(v **types.RepositoryFilter, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11412,23 +13749,31 @@ func awsAwsjson11_deserializeDocumentReplicationRule(v **types.ReplicationRule, return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.ReplicationRule + var sv *types.RepositoryFilter if *v == nil { - sv = &types.ReplicationRule{} + sv = &types.RepositoryFilter{} } else { sv = *v } for key, value := range shape { switch key { - case "destinations": - if err := awsAwsjson11_deserializeDocumentReplicationDestinationList(&sv.Destinations, value); err != nil { - return err + case "filter": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryFilterValue to be of type string, got %T instead", value) + } + sv.Filter = ptr.String(jtv) } - case "repositoryFilters": - if err := awsAwsjson11_deserializeDocumentRepositoryFilterList(&sv.RepositoryFilters, value); err != nil { - return err + case "filterType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryFilterType to be of type string, got %T instead", value) + } + sv.FilterType = types.RepositoryFilterType(jtv) } default: @@ -11440,7 +13785,7 @@ func awsAwsjson11_deserializeDocumentReplicationRule(v **types.ReplicationRule, return nil } -func awsAwsjson11_deserializeDocumentReplicationRuleList(v *[]types.ReplicationRule, value interface{}) error { +func awsAwsjson11_deserializeDocumentRepositoryFilterList(v *[]types.RepositoryFilter, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11453,17 +13798,17 @@ func awsAwsjson11_deserializeDocumentReplicationRuleList(v 
*[]types.ReplicationR return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.ReplicationRule + var cv []types.RepositoryFilter if *v == nil { - cv = []types.ReplicationRule{} + cv = []types.RepositoryFilter{} } else { cv = *v } for _, value := range shape { - var col types.ReplicationRule + var col types.RepositoryFilter destAddr := &col - if err := awsAwsjson11_deserializeDocumentReplicationRule(&destAddr, value); err != nil { + if err := awsAwsjson11_deserializeDocumentRepositoryFilter(&destAddr, value); err != nil { return err } col = *destAddr @@ -11474,7 +13819,7 @@ func awsAwsjson11_deserializeDocumentReplicationRuleList(v *[]types.ReplicationR return nil } -func awsAwsjson11_deserializeDocumentRepository(v **types.Repository, value interface{}) error { +func awsAwsjson11_deserializeDocumentRepositoryList(v *[]types.Repository, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11482,89 +13827,101 @@ func awsAwsjson11_deserializeDocumentRepository(v **types.Repository, value inte return nil } - shape, ok := value.(map[string]interface{}) + shape, ok := value.([]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.Repository + var cv []types.Repository if *v == nil { - sv = &types.Repository{} + cv = []types.Repository{} } else { - sv = *v + cv = *v } - for key, value := range shape { - switch key { - case "createdAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected CreationTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "encryptionConfiguration": - if err := awsAwsjson11_deserializeDocumentEncryptionConfiguration(&sv.EncryptionConfiguration, value); err != nil { - return err - } + for _, value := range shape { + var col types.Repository + destAddr 
:= &col + if err := awsAwsjson11_deserializeDocumentRepository(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) - case "imageScanningConfiguration": - if err := awsAwsjson11_deserializeDocumentImageScanningConfiguration(&sv.ImageScanningConfiguration, value); err != nil { - return err - } + } + *v = cv + return nil +} - case "imageTagMutability": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageTagMutability to be of type string, got %T instead", value) - } - sv.ImageTagMutability = types.ImageTagMutability(jtv) - } +func awsAwsjson11_deserializeDocumentRepositoryNotEmptyException(v **types.RepositoryNotEmptyException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } - case "repositoryArn": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Arn to be of type string, got %T instead", value) - } - sv.RepositoryArn = ptr.String(jtv) - } + var sv *types.RepositoryNotEmptyException + if *v == nil { + sv = &types.RepositoryNotEmptyException{} + } else { + sv = *v + } - case "repositoryName": + for key, value := range shape { + switch key { + case "message", "Message": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) } - sv.RepositoryName = ptr.String(jtv) + sv.Message = ptr.String(jtv) } - case "repositoryUri": + default: + _, _ = key, value + + } + } + *v = sv 
+ return nil +} + +func awsAwsjson11_deserializeDocumentRepositoryNotFoundException(v **types.RepositoryNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RepositoryNotFoundException + if *v == nil { + sv = &types.RepositoryNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) } - sv.RepositoryUri = ptr.String(jtv) + sv.Message = ptr.String(jtv) } default: @@ -11576,7 +13933,7 @@ func awsAwsjson11_deserializeDocumentRepository(v **types.Repository, value inte return nil } -func awsAwsjson11_deserializeDocumentRepositoryAlreadyExistsException(v **types.RepositoryAlreadyExistsException, value interface{}) error { +func awsAwsjson11_deserializeDocumentRepositoryPolicyNotFoundException(v **types.RepositoryPolicyNotFoundException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11589,9 +13946,9 @@ func awsAwsjson11_deserializeDocumentRepositoryAlreadyExistsException(v **types. return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.RepositoryAlreadyExistsException + var sv *types.RepositoryPolicyNotFoundException if *v == nil { - sv = &types.RepositoryAlreadyExistsException{} + sv = &types.RepositoryPolicyNotFoundException{} } else { sv = *v } @@ -11616,7 +13973,7 @@ func awsAwsjson11_deserializeDocumentRepositoryAlreadyExistsException(v **types. 
return nil } -func awsAwsjson11_deserializeDocumentRepositoryCreationTemplate(v **types.RepositoryCreationTemplate, value interface{}) error { +func awsAwsjson11_deserializeDocumentRepositoryScanningConfiguration(v **types.RepositoryScanningConfiguration, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11629,126 +13986,158 @@ func awsAwsjson11_deserializeDocumentRepositoryCreationTemplate(v **types.Reposi return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.RepositoryCreationTemplate + var sv *types.RepositoryScanningConfiguration if *v == nil { - sv = &types.RepositoryCreationTemplate{} + sv = &types.RepositoryScanningConfiguration{} } else { sv = *v } for key, value := range shape { switch key { - case "appliedFor": - if err := awsAwsjson11_deserializeDocumentRCTAppliedForList(&sv.AppliedFor, value); err != nil { + case "appliedScanFilters": + if err := awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(&sv.AppliedScanFilters, value); err != nil { return err } - case "createdAt": + case "repositoryArn": if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) - + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) } + sv.RepositoryArn = ptr.String(jtv) } - case "customRoleArn": + case "repositoryName": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected CustomRoleArn to be of type string, got %T instead", value) + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) } - sv.CustomRoleArn = ptr.String(jtv) + sv.RepositoryName = ptr.String(jtv) } - case "description": + case "scanFrequency": if value != nil { jtv, ok := value.(string) if 
!ok { - return fmt.Errorf("expected RepositoryTemplateDescription to be of type string, got %T instead", value) + return fmt.Errorf("expected ScanFrequency to be of type string, got %T instead", value) } - sv.Description = ptr.String(jtv) - } - - case "encryptionConfiguration": - if err := awsAwsjson11_deserializeDocumentEncryptionConfigurationForRepositoryCreationTemplate(&sv.EncryptionConfiguration, value); err != nil { - return err + sv.ScanFrequency = types.ScanFrequency(jtv) } - case "imageTagMutability": + case "scanOnPush": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(bool) if !ok { - return fmt.Errorf("expected ImageTagMutability to be of type string, got %T instead", value) + return fmt.Errorf("expected ScanOnPushFlag to be of type *bool, got %T instead", value) } - sv.ImageTagMutability = types.ImageTagMutability(jtv) + sv.ScanOnPush = jtv } - case "lifecyclePolicy": + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailure(v **types.RepositoryScanningConfigurationFailure, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RepositoryScanningConfigurationFailure + if *v == nil { + sv = &types.RepositoryScanningConfigurationFailure{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failureCode": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected LifecyclePolicyTextForRepositoryCreationTemplate to be of type string, got %T instead", value) + return fmt.Errorf("expected ScanningConfigurationFailureCode to be of type string, got %T instead", value) } - sv.LifecyclePolicy = ptr.String(jtv) + sv.FailureCode = types.ScanningConfigurationFailureCode(jtv) } - case "prefix": + 
case "failureReason": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Prefix to be of type string, got %T instead", value) + return fmt.Errorf("expected ScanningConfigurationFailureReason to be of type string, got %T instead", value) } - sv.Prefix = ptr.String(jtv) + sv.FailureReason = ptr.String(jtv) } - case "repositoryPolicy": + case "repositoryName": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) } - sv.RepositoryPolicy = ptr.String(jtv) + sv.RepositoryName = ptr.String(jtv) } - case "resourceTags": - if err := awsAwsjson11_deserializeDocumentTagList(&sv.ResourceTags, value); err != nil { - return err - } + default: + _, _ = key, value - case "updatedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.UpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + } + } + *v = sv + return nil +} - default: - return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) +func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailureList(v *[]types.RepositoryScanningConfigurationFailure, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } - } - } + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } - default: - _, _ = key, value + var cv []types.RepositoryScanningConfigurationFailure + if *v == nil { + cv = []types.RepositoryScanningConfigurationFailure{} + } else { + cv = *v + } + for _, value := range shape { + var col types.RepositoryScanningConfigurationFailure + destAddr := &col + if err := awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailure(&destAddr, value); err != 
nil { + return err } + col = *destAddr + cv = append(cv, col) + } - *v = sv + *v = cv return nil } -func awsAwsjson11_deserializeDocumentRepositoryCreationTemplateList(v *[]types.RepositoryCreationTemplate, value interface{}) error { +func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationList(v *[]types.RepositoryScanningConfiguration, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11761,17 +14150,17 @@ func awsAwsjson11_deserializeDocumentRepositoryCreationTemplateList(v *[]types.R return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.RepositoryCreationTemplate + var cv []types.RepositoryScanningConfiguration if *v == nil { - cv = []types.RepositoryCreationTemplate{} + cv = []types.RepositoryScanningConfiguration{} } else { cv = *v } for _, value := range shape { - var col types.RepositoryCreationTemplate + var col types.RepositoryScanningConfiguration destAddr := &col - if err := awsAwsjson11_deserializeDocumentRepositoryCreationTemplate(&destAddr, value); err != nil { + if err := awsAwsjson11_deserializeDocumentRepositoryScanningConfiguration(&destAddr, value); err != nil { return err } col = *destAddr @@ -11782,7 +14171,7 @@ func awsAwsjson11_deserializeDocumentRepositoryCreationTemplateList(v *[]types.R return nil } -func awsAwsjson11_deserializeDocumentRepositoryFilter(v **types.RepositoryFilter, value interface{}) error { +func awsAwsjson11_deserializeDocumentResource(v **types.Resource, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11795,31 +14184,41 @@ func awsAwsjson11_deserializeDocumentRepositoryFilter(v **types.RepositoryFilter return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.RepositoryFilter + var sv *types.Resource if *v == nil { - sv = &types.RepositoryFilter{} + sv = &types.Resource{} } else { sv = *v } for key, value := range shape { switch key { - case "filter": + case "details": + if err := 
awsAwsjson11_deserializeDocumentResourceDetails(&sv.Details, value); err != nil { + return err + } + + case "id": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected RepositoryFilterValue to be of type string, got %T instead", value) + return fmt.Errorf("expected ResourceId to be of type string, got %T instead", value) } - sv.Filter = ptr.String(jtv) + sv.Id = ptr.String(jtv) } - case "filterType": + case "tags": + if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil { + return err + } + + case "type": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected RepositoryFilterType to be of type string, got %T instead", value) + return fmt.Errorf("expected Type to be of type string, got %T instead", value) } - sv.FilterType = types.RepositoryFilterType(jtv) + sv.Type = ptr.String(jtv) } default: @@ -11831,7 +14230,7 @@ func awsAwsjson11_deserializeDocumentRepositoryFilter(v **types.RepositoryFilter return nil } -func awsAwsjson11_deserializeDocumentRepositoryFilterList(v *[]types.RepositoryFilter, value interface{}) error { +func awsAwsjson11_deserializeDocumentResourceDetails(v **types.ResourceDetails, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11839,33 +14238,35 @@ func awsAwsjson11_deserializeDocumentRepositoryFilterList(v *[]types.RepositoryF return nil } - shape, ok := value.([]interface{}) + shape, ok := value.(map[string]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.RepositoryFilter + var sv *types.ResourceDetails if *v == nil { - cv = []types.RepositoryFilter{} + sv = &types.ResourceDetails{} } else { - cv = *v + sv = *v } - for _, value := range shape { - var col types.RepositoryFilter - destAddr := &col - if err := awsAwsjson11_deserializeDocumentRepositoryFilter(&destAddr, value); err != nil { - return err + for key, value := range shape { + switch key { + case 
"awsEcrContainerImage": + if err := awsAwsjson11_deserializeDocumentAwsEcrContainerImageDetails(&sv.AwsEcrContainerImage, value); err != nil { + return err + } + + default: + _, _ = key, value + } - col = *destAddr - cv = append(cv, col) - } - *v = cv + *v = sv return nil } -func awsAwsjson11_deserializeDocumentRepositoryList(v *[]types.Repository, value interface{}) error { +func awsAwsjson11_deserializeDocumentResourceList(v *[]types.Resource, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11878,17 +14279,17 @@ func awsAwsjson11_deserializeDocumentRepositoryList(v *[]types.Repository, value return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.Repository + var cv []types.Resource if *v == nil { - cv = []types.Repository{} + cv = []types.Resource{} } else { cv = *v } for _, value := range shape { - var col types.Repository + var col types.Resource destAddr := &col - if err := awsAwsjson11_deserializeDocumentRepository(&destAddr, value); err != nil { + if err := awsAwsjson11_deserializeDocumentResource(&destAddr, value); err != nil { return err } col = *destAddr @@ -11899,7 +14300,7 @@ func awsAwsjson11_deserializeDocumentRepositoryList(v *[]types.Repository, value return nil } -func awsAwsjson11_deserializeDocumentRepositoryNotEmptyException(v **types.RepositoryNotEmptyException, value interface{}) error { +func awsAwsjson11_deserializeDocumentScanningRepositoryFilter(v **types.ScanningRepositoryFilter, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11912,22 +14313,31 @@ func awsAwsjson11_deserializeDocumentRepositoryNotEmptyException(v **types.Repos return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.RepositoryNotEmptyException + var sv *types.ScanningRepositoryFilter if *v == nil { - sv = &types.RepositoryNotEmptyException{} + sv = &types.ScanningRepositoryFilter{} } else { sv = *v } for key, value := range shape { switch key { - 
case "message", "Message": + case "filter": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + return fmt.Errorf("expected ScanningRepositoryFilterValue to be of type string, got %T instead", value) } - sv.Message = ptr.String(jtv) + sv.Filter = ptr.String(jtv) + } + + case "filterType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ScanningRepositoryFilterType to be of type string, got %T instead", value) + } + sv.FilterType = types.ScanningRepositoryFilterType(jtv) } default: @@ -11939,7 +14349,7 @@ func awsAwsjson11_deserializeDocumentRepositoryNotEmptyException(v **types.Repos return nil } -func awsAwsjson11_deserializeDocumentRepositoryNotFoundException(v **types.RepositoryNotFoundException, value interface{}) error { +func awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(v *[]types.ScanningRepositoryFilter, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11947,39 +14357,33 @@ func awsAwsjson11_deserializeDocumentRepositoryNotFoundException(v **types.Repos return nil } - shape, ok := value.(map[string]interface{}) + shape, ok := value.([]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.RepositoryNotFoundException + var cv []types.ScanningRepositoryFilter if *v == nil { - sv = &types.RepositoryNotFoundException{} + cv = []types.ScanningRepositoryFilter{} } else { - sv = *v + cv = *v } - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - + for _, value := range shape { + var col types.ScanningRepositoryFilter + destAddr := &col + if err := 
awsAwsjson11_deserializeDocumentScanningRepositoryFilter(&destAddr, value); err != nil { + return err } + col = *destAddr + cv = append(cv, col) + } - *v = sv + *v = cv return nil } -func awsAwsjson11_deserializeDocumentRepositoryPolicyNotFoundException(v **types.RepositoryPolicyNotFoundException, value interface{}) error { +func awsAwsjson11_deserializeDocumentScanNotFoundException(v **types.ScanNotFoundException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11992,9 +14396,9 @@ func awsAwsjson11_deserializeDocumentRepositoryPolicyNotFoundException(v **types return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.RepositoryPolicyNotFoundException + var sv *types.ScanNotFoundException if *v == nil { - sv = &types.RepositoryPolicyNotFoundException{} + sv = &types.ScanNotFoundException{} } else { sv = *v } @@ -12019,7 +14423,7 @@ func awsAwsjson11_deserializeDocumentRepositoryPolicyNotFoundException(v **types return nil } -func awsAwsjson11_deserializeDocumentRepositoryScanningConfiguration(v **types.RepositoryScanningConfiguration, value interface{}) error { +func awsAwsjson11_deserializeDocumentScoreDetails(v **types.ScoreDetails, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12032,56 +14436,20 @@ func awsAwsjson11_deserializeDocumentRepositoryScanningConfiguration(v **types.R return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.RepositoryScanningConfiguration + var sv *types.ScoreDetails if *v == nil { - sv = &types.RepositoryScanningConfiguration{} + sv = &types.ScoreDetails{} } else { sv = *v } for key, value := range shape { switch key { - case "appliedScanFilters": - if err := awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(&sv.AppliedScanFilters, value); err != nil { + case "cvss": + if err := awsAwsjson11_deserializeDocumentCvssScoreDetails(&sv.Cvss, value); err != nil { return err } - case "repositoryArn": - if 
value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Arn to be of type string, got %T instead", value) - } - sv.RepositoryArn = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - case "scanFrequency": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScanFrequency to be of type string, got %T instead", value) - } - sv.ScanFrequency = types.ScanFrequency(jtv) - } - - case "scanOnPush": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected ScanOnPushFlag to be of type *bool, got %T instead", value) - } - sv.ScanOnPush = jtv - } - default: _, _ = key, value @@ -12091,7 +14459,7 @@ func awsAwsjson11_deserializeDocumentRepositoryScanningConfiguration(v **types.R return nil } -func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailure(v **types.RepositoryScanningConfigurationFailure, value interface{}) error { +func awsAwsjson11_deserializeDocumentSecretNotFoundException(v **types.SecretNotFoundException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12104,40 +14472,22 @@ func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailure(v ** return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.RepositoryScanningConfigurationFailure + var sv *types.SecretNotFoundException if *v == nil { - sv = &types.RepositoryScanningConfigurationFailure{} + sv = &types.SecretNotFoundException{} } else { sv = *v } for key, value := range shape { switch key { - case "failureCode": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScanningConfigurationFailureCode to be of type string, got %T instead", value) - } - sv.FailureCode = 
types.ScanningConfigurationFailureCode(jtv) - } - - case "failureReason": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScanningConfigurationFailureReason to be of type string, got %T instead", value) - } - sv.FailureReason = ptr.String(jtv) - } - - case "repositoryName": + case "message", "Message": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) } - sv.RepositoryName = ptr.String(jtv) + sv.Message = ptr.String(jtv) } default: @@ -12149,75 +14499,7 @@ func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailure(v ** return nil } -func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailureList(v *[]types.RepositoryScanningConfigurationFailure, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.RepositoryScanningConfigurationFailure - if *v == nil { - cv = []types.RepositoryScanningConfigurationFailure{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.RepositoryScanningConfigurationFailure - destAddr := &col - if err := awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailure(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationList(v *[]types.RepositoryScanningConfiguration, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var 
cv []types.RepositoryScanningConfiguration - if *v == nil { - cv = []types.RepositoryScanningConfiguration{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.RepositoryScanningConfiguration - destAddr := &col - if err := awsAwsjson11_deserializeDocumentRepositoryScanningConfiguration(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentResource(v **types.Resource, value interface{}) error { +func awsAwsjson11_deserializeDocumentServerException(v **types.ServerException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12230,41 +14512,22 @@ func awsAwsjson11_deserializeDocumentResource(v **types.Resource, value interfac return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.Resource + var sv *types.ServerException if *v == nil { - sv = &types.Resource{} + sv = &types.ServerException{} } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "details": - if err := awsAwsjson11_deserializeDocumentResourceDetails(&sv.Details, value); err != nil { - return err - } - - case "id": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ResourceId to be of type string, got %T instead", value) - } - sv.Id = ptr.String(jtv) - } - - case "tags": - if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil { - return err - } + sv = *v + } - case "type": + for key, value := range shape { + switch key { + case "message", "Message": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Type to be of type string, got %T instead", value) + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) } - sv.Type = ptr.String(jtv) + sv.Message = ptr.String(jtv) } default: @@ -12276,7 +14539,7 @@ func awsAwsjson11_deserializeDocumentResource(v 
**types.Resource, value interfac return nil } -func awsAwsjson11_deserializeDocumentResourceDetails(v **types.ResourceDetails, value interface{}) error { +func awsAwsjson11_deserializeDocumentSigningConfiguration(v **types.SigningConfiguration, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12289,17 +14552,17 @@ func awsAwsjson11_deserializeDocumentResourceDetails(v **types.ResourceDetails, return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.ResourceDetails + var sv *types.SigningConfiguration if *v == nil { - sv = &types.ResourceDetails{} + sv = &types.SigningConfiguration{} } else { sv = *v } for key, value := range shape { switch key { - case "awsEcrContainerImage": - if err := awsAwsjson11_deserializeDocumentAwsEcrContainerImageDetails(&sv.AwsEcrContainerImage, value); err != nil { + case "rules": + if err := awsAwsjson11_deserializeDocumentSigningRuleList(&sv.Rules, value); err != nil { return err } @@ -12312,7 +14575,7 @@ func awsAwsjson11_deserializeDocumentResourceDetails(v **types.ResourceDetails, return nil } -func awsAwsjson11_deserializeDocumentResourceList(v *[]types.Resource, value interface{}) error { +func awsAwsjson11_deserializeDocumentSigningConfigurationNotFoundException(v **types.SigningConfigurationNotFoundException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12320,33 +14583,39 @@ func awsAwsjson11_deserializeDocumentResourceList(v *[]types.Resource, value int return nil } - shape, ok := value.([]interface{}) + shape, ok := value.(map[string]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.Resource + var sv *types.SigningConfigurationNotFoundException if *v == nil { - cv = []types.Resource{} + sv = &types.SigningConfigurationNotFoundException{} } else { - cv = *v + sv = *v } - for _, value := range shape { - var col types.Resource - destAddr := &col - if err := 
awsAwsjson11_deserializeDocumentResource(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + } } - *v = cv + *v = sv return nil } -func awsAwsjson11_deserializeDocumentScanningRepositoryFilter(v **types.ScanningRepositoryFilter, value interface{}) error { +func awsAwsjson11_deserializeDocumentSigningRepositoryFilter(v **types.SigningRepositoryFilter, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12359,9 +14628,9 @@ func awsAwsjson11_deserializeDocumentScanningRepositoryFilter(v **types.Scanning return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.ScanningRepositoryFilter + var sv *types.SigningRepositoryFilter if *v == nil { - sv = &types.ScanningRepositoryFilter{} + sv = &types.SigningRepositoryFilter{} } else { sv = *v } @@ -12372,7 +14641,7 @@ func awsAwsjson11_deserializeDocumentScanningRepositoryFilter(v **types.Scanning if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected ScanningRepositoryFilterValue to be of type string, got %T instead", value) + return fmt.Errorf("expected SigningRepositoryFilterValue to be of type string, got %T instead", value) } sv.Filter = ptr.String(jtv) } @@ -12381,9 +14650,9 @@ func awsAwsjson11_deserializeDocumentScanningRepositoryFilter(v **types.Scanning if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected ScanningRepositoryFilterType to be of type string, got %T instead", value) + return fmt.Errorf("expected SigningRepositoryFilterType to be of type string, got %T instead", value) } - sv.FilterType = types.ScanningRepositoryFilterType(jtv) + sv.FilterType = 
types.SigningRepositoryFilterType(jtv) } default: @@ -12395,7 +14664,7 @@ func awsAwsjson11_deserializeDocumentScanningRepositoryFilter(v **types.Scanning return nil } -func awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(v *[]types.ScanningRepositoryFilter, value interface{}) error { +func awsAwsjson11_deserializeDocumentSigningRepositoryFilterList(v *[]types.SigningRepositoryFilter, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12408,17 +14677,17 @@ func awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(v *[]types.Sca return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.ScanningRepositoryFilter + var cv []types.SigningRepositoryFilter if *v == nil { - cv = []types.ScanningRepositoryFilter{} + cv = []types.SigningRepositoryFilter{} } else { cv = *v } for _, value := range shape { - var col types.ScanningRepositoryFilter + var col types.SigningRepositoryFilter destAddr := &col - if err := awsAwsjson11_deserializeDocumentScanningRepositoryFilter(&destAddr, value); err != nil { + if err := awsAwsjson11_deserializeDocumentSigningRepositoryFilter(&destAddr, value); err != nil { return err } col = *destAddr @@ -12429,47 +14698,7 @@ func awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(v *[]types.Sca return nil } -func awsAwsjson11_deserializeDocumentScanNotFoundException(v **types.ScanNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ScanNotFoundException - if *v == nil { - sv = &types.ScanNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type 
string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentScoreDetails(v **types.ScoreDetails, value interface{}) error { +func awsAwsjson11_deserializeDocumentSigningRule(v **types.SigningRule, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12482,58 +14711,27 @@ func awsAwsjson11_deserializeDocumentScoreDetails(v **types.ScoreDetails, value return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.ScoreDetails + var sv *types.SigningRule if *v == nil { - sv = &types.ScoreDetails{} + sv = &types.SigningRule{} } else { sv = *v } for key, value := range shape { switch key { - case "cvss": - if err := awsAwsjson11_deserializeDocumentCvssScoreDetails(&sv.Cvss, value); err != nil { + case "repositoryFilters": + if err := awsAwsjson11_deserializeDocumentSigningRepositoryFilterList(&sv.RepositoryFilters, value); err != nil { return err } - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentSecretNotFoundException(v **types.SecretNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.SecretNotFoundException - if *v == nil { - sv = &types.SecretNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message", "Message": + case "signingProfileArn": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + return fmt.Errorf("expected SigningProfileArn to be of type string, got %T instead", value) } - sv.Message = ptr.String(jtv) + sv.SigningProfileArn = ptr.String(jtv) } 
default: @@ -12545,7 +14743,7 @@ func awsAwsjson11_deserializeDocumentSecretNotFoundException(v **types.SecretNot return nil } -func awsAwsjson11_deserializeDocumentServerException(v **types.ServerException, value interface{}) error { +func awsAwsjson11_deserializeDocumentSigningRuleList(v *[]types.SigningRule, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12553,35 +14751,29 @@ func awsAwsjson11_deserializeDocumentServerException(v **types.ServerException, return nil } - shape, ok := value.(map[string]interface{}) + shape, ok := value.([]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.ServerException + var cv []types.SigningRule if *v == nil { - sv = &types.ServerException{} + cv = []types.SigningRule{} } else { - sv = *v + cv = *v } - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - + for _, value := range shape { + var col types.SigningRule + destAddr := &col + if err := awsAwsjson11_deserializeDocumentSigningRule(&destAddr, value); err != nil { + return err } + col = *destAddr + cv = append(cv, col) + } - *v = sv + *v = cv return nil } @@ -12764,15 +14956,108 @@ func awsAwsjson11_deserializeDocumentTemplateNotFoundException(v **types.Templat sv = *v } - for key, value := range shape { - switch key { - case "message", "Message": + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTooManyTagsException(v 
**types.TooManyTagsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TooManyTagsException + if *v == nil { + sv = &types.TooManyTagsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTransitioningImageTotalCount(v **types.TransitioningImageTotalCount, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TransitioningImageTotalCount + if *v == nil { + sv = &types.TransitioningImageTotalCount{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "imageTotalCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ImageCount to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ImageTotalCount = ptr.Int32(int32(i64)) + } + + case "targetStorageClass": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + return fmt.Errorf("expected LifecyclePolicyTargetStorageClass to be of type string, got %T instead", value) } - sv.Message = ptr.String(jtv) + sv.TargetStorageClass = types.LifecyclePolicyTargetStorageClass(jtv) } default: @@ -12784,7 +15069,7 
@@ func awsAwsjson11_deserializeDocumentTemplateNotFoundException(v **types.Templat return nil } -func awsAwsjson11_deserializeDocumentTooManyTagsException(v **types.TooManyTagsException, value interface{}) error { +func awsAwsjson11_deserializeDocumentTransitioningImageTotalCounts(v *[]types.TransitioningImageTotalCount, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12792,35 +15077,29 @@ func awsAwsjson11_deserializeDocumentTooManyTagsException(v **types.TooManyTagsE return nil } - shape, ok := value.(map[string]interface{}) + shape, ok := value.([]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.TooManyTagsException + var cv []types.TransitioningImageTotalCount if *v == nil { - sv = &types.TooManyTagsException{} + cv = []types.TransitioningImageTotalCount{} } else { - sv = *v + cv = *v } - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - + for _, value := range shape { + var col types.TransitioningImageTotalCount + destAddr := &col + if err := awsAwsjson11_deserializeDocumentTransitioningImageTotalCount(&destAddr, value); err != nil { + return err } + col = *destAddr + cv = append(cv, col) + } - *v = sv + *v = cv return nil } @@ -14079,6 +16358,91 @@ func awsAwsjson11_deserializeOpDocumentDeleteRepositoryPolicyOutput(v **DeleteRe return nil } +func awsAwsjson11_deserializeOpDocumentDeleteSigningConfigurationOutput(v **DeleteSigningConfigurationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv 
*DeleteSigningConfigurationOutput + if *v == nil { + sv = &DeleteSigningConfigurationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "signingConfiguration": + if err := awsAwsjson11_deserializeDocumentSigningConfiguration(&sv.SigningConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeregisterPullTimeUpdateExclusionOutput(v **DeregisterPullTimeUpdateExclusionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeregisterPullTimeUpdateExclusionOutput + if *v == nil { + sv = &DeregisterPullTimeUpdateExclusionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "principalArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrincipalArn to be of type string, got %T instead", value) + } + sv.PrincipalArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentDescribeImageReplicationStatusOutput(v **DescribeImageReplicationStatusOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -14202,6 +16566,65 @@ func awsAwsjson11_deserializeOpDocumentDescribeImageScanFindingsOutput(v **Descr return nil } +func awsAwsjson11_deserializeOpDocumentDescribeImageSigningStatusOutput(v **DescribeImageSigningStatusOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type 
%T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeImageSigningStatusOutput + if *v == nil { + sv = &DescribeImageSigningStatusOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "imageId": + if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil { + return err + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + case "signingStatuses": + if err := awsAwsjson11_deserializeDocumentImageSigningStatusList(&sv.SigningStatuses, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentDescribeImagesOutput(v **DescribeImagesOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -14743,9 +17166,103 @@ func awsAwsjson11_deserializeOpDocumentGetRegistryPolicyOutput(v **GetRegistryPo return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetRegistryPolicyOutput + var sv *GetRegistryPolicyOutput + if *v == nil { + sv = &GetRegistryPolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "policyText": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryPolicyText to be of type string, got %T instead", value) + } + sv.PolicyText = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected 
RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetRegistryScanningConfigurationOutput(v **GetRegistryScanningConfigurationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetRegistryScanningConfigurationOutput + if *v == nil { + sv = &GetRegistryScanningConfigurationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "scanningConfiguration": + if err := awsAwsjson11_deserializeDocumentRegistryScanningConfiguration(&sv.ScanningConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(v **GetRepositoryPolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetRepositoryPolicyOutput if *v == nil { - sv = &GetRegistryPolicyOutput{} + sv = &GetRepositoryPolicyOutput{} } else { sv = *v } @@ -14756,7 +17273,7 @@ func awsAwsjson11_deserializeOpDocumentGetRegistryPolicyOutput(v **GetRegistryPo if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected RegistryPolicyText to be of type string, got %T instead", value) + return fmt.Errorf("expected RepositoryPolicyText to be of 
type string, got %T instead", value) } sv.PolicyText = ptr.String(jtv) } @@ -14770,6 +17287,15 @@ func awsAwsjson11_deserializeOpDocumentGetRegistryPolicyOutput(v **GetRegistryPo sv.RegistryId = ptr.String(jtv) } + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + default: _, _ = key, value @@ -14779,7 +17305,7 @@ func awsAwsjson11_deserializeOpDocumentGetRegistryPolicyOutput(v **GetRegistryPo return nil } -func awsAwsjson11_deserializeOpDocumentGetRegistryScanningConfigurationOutput(v **GetRegistryScanningConfigurationOutput, value interface{}) error { +func awsAwsjson11_deserializeOpDocumentGetSigningConfigurationOutput(v **GetSigningConfigurationOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -14792,9 +17318,9 @@ func awsAwsjson11_deserializeOpDocumentGetRegistryScanningConfigurationOutput(v return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetRegistryScanningConfigurationOutput + var sv *GetSigningConfigurationOutput if *v == nil { - sv = &GetRegistryScanningConfigurationOutput{} + sv = &GetSigningConfigurationOutput{} } else { sv = *v } @@ -14810,8 +17336,8 @@ func awsAwsjson11_deserializeOpDocumentGetRegistryScanningConfigurationOutput(v sv.RegistryId = ptr.String(jtv) } - case "scanningConfiguration": - if err := awsAwsjson11_deserializeDocumentRegistryScanningConfiguration(&sv.ScanningConfiguration, value); err != nil { + case "signingConfiguration": + if err := awsAwsjson11_deserializeDocumentSigningConfiguration(&sv.SigningConfiguration, value); err != nil { return err } @@ -14824,7 +17350,7 @@ func awsAwsjson11_deserializeOpDocumentGetRegistryScanningConfigurationOutput(v return nil } -func awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(v **GetRepositoryPolicyOutput, value interface{}) error { +func 
awsAwsjson11_deserializeOpDocumentInitiateLayerUploadOutput(v **InitiateLayerUploadOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -14837,40 +17363,35 @@ func awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(v **GetReposito return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetRepositoryPolicyOutput + var sv *InitiateLayerUploadOutput if *v == nil { - sv = &GetRepositoryPolicyOutput{} + sv = &InitiateLayerUploadOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "policyText": + case "partSize": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) + return fmt.Errorf("expected PartSize to be json.Number, got %T instead", value) } - sv.PolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + i64, err := jtv.Int64() + if err != nil { + return err } - sv.RegistryId = ptr.String(jtv) + sv.PartSize = ptr.Int64(i64) } - case "repositoryName": + case "uploadId": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) } - sv.RepositoryName = ptr.String(jtv) + sv.UploadId = ptr.String(jtv) } default: @@ -14882,7 +17403,7 @@ func awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(v **GetReposito return nil } -func awsAwsjson11_deserializeOpDocumentInitiateLayerUploadOutput(v **InitiateLayerUploadOutput, value interface{}) error { +func awsAwsjson11_deserializeOpDocumentListImageReferrersOutput(v **ListImageReferrersOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ 
-14895,35 +17416,27 @@ func awsAwsjson11_deserializeOpDocumentInitiateLayerUploadOutput(v **InitiateLay return fmt.Errorf("unexpected JSON type %v", value) } - var sv *InitiateLayerUploadOutput + var sv *ListImageReferrersOutput if *v == nil { - sv = &InitiateLayerUploadOutput{} + sv = &ListImageReferrersOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "partSize": + case "nextToken": if value != nil { - jtv, ok := value.(json.Number) + jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected PartSize to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) } - sv.PartSize = ptr.Int64(i64) + sv.NextToken = ptr.String(jtv) } - case "uploadId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) - } - sv.UploadId = ptr.String(jtv) + case "referrers": + if err := awsAwsjson11_deserializeDocumentImageReferrerList(&sv.Referrers, value); err != nil { + return err } default: @@ -14980,6 +17493,51 @@ func awsAwsjson11_deserializeOpDocumentListImagesOutput(v **ListImagesOutput, va return nil } +func awsAwsjson11_deserializeOpDocumentListPullTimeUpdateExclusionsOutput(v **ListPullTimeUpdateExclusionsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListPullTimeUpdateExclusionsOutput + if *v == nil { + sv = &ListPullTimeUpdateExclusionsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken 
= ptr.String(jtv) + } + + case "pullTimeUpdateExclusions": + if err := awsAwsjson11_deserializeDocumentPullTimeUpdateExclusionList(&sv.PullTimeUpdateExclusions, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15186,6 +17744,11 @@ func awsAwsjson11_deserializeOpDocumentPutImageTagMutabilityOutput(v **PutImageT sv.ImageTagMutability = types.ImageTagMutability(jtv) } + case "imageTagMutabilityExclusionFilters": + if err := awsAwsjson11_deserializeDocumentImageTagMutabilityExclusionFilters(&sv.ImageTagMutabilityExclusionFilters, value); err != nil { + return err + } + case "registryId": if value != nil { jtv, ok := value.(string) @@ -15392,6 +17955,98 @@ func awsAwsjson11_deserializeOpDocumentPutReplicationConfigurationOutput(v **Put return nil } +func awsAwsjson11_deserializeOpDocumentPutSigningConfigurationOutput(v **PutSigningConfigurationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutSigningConfigurationOutput + if *v == nil { + sv = &PutSigningConfigurationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "signingConfiguration": + if err := awsAwsjson11_deserializeDocumentSigningConfiguration(&sv.SigningConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentRegisterPullTimeUpdateExclusionOutput(v **RegisterPullTimeUpdateExclusionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value 
== nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RegisterPullTimeUpdateExclusionOutput + if *v == nil { + sv = &RegisterPullTimeUpdateExclusionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected CreationTimestamp to be a JSON Number, got %T instead", value) + + } + } + + case "principalArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrincipalArn to be of type string, got %T instead", value) + } + sv.PrincipalArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentSetRepositoryPolicyOutput(v **SetRepositoryPolicyOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15638,6 +18293,69 @@ func awsAwsjson11_deserializeOpDocumentUntagResourceOutput(v **UntagResourceOutp return nil } +func awsAwsjson11_deserializeOpDocumentUpdateImageStorageClassOutput(v **UpdateImageStorageClassOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateImageStorageClassOutput + if *v == nil { + sv = &UpdateImageStorageClassOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "imageId": + if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil { + return err + } + + case "imageStatus": + if value != nil { + jtv, ok := value.(string) + if 
!ok { + return fmt.Errorf("expected ImageStatus to be of type string, got %T instead", value) + } + sv.ImageStatus = types.ImageStatus(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentUpdatePullThroughCacheRuleOutput(v **UpdatePullThroughCacheRuleOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/endpoints.go index e61e54695c..1ad88a73c2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/endpoints.go @@ -14,6 +14,7 @@ import ( internalendpoints "github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints" smithyauth "github.com/aws/smithy-go/auth" smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/endpoints/private/rulesfn" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" "github.com/aws/smithy-go/tracing" @@ -217,24 +218,20 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) { } } -func bindRegion(region string) *string { +func bindRegion(region string) (*string, error) { if region == "" { - return nil + return nil, nil + } + if !rulesfn.IsValidHostLabel(region, true) { + return nil, fmt.Errorf("invalid input region %s", region) } - return aws.String(endpoints.MapFIPSRegion(region)) + + return aws.String(endpoints.MapFIPSRegion(region)), nil } // EndpointParameters 
provides the parameters that influence how endpoints are // resolved. type EndpointParameters struct { - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - // When true, use the dual-stack endpoint. If the configured endpoint does not // support dual-stack, dispatching the request MAY return an error. // @@ -261,6 +258,14 @@ type EndpointParameters struct { // // SDK::Endpoint Endpoint *string + + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string } // ValidateRequired validates required parameters are set. @@ -328,7 +333,9 @@ func (r *resolver) ResolveEndpoint( return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) } _UseDualStack := *params.UseDualStack + _ = _UseDualStack _UseFIPS := *params.UseFIPS + _ = _UseFIPS if exprVal := params.Endpoint; exprVal != nil { _Endpoint := *exprVal @@ -357,79 +364,63 @@ func (r *resolver) ResolveEndpoint( if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { _PartitionResult := *exprVal _ = _PartitionResult - if _UseFIPS == true { - if _UseDualStack == true { - if true == _PartitionResult.SupportsFIPS { - if true == _PartitionResult.SupportsDualStack { - if "aws" == _PartitionResult.Name { - uriString := func() string { - var out strings.Builder - out.WriteString("https://ecr-fips.") - out.WriteString(_Region) - out.WriteString(".api.aws") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if "aws-us-gov" == _PartitionResult.Name { - uriString := func() string { - var out strings.Builder - out.WriteString("https://ecr-fips.") - out.WriteString(_Region) - out.WriteString(".api.aws") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != 
nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://api.ecr-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() + if _PartitionResult.Name == "aws" { + if _UseFIPS == false { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws" { + if _UseFIPS == false { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") } } - if _UseFIPS == true { - if _PartitionResult.SupportsFIPS == true { - if _PartitionResult.Name == "aws" { + if _PartitionResult.Name == "aws" { + if _UseFIPS == true { + if _UseDualStack 
== false { uriString := func() string { var out strings.Builder - out.WriteString("https://ecr-fips.") + out.WriteString("https://api.ecr-fips.") out.WriteString(_Region) - out.WriteString(".amazonaws.com") + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) return out.String() }() @@ -443,12 +434,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, }, nil } - if _PartitionResult.Name == "aws-us-gov" { + } + } + if _PartitionResult.Name == "aws" { + if _UseFIPS == true { + if _UseDualStack == true { uriString := func() string { var out strings.Builder out.WriteString("https://ecr-fips.") out.WriteString(_Region) - out.WriteString(".amazonaws.com") + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) return out.String() }() @@ -462,35 +458,41 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, }, nil } - uriString := func() string { - var out strings.Builder - out.WriteString("https://api.ecr-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() + } + } + if _PartitionResult.Name == "aws-us-gov" { + if _UseFIPS == false { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") } - if _UseDualStack == true { 
- if true == _PartitionResult.SupportsDualStack { - if "aws" == _PartitionResult.Name { + if _PartitionResult.Name == "aws-us-gov" { + if _UseFIPS == false { + if _UseDualStack == true { uriString := func() string { var out strings.Builder out.WriteString("https://ecr.") out.WriteString(_Region) - out.WriteString(".api.aws") + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) return out.String() }() @@ -504,12 +506,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, }, nil } - if "aws-cn" == _PartitionResult.Name { + } + } + if _PartitionResult.Name == "aws-us-gov" { + if _UseFIPS == true { + if _UseDualStack == false { uriString := func() string { var out strings.Builder - out.WriteString("https://ecr.") + out.WriteString("https://api.ecr-fips.") out.WriteString(_Region) - out.WriteString(".api.amazonwebservices.com.cn") + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) return out.String() }() @@ -523,12 +530,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, }, nil } - if "aws-us-gov" == _PartitionResult.Name { + } + } + if _PartitionResult.Name == "aws-us-gov" { + if _UseFIPS == true { + if _UseDualStack == true { uriString := func() string { var out strings.Builder - out.WriteString("https://ecr.") + out.WriteString("https://ecr-fips.") out.WriteString(_Region) - out.WriteString(".api.aws") + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) return out.String() }() @@ -542,87 +554,725 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, }, nil } - uriString := func() string { - var out strings.Builder - out.WriteString("https://api.ecr.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() + } + } + if _PartitionResult.Name == "aws-cn" { + if _UseFIPS == false { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + 
out.WriteString("https://api.ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } } - return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") } - uriString := func() string { - var out strings.Builder - out.WriteString("https://api.ecr.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() + if _PartitionResult.Name == "aws-cn" { + if _UseFIPS == false { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") -} + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-cn" { + if _UseFIPS == true { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } -func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-cn" { + if _UseFIPS == true { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() - params.Region = bindRegion(options.Region) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso" { + if _UseFIPS == false { 
+ if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() - return params -} + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } -type resolveEndpointV2Middleware struct { - options Options -} + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso" { + if _UseFIPS == false { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveEndpoint") - defer span.End() + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso" { + if _UseFIPS == true { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", 
uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso" { + if _UseFIPS == true { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso-b" { + if _UseFIPS == false { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso-b" { + if _UseFIPS == false { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso-b" { + if _UseFIPS == true { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + 
out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso-b" { + if _UseFIPS == true { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso-e" { + if _UseFIPS == false { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso-e" { + if _UseFIPS == false { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso-e" { + if _UseFIPS == true 
{ + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso-e" { + if _UseFIPS == true { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso-f" { + if _UseFIPS == false { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso-f" { + if _UseFIPS == false { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse 
uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso-f" { + if _UseFIPS == true { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-iso-f" { + if _UseFIPS == true { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-eusc" { + if _UseFIPS == false { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-eusc" { + if _UseFIPS == false { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://ecr.") + out.WriteString(_Region) + out.WriteString(".") + 
out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-eusc" { + if _UseFIPS == true { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-eusc" { + if _UseFIPS == true { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _UseFIPS == true { + if _UseDualStack == true { + if true == _PartitionResult.SupportsFIPS { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } 
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") + } + } + if _UseFIPS == true { + if _UseDualStack == false { + if _PartitionResult.SupportsFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") + } + } + if _UseFIPS == false { + if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") + } + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://api.ecr.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") +} + +type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(ctx context.Context, input interface{}, options Options) (*EndpointParameters, error) { + params := &EndpointParameters{} + + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint + region, err := bindRegion(options.Region) + if err != nil { + return nil, err + } + params.Region = region + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params, nil +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } req, ok := in.Request.(*smithyhttp.Request) if !ok { @@ -633,7 +1283,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + params, err := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + if err != nil { + return out, metadata, fmt.Errorf("failed to bind endpoint params, %w", err) + } endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", func() 
(smithyendpoints.Endpoint, error) { return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/generated.json index e9160421d9..16f23c2a27 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/generated.json @@ -22,8 +22,11 @@ "api_op_DeleteRepository.go", "api_op_DeleteRepositoryCreationTemplate.go", "api_op_DeleteRepositoryPolicy.go", + "api_op_DeleteSigningConfiguration.go", + "api_op_DeregisterPullTimeUpdateExclusion.go", "api_op_DescribeImageReplicationStatus.go", "api_op_DescribeImageScanFindings.go", + "api_op_DescribeImageSigningStatus.go", "api_op_DescribeImages.go", "api_op_DescribePullThroughCacheRules.go", "api_op_DescribeRegistry.go", @@ -37,8 +40,11 @@ "api_op_GetRegistryPolicy.go", "api_op_GetRegistryScanningConfiguration.go", "api_op_GetRepositoryPolicy.go", + "api_op_GetSigningConfiguration.go", "api_op_InitiateLayerUpload.go", + "api_op_ListImageReferrers.go", "api_op_ListImages.go", + "api_op_ListPullTimeUpdateExclusions.go", "api_op_ListTagsForResource.go", "api_op_PutAccountSetting.go", "api_op_PutImage.go", @@ -48,11 +54,14 @@ "api_op_PutRegistryPolicy.go", "api_op_PutRegistryScanningConfiguration.go", "api_op_PutReplicationConfiguration.go", + "api_op_PutSigningConfiguration.go", + "api_op_RegisterPullTimeUpdateExclusion.go", "api_op_SetRepositoryPolicy.go", "api_op_StartImageScan.go", "api_op_StartLifecyclePolicyPreview.go", "api_op_TagResource.go", "api_op_UntagResource.go", + "api_op_UpdateImageStorageClass.go", "api_op_UpdatePullThroughCacheRule.go", "api_op_UpdateRepositoryCreationTemplate.go", "api_op_UploadLayerPart.go", @@ -67,7 +76,6 @@ "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", "options.go", - "protocol_test.go", "serializers.go", "snapshot_test.go", "sra_operation_order_test.go", @@ -76,7 +84,7 
@@ "types/types.go", "validators.go" ], - "go": "1.22", + "go": "1.24", "module": "github.com/aws/aws-sdk-go-v2/service/ecr", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go index d861242c1f..3ccd9aa208 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go @@ -3,4 +3,4 @@ package ecr // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.45.1" +const goModuleVersion = "1.56.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go index 58b2e76cb3..868353b2e5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go @@ -348,6 +348,9 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-5", }, }, + endpoints.EndpointKey{ + Region: "ap-southeast-6", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-southeast-7", }: endpoints.Endpoint{ @@ -980,6 +983,13 @@ var defaultPartitions = endpoints.Partitions{ { ID: "aws-eusc", Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "api.ecr.{region}.api.amazonwebservices.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, { Variant: endpoints.FIPSVariant, }: { @@ -987,6 +997,13 @@ var defaultPartitions = endpoints.Partitions{ Protocols: []string{"https"}, SignatureVersions: []string{"v4"}, }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "api.ecr-fips.{region}.api.amazonwebservices.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, { Variant: 0, }: { 
@@ -997,6 +1014,11 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.AwsEusc, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "eusc-de-east-1", + }: endpoints.Endpoint{}, + }, }, { ID: "aws-iso", @@ -1066,6 +1088,9 @@ var defaultPartitions = endpoints.Partitions{ Region: "us-isob-east-1", }, }, + endpoints.EndpointKey{ + Region: "us-isob-west-1", + }: endpoints.Endpoint{}, }, }, { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/options.go index 8d993c73e2..17d736f4e7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/options.go @@ -58,8 +58,7 @@ type Options struct { // the client option BaseEndpoint instead. EndpointResolver EndpointResolver - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. + // Resolves the endpoint used for a particular service operation. EndpointResolverV2 EndpointResolverV2 // Signature Version 4 (SigV4) Signer @@ -119,12 +118,18 @@ type Options struct { // implementation if nil. HTTPClient HTTPClient + // Client registry of operation interceptors. + Interceptors smithyhttp.InterceptorRegistry + // The auth scheme resolver which determines how to authenticate for each // operation. AuthSchemeResolver AuthSchemeResolver // The list of auth schemes supported by the client. AuthSchemes []smithyhttp.AuthScheme + + // Priority list of preferred auth scheme names (e.g. sigv4a). + AuthSchemePreference []string } // Copy creates a clone where the APIOptions list is deep copied. 
@@ -132,6 +137,7 @@ func (o Options) Copy() Options { to := o to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) copy(to.APIOptions, o.APIOptions) + to.Interceptors = o.Interceptors.Copy() return to } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/serializers.go index 5deaebee8c..183bbbdcfb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/serializers.go @@ -870,6 +870,128 @@ func (m *awsAwsjson11_serializeOpDeleteRepositoryPolicy) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpDeleteSigningConfiguration struct { +} + +func (*awsAwsjson11_serializeOpDeleteSigningConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteSigningConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteSigningConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + 
request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DeleteSigningConfiguration") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteSigningConfigurationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeregisterPullTimeUpdateExclusion struct { +} + +func (*awsAwsjson11_serializeOpDeregisterPullTimeUpdateExclusion) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeregisterPullTimeUpdateExclusion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeregisterPullTimeUpdateExclusionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DeregisterPullTimeUpdateExclusion") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeregisterPullTimeUpdateExclusionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpDescribeImageReplicationStatus struct { } @@ -1053,6 +1175,67 @@ func (m *awsAwsjson11_serializeOpDescribeImageScanFindings) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpDescribeImageSigningStatus struct { +} + +func (*awsAwsjson11_serializeOpDescribeImageSigningStatus) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeImageSigningStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next 
middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeImageSigningStatusInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DescribeImageSigningStatus") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeImageSigningStatusInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + 
span.End() + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpDescribePullThroughCacheRules struct { } @@ -1785,14 +1968,14 @@ func (m *awsAwsjson11_serializeOpGetRepositoryPolicy) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsAwsjson11_serializeOpInitiateLayerUpload struct { +type awsAwsjson11_serializeOpGetSigningConfiguration struct { } -func (*awsAwsjson11_serializeOpInitiateLayerUpload) ID() string { +func (*awsAwsjson11_serializeOpGetSigningConfiguration) ID() string { return "OperationSerializer" } -func (m *awsAwsjson11_serializeOpInitiateLayerUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson11_serializeOpGetSigningConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -1804,7 +1987,7 @@ func (m *awsAwsjson11_serializeOpInitiateLayerUpload) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*InitiateLayerUploadInput) + input, ok := in.Parameters.(*GetSigningConfigurationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -1825,10 +2008,10 @@ func (m *awsAwsjson11_serializeOpInitiateLayerUpload) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.InitiateLayerUpload") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetSigningConfiguration") jsonEncoder := 
smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentInitiateLayerUploadInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson11_serializeOpDocumentGetSigningConfigurationInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1846,14 +2029,14 @@ func (m *awsAwsjson11_serializeOpInitiateLayerUpload) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsAwsjson11_serializeOpListImages struct { +type awsAwsjson11_serializeOpInitiateLayerUpload struct { } -func (*awsAwsjson11_serializeOpListImages) ID() string { +func (*awsAwsjson11_serializeOpInitiateLayerUpload) ID() string { return "OperationSerializer" } -func (m *awsAwsjson11_serializeOpListImages) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson11_serializeOpInitiateLayerUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -1865,7 +2048,7 @@ func (m *awsAwsjson11_serializeOpListImages) HandleSerialize(ctx context.Context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ListImagesInput) + input, ok := in.Parameters.(*InitiateLayerUploadInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -1886,10 +2069,10 @@ func (m *awsAwsjson11_serializeOpListImages) HandleSerialize(ctx context.Context return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.ListImages") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.InitiateLayerUpload") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentListImagesInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson11_serializeOpDocumentInitiateLayerUploadInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1907,14 +2090,14 @@ func (m *awsAwsjson11_serializeOpListImages) HandleSerialize(ctx context.Context return next.HandleSerialize(ctx, in) } -type awsAwsjson11_serializeOpListTagsForResource struct { +type awsAwsjson11_serializeOpListImageReferrers struct { } -func (*awsAwsjson11_serializeOpListTagsForResource) ID() string { +func (*awsAwsjson11_serializeOpListImageReferrers) ID() string { return "OperationSerializer" } -func (m *awsAwsjson11_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson11_serializeOpListImageReferrers) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -1926,7 +2109,7 @@ func (m *awsAwsjson11_serializeOpListTagsForResource) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ListTagsForResourceInput) + input, ok := in.Parameters.(*ListImageReferrersInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -1947,10 +2130,10 @@ func (m *awsAwsjson11_serializeOpListTagsForResource) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: err} } 
httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.ListTagsForResource") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.ListImageReferrers") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentListTagsForResourceInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson11_serializeOpDocumentListImageReferrersInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1968,14 +2151,14 @@ func (m *awsAwsjson11_serializeOpListTagsForResource) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsAwsjson11_serializeOpPutAccountSetting struct { +type awsAwsjson11_serializeOpListImages struct { } -func (*awsAwsjson11_serializeOpPutAccountSetting) ID() string { +func (*awsAwsjson11_serializeOpListImages) ID() string { return "OperationSerializer" } -func (m *awsAwsjson11_serializeOpPutAccountSetting) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson11_serializeOpListImages) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -1987,7 +2170,7 @@ func (m *awsAwsjson11_serializeOpPutAccountSetting) HandleSerialize(ctx context. 
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*PutAccountSettingInput) + input, ok := in.Parameters.(*ListImagesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -2008,10 +2191,10 @@ func (m *awsAwsjson11_serializeOpPutAccountSetting) HandleSerialize(ctx context. return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutAccountSetting") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.ListImages") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentPutAccountSettingInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson11_serializeOpDocumentListImagesInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2029,14 +2212,14 @@ func (m *awsAwsjson11_serializeOpPutAccountSetting) HandleSerialize(ctx context. 
return next.HandleSerialize(ctx, in) } -type awsAwsjson11_serializeOpPutImage struct { +type awsAwsjson11_serializeOpListPullTimeUpdateExclusions struct { } -func (*awsAwsjson11_serializeOpPutImage) ID() string { +func (*awsAwsjson11_serializeOpListPullTimeUpdateExclusions) ID() string { return "OperationSerializer" } -func (m *awsAwsjson11_serializeOpPutImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson11_serializeOpListPullTimeUpdateExclusions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -2048,7 +2231,7 @@ func (m *awsAwsjson11_serializeOpPutImage) HandleSerialize(ctx context.Context, return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*PutImageInput) + input, ok := in.Parameters.(*ListPullTimeUpdateExclusionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -2069,10 +2252,10 @@ func (m *awsAwsjson11_serializeOpPutImage) HandleSerialize(ctx context.Context, return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutImage") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.ListPullTimeUpdateExclusions") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentPutImageInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson11_serializeOpDocumentListPullTimeUpdateExclusionsInput(input, jsonEncoder.Value); err != nil { return out, metadata, 
&smithy.SerializationError{Err: err} } @@ -2090,14 +2273,14 @@ func (m *awsAwsjson11_serializeOpPutImage) HandleSerialize(ctx context.Context, return next.HandleSerialize(ctx, in) } -type awsAwsjson11_serializeOpPutImageScanningConfiguration struct { +type awsAwsjson11_serializeOpListTagsForResource struct { } -func (*awsAwsjson11_serializeOpPutImageScanningConfiguration) ID() string { +func (*awsAwsjson11_serializeOpListTagsForResource) ID() string { return "OperationSerializer" } -func (m *awsAwsjson11_serializeOpPutImageScanningConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson11_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -2109,7 +2292,7 @@ func (m *awsAwsjson11_serializeOpPutImageScanningConfiguration) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*PutImageScanningConfigurationInput) + input, ok := in.Parameters.(*ListTagsForResourceInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -2130,10 +2313,10 @@ func (m *awsAwsjson11_serializeOpPutImageScanningConfiguration) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutImageScanningConfiguration") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.ListTagsForResource") jsonEncoder := smithyjson.NewEncoder() - if err := 
awsAwsjson11_serializeOpDocumentPutImageScanningConfigurationInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson11_serializeOpDocumentListTagsForResourceInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2151,14 +2334,14 @@ func (m *awsAwsjson11_serializeOpPutImageScanningConfiguration) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsAwsjson11_serializeOpPutImageTagMutability struct { +type awsAwsjson11_serializeOpPutAccountSetting struct { } -func (*awsAwsjson11_serializeOpPutImageTagMutability) ID() string { +func (*awsAwsjson11_serializeOpPutAccountSetting) ID() string { return "OperationSerializer" } -func (m *awsAwsjson11_serializeOpPutImageTagMutability) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson11_serializeOpPutAccountSetting) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -2170,7 +2353,7 @@ func (m *awsAwsjson11_serializeOpPutImageTagMutability) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*PutImageTagMutabilityInput) + input, ok := in.Parameters.(*PutAccountSettingInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -2191,10 +2374,10 @@ func (m *awsAwsjson11_serializeOpPutImageTagMutability) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutImageTagMutability") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutAccountSetting") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentPutImageTagMutabilityInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson11_serializeOpDocumentPutAccountSettingInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2212,14 +2395,14 @@ func (m *awsAwsjson11_serializeOpPutImageTagMutability) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsAwsjson11_serializeOpPutLifecyclePolicy struct { +type awsAwsjson11_serializeOpPutImage struct { } -func (*awsAwsjson11_serializeOpPutLifecyclePolicy) ID() string { +func (*awsAwsjson11_serializeOpPutImage) ID() string { return "OperationSerializer" } -func (m *awsAwsjson11_serializeOpPutLifecyclePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson11_serializeOpPutImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -2231,7 +2414,7 @@ func (m *awsAwsjson11_serializeOpPutLifecyclePolicy) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*PutLifecyclePolicyInput) + input, ok := in.Parameters.(*PutImageInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -2252,10 +2435,10 @@ func (m *awsAwsjson11_serializeOpPutLifecyclePolicy) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutLifecyclePolicy") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutImage") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentPutLifecyclePolicyInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson11_serializeOpDocumentPutImageInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2273,14 +2456,14 @@ func (m *awsAwsjson11_serializeOpPutLifecyclePolicy) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsAwsjson11_serializeOpPutRegistryPolicy struct { +type awsAwsjson11_serializeOpPutImageScanningConfiguration struct { } -func (*awsAwsjson11_serializeOpPutRegistryPolicy) ID() string { +func (*awsAwsjson11_serializeOpPutImageScanningConfiguration) ID() string { return "OperationSerializer" } -func (m *awsAwsjson11_serializeOpPutRegistryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson11_serializeOpPutImageScanningConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -2292,7 +2475,7 @@ func (m *awsAwsjson11_serializeOpPutRegistryPolicy) HandleSerialize(ctx context. return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*PutRegistryPolicyInput) + input, ok := in.Parameters.(*PutImageScanningConfigurationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -2313,10 +2496,10 @@ func (m *awsAwsjson11_serializeOpPutRegistryPolicy) HandleSerialize(ctx context. 
return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutRegistryPolicy") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutImageScanningConfiguration") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentPutRegistryPolicyInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson11_serializeOpDocumentPutImageScanningConfigurationInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2334,7 +2517,190 @@ func (m *awsAwsjson11_serializeOpPutRegistryPolicy) HandleSerialize(ctx context. return next.HandleSerialize(ctx, in) } -type awsAwsjson11_serializeOpPutRegistryScanningConfiguration struct { +type awsAwsjson11_serializeOpPutImageTagMutability struct { +} + +func (*awsAwsjson11_serializeOpPutImageTagMutability) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutImageTagMutability) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutImageTagMutabilityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } 
else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutImageTagMutability") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutImageTagMutabilityInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutLifecyclePolicy struct { +} + +func (*awsAwsjson11_serializeOpPutLifecyclePolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutLifecyclePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", 
in.Request)} + } + + input, ok := in.Parameters.(*PutLifecyclePolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutLifecyclePolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutLifecyclePolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutRegistryPolicy struct { +} + +func (*awsAwsjson11_serializeOpPutRegistryPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutRegistryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span 
:= tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutRegistryPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutRegistryPolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutRegistryPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutRegistryScanningConfiguration struct { } func 
(*awsAwsjson11_serializeOpPutRegistryScanningConfiguration) ID() string { @@ -2456,6 +2822,128 @@ func (m *awsAwsjson11_serializeOpPutReplicationConfiguration) HandleSerialize(ct return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpPutSigningConfiguration struct { +} + +func (*awsAwsjson11_serializeOpPutSigningConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutSigningConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutSigningConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutSigningConfiguration") + + jsonEncoder := smithyjson.NewEncoder() + if err := 
awsAwsjson11_serializeOpDocumentPutSigningConfigurationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpRegisterPullTimeUpdateExclusion struct { +} + +func (*awsAwsjson11_serializeOpRegisterPullTimeUpdateExclusion) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpRegisterPullTimeUpdateExclusion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RegisterPullTimeUpdateExclusionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := 
httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.RegisterPullTimeUpdateExclusion") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentRegisterPullTimeUpdateExclusionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpSetRepositoryPolicy struct { } @@ -2761,6 +3249,67 @@ func (m *awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Cont return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpUpdateImageStorageClass struct { +} + +func (*awsAwsjson11_serializeOpUpdateImageStorageClass) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateImageStorageClass) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := 
in.Parameters.(*UpdateImageStorageClassInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.UpdateImageStorageClass") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateImageStorageClassInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpUpdatePullThroughCacheRule struct { } @@ -3004,6 +3553,17 @@ func (m *awsAwsjson11_serializeOpValidatePullThroughCacheRule) HandleSerialize(c span.End() return next.HandleSerialize(ctx, in) } +func awsAwsjson11_serializeDocumentArtifactTypeList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) 
+ } + return nil +} + func awsAwsjson11_serializeDocumentBatchedOperationLayerDigestList(v []string, value smithyjson.Value) error { array := value.Array() defer array.Close() @@ -3019,6 +3579,11 @@ func awsAwsjson11_serializeDocumentDescribeImagesFilter(v *types.DescribeImagesF object := value.Object() defer object.Close() + if len(v.ImageStatus) > 0 { + ok := object.Key("imageStatus") + ok.String(string(v.ImageStatus)) + } + if len(v.TagStatus) > 0 { ok := object.Key("tagStatus") ok.String(string(v.TagStatus)) @@ -3095,25 +3660,55 @@ func awsAwsjson11_serializeDocumentImageIdentifierList(v []types.ImageIdentifier for i := range v { av := array.Value() - if err := awsAwsjson11_serializeDocumentImageIdentifier(&v[i], av); err != nil { + if err := awsAwsjson11_serializeDocumentImageIdentifier(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentImageScanningConfiguration(v *types.ImageScanningConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ScanOnPush { + ok := object.Key("scanOnPush") + ok.Boolean(v.ScanOnPush) + } + + return nil +} + +func awsAwsjson11_serializeDocumentImageTagMutabilityExclusionFilter(v *types.ImageTagMutabilityExclusionFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filter != nil { + ok := object.Key("filter") + ok.String(*v.Filter) + } + + if len(v.FilterType) > 0 { + ok := object.Key("filterType") + ok.String(string(v.FilterType)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentImageTagMutabilityExclusionFilters(v []types.ImageTagMutabilityExclusionFilter, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentImageTagMutabilityExclusionFilter(&v[i], av); err != nil { return err } } return nil } -func awsAwsjson11_serializeDocumentImageScanningConfiguration(v 
*types.ImageScanningConfiguration, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ScanOnPush { - ok := object.Key("scanOnPush") - ok.Boolean(v.ScanOnPush) - } - - return nil -} - func awsAwsjson11_serializeDocumentLayerDigestList(v []string, value smithyjson.Value) error { array := value.Array() defer array.Close() @@ -3137,10 +3732,34 @@ func awsAwsjson11_serializeDocumentLifecyclePolicyPreviewFilter(v *types.Lifecyc return nil } +func awsAwsjson11_serializeDocumentListImageReferrersFilter(v *types.ListImageReferrersFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ArtifactStatus) > 0 { + ok := object.Key("artifactStatus") + ok.String(string(v.ArtifactStatus)) + } + + if v.ArtifactTypes != nil { + ok := object.Key("artifactTypes") + if err := awsAwsjson11_serializeDocumentArtifactTypeList(v.ArtifactTypes, ok); err != nil { + return err + } + } + + return nil +} + func awsAwsjson11_serializeDocumentListImagesFilter(v *types.ListImagesFilter, value smithyjson.Value) error { object := value.Object() defer object.Close() + if len(v.ImageStatus) > 0 { + ok := object.Key("imageStatus") + ok.String(string(v.ImageStatus)) + } + if len(v.TagStatus) > 0 { ok := object.Key("tagStatus") ok.String(string(v.TagStatus)) @@ -3385,6 +4004,94 @@ func awsAwsjson11_serializeDocumentScanningRepositoryFilterList(v []types.Scanni return nil } +func awsAwsjson11_serializeDocumentSigningConfiguration(v *types.SigningConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Rules != nil { + ok := object.Key("rules") + if err := awsAwsjson11_serializeDocumentSigningRuleList(v.Rules, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentSigningRepositoryFilter(v *types.SigningRepositoryFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filter != nil { + ok := 
object.Key("filter") + ok.String(*v.Filter) + } + + if len(v.FilterType) > 0 { + ok := object.Key("filterType") + ok.String(string(v.FilterType)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentSigningRepositoryFilterList(v []types.SigningRepositoryFilter, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentSigningRepositoryFilter(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentSigningRule(v *types.SigningRule, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RepositoryFilters != nil { + ok := object.Key("repositoryFilters") + if err := awsAwsjson11_serializeDocumentSigningRepositoryFilterList(v.RepositoryFilters, ok); err != nil { + return err + } + } + + if v.SigningProfileArn != nil { + ok := object.Key("signingProfileArn") + ok.String(*v.SigningProfileArn) + } + + return nil +} + +func awsAwsjson11_serializeDocumentSigningRuleList(v []types.SigningRule, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentSigningRule(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentSubjectIdentifier(v *types.SubjectIdentifier, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageDigest != nil { + ok := object.Key("imageDigest") + ok.String(*v.ImageDigest) + } + + return nil +} + func awsAwsjson11_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3623,6 +4330,13 @@ func awsAwsjson11_serializeOpDocumentCreateRepositoryCreationTemplateInput(v *Cr ok.String(string(v.ImageTagMutability)) } + if v.ImageTagMutabilityExclusionFilters != nil { + ok := object.Key("imageTagMutabilityExclusionFilters") + 
if err := awsAwsjson11_serializeDocumentImageTagMutabilityExclusionFilters(v.ImageTagMutabilityExclusionFilters, ok); err != nil { + return err + } + } + if v.LifecyclePolicy != nil { ok := object.Key("lifecyclePolicy") ok.String(*v.LifecyclePolicy) @@ -3671,6 +4385,13 @@ func awsAwsjson11_serializeOpDocumentCreateRepositoryInput(v *CreateRepositoryIn ok.String(string(v.ImageTagMutability)) } + if v.ImageTagMutabilityExclusionFilters != nil { + ok := object.Key("imageTagMutabilityExclusionFilters") + if err := awsAwsjson11_serializeDocumentImageTagMutabilityExclusionFilters(v.ImageTagMutabilityExclusionFilters, ok); err != nil { + return err + } + } + if v.RegistryId != nil { ok := object.Key("registryId") ok.String(*v.RegistryId) @@ -3783,6 +4504,25 @@ func awsAwsjson11_serializeOpDocumentDeleteRepositoryPolicyInput(v *DeleteReposi return nil } +func awsAwsjson11_serializeOpDocumentDeleteSigningConfigurationInput(v *DeleteSigningConfigurationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeregisterPullTimeUpdateExclusionInput(v *DeregisterPullTimeUpdateExclusionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.PrincipalArn != nil { + ok := object.Key("principalArn") + ok.String(*v.PrincipalArn) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentDescribeImageReplicationStatusInput(v *DescribeImageReplicationStatusInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3841,6 +4581,30 @@ func awsAwsjson11_serializeOpDocumentDescribeImageScanFindingsInput(v *DescribeI return nil } +func awsAwsjson11_serializeOpDocumentDescribeImageSigningStatusInput(v *DescribeImageSigningStatusInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageId != nil { + ok := object.Key("imageId") + if err := 
awsAwsjson11_serializeDocumentImageIdentifier(v.ImageId, ok); err != nil { + return err + } + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentDescribeImagesInput(v *DescribeImagesInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -4108,6 +4872,13 @@ func awsAwsjson11_serializeOpDocumentGetRepositoryPolicyInput(v *GetRepositoryPo return nil } +func awsAwsjson11_serializeOpDocumentGetSigningConfigurationInput(v *GetSigningConfigurationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + return nil +} + func awsAwsjson11_serializeOpDocumentInitiateLayerUploadInput(v *InitiateLayerUploadInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -4125,6 +4896,47 @@ func awsAwsjson11_serializeOpDocumentInitiateLayerUploadInput(v *InitiateLayerUp return nil } +func awsAwsjson11_serializeOpDocumentListImageReferrersInput(v *ListImageReferrersInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filter != nil { + ok := object.Key("filter") + if err := awsAwsjson11_serializeDocumentListImageReferrersFilter(v.Filter, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + if v.SubjectId != nil { + ok := object.Key("subjectId") + if err := awsAwsjson11_serializeDocumentSubjectIdentifier(v.SubjectId, ok); err != nil { + return err + } + } + + return nil +} + func 
awsAwsjson11_serializeOpDocumentListImagesInput(v *ListImagesInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -4159,6 +4971,23 @@ func awsAwsjson11_serializeOpDocumentListImagesInput(v *ListImagesInput, value s return nil } +func awsAwsjson11_serializeOpDocumentListPullTimeUpdateExclusionsInput(v *ListPullTimeUpdateExclusionsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentListTagsForResourceInput(v *ListTagsForResourceInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -4258,6 +5087,13 @@ func awsAwsjson11_serializeOpDocumentPutImageTagMutabilityInput(v *PutImageTagMu ok.String(string(v.ImageTagMutability)) } + if v.ImageTagMutabilityExclusionFilters != nil { + ok := object.Key("imageTagMutabilityExclusionFilters") + if err := awsAwsjson11_serializeDocumentImageTagMutabilityExclusionFilters(v.ImageTagMutabilityExclusionFilters, ok); err != nil { + return err + } + } + if v.RegistryId != nil { ok := object.Key("registryId") ok.String(*v.RegistryId) @@ -4338,6 +5174,32 @@ func awsAwsjson11_serializeOpDocumentPutReplicationConfigurationInput(v *PutRepl return nil } +func awsAwsjson11_serializeOpDocumentPutSigningConfigurationInput(v *PutSigningConfigurationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.SigningConfiguration != nil { + ok := object.Key("signingConfiguration") + if err := awsAwsjson11_serializeDocumentSigningConfiguration(v.SigningConfiguration, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentRegisterPullTimeUpdateExclusionInput(v *RegisterPullTimeUpdateExclusionInput, value smithyjson.Value) error { + 
object := value.Object() + defer object.Close() + + if v.PrincipalArn != nil { + ok := object.Key("principalArn") + ok.String(*v.PrincipalArn) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentSetRepositoryPolicyInput(v *SetRepositoryPolicyInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -4449,6 +5311,35 @@ func awsAwsjson11_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, v return nil } +func awsAwsjson11_serializeOpDocumentUpdateImageStorageClassInput(v *UpdateImageStorageClassInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageId != nil { + ok := object.Key("imageId") + if err := awsAwsjson11_serializeDocumentImageIdentifier(v.ImageId, ok); err != nil { + return err + } + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + if len(v.TargetStorageClass) > 0 { + ok := object.Key("targetStorageClass") + ok.String(string(v.TargetStorageClass)) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentUpdatePullThroughCacheRuleInput(v *UpdatePullThroughCacheRuleInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -4509,6 +5400,13 @@ func awsAwsjson11_serializeOpDocumentUpdateRepositoryCreationTemplateInput(v *Up ok.String(string(v.ImageTagMutability)) } + if v.ImageTagMutabilityExclusionFilters != nil { + ok := object.Key("imageTagMutabilityExclusionFilters") + if err := awsAwsjson11_serializeDocumentImageTagMutabilityExclusionFilters(v.ImageTagMutabilityExclusionFilters, ok); err != nil { + return err + } + } + if v.LifecyclePolicy != nil { ok := object.Key("lifecyclePolicy") ok.String(*v.LifecyclePolicy) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go index 
5ecc1326dc..4bcea9e809 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go @@ -2,6 +2,50 @@ package types +type ArtifactStatus string + +// Enum values for ArtifactStatus +const ( + ArtifactStatusActive ArtifactStatus = "ACTIVE" + ArtifactStatusArchived ArtifactStatus = "ARCHIVED" + ArtifactStatusActivating ArtifactStatus = "ACTIVATING" +) + +// Values returns all known values for ArtifactStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ArtifactStatus) Values() []ArtifactStatus { + return []ArtifactStatus{ + "ACTIVE", + "ARCHIVED", + "ACTIVATING", + } +} + +type ArtifactStatusFilter string + +// Enum values for ArtifactStatusFilter +const ( + ArtifactStatusFilterActive ArtifactStatusFilter = "ACTIVE" + ArtifactStatusFilterArchived ArtifactStatusFilter = "ARCHIVED" + ArtifactStatusFilterActivating ArtifactStatusFilter = "ACTIVATING" + ArtifactStatusFilterAny ArtifactStatusFilter = "ANY" +) + +// Values returns all known values for ArtifactStatusFilter. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ArtifactStatusFilter) Values() []ArtifactStatusFilter { + return []ArtifactStatusFilter{ + "ACTIVE", + "ARCHIVED", + "ACTIVATING", + "ANY", + } +} + type EncryptionType string // Enum values for EncryptionType @@ -54,7 +98,8 @@ type ImageActionType string // Enum values for ImageActionType const ( - ImageActionTypeExpire ImageActionType = "EXPIRE" + ImageActionTypeExpire ImageActionType = "EXPIRE" + ImageActionTypeTransition ImageActionType = "TRANSITION" ) // Values returns all known values for ImageActionType. 
Note that this can be @@ -64,6 +109,7 @@ const ( func (ImageActionType) Values() []ImageActionType { return []ImageActionType{ "EXPIRE", + "TRANSITION", } } @@ -81,6 +127,7 @@ const ( ImageFailureCodeUpstreamAccessDenied ImageFailureCode = "UpstreamAccessDenied" ImageFailureCodeUpstreamTooManyRequests ImageFailureCode = "UpstreamTooManyRequests" ImageFailureCodeUpstreamUnavailable ImageFailureCode = "UpstreamUnavailable" + ImageFailureCodeImageInaccessible ImageFailureCode = "ImageInaccessible" ) // Values returns all known values for ImageFailureCode. Note that this can be @@ -99,6 +146,51 @@ func (ImageFailureCode) Values() []ImageFailureCode { "UpstreamAccessDenied", "UpstreamTooManyRequests", "UpstreamUnavailable", + "ImageInaccessible", + } +} + +type ImageStatus string + +// Enum values for ImageStatus +const ( + ImageStatusActive ImageStatus = "ACTIVE" + ImageStatusArchived ImageStatus = "ARCHIVED" + ImageStatusActivating ImageStatus = "ACTIVATING" +) + +// Values returns all known values for ImageStatus. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ImageStatus) Values() []ImageStatus { + return []ImageStatus{ + "ACTIVE", + "ARCHIVED", + "ACTIVATING", + } +} + +type ImageStatusFilter string + +// Enum values for ImageStatusFilter +const ( + ImageStatusFilterActive ImageStatusFilter = "ACTIVE" + ImageStatusFilterArchived ImageStatusFilter = "ARCHIVED" + ImageStatusFilterActivating ImageStatusFilter = "ACTIVATING" + ImageStatusFilterAny ImageStatusFilter = "ANY" +) + +// Values returns all known values for ImageStatusFilter. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (ImageStatusFilter) Values() []ImageStatusFilter { + return []ImageStatusFilter{ + "ACTIVE", + "ARCHIVED", + "ACTIVATING", + "ANY", } } @@ -106,8 +198,10 @@ type ImageTagMutability string // Enum values for ImageTagMutability const ( - ImageTagMutabilityMutable ImageTagMutability = "MUTABLE" - ImageTagMutabilityImmutable ImageTagMutability = "IMMUTABLE" + ImageTagMutabilityMutable ImageTagMutability = "MUTABLE" + ImageTagMutabilityImmutable ImageTagMutability = "IMMUTABLE" + ImageTagMutabilityImmutableWithExclusion ImageTagMutability = "IMMUTABLE_WITH_EXCLUSION" + ImageTagMutabilityMutableWithExclusion ImageTagMutability = "MUTABLE_WITH_EXCLUSION" ) // Values returns all known values for ImageTagMutability. Note that this can be @@ -118,6 +212,26 @@ func (ImageTagMutability) Values() []ImageTagMutability { return []ImageTagMutability{ "MUTABLE", "IMMUTABLE", + "IMMUTABLE_WITH_EXCLUSION", + "MUTABLE_WITH_EXCLUSION", + } +} + +type ImageTagMutabilityExclusionFilterType string + +// Enum values for ImageTagMutabilityExclusionFilterType +const ( + ImageTagMutabilityExclusionFilterTypeWildcard ImageTagMutabilityExclusionFilterType = "WILDCARD" +) + +// Values returns all known values for ImageTagMutabilityExclusionFilterType. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ImageTagMutabilityExclusionFilterType) Values() []ImageTagMutabilityExclusionFilterType { + return []ImageTagMutabilityExclusionFilterType{ + "WILDCARD", } } @@ -127,6 +241,7 @@ type LayerAvailability string const ( LayerAvailabilityAvailable LayerAvailability = "AVAILABLE" LayerAvailabilityUnavailable LayerAvailability = "UNAVAILABLE" + LayerAvailabilityArchived LayerAvailability = "ARCHIVED" ) // Values returns all known values for LayerAvailability. 
Note that this can be @@ -137,6 +252,7 @@ func (LayerAvailability) Values() []LayerAvailability { return []LayerAvailability{ "AVAILABLE", "UNAVAILABLE", + "ARCHIVED", } } @@ -183,12 +299,50 @@ func (LifecyclePolicyPreviewStatus) Values() []LifecyclePolicyPreviewStatus { } } +type LifecyclePolicyStorageClass string + +// Enum values for LifecyclePolicyStorageClass +const ( + LifecyclePolicyStorageClassArchive LifecyclePolicyStorageClass = "ARCHIVE" + LifecyclePolicyStorageClassStandard LifecyclePolicyStorageClass = "STANDARD" +) + +// Values returns all known values for LifecyclePolicyStorageClass. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (LifecyclePolicyStorageClass) Values() []LifecyclePolicyStorageClass { + return []LifecyclePolicyStorageClass{ + "ARCHIVE", + "STANDARD", + } +} + +type LifecyclePolicyTargetStorageClass string + +// Enum values for LifecyclePolicyTargetStorageClass +const ( + LifecyclePolicyTargetStorageClassArchive LifecyclePolicyTargetStorageClass = "ARCHIVE" +) + +// Values returns all known values for LifecyclePolicyTargetStorageClass. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (LifecyclePolicyTargetStorageClass) Values() []LifecyclePolicyTargetStorageClass { + return []LifecyclePolicyTargetStorageClass{ + "ARCHIVE", + } +} + type RCTAppliedFor string // Enum values for RCTAppliedFor const ( RCTAppliedForReplication RCTAppliedFor = "REPLICATION" RCTAppliedForPullThroughCache RCTAppliedFor = "PULL_THROUGH_CACHE" + RCTAppliedForCreateOnPush RCTAppliedFor = "CREATE_ON_PUSH" ) // Values returns all known values for RCTAppliedFor. 
Note that this can be @@ -199,6 +353,7 @@ func (RCTAppliedFor) Values() []RCTAppliedFor { return []RCTAppliedFor{ "REPLICATION", "PULL_THROUGH_CACHE", + "CREATE_ON_PUSH", } } @@ -310,6 +465,7 @@ const ( ScanStatusScanEligibilityExpired ScanStatus = "SCAN_ELIGIBILITY_EXPIRED" ScanStatusFindingsUnavailable ScanStatus = "FINDINGS_UNAVAILABLE" ScanStatusLimitExceeded ScanStatus = "LIMIT_EXCEEDED" + ScanStatusImageArchived ScanStatus = "IMAGE_ARCHIVED" ) // Values returns all known values for ScanStatus. Note that this can be expanded @@ -327,6 +483,7 @@ func (ScanStatus) Values() []ScanStatus { "SCAN_ELIGIBILITY_EXPIRED", "FINDINGS_UNAVAILABLE", "LIMIT_EXCEEDED", + "IMAGE_ARCHIVED", } } @@ -349,6 +506,44 @@ func (ScanType) Values() []ScanType { } } +type SigningRepositoryFilterType string + +// Enum values for SigningRepositoryFilterType +const ( + SigningRepositoryFilterTypeWildcardMatch SigningRepositoryFilterType = "WILDCARD_MATCH" +) + +// Values returns all known values for SigningRepositoryFilterType. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (SigningRepositoryFilterType) Values() []SigningRepositoryFilterType { + return []SigningRepositoryFilterType{ + "WILDCARD_MATCH", + } +} + +type SigningStatus string + +// Enum values for SigningStatus +const ( + SigningStatusInProgress SigningStatus = "IN_PROGRESS" + SigningStatusComplete SigningStatus = "COMPLETE" + SigningStatusFailed SigningStatus = "FAILED" +) + +// Values returns all known values for SigningStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (SigningStatus) Values() []SigningStatus { + return []SigningStatus{ + "IN_PROGRESS", + "COMPLETE", + "FAILED", + } +} + type TagStatus string // Enum values for TagStatus @@ -370,6 +565,25 @@ func (TagStatus) Values() []TagStatus { } } +type TargetStorageClass string + +// Enum values for TargetStorageClass +const ( + TargetStorageClassStandard TargetStorageClass = "STANDARD" + TargetStorageClassArchive TargetStorageClass = "ARCHIVE" +) + +// Values returns all known values for TargetStorageClass. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TargetStorageClass) Values() []TargetStorageClass { + return []TargetStorageClass{ + "STANDARD", + "ARCHIVE", + } +} + type UpstreamRegistry string // Enum values for UpstreamRegistry @@ -382,6 +596,7 @@ const ( UpstreamRegistryGitHubContainerRegistry UpstreamRegistry = "github-container-registry" UpstreamRegistryAzureContainerRegistry UpstreamRegistry = "azure-container-registry" UpstreamRegistryGitLabContainerRegistry UpstreamRegistry = "gitlab-container-registry" + UpstreamRegistryChainguard UpstreamRegistry = "chainguard" ) // Values returns all known values for UpstreamRegistry. Note that this can be @@ -398,5 +613,6 @@ func (UpstreamRegistry) Values() []UpstreamRegistry { "github-container-registry", "azure-container-registry", "gitlab-container-registry", + "chainguard", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go index 9e1f90cf5f..18ff365843 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go @@ -7,6 +7,35 @@ import ( smithy "github.com/aws/smithy-go" ) +// The operation did not succeed because the account is managed by a organization +// policy. 
+type BlockedByOrganizationPolicyException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *BlockedByOrganizationPolicyException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *BlockedByOrganizationPolicyException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *BlockedByOrganizationPolicyException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "BlockedByOrganizationPolicyException" + } + return *e.ErrorCodeOverride +} +func (e *BlockedByOrganizationPolicyException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + // The specified layer upload does not contain any layer parts. type EmptyUploadException struct { Message *string @@ -33,6 +62,58 @@ func (e *EmptyUploadException) ErrorCode() string { } func (e *EmptyUploadException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// The specified pull time update exclusion already exists for the registry. +type ExclusionAlreadyExistsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ExclusionAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExclusionAlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExclusionAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ExclusionAlreadyExistsException" + } + return *e.ErrorCodeOverride +} +func (e *ExclusionAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified pull time update exclusion was not found. 
+type ExclusionNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ExclusionNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExclusionNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExclusionNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ExclusionNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ExclusionNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // The specified image has already been pushed, and there were no changes to the // manifest or image tag after the last push. type ImageAlreadyExistsException struct { @@ -60,6 +141,32 @@ func (e *ImageAlreadyExistsException) ErrorCode() string { } func (e *ImageAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// The specified image is archived and cannot be scanned. +type ImageArchivedException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ImageArchivedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ImageArchivedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ImageArchivedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ImageArchivedException" + } + return *e.ErrorCodeOverride +} +func (e *ImageArchivedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // The specified image digest does not match the digest that Amazon ECR calculated // for the image. 
type ImageDigestDoesNotMatchException struct { @@ -113,6 +220,34 @@ func (e *ImageNotFoundException) ErrorCode() string { } func (e *ImageNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// The requested image storage class update is not supported. +type ImageStorageClassUpdateNotSupportedException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ImageStorageClassUpdateNotSupportedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ImageStorageClassUpdateNotSupportedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ImageStorageClassUpdateNotSupportedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ImageStorageClassUpdateNotSupportedException" + } + return *e.ErrorCodeOverride +} +func (e *ImageStorageClassUpdateNotSupportedException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + // The specified image is tagged with a tag that already exists. The repository is // configured for tag immutability. type ImageTagAlreadyExistsException struct { @@ -799,6 +934,35 @@ func (e *ServerException) ErrorCode() string { } func (e *ServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } +// The specified signing configuration was not found. This occurs when attempting +// to retrieve or delete a signing configuration that does not exist. 
+type SigningConfigurationNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *SigningConfigurationNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SigningConfigurationNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SigningConfigurationNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "SigningConfigurationNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *SigningConfigurationNotFoundException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + // The repository creation template already exists. Specify a unique prefix and // try again. type TemplateAlreadyExistsException struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go index 7040606617..c6beefc3dc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go @@ -133,6 +133,10 @@ type CvssScoreDetails struct { // An object representing a filter on a DescribeImages operation. type DescribeImagesFilter struct { + // The image status with which to filter your DescribeImages results. Valid values are ACTIVE , + // ARCHIVED , and ACTIVATING . + ImageStatus ImageStatusFilter + // The tag status with which to filter your DescribeImages results. You can filter results based // on whether they are TAGGED or UNTAGGED . TagStatus TagStatus @@ -338,9 +342,20 @@ type ImageDetail struct { // larger image than the image shown in the Amazon Web Services Management Console. ImageSizeInBytes *int64 + // The current status of the image. + ImageStatus ImageStatus + // The list of tags associated with this image. 
ImageTags []string + // The date and time, expressed in standard JavaScript date format, when the image + // was last restored from Amazon ECR archive to Amazon ECR standard. + LastActivatedAt *time.Time + + // The date and time, expressed in standard JavaScript date format, when the image + // was last transitioned to Amazon ECR archive. + LastArchivedAt *time.Time + // The date and time, expressed in standard JavaScript date format, when Amazon // ECR recorded the last image pull. // @@ -359,6 +374,9 @@ type ImageDetail struct { // The name of the repository to which this image belongs. RepositoryName *string + // The digest of the subject manifest for images that are referrers. + SubjectManifestDigest *string + noSmithyDocumentSerde } @@ -389,6 +407,36 @@ type ImageIdentifier struct { noSmithyDocumentSerde } +// An object representing an artifact associated with a subject image. +type ImageReferrer struct { + + // The digest of the artifact manifest. + // + // This member is required. + Digest *string + + // The media type of the artifact manifest. + // + // This member is required. + MediaType *string + + // The size, in bytes, of the artifact. + // + // This member is required. + Size *int64 + + // A map of annotations associated with the artifact. + Annotations map[string]string + + // The status of the artifact. Valid values are ACTIVE , ARCHIVED , or ACTIVATING . + ArtifactStatus ArtifactStatus + + // A string identifying the type of artifact. + ArtifactType *string + + noSmithyDocumentSerde +} + // The status of the replication process for an image. type ImageReplicationStatus struct { @@ -491,6 +539,50 @@ type ImageScanStatus struct { noSmithyDocumentSerde } +// The signing status for an image. Each status corresponds to a signing profile. +type ImageSigningStatus struct { + + // The failure code, which is only present if status is FAILED . + FailureCode *string + + // A description of why signing the image failed. 
This field is only present if + // status is FAILED . + FailureReason *string + + // The ARN of the Amazon Web Services Signer signing profile used to sign the + // image. + SigningProfileArn *string + + // The image's signing status. Possible values are: + // + // - IN_PROGRESS - Signing is currently in progress. + // + // - COMPLETE - The signature was successfully generated. + // + // - FAILED - Signing failed. See failureCode and failureReason for details. + Status SigningStatus + + noSmithyDocumentSerde +} + +// A filter that specifies which image tags should be excluded from the +// repository's image tag mutability setting. +type ImageTagMutabilityExclusionFilter struct { + + // The filter value used to match image tags for exclusion from mutability + // settings. + // + // This member is required. + Filter *string + + // The type of filter to apply for excluding image tags from mutability settings. + // + // This member is required. + FilterType ImageTagMutabilityExclusionFilterType + + noSmithyDocumentSerde +} + // An object representing an Amazon ECR image layer. type Layer struct { @@ -554,6 +646,9 @@ type LifecyclePolicyPreviewResult struct { // The list of tags associated with this image. ImageTags []string + // The storage class of the image. + StorageClass LifecyclePolicyStorageClass + noSmithyDocumentSerde } @@ -563,23 +658,48 @@ type LifecyclePolicyPreviewSummary struct { // The number of expiring images. ExpiringImageTotalCount *int32 + // The total count of images that will be transitioned to each storage class. This + // field is only present if at least one image will be transitoned in the summary. + TransitioningImageTotalCounts []TransitioningImageTotalCount + noSmithyDocumentSerde } // The type of action to be taken. type LifecyclePolicyRuleAction struct { + // The target storage class for the action. This is only present when the type is + // TRANSITION. 
+ TargetStorageClass LifecyclePolicyTargetStorageClass + // The type of action to be taken. Type ImageActionType noSmithyDocumentSerde } +// An object representing a filter on a ListImageReferrers operation. +type ListImageReferrersFilter struct { + + // The artifact status with which to filter your ListImageReferrers results. Valid values are ACTIVE + // , ARCHIVED , ACTIVATING , or ANY . If not specified, only artifacts with ACTIVE + // status are returned. + ArtifactStatus ArtifactStatusFilter + + // The artifact types with which to filter your ListImageReferrers results. + ArtifactTypes []string + + noSmithyDocumentSerde +} + // An object representing a filter on a ListImages operation. type ListImagesFilter struct { - // The tag status with which to filter your ListImages results. You can filter results based - // on whether they are TAGGED or UNTAGGED . + // The image status with which to filter your ListImages results. Valid values are ACTIVE , + // ARCHIVED , and ACTIVATING . + ImageStatus ImageStatusFilter + + // The tag status with which to filter your ListImages results. TagStatus TagStatus noSmithyDocumentSerde @@ -777,6 +897,10 @@ type Repository struct { // The tag mutability setting for the repository. ImageTagMutability ImageTagMutability + // A list of filters that specify which image tags are excluded from the + // repository's image tag mutability setting. + ImageTagMutabilityExclusionFilters []ImageTagMutabilityExclusionFilter + // The Amazon Web Services account ID associated with the registry that contains // the repository. RegistryId *string @@ -802,8 +926,8 @@ type Repository struct { type RepositoryCreationTemplate struct { // A list of enumerable Strings representing the repository creation scenarios - // that this template will apply towards. The two supported scenarios are - // PULL_THROUGH_CACHE and REPLICATION + // that this template will apply towards. 
The supported scenarios are + // PULL_THROUGH_CACHE, REPLICATION, and CREATE_ON_PUSH AppliedFor []RCTAppliedFor // The date and time, in JavaScript date format, when the repository creation @@ -828,6 +952,10 @@ type RepositoryCreationTemplate struct { // will be immutable which will prevent them from being overwritten. ImageTagMutability ImageTagMutability + // A list of filters that specify which image tags are excluded from the + // repository creation template's image tag mutability setting. + ImageTagMutabilityExclusionFilters []ImageTagMutabilityExclusionFilter + // The lifecycle policy to use for repositories created using the template. LifecyclePolicy *string @@ -968,6 +1096,82 @@ type ScoreDetails struct { noSmithyDocumentSerde } +// The signing configuration for a registry, which specifies rules for +// automatically signing images when pushed. +type SigningConfiguration struct { + + // A list of signing rules. Each rule defines a signing profile and optional + // repository filters that determine which images are automatically signed. Maximum + // of 10 rules. + // + // This member is required. + Rules []SigningRule + + noSmithyDocumentSerde +} + +// A repository filter used to determine which repositories have their images +// automatically signed on push. Each filter consists of a filter type and filter +// value. +type SigningRepositoryFilter struct { + + // The filter value used to match repository names. When using WILDCARD_MATCH , the + // * character matches any sequence of characters. + // + // Examples: + // + // - myapp/* - Matches all repositories starting with myapp/ + // + // - */production - Matches all repositories ending with /production + // + // - *prod* - Matches all repositories containing prod + // + // This member is required. + Filter *string + + // The type of filter to apply. Currently, only WILDCARD_MATCH is supported, which + // uses wildcard patterns to match repository names. + // + // This member is required. 
+ FilterType SigningRepositoryFilterType + + noSmithyDocumentSerde +} + +// A signing rule that specifies a signing profile and optional repository +// filters. When an image is pushed to a matching repository, a signing job is +// created using the specified profile. +type SigningRule struct { + + // The ARN of the Amazon Web Services Signer signing profile to use for signing + // images that match this rule. For more information about signing profiles, see [Signing profiles] + // in the Amazon Web Services Signer Developer Guide. + // + // [Signing profiles]: https://docs.aws.amazon.com/signer/latest/developerguide/signing-profiles.html + // + // This member is required. + SigningProfileArn *string + + // A list of repository filters that determine which repositories have their + // images signed on push. If no filters are specified, all images pushed to the + // registry are signed using the rule's signing profile. Maximum of 100 filters per + // rule. + RepositoryFilters []SigningRepositoryFilter + + noSmithyDocumentSerde +} + +// An object that identifies an image subject. +type SubjectIdentifier struct { + + // The digest of the image. + // + // This member is required. + ImageDigest *string + + noSmithyDocumentSerde +} + // The metadata to apply to a resource to help you categorize and organize them. // Each tag consists of a key and a value, both of which you define. Tag keys can // have a maximum character length of 128 characters, and tag values can have a @@ -988,6 +1192,18 @@ type Tag struct { noSmithyDocumentSerde } +// The total count of images transitioning to a storage class. +type TransitioningImageTotalCount struct { + + // The total number of images transitioning to the storage class. + ImageTotalCount *int32 + + // The target storage class. + TargetStorageClass LifecyclePolicyTargetStorageClass + + noSmithyDocumentSerde +} + // Information on the vulnerable package identified by a finding. 
type VulnerablePackage struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/validators.go index 52b1104c13..9d427fcc35 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/validators.go @@ -270,6 +270,26 @@ func (m *validateOpDeleteRepositoryPolicy) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpDeregisterPullTimeUpdateExclusion struct { +} + +func (*validateOpDeregisterPullTimeUpdateExclusion) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeregisterPullTimeUpdateExclusion) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeregisterPullTimeUpdateExclusionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeregisterPullTimeUpdateExclusionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDescribeImageReplicationStatus struct { } @@ -310,6 +330,26 @@ func (m *validateOpDescribeImageScanFindings) HandleInitialize(ctx context.Conte return next.HandleInitialize(ctx, in) } +type validateOpDescribeImageSigningStatus struct { +} + +func (*validateOpDescribeImageSigningStatus) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeImageSigningStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeImageSigningStatusInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := 
validateOpDescribeImageSigningStatusInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDescribeImages struct { } @@ -450,6 +490,26 @@ func (m *validateOpInitiateLayerUpload) HandleInitialize(ctx context.Context, in return next.HandleInitialize(ctx, in) } +type validateOpListImageReferrers struct { +} + +func (*validateOpListImageReferrers) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListImageReferrers) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListImageReferrersInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListImageReferrersInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpListImages struct { } @@ -650,6 +710,46 @@ func (m *validateOpPutReplicationConfiguration) HandleInitialize(ctx context.Con return next.HandleInitialize(ctx, in) } +type validateOpPutSigningConfiguration struct { +} + +func (*validateOpPutSigningConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutSigningConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutSigningConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutSigningConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRegisterPullTimeUpdateExclusion struct { +} + +func (*validateOpRegisterPullTimeUpdateExclusion) ID() string { + 
return "OperationInputValidation" +} + +func (m *validateOpRegisterPullTimeUpdateExclusion) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RegisterPullTimeUpdateExclusionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRegisterPullTimeUpdateExclusionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpSetRepositoryPolicy struct { } @@ -750,6 +850,26 @@ func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middl return next.HandleInitialize(ctx, in) } +type validateOpUpdateImageStorageClass struct { +} + +func (*validateOpUpdateImageStorageClass) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateImageStorageClass) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateImageStorageClassInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateImageStorageClassInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpUpdatePullThroughCacheRule struct { } @@ -882,6 +1002,10 @@ func addOpDeleteRepositoryPolicyValidationMiddleware(stack *middleware.Stack) er return stack.Initialize.Add(&validateOpDeleteRepositoryPolicy{}, middleware.After) } +func addOpDeregisterPullTimeUpdateExclusionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeregisterPullTimeUpdateExclusion{}, middleware.After) +} + func addOpDescribeImageReplicationStatusValidationMiddleware(stack 
*middleware.Stack) error { return stack.Initialize.Add(&validateOpDescribeImageReplicationStatus{}, middleware.After) } @@ -890,6 +1014,10 @@ func addOpDescribeImageScanFindingsValidationMiddleware(stack *middleware.Stack) return stack.Initialize.Add(&validateOpDescribeImageScanFindings{}, middleware.After) } +func addOpDescribeImageSigningStatusValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeImageSigningStatus{}, middleware.After) +} + func addOpDescribeImagesValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDescribeImages{}, middleware.After) } @@ -918,6 +1046,10 @@ func addOpInitiateLayerUploadValidationMiddleware(stack *middleware.Stack) error return stack.Initialize.Add(&validateOpInitiateLayerUpload{}, middleware.After) } +func addOpListImageReferrersValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListImageReferrers{}, middleware.After) +} + func addOpListImagesValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpListImages{}, middleware.After) } @@ -958,6 +1090,14 @@ func addOpPutReplicationConfigurationValidationMiddleware(stack *middleware.Stac return stack.Initialize.Add(&validateOpPutReplicationConfiguration{}, middleware.After) } +func addOpPutSigningConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutSigningConfiguration{}, middleware.After) +} + +func addOpRegisterPullTimeUpdateExclusionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRegisterPullTimeUpdateExclusion{}, middleware.After) +} + func addOpSetRepositoryPolicyValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpSetRepositoryPolicy{}, middleware.After) } @@ -978,6 +1118,10 @@ func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { return 
stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) } +func addOpUpdateImageStorageClassValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateImageStorageClass{}, middleware.After) +} + func addOpUpdatePullThroughCacheRuleValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpUpdatePullThroughCacheRule{}, middleware.After) } @@ -1024,6 +1168,41 @@ func validateEncryptionConfigurationForRepositoryCreationTemplate(v *types.Encry } } +func validateImageTagMutabilityExclusionFilter(v *types.ImageTagMutabilityExclusionFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ImageTagMutabilityExclusionFilter"} + if len(v.FilterType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("FilterType")) + } + if v.Filter == nil { + invalidParams.Add(smithy.NewErrParamRequired("Filter")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateImageTagMutabilityExclusionFilters(v []types.ImageTagMutabilityExclusionFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ImageTagMutabilityExclusionFilters"} + for i := range v { + if err := validateImageTagMutabilityExclusionFilter(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateRegistryScanningRule(v *types.RegistryScanningRule) error { if v == nil { return nil @@ -1228,6 +1407,112 @@ func validateScanningRepositoryFilterList(v []types.ScanningRepositoryFilter) er } } +func validateSigningConfiguration(v *types.SigningConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SigningConfiguration"} + if v.Rules == nil { + invalidParams.Add(smithy.NewErrParamRequired("Rules")) 
+ } else if v.Rules != nil { + if err := validateSigningRuleList(v.Rules); err != nil { + invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSigningRepositoryFilter(v *types.SigningRepositoryFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SigningRepositoryFilter"} + if v.Filter == nil { + invalidParams.Add(smithy.NewErrParamRequired("Filter")) + } + if len(v.FilterType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("FilterType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSigningRepositoryFilterList(v []types.SigningRepositoryFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SigningRepositoryFilterList"} + for i := range v { + if err := validateSigningRepositoryFilter(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSigningRule(v *types.SigningRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SigningRule"} + if v.SigningProfileArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("SigningProfileArn")) + } + if v.RepositoryFilters != nil { + if err := validateSigningRepositoryFilterList(v.RepositoryFilters); err != nil { + invalidParams.AddNested("RepositoryFilters", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSigningRuleList(v []types.SigningRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SigningRuleList"} + for i := range v { + if err := validateSigningRule(&v[i]); err != nil { + 
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSubjectIdentifier(v *types.SubjectIdentifier) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SubjectIdentifier"} + if v.ImageDigest == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageDigest")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateTag(v *types.Tag) error { if v == nil { return nil @@ -1389,6 +1674,11 @@ func validateOpCreateRepositoryCreationTemplateInput(v *CreateRepositoryCreation invalidParams.AddNested("ResourceTags", err.(smithy.InvalidParamsError)) } } + if v.ImageTagMutabilityExclusionFilters != nil { + if err := validateImageTagMutabilityExclusionFilters(v.ImageTagMutabilityExclusionFilters); err != nil { + invalidParams.AddNested("ImageTagMutabilityExclusionFilters", err.(smithy.InvalidParamsError)) + } + } if v.AppliedFor == nil { invalidParams.Add(smithy.NewErrParamRequired("AppliedFor")) } @@ -1412,6 +1702,11 @@ func validateOpCreateRepositoryInput(v *CreateRepositoryInput) error { invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) } } + if v.ImageTagMutabilityExclusionFilters != nil { + if err := validateImageTagMutabilityExclusionFilters(v.ImageTagMutabilityExclusionFilters); err != nil { + invalidParams.AddNested("ImageTagMutabilityExclusionFilters", err.(smithy.InvalidParamsError)) + } + } if v.EncryptionConfiguration != nil { if err := validateEncryptionConfiguration(v.EncryptionConfiguration); err != nil { invalidParams.AddNested("EncryptionConfiguration", err.(smithy.InvalidParamsError)) @@ -1499,6 +1794,21 @@ func validateOpDeleteRepositoryPolicyInput(v *DeleteRepositoryPolicyInput) error } } +func validateOpDeregisterPullTimeUpdateExclusionInput(v *DeregisterPullTimeUpdateExclusionInput) error { + if v == nil { + return 
nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeregisterPullTimeUpdateExclusionInput"} + if v.PrincipalArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDescribeImageReplicationStatusInput(v *DescribeImageReplicationStatusInput) error { if v == nil { return nil @@ -1535,6 +1845,24 @@ func validateOpDescribeImageScanFindingsInput(v *DescribeImageScanFindingsInput) } } +func validateOpDescribeImageSigningStatusInput(v *DescribeImageSigningStatusInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeImageSigningStatusInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.ImageId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDescribeImagesInput(v *DescribeImagesInput) error { if v == nil { return nil @@ -1643,6 +1971,28 @@ func validateOpInitiateLayerUploadInput(v *InitiateLayerUploadInput) error { } } +func validateOpListImageReferrersInput(v *ListImageReferrersInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListImageReferrersInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.SubjectId == nil { + invalidParams.Add(smithy.NewErrParamRequired("SubjectId")) + } else if v.SubjectId != nil { + if err := validateSubjectIdentifier(v.SubjectId); err != nil { + invalidParams.AddNested("SubjectId", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpListImagesInput(v *ListImagesInput) error { if v == nil { return nil @@ -1738,6 +2088,11 @@ func validateOpPutImageTagMutabilityInput(v 
*PutImageTagMutabilityInput) error { if len(v.ImageTagMutability) == 0 { invalidParams.Add(smithy.NewErrParamRequired("ImageTagMutability")) } + if v.ImageTagMutabilityExclusionFilters != nil { + if err := validateImageTagMutabilityExclusionFilters(v.ImageTagMutabilityExclusionFilters); err != nil { + invalidParams.AddNested("ImageTagMutabilityExclusionFilters", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -1814,6 +2169,40 @@ func validateOpPutReplicationConfigurationInput(v *PutReplicationConfigurationIn } } +func validateOpPutSigningConfigurationInput(v *PutSigningConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutSigningConfigurationInput"} + if v.SigningConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("SigningConfiguration")) + } else if v.SigningConfiguration != nil { + if err := validateSigningConfiguration(v.SigningConfiguration); err != nil { + invalidParams.AddNested("SigningConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRegisterPullTimeUpdateExclusionInput(v *RegisterPullTimeUpdateExclusionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RegisterPullTimeUpdateExclusionInput"} + if v.PrincipalArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpSetRepositoryPolicyInput(v *SetRepositoryPolicyInput) error { if v == nil { return nil @@ -1905,6 +2294,27 @@ func validateOpUntagResourceInput(v *UntagResourceInput) error { } } +func validateOpUpdateImageStorageClassInput(v *UpdateImageStorageClassInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateImageStorageClassInput"} 
+ if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.ImageId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageId")) + } + if len(v.TargetStorageClass) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("TargetStorageClass")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpUpdatePullThroughCacheRuleInput(v *UpdatePullThroughCacheRuleInput) error { if v == nil { return nil @@ -1938,6 +2348,11 @@ func validateOpUpdateRepositoryCreationTemplateInput(v *UpdateRepositoryCreation invalidParams.AddNested("ResourceTags", err.(smithy.InvalidParamsError)) } } + if v.ImageTagMutabilityExclusionFilters != nil { + if err := validateImageTagMutabilityExclusionFilters(v.ImageTagMutabilityExclusionFilters); err != nil { + invalidParams.AddNested("ImageTagMutabilityExclusionFilters", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md index d93e75b7c7..28ad26d262 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md @@ -1,3 +1,128 @@ +# v1.38.13 (2026-03-26) + +* **Bug Fix**: Fix a bug where a recorded clock skew could persist on the client even if the client and server clock ended up realigning. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.12 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.11 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.10 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.9 (2026-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.8 (2025-12-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.7 (2025-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.24.0. Notably this version of the library reduces the allocation footprint of the middleware system. We observe a ~10% reduction in allocations per SDK call with this change. + +# v1.38.6 (2025-11-25) + +* **Bug Fix**: Add error check for endpoint param binding during auth scheme resolution to fix panic reported in #3234 + +# v1.38.5 (2025-11-19.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.4 (2025-11-12) + +* **Bug Fix**: Further reduce allocation overhead when the metrics system isn't in-use. +* **Bug Fix**: Reduce allocation overhead when the client doesn't have any HTTP interceptors configured. +* **Bug Fix**: Remove blank trace spans towards the beginning of the request that added no additional information. This conveys a slight reduction in overall allocations. + +# v1.38.3 (2025-11-11) + +* **Bug Fix**: Return validation error if input region is not a valid host label. + +# v1.38.2 (2025-11-04) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system. 
+ +# v1.38.1 (2025-10-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.0 (2025-10-23) + +* **Feature**: Update endpoint ruleset parameters casing +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.7 (2025-10-16) + +* **Dependency Update**: Bump minimum Go version to 1.23. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.6 (2025-09-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.5 (2025-09-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.4 (2025-09-10) + +* No change notes available for this release. + +# v1.37.3 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.2 (2025-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.1 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.0 (2025-08-21) + +* **Feature**: Remove incorrect endpoint tests +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.36.1 (2025-08-20) + +* **Bug Fix**: Remove unused deserialization code. + +# v1.36.0 (2025-08-11) + +* **Feature**: Add support for configuring per-service Options via callback on global config. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.0 (2025-08-04) + +* **Feature**: Support configurable auth scheme preferences in service clients via AWS_AUTH_SCHEME_PREFERENCE in the environment, auth_scheme_preference in the config file, and through in-code settings on LoadDefaultConfig and client constructor methods. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.34.1 (2025-07-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.34.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.3 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.33.2 (2025-06-17) * **Dependency Update**: Update to smithy-go v1.22.4. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go index 9d7e183d2f..470ddf8010 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go @@ -15,9 +15,7 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/metrics" @@ -65,7 +63,12 @@ func timeOperationMetric[T any]( ctx context.Context, metric string, fn func() (T, error), opts ...metrics.RecordMetricOption, ) (T, error) { - instr := getOperationMetrics(ctx).histogramFor(metric) + mm := getOperationMetrics(ctx) + if mm == nil { // not using the metrics system + return fn() + } + + instr := mm.histogramFor(metric) opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) start := time.Now() @@ -78,7 +81,12 @@ func timeOperationMetric[T any]( } func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { - instr := getOperationMetrics(ctx).histogramFor(metric) + mm := getOperationMetrics(ctx) + if mm == nil { // not using the metrics system + return func() {} + } + + instr := mm.histogramFor(metric) opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) 
var ended bool @@ -106,6 +114,12 @@ func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { type operationMetricsKey struct{} func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + if _, ok := mp.(metrics.NopMeterProvider); ok { + // not using the metrics system - setting up the metrics context is a memory-intensive operation + // so we should skip it in this case + return parent, nil + } + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/ecrpublic") om := &operationMetrics{} @@ -153,7 +167,10 @@ func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Hi } func getOperationMetrics(ctx context.Context) *operationMetrics { - return ctx.Value(operationMetricsKey{}).(*operationMetrics) + if v := ctx.Value(operationMetricsKey{}); v != nil { + return v.(*operationMetrics) + } + return nil } func operationTracer(p tracing.TracerProvider) tracing.Tracer { @@ -420,24 +437,33 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AuthSchemePreference: cfg.AuthSchemePreference, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) resolveAWSRetryMode(cfg, &opts) resolveAWSEndpointResolver(cfg, &opts) + resolveInterceptors(cfg, &opts) resolveUseDualStackEndpoint(cfg, &opts) resolveUseFIPSEndpoint(cfg, &opts) resolveBaseEndpoint(cfg, &opts) - return New(opts, optFns...) 
+ return New(opts, func(o *Options) { + for _, opt := range cfg.ServiceOptions { + opt(ServiceID, o) + } + for _, opt := range optFns { + opt(o) + } + }) } func resolveHTTPClient(o *Options) { @@ -551,6 +577,10 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) } +func resolveInterceptors(cfg aws.Config, o *Options) { + o.Interceptors = cfg.Interceptors.Copy() +} + func addClientUserAgent(stack *middleware.Stack, options Options) error { ua, err := getOrAddRequestUserAgent(stack) if err != nil { @@ -680,10 +710,11 @@ func addIsPaginatorUserAgent(o *Options) { }) } -func addRetry(stack *middleware.Stack, o Options) error { +func addRetry(stack *middleware.Stack, o Options, c *Client) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ecrpublic") + m.ClientSkew = c.timeOffset }) if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { return err @@ -724,25 +755,6 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } -func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} func initializeTimeOffsetResolver(c *Client) { c.timeOffset = 
new(atomic.Int64) } @@ -857,88 +869,62 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { }, "ResolveEndpointV2", middleware.After) } -type spanInitializeStart struct { -} - -func (*spanInitializeStart) ID() string { - return "spanInitializeStart" -} - -func (m *spanInitializeStart) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "Initialize") - - return next.HandleInitialize(ctx, in) -} - -type spanInitializeEnd struct { -} - -func (*spanInitializeEnd) ID() string { - return "spanInitializeEnd" -} - -func (m *spanInitializeEnd) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleInitialize(ctx, in) -} - -type spanBuildRequestStart struct { -} - -func (*spanBuildRequestStart) ID() string { - return "spanBuildRequestStart" -} - -func (m *spanBuildRequestStart) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - middleware.SerializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "BuildRequest") - - return next.HandleSerialize(ctx, in) -} - -type spanBuildRequestEnd struct { -} - -func (*spanBuildRequestEnd) ID() string { - return "spanBuildRequestEnd" -} - -func (m *spanBuildRequestEnd) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - middleware.BuildOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleBuild(ctx, in) -} - -func addSpanInitializeStart(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) -} - -func addSpanInitializeEnd(stack 
*middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) -} - -func addSpanBuildRequestStart(stack *middleware.Stack) error { - return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) -} +func addInterceptBeforeRetryLoop(stack *middleware.Stack, opts Options) error { + return stack.Finalize.Insert(&smithyhttp.InterceptBeforeRetryLoop{ + Interceptors: opts.Interceptors.BeforeRetryLoop, + }, "Retry", middleware.Before) +} + +func addInterceptAttempt(stack *middleware.Stack, opts Options) error { + return stack.Finalize.Insert(&smithyhttp.InterceptAttempt{ + BeforeAttempt: opts.Interceptors.BeforeAttempt, + AfterAttempt: opts.Interceptors.AfterAttempt, + }, "Retry", middleware.After) +} + +func addInterceptors(stack *middleware.Stack, opts Options) error { + // middlewares are expensive, don't add all of these interceptor ones unless the caller + // actually has at least one interceptor configured + // + // at the moment it's all-or-nothing because some of the middlewares here are responsible for + // setting fields in the interceptor context for future ones + if len(opts.Interceptors.BeforeExecution) == 0 && + len(opts.Interceptors.BeforeSerialization) == 0 && len(opts.Interceptors.AfterSerialization) == 0 && + len(opts.Interceptors.BeforeRetryLoop) == 0 && + len(opts.Interceptors.BeforeAttempt) == 0 && + len(opts.Interceptors.BeforeSigning) == 0 && len(opts.Interceptors.AfterSigning) == 0 && + len(opts.Interceptors.BeforeTransmit) == 0 && len(opts.Interceptors.AfterTransmit) == 0 && + len(opts.Interceptors.BeforeDeserialization) == 0 && len(opts.Interceptors.AfterDeserialization) == 0 && + len(opts.Interceptors.AfterAttempt) == 0 && len(opts.Interceptors.AfterExecution) == 0 { + return nil + } -func addSpanBuildRequestEnd(stack *middleware.Stack) error { - return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) + return errors.Join( + stack.Initialize.Add(&smithyhttp.InterceptExecution{ + 
BeforeExecution: opts.Interceptors.BeforeExecution, + AfterExecution: opts.Interceptors.AfterExecution, + }, middleware.Before), + stack.Serialize.Insert(&smithyhttp.InterceptBeforeSerialization{ + Interceptors: opts.Interceptors.BeforeSerialization, + }, "OperationSerializer", middleware.Before), + stack.Serialize.Insert(&smithyhttp.InterceptAfterSerialization{ + Interceptors: opts.Interceptors.AfterSerialization, + }, "OperationSerializer", middleware.After), + stack.Finalize.Insert(&smithyhttp.InterceptBeforeSigning{ + Interceptors: opts.Interceptors.BeforeSigning, + }, "Signing", middleware.Before), + stack.Finalize.Insert(&smithyhttp.InterceptAfterSigning{ + Interceptors: opts.Interceptors.AfterSigning, + }, "Signing", middleware.After), + stack.Deserialize.Add(&smithyhttp.InterceptTransmit{ + BeforeTransmit: opts.Interceptors.BeforeTransmit, + AfterTransmit: opts.Interceptors.AfterTransmit, + }, middleware.After), + stack.Deserialize.Insert(&smithyhttp.InterceptBeforeDeserialization{ + Interceptors: opts.Interceptors.BeforeDeserialization, + }, "OperationDeserializer", middleware.After), // (deserialize stack is called in reverse) + stack.Deserialize.Insert(&smithyhttp.InterceptAfterDeserialization{ + Interceptors: opts.Interceptors.AfterDeserialization, + }, "OperationDeserializer", middleware.Before), + ) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go index 1e2e234bec..02565065ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go @@ -103,7 +103,7 @@ func (c *Client) addOperationBatchCheckLayerAvailabilityMiddlewares(stack *middl if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = 
addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -127,9 +127,6 @@ func (c *Client) addOperationBatchCheckLayerAvailabilityMiddlewares(stack *middl if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -157,16 +154,13 @@ func (c *Client) addOperationBatchCheckLayerAvailabilityMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go index f5b14b6bf0..b2cac5030e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go @@ -104,7 +104,7 @@ func (c *Client) addOperationBatchDeleteImageMiddlewares(stack *middleware.Stack if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -128,9 +128,6 @@ func (c *Client) addOperationBatchDeleteImageMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { 
return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -158,16 +155,13 @@ func (c *Client) addOperationBatchDeleteImageMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go index 41d822b8e9..6a4f57fc32 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go @@ -115,7 +115,7 @@ func (c *Client) addOperationCompleteLayerUploadMiddlewares(stack *middleware.St if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -139,9 +139,6 @@ func (c *Client) addOperationCompleteLayerUploadMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -169,16 +166,13 @@ func (c *Client) addOperationCompleteLayerUploadMiddlewares(stack *middleware.St if err = 
addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go index 8ff307b8ea..0347dac42b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go @@ -102,7 +102,7 @@ func (c *Client) addOperationCreateRepositoryMiddlewares(stack *middleware.Stack if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -126,9 +126,6 @@ func (c *Client) addOperationCreateRepositoryMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -156,16 +153,13 @@ func (c *Client) addOperationCreateRepositoryMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err 
= addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go index afac66ba36..df8d85fcbb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go @@ -94,7 +94,7 @@ func (c *Client) addOperationDeleteRepositoryMiddlewares(stack *middleware.Stack if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -118,9 +118,6 @@ func (c *Client) addOperationDeleteRepositoryMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -148,16 +145,13 @@ func (c *Client) addOperationDeleteRepositoryMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go index 395c5168ac..9a1138543b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go @@ -93,7 +93,7 @@ func (c *Client) addOperationDeleteRepositoryPolicyMiddlewares(stack *middleware if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -117,9 +117,6 @@ func (c *Client) addOperationDeleteRepositoryPolicyMiddlewares(stack *middleware if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -147,16 +144,13 @@ func (c *Client) addOperationDeleteRepositoryPolicyMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go index 78efb512cb..52af5c650f 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go @@ -110,7 +110,7 @@ func (c *Client) addOperationDescribeImageTagsMiddlewares(stack *middleware.Stac if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -134,9 +134,6 @@ func (c *Client) addOperationDescribeImageTagsMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -164,16 +161,13 @@ func (c *Client) addOperationDescribeImageTagsMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go index f4e94d832a..a33da4d650 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go @@ -119,7 +119,7 @@ func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, if err = 
addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -143,9 +143,6 @@ func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -173,16 +170,13 @@ func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go index a82daac2ec..4398037afa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go @@ -105,7 +105,7 @@ func (c *Client) addOperationDescribeRegistriesMiddlewares(stack *middleware.Sta if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -129,9 +129,6 @@ func (c *Client) 
addOperationDescribeRegistriesMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -156,16 +153,13 @@ func (c *Client) addOperationDescribeRegistriesMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go index 6f10382df2..10164e05bf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go @@ -114,7 +114,7 @@ func (c *Client) addOperationDescribeRepositoriesMiddlewares(stack *middleware.S if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -138,9 +138,6 @@ func (c *Client) addOperationDescribeRepositoriesMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); 
err != nil { return err } @@ -165,16 +162,13 @@ func (c *Client) addOperationDescribeRepositoriesMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go index 3857357bc4..65cbb1d6dd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go @@ -80,7 +80,7 @@ func (c *Client) addOperationGetAuthorizationTokenMiddlewares(stack *middleware. if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -104,9 +104,6 @@ func (c *Client) addOperationGetAuthorizationTokenMiddlewares(stack *middleware. if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -131,16 +128,13 @@ func (c *Client) addOperationGetAuthorizationTokenMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go index f9e1f6e3eb..151b2ed423 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go @@ -78,7 +78,7 @@ func (c *Client) addOperationGetRegistryCatalogDataMiddlewares(stack *middleware if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -102,9 +102,6 @@ func (c *Client) addOperationGetRegistryCatalogDataMiddlewares(stack *middleware if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -129,16 +126,13 @@ func (c *Client) addOperationGetRegistryCatalogDataMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err 
!= nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go index 8f8b6b91f2..345c090425 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go @@ -88,7 +88,7 @@ func (c *Client) addOperationGetRepositoryCatalogDataMiddlewares(stack *middlewa if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -112,9 +112,6 @@ func (c *Client) addOperationGetRepositoryCatalogDataMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -142,16 +139,13 @@ func (c *Client) addOperationGetRepositoryCatalogDataMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err 
!= nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go index 4493fd529a..925cae36a8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go @@ -93,7 +93,7 @@ func (c *Client) addOperationGetRepositoryPolicyMiddlewares(stack *middleware.St if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -117,9 +117,6 @@ func (c *Client) addOperationGetRepositoryPolicyMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -147,16 +144,13 @@ func (c *Client) addOperationGetRepositoryPolicyMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go index 0e48a830f5..bb75684937 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go @@ -98,7 +98,7 @@ func (c *Client) addOperationInitiateLayerUploadMiddlewares(stack *middleware.St if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -122,9 +122,6 @@ func (c *Client) addOperationInitiateLayerUploadMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -152,16 +149,13 @@ func (c *Client) addOperationInitiateLayerUploadMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go index 53c4f38418..2a546de844 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go @@ -83,7 +83,7 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St 
if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -107,9 +107,6 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -137,16 +134,13 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go index d160dec595..8b494d47c3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go @@ -114,7 +114,7 @@ func (c *Client) addOperationPutImageMiddlewares(stack *middleware.Stack, option if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -138,9 +138,6 @@ func (c *Client) addOperationPutImageMiddlewares(stack 
*middleware.Stack, option if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -168,16 +165,13 @@ func (c *Client) addOperationPutImageMiddlewares(stack *middleware.Stack, option if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go index 7f3c79e3ac..0cd62438c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go @@ -86,7 +86,7 @@ func (c *Client) addOperationPutRegistryCatalogDataMiddlewares(stack *middleware if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -110,9 +110,6 @@ func (c *Client) addOperationPutRegistryCatalogDataMiddlewares(stack *middleware if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ 
-137,16 +134,13 @@ func (c *Client) addOperationPutRegistryCatalogDataMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go index 1af5ece102..b16abc429c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go @@ -93,7 +93,7 @@ func (c *Client) addOperationPutRepositoryCatalogDataMiddlewares(stack *middlewa if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -117,9 +117,6 @@ func (c *Client) addOperationPutRepositoryCatalogDataMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -147,16 +144,13 @@ func (c *Client) addOperationPutRepositoryCatalogDataMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if 
err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go index 7c0237f992..3e5e0d22da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go @@ -109,7 +109,7 @@ func (c *Client) addOperationSetRepositoryPolicyMiddlewares(stack *middleware.St if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -133,9 +133,6 @@ func (c *Client) addOperationSetRepositoryPolicyMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -163,16 +160,13 @@ func (c *Client) addOperationSetRepositoryPolicyMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = 
addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go index e7d8550c5b..a5340efa20 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go @@ -89,7 +89,7 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -113,9 +113,6 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -143,16 +140,13 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go index 
a0c2f475cd..93443a4cae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go @@ -83,7 +83,7 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -107,9 +107,6 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -137,16 +134,13 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go index cc6712300a..3dde5255fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go @@ -124,7 +124,7 @@ func (c *Client) addOperationUploadLayerPartMiddlewares(stack 
*middleware.Stack, if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -148,9 +148,6 @@ func (c *Client) addOperationUploadLayerPartMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } @@ -178,16 +175,13 @@ func (c *Client) addOperationUploadLayerPartMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { return err } - if err = addSpanBuildRequestStart(stack); err != nil { + if err = addInterceptAttempt(stack, options); err != nil { return err } - if err = addSpanBuildRequestEnd(stack); err != nil { + if err = addInterceptors(stack, options); err != nil { return err } return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/auth.go index 0d60de1a4e..10424d598b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/auth.go @@ -12,10 +12,13 @@ import ( "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" + "slices" + "strings" ) -func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) error { params.Region = options.Region + return 
nil } type setLegacyContextSigningOptionsMiddleware struct { @@ -92,14 +95,16 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) (*AuthResolverParameters, error) { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(ctx, params, input, options) + if err := bindAuthParamsRegion(ctx, params, input, options); err != nil { + return nil, err + } - return params + return params, nil } // AuthSchemeResolver returns a set of possible authentication options for an @@ -150,7 +155,10 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") defer span.End() - params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) + params, err := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) + if err != nil { + return out, metadata, fmt.Errorf("bind auth scheme params: %w", err) + } options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) @@ -169,7 +177,8 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid } func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - for _, option := range options { + sorted := sortAuthOptions(options, m.options.AuthSchemePreference) + for _, option := range sorted { if option.SchemeID == smithyauth.SchemeIDAnonymous { return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true } @@ -188,6 +197,29 @@ func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) return nil, false } +func sortAuthOptions(options []*smithyauth.Option, 
preferred []string) []*smithyauth.Option { + byPriority := make([]*smithyauth.Option, 0, len(options)) + for _, prefName := range preferred { + for _, option := range options { + optName := option.SchemeID + if parts := strings.Split(option.SchemeID, "#"); len(parts) == 2 { + optName = parts[1] + } + if prefName == optName { + byPriority = append(byPriority, option) + } + } + } + for _, option := range options { + if !slices.ContainsFunc(byPriority, func(o *smithyauth.Option) bool { + return o.SchemeID == option.SchemeID + }) { + byPriority = append(byPriority, option) + } + } + return byPriority +} + type resolvedAuthSchemeKey struct{} type resolvedAuthScheme struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go index e8796324de..cb064151e8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go @@ -18,17 +18,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" "io" "strings" - "time" ) -func deserializeS3Expires(v string) (*time.Time, error) { - t, err := smithytime.ParseHTTPDate(v) - if err != nil { - return nil, nil - } - return &t, nil -} - type awsAwsjson11_deserializeOpBatchCheckLayerAvailability struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/endpoints.go index 4af6f11388..97b6dcab15 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/endpoints.go @@ -14,6 +14,7 @@ import ( internalendpoints "github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints" smithyauth "github.com/aws/smithy-go/auth" smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/endpoints/private/rulesfn" "github.com/aws/smithy-go/middleware" 
"github.com/aws/smithy-go/ptr" "github.com/aws/smithy-go/tracing" @@ -217,11 +218,15 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) { } } -func bindRegion(region string) *string { +func bindRegion(region string) (*string, error) { if region == "" { - return nil + return nil, nil + } + if !rulesfn.IsValidHostLabel(region, true) { + return nil, fmt.Errorf("invalid input region %s", region) } - return aws.String(endpoints.MapFIPSRegion(region)) + + return aws.String(endpoints.MapFIPSRegion(region)), nil } // EndpointParameters provides the parameters that influence how endpoints are @@ -328,7 +333,9 @@ func (r *resolver) ResolveEndpoint( return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) } _UseDualStack := *params.UseDualStack + _ = _UseDualStack _UseFIPS := *params.UseFIPS + _ = _UseFIPS if exprVal := params.Endpoint; exprVal != nil { _Endpoint := *exprVal @@ -477,10 +484,15 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) (*EndpointParameters, error) { params := &EndpointParameters{} - params.Region = bindRegion(options.Region) + region, err := bindRegion(options.Region) + if err != nil { + return nil, err + } + params.Region = region + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) params.Endpoint = options.BaseEndpoint @@ -489,7 +501,7 @@ func bindEndpointParams(ctx context.Context, input interface{}, options Options) b.bindEndpointParams(params) } - return params + return params, nil } type resolveEndpointV2Middleware struct { @@ -519,7 +531,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, 
fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + params, err := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + if err != nil { + return out, metadata, fmt.Errorf("failed to bind endpoint params, %w", err) + } endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", func() (smithyendpoints.Endpoint, error) { return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/generated.json index 04e54760c0..8949b7ac0c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/generated.json @@ -41,7 +41,6 @@ "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", "options.go", - "protocol_test.go", "serializers.go", "snapshot_test.go", "sra_operation_order_test.go", @@ -50,7 +49,7 @@ "types/types.go", "validators.go" ], - "go": "1.22", + "go": "1.24", "module": "github.com/aws/aws-sdk-go-v2/service/ecrpublic", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go index 872cb8f5ea..28a81908a3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go @@ -3,4 +3,4 @@ package ecrpublic // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.33.2" +const goModuleVersion = "1.38.13" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go index 78baf149cf..b8f9933663 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go @@ -206,6 +206,13 @@ var defaultPartitions = endpoints.Partitions{ { ID: "aws-eusc", Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "api.ecr-public.{region}.api.amazonwebservices.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, { Variant: endpoints.FIPSVariant, }: { @@ -213,6 +220,13 @@ var defaultPartitions = endpoints.Partitions{ Protocols: []string{"https"}, SignatureVersions: []string{"v4"}, }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "api.ecr-public-fips.{region}.api.amazonwebservices.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, { Variant: 0, }: { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/options.go index c10ee3d0cf..86bab76e7c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/options.go @@ -58,8 +58,7 @@ type Options struct { // the client option BaseEndpoint instead. EndpointResolver EndpointResolver - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. + // Resolves the endpoint used for a particular service operation. EndpointResolverV2 EndpointResolverV2 // Signature Version 4 (SigV4) Signer @@ -119,12 +118,18 @@ type Options struct { // implementation if nil. HTTPClient HTTPClient + // Client registry of operation interceptors. + Interceptors smithyhttp.InterceptorRegistry + // The auth scheme resolver which determines how to authenticate for each // operation. AuthSchemeResolver AuthSchemeResolver // The list of auth schemes supported by the client. 
AuthSchemes []smithyhttp.AuthScheme + + // Priority list of preferred auth scheme names (e.g. sigv4a). + AuthSchemePreference []string } // Copy creates a clone where the APIOptions list is deep copied. @@ -132,6 +137,7 @@ func (o Options) Copy() Options { to := o to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) copy(to.APIOptions, o.APIOptions) + to.Interceptors = o.Interceptors.Copy() return to } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index 6ffbf3fe4a..497d372304 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.13.7 (2026-03-13) + +* **Bug Fix**: Replace usages of the old ioutil/ package throughout the SDK. + +# v1.13.6 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 + +# v1.13.5 (2026-02-23) + +* No change notes available for this release. + # v1.13.4 (2025-12-02) * **Dependency Update**: Upgrade to smithy-go v1.24.0. Notably this version of the library reduces the allocation footprint of the middleware system. We observe a ~10% reduction in allocations per SDK call with this change. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index 970bb210ec..5679a2b2b1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.4" +const goModuleVersion = "1.13.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 743183c8df..7c5e13816e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,25 @@ +# v1.13.21 (2026-03-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.20 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.19 (2026-03-03) + +* **Bug Fix**: Modernize non codegen files with go fix +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.18 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.17 (2026-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.16 (2025-12-08) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index a8a2e692bf..456855e885 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.16" +const goModuleVersion = "1.13.21" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go index 1e2f5c8122..8b25d03874 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go @@ -14,26 +14,26 @@ import ( // presigned URL. type URLPresigner interface { // PresignURL presigns a URL. - PresignURL(ctx context.Context, srcRegion string, params interface{}) (*v4.PresignedHTTPRequest, error) + PresignURL(ctx context.Context, srcRegion string, params any) (*v4.PresignedHTTPRequest, error) } // ParameterAccessor provides an collection of accessor to for retrieving and // setting the values needed to PresignedURL generation type ParameterAccessor struct { // GetPresignedURL accessor points to a function that retrieves a presigned url if present - GetPresignedURL func(interface{}) (string, bool, error) + GetPresignedURL func(any) (string, bool, error) // GetSourceRegion accessor points to a function that retrieves source region for presigned url - GetSourceRegion func(interface{}) (string, bool, error) + GetSourceRegion func(any) (string, bool, error) // CopyInput accessor points to a function that takes in an input, and returns a copy. 
- CopyInput func(interface{}) (interface{}, error) + CopyInput func(any) (any, error) // SetDestinationRegion accessor points to a function that sets destination region on api input struct - SetDestinationRegion func(interface{}, string) error + SetDestinationRegion func(any, string) error // SetPresignedURL accessor points to a function that sets presigned url on api input struct - SetPresignedURL func(interface{}, string) error + SetPresignedURL func(any, string) error } // Options provides the set of options needed by the presigned URL middleware. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/signin/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/signin/CHANGELOG.md index 4d6c08996e..d93bf5e7cc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/signin/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/signin/CHANGELOG.md @@ -1,3 +1,25 @@ +# v1.0.9 (2026-03-26) + +* **Bug Fix**: Fix a bug where a recorded clock skew could persist on the client even if the client and server clock ended up realigning. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.8 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.7 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.6 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.5 (2026-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.0.4 (2025-12-08) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/signin/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/signin/api_client.go index d2db11d2aa..2c0413c16e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/signin/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/signin/api_client.go @@ -15,9 +15,7 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/metrics" @@ -711,10 +709,11 @@ func addIsPaginatorUserAgent(o *Options) { }) } -func addRetry(stack *middleware.Stack, o Options) error { +func addRetry(stack *middleware.Stack, o Options, c *Client) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/signin") + m.ClientSkew = c.timeOffset }) if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { return err @@ -755,25 
+754,6 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } -func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} func initializeTimeOffsetResolver(c *Client) { c.timeOffset = new(atomic.Int64) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/signin/api_op_CreateOAuth2Token.go b/vendor/github.com/aws/aws-sdk-go-v2/service/signin/api_op_CreateOAuth2Token.go index 54ba42422d..dec8656f86 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/signin/api_op_CreateOAuth2Token.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/signin/api_op_CreateOAuth2Token.go @@ -134,7 +134,7 @@ func (c *Client) addOperationCreateOAuth2TokenMiddlewares(stack *middleware.Stac if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -158,9 +158,6 @@ func (c *Client) addOperationCreateOAuth2TokenMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/signin/generated.json 
b/vendor/github.com/aws/aws-sdk-go-v2/service/signin/generated.json index 8014c56167..6043ab63f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/signin/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/signin/generated.json @@ -19,7 +19,6 @@ "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", "options.go", - "protocol_test.go", "serializers.go", "snapshot_test.go", "sra_operation_order_test.go", @@ -28,7 +27,7 @@ "types/types.go", "validators.go" ], - "go": "1.23", + "go": "1.24", "module": "github.com/aws/aws-sdk-go-v2/service/signin", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/signin/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/signin/go_module_metadata.go index 2424c057e8..c922e7adfb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/signin/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/signin/go_module_metadata.go @@ -3,4 +3,4 @@ package signin // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.0.4" +const goModuleVersion = "1.0.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/signin/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/signin/options.go index 3262aa5822..88559705f4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/signin/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/signin/options.go @@ -58,8 +58,7 @@ type Options struct { // the client option BaseEndpoint instead. EndpointResolver EndpointResolver - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. + // Resolves the endpoint used for a particular service operation. 
EndpointResolverV2 EndpointResolverV2 // Signature Version 4 (SigV4) Signer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 4ef304c1f7..697dce1a2e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,33 @@ +# v1.30.14 (2026-03-26) + +* **Bug Fix**: Fix a bug where a recorded clock skew could persist on the client even if the client and server clock ended up realigning. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.13 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.12 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.11 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.10 (2026-02-18) + +* No change notes available for this release. + +# v1.30.9 (2026-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.8 (2025-12-16) + +* No change notes available for this release. 
+ # v1.30.7 (2025-12-08) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go index 8e5a2e77f8..ca5364792a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -15,9 +15,7 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/metrics" @@ -711,10 +709,11 @@ func addIsPaginatorUserAgent(o *Options) { }) } -func addRetry(stack *middleware.Stack, o Options) error { +func addRetry(stack *middleware.Stack, o Options, c *Client) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso") + m.ClientSkew = c.timeOffset }) if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { return err @@ -755,25 +754,6 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } -func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := 
internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} func initializeTimeOffsetResolver(c *Client) { c.timeOffset = new(atomic.Int64) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go index c0b961fcf1..5482b7a032 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -93,7 +93,7 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -117,9 +117,6 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go index f5ca09ac7d..8759d52576 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -98,7 +98,7 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, 
c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -122,9 +122,6 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go index 54511d34a6..fea5b43912 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -97,7 +97,7 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -121,9 +121,6 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go index a21116e96c..84aef7ce5f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -92,7 +92,7 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetry(stack, options); err != nil 
{ + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -116,9 +116,6 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json index 1499c0a959..39a393d441 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json @@ -22,7 +22,6 @@ "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", "options.go", - "protocol_test.go", "serializers.go", "snapshot_test.go", "sra_operation_order_test.go", @@ -30,7 +29,7 @@ "types/types.go", "validators.go" ], - "go": "1.23", + "go": "1.24", "module": "github.com/aws/aws-sdk-go-v2/service/sso", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index 7ad1390e8f..9674e4957b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.30.7" +const goModuleVersion = "1.30.14" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go index bbac359645..9f550c3f1b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -157,6 +157,9 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-east-1", }, }, + endpoints.EndpointKey{ + Region: "ap-east-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-northeast-1", }: endpoints.Endpoint{ @@ -237,6 +240,9 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-5", }, }, + endpoints.EndpointKey{ + Region: "ap-southeast-6", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-southeast-7", }: endpoints.Endpoint{}, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go index 277550af47..8b4e34d064 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go @@ -58,8 +58,7 @@ type Options struct { // the client option BaseEndpoint instead. EndpointResolver EndpointResolver - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. + // Resolves the endpoint used for a particular service operation. EndpointResolverV2 EndpointResolverV2 // Signature Version 4 (SigV4) Signer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 0ac3f8cc4c..2bb4cd8fbb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,29 @@ +# v1.35.18 (2026-03-26) + +* **Bug Fix**: Fix a bug where a recorded clock skew could persist on the client even if the client and server clock ended up realigning. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.17 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.16 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.15 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.14 (2026-02-17) + +* No change notes available for this release. + +# v1.35.13 (2026-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.35.12 (2025-12-08) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go index 8e8508fa34..2c0958ade2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -15,9 +15,7 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/metrics" @@ -711,10 +709,11 @@ func addIsPaginatorUserAgent(o *Options) { }) } -func addRetry(stack *middleware.Stack, o Options) error { +func addRetry(stack *middleware.Stack, o Options, c *Client) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") + m.ClientSkew = c.timeOffset }) if err := 
stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { return err @@ -755,25 +754,6 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } -func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} func initializeTimeOffsetResolver(c *Client) { c.timeOffset = new(atomic.Int64) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go index 3f622dbcb9..cd739d53f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -163,7 +163,7 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -187,9 +187,6 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go index 24cb2fac8d..a02f62a286 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go @@ -210,7 +210,7 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -234,9 +234,6 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go index 14472ee3be..f32e86be9c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -134,7 +134,7 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -158,9 +158,6 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); 
err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go index 92a6854a77..a35750b227 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -116,7 +116,7 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -140,9 +140,6 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json index ee79b48eaa..1e34b9a9d5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json @@ -22,7 +22,6 @@ "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", "options.go", - "protocol_test.go", "serializers.go", "snapshot_test.go", "sra_operation_order_test.go", @@ -31,7 +30,7 @@ "types/types.go", "validators.go" ], - "go": "1.23", + "go": "1.24", "module": "github.com/aws/aws-sdk-go-v2/service/ssooidc", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index 1399651002..2ae8e4e3b8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.35.12" +const goModuleVersion = "1.35.18" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go index 2088fc7fb2..b7c58e2f24 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go @@ -240,6 +240,9 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-5", }, }, + endpoints.EndpointKey{ + Region: "ap-southeast-6", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-southeast-7", }: endpoints.Endpoint{}, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go index f35f3d5a31..c2eac09190 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go @@ -58,8 +58,7 @@ type Options struct { // the client option BaseEndpoint instead. EndpointResolver EndpointResolver - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. + // Resolves the endpoint used for a particular service operation. 
EndpointResolverV2 EndpointResolverV2 // Signature Version 4 (SigV4) Signer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 42c252ba1f..c009086381 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,25 @@ +# v1.41.10 (2026-03-26) + +* **Bug Fix**: Fix a bug where a recorded clock skew could persist on the client even if the client and server clock ended up realigning. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.41.9 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.41.8 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.41.7 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.41.6 (2026-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.41.5 (2025-12-09) * No change notes available for this release. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index 70228d0dfa..c0c6af3a15 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -16,11 +16,9 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/metrics" @@ -715,10 +713,11 @@ func addIsPaginatorUserAgent(o *Options) { }) } -func addRetry(stack *middleware.Stack, o Options) error { +func addRetry(stack *middleware.Stack, o Options, c *Client) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts") + m.ClientSkew = c.timeOffset }) if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { return err @@ -759,25 +758,6 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } -func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -func 
addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} func initializeTimeOffsetResolver(c *Client) { c.timeOffset = new(atomic.Int64) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go index 0ddd3623ae..83aa65a5a2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -448,7 +448,7 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -472,9 +472,6 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go index 15f1dd91d2..520e6e1c61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -383,7 +383,7 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetry(stack, options); err != nil { 
+ if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -407,9 +407,6 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go index 7006eb3b7f..8a164be5be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -400,7 +400,7 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -424,9 +424,6 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go index 009c405583..b52a372dba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go @@ -157,7 +157,7 @@ func (c *Client) addOperationAssumeRootMiddlewares(stack *middleware.Stack, opti if err = 
addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -181,9 +181,6 @@ func (c *Client) addOperationAssumeRootMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go index b00b0c4096..eaeab8a683 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -117,7 +117,7 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -141,9 +141,6 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go index 887bb081f3..2f7adb2f53 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go 
@@ -108,7 +108,7 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -132,9 +132,6 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go index 2c8d886701..f2d4fbc240 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -99,7 +99,7 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -123,9 +123,6 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetDelegatedAccessToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetDelegatedAccessToken.go index 092ec13e3a..78d688acc7 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetDelegatedAccessToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetDelegatedAccessToken.go @@ -97,7 +97,7 @@ func (c *Client) addOperationGetDelegatedAccessTokenMiddlewares(stack *middlewar if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -121,9 +121,6 @@ func (c *Client) addOperationGetDelegatedAccessTokenMiddlewares(stack *middlewar if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go index e0fc9a5484..57b77ebcc3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -321,7 +321,7 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -345,9 +345,6 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go index 2f931f4446..4b4083501d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -170,7 +170,7 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -194,9 +194,6 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetWebIdentityToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetWebIdentityToken.go index 306ee43b1e..7738de5f60 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetWebIdentityToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetWebIdentityToken.go @@ -120,7 +120,7 @@ func (c *Client) addOperationGetWebIdentityTokenMiddlewares(stack *middleware.St if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetry(stack, options); err != nil { + if err = addRetry(stack, options, c); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { @@ -144,9 +144,6 @@ func (c *Client) addOperationGetWebIdentityTokenMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } if err = addUserAgentRetryMode(stack, options); err != nil { return err } diff 
--git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json index e61823ea01..b5556cbfbf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -31,7 +31,6 @@ "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", "options.go", - "protocol_test.go", "serializers.go", "snapshot_test.go", "sra_operation_order_test.go", @@ -39,7 +38,7 @@ "types/types.go", "validators.go" ], - "go": "1.23", + "go": "1.24", "module": "github.com/aws/aws-sdk-go-v2/service/sts", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index c081cdeb30..317746f0fd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.41.5" +const goModuleVersion = "1.41.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go index f60b7d3381..c66e69a8d9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go @@ -58,8 +58,7 @@ type Options struct { // the client option BaseEndpoint instead. EndpointResolver EndpointResolver - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. + // Resolves the endpoint used for a particular service operation. 
EndpointResolverV2 EndpointResolverV2 // Signature Version 4 (SigV4) Signer diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 80af245f08..27fc881232 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,17 @@ +# Release (2026-02-27) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +# Release (2026-02-20) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.24.1 + * **Feature**: Add new middleware functions to get event stream output from middleware + # Release (2025-12-01) ## General Highlights diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md index ddce37b99e..a413ff3d87 100644 --- a/vendor/github.com/aws/smithy-go/README.md +++ b/vendor/github.com/aws/smithy-go/README.md @@ -4,7 +4,7 @@ [Smithy](https://smithy.io/) code generators for Go and the accompanying smithy-go runtime. -The smithy-go runtime requires a minimum version of Go 1.23. +The smithy-go runtime requires a minimum version of Go 1.24. 
**WARNING: All interfaces are subject to change.** @@ -80,7 +80,7 @@ example created from `smithy init`: "service": "example.weather#Weather", "module": "github.com/example/weather", "generateGoMod": true, - "goDirective": "1.23" + "goDirective": "1.24" } } } diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index b6c4c2f51c..dc9dfd0d86 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.24.0" +const goModuleVersion = "1.24.2" diff --git a/vendor/github.com/aws/smithy-go/middleware/eventstream_middleware.go b/vendor/github.com/aws/smithy-go/middleware/eventstream_middleware.go new file mode 100644 index 0000000000..ce523d98a3 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/eventstream_middleware.go @@ -0,0 +1,21 @@ +package middleware + +type eventStreamOutputKey struct{} + +func AddEventStreamOutputToMetadata(metadata *Metadata, output any) { + metadata.Set(eventStreamOutputKey{}, output) +} + +func GetEventStreamOutputToMetadata[T any](metadata *Metadata) (*T, bool) { + val := metadata.Get(eventStreamOutputKey{}) + // not found + if val == nil { + return nil, false + } + // wrong type + res, ok := val.(*T) + if !ok { + return nil, false + } + return res, true +} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/client.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/client.go index d952716d0d..a03c40726c 100644 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/client.go +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/client.go @@ -16,6 +16,7 @@ package api import ( "context" "encoding/base64" + "errors" "fmt" "net/url" "regexp" @@ -28,16 +29,17 @@ import ( 
"github.com/sirupsen/logrus" "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache" + "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config" ) const ( - proxyEndpointScheme = "https://" - programName = "docker-credential-ecr-login" - ecrPublicName = "public.ecr.aws" - ecrPublicEndpoint = proxyEndpointScheme + ecrPublicName + proxyEndpointScheme = "https://" + programName = "docker-credential-ecr-login" + ecrPublicName = "public.ecr.aws" + ecrPublicDualStackName = "ecr-public.aws.com" ) -var ecrPattern = regexp.MustCompile(`^(\d{12})\.dkr[\.\-]ecr(\-fips)?\.([a-zA-Z0-9][a-zA-Z0-9-_]*)\.(amazonaws\.com(?:\.cn)?|on\.(?:aws|amazonwebservices\.com\.cn)|sc2s\.sgov\.gov|c2s\.ic\.gov|cloud\.adc-e\.uk|csp\.hci\.ic\.gov)$`) +var ecrPattern = regexp.MustCompile(`^(\d{12})\.dkr[\.\-]ecr(\-fips)?\.([a-zA-Z0-9][a-zA-Z0-9-_]*)\.(amazonaws\.(?:com(?:\.cn)?|eu)|on\.(?:aws|amazonwebservices\.com\.cn)|sc2s\.sgov\.gov|c2s\.ic\.gov|cloud\.adc-e\.uk|csp\.hci\.ic\.gov)$`) type Service string @@ -52,20 +54,21 @@ type Registry struct { ID string FIPS bool Region string + Name string } // ExtractRegistry returns the ECR registry behind a given service endpoint func ExtractRegistry(input string) (*Registry, error) { - if strings.HasPrefix(input, proxyEndpointScheme) { - input = strings.TrimPrefix(input, proxyEndpointScheme) - } + input = strings.TrimPrefix(input, proxyEndpointScheme) + serverURL, err := url.Parse(proxyEndpointScheme + input) if err != nil { return nil, err } - if serverURL.Hostname() == ecrPublicName { + if serverURL.Hostname() == ecrPublicName || serverURL.Hostname() == ecrPublicDualStackName { return &Registry{ Service: ServiceECRPublic, + Name: serverURL.Hostname(), }, nil } matches := ecrPattern.FindStringSubmatch(serverURL.Hostname()) @@ -110,6 +113,56 @@ type ECRPublicAPI interface { GetAuthorizationToken(context.Context, *ecrpublic.GetAuthorizationTokenInput, ...func(*ecrpublic.Options)) (*ecrpublic.GetAuthorizationTokenOutput, error) } +// 
sanitizeURLError checks if an error contains a url.Error and returns +// a sanitized version with sensitive URL information redacted. +func sanitizeURLError(err error) error { + if err == nil { + return nil + } + + var urlErr *url.Error + if errors.As(err, &urlErr) { + return &url.Error{ + Op: urlErr.Op, + URL: config.RedactURL(urlErr.URL), + Err: urlErr.Err, + } + } + return err +} + +// ecrClientWrapper wraps an ECRAPI and sanitizes url.Error from responses. +type ecrClientWrapper struct { + client ECRAPI +} + +// NewECRClientWrapper creates a new ECRAPI wrapper that sanitizes sensitive +// information from url.Error before returning errors. +func NewECRClientWrapper(client ECRAPI) ECRAPI { + return &ecrClientWrapper{client: client} +} + +func (w *ecrClientWrapper) GetAuthorizationToken(ctx context.Context, input *ecr.GetAuthorizationTokenInput, opts ...func(*ecr.Options)) (*ecr.GetAuthorizationTokenOutput, error) { + output, err := w.client.GetAuthorizationToken(ctx, input, opts...) + return output, sanitizeURLError(err) +} + +// ecrPublicClientWrapper wraps an ECRPublicAPI and sanitizes url.Error from responses. +type ecrPublicClientWrapper struct { + client ECRPublicAPI +} + +// NewECRPublicClientWrapper creates a new ECRPublicAPI wrapper that sanitizes +// sensitive information from url.Error before returning errors. +func NewECRPublicClientWrapper(client ECRPublicAPI) ECRPublicAPI { + return &ecrPublicClientWrapper{client: client} +} + +func (w *ecrPublicClientWrapper) GetAuthorizationToken(ctx context.Context, input *ecrpublic.GetAuthorizationTokenInput, opts ...func(*ecrpublic.Options)) (*ecrpublic.GetAuthorizationTokenOutput, error) { + output, err := w.client.GetAuthorizationToken(ctx, input, opts...) 
+ return output, sanitizeURLError(err) +} + // GetCredentials returns username, password, and proxyEndpoint func (c *defaultClient) GetCredentials(serverURL string) (*Auth, error) { registry, err := ExtractRegistry(serverURL) @@ -121,12 +174,13 @@ func (c *defaultClient) GetCredentials(serverURL string) (*Auth, error) { WithField("registry", registry.ID). WithField("region", registry.Region). WithField("serverURL", serverURL). + WithField("name", registry.Name). Debug("Retrieving credentials") switch registry.Service { case ServiceECR: return c.GetCredentialsByRegistryID(registry.ID) case ServiceECRPublic: - return c.GetPublicCredentials() + return c.GetPublicCredentials(registry.Name) } return nil, fmt.Errorf("unknown service %q", registry.Service) } @@ -157,11 +211,11 @@ func (c *defaultClient) GetCredentialsByRegistryID(registryID string) (*Auth, er return auth, err } -func (c *defaultClient) GetPublicCredentials() (*Auth, error) { +func (c *defaultClient) GetPublicCredentials(registry string) (*Auth, error) { cachedEntry := c.credentialCache.GetPublic() if cachedEntry != nil { if cachedEntry.IsValid(time.Now()) { - logrus.WithField("registry", ecrPublicName).Debug("Using cached token") + logrus.WithField("registry", registry).Debug("Using cached token") return extractToken(cachedEntry.AuthorizationToken, cachedEntry.ProxyEndpoint) } logrus. @@ -170,7 +224,7 @@ func (c *defaultClient) GetPublicCredentials() (*Auth, error) { Debug("Cached token is no longer valid") } - auth, err := c.getPublicAuthorizationToken() + auth, err := c.getPublicAuthorizationToken(registry) // if we have a cached token, fall back to avoid failing the request. This may result an expired token // being returned, but if there is a 500 or timeout from the service side, we'd like to attempt to re-use an // old token. We invalidate tokens prior to their expiration date to help mitigate this scenario. 
@@ -187,7 +241,7 @@ func (c *defaultClient) ListCredentials() ([]*Auth, error) { if err != nil { logrus.WithError(err).Debug("couldn't get authorization token for default registry") } - _, err = c.GetPublicCredentials() + _, err = c.GetPublicCredentials(ecrPublicName) if err != nil { logrus.WithError(err).Debug("couldn't get authorization token for public registry") } @@ -256,7 +310,7 @@ func (c *defaultClient) getAuthorizationToken(registryID string) (*Auth, error) return nil, fmt.Errorf("No AuthorizationToken found for %s", registryID) } -func (c *defaultClient) getPublicAuthorizationToken() (*Auth, error) { +func (c *defaultClient) getPublicAuthorizationToken(registry string) (*Auth, error) { var input *ecrpublic.GetAuthorizationTokenInput output, err := c.ecrPublicClient.GetAuthorizationToken(context.TODO(), input) @@ -266,8 +320,9 @@ func (c *defaultClient) getPublicAuthorizationToken() (*Auth, error) { if output == nil || output.AuthorizationData == nil { return nil, fmt.Errorf("ecr: missing AuthorizationData in ECR Public response") } + endpoint := ecrPublicEndpoint(registry) authData := output.AuthorizationData - token, err := extractToken(aws.ToString(authData.AuthorizationToken), ecrPublicEndpoint) + token, err := extractToken(aws.ToString(authData.AuthorizationToken), endpoint) if err != nil { return nil, err } @@ -275,10 +330,10 @@ func (c *defaultClient) getPublicAuthorizationToken() (*Auth, error) { AuthorizationToken: aws.ToString(authData.AuthorizationToken), RequestedAt: time.Now(), ExpiresAt: aws.ToTime(authData.ExpiresAt), - ProxyEndpoint: ecrPublicEndpoint, + ProxyEndpoint: endpoint, Service: cache.ServiceECRPublic, } - c.credentialCache.Set(ecrPublicName, &authEntry) + c.credentialCache.Set(registry, &authEntry) return token, nil } @@ -299,3 +354,7 @@ func extractToken(token string, proxyEndpoint string) (*Auth, error) { ProxyEndpoint: proxyEndpoint, }, nil } + +func ecrPublicEndpoint(registry string) string { + return proxyEndpointScheme + 
registry +} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/factory.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/factory.go index 58626d4e07..afbbca7760 100644 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/factory.go +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/factory.go @@ -100,8 +100,8 @@ func (defaultClientFactory DefaultClientFactory) NewClientWithOptions(opts Optio publicConfig := opts.Config.Copy() publicConfig.Region = "us-east-1" return &defaultClient{ - ecrClient: ecr.NewFromConfig(opts.Config), - ecrPublicClient: ecrpublic.NewFromConfig(publicConfig), + ecrClient: NewECRClientWrapper(ecr.NewFromConfig(opts.Config)), + ecrPublicClient: NewECRPublicClientWrapper(ecrpublic.NewFromConfig(publicConfig)), credentialCache: cache.BuildCredentialsCache(opts.Config, opts.CacheDir), } } diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/build.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/build.go index 8647378cca..4ff22b091d 100644 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/build.go +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/build.go @@ -16,9 +16,11 @@ package cache import ( "context" "crypto/md5" + "crypto/sha256" "encoding/base64" "fmt" "os" + "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/mitchellh/go-homedir" @@ -52,7 +54,21 @@ func BuildCredentialsCache(config aws.Config, cacheDir string) CredentialsCache return NewNullCredentialsCache() } - return NewFileCredentialsCache(cacheDir, cacheFilename, credentialsCachePrefix(config.Region, credentials), credentialsPublicCacheKey(credentials)) + // In FIPS mode, skip legacy MD5-based cache keys + var legacyPrefix, legacyPublicKey string + if !isFipsMode() { + legacyPrefix = legacyCredentialsCachePrefix(config.Region, credentials) + legacyPublicKey = 
legacyCredentialsPublicCacheKey(credentials) + } + + return NewFileCredentialsCache( + cacheDir, + cacheFilename, + credentialsCachePrefix(config.Region, credentials), + credentialsPublicCacheKey(credentials), + legacyPrefix, + legacyPublicKey, + ) } // Determine a key prefix for a credentials cache. Because auth tokens are scoped to an account and region, rely on provided @@ -65,8 +81,42 @@ func credentialsPublicCacheKey(credentials aws.Credentials) string { return fmt.Sprintf("%s-%s", ServiceECRPublic, checksum(credentials.AccessKeyID)) } -// Base64 encodes an MD5 checksum. Relied on for uniqueness, and not for cryptographic security. +// Legacy cache key functions for backward compatibility with MD5-based keys +func legacyCredentialsCachePrefix(region string, credentials aws.Credentials) string { + return fmt.Sprintf("%s-%s-", region, md5Checksum(credentials.AccessKeyID)) +} + +func legacyCredentialsPublicCacheKey(credentials aws.Credentials) string { + return fmt.Sprintf("%s-%s", ServiceECRPublic, md5Checksum(credentials.AccessKeyID)) +} + +// Base64 encodes a SHA-256 checksum. Used for uniqueness, not cryptographic security. func checksum(text string) string { + hasher := sha256.New() + hasher.Write([]byte(text)) + data := hasher.Sum(nil) + return base64.StdEncoding.EncodeToString(data) +} + +// isFipsMode checks if GODEBUG=fips140=on or GODEBUG=fips140=only is set +func isFipsMode() bool { + godebug := os.Getenv("GODEBUG") + if godebug == "" { + return false + } + + for _, setting := range strings.Split(godebug, ",") { + trimmed := strings.TrimSpace(setting) + if trimmed == "fips140=on" || trimmed == "fips140=only" { + return true + } + } + return false +} + +// Deprecated: Use checksum for new cache entries. +// Note: This function will panic if GODEBUG=fips140=only is set. 
+func md5Checksum(text string) string { hasher := md5.New() data := hasher.Sum([]byte(text)) return base64.StdEncoding.EncodeToString(data) diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/file.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/file.go index 81fcf0f285..feced1e36e 100644 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/file.go +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/file.go @@ -30,10 +30,12 @@ type RegistryCache struct { } type fileCredentialCache struct { - path string - filename string - cachePrefixKey string - publicCacheKey string + path string + filename string + cachePrefixKey string + publicCacheKey string + legacyCachePrefixKey string + legacyPublicCacheKey string } func newRegistryCache() *RegistryCache { @@ -49,29 +51,68 @@ func newRegistryCache() *RegistryCache { // in the same directory where the cache is serialized and deserialized. // // cachePrefixKey is used for scoping credentials for a given credential cache (i.e. region and -// accessKey). -func NewFileCredentialsCache(path string, filename string, cachePrefixKey string, publicCacheKey string) CredentialsCache { +// accessKey). legacyCachePrefixKey and legacyPublicCacheKey are used for backward compatibility +// with MD5-based cache keys. 
+func NewFileCredentialsCache(path string, filename string, cachePrefixKey string, publicCacheKey string, legacyCachePrefixKey string, legacyPublicCacheKey string) CredentialsCache { if _, err := os.Stat(path); err != nil { os.MkdirAll(path, 0700) } return &fileCredentialCache{ - path: path, - filename: filename, - cachePrefixKey: cachePrefixKey, - publicCacheKey: publicCacheKey, + path: path, + filename: filename, + cachePrefixKey: cachePrefixKey, + publicCacheKey: publicCacheKey, + legacyCachePrefixKey: legacyCachePrefixKey, + legacyPublicCacheKey: legacyPublicCacheKey, } } func (f *fileCredentialCache) Get(registry string) *AuthEntry { logrus.WithField("registry", registry).Debug("Checking file cache") registryCache := f.init() - return registryCache.Registries[f.cachePrefixKey+registry] + + entry := registryCache.Registries[f.cachePrefixKey+registry] + if entry != nil { + return entry + } + + if isFipsMode() { + logrus.WithField("registry", registry).Debug("FIPS mode enabled, skipping legacy MD5 cache lookup") + return nil + } + + legacyEntry := registryCache.Registries[f.legacyCachePrefixKey+registry] + if legacyEntry != nil { + logrus.WithField("registry", registry).Debug("Found cached credentials using legacy MD5 key") + return legacyEntry + } + + logrus.WithField("registry", registry).Debug("Credentials not found") + return nil } func (f *fileCredentialCache) GetPublic() *AuthEntry { logrus.Debug("Checking file cache for ECR Public") registryCache := f.init() - return registryCache.Registries[f.publicCacheKey] + + entry := registryCache.Registries[f.publicCacheKey] + if entry != nil { + return entry + } + + if isFipsMode() { + logrus.Debug("FIPS mode enabled, skipping legacy MD5 cache lookup for ECR Public") + return nil + } + + legacyEntry := registryCache.Registries[f.legacyPublicCacheKey] + if legacyEntry != nil { + logrus.Debug("Found cached ECR Public credentials using legacy MD5 key") + return legacyEntry + } + + logrus.WithField("registry", 
"public").Debug("Credentials not found") + return nil } func (f *fileCredentialCache) Set(registry string, entry *AuthEntry) { diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/log.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/log.go index b5082570a7..1414f9d0c0 100644 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/log.go +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/log.go @@ -27,6 +27,9 @@ func SetupLogger() { } func logrusConfig() { + // Add URL redactor hook to sanitize sensitive URL information in logs + logrus.AddHook(&URLRedactorHook{}) + logdir, err := homedir.Expand(GetCacheDir() + "/log") if err != nil { fmt.Fprintf(os.Stderr, "log: failed to find directory: %v", err) diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/url_redactor.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/url_redactor.go new file mode 100644 index 0000000000..66258b162c --- /dev/null +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/url_redactor.go @@ -0,0 +1,102 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package config + +import ( + "errors" + "net/url" + + "github.com/sirupsen/logrus" +) + +// URLRedactorHook is a logrus hook that sanitizes URL fields in log entries +// to prevent leaking sensitive information like userinfo (passwords) and query parameters. +type URLRedactorHook struct{} + +// Levels returns all log levels that this hook should be applied to +func (hook *URLRedactorHook) Levels() []logrus.Level { + return logrus.AllLevels +} + +// Fire is called when a log event is fired. It sanitizes the serverURL field +// and error fields that may contain URLs. +func (hook *URLRedactorHook) Fire(entry *logrus.Entry) error { + // Redact serverURL field + if value, exists := entry.Data["serverURL"]; exists { + if strValue, ok := value.(string); ok { + entry.Data["serverURL"] = RedactURL(strValue) + } + } + + // Redact URLs in error field + if value, exists := entry.Data[logrus.ErrorKey]; exists { + if err, ok := value.(error); ok { + entry.Data[logrus.ErrorKey] = RedactURLFromError(err) + } + } + + return nil +} + +// RedactURL redacts sensitive information from a URL string including: +// - Password in userinfo (user:password@host) +// - Query parameter values +// Returns the original string unchanged if it's not a valid URL. +func RedactURL(rawURL string) string { + if rawURL == "" { + return rawURL + } + + parsed, err := url.ParseRequestURI(rawURL) + if err != nil { + return rawURL + } + + return redactParsedURL(parsed) +} + +// redactParsedURL redacts password and query parameters from a parsed URL +func redactParsedURL(parsed *url.URL) string { + if parsed.User != nil { + if _, hasPassword := parsed.User.Password(); hasPassword { + parsed.User = url.UserPassword(parsed.User.Username(), "xxxxx") + } + } + + if query := parsed.Query(); len(query) > 0 { + for k := range query { + query.Set(k, "redacted") + } + parsed.RawQuery = query.Encode() + } + + return parsed.String() +} + +// RedactURLFromError redacts URL query parameter values from url.Error. 
+// This handles cases where HTTP errors contain URLs with sensitive query parameters. +// Returns the original error if it's not a url.Error or cannot be parsed. +func RedactURLFromError(err error) error { + var urlErr *url.Error + + if err != nil && errors.As(err, &urlErr) { + parsedURL, urlParseErr := url.Parse(urlErr.URL) + if urlParseErr == nil && parsedURL.Scheme != "" && parsedURL.Host != "" { + urlErr.URL = redactParsedURL(parsedURL) + return urlErr + } + } + + return err +} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/ecr.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/ecr.go index 5fbbd0eca9..8f2ef4eb62 100644 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/ecr.go +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/ecr.go @@ -22,6 +22,7 @@ import ( "github.com/sirupsen/logrus" "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api" + "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config" "github.com/docker/docker-credential-helpers/credentials" ) @@ -50,6 +51,7 @@ func WithLogger(w io.Writer) Option { return func(e *ECRHelper) { logger := logrus.New() logger.Out = w + logger.AddHook(&config.URLRedactorHook{}) e.logger = logger } } diff --git a/vendor/github.com/buildkite/agent/v3/api/BUILD.bazel b/vendor/github.com/buildkite/agent/v3/api/BUILD.bazel index 507469e307..b50d5bc3ca 100644 --- a/vendor/github.com/buildkite/agent/v3/api/BUILD.bazel +++ b/vendor/github.com/buildkite/agent/v3/api/BUILD.bazel @@ -17,17 +17,22 @@ go_library( "meta_data.go", "oidc.go", "pings.go", + "pings_streaming.go", "pipelines.go", "retryable.go", "secrets.go", "steps.go", + "token.go", "uuid.go", ], importpath = "github.com/buildkite/agent/v3/api", visibility = ["//visibility:public"], deps = [ + "//api/proto/gen", + "//api/proto/gen/agentedgev1connect", "//internal/agenthttp", "//logger", + "@com_connectrpc_connect//:connect", 
"@com_github_buildkite_go_pipeline//:go-pipeline", "@com_github_buildkite_roko//:roko", "@com_github_google_go_querystring//query", @@ -40,6 +45,7 @@ go_test( srcs = [ "api_internal_test.go", "client_internal_test.go", + "client_private_test.go", "client_test.go", "oidc_test.go", "secrets_test.go", diff --git a/vendor/github.com/buildkite/agent/v3/api/annotations.go b/vendor/github.com/buildkite/agent/v3/api/annotations.go index 90f5649e57..2871d75bae 100644 --- a/vendor/github.com/buildkite/agent/v3/api/annotations.go +++ b/vendor/github.com/buildkite/agent/v3/api/annotations.go @@ -3,6 +3,7 @@ package api import ( "context" "fmt" + "net/url" ) // Annotation represents a Buildkite Agent API Annotation @@ -12,6 +13,7 @@ type Annotation struct { Style string `json:"style,omitempty"` Append bool `json:"append,omitempty"` Priority int `json:"priority,omitempty"` + Scope string `json:"scope,omitempty"` } // Annotate a build in the Buildkite UI @@ -27,7 +29,7 @@ func (c *Client) Annotate(ctx context.Context, jobId string, annotation *Annotat } // Remove an annotation from a build -func (c *Client) AnnotationRemove(ctx context.Context, jobId string, context string) (*Response, error) { +func (c *Client) AnnotationRemove(ctx context.Context, jobId, context, scope string) (*Response, error) { u := fmt.Sprintf("jobs/%s/annotations/%s", railsPathEscape(jobId), railsPathEscape(context)) req, err := c.newRequest(ctx, "DELETE", u, nil) @@ -35,5 +37,13 @@ func (c *Client) AnnotationRemove(ctx context.Context, jobId string, context str return nil, err } + q, err := url.ParseQuery(req.URL.RawQuery) + if err != nil { + return nil, fmt.Errorf("decoding query string: %w", err) + } + + q.Set("scope", scope) + req.URL.RawQuery = q.Encode() + return c.doRequest(req, nil) } diff --git a/vendor/github.com/buildkite/agent/v3/api/client.go b/vendor/github.com/buildkite/agent/v3/api/client.go index a4d2e329f0..8891f57be8 100644 --- a/vendor/github.com/buildkite/agent/v3/api/client.go +++ 
b/vendor/github.com/buildkite/agent/v3/api/client.go @@ -1,7 +1,5 @@ package api -//go:generate go run github.com/rjeczalik/interfaces/cmd/interfacer@v0.3.0 -for github.com/buildkite/agent/v3/api.Client -as agent.APIClient -o ../agent/api.go - import ( "bytes" "context" @@ -309,7 +307,6 @@ func newResponse(r *http.Response) *Response { // interface, the raw response body will be written to v, without attempting to // first decode it. func (c *Client) doRequest(req *http.Request, v any) (*Response, error) { - resp, err := agenthttp.Do(c.logger, c.client, req, agenthttp.WithDebugHTTP(c.conf.DebugHTTP), agenthttp.WithTraceHTTP(c.conf.TraceHTTP), @@ -393,7 +390,7 @@ func checkResponse(r *http.Response) error { // be a struct whose fields may contain "url" tags. func addOptions(s string, opt any) (string, error) { v := reflect.ValueOf(opt) - if v.Kind() == reflect.Ptr && v.IsNil() { + if v.Kind() == reflect.Pointer && v.IsNil() { return s, nil } @@ -411,7 +408,7 @@ func addOptions(s string, opt any) (string, error) { return u.String(), nil } -func joinURLPath(endpoint string, path string) string { +func joinURLPath(endpoint, path string) string { return strings.TrimRight(endpoint, "/") + "/" + strings.TrimLeft(path, "/") } diff --git a/vendor/github.com/buildkite/agent/v3/api/github_code_access_token.go b/vendor/github.com/buildkite/agent/v3/api/github_code_access_token.go index 2ec224b29d..b7a369521b 100644 --- a/vendor/github.com/buildkite/agent/v3/api/github_code_access_token.go +++ b/vendor/github.com/buildkite/agent/v3/api/github_code_access_token.go @@ -54,7 +54,6 @@ func (c *Client) GenerateGithubCodeAccessToken(ctx context.Context, repoURL, job return resp, err }) - if err != nil { return "", resp, err } diff --git a/vendor/github.com/buildkite/agent/v3/api/jobs.go b/vendor/github.com/buildkite/agent/v3/api/jobs.go index 4f932c625c..17f1a93daa 100644 --- a/vendor/github.com/buildkite/agent/v3/api/jobs.go +++ b/vendor/github.com/buildkite/agent/v3/api/jobs.go @@ 
-9,23 +9,24 @@ import ( // Job represents a Buildkite Agent API Job type Job struct { - ID string `json:"id,omitempty"` - Endpoint string `json:"endpoint"` - State string `json:"state,omitempty"` - Env map[string]string `json:"env,omitempty"` - Step pipeline.CommandStep `json:"step,omitempty"` - MatrixPermutation pipeline.MatrixPermutation `json:"matrix_permutation,omitempty"` - ChunksMaxSizeBytes uint64 `json:"chunks_max_size_bytes,omitempty"` - LogMaxSizeBytes uint64 `json:"log_max_size_bytes,omitempty"` - Token string `json:"token,omitempty"` - ExitStatus string `json:"exit_status,omitempty"` - Signal string `json:"signal,omitempty"` - SignalReason string `json:"signal_reason,omitempty"` - StartedAt string `json:"started_at,omitempty"` - FinishedAt string `json:"finished_at,omitempty"` - RunnableAt string `json:"runnable_at,omitempty"` - ChunksFailedCount int `json:"chunks_failed_count,omitempty"` - TraceParent string `json:"traceparent"` + ID string `json:"id,omitempty"` + Endpoint string `json:"endpoint"` + State string `json:"state,omitempty"` + Env map[string]string `json:"env,omitempty"` + Step pipeline.CommandStep `json:"step"` + MatrixPermutation pipeline.MatrixPermutation `json:"matrix_permutation,omitempty"` + ChunksMaxSizeBytes uint64 `json:"chunks_max_size_bytes,omitempty"` + ChunksIntervalSeconds int `json:"chunks_interval_seconds,omitempty"` + LogMaxSizeBytes uint64 `json:"log_max_size_bytes,omitempty"` + Token string `json:"token,omitempty"` + ExitStatus string `json:"exit_status,omitempty"` + Signal string `json:"signal,omitempty"` + SignalReason string `json:"signal_reason,omitempty"` + StartedAt string `json:"started_at,omitempty"` + FinishedAt string `json:"finished_at,omitempty"` + RunnableAt string `json:"runnable_at,omitempty"` + ChunksFailedCount int `json:"chunks_failed_count,omitempty"` + TraceParent string `json:"traceparent"` } type JobState struct { @@ -84,8 +85,8 @@ func (c *Client) AcquireJob(ctx context.Context, id string, headers 
...Header) ( // AcceptJob accepts the passed in job. Returns the job with its finalized set of // environment variables (when a job is accepted, the agents environment is // applied to the job) -func (c *Client) AcceptJob(ctx context.Context, job *Job) (*Job, *Response, error) { - u := fmt.Sprintf("jobs/%s/accept", railsPathEscape(job.ID)) +func (c *Client) AcceptJob(ctx context.Context, jobID string) (*Job, *Response, error) { + u := fmt.Sprintf("jobs/%s/accept", railsPathEscape(jobID)) req, err := c.newRequest(ctx, "PUT", u, nil) if err != nil { @@ -133,3 +134,26 @@ func (c *Client) FinishJob(ctx context.Context, job *Job, ignoreAgentInDispatche return c.doRequest(req, nil) } + +// JobUpdateResponse is the response from updating a job +type JobUpdateResponse struct { + ID string `json:"id"` +} + +// UpdateJob updates mutable attributes on a job +func (c *Client) UpdateJob(ctx context.Context, id string, attrs map[string]string) (*JobUpdateResponse, *Response, error) { + u := fmt.Sprintf("jobs/%s", railsPathEscape(id)) + + req, err := c.newRequest(ctx, "PUT", u, attrs) + if err != nil { + return nil, nil, err + } + + j := new(JobUpdateResponse) + resp, err := c.doRequest(req, j) + if err != nil { + return nil, resp, err + } + + return j, resp, err +} diff --git a/vendor/github.com/buildkite/agent/v3/api/oidc.go b/vendor/github.com/buildkite/agent/v3/api/oidc.go index b28378894e..3a798e8b04 100644 --- a/vendor/github.com/buildkite/agent/v3/api/oidc.go +++ b/vendor/github.com/buildkite/agent/v3/api/oidc.go @@ -15,6 +15,7 @@ type OIDCTokenRequest struct { Lifetime int Claims []string AWSSessionTags []string + SubjectClaim string } func (c *Client) OIDCToken(ctx context.Context, methodReq *OIDCTokenRequest) (*OIDCToken, *Response, error) { @@ -23,11 +24,13 @@ func (c *Client) OIDCToken(ctx context.Context, methodReq *OIDCTokenRequest) (*O Lifetime int `json:"lifetime,omitempty"` Claims []string `json:"claims,omitempty"` AWSSessionTags []string 
`json:"aws_session_tags,omitempty"` + SubjectClaim string `json:"subject_claim,omitempty"` }{ Audience: methodReq.Audience, Lifetime: methodReq.Lifetime, Claims: methodReq.Claims, AWSSessionTags: methodReq.AWSSessionTags, + SubjectClaim: methodReq.SubjectClaim, } u := fmt.Sprintf("jobs/%s/oidc/tokens", railsPathEscape(methodReq.Job)) diff --git a/vendor/github.com/buildkite/agent/v3/api/pings_streaming.go b/vendor/github.com/buildkite/agent/v3/api/pings_streaming.go new file mode 100644 index 0000000000..f591680a30 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/pings_streaming.go @@ -0,0 +1,72 @@ +package api + +import ( + "context" + "fmt" + "iter" + "net/url" + + "connectrpc.com/connect" + agentedgev1 "github.com/buildkite/agent/v3/api/proto/gen" + "github.com/buildkite/agent/v3/api/proto/gen/agentedgev1connect" +) + +// StreamPings opens a ConnectRPC channel for streaming pings. It returns an +// iterator over received messages and any error that occurs. +func (c *Client) StreamPings(ctx context.Context, agentID string, opts ...connect.ClientOption) (iter.Seq2[*agentedgev1.StreamPingsResponse, error], error) { + // The streaming endpoint is the same as the main endpoint, + // minus the `/v3/`. + u, err := url.Parse(c.conf.Endpoint) + if err != nil { + return nil, fmt.Errorf("parsing endpoint: %w", err) + } + u.Path = "/" + + cl := agentedgev1connect.NewAgentEdgeServiceClient( + c.client, + u.String(), + connect.WithGRPC(), + connect.WithClientOptions(opts...), + ) + + // In order to set request headers, we need to tweak a value set in the + // context. To me, this feels too much like burying optional parameters + // in a context, which I think is bad - https://pkg.go.dev/context says: + // "Use context Values only for request-scoped data that transits processes + // and APIs, not for passing optional parameters to functions." 
+ ctx, callInfo := connect.NewClientContext(ctx) + h := callInfo.RequestHeader() + + // Add any request headers specified by the server during register/ping + for k, values := range c.requestHeaders { + for _, v := range values { + h.Add(k, v) + } + } + + // The Authorization header is added by the custom transport. + // Other methods add User-Agent in newRequest. + // Note that this does not set the entire header. + // ConnectRPC takes our value here and adds its own component *before* our + // own, which violates the convention of decreasing importance + // (see RFC 7231 section 5.5.3). + h.Set("User-Agent", c.conf.UserAgent) + stream, err := cl.StreamPings(ctx, connect.NewRequest(&agentedgev1.StreamPingsRequest{ + AgentId: agentID, + })) + if err != nil { + return nil, fmt.Errorf("from StreamPings: %w", err) + } + + return func(yield func(*agentedgev1.StreamPingsResponse, error) bool) { + defer stream.Close() //nolint:errcheck // Best-effort cleanup + for stream.Receive() { + if !yield(stream.Msg(), nil) { + return + } + } + if err := stream.Err(); err != nil { + yield(nil, err) + } + }, nil +} diff --git a/vendor/github.com/buildkite/agent/v3/api/proto/gen/BUILD.bazel b/vendor/github.com/buildkite/agent/v3/api/proto/gen/BUILD.bazel new file mode 100644 index 0000000000..b261951747 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/proto/gen/BUILD.bazel @@ -0,0 +1,13 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "gen", + srcs = ["agentedge.pb.go"], + importpath = "github.com/buildkite/agent/v3/api/proto/gen", + visibility = ["//visibility:public"], + deps = [ + "@build_buf_gen_go_bufbuild_protovalidate_protocolbuffers_go//buf/validate", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//runtime/protoimpl", + ], +) diff --git a/vendor/github.com/buildkite/agent/v3/api/proto/gen/agentedge.pb.go b/vendor/github.com/buildkite/agent/v3/api/proto/gen/agentedge.pb.go new file mode 100644 index 
0000000000..08c33aebac --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/proto/gen/agentedge.pb.go @@ -0,0 +1,488 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: agentedge.proto + +package agentedgev1 + +import ( + _ "buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/buf/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StreamPingsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamPingsRequest) Reset() { + *x = StreamPingsRequest{} + mi := &file_agentedge_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamPingsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamPingsRequest) ProtoMessage() {} + +func (x *StreamPingsRequest) ProtoReflect() protoreflect.Message { + mi := &file_agentedge_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamPingsRequest.ProtoReflect.Descriptor instead. 
+func (*StreamPingsRequest) Descriptor() ([]byte, []int) { + return file_agentedge_proto_rawDescGZIP(), []int{0} +} + +func (x *StreamPingsRequest) GetAgentId() string { + if x != nil { + return x.AgentId + } + return "" +} + +type StreamPingsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Action: + // + // *StreamPingsResponse_Resume + // *StreamPingsResponse_Pause + // *StreamPingsResponse_Disconnect + // *StreamPingsResponse_JobAssigned + Action isStreamPingsResponse_Action `protobuf_oneof:"action"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamPingsResponse) Reset() { + *x = StreamPingsResponse{} + mi := &file_agentedge_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamPingsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamPingsResponse) ProtoMessage() {} + +func (x *StreamPingsResponse) ProtoReflect() protoreflect.Message { + mi := &file_agentedge_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamPingsResponse.ProtoReflect.Descriptor instead. 
+func (*StreamPingsResponse) Descriptor() ([]byte, []int) { + return file_agentedge_proto_rawDescGZIP(), []int{1} +} + +func (x *StreamPingsResponse) GetAction() isStreamPingsResponse_Action { + if x != nil { + return x.Action + } + return nil +} + +func (x *StreamPingsResponse) GetResume() *ResumeAction { + if x != nil { + if x, ok := x.Action.(*StreamPingsResponse_Resume); ok { + return x.Resume + } + } + return nil +} + +func (x *StreamPingsResponse) GetPause() *PauseAction { + if x != nil { + if x, ok := x.Action.(*StreamPingsResponse_Pause); ok { + return x.Pause + } + } + return nil +} + +func (x *StreamPingsResponse) GetDisconnect() *DisconnectAction { + if x != nil { + if x, ok := x.Action.(*StreamPingsResponse_Disconnect); ok { + return x.Disconnect + } + } + return nil +} + +func (x *StreamPingsResponse) GetJobAssigned() *JobAssignedAction { + if x != nil { + if x, ok := x.Action.(*StreamPingsResponse_JobAssigned); ok { + return x.JobAssigned + } + } + return nil +} + +type isStreamPingsResponse_Action interface { + isStreamPingsResponse_Action() +} + +type StreamPingsResponse_Resume struct { + Resume *ResumeAction `protobuf:"bytes,2,opt,name=resume,proto3,oneof"` +} + +type StreamPingsResponse_Pause struct { + Pause *PauseAction `protobuf:"bytes,3,opt,name=pause,proto3,oneof"` +} + +type StreamPingsResponse_Disconnect struct { + Disconnect *DisconnectAction `protobuf:"bytes,4,opt,name=disconnect,proto3,oneof"` +} + +type StreamPingsResponse_JobAssigned struct { + JobAssigned *JobAssignedAction `protobuf:"bytes,5,opt,name=job_assigned,json=jobAssigned,proto3,oneof"` +} + +func (*StreamPingsResponse_Resume) isStreamPingsResponse_Action() {} + +func (*StreamPingsResponse_Pause) isStreamPingsResponse_Action() {} + +func (*StreamPingsResponse_Disconnect) isStreamPingsResponse_Action() {} + +func (*StreamPingsResponse_JobAssigned) isStreamPingsResponse_Action() {} + +type ResumeAction struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields 
protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResumeAction) Reset() { + *x = ResumeAction{} + mi := &file_agentedge_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResumeAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResumeAction) ProtoMessage() {} + +func (x *ResumeAction) ProtoReflect() protoreflect.Message { + mi := &file_agentedge_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResumeAction.ProtoReflect.Descriptor instead. +func (*ResumeAction) Descriptor() ([]byte, []int) { + return file_agentedge_proto_rawDescGZIP(), []int{2} +} + +type PauseAction struct { + state protoimpl.MessageState `protogen:"open.v1"` + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PauseAction) Reset() { + *x = PauseAction{} + mi := &file_agentedge_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PauseAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PauseAction) ProtoMessage() {} + +func (x *PauseAction) ProtoReflect() protoreflect.Message { + mi := &file_agentedge_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PauseAction.ProtoReflect.Descriptor instead. 
+func (*PauseAction) Descriptor() ([]byte, []int) { + return file_agentedge_proto_rawDescGZIP(), []int{3} +} + +func (x *PauseAction) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +type DisconnectAction struct { + state protoimpl.MessageState `protogen:"open.v1"` + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DisconnectAction) Reset() { + *x = DisconnectAction{} + mi := &file_agentedge_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DisconnectAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DisconnectAction) ProtoMessage() {} + +func (x *DisconnectAction) ProtoReflect() protoreflect.Message { + mi := &file_agentedge_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DisconnectAction.ProtoReflect.Descriptor instead. 
+func (*DisconnectAction) Descriptor() ([]byte, []int) { + return file_agentedge_proto_rawDescGZIP(), []int{4} +} + +func (x *DisconnectAction) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +type JobAssignedAction struct { + state protoimpl.MessageState `protogen:"open.v1"` + Job *Job `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *JobAssignedAction) Reset() { + *x = JobAssignedAction{} + mi := &file_agentedge_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *JobAssignedAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JobAssignedAction) ProtoMessage() {} + +func (x *JobAssignedAction) ProtoReflect() protoreflect.Message { + mi := &file_agentedge_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JobAssignedAction.ProtoReflect.Descriptor instead. 
+func (*JobAssignedAction) Descriptor() ([]byte, []int) { + return file_agentedge_proto_rawDescGZIP(), []int{5} +} + +func (x *JobAssignedAction) GetJob() *Job { + if x != nil { + return x.Job + } + return nil +} + +type Job struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Job) Reset() { + *x = Job{} + mi := &file_agentedge_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Job) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Job) ProtoMessage() {} + +func (x *Job) ProtoReflect() protoreflect.Message { + mi := &file_agentedge_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Job.ProtoReflect.Descriptor instead. 
+func (*Job) Descriptor() ([]byte, []int) { + return file_agentedge_proto_rawDescGZIP(), []int{6} +} + +func (x *Job) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +var File_agentedge_proto protoreflect.FileDescriptor + +const file_agentedge_proto_rawDesc = "" + + "\n" + + "\x0fagentedge.proto\x12\fagentedge.v1\x1a\x1bbuf/validate/validate.proto\"/\n" + + "\x12StreamPingsRequest\x12\x19\n" + + "\bagent_id\x18\x01 \x01(\tR\aagentId\"\x90\x02\n" + + "\x13StreamPingsResponse\x124\n" + + "\x06resume\x18\x02 \x01(\v2\x1a.agentedge.v1.ResumeActionH\x00R\x06resume\x121\n" + + "\x05pause\x18\x03 \x01(\v2\x19.agentedge.v1.PauseActionH\x00R\x05pause\x12@\n" + + "\n" + + "disconnect\x18\x04 \x01(\v2\x1e.agentedge.v1.DisconnectActionH\x00R\n" + + "disconnect\x12D\n" + + "\fjob_assigned\x18\x05 \x01(\v2\x1f.agentedge.v1.JobAssignedActionH\x00R\vjobAssignedB\b\n" + + "\x06action\"\x0e\n" + + "\fResumeAction\"%\n" + + "\vPauseAction\x12\x16\n" + + "\x06reason\x18\x01 \x01(\tR\x06reason\"*\n" + + "\x10DisconnectAction\x12\x16\n" + + "\x06reason\x18\x01 \x01(\tR\x06reason\"8\n" + + "\x11JobAssignedAction\x12#\n" + + "\x03job\x18\x01 \x01(\v2\x11.agentedge.v1.JobR\x03job\"\x15\n" + + "\x03Job\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id2j\n" + + "\x10AgentEdgeService\x12V\n" + + "\vStreamPings\x12 .agentedge.v1.StreamPingsRequest\x1a!.agentedge.v1.StreamPingsResponse\"\x000\x01B\xac\x01\n" + + "\x10com.agentedge.v1B\x0eAgentedgeProtoP\x01Z7github.com/buildkite/agent/v3/api/proto/gen;agentedgev1\xa2\x02\x03AXX\xaa\x02\fAgentedge.V1\xca\x02\fAgentedge\\V1\xe2\x02\x18Agentedge\\V1\\GPBMetadata\xea\x02\rAgentedge::V1b\x06proto3" + +var ( + file_agentedge_proto_rawDescOnce sync.Once + file_agentedge_proto_rawDescData []byte +) + +func file_agentedge_proto_rawDescGZIP() []byte { + file_agentedge_proto_rawDescOnce.Do(func() { + file_agentedge_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agentedge_proto_rawDesc), 
len(file_agentedge_proto_rawDesc))) + }) + return file_agentedge_proto_rawDescData +} + +var file_agentedge_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_agentedge_proto_goTypes = []any{ + (*StreamPingsRequest)(nil), // 0: agentedge.v1.StreamPingsRequest + (*StreamPingsResponse)(nil), // 1: agentedge.v1.StreamPingsResponse + (*ResumeAction)(nil), // 2: agentedge.v1.ResumeAction + (*PauseAction)(nil), // 3: agentedge.v1.PauseAction + (*DisconnectAction)(nil), // 4: agentedge.v1.DisconnectAction + (*JobAssignedAction)(nil), // 5: agentedge.v1.JobAssignedAction + (*Job)(nil), // 6: agentedge.v1.Job +} +var file_agentedge_proto_depIdxs = []int32{ + 2, // 0: agentedge.v1.StreamPingsResponse.resume:type_name -> agentedge.v1.ResumeAction + 3, // 1: agentedge.v1.StreamPingsResponse.pause:type_name -> agentedge.v1.PauseAction + 4, // 2: agentedge.v1.StreamPingsResponse.disconnect:type_name -> agentedge.v1.DisconnectAction + 5, // 3: agentedge.v1.StreamPingsResponse.job_assigned:type_name -> agentedge.v1.JobAssignedAction + 6, // 4: agentedge.v1.JobAssignedAction.job:type_name -> agentedge.v1.Job + 0, // 5: agentedge.v1.AgentEdgeService.StreamPings:input_type -> agentedge.v1.StreamPingsRequest + 1, // 6: agentedge.v1.AgentEdgeService.StreamPings:output_type -> agentedge.v1.StreamPingsResponse + 6, // [6:7] is the sub-list for method output_type + 5, // [5:6] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_agentedge_proto_init() } +func file_agentedge_proto_init() { + if File_agentedge_proto != nil { + return + } + file_agentedge_proto_msgTypes[1].OneofWrappers = []any{ + (*StreamPingsResponse_Resume)(nil), + (*StreamPingsResponse_Pause)(nil), + (*StreamPingsResponse_Disconnect)(nil), + (*StreamPingsResponse_JobAssigned)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_agentedge_proto_rawDesc), len(file_agentedge_proto_rawDesc)), + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_agentedge_proto_goTypes, + DependencyIndexes: file_agentedge_proto_depIdxs, + MessageInfos: file_agentedge_proto_msgTypes, + }.Build() + File_agentedge_proto = out.File + file_agentedge_proto_goTypes = nil + file_agentedge_proto_depIdxs = nil +} diff --git a/vendor/github.com/buildkite/agent/v3/api/proto/gen/agentedgev1connect/BUILD.bazel b/vendor/github.com/buildkite/agent/v3/api/proto/gen/agentedgev1connect/BUILD.bazel new file mode 100644 index 0000000000..0b9767e103 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/proto/gen/agentedgev1connect/BUILD.bazel @@ -0,0 +1,12 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "agentedgev1connect", + srcs = ["agentedge.connect.go"], + importpath = "github.com/buildkite/agent/v3/api/proto/gen/agentedgev1connect", + visibility = ["//visibility:public"], + deps = [ + "//api/proto/gen", + "@com_connectrpc_connect//:connect", + ], +) diff --git a/vendor/github.com/buildkite/agent/v3/api/proto/gen/agentedgev1connect/agentedge.connect.go b/vendor/github.com/buildkite/agent/v3/api/proto/gen/agentedgev1connect/agentedge.connect.go new file mode 100644 index 0000000000..f2738d669f --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/proto/gen/agentedgev1connect/agentedge.connect.go @@ -0,0 +1,109 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. +// +// Source: agentedge.proto + +package agentedgev1connect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + gen "github.com/buildkite/agent/v3/api/proto/gen" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. 
If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // AgentEdgeServiceName is the fully-qualified name of the AgentEdgeService service. + AgentEdgeServiceName = "agentedge.v1.AgentEdgeService" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // AgentEdgeServiceStreamPingsProcedure is the fully-qualified name of the AgentEdgeService's + // StreamPings RPC. + AgentEdgeServiceStreamPingsProcedure = "/agentedge.v1.AgentEdgeService/StreamPings" +) + +// AgentEdgeServiceClient is a client for the agentedge.v1.AgentEdgeService service. +type AgentEdgeServiceClient interface { + StreamPings(context.Context, *connect.Request[gen.StreamPingsRequest]) (*connect.ServerStreamForClient[gen.StreamPingsResponse], error) +} + +// NewAgentEdgeServiceClient constructs a client for the agentedge.v1.AgentEdgeService service. By +// default, it uses the Connect protocol with the binary Protobuf Codec, asks for gzipped responses, +// and sends uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the +// connect.WithGRPC() or connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). 
+func NewAgentEdgeServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) AgentEdgeServiceClient { + baseURL = strings.TrimRight(baseURL, "/") + agentEdgeServiceMethods := gen.File_agentedge_proto.Services().ByName("AgentEdgeService").Methods() + return &agentEdgeServiceClient{ + streamPings: connect.NewClient[gen.StreamPingsRequest, gen.StreamPingsResponse]( + httpClient, + baseURL+AgentEdgeServiceStreamPingsProcedure, + connect.WithSchema(agentEdgeServiceMethods.ByName("StreamPings")), + connect.WithClientOptions(opts...), + ), + } +} + +// agentEdgeServiceClient implements AgentEdgeServiceClient. +type agentEdgeServiceClient struct { + streamPings *connect.Client[gen.StreamPingsRequest, gen.StreamPingsResponse] +} + +// StreamPings calls agentedge.v1.AgentEdgeService.StreamPings. +func (c *agentEdgeServiceClient) StreamPings(ctx context.Context, req *connect.Request[gen.StreamPingsRequest]) (*connect.ServerStreamForClient[gen.StreamPingsResponse], error) { + return c.streamPings.CallServerStream(ctx, req) +} + +// AgentEdgeServiceHandler is an implementation of the agentedge.v1.AgentEdgeService service. +type AgentEdgeServiceHandler interface { + StreamPings(context.Context, *connect.Request[gen.StreamPingsRequest], *connect.ServerStream[gen.StreamPingsResponse]) error +} + +// NewAgentEdgeServiceHandler builds an HTTP handler from the service implementation. It returns the +// path on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. 
+func NewAgentEdgeServiceHandler(svc AgentEdgeServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { + agentEdgeServiceMethods := gen.File_agentedge_proto.Services().ByName("AgentEdgeService").Methods() + agentEdgeServiceStreamPingsHandler := connect.NewServerStreamHandler( + AgentEdgeServiceStreamPingsProcedure, + svc.StreamPings, + connect.WithSchema(agentEdgeServiceMethods.ByName("StreamPings")), + connect.WithHandlerOptions(opts...), + ) + return "/agentedge.v1.AgentEdgeService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case AgentEdgeServiceStreamPingsProcedure: + agentEdgeServiceStreamPingsHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedAgentEdgeServiceHandler returns CodeUnimplemented from all methods. +type UnimplementedAgentEdgeServiceHandler struct{} + +func (UnimplementedAgentEdgeServiceHandler) StreamPings(context.Context, *connect.Request[gen.StreamPingsRequest], *connect.ServerStream[gen.StreamPingsResponse]) error { + return connect.NewError(connect.CodeUnimplemented, errors.New("agentedge.v1.AgentEdgeService.StreamPings is not implemented")) +} diff --git a/vendor/github.com/buildkite/agent/v3/api/retryable.go b/vendor/github.com/buildkite/agent/v3/api/retryable.go index beb5f2d811..4666477aa3 100644 --- a/vendor/github.com/buildkite/agent/v3/api/retryable.go +++ b/vendor/github.com/buildkite/agent/v3/api/retryable.go @@ -5,7 +5,6 @@ import ( "net" "net/http" "net/url" - "slices" "strings" "syscall" ) @@ -20,17 +19,17 @@ var retrableErrorSuffixes = []string{ io.EOF.Error(), } -var retryableStatuses = []int{ - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 +var retryableStatuses = map[int]bool{ + http.StatusTooManyRequests: true, // 429 + http.StatusInternalServerError: true, // 500 + http.StatusBadGateway: true, 
// 502 + http.StatusServiceUnavailable: true, // 503 + http.StatusGatewayTimeout: true, // 504 } // IsRetryableStatus returns true if the response's StatusCode is one that we should retry. func IsRetryableStatus(r *Response) bool { - return r.StatusCode >= 400 && slices.Contains(retryableStatuses, r.StatusCode) + return retryableStatuses[r.StatusCode] } // Looks at a bunch of connection related errors, and returns true if the error diff --git a/vendor/github.com/buildkite/agent/v3/api/token.go b/vendor/github.com/buildkite/agent/v3/api/token.go new file mode 100644 index 0000000000..3f3918736e --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/token.go @@ -0,0 +1,35 @@ +package api + +import ( + "context" + "net/http" +) + +// AgentTokenIdentity describes token identity information. +type AgentTokenIdentity struct { + UUID string `json:"uuid"` + Description string `json:"description"` + TokenType string `json:"token_type"` + OrganizationSlug string `json:"organization_slug"` + OrganizationUUID string `json:"organization_uuid"` + ClusterUUID string `json:"cluster_uuid"` + ClusterName string `json:"cluster_name"` + OrganizationQueueUUID string `json:"organization_queue_uuid"` + OrganizationQueueKey string `json:"organization_queue_key"` +} + +// GetTokenIdentity gets the identity information of an agent token. 
+func (c *Client) GetTokenIdentity(ctx context.Context) (*AgentTokenIdentity, *Response, error) { + req, err := c.newRequest(ctx, http.MethodGet, "token", nil) + if err != nil { + return nil, nil, err + } + + ident := new(AgentTokenIdentity) + resp, err := c.doRequest(req, ident) + if err != nil { + return nil, resp, err + } + + return ident, resp, nil +} diff --git a/vendor/github.com/buildkite/agent/v3/internal/agenthttp/client.go b/vendor/github.com/buildkite/agent/v3/internal/agenthttp/client.go index 6cead16292..95669365c2 100644 --- a/vendor/github.com/buildkite/agent/v3/internal/agenthttp/client.go +++ b/vendor/github.com/buildkite/agent/v3/internal/agenthttp/client.go @@ -78,6 +78,13 @@ func newTransport(conf *clientConfig) *http.Transport { } if conf.AllowHTTP2 { + // Ensure that we send HTTP/2 PING frames to help keep streaming connections (like streaming pings) open + // Pings will also ensure that stale connections get properly cleaned up + if transport.HTTP2 == nil { + transport.HTTP2 = &http.HTTP2Config{} + } + transport.HTTP2.SendPingTimeout = 10 * time.Second + transport.HTTP2.PingTimeout = 5 * time.Second // There is a bug in http2 on Linux regarding using dead connections. // This is a workaround. See https://github.com/golang/go/issues/59690 // @@ -86,16 +93,13 @@ func newTransport(conf *clientConfig) *http.Transport { // HTTP/1.1 as a protocol, so we get slightly odd-looking code where // we use `transport` later on instead of the just-returned `tr2`. // tr2 is needed merely to configure the http2 option. - tr2, err := http2.ConfigureTransports(transport) + _, err := http2.ConfigureTransports(transport) if err != nil { // ConfigureTransports is documented to only return an error if // the transport arg was already HTTP2-enabled, which it should not // have been... 
panic("http2.ConfigureTransports: " + err.Error()) } - if tr2 != nil { - tr2.ReadIdleTimeout = 30 * time.Second - } } else { transport.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) // The default TLSClientConfig has h2 in NextProtos, so the diff --git a/vendor/github.com/buildkite/agent/v3/logger/buffer.go b/vendor/github.com/buildkite/agent/v3/logger/buffer.go index 84480b7517..e120aacfc4 100644 --- a/vendor/github.com/buildkite/agent/v3/logger/buffer.go +++ b/vendor/github.com/buildkite/agent/v3/logger/buffer.go @@ -26,31 +26,37 @@ func (b *Buffer) Debug(format string, v ...any) { defer b.mu.Unlock() b.Messages = append(b.Messages, "[debug] "+fmt.Sprintf(format, v...)) } + func (b *Buffer) Error(format string, v ...any) { b.mu.Lock() defer b.mu.Unlock() b.Messages = append(b.Messages, "[error] "+fmt.Sprintf(format, v...)) } + func (b *Buffer) Fatal(format string, v ...any) { b.mu.Lock() defer b.mu.Unlock() b.Messages = append(b.Messages, "[fatal] "+fmt.Sprintf(format, v...)) } + func (b *Buffer) Notice(format string, v ...any) { b.mu.Lock() defer b.mu.Unlock() b.Messages = append(b.Messages, "[notice] "+fmt.Sprintf(format, v...)) } + func (b *Buffer) Warn(format string, v ...any) { b.mu.Lock() defer b.mu.Unlock() b.Messages = append(b.Messages, "[warn] "+fmt.Sprintf(format, v...)) } + func (b *Buffer) Info(format string, v ...any) { b.mu.Lock() defer b.mu.Unlock() b.Messages = append(b.Messages, "[info] "+fmt.Sprintf(format, v...)) } + func (b *Buffer) WithFields(fields ...Field) Logger { return b } diff --git a/vendor/github.com/buildkite/agent/v3/logger/init_windows.go b/vendor/github.com/buildkite/agent/v3/logger/init_windows.go index 168f2c2c4f..5796321b0e 100644 --- a/vendor/github.com/buildkite/agent/v3/logger/init_windows.go +++ b/vendor/github.com/buildkite/agent/v3/logger/init_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package logger diff --git a/vendor/github.com/buildkite/agent/v3/logger/log.go 
b/vendor/github.com/buildkite/agent/v3/logger/log.go index 98b5dcc3d7..d27e4194be 100644 --- a/vendor/github.com/buildkite/agent/v3/logger/log.go +++ b/vendor/github.com/buildkite/agent/v3/logger/log.go @@ -144,7 +144,7 @@ func (l *TextPrinter) Print(level Level, msg string, fields Fields) { now := time.Now().Format(DateFormat) var line string - var prefix string + var prefix strings.Builder var fieldStrs []string if l.IsPrefixFn != nil { @@ -155,7 +155,7 @@ func (l *TextPrinter) Print(level Level, msg string, fields Fields) { } // Allow some fields to be shown as prefixes if l.IsPrefixFn(f) { - prefix += f.String() + prefix.WriteString(f.String()) } } } @@ -180,9 +180,9 @@ func (l *TextPrinter) Print(level Level, msg string, fields Fields) { messageColor = red } - if prefix != "" { + if prefix.String() != "" { line = fmt.Sprintf("\x1b[%sm%s %-6s\x1b[0m \x1b[%sm%s\x1b[0m \x1b[%sm%s\x1b[0m", - levelColor, now, level, lightgray, prefix, messageColor, msg) + levelColor, now, level, lightgray, prefix.String(), messageColor, msg) } else { line = fmt.Sprintf("\x1b[%sm%s %-6s\x1b[0m \x1b[%sm%s\x1b[0m", levelColor, now, level, messageColor, msg) @@ -199,8 +199,8 @@ func (l *TextPrinter) Print(level Level, msg string, fields Fields) { fieldColor, field.Key(), messageColor, field.String())) } } else { - if prefix != "" { - line = fmt.Sprintf("%s %-6s %s %s", now, level, prefix, msg) + if prefix.String() != "" { + line = fmt.Sprintf("%s %-6s %s %s", now, level, prefix.String(), msg) } else { line = fmt.Sprintf("%s %-6s %s", now, level, msg) } diff --git a/vendor/github.com/buildkite/agent/v3/version/VERSION b/vendor/github.com/buildkite/agent/v3/version/VERSION index 2e00703aad..93213a28f7 100644 --- a/vendor/github.com/buildkite/agent/v3/version/VERSION +++ b/vendor/github.com/buildkite/agent/v3/version/VERSION @@ -1 +1 @@ -3.104.0 +3.121.0 diff --git a/vendor/github.com/buildkite/go-pipeline/pipeline.go b/vendor/github.com/buildkite/go-pipeline/pipeline.go index 
a535d49a65..ec8f0f0b8f 100644 --- a/vendor/github.com/buildkite/go-pipeline/pipeline.go +++ b/vendor/github.com/buildkite/go-pipeline/pipeline.go @@ -13,8 +13,9 @@ import ( // // Standard caveats apply - see the package comment. type Pipeline struct { - Steps Steps `yaml:"steps"` - Env *ordered.MapSS `yaml:"env,omitempty"` + Steps Steps `yaml:"steps"` + Env *ordered.MapSS `yaml:"env,omitempty"` + Secrets Secrets `yaml:"secrets,omitempty"` // RemainingFields stores any other top-level mapping items so they at least // survive an unmarshal-marshal round-trip. diff --git a/vendor/github.com/buildkite/go-pipeline/secret.go b/vendor/github.com/buildkite/go-pipeline/secret.go new file mode 100644 index 0000000000..8b5f437879 --- /dev/null +++ b/vendor/github.com/buildkite/go-pipeline/secret.go @@ -0,0 +1,43 @@ +package pipeline + +import ( + "encoding/json" + "fmt" +) + +var ( + _ interface { + json.Marshaler + selfInterpolater + } = (*Secret)(nil) +) + +// Secret represents a pipeline secret configuration. +type Secret struct { + Key string `json:"key" yaml:"key"` + EnvironmentVariable string `json:"environment_variable,omitempty" yaml:"environment_variable,omitempty"` + RemainingFields map[string]any `yaml:",inline"` +} + +// MarshalJSON marshals the secret to JSON. 
+func (s Secret) MarshalJSON() ([]byte, error) { + return inlineFriendlyMarshalJSON(s) +} + +func (s *Secret) interpolate(tf stringTransformer) error { + key, err := tf.Transform(s.Key) + if err != nil { + return fmt.Errorf("interpolating secret key: %w", err) + } + s.Key = key + + if s.EnvironmentVariable != "" { + envVar, err := tf.Transform(s.EnvironmentVariable) + if err != nil { + return fmt.Errorf("interpolating environment variable: %w", err) + } + s.EnvironmentVariable = envVar + } + + return nil +} diff --git a/vendor/github.com/buildkite/go-pipeline/secrets.go b/vendor/github.com/buildkite/go-pipeline/secrets.go new file mode 100644 index 0000000000..23d6522522 --- /dev/null +++ b/vendor/github.com/buildkite/go-pipeline/secrets.go @@ -0,0 +1,141 @@ +package pipeline + +import ( + "encoding/json" + "fmt" + + "github.com/buildkite/go-pipeline/ordered" + "gopkg.in/yaml.v3" +) + +var _ interface { + json.Unmarshaler + ordered.Unmarshaler + yaml.Marshaler +} = (*Secrets)(nil) + +// Secrets is a sequence of secrets. It is useful for unmarshaling. +type Secrets []Secret + +// UnmarshalOrdered unmarshals Secrets from []any (sequence of secret names). 
+func (s *Secrets) UnmarshalOrdered(o any) error { + switch o := o.(type) { + case nil: + // `secrets: null` is invalid - should be omitted entirely or use valid formats + return fmt.Errorf("unmarshaling secrets: secrets cannot be null") + + case *ordered.Map[string, any]: + // Handle map syntax: {"ENV_VAR": "SECRET_KEY"} + return o.Range(func(envVar string, secretKeyVal any) error { + secretKey, ok := secretKeyVal.(string) + if !ok { + return fmt.Errorf("unmarshaling secrets: secret key must be a string, but was %T", secretKeyVal) + } + if secretKey == "" { + return fmt.Errorf("unmarshaling secrets: secret key cannot be empty") + } + if envVar == "" { + return fmt.Errorf("unmarshaling secrets: environment variable name cannot be empty") + } + + secret := Secret{ + Key: secretKey, + EnvironmentVariable: envVar, + } + *s = append(*s, secret) + return nil + }) + + case []any: + for _, c := range o { + switch ct := c.(type) { + case string: + secret := Secret{ + Key: ct, + EnvironmentVariable: ct, // Default EnvironmentVariable to key value for simple string format + } + *s = append(*s, secret) + + case *ordered.Map[string, interface{}]: + // Backend sends ordered.Map format + secret := Secret{} + + keyVal, _ := ct.Get("key") + key, _ := keyVal.(string) + if key == "" { + return fmt.Errorf("unmarshaling secret: key must be a non-empty string, but was %[1]T %[1]v", keyVal) + } + secret.Key = key + + if envVarVal, _ := ct.Get("environment_variable"); envVarVal != nil { + envVar, ok := envVarVal.(string) + if !ok { + return fmt.Errorf("unmarshaling secret: environment_variable must be a string, but was %T", envVarVal) + } + secret.EnvironmentVariable = envVar + } + + *s = append(*s, secret) + + default: + return fmt.Errorf("unmarshaling secrets: secret type %T, want string, map[string]any, or *ordered.Map", c) + } + } + + default: + return fmt.Errorf("unmarshaling secrets: got %T, want []any or map[string]any", o) + } + + return nil +} + +// MergeWith merges these 
secrets with another set of secrets, with the other secrets taking precedence. +// Deduplication is performed based on the EnvironmentVariable field. +func (s Secrets) MergeWith(other Secrets) Secrets { + if len(s) == 0 { + return other + } + if len(other) == 0 { + return s + } + + // Create a map to track environment variables we've seen for deduplication + seen := make(map[string]bool) + var result Secrets + + for _, secret := range other { + if secret.EnvironmentVariable != "" && !seen[secret.EnvironmentVariable] { + result = append(result, secret) + seen[secret.EnvironmentVariable] = true + } + } + + for _, secret := range s { + if secret.EnvironmentVariable != "" && !seen[secret.EnvironmentVariable] { + result = append(result, secret) + seen[secret.EnvironmentVariable] = true + } + } + + return result +} + +// UnmarshalJSON is used for JSON unmarshaling. +func (s *Secrets) UnmarshalJSON(b []byte) error { + // JSON is just a specific kind of YAML. + var n yaml.Node + if err := yaml.Unmarshal(b, &n); err != nil { + return err + } + return ordered.Unmarshal(&n, &s) +} + +func (s Secrets) MarshalYAML() (any, error) { + if len(s) == 0 { + return nil, nil + } + + result := make([]Secret, len(s)) + copy(result, s) + return result, nil +} diff --git a/vendor/github.com/buildkite/go-pipeline/step_command.go b/vendor/github.com/buildkite/go-pipeline/step_command.go index b49ed4c2d0..29e6922543 100644 --- a/vendor/github.com/buildkite/go-pipeline/step_command.go +++ b/vendor/github.com/buildkite/go-pipeline/step_command.go @@ -33,6 +33,7 @@ type CommandStep struct { // Fields that are meaningful specifically for command steps Command string `yaml:"command"` Plugins Plugins `yaml:"plugins,omitempty"` + Secrets Secrets `yaml:"secrets,omitempty"` Env map[string]string `yaml:"env,omitempty"` Signature *Signature `yaml:"signature,omitempty"` Matrix *Matrix `yaml:"matrix,omitempty"` @@ -99,7 +100,7 @@ func (c *CommandStep) InterpolateMatrixPermutation(mp MatrixPermutation) 
error { func (c *CommandStep) interpolate(tf stringTransformer) error { // Fields that are interpolated with env vars and matrix tokens: - // command, plugins + // command, plugins, secrets if err := interpolateString(tf, &c.Command); err != nil { return fmt.Errorf("interpolating command: %w", err) } @@ -109,6 +110,9 @@ func (c *CommandStep) interpolate(tf stringTransformer) error { if err := interpolateSlice(tf, c.Plugins); err != nil { return fmt.Errorf("interpolating plugins: %w", err) } + if err := interpolateSlice(tf, c.Secrets); err != nil { + return fmt.Errorf("interpolating secrets: %w", err) + } switch tf.(type) { case envInterpolator: @@ -141,4 +145,10 @@ func (c *CommandStep) interpolate(tf stringTransformer) error { return nil } +// MergeSecretsFromPipeline merges pipeline-level secrets with this step's secrets. +// Step-level secrets take precedence over pipeline-level secrets for deduplication. +func (c *CommandStep) MergeSecretsFromPipeline(pipelineSecrets Secrets) { + c.Secrets = pipelineSecrets.MergeWith(c.Secrets) +} + func (CommandStep) stepTag() {} diff --git a/vendor/github.com/cert-manager/cert-manager/LICENSES b/vendor/github.com/cert-manager/cert-manager/LICENSES index dfd67cbfae..d6e7b21152 100644 --- a/vendor/github.com/cert-manager/cert-manager/LICENSES +++ b/vendor/github.com/cert-manager/cert-manager/LICENSES @@ -44,12 +44,13 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore,MIT github.com/Azure/azure-sdk-for-go/sdk/azidentity,MIT github.com/Azure/azure-sdk-for-go/sdk/internal,MIT github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns,MIT +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns,MIT github.com/Azure/go-ntlmssp,MIT github.com/AzureAD/microsoft-authentication-library-for-go/apps,MIT github.com/Khan/genqlient/graphql,MIT github.com/NYTimes/gziphandler,Apache-2.0 github.com/Venafi/vcert/v5,Apache-2.0 -github.com/akamai/AkamaiOPEN-edgegrid-golang/v12/pkg,Apache-2.0 
+github.com/akamai/AkamaiOPEN-edgegrid-golang/v13/pkg,Apache-2.0 github.com/antlr4-go/antlr/v4,BSD-3-Clause github.com/aws/aws-sdk-go-v2,Apache-2.0 github.com/aws/aws-sdk-go-v2/config,Apache-2.0 @@ -62,6 +63,7 @@ github.com/aws/aws-sdk-go-v2/internal/sync/singleflight,BSD-3-Clause github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding,Apache-2.0 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url,Apache-2.0 github.com/aws/aws-sdk-go-v2/service/route53,Apache-2.0 +github.com/aws/aws-sdk-go-v2/service/signin,Apache-2.0 github.com/aws/aws-sdk-go-v2/service/sso,Apache-2.0 github.com/aws/aws-sdk-go-v2/service/ssooidc,Apache-2.0 github.com/aws/aws-sdk-go-v2/service/sts,Apache-2.0 @@ -171,11 +173,15 @@ go.opentelemetry.io/auto/sdk,Apache-2.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc,Apache-2.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp,Apache-2.0 go.opentelemetry.io/otel,Apache-2.0 +go.opentelemetry.io/otel,BSD-3-Clause go.opentelemetry.io/otel/exporters/otlp/otlptrace,Apache-2.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc,Apache-2.0 go.opentelemetry.io/otel/metric,Apache-2.0 +go.opentelemetry.io/otel/metric,BSD-3-Clause go.opentelemetry.io/otel/sdk,Apache-2.0 +go.opentelemetry.io/otel/sdk,BSD-3-Clause go.opentelemetry.io/otel/trace,Apache-2.0 +go.opentelemetry.io/otel/trace,BSD-3-Clause go.opentelemetry.io/proto/otlp,Apache-2.0 go.uber.org/multierr,MIT go.uber.org/ratelimit,MIT diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types.go index edfc16f1e6..8e1375f161 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types.go @@ -34,6 +34,15 @@ const ( // solver for each ingress class. 
ACMECertificateHTTP01IngressClassOverride = "acme.cert-manager.io/http01-override-ingress-class" + // ACMECertificateHTTP01IngressClassNameOverride is annotation to override ingressClassName. + // If this annotation is specified on a Certificate or Order resource when + // using the HTTP01 solver type, the ingress.ingressClassName field of the HTTP01 + // solver's configuration will be set to the value given here. + // This is especially useful for users deploying many different ingress + // classes into a single cluster that want to be able to re-use a single + // solver for each ingress class. + ACMECertificateHTTP01IngressClassNameOverride = "acme.cert-manager.io/http01-override-ingress-ingressclassname" + // IngressEditInPlaceAnnotationKey is used to toggle the use of ingressClass instead // of ingress on the created Certificate resource IngressEditInPlaceAnnotationKey = "acme.cert-manager.io/http01-edit-in-place" @@ -49,6 +58,15 @@ const ( // SolverIdentificationLabelKey is added to the labels of a Pod serving an ACME challenge. // Its value will be the "true" if the Pod is an HTTP-01 solver. SolverIdentificationLabelKey = "acme.cert-manager.io/http01-solver" + + // ACMECertificateHTTP01ParentRefName is an annotation to specify the parent ref + // for the HTTPRoute that would be created by using the HTTP01 solver. If not specified + // then parentRef mentioned in the HTTP01 solver config will be used. + ACMECertificateHTTP01ParentRefName = "acme.cert-manager.io/http01-parentrefname" + + // ACMECertificateHTTP01ParentRefKind is an annotation to specify the parent ref kind + // for the HTTPRoute that would be created by using the HTTP01 solver. 
+ ACMECertificateHTTP01ParentRefKind = "acme.cert-manager.io/http01-parentrefkind" ) const ( diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go index dc3bb1b37f..eca29e56e7 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go @@ -30,6 +30,9 @@ import ( // +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.reason",description="",priority=1 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." // +kubebuilder:resource:scope=Namespaced,categories={cert-manager,cert-manager-acme} +// +kubebuilder:selectablefield:JSONPath=.spec.issuerRef.group +// +kubebuilder:selectablefield:JSONPath=.spec.issuerRef.kind +// +kubebuilder:selectablefield:JSONPath=.spec.issuerRef.name // +kubebuilder:subresource:status // Challenge is a type to represent a Challenge request with an ACME server diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go index 009b1abe84..adb6ab137c 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go @@ -609,8 +609,8 @@ type ACMEIssuerDNS01ProviderRoute53 struct { // The AccessKeyID is used for authentication. // Cannot be set when SecretAccessKeyID is set. 
- // If neither the Access Key nor Key ID are set, we fall-back to using env - // vars, shared credentials file or AWS Instance metadata, + // If neither the Access Key nor Key ID are set, we fall back to using env + // vars, shared credentials file, or AWS Instance metadata, // see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials // +optional AccessKeyID string `json:"accessKeyID,omitempty"` @@ -618,15 +618,15 @@ type ACMEIssuerDNS01ProviderRoute53 struct { // The SecretAccessKey is used for authentication. If set, pull the AWS // access key ID from a key within a Kubernetes Secret. // Cannot be set when AccessKeyID is set. - // If neither the Access Key nor Key ID are set, we fall-back to using env - // vars, shared credentials file or AWS Instance metadata, + // If neither the Access Key nor Key ID are set, we fall back to using env + // vars, shared credentials file, or AWS Instance metadata, // see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials // +optional SecretAccessKeyID *cmmeta.SecretKeySelector `json:"accessKeyIDSecretRef,omitempty"` // The SecretAccessKey is used for authentication. - // If neither the Access Key nor Key ID are set, we fall-back to using env - // vars, shared credentials file or AWS Instance metadata, + // If neither the Access Key nor Key ID are set, we fall back to using env + // vars, shared credentials file, or AWS Instance metadata, // see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials // +optional SecretAccessKey cmmeta.SecretKeySelector `json:"secretAccessKeySecretRef"` @@ -738,11 +738,32 @@ type ACMEIssuerDNS01ProviderAzureDNS struct { // If set, ClientID, ClientSecret and TenantID must not be set. // +optional ManagedIdentity *AzureManagedIdentity `json:"managedIdentity,omitempty"` + + // ZoneType determines which type of Azure DNS zone to use. 
+ // + // Valid values are: + // - AzurePublicZone (default): Use a public Azure DNS zone. + // - AzurePrivateZone: Use an Azure Private DNS zone. + // + // If not specified, AzurePublicZone is used. + // + // Support for Azure Private DNS zones is currently + // experimental and may change in future releases. + // +optional + ZoneType AzureZoneType `json:"zoneType,omitempty"` } +// +kubebuilder:validation:Enum=AzurePublicZone;AzurePrivateZone +type AzureZoneType string + +const ( + PrivateAzureZone AzureZoneType = "AzurePrivateZone" + PublicAzureZone AzureZoneType = "AzurePublicZone" +) + // AzureManagedIdentity contains the configuration for Azure Workload Identity or Azure Managed Service Identity // If the AZURE_FEDERATED_TOKEN_FILE environment variable is set, the Azure Workload Identity will be used. -// Otherwise, we fall-back to using Azure Managed Service Identity. +// Otherwise, we fall back to using Azure Managed Service Identity. type AzureManagedIdentity struct { // client ID of the managed identity, cannot be used at the same time as resourceID // +optional @@ -781,7 +802,7 @@ type ACMEIssuerDNS01ProviderAcmeDNS struct { type ACMEIssuerDNS01ProviderRFC2136 struct { // The IP address or hostname of an authoritative DNS server supporting // RFC2136 in the form host:port. If the host is an IPv6 address it must be - // enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. + // enclosed in square brackets (e.g [2001:db8::1]); port is optional. // This field is required. 
Nameserver string `json:"nameserver"` diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go index e7e199c31b..018109ad47 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go @@ -30,6 +30,9 @@ import ( // +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.reason",description="",priority=1 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." // +kubebuilder:resource:scope=Namespaced,categories={cert-manager,cert-manager-acme} +// +kubebuilder:selectablefield:JSONPath=.spec.issuerRef.group +// +kubebuilder:selectablefield:JSONPath=.spec.issuerRef.kind +// +kubebuilder:selectablefield:JSONPath=.spec.issuerRef.name // +kubebuilder:subresource:status // Order is a type to represent an Order with an ACME server diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/generic_issuer.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/generic_issuer.go index d757978fe0..db262ef80d 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/generic_issuer.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/generic_issuer.go @@ -27,6 +27,7 @@ type GenericIssuer interface { runtime.Object metav1.Object + // Deprecated: Use the metav1.Object functions directly instead. 
GetObjectMeta() *metav1.ObjectMeta GetSpec() *IssuerSpec GetStatus() *IssuerStatus @@ -35,6 +36,7 @@ type GenericIssuer interface { var _ GenericIssuer = &Issuer{} var _ GenericIssuer = &ClusterIssuer{} +// Deprecated: Use the metav1.Object functions directly instead. func (c *ClusterIssuer) GetObjectMeta() *metav1.ObjectMeta { return &c.ObjectMeta } @@ -53,6 +55,8 @@ func (c *ClusterIssuer) SetStatus(status IssuerStatus) { func (c *ClusterIssuer) Copy() GenericIssuer { return c.DeepCopy() } + +// Deprecated: Use the metav1.Object functions directly instead. func (c *Issuer) GetObjectMeta() *metav1.ObjectMeta { return &c.ObjectMeta } diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types.go index 4b0c35a785..17c5750e70 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types.go @@ -143,6 +143,9 @@ const ( // IngressACMEIssuerHTTP01IngressClassAnnotationKey holds the acmeIssuerHTTP01IngressClassAnnotation value // which can be used to override the http01 ingressClass if the challenge type is set to http01 IngressACMEIssuerHTTP01IngressClassAnnotationKey = "acme.cert-manager.io/http01-ingress-class" + // IngressACMEIssuerHTTP01IngressClassNameAnnotationKey holds the annotation value + // which can be used to override the http01 ingressClassName if the challenge type is set to http01 + IngressACMEIssuerHTTP01IngressClassNameAnnotationKey = "acme.cert-manager.io/http01-ingress-ingressclassname" // IngressClassAnnotationKey picks a specific "class" for the Ingress. 
The // controller only processes Ingresses with this annotation either unset, or @@ -211,15 +214,15 @@ const ( // Issuer specific Annotations const ( - // VenafiCustomFieldsAnnotationKey is the annotation that passes on JSON encoded custom fields to the Venafi issuer - // This will only work with Venafi TPP v19.3 and higher + // VenafiCustomFieldsAnnotationKey is the annotation that passes on JSON encoded custom fields to the Certificate Manager issuer + // This will only work with CyberArk Certificate Manager Self-Hosted v19.3 and higher // The value is an array with objects containing the name and value keys // for example: `[{"name": "custom-field", "value": "custom-value"}]` VenafiCustomFieldsAnnotationKey = "venafi.cert-manager.io/custom-fields" // VenafiPickupIDAnnotationKey is the annotation key used to record the - // Venafi Pickup ID of a certificate signing request that has been submitted - // to the Venafi API for collection later. + // Certificate Manager Pickup ID of a certificate signing request that has been submitted + // to the Certificate Manager for collection later. VenafiPickupIDAnnotationKey = "venafi.cert-manager.io/pickup-id" ) diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go index bc5475a32d..80e641520f 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go @@ -33,6 +33,9 @@ import ( // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].message`,priority=1 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. 
It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." // +kubebuilder:resource:scope=Namespaced,shortName={cert,certs},categories=cert-manager +// +kubebuilder:selectablefield:JSONPath=.spec.issuerRef.group +// +kubebuilder:selectablefield:JSONPath=.spec.issuerRef.kind +// +kubebuilder:selectablefield:JSONPath=.spec.issuerRef.name // +kubebuilder:subresource:status // A Certificate resource should be created to ensure an up to date and signed @@ -92,11 +95,9 @@ type PrivateKeyEncoding string const ( // PKCS1 private key encoding. - // PKCS1 produces a PEM block that contains the private key algorithm - // in the header and the private key in the body. A key that uses this - // can be recognised by its `BEGIN RSA PRIVATE KEY` or `BEGIN EC PRIVATE KEY` header. - // NOTE: This encoding is not supported for Ed25519 keys. Attempting to use - // this encoding with an Ed25519 key will be ignored and default to PKCS8. + // For RSA keys: produces PEM block with `BEGIN RSA PRIVATE KEY` header and private key in PKCS#1 format. + // For EC keys: produces PEM block with `BEGIN EC PRIVATE KEY` header and private key in SEC 1 format. + // For Ed25519 keys: option will be ignored and PKCS8 encoding will be used instead. PKCS1 PrivateKeyEncoding = "PKCS1" // PKCS8 private key encoding. @@ -352,9 +353,6 @@ type CertificatePrivateKey struct { // will be generated whenever a re-issuance occurs. // Default is `Always`. // The default was changed from `Never` to `Always` in cert-manager >=v1.18.0. - // The new default can be disabled by setting the - // `--feature-gates=DefaultPrivateKeyRotationPolicyAlways=false` option on - // the controller component. // +optional RotationPolicy PrivateKeyRotationPolicy `json:"rotationPolicy,omitempty"` @@ -528,7 +526,7 @@ type JKSKeystore struct { // Mutually exclusive with passwordSecretRef. 
// One of password or passwordSecretRef must provide a password with a non-zero length. // +optional - Password *string `json:"password,omitempty"` + Password *string `json:"password,omitempty"` // #nosec G117 -- field is part of API spec and may contain a secret; not hardcoded } // PKCS12 configures options for storing a PKCS12 keystore in the @@ -568,7 +566,7 @@ type PKCS12Keystore struct { // Mutually exclusive with passwordSecretRef. // One of password or passwordSecretRef must provide a password with a non-zero length. // +optional - Password *string `json:"password,omitempty"` + Password *string `json:"password,omitempty"` // #nosec G117 -- field is part of API spec and may contain a secret; not hardcoded } // +kubebuilder:validation:Enum=LegacyRC2;LegacyDES;Modern2023 diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go index a948f11291..876889d6b8 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go @@ -53,6 +53,9 @@ const ( // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].message`,priority=1 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." 
// +kubebuilder:resource:scope=Namespaced,shortName={cr,crs},categories=cert-manager +// +kubebuilder:selectablefield:JSONPath=.spec.issuerRef.group +// +kubebuilder:selectablefield:JSONPath=.spec.issuerRef.kind +// +kubebuilder:selectablefield:JSONPath=.spec.issuerRef.name // +kubebuilder:subresource:status // A CertificateRequest is used to request a signed certificate from one of the diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go index 1cbd93f951..18cd0bccdd 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go @@ -125,52 +125,52 @@ type IssuerConfig struct { // +optional SelfSigned *SelfSignedIssuer `json:"selfSigned,omitempty"` - // Venafi configures this issuer to sign certificates using a Venafi TPP - // or Venafi Cloud policy zone. + // Venafi configures this issuer to sign certificates using a CyberArk Certificate Manager Self-Hosted + // or SaaS policy zone. // +optional Venafi *VenafiIssuer `json:"venafi,omitempty"` } -// Configures an issuer to sign certificates using a Venafi TPP -// or Cloud policy zone. +// Configures an issuer to sign certificates using a CyberArk Certificate Manager Self-Hosted +// or SaaS policy zone. type VenafiIssuer struct { - // Zone is the Venafi Policy Zone to use for this issuer. - // All requests made to the Venafi platform will be restricted by the named + // Zone is the Certificate Manager Policy Zone to use for this issuer. + // All requests made to the Certificate Manager platform will be restricted by the named // zone policy. // This field is required. Zone string `json:"zone"` - // TPP specifies Trust Protection Platform configuration settings. - // Only one of TPP or Cloud may be specified. 
+ // TPP specifies CyberArk Certificate Manager Self-Hosted configuration settings. + // Only one of CyberArk Certificate Manager may be specified. // +optional TPP *VenafiTPP `json:"tpp,omitempty"` - // Cloud specifies the Venafi cloud configuration settings. - // Only one of TPP or Cloud may be specified. + // Cloud specifies the CyberArk Certificate Manager SaaS configuration settings. + // Only one of CyberArk Certificate Manager may be specified. // +optional Cloud *VenafiCloud `json:"cloud,omitempty"` } -// VenafiTPP defines connection configuration details for a Venafi TPP instance +// VenafiTPP defines connection configuration details for a CyberArk Certificate Manager Self-Hosted instance type VenafiTPP struct { - // URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, + // URL is the base URL for the vedsdk endpoint of the CyberArk Certificate Manager Self-Hosted instance, // for example: "https://tpp.example.com/vedsdk". URL string `json:"url"` - // CredentialsRef is a reference to a Secret containing the Venafi TPP API credentials. + // CredentialsRef is a reference to a Secret containing the CyberArk Certificate Manager Self-Hosted API credentials. // The secret must contain the key 'access-token' for the Access Token Authentication, // or two keys, 'username' and 'password' for the API Keys Authentication. CredentialsRef cmmeta.LocalObjectReference `json:"credentialsRef"` // Base64-encoded bundle of PEM CAs which will be used to validate the certificate - // chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP. + // chain presented by the CyberArk Certificate Manager Self-Hosted server. Only used if using HTTPS; ignored for HTTP. // If undefined, the certificate bundle in the cert-manager controller container // is used to validate the chain. 
// +optional CABundle []byte `json:"caBundle,omitempty"` // Reference to a Secret containing a base64-encoded bundle of PEM CAs - // which will be used to validate the certificate chain presented by the TPP server. + // which will be used to validate the certificate chain presented by the CyberArk Certificate Manager Self-Hosted server. // Only used if using HTTPS; ignored for HTTP. Mutually exclusive with CABundle. // If neither CABundle nor CABundleSecretRef is defined, the certificate bundle in // the cert-manager controller container is used to validate the TLS connection. @@ -178,14 +178,14 @@ type VenafiTPP struct { CABundleSecretRef *cmmeta.SecretKeySelector `json:"caBundleSecretRef,omitempty"` } -// VenafiCloud defines connection configuration details for Venafi Cloud +// VenafiCloud defines connection configuration details for CyberArk Certificate Manager SaaS type VenafiCloud struct { - // URL is the base URL for Venafi Cloud. + // URL is the base URL for CyberArk Certificate Manager SaaS. // Defaults to "https://api.venafi.cloud/". // +optional URL string `json:"url,omitempty"` - // APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + // APITokenSecretRef is a secret key selector for the CyberArk Certificate Manager SaaS API token. APITokenSecretRef cmmeta.SecretKeySelector `json:"apiTokenSecretRef"` } @@ -294,7 +294,7 @@ type VaultAppRole struct { SecretRef cmmeta.SecretKeySelector `json:"secretRef"` } -// VaultKubernetesAuth is used to authenticate against Vault using a client +// VaultClientCertificateAuth is used to authenticate against Vault using a client // certificate stored in a Secret. type VaultClientCertificateAuth struct { // The Vault mountPath here is the mount path to use when authenticating with @@ -347,15 +347,15 @@ type VaultKubernetesAuth struct { } // ServiceAccountRef is a service account used by cert-manager to request a -// token. 
Default audience is generated by -// cert-manager and takes the form `vault://namespace-name/issuer-name` for an -// Issuer and `vault://issuer-name` for a ClusterIssuer. The expiration of the +// token. By default two audiences are included: the address of the Vault server as specified +// on the issuer, and a generated audience taking the form of `vault://namespace-name/issuer-name` +// for an Issuer and `vault://issuer-name` for a ClusterIssuer. The expiration of the // token is also set by cert-manager to 10 minutes. type ServiceAccountRef struct { // Name of the ServiceAccount used to request a token. Name string `json:"name"` - // TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token - // consisting of the issuer's namespace and name is always included. + // TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. + // The default audiences are always included in the token. // +optional // +listType=atomic TokenAudiences []string `json:"audiences,omitempty"` diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go index 2b294e1a92..733c27b071 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go @@ -63,6 +63,7 @@ type IssuerReference struct { } // ObjectReference is a reference to an object with a given name, kind and group. +// // Deprecated: Use IssuerReference instead. 
type ObjectReference = IssuerReference diff --git a/vendor/github.com/clipperhouse/displaywidth/.gitignore b/vendor/github.com/clipperhouse/displaywidth/.gitignore index e43b0f9889..b356d43c6f 100644 --- a/vendor/github.com/clipperhouse/displaywidth/.gitignore +++ b/vendor/github.com/clipperhouse/displaywidth/.gitignore @@ -1 +1,3 @@ .DS_Store +*.out +*.test diff --git a/vendor/github.com/clipperhouse/displaywidth/AGENTS.md b/vendor/github.com/clipperhouse/displaywidth/AGENTS.md index 853e2917db..9ae951b25a 100644 --- a/vendor/github.com/clipperhouse/displaywidth/AGENTS.md +++ b/vendor/github.com/clipperhouse/displaywidth/AGENTS.md @@ -18,13 +18,27 @@ by running `go generate` from the top package directory. ## Pull Requests and branches -For PRs (pull requests), you can use the gh CLI tool to retrieve details, -or post comments. Then, compare the current branch with main. Reviewing a PR -and reviewing a branch are about the same, but the PR may add context. +For PRs (pull requests), you can use the gh CLI tool. Compare the current branch with main. Reviewing a PR and reviewing a branch are about the same, but the PR may add context. -Look for bugs. Think like GitHub Copilot or Cursor BugBot. +Understand the goals of the PR. Note any API changes, especially breaking changes. -Offer to post a brief summary of the review to the PR, via the gh CLI tool. +Look for thoroughness of tests, as well as GoDoc comments. + +Retrieve and consider the comments on the PR, which may have come from GitHub Copilot or Cursor BugBot. Think like GitHub Copilot or Cursor BugBot. + +Offer to optionally post a brief summary of the review to the PR, via the gh CLI tool. + +## Tagged Go releases + +If I ask you whether we are ready to release, this means a tagged Go release on the main branch. Go releases are git tagged with a version number. + +Review the changes since the last release, i.e. the previous git tag. Ensure that the changes are complete and correct. 
Identify new features, bug fixes, and performance improvements. + +Identify breaking changes, especially API changes. + +Ensure good test coverage. Look for performance changes, especially performance regressions, by running benchmarks against the previous release. + +Ensure that the documentation in READMEs and GoDocs are complete, correct and consistent. ## Comparisons to go-runewidth diff --git a/vendor/github.com/clipperhouse/displaywidth/CHANGELOG.md b/vendor/github.com/clipperhouse/displaywidth/CHANGELOG.md index ae1919a867..8c6efc10d0 100644 --- a/vendor/github.com/clipperhouse/displaywidth/CHANGELOG.md +++ b/vendor/github.com/clipperhouse/displaywidth/CHANGELOG.md @@ -1,5 +1,74 @@ # Changelog +## [0.11.0] + +[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.10.0...v0.11.0) + +### Added +- New `ControlSequences8Bit` option to treat 8-bit ECMA-48 (C1) escape sequences as zero-width. (#22) + +### Changed +- Upgraded uax29 dependency to v2.7.0 for 8-bit escape sequence support in the grapheme iterator. +- Truncation now validates that preserved trailing escape sequences are zero-width, preventing edge cases where non-zero-width sequences could leak into output. + +### Note +- `ControlSequences8Bit` is deliberately ignored by `TruncateString` and `TruncateBytes`, because C1 byte values (0x80–0x9F) overlap with UTF-8 multi-byte encoding. + +## [0.10.0] + +[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.9.0...v0.10.0) + +### Added +- New `ControlSequences` option to treat ECMA-48/ANSI escape sequences as zero-width. (#20) +- `TruncateString` and `TruncateBytes` now preserve trailing ANSI escape sequences (such as SGR resets) when `ControlSequences` is true, preventing color bleed in terminal output. + +### Changed +- Removed `stringish` dependency; generic type constraints are now inline `~string | []byte`. +- Upgraded uax29 dependency to v2.6.0 for ANSI escape sequence support in the grapheme iterator. 
+ +## [0.9.0] + +[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.8.0...v0.9.0) + +### Changed +- Unicode 17 support: East Asian Width and emoji data updated to Unicode 17.0.0. (#18) +- Upgraded uax29 dependency to v2.5.0 (Unicode 17 grapheme segmentation). + +## [0.8.0] + +[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.7.0...v0.8.0) + +### Changed +- Performance: ASCII fast path that applies to any run of printable + ASCII. 2x-10x faster for ASCII text vs v0.7.0. (#16) +- Upgraded uax29 dependency to v2.4.0 for Unicode 16 support. Text that includes + Indic_Conjunct_Break may segment differently (and more correctly). (#15) + +## [0.7.0] + +[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.6.2...v0.7.0) + +### Added +- New `TruncateString` and `TruncateBytes` methods to truncate strings to a + maximum display width, with optional tail (like an ellipsis). (#13) + +## [0.6.2] + +[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.6.1...v0.6.2) + +### Changed +- Internal: reduced property categories for simpler trie. + +## [0.6.1] + +[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.6.0...v0.6.1) + +### Changed +- Perf improvements: replaced the ASCII lookup table with a simple + function. A bit more cache-friendly. More inlining. +- Bug fix: single regional indicators are now treated as width 2, since that + is what actual terminals do. + ## [0.6.0] [Compare](https://github.com/clipperhouse/displaywidth/compare/v0.5.0...v0.6.0) @@ -9,7 +78,7 @@ widths of grapheme clusters. 
### Changed -- Added ASCII fast paths +- Fast ASCII lookups ## [0.5.0] diff --git a/vendor/github.com/clipperhouse/displaywidth/README.md b/vendor/github.com/clipperhouse/displaywidth/README.md index c423b99524..506822b023 100644 --- a/vendor/github.com/clipperhouse/displaywidth/README.md +++ b/vendor/github.com/clipperhouse/displaywidth/README.md @@ -33,42 +33,101 @@ func main() { } ``` -For most purposes, you should use the `String` or `Bytes` methods. +For most purposes, you should use the `String` or `Bytes` methods. They sum +the widths of grapheme clusters in the string or byte slice. +> Note: in your application, iterating over runes to measure width is likely incorrect; +the smallest unit of display is a grapheme, not a rune. + +### Iterating over graphemes + +If you need the individual graphemes: + +```go +import ( + "fmt" + "github.com/clipperhouse/displaywidth" +) + +func main() { + g := displaywidth.StringGraphemes("Hello, 世界!") + for g.Next() { + width := g.Width() + value := g.Value() + // do something with the width or value + } +} +``` ### Options -You can specify East Asian Width settings. When false (default), -[East Asian Ambiguous characters](https://www.unicode.org/reports/tr11/#Ambiguous) -are treated as width 1. When true, East Asian Ambiguous characters are treated -as width 2. +Create the options you need, and then use methods on the options struct. ```go -myOptions := displaywidth.Options{ +var myOptions = displaywidth.Options{ EastAsianWidth: true, + ControlSequences: true, } width := myOptions.String("Hello, 世界!") -fmt.Println(width) ``` -## Technical details +#### ControlSequences + +`ControlSequences` specifies whether to ignore ECMA-48 escape sequences +when calculating the display width. When `false` (default), ANSI escape +sequences are treated as just a series of characters. When `true`, they are +treated as a single zero-width unit. 
+ +#### ControlSequences8Bit + +`ControlSequences8Bit` specifies whether to ignore 8-bit ECMA-48 escape sequences +when calculating the display width. When `false` (default), these are treated +as just a series of characters. When `true`, they are treated as a single +zero-width unit. + +Note: this option is ignored by the `Truncate` methods, as the concatenation +can lead to unintended UTF-8 semantics. + +#### EastAsianWidth + +`EastAsianWidth` defines how +[East Asian Ambiguous characters](https://www.unicode.org/reports/tr11/#Ambiguous) +are treated. + +When `false` (default), East Asian Ambiguous characters are treated as width 1. +When `true`, they are treated as width 2. + +You may wish to configure this based on environment variables or locale. + `go-runewidth`, for example, does so + [during package initialization](https://github.com/mattn/go-runewidth/blob/master/runewidth.go#L26C1-L45C2). `displaywidth` does not do this automatically, we prefer to leave it to you. + + +## Technical standards and compatibility This package implements the Unicode East Asian Width standard -([UAX #11](https://www.unicode.org/reports/tr11/)), and handles +([UAX #11](https://www.unicode.org/reports/tr11/tr11-43.html)), and handles [version selectors](https://en.wikipedia.org/wiki/Variation_Selectors_(Unicode_block)), and [regional indicator pairs](https://en.wikipedia.org/wiki/Regional_indicator_symbol) -(flags). We implement [Unicode TR51](https://unicode.org/reports/tr51/). +(flags). We implement [Unicode TR51](https://www.unicode.org/reports/tr51/tr51-27.html) +for emojis. We are keeping an eye on +[emerging standards](https://www.jeffquast.com/post/state-of-terminal-emulation-2025/). + +For control sequences, we implement the [ECMA-48](https://ecma-international.org/publications-and-standards/standards/ecma-48/) standard for 7-bit and 8-bit control sequences. 
`clipperhouse/displaywidth`, `mattn/go-runewidth`, and `rivo/uniseg` will -give the same outputs for most real-world text. See extensive details in the +give the same outputs for most real-world text. Extensive details are in the [compatibility analysis](comparison/COMPATIBILITY_ANALYSIS.md). -If you wish to investigate the core logic, see the `lookupProperties` and `width` -functions in [width.go](width.go#L135). The essential trie generation logic is in -`buildPropertyBitmap` in [unicode.go](internal/gen/unicode.go#L317). +## Invalid UTF-8 + +This package does not validate UTF-8. If you pass invalid UTF-8, the results +are undefined. We fuzz against invalid UTF-8 to ensure we don't panic or +loop indefinitely. -I (@clipperhouse) am keeping an eye on [emerging standards and test suites](https://www.jeffquast.com/post/state-of-terminal-emulation-2025/). +The `ControlSequences8Bit` option means that we will segment valid 8-bit +control sequences, which are typically _not_ valid UTF-8. 8-bit control bytes +happen to also be UTF-8 continuation bytes. Use with caution. 
## Prior Art @@ -93,31 +152,39 @@ goarch: arm64 pkg: github.com/clipperhouse/displaywidth/comparison cpu: Apple M2 -BenchmarkString_Mixed/clipperhouse/displaywidth-8 10469 ns/op 161.15 MB/s 0 B/op 0 allocs/op -BenchmarkString_Mixed/mattn/go-runewidth-8 14250 ns/op 118.39 MB/s 0 B/op 0 allocs/op -BenchmarkString_Mixed/rivo/uniseg-8 19258 ns/op 87.60 MB/s 0 B/op 0 allocs/op +BenchmarkString_Mixed/clipperhouse/displaywidth-8 5784 ns/op 291.69 MB/s 0 B/op 0 allocs/op +BenchmarkString_Mixed/mattn/go-runewidth-8 14751 ns/op 114.36 MB/s 0 B/op 0 allocs/op +BenchmarkString_Mixed/rivo/uniseg-8 19360 ns/op 87.14 MB/s 0 B/op 0 allocs/op -BenchmarkString_EastAsian/clipperhouse/displaywidth-8 10518 ns/op 160.39 MB/s 0 B/op 0 allocs/op -BenchmarkString_EastAsian/mattn/go-runewidth-8 23827 ns/op 70.80 MB/s 0 B/op 0 allocs/op -BenchmarkString_EastAsian/rivo/uniseg-8 19537 ns/op 86.35 MB/s 0 B/op 0 allocs/op +BenchmarkString_ASCII/clipperhouse/displaywidth-8 54.60 ns/op 2344.32 MB/s 0 B/op 0 allocs/op +BenchmarkString_ASCII/mattn/go-runewidth-8 1195 ns/op 107.08 MB/s 0 B/op 0 allocs/op +BenchmarkString_ASCII/rivo/uniseg-8 1578 ns/op 81.13 MB/s 0 B/op 0 allocs/op -BenchmarkString_ASCII/clipperhouse/displaywidth-8 1027 ns/op 124.61 MB/s 0 B/op 0 allocs/op -BenchmarkString_ASCII/mattn/go-runewidth-8 1166 ns/op 109.78 MB/s 0 B/op 0 allocs/op -BenchmarkString_ASCII/rivo/uniseg-8 1551 ns/op 82.52 MB/s 0 B/op 0 allocs/op +BenchmarkString_EastAsian/clipperhouse/displaywidth-8 5837 ns/op 289.01 MB/s 0 B/op 0 allocs/op +BenchmarkString_EastAsian/mattn/go-runewidth-8 24418 ns/op 69.09 MB/s 0 B/op 0 allocs/op +BenchmarkString_EastAsian/rivo/uniseg-8 19339 ns/op 87.23 MB/s 0 B/op 0 allocs/op -BenchmarkString_Emoji/clipperhouse/displaywidth-8 3164 ns/op 228.84 MB/s 0 B/op 0 allocs/op -BenchmarkString_Emoji/mattn/go-runewidth-8 4728 ns/op 153.13 MB/s 0 B/op 0 allocs/op -BenchmarkString_Emoji/rivo/uniseg-8 6489 ns/op 111.57 MB/s 0 B/op 0 allocs/op +BenchmarkString_Emoji/clipperhouse/displaywidth-8 
3225 ns/op 224.51 MB/s 0 B/op 0 allocs/op +BenchmarkString_Emoji/mattn/go-runewidth-8 4851 ns/op 149.25 MB/s 0 B/op 0 allocs/op +BenchmarkString_Emoji/rivo/uniseg-8 6591 ns/op 109.85 MB/s 0 B/op 0 allocs/op -BenchmarkRune_Mixed/clipperhouse/displaywidth-8 3429 ns/op 491.96 MB/s 0 B/op 0 allocs/op -BenchmarkRune_Mixed/mattn/go-runewidth-8 5308 ns/op 317.81 MB/s 0 B/op 0 allocs/op +BenchmarkRune_Mixed/clipperhouse/displaywidth-8 3385 ns/op 498.34 MB/s 0 B/op 0 allocs/op +BenchmarkRune_Mixed/mattn/go-runewidth-8 5354 ns/op 315.07 MB/s 0 B/op 0 allocs/op -BenchmarkRune_EastAsian/clipperhouse/displaywidth-8 3419 ns/op 493.49 MB/s 0 B/op 0 allocs/op -BenchmarkRune_EastAsian/mattn/go-runewidth-8 15321 ns/op 110.11 MB/s 0 B/op 0 allocs/op +BenchmarkRune_EastAsian/clipperhouse/displaywidth-8 3397 ns/op 496.56 MB/s 0 B/op 0 allocs/op +BenchmarkRune_EastAsian/mattn/go-runewidth-8 15673 ns/op 107.64 MB/s 0 B/op 0 allocs/op -BenchmarkRune_ASCII/clipperhouse/displaywidth-8 254.4 ns/op 503.19 MB/s 0 B/op 0 allocs/op -BenchmarkRune_ASCII/mattn/go-runewidth-8 264.3 ns/op 484.31 MB/s 0 B/op 0 allocs/op +BenchmarkRune_ASCII/clipperhouse/displaywidth-8 255.7 ns/op 500.53 MB/s 0 B/op 0 allocs/op +BenchmarkRune_ASCII/mattn/go-runewidth-8 261.5 ns/op 489.55 MB/s 0 B/op 0 allocs/op -BenchmarkRune_Emoji/clipperhouse/displaywidth-8 1374 ns/op 527.02 MB/s 0 B/op 0 allocs/op -BenchmarkRune_Emoji/mattn/go-runewidth-8 2210 ns/op 327.66 MB/s 0 B/op 0 allocs/op +BenchmarkRune_Emoji/clipperhouse/displaywidth-8 1371 ns/op 528.22 MB/s 0 B/op 0 allocs/op +BenchmarkRune_Emoji/mattn/go-runewidth-8 2267 ns/op 319.43 MB/s 0 B/op 0 allocs/op + +BenchmarkTruncateWithTail/clipperhouse/displaywidth-8 3229 ns/op 54.82 MB/s 192 B/op 14 allocs/op +BenchmarkTruncateWithTail/mattn/go-runewidth-8 8408 ns/op 21.05 MB/s 192 B/op 14 allocs/op + +BenchmarkTruncateWithoutTail/clipperhouse/displaywidth-8 3554 ns/op 64.43 MB/s 0 B/op 0 allocs/op +BenchmarkTruncateWithoutTail/mattn/go-runewidth-8 11189 ns/op 20.47 MB/s 0 
B/op 0 allocs/op ``` + +Here are some notes on [how to make Unicode things fast](https://clipperhouse.com/go-unicode/). diff --git a/vendor/github.com/clipperhouse/displaywidth/graphemes.go b/vendor/github.com/clipperhouse/displaywidth/graphemes.go index 673c2aab50..14a52788b5 100644 --- a/vendor/github.com/clipperhouse/displaywidth/graphemes.go +++ b/vendor/github.com/clipperhouse/displaywidth/graphemes.go @@ -1,7 +1,6 @@ package displaywidth import ( - "github.com/clipperhouse/stringish" "github.com/clipperhouse/uax29/v2/graphemes" ) @@ -9,8 +8,8 @@ import ( // // Iterate using the Next method, and get the width of the current grapheme // using the Width method. -type Graphemes[T stringish.Interface] struct { - iter graphemes.Iterator[T] +type Graphemes[T ~string | []byte] struct { + iter *graphemes.Iterator[T] options Options } @@ -44,10 +43,11 @@ func StringGraphemes(s string) Graphemes[string] { // Iterate using the Next method, and get the width of the current grapheme // using the Width method. func (options Options) StringGraphemes(s string) Graphemes[string] { - return Graphemes[string]{ - iter: graphemes.FromString(s), - options: options, - } + g := graphemes.FromString(s) + g.AnsiEscapeSequences = options.ControlSequences + g.AnsiEscapeSequences8Bit = options.ControlSequences8Bit + + return Graphemes[string]{iter: g, options: options} } // BytesGraphemes returns an iterator over grapheme clusters for the given @@ -65,8 +65,9 @@ func BytesGraphemes(s []byte) Graphemes[[]byte] { // Iterate using the Next method, and get the width of the current grapheme // using the Width method. 
func (options Options) BytesGraphemes(s []byte) Graphemes[[]byte] { - return Graphemes[[]byte]{ - iter: graphemes.FromBytes(s), - options: options, - } + g := graphemes.FromBytes(s) + g.AnsiEscapeSequences = options.ControlSequences + g.AnsiEscapeSequences8Bit = options.ControlSequences8Bit + + return Graphemes[[]byte]{iter: g, options: options} } diff --git a/vendor/github.com/clipperhouse/displaywidth/options.go b/vendor/github.com/clipperhouse/displaywidth/options.go new file mode 100644 index 0000000000..b63b585aa1 --- /dev/null +++ b/vendor/github.com/clipperhouse/displaywidth/options.go @@ -0,0 +1,30 @@ +package displaywidth + +// Options allows you to specify the treatment of ambiguous East Asian +// characters and ANSI escape sequences. +type Options struct { + // EastAsianWidth specifies whether to treat ambiguous East Asian characters + // as width 1 or 2. When false (default), ambiguous East Asian characters + // are treated as width 1. When true, they are width 2. + EastAsianWidth bool + + // ControlSequences specifies whether to ignore 7-bit ECMA-48 escape sequences + // when calculating the display width. When false (default), ANSI escape + // sequences are treated as just a series of characters. When true, they are + // treated as a single zero-width unit. + ControlSequences bool + // ControlSequences8Bit specifies whether to ignore 8-bit ECMA-48 escape sequences + // when calculating the display width. When false (default), these are treated + // as just a series of characters. When true, they are treated as a single + // zero-width unit. + ControlSequences8Bit bool +} + +// DefaultOptions is the default options for the display width +// calculation, which is EastAsianWidth false, ControlSequences false, and +// ControlSequences8Bit false. 
+var DefaultOptions = Options{ + EastAsianWidth: false, + ControlSequences: false, + ControlSequences8Bit: false, +} diff --git a/vendor/github.com/clipperhouse/displaywidth/tables.go b/vendor/github.com/clipperhouse/displaywidth/tables.go deleted file mode 100644 index 40cf596680..0000000000 --- a/vendor/github.com/clipperhouse/displaywidth/tables.go +++ /dev/null @@ -1,91 +0,0 @@ -package displaywidth - -// propertyWidths is a jump table of sorts, instead of a switch -var propertyWidths = [5]int{ - _Default: 1, - _Zero_Width: 0, - _East_Asian_Wide: 2, - _East_Asian_Ambiguous: 1, - _Emoji: 2, -} - -// asciiWidths is a lookup table for single-byte character widths. Printable -// ASCII characters have width 1, control characters have width 0. -// -// It is intended for valid single-byte UTF-8, which means <128. -// -// If you look up an index >= 128, that is either: -// - invalid UTF-8, or -// - a multi-byte UTF-8 sequence, in which case you should be operating on -// the grapheme cluster, and not using this table -// -// We will return a default value of 1 in those cases, so as not to panic. 
-var asciiWidths = [256]int8{ - // Control characters (0x00-0x1F): width 0 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - // Printable ASCII (0x20-0x7E): width 1 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - // DEL (0x7F): width 0 - 0, - // >= 128 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -} - -// asciiProperties is a lookup table for single-byte character properties. -// It is intended for valid single-byte UTF-8, which means <128. -// -// If you look up an index >= 128, that is either: -// - invalid UTF-8, or -// - a multi-byte UTF-8 sequence, in which case you should be operating on -// the grapheme cluster, and not using this table -// -// We will return a default value of _Default in those cases, so as not to -// panic. 
-var asciiProperties = [256]property{ - // Control characters (0x00-0x1F): _Zero_Width - _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, - _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, - _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, - _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, _Zero_Width, - // Printable ASCII (0x20-0x7E): _Default - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, - // DEL (0x7F): _Zero_Width - _Zero_Width, - // >= 128 - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - 
_Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, - _Default, _Default, _Default, _Default, _Default, _Default, _Default, _Default, -} diff --git a/vendor/github.com/clipperhouse/displaywidth/trie.go b/vendor/github.com/clipperhouse/displaywidth/trie.go index e98c3695c4..1d3a983005 100644 --- a/vendor/github.com/clipperhouse/displaywidth/trie.go +++ b/vendor/github.com/clipperhouse/displaywidth/trie.go @@ -2,26 +2,22 @@ package displaywidth -import "github.com/clipperhouse/stringish" - // property is an enum representing the properties of a character type property uint8 const ( // Always 0 width, includes combining marks, control characters, non-printable, etc _Zero_Width property = iota + 1 - // Always 2 wide (East Asian Wide F/W) - _East_Asian_Wide + // Always 2 wide (East Asian Wide F/W, Emoji, Regional Indicator) + _Wide // Width depends on EastAsianWidth option _East_Asian_Ambiguous - // Extended_Pictographic + Emoji_Presentation - _Emoji ) // lookup returns the trie value for the first UTF-8 encoding in s and // the width in bytes of this encoding. The size will be 0 if s does not // hold enough bytes to complete the encoding. len(s) must be greater than 0. -func lookup[T stringish.Interface](s T) (v uint8, sz int) { +func lookup[T ~string | []byte](s T) (v uint8, sz int) { c0 := s[0] switch { case c0 < 0x80: // is ASCII @@ -81,7 +77,7 @@ func lookup[T stringish.Interface](s T) (v uint8, sz int) { return 0, 1 } -// stringWidthTrie. Total size: 17728 bytes (17.31 KiB). Checksum: b4b51ae347944fdb. +// stringWidthTrie. 
Total size: 17664 bytes (17.25 KiB). Checksum: 220983462f26d765. // type stringWidthTrie struct { } // func newStringWidthTrie(i int) *stringWidthTrie { @@ -96,9 +92,9 @@ func lookupValue(n uint32, b byte) uint8 { } } -// stringWidthValues: 247 blocks, 15808 entries, 15808 bytes +// stringWidthValues: 246 blocks, 15744 entries, 15744 bytes // The third block is the zero block. -var stringWidthValues = [15808]uint8{ +var stringWidthValues = [15744]uint8{ // Block 0x0, offset 0x0 // Block 0x1, offset 0x40 // Block 0x2, offset 0x80 @@ -577,13 +573,13 @@ var stringWidthValues = [15808]uint8{ 0x167f: 0x0003, // Block 0x5a, offset 0x1680 0x1692: 0x0003, - 0x169a: 0x0004, 0x169b: 0x0004, + 0x169a: 0x0002, 0x169b: 0x0002, 0x16a9: 0x0002, 0x16aa: 0x0002, // Block 0x5b, offset 0x16c0 - 0x16e9: 0x0004, - 0x16ea: 0x0004, 0x16eb: 0x0004, 0x16ec: 0x0004, - 0x16f0: 0x0004, 0x16f3: 0x0004, + 0x16e9: 0x0002, + 0x16ea: 0x0002, 0x16eb: 0x0002, 0x16ec: 0x0002, + 0x16f0: 0x0002, 0x16f3: 0x0002, // Block 0x5c, offset 0x1700 0x1720: 0x0003, 0x1721: 0x0003, 0x1722: 0x0003, 0x1723: 0x0003, 0x1724: 0x0003, 0x1725: 0x0003, 0x1726: 0x0003, 0x1727: 0x0003, 0x1728: 0x0003, 0x1729: 0x0003, @@ -642,63 +638,63 @@ var stringWidthValues = [15808]uint8{ 0x1862: 0x0003, 0x1863: 0x0003, 0x1864: 0x0003, 0x1865: 0x0003, 0x186f: 0x0003, - 0x187d: 0x0004, 0x187e: 0x0004, + 0x187d: 0x0002, 0x187e: 0x0002, // Block 0x62, offset 0x1880 0x1885: 0x0003, 0x1886: 0x0003, 0x1889: 0x0003, 0x188e: 0x0003, 0x188f: 0x0003, - 0x1894: 0x0004, 0x1895: 0x0004, + 0x1894: 0x0002, 0x1895: 0x0002, 0x189c: 0x0003, 0x189e: 0x0003, 0x18b0: 0x0002, 0x18b1: 0x0002, 0x18b2: 0x0002, 0x18b3: 0x0002, 0x18b4: 0x0002, 0x18b5: 0x0002, 0x18b6: 0x0002, 0x18b7: 0x0002, // Block 0x63, offset 0x18c0 0x18c0: 0x0003, 0x18c2: 0x0003, - 0x18c8: 0x0004, 0x18c9: 0x0004, 0x18ca: 0x0004, 0x18cb: 0x0004, - 0x18cc: 0x0004, 0x18cd: 0x0004, 0x18ce: 0x0004, 0x18cf: 0x0004, 0x18d0: 0x0004, 0x18d1: 0x0004, - 0x18d2: 0x0004, 0x18d3: 0x0004, + 0x18c8: 0x0002, 
0x18c9: 0x0002, 0x18ca: 0x0002, 0x18cb: 0x0002, + 0x18cc: 0x0002, 0x18cd: 0x0002, 0x18ce: 0x0002, 0x18cf: 0x0002, 0x18d0: 0x0002, 0x18d1: 0x0002, + 0x18d2: 0x0002, 0x18d3: 0x0002, 0x18e0: 0x0003, 0x18e1: 0x0003, 0x18e3: 0x0003, 0x18e4: 0x0003, 0x18e5: 0x0003, 0x18e7: 0x0003, 0x18e8: 0x0003, 0x18e9: 0x0003, 0x18ea: 0x0003, 0x18ec: 0x0003, 0x18ed: 0x0003, 0x18ef: 0x0003, - 0x18ff: 0x0004, + 0x18ff: 0x0002, // Block 0x64, offset 0x1900 0x190a: 0x0002, 0x190b: 0x0002, 0x190c: 0x0002, 0x190d: 0x0002, 0x190e: 0x0002, 0x190f: 0x0002, - 0x1913: 0x0004, - 0x191e: 0x0003, 0x191f: 0x0003, 0x1921: 0x0004, - 0x192a: 0x0004, 0x192b: 0x0004, - 0x193d: 0x0004, 0x193e: 0x0004, 0x193f: 0x0003, + 0x1913: 0x0002, + 0x191e: 0x0003, 0x191f: 0x0003, 0x1921: 0x0002, + 0x192a: 0x0002, 0x192b: 0x0002, + 0x193d: 0x0002, 0x193e: 0x0002, 0x193f: 0x0003, // Block 0x65, offset 0x1940 - 0x1944: 0x0004, 0x1945: 0x0004, + 0x1944: 0x0002, 0x1945: 0x0002, 0x1946: 0x0003, 0x1947: 0x0003, 0x1948: 0x0003, 0x1949: 0x0003, 0x194a: 0x0003, 0x194b: 0x0003, - 0x194c: 0x0003, 0x194d: 0x0003, 0x194e: 0x0004, 0x194f: 0x0003, 0x1950: 0x0003, 0x1951: 0x0003, - 0x1952: 0x0003, 0x1953: 0x0003, 0x1954: 0x0004, 0x1955: 0x0003, 0x1956: 0x0003, 0x1957: 0x0003, + 0x194c: 0x0003, 0x194d: 0x0003, 0x194e: 0x0002, 0x194f: 0x0003, 0x1950: 0x0003, 0x1951: 0x0003, + 0x1952: 0x0003, 0x1953: 0x0003, 0x1954: 0x0002, 0x1955: 0x0003, 0x1956: 0x0003, 0x1957: 0x0003, 0x1958: 0x0003, 0x1959: 0x0003, 0x195a: 0x0003, 0x195b: 0x0003, 0x195c: 0x0003, 0x195d: 0x0003, 0x195e: 0x0003, 0x195f: 0x0003, 0x1960: 0x0003, 0x1961: 0x0003, 0x1963: 0x0003, 0x1968: 0x0003, 0x1969: 0x0003, - 0x196a: 0x0004, 0x196b: 0x0003, 0x196c: 0x0003, 0x196d: 0x0003, 0x196e: 0x0003, 0x196f: 0x0003, - 0x1970: 0x0003, 0x1971: 0x0003, 0x1972: 0x0004, 0x1973: 0x0004, 0x1974: 0x0003, 0x1975: 0x0004, - 0x1976: 0x0003, 0x1977: 0x0003, 0x1978: 0x0003, 0x1979: 0x0003, 0x197a: 0x0004, 0x197b: 0x0003, - 0x197c: 0x0003, 0x197d: 0x0004, 0x197e: 0x0003, 0x197f: 0x0003, + 0x196a: 
0x0002, 0x196b: 0x0003, 0x196c: 0x0003, 0x196d: 0x0003, 0x196e: 0x0003, 0x196f: 0x0003, + 0x1970: 0x0003, 0x1971: 0x0003, 0x1972: 0x0002, 0x1973: 0x0002, 0x1974: 0x0003, 0x1975: 0x0002, + 0x1976: 0x0003, 0x1977: 0x0003, 0x1978: 0x0003, 0x1979: 0x0003, 0x197a: 0x0002, 0x197b: 0x0003, + 0x197c: 0x0003, 0x197d: 0x0002, 0x197e: 0x0003, 0x197f: 0x0003, // Block 0x66, offset 0x1980 - 0x1985: 0x0004, - 0x198a: 0x0004, 0x198b: 0x0004, - 0x19a8: 0x0004, + 0x1985: 0x0002, + 0x198a: 0x0002, 0x198b: 0x0002, + 0x19a8: 0x0002, 0x19bd: 0x0003, // Block 0x67, offset 0x19c0 - 0x19cc: 0x0004, 0x19ce: 0x0004, - 0x19d3: 0x0004, 0x19d4: 0x0004, 0x19d5: 0x0004, 0x19d7: 0x0004, + 0x19cc: 0x0002, 0x19ce: 0x0002, + 0x19d3: 0x0002, 0x19d4: 0x0002, 0x19d5: 0x0002, 0x19d7: 0x0002, 0x19f6: 0x0003, 0x19f7: 0x0003, 0x19f8: 0x0003, 0x19f9: 0x0003, 0x19fa: 0x0003, 0x19fb: 0x0003, 0x19fc: 0x0003, 0x19fd: 0x0003, 0x19fe: 0x0003, 0x19ff: 0x0003, // Block 0x68, offset 0x1a00 - 0x1a15: 0x0004, 0x1a16: 0x0004, 0x1a17: 0x0004, - 0x1a30: 0x0004, - 0x1a3f: 0x0004, + 0x1a15: 0x0002, 0x1a16: 0x0002, 0x1a17: 0x0002, + 0x1a30: 0x0002, + 0x1a3f: 0x0002, // Block 0x69, offset 0x1a40 - 0x1a5b: 0x0004, 0x1a5c: 0x0004, + 0x1a5b: 0x0002, 0x1a5c: 0x0002, // Block 0x6a, offset 0x1a80 - 0x1a90: 0x0004, - 0x1a95: 0x0004, 0x1a96: 0x0003, 0x1a97: 0x0003, + 0x1a90: 0x0002, + 0x1a95: 0x0002, 0x1a96: 0x0003, 0x1a97: 0x0003, 0x1a98: 0x0003, 0x1a99: 0x0003, // Block 0x6b, offset 0x1ac0 0x1aef: 0x0001, @@ -1135,27 +1131,31 @@ var stringWidthValues = [15808]uint8{ // Block 0xc2, offset 0x3080 0x30a0: 0x0002, 0x30a1: 0x0002, 0x30a2: 0x0002, 0x30a3: 0x0002, 0x30a4: 0x0001, - 0x30b0: 0x0002, 0x30b1: 0x0002, + 0x30b0: 0x0002, 0x30b1: 0x0002, 0x30b2: 0x0002, 0x30b3: 0x0002, 0x30b4: 0x0002, 0x30b5: 0x0002, + 0x30b6: 0x0002, // Block 0xc3, offset 0x30c0 0x30c0: 0x0002, 0x30c1: 0x0002, 0x30c2: 0x0002, 0x30c3: 0x0002, 0x30c4: 0x0002, 0x30c5: 0x0002, 0x30c6: 0x0002, 0x30c7: 0x0002, 0x30c8: 0x0002, 0x30c9: 0x0002, 0x30ca: 0x0002, 0x30cb: 
0x0002, 0x30cc: 0x0002, 0x30cd: 0x0002, 0x30ce: 0x0002, 0x30cf: 0x0002, 0x30d0: 0x0002, 0x30d1: 0x0002, - 0x30d2: 0x0002, 0x30d3: 0x0002, 0x30d4: 0x0002, 0x30d5: 0x0002, 0x30d6: 0x0002, 0x30d7: 0x0002, - 0x30d8: 0x0002, 0x30d9: 0x0002, 0x30da: 0x0002, 0x30db: 0x0002, 0x30dc: 0x0002, 0x30dd: 0x0002, - 0x30de: 0x0002, 0x30df: 0x0002, 0x30e0: 0x0002, 0x30e1: 0x0002, 0x30e2: 0x0002, 0x30e3: 0x0002, - 0x30e4: 0x0002, 0x30e5: 0x0002, 0x30e6: 0x0002, 0x30e7: 0x0002, 0x30e8: 0x0002, 0x30e9: 0x0002, - 0x30ea: 0x0002, 0x30eb: 0x0002, 0x30ec: 0x0002, 0x30ed: 0x0002, 0x30ee: 0x0002, 0x30ef: 0x0002, - 0x30f0: 0x0002, 0x30f1: 0x0002, 0x30f2: 0x0002, 0x30f3: 0x0002, 0x30f4: 0x0002, 0x30f5: 0x0002, - 0x30f6: 0x0002, 0x30f7: 0x0002, + 0x30d2: 0x0002, 0x30d3: 0x0002, 0x30d4: 0x0002, 0x30d5: 0x0002, + 0x30ff: 0x0002, // Block 0xc4, offset 0x3100 0x3100: 0x0002, 0x3101: 0x0002, 0x3102: 0x0002, 0x3103: 0x0002, 0x3104: 0x0002, 0x3105: 0x0002, 0x3106: 0x0002, 0x3107: 0x0002, 0x3108: 0x0002, 0x3109: 0x0002, 0x310a: 0x0002, 0x310b: 0x0002, 0x310c: 0x0002, 0x310d: 0x0002, 0x310e: 0x0002, 0x310f: 0x0002, 0x3110: 0x0002, 0x3111: 0x0002, - 0x3112: 0x0002, 0x3113: 0x0002, 0x3114: 0x0002, 0x3115: 0x0002, - 0x313f: 0x0002, + 0x3112: 0x0002, 0x3113: 0x0002, 0x3114: 0x0002, 0x3115: 0x0002, 0x3116: 0x0002, 0x3117: 0x0002, + 0x3118: 0x0002, 0x3119: 0x0002, 0x311a: 0x0002, 0x311b: 0x0002, 0x311c: 0x0002, 0x311d: 0x0002, + 0x311e: 0x0002, // Block 0xc5, offset 0x3140 0x3140: 0x0002, 0x3141: 0x0002, 0x3142: 0x0002, 0x3143: 0x0002, 0x3144: 0x0002, 0x3145: 0x0002, - 0x3146: 0x0002, 0x3147: 0x0002, 0x3148: 0x0002, + 0x3146: 0x0002, 0x3147: 0x0002, 0x3148: 0x0002, 0x3149: 0x0002, 0x314a: 0x0002, 0x314b: 0x0002, + 0x314c: 0x0002, 0x314d: 0x0002, 0x314e: 0x0002, 0x314f: 0x0002, 0x3150: 0x0002, 0x3151: 0x0002, + 0x3152: 0x0002, 0x3153: 0x0002, 0x3154: 0x0002, 0x3155: 0x0002, 0x3156: 0x0002, 0x3157: 0x0002, + 0x3158: 0x0002, 0x3159: 0x0002, 0x315a: 0x0002, 0x315b: 0x0002, 0x315c: 0x0002, 0x315d: 0x0002, + 
0x315e: 0x0002, 0x315f: 0x0002, 0x3160: 0x0002, 0x3161: 0x0002, 0x3162: 0x0002, 0x3163: 0x0002, + 0x3164: 0x0002, 0x3165: 0x0002, 0x3166: 0x0002, 0x3167: 0x0002, 0x3168: 0x0002, 0x3169: 0x0002, + 0x316a: 0x0002, 0x316b: 0x0002, 0x316c: 0x0002, 0x316d: 0x0002, 0x316e: 0x0002, 0x316f: 0x0002, + 0x3170: 0x0002, 0x3171: 0x0002, 0x3172: 0x0002, // Block 0xc6, offset 0x3180 0x31b0: 0x0002, 0x31b1: 0x0002, 0x31b2: 0x0002, 0x31b3: 0x0002, 0x31b5: 0x0002, 0x31b6: 0x0002, 0x31b7: 0x0002, 0x31b8: 0x0002, 0x31b9: 0x0002, 0x31ba: 0x0002, 0x31bb: 0x0002, @@ -1273,9 +1273,9 @@ var stringWidthValues = [15808]uint8{ 0x3604: 0x0001, 0x3605: 0x0001, 0x3606: 0x0001, 0x3607: 0x0001, 0x3608: 0x0001, 0x3609: 0x0001, 0x360a: 0x0001, // Block 0xd9, offset 0x3640 - 0x3644: 0x0004, + 0x3644: 0x0002, // Block 0xda, offset 0x3680 - 0x368f: 0x0004, + 0x368f: 0x0002, // Block 0xdb, offset 0x36c0 0x36c0: 0x0003, 0x36c1: 0x0003, 0x36c2: 0x0003, 0x36c3: 0x0003, 0x36c4: 0x0003, 0x36c5: 0x0003, 0x36c6: 0x0003, 0x36c7: 0x0003, 0x36c8: 0x0003, 0x36c9: 0x0003, 0x36ca: 0x0003, @@ -1302,246 +1302,229 @@ var stringWidthValues = [15808]uint8{ // Block 0xdd, offset 0x3740 0x3740: 0x0003, 0x3741: 0x0003, 0x3742: 0x0003, 0x3743: 0x0003, 0x3744: 0x0003, 0x3745: 0x0003, 0x3746: 0x0003, 0x3747: 0x0003, 0x3748: 0x0003, 0x3749: 0x0003, 0x374a: 0x0003, 0x374b: 0x0003, - 0x374c: 0x0003, 0x374d: 0x0003, 0x374e: 0x0004, 0x374f: 0x0003, 0x3750: 0x0003, 0x3751: 0x0004, - 0x3752: 0x0004, 0x3753: 0x0004, 0x3754: 0x0004, 0x3755: 0x0004, 0x3756: 0x0004, 0x3757: 0x0004, - 0x3758: 0x0004, 0x3759: 0x0004, 0x375a: 0x0004, 0x375b: 0x0003, 0x375c: 0x0003, 0x375d: 0x0003, + 0x374c: 0x0003, 0x374d: 0x0003, 0x374e: 0x0002, 0x374f: 0x0003, 0x3750: 0x0003, 0x3751: 0x0002, + 0x3752: 0x0002, 0x3753: 0x0002, 0x3754: 0x0002, 0x3755: 0x0002, 0x3756: 0x0002, 0x3757: 0x0002, + 0x3758: 0x0002, 0x3759: 0x0002, 0x375a: 0x0002, 0x375b: 0x0003, 0x375c: 0x0003, 0x375d: 0x0003, 0x375e: 0x0003, 0x375f: 0x0003, 0x3760: 0x0003, 0x3761: 0x0003, 0x3762: 
0x0003, 0x3763: 0x0003, 0x3764: 0x0003, 0x3765: 0x0003, 0x3766: 0x0003, 0x3767: 0x0003, 0x3768: 0x0003, 0x3769: 0x0003, 0x376a: 0x0003, 0x376b: 0x0003, 0x376c: 0x0003, // Block 0xde, offset 0x3780 - 0x3780: 0x0002, 0x3781: 0x0004, 0x3782: 0x0002, - 0x3790: 0x0002, 0x3791: 0x0002, - 0x3792: 0x0002, 0x3793: 0x0002, 0x3794: 0x0002, 0x3795: 0x0002, 0x3796: 0x0002, 0x3797: 0x0002, - 0x3798: 0x0002, 0x3799: 0x0002, 0x379a: 0x0004, 0x379b: 0x0002, 0x379c: 0x0002, 0x379d: 0x0002, - 0x379e: 0x0002, 0x379f: 0x0002, 0x37a0: 0x0002, 0x37a1: 0x0002, 0x37a2: 0x0002, 0x37a3: 0x0002, - 0x37a4: 0x0002, 0x37a5: 0x0002, 0x37a6: 0x0002, 0x37a7: 0x0002, 0x37a8: 0x0002, 0x37a9: 0x0002, - 0x37aa: 0x0002, 0x37ab: 0x0002, 0x37ac: 0x0002, 0x37ad: 0x0002, 0x37ae: 0x0002, 0x37af: 0x0004, - 0x37b0: 0x0002, 0x37b1: 0x0002, 0x37b2: 0x0004, 0x37b3: 0x0004, 0x37b4: 0x0004, 0x37b5: 0x0004, - 0x37b6: 0x0004, 0x37b7: 0x0002, 0x37b8: 0x0004, 0x37b9: 0x0004, 0x37ba: 0x0004, 0x37bb: 0x0002, + 0x37a6: 0x0002, 0x37a7: 0x0002, 0x37a8: 0x0002, 0x37a9: 0x0002, + 0x37aa: 0x0002, 0x37ab: 0x0002, 0x37ac: 0x0002, 0x37ad: 0x0002, 0x37ae: 0x0002, 0x37af: 0x0002, + 0x37b0: 0x0002, 0x37b1: 0x0002, 0x37b2: 0x0002, 0x37b3: 0x0002, 0x37b4: 0x0002, 0x37b5: 0x0002, + 0x37b6: 0x0002, 0x37b7: 0x0002, 0x37b8: 0x0002, 0x37b9: 0x0002, 0x37ba: 0x0002, 0x37bb: 0x0002, + 0x37bc: 0x0002, 0x37bd: 0x0002, 0x37be: 0x0002, 0x37bf: 0x0002, // Block 0xdf, offset 0x37c0 - 0x37c0: 0x0002, 0x37c1: 0x0002, 0x37c2: 0x0002, 0x37c3: 0x0002, 0x37c4: 0x0002, 0x37c5: 0x0002, - 0x37c6: 0x0002, 0x37c7: 0x0002, 0x37c8: 0x0002, - 0x37d0: 0x0004, 0x37d1: 0x0004, - 0x37e0: 0x0002, 0x37e1: 0x0002, 0x37e2: 0x0002, 0x37e3: 0x0002, - 0x37e4: 0x0002, 0x37e5: 0x0002, + 0x37c0: 0x0002, 0x37c1: 0x0002, 0x37c2: 0x0002, + 0x37d0: 0x0002, 0x37d1: 0x0002, + 0x37d2: 0x0002, 0x37d3: 0x0002, 0x37d4: 0x0002, 0x37d5: 0x0002, 0x37d6: 0x0002, 0x37d7: 0x0002, + 0x37d8: 0x0002, 0x37d9: 0x0002, 0x37da: 0x0002, 0x37db: 0x0002, 0x37dc: 0x0002, 0x37dd: 0x0002, + 0x37de: 
0x0002, 0x37df: 0x0002, 0x37e0: 0x0002, 0x37e1: 0x0002, 0x37e2: 0x0002, 0x37e3: 0x0002, + 0x37e4: 0x0002, 0x37e5: 0x0002, 0x37e6: 0x0002, 0x37e7: 0x0002, 0x37e8: 0x0002, 0x37e9: 0x0002, + 0x37ea: 0x0002, 0x37eb: 0x0002, 0x37ec: 0x0002, 0x37ed: 0x0002, 0x37ee: 0x0002, 0x37ef: 0x0002, + 0x37f0: 0x0002, 0x37f1: 0x0002, 0x37f2: 0x0002, 0x37f3: 0x0002, 0x37f4: 0x0002, 0x37f5: 0x0002, + 0x37f6: 0x0002, 0x37f7: 0x0002, 0x37f8: 0x0002, 0x37f9: 0x0002, 0x37fa: 0x0002, 0x37fb: 0x0002, // Block 0xe0, offset 0x3800 - 0x3800: 0x0004, 0x3801: 0x0004, 0x3802: 0x0004, 0x3803: 0x0004, 0x3804: 0x0004, 0x3805: 0x0004, - 0x3806: 0x0004, 0x3807: 0x0004, 0x3808: 0x0004, 0x3809: 0x0004, 0x380a: 0x0004, 0x380b: 0x0004, - 0x380c: 0x0004, 0x380d: 0x0004, 0x380e: 0x0004, 0x380f: 0x0004, 0x3810: 0x0004, 0x3811: 0x0004, - 0x3812: 0x0004, 0x3813: 0x0004, 0x3814: 0x0004, 0x3815: 0x0004, 0x3816: 0x0004, 0x3817: 0x0004, - 0x3818: 0x0004, 0x3819: 0x0004, 0x381a: 0x0004, 0x381b: 0x0004, 0x381c: 0x0004, 0x381d: 0x0004, - 0x381e: 0x0004, 0x381f: 0x0004, 0x3820: 0x0004, - 0x382d: 0x0004, 0x382e: 0x0004, 0x382f: 0x0004, - 0x3830: 0x0004, 0x3831: 0x0004, 0x3832: 0x0004, 0x3833: 0x0004, 0x3834: 0x0004, 0x3835: 0x0004, - 0x3837: 0x0004, 0x3838: 0x0004, 0x3839: 0x0004, 0x383a: 0x0004, 0x383b: 0x0004, - 0x383c: 0x0004, 0x383d: 0x0004, 0x383e: 0x0004, 0x383f: 0x0004, + 0x3800: 0x0002, 0x3801: 0x0002, 0x3802: 0x0002, 0x3803: 0x0002, 0x3804: 0x0002, 0x3805: 0x0002, + 0x3806: 0x0002, 0x3807: 0x0002, 0x3808: 0x0002, + 0x3810: 0x0002, 0x3811: 0x0002, + 0x3820: 0x0002, 0x3821: 0x0002, 0x3822: 0x0002, 0x3823: 0x0002, + 0x3824: 0x0002, 0x3825: 0x0002, // Block 0xe1, offset 0x3840 - 0x3840: 0x0004, 0x3841: 0x0004, 0x3842: 0x0004, 0x3843: 0x0004, 0x3844: 0x0004, 0x3845: 0x0004, - 0x3846: 0x0004, 0x3847: 0x0004, 0x3848: 0x0004, 0x3849: 0x0004, 0x384a: 0x0004, 0x384b: 0x0004, - 0x384c: 0x0004, 0x384d: 0x0004, 0x384e: 0x0004, 0x384f: 0x0004, 0x3850: 0x0004, 0x3851: 0x0004, - 0x3852: 0x0004, 0x3853: 0x0004, 0x3854: 0x0004, 
0x3855: 0x0004, 0x3856: 0x0004, 0x3857: 0x0004, - 0x3858: 0x0004, 0x3859: 0x0004, 0x385a: 0x0004, 0x385b: 0x0004, 0x385c: 0x0004, 0x385d: 0x0004, - 0x385e: 0x0004, 0x385f: 0x0004, 0x3860: 0x0004, 0x3861: 0x0004, 0x3862: 0x0004, 0x3863: 0x0004, - 0x3864: 0x0004, 0x3865: 0x0004, 0x3866: 0x0004, 0x3867: 0x0004, 0x3868: 0x0004, 0x3869: 0x0004, - 0x386a: 0x0004, 0x386b: 0x0004, 0x386c: 0x0004, 0x386d: 0x0004, 0x386e: 0x0004, 0x386f: 0x0004, - 0x3870: 0x0004, 0x3871: 0x0004, 0x3872: 0x0004, 0x3873: 0x0004, 0x3874: 0x0004, 0x3875: 0x0004, - 0x3876: 0x0004, 0x3877: 0x0004, 0x3878: 0x0004, 0x3879: 0x0004, 0x387a: 0x0004, 0x387b: 0x0004, - 0x387c: 0x0004, 0x387e: 0x0004, 0x387f: 0x0004, + 0x3840: 0x0002, 0x3841: 0x0002, 0x3842: 0x0002, 0x3843: 0x0002, 0x3844: 0x0002, 0x3845: 0x0002, + 0x3846: 0x0002, 0x3847: 0x0002, 0x3848: 0x0002, 0x3849: 0x0002, 0x384a: 0x0002, 0x384b: 0x0002, + 0x384c: 0x0002, 0x384d: 0x0002, 0x384e: 0x0002, 0x384f: 0x0002, 0x3850: 0x0002, 0x3851: 0x0002, + 0x3852: 0x0002, 0x3853: 0x0002, 0x3854: 0x0002, 0x3855: 0x0002, 0x3856: 0x0002, 0x3857: 0x0002, + 0x3858: 0x0002, 0x3859: 0x0002, 0x385a: 0x0002, 0x385b: 0x0002, 0x385c: 0x0002, 0x385d: 0x0002, + 0x385e: 0x0002, 0x385f: 0x0002, 0x3860: 0x0002, + 0x386d: 0x0002, 0x386e: 0x0002, 0x386f: 0x0002, + 0x3870: 0x0002, 0x3871: 0x0002, 0x3872: 0x0002, 0x3873: 0x0002, 0x3874: 0x0002, 0x3875: 0x0002, + 0x3877: 0x0002, 0x3878: 0x0002, 0x3879: 0x0002, 0x387a: 0x0002, 0x387b: 0x0002, + 0x387c: 0x0002, 0x387d: 0x0002, 0x387e: 0x0002, 0x387f: 0x0002, // Block 0xe2, offset 0x3880 - 0x3880: 0x0004, 0x3881: 0x0004, 0x3882: 0x0004, 0x3883: 0x0004, 0x3884: 0x0004, 0x3885: 0x0004, - 0x3886: 0x0004, 0x3887: 0x0004, 0x3888: 0x0004, 0x3889: 0x0004, 0x388a: 0x0004, 0x388b: 0x0004, - 0x388c: 0x0004, 0x388d: 0x0004, 0x388e: 0x0004, 0x388f: 0x0004, 0x3890: 0x0004, 0x3891: 0x0004, - 0x3892: 0x0004, 0x3893: 0x0004, - 0x38a0: 0x0004, 0x38a1: 0x0004, 0x38a2: 0x0004, 0x38a3: 0x0004, - 0x38a4: 0x0004, 0x38a5: 0x0004, 0x38a6: 0x0004, 
0x38a7: 0x0004, 0x38a8: 0x0004, 0x38a9: 0x0004, - 0x38aa: 0x0004, 0x38ab: 0x0004, 0x38ac: 0x0004, 0x38ad: 0x0004, 0x38ae: 0x0004, 0x38af: 0x0004, - 0x38b0: 0x0004, 0x38b1: 0x0004, 0x38b2: 0x0004, 0x38b3: 0x0004, 0x38b4: 0x0004, 0x38b5: 0x0004, - 0x38b6: 0x0004, 0x38b7: 0x0004, 0x38b8: 0x0004, 0x38b9: 0x0004, 0x38ba: 0x0004, 0x38bb: 0x0004, - 0x38bc: 0x0004, 0x38bd: 0x0004, 0x38be: 0x0004, 0x38bf: 0x0004, + 0x3880: 0x0002, 0x3881: 0x0002, 0x3882: 0x0002, 0x3883: 0x0002, 0x3884: 0x0002, 0x3885: 0x0002, + 0x3886: 0x0002, 0x3887: 0x0002, 0x3888: 0x0002, 0x3889: 0x0002, 0x388a: 0x0002, 0x388b: 0x0002, + 0x388c: 0x0002, 0x388d: 0x0002, 0x388e: 0x0002, 0x388f: 0x0002, 0x3890: 0x0002, 0x3891: 0x0002, + 0x3892: 0x0002, 0x3893: 0x0002, 0x3894: 0x0002, 0x3895: 0x0002, 0x3896: 0x0002, 0x3897: 0x0002, + 0x3898: 0x0002, 0x3899: 0x0002, 0x389a: 0x0002, 0x389b: 0x0002, 0x389c: 0x0002, 0x389d: 0x0002, + 0x389e: 0x0002, 0x389f: 0x0002, 0x38a0: 0x0002, 0x38a1: 0x0002, 0x38a2: 0x0002, 0x38a3: 0x0002, + 0x38a4: 0x0002, 0x38a5: 0x0002, 0x38a6: 0x0002, 0x38a7: 0x0002, 0x38a8: 0x0002, 0x38a9: 0x0002, + 0x38aa: 0x0002, 0x38ab: 0x0002, 0x38ac: 0x0002, 0x38ad: 0x0002, 0x38ae: 0x0002, 0x38af: 0x0002, + 0x38b0: 0x0002, 0x38b1: 0x0002, 0x38b2: 0x0002, 0x38b3: 0x0002, 0x38b4: 0x0002, 0x38b5: 0x0002, + 0x38b6: 0x0002, 0x38b7: 0x0002, 0x38b8: 0x0002, 0x38b9: 0x0002, 0x38ba: 0x0002, 0x38bb: 0x0002, + 0x38bc: 0x0002, 0x38be: 0x0002, 0x38bf: 0x0002, // Block 0xe3, offset 0x38c0 - 0x38c0: 0x0004, 0x38c1: 0x0004, 0x38c2: 0x0004, 0x38c3: 0x0004, 0x38c4: 0x0004, 0x38c5: 0x0004, - 0x38c6: 0x0004, 0x38c7: 0x0004, 0x38c8: 0x0004, 0x38c9: 0x0004, 0x38ca: 0x0004, - 0x38cf: 0x0004, 0x38d0: 0x0004, 0x38d1: 0x0004, - 0x38d2: 0x0004, 0x38d3: 0x0004, - 0x38e0: 0x0004, 0x38e1: 0x0004, 0x38e2: 0x0004, 0x38e3: 0x0004, - 0x38e4: 0x0004, 0x38e5: 0x0004, 0x38e6: 0x0004, 0x38e7: 0x0004, 0x38e8: 0x0004, 0x38e9: 0x0004, - 0x38ea: 0x0004, 0x38eb: 0x0004, 0x38ec: 0x0004, 0x38ed: 0x0004, 0x38ee: 0x0004, 0x38ef: 0x0004, - 
0x38f0: 0x0004, 0x38f4: 0x0004, - 0x38f8: 0x0004, 0x38f9: 0x0004, 0x38fa: 0x0004, 0x38fb: 0x0002, + 0x38c0: 0x0002, 0x38c1: 0x0002, 0x38c2: 0x0002, 0x38c3: 0x0002, 0x38c4: 0x0002, 0x38c5: 0x0002, + 0x38c6: 0x0002, 0x38c7: 0x0002, 0x38c8: 0x0002, 0x38c9: 0x0002, 0x38ca: 0x0002, 0x38cb: 0x0002, + 0x38cc: 0x0002, 0x38cd: 0x0002, 0x38ce: 0x0002, 0x38cf: 0x0002, 0x38d0: 0x0002, 0x38d1: 0x0002, + 0x38d2: 0x0002, 0x38d3: 0x0002, + 0x38e0: 0x0002, 0x38e1: 0x0002, 0x38e2: 0x0002, 0x38e3: 0x0002, + 0x38e4: 0x0002, 0x38e5: 0x0002, 0x38e6: 0x0002, 0x38e7: 0x0002, 0x38e8: 0x0002, 0x38e9: 0x0002, + 0x38ea: 0x0002, 0x38eb: 0x0002, 0x38ec: 0x0002, 0x38ed: 0x0002, 0x38ee: 0x0002, 0x38ef: 0x0002, + 0x38f0: 0x0002, 0x38f1: 0x0002, 0x38f2: 0x0002, 0x38f3: 0x0002, 0x38f4: 0x0002, 0x38f5: 0x0002, + 0x38f6: 0x0002, 0x38f7: 0x0002, 0x38f8: 0x0002, 0x38f9: 0x0002, 0x38fa: 0x0002, 0x38fb: 0x0002, 0x38fc: 0x0002, 0x38fd: 0x0002, 0x38fe: 0x0002, 0x38ff: 0x0002, // Block 0xe4, offset 0x3900 - 0x3900: 0x0004, 0x3901: 0x0004, 0x3902: 0x0004, 0x3903: 0x0004, 0x3904: 0x0004, 0x3905: 0x0004, - 0x3906: 0x0004, 0x3907: 0x0004, 0x3908: 0x0004, 0x3909: 0x0004, 0x390a: 0x0004, 0x390b: 0x0004, - 0x390c: 0x0004, 0x390d: 0x0004, 0x390e: 0x0004, 0x390f: 0x0004, 0x3910: 0x0004, 0x3911: 0x0004, - 0x3912: 0x0004, 0x3913: 0x0004, 0x3914: 0x0004, 0x3915: 0x0004, 0x3916: 0x0004, 0x3917: 0x0004, - 0x3918: 0x0004, 0x3919: 0x0004, 0x391a: 0x0004, 0x391b: 0x0004, 0x391c: 0x0004, 0x391d: 0x0004, - 0x391e: 0x0004, 0x391f: 0x0004, 0x3920: 0x0004, 0x3921: 0x0004, 0x3922: 0x0004, 0x3923: 0x0004, - 0x3924: 0x0004, 0x3925: 0x0004, 0x3926: 0x0004, 0x3927: 0x0004, 0x3928: 0x0004, 0x3929: 0x0004, - 0x392a: 0x0004, 0x392b: 0x0004, 0x392c: 0x0004, 0x392d: 0x0004, 0x392e: 0x0004, 0x392f: 0x0004, - 0x3930: 0x0004, 0x3931: 0x0004, 0x3932: 0x0004, 0x3933: 0x0004, 0x3934: 0x0004, 0x3935: 0x0004, - 0x3936: 0x0004, 0x3937: 0x0004, 0x3938: 0x0004, 0x3939: 0x0004, 0x393a: 0x0004, 0x393b: 0x0004, - 0x393c: 0x0004, 0x393d: 0x0004, 0x393e: 
0x0004, + 0x3900: 0x0002, 0x3901: 0x0002, 0x3902: 0x0002, 0x3903: 0x0002, 0x3904: 0x0002, 0x3905: 0x0002, + 0x3906: 0x0002, 0x3907: 0x0002, 0x3908: 0x0002, 0x3909: 0x0002, 0x390a: 0x0002, + 0x390f: 0x0002, 0x3910: 0x0002, 0x3911: 0x0002, + 0x3912: 0x0002, 0x3913: 0x0002, + 0x3920: 0x0002, 0x3921: 0x0002, 0x3922: 0x0002, 0x3923: 0x0002, + 0x3924: 0x0002, 0x3925: 0x0002, 0x3926: 0x0002, 0x3927: 0x0002, 0x3928: 0x0002, 0x3929: 0x0002, + 0x392a: 0x0002, 0x392b: 0x0002, 0x392c: 0x0002, 0x392d: 0x0002, 0x392e: 0x0002, 0x392f: 0x0002, + 0x3930: 0x0002, 0x3934: 0x0002, + 0x3938: 0x0002, 0x3939: 0x0002, 0x393a: 0x0002, 0x393b: 0x0002, + 0x393c: 0x0002, 0x393d: 0x0002, 0x393e: 0x0002, 0x393f: 0x0002, // Block 0xe5, offset 0x3940 - 0x3940: 0x0004, 0x3942: 0x0004, 0x3943: 0x0004, 0x3944: 0x0004, 0x3945: 0x0004, - 0x3946: 0x0004, 0x3947: 0x0004, 0x3948: 0x0004, 0x3949: 0x0004, 0x394a: 0x0004, 0x394b: 0x0004, - 0x394c: 0x0004, 0x394d: 0x0004, 0x394e: 0x0004, 0x394f: 0x0004, 0x3950: 0x0004, 0x3951: 0x0004, - 0x3952: 0x0004, 0x3953: 0x0004, 0x3954: 0x0004, 0x3955: 0x0004, 0x3956: 0x0004, 0x3957: 0x0004, - 0x3958: 0x0004, 0x3959: 0x0004, 0x395a: 0x0004, 0x395b: 0x0004, 0x395c: 0x0004, 0x395d: 0x0004, - 0x395e: 0x0004, 0x395f: 0x0004, 0x3960: 0x0004, 0x3961: 0x0004, 0x3962: 0x0004, 0x3963: 0x0004, - 0x3964: 0x0004, 0x3965: 0x0004, 0x3966: 0x0004, 0x3967: 0x0004, 0x3968: 0x0004, 0x3969: 0x0004, - 0x396a: 0x0004, 0x396b: 0x0004, 0x396c: 0x0004, 0x396d: 0x0004, 0x396e: 0x0004, 0x396f: 0x0004, - 0x3970: 0x0004, 0x3971: 0x0004, 0x3972: 0x0004, 0x3973: 0x0004, 0x3974: 0x0004, 0x3975: 0x0004, - 0x3976: 0x0004, 0x3977: 0x0004, 0x3978: 0x0004, 0x3979: 0x0004, 0x397a: 0x0004, 0x397b: 0x0004, - 0x397c: 0x0004, 0x397d: 0x0004, 0x397e: 0x0004, 0x397f: 0x0004, + 0x3940: 0x0002, 0x3941: 0x0002, 0x3942: 0x0002, 0x3943: 0x0002, 0x3944: 0x0002, 0x3945: 0x0002, + 0x3946: 0x0002, 0x3947: 0x0002, 0x3948: 0x0002, 0x3949: 0x0002, 0x394a: 0x0002, 0x394b: 0x0002, + 0x394c: 0x0002, 0x394d: 0x0002, 0x394e: 
0x0002, 0x394f: 0x0002, 0x3950: 0x0002, 0x3951: 0x0002, + 0x3952: 0x0002, 0x3953: 0x0002, 0x3954: 0x0002, 0x3955: 0x0002, 0x3956: 0x0002, 0x3957: 0x0002, + 0x3958: 0x0002, 0x3959: 0x0002, 0x395a: 0x0002, 0x395b: 0x0002, 0x395c: 0x0002, 0x395d: 0x0002, + 0x395e: 0x0002, 0x395f: 0x0002, 0x3960: 0x0002, 0x3961: 0x0002, 0x3962: 0x0002, 0x3963: 0x0002, + 0x3964: 0x0002, 0x3965: 0x0002, 0x3966: 0x0002, 0x3967: 0x0002, 0x3968: 0x0002, 0x3969: 0x0002, + 0x396a: 0x0002, 0x396b: 0x0002, 0x396c: 0x0002, 0x396d: 0x0002, 0x396e: 0x0002, 0x396f: 0x0002, + 0x3970: 0x0002, 0x3971: 0x0002, 0x3972: 0x0002, 0x3973: 0x0002, 0x3974: 0x0002, 0x3975: 0x0002, + 0x3976: 0x0002, 0x3977: 0x0002, 0x3978: 0x0002, 0x3979: 0x0002, 0x397a: 0x0002, 0x397b: 0x0002, + 0x397c: 0x0002, 0x397d: 0x0002, 0x397e: 0x0002, // Block 0xe6, offset 0x3980 - 0x3980: 0x0004, 0x3981: 0x0004, 0x3982: 0x0004, 0x3983: 0x0004, 0x3984: 0x0004, 0x3985: 0x0004, - 0x3986: 0x0004, 0x3987: 0x0004, 0x3988: 0x0004, 0x3989: 0x0004, 0x398a: 0x0004, 0x398b: 0x0004, - 0x398c: 0x0004, 0x398d: 0x0004, 0x398e: 0x0004, 0x398f: 0x0004, 0x3990: 0x0004, 0x3991: 0x0004, - 0x3992: 0x0004, 0x3993: 0x0004, 0x3994: 0x0004, 0x3995: 0x0004, 0x3996: 0x0004, 0x3997: 0x0004, - 0x3998: 0x0004, 0x3999: 0x0004, 0x399a: 0x0004, 0x399b: 0x0004, 0x399c: 0x0004, 0x399d: 0x0004, - 0x399e: 0x0004, 0x399f: 0x0004, 0x39a0: 0x0004, 0x39a1: 0x0004, 0x39a2: 0x0004, 0x39a3: 0x0004, - 0x39a4: 0x0004, 0x39a5: 0x0004, 0x39a6: 0x0004, 0x39a7: 0x0004, 0x39a8: 0x0004, 0x39a9: 0x0004, - 0x39aa: 0x0004, 0x39ab: 0x0004, 0x39ac: 0x0004, 0x39ad: 0x0004, 0x39ae: 0x0004, 0x39af: 0x0004, - 0x39b0: 0x0004, 0x39b1: 0x0004, 0x39b2: 0x0004, 0x39b3: 0x0004, 0x39b4: 0x0004, 0x39b5: 0x0004, - 0x39b6: 0x0004, 0x39b7: 0x0004, 0x39b8: 0x0004, 0x39b9: 0x0004, 0x39ba: 0x0004, 0x39bb: 0x0004, - 0x39bc: 0x0004, 0x39bd: 0x0004, 0x39be: 0x0004, 0x39bf: 0x0004, + 0x3980: 0x0002, 0x3982: 0x0002, 0x3983: 0x0002, 0x3984: 0x0002, 0x3985: 0x0002, + 0x3986: 0x0002, 0x3987: 0x0002, 0x3988: 0x0002, 
0x3989: 0x0002, 0x398a: 0x0002, 0x398b: 0x0002, + 0x398c: 0x0002, 0x398d: 0x0002, 0x398e: 0x0002, 0x398f: 0x0002, 0x3990: 0x0002, 0x3991: 0x0002, + 0x3992: 0x0002, 0x3993: 0x0002, 0x3994: 0x0002, 0x3995: 0x0002, 0x3996: 0x0002, 0x3997: 0x0002, + 0x3998: 0x0002, 0x3999: 0x0002, 0x399a: 0x0002, 0x399b: 0x0002, 0x399c: 0x0002, 0x399d: 0x0002, + 0x399e: 0x0002, 0x399f: 0x0002, 0x39a0: 0x0002, 0x39a1: 0x0002, 0x39a2: 0x0002, 0x39a3: 0x0002, + 0x39a4: 0x0002, 0x39a5: 0x0002, 0x39a6: 0x0002, 0x39a7: 0x0002, 0x39a8: 0x0002, 0x39a9: 0x0002, + 0x39aa: 0x0002, 0x39ab: 0x0002, 0x39ac: 0x0002, 0x39ad: 0x0002, 0x39ae: 0x0002, 0x39af: 0x0002, + 0x39b0: 0x0002, 0x39b1: 0x0002, 0x39b2: 0x0002, 0x39b3: 0x0002, 0x39b4: 0x0002, 0x39b5: 0x0002, + 0x39b6: 0x0002, 0x39b7: 0x0002, 0x39b8: 0x0002, 0x39b9: 0x0002, 0x39ba: 0x0002, 0x39bb: 0x0002, + 0x39bc: 0x0002, 0x39bd: 0x0002, 0x39be: 0x0002, 0x39bf: 0x0002, // Block 0xe7, offset 0x39c0 - 0x39c0: 0x0004, 0x39c1: 0x0004, 0x39c2: 0x0004, 0x39c3: 0x0004, 0x39c4: 0x0004, 0x39c5: 0x0004, - 0x39c6: 0x0004, 0x39c7: 0x0004, 0x39c8: 0x0004, 0x39c9: 0x0004, 0x39ca: 0x0004, 0x39cb: 0x0004, - 0x39cc: 0x0004, 0x39cd: 0x0004, 0x39ce: 0x0004, 0x39cf: 0x0004, 0x39d0: 0x0004, 0x39d1: 0x0004, - 0x39d2: 0x0004, 0x39d3: 0x0004, 0x39d4: 0x0004, 0x39d5: 0x0004, 0x39d6: 0x0004, 0x39d7: 0x0004, - 0x39d8: 0x0004, 0x39d9: 0x0004, 0x39da: 0x0004, 0x39db: 0x0004, 0x39dc: 0x0004, 0x39dd: 0x0004, - 0x39de: 0x0004, 0x39df: 0x0004, 0x39e0: 0x0004, 0x39e1: 0x0004, 0x39e2: 0x0004, 0x39e3: 0x0004, - 0x39e4: 0x0004, 0x39e5: 0x0004, 0x39e6: 0x0004, 0x39e7: 0x0004, 0x39e8: 0x0004, 0x39e9: 0x0004, - 0x39ea: 0x0004, 0x39eb: 0x0004, 0x39ec: 0x0004, 0x39ed: 0x0004, 0x39ee: 0x0004, 0x39ef: 0x0004, - 0x39f0: 0x0004, 0x39f1: 0x0004, 0x39f2: 0x0004, 0x39f3: 0x0004, 0x39f4: 0x0004, 0x39f5: 0x0004, - 0x39f6: 0x0004, 0x39f7: 0x0004, 0x39f8: 0x0004, 0x39f9: 0x0004, 0x39fa: 0x0004, 0x39fb: 0x0004, - 0x39fc: 0x0004, 0x39ff: 0x0004, + 0x39c0: 0x0002, 0x39c1: 0x0002, 0x39c2: 0x0002, 0x39c3: 
0x0002, 0x39c4: 0x0002, 0x39c5: 0x0002, + 0x39c6: 0x0002, 0x39c7: 0x0002, 0x39c8: 0x0002, 0x39c9: 0x0002, 0x39ca: 0x0002, 0x39cb: 0x0002, + 0x39cc: 0x0002, 0x39cd: 0x0002, 0x39ce: 0x0002, 0x39cf: 0x0002, 0x39d0: 0x0002, 0x39d1: 0x0002, + 0x39d2: 0x0002, 0x39d3: 0x0002, 0x39d4: 0x0002, 0x39d5: 0x0002, 0x39d6: 0x0002, 0x39d7: 0x0002, + 0x39d8: 0x0002, 0x39d9: 0x0002, 0x39da: 0x0002, 0x39db: 0x0002, 0x39dc: 0x0002, 0x39dd: 0x0002, + 0x39de: 0x0002, 0x39df: 0x0002, 0x39e0: 0x0002, 0x39e1: 0x0002, 0x39e2: 0x0002, 0x39e3: 0x0002, + 0x39e4: 0x0002, 0x39e5: 0x0002, 0x39e6: 0x0002, 0x39e7: 0x0002, 0x39e8: 0x0002, 0x39e9: 0x0002, + 0x39ea: 0x0002, 0x39eb: 0x0002, 0x39ec: 0x0002, 0x39ed: 0x0002, 0x39ee: 0x0002, 0x39ef: 0x0002, + 0x39f0: 0x0002, 0x39f1: 0x0002, 0x39f2: 0x0002, 0x39f3: 0x0002, 0x39f4: 0x0002, 0x39f5: 0x0002, + 0x39f6: 0x0002, 0x39f7: 0x0002, 0x39f8: 0x0002, 0x39f9: 0x0002, 0x39fa: 0x0002, 0x39fb: 0x0002, + 0x39fc: 0x0002, 0x39ff: 0x0002, // Block 0xe8, offset 0x3a00 - 0x3a00: 0x0004, 0x3a01: 0x0004, 0x3a02: 0x0004, 0x3a03: 0x0004, 0x3a04: 0x0004, 0x3a05: 0x0004, - 0x3a06: 0x0004, 0x3a07: 0x0004, 0x3a08: 0x0004, 0x3a09: 0x0004, 0x3a0a: 0x0004, 0x3a0b: 0x0004, - 0x3a0c: 0x0004, 0x3a0d: 0x0004, 0x3a0e: 0x0004, 0x3a0f: 0x0004, 0x3a10: 0x0004, 0x3a11: 0x0004, - 0x3a12: 0x0004, 0x3a13: 0x0004, 0x3a14: 0x0004, 0x3a15: 0x0004, 0x3a16: 0x0004, 0x3a17: 0x0004, - 0x3a18: 0x0004, 0x3a19: 0x0004, 0x3a1a: 0x0004, 0x3a1b: 0x0004, 0x3a1c: 0x0004, 0x3a1d: 0x0004, - 0x3a1e: 0x0004, 0x3a1f: 0x0004, 0x3a20: 0x0004, 0x3a21: 0x0004, 0x3a22: 0x0004, 0x3a23: 0x0004, - 0x3a24: 0x0004, 0x3a25: 0x0004, 0x3a26: 0x0004, 0x3a27: 0x0004, 0x3a28: 0x0004, 0x3a29: 0x0004, - 0x3a2a: 0x0004, 0x3a2b: 0x0004, 0x3a2c: 0x0004, 0x3a2d: 0x0004, 0x3a2e: 0x0004, 0x3a2f: 0x0004, - 0x3a30: 0x0004, 0x3a31: 0x0004, 0x3a32: 0x0004, 0x3a33: 0x0004, 0x3a34: 0x0004, 0x3a35: 0x0004, - 0x3a36: 0x0004, 0x3a37: 0x0004, 0x3a38: 0x0004, 0x3a39: 0x0004, 0x3a3a: 0x0004, 0x3a3b: 0x0004, - 0x3a3c: 0x0004, 0x3a3d: 0x0004, 
+ 0x3a00: 0x0002, 0x3a01: 0x0002, 0x3a02: 0x0002, 0x3a03: 0x0002, 0x3a04: 0x0002, 0x3a05: 0x0002, + 0x3a06: 0x0002, 0x3a07: 0x0002, 0x3a08: 0x0002, 0x3a09: 0x0002, 0x3a0a: 0x0002, 0x3a0b: 0x0002, + 0x3a0c: 0x0002, 0x3a0d: 0x0002, 0x3a0e: 0x0002, 0x3a0f: 0x0002, 0x3a10: 0x0002, 0x3a11: 0x0002, + 0x3a12: 0x0002, 0x3a13: 0x0002, 0x3a14: 0x0002, 0x3a15: 0x0002, 0x3a16: 0x0002, 0x3a17: 0x0002, + 0x3a18: 0x0002, 0x3a19: 0x0002, 0x3a1a: 0x0002, 0x3a1b: 0x0002, 0x3a1c: 0x0002, 0x3a1d: 0x0002, + 0x3a1e: 0x0002, 0x3a1f: 0x0002, 0x3a20: 0x0002, 0x3a21: 0x0002, 0x3a22: 0x0002, 0x3a23: 0x0002, + 0x3a24: 0x0002, 0x3a25: 0x0002, 0x3a26: 0x0002, 0x3a27: 0x0002, 0x3a28: 0x0002, 0x3a29: 0x0002, + 0x3a2a: 0x0002, 0x3a2b: 0x0002, 0x3a2c: 0x0002, 0x3a2d: 0x0002, 0x3a2e: 0x0002, 0x3a2f: 0x0002, + 0x3a30: 0x0002, 0x3a31: 0x0002, 0x3a32: 0x0002, 0x3a33: 0x0002, 0x3a34: 0x0002, 0x3a35: 0x0002, + 0x3a36: 0x0002, 0x3a37: 0x0002, 0x3a38: 0x0002, 0x3a39: 0x0002, 0x3a3a: 0x0002, 0x3a3b: 0x0002, + 0x3a3c: 0x0002, 0x3a3d: 0x0002, // Block 0xe9, offset 0x3a40 - 0x3a4b: 0x0004, - 0x3a4c: 0x0004, 0x3a4d: 0x0004, 0x3a4e: 0x0004, 0x3a50: 0x0004, 0x3a51: 0x0004, - 0x3a52: 0x0004, 0x3a53: 0x0004, 0x3a54: 0x0004, 0x3a55: 0x0004, 0x3a56: 0x0004, 0x3a57: 0x0004, - 0x3a58: 0x0004, 0x3a59: 0x0004, 0x3a5a: 0x0004, 0x3a5b: 0x0004, 0x3a5c: 0x0004, 0x3a5d: 0x0004, - 0x3a5e: 0x0004, 0x3a5f: 0x0004, 0x3a60: 0x0004, 0x3a61: 0x0004, 0x3a62: 0x0004, 0x3a63: 0x0004, - 0x3a64: 0x0004, 0x3a65: 0x0004, 0x3a66: 0x0004, 0x3a67: 0x0004, - 0x3a7a: 0x0004, + 0x3a4b: 0x0002, + 0x3a4c: 0x0002, 0x3a4d: 0x0002, 0x3a4e: 0x0002, 0x3a50: 0x0002, 0x3a51: 0x0002, + 0x3a52: 0x0002, 0x3a53: 0x0002, 0x3a54: 0x0002, 0x3a55: 0x0002, 0x3a56: 0x0002, 0x3a57: 0x0002, + 0x3a58: 0x0002, 0x3a59: 0x0002, 0x3a5a: 0x0002, 0x3a5b: 0x0002, 0x3a5c: 0x0002, 0x3a5d: 0x0002, + 0x3a5e: 0x0002, 0x3a5f: 0x0002, 0x3a60: 0x0002, 0x3a61: 0x0002, 0x3a62: 0x0002, 0x3a63: 0x0002, + 0x3a64: 0x0002, 0x3a65: 0x0002, 0x3a66: 0x0002, 0x3a67: 0x0002, + 0x3a7a: 0x0002, 
// Block 0xea, offset 0x3a80 - 0x3a95: 0x0004, 0x3a96: 0x0004, - 0x3aa4: 0x0004, + 0x3a95: 0x0002, 0x3a96: 0x0002, + 0x3aa4: 0x0002, // Block 0xeb, offset 0x3ac0 - 0x3afb: 0x0004, - 0x3afc: 0x0004, 0x3afd: 0x0004, 0x3afe: 0x0004, 0x3aff: 0x0004, + 0x3afb: 0x0002, + 0x3afc: 0x0002, 0x3afd: 0x0002, 0x3afe: 0x0002, 0x3aff: 0x0002, // Block 0xec, offset 0x3b00 - 0x3b00: 0x0004, 0x3b01: 0x0004, 0x3b02: 0x0004, 0x3b03: 0x0004, 0x3b04: 0x0004, 0x3b05: 0x0004, - 0x3b06: 0x0004, 0x3b07: 0x0004, 0x3b08: 0x0004, 0x3b09: 0x0004, 0x3b0a: 0x0004, 0x3b0b: 0x0004, - 0x3b0c: 0x0004, 0x3b0d: 0x0004, 0x3b0e: 0x0004, 0x3b0f: 0x0004, + 0x3b00: 0x0002, 0x3b01: 0x0002, 0x3b02: 0x0002, 0x3b03: 0x0002, 0x3b04: 0x0002, 0x3b05: 0x0002, + 0x3b06: 0x0002, 0x3b07: 0x0002, 0x3b08: 0x0002, 0x3b09: 0x0002, 0x3b0a: 0x0002, 0x3b0b: 0x0002, + 0x3b0c: 0x0002, 0x3b0d: 0x0002, 0x3b0e: 0x0002, 0x3b0f: 0x0002, // Block 0xed, offset 0x3b40 - 0x3b40: 0x0004, 0x3b41: 0x0004, 0x3b42: 0x0004, 0x3b43: 0x0004, 0x3b44: 0x0004, 0x3b45: 0x0004, - 0x3b4c: 0x0004, 0x3b50: 0x0004, 0x3b51: 0x0004, - 0x3b52: 0x0004, 0x3b55: 0x0004, 0x3b56: 0x0004, 0x3b57: 0x0004, - 0x3b5c: 0x0004, 0x3b5d: 0x0004, - 0x3b5e: 0x0004, 0x3b5f: 0x0004, - 0x3b6b: 0x0004, 0x3b6c: 0x0004, - 0x3b74: 0x0004, 0x3b75: 0x0004, - 0x3b76: 0x0004, 0x3b77: 0x0004, 0x3b78: 0x0004, 0x3b79: 0x0004, 0x3b7a: 0x0004, 0x3b7b: 0x0004, - 0x3b7c: 0x0004, + 0x3b40: 0x0002, 0x3b41: 0x0002, 0x3b42: 0x0002, 0x3b43: 0x0002, 0x3b44: 0x0002, 0x3b45: 0x0002, + 0x3b4c: 0x0002, 0x3b50: 0x0002, 0x3b51: 0x0002, + 0x3b52: 0x0002, 0x3b55: 0x0002, 0x3b56: 0x0002, 0x3b57: 0x0002, + 0x3b58: 0x0002, 0x3b5c: 0x0002, 0x3b5d: 0x0002, + 0x3b5e: 0x0002, 0x3b5f: 0x0002, + 0x3b6b: 0x0002, 0x3b6c: 0x0002, + 0x3b74: 0x0002, 0x3b75: 0x0002, + 0x3b76: 0x0002, 0x3b77: 0x0002, 0x3b78: 0x0002, 0x3b79: 0x0002, 0x3b7a: 0x0002, 0x3b7b: 0x0002, + 0x3b7c: 0x0002, // Block 0xee, offset 0x3b80 - 0x3ba0: 0x0004, 0x3ba1: 0x0004, 0x3ba2: 0x0004, 0x3ba3: 0x0004, - 0x3ba4: 0x0004, 0x3ba5: 0x0004, 0x3ba6: 
0x0004, 0x3ba7: 0x0004, 0x3ba8: 0x0004, 0x3ba9: 0x0004, - 0x3baa: 0x0004, 0x3bab: 0x0004, - 0x3bb0: 0x0004, + 0x3ba0: 0x0002, 0x3ba1: 0x0002, 0x3ba2: 0x0002, 0x3ba3: 0x0002, + 0x3ba4: 0x0002, 0x3ba5: 0x0002, 0x3ba6: 0x0002, 0x3ba7: 0x0002, 0x3ba8: 0x0002, 0x3ba9: 0x0002, + 0x3baa: 0x0002, 0x3bab: 0x0002, + 0x3bb0: 0x0002, // Block 0xef, offset 0x3bc0 - 0x3bcc: 0x0004, 0x3bcd: 0x0004, 0x3bce: 0x0004, 0x3bcf: 0x0004, 0x3bd0: 0x0004, 0x3bd1: 0x0004, - 0x3bd2: 0x0004, 0x3bd3: 0x0004, 0x3bd4: 0x0004, 0x3bd5: 0x0004, 0x3bd6: 0x0004, 0x3bd7: 0x0004, - 0x3bd8: 0x0004, 0x3bd9: 0x0004, 0x3bda: 0x0004, 0x3bdb: 0x0004, 0x3bdc: 0x0004, 0x3bdd: 0x0004, - 0x3bde: 0x0004, 0x3bdf: 0x0004, 0x3be0: 0x0004, 0x3be1: 0x0004, 0x3be2: 0x0004, 0x3be3: 0x0004, - 0x3be4: 0x0004, 0x3be5: 0x0004, 0x3be6: 0x0004, 0x3be7: 0x0004, 0x3be8: 0x0004, 0x3be9: 0x0004, - 0x3bea: 0x0004, 0x3beb: 0x0004, 0x3bec: 0x0004, 0x3bed: 0x0004, 0x3bee: 0x0004, 0x3bef: 0x0004, - 0x3bf0: 0x0004, 0x3bf1: 0x0004, 0x3bf2: 0x0004, 0x3bf3: 0x0004, 0x3bf4: 0x0004, 0x3bf5: 0x0004, - 0x3bf6: 0x0004, 0x3bf7: 0x0004, 0x3bf8: 0x0004, 0x3bf9: 0x0004, 0x3bfa: 0x0004, - 0x3bfc: 0x0004, 0x3bfd: 0x0004, 0x3bfe: 0x0004, 0x3bff: 0x0004, + 0x3bcc: 0x0002, 0x3bcd: 0x0002, 0x3bce: 0x0002, 0x3bcf: 0x0002, 0x3bd0: 0x0002, 0x3bd1: 0x0002, + 0x3bd2: 0x0002, 0x3bd3: 0x0002, 0x3bd4: 0x0002, 0x3bd5: 0x0002, 0x3bd6: 0x0002, 0x3bd7: 0x0002, + 0x3bd8: 0x0002, 0x3bd9: 0x0002, 0x3bda: 0x0002, 0x3bdb: 0x0002, 0x3bdc: 0x0002, 0x3bdd: 0x0002, + 0x3bde: 0x0002, 0x3bdf: 0x0002, 0x3be0: 0x0002, 0x3be1: 0x0002, 0x3be2: 0x0002, 0x3be3: 0x0002, + 0x3be4: 0x0002, 0x3be5: 0x0002, 0x3be6: 0x0002, 0x3be7: 0x0002, 0x3be8: 0x0002, 0x3be9: 0x0002, + 0x3bea: 0x0002, 0x3beb: 0x0002, 0x3bec: 0x0002, 0x3bed: 0x0002, 0x3bee: 0x0002, 0x3bef: 0x0002, + 0x3bf0: 0x0002, 0x3bf1: 0x0002, 0x3bf2: 0x0002, 0x3bf3: 0x0002, 0x3bf4: 0x0002, 0x3bf5: 0x0002, + 0x3bf6: 0x0002, 0x3bf7: 0x0002, 0x3bf8: 0x0002, 0x3bf9: 0x0002, 0x3bfa: 0x0002, + 0x3bfc: 0x0002, 0x3bfd: 0x0002, 0x3bfe: 
0x0002, 0x3bff: 0x0002, // Block 0xf0, offset 0x3c00 - 0x3c00: 0x0004, 0x3c01: 0x0004, 0x3c02: 0x0004, 0x3c03: 0x0004, 0x3c04: 0x0004, 0x3c05: 0x0004, - 0x3c07: 0x0004, 0x3c08: 0x0004, 0x3c09: 0x0004, 0x3c0a: 0x0004, 0x3c0b: 0x0004, - 0x3c0c: 0x0004, 0x3c0d: 0x0004, 0x3c0e: 0x0004, 0x3c0f: 0x0004, 0x3c10: 0x0004, 0x3c11: 0x0004, - 0x3c12: 0x0004, 0x3c13: 0x0004, 0x3c14: 0x0004, 0x3c15: 0x0004, 0x3c16: 0x0004, 0x3c17: 0x0004, - 0x3c18: 0x0004, 0x3c19: 0x0004, 0x3c1a: 0x0004, 0x3c1b: 0x0004, 0x3c1c: 0x0004, 0x3c1d: 0x0004, - 0x3c1e: 0x0004, 0x3c1f: 0x0004, 0x3c20: 0x0004, 0x3c21: 0x0004, 0x3c22: 0x0004, 0x3c23: 0x0004, - 0x3c24: 0x0004, 0x3c25: 0x0004, 0x3c26: 0x0004, 0x3c27: 0x0004, 0x3c28: 0x0004, 0x3c29: 0x0004, - 0x3c2a: 0x0004, 0x3c2b: 0x0004, 0x3c2c: 0x0004, 0x3c2d: 0x0004, 0x3c2e: 0x0004, 0x3c2f: 0x0004, - 0x3c30: 0x0004, 0x3c31: 0x0004, 0x3c32: 0x0004, 0x3c33: 0x0004, 0x3c34: 0x0004, 0x3c35: 0x0004, - 0x3c36: 0x0004, 0x3c37: 0x0004, 0x3c38: 0x0004, 0x3c39: 0x0004, 0x3c3a: 0x0004, 0x3c3b: 0x0004, - 0x3c3c: 0x0004, 0x3c3d: 0x0004, 0x3c3e: 0x0004, 0x3c3f: 0x0004, + 0x3c00: 0x0002, 0x3c01: 0x0002, 0x3c02: 0x0002, 0x3c03: 0x0002, 0x3c04: 0x0002, 0x3c05: 0x0002, + 0x3c07: 0x0002, 0x3c08: 0x0002, 0x3c09: 0x0002, 0x3c0a: 0x0002, 0x3c0b: 0x0002, + 0x3c0c: 0x0002, 0x3c0d: 0x0002, 0x3c0e: 0x0002, 0x3c0f: 0x0002, 0x3c10: 0x0002, 0x3c11: 0x0002, + 0x3c12: 0x0002, 0x3c13: 0x0002, 0x3c14: 0x0002, 0x3c15: 0x0002, 0x3c16: 0x0002, 0x3c17: 0x0002, + 0x3c18: 0x0002, 0x3c19: 0x0002, 0x3c1a: 0x0002, 0x3c1b: 0x0002, 0x3c1c: 0x0002, 0x3c1d: 0x0002, + 0x3c1e: 0x0002, 0x3c1f: 0x0002, 0x3c20: 0x0002, 0x3c21: 0x0002, 0x3c22: 0x0002, 0x3c23: 0x0002, + 0x3c24: 0x0002, 0x3c25: 0x0002, 0x3c26: 0x0002, 0x3c27: 0x0002, 0x3c28: 0x0002, 0x3c29: 0x0002, + 0x3c2a: 0x0002, 0x3c2b: 0x0002, 0x3c2c: 0x0002, 0x3c2d: 0x0002, 0x3c2e: 0x0002, 0x3c2f: 0x0002, + 0x3c30: 0x0002, 0x3c31: 0x0002, 0x3c32: 0x0002, 0x3c33: 0x0002, 0x3c34: 0x0002, 0x3c35: 0x0002, + 0x3c36: 0x0002, 0x3c37: 0x0002, 0x3c38: 0x0002, 
0x3c39: 0x0002, 0x3c3a: 0x0002, 0x3c3b: 0x0002, + 0x3c3c: 0x0002, 0x3c3d: 0x0002, 0x3c3e: 0x0002, 0x3c3f: 0x0002, // Block 0xf1, offset 0x3c40 - 0x3c70: 0x0004, 0x3c71: 0x0004, 0x3c72: 0x0004, 0x3c73: 0x0004, 0x3c74: 0x0004, 0x3c75: 0x0004, - 0x3c76: 0x0004, 0x3c77: 0x0004, 0x3c78: 0x0004, 0x3c79: 0x0004, 0x3c7a: 0x0004, 0x3c7b: 0x0004, - 0x3c7c: 0x0004, + 0x3c70: 0x0002, 0x3c71: 0x0002, 0x3c72: 0x0002, 0x3c73: 0x0002, 0x3c74: 0x0002, 0x3c75: 0x0002, + 0x3c76: 0x0002, 0x3c77: 0x0002, 0x3c78: 0x0002, 0x3c79: 0x0002, 0x3c7a: 0x0002, 0x3c7b: 0x0002, + 0x3c7c: 0x0002, // Block 0xf2, offset 0x3c80 - 0x3c80: 0x0004, 0x3c81: 0x0004, 0x3c82: 0x0004, 0x3c83: 0x0004, 0x3c84: 0x0004, 0x3c85: 0x0004, - 0x3c86: 0x0004, 0x3c87: 0x0004, 0x3c88: 0x0004, 0x3c89: 0x0004, - 0x3c8f: 0x0004, 0x3c90: 0x0004, 0x3c91: 0x0004, - 0x3c92: 0x0004, 0x3c93: 0x0004, 0x3c94: 0x0004, 0x3c95: 0x0004, 0x3c96: 0x0004, 0x3c97: 0x0004, - 0x3c98: 0x0004, 0x3c99: 0x0004, 0x3c9a: 0x0004, 0x3c9b: 0x0004, 0x3c9c: 0x0004, 0x3c9d: 0x0004, - 0x3c9e: 0x0004, 0x3c9f: 0x0004, 0x3ca0: 0x0004, 0x3ca1: 0x0004, 0x3ca2: 0x0004, 0x3ca3: 0x0004, - 0x3ca4: 0x0004, 0x3ca5: 0x0004, 0x3ca6: 0x0004, 0x3ca7: 0x0004, 0x3ca8: 0x0004, 0x3ca9: 0x0004, - 0x3caa: 0x0004, 0x3cab: 0x0004, 0x3cac: 0x0004, 0x3cad: 0x0004, 0x3cae: 0x0004, 0x3caf: 0x0004, - 0x3cb0: 0x0004, 0x3cb1: 0x0004, 0x3cb2: 0x0004, 0x3cb3: 0x0004, 0x3cb4: 0x0004, 0x3cb5: 0x0004, - 0x3cb6: 0x0004, 0x3cb7: 0x0004, 0x3cb8: 0x0004, 0x3cb9: 0x0004, 0x3cba: 0x0004, 0x3cbb: 0x0004, - 0x3cbc: 0x0004, 0x3cbd: 0x0004, 0x3cbe: 0x0004, 0x3cbf: 0x0004, + 0x3c80: 0x0002, 0x3c81: 0x0002, 0x3c82: 0x0002, 0x3c83: 0x0002, 0x3c84: 0x0002, 0x3c85: 0x0002, + 0x3c86: 0x0002, 0x3c87: 0x0002, 0x3c88: 0x0002, 0x3c89: 0x0002, 0x3c8a: 0x0002, + 0x3c8e: 0x0002, 0x3c8f: 0x0002, 0x3c90: 0x0002, 0x3c91: 0x0002, + 0x3c92: 0x0002, 0x3c93: 0x0002, 0x3c94: 0x0002, 0x3c95: 0x0002, 0x3c96: 0x0002, 0x3c97: 0x0002, + 0x3c98: 0x0002, 0x3c99: 0x0002, 0x3c9a: 0x0002, 0x3c9b: 0x0002, 0x3c9c: 0x0002, 0x3c9d: 
0x0002, + 0x3c9e: 0x0002, 0x3c9f: 0x0002, 0x3ca0: 0x0002, 0x3ca1: 0x0002, 0x3ca2: 0x0002, 0x3ca3: 0x0002, + 0x3ca4: 0x0002, 0x3ca5: 0x0002, 0x3ca6: 0x0002, 0x3ca7: 0x0002, 0x3ca8: 0x0002, 0x3ca9: 0x0002, + 0x3caa: 0x0002, 0x3cab: 0x0002, 0x3cac: 0x0002, 0x3cad: 0x0002, 0x3cae: 0x0002, 0x3caf: 0x0002, + 0x3cb0: 0x0002, 0x3cb1: 0x0002, 0x3cb2: 0x0002, 0x3cb3: 0x0002, 0x3cb4: 0x0002, 0x3cb5: 0x0002, + 0x3cb6: 0x0002, 0x3cb7: 0x0002, 0x3cb8: 0x0002, 0x3cb9: 0x0002, 0x3cba: 0x0002, 0x3cbb: 0x0002, + 0x3cbc: 0x0002, 0x3cbd: 0x0002, 0x3cbe: 0x0002, 0x3cbf: 0x0002, // Block 0xf3, offset 0x3cc0 - 0x3cc0: 0x0004, 0x3cc1: 0x0004, 0x3cc2: 0x0004, 0x3cc3: 0x0004, 0x3cc4: 0x0004, 0x3cc5: 0x0004, - 0x3cc6: 0x0004, - 0x3cce: 0x0004, 0x3ccf: 0x0004, 0x3cd0: 0x0004, 0x3cd1: 0x0004, - 0x3cd2: 0x0004, 0x3cd3: 0x0004, 0x3cd4: 0x0004, 0x3cd5: 0x0004, 0x3cd6: 0x0004, 0x3cd7: 0x0004, - 0x3cd8: 0x0004, 0x3cd9: 0x0004, 0x3cda: 0x0004, 0x3cdb: 0x0004, 0x3cdc: 0x0004, - 0x3cdf: 0x0004, 0x3ce0: 0x0004, 0x3ce1: 0x0004, 0x3ce2: 0x0004, 0x3ce3: 0x0004, - 0x3ce4: 0x0004, 0x3ce5: 0x0004, 0x3ce6: 0x0004, 0x3ce7: 0x0004, 0x3ce8: 0x0004, 0x3ce9: 0x0004, - 0x3cf0: 0x0004, 0x3cf1: 0x0004, 0x3cf2: 0x0004, 0x3cf3: 0x0004, 0x3cf4: 0x0004, 0x3cf5: 0x0004, - 0x3cf6: 0x0004, 0x3cf7: 0x0004, 0x3cf8: 0x0004, + 0x3cc0: 0x0002, 0x3cc1: 0x0002, 0x3cc2: 0x0002, 0x3cc3: 0x0002, 0x3cc4: 0x0002, 0x3cc5: 0x0002, + 0x3cc6: 0x0002, 0x3cc8: 0x0002, + 0x3ccd: 0x0002, 0x3cce: 0x0002, 0x3ccf: 0x0002, 0x3cd0: 0x0002, 0x3cd1: 0x0002, + 0x3cd2: 0x0002, 0x3cd3: 0x0002, 0x3cd4: 0x0002, 0x3cd5: 0x0002, 0x3cd6: 0x0002, 0x3cd7: 0x0002, + 0x3cd8: 0x0002, 0x3cd9: 0x0002, 0x3cda: 0x0002, 0x3cdb: 0x0002, 0x3cdc: 0x0002, + 0x3cdf: 0x0002, 0x3ce0: 0x0002, 0x3ce1: 0x0002, 0x3ce2: 0x0002, 0x3ce3: 0x0002, + 0x3ce4: 0x0002, 0x3ce5: 0x0002, 0x3ce6: 0x0002, 0x3ce7: 0x0002, 0x3ce8: 0x0002, 0x3ce9: 0x0002, + 0x3cea: 0x0002, 0x3cef: 0x0002, + 0x3cf0: 0x0002, 0x3cf1: 0x0002, 0x3cf2: 0x0002, 0x3cf3: 0x0002, 0x3cf4: 0x0002, 0x3cf5: 0x0002, + 0x3cf6: 
0x0002, 0x3cf7: 0x0002, 0x3cf8: 0x0002, // Block 0xf4, offset 0x3d00 - 0x3d00: 0x0002, 0x3d01: 0x0002, 0x3d02: 0x0002, 0x3d03: 0x0002, 0x3d04: 0x0002, 0x3d05: 0x0002, - 0x3d06: 0x0002, 0x3d07: 0x0002, 0x3d08: 0x0002, 0x3d09: 0x0002, 0x3d0a: 0x0002, 0x3d0b: 0x0002, - 0x3d0c: 0x0002, 0x3d0d: 0x0002, 0x3d0e: 0x0002, 0x3d0f: 0x0002, 0x3d10: 0x0002, 0x3d11: 0x0002, - 0x3d12: 0x0002, 0x3d13: 0x0002, 0x3d14: 0x0002, 0x3d15: 0x0002, 0x3d16: 0x0002, 0x3d17: 0x0002, - 0x3d18: 0x0002, 0x3d19: 0x0002, 0x3d1a: 0x0002, 0x3d1b: 0x0002, 0x3d1c: 0x0002, 0x3d1d: 0x0002, - 0x3d1e: 0x0002, 0x3d1f: 0x0002, 0x3d20: 0x0002, 0x3d21: 0x0002, 0x3d22: 0x0002, 0x3d23: 0x0002, - 0x3d24: 0x0002, 0x3d25: 0x0002, 0x3d26: 0x0002, 0x3d27: 0x0002, 0x3d28: 0x0002, 0x3d29: 0x0002, - 0x3d2a: 0x0002, 0x3d2b: 0x0002, 0x3d2c: 0x0002, 0x3d2d: 0x0002, 0x3d2e: 0x0002, 0x3d2f: 0x0002, - 0x3d30: 0x0002, 0x3d31: 0x0002, 0x3d32: 0x0002, 0x3d33: 0x0002, 0x3d34: 0x0002, 0x3d35: 0x0002, - 0x3d36: 0x0002, 0x3d37: 0x0002, 0x3d38: 0x0002, 0x3d39: 0x0002, 0x3d3a: 0x0002, 0x3d3b: 0x0002, - 0x3d3c: 0x0002, 0x3d3d: 0x0002, + 0x3d01: 0x0001, + 0x3d20: 0x0001, 0x3d21: 0x0001, 0x3d22: 0x0001, 0x3d23: 0x0001, + 0x3d24: 0x0001, 0x3d25: 0x0001, 0x3d26: 0x0001, 0x3d27: 0x0001, 0x3d28: 0x0001, 0x3d29: 0x0001, + 0x3d2a: 0x0001, 0x3d2b: 0x0001, 0x3d2c: 0x0001, 0x3d2d: 0x0001, 0x3d2e: 0x0001, 0x3d2f: 0x0001, + 0x3d30: 0x0001, 0x3d31: 0x0001, 0x3d32: 0x0001, 0x3d33: 0x0001, 0x3d34: 0x0001, 0x3d35: 0x0001, + 0x3d36: 0x0001, 0x3d37: 0x0001, 0x3d38: 0x0001, 0x3d39: 0x0001, 0x3d3a: 0x0001, 0x3d3b: 0x0001, + 0x3d3c: 0x0001, 0x3d3d: 0x0001, 0x3d3e: 0x0001, 0x3d3f: 0x0001, // Block 0xf5, offset 0x3d40 - 0x3d41: 0x0001, - 0x3d60: 0x0001, 0x3d61: 0x0001, 0x3d62: 0x0001, 0x3d63: 0x0001, - 0x3d64: 0x0001, 0x3d65: 0x0001, 0x3d66: 0x0001, 0x3d67: 0x0001, 0x3d68: 0x0001, 0x3d69: 0x0001, - 0x3d6a: 0x0001, 0x3d6b: 0x0001, 0x3d6c: 0x0001, 0x3d6d: 0x0001, 0x3d6e: 0x0001, 0x3d6f: 0x0001, - 0x3d70: 0x0001, 0x3d71: 0x0001, 0x3d72: 0x0001, 0x3d73: 0x0001, 
0x3d74: 0x0001, 0x3d75: 0x0001, - 0x3d76: 0x0001, 0x3d77: 0x0001, 0x3d78: 0x0001, 0x3d79: 0x0001, 0x3d7a: 0x0001, 0x3d7b: 0x0001, - 0x3d7c: 0x0001, 0x3d7d: 0x0001, 0x3d7e: 0x0001, 0x3d7f: 0x0001, - // Block 0xf6, offset 0x3d80 - 0x3d80: 0x0003, 0x3d81: 0x0003, 0x3d82: 0x0003, 0x3d83: 0x0003, 0x3d84: 0x0003, 0x3d85: 0x0003, - 0x3d86: 0x0003, 0x3d87: 0x0003, 0x3d88: 0x0003, 0x3d89: 0x0003, 0x3d8a: 0x0003, 0x3d8b: 0x0003, - 0x3d8c: 0x0003, 0x3d8d: 0x0003, 0x3d8e: 0x0003, 0x3d8f: 0x0003, 0x3d90: 0x0003, 0x3d91: 0x0003, - 0x3d92: 0x0003, 0x3d93: 0x0003, 0x3d94: 0x0003, 0x3d95: 0x0003, 0x3d96: 0x0003, 0x3d97: 0x0003, - 0x3d98: 0x0003, 0x3d99: 0x0003, 0x3d9a: 0x0003, 0x3d9b: 0x0003, 0x3d9c: 0x0003, 0x3d9d: 0x0003, - 0x3d9e: 0x0003, 0x3d9f: 0x0003, 0x3da0: 0x0003, 0x3da1: 0x0003, 0x3da2: 0x0003, 0x3da3: 0x0003, - 0x3da4: 0x0003, 0x3da5: 0x0003, 0x3da6: 0x0003, 0x3da7: 0x0003, 0x3da8: 0x0003, 0x3da9: 0x0003, - 0x3daa: 0x0003, 0x3dab: 0x0003, 0x3dac: 0x0003, 0x3dad: 0x0003, 0x3dae: 0x0003, 0x3daf: 0x0003, - 0x3db0: 0x0003, 0x3db1: 0x0003, 0x3db2: 0x0003, 0x3db3: 0x0003, 0x3db4: 0x0003, 0x3db5: 0x0003, - 0x3db6: 0x0003, 0x3db7: 0x0003, 0x3db8: 0x0003, 0x3db9: 0x0003, 0x3dba: 0x0003, 0x3dbb: 0x0003, - 0x3dbc: 0x0003, 0x3dbd: 0x0003, + 0x3d40: 0x0003, 0x3d41: 0x0003, 0x3d42: 0x0003, 0x3d43: 0x0003, 0x3d44: 0x0003, 0x3d45: 0x0003, + 0x3d46: 0x0003, 0x3d47: 0x0003, 0x3d48: 0x0003, 0x3d49: 0x0003, 0x3d4a: 0x0003, 0x3d4b: 0x0003, + 0x3d4c: 0x0003, 0x3d4d: 0x0003, 0x3d4e: 0x0003, 0x3d4f: 0x0003, 0x3d50: 0x0003, 0x3d51: 0x0003, + 0x3d52: 0x0003, 0x3d53: 0x0003, 0x3d54: 0x0003, 0x3d55: 0x0003, 0x3d56: 0x0003, 0x3d57: 0x0003, + 0x3d58: 0x0003, 0x3d59: 0x0003, 0x3d5a: 0x0003, 0x3d5b: 0x0003, 0x3d5c: 0x0003, 0x3d5d: 0x0003, + 0x3d5e: 0x0003, 0x3d5f: 0x0003, 0x3d60: 0x0003, 0x3d61: 0x0003, 0x3d62: 0x0003, 0x3d63: 0x0003, + 0x3d64: 0x0003, 0x3d65: 0x0003, 0x3d66: 0x0003, 0x3d67: 0x0003, 0x3d68: 0x0003, 0x3d69: 0x0003, + 0x3d6a: 0x0003, 0x3d6b: 0x0003, 0x3d6c: 0x0003, 0x3d6d: 0x0003, 
0x3d6e: 0x0003, 0x3d6f: 0x0003, + 0x3d70: 0x0003, 0x3d71: 0x0003, 0x3d72: 0x0003, 0x3d73: 0x0003, 0x3d74: 0x0003, 0x3d75: 0x0003, + 0x3d76: 0x0003, 0x3d77: 0x0003, 0x3d78: 0x0003, 0x3d79: 0x0003, 0x3d7a: 0x0003, 0x3d7b: 0x0003, + 0x3d7c: 0x0003, 0x3d7d: 0x0003, } // stringWidthIndex: 30 blocks, 1920 entries, 1920 bytes @@ -1651,10 +1634,10 @@ var stringWidthIndex = [1920]uint8{ 0x440: 0x39, 0x441: 0x39, 0x442: 0x39, 0x443: 0x39, 0x444: 0x39, 0x445: 0x39, 0x446: 0x39, 0x447: 0x39, 0x448: 0x39, 0x449: 0x39, 0x44a: 0x39, 0x44b: 0x39, 0x44c: 0x39, 0x44d: 0x39, 0x44e: 0x39, 0x44f: 0x39, 0x450: 0x39, 0x451: 0x39, 0x452: 0x39, 0x453: 0x39, 0x454: 0x39, 0x455: 0x39, 0x456: 0x39, 0x457: 0x39, - 0x458: 0x39, 0x459: 0x39, 0x45a: 0x39, 0x45b: 0x39, 0x45c: 0x39, 0x45d: 0x39, 0x45e: 0x39, 0x45f: 0xc1, + 0x458: 0x39, 0x459: 0x39, 0x45a: 0x39, 0x45b: 0x39, 0x45c: 0x39, 0x45d: 0x39, 0x45e: 0x39, 0x45f: 0x39, 0x460: 0x39, 0x461: 0x39, 0x462: 0x39, 0x463: 0x39, 0x464: 0x39, 0x465: 0x39, 0x466: 0x39, 0x467: 0x39, 0x468: 0x39, 0x469: 0x39, 0x46a: 0x39, 0x46b: 0x39, 0x46c: 0x39, 0x46d: 0x39, 0x46e: 0x39, 0x46f: 0x39, - 0x470: 0x39, 0x471: 0x39, 0x472: 0x39, 0x473: 0xc2, 0x474: 0xc3, + 0x470: 0x39, 0x471: 0x39, 0x472: 0x39, 0x473: 0xc1, 0x474: 0xc2, 0x476: 0x39, 0x477: 0xc3, // Block 0x12, offset 0x480 0x4bf: 0xc4, // Block 0x13, offset 0x4c0 @@ -1673,11 +1656,11 @@ var stringWidthIndex = [1920]uint8{ 0x593: 0xd4, 0x5a3: 0xd5, 0x5a5: 0xd6, // Block 0x17, offset 0x5c0 - 0x5c0: 0xd7, 0x5c3: 0xd8, 0x5c4: 0xd9, 0x5c5: 0xda, 0x5c6: 0xdb, - 0x5c8: 0xdc, 0x5c9: 0xdd, 0x5cc: 0xde, 0x5cd: 0xdf, 0x5ce: 0xe0, 0x5cf: 0xe1, - 0x5d0: 0xe2, 0x5d1: 0xe3, 0x5d2: 0xe4, 0x5d3: 0xe5, 0x5d4: 0xe6, 0x5d5: 0xe7, 0x5d6: 0xe8, 0x5d7: 0xe9, - 0x5d8: 0xe4, 0x5d9: 0xea, 0x5da: 0xe4, 0x5db: 0xeb, 0x5df: 0xec, - 0x5e4: 0xed, 0x5e5: 0xee, 0x5e6: 0xe4, 0x5e7: 0xe4, + 0x5c0: 0xd7, 0x5c3: 0xd8, 0x5c4: 0xd9, 0x5c5: 0xda, 0x5c6: 0xdb, 0x5c7: 0xdc, + 0x5c8: 0xdd, 0x5c9: 0xde, 0x5cc: 0xdf, 0x5cd: 0xe0, 0x5ce: 0xe1, 0x5cf: 
0xe2, + 0x5d0: 0xe3, 0x5d1: 0xe4, 0x5d2: 0x39, 0x5d3: 0xe5, 0x5d4: 0xe6, 0x5d5: 0xe7, 0x5d6: 0xe8, 0x5d7: 0xe9, + 0x5d8: 0x39, 0x5d9: 0xea, 0x5da: 0x39, 0x5db: 0xeb, 0x5df: 0xec, + 0x5e4: 0xed, 0x5e5: 0xee, 0x5e6: 0x39, 0x5e7: 0x39, 0x5e9: 0xef, 0x5ea: 0xf0, 0x5eb: 0xf1, // Block 0x18, offset 0x600 0x600: 0x39, 0x601: 0x39, 0x602: 0x39, 0x603: 0x39, 0x604: 0x39, 0x605: 0x39, 0x606: 0x39, 0x607: 0x39, @@ -1687,7 +1670,7 @@ var stringWidthIndex = [1920]uint8{ 0x620: 0x39, 0x621: 0x39, 0x622: 0x39, 0x623: 0x39, 0x624: 0x39, 0x625: 0x39, 0x626: 0x39, 0x627: 0x39, 0x628: 0x39, 0x629: 0x39, 0x62a: 0x39, 0x62b: 0x39, 0x62c: 0x39, 0x62d: 0x39, 0x62e: 0x39, 0x62f: 0x39, 0x630: 0x39, 0x631: 0x39, 0x632: 0x39, 0x633: 0x39, 0x634: 0x39, 0x635: 0x39, 0x636: 0x39, 0x637: 0x39, - 0x638: 0x39, 0x639: 0x39, 0x63a: 0x39, 0x63b: 0x39, 0x63c: 0x39, 0x63d: 0x39, 0x63e: 0x39, 0x63f: 0xf2, + 0x638: 0x39, 0x639: 0x39, 0x63a: 0x39, 0x63b: 0x39, 0x63c: 0x39, 0x63d: 0x39, 0x63e: 0x39, 0x63f: 0xe6, // Block 0x19, offset 0x640 0x650: 0x0b, 0x651: 0x0c, 0x653: 0x0d, 0x656: 0x0e, 0x657: 0x06, 0x658: 0x0f, 0x65a: 0x10, 0x65b: 0x11, 0x65c: 0x12, 0x65d: 0x13, 0x65e: 0x14, 0x65f: 0x15, @@ -1696,7 +1679,7 @@ var stringWidthIndex = [1920]uint8{ 0x670: 0x06, 0x671: 0x06, 0x672: 0x06, 0x673: 0x06, 0x674: 0x06, 0x675: 0x06, 0x676: 0x06, 0x677: 0x06, 0x678: 0x06, 0x679: 0x06, 0x67a: 0x06, 0x67b: 0x06, 0x67c: 0x06, 0x67d: 0x06, 0x67e: 0x06, 0x67f: 0x16, // Block 0x1a, offset 0x680 - 0x680: 0xf3, 0x681: 0x08, 0x684: 0x08, 0x685: 0x08, 0x686: 0x08, 0x687: 0x09, + 0x680: 0xf2, 0x681: 0x08, 0x684: 0x08, 0x685: 0x08, 0x686: 0x08, 0x687: 0x09, // Block 0x1b, offset 0x6c0 0x6c0: 0x5b, 0x6c1: 0x5b, 0x6c2: 0x5b, 0x6c3: 0x5b, 0x6c4: 0x5b, 0x6c5: 0x5b, 0x6c6: 0x5b, 0x6c7: 0x5b, 0x6c8: 0x5b, 0x6c9: 0x5b, 0x6ca: 0x5b, 0x6cb: 0x5b, 0x6cc: 0x5b, 0x6cd: 0x5b, 0x6ce: 0x5b, 0x6cf: 0x5b, @@ -1705,7 +1688,7 @@ var stringWidthIndex = [1920]uint8{ 0x6e0: 0x5b, 0x6e1: 0x5b, 0x6e2: 0x5b, 0x6e3: 0x5b, 0x6e4: 0x5b, 0x6e5: 0x5b, 
0x6e6: 0x5b, 0x6e7: 0x5b, 0x6e8: 0x5b, 0x6e9: 0x5b, 0x6ea: 0x5b, 0x6eb: 0x5b, 0x6ec: 0x5b, 0x6ed: 0x5b, 0x6ee: 0x5b, 0x6ef: 0x5b, 0x6f0: 0x5b, 0x6f1: 0x5b, 0x6f2: 0x5b, 0x6f3: 0x5b, 0x6f4: 0x5b, 0x6f5: 0x5b, 0x6f6: 0x5b, 0x6f7: 0x5b, - 0x6f8: 0x5b, 0x6f9: 0x5b, 0x6fa: 0x5b, 0x6fb: 0x5b, 0x6fc: 0x5b, 0x6fd: 0x5b, 0x6fe: 0x5b, 0x6ff: 0xf4, + 0x6f8: 0x5b, 0x6f9: 0x5b, 0x6fa: 0x5b, 0x6fb: 0x5b, 0x6fc: 0x5b, 0x6fd: 0x5b, 0x6fe: 0x5b, 0x6ff: 0xf3, // Block 0x1c, offset 0x700 0x720: 0x18, 0x730: 0x09, 0x731: 0x09, 0x732: 0x09, 0x733: 0x09, 0x734: 0x09, 0x735: 0x09, 0x736: 0x09, 0x737: 0x09, diff --git a/vendor/github.com/clipperhouse/displaywidth/truncate.go b/vendor/github.com/clipperhouse/displaywidth/truncate.go new file mode 100644 index 0000000000..b3e696f49b --- /dev/null +++ b/vendor/github.com/clipperhouse/displaywidth/truncate.go @@ -0,0 +1,149 @@ +package displaywidth + +import ( + "strings" + + "github.com/clipperhouse/uax29/v2/graphemes" +) + +// TruncateString truncates a string to the given maxWidth, and appends the +// given tail if the string is truncated. +// +// It ensures the visible width, including the width of the tail, is less than or +// equal to maxWidth. +// +// When [Options.ControlSequences] is true, 7-bit ANSI escape sequences that +// appear after the truncation point are preserved in the output. This ensures +// that escape sequences such as SGR resets are not lost, preventing color +// bleed in terminal output. +// +// [Options.ControlSequences8Bit] is ignored by truncation. 8-bit C1 byte values +// (0x80-0x9F) overlap with UTF-8 multi-byte encoding, so manipulating them +// during truncation can shift byte boundaries and form unintended visible +// characters. Use [Options.String] or [Options.Bytes] for 8-bit-aware width +// measurement. +func (options Options) TruncateString(s string, maxWidth int, tail string) string { + // We deliberately ignore ControlSequences8Bit for truncation, see above. 
+ options.ControlSequences8Bit = false + + maxWidthWithoutTail := maxWidth - options.String(tail) + + var pos, total int + g := graphemes.FromString(s) + g.AnsiEscapeSequences = options.ControlSequences + + for g.Next() { + gw := graphemeWidth(g.Value(), options) + if total+gw <= maxWidthWithoutTail { + pos = g.End() + } + total += gw + if total > maxWidth { + if options.ControlSequences { + // Build result with trailing 7-bit ANSI escape sequences preserved + var b strings.Builder + b.Grow(len(s) + len(tail)) // at most original + tail + b.WriteString(s[:pos]) + b.WriteString(tail) + + rem := graphemes.FromString(s[pos:]) + rem.AnsiEscapeSequences = options.ControlSequences + + for rem.Next() { + v := rem.Value() + // Only preserve 7-bit escapes (ESC = 0x1B) that measure + // as zero-width on their own; some sequences (e.g. SOS) + // are only valid in their original context. + if len(v) > 0 && v[0] == 0x1B && options.String(v) == 0 { + b.WriteString(v) + } + } + return b.String() + } + return s[:pos] + tail + } + } + // No truncation + return s +} + +// TruncateString truncates a string to the given maxWidth, and appends the +// given tail if the string is truncated. +// +// It ensures the total width, including the width of the tail, is less than or +// equal to maxWidth. +func TruncateString(s string, maxWidth int, tail string) string { + return DefaultOptions.TruncateString(s, maxWidth, tail) +} + +// TruncateBytes truncates a []byte to the given maxWidth, and appends the +// given tail if the []byte is truncated. +// +// It ensures the visible width, including the width of the tail, is less than or +// equal to maxWidth. +// +// When [Options.ControlSequences] is true, 7-bit ANSI escape sequences that +// appear after the truncation point are preserved in the output. This ensures +// that escape sequences such as SGR resets are not lost, preventing color +// bleed in terminal output. +// +// [Options.ControlSequences8Bit] is ignored by truncation. 
8-bit C1 byte values +// (0x80-0x9F) overlap with UTF-8 multi-byte encoding, so manipulating them +// during truncation can shift byte boundaries and form unintended visible +// characters. Use [Options.String] or [Options.Bytes] for 8-bit-aware width +// measurement. +func (options Options) TruncateBytes(s []byte, maxWidth int, tail []byte) []byte { + // We deliberately ignore ControlSequences8Bit for truncation, see above. + options.ControlSequences8Bit = false + + maxWidthWithoutTail := maxWidth - options.Bytes(tail) + + var pos, total int + g := graphemes.FromBytes(s) + g.AnsiEscapeSequences = options.ControlSequences + + for g.Next() { + gw := graphemeWidth(g.Value(), options) + if total+gw <= maxWidthWithoutTail { + pos = g.End() + } + total += gw + if total > maxWidth { + if options.ControlSequences { + // Build result with trailing 7-bit ANSI escape sequences preserved + result := make([]byte, 0, len(s)+len(tail)) // at most original + tail + result = append(result, s[:pos]...) + result = append(result, tail...) + + rem := graphemes.FromBytes(s[pos:]) + rem.AnsiEscapeSequences = options.ControlSequences + + for rem.Next() { + v := rem.Value() + // Only preserve 7-bit escapes (ESC = 0x1B) that measure + // as zero-width on their own; some sequences (e.g. SOS) + // are only valid in their original context. + if len(v) > 0 && v[0] == 0x1B && options.Bytes(v) == 0 { + result = append(result, v...) + } + } + return result + } + result := make([]byte, 0, pos+len(tail)) + result = append(result, s[:pos]...) + result = append(result, tail...) + return result + } + } + // No truncation + return s +} + +// TruncateBytes truncates a []byte to the given maxWidth, and appends the +// given tail if the []byte is truncated. +// +// It ensures the total width, including the width of the tail, is less than or +// equal to maxWidth. 
+func TruncateBytes(s []byte, maxWidth int, tail []byte) []byte { + return DefaultOptions.TruncateBytes(s, maxWidth, tail) +} diff --git a/vendor/github.com/clipperhouse/displaywidth/width.go b/vendor/github.com/clipperhouse/displaywidth/width.go index bd6b65f047..f6e0ab7fd1 100644 --- a/vendor/github.com/clipperhouse/displaywidth/width.go +++ b/vendor/github.com/clipperhouse/displaywidth/width.go @@ -3,22 +3,9 @@ package displaywidth import ( "unicode/utf8" - "github.com/clipperhouse/stringish" "github.com/clipperhouse/uax29/v2/graphemes" ) -// Options allows you to specify the treatment of ambiguous East Asian -// characters. When EastAsianWidth is false (default), ambiguous East Asian -// characters are treated as width 1. When EastAsianWidth is true, ambiguous -// East Asian characters are treated as width 2. -type Options struct { - EastAsianWidth bool -} - -// DefaultOptions is the default options for the display width -// calculation, which is EastAsianWidth: false. -var DefaultOptions = Options{EastAsianWidth: false} - // String calculates the display width of a string, // by iterating over grapheme clusters in the string // and summing their widths. @@ -29,19 +16,44 @@ func String(s string) int { // String calculates the display width of a string, for the given options, by // iterating over grapheme clusters in the string and summing their widths. 
func (options Options) String(s string) int { - // Optimization: no need to parse grapheme - switch len(s) { - case 0: - return 0 - case 1: - return int(asciiWidths[s[0]]) - } - width := 0 - g := graphemes.FromString(s) - for g.Next() { - width += graphemeWidth(g.Value(), options) + pos := 0 + + for pos < len(s) { + // Try ASCII optimization + asciiLen := printableASCIILength(s[pos:]) + if asciiLen > 0 { + width += asciiLen + pos += asciiLen + continue + } + + // Not ASCII, use grapheme parsing + g := graphemes.FromString(s[pos:]) + g.AnsiEscapeSequences = options.ControlSequences + g.AnsiEscapeSequences8Bit = options.ControlSequences8Bit + + start := pos + + for g.Next() { + v := g.Value() + width += graphemeWidth(v, options) + pos += len(v) + + // Quick check: if remaining might have printable ASCII, break to outer loop + if pos < len(s) && s[pos] >= 0x20 && s[pos] <= 0x7E { + break + } + } + + // Defensive, should not happen: if no progress was made, + // skip a byte to prevent infinite loop. Only applies if + // the grapheme parser misbehaves. + if pos == start { + pos++ + } } + return width } @@ -55,19 +67,44 @@ func Bytes(s []byte) int { // Bytes calculates the display width of a []byte, for the given options, by // iterating over grapheme clusters in the slice and summing their widths. 
func (options Options) Bytes(s []byte) int { - // Optimization: no need to parse grapheme - switch len(s) { - case 0: - return 0 - case 1: - return int(asciiWidths[s[0]]) - } - width := 0 - g := graphemes.FromBytes(s) - for g.Next() { - width += graphemeWidth(g.Value(), options) + pos := 0 + + for pos < len(s) { + // Try ASCII optimization + asciiLen := printableASCIILength(s[pos:]) + if asciiLen > 0 { + width += asciiLen + pos += asciiLen + continue + } + + // Not ASCII, use grapheme parsing + g := graphemes.FromBytes(s[pos:]) + g.AnsiEscapeSequences = options.ControlSequences + g.AnsiEscapeSequences8Bit = options.ControlSequences8Bit + + start := pos + + for g.Next() { + v := g.Value() + width += graphemeWidth(v, options) + pos += len(v) + + // Quick check: if remaining might have printable ASCII, break to outer loop + if pos < len(s) && s[pos] >= 0x20 && s[pos] <= 0x7E { + break + } + } + + // Defensive, should not happen: if no progress was made, + // skip a byte to prevent infinite loop. Only applies if + // the grapheme parser misbehaves. + if pos == start { + pos++ + } } + return width } @@ -90,7 +127,7 @@ func Rune(r rune) int { // Iterating over runes to measure width is incorrect in many cases. func (options Options) Rune(r rune) int { if r < utf8.RuneSelf { - return int(asciiWidths[byte(r)]) + return asciiWidth(byte(r)) } // Surrogates (U+D800-U+DFFF) are invalid UTF-8. @@ -102,110 +139,101 @@ func (options Options) Rune(r rune) int { n := utf8.EncodeRune(buf[:], r) // Skip the grapheme iterator - return lookupProperties(buf[:n]).width(options) + return graphemeWidth(buf[:n], options) } +const _Default property = 0 + // graphemeWidth returns the display width of a grapheme cluster. // The passed string must be a single grapheme cluster. 
-func graphemeWidth[T stringish.Interface](s T, options Options) int { - // Optimization: no need to look up properties - switch len(s) { - case 0: +func graphemeWidth[T ~string | []byte](s T, options Options) int { + if len(s) == 0 { return 0 - case 1: - return int(asciiWidths[s[0]]) } - return lookupProperties(s).width(options) -} - -// isRIPrefix checks if the slice matches the Regional Indicator prefix -// (F0 9F 87). It assumes len(s) >= 3. -func isRIPrefix[T stringish.Interface](s T) bool { - return s[0] == 0xF0 && s[1] == 0x9F && s[2] == 0x87 -} - -// isVS16 checks if the slice matches VS16 (U+FE0F) UTF-8 encoding -// (EF B8 8F). It assumes len(s) >= 3. -func isVS16[T stringish.Interface](s T) bool { - return s[0] == 0xEF && s[1] == 0xB8 && s[2] == 0x8F -} + // C1 controls (0x80-0x9F) are zero-width when 8-bit control sequences + // are enabled. This must be checked before the single-byte optimization + // below, which would otherwise return width 1 for these bytes. + if options.ControlSequences8Bit && s[0] >= 0x80 && s[0] <= 0x9F { + return 0 + } -// lookupProperties returns the properties for a grapheme. -// The passed string must be at least one byte long. -// -// Callers must handle zero and single-byte strings upstream, both as an -// optimization, and to reduce the scope of this function. -func lookupProperties[T stringish.Interface](s T) property { - l := len(s) - - if s[0] < utf8.RuneSelf { - // Check for variation selector after ASCII (e.g., keycap sequences like 1️⃣) - if l >= 4 { - // Subslice may help eliminate bounds checks - vs := s[1:4] - if isVS16(vs) { - // VS16 requests emoji presentation (width 2) - return _Emoji - } - // VS15 (0x8E) requests text presentation but does not affect width, - // in my reading of Unicode TR51. Falls through to _Default. 
- } - return asciiProperties[s[0]] + // Optimization: single-byte graphemes need no property lookup + if len(s) == 1 { + return asciiWidth(s[0]) } - // Regional indicator pair (flag) - if l >= 8 { - // Subslice may help eliminate bounds checks - ri := s[:8] - // First rune - if isRIPrefix(ri[0:3]) { - b3 := ri[3] - if b3 >= 0xA6 && b3 <= 0xBF { - // Second rune - if isRIPrefix(ri[4:7]) { - b7 := ri[7] - if b7 >= 0xA6 && b7 <= 0xBF { - return _Emoji - } - } - } - } + // Multi-byte grapheme clusters led by a C0 control (0x00-0x1F) + if s[0] <= 0x1F { + return 0 } p, sz := lookup(s) + prop := property(p) - // Variation Selectors - if sz > 0 && l >= sz+3 { - // Subslice may help eliminate bounds checks + // Variation Selector 16 (VS16) requests emoji presentation + if prop != _Wide && sz > 0 && len(s) >= sz+3 { vs := s[sz : sz+3] if isVS16(vs) { - // VS16 requests emoji presentation (width 2) - return _Emoji + prop = _Wide } // VS15 (0x8E) requests text presentation but does not affect width, // in my reading of Unicode TR51. Falls through to return the base // character's property. } - return property(p) + if options.EastAsianWidth && prop == _East_Asian_Ambiguous { + prop = _Wide + } + + if prop > upperBound { + prop = _Default + } + + return propertyWidths[prop] } -const _Default property = 0 -const boundsCheck = property(len(propertyWidths) - 1) +func asciiWidth(b byte) int { + if b <= 0x1F || b == 0x7F { + return 0 + } + return 1 +} -// width determines the display width of a character based on its properties, -// and configuration options -func (p property) width(options Options) int { - if options.EastAsianWidth && p == _East_Asian_Ambiguous { - return 2 +// printableASCIILength returns the length of consecutive printable ASCII bytes +// starting at the beginning of s. 
+func printableASCIILength[T string | []byte](s T) int { + i := 0 + for ; i < len(s); i++ { + b := s[i] + // Printable ASCII is 0x20-0x7E (space through tilde) + if b < 0x20 || b > 0x7E { + break + } } - // Bounds check may help the compiler eliminate its bounds check, - // and safety of course. - if p > boundsCheck { - return 1 // default width + // If the next byte is non-ASCII (>= 0x80), back off by 1. The grapheme + // parser may group the last ASCII byte with subsequent non-ASCII bytes, + // such as combining marks. + if i > 0 && i < len(s) && s[i] >= 0x80 { + i-- } - return propertyWidths[p] + return i } + +// isVS16 checks if the slice matches VS16 (U+FE0F) UTF-8 encoding +// (EF B8 8F). It assumes len(s) >= 3. +func isVS16[T ~string | []byte](s T) bool { + return s[0] == 0xEF && s[1] == 0xB8 && s[2] == 0x8F +} + +// propertyWidths is a jump table of sorts, instead of a switch +var propertyWidths = [4]int{ + _Default: 1, + _Zero_Width: 0, + _Wide: 2, + _East_Asian_Ambiguous: 1, +} + +const upperBound = property(len(propertyWidths) - 1) diff --git a/vendor/github.com/clipperhouse/stringish/.gitignore b/vendor/github.com/clipperhouse/stringish/.gitignore deleted file mode 100644 index 12fbfb739b..0000000000 --- a/vendor/github.com/clipperhouse/stringish/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.DS_Store -*.test diff --git a/vendor/github.com/clipperhouse/stringish/LICENSE b/vendor/github.com/clipperhouse/stringish/LICENSE deleted file mode 100644 index 4b8064eb37..0000000000 --- a/vendor/github.com/clipperhouse/stringish/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2025 Matt Sherman - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons 
to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/clipperhouse/stringish/README.md b/vendor/github.com/clipperhouse/stringish/README.md deleted file mode 100644 index fa1f7cc672..0000000000 --- a/vendor/github.com/clipperhouse/stringish/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# stringish - -A small Go module that provides a generic type constraint for “string-like” -data, and a utf8 package that works with both strings and byte slices -without conversions. - -```go -type Interface interface { - ~[]byte | ~string -} -``` - -[![Go Reference](https://pkg.go.dev/badge/github.com/clipperhouse/stringish/utf8.svg)](https://pkg.go.dev/github.com/clipperhouse/stringish/utf8) -[![Test Status](https://github.com/clipperhouse/stringish/actions/workflows/gotest.yml/badge.svg)](https://github.com/clipperhouse/stringish/actions/workflows/gotest.yml) - -## Install - -``` -go get github.com/clipperhouse/stringish -``` - -## Examples - -```go -import ( - "github.com/clipperhouse/stringish" - "github.com/clipperhouse/stringish/utf8" -) - -s := "Hello, 世界" -r, size := utf8.DecodeRune(s) // not DecodeRuneInString 🎉 - -b := []byte("Hello, 世界") -r, size = utf8.DecodeRune(b) // same API! 
- -func MyFoo[T stringish.Interface](s T) T { - // pass a string or a []byte - // iterate, slice, transform, whatever -} -``` - -## Motivation - -Sometimes we want APIs to accept `string` or `[]byte` without having to convert -between those types. That conversion usually allocates! - -By implementing with `stringish.Interface`, we can have a single API, and -single implementation for both types: one `Foo` instead of `Foo` and -`FooString`. - -We have converted the -[`unicode/utf8` package](https://github.com/clipperhouse/stringish/blob/main/utf8/utf8.go) -as an example -- note the absence of`*InString` funcs. We might look at `x/text` -next. - -## Used by - -- clipperhouse/uax29: [stringish trie](https://github.com/clipperhouse/uax29/blob/master/graphemes/trie.go#L27), [stringish iterator](https://github.com/clipperhouse/uax29/blob/master/internal/iterators/iterator.go#L9), [stringish SplitFunc](https://github.com/clipperhouse/uax29/blob/master/graphemes/splitfunc.go#L21) - -- [clipperhouse/displaywidth](https://github.com/clipperhouse/displaywidth) - -## Prior discussion - -- [Consideration of similar by the Go team](https://github.com/golang/go/issues/48643) diff --git a/vendor/github.com/clipperhouse/stringish/interface.go b/vendor/github.com/clipperhouse/stringish/interface.go deleted file mode 100644 index adfeab61eb..0000000000 --- a/vendor/github.com/clipperhouse/stringish/interface.go +++ /dev/null @@ -1,5 +0,0 @@ -package stringish - -type Interface interface { - ~[]byte | ~string -} diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/README.md b/vendor/github.com/clipperhouse/uax29/v2/graphemes/README.md index dc14d11e2f..3f8a5e3f99 100644 --- a/vendor/github.com/clipperhouse/uax29/v2/graphemes/README.md +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/README.md @@ -1,4 +1,4 @@ -An implementation of grapheme cluster boundaries from [Unicode text segmentation](https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries) (UAX 29), for 
Unicode version 15.0.0. +An implementation of grapheme cluster boundaries from [Unicode text segmentation](https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries) (UAX 29), for Unicode 17. [![Documentation](https://pkg.go.dev/badge/github.com/clipperhouse/uax29/v2/graphemes.svg)](https://pkg.go.dev/github.com/clipperhouse/uax29/v2/graphemes) ![Tests](https://github.com/clipperhouse/uax29/actions/workflows/gotest.yml/badge.svg) @@ -7,18 +7,17 @@ An implementation of grapheme cluster boundaries from [Unicode text segmentation ## Quick start ``` -go get "github.com/clipperhouse/uax29/v2/graphemes" +go get github.com/clipperhouse/uax29/v2/graphemes ``` ```go import "github.com/clipperhouse/uax29/v2/graphemes" text := "Hello, 世界. Nice dog! 👍🐶" +g := graphemes.FromString(text) -tokens := graphemes.FromString(text) - -for tokens.Next() { // Next() returns true until end of data - fmt.Println(tokens.Value()) // Do something with the current grapheme +for g.Next() { // Next() returns true until end of data + fmt.Println(g.Value()) // Do something with the current grapheme } ``` @@ -26,7 +25,7 @@ _A grapheme is a “single visible character”, which might be a simple as a si ## Conformance -We use the Unicode [test suite](https://unicode.org/reports/tr41/tr41-26.html#Tests29). +We use the Unicode [test suite](https://unicode.org/reports/tr41/tr41-36.html#Tests29). ![Tests](https://github.com/clipperhouse/uax29/actions/workflows/gotest.yml/badge.svg) ![Fuzz](https://github.com/clipperhouse/uax29/actions/workflows/gofuzz.yml/badge.svg) @@ -37,11 +36,10 @@ We use the Unicode [test suite](https://unicode.org/reports/tr41/tr41-26.html#Te ```go text := "Hello, 世界. Nice dog! 
👍🐶" +g := graphemes.FromString(text) -tokens := graphemes.FromString(text) - -for tokens.Next() { // Next() returns true until end of data - fmt.Println(tokens.Value()) // Do something with the current grapheme +for g.Next() { // Next() returns true until end of data + fmt.Println(g.Value()) // Do something with the current grapheme } ``` @@ -50,15 +48,15 @@ for tokens.Next() { // Next() returns true until end of data `FromReader` embeds a [`bufio.Scanner`](https://pkg.go.dev/bufio#Scanner), so just use those methods. ```go -r := getYourReader() // from a file or network maybe -tokens := graphemes.FromReader(r) +r := getYourReader() // from a file or network maybe +g := graphemes.FromReader(r) -for tokens.Scan() { // Scan() returns true until error or EOF - fmt.Println(tokens.Text()) // Do something with the current grapheme +for g.Scan() { // Scan() returns true until error or EOF + fmt.Println(g.Text()) // Do something with the current grapheme } -if tokens.Err() != nil { // Check the error - log.Fatal(tokens.Err()) +if g.Err() != nil { // Check the error + log.Fatal(g.Err()) } ``` @@ -67,24 +65,52 @@ if tokens.Err() != nil { // Check the error ```go b := []byte("Hello, 世界. Nice dog! 👍🐶") -tokens := graphemes.FromBytes(b) +g := graphemes.FromBytes(b) -for tokens.Next() { // Next() returns true until end of data - fmt.Println(tokens.Value()) // Do something with the current grapheme +for g.Next() { // Next() returns true until end of data + fmt.Println(g.Value()) // Do something with the current grapheme } ``` -### Benchmarks +### ANSI escape sequences + +By the UAX 29 specification, ANSI escape sequences are not grapheme clusters. To treat 7-bit ANSI escape sequences as a single cluster, set `AnsiEscapeSequences` to true. + +```go +text := "Hello, \x1b[31mworld\x1b[0m!" 
+g := graphemes.FromString(text) +g.AnsiEscapeSequences = true + +for g.Next() { + fmt.Println(g.Value()) +} +``` -On a Mac M2 laptop, we see around 200MB/s, or around 100 million graphemes per second, and no allocations. +To also parse 8-bit C1 controls (non-UTF-8 bytes), set `AnsiEscapeSequences8Bit` to true. + +```go +g.AnsiEscapeSequences = true // 7-bit forms (ESC ...) +g.AnsiEscapeSequences8Bit = true // 8-bit C1 forms (0x80-0x9F), not valid UTF-8 +``` + +For ESC-initiated (7-bit) control strings, only 7-bit terminators are recognized. +For C1-initiated (8-bit) control strings, only C1 ST (`0x9C`) is recognized as ST. + +We implement [ECMA-48](https://ecma-international.org/publications-and-standards/standards/ecma-48/) control codes in both 7-bit and 8-bit representations. 8-bit control codes are not UTF-8 encoded and are not valid UTF-8, caveat emptor. + +### Benchmarks ``` goos: darwin goarch: arm64 pkg: github.com/clipperhouse/uax29/graphemes/comparative cpu: Apple M2 -BenchmarkGraphemes/clipperhouse/uax29-8 173805 ns/op 201.16 MB/s 0 B/op 0 allocs/op -BenchmarkGraphemes/rivo/uniseg-8 2045128 ns/op 17.10 MB/s 0 B/op 0 allocs/op + +BenchmarkGraphemesMixed/clipperhouse/uax29-8 142635 ns/op 245.12 MB/s 0 B/op 0 allocs/op +BenchmarkGraphemesMixed/rivo/uniseg-8 2018284 ns/op 17.32 MB/s 0 B/op 0 allocs/op + +BenchmarkGraphemesASCII/clipperhouse/uax29-8 8846 ns/op 508.73 MB/s 0 B/op 0 allocs/op +BenchmarkGraphemesASCII/rivo/uniseg-8 366760 ns/op 12.27 MB/s 0 B/op 0 allocs/op ``` ### Invalid inputs diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/ansi.go b/vendor/github.com/clipperhouse/uax29/v2/graphemes/ansi.go new file mode 100644 index 0000000000..9cd09b426b --- /dev/null +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/ansi.go @@ -0,0 +1,138 @@ +package graphemes + +// ansiEscapeLength returns the byte length of a valid 7-bit ANSI escape +// sequence at the start of data, or 0 if none. 
+// +// Recognized forms (ECMA-48 / ISO 6429): +// - CSI: ESC [ then parameter bytes (0x30-0x3F), intermediate (0x20-0x2F), final (0x40-0x7E) +// - OSC: ESC ] then payload until BEL (0x07), 7-bit ST (ESC \), CAN (0x18), or SUB (0x1A) +// - DCS, SOS, PM, APC: ESC P/X/^/_ then payload until 7-bit ST (ESC \), CAN, or SUB +// - Two-byte: ESC + Fe/Fs (0x40-0x7E excluding above), or Fp (0x30-0x3F), or nF (0x20-0x2F then final) +func ansiEscapeLength[T ~string | ~[]byte](data T) int { + n := len(data) + if n < 2 || data[0] != esc { + return 0 + } + + b1 := data[1] + switch b1 { + case '[': // CSI + body := csiBodyLength(data[2:]) + if body == 0 { + return 0 + } + return 2 + body + case ']': // OSC - allows BEL or 7-bit ST terminator + body := oscLength(data[2:]) + if body < 0 { + return 0 + } + return 2 + body + case 'P', 'X', '^', '_': // DCS, SOS, PM, APC + body := stSequenceLength(data[2:]) + if body < 0 { + return 0 + } + return 2 + body + } + + if b1 >= 0x40 && b1 <= 0x7E { + // Fe/Fs two-byte; [ ] P X ^ _ handled above + return 2 + } + if b1 >= 0x30 && b1 <= 0x3F { + // Fp (private) two-byte + return 2 + } + if b1 >= 0x20 && b1 <= 0x2F { + // nF: intermediates then one final (0x30-0x7E) + i := 2 + for i < n && data[i] >= 0x20 && data[i] <= 0x2F { + i++ + } + if i < n && data[i] >= 0x30 && data[i] <= 0x7E { + return i + 1 + } + return 0 + } + + return 0 +} + +// csiBodyLength returns the length of the CSI body (param/intermediate/final bytes). +// data is the slice after "ESC [". +// Per ECMA-48, the CSI body has the form: +// +// parameters (0x30–0x3F)*, intermediates (0x20–0x2F)*, final (0x40–0x7E) +// +// Once an intermediate byte is seen, subsequent parameter bytes are invalid. 
+func csiBodyLength[T ~string | ~[]byte](data T) int { + seenIntermediate := false + for i := 0; i < len(data); i++ { + b := data[i] + if b >= 0x30 && b <= 0x3F { + if seenIntermediate { + return 0 + } + continue + } + if b >= 0x20 && b <= 0x2F { + seenIntermediate = true + continue + } + if b >= 0x40 && b <= 0x7E { + return i + 1 + } + return 0 + } + return 0 +} + +// oscLength returns the length of the OSC body. +// data is the slice after "ESC ]". +// +// Returns: +// - n >= 0: consumed body length (includes BEL/ST terminator when present) +// - -1: not terminated in the provided data +// +// OSC accepts BEL (0x07) or 7-bit ST (ESC \) as terminators by widespread convention. +// Per ECMA-48, CAN (0x18) and SUB (0x1A) cancel the control string; in that +// case they are not part of the OSC sequence length. +func oscLength[T ~string | ~[]byte](data T) int { + for i := 0; i < len(data); i++ { + b := data[i] + if b == bel { + return i + 1 + } + if b == can || b == sub { + return i + } + if b == esc && i+1 < len(data) && data[i+1] == '\\' { + return i + 2 + } + } + return -1 +} + +// stSequenceLength returns the length of a control-string body. +// data is the slice after "ESC x". +// +// Returns: +// - n >= 0: consumed body length (includes ST terminator when present) +// - -1: not terminated in the provided data +// +// Used for DCS, SOS, PM, and APC, which per ECMA-48 terminate with ST. +// ST here is the 7-bit form (ESC \). +// CAN (0x18) and SUB (0x1A) cancel the control string; in that case they are +// not part of the sequence length. 
+func stSequenceLength[T ~string | ~[]byte](data T) int { + for i := 0; i < len(data); i++ { + if data[i] == can || data[i] == sub { + return i + } + if data[i] == esc && i+1 < len(data) && data[i+1] == '\\' { + return i + 2 + } + } + return -1 +} diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/ansi8.go b/vendor/github.com/clipperhouse/uax29/v2/graphemes/ansi8.go new file mode 100644 index 0000000000..d9b0c48b63 --- /dev/null +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/ansi8.go @@ -0,0 +1,79 @@ +package graphemes + +// ansiEscapeLength8Bit returns the byte length of a valid 8-bit C1 ANSI +// sequence at the start of data, or 0 if none. +// +// Recognized forms (ECMA-48 / ISO 6429): +// - C1 CSI (0x9B) body as parameter/intermediate/final bytes +// - C1 OSC (0x9D) body terminated by BEL, C1 ST, CAN, or SUB +// - C1 DCS/SOS/PM/APC (0x90/0x98/0x9E/0x9F) body terminated by C1 ST, CAN, or SUB +// - Standalone C1 controls (0x80..0x9F not listed above): single byte +func ansiEscapeLength8Bit[T ~string | ~[]byte](data T) int { + if len(data) == 0 { + return 0 + } + + switch data[0] { + case 0x9B: // C1 CSI + body := csiBodyLength(data[1:]) + if body == 0 { + return 0 + } + return 1 + body + case 0x9D: // C1 OSC + body := oscLengthC1(data[1:]) + if body < 0 { + return 0 + } + return 1 + body + case 0x90, 0x98, 0x9E, 0x9F: // C1 DCS, SOS, PM, APC + body := stSequenceLengthC1(data[1:]) + if body < 0 { + return 0 + } + return 1 + body + default: + if data[0] >= 0x80 && data[0] <= 0x9F { + return 1 + } + } + + return 0 +} + +// oscLengthC1 returns the length of a C1 OSC body. +// data is the slice after the C1 OSC initiator (0x9D). +// +// Returns: +// - n >= 0: consumed body length (includes BEL/ST terminator when present) +// - -1: not terminated in the provided data +// +// Terminators: BEL (0x07) or C1 ST (0x9C). +// CAN (0x18) and SUB (0x1A) cancel the control string. 
+func oscLengthC1[T ~string | ~[]byte](data T) int { + for i := 0; i < len(data); i++ { + b := data[i] + if b == bel || b == st { + return i + 1 + } + if b == can || b == sub { + return i + } + } + return -1 +} + +// stSequenceLengthC1 parses DCS/SOS/PM/APC bodies that terminate with C1 ST +// (0x9C), or are canceled by CAN/SUB. +func stSequenceLengthC1[T ~string | ~[]byte](data T) int { + for i := 0; i < len(data); i++ { + b := data[i] + if b == can || b == sub { + return i + } + if b == st { + return i + 1 + } + } + return -1 +} diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/iterator.go b/vendor/github.com/clipperhouse/uax29/v2/graphemes/iterator.go index 1eaaa534ce..d37d43d716 100644 --- a/vendor/github.com/clipperhouse/uax29/v2/graphemes/iterator.go +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/iterator.go @@ -1,12 +1,44 @@ package graphemes -import ( - "github.com/clipperhouse/stringish" - "github.com/clipperhouse/uax29/v2/internal/iterators" -) +import "unicode/utf8" + +// FromString returns an iterator for the grapheme clusters in the input string. +// Iterate while Next() is true, and access the grapheme via Value(). +func FromString(s string) *Iterator[string] { + return &Iterator[string]{ + split: splitFuncString, + data: s, + } +} -type Iterator[T stringish.Interface] struct { - *iterators.Iterator[T] +// FromBytes returns an iterator for the grapheme clusters in the input bytes. +// Iterate while Next() is true, and access the grapheme via Value(). +func FromBytes(b []byte) *Iterator[[]byte] { + return &Iterator[[]byte]{ + split: splitFuncBytes, + data: b, + } +} + +// Iterator is a generic iterator for grapheme clusters in strings or byte slices, +// with an ASCII hot path optimization. +type Iterator[T ~string | ~[]byte] struct { + split func(T, bool) (int, T, error) + data T + pos int + start int + // AnsiEscapeSequences treats 7-bit ANSI escape sequences (ECMA-48) as + // single grapheme clusters when true. 
The default is false. + // + // 8-bit controls are not enabled by this option. See [AnsiEscapeSequences8Bit]. + AnsiEscapeSequences bool + // AnsiEscapeSequences8Bit treats 8-bit C1 ANSI escape sequences (ECMA-48) as single + // grapheme clusters when true. The default is false. + // + // 8-bit control bytes are not UTF-8 encoded, i.e. not valid UTF-8. If you + // choose this option, you are choosing to interpret non-UTF-8 data, caveat + // emptor. + AnsiEscapeSequences8Bit bool } var ( @@ -14,18 +46,99 @@ var ( splitFuncBytes = splitFunc[[]byte] ) -// FromString returns an iterator for the grapheme clusters in the input string. -// Iterate while Next() is true, and access the grapheme via Value(). -func FromString(s string) Iterator[string] { - return Iterator[string]{ - iterators.New(splitFuncString, s), +const ( + esc = 0x1B + cr = 0x0D + bel = 0x07 + can = 0x18 + sub = 0x1A + st = 0x9C +) + +// Next advances the iterator to the next grapheme cluster. +// Returns false when there are no more grapheme clusters. +func (iter *Iterator[T]) Next() bool { + if iter.pos >= len(iter.data) { + return false } + iter.start = iter.pos + + b := iter.data[iter.pos] + if iter.AnsiEscapeSequences && b == esc { + if a := ansiEscapeLength(iter.data[iter.pos:]); a > 0 { + iter.pos += a + return true + } + } + if iter.AnsiEscapeSequences8Bit && b >= 0x80 && b <= 0x9F { + if a := ansiEscapeLength8Bit(iter.data[iter.pos:]); a > 0 { + iter.pos += a + return true + } + } + + // ASCII hot path: any ASCII is one grapheme when next byte is ASCII or end. 
+ if b < utf8.RuneSelf && b != cr { + if iter.pos+1 >= len(iter.data) || iter.data[iter.pos+1] < utf8.RuneSelf { + iter.pos++ + return true + } + } + + // Fall back to UAX29 grapheme parsing + remaining := iter.data[iter.pos:] + advance, _, err := iter.split(remaining, true) + if err != nil { + panic(err) + } + if advance <= 0 { + panic("splitFunc returned a zero or negative advance") + } + iter.pos += advance + if iter.pos > len(iter.data) { + panic("splitFunc advanced beyond end of data") + } + return true } -// FromBytes returns an iterator for the grapheme clusters in the input bytes. -// Iterate while Next() is true, and access the grapheme via Value(). -func FromBytes(b []byte) Iterator[[]byte] { - return Iterator[[]byte]{ - iterators.New(splitFuncBytes, b), +// Value returns the current grapheme cluster. +func (iter *Iterator[T]) Value() T { + return iter.data[iter.start:iter.pos] +} + +// Start returns the byte position of the current grapheme in the original data. +func (iter *Iterator[T]) Start() int { + return iter.start +} + +// End returns the byte position after the current grapheme in the original data. +func (iter *Iterator[T]) End() int { + return iter.pos +} + +// Reset resets the iterator to the beginning of the data. +func (iter *Iterator[T]) Reset() { + iter.start = 0 + iter.pos = 0 +} + +// SetText sets the data for the iterator to operate on, and resets all state. +func (iter *Iterator[T]) SetText(data T) { + iter.data = data + iter.start = 0 + iter.pos = 0 +} + +// First returns the first grapheme cluster without advancing the iterator. 
+func (iter *Iterator[T]) First() T { + if len(iter.data) == 0 { + return iter.data } + + // Use a copy to leverage Next()'s ASCII optimization + cp := *iter + cp.pos = 0 + cp.start = 0 + cp.Next() + return cp.Value() } diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/splitfunc.go b/vendor/github.com/clipperhouse/uax29/v2/graphemes/splitfunc.go index cbe1ec9ef1..0ac7c6fb45 100644 --- a/vendor/github.com/clipperhouse/uax29/v2/graphemes/splitfunc.go +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/splitfunc.go @@ -2,8 +2,6 @@ package graphemes import ( "bufio" - - "github.com/clipperhouse/stringish" ) // is determines if lookup intersects propert(ies) @@ -13,12 +11,22 @@ func (lookup property) is(properties property) bool { const _Ignore = _Extend +// incbState tracks state for GB9c rule (Indic conjunct clusters) +// Pattern: Consonant (Extend|Linker)* Linker (Extend|Linker)* × Consonant +type incbState int + +const ( + incbNone incbState = iota // initial/reset + incbConsonant // seen Consonant, awaiting Linker + incbLinker // seen Consonant and Linker (conjunct ready) +) + // SplitFunc is a bufio.SplitFunc implementation of Unicode grapheme cluster segmentation, for use with bufio.Scanner. // // See https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries. 
var SplitFunc bufio.SplitFunc = splitFunc[[]byte] -func splitFunc[T stringish.Interface](data T, atEOF bool) (advance int, token T, err error) { +func splitFunc[T ~string | ~[]byte](data T, atEOF bool) (advance int, token T, err error) { var empty T if len(data) == 0 { return 0, empty, nil @@ -30,6 +38,9 @@ func splitFunc[T stringish.Interface](data T, atEOF bool) (advance int, token T, var lastLastExIgnore property = 0 // "last one before that" var regionalIndicatorCount int + // GB9c state: tracking Indic conjunct clusters + var incb incbState + // Rules are usually of the form Cat1 × Cat2; "current" refers to the first property // to the right of the ×, from which we look back or forward @@ -76,6 +87,23 @@ func splitFunc[T stringish.Interface](data T, atEOF bool) (advance int, token T, lastExIgnore = last } + // Update GB9c state based on what we just advanced past + if last.is(_InCBConsonant | _InCBLinker | _InCBExtend) { + switch { + case last.is(_InCBConsonant): + if incb != incbLinker { + incb = incbConsonant + } + case last.is(_InCBLinker): + if incb >= incbConsonant { + incb = incbLinker + } + // case last.is(_InCBExtend): stay in current state + } + } else { + incb = incbNone + } + current, w = lookup(data[pos:]) if w == 0 { if atEOF { @@ -141,11 +169,14 @@ func splitFunc[T stringish.Interface](data T, atEOF bool) (advance int, token T, } // https://unicode.org/reports/tr29/#GB9c - // TODO(clipperhouse): - // It appears to be added in Unicode 15.1.0: - // https://unicode.org/versions/Unicode15.1.0/#Migration - // This package currently supports Unicode 15.0.0, so - // out of scope for now + // Do not break within certain combinations with Indic_Conjunct_Break (InCB)=Linker. 
+ if incb == incbLinker && current.is(_InCBConsonant) { + // After matching the pattern, reset state to start tracking a new pattern + // The current Consonant becomes the start of the new pattern + incb = incbConsonant + pos += w + continue + } // https://unicode.org/reports/tr29/#GB11 if current.is(_ExtendedPictographic) && last.is(_ZWJ) && lastLastExIgnore.is(_ExtendedPictographic) { diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/trie.go b/vendor/github.com/clipperhouse/uax29/v2/graphemes/trie.go index 8aaabfacf0..56192b7ee4 100644 --- a/vendor/github.com/clipperhouse/uax29/v2/graphemes/trie.go +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/trie.go @@ -1,17 +1,18 @@ package graphemes -import "github.com/clipperhouse/stringish" - // generated by github.com/clipperhouse/uax29/v2 -// from https://www.unicode.org/Public/15.0.0/ucd/auxiliary/GraphemeBreakProperty.txt +// from https://www.unicode.org/Public/17.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -type property uint16 +type property uint32 const ( _CR property = 1 << iota _Control _Extend _ExtendedPictographic + _InCBConsonant + _InCBExtend + _InCBLinker _L _LF _LV @@ -27,7 +28,7 @@ const ( // lookup returns the trie value for the first UTF-8 encoding in s and // the width in bytes of this encoding. The size will be 0 if s does not // hold enough bytes to complete the encoding. len(s) must be greater than 0. -func lookup[T stringish.Interface](s T) (v property, sz int) { +func lookup[T ~string | ~[]byte](s T) (v property, sz int) { c0 := s[0] switch { case c0 < 0x80: // is ASCII @@ -87,7 +88,7 @@ func lookup[T stringish.Interface](s T) (v property, sz int) { return 0, 1 } -// graphemesTrie. Total size: 29120 bytes (28.44 KiB). Checksum: 80ad0c5ab9375f7. +// graphemesTrie. Total size: 61760 bytes (60.31 KiB). Checksum: af733ba94cd94ba6. 
// type graphemesTrie struct { } // func newGraphemesTrie(i int) *graphemesTrie { @@ -102,12 +103,12 @@ func lookupValue(n uint32, b byte) property { } } -// graphemesValues: 215 blocks, 13760 entries, 27520 bytes +// graphemesValues: 235 blocks, 15040 entries, 60160 bytes // The third block is the zero block. -var graphemesValues = [13760]property{ +var graphemesValues = [15040]property{ // Block 0x0, offset 0x0 0x00: 0x0002, 0x01: 0x0002, 0x02: 0x0002, 0x03: 0x0002, 0x04: 0x0002, 0x05: 0x0002, - 0x06: 0x0002, 0x07: 0x0002, 0x08: 0x0002, 0x09: 0x0002, 0x0a: 0x0020, 0x0b: 0x0002, + 0x06: 0x0002, 0x07: 0x0002, 0x08: 0x0002, 0x09: 0x0002, 0x0a: 0x0100, 0x0b: 0x0002, 0x0c: 0x0002, 0x0d: 0x0001, 0x0e: 0x0002, 0x0f: 0x0002, 0x10: 0x0002, 0x11: 0x0002, 0x12: 0x0002, 0x13: 0x0002, 0x14: 0x0002, 0x15: 0x0002, 0x16: 0x0002, 0x17: 0x0002, 0x18: 0x0002, 0x19: 0x0002, 0x1a: 0x0002, 0x1b: 0x0002, 0x1c: 0x0002, 0x1d: 0x0002, @@ -125,404 +126,503 @@ var graphemesValues = [13760]property{ 0xe9: 0x0008, 0xed: 0x0002, 0xee: 0x0008, // Block 0x4, offset 0x100 - 0x100: 0x0004, 0x101: 0x0004, 0x102: 0x0004, 0x103: 0x0004, 0x104: 0x0004, 0x105: 0x0004, - 0x106: 0x0004, 0x107: 0x0004, 0x108: 0x0004, 0x109: 0x0004, 0x10a: 0x0004, 0x10b: 0x0004, - 0x10c: 0x0004, 0x10d: 0x0004, 0x10e: 0x0004, 0x10f: 0x0004, 0x110: 0x0004, 0x111: 0x0004, - 0x112: 0x0004, 0x113: 0x0004, 0x114: 0x0004, 0x115: 0x0004, 0x116: 0x0004, 0x117: 0x0004, - 0x118: 0x0004, 0x119: 0x0004, 0x11a: 0x0004, 0x11b: 0x0004, 0x11c: 0x0004, 0x11d: 0x0004, - 0x11e: 0x0004, 0x11f: 0x0004, 0x120: 0x0004, 0x121: 0x0004, 0x122: 0x0004, 0x123: 0x0004, - 0x124: 0x0004, 0x125: 0x0004, 0x126: 0x0004, 0x127: 0x0004, 0x128: 0x0004, 0x129: 0x0004, - 0x12a: 0x0004, 0x12b: 0x0004, 0x12c: 0x0004, 0x12d: 0x0004, 0x12e: 0x0004, 0x12f: 0x0004, - 0x130: 0x0004, 0x131: 0x0004, 0x132: 0x0004, 0x133: 0x0004, 0x134: 0x0004, 0x135: 0x0004, - 0x136: 0x0004, 0x137: 0x0004, 0x138: 0x0004, 0x139: 0x0004, 0x13a: 0x0004, 0x13b: 0x0004, - 0x13c: 0x0004, 
0x13d: 0x0004, 0x13e: 0x0004, 0x13f: 0x0004, + 0x100: 0x0024, 0x101: 0x0024, 0x102: 0x0024, 0x103: 0x0024, 0x104: 0x0024, 0x105: 0x0024, + 0x106: 0x0024, 0x107: 0x0024, 0x108: 0x0024, 0x109: 0x0024, 0x10a: 0x0024, 0x10b: 0x0024, + 0x10c: 0x0024, 0x10d: 0x0024, 0x10e: 0x0024, 0x10f: 0x0024, 0x110: 0x0024, 0x111: 0x0024, + 0x112: 0x0024, 0x113: 0x0024, 0x114: 0x0024, 0x115: 0x0024, 0x116: 0x0024, 0x117: 0x0024, + 0x118: 0x0024, 0x119: 0x0024, 0x11a: 0x0024, 0x11b: 0x0024, 0x11c: 0x0024, 0x11d: 0x0024, + 0x11e: 0x0024, 0x11f: 0x0024, 0x120: 0x0024, 0x121: 0x0024, 0x122: 0x0024, 0x123: 0x0024, + 0x124: 0x0024, 0x125: 0x0024, 0x126: 0x0024, 0x127: 0x0024, 0x128: 0x0024, 0x129: 0x0024, + 0x12a: 0x0024, 0x12b: 0x0024, 0x12c: 0x0024, 0x12d: 0x0024, 0x12e: 0x0024, 0x12f: 0x0024, + 0x130: 0x0024, 0x131: 0x0024, 0x132: 0x0024, 0x133: 0x0024, 0x134: 0x0024, 0x135: 0x0024, + 0x136: 0x0024, 0x137: 0x0024, 0x138: 0x0024, 0x139: 0x0024, 0x13a: 0x0024, 0x13b: 0x0024, + 0x13c: 0x0024, 0x13d: 0x0024, 0x13e: 0x0024, 0x13f: 0x0024, // Block 0x5, offset 0x140 - 0x140: 0x0004, 0x141: 0x0004, 0x142: 0x0004, 0x143: 0x0004, 0x144: 0x0004, 0x145: 0x0004, - 0x146: 0x0004, 0x147: 0x0004, 0x148: 0x0004, 0x149: 0x0004, 0x14a: 0x0004, 0x14b: 0x0004, - 0x14c: 0x0004, 0x14d: 0x0004, 0x14e: 0x0004, 0x14f: 0x0004, 0x150: 0x0004, 0x151: 0x0004, - 0x152: 0x0004, 0x153: 0x0004, 0x154: 0x0004, 0x155: 0x0004, 0x156: 0x0004, 0x157: 0x0004, - 0x158: 0x0004, 0x159: 0x0004, 0x15a: 0x0004, 0x15b: 0x0004, 0x15c: 0x0004, 0x15d: 0x0004, - 0x15e: 0x0004, 0x15f: 0x0004, 0x160: 0x0004, 0x161: 0x0004, 0x162: 0x0004, 0x163: 0x0004, - 0x164: 0x0004, 0x165: 0x0004, 0x166: 0x0004, 0x167: 0x0004, 0x168: 0x0004, 0x169: 0x0004, - 0x16a: 0x0004, 0x16b: 0x0004, 0x16c: 0x0004, 0x16d: 0x0004, 0x16e: 0x0004, 0x16f: 0x0004, + 0x140: 0x0024, 0x141: 0x0024, 0x142: 0x0024, 0x143: 0x0024, 0x144: 0x0024, 0x145: 0x0024, + 0x146: 0x0024, 0x147: 0x0024, 0x148: 0x0024, 0x149: 0x0024, 0x14a: 0x0024, 0x14b: 0x0024, + 0x14c: 0x0024, 0x14d: 
0x0024, 0x14e: 0x0024, 0x14f: 0x0024, 0x150: 0x0024, 0x151: 0x0024, + 0x152: 0x0024, 0x153: 0x0024, 0x154: 0x0024, 0x155: 0x0024, 0x156: 0x0024, 0x157: 0x0024, + 0x158: 0x0024, 0x159: 0x0024, 0x15a: 0x0024, 0x15b: 0x0024, 0x15c: 0x0024, 0x15d: 0x0024, + 0x15e: 0x0024, 0x15f: 0x0024, 0x160: 0x0024, 0x161: 0x0024, 0x162: 0x0024, 0x163: 0x0024, + 0x164: 0x0024, 0x165: 0x0024, 0x166: 0x0024, 0x167: 0x0024, 0x168: 0x0024, 0x169: 0x0024, + 0x16a: 0x0024, 0x16b: 0x0024, 0x16c: 0x0024, 0x16d: 0x0024, 0x16e: 0x0024, 0x16f: 0x0024, // Block 0x6, offset 0x180 - 0x183: 0x0004, 0x184: 0x0004, 0x185: 0x0004, - 0x186: 0x0004, 0x187: 0x0004, 0x188: 0x0004, 0x189: 0x0004, + 0x183: 0x0024, 0x184: 0x0024, 0x185: 0x0024, + 0x186: 0x0024, 0x187: 0x0024, 0x188: 0x0024, 0x189: 0x0024, // Block 0x7, offset 0x1c0 - 0x1d1: 0x0004, - 0x1d2: 0x0004, 0x1d3: 0x0004, 0x1d4: 0x0004, 0x1d5: 0x0004, 0x1d6: 0x0004, 0x1d7: 0x0004, - 0x1d8: 0x0004, 0x1d9: 0x0004, 0x1da: 0x0004, 0x1db: 0x0004, 0x1dc: 0x0004, 0x1dd: 0x0004, - 0x1de: 0x0004, 0x1df: 0x0004, 0x1e0: 0x0004, 0x1e1: 0x0004, 0x1e2: 0x0004, 0x1e3: 0x0004, - 0x1e4: 0x0004, 0x1e5: 0x0004, 0x1e6: 0x0004, 0x1e7: 0x0004, 0x1e8: 0x0004, 0x1e9: 0x0004, - 0x1ea: 0x0004, 0x1eb: 0x0004, 0x1ec: 0x0004, 0x1ed: 0x0004, 0x1ee: 0x0004, 0x1ef: 0x0004, - 0x1f0: 0x0004, 0x1f1: 0x0004, 0x1f2: 0x0004, 0x1f3: 0x0004, 0x1f4: 0x0004, 0x1f5: 0x0004, - 0x1f6: 0x0004, 0x1f7: 0x0004, 0x1f8: 0x0004, 0x1f9: 0x0004, 0x1fa: 0x0004, 0x1fb: 0x0004, - 0x1fc: 0x0004, 0x1fd: 0x0004, 0x1ff: 0x0004, + 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0024, 0x1d4: 0x0024, 0x1d5: 0x0024, 0x1d6: 0x0024, 0x1d7: 0x0024, + 0x1d8: 0x0024, 0x1d9: 0x0024, 0x1da: 0x0024, 0x1db: 0x0024, 0x1dc: 0x0024, 0x1dd: 0x0024, + 0x1de: 0x0024, 0x1df: 0x0024, 0x1e0: 0x0024, 0x1e1: 0x0024, 0x1e2: 0x0024, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 
0x0024, 0x1f1: 0x0024, 0x1f2: 0x0024, 0x1f3: 0x0024, 0x1f4: 0x0024, 0x1f5: 0x0024, + 0x1f6: 0x0024, 0x1f7: 0x0024, 0x1f8: 0x0024, 0x1f9: 0x0024, 0x1fa: 0x0024, 0x1fb: 0x0024, + 0x1fc: 0x0024, 0x1fd: 0x0024, 0x1ff: 0x0024, // Block 0x8, offset 0x200 - 0x201: 0x0004, 0x202: 0x0004, 0x204: 0x0004, 0x205: 0x0004, - 0x207: 0x0004, + 0x201: 0x0024, 0x202: 0x0024, 0x204: 0x0024, 0x205: 0x0024, + 0x207: 0x0024, // Block 0x9, offset 0x240 - 0x240: 0x0100, 0x241: 0x0100, 0x242: 0x0100, 0x243: 0x0100, 0x244: 0x0100, 0x245: 0x0100, - 0x250: 0x0004, 0x251: 0x0004, - 0x252: 0x0004, 0x253: 0x0004, 0x254: 0x0004, 0x255: 0x0004, 0x256: 0x0004, 0x257: 0x0004, - 0x258: 0x0004, 0x259: 0x0004, 0x25a: 0x0004, 0x25c: 0x0002, + 0x240: 0x0800, 0x241: 0x0800, 0x242: 0x0800, 0x243: 0x0800, 0x244: 0x0800, 0x245: 0x0800, + 0x250: 0x0024, 0x251: 0x0024, + 0x252: 0x0024, 0x253: 0x0024, 0x254: 0x0024, 0x255: 0x0024, 0x256: 0x0024, 0x257: 0x0024, + 0x258: 0x0024, 0x259: 0x0024, 0x25a: 0x0024, 0x25c: 0x0002, // Block 0xa, offset 0x280 - 0x28b: 0x0004, - 0x28c: 0x0004, 0x28d: 0x0004, 0x28e: 0x0004, 0x28f: 0x0004, 0x290: 0x0004, 0x291: 0x0004, - 0x292: 0x0004, 0x293: 0x0004, 0x294: 0x0004, 0x295: 0x0004, 0x296: 0x0004, 0x297: 0x0004, - 0x298: 0x0004, 0x299: 0x0004, 0x29a: 0x0004, 0x29b: 0x0004, 0x29c: 0x0004, 0x29d: 0x0004, - 0x29e: 0x0004, 0x29f: 0x0004, - 0x2b0: 0x0004, + 0x28b: 0x0024, + 0x28c: 0x0024, 0x28d: 0x0024, 0x28e: 0x0024, 0x28f: 0x0024, 0x290: 0x0024, 0x291: 0x0024, + 0x292: 0x0024, 0x293: 0x0024, 0x294: 0x0024, 0x295: 0x0024, 0x296: 0x0024, 0x297: 0x0024, + 0x298: 0x0024, 0x299: 0x0024, 0x29a: 0x0024, 0x29b: 0x0024, 0x29c: 0x0024, 0x29d: 0x0024, + 0x29e: 0x0024, 0x29f: 0x0024, + 0x2b0: 0x0024, // Block 0xb, offset 0x2c0 - 0x2d6: 0x0004, 0x2d7: 0x0004, - 0x2d8: 0x0004, 0x2d9: 0x0004, 0x2da: 0x0004, 0x2db: 0x0004, 0x2dc: 0x0004, 0x2dd: 0x0100, - 0x2df: 0x0004, 0x2e0: 0x0004, 0x2e1: 0x0004, 0x2e2: 0x0004, 0x2e3: 0x0004, - 0x2e4: 0x0004, 0x2e7: 0x0004, 0x2e8: 0x0004, - 0x2ea: 0x0004, 0x2eb: 
0x0004, 0x2ec: 0x0004, 0x2ed: 0x0004, + 0x2d6: 0x0024, 0x2d7: 0x0024, + 0x2d8: 0x0024, 0x2d9: 0x0024, 0x2da: 0x0024, 0x2db: 0x0024, 0x2dc: 0x0024, 0x2dd: 0x0800, + 0x2df: 0x0024, 0x2e0: 0x0024, 0x2e1: 0x0024, 0x2e2: 0x0024, 0x2e3: 0x0024, + 0x2e4: 0x0024, 0x2e7: 0x0024, 0x2e8: 0x0024, + 0x2ea: 0x0024, 0x2eb: 0x0024, 0x2ec: 0x0024, 0x2ed: 0x0024, // Block 0xc, offset 0x300 - 0x30f: 0x0100, 0x311: 0x0004, - 0x330: 0x0004, 0x331: 0x0004, 0x332: 0x0004, 0x333: 0x0004, 0x334: 0x0004, 0x335: 0x0004, - 0x336: 0x0004, 0x337: 0x0004, 0x338: 0x0004, 0x339: 0x0004, 0x33a: 0x0004, 0x33b: 0x0004, - 0x33c: 0x0004, 0x33d: 0x0004, 0x33e: 0x0004, 0x33f: 0x0004, + 0x30f: 0x0800, 0x311: 0x0024, + 0x330: 0x0024, 0x331: 0x0024, 0x332: 0x0024, 0x333: 0x0024, 0x334: 0x0024, 0x335: 0x0024, + 0x336: 0x0024, 0x337: 0x0024, 0x338: 0x0024, 0x339: 0x0024, 0x33a: 0x0024, 0x33b: 0x0024, + 0x33c: 0x0024, 0x33d: 0x0024, 0x33e: 0x0024, 0x33f: 0x0024, // Block 0xd, offset 0x340 - 0x340: 0x0004, 0x341: 0x0004, 0x342: 0x0004, 0x343: 0x0004, 0x344: 0x0004, 0x345: 0x0004, - 0x346: 0x0004, 0x347: 0x0004, 0x348: 0x0004, 0x349: 0x0004, 0x34a: 0x0004, + 0x340: 0x0024, 0x341: 0x0024, 0x342: 0x0024, 0x343: 0x0024, 0x344: 0x0024, 0x345: 0x0024, + 0x346: 0x0024, 0x347: 0x0024, 0x348: 0x0024, 0x349: 0x0024, 0x34a: 0x0024, // Block 0xe, offset 0x380 - 0x3a6: 0x0004, 0x3a7: 0x0004, 0x3a8: 0x0004, 0x3a9: 0x0004, - 0x3aa: 0x0004, 0x3ab: 0x0004, 0x3ac: 0x0004, 0x3ad: 0x0004, 0x3ae: 0x0004, 0x3af: 0x0004, - 0x3b0: 0x0004, + 0x3a6: 0x0024, 0x3a7: 0x0024, 0x3a8: 0x0024, 0x3a9: 0x0024, + 0x3aa: 0x0024, 0x3ab: 0x0024, 0x3ac: 0x0024, 0x3ad: 0x0024, 0x3ae: 0x0024, 0x3af: 0x0024, + 0x3b0: 0x0024, // Block 0xf, offset 0x3c0 - 0x3eb: 0x0004, 0x3ec: 0x0004, 0x3ed: 0x0004, 0x3ee: 0x0004, 0x3ef: 0x0004, - 0x3f0: 0x0004, 0x3f1: 0x0004, 0x3f2: 0x0004, 0x3f3: 0x0004, - 0x3fd: 0x0004, + 0x3eb: 0x0024, 0x3ec: 0x0024, 0x3ed: 0x0024, 0x3ee: 0x0024, 0x3ef: 0x0024, + 0x3f0: 0x0024, 0x3f1: 0x0024, 0x3f2: 0x0024, 0x3f3: 0x0024, + 0x3fd: 
0x0024, // Block 0x10, offset 0x400 - 0x416: 0x0004, 0x417: 0x0004, - 0x418: 0x0004, 0x419: 0x0004, 0x41b: 0x0004, 0x41c: 0x0004, 0x41d: 0x0004, - 0x41e: 0x0004, 0x41f: 0x0004, 0x420: 0x0004, 0x421: 0x0004, 0x422: 0x0004, 0x423: 0x0004, - 0x425: 0x0004, 0x426: 0x0004, 0x427: 0x0004, 0x429: 0x0004, - 0x42a: 0x0004, 0x42b: 0x0004, 0x42c: 0x0004, 0x42d: 0x0004, + 0x416: 0x0024, 0x417: 0x0024, + 0x418: 0x0024, 0x419: 0x0024, 0x41b: 0x0024, 0x41c: 0x0024, 0x41d: 0x0024, + 0x41e: 0x0024, 0x41f: 0x0024, 0x420: 0x0024, 0x421: 0x0024, 0x422: 0x0024, 0x423: 0x0024, + 0x425: 0x0024, 0x426: 0x0024, 0x427: 0x0024, 0x429: 0x0024, + 0x42a: 0x0024, 0x42b: 0x0024, 0x42c: 0x0024, 0x42d: 0x0024, // Block 0x11, offset 0x440 - 0x459: 0x0004, 0x45a: 0x0004, 0x45b: 0x0004, + 0x459: 0x0024, 0x45a: 0x0024, 0x45b: 0x0024, // Block 0x12, offset 0x480 - 0x490: 0x0100, 0x491: 0x0100, - 0x498: 0x0004, 0x499: 0x0004, 0x49a: 0x0004, 0x49b: 0x0004, 0x49c: 0x0004, 0x49d: 0x0004, - 0x49e: 0x0004, 0x49f: 0x0004, + 0x490: 0x0800, 0x491: 0x0800, + 0x497: 0x0024, + 0x498: 0x0024, 0x499: 0x0024, 0x49a: 0x0024, 0x49b: 0x0024, 0x49c: 0x0024, 0x49d: 0x0024, + 0x49e: 0x0024, 0x49f: 0x0024, // Block 0x13, offset 0x4c0 - 0x4ca: 0x0004, 0x4cb: 0x0004, - 0x4cc: 0x0004, 0x4cd: 0x0004, 0x4ce: 0x0004, 0x4cf: 0x0004, 0x4d0: 0x0004, 0x4d1: 0x0004, - 0x4d2: 0x0004, 0x4d3: 0x0004, 0x4d4: 0x0004, 0x4d5: 0x0004, 0x4d6: 0x0004, 0x4d7: 0x0004, - 0x4d8: 0x0004, 0x4d9: 0x0004, 0x4da: 0x0004, 0x4db: 0x0004, 0x4dc: 0x0004, 0x4dd: 0x0004, - 0x4de: 0x0004, 0x4df: 0x0004, 0x4e0: 0x0004, 0x4e1: 0x0004, 0x4e2: 0x0100, 0x4e3: 0x0004, - 0x4e4: 0x0004, 0x4e5: 0x0004, 0x4e6: 0x0004, 0x4e7: 0x0004, 0x4e8: 0x0004, 0x4e9: 0x0004, - 0x4ea: 0x0004, 0x4eb: 0x0004, 0x4ec: 0x0004, 0x4ed: 0x0004, 0x4ee: 0x0004, 0x4ef: 0x0004, - 0x4f0: 0x0004, 0x4f1: 0x0004, 0x4f2: 0x0004, 0x4f3: 0x0004, 0x4f4: 0x0004, 0x4f5: 0x0004, - 0x4f6: 0x0004, 0x4f7: 0x0004, 0x4f8: 0x0004, 0x4f9: 0x0004, 0x4fa: 0x0004, 0x4fb: 0x0004, - 0x4fc: 0x0004, 0x4fd: 0x0004, 0x4fe: 
0x0004, 0x4ff: 0x0004, + 0x4ca: 0x0024, 0x4cb: 0x0024, + 0x4cc: 0x0024, 0x4cd: 0x0024, 0x4ce: 0x0024, 0x4cf: 0x0024, 0x4d0: 0x0024, 0x4d1: 0x0024, + 0x4d2: 0x0024, 0x4d3: 0x0024, 0x4d4: 0x0024, 0x4d5: 0x0024, 0x4d6: 0x0024, 0x4d7: 0x0024, + 0x4d8: 0x0024, 0x4d9: 0x0024, 0x4da: 0x0024, 0x4db: 0x0024, 0x4dc: 0x0024, 0x4dd: 0x0024, + 0x4de: 0x0024, 0x4df: 0x0024, 0x4e0: 0x0024, 0x4e1: 0x0024, 0x4e2: 0x0800, 0x4e3: 0x0024, + 0x4e4: 0x0024, 0x4e5: 0x0024, 0x4e6: 0x0024, 0x4e7: 0x0024, 0x4e8: 0x0024, 0x4e9: 0x0024, + 0x4ea: 0x0024, 0x4eb: 0x0024, 0x4ec: 0x0024, 0x4ed: 0x0024, 0x4ee: 0x0024, 0x4ef: 0x0024, + 0x4f0: 0x0024, 0x4f1: 0x0024, 0x4f2: 0x0024, 0x4f3: 0x0024, 0x4f4: 0x0024, 0x4f5: 0x0024, + 0x4f6: 0x0024, 0x4f7: 0x0024, 0x4f8: 0x0024, 0x4f9: 0x0024, 0x4fa: 0x0024, 0x4fb: 0x0024, + 0x4fc: 0x0024, 0x4fd: 0x0024, 0x4fe: 0x0024, 0x4ff: 0x0024, // Block 0x14, offset 0x500 - 0x500: 0x0004, 0x501: 0x0004, 0x502: 0x0004, 0x503: 0x0400, - 0x53a: 0x0004, 0x53b: 0x0400, - 0x53c: 0x0004, 0x53e: 0x0400, 0x53f: 0x0400, + 0x500: 0x0024, 0x501: 0x0024, 0x502: 0x0024, 0x503: 0x2000, + 0x515: 0x0010, 0x516: 0x0010, 0x517: 0x0010, + 0x518: 0x0010, 0x519: 0x0010, 0x51a: 0x0010, 0x51b: 0x0010, 0x51c: 0x0010, 0x51d: 0x0010, + 0x51e: 0x0010, 0x51f: 0x0010, 0x520: 0x0010, 0x521: 0x0010, 0x522: 0x0010, 0x523: 0x0010, + 0x524: 0x0010, 0x525: 0x0010, 0x526: 0x0010, 0x527: 0x0010, 0x528: 0x0010, 0x529: 0x0010, + 0x52a: 0x0010, 0x52b: 0x0010, 0x52c: 0x0010, 0x52d: 0x0010, 0x52e: 0x0010, 0x52f: 0x0010, + 0x530: 0x0010, 0x531: 0x0010, 0x532: 0x0010, 0x533: 0x0010, 0x534: 0x0010, 0x535: 0x0010, + 0x536: 0x0010, 0x537: 0x0010, 0x538: 0x0010, 0x539: 0x0010, 0x53a: 0x0024, 0x53b: 0x2000, + 0x53c: 0x0024, 0x53e: 0x2000, 0x53f: 0x2000, // Block 0x15, offset 0x540 - 0x540: 0x0400, 0x541: 0x0004, 0x542: 0x0004, 0x543: 0x0004, 0x544: 0x0004, 0x545: 0x0004, - 0x546: 0x0004, 0x547: 0x0004, 0x548: 0x0004, 0x549: 0x0400, 0x54a: 0x0400, 0x54b: 0x0400, - 0x54c: 0x0400, 0x54d: 0x0004, 0x54e: 0x0400, 0x54f: 
0x0400, 0x551: 0x0004, - 0x552: 0x0004, 0x553: 0x0004, 0x554: 0x0004, 0x555: 0x0004, 0x556: 0x0004, 0x557: 0x0004, - 0x562: 0x0004, 0x563: 0x0004, + 0x540: 0x2000, 0x541: 0x0024, 0x542: 0x0024, 0x543: 0x0024, 0x544: 0x0024, 0x545: 0x0024, + 0x546: 0x0024, 0x547: 0x0024, 0x548: 0x0024, 0x549: 0x2000, 0x54a: 0x2000, 0x54b: 0x2000, + 0x54c: 0x2000, 0x54d: 0x0044, 0x54e: 0x2000, 0x54f: 0x2000, 0x551: 0x0024, + 0x552: 0x0024, 0x553: 0x0024, 0x554: 0x0024, 0x555: 0x0024, 0x556: 0x0024, 0x557: 0x0024, + 0x558: 0x0010, 0x559: 0x0010, 0x55a: 0x0010, 0x55b: 0x0010, 0x55c: 0x0010, 0x55d: 0x0010, + 0x55e: 0x0010, 0x55f: 0x0010, 0x562: 0x0024, 0x563: 0x0024, + 0x578: 0x0010, 0x579: 0x0010, 0x57a: 0x0010, 0x57b: 0x0010, + 0x57c: 0x0010, 0x57d: 0x0010, 0x57e: 0x0010, 0x57f: 0x0010, // Block 0x16, offset 0x580 - 0x581: 0x0004, 0x582: 0x0400, 0x583: 0x0400, - 0x5bc: 0x0004, 0x5be: 0x0004, 0x5bf: 0x0400, + 0x581: 0x0024, 0x582: 0x2000, 0x583: 0x2000, + 0x595: 0x0010, 0x596: 0x0010, 0x597: 0x0010, + 0x598: 0x0010, 0x599: 0x0010, 0x59a: 0x0010, 0x59b: 0x0010, 0x59c: 0x0010, 0x59d: 0x0010, + 0x59e: 0x0010, 0x59f: 0x0010, 0x5a0: 0x0010, 0x5a1: 0x0010, 0x5a2: 0x0010, 0x5a3: 0x0010, + 0x5a4: 0x0010, 0x5a5: 0x0010, 0x5a6: 0x0010, 0x5a7: 0x0010, 0x5a8: 0x0010, + 0x5aa: 0x0010, 0x5ab: 0x0010, 0x5ac: 0x0010, 0x5ad: 0x0010, 0x5ae: 0x0010, 0x5af: 0x0010, + 0x5b0: 0x0010, 0x5b2: 0x0010, + 0x5b6: 0x0010, 0x5b7: 0x0010, 0x5b8: 0x0010, 0x5b9: 0x0010, + 0x5bc: 0x0024, 0x5be: 0x0024, 0x5bf: 0x2000, // Block 0x17, offset 0x5c0 - 0x5c0: 0x0400, 0x5c1: 0x0004, 0x5c2: 0x0004, 0x5c3: 0x0004, 0x5c4: 0x0004, - 0x5c7: 0x0400, 0x5c8: 0x0400, 0x5cb: 0x0400, - 0x5cc: 0x0400, 0x5cd: 0x0004, - 0x5d7: 0x0004, - 0x5e2: 0x0004, 0x5e3: 0x0004, - 0x5fe: 0x0004, + 0x5c0: 0x2000, 0x5c1: 0x0024, 0x5c2: 0x0024, 0x5c3: 0x0024, 0x5c4: 0x0024, + 0x5c7: 0x2000, 0x5c8: 0x2000, 0x5cb: 0x2000, + 0x5cc: 0x2000, 0x5cd: 0x0044, + 0x5d7: 0x0024, + 0x5dc: 0x0010, 0x5dd: 0x0010, + 0x5df: 0x0010, 0x5e2: 0x0024, 0x5e3: 0x0024, + 0x5f0: 
0x0010, 0x5f1: 0x0010, + 0x5fe: 0x0024, // Block 0x18, offset 0x600 - 0x601: 0x0004, 0x602: 0x0004, 0x603: 0x0400, - 0x63c: 0x0004, 0x63e: 0x0400, 0x63f: 0x0400, + 0x601: 0x0024, 0x602: 0x0024, 0x603: 0x2000, + 0x63c: 0x0024, 0x63e: 0x2000, 0x63f: 0x2000, // Block 0x19, offset 0x640 - 0x640: 0x0400, 0x641: 0x0004, 0x642: 0x0004, - 0x647: 0x0004, 0x648: 0x0004, 0x64b: 0x0004, - 0x64c: 0x0004, 0x64d: 0x0004, 0x651: 0x0004, - 0x670: 0x0004, 0x671: 0x0004, 0x675: 0x0004, + 0x640: 0x2000, 0x641: 0x0024, 0x642: 0x0024, + 0x647: 0x0024, 0x648: 0x0024, 0x64b: 0x0024, + 0x64c: 0x0024, 0x64d: 0x0024, 0x651: 0x0024, + 0x670: 0x0024, 0x671: 0x0024, 0x675: 0x0024, // Block 0x1a, offset 0x680 - 0x680: 0x0400, 0x681: 0x0004, 0x682: 0x0004, 0x683: 0x0004, 0x684: 0x0004, 0x685: 0x0004, - 0x687: 0x0004, 0x688: 0x0004, 0x689: 0x0400, 0x68b: 0x0400, - 0x68c: 0x0400, 0x68d: 0x0004, - 0x6a2: 0x0004, 0x6a3: 0x0004, - 0x6ba: 0x0004, 0x6bb: 0x0004, - 0x6bc: 0x0004, 0x6bd: 0x0004, 0x6be: 0x0004, 0x6bf: 0x0004, + 0x681: 0x0024, 0x682: 0x0024, 0x683: 0x2000, + 0x695: 0x0010, 0x696: 0x0010, 0x697: 0x0010, + 0x698: 0x0010, 0x699: 0x0010, 0x69a: 0x0010, 0x69b: 0x0010, 0x69c: 0x0010, 0x69d: 0x0010, + 0x69e: 0x0010, 0x69f: 0x0010, 0x6a0: 0x0010, 0x6a1: 0x0010, 0x6a2: 0x0010, 0x6a3: 0x0010, + 0x6a4: 0x0010, 0x6a5: 0x0010, 0x6a6: 0x0010, 0x6a7: 0x0010, 0x6a8: 0x0010, + 0x6aa: 0x0010, 0x6ab: 0x0010, 0x6ac: 0x0010, 0x6ad: 0x0010, 0x6ae: 0x0010, 0x6af: 0x0010, + 0x6b0: 0x0010, 0x6b2: 0x0010, 0x6b3: 0x0010, 0x6b5: 0x0010, + 0x6b6: 0x0010, 0x6b7: 0x0010, 0x6b8: 0x0010, 0x6b9: 0x0010, + 0x6bc: 0x0024, 0x6be: 0x2000, 0x6bf: 0x2000, // Block 0x1b, offset 0x6c0 - 0x6c1: 0x0004, 0x6c2: 0x0400, 0x6c3: 0x0400, - 0x6fc: 0x0004, 0x6fe: 0x0004, 0x6ff: 0x0004, + 0x6c0: 0x2000, 0x6c1: 0x0024, 0x6c2: 0x0024, 0x6c3: 0x0024, 0x6c4: 0x0024, 0x6c5: 0x0024, + 0x6c7: 0x0024, 0x6c8: 0x0024, 0x6c9: 0x2000, 0x6cb: 0x2000, + 0x6cc: 0x2000, 0x6cd: 0x0044, + 0x6e2: 0x0024, 0x6e3: 0x0024, + 0x6f9: 0x0010, 0x6fa: 0x0024, 0x6fb: 
0x0024, + 0x6fc: 0x0024, 0x6fd: 0x0024, 0x6fe: 0x0024, 0x6ff: 0x0024, // Block 0x1c, offset 0x700 - 0x700: 0x0400, 0x701: 0x0004, 0x702: 0x0004, 0x703: 0x0004, 0x704: 0x0004, - 0x707: 0x0400, 0x708: 0x0400, 0x70b: 0x0400, - 0x70c: 0x0400, 0x70d: 0x0004, - 0x715: 0x0004, 0x716: 0x0004, 0x717: 0x0004, - 0x722: 0x0004, 0x723: 0x0004, + 0x701: 0x0024, 0x702: 0x2000, 0x703: 0x2000, + 0x715: 0x0010, 0x716: 0x0010, 0x717: 0x0010, + 0x718: 0x0010, 0x719: 0x0010, 0x71a: 0x0010, 0x71b: 0x0010, 0x71c: 0x0010, 0x71d: 0x0010, + 0x71e: 0x0010, 0x71f: 0x0010, 0x720: 0x0010, 0x721: 0x0010, 0x722: 0x0010, 0x723: 0x0010, + 0x724: 0x0010, 0x725: 0x0010, 0x726: 0x0010, 0x727: 0x0010, 0x728: 0x0010, + 0x72a: 0x0010, 0x72b: 0x0010, 0x72c: 0x0010, 0x72d: 0x0010, 0x72e: 0x0010, 0x72f: 0x0010, + 0x730: 0x0010, 0x732: 0x0010, 0x733: 0x0010, 0x735: 0x0010, + 0x736: 0x0010, 0x737: 0x0010, 0x738: 0x0010, 0x739: 0x0010, + 0x73c: 0x0024, 0x73e: 0x0024, 0x73f: 0x0024, // Block 0x1d, offset 0x740 - 0x742: 0x0004, - 0x77e: 0x0004, 0x77f: 0x0400, + 0x740: 0x2000, 0x741: 0x0024, 0x742: 0x0024, 0x743: 0x0024, 0x744: 0x0024, + 0x747: 0x2000, 0x748: 0x2000, 0x74b: 0x2000, + 0x74c: 0x2000, 0x74d: 0x0044, + 0x755: 0x0024, 0x756: 0x0024, 0x757: 0x0024, + 0x75c: 0x0010, 0x75d: 0x0010, + 0x75f: 0x0010, 0x762: 0x0024, 0x763: 0x0024, + 0x771: 0x0010, // Block 0x1e, offset 0x780 - 0x780: 0x0004, 0x781: 0x0400, 0x782: 0x0400, - 0x786: 0x0400, 0x787: 0x0400, 0x788: 0x0400, 0x78a: 0x0400, 0x78b: 0x0400, - 0x78c: 0x0400, 0x78d: 0x0004, - 0x797: 0x0004, + 0x782: 0x0024, + 0x7be: 0x0024, 0x7bf: 0x2000, // Block 0x1f, offset 0x7c0 - 0x7c0: 0x0004, 0x7c1: 0x0400, 0x7c2: 0x0400, 0x7c3: 0x0400, 0x7c4: 0x0004, - 0x7fc: 0x0004, 0x7fe: 0x0004, 0x7ff: 0x0004, + 0x7c0: 0x0024, 0x7c1: 0x2000, 0x7c2: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x0024, + 0x7d7: 0x0024, // Block 0x20, offset 0x800 - 0x800: 0x0004, 0x801: 0x0400, 0x802: 0x0400, 0x803: 0x0400, 0x804: 
0x0400, - 0x806: 0x0004, 0x807: 0x0004, 0x808: 0x0004, 0x80a: 0x0004, 0x80b: 0x0004, - 0x80c: 0x0004, 0x80d: 0x0004, - 0x815: 0x0004, 0x816: 0x0004, - 0x822: 0x0004, 0x823: 0x0004, + 0x800: 0x0024, 0x801: 0x2000, 0x802: 0x2000, 0x803: 0x2000, 0x804: 0x0024, + 0x815: 0x0010, 0x816: 0x0010, 0x817: 0x0010, + 0x818: 0x0010, 0x819: 0x0010, 0x81a: 0x0010, 0x81b: 0x0010, 0x81c: 0x0010, 0x81d: 0x0010, + 0x81e: 0x0010, 0x81f: 0x0010, 0x820: 0x0010, 0x821: 0x0010, 0x822: 0x0010, 0x823: 0x0010, + 0x824: 0x0010, 0x825: 0x0010, 0x826: 0x0010, 0x827: 0x0010, 0x828: 0x0010, + 0x82a: 0x0010, 0x82b: 0x0010, 0x82c: 0x0010, 0x82d: 0x0010, 0x82e: 0x0010, 0x82f: 0x0010, + 0x830: 0x0010, 0x831: 0x0010, 0x832: 0x0010, 0x833: 0x0010, 0x834: 0x0010, 0x835: 0x0010, + 0x836: 0x0010, 0x837: 0x0010, 0x838: 0x0010, 0x839: 0x0010, + 0x83c: 0x0024, 0x83e: 0x0024, 0x83f: 0x0024, // Block 0x21, offset 0x840 - 0x841: 0x0004, 0x842: 0x0400, 0x843: 0x0400, - 0x87c: 0x0004, 0x87e: 0x0400, 0x87f: 0x0004, + 0x840: 0x0024, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, + 0x846: 0x0024, 0x847: 0x0024, 0x848: 0x0024, 0x84a: 0x0024, 0x84b: 0x0024, + 0x84c: 0x0024, 0x84d: 0x0044, + 0x855: 0x0024, 0x856: 0x0024, + 0x858: 0x0010, 0x859: 0x0010, 0x85a: 0x0010, + 0x862: 0x0024, 0x863: 0x0024, // Block 0x22, offset 0x880 - 0x880: 0x0400, 0x881: 0x0400, 0x882: 0x0004, 0x883: 0x0400, 0x884: 0x0400, - 0x886: 0x0004, 0x887: 0x0400, 0x888: 0x0400, 0x88a: 0x0400, 0x88b: 0x0400, - 0x88c: 0x0004, 0x88d: 0x0004, - 0x895: 0x0004, 0x896: 0x0004, - 0x8a2: 0x0004, 0x8a3: 0x0004, - 0x8b3: 0x0400, + 0x881: 0x0024, 0x882: 0x2000, 0x883: 0x2000, + 0x8bc: 0x0024, 0x8be: 0x2000, 0x8bf: 0x0024, // Block 0x23, offset 0x8c0 - 0x8c0: 0x0004, 0x8c1: 0x0004, 0x8c2: 0x0400, 0x8c3: 0x0400, - 0x8fb: 0x0004, - 0x8fc: 0x0004, 0x8fe: 0x0004, 0x8ff: 0x0400, + 0x8c0: 0x0024, 0x8c1: 0x2000, 0x8c2: 0x0024, 0x8c3: 0x2000, 0x8c4: 0x2000, + 0x8c6: 0x0024, 0x8c7: 0x0024, 0x8c8: 0x0024, 0x8ca: 0x0024, 0x8cb: 0x0024, + 0x8cc: 0x0024, 0x8cd: 
0x0024, + 0x8d5: 0x0024, 0x8d6: 0x0024, + 0x8e2: 0x0024, 0x8e3: 0x0024, + 0x8f3: 0x2000, // Block 0x24, offset 0x900 - 0x900: 0x0400, 0x901: 0x0004, 0x902: 0x0004, 0x903: 0x0004, 0x904: 0x0004, - 0x906: 0x0400, 0x907: 0x0400, 0x908: 0x0400, 0x90a: 0x0400, 0x90b: 0x0400, - 0x90c: 0x0400, 0x90d: 0x0004, 0x90e: 0x0100, - 0x917: 0x0004, - 0x922: 0x0004, 0x923: 0x0004, + 0x900: 0x0024, 0x901: 0x0024, 0x902: 0x2000, 0x903: 0x2000, + 0x915: 0x0010, 0x916: 0x0010, 0x917: 0x0010, + 0x918: 0x0010, 0x919: 0x0010, 0x91a: 0x0010, 0x91b: 0x0010, 0x91c: 0x0010, 0x91d: 0x0010, + 0x91e: 0x0010, 0x91f: 0x0010, 0x920: 0x0010, 0x921: 0x0010, 0x922: 0x0010, 0x923: 0x0010, + 0x924: 0x0010, 0x925: 0x0010, 0x926: 0x0010, 0x927: 0x0010, 0x928: 0x0010, 0x929: 0x0010, + 0x92a: 0x0010, 0x92b: 0x0010, 0x92c: 0x0010, 0x92d: 0x0010, 0x92e: 0x0010, 0x92f: 0x0010, + 0x930: 0x0010, 0x931: 0x0010, 0x932: 0x0010, 0x933: 0x0010, 0x934: 0x0010, 0x935: 0x0010, + 0x936: 0x0010, 0x937: 0x0010, 0x938: 0x0010, 0x939: 0x0010, 0x93a: 0x0010, 0x93b: 0x0024, + 0x93c: 0x0024, 0x93e: 0x0024, 0x93f: 0x2000, // Block 0x25, offset 0x940 - 0x941: 0x0004, 0x942: 0x0400, 0x943: 0x0400, + 0x940: 0x2000, 0x941: 0x0024, 0x942: 0x0024, 0x943: 0x0024, 0x944: 0x0024, + 0x946: 0x2000, 0x947: 0x2000, 0x948: 0x2000, 0x94a: 0x2000, 0x94b: 0x2000, + 0x94c: 0x2000, 0x94d: 0x0044, 0x94e: 0x0800, + 0x957: 0x0024, + 0x962: 0x0024, 0x963: 0x0024, // Block 0x26, offset 0x980 - 0x98a: 0x0004, - 0x98f: 0x0004, 0x990: 0x0400, 0x991: 0x0400, - 0x992: 0x0004, 0x993: 0x0004, 0x994: 0x0004, 0x996: 0x0004, - 0x998: 0x0400, 0x999: 0x0400, 0x99a: 0x0400, 0x99b: 0x0400, 0x99c: 0x0400, 0x99d: 0x0400, - 0x99e: 0x0400, 0x99f: 0x0004, - 0x9b2: 0x0400, 0x9b3: 0x0400, + 0x981: 0x0024, 0x982: 0x2000, 0x983: 0x2000, // Block 0x27, offset 0x9c0 - 0x9f1: 0x0004, 0x9f3: 0x0400, 0x9f4: 0x0004, 0x9f5: 0x0004, - 0x9f6: 0x0004, 0x9f7: 0x0004, 0x9f8: 0x0004, 0x9f9: 0x0004, 0x9fa: 0x0004, + 0x9ca: 0x0024, + 0x9cf: 0x0024, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 
0x0024, 0x9d3: 0x0024, 0x9d4: 0x0024, 0x9d6: 0x0024, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x0024, + 0x9f2: 0x2000, 0x9f3: 0x2000, // Block 0x28, offset 0xa00 - 0xa07: 0x0004, 0xa08: 0x0004, 0xa09: 0x0004, 0xa0a: 0x0004, 0xa0b: 0x0004, - 0xa0c: 0x0004, 0xa0d: 0x0004, 0xa0e: 0x0004, + 0xa31: 0x0024, 0xa33: 0x2000, 0xa34: 0x0024, 0xa35: 0x0024, + 0xa36: 0x0024, 0xa37: 0x0024, 0xa38: 0x0024, 0xa39: 0x0024, 0xa3a: 0x0024, // Block 0x29, offset 0xa40 - 0xa71: 0x0004, 0xa73: 0x0400, 0xa74: 0x0004, 0xa75: 0x0004, - 0xa76: 0x0004, 0xa77: 0x0004, 0xa78: 0x0004, 0xa79: 0x0004, 0xa7a: 0x0004, 0xa7b: 0x0004, - 0xa7c: 0x0004, + 0xa47: 0x0024, 0xa48: 0x0024, 0xa49: 0x0024, 0xa4a: 0x0024, 0xa4b: 0x0024, + 0xa4c: 0x0024, 0xa4d: 0x0024, 0xa4e: 0x0024, // Block 0x2a, offset 0xa80 - 0xa88: 0x0004, 0xa89: 0x0004, 0xa8a: 0x0004, 0xa8b: 0x0004, - 0xa8c: 0x0004, 0xa8d: 0x0004, 0xa8e: 0x0004, + 0xab1: 0x0024, 0xab3: 0x2000, 0xab4: 0x0024, 0xab5: 0x0024, + 0xab6: 0x0024, 0xab7: 0x0024, 0xab8: 0x0024, 0xab9: 0x0024, 0xaba: 0x0024, 0xabb: 0x0024, + 0xabc: 0x0024, // Block 0x2b, offset 0xac0 - 0xad8: 0x0004, 0xad9: 0x0004, - 0xaf5: 0x0004, - 0xaf7: 0x0004, 0xaf9: 0x0004, - 0xafe: 0x0400, 0xaff: 0x0400, + 0xac8: 0x0024, 0xac9: 0x0024, 0xaca: 0x0024, 0xacb: 0x0024, + 0xacc: 0x0024, 0xacd: 0x0024, 0xace: 0x0024, // Block 0x2c, offset 0xb00 - 0xb31: 0x0004, 0xb32: 0x0004, 0xb33: 0x0004, 0xb34: 0x0004, 0xb35: 0x0004, - 0xb36: 0x0004, 0xb37: 0x0004, 0xb38: 0x0004, 0xb39: 0x0004, 0xb3a: 0x0004, 0xb3b: 0x0004, - 0xb3c: 0x0004, 0xb3d: 0x0004, 0xb3e: 0x0004, 0xb3f: 0x0400, + 0xb18: 0x0024, 0xb19: 0x0024, + 0xb35: 0x0024, + 0xb37: 0x0024, 0xb39: 0x0024, + 0xb3e: 0x2000, 0xb3f: 0x2000, // Block 0x2d, offset 0xb40 - 0xb40: 0x0004, 0xb41: 0x0004, 0xb42: 0x0004, 0xb43: 0x0004, 0xb44: 0x0004, - 0xb46: 0x0004, 0xb47: 0x0004, - 0xb4d: 0x0004, 0xb4e: 0x0004, 0xb4f: 0x0004, 0xb50: 0x0004, 0xb51: 0x0004, - 0xb52: 0x0004, 0xb53: 0x0004, 
0xb54: 0x0004, 0xb55: 0x0004, 0xb56: 0x0004, 0xb57: 0x0004, - 0xb59: 0x0004, 0xb5a: 0x0004, 0xb5b: 0x0004, 0xb5c: 0x0004, 0xb5d: 0x0004, - 0xb5e: 0x0004, 0xb5f: 0x0004, 0xb60: 0x0004, 0xb61: 0x0004, 0xb62: 0x0004, 0xb63: 0x0004, - 0xb64: 0x0004, 0xb65: 0x0004, 0xb66: 0x0004, 0xb67: 0x0004, 0xb68: 0x0004, 0xb69: 0x0004, - 0xb6a: 0x0004, 0xb6b: 0x0004, 0xb6c: 0x0004, 0xb6d: 0x0004, 0xb6e: 0x0004, 0xb6f: 0x0004, - 0xb70: 0x0004, 0xb71: 0x0004, 0xb72: 0x0004, 0xb73: 0x0004, 0xb74: 0x0004, 0xb75: 0x0004, - 0xb76: 0x0004, 0xb77: 0x0004, 0xb78: 0x0004, 0xb79: 0x0004, 0xb7a: 0x0004, 0xb7b: 0x0004, - 0xb7c: 0x0004, + 0xb71: 0x0024, 0xb72: 0x0024, 0xb73: 0x0024, 0xb74: 0x0024, 0xb75: 0x0024, + 0xb76: 0x0024, 0xb77: 0x0024, 0xb78: 0x0024, 0xb79: 0x0024, 0xb7a: 0x0024, 0xb7b: 0x0024, + 0xb7c: 0x0024, 0xb7d: 0x0024, 0xb7e: 0x0024, 0xb7f: 0x2000, // Block 0x2e, offset 0xb80 - 0xb86: 0x0004, + 0xb80: 0x0024, 0xb81: 0x0024, 0xb82: 0x0024, 0xb83: 0x0024, 0xb84: 0x0024, + 0xb86: 0x0024, 0xb87: 0x0024, + 0xb8d: 0x0024, 0xb8e: 0x0024, 0xb8f: 0x0024, 0xb90: 0x0024, 0xb91: 0x0024, + 0xb92: 0x0024, 0xb93: 0x0024, 0xb94: 0x0024, 0xb95: 0x0024, 0xb96: 0x0024, 0xb97: 0x0024, + 0xb99: 0x0024, 0xb9a: 0x0024, 0xb9b: 0x0024, 0xb9c: 0x0024, 0xb9d: 0x0024, + 0xb9e: 0x0024, 0xb9f: 0x0024, 0xba0: 0x0024, 0xba1: 0x0024, 0xba2: 0x0024, 0xba3: 0x0024, + 0xba4: 0x0024, 0xba5: 0x0024, 0xba6: 0x0024, 0xba7: 0x0024, 0xba8: 0x0024, 0xba9: 0x0024, + 0xbaa: 0x0024, 0xbab: 0x0024, 0xbac: 0x0024, 0xbad: 0x0024, 0xbae: 0x0024, 0xbaf: 0x0024, + 0xbb0: 0x0024, 0xbb1: 0x0024, 0xbb2: 0x0024, 0xbb3: 0x0024, 0xbb4: 0x0024, 0xbb5: 0x0024, + 0xbb6: 0x0024, 0xbb7: 0x0024, 0xbb8: 0x0024, 0xbb9: 0x0024, 0xbba: 0x0024, 0xbbb: 0x0024, + 0xbbc: 0x0024, // Block 0x2f, offset 0xbc0 - 0xbed: 0x0004, 0xbee: 0x0004, 0xbef: 0x0004, - 0xbf0: 0x0004, 0xbf1: 0x0400, 0xbf2: 0x0004, 0xbf3: 0x0004, 0xbf4: 0x0004, 0xbf5: 0x0004, - 0xbf6: 0x0004, 0xbf7: 0x0004, 0xbf9: 0x0004, 0xbfa: 0x0004, 0xbfb: 0x0400, - 0xbfc: 0x0400, 0xbfd: 0x0004, 
0xbfe: 0x0004, + 0xbc6: 0x0024, // Block 0x30, offset 0xc00 - 0xc16: 0x0400, 0xc17: 0x0400, - 0xc18: 0x0004, 0xc19: 0x0004, - 0xc1e: 0x0004, 0xc1f: 0x0004, 0xc20: 0x0004, - 0xc31: 0x0004, 0xc32: 0x0004, 0xc33: 0x0004, 0xc34: 0x0004, + 0xc00: 0x0010, 0xc01: 0x0010, 0xc02: 0x0010, 0xc03: 0x0010, 0xc04: 0x0010, 0xc05: 0x0010, + 0xc06: 0x0010, 0xc07: 0x0010, 0xc08: 0x0010, 0xc09: 0x0010, 0xc0a: 0x0010, 0xc0b: 0x0010, + 0xc0c: 0x0010, 0xc0d: 0x0010, 0xc0e: 0x0010, 0xc0f: 0x0010, 0xc10: 0x0010, 0xc11: 0x0010, + 0xc12: 0x0010, 0xc13: 0x0010, 0xc14: 0x0010, 0xc15: 0x0010, 0xc16: 0x0010, 0xc17: 0x0010, + 0xc18: 0x0010, 0xc19: 0x0010, 0xc1a: 0x0010, 0xc1b: 0x0010, 0xc1c: 0x0010, 0xc1d: 0x0010, + 0xc1e: 0x0010, 0xc1f: 0x0010, 0xc20: 0x0010, 0xc21: 0x0010, 0xc22: 0x0010, 0xc23: 0x0010, + 0xc24: 0x0010, 0xc25: 0x0010, 0xc26: 0x0010, 0xc27: 0x0010, 0xc28: 0x0010, 0xc29: 0x0010, + 0xc2a: 0x0010, 0xc2d: 0x0024, 0xc2e: 0x0024, 0xc2f: 0x0024, + 0xc30: 0x0024, 0xc31: 0x2000, 0xc32: 0x0024, 0xc33: 0x0024, 0xc34: 0x0024, 0xc35: 0x0024, + 0xc36: 0x0024, 0xc37: 0x0024, 0xc39: 0x0044, 0xc3a: 0x0024, 0xc3b: 0x2000, + 0xc3c: 0x2000, 0xc3d: 0x0024, 0xc3e: 0x0024, 0xc3f: 0x0010, // Block 0x31, offset 0xc40 - 0xc42: 0x0004, 0xc44: 0x0400, 0xc45: 0x0004, - 0xc46: 0x0004, - 0xc4d: 0x0004, - 0xc5d: 0x0004, + 0xc50: 0x0010, 0xc51: 0x0010, + 0xc52: 0x0010, 0xc53: 0x0010, 0xc54: 0x0010, 0xc55: 0x0010, 0xc56: 0x2000, 0xc57: 0x2000, + 0xc58: 0x0024, 0xc59: 0x0024, 0xc5a: 0x0010, 0xc5b: 0x0010, 0xc5c: 0x0010, 0xc5d: 0x0010, + 0xc5e: 0x0024, 0xc5f: 0x0024, 0xc60: 0x0024, 0xc61: 0x0010, + 0xc65: 0x0010, 0xc66: 0x0010, + 0xc6e: 0x0010, 0xc6f: 0x0010, + 0xc70: 0x0010, 0xc71: 0x0024, 0xc72: 0x0024, 0xc73: 0x0024, 0xc74: 0x0024, 0xc75: 0x0010, + 0xc76: 0x0010, 0xc77: 0x0010, 0xc78: 0x0010, 0xc79: 0x0010, 0xc7a: 0x0010, 0xc7b: 0x0010, + 0xc7c: 0x0010, 0xc7d: 0x0010, 0xc7e: 0x0010, 0xc7f: 0x0010, // Block 0x32, offset 0xc80 - 0xc80: 0x0010, 0xc81: 0x0010, 0xc82: 0x0010, 0xc83: 0x0010, 0xc84: 0x0010, 0xc85: 
0x0010, - 0xc86: 0x0010, 0xc87: 0x0010, 0xc88: 0x0010, 0xc89: 0x0010, 0xc8a: 0x0010, 0xc8b: 0x0010, - 0xc8c: 0x0010, 0xc8d: 0x0010, 0xc8e: 0x0010, 0xc8f: 0x0010, 0xc90: 0x0010, 0xc91: 0x0010, - 0xc92: 0x0010, 0xc93: 0x0010, 0xc94: 0x0010, 0xc95: 0x0010, 0xc96: 0x0010, 0xc97: 0x0010, - 0xc98: 0x0010, 0xc99: 0x0010, 0xc9a: 0x0010, 0xc9b: 0x0010, 0xc9c: 0x0010, 0xc9d: 0x0010, - 0xc9e: 0x0010, 0xc9f: 0x0010, 0xca0: 0x0010, 0xca1: 0x0010, 0xca2: 0x0010, 0xca3: 0x0010, - 0xca4: 0x0010, 0xca5: 0x0010, 0xca6: 0x0010, 0xca7: 0x0010, 0xca8: 0x0010, 0xca9: 0x0010, - 0xcaa: 0x0010, 0xcab: 0x0010, 0xcac: 0x0010, 0xcad: 0x0010, 0xcae: 0x0010, 0xcaf: 0x0010, - 0xcb0: 0x0010, 0xcb1: 0x0010, 0xcb2: 0x0010, 0xcb3: 0x0010, 0xcb4: 0x0010, 0xcb5: 0x0010, - 0xcb6: 0x0010, 0xcb7: 0x0010, 0xcb8: 0x0010, 0xcb9: 0x0010, 0xcba: 0x0010, 0xcbb: 0x0010, - 0xcbc: 0x0010, 0xcbd: 0x0010, 0xcbe: 0x0010, 0xcbf: 0x0010, + 0xc80: 0x0010, 0xc81: 0x0010, 0xc82: 0x0024, 0xc84: 0x2000, 0xc85: 0x0024, + 0xc86: 0x0024, + 0xc8d: 0x0024, 0xc8e: 0x0010, + 0xc9d: 0x0024, // Block 0x33, offset 0xcc0 - 0xcc0: 0x0010, 0xcc1: 0x0010, 0xcc2: 0x0010, 0xcc3: 0x0010, 0xcc4: 0x0010, 0xcc5: 0x0010, - 0xcc6: 0x0010, 0xcc7: 0x0010, 0xcc8: 0x0010, 0xcc9: 0x0010, 0xcca: 0x0010, 0xccb: 0x0010, - 0xccc: 0x0010, 0xccd: 0x0010, 0xcce: 0x0010, 0xccf: 0x0010, 0xcd0: 0x0010, 0xcd1: 0x0010, - 0xcd2: 0x0010, 0xcd3: 0x0010, 0xcd4: 0x0010, 0xcd5: 0x0010, 0xcd6: 0x0010, 0xcd7: 0x0010, - 0xcd8: 0x0010, 0xcd9: 0x0010, 0xcda: 0x0010, 0xcdb: 0x0010, 0xcdc: 0x0010, 0xcdd: 0x0010, - 0xcde: 0x0010, 0xcdf: 0x0010, 0xce0: 0x1000, 0xce1: 0x1000, 0xce2: 0x1000, 0xce3: 0x1000, - 0xce4: 0x1000, 0xce5: 0x1000, 0xce6: 0x1000, 0xce7: 0x1000, 0xce8: 0x1000, 0xce9: 0x1000, - 0xcea: 0x1000, 0xceb: 0x1000, 0xcec: 0x1000, 0xced: 0x1000, 0xcee: 0x1000, 0xcef: 0x1000, - 0xcf0: 0x1000, 0xcf1: 0x1000, 0xcf2: 0x1000, 0xcf3: 0x1000, 0xcf4: 0x1000, 0xcf5: 0x1000, - 0xcf6: 0x1000, 0xcf7: 0x1000, 0xcf8: 0x1000, 0xcf9: 0x1000, 0xcfa: 0x1000, 0xcfb: 0x1000, - 0xcfc: 
0x1000, 0xcfd: 0x1000, 0xcfe: 0x1000, 0xcff: 0x1000, + 0xcc0: 0x0080, 0xcc1: 0x0080, 0xcc2: 0x0080, 0xcc3: 0x0080, 0xcc4: 0x0080, 0xcc5: 0x0080, + 0xcc6: 0x0080, 0xcc7: 0x0080, 0xcc8: 0x0080, 0xcc9: 0x0080, 0xcca: 0x0080, 0xccb: 0x0080, + 0xccc: 0x0080, 0xccd: 0x0080, 0xcce: 0x0080, 0xccf: 0x0080, 0xcd0: 0x0080, 0xcd1: 0x0080, + 0xcd2: 0x0080, 0xcd3: 0x0080, 0xcd4: 0x0080, 0xcd5: 0x0080, 0xcd6: 0x0080, 0xcd7: 0x0080, + 0xcd8: 0x0080, 0xcd9: 0x0080, 0xcda: 0x0080, 0xcdb: 0x0080, 0xcdc: 0x0080, 0xcdd: 0x0080, + 0xcde: 0x0080, 0xcdf: 0x0080, 0xce0: 0x0080, 0xce1: 0x0080, 0xce2: 0x0080, 0xce3: 0x0080, + 0xce4: 0x0080, 0xce5: 0x0080, 0xce6: 0x0080, 0xce7: 0x0080, 0xce8: 0x0080, 0xce9: 0x0080, + 0xcea: 0x0080, 0xceb: 0x0080, 0xcec: 0x0080, 0xced: 0x0080, 0xcee: 0x0080, 0xcef: 0x0080, + 0xcf0: 0x0080, 0xcf1: 0x0080, 0xcf2: 0x0080, 0xcf3: 0x0080, 0xcf4: 0x0080, 0xcf5: 0x0080, + 0xcf6: 0x0080, 0xcf7: 0x0080, 0xcf8: 0x0080, 0xcf9: 0x0080, 0xcfa: 0x0080, 0xcfb: 0x0080, + 0xcfc: 0x0080, 0xcfd: 0x0080, 0xcfe: 0x0080, 0xcff: 0x0080, // Block 0x34, offset 0xd00 - 0xd00: 0x1000, 0xd01: 0x1000, 0xd02: 0x1000, 0xd03: 0x1000, 0xd04: 0x1000, 0xd05: 0x1000, - 0xd06: 0x1000, 0xd07: 0x1000, 0xd08: 0x1000, 0xd09: 0x1000, 0xd0a: 0x1000, 0xd0b: 0x1000, - 0xd0c: 0x1000, 0xd0d: 0x1000, 0xd0e: 0x1000, 0xd0f: 0x1000, 0xd10: 0x1000, 0xd11: 0x1000, - 0xd12: 0x1000, 0xd13: 0x1000, 0xd14: 0x1000, 0xd15: 0x1000, 0xd16: 0x1000, 0xd17: 0x1000, - 0xd18: 0x1000, 0xd19: 0x1000, 0xd1a: 0x1000, 0xd1b: 0x1000, 0xd1c: 0x1000, 0xd1d: 0x1000, - 0xd1e: 0x1000, 0xd1f: 0x1000, 0xd20: 0x1000, 0xd21: 0x1000, 0xd22: 0x1000, 0xd23: 0x1000, - 0xd24: 0x1000, 0xd25: 0x1000, 0xd26: 0x1000, 0xd27: 0x1000, 0xd28: 0x0800, 0xd29: 0x0800, - 0xd2a: 0x0800, 0xd2b: 0x0800, 0xd2c: 0x0800, 0xd2d: 0x0800, 0xd2e: 0x0800, 0xd2f: 0x0800, - 0xd30: 0x0800, 0xd31: 0x0800, 0xd32: 0x0800, 0xd33: 0x0800, 0xd34: 0x0800, 0xd35: 0x0800, - 0xd36: 0x0800, 0xd37: 0x0800, 0xd38: 0x0800, 0xd39: 0x0800, 0xd3a: 0x0800, 0xd3b: 0x0800, - 0xd3c: 0x0800, 
0xd3d: 0x0800, 0xd3e: 0x0800, 0xd3f: 0x0800, + 0xd00: 0x0080, 0xd01: 0x0080, 0xd02: 0x0080, 0xd03: 0x0080, 0xd04: 0x0080, 0xd05: 0x0080, + 0xd06: 0x0080, 0xd07: 0x0080, 0xd08: 0x0080, 0xd09: 0x0080, 0xd0a: 0x0080, 0xd0b: 0x0080, + 0xd0c: 0x0080, 0xd0d: 0x0080, 0xd0e: 0x0080, 0xd0f: 0x0080, 0xd10: 0x0080, 0xd11: 0x0080, + 0xd12: 0x0080, 0xd13: 0x0080, 0xd14: 0x0080, 0xd15: 0x0080, 0xd16: 0x0080, 0xd17: 0x0080, + 0xd18: 0x0080, 0xd19: 0x0080, 0xd1a: 0x0080, 0xd1b: 0x0080, 0xd1c: 0x0080, 0xd1d: 0x0080, + 0xd1e: 0x0080, 0xd1f: 0x0080, 0xd20: 0x8000, 0xd21: 0x8000, 0xd22: 0x8000, 0xd23: 0x8000, + 0xd24: 0x8000, 0xd25: 0x8000, 0xd26: 0x8000, 0xd27: 0x8000, 0xd28: 0x8000, 0xd29: 0x8000, + 0xd2a: 0x8000, 0xd2b: 0x8000, 0xd2c: 0x8000, 0xd2d: 0x8000, 0xd2e: 0x8000, 0xd2f: 0x8000, + 0xd30: 0x8000, 0xd31: 0x8000, 0xd32: 0x8000, 0xd33: 0x8000, 0xd34: 0x8000, 0xd35: 0x8000, + 0xd36: 0x8000, 0xd37: 0x8000, 0xd38: 0x8000, 0xd39: 0x8000, 0xd3a: 0x8000, 0xd3b: 0x8000, + 0xd3c: 0x8000, 0xd3d: 0x8000, 0xd3e: 0x8000, 0xd3f: 0x8000, // Block 0x35, offset 0xd40 - 0xd40: 0x0800, 0xd41: 0x0800, 0xd42: 0x0800, 0xd43: 0x0800, 0xd44: 0x0800, 0xd45: 0x0800, - 0xd46: 0x0800, 0xd47: 0x0800, 0xd48: 0x0800, 0xd49: 0x0800, 0xd4a: 0x0800, 0xd4b: 0x0800, - 0xd4c: 0x0800, 0xd4d: 0x0800, 0xd4e: 0x0800, 0xd4f: 0x0800, 0xd50: 0x0800, 0xd51: 0x0800, - 0xd52: 0x0800, 0xd53: 0x0800, 0xd54: 0x0800, 0xd55: 0x0800, 0xd56: 0x0800, 0xd57: 0x0800, - 0xd58: 0x0800, 0xd59: 0x0800, 0xd5a: 0x0800, 0xd5b: 0x0800, 0xd5c: 0x0800, 0xd5d: 0x0800, - 0xd5e: 0x0800, 0xd5f: 0x0800, 0xd60: 0x0800, 0xd61: 0x0800, 0xd62: 0x0800, 0xd63: 0x0800, - 0xd64: 0x0800, 0xd65: 0x0800, 0xd66: 0x0800, 0xd67: 0x0800, 0xd68: 0x0800, 0xd69: 0x0800, - 0xd6a: 0x0800, 0xd6b: 0x0800, 0xd6c: 0x0800, 0xd6d: 0x0800, 0xd6e: 0x0800, 0xd6f: 0x0800, - 0xd70: 0x0800, 0xd71: 0x0800, 0xd72: 0x0800, 0xd73: 0x0800, 0xd74: 0x0800, 0xd75: 0x0800, - 0xd76: 0x0800, 0xd77: 0x0800, 0xd78: 0x0800, 0xd79: 0x0800, 0xd7a: 0x0800, 0xd7b: 0x0800, - 0xd7c: 0x0800, 0xd7d: 
0x0800, 0xd7e: 0x0800, 0xd7f: 0x0800, + 0xd40: 0x8000, 0xd41: 0x8000, 0xd42: 0x8000, 0xd43: 0x8000, 0xd44: 0x8000, 0xd45: 0x8000, + 0xd46: 0x8000, 0xd47: 0x8000, 0xd48: 0x8000, 0xd49: 0x8000, 0xd4a: 0x8000, 0xd4b: 0x8000, + 0xd4c: 0x8000, 0xd4d: 0x8000, 0xd4e: 0x8000, 0xd4f: 0x8000, 0xd50: 0x8000, 0xd51: 0x8000, + 0xd52: 0x8000, 0xd53: 0x8000, 0xd54: 0x8000, 0xd55: 0x8000, 0xd56: 0x8000, 0xd57: 0x8000, + 0xd58: 0x8000, 0xd59: 0x8000, 0xd5a: 0x8000, 0xd5b: 0x8000, 0xd5c: 0x8000, 0xd5d: 0x8000, + 0xd5e: 0x8000, 0xd5f: 0x8000, 0xd60: 0x8000, 0xd61: 0x8000, 0xd62: 0x8000, 0xd63: 0x8000, + 0xd64: 0x8000, 0xd65: 0x8000, 0xd66: 0x8000, 0xd67: 0x8000, 0xd68: 0x4000, 0xd69: 0x4000, + 0xd6a: 0x4000, 0xd6b: 0x4000, 0xd6c: 0x4000, 0xd6d: 0x4000, 0xd6e: 0x4000, 0xd6f: 0x4000, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x4000, 0xd73: 0x4000, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x4000, + 0xd7c: 0x4000, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, // Block 0x36, offset 0xd80 - 0xd9d: 0x0004, - 0xd9e: 0x0004, 0xd9f: 0x0004, + 0xd80: 0x4000, 0xd81: 0x4000, 0xd82: 0x4000, 0xd83: 0x4000, 0xd84: 0x4000, 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 0x4000, + 0xdb0: 0x4000, 0xdb1: 0x4000, 0xdb2: 0x4000, 0xdb3: 0x4000, 0xdb4: 0x4000, 0xdb5: 0x4000, + 0xdb6: 0x4000, 0xdb7: 0x4000, 0xdb8: 0x4000, 0xdb9: 0x4000, 0xdba: 
0x4000, 0xdbb: 0x4000, + 0xdbc: 0x4000, 0xdbd: 0x4000, 0xdbe: 0x4000, 0xdbf: 0x4000, // Block 0x37, offset 0xdc0 - 0xdd2: 0x0004, 0xdd3: 0x0004, 0xdd4: 0x0004, 0xdd5: 0x0400, - 0xdf2: 0x0004, 0xdf3: 0x0004, 0xdf4: 0x0400, + 0xddd: 0x0024, + 0xdde: 0x0024, 0xddf: 0x0024, // Block 0x38, offset 0xe00 - 0xe12: 0x0004, 0xe13: 0x0004, - 0xe32: 0x0004, 0xe33: 0x0004, + 0xe12: 0x0024, 0xe13: 0x0024, 0xe14: 0x0024, 0xe15: 0x0024, + 0xe32: 0x0024, 0xe33: 0x0024, 0xe34: 0x0024, // Block 0x39, offset 0xe40 - 0xe74: 0x0004, 0xe75: 0x0004, - 0xe76: 0x0400, 0xe77: 0x0004, 0xe78: 0x0004, 0xe79: 0x0004, 0xe7a: 0x0004, 0xe7b: 0x0004, - 0xe7c: 0x0004, 0xe7d: 0x0004, 0xe7e: 0x0400, 0xe7f: 0x0400, + 0xe52: 0x0024, 0xe53: 0x0024, + 0xe72: 0x0024, 0xe73: 0x0024, // Block 0x3a, offset 0xe80 - 0xe80: 0x0400, 0xe81: 0x0400, 0xe82: 0x0400, 0xe83: 0x0400, 0xe84: 0x0400, 0xe85: 0x0400, - 0xe86: 0x0004, 0xe87: 0x0400, 0xe88: 0x0400, 0xe89: 0x0004, 0xe8a: 0x0004, 0xe8b: 0x0004, - 0xe8c: 0x0004, 0xe8d: 0x0004, 0xe8e: 0x0004, 0xe8f: 0x0004, 0xe90: 0x0004, 0xe91: 0x0004, - 0xe92: 0x0004, 0xe93: 0x0004, - 0xe9d: 0x0004, + 0xe80: 0x0010, 0xe81: 0x0010, 0xe82: 0x0010, 0xe83: 0x0010, 0xe84: 0x0010, 0xe85: 0x0010, + 0xe86: 0x0010, 0xe87: 0x0010, 0xe88: 0x0010, 0xe89: 0x0010, 0xe8a: 0x0010, 0xe8b: 0x0010, + 0xe8c: 0x0010, 0xe8d: 0x0010, 0xe8e: 0x0010, 0xe8f: 0x0010, 0xe90: 0x0010, 0xe91: 0x0010, + 0xe92: 0x0010, 0xe93: 0x0010, 0xe94: 0x0010, 0xe95: 0x0010, 0xe96: 0x0010, 0xe97: 0x0010, + 0xe98: 0x0010, 0xe99: 0x0010, 0xe9a: 0x0010, 0xe9b: 0x0010, 0xe9c: 0x0010, 0xe9d: 0x0010, + 0xe9e: 0x0010, 0xe9f: 0x0010, 0xea0: 0x0010, 0xea1: 0x0010, 0xea2: 0x0010, 0xea3: 0x0010, + 0xea4: 0x0010, 0xea5: 0x0010, 0xea6: 0x0010, 0xea7: 0x0010, 0xea8: 0x0010, 0xea9: 0x0010, + 0xeaa: 0x0010, 0xeab: 0x0010, 0xeac: 0x0010, 0xead: 0x0010, 0xeae: 0x0010, 0xeaf: 0x0010, + 0xeb0: 0x0010, 0xeb1: 0x0010, 0xeb2: 0x0010, 0xeb3: 0x0010, 0xeb4: 0x0024, 0xeb5: 0x0024, + 0xeb6: 0x2000, 0xeb7: 0x0024, 0xeb8: 0x0024, 0xeb9: 0x0024, 0xeba: 
0x0024, 0xebb: 0x0024, + 0xebc: 0x0024, 0xebd: 0x0024, 0xebe: 0x2000, 0xebf: 0x2000, // Block 0x3b, offset 0xec0 - 0xecb: 0x0004, - 0xecc: 0x0004, 0xecd: 0x0004, 0xece: 0x0002, 0xecf: 0x0004, + 0xec0: 0x2000, 0xec1: 0x2000, 0xec2: 0x2000, 0xec3: 0x2000, 0xec4: 0x2000, 0xec5: 0x2000, + 0xec6: 0x0024, 0xec7: 0x2000, 0xec8: 0x2000, 0xec9: 0x0024, 0xeca: 0x0024, 0xecb: 0x0024, + 0xecc: 0x0024, 0xecd: 0x0024, 0xece: 0x0024, 0xecf: 0x0024, 0xed0: 0x0024, 0xed1: 0x0024, + 0xed2: 0x0044, 0xed3: 0x0024, + 0xedd: 0x0024, // Block 0x3c, offset 0xf00 - 0xf05: 0x0004, - 0xf06: 0x0004, - 0xf29: 0x0004, + 0xf0b: 0x0024, + 0xf0c: 0x0024, 0xf0d: 0x0024, 0xf0e: 0x0002, 0xf0f: 0x0024, // Block 0x3d, offset 0xf40 - 0xf60: 0x0004, 0xf61: 0x0004, 0xf62: 0x0004, 0xf63: 0x0400, - 0xf64: 0x0400, 0xf65: 0x0400, 0xf66: 0x0400, 0xf67: 0x0004, 0xf68: 0x0004, 0xf69: 0x0400, - 0xf6a: 0x0400, 0xf6b: 0x0400, - 0xf70: 0x0400, 0xf71: 0x0400, 0xf72: 0x0004, 0xf73: 0x0400, 0xf74: 0x0400, 0xf75: 0x0400, - 0xf76: 0x0400, 0xf77: 0x0400, 0xf78: 0x0400, 0xf79: 0x0004, 0xf7a: 0x0004, 0xf7b: 0x0004, + 0xf45: 0x0024, + 0xf46: 0x0024, + 0xf69: 0x0024, // Block 0x3e, offset 0xf80 - 0xf97: 0x0004, - 0xf98: 0x0004, 0xf99: 0x0400, 0xf9a: 0x0400, 0xf9b: 0x0004, + 0xfa0: 0x0024, 0xfa1: 0x0024, 0xfa2: 0x0024, 0xfa3: 0x2000, + 0xfa4: 0x2000, 0xfa5: 0x2000, 0xfa6: 0x2000, 0xfa7: 0x0024, 0xfa8: 0x0024, 0xfa9: 0x2000, + 0xfaa: 0x2000, 0xfab: 0x2000, + 0xfb0: 0x2000, 0xfb1: 0x2000, 0xfb2: 0x0024, 0xfb3: 0x2000, 0xfb4: 0x2000, 0xfb5: 0x2000, + 0xfb6: 0x2000, 0xfb7: 0x2000, 0xfb8: 0x2000, 0xfb9: 0x0024, 0xfba: 0x0024, 0xfbb: 0x0024, // Block 0x3f, offset 0xfc0 - 0xfd5: 0x0400, 0xfd6: 0x0004, 0xfd7: 0x0400, - 0xfd8: 0x0004, 0xfd9: 0x0004, 0xfda: 0x0004, 0xfdb: 0x0004, 0xfdc: 0x0004, 0xfdd: 0x0004, - 0xfde: 0x0004, 0xfe0: 0x0004, 0xfe2: 0x0004, - 0xfe5: 0x0004, 0xfe6: 0x0004, 0xfe7: 0x0004, 0xfe8: 0x0004, 0xfe9: 0x0004, - 0xfea: 0x0004, 0xfeb: 0x0004, 0xfec: 0x0004, 0xfed: 0x0400, 0xfee: 0x0400, 0xfef: 0x0400, - 0xff0: 
0x0400, 0xff1: 0x0400, 0xff2: 0x0400, 0xff3: 0x0004, 0xff4: 0x0004, 0xff5: 0x0004, - 0xff6: 0x0004, 0xff7: 0x0004, 0xff8: 0x0004, 0xff9: 0x0004, 0xffa: 0x0004, 0xffb: 0x0004, - 0xffc: 0x0004, 0xfff: 0x0004, + 0xfd7: 0x0024, + 0xfd8: 0x0024, 0xfd9: 0x2000, 0xfda: 0x2000, 0xfdb: 0x0024, + 0xfe0: 0x0010, 0xfe1: 0x0010, 0xfe2: 0x0010, 0xfe3: 0x0010, + 0xfe4: 0x0010, 0xfe5: 0x0010, 0xfe6: 0x0010, 0xfe7: 0x0010, 0xfe8: 0x0010, 0xfe9: 0x0010, + 0xfea: 0x0010, 0xfeb: 0x0010, 0xfec: 0x0010, 0xfed: 0x0010, 0xfee: 0x0010, 0xfef: 0x0010, + 0xff0: 0x0010, 0xff1: 0x0010, 0xff2: 0x0010, 0xff3: 0x0010, 0xff4: 0x0010, 0xff5: 0x0010, + 0xff6: 0x0010, 0xff7: 0x0010, 0xff8: 0x0010, 0xff9: 0x0010, 0xffa: 0x0010, 0xffb: 0x0010, + 0xffc: 0x0010, 0xffd: 0x0010, 0xffe: 0x0010, 0xfff: 0x0010, // Block 0x40, offset 0x1000 - 0x1030: 0x0004, 0x1031: 0x0004, 0x1032: 0x0004, 0x1033: 0x0004, 0x1034: 0x0004, 0x1035: 0x0004, - 0x1036: 0x0004, 0x1037: 0x0004, 0x1038: 0x0004, 0x1039: 0x0004, 0x103a: 0x0004, 0x103b: 0x0004, - 0x103c: 0x0004, 0x103d: 0x0004, 0x103e: 0x0004, 0x103f: 0x0004, + 0x1000: 0x0010, 0x1001: 0x0010, 0x1002: 0x0010, 0x1003: 0x0010, 0x1004: 0x0010, 0x1005: 0x0010, + 0x1006: 0x0010, 0x1007: 0x0010, 0x1008: 0x0010, 0x1009: 0x0010, 0x100a: 0x0010, 0x100b: 0x0010, + 0x100c: 0x0010, 0x100d: 0x0010, 0x100e: 0x0010, 0x100f: 0x0010, 0x1010: 0x0010, 0x1011: 0x0010, + 0x1012: 0x0010, 0x1013: 0x0010, 0x1014: 0x0010, 0x1015: 0x2000, 0x1016: 0x0024, 0x1017: 0x2000, + 0x1018: 0x0024, 0x1019: 0x0024, 0x101a: 0x0024, 0x101b: 0x0024, 0x101c: 0x0024, 0x101d: 0x0024, + 0x101e: 0x0024, 0x1020: 0x0044, 0x1022: 0x0024, + 0x1025: 0x0024, 0x1026: 0x0024, 0x1027: 0x0024, 0x1028: 0x0024, 0x1029: 0x0024, + 0x102a: 0x0024, 0x102b: 0x0024, 0x102c: 0x0024, 0x102d: 0x2000, 0x102e: 0x2000, 0x102f: 0x2000, + 0x1030: 0x2000, 0x1031: 0x2000, 0x1032: 0x2000, 0x1033: 0x0024, 0x1034: 0x0024, 0x1035: 0x0024, + 0x1036: 0x0024, 0x1037: 0x0024, 0x1038: 0x0024, 0x1039: 0x0024, 0x103a: 0x0024, 0x103b: 0x0024, + 0x103c: 
0x0024, 0x103f: 0x0024, // Block 0x41, offset 0x1040 - 0x1040: 0x0004, 0x1041: 0x0004, 0x1042: 0x0004, 0x1043: 0x0004, 0x1044: 0x0004, 0x1045: 0x0004, - 0x1046: 0x0004, 0x1047: 0x0004, 0x1048: 0x0004, 0x1049: 0x0004, 0x104a: 0x0004, 0x104b: 0x0004, - 0x104c: 0x0004, 0x104d: 0x0004, 0x104e: 0x0004, + 0x1070: 0x0024, 0x1071: 0x0024, 0x1072: 0x0024, 0x1073: 0x0024, 0x1074: 0x0024, 0x1075: 0x0024, + 0x1076: 0x0024, 0x1077: 0x0024, 0x1078: 0x0024, 0x1079: 0x0024, 0x107a: 0x0024, 0x107b: 0x0024, + 0x107c: 0x0024, 0x107d: 0x0024, 0x107e: 0x0024, 0x107f: 0x0024, // Block 0x42, offset 0x1080 - 0x1080: 0x0004, 0x1081: 0x0004, 0x1082: 0x0004, 0x1083: 0x0004, 0x1084: 0x0400, - 0x10b4: 0x0004, 0x10b5: 0x0004, - 0x10b6: 0x0004, 0x10b7: 0x0004, 0x10b8: 0x0004, 0x10b9: 0x0004, 0x10ba: 0x0004, 0x10bb: 0x0400, - 0x10bc: 0x0004, 0x10bd: 0x0400, 0x10be: 0x0400, 0x10bf: 0x0400, + 0x1080: 0x0024, 0x1081: 0x0024, 0x1082: 0x0024, 0x1083: 0x0024, 0x1084: 0x0024, 0x1085: 0x0024, + 0x1086: 0x0024, 0x1087: 0x0024, 0x1088: 0x0024, 0x1089: 0x0024, 0x108a: 0x0024, 0x108b: 0x0024, + 0x108c: 0x0024, 0x108d: 0x0024, 0x108e: 0x0024, 0x108f: 0x0024, 0x1090: 0x0024, 0x1091: 0x0024, + 0x1092: 0x0024, 0x1093: 0x0024, 0x1094: 0x0024, 0x1095: 0x0024, 0x1096: 0x0024, 0x1097: 0x0024, + 0x1098: 0x0024, 0x1099: 0x0024, 0x109a: 0x0024, 0x109b: 0x0024, 0x109c: 0x0024, 0x109d: 0x0024, + 0x10a0: 0x0024, 0x10a1: 0x0024, 0x10a2: 0x0024, 0x10a3: 0x0024, + 0x10a4: 0x0024, 0x10a5: 0x0024, 0x10a6: 0x0024, 0x10a7: 0x0024, 0x10a8: 0x0024, 0x10a9: 0x0024, + 0x10aa: 0x0024, 0x10ab: 0x0024, // Block 0x43, offset 0x10c0 - 0x10c0: 0x0400, 0x10c1: 0x0400, 0x10c2: 0x0004, 0x10c3: 0x0400, 0x10c4: 0x0400, - 0x10eb: 0x0004, 0x10ec: 0x0004, 0x10ed: 0x0004, 0x10ee: 0x0004, 0x10ef: 0x0004, - 0x10f0: 0x0004, 0x10f1: 0x0004, 0x10f2: 0x0004, 0x10f3: 0x0004, + 0x10c0: 0x0024, 0x10c1: 0x0024, 0x10c2: 0x0024, 0x10c3: 0x0024, 0x10c4: 0x2000, + 0x10cb: 0x0010, + 0x10cc: 0x0010, + 0x10d3: 0x0010, 0x10d4: 0x0010, 0x10d5: 0x0010, 0x10d6: 
0x0010, 0x10d7: 0x0010, + 0x10d8: 0x0010, 0x10d9: 0x0010, 0x10da: 0x0010, 0x10db: 0x0010, 0x10dc: 0x0010, 0x10dd: 0x0010, + 0x10de: 0x0010, 0x10df: 0x0010, 0x10e0: 0x0010, 0x10e1: 0x0010, 0x10e2: 0x0010, 0x10e3: 0x0010, + 0x10e4: 0x0010, 0x10e5: 0x0010, 0x10e6: 0x0010, 0x10e7: 0x0010, 0x10e8: 0x0010, 0x10e9: 0x0010, + 0x10ea: 0x0010, 0x10eb: 0x0010, 0x10ec: 0x0010, 0x10ed: 0x0010, 0x10ee: 0x0010, 0x10ef: 0x0010, + 0x10f0: 0x0010, 0x10f1: 0x0010, 0x10f2: 0x0010, 0x10f3: 0x0010, 0x10f4: 0x0024, 0x10f5: 0x0024, + 0x10f6: 0x0024, 0x10f7: 0x0024, 0x10f8: 0x0024, 0x10f9: 0x0024, 0x10fa: 0x0024, 0x10fb: 0x0024, + 0x10fc: 0x0024, 0x10fd: 0x0024, 0x10fe: 0x2000, 0x10ff: 0x2000, // Block 0x44, offset 0x1100 - 0x1100: 0x0004, 0x1101: 0x0004, 0x1102: 0x0400, - 0x1121: 0x0400, 0x1122: 0x0004, 0x1123: 0x0004, - 0x1124: 0x0004, 0x1125: 0x0004, 0x1126: 0x0400, 0x1127: 0x0400, 0x1128: 0x0004, 0x1129: 0x0004, - 0x112a: 0x0400, 0x112b: 0x0004, 0x112c: 0x0004, 0x112d: 0x0004, + 0x1100: 0x2000, 0x1101: 0x2000, 0x1102: 0x0024, 0x1103: 0x0024, 0x1104: 0x0044, 0x1105: 0x0010, + 0x1106: 0x0010, 0x1107: 0x0010, 0x1108: 0x0010, 0x1109: 0x0010, 0x110a: 0x0010, 0x110b: 0x0010, + 0x110c: 0x0010, + 0x112b: 0x0024, 0x112c: 0x0024, 0x112d: 0x0024, 0x112e: 0x0024, 0x112f: 0x0024, + 0x1130: 0x0024, 0x1131: 0x0024, 0x1132: 0x0024, 0x1133: 0x0024, // Block 0x45, offset 0x1140 - 0x1166: 0x0004, 0x1167: 0x0400, 0x1168: 0x0004, 0x1169: 0x0004, - 0x116a: 0x0400, 0x116b: 0x0400, 0x116c: 0x0400, 0x116d: 0x0004, 0x116e: 0x0400, 0x116f: 0x0004, - 0x1170: 0x0004, 0x1171: 0x0004, 0x1172: 0x0400, 0x1173: 0x0400, + 0x1140: 0x0024, 0x1141: 0x0024, 0x1142: 0x2000, 0x1143: 0x0010, 0x1144: 0x0010, 0x1145: 0x0010, + 0x1146: 0x0010, 0x1147: 0x0010, 0x1148: 0x0010, 0x1149: 0x0010, 0x114a: 0x0010, 0x114b: 0x0010, + 0x114c: 0x0010, 0x114d: 0x0010, 0x114e: 0x0010, 0x114f: 0x0010, 0x1150: 0x0010, 0x1151: 0x0010, + 0x1152: 0x0010, 0x1153: 0x0010, 0x1154: 0x0010, 0x1155: 0x0010, 0x1156: 0x0010, 0x1157: 0x0010, + 0x1158: 
0x0010, 0x1159: 0x0010, 0x115a: 0x0010, 0x115b: 0x0010, 0x115c: 0x0010, 0x115d: 0x0010, + 0x115e: 0x0010, 0x115f: 0x0010, 0x1160: 0x0010, 0x1161: 0x2000, 0x1162: 0x0024, 0x1163: 0x0024, + 0x1164: 0x0024, 0x1165: 0x0024, 0x1166: 0x2000, 0x1167: 0x2000, 0x1168: 0x0024, 0x1169: 0x0024, + 0x116a: 0x0024, 0x116b: 0x0044, 0x116c: 0x0024, 0x116d: 0x0024, 0x116e: 0x0010, 0x116f: 0x0010, + 0x117b: 0x0010, + 0x117c: 0x0010, 0x117d: 0x0010, // Block 0x46, offset 0x1180 - 0x11a4: 0x0400, 0x11a5: 0x0400, 0x11a6: 0x0400, 0x11a7: 0x0400, 0x11a8: 0x0400, 0x11a9: 0x0400, - 0x11aa: 0x0400, 0x11ab: 0x0400, 0x11ac: 0x0004, 0x11ad: 0x0004, 0x11ae: 0x0004, 0x11af: 0x0004, - 0x11b0: 0x0004, 0x11b1: 0x0004, 0x11b2: 0x0004, 0x11b3: 0x0004, 0x11b4: 0x0400, 0x11b5: 0x0400, - 0x11b6: 0x0004, 0x11b7: 0x0004, + 0x11a6: 0x0024, 0x11a7: 0x2000, 0x11a8: 0x0024, 0x11a9: 0x0024, + 0x11aa: 0x2000, 0x11ab: 0x2000, 0x11ac: 0x2000, 0x11ad: 0x0024, 0x11ae: 0x2000, 0x11af: 0x0024, + 0x11b0: 0x0024, 0x11b1: 0x0024, 0x11b2: 0x0024, 0x11b3: 0x0024, // Block 0x47, offset 0x11c0 - 0x11d0: 0x0004, 0x11d1: 0x0004, - 0x11d2: 0x0004, 0x11d4: 0x0004, 0x11d5: 0x0004, 0x11d6: 0x0004, 0x11d7: 0x0004, - 0x11d8: 0x0004, 0x11d9: 0x0004, 0x11da: 0x0004, 0x11db: 0x0004, 0x11dc: 0x0004, 0x11dd: 0x0004, - 0x11de: 0x0004, 0x11df: 0x0004, 0x11e0: 0x0004, 0x11e1: 0x0400, 0x11e2: 0x0004, 0x11e3: 0x0004, - 0x11e4: 0x0004, 0x11e5: 0x0004, 0x11e6: 0x0004, 0x11e7: 0x0004, 0x11e8: 0x0004, - 0x11ed: 0x0004, - 0x11f4: 0x0004, - 0x11f7: 0x0400, 0x11f8: 0x0004, 0x11f9: 0x0004, + 0x11e4: 0x2000, 0x11e5: 0x2000, 0x11e6: 0x2000, 0x11e7: 0x2000, 0x11e8: 0x2000, 0x11e9: 0x2000, + 0x11ea: 0x2000, 0x11eb: 0x2000, 0x11ec: 0x0024, 0x11ed: 0x0024, 0x11ee: 0x0024, 0x11ef: 0x0024, + 0x11f0: 0x0024, 0x11f1: 0x0024, 0x11f2: 0x0024, 0x11f3: 0x0024, 0x11f4: 0x2000, 0x11f5: 0x2000, + 0x11f6: 0x0024, 0x11f7: 0x0024, // Block 0x48, offset 0x1200 - 0x120b: 0x0002, - 0x120c: 0x0004, 0x120d: 0x2000, 0x120e: 0x0002, 0x120f: 0x0002, - 0x1228: 0x0002, 0x1229: 
0x0002, - 0x122a: 0x0002, 0x122b: 0x0002, 0x122c: 0x0002, 0x122d: 0x0002, 0x122e: 0x0002, - 0x123c: 0x0008, + 0x1210: 0x0024, 0x1211: 0x0024, + 0x1212: 0x0024, 0x1214: 0x0024, 0x1215: 0x0024, 0x1216: 0x0024, 0x1217: 0x0024, + 0x1218: 0x0024, 0x1219: 0x0024, 0x121a: 0x0024, 0x121b: 0x0024, 0x121c: 0x0024, 0x121d: 0x0024, + 0x121e: 0x0024, 0x121f: 0x0024, 0x1220: 0x0024, 0x1221: 0x2000, 0x1222: 0x0024, 0x1223: 0x0024, + 0x1224: 0x0024, 0x1225: 0x0024, 0x1226: 0x0024, 0x1227: 0x0024, 0x1228: 0x0024, + 0x122d: 0x0024, + 0x1234: 0x0024, + 0x1237: 0x2000, 0x1238: 0x0024, 0x1239: 0x0024, // Block 0x49, offset 0x1240 - 0x1249: 0x0008, - 0x1260: 0x0002, 0x1261: 0x0002, 0x1262: 0x0002, 0x1263: 0x0002, - 0x1264: 0x0002, 0x1265: 0x0002, 0x1266: 0x0002, 0x1267: 0x0002, 0x1268: 0x0002, 0x1269: 0x0002, - 0x126a: 0x0002, 0x126b: 0x0002, 0x126c: 0x0002, 0x126d: 0x0002, 0x126e: 0x0002, 0x126f: 0x0002, + 0x124b: 0x0002, + 0x124c: 0x0004, 0x124d: 0x10020, 0x124e: 0x0002, 0x124f: 0x0002, + 0x1268: 0x0002, 0x1269: 0x0002, + 0x126a: 0x0002, 0x126b: 0x0002, 0x126c: 0x0002, 0x126d: 0x0002, 0x126e: 0x0002, + 0x127c: 0x0008, // Block 0x4a, offset 0x1280 - 0x1290: 0x0004, 0x1291: 0x0004, - 0x1292: 0x0004, 0x1293: 0x0004, 0x1294: 0x0004, 0x1295: 0x0004, 0x1296: 0x0004, 0x1297: 0x0004, - 0x1298: 0x0004, 0x1299: 0x0004, 0x129a: 0x0004, 0x129b: 0x0004, 0x129c: 0x0004, 0x129d: 0x0004, - 0x129e: 0x0004, 0x129f: 0x0004, 0x12a0: 0x0004, 0x12a1: 0x0004, 0x12a2: 0x0004, 0x12a3: 0x0004, - 0x12a4: 0x0004, 0x12a5: 0x0004, 0x12a6: 0x0004, 0x12a7: 0x0004, 0x12a8: 0x0004, 0x12a9: 0x0004, - 0x12aa: 0x0004, 0x12ab: 0x0004, 0x12ac: 0x0004, 0x12ad: 0x0004, 0x12ae: 0x0004, 0x12af: 0x0004, - 0x12b0: 0x0004, + 0x1289: 0x0008, + 0x12a0: 0x0002, 0x12a1: 0x0002, 0x12a2: 0x0002, 0x12a3: 0x0002, + 0x12a4: 0x0002, 0x12a5: 0x0002, 0x12a6: 0x0002, 0x12a7: 0x0002, 0x12a8: 0x0002, 0x12a9: 0x0002, + 0x12aa: 0x0002, 0x12ab: 0x0002, 0x12ac: 0x0002, 0x12ad: 0x0002, 0x12ae: 0x0002, 0x12af: 0x0002, // Block 0x4b, offset 0x12c0 - 
0x12e2: 0x0008, - 0x12f9: 0x0008, + 0x12d0: 0x0024, 0x12d1: 0x0024, + 0x12d2: 0x0024, 0x12d3: 0x0024, 0x12d4: 0x0024, 0x12d5: 0x0024, 0x12d6: 0x0024, 0x12d7: 0x0024, + 0x12d8: 0x0024, 0x12d9: 0x0024, 0x12da: 0x0024, 0x12db: 0x0024, 0x12dc: 0x0024, 0x12dd: 0x0024, + 0x12de: 0x0024, 0x12df: 0x0024, 0x12e0: 0x0024, 0x12e1: 0x0024, 0x12e2: 0x0024, 0x12e3: 0x0024, + 0x12e4: 0x0024, 0x12e5: 0x0024, 0x12e6: 0x0024, 0x12e7: 0x0024, 0x12e8: 0x0024, 0x12e9: 0x0024, + 0x12ea: 0x0024, 0x12eb: 0x0024, 0x12ec: 0x0024, 0x12ed: 0x0024, 0x12ee: 0x0024, 0x12ef: 0x0024, + 0x12f0: 0x0024, // Block 0x4c, offset 0x1300 - 0x1314: 0x0008, 0x1315: 0x0008, 0x1316: 0x0008, 0x1317: 0x0008, - 0x1318: 0x0008, 0x1319: 0x0008, - 0x1329: 0x0008, - 0x132a: 0x0008, + 0x1322: 0x0008, + 0x1339: 0x0008, // Block 0x4d, offset 0x1340 - 0x135a: 0x0008, 0x135b: 0x0008, - 0x1368: 0x0008, + 0x1354: 0x0008, 0x1355: 0x0008, 0x1356: 0x0008, 0x1357: 0x0008, + 0x1358: 0x0008, 0x1359: 0x0008, + 0x1369: 0x0008, + 0x136a: 0x0008, // Block 0x4e, offset 0x1380 - 0x1388: 0x0008, + 0x139a: 0x0008, 0x139b: 0x0008, + 0x13a8: 0x0008, // Block 0x4f, offset 0x13c0 0x13cf: 0x0008, 0x13e9: 0x0008, @@ -539,746 +639,951 @@ var graphemesValues = [13760]property{ 0x14bb: 0x0008, 0x14bc: 0x0008, 0x14bd: 0x0008, 0x14be: 0x0008, // Block 0x53, offset 0x14c0 - 0x14c0: 0x0008, 0x14c1: 0x0008, 0x14c2: 0x0008, 0x14c3: 0x0008, 0x14c4: 0x0008, 0x14c5: 0x0008, - 0x14c7: 0x0008, 0x14c8: 0x0008, 0x14c9: 0x0008, 0x14ca: 0x0008, 0x14cb: 0x0008, - 0x14cc: 0x0008, 0x14cd: 0x0008, 0x14ce: 0x0008, 0x14cf: 0x0008, 0x14d0: 0x0008, 0x14d1: 0x0008, - 0x14d2: 0x0008, 0x14d4: 0x0008, 0x14d5: 0x0008, 0x14d6: 0x0008, 0x14d7: 0x0008, - 0x14d8: 0x0008, 0x14d9: 0x0008, 0x14da: 0x0008, 0x14db: 0x0008, 0x14dc: 0x0008, 0x14dd: 0x0008, - 0x14de: 0x0008, 0x14df: 0x0008, 0x14e0: 0x0008, 0x14e1: 0x0008, 0x14e2: 0x0008, 0x14e3: 0x0008, - 0x14e4: 0x0008, 0x14e5: 0x0008, 0x14e6: 0x0008, 0x14e7: 0x0008, 0x14e8: 0x0008, 0x14e9: 0x0008, - 0x14ea: 0x0008, 0x14eb: 0x0008, 
0x14ec: 0x0008, 0x14ed: 0x0008, 0x14ee: 0x0008, 0x14ef: 0x0008, - 0x14f0: 0x0008, 0x14f1: 0x0008, 0x14f2: 0x0008, 0x14f3: 0x0008, 0x14f4: 0x0008, 0x14f5: 0x0008, - 0x14f6: 0x0008, 0x14f7: 0x0008, 0x14f8: 0x0008, 0x14f9: 0x0008, 0x14fa: 0x0008, 0x14fb: 0x0008, - 0x14fc: 0x0008, 0x14fd: 0x0008, 0x14fe: 0x0008, 0x14ff: 0x0008, + 0x14c0: 0x0008, 0x14c1: 0x0008, 0x14c2: 0x0008, 0x14c3: 0x0008, 0x14c4: 0x0008, + 0x14ce: 0x0008, 0x14d1: 0x0008, + 0x14d4: 0x0008, 0x14d5: 0x0008, + 0x14d8: 0x0008, 0x14dd: 0x0008, + 0x14e0: 0x0008, 0x14e2: 0x0008, 0x14e3: 0x0008, + 0x14e6: 0x0008, + 0x14ea: 0x0008, 0x14ee: 0x0008, 0x14ef: 0x0008, + 0x14f8: 0x0008, 0x14f9: 0x0008, 0x14fa: 0x0008, // Block 0x54, offset 0x1500 - 0x1500: 0x0008, 0x1501: 0x0008, 0x1502: 0x0008, 0x1503: 0x0008, 0x1504: 0x0008, 0x1505: 0x0008, - 0x1506: 0x0008, 0x1507: 0x0008, 0x1508: 0x0008, 0x1509: 0x0008, 0x150a: 0x0008, 0x150b: 0x0008, + 0x1500: 0x0008, 0x1502: 0x0008, + 0x1508: 0x0008, 0x1509: 0x0008, 0x150a: 0x0008, 0x150b: 0x0008, 0x150c: 0x0008, 0x150d: 0x0008, 0x150e: 0x0008, 0x150f: 0x0008, 0x1510: 0x0008, 0x1511: 0x0008, - 0x1512: 0x0008, 0x1513: 0x0008, 0x1514: 0x0008, 0x1515: 0x0008, 0x1516: 0x0008, 0x1517: 0x0008, - 0x1518: 0x0008, 0x1519: 0x0008, 0x151a: 0x0008, 0x151b: 0x0008, 0x151c: 0x0008, 0x151d: 0x0008, - 0x151e: 0x0008, 0x151f: 0x0008, 0x1520: 0x0008, 0x1521: 0x0008, 0x1522: 0x0008, 0x1523: 0x0008, - 0x1524: 0x0008, 0x1525: 0x0008, 0x1526: 0x0008, 0x1527: 0x0008, 0x1528: 0x0008, 0x1529: 0x0008, - 0x152a: 0x0008, 0x152b: 0x0008, 0x152c: 0x0008, 0x152d: 0x0008, 0x152e: 0x0008, 0x152f: 0x0008, - 0x1530: 0x0008, 0x1531: 0x0008, 0x1532: 0x0008, 0x1533: 0x0008, 0x1534: 0x0008, 0x1535: 0x0008, - 0x1536: 0x0008, 0x1537: 0x0008, 0x1538: 0x0008, 0x1539: 0x0008, 0x153a: 0x0008, 0x153b: 0x0008, - 0x153c: 0x0008, 0x153d: 0x0008, 0x153e: 0x0008, 0x153f: 0x0008, + 0x1512: 0x0008, 0x1513: 0x0008, + 0x151f: 0x0008, 0x1520: 0x0008, 0x1523: 0x0008, + 0x1525: 0x0008, 0x1526: 0x0008, 0x1528: 0x0008, + 0x153b: 
0x0008, + 0x153e: 0x0008, 0x153f: 0x0008, // Block 0x55, offset 0x1540 - 0x1540: 0x0008, 0x1541: 0x0008, 0x1542: 0x0008, 0x1543: 0x0008, 0x1544: 0x0008, 0x1545: 0x0008, - 0x1550: 0x0008, 0x1551: 0x0008, 0x1552: 0x0008, 0x1553: 0x0008, 0x1554: 0x0008, 0x1555: 0x0008, 0x1556: 0x0008, 0x1557: 0x0008, - 0x1558: 0x0008, 0x1559: 0x0008, 0x155a: 0x0008, 0x155b: 0x0008, 0x155c: 0x0008, 0x155d: 0x0008, - 0x155e: 0x0008, 0x155f: 0x0008, 0x1560: 0x0008, 0x1561: 0x0008, 0x1562: 0x0008, 0x1563: 0x0008, - 0x1564: 0x0008, 0x1565: 0x0008, 0x1566: 0x0008, 0x1567: 0x0008, 0x1568: 0x0008, 0x1569: 0x0008, - 0x156a: 0x0008, 0x156b: 0x0008, 0x156c: 0x0008, 0x156d: 0x0008, 0x156e: 0x0008, 0x156f: 0x0008, - 0x1570: 0x0008, 0x1571: 0x0008, 0x1572: 0x0008, 0x1573: 0x0008, 0x1574: 0x0008, 0x1575: 0x0008, - 0x1576: 0x0008, 0x1577: 0x0008, 0x1578: 0x0008, 0x1579: 0x0008, 0x157a: 0x0008, 0x157b: 0x0008, - 0x157c: 0x0008, 0x157d: 0x0008, 0x157e: 0x0008, 0x157f: 0x0008, + 0x1559: 0x0008, 0x155b: 0x0008, 0x155c: 0x0008, + 0x1560: 0x0008, 0x1561: 0x0008, + 0x1567: 0x0008, + 0x156a: 0x0008, 0x156b: 0x0008, + 0x1570: 0x0008, 0x1571: 0x0008, + 0x157d: 0x0008, 0x157e: 0x0008, // Block 0x56, offset 0x1580 - 0x1580: 0x0008, 0x1581: 0x0008, 0x1582: 0x0008, 0x1583: 0x0008, 0x1584: 0x0008, 0x1585: 0x0008, - 0x1588: 0x0008, 0x1589: 0x0008, 0x158a: 0x0008, 0x158b: 0x0008, - 0x158c: 0x0008, 0x158d: 0x0008, 0x158e: 0x0008, 0x158f: 0x0008, 0x1590: 0x0008, 0x1591: 0x0008, - 0x1592: 0x0008, 0x1594: 0x0008, 0x1596: 0x0008, - 0x159d: 0x0008, - 0x15a1: 0x0008, - 0x15a8: 0x0008, - 0x15b3: 0x0008, 0x15b4: 0x0008, + 0x1584: 0x0008, 0x1585: 0x0008, + 0x1588: 0x0008, + 0x158e: 0x0008, 0x158f: 0x0008, 0x1591: 0x0008, + 0x1593: 0x0008, 0x1594: 0x0008, + 0x15a9: 0x0008, + 0x15aa: 0x0008, + 0x15b0: 0x0008, 0x15b1: 0x0008, 0x15b2: 0x0008, 0x15b3: 0x0008, 0x15b4: 0x0008, 0x15b5: 0x0008, + 0x15b7: 0x0008, 0x15b8: 0x0008, 0x15b9: 0x0008, 0x15ba: 0x0008, + 0x15bd: 0x0008, // Block 0x57, offset 0x15c0 - 0x15c4: 0x0008, - 0x15c7: 
0x0008, - 0x15cc: 0x0008, 0x15ce: 0x0008, - 0x15d3: 0x0008, 0x15d4: 0x0008, 0x15d5: 0x0008, 0x15d7: 0x0008, - 0x15e3: 0x0008, - 0x15e4: 0x0008, 0x15e5: 0x0008, 0x15e6: 0x0008, 0x15e7: 0x0008, + 0x15c2: 0x0008, 0x15c5: 0x0008, + 0x15c8: 0x0008, 0x15c9: 0x0008, 0x15ca: 0x0008, 0x15cb: 0x0008, + 0x15cc: 0x0008, 0x15cd: 0x0008, 0x15cf: 0x0008, + 0x15d2: 0x0008, 0x15d4: 0x0008, 0x15d6: 0x0008, + 0x15dd: 0x0008, + 0x15e1: 0x0008, + 0x15e8: 0x0008, + 0x15f3: 0x0008, 0x15f4: 0x0008, // Block 0x58, offset 0x1600 - 0x1615: 0x0008, 0x1616: 0x0008, 0x1617: 0x0008, - 0x1621: 0x0008, - 0x1630: 0x0008, - 0x163f: 0x0008, + 0x1604: 0x0008, + 0x1607: 0x0008, + 0x160c: 0x0008, 0x160e: 0x0008, + 0x1613: 0x0008, 0x1614: 0x0008, 0x1615: 0x0008, 0x1617: 0x0008, + 0x1623: 0x0008, + 0x1624: 0x0008, // Block 0x59, offset 0x1640 - 0x1674: 0x0008, 0x1675: 0x0008, + 0x1655: 0x0008, 0x1656: 0x0008, 0x1657: 0x0008, + 0x1661: 0x0008, + 0x1670: 0x0008, + 0x167f: 0x0008, // Block 0x5a, offset 0x1680 - 0x1685: 0x0008, - 0x1686: 0x0008, 0x1687: 0x0008, - 0x169b: 0x0008, 0x169c: 0x0008, + 0x16b4: 0x0008, 0x16b5: 0x0008, // Block 0x5b, offset 0x16c0 - 0x16d0: 0x0008, - 0x16d5: 0x0008, + 0x16c5: 0x0008, + 0x16c6: 0x0008, 0x16c7: 0x0008, + 0x16db: 0x0008, 0x16dc: 0x0008, // Block 0x5c, offset 0x1700 - 0x172f: 0x0004, - 0x1730: 0x0004, 0x1731: 0x0004, + 0x1710: 0x0008, + 0x1715: 0x0008, // Block 0x5d, offset 0x1740 - 0x177f: 0x0004, + 0x176f: 0x0024, + 0x1770: 0x0024, 0x1771: 0x0024, // Block 0x5e, offset 0x1780 - 0x17a0: 0x0004, 0x17a1: 0x0004, 0x17a2: 0x0004, 0x17a3: 0x0004, - 0x17a4: 0x0004, 0x17a5: 0x0004, 0x17a6: 0x0004, 0x17a7: 0x0004, 0x17a8: 0x0004, 0x17a9: 0x0004, - 0x17aa: 0x0004, 0x17ab: 0x0004, 0x17ac: 0x0004, 0x17ad: 0x0004, 0x17ae: 0x0004, 0x17af: 0x0004, - 0x17b0: 0x0004, 0x17b1: 0x0004, 0x17b2: 0x0004, 0x17b3: 0x0004, 0x17b4: 0x0004, 0x17b5: 0x0004, - 0x17b6: 0x0004, 0x17b7: 0x0004, 0x17b8: 0x0004, 0x17b9: 0x0004, 0x17ba: 0x0004, 0x17bb: 0x0004, - 0x17bc: 0x0004, 0x17bd: 0x0004, 0x17be: 
0x0004, 0x17bf: 0x0004, + 0x17bf: 0x0024, // Block 0x5f, offset 0x17c0 - 0x17ea: 0x0004, 0x17eb: 0x0004, 0x17ec: 0x0004, 0x17ed: 0x0004, 0x17ee: 0x0004, 0x17ef: 0x0004, - 0x17f0: 0x0008, - 0x17fd: 0x0008, + 0x17e0: 0x0024, 0x17e1: 0x0024, 0x17e2: 0x0024, 0x17e3: 0x0024, + 0x17e4: 0x0024, 0x17e5: 0x0024, 0x17e6: 0x0024, 0x17e7: 0x0024, 0x17e8: 0x0024, 0x17e9: 0x0024, + 0x17ea: 0x0024, 0x17eb: 0x0024, 0x17ec: 0x0024, 0x17ed: 0x0024, 0x17ee: 0x0024, 0x17ef: 0x0024, + 0x17f0: 0x0024, 0x17f1: 0x0024, 0x17f2: 0x0024, 0x17f3: 0x0024, 0x17f4: 0x0024, 0x17f5: 0x0024, + 0x17f6: 0x0024, 0x17f7: 0x0024, 0x17f8: 0x0024, 0x17f9: 0x0024, 0x17fa: 0x0024, 0x17fb: 0x0024, + 0x17fc: 0x0024, 0x17fd: 0x0024, 0x17fe: 0x0024, 0x17ff: 0x0024, // Block 0x60, offset 0x1800 - 0x1819: 0x0004, 0x181a: 0x0004, + 0x182a: 0x0024, 0x182b: 0x0024, 0x182c: 0x0024, 0x182d: 0x0024, 0x182e: 0x0024, 0x182f: 0x0024, + 0x1830: 0x0008, + 0x183d: 0x0008, // Block 0x61, offset 0x1840 - 0x1857: 0x0008, - 0x1859: 0x0008, + 0x1859: 0x0024, 0x185a: 0x0024, // Block 0x62, offset 0x1880 - 0x18af: 0x0004, - 0x18b0: 0x0004, 0x18b1: 0x0004, 0x18b2: 0x0004, 0x18b4: 0x0004, 0x18b5: 0x0004, - 0x18b6: 0x0004, 0x18b7: 0x0004, 0x18b8: 0x0004, 0x18b9: 0x0004, 0x18ba: 0x0004, 0x18bb: 0x0004, - 0x18bc: 0x0004, 0x18bd: 0x0004, + 0x1897: 0x0008, + 0x1899: 0x0008, // Block 0x63, offset 0x18c0 - 0x18de: 0x0004, 0x18df: 0x0004, + 0x18ef: 0x0024, + 0x18f0: 0x0024, 0x18f1: 0x0024, 0x18f2: 0x0024, 0x18f4: 0x0024, 0x18f5: 0x0024, + 0x18f6: 0x0024, 0x18f7: 0x0024, 0x18f8: 0x0024, 0x18f9: 0x0024, 0x18fa: 0x0024, 0x18fb: 0x0024, + 0x18fc: 0x0024, 0x18fd: 0x0024, // Block 0x64, offset 0x1900 - 0x1930: 0x0004, 0x1931: 0x0004, + 0x191e: 0x0024, 0x191f: 0x0024, // Block 0x65, offset 0x1940 - 0x1942: 0x0004, - 0x1946: 0x0004, 0x194b: 0x0004, - 0x1963: 0x0400, - 0x1964: 0x0400, 0x1965: 0x0004, 0x1966: 0x0004, 0x1967: 0x0400, - 0x196c: 0x0004, + 0x1970: 0x0024, 0x1971: 0x0024, // Block 0x66, offset 0x1980 - 0x1980: 0x0400, 0x1981: 0x0400, - 
0x19b4: 0x0400, 0x19b5: 0x0400, - 0x19b6: 0x0400, 0x19b7: 0x0400, 0x19b8: 0x0400, 0x19b9: 0x0400, 0x19ba: 0x0400, 0x19bb: 0x0400, - 0x19bc: 0x0400, 0x19bd: 0x0400, 0x19be: 0x0400, 0x19bf: 0x0400, + 0x1982: 0x0024, + 0x1986: 0x0024, 0x198b: 0x0024, + 0x19a3: 0x2000, + 0x19a4: 0x2000, 0x19a5: 0x0024, 0x19a6: 0x0024, 0x19a7: 0x2000, + 0x19ac: 0x0024, // Block 0x67, offset 0x19c0 - 0x19c0: 0x0400, 0x19c1: 0x0400, 0x19c2: 0x0400, 0x19c3: 0x0400, 0x19c4: 0x0004, 0x19c5: 0x0004, - 0x19e0: 0x0004, 0x19e1: 0x0004, 0x19e2: 0x0004, 0x19e3: 0x0004, - 0x19e4: 0x0004, 0x19e5: 0x0004, 0x19e6: 0x0004, 0x19e7: 0x0004, 0x19e8: 0x0004, 0x19e9: 0x0004, - 0x19ea: 0x0004, 0x19eb: 0x0004, 0x19ec: 0x0004, 0x19ed: 0x0004, 0x19ee: 0x0004, 0x19ef: 0x0004, - 0x19f0: 0x0004, 0x19f1: 0x0004, - 0x19ff: 0x0004, + 0x19c0: 0x2000, 0x19c1: 0x2000, + 0x19f4: 0x2000, 0x19f5: 0x2000, + 0x19f6: 0x2000, 0x19f7: 0x2000, 0x19f8: 0x2000, 0x19f9: 0x2000, 0x19fa: 0x2000, 0x19fb: 0x2000, + 0x19fc: 0x2000, 0x19fd: 0x2000, 0x19fe: 0x2000, 0x19ff: 0x2000, // Block 0x68, offset 0x1a00 - 0x1a26: 0x0004, 0x1a27: 0x0004, 0x1a28: 0x0004, 0x1a29: 0x0004, - 0x1a2a: 0x0004, 0x1a2b: 0x0004, 0x1a2c: 0x0004, 0x1a2d: 0x0004, + 0x1a00: 0x2000, 0x1a01: 0x2000, 0x1a02: 0x2000, 0x1a03: 0x2000, 0x1a04: 0x0024, 0x1a05: 0x0024, + 0x1a20: 0x0024, 0x1a21: 0x0024, 0x1a22: 0x0024, 0x1a23: 0x0024, + 0x1a24: 0x0024, 0x1a25: 0x0024, 0x1a26: 0x0024, 0x1a27: 0x0024, 0x1a28: 0x0024, 0x1a29: 0x0024, + 0x1a2a: 0x0024, 0x1a2b: 0x0024, 0x1a2c: 0x0024, 0x1a2d: 0x0024, 0x1a2e: 0x0024, 0x1a2f: 0x0024, + 0x1a30: 0x0024, 0x1a31: 0x0024, + 0x1a3f: 0x0024, // Block 0x69, offset 0x1a40 - 0x1a47: 0x0004, 0x1a48: 0x0004, 0x1a49: 0x0004, 0x1a4a: 0x0004, 0x1a4b: 0x0004, - 0x1a4c: 0x0004, 0x1a4d: 0x0004, 0x1a4e: 0x0004, 0x1a4f: 0x0004, 0x1a50: 0x0004, 0x1a51: 0x0004, - 0x1a52: 0x0400, 0x1a53: 0x0400, - 0x1a60: 0x0010, 0x1a61: 0x0010, 0x1a62: 0x0010, 0x1a63: 0x0010, - 0x1a64: 0x0010, 0x1a65: 0x0010, 0x1a66: 0x0010, 0x1a67: 0x0010, 0x1a68: 0x0010, 0x1a69: 
0x0010, - 0x1a6a: 0x0010, 0x1a6b: 0x0010, 0x1a6c: 0x0010, 0x1a6d: 0x0010, 0x1a6e: 0x0010, 0x1a6f: 0x0010, - 0x1a70: 0x0010, 0x1a71: 0x0010, 0x1a72: 0x0010, 0x1a73: 0x0010, 0x1a74: 0x0010, 0x1a75: 0x0010, - 0x1a76: 0x0010, 0x1a77: 0x0010, 0x1a78: 0x0010, 0x1a79: 0x0010, 0x1a7a: 0x0010, 0x1a7b: 0x0010, - 0x1a7c: 0x0010, + 0x1a66: 0x0024, 0x1a67: 0x0024, 0x1a68: 0x0024, 0x1a69: 0x0024, + 0x1a6a: 0x0024, 0x1a6b: 0x0024, 0x1a6c: 0x0024, 0x1a6d: 0x0024, // Block 0x6a, offset 0x1a80 - 0x1a80: 0x0004, 0x1a81: 0x0004, 0x1a82: 0x0004, 0x1a83: 0x0400, - 0x1ab3: 0x0004, 0x1ab4: 0x0400, 0x1ab5: 0x0400, - 0x1ab6: 0x0004, 0x1ab7: 0x0004, 0x1ab8: 0x0004, 0x1ab9: 0x0004, 0x1aba: 0x0400, 0x1abb: 0x0400, - 0x1abc: 0x0004, 0x1abd: 0x0004, 0x1abe: 0x0400, 0x1abf: 0x0400, + 0x1a87: 0x0024, 0x1a88: 0x0024, 0x1a89: 0x0024, 0x1a8a: 0x0024, 0x1a8b: 0x0024, + 0x1a8c: 0x0024, 0x1a8d: 0x0024, 0x1a8e: 0x0024, 0x1a8f: 0x0024, 0x1a90: 0x0024, 0x1a91: 0x0024, + 0x1a92: 0x2000, 0x1a93: 0x0024, + 0x1aa0: 0x0080, 0x1aa1: 0x0080, 0x1aa2: 0x0080, 0x1aa3: 0x0080, + 0x1aa4: 0x0080, 0x1aa5: 0x0080, 0x1aa6: 0x0080, 0x1aa7: 0x0080, 0x1aa8: 0x0080, 0x1aa9: 0x0080, + 0x1aaa: 0x0080, 0x1aab: 0x0080, 0x1aac: 0x0080, 0x1aad: 0x0080, 0x1aae: 0x0080, 0x1aaf: 0x0080, + 0x1ab0: 0x0080, 0x1ab1: 0x0080, 0x1ab2: 0x0080, 0x1ab3: 0x0080, 0x1ab4: 0x0080, 0x1ab5: 0x0080, + 0x1ab6: 0x0080, 0x1ab7: 0x0080, 0x1ab8: 0x0080, 0x1ab9: 0x0080, 0x1aba: 0x0080, 0x1abb: 0x0080, + 0x1abc: 0x0080, // Block 0x6b, offset 0x1ac0 - 0x1ac0: 0x0400, - 0x1ae5: 0x0004, + 0x1ac0: 0x0024, 0x1ac1: 0x0024, 0x1ac2: 0x0024, 0x1ac3: 0x2000, + 0x1ac9: 0x0010, 0x1aca: 0x0010, 0x1acb: 0x0010, + 0x1acf: 0x0010, 0x1ad0: 0x0010, 0x1ad1: 0x0010, + 0x1ad2: 0x0010, 0x1ad3: 0x0010, 0x1ad4: 0x0010, 0x1ad5: 0x0010, 0x1ad6: 0x0010, 0x1ad7: 0x0010, + 0x1ad8: 0x0010, 0x1ad9: 0x0010, 0x1ada: 0x0010, 0x1adb: 0x0010, 0x1adc: 0x0010, 0x1add: 0x0010, + 0x1ade: 0x0010, 0x1adf: 0x0010, 0x1ae0: 0x0010, 0x1ae1: 0x0010, 0x1ae2: 0x0010, 0x1ae3: 0x0010, + 0x1ae4: 0x0010, 
0x1ae5: 0x0010, 0x1ae6: 0x0010, 0x1ae7: 0x0010, 0x1ae8: 0x0010, 0x1ae9: 0x0010, + 0x1aea: 0x0010, 0x1aeb: 0x0010, 0x1aec: 0x0010, 0x1aed: 0x0010, 0x1aee: 0x0010, 0x1aef: 0x0010, + 0x1af0: 0x0010, 0x1af1: 0x0010, 0x1af2: 0x0010, 0x1af3: 0x0024, 0x1af4: 0x2000, 0x1af5: 0x2000, + 0x1af6: 0x0024, 0x1af7: 0x0024, 0x1af8: 0x0024, 0x1af9: 0x0024, 0x1afa: 0x2000, 0x1afb: 0x2000, + 0x1afc: 0x0024, 0x1afd: 0x0024, 0x1afe: 0x2000, 0x1aff: 0x2000, // Block 0x6c, offset 0x1b00 - 0x1b29: 0x0004, - 0x1b2a: 0x0004, 0x1b2b: 0x0004, 0x1b2c: 0x0004, 0x1b2d: 0x0004, 0x1b2e: 0x0004, 0x1b2f: 0x0400, - 0x1b30: 0x0400, 0x1b31: 0x0004, 0x1b32: 0x0004, 0x1b33: 0x0400, 0x1b34: 0x0400, 0x1b35: 0x0004, - 0x1b36: 0x0004, + 0x1b00: 0x0044, + 0x1b20: 0x0010, 0x1b21: 0x0010, 0x1b22: 0x0010, 0x1b23: 0x0010, + 0x1b24: 0x0010, 0x1b25: 0x0024, 0x1b27: 0x0010, 0x1b28: 0x0010, 0x1b29: 0x0010, + 0x1b2a: 0x0010, 0x1b2b: 0x0010, 0x1b2c: 0x0010, 0x1b2d: 0x0010, 0x1b2e: 0x0010, 0x1b2f: 0x0010, + 0x1b3a: 0x0010, 0x1b3b: 0x0010, + 0x1b3c: 0x0010, 0x1b3d: 0x0010, 0x1b3e: 0x0010, // Block 0x6d, offset 0x1b40 - 0x1b43: 0x0004, - 0x1b4c: 0x0004, 0x1b4d: 0x0400, - 0x1b7c: 0x0004, + 0x1b69: 0x0024, + 0x1b6a: 0x0024, 0x1b6b: 0x0024, 0x1b6c: 0x0024, 0x1b6d: 0x0024, 0x1b6e: 0x0024, 0x1b6f: 0x2000, + 0x1b70: 0x2000, 0x1b71: 0x0024, 0x1b72: 0x0024, 0x1b73: 0x2000, 0x1b74: 0x2000, 0x1b75: 0x0024, + 0x1b76: 0x0024, // Block 0x6e, offset 0x1b80 - 0x1bb0: 0x0004, 0x1bb2: 0x0004, 0x1bb3: 0x0004, 0x1bb4: 0x0004, - 0x1bb7: 0x0004, 0x1bb8: 0x0004, - 0x1bbe: 0x0004, 0x1bbf: 0x0004, + 0x1b83: 0x0024, + 0x1b8c: 0x0024, 0x1b8d: 0x2000, + 0x1ba0: 0x0010, 0x1ba1: 0x0010, 0x1ba2: 0x0010, 0x1ba3: 0x0010, + 0x1ba4: 0x0010, 0x1ba5: 0x0010, 0x1ba6: 0x0010, 0x1ba7: 0x0010, 0x1ba8: 0x0010, 0x1ba9: 0x0010, + 0x1baa: 0x0010, 0x1bab: 0x0010, 0x1bac: 0x0010, 0x1bad: 0x0010, 0x1bae: 0x0010, 0x1baf: 0x0010, + 0x1bb1: 0x0010, 0x1bb2: 0x0010, 0x1bb3: 0x0010, + 0x1bba: 0x0010, + 0x1bbc: 0x0024, 0x1bbe: 0x0010, 0x1bbf: 0x0010, // Block 0x6f, offset 
0x1bc0 - 0x1bc1: 0x0004, - 0x1beb: 0x0400, 0x1bec: 0x0004, 0x1bed: 0x0004, 0x1bee: 0x0400, 0x1bef: 0x0400, - 0x1bf5: 0x0400, - 0x1bf6: 0x0004, + 0x1bf0: 0x0024, 0x1bf2: 0x0024, 0x1bf3: 0x0024, 0x1bf4: 0x0024, + 0x1bf7: 0x0024, 0x1bf8: 0x0024, + 0x1bfe: 0x0024, 0x1bff: 0x0024, // Block 0x70, offset 0x1c00 - 0x1c23: 0x0400, - 0x1c24: 0x0400, 0x1c25: 0x0004, 0x1c26: 0x0400, 0x1c27: 0x0400, 0x1c28: 0x0004, 0x1c29: 0x0400, - 0x1c2a: 0x0400, 0x1c2c: 0x0400, 0x1c2d: 0x0004, + 0x1c01: 0x0024, + 0x1c20: 0x0010, 0x1c21: 0x0010, 0x1c22: 0x0010, 0x1c23: 0x0010, + 0x1c24: 0x0010, 0x1c25: 0x0010, 0x1c26: 0x0010, 0x1c27: 0x0010, 0x1c28: 0x0010, 0x1c29: 0x0010, + 0x1c2a: 0x0010, 0x1c2b: 0x2000, 0x1c2c: 0x0024, 0x1c2d: 0x0024, 0x1c2e: 0x2000, 0x1c2f: 0x2000, + 0x1c35: 0x2000, + 0x1c36: 0x0044, // Block 0x71, offset 0x1c40 - 0x1c40: 0x0040, 0x1c41: 0x0080, 0x1c42: 0x0080, 0x1c43: 0x0080, 0x1c44: 0x0080, 0x1c45: 0x0080, - 0x1c46: 0x0080, 0x1c47: 0x0080, 0x1c48: 0x0080, 0x1c49: 0x0080, 0x1c4a: 0x0080, 0x1c4b: 0x0080, - 0x1c4c: 0x0080, 0x1c4d: 0x0080, 0x1c4e: 0x0080, 0x1c4f: 0x0080, 0x1c50: 0x0080, 0x1c51: 0x0080, - 0x1c52: 0x0080, 0x1c53: 0x0080, 0x1c54: 0x0080, 0x1c55: 0x0080, 0x1c56: 0x0080, 0x1c57: 0x0080, - 0x1c58: 0x0080, 0x1c59: 0x0080, 0x1c5a: 0x0080, 0x1c5b: 0x0080, 0x1c5c: 0x0040, 0x1c5d: 0x0080, - 0x1c5e: 0x0080, 0x1c5f: 0x0080, 0x1c60: 0x0080, 0x1c61: 0x0080, 0x1c62: 0x0080, 0x1c63: 0x0080, - 0x1c64: 0x0080, 0x1c65: 0x0080, 0x1c66: 0x0080, 0x1c67: 0x0080, 0x1c68: 0x0080, 0x1c69: 0x0080, - 0x1c6a: 0x0080, 0x1c6b: 0x0080, 0x1c6c: 0x0080, 0x1c6d: 0x0080, 0x1c6e: 0x0080, 0x1c6f: 0x0080, - 0x1c70: 0x0080, 0x1c71: 0x0080, 0x1c72: 0x0080, 0x1c73: 0x0080, 0x1c74: 0x0080, 0x1c75: 0x0080, - 0x1c76: 0x0080, 0x1c77: 0x0080, 0x1c78: 0x0040, 0x1c79: 0x0080, 0x1c7a: 0x0080, 0x1c7b: 0x0080, - 0x1c7c: 0x0080, 0x1c7d: 0x0080, 0x1c7e: 0x0080, 0x1c7f: 0x0080, + 0x1c40: 0x0010, 0x1c41: 0x0010, 0x1c42: 0x0010, 0x1c43: 0x0010, 0x1c44: 0x0010, 0x1c45: 0x0010, + 0x1c46: 0x0010, 0x1c47: 0x0010, 
0x1c48: 0x0010, 0x1c49: 0x0010, 0x1c4a: 0x0010, 0x1c4b: 0x0010, + 0x1c4c: 0x0010, 0x1c4d: 0x0010, 0x1c4e: 0x0010, 0x1c4f: 0x0010, 0x1c50: 0x0010, 0x1c51: 0x0010, + 0x1c52: 0x0010, 0x1c53: 0x0010, 0x1c54: 0x0010, 0x1c55: 0x0010, 0x1c56: 0x0010, 0x1c57: 0x0010, + 0x1c58: 0x0010, 0x1c59: 0x0010, 0x1c5a: 0x0010, + 0x1c63: 0x2000, + 0x1c64: 0x2000, 0x1c65: 0x0024, 0x1c66: 0x2000, 0x1c67: 0x2000, 0x1c68: 0x0024, 0x1c69: 0x2000, + 0x1c6a: 0x2000, 0x1c6c: 0x2000, 0x1c6d: 0x0024, // Block 0x72, offset 0x1c80 - 0x1c80: 0x0080, 0x1c81: 0x0080, 0x1c82: 0x0080, 0x1c83: 0x0080, 0x1c84: 0x0080, 0x1c85: 0x0080, - 0x1c86: 0x0080, 0x1c87: 0x0080, 0x1c88: 0x0080, 0x1c89: 0x0080, 0x1c8a: 0x0080, 0x1c8b: 0x0080, - 0x1c8c: 0x0080, 0x1c8d: 0x0080, 0x1c8e: 0x0080, 0x1c8f: 0x0080, 0x1c90: 0x0080, 0x1c91: 0x0080, - 0x1c92: 0x0080, 0x1c93: 0x0080, 0x1c94: 0x0040, 0x1c95: 0x0080, 0x1c96: 0x0080, 0x1c97: 0x0080, - 0x1c98: 0x0080, 0x1c99: 0x0080, 0x1c9a: 0x0080, 0x1c9b: 0x0080, 0x1c9c: 0x0080, 0x1c9d: 0x0080, - 0x1c9e: 0x0080, 0x1c9f: 0x0080, 0x1ca0: 0x0080, 0x1ca1: 0x0080, 0x1ca2: 0x0080, 0x1ca3: 0x0080, - 0x1ca4: 0x0080, 0x1ca5: 0x0080, 0x1ca6: 0x0080, 0x1ca7: 0x0080, 0x1ca8: 0x0080, 0x1ca9: 0x0080, - 0x1caa: 0x0080, 0x1cab: 0x0080, 0x1cac: 0x0080, 0x1cad: 0x0080, 0x1cae: 0x0080, 0x1caf: 0x0080, - 0x1cb0: 0x0040, 0x1cb1: 0x0080, 0x1cb2: 0x0080, 0x1cb3: 0x0080, 0x1cb4: 0x0080, 0x1cb5: 0x0080, - 0x1cb6: 0x0080, 0x1cb7: 0x0080, 0x1cb8: 0x0080, 0x1cb9: 0x0080, 0x1cba: 0x0080, 0x1cbb: 0x0080, - 0x1cbc: 0x0080, 0x1cbd: 0x0080, 0x1cbe: 0x0080, 0x1cbf: 0x0080, + 0x1c80: 0x0200, 0x1c81: 0x0400, 0x1c82: 0x0400, 0x1c83: 0x0400, 0x1c84: 0x0400, 0x1c85: 0x0400, + 0x1c86: 0x0400, 0x1c87: 0x0400, 0x1c88: 0x0400, 0x1c89: 0x0400, 0x1c8a: 0x0400, 0x1c8b: 0x0400, + 0x1c8c: 0x0400, 0x1c8d: 0x0400, 0x1c8e: 0x0400, 0x1c8f: 0x0400, 0x1c90: 0x0400, 0x1c91: 0x0400, + 0x1c92: 0x0400, 0x1c93: 0x0400, 0x1c94: 0x0400, 0x1c95: 0x0400, 0x1c96: 0x0400, 0x1c97: 0x0400, + 0x1c98: 0x0400, 0x1c99: 0x0400, 0x1c9a: 0x0400, 
0x1c9b: 0x0400, 0x1c9c: 0x0200, 0x1c9d: 0x0400, + 0x1c9e: 0x0400, 0x1c9f: 0x0400, 0x1ca0: 0x0400, 0x1ca1: 0x0400, 0x1ca2: 0x0400, 0x1ca3: 0x0400, + 0x1ca4: 0x0400, 0x1ca5: 0x0400, 0x1ca6: 0x0400, 0x1ca7: 0x0400, 0x1ca8: 0x0400, 0x1ca9: 0x0400, + 0x1caa: 0x0400, 0x1cab: 0x0400, 0x1cac: 0x0400, 0x1cad: 0x0400, 0x1cae: 0x0400, 0x1caf: 0x0400, + 0x1cb0: 0x0400, 0x1cb1: 0x0400, 0x1cb2: 0x0400, 0x1cb3: 0x0400, 0x1cb4: 0x0400, 0x1cb5: 0x0400, + 0x1cb6: 0x0400, 0x1cb7: 0x0400, 0x1cb8: 0x0200, 0x1cb9: 0x0400, 0x1cba: 0x0400, 0x1cbb: 0x0400, + 0x1cbc: 0x0400, 0x1cbd: 0x0400, 0x1cbe: 0x0400, 0x1cbf: 0x0400, // Block 0x73, offset 0x1cc0 - 0x1cc0: 0x0080, 0x1cc1: 0x0080, 0x1cc2: 0x0080, 0x1cc3: 0x0080, 0x1cc4: 0x0080, 0x1cc5: 0x0080, - 0x1cc6: 0x0080, 0x1cc7: 0x0080, 0x1cc8: 0x0080, 0x1cc9: 0x0080, 0x1cca: 0x0080, 0x1ccb: 0x0080, - 0x1ccc: 0x0040, 0x1ccd: 0x0080, 0x1cce: 0x0080, 0x1ccf: 0x0080, 0x1cd0: 0x0080, 0x1cd1: 0x0080, - 0x1cd2: 0x0080, 0x1cd3: 0x0080, 0x1cd4: 0x0080, 0x1cd5: 0x0080, 0x1cd6: 0x0080, 0x1cd7: 0x0080, - 0x1cd8: 0x0080, 0x1cd9: 0x0080, 0x1cda: 0x0080, 0x1cdb: 0x0080, 0x1cdc: 0x0080, 0x1cdd: 0x0080, - 0x1cde: 0x0080, 0x1cdf: 0x0080, 0x1ce0: 0x0080, 0x1ce1: 0x0080, 0x1ce2: 0x0080, 0x1ce3: 0x0080, - 0x1ce4: 0x0080, 0x1ce5: 0x0080, 0x1ce6: 0x0080, 0x1ce7: 0x0080, 0x1ce8: 0x0040, 0x1ce9: 0x0080, - 0x1cea: 0x0080, 0x1ceb: 0x0080, 0x1cec: 0x0080, 0x1ced: 0x0080, 0x1cee: 0x0080, 0x1cef: 0x0080, - 0x1cf0: 0x0080, 0x1cf1: 0x0080, 0x1cf2: 0x0080, 0x1cf3: 0x0080, 0x1cf4: 0x0080, 0x1cf5: 0x0080, - 0x1cf6: 0x0080, 0x1cf7: 0x0080, 0x1cf8: 0x0080, 0x1cf9: 0x0080, 0x1cfa: 0x0080, 0x1cfb: 0x0080, - 0x1cfc: 0x0080, 0x1cfd: 0x0080, 0x1cfe: 0x0080, 0x1cff: 0x0080, + 0x1cc0: 0x0400, 0x1cc1: 0x0400, 0x1cc2: 0x0400, 0x1cc3: 0x0400, 0x1cc4: 0x0400, 0x1cc5: 0x0400, + 0x1cc6: 0x0400, 0x1cc7: 0x0400, 0x1cc8: 0x0400, 0x1cc9: 0x0400, 0x1cca: 0x0400, 0x1ccb: 0x0400, + 0x1ccc: 0x0400, 0x1ccd: 0x0400, 0x1cce: 0x0400, 0x1ccf: 0x0400, 0x1cd0: 0x0400, 0x1cd1: 0x0400, + 0x1cd2: 0x0400, 0x1cd3: 
0x0400, 0x1cd4: 0x0200, 0x1cd5: 0x0400, 0x1cd6: 0x0400, 0x1cd7: 0x0400, + 0x1cd8: 0x0400, 0x1cd9: 0x0400, 0x1cda: 0x0400, 0x1cdb: 0x0400, 0x1cdc: 0x0400, 0x1cdd: 0x0400, + 0x1cde: 0x0400, 0x1cdf: 0x0400, 0x1ce0: 0x0400, 0x1ce1: 0x0400, 0x1ce2: 0x0400, 0x1ce3: 0x0400, + 0x1ce4: 0x0400, 0x1ce5: 0x0400, 0x1ce6: 0x0400, 0x1ce7: 0x0400, 0x1ce8: 0x0400, 0x1ce9: 0x0400, + 0x1cea: 0x0400, 0x1ceb: 0x0400, 0x1cec: 0x0400, 0x1ced: 0x0400, 0x1cee: 0x0400, 0x1cef: 0x0400, + 0x1cf0: 0x0200, 0x1cf1: 0x0400, 0x1cf2: 0x0400, 0x1cf3: 0x0400, 0x1cf4: 0x0400, 0x1cf5: 0x0400, + 0x1cf6: 0x0400, 0x1cf7: 0x0400, 0x1cf8: 0x0400, 0x1cf9: 0x0400, 0x1cfa: 0x0400, 0x1cfb: 0x0400, + 0x1cfc: 0x0400, 0x1cfd: 0x0400, 0x1cfe: 0x0400, 0x1cff: 0x0400, // Block 0x74, offset 0x1d00 - 0x1d00: 0x0080, 0x1d01: 0x0080, 0x1d02: 0x0080, 0x1d03: 0x0080, 0x1d04: 0x0040, 0x1d05: 0x0080, - 0x1d06: 0x0080, 0x1d07: 0x0080, 0x1d08: 0x0080, 0x1d09: 0x0080, 0x1d0a: 0x0080, 0x1d0b: 0x0080, - 0x1d0c: 0x0080, 0x1d0d: 0x0080, 0x1d0e: 0x0080, 0x1d0f: 0x0080, 0x1d10: 0x0080, 0x1d11: 0x0080, - 0x1d12: 0x0080, 0x1d13: 0x0080, 0x1d14: 0x0080, 0x1d15: 0x0080, 0x1d16: 0x0080, 0x1d17: 0x0080, - 0x1d18: 0x0080, 0x1d19: 0x0080, 0x1d1a: 0x0080, 0x1d1b: 0x0080, 0x1d1c: 0x0080, 0x1d1d: 0x0080, - 0x1d1e: 0x0080, 0x1d1f: 0x0080, 0x1d20: 0x0040, 0x1d21: 0x0080, 0x1d22: 0x0080, 0x1d23: 0x0080, - 0x1d24: 0x0080, 0x1d25: 0x0080, 0x1d26: 0x0080, 0x1d27: 0x0080, 0x1d28: 0x0080, 0x1d29: 0x0080, - 0x1d2a: 0x0080, 0x1d2b: 0x0080, 0x1d2c: 0x0080, 0x1d2d: 0x0080, 0x1d2e: 0x0080, 0x1d2f: 0x0080, - 0x1d30: 0x0080, 0x1d31: 0x0080, 0x1d32: 0x0080, 0x1d33: 0x0080, 0x1d34: 0x0080, 0x1d35: 0x0080, - 0x1d36: 0x0080, 0x1d37: 0x0080, 0x1d38: 0x0080, 0x1d39: 0x0080, 0x1d3a: 0x0080, 0x1d3b: 0x0080, - 0x1d3c: 0x0040, 0x1d3d: 0x0080, 0x1d3e: 0x0080, 0x1d3f: 0x0080, + 0x1d00: 0x0400, 0x1d01: 0x0400, 0x1d02: 0x0400, 0x1d03: 0x0400, 0x1d04: 0x0400, 0x1d05: 0x0400, + 0x1d06: 0x0400, 0x1d07: 0x0400, 0x1d08: 0x0400, 0x1d09: 0x0400, 0x1d0a: 0x0400, 0x1d0b: 0x0400, + 
0x1d0c: 0x0200, 0x1d0d: 0x0400, 0x1d0e: 0x0400, 0x1d0f: 0x0400, 0x1d10: 0x0400, 0x1d11: 0x0400, + 0x1d12: 0x0400, 0x1d13: 0x0400, 0x1d14: 0x0400, 0x1d15: 0x0400, 0x1d16: 0x0400, 0x1d17: 0x0400, + 0x1d18: 0x0400, 0x1d19: 0x0400, 0x1d1a: 0x0400, 0x1d1b: 0x0400, 0x1d1c: 0x0400, 0x1d1d: 0x0400, + 0x1d1e: 0x0400, 0x1d1f: 0x0400, 0x1d20: 0x0400, 0x1d21: 0x0400, 0x1d22: 0x0400, 0x1d23: 0x0400, + 0x1d24: 0x0400, 0x1d25: 0x0400, 0x1d26: 0x0400, 0x1d27: 0x0400, 0x1d28: 0x0200, 0x1d29: 0x0400, + 0x1d2a: 0x0400, 0x1d2b: 0x0400, 0x1d2c: 0x0400, 0x1d2d: 0x0400, 0x1d2e: 0x0400, 0x1d2f: 0x0400, + 0x1d30: 0x0400, 0x1d31: 0x0400, 0x1d32: 0x0400, 0x1d33: 0x0400, 0x1d34: 0x0400, 0x1d35: 0x0400, + 0x1d36: 0x0400, 0x1d37: 0x0400, 0x1d38: 0x0400, 0x1d39: 0x0400, 0x1d3a: 0x0400, 0x1d3b: 0x0400, + 0x1d3c: 0x0400, 0x1d3d: 0x0400, 0x1d3e: 0x0400, 0x1d3f: 0x0400, // Block 0x75, offset 0x1d40 - 0x1d40: 0x0080, 0x1d41: 0x0080, 0x1d42: 0x0080, 0x1d43: 0x0080, 0x1d44: 0x0080, 0x1d45: 0x0080, - 0x1d46: 0x0080, 0x1d47: 0x0080, 0x1d48: 0x0080, 0x1d49: 0x0080, 0x1d4a: 0x0080, 0x1d4b: 0x0080, - 0x1d4c: 0x0080, 0x1d4d: 0x0080, 0x1d4e: 0x0080, 0x1d4f: 0x0080, 0x1d50: 0x0080, 0x1d51: 0x0080, - 0x1d52: 0x0080, 0x1d53: 0x0080, 0x1d54: 0x0080, 0x1d55: 0x0080, 0x1d56: 0x0080, 0x1d57: 0x0080, - 0x1d58: 0x0040, 0x1d59: 0x0080, 0x1d5a: 0x0080, 0x1d5b: 0x0080, 0x1d5c: 0x0080, 0x1d5d: 0x0080, - 0x1d5e: 0x0080, 0x1d5f: 0x0080, 0x1d60: 0x0080, 0x1d61: 0x0080, 0x1d62: 0x0080, 0x1d63: 0x0080, - 0x1d64: 0x0080, 0x1d65: 0x0080, 0x1d66: 0x0080, 0x1d67: 0x0080, 0x1d68: 0x0080, 0x1d69: 0x0080, - 0x1d6a: 0x0080, 0x1d6b: 0x0080, 0x1d6c: 0x0080, 0x1d6d: 0x0080, 0x1d6e: 0x0080, 0x1d6f: 0x0080, - 0x1d70: 0x0080, 0x1d71: 0x0080, 0x1d72: 0x0080, 0x1d73: 0x0080, 0x1d74: 0x0040, 0x1d75: 0x0080, - 0x1d76: 0x0080, 0x1d77: 0x0080, 0x1d78: 0x0080, 0x1d79: 0x0080, 0x1d7a: 0x0080, 0x1d7b: 0x0080, - 0x1d7c: 0x0080, 0x1d7d: 0x0080, 0x1d7e: 0x0080, 0x1d7f: 0x0080, + 0x1d40: 0x0400, 0x1d41: 0x0400, 0x1d42: 0x0400, 0x1d43: 0x0400, 0x1d44: 
0x0200, 0x1d45: 0x0400, + 0x1d46: 0x0400, 0x1d47: 0x0400, 0x1d48: 0x0400, 0x1d49: 0x0400, 0x1d4a: 0x0400, 0x1d4b: 0x0400, + 0x1d4c: 0x0400, 0x1d4d: 0x0400, 0x1d4e: 0x0400, 0x1d4f: 0x0400, 0x1d50: 0x0400, 0x1d51: 0x0400, + 0x1d52: 0x0400, 0x1d53: 0x0400, 0x1d54: 0x0400, 0x1d55: 0x0400, 0x1d56: 0x0400, 0x1d57: 0x0400, + 0x1d58: 0x0400, 0x1d59: 0x0400, 0x1d5a: 0x0400, 0x1d5b: 0x0400, 0x1d5c: 0x0400, 0x1d5d: 0x0400, + 0x1d5e: 0x0400, 0x1d5f: 0x0400, 0x1d60: 0x0200, 0x1d61: 0x0400, 0x1d62: 0x0400, 0x1d63: 0x0400, + 0x1d64: 0x0400, 0x1d65: 0x0400, 0x1d66: 0x0400, 0x1d67: 0x0400, 0x1d68: 0x0400, 0x1d69: 0x0400, + 0x1d6a: 0x0400, 0x1d6b: 0x0400, 0x1d6c: 0x0400, 0x1d6d: 0x0400, 0x1d6e: 0x0400, 0x1d6f: 0x0400, + 0x1d70: 0x0400, 0x1d71: 0x0400, 0x1d72: 0x0400, 0x1d73: 0x0400, 0x1d74: 0x0400, 0x1d75: 0x0400, + 0x1d76: 0x0400, 0x1d77: 0x0400, 0x1d78: 0x0400, 0x1d79: 0x0400, 0x1d7a: 0x0400, 0x1d7b: 0x0400, + 0x1d7c: 0x0200, 0x1d7d: 0x0400, 0x1d7e: 0x0400, 0x1d7f: 0x0400, // Block 0x76, offset 0x1d80 - 0x1d80: 0x0080, 0x1d81: 0x0080, 0x1d82: 0x0080, 0x1d83: 0x0080, 0x1d84: 0x0080, 0x1d85: 0x0080, - 0x1d86: 0x0080, 0x1d87: 0x0080, 0x1d88: 0x0080, 0x1d89: 0x0080, 0x1d8a: 0x0080, 0x1d8b: 0x0080, - 0x1d8c: 0x0080, 0x1d8d: 0x0080, 0x1d8e: 0x0080, 0x1d8f: 0x0080, 0x1d90: 0x0040, 0x1d91: 0x0080, - 0x1d92: 0x0080, 0x1d93: 0x0080, 0x1d94: 0x0080, 0x1d95: 0x0080, 0x1d96: 0x0080, 0x1d97: 0x0080, - 0x1d98: 0x0080, 0x1d99: 0x0080, 0x1d9a: 0x0080, 0x1d9b: 0x0080, 0x1d9c: 0x0080, 0x1d9d: 0x0080, - 0x1d9e: 0x0080, 0x1d9f: 0x0080, 0x1da0: 0x0080, 0x1da1: 0x0080, 0x1da2: 0x0080, 0x1da3: 0x0080, - 0x1da4: 0x0080, 0x1da5: 0x0080, 0x1da6: 0x0080, 0x1da7: 0x0080, 0x1da8: 0x0080, 0x1da9: 0x0080, - 0x1daa: 0x0080, 0x1dab: 0x0080, 0x1dac: 0x0040, 0x1dad: 0x0080, 0x1dae: 0x0080, 0x1daf: 0x0080, - 0x1db0: 0x0080, 0x1db1: 0x0080, 0x1db2: 0x0080, 0x1db3: 0x0080, 0x1db4: 0x0080, 0x1db5: 0x0080, - 0x1db6: 0x0080, 0x1db7: 0x0080, 0x1db8: 0x0080, 0x1db9: 0x0080, 0x1dba: 0x0080, 0x1dbb: 0x0080, - 0x1dbc: 0x0080, 
0x1dbd: 0x0080, 0x1dbe: 0x0080, 0x1dbf: 0x0080, + 0x1d80: 0x0400, 0x1d81: 0x0400, 0x1d82: 0x0400, 0x1d83: 0x0400, 0x1d84: 0x0400, 0x1d85: 0x0400, + 0x1d86: 0x0400, 0x1d87: 0x0400, 0x1d88: 0x0400, 0x1d89: 0x0400, 0x1d8a: 0x0400, 0x1d8b: 0x0400, + 0x1d8c: 0x0400, 0x1d8d: 0x0400, 0x1d8e: 0x0400, 0x1d8f: 0x0400, 0x1d90: 0x0400, 0x1d91: 0x0400, + 0x1d92: 0x0400, 0x1d93: 0x0400, 0x1d94: 0x0400, 0x1d95: 0x0400, 0x1d96: 0x0400, 0x1d97: 0x0400, + 0x1d98: 0x0200, 0x1d99: 0x0400, 0x1d9a: 0x0400, 0x1d9b: 0x0400, 0x1d9c: 0x0400, 0x1d9d: 0x0400, + 0x1d9e: 0x0400, 0x1d9f: 0x0400, 0x1da0: 0x0400, 0x1da1: 0x0400, 0x1da2: 0x0400, 0x1da3: 0x0400, + 0x1da4: 0x0400, 0x1da5: 0x0400, 0x1da6: 0x0400, 0x1da7: 0x0400, 0x1da8: 0x0400, 0x1da9: 0x0400, + 0x1daa: 0x0400, 0x1dab: 0x0400, 0x1dac: 0x0400, 0x1dad: 0x0400, 0x1dae: 0x0400, 0x1daf: 0x0400, + 0x1db0: 0x0400, 0x1db1: 0x0400, 0x1db2: 0x0400, 0x1db3: 0x0400, 0x1db4: 0x0200, 0x1db5: 0x0400, + 0x1db6: 0x0400, 0x1db7: 0x0400, 0x1db8: 0x0400, 0x1db9: 0x0400, 0x1dba: 0x0400, 0x1dbb: 0x0400, + 0x1dbc: 0x0400, 0x1dbd: 0x0400, 0x1dbe: 0x0400, 0x1dbf: 0x0400, // Block 0x77, offset 0x1dc0 - 0x1dc0: 0x0080, 0x1dc1: 0x0080, 0x1dc2: 0x0080, 0x1dc3: 0x0080, 0x1dc4: 0x0080, 0x1dc5: 0x0080, - 0x1dc6: 0x0080, 0x1dc7: 0x0080, 0x1dc8: 0x0040, 0x1dc9: 0x0080, 0x1dca: 0x0080, 0x1dcb: 0x0080, - 0x1dcc: 0x0080, 0x1dcd: 0x0080, 0x1dce: 0x0080, 0x1dcf: 0x0080, 0x1dd0: 0x0080, 0x1dd1: 0x0080, - 0x1dd2: 0x0080, 0x1dd3: 0x0080, 0x1dd4: 0x0080, 0x1dd5: 0x0080, 0x1dd6: 0x0080, 0x1dd7: 0x0080, - 0x1dd8: 0x0080, 0x1dd9: 0x0080, 0x1dda: 0x0080, 0x1ddb: 0x0080, 0x1ddc: 0x0080, 0x1ddd: 0x0080, - 0x1dde: 0x0080, 0x1ddf: 0x0080, 0x1de0: 0x0080, 0x1de1: 0x0080, 0x1de2: 0x0080, 0x1de3: 0x0080, - 0x1de4: 0x0040, 0x1de5: 0x0080, 0x1de6: 0x0080, 0x1de7: 0x0080, 0x1de8: 0x0080, 0x1de9: 0x0080, - 0x1dea: 0x0080, 0x1deb: 0x0080, 0x1dec: 0x0080, 0x1ded: 0x0080, 0x1dee: 0x0080, 0x1def: 0x0080, - 0x1df0: 0x0080, 0x1df1: 0x0080, 0x1df2: 0x0080, 0x1df3: 0x0080, 0x1df4: 0x0080, 0x1df5: 
0x0080, - 0x1df6: 0x0080, 0x1df7: 0x0080, 0x1df8: 0x0080, 0x1df9: 0x0080, 0x1dfa: 0x0080, 0x1dfb: 0x0080, - 0x1dfc: 0x0080, 0x1dfd: 0x0080, 0x1dfe: 0x0080, 0x1dff: 0x0080, + 0x1dc0: 0x0400, 0x1dc1: 0x0400, 0x1dc2: 0x0400, 0x1dc3: 0x0400, 0x1dc4: 0x0400, 0x1dc5: 0x0400, + 0x1dc6: 0x0400, 0x1dc7: 0x0400, 0x1dc8: 0x0400, 0x1dc9: 0x0400, 0x1dca: 0x0400, 0x1dcb: 0x0400, + 0x1dcc: 0x0400, 0x1dcd: 0x0400, 0x1dce: 0x0400, 0x1dcf: 0x0400, 0x1dd0: 0x0200, 0x1dd1: 0x0400, + 0x1dd2: 0x0400, 0x1dd3: 0x0400, 0x1dd4: 0x0400, 0x1dd5: 0x0400, 0x1dd6: 0x0400, 0x1dd7: 0x0400, + 0x1dd8: 0x0400, 0x1dd9: 0x0400, 0x1dda: 0x0400, 0x1ddb: 0x0400, 0x1ddc: 0x0400, 0x1ddd: 0x0400, + 0x1dde: 0x0400, 0x1ddf: 0x0400, 0x1de0: 0x0400, 0x1de1: 0x0400, 0x1de2: 0x0400, 0x1de3: 0x0400, + 0x1de4: 0x0400, 0x1de5: 0x0400, 0x1de6: 0x0400, 0x1de7: 0x0400, 0x1de8: 0x0400, 0x1de9: 0x0400, + 0x1dea: 0x0400, 0x1deb: 0x0400, 0x1dec: 0x0200, 0x1ded: 0x0400, 0x1dee: 0x0400, 0x1def: 0x0400, + 0x1df0: 0x0400, 0x1df1: 0x0400, 0x1df2: 0x0400, 0x1df3: 0x0400, 0x1df4: 0x0400, 0x1df5: 0x0400, + 0x1df6: 0x0400, 0x1df7: 0x0400, 0x1df8: 0x0400, 0x1df9: 0x0400, 0x1dfa: 0x0400, 0x1dfb: 0x0400, + 0x1dfc: 0x0400, 0x1dfd: 0x0400, 0x1dfe: 0x0400, 0x1dff: 0x0400, // Block 0x78, offset 0x1e00 - 0x1e00: 0x0080, 0x1e01: 0x0080, 0x1e02: 0x0080, 0x1e03: 0x0080, 0x1e04: 0x0080, 0x1e05: 0x0080, - 0x1e06: 0x0080, 0x1e07: 0x0080, 0x1e08: 0x0040, 0x1e09: 0x0080, 0x1e0a: 0x0080, 0x1e0b: 0x0080, - 0x1e0c: 0x0080, 0x1e0d: 0x0080, 0x1e0e: 0x0080, 0x1e0f: 0x0080, 0x1e10: 0x0080, 0x1e11: 0x0080, - 0x1e12: 0x0080, 0x1e13: 0x0080, 0x1e14: 0x0080, 0x1e15: 0x0080, 0x1e16: 0x0080, 0x1e17: 0x0080, - 0x1e18: 0x0080, 0x1e19: 0x0080, 0x1e1a: 0x0080, 0x1e1b: 0x0080, 0x1e1c: 0x0080, 0x1e1d: 0x0080, - 0x1e1e: 0x0080, 0x1e1f: 0x0080, 0x1e20: 0x0080, 0x1e21: 0x0080, 0x1e22: 0x0080, 0x1e23: 0x0080, - 0x1e30: 0x1000, 0x1e31: 0x1000, 0x1e32: 0x1000, 0x1e33: 0x1000, 0x1e34: 0x1000, 0x1e35: 0x1000, - 0x1e36: 0x1000, 0x1e37: 0x1000, 0x1e38: 0x1000, 0x1e39: 0x1000, 
0x1e3a: 0x1000, 0x1e3b: 0x1000, - 0x1e3c: 0x1000, 0x1e3d: 0x1000, 0x1e3e: 0x1000, 0x1e3f: 0x1000, + 0x1e00: 0x0400, 0x1e01: 0x0400, 0x1e02: 0x0400, 0x1e03: 0x0400, 0x1e04: 0x0400, 0x1e05: 0x0400, + 0x1e06: 0x0400, 0x1e07: 0x0400, 0x1e08: 0x0200, 0x1e09: 0x0400, 0x1e0a: 0x0400, 0x1e0b: 0x0400, + 0x1e0c: 0x0400, 0x1e0d: 0x0400, 0x1e0e: 0x0400, 0x1e0f: 0x0400, 0x1e10: 0x0400, 0x1e11: 0x0400, + 0x1e12: 0x0400, 0x1e13: 0x0400, 0x1e14: 0x0400, 0x1e15: 0x0400, 0x1e16: 0x0400, 0x1e17: 0x0400, + 0x1e18: 0x0400, 0x1e19: 0x0400, 0x1e1a: 0x0400, 0x1e1b: 0x0400, 0x1e1c: 0x0400, 0x1e1d: 0x0400, + 0x1e1e: 0x0400, 0x1e1f: 0x0400, 0x1e20: 0x0400, 0x1e21: 0x0400, 0x1e22: 0x0400, 0x1e23: 0x0400, + 0x1e24: 0x0200, 0x1e25: 0x0400, 0x1e26: 0x0400, 0x1e27: 0x0400, 0x1e28: 0x0400, 0x1e29: 0x0400, + 0x1e2a: 0x0400, 0x1e2b: 0x0400, 0x1e2c: 0x0400, 0x1e2d: 0x0400, 0x1e2e: 0x0400, 0x1e2f: 0x0400, + 0x1e30: 0x0400, 0x1e31: 0x0400, 0x1e32: 0x0400, 0x1e33: 0x0400, 0x1e34: 0x0400, 0x1e35: 0x0400, + 0x1e36: 0x0400, 0x1e37: 0x0400, 0x1e38: 0x0400, 0x1e39: 0x0400, 0x1e3a: 0x0400, 0x1e3b: 0x0400, + 0x1e3c: 0x0400, 0x1e3d: 0x0400, 0x1e3e: 0x0400, 0x1e3f: 0x0400, // Block 0x79, offset 0x1e40 - 0x1e40: 0x1000, 0x1e41: 0x1000, 0x1e42: 0x1000, 0x1e43: 0x1000, 0x1e44: 0x1000, 0x1e45: 0x1000, - 0x1e46: 0x1000, 0x1e4b: 0x0800, - 0x1e4c: 0x0800, 0x1e4d: 0x0800, 0x1e4e: 0x0800, 0x1e4f: 0x0800, 0x1e50: 0x0800, 0x1e51: 0x0800, - 0x1e52: 0x0800, 0x1e53: 0x0800, 0x1e54: 0x0800, 0x1e55: 0x0800, 0x1e56: 0x0800, 0x1e57: 0x0800, - 0x1e58: 0x0800, 0x1e59: 0x0800, 0x1e5a: 0x0800, 0x1e5b: 0x0800, 0x1e5c: 0x0800, 0x1e5d: 0x0800, - 0x1e5e: 0x0800, 0x1e5f: 0x0800, 0x1e60: 0x0800, 0x1e61: 0x0800, 0x1e62: 0x0800, 0x1e63: 0x0800, - 0x1e64: 0x0800, 0x1e65: 0x0800, 0x1e66: 0x0800, 0x1e67: 0x0800, 0x1e68: 0x0800, 0x1e69: 0x0800, - 0x1e6a: 0x0800, 0x1e6b: 0x0800, 0x1e6c: 0x0800, 0x1e6d: 0x0800, 0x1e6e: 0x0800, 0x1e6f: 0x0800, - 0x1e70: 0x0800, 0x1e71: 0x0800, 0x1e72: 0x0800, 0x1e73: 0x0800, 0x1e74: 0x0800, 0x1e75: 0x0800, - 
0x1e76: 0x0800, 0x1e77: 0x0800, 0x1e78: 0x0800, 0x1e79: 0x0800, 0x1e7a: 0x0800, 0x1e7b: 0x0800, + 0x1e40: 0x0400, 0x1e41: 0x0400, 0x1e42: 0x0400, 0x1e43: 0x0400, 0x1e44: 0x0400, 0x1e45: 0x0400, + 0x1e46: 0x0400, 0x1e47: 0x0400, 0x1e48: 0x0200, 0x1e49: 0x0400, 0x1e4a: 0x0400, 0x1e4b: 0x0400, + 0x1e4c: 0x0400, 0x1e4d: 0x0400, 0x1e4e: 0x0400, 0x1e4f: 0x0400, 0x1e50: 0x0400, 0x1e51: 0x0400, + 0x1e52: 0x0400, 0x1e53: 0x0400, 0x1e54: 0x0400, 0x1e55: 0x0400, 0x1e56: 0x0400, 0x1e57: 0x0400, + 0x1e58: 0x0400, 0x1e59: 0x0400, 0x1e5a: 0x0400, 0x1e5b: 0x0400, 0x1e5c: 0x0400, 0x1e5d: 0x0400, + 0x1e5e: 0x0400, 0x1e5f: 0x0400, 0x1e60: 0x0400, 0x1e61: 0x0400, 0x1e62: 0x0400, 0x1e63: 0x0400, + 0x1e70: 0x8000, 0x1e71: 0x8000, 0x1e72: 0x8000, 0x1e73: 0x8000, 0x1e74: 0x8000, 0x1e75: 0x8000, + 0x1e76: 0x8000, 0x1e77: 0x8000, 0x1e78: 0x8000, 0x1e79: 0x8000, 0x1e7a: 0x8000, 0x1e7b: 0x8000, + 0x1e7c: 0x8000, 0x1e7d: 0x8000, 0x1e7e: 0x8000, 0x1e7f: 0x8000, // Block 0x7a, offset 0x1e80 - 0x1e9e: 0x0004, + 0x1e80: 0x8000, 0x1e81: 0x8000, 0x1e82: 0x8000, 0x1e83: 0x8000, 0x1e84: 0x8000, 0x1e85: 0x8000, + 0x1e86: 0x8000, 0x1e8b: 0x4000, + 0x1e8c: 0x4000, 0x1e8d: 0x4000, 0x1e8e: 0x4000, 0x1e8f: 0x4000, 0x1e90: 0x4000, 0x1e91: 0x4000, + 0x1e92: 0x4000, 0x1e93: 0x4000, 0x1e94: 0x4000, 0x1e95: 0x4000, 0x1e96: 0x4000, 0x1e97: 0x4000, + 0x1e98: 0x4000, 0x1e99: 0x4000, 0x1e9a: 0x4000, 0x1e9b: 0x4000, 0x1e9c: 0x4000, 0x1e9d: 0x4000, + 0x1e9e: 0x4000, 0x1e9f: 0x4000, 0x1ea0: 0x4000, 0x1ea1: 0x4000, 0x1ea2: 0x4000, 0x1ea3: 0x4000, + 0x1ea4: 0x4000, 0x1ea5: 0x4000, 0x1ea6: 0x4000, 0x1ea7: 0x4000, 0x1ea8: 0x4000, 0x1ea9: 0x4000, + 0x1eaa: 0x4000, 0x1eab: 0x4000, 0x1eac: 0x4000, 0x1ead: 0x4000, 0x1eae: 0x4000, 0x1eaf: 0x4000, + 0x1eb0: 0x4000, 0x1eb1: 0x4000, 0x1eb2: 0x4000, 0x1eb3: 0x4000, 0x1eb4: 0x4000, 0x1eb5: 0x4000, + 0x1eb6: 0x4000, 0x1eb7: 0x4000, 0x1eb8: 0x4000, 0x1eb9: 0x4000, 0x1eba: 0x4000, 0x1ebb: 0x4000, // Block 0x7b, offset 0x1ec0 - 0x1ec0: 0x0004, 0x1ec1: 0x0004, 0x1ec2: 0x0004, 0x1ec3: 
0x0004, 0x1ec4: 0x0004, 0x1ec5: 0x0004, - 0x1ec6: 0x0004, 0x1ec7: 0x0004, 0x1ec8: 0x0004, 0x1ec9: 0x0004, 0x1eca: 0x0004, 0x1ecb: 0x0004, - 0x1ecc: 0x0004, 0x1ecd: 0x0004, 0x1ece: 0x0004, 0x1ecf: 0x0004, - 0x1ee0: 0x0004, 0x1ee1: 0x0004, 0x1ee2: 0x0004, 0x1ee3: 0x0004, - 0x1ee4: 0x0004, 0x1ee5: 0x0004, 0x1ee6: 0x0004, 0x1ee7: 0x0004, 0x1ee8: 0x0004, 0x1ee9: 0x0004, - 0x1eea: 0x0004, 0x1eeb: 0x0004, 0x1eec: 0x0004, 0x1eed: 0x0004, 0x1eee: 0x0004, 0x1eef: 0x0004, + 0x1ede: 0x0024, // Block 0x7c, offset 0x1f00 - 0x1f3f: 0x0002, + 0x1f00: 0x0024, 0x1f01: 0x0024, 0x1f02: 0x0024, 0x1f03: 0x0024, 0x1f04: 0x0024, 0x1f05: 0x0024, + 0x1f06: 0x0024, 0x1f07: 0x0024, 0x1f08: 0x0024, 0x1f09: 0x0024, 0x1f0a: 0x0024, 0x1f0b: 0x0024, + 0x1f0c: 0x0024, 0x1f0d: 0x0024, 0x1f0e: 0x0024, 0x1f0f: 0x0024, + 0x1f20: 0x0024, 0x1f21: 0x0024, 0x1f22: 0x0024, 0x1f23: 0x0024, + 0x1f24: 0x0024, 0x1f25: 0x0024, 0x1f26: 0x0024, 0x1f27: 0x0024, 0x1f28: 0x0024, 0x1f29: 0x0024, + 0x1f2a: 0x0024, 0x1f2b: 0x0024, 0x1f2c: 0x0024, 0x1f2d: 0x0024, 0x1f2e: 0x0024, 0x1f2f: 0x0024, // Block 0x7d, offset 0x1f40 - 0x1f70: 0x0002, 0x1f71: 0x0002, 0x1f72: 0x0002, 0x1f73: 0x0002, 0x1f74: 0x0002, 0x1f75: 0x0002, - 0x1f76: 0x0002, 0x1f77: 0x0002, 0x1f78: 0x0002, 0x1f79: 0x0002, 0x1f7a: 0x0002, 0x1f7b: 0x0002, + 0x1f7f: 0x0002, // Block 0x7e, offset 0x1f80 - 0x1fbd: 0x0004, + 0x1fb0: 0x0002, 0x1fb1: 0x0002, 0x1fb2: 0x0002, 0x1fb3: 0x0002, 0x1fb4: 0x0002, 0x1fb5: 0x0002, + 0x1fb6: 0x0002, 0x1fb7: 0x0002, 0x1fb8: 0x0002, 0x1fb9: 0x0002, 0x1fba: 0x0002, 0x1fbb: 0x0002, // Block 0x7f, offset 0x1fc0 - 0x1fe0: 0x0004, + 0x1ffd: 0x0024, // Block 0x80, offset 0x2000 - 0x2036: 0x0004, 0x2037: 0x0004, 0x2038: 0x0004, 0x2039: 0x0004, 0x203a: 0x0004, + 0x2020: 0x0024, // Block 0x81, offset 0x2040 - 0x2041: 0x0004, 0x2042: 0x0004, 0x2043: 0x0004, 0x2045: 0x0004, - 0x2046: 0x0004, - 0x204c: 0x0004, 0x204d: 0x0004, 0x204e: 0x0004, 0x204f: 0x0004, - 0x2078: 0x0004, 0x2079: 0x0004, 0x207a: 0x0004, - 0x207f: 0x0004, + 0x2076: 0x0024, 
0x2077: 0x0024, 0x2078: 0x0024, 0x2079: 0x0024, 0x207a: 0x0024, // Block 0x82, offset 0x2080 - 0x20a5: 0x0004, 0x20a6: 0x0004, + 0x2080: 0x0010, 0x2081: 0x0024, 0x2082: 0x0024, 0x2083: 0x0024, 0x2085: 0x0024, + 0x2086: 0x0024, + 0x208c: 0x0024, 0x208d: 0x0024, 0x208e: 0x0024, 0x208f: 0x0024, 0x2090: 0x0010, 0x2091: 0x0010, + 0x2092: 0x0010, 0x2093: 0x0010, 0x2095: 0x0010, 0x2096: 0x0010, 0x2097: 0x0010, + 0x2099: 0x0010, 0x209a: 0x0010, 0x209b: 0x0010, 0x209c: 0x0010, 0x209d: 0x0010, + 0x209e: 0x0010, 0x209f: 0x0010, 0x20a0: 0x0010, 0x20a1: 0x0010, 0x20a2: 0x0010, 0x20a3: 0x0010, + 0x20a4: 0x0010, 0x20a5: 0x0010, 0x20a6: 0x0010, 0x20a7: 0x0010, 0x20a8: 0x0010, 0x20a9: 0x0010, + 0x20aa: 0x0010, 0x20ab: 0x0010, 0x20ac: 0x0010, 0x20ad: 0x0010, 0x20ae: 0x0010, 0x20af: 0x0010, + 0x20b0: 0x0010, 0x20b1: 0x0010, 0x20b2: 0x0010, 0x20b3: 0x0010, 0x20b4: 0x0010, 0x20b5: 0x0010, + 0x20b8: 0x0024, 0x20b9: 0x0024, 0x20ba: 0x0024, + 0x20bf: 0x0044, // Block 0x83, offset 0x20c0 - 0x20e4: 0x0004, 0x20e5: 0x0004, 0x20e6: 0x0004, 0x20e7: 0x0004, + 0x20e5: 0x0024, 0x20e6: 0x0024, // Block 0x84, offset 0x2100 - 0x212b: 0x0004, 0x212c: 0x0004, + 0x2124: 0x0024, 0x2125: 0x0024, 0x2126: 0x0024, 0x2127: 0x0024, // Block 0x85, offset 0x2140 - 0x217d: 0x0004, 0x217e: 0x0004, 0x217f: 0x0004, + 0x2169: 0x0024, + 0x216a: 0x0024, 0x216b: 0x0024, 0x216c: 0x0024, 0x216d: 0x0024, // Block 0x86, offset 0x2180 - 0x2186: 0x0004, 0x2187: 0x0004, 0x2188: 0x0004, 0x2189: 0x0004, 0x218a: 0x0004, 0x218b: 0x0004, - 0x218c: 0x0004, 0x218d: 0x0004, 0x218e: 0x0004, 0x218f: 0x0004, 0x2190: 0x0004, + 0x21ab: 0x0024, 0x21ac: 0x0024, // Block 0x87, offset 0x21c0 - 0x21c2: 0x0004, 0x21c3: 0x0004, 0x21c4: 0x0004, 0x21c5: 0x0004, + 0x21fa: 0x0024, 0x21fb: 0x0024, + 0x21fc: 0x0024, 0x21fd: 0x0024, 0x21fe: 0x0024, 0x21ff: 0x0024, // Block 0x88, offset 0x2200 - 0x2200: 0x0400, 0x2201: 0x0004, 0x2202: 0x0400, - 0x2238: 0x0004, 0x2239: 0x0004, 0x223a: 0x0004, 0x223b: 0x0004, - 0x223c: 0x0004, 0x223d: 0x0004, 0x223e: 
0x0004, 0x223f: 0x0004, + 0x2206: 0x0024, 0x2207: 0x0024, 0x2208: 0x0024, 0x2209: 0x0024, 0x220a: 0x0024, 0x220b: 0x0024, + 0x220c: 0x0024, 0x220d: 0x0024, 0x220e: 0x0024, 0x220f: 0x0024, 0x2210: 0x0024, // Block 0x89, offset 0x2240 - 0x2240: 0x0004, 0x2241: 0x0004, 0x2242: 0x0004, 0x2243: 0x0004, 0x2244: 0x0004, 0x2245: 0x0004, - 0x2246: 0x0004, - 0x2270: 0x0004, 0x2273: 0x0004, 0x2274: 0x0004, - 0x227f: 0x0004, + 0x2242: 0x0024, 0x2243: 0x0024, 0x2244: 0x0024, 0x2245: 0x0024, // Block 0x8a, offset 0x2280 - 0x2280: 0x0004, 0x2281: 0x0004, 0x2282: 0x0400, - 0x22b0: 0x0400, 0x22b1: 0x0400, 0x22b2: 0x0400, 0x22b3: 0x0004, 0x22b4: 0x0004, 0x22b5: 0x0004, - 0x22b6: 0x0004, 0x22b7: 0x0400, 0x22b8: 0x0400, 0x22b9: 0x0004, 0x22ba: 0x0004, - 0x22bd: 0x0100, + 0x2280: 0x2000, 0x2281: 0x0024, 0x2282: 0x2000, + 0x22b8: 0x0024, 0x22b9: 0x0024, 0x22ba: 0x0024, 0x22bb: 0x0024, + 0x22bc: 0x0024, 0x22bd: 0x0024, 0x22be: 0x0024, 0x22bf: 0x0024, // Block 0x8b, offset 0x22c0 - 0x22c2: 0x0004, - 0x22cd: 0x0100, + 0x22c0: 0x0024, 0x22c1: 0x0024, 0x22c2: 0x0024, 0x22c3: 0x0024, 0x22c4: 0x0024, 0x22c5: 0x0024, + 0x22c6: 0x0024, + 0x22f0: 0x0024, 0x22f3: 0x0024, 0x22f4: 0x0024, + 0x22ff: 0x0024, // Block 0x8c, offset 0x2300 - 0x2300: 0x0004, 0x2301: 0x0004, 0x2302: 0x0004, - 0x2327: 0x0004, 0x2328: 0x0004, 0x2329: 0x0004, - 0x232a: 0x0004, 0x232b: 0x0004, 0x232c: 0x0400, 0x232d: 0x0004, 0x232e: 0x0004, 0x232f: 0x0004, - 0x2330: 0x0004, 0x2331: 0x0004, 0x2332: 0x0004, 0x2333: 0x0004, 0x2334: 0x0004, + 0x2300: 0x0024, 0x2301: 0x0024, 0x2302: 0x2000, + 0x2330: 0x2000, 0x2331: 0x2000, 0x2332: 0x2000, 0x2333: 0x0024, 0x2334: 0x0024, 0x2335: 0x0024, + 0x2336: 0x0024, 0x2337: 0x2000, 0x2338: 0x2000, 0x2339: 0x0024, 0x233a: 0x0024, + 0x233d: 0x0800, // Block 0x8d, offset 0x2340 - 0x2345: 0x0400, - 0x2346: 0x0400, - 0x2373: 0x0004, + 0x2342: 0x0024, + 0x234d: 0x0800, // Block 0x8e, offset 0x2380 - 0x2380: 0x0004, 0x2381: 0x0004, 0x2382: 0x0400, - 0x23b3: 0x0400, 0x23b4: 0x0400, 0x23b5: 0x0400, - 
0x23b6: 0x0004, 0x23b7: 0x0004, 0x23b8: 0x0004, 0x23b9: 0x0004, 0x23ba: 0x0004, 0x23bb: 0x0004, - 0x23bc: 0x0004, 0x23bd: 0x0004, 0x23be: 0x0004, 0x23bf: 0x0400, + 0x2380: 0x0024, 0x2381: 0x0024, 0x2382: 0x0024, 0x2383: 0x0010, 0x2384: 0x0010, 0x2385: 0x0010, + 0x2386: 0x0010, 0x2387: 0x0010, 0x2388: 0x0010, 0x2389: 0x0010, 0x238a: 0x0010, 0x238b: 0x0010, + 0x238c: 0x0010, 0x238d: 0x0010, 0x238e: 0x0010, 0x238f: 0x0010, 0x2390: 0x0010, 0x2391: 0x0010, + 0x2392: 0x0010, 0x2393: 0x0010, 0x2394: 0x0010, 0x2395: 0x0010, 0x2396: 0x0010, 0x2397: 0x0010, + 0x2398: 0x0010, 0x2399: 0x0010, 0x239a: 0x0010, 0x239b: 0x0010, 0x239c: 0x0010, 0x239d: 0x0010, + 0x239e: 0x0010, 0x239f: 0x0010, 0x23a0: 0x0010, 0x23a1: 0x0010, 0x23a2: 0x0010, 0x23a3: 0x0010, + 0x23a4: 0x0010, 0x23a5: 0x0010, 0x23a6: 0x0010, 0x23a7: 0x0024, 0x23a8: 0x0024, 0x23a9: 0x0024, + 0x23aa: 0x0024, 0x23ab: 0x0024, 0x23ac: 0x2000, 0x23ad: 0x0024, 0x23ae: 0x0024, 0x23af: 0x0024, + 0x23b0: 0x0024, 0x23b1: 0x0024, 0x23b2: 0x0024, 0x23b3: 0x0044, 0x23b4: 0x0024, // Block 0x8f, offset 0x23c0 - 0x23c0: 0x0400, 0x23c2: 0x0100, 0x23c3: 0x0100, - 0x23c9: 0x0004, 0x23ca: 0x0004, 0x23cb: 0x0004, - 0x23cc: 0x0004, 0x23ce: 0x0400, 0x23cf: 0x0004, + 0x23c4: 0x0010, 0x23c5: 0x2000, + 0x23c6: 0x2000, 0x23c7: 0x0010, + 0x23f3: 0x0024, // Block 0x90, offset 0x2400 - 0x242c: 0x0400, 0x242d: 0x0400, 0x242e: 0x0400, 0x242f: 0x0004, - 0x2430: 0x0004, 0x2431: 0x0004, 0x2432: 0x0400, 0x2433: 0x0400, 0x2434: 0x0004, 0x2435: 0x0400, - 0x2436: 0x0004, 0x2437: 0x0004, - 0x243e: 0x0004, + 0x2400: 0x0024, 0x2401: 0x0024, 0x2402: 0x2000, + 0x2433: 0x2000, 0x2434: 0x2000, 0x2435: 0x2000, + 0x2436: 0x0024, 0x2437: 0x0024, 0x2438: 0x0024, 0x2439: 0x0024, 0x243a: 0x0024, 0x243b: 0x0024, + 0x243c: 0x0024, 0x243d: 0x0024, 0x243e: 0x0024, 0x243f: 0x2000, // Block 0x91, offset 0x2440 - 0x2441: 0x0004, + 0x2440: 0x0024, 0x2442: 0x0800, 0x2443: 0x0800, + 0x2449: 0x0024, 0x244a: 0x0024, 0x244b: 0x0024, + 0x244c: 0x0024, 0x244e: 0x2000, 0x244f: 0x0024, 
// Block 0x92, offset 0x2480 - 0x249f: 0x0004, 0x24a0: 0x0400, 0x24a1: 0x0400, 0x24a2: 0x0400, 0x24a3: 0x0004, - 0x24a4: 0x0004, 0x24a5: 0x0004, 0x24a6: 0x0004, 0x24a7: 0x0004, 0x24a8: 0x0004, 0x24a9: 0x0004, - 0x24aa: 0x0004, + 0x24ac: 0x2000, 0x24ad: 0x2000, 0x24ae: 0x2000, 0x24af: 0x0024, + 0x24b0: 0x0024, 0x24b1: 0x0024, 0x24b2: 0x2000, 0x24b3: 0x2000, 0x24b4: 0x0024, 0x24b5: 0x0024, + 0x24b6: 0x0024, 0x24b7: 0x0024, + 0x24be: 0x0024, // Block 0x93, offset 0x24c0 - 0x24c0: 0x0004, 0x24c1: 0x0400, 0x24c2: 0x0400, 0x24c3: 0x0400, 0x24c4: 0x0400, - 0x24c7: 0x0400, 0x24c8: 0x0400, 0x24cb: 0x0400, - 0x24cc: 0x0400, 0x24cd: 0x0400, - 0x24d7: 0x0004, - 0x24e2: 0x0400, 0x24e3: 0x0400, - 0x24e6: 0x0004, 0x24e7: 0x0004, 0x24e8: 0x0004, 0x24e9: 0x0004, - 0x24ea: 0x0004, 0x24eb: 0x0004, 0x24ec: 0x0004, - 0x24f0: 0x0004, 0x24f1: 0x0004, 0x24f2: 0x0004, 0x24f3: 0x0004, 0x24f4: 0x0004, + 0x24c1: 0x0024, // Block 0x94, offset 0x2500 - 0x2535: 0x0400, - 0x2536: 0x0400, 0x2537: 0x0400, 0x2538: 0x0004, 0x2539: 0x0004, 0x253a: 0x0004, 0x253b: 0x0004, - 0x253c: 0x0004, 0x253d: 0x0004, 0x253e: 0x0004, 0x253f: 0x0004, + 0x251f: 0x0024, 0x2520: 0x2000, 0x2521: 0x2000, 0x2522: 0x2000, 0x2523: 0x0024, + 0x2524: 0x0024, 0x2525: 0x0024, 0x2526: 0x0024, 0x2527: 0x0024, 0x2528: 0x0024, 0x2529: 0x0024, + 0x252a: 0x0024, // Block 0x95, offset 0x2540 - 0x2540: 0x0400, 0x2541: 0x0400, 0x2542: 0x0004, 0x2543: 0x0004, 0x2544: 0x0004, 0x2545: 0x0400, - 0x2546: 0x0004, - 0x255e: 0x0004, + 0x2540: 0x0024, 0x2541: 0x0024, 0x2542: 0x2000, 0x2543: 0x2000, + 0x257b: 0x0024, + 0x257c: 0x0024, 0x257e: 0x0024, 0x257f: 0x2000, // Block 0x96, offset 0x2580 - 0x25b0: 0x0004, 0x25b1: 0x0400, 0x25b2: 0x0400, 0x25b3: 0x0004, 0x25b4: 0x0004, 0x25b5: 0x0004, - 0x25b6: 0x0004, 0x25b7: 0x0004, 0x25b8: 0x0004, 0x25b9: 0x0400, 0x25ba: 0x0004, 0x25bb: 0x0400, - 0x25bc: 0x0400, 0x25bd: 0x0004, 0x25be: 0x0400, 0x25bf: 0x0004, + 0x2580: 0x0024, 0x2581: 0x2000, 0x2582: 0x2000, 0x2583: 0x2000, 0x2584: 0x2000, + 0x2587: 
0x2000, 0x2588: 0x2000, 0x258b: 0x2000, + 0x258c: 0x2000, 0x258d: 0x0024, + 0x2597: 0x0024, + 0x25a2: 0x2000, 0x25a3: 0x2000, + 0x25a6: 0x0024, 0x25a7: 0x0024, 0x25a8: 0x0024, 0x25a9: 0x0024, + 0x25aa: 0x0024, 0x25ab: 0x0024, 0x25ac: 0x0024, + 0x25b0: 0x0024, 0x25b1: 0x0024, 0x25b2: 0x0024, 0x25b3: 0x0024, 0x25b4: 0x0024, // Block 0x97, offset 0x25c0 - 0x25c0: 0x0004, 0x25c1: 0x0400, 0x25c2: 0x0004, 0x25c3: 0x0004, + 0x25c0: 0x0010, 0x25c1: 0x0010, 0x25c2: 0x0010, 0x25c3: 0x0010, 0x25c4: 0x0010, 0x25c5: 0x0010, + 0x25c6: 0x0010, 0x25c7: 0x0010, 0x25c8: 0x0010, 0x25c9: 0x0010, 0x25cb: 0x0010, + 0x25ce: 0x0010, 0x25d0: 0x0010, 0x25d1: 0x0010, + 0x25d2: 0x0010, 0x25d3: 0x0010, 0x25d4: 0x0010, 0x25d5: 0x0010, 0x25d6: 0x0010, 0x25d7: 0x0010, + 0x25d8: 0x0010, 0x25d9: 0x0010, 0x25da: 0x0010, 0x25db: 0x0010, 0x25dc: 0x0010, 0x25dd: 0x0010, + 0x25de: 0x0010, 0x25df: 0x0010, 0x25e0: 0x0010, 0x25e1: 0x0010, 0x25e2: 0x0010, 0x25e3: 0x0010, + 0x25e4: 0x0010, 0x25e5: 0x0010, 0x25e6: 0x0010, 0x25e7: 0x0010, 0x25e8: 0x0010, 0x25e9: 0x0010, + 0x25ea: 0x0010, 0x25eb: 0x0010, 0x25ec: 0x0010, 0x25ed: 0x0010, 0x25ee: 0x0010, 0x25ef: 0x0010, + 0x25f0: 0x0010, 0x25f1: 0x0010, 0x25f2: 0x0010, 0x25f3: 0x0010, 0x25f4: 0x0010, 0x25f5: 0x0010, + 0x25f8: 0x0024, 0x25f9: 0x2000, 0x25fa: 0x2000, 0x25fb: 0x0024, + 0x25fc: 0x0024, 0x25fd: 0x0024, 0x25fe: 0x0024, 0x25ff: 0x0024, // Block 0x98, offset 0x2600 - 0x262f: 0x0004, - 0x2630: 0x0400, 0x2631: 0x0400, 0x2632: 0x0004, 0x2633: 0x0004, 0x2634: 0x0004, 0x2635: 0x0004, - 0x2638: 0x0400, 0x2639: 0x0400, 0x263a: 0x0400, 0x263b: 0x0400, - 0x263c: 0x0004, 0x263d: 0x0004, 0x263e: 0x0400, 0x263f: 0x0004, + 0x2600: 0x0024, 0x2602: 0x0024, 0x2605: 0x0024, + 0x2607: 0x0024, 0x2608: 0x0024, 0x2609: 0x0024, 0x260a: 0x2000, + 0x260c: 0x2000, 0x260d: 0x2000, 0x260e: 0x0024, 0x260f: 0x0024, 0x2610: 0x0044, 0x2611: 0x0800, + 0x2612: 0x0024, + 0x2621: 0x0024, 0x2622: 0x0024, // Block 0x99, offset 0x2640 - 0x2640: 0x0004, - 0x265c: 0x0004, 0x265d: 0x0004, + 
0x2675: 0x2000, + 0x2676: 0x2000, 0x2677: 0x2000, 0x2678: 0x0024, 0x2679: 0x0024, 0x267a: 0x0024, 0x267b: 0x0024, + 0x267c: 0x0024, 0x267d: 0x0024, 0x267e: 0x0024, 0x267f: 0x0024, // Block 0x9a, offset 0x2680 - 0x26b0: 0x0400, 0x26b1: 0x0400, 0x26b2: 0x0400, 0x26b3: 0x0004, 0x26b4: 0x0004, 0x26b5: 0x0004, - 0x26b6: 0x0004, 0x26b7: 0x0004, 0x26b8: 0x0004, 0x26b9: 0x0004, 0x26ba: 0x0004, 0x26bb: 0x0400, - 0x26bc: 0x0400, 0x26bd: 0x0004, 0x26be: 0x0400, 0x26bf: 0x0004, + 0x2680: 0x2000, 0x2681: 0x2000, 0x2682: 0x0024, 0x2683: 0x0024, 0x2684: 0x0024, 0x2685: 0x2000, + 0x2686: 0x0024, + 0x269e: 0x0024, // Block 0x9b, offset 0x26c0 - 0x26c0: 0x0004, + 0x26f0: 0x0024, 0x26f1: 0x2000, 0x26f2: 0x2000, 0x26f3: 0x0024, 0x26f4: 0x0024, 0x26f5: 0x0024, + 0x26f6: 0x0024, 0x26f7: 0x0024, 0x26f8: 0x0024, 0x26f9: 0x2000, 0x26fa: 0x0024, 0x26fb: 0x2000, + 0x26fc: 0x2000, 0x26fd: 0x0024, 0x26fe: 0x2000, 0x26ff: 0x0024, // Block 0x9c, offset 0x2700 - 0x272b: 0x0004, 0x272c: 0x0400, 0x272d: 0x0004, 0x272e: 0x0400, 0x272f: 0x0400, - 0x2730: 0x0004, 0x2731: 0x0004, 0x2732: 0x0004, 0x2733: 0x0004, 0x2734: 0x0004, 0x2735: 0x0004, - 0x2736: 0x0400, 0x2737: 0x0004, + 0x2700: 0x0024, 0x2701: 0x2000, 0x2702: 0x0024, 0x2703: 0x0024, // Block 0x9d, offset 0x2740 - 0x275d: 0x0004, - 0x275e: 0x0004, 0x275f: 0x0004, 0x2762: 0x0004, 0x2763: 0x0004, - 0x2764: 0x0004, 0x2765: 0x0004, 0x2766: 0x0400, 0x2767: 0x0004, 0x2768: 0x0004, 0x2769: 0x0004, - 0x276a: 0x0004, 0x276b: 0x0004, + 0x276f: 0x0024, + 0x2770: 0x2000, 0x2771: 0x2000, 0x2772: 0x0024, 0x2773: 0x0024, 0x2774: 0x0024, 0x2775: 0x0024, + 0x2778: 0x2000, 0x2779: 0x2000, 0x277a: 0x2000, 0x277b: 0x2000, + 0x277c: 0x0024, 0x277d: 0x0024, 0x277e: 0x2000, 0x277f: 0x0024, // Block 0x9e, offset 0x2780 - 0x27ac: 0x0400, 0x27ad: 0x0400, 0x27ae: 0x0400, 0x27af: 0x0004, - 0x27b0: 0x0004, 0x27b1: 0x0004, 0x27b2: 0x0004, 0x27b3: 0x0004, 0x27b4: 0x0004, 0x27b5: 0x0004, - 0x27b6: 0x0004, 0x27b7: 0x0004, 0x27b8: 0x0400, 0x27b9: 0x0004, 0x27ba: 0x0004, + 
0x2780: 0x0024, + 0x279c: 0x0024, 0x279d: 0x0024, // Block 0x9f, offset 0x27c0 - 0x27f0: 0x0004, 0x27f1: 0x0400, 0x27f2: 0x0400, 0x27f3: 0x0400, 0x27f4: 0x0400, 0x27f5: 0x0400, - 0x27f7: 0x0400, 0x27f8: 0x0400, 0x27fb: 0x0004, - 0x27fc: 0x0004, 0x27fd: 0x0400, 0x27fe: 0x0004, 0x27ff: 0x0100, + 0x27f0: 0x2000, 0x27f1: 0x2000, 0x27f2: 0x2000, 0x27f3: 0x0024, 0x27f4: 0x0024, 0x27f5: 0x0024, + 0x27f6: 0x0024, 0x27f7: 0x0024, 0x27f8: 0x0024, 0x27f9: 0x0024, 0x27fa: 0x0024, 0x27fb: 0x2000, + 0x27fc: 0x2000, 0x27fd: 0x0024, 0x27fe: 0x2000, 0x27ff: 0x0024, // Block 0xa0, offset 0x2800 - 0x2800: 0x0400, 0x2801: 0x0100, 0x2802: 0x0400, 0x2803: 0x0004, + 0x2800: 0x0024, // Block 0xa1, offset 0x2840 - 0x2851: 0x0400, - 0x2852: 0x0400, 0x2853: 0x0400, 0x2854: 0x0004, 0x2855: 0x0004, 0x2856: 0x0004, 0x2857: 0x0004, - 0x285a: 0x0004, 0x285b: 0x0004, 0x285c: 0x0400, 0x285d: 0x0400, - 0x285e: 0x0400, 0x285f: 0x0400, 0x2860: 0x0004, - 0x2864: 0x0400, + 0x286b: 0x0024, 0x286c: 0x2000, 0x286d: 0x0024, 0x286e: 0x2000, 0x286f: 0x2000, + 0x2870: 0x0024, 0x2871: 0x0024, 0x2872: 0x0024, 0x2873: 0x0024, 0x2874: 0x0024, 0x2875: 0x0024, + 0x2876: 0x0024, 0x2877: 0x0024, // Block 0xa2, offset 0x2880 - 0x2881: 0x0004, 0x2882: 0x0004, 0x2883: 0x0004, 0x2884: 0x0004, 0x2885: 0x0004, - 0x2886: 0x0004, 0x2887: 0x0004, 0x2888: 0x0004, 0x2889: 0x0004, 0x288a: 0x0004, - 0x28b3: 0x0004, 0x28b4: 0x0004, 0x28b5: 0x0004, - 0x28b6: 0x0004, 0x28b7: 0x0004, 0x28b8: 0x0004, 0x28b9: 0x0400, 0x28ba: 0x0100, 0x28bb: 0x0004, - 0x28bc: 0x0004, 0x28bd: 0x0004, 0x28be: 0x0004, + 0x289d: 0x0024, + 0x289e: 0x2000, 0x289f: 0x0024, 0x28a2: 0x0024, 0x28a3: 0x0024, + 0x28a4: 0x0024, 0x28a5: 0x0024, 0x28a6: 0x2000, 0x28a7: 0x0024, 0x28a8: 0x0024, 0x28a9: 0x0024, + 0x28aa: 0x0024, 0x28ab: 0x0024, // Block 0xa3, offset 0x28c0 - 0x28c7: 0x0004, - 0x28d1: 0x0004, - 0x28d2: 0x0004, 0x28d3: 0x0004, 0x28d4: 0x0004, 0x28d5: 0x0004, 0x28d6: 0x0004, 0x28d7: 0x0400, - 0x28d8: 0x0400, 0x28d9: 0x0004, 0x28da: 0x0004, 0x28db: 0x0004, + 
0x28ec: 0x2000, 0x28ed: 0x2000, 0x28ee: 0x2000, 0x28ef: 0x0024, + 0x28f0: 0x0024, 0x28f1: 0x0024, 0x28f2: 0x0024, 0x28f3: 0x0024, 0x28f4: 0x0024, 0x28f5: 0x0024, + 0x28f6: 0x0024, 0x28f7: 0x0024, 0x28f8: 0x2000, 0x28f9: 0x0024, 0x28fa: 0x0024, // Block 0xa4, offset 0x2900 - 0x2904: 0x0100, 0x2905: 0x0100, - 0x2906: 0x0100, 0x2907: 0x0100, 0x2908: 0x0100, 0x2909: 0x0100, 0x290a: 0x0004, 0x290b: 0x0004, - 0x290c: 0x0004, 0x290d: 0x0004, 0x290e: 0x0004, 0x290f: 0x0004, 0x2910: 0x0004, 0x2911: 0x0004, - 0x2912: 0x0004, 0x2913: 0x0004, 0x2914: 0x0004, 0x2915: 0x0004, 0x2916: 0x0004, 0x2917: 0x0400, - 0x2918: 0x0004, 0x2919: 0x0004, + 0x2900: 0x0010, 0x2901: 0x0010, 0x2902: 0x0010, 0x2903: 0x0010, 0x2904: 0x0010, 0x2905: 0x0010, + 0x2906: 0x0010, 0x2909: 0x0010, + 0x290c: 0x0010, 0x290d: 0x0010, 0x290e: 0x0010, 0x290f: 0x0010, 0x2910: 0x0010, 0x2911: 0x0010, + 0x2912: 0x0010, 0x2913: 0x0010, 0x2915: 0x0010, 0x2916: 0x0010, + 0x2918: 0x0010, 0x2919: 0x0010, 0x291a: 0x0010, 0x291b: 0x0010, 0x291c: 0x0010, 0x291d: 0x0010, + 0x291e: 0x0010, 0x291f: 0x0010, 0x2920: 0x0010, 0x2921: 0x0010, 0x2922: 0x0010, 0x2923: 0x0010, + 0x2924: 0x0010, 0x2925: 0x0010, 0x2926: 0x0010, 0x2927: 0x0010, 0x2928: 0x0010, 0x2929: 0x0010, + 0x292a: 0x0010, 0x292b: 0x0010, 0x292c: 0x0010, 0x292d: 0x0010, 0x292e: 0x0010, 0x292f: 0x0010, + 0x2930: 0x0024, 0x2931: 0x2000, 0x2932: 0x2000, 0x2933: 0x2000, 0x2934: 0x2000, 0x2935: 0x2000, + 0x2937: 0x2000, 0x2938: 0x2000, 0x293b: 0x0024, + 0x293c: 0x0024, 0x293d: 0x0024, 0x293e: 0x0044, 0x293f: 0x0800, // Block 0xa5, offset 0x2940 - 0x296f: 0x0400, - 0x2970: 0x0004, 0x2971: 0x0004, 0x2972: 0x0004, 0x2973: 0x0004, 0x2974: 0x0004, 0x2975: 0x0004, - 0x2976: 0x0004, 0x2978: 0x0004, 0x2979: 0x0004, 0x297a: 0x0004, 0x297b: 0x0004, - 0x297c: 0x0004, 0x297d: 0x0004, 0x297e: 0x0400, 0x297f: 0x0004, + 0x2940: 0x2000, 0x2941: 0x0800, 0x2942: 0x2000, 0x2943: 0x0024, // Block 0xa6, offset 0x2980 - 0x2992: 0x0004, 0x2993: 0x0004, 0x2994: 0x0004, 0x2995: 0x0004, 0x2996: 
0x0004, 0x2997: 0x0004, - 0x2998: 0x0004, 0x2999: 0x0004, 0x299a: 0x0004, 0x299b: 0x0004, 0x299c: 0x0004, 0x299d: 0x0004, - 0x299e: 0x0004, 0x299f: 0x0004, 0x29a0: 0x0004, 0x29a1: 0x0004, 0x29a2: 0x0004, 0x29a3: 0x0004, - 0x29a4: 0x0004, 0x29a5: 0x0004, 0x29a6: 0x0004, 0x29a7: 0x0004, 0x29a9: 0x0400, - 0x29aa: 0x0004, 0x29ab: 0x0004, 0x29ac: 0x0004, 0x29ad: 0x0004, 0x29ae: 0x0004, 0x29af: 0x0004, - 0x29b0: 0x0004, 0x29b1: 0x0400, 0x29b2: 0x0004, 0x29b3: 0x0004, 0x29b4: 0x0400, 0x29b5: 0x0004, - 0x29b6: 0x0004, + 0x2991: 0x2000, + 0x2992: 0x2000, 0x2993: 0x2000, 0x2994: 0x0024, 0x2995: 0x0024, 0x2996: 0x0024, 0x2997: 0x0024, + 0x299a: 0x0024, 0x299b: 0x0024, 0x299c: 0x2000, 0x299d: 0x2000, + 0x299e: 0x2000, 0x299f: 0x2000, 0x29a0: 0x0024, + 0x29a4: 0x2000, // Block 0xa7, offset 0x29c0 - 0x29f1: 0x0004, 0x29f2: 0x0004, 0x29f3: 0x0004, 0x29f4: 0x0004, 0x29f5: 0x0004, - 0x29f6: 0x0004, 0x29fa: 0x0004, - 0x29fc: 0x0004, 0x29fd: 0x0004, 0x29ff: 0x0004, + 0x29c0: 0x0010, 0x29c1: 0x0024, 0x29c2: 0x0024, 0x29c3: 0x0024, 0x29c4: 0x0024, 0x29c5: 0x0024, + 0x29c6: 0x0024, 0x29c7: 0x0024, 0x29c8: 0x0024, 0x29c9: 0x0024, 0x29ca: 0x0024, 0x29cb: 0x0010, + 0x29cc: 0x0010, 0x29cd: 0x0010, 0x29ce: 0x0010, 0x29cf: 0x0010, 0x29d0: 0x0010, 0x29d1: 0x0010, + 0x29d2: 0x0010, 0x29d3: 0x0010, 0x29d4: 0x0010, 0x29d5: 0x0010, 0x29d6: 0x0010, 0x29d7: 0x0010, + 0x29d8: 0x0010, 0x29d9: 0x0010, 0x29da: 0x0010, 0x29db: 0x0010, 0x29dc: 0x0010, 0x29dd: 0x0010, + 0x29de: 0x0010, 0x29df: 0x0010, 0x29e0: 0x0010, 0x29e1: 0x0010, 0x29e2: 0x0010, 0x29e3: 0x0010, + 0x29e4: 0x0010, 0x29e5: 0x0010, 0x29e6: 0x0010, 0x29e7: 0x0010, 0x29e8: 0x0010, 0x29e9: 0x0010, + 0x29ea: 0x0010, 0x29eb: 0x0010, 0x29ec: 0x0010, 0x29ed: 0x0010, 0x29ee: 0x0010, 0x29ef: 0x0010, + 0x29f0: 0x0010, 0x29f1: 0x0010, 0x29f2: 0x0010, 0x29f3: 0x0024, 0x29f4: 0x0024, 0x29f5: 0x0024, + 0x29f6: 0x0024, 0x29f7: 0x0024, 0x29f8: 0x0024, 0x29f9: 0x2000, 0x29fb: 0x0024, + 0x29fc: 0x0024, 0x29fd: 0x0024, 0x29fe: 0x0024, // Block 0xa8, offset 
0x2a00 - 0x2a00: 0x0004, 0x2a01: 0x0004, 0x2a02: 0x0004, 0x2a03: 0x0004, 0x2a04: 0x0004, 0x2a05: 0x0004, - 0x2a06: 0x0100, 0x2a07: 0x0004, + 0x2a07: 0x0044, + 0x2a10: 0x0010, 0x2a11: 0x0024, + 0x2a12: 0x0024, 0x2a13: 0x0024, 0x2a14: 0x0024, 0x2a15: 0x0024, 0x2a16: 0x0024, 0x2a17: 0x2000, + 0x2a18: 0x2000, 0x2a19: 0x0024, 0x2a1a: 0x0024, 0x2a1b: 0x0024, 0x2a1c: 0x0010, 0x2a1d: 0x0010, + 0x2a1e: 0x0010, 0x2a1f: 0x0010, 0x2a20: 0x0010, 0x2a21: 0x0010, 0x2a22: 0x0010, 0x2a23: 0x0010, + 0x2a24: 0x0010, 0x2a25: 0x0010, 0x2a26: 0x0010, 0x2a27: 0x0010, 0x2a28: 0x0010, 0x2a29: 0x0010, + 0x2a2a: 0x0010, 0x2a2b: 0x0010, 0x2a2c: 0x0010, 0x2a2d: 0x0010, 0x2a2e: 0x0010, 0x2a2f: 0x0010, + 0x2a30: 0x0010, 0x2a31: 0x0010, 0x2a32: 0x0010, 0x2a33: 0x0010, 0x2a34: 0x0010, 0x2a35: 0x0010, + 0x2a36: 0x0010, 0x2a37: 0x0010, 0x2a38: 0x0010, 0x2a39: 0x0010, 0x2a3a: 0x0010, 0x2a3b: 0x0010, + 0x2a3c: 0x0010, 0x2a3d: 0x0010, 0x2a3e: 0x0010, 0x2a3f: 0x0010, // Block 0xa9, offset 0x2a40 - 0x2a4a: 0x0400, 0x2a4b: 0x0400, - 0x2a4c: 0x0400, 0x2a4d: 0x0400, 0x2a4e: 0x0400, 0x2a50: 0x0004, 0x2a51: 0x0004, - 0x2a53: 0x0400, 0x2a54: 0x0400, 0x2a55: 0x0004, 0x2a56: 0x0400, 0x2a57: 0x0004, + 0x2a40: 0x0010, 0x2a41: 0x0010, 0x2a42: 0x0010, 0x2a43: 0x0010, 0x2a44: 0x0800, 0x2a45: 0x0800, + 0x2a46: 0x0800, 0x2a47: 0x0800, 0x2a48: 0x0800, 0x2a49: 0x0800, 0x2a4a: 0x0024, 0x2a4b: 0x0024, + 0x2a4c: 0x0024, 0x2a4d: 0x0024, 0x2a4e: 0x0024, 0x2a4f: 0x0024, 0x2a50: 0x0024, 0x2a51: 0x0024, + 0x2a52: 0x0024, 0x2a53: 0x0024, 0x2a54: 0x0024, 0x2a55: 0x0024, 0x2a56: 0x0024, 0x2a57: 0x2000, + 0x2a58: 0x0024, 0x2a59: 0x0044, // Block 0xaa, offset 0x2a80 - 0x2ab3: 0x0004, 0x2ab4: 0x0004, 0x2ab5: 0x0400, - 0x2ab6: 0x0400, + 0x2aa0: 0x0024, 0x2aa1: 0x2000, 0x2aa2: 0x0024, 0x2aa3: 0x0024, + 0x2aa4: 0x0024, 0x2aa5: 0x2000, 0x2aa6: 0x0024, 0x2aa7: 0x2000, // Block 0xab, offset 0x2ac0 - 0x2ac0: 0x0004, 0x2ac1: 0x0004, 0x2ac2: 0x0100, 0x2ac3: 0x0400, - 0x2af4: 0x0400, 0x2af5: 0x0400, - 0x2af6: 0x0004, 0x2af7: 0x0004, 0x2af8: 
0x0004, 0x2af9: 0x0004, 0x2afa: 0x0004, - 0x2afe: 0x0400, 0x2aff: 0x0400, + 0x2aef: 0x2000, + 0x2af0: 0x0024, 0x2af1: 0x0024, 0x2af2: 0x0024, 0x2af3: 0x0024, 0x2af4: 0x0024, 0x2af5: 0x0024, + 0x2af6: 0x0024, 0x2af8: 0x0024, 0x2af9: 0x0024, 0x2afa: 0x0024, 0x2afb: 0x0024, + 0x2afc: 0x0024, 0x2afd: 0x0024, 0x2afe: 0x2000, 0x2aff: 0x0024, // Block 0xac, offset 0x2b00 - 0x2b00: 0x0004, 0x2b01: 0x0400, 0x2b02: 0x0004, + 0x2b12: 0x0024, 0x2b13: 0x0024, 0x2b14: 0x0024, 0x2b15: 0x0024, 0x2b16: 0x0024, 0x2b17: 0x0024, + 0x2b18: 0x0024, 0x2b19: 0x0024, 0x2b1a: 0x0024, 0x2b1b: 0x0024, 0x2b1c: 0x0024, 0x2b1d: 0x0024, + 0x2b1e: 0x0024, 0x2b1f: 0x0024, 0x2b20: 0x0024, 0x2b21: 0x0024, 0x2b22: 0x0024, 0x2b23: 0x0024, + 0x2b24: 0x0024, 0x2b25: 0x0024, 0x2b26: 0x0024, 0x2b27: 0x0024, 0x2b29: 0x2000, + 0x2b2a: 0x0024, 0x2b2b: 0x0024, 0x2b2c: 0x0024, 0x2b2d: 0x0024, 0x2b2e: 0x0024, 0x2b2f: 0x0024, + 0x2b30: 0x0024, 0x2b31: 0x2000, 0x2b32: 0x0024, 0x2b33: 0x0024, 0x2b34: 0x2000, 0x2b35: 0x0024, + 0x2b36: 0x0024, // Block 0xad, offset 0x2b40 - 0x2b70: 0x0002, 0x2b71: 0x0002, 0x2b72: 0x0002, 0x2b73: 0x0002, 0x2b74: 0x0002, 0x2b75: 0x0002, - 0x2b76: 0x0002, 0x2b77: 0x0002, 0x2b78: 0x0002, 0x2b79: 0x0002, 0x2b7a: 0x0002, 0x2b7b: 0x0002, - 0x2b7c: 0x0002, 0x2b7d: 0x0002, 0x2b7e: 0x0002, 0x2b7f: 0x0002, + 0x2b71: 0x0024, 0x2b72: 0x0024, 0x2b73: 0x0024, 0x2b74: 0x0024, 0x2b75: 0x0024, + 0x2b76: 0x0024, 0x2b7a: 0x0024, + 0x2b7c: 0x0024, 0x2b7d: 0x0024, 0x2b7f: 0x0024, // Block 0xae, offset 0x2b80 - 0x2b80: 0x0004, - 0x2b87: 0x0004, 0x2b88: 0x0004, 0x2b89: 0x0004, 0x2b8a: 0x0004, 0x2b8b: 0x0004, - 0x2b8c: 0x0004, 0x2b8d: 0x0004, 0x2b8e: 0x0004, 0x2b8f: 0x0004, 0x2b90: 0x0004, 0x2b91: 0x0004, - 0x2b92: 0x0004, 0x2b93: 0x0004, 0x2b94: 0x0004, 0x2b95: 0x0004, + 0x2b80: 0x0024, 0x2b81: 0x0024, 0x2b82: 0x0024, 0x2b83: 0x0024, 0x2b84: 0x0024, 0x2b85: 0x0024, + 0x2b86: 0x0800, 0x2b87: 0x0024, // Block 0xaf, offset 0x2bc0 - 0x2bf0: 0x0004, 0x2bf1: 0x0004, 0x2bf2: 0x0004, 0x2bf3: 0x0004, 0x2bf4: 0x0004, 
+ 0x2bca: 0x2000, 0x2bcb: 0x2000, + 0x2bcc: 0x2000, 0x2bcd: 0x2000, 0x2bce: 0x2000, 0x2bd0: 0x0024, 0x2bd1: 0x0024, + 0x2bd3: 0x2000, 0x2bd4: 0x2000, 0x2bd5: 0x0024, 0x2bd6: 0x2000, 0x2bd7: 0x0024, // Block 0xb0, offset 0x2c00 - 0x2c30: 0x0004, 0x2c31: 0x0004, 0x2c32: 0x0004, 0x2c33: 0x0004, 0x2c34: 0x0004, 0x2c35: 0x0004, - 0x2c36: 0x0004, + 0x2c33: 0x0024, 0x2c34: 0x0024, 0x2c35: 0x2000, + 0x2c36: 0x2000, // Block 0xb1, offset 0x2c40 - 0x2c4f: 0x0004, 0x2c51: 0x0400, - 0x2c52: 0x0400, 0x2c53: 0x0400, 0x2c54: 0x0400, 0x2c55: 0x0400, 0x2c56: 0x0400, 0x2c57: 0x0400, - 0x2c58: 0x0400, 0x2c59: 0x0400, 0x2c5a: 0x0400, 0x2c5b: 0x0400, 0x2c5c: 0x0400, 0x2c5d: 0x0400, - 0x2c5e: 0x0400, 0x2c5f: 0x0400, 0x2c60: 0x0400, 0x2c61: 0x0400, 0x2c62: 0x0400, 0x2c63: 0x0400, - 0x2c64: 0x0400, 0x2c65: 0x0400, 0x2c66: 0x0400, 0x2c67: 0x0400, 0x2c68: 0x0400, 0x2c69: 0x0400, - 0x2c6a: 0x0400, 0x2c6b: 0x0400, 0x2c6c: 0x0400, 0x2c6d: 0x0400, 0x2c6e: 0x0400, 0x2c6f: 0x0400, - 0x2c70: 0x0400, 0x2c71: 0x0400, 0x2c72: 0x0400, 0x2c73: 0x0400, 0x2c74: 0x0400, 0x2c75: 0x0400, - 0x2c76: 0x0400, 0x2c77: 0x0400, 0x2c78: 0x0400, 0x2c79: 0x0400, 0x2c7a: 0x0400, 0x2c7b: 0x0400, - 0x2c7c: 0x0400, 0x2c7d: 0x0400, 0x2c7e: 0x0400, 0x2c7f: 0x0400, + 0x2c40: 0x0024, 0x2c41: 0x0024, 0x2c42: 0x0800, 0x2c43: 0x2000, 0x2c44: 0x0010, 0x2c45: 0x0010, + 0x2c46: 0x0010, 0x2c47: 0x0010, 0x2c48: 0x0010, 0x2c49: 0x0010, 0x2c4a: 0x0010, 0x2c4b: 0x0010, + 0x2c4c: 0x0010, 0x2c4d: 0x0010, 0x2c4e: 0x0010, 0x2c4f: 0x0010, 0x2c50: 0x0010, + 0x2c52: 0x0010, 0x2c53: 0x0010, 0x2c54: 0x0010, 0x2c55: 0x0010, 0x2c56: 0x0010, 0x2c57: 0x0010, + 0x2c58: 0x0010, 0x2c59: 0x0010, 0x2c5a: 0x0010, 0x2c5b: 0x0010, 0x2c5c: 0x0010, 0x2c5d: 0x0010, + 0x2c5e: 0x0010, 0x2c5f: 0x0010, 0x2c60: 0x0010, 0x2c61: 0x0010, 0x2c62: 0x0010, 0x2c63: 0x0010, + 0x2c64: 0x0010, 0x2c65: 0x0010, 0x2c66: 0x0010, 0x2c67: 0x0010, 0x2c68: 0x0010, 0x2c69: 0x0010, + 0x2c6a: 0x0010, 0x2c6b: 0x0010, 0x2c6c: 0x0010, 0x2c6d: 0x0010, 0x2c6e: 0x0010, 0x2c6f: 0x0010, + 
0x2c70: 0x0010, 0x2c71: 0x0010, 0x2c72: 0x0010, 0x2c73: 0x0010, 0x2c74: 0x2000, 0x2c75: 0x2000, + 0x2c76: 0x0024, 0x2c77: 0x0024, 0x2c78: 0x0024, 0x2c79: 0x0024, 0x2c7a: 0x0024, + 0x2c7e: 0x2000, 0x2c7f: 0x2000, // Block 0xb2, offset 0x2c80 - 0x2c80: 0x0400, 0x2c81: 0x0400, 0x2c82: 0x0400, 0x2c83: 0x0400, 0x2c84: 0x0400, 0x2c85: 0x0400, - 0x2c86: 0x0400, 0x2c87: 0x0400, - 0x2c8f: 0x0004, 0x2c90: 0x0004, 0x2c91: 0x0004, - 0x2c92: 0x0004, + 0x2c80: 0x0024, 0x2c81: 0x0024, 0x2c82: 0x0044, + 0x2c9a: 0x0024, // Block 0xb3, offset 0x2cc0 - 0x2ce4: 0x0004, - 0x2cf0: 0x0400, 0x2cf1: 0x0400, + 0x2cf0: 0x0002, 0x2cf1: 0x0002, 0x2cf2: 0x0002, 0x2cf3: 0x0002, 0x2cf4: 0x0002, 0x2cf5: 0x0002, + 0x2cf6: 0x0002, 0x2cf7: 0x0002, 0x2cf8: 0x0002, 0x2cf9: 0x0002, 0x2cfa: 0x0002, 0x2cfb: 0x0002, + 0x2cfc: 0x0002, 0x2cfd: 0x0002, 0x2cfe: 0x0002, 0x2cff: 0x0002, // Block 0xb4, offset 0x2d00 - 0x2d1d: 0x0004, - 0x2d1e: 0x0004, 0x2d20: 0x0002, 0x2d21: 0x0002, 0x2d22: 0x0002, 0x2d23: 0x0002, + 0x2d00: 0x0024, + 0x2d07: 0x0024, 0x2d08: 0x0024, 0x2d09: 0x0024, 0x2d0a: 0x0024, 0x2d0b: 0x0024, + 0x2d0c: 0x0024, 0x2d0d: 0x0024, 0x2d0e: 0x0024, 0x2d0f: 0x0024, 0x2d10: 0x0024, 0x2d11: 0x0024, + 0x2d12: 0x0024, 0x2d13: 0x0024, 0x2d14: 0x0024, 0x2d15: 0x0024, // Block 0xb5, offset 0x2d40 - 0x2d40: 0x0004, 0x2d41: 0x0004, 0x2d42: 0x0004, 0x2d43: 0x0004, 0x2d44: 0x0004, 0x2d45: 0x0004, - 0x2d46: 0x0004, 0x2d47: 0x0004, 0x2d48: 0x0004, 0x2d49: 0x0004, 0x2d4a: 0x0004, 0x2d4b: 0x0004, - 0x2d4c: 0x0004, 0x2d4d: 0x0004, 0x2d4e: 0x0004, 0x2d4f: 0x0004, 0x2d50: 0x0004, 0x2d51: 0x0004, - 0x2d52: 0x0004, 0x2d53: 0x0004, 0x2d54: 0x0004, 0x2d55: 0x0004, 0x2d56: 0x0004, 0x2d57: 0x0004, - 0x2d58: 0x0004, 0x2d59: 0x0004, 0x2d5a: 0x0004, 0x2d5b: 0x0004, 0x2d5c: 0x0004, 0x2d5d: 0x0004, - 0x2d5e: 0x0004, 0x2d5f: 0x0004, 0x2d60: 0x0004, 0x2d61: 0x0004, 0x2d62: 0x0004, 0x2d63: 0x0004, - 0x2d64: 0x0004, 0x2d65: 0x0004, 0x2d66: 0x0004, 0x2d67: 0x0004, 0x2d68: 0x0004, 0x2d69: 0x0004, - 0x2d6a: 0x0004, 0x2d6b: 0x0004, 
0x2d6c: 0x0004, 0x2d6d: 0x0004, - 0x2d70: 0x0004, 0x2d71: 0x0004, 0x2d72: 0x0004, 0x2d73: 0x0004, 0x2d74: 0x0004, 0x2d75: 0x0004, - 0x2d76: 0x0004, 0x2d77: 0x0004, 0x2d78: 0x0004, 0x2d79: 0x0004, 0x2d7a: 0x0004, 0x2d7b: 0x0004, - 0x2d7c: 0x0004, 0x2d7d: 0x0004, 0x2d7e: 0x0004, 0x2d7f: 0x0004, + 0x2d5e: 0x0024, 0x2d5f: 0x0024, 0x2d60: 0x0024, 0x2d61: 0x0024, 0x2d62: 0x0024, 0x2d63: 0x0024, + 0x2d64: 0x0024, 0x2d65: 0x0024, 0x2d66: 0x0024, 0x2d67: 0x0024, 0x2d68: 0x0024, 0x2d69: 0x0024, + 0x2d6a: 0x2000, 0x2d6b: 0x2000, 0x2d6c: 0x2000, 0x2d6d: 0x0024, 0x2d6e: 0x0024, 0x2d6f: 0x0024, // Block 0xb6, offset 0x2d80 - 0x2d80: 0x0004, 0x2d81: 0x0004, 0x2d82: 0x0004, 0x2d83: 0x0004, 0x2d84: 0x0004, 0x2d85: 0x0004, - 0x2d86: 0x0004, + 0x2db0: 0x0024, 0x2db1: 0x0024, 0x2db2: 0x0024, 0x2db3: 0x0024, 0x2db4: 0x0024, // Block 0xb7, offset 0x2dc0 - 0x2de5: 0x0004, 0x2de6: 0x0400, 0x2de7: 0x0004, 0x2de8: 0x0004, 0x2de9: 0x0004, - 0x2ded: 0x0400, 0x2dee: 0x0004, 0x2def: 0x0004, - 0x2df0: 0x0004, 0x2df1: 0x0004, 0x2df2: 0x0004, 0x2df3: 0x0002, 0x2df4: 0x0002, 0x2df5: 0x0002, - 0x2df6: 0x0002, 0x2df7: 0x0002, 0x2df8: 0x0002, 0x2df9: 0x0002, 0x2dfa: 0x0002, 0x2dfb: 0x0004, - 0x2dfc: 0x0004, 0x2dfd: 0x0004, 0x2dfe: 0x0004, 0x2dff: 0x0004, + 0x2df0: 0x0024, 0x2df1: 0x0024, 0x2df2: 0x0024, 0x2df3: 0x0024, 0x2df4: 0x0024, 0x2df5: 0x0024, + 0x2df6: 0x0024, // Block 0xb8, offset 0x2e00 - 0x2e00: 0x0004, 0x2e01: 0x0004, 0x2e02: 0x0004, 0x2e05: 0x0004, - 0x2e06: 0x0004, 0x2e07: 0x0004, 0x2e08: 0x0004, 0x2e09: 0x0004, 0x2e0a: 0x0004, 0x2e0b: 0x0004, - 0x2e2a: 0x0004, 0x2e2b: 0x0004, 0x2e2c: 0x0004, 0x2e2d: 0x0004, + 0x2e23: 0x8000, + 0x2e27: 0x8000, 0x2e28: 0x8000, 0x2e29: 0x8000, + 0x2e2a: 0x8000, // Block 0xb9, offset 0x2e40 - 0x2e42: 0x0004, 0x2e43: 0x0004, 0x2e44: 0x0004, + 0x2e4f: 0x0024, 0x2e51: 0x2000, + 0x2e52: 0x2000, 0x2e53: 0x2000, 0x2e54: 0x2000, 0x2e55: 0x2000, 0x2e56: 0x2000, 0x2e57: 0x2000, + 0x2e58: 0x2000, 0x2e59: 0x2000, 0x2e5a: 0x2000, 0x2e5b: 0x2000, 0x2e5c: 0x2000, 0x2e5d: 
0x2000, + 0x2e5e: 0x2000, 0x2e5f: 0x2000, 0x2e60: 0x2000, 0x2e61: 0x2000, 0x2e62: 0x2000, 0x2e63: 0x2000, + 0x2e64: 0x2000, 0x2e65: 0x2000, 0x2e66: 0x2000, 0x2e67: 0x2000, 0x2e68: 0x2000, 0x2e69: 0x2000, + 0x2e6a: 0x2000, 0x2e6b: 0x2000, 0x2e6c: 0x2000, 0x2e6d: 0x2000, 0x2e6e: 0x2000, 0x2e6f: 0x2000, + 0x2e70: 0x2000, 0x2e71: 0x2000, 0x2e72: 0x2000, 0x2e73: 0x2000, 0x2e74: 0x2000, 0x2e75: 0x2000, + 0x2e76: 0x2000, 0x2e77: 0x2000, 0x2e78: 0x2000, 0x2e79: 0x2000, 0x2e7a: 0x2000, 0x2e7b: 0x2000, + 0x2e7c: 0x2000, 0x2e7d: 0x2000, 0x2e7e: 0x2000, 0x2e7f: 0x2000, // Block 0xba, offset 0x2e80 - 0x2e80: 0x0004, 0x2e81: 0x0004, 0x2e82: 0x0004, 0x2e83: 0x0004, 0x2e84: 0x0004, 0x2e85: 0x0004, - 0x2e86: 0x0004, 0x2e87: 0x0004, 0x2e88: 0x0004, 0x2e89: 0x0004, 0x2e8a: 0x0004, 0x2e8b: 0x0004, - 0x2e8c: 0x0004, 0x2e8d: 0x0004, 0x2e8e: 0x0004, 0x2e8f: 0x0004, 0x2e90: 0x0004, 0x2e91: 0x0004, - 0x2e92: 0x0004, 0x2e93: 0x0004, 0x2e94: 0x0004, 0x2e95: 0x0004, 0x2e96: 0x0004, 0x2e97: 0x0004, - 0x2e98: 0x0004, 0x2e99: 0x0004, 0x2e9a: 0x0004, 0x2e9b: 0x0004, 0x2e9c: 0x0004, 0x2e9d: 0x0004, - 0x2e9e: 0x0004, 0x2e9f: 0x0004, 0x2ea0: 0x0004, 0x2ea1: 0x0004, 0x2ea2: 0x0004, 0x2ea3: 0x0004, - 0x2ea4: 0x0004, 0x2ea5: 0x0004, 0x2ea6: 0x0004, 0x2ea7: 0x0004, 0x2ea8: 0x0004, 0x2ea9: 0x0004, - 0x2eaa: 0x0004, 0x2eab: 0x0004, 0x2eac: 0x0004, 0x2ead: 0x0004, 0x2eae: 0x0004, 0x2eaf: 0x0004, - 0x2eb0: 0x0004, 0x2eb1: 0x0004, 0x2eb2: 0x0004, 0x2eb3: 0x0004, 0x2eb4: 0x0004, 0x2eb5: 0x0004, - 0x2eb6: 0x0004, 0x2ebb: 0x0004, - 0x2ebc: 0x0004, 0x2ebd: 0x0004, 0x2ebe: 0x0004, 0x2ebf: 0x0004, + 0x2e80: 0x2000, 0x2e81: 0x2000, 0x2e82: 0x2000, 0x2e83: 0x2000, 0x2e84: 0x2000, 0x2e85: 0x2000, + 0x2e86: 0x2000, 0x2e87: 0x2000, + 0x2e8f: 0x0024, 0x2e90: 0x0024, 0x2e91: 0x0024, + 0x2e92: 0x0024, // Block 0xbb, offset 0x2ec0 - 0x2ec0: 0x0004, 0x2ec1: 0x0004, 0x2ec2: 0x0004, 0x2ec3: 0x0004, 0x2ec4: 0x0004, 0x2ec5: 0x0004, - 0x2ec6: 0x0004, 0x2ec7: 0x0004, 0x2ec8: 0x0004, 0x2ec9: 0x0004, 0x2eca: 0x0004, 0x2ecb: 0x0004, 
- 0x2ecc: 0x0004, 0x2ecd: 0x0004, 0x2ece: 0x0004, 0x2ecf: 0x0004, 0x2ed0: 0x0004, 0x2ed1: 0x0004, - 0x2ed2: 0x0004, 0x2ed3: 0x0004, 0x2ed4: 0x0004, 0x2ed5: 0x0004, 0x2ed6: 0x0004, 0x2ed7: 0x0004, - 0x2ed8: 0x0004, 0x2ed9: 0x0004, 0x2eda: 0x0004, 0x2edb: 0x0004, 0x2edc: 0x0004, 0x2edd: 0x0004, - 0x2ede: 0x0004, 0x2edf: 0x0004, 0x2ee0: 0x0004, 0x2ee1: 0x0004, 0x2ee2: 0x0004, 0x2ee3: 0x0004, - 0x2ee4: 0x0004, 0x2ee5: 0x0004, 0x2ee6: 0x0004, 0x2ee7: 0x0004, 0x2ee8: 0x0004, 0x2ee9: 0x0004, - 0x2eea: 0x0004, 0x2eeb: 0x0004, 0x2eec: 0x0004, - 0x2ef5: 0x0004, + 0x2ee4: 0x0024, + 0x2ef0: 0x0024, 0x2ef1: 0x0024, // Block 0xbc, offset 0x2f00 - 0x2f04: 0x0004, - 0x2f1b: 0x0004, 0x2f1c: 0x0004, 0x2f1d: 0x0004, - 0x2f1e: 0x0004, 0x2f1f: 0x0004, 0x2f21: 0x0004, 0x2f22: 0x0004, 0x2f23: 0x0004, - 0x2f24: 0x0004, 0x2f25: 0x0004, 0x2f26: 0x0004, 0x2f27: 0x0004, 0x2f28: 0x0004, 0x2f29: 0x0004, - 0x2f2a: 0x0004, 0x2f2b: 0x0004, 0x2f2c: 0x0004, 0x2f2d: 0x0004, 0x2f2e: 0x0004, 0x2f2f: 0x0004, + 0x2f1d: 0x0024, + 0x2f1e: 0x0024, 0x2f20: 0x0002, 0x2f21: 0x0002, 0x2f22: 0x0002, 0x2f23: 0x0002, // Block 0xbd, offset 0x2f40 - 0x2f40: 0x0004, 0x2f41: 0x0004, 0x2f42: 0x0004, 0x2f43: 0x0004, 0x2f44: 0x0004, 0x2f45: 0x0004, - 0x2f46: 0x0004, 0x2f48: 0x0004, 0x2f49: 0x0004, 0x2f4a: 0x0004, 0x2f4b: 0x0004, - 0x2f4c: 0x0004, 0x2f4d: 0x0004, 0x2f4e: 0x0004, 0x2f4f: 0x0004, 0x2f50: 0x0004, 0x2f51: 0x0004, - 0x2f52: 0x0004, 0x2f53: 0x0004, 0x2f54: 0x0004, 0x2f55: 0x0004, 0x2f56: 0x0004, 0x2f57: 0x0004, - 0x2f58: 0x0004, 0x2f5b: 0x0004, 0x2f5c: 0x0004, 0x2f5d: 0x0004, - 0x2f5e: 0x0004, 0x2f5f: 0x0004, 0x2f60: 0x0004, 0x2f61: 0x0004, 0x2f63: 0x0004, - 0x2f64: 0x0004, 0x2f66: 0x0004, 0x2f67: 0x0004, 0x2f68: 0x0004, 0x2f69: 0x0004, - 0x2f6a: 0x0004, + 0x2f40: 0x0024, 0x2f41: 0x0024, 0x2f42: 0x0024, 0x2f43: 0x0024, 0x2f44: 0x0024, 0x2f45: 0x0024, + 0x2f46: 0x0024, 0x2f47: 0x0024, 0x2f48: 0x0024, 0x2f49: 0x0024, 0x2f4a: 0x0024, 0x2f4b: 0x0024, + 0x2f4c: 0x0024, 0x2f4d: 0x0024, 0x2f4e: 0x0024, 0x2f4f: 0x0024, 
0x2f50: 0x0024, 0x2f51: 0x0024, + 0x2f52: 0x0024, 0x2f53: 0x0024, 0x2f54: 0x0024, 0x2f55: 0x0024, 0x2f56: 0x0024, 0x2f57: 0x0024, + 0x2f58: 0x0024, 0x2f59: 0x0024, 0x2f5a: 0x0024, 0x2f5b: 0x0024, 0x2f5c: 0x0024, 0x2f5d: 0x0024, + 0x2f5e: 0x0024, 0x2f5f: 0x0024, 0x2f60: 0x0024, 0x2f61: 0x0024, 0x2f62: 0x0024, 0x2f63: 0x0024, + 0x2f64: 0x0024, 0x2f65: 0x0024, 0x2f66: 0x0024, 0x2f67: 0x0024, 0x2f68: 0x0024, 0x2f69: 0x0024, + 0x2f6a: 0x0024, 0x2f6b: 0x0024, 0x2f6c: 0x0024, 0x2f6d: 0x0024, + 0x2f70: 0x0024, 0x2f71: 0x0024, 0x2f72: 0x0024, 0x2f73: 0x0024, 0x2f74: 0x0024, 0x2f75: 0x0024, + 0x2f76: 0x0024, 0x2f77: 0x0024, 0x2f78: 0x0024, 0x2f79: 0x0024, 0x2f7a: 0x0024, 0x2f7b: 0x0024, + 0x2f7c: 0x0024, 0x2f7d: 0x0024, 0x2f7e: 0x0024, 0x2f7f: 0x0024, // Block 0xbe, offset 0x2f80 - 0x2f8f: 0x0004, + 0x2f80: 0x0024, 0x2f81: 0x0024, 0x2f82: 0x0024, 0x2f83: 0x0024, 0x2f84: 0x0024, 0x2f85: 0x0024, + 0x2f86: 0x0024, // Block 0xbf, offset 0x2fc0 - 0x2fee: 0x0004, + 0x2fe5: 0x0024, 0x2fe6: 0x0024, 0x2fe7: 0x0024, 0x2fe8: 0x0024, 0x2fe9: 0x0024, + 0x2fed: 0x0024, 0x2fee: 0x0024, 0x2fef: 0x0024, + 0x2ff0: 0x0024, 0x2ff1: 0x0024, 0x2ff2: 0x0024, 0x2ff3: 0x0002, 0x2ff4: 0x0002, 0x2ff5: 0x0002, + 0x2ff6: 0x0002, 0x2ff7: 0x0002, 0x2ff8: 0x0002, 0x2ff9: 0x0002, 0x2ffa: 0x0002, 0x2ffb: 0x0024, + 0x2ffc: 0x0024, 0x2ffd: 0x0024, 0x2ffe: 0x0024, 0x2fff: 0x0024, // Block 0xc0, offset 0x3000 - 0x302c: 0x0004, 0x302d: 0x0004, 0x302e: 0x0004, 0x302f: 0x0004, + 0x3000: 0x0024, 0x3001: 0x0024, 0x3002: 0x0024, 0x3005: 0x0024, + 0x3006: 0x0024, 0x3007: 0x0024, 0x3008: 0x0024, 0x3009: 0x0024, 0x300a: 0x0024, 0x300b: 0x0024, + 0x302a: 0x0024, 0x302b: 0x0024, 0x302c: 0x0024, 0x302d: 0x0024, // Block 0xc1, offset 0x3040 - 0x3050: 0x0004, 0x3051: 0x0004, - 0x3052: 0x0004, 0x3053: 0x0004, 0x3054: 0x0004, 0x3055: 0x0004, 0x3056: 0x0004, + 0x3042: 0x0024, 0x3043: 0x0024, 0x3044: 0x0024, // Block 0xc2, offset 0x3080 - 0x3084: 0x0004, 0x3085: 0x0004, - 0x3086: 0x0004, 0x3087: 0x0004, 0x3088: 0x0004, 0x3089: 
0x0004, 0x308a: 0x0004, + 0x3080: 0x0024, 0x3081: 0x0024, 0x3082: 0x0024, 0x3083: 0x0024, 0x3084: 0x0024, 0x3085: 0x0024, + 0x3086: 0x0024, 0x3087: 0x0024, 0x3088: 0x0024, 0x3089: 0x0024, 0x308a: 0x0024, 0x308b: 0x0024, + 0x308c: 0x0024, 0x308d: 0x0024, 0x308e: 0x0024, 0x308f: 0x0024, 0x3090: 0x0024, 0x3091: 0x0024, + 0x3092: 0x0024, 0x3093: 0x0024, 0x3094: 0x0024, 0x3095: 0x0024, 0x3096: 0x0024, 0x3097: 0x0024, + 0x3098: 0x0024, 0x3099: 0x0024, 0x309a: 0x0024, 0x309b: 0x0024, 0x309c: 0x0024, 0x309d: 0x0024, + 0x309e: 0x0024, 0x309f: 0x0024, 0x30a0: 0x0024, 0x30a1: 0x0024, 0x30a2: 0x0024, 0x30a3: 0x0024, + 0x30a4: 0x0024, 0x30a5: 0x0024, 0x30a6: 0x0024, 0x30a7: 0x0024, 0x30a8: 0x0024, 0x30a9: 0x0024, + 0x30aa: 0x0024, 0x30ab: 0x0024, 0x30ac: 0x0024, 0x30ad: 0x0024, 0x30ae: 0x0024, 0x30af: 0x0024, + 0x30b0: 0x0024, 0x30b1: 0x0024, 0x30b2: 0x0024, 0x30b3: 0x0024, 0x30b4: 0x0024, 0x30b5: 0x0024, + 0x30b6: 0x0024, 0x30bb: 0x0024, + 0x30bc: 0x0024, 0x30bd: 0x0024, 0x30be: 0x0024, 0x30bf: 0x0024, // Block 0xc3, offset 0x30c0 - 0x30cd: 0x0008, 0x30ce: 0x0008, 0x30cf: 0x0008, - 0x30ef: 0x0008, + 0x30c0: 0x0024, 0x30c1: 0x0024, 0x30c2: 0x0024, 0x30c3: 0x0024, 0x30c4: 0x0024, 0x30c5: 0x0024, + 0x30c6: 0x0024, 0x30c7: 0x0024, 0x30c8: 0x0024, 0x30c9: 0x0024, 0x30ca: 0x0024, 0x30cb: 0x0024, + 0x30cc: 0x0024, 0x30cd: 0x0024, 0x30ce: 0x0024, 0x30cf: 0x0024, 0x30d0: 0x0024, 0x30d1: 0x0024, + 0x30d2: 0x0024, 0x30d3: 0x0024, 0x30d4: 0x0024, 0x30d5: 0x0024, 0x30d6: 0x0024, 0x30d7: 0x0024, + 0x30d8: 0x0024, 0x30d9: 0x0024, 0x30da: 0x0024, 0x30db: 0x0024, 0x30dc: 0x0024, 0x30dd: 0x0024, + 0x30de: 0x0024, 0x30df: 0x0024, 0x30e0: 0x0024, 0x30e1: 0x0024, 0x30e2: 0x0024, 0x30e3: 0x0024, + 0x30e4: 0x0024, 0x30e5: 0x0024, 0x30e6: 0x0024, 0x30e7: 0x0024, 0x30e8: 0x0024, 0x30e9: 0x0024, + 0x30ea: 0x0024, 0x30eb: 0x0024, 0x30ec: 0x0024, + 0x30f5: 0x0024, // Block 0xc4, offset 0x3100 - 0x312c: 0x0008, 0x312d: 0x0008, 0x312e: 0x0008, 0x312f: 0x0008, - 0x3130: 0x0008, 0x3131: 0x0008, - 0x313e: 
0x0008, 0x313f: 0x0008, + 0x3104: 0x0024, + 0x311b: 0x0024, 0x311c: 0x0024, 0x311d: 0x0024, + 0x311e: 0x0024, 0x311f: 0x0024, 0x3121: 0x0024, 0x3122: 0x0024, 0x3123: 0x0024, + 0x3124: 0x0024, 0x3125: 0x0024, 0x3126: 0x0024, 0x3127: 0x0024, 0x3128: 0x0024, 0x3129: 0x0024, + 0x312a: 0x0024, 0x312b: 0x0024, 0x312c: 0x0024, 0x312d: 0x0024, 0x312e: 0x0024, 0x312f: 0x0024, // Block 0xc5, offset 0x3140 - 0x314e: 0x0008, 0x3151: 0x0008, - 0x3152: 0x0008, 0x3153: 0x0008, 0x3154: 0x0008, 0x3155: 0x0008, 0x3156: 0x0008, 0x3157: 0x0008, - 0x3158: 0x0008, 0x3159: 0x0008, 0x315a: 0x0008, - 0x316d: 0x0008, 0x316e: 0x0008, 0x316f: 0x0008, - 0x3170: 0x0008, 0x3171: 0x0008, 0x3172: 0x0008, 0x3173: 0x0008, 0x3174: 0x0008, 0x3175: 0x0008, - 0x3176: 0x0008, 0x3177: 0x0008, 0x3178: 0x0008, 0x3179: 0x0008, 0x317a: 0x0008, 0x317b: 0x0008, - 0x317c: 0x0008, 0x317d: 0x0008, 0x317e: 0x0008, 0x317f: 0x0008, + 0x3140: 0x0024, 0x3141: 0x0024, 0x3142: 0x0024, 0x3143: 0x0024, 0x3144: 0x0024, 0x3145: 0x0024, + 0x3146: 0x0024, 0x3148: 0x0024, 0x3149: 0x0024, 0x314a: 0x0024, 0x314b: 0x0024, + 0x314c: 0x0024, 0x314d: 0x0024, 0x314e: 0x0024, 0x314f: 0x0024, 0x3150: 0x0024, 0x3151: 0x0024, + 0x3152: 0x0024, 0x3153: 0x0024, 0x3154: 0x0024, 0x3155: 0x0024, 0x3156: 0x0024, 0x3157: 0x0024, + 0x3158: 0x0024, 0x315b: 0x0024, 0x315c: 0x0024, 0x315d: 0x0024, + 0x315e: 0x0024, 0x315f: 0x0024, 0x3160: 0x0024, 0x3161: 0x0024, 0x3163: 0x0024, + 0x3164: 0x0024, 0x3166: 0x0024, 0x3167: 0x0024, 0x3168: 0x0024, 0x3169: 0x0024, + 0x316a: 0x0024, // Block 0xc6, offset 0x3180 - 0x3180: 0x0008, 0x3181: 0x0008, 0x3182: 0x0008, 0x3183: 0x0008, 0x3184: 0x0008, 0x3185: 0x0008, - 0x3186: 0x0008, 0x3187: 0x0008, 0x3188: 0x0008, 0x3189: 0x0008, 0x318a: 0x0008, 0x318b: 0x0008, - 0x318c: 0x0008, 0x318d: 0x0008, 0x318e: 0x0008, 0x318f: 0x0008, 0x3190: 0x0008, 0x3191: 0x0008, - 0x3192: 0x0008, 0x3193: 0x0008, 0x3194: 0x0008, 0x3195: 0x0008, 0x3196: 0x0008, 0x3197: 0x0008, - 0x3198: 0x0008, 0x3199: 0x0008, 0x319a: 0x0008, 0x319b: 
0x0008, 0x319c: 0x0008, 0x319d: 0x0008, - 0x319e: 0x0008, 0x319f: 0x0008, 0x31a0: 0x0008, 0x31a1: 0x0008, 0x31a2: 0x0008, 0x31a3: 0x0008, - 0x31a4: 0x0008, 0x31a5: 0x0008, 0x31a6: 0x0200, 0x31a7: 0x0200, 0x31a8: 0x0200, 0x31a9: 0x0200, - 0x31aa: 0x0200, 0x31ab: 0x0200, 0x31ac: 0x0200, 0x31ad: 0x0200, 0x31ae: 0x0200, 0x31af: 0x0200, - 0x31b0: 0x0200, 0x31b1: 0x0200, 0x31b2: 0x0200, 0x31b3: 0x0200, 0x31b4: 0x0200, 0x31b5: 0x0200, - 0x31b6: 0x0200, 0x31b7: 0x0200, 0x31b8: 0x0200, 0x31b9: 0x0200, 0x31ba: 0x0200, 0x31bb: 0x0200, - 0x31bc: 0x0200, 0x31bd: 0x0200, 0x31be: 0x0200, 0x31bf: 0x0200, + 0x318f: 0x0024, // Block 0xc7, offset 0x31c0 - 0x31c1: 0x0008, 0x31c2: 0x0008, 0x31c3: 0x0008, 0x31c4: 0x0008, 0x31c5: 0x0008, - 0x31c6: 0x0008, 0x31c7: 0x0008, 0x31c8: 0x0008, 0x31c9: 0x0008, 0x31ca: 0x0008, 0x31cb: 0x0008, - 0x31cc: 0x0008, 0x31cd: 0x0008, 0x31ce: 0x0008, 0x31cf: 0x0008, - 0x31da: 0x0008, - 0x31ef: 0x0008, - 0x31f2: 0x0008, 0x31f3: 0x0008, 0x31f4: 0x0008, 0x31f5: 0x0008, - 0x31f6: 0x0008, 0x31f7: 0x0008, 0x31f8: 0x0008, 0x31f9: 0x0008, 0x31fa: 0x0008, - 0x31fc: 0x0008, 0x31fd: 0x0008, 0x31fe: 0x0008, 0x31ff: 0x0008, + 0x31ee: 0x0024, // Block 0xc8, offset 0x3200 - 0x3209: 0x0008, 0x320a: 0x0008, 0x320b: 0x0008, - 0x320c: 0x0008, 0x320d: 0x0008, 0x320e: 0x0008, 0x320f: 0x0008, 0x3210: 0x0008, 0x3211: 0x0008, - 0x3212: 0x0008, 0x3213: 0x0008, 0x3214: 0x0008, 0x3215: 0x0008, 0x3216: 0x0008, 0x3217: 0x0008, - 0x3218: 0x0008, 0x3219: 0x0008, 0x321a: 0x0008, 0x321b: 0x0008, 0x321c: 0x0008, 0x321d: 0x0008, - 0x321e: 0x0008, 0x321f: 0x0008, 0x3220: 0x0008, 0x3221: 0x0008, 0x3222: 0x0008, 0x3223: 0x0008, - 0x3224: 0x0008, 0x3225: 0x0008, 0x3226: 0x0008, 0x3227: 0x0008, 0x3228: 0x0008, 0x3229: 0x0008, - 0x322a: 0x0008, 0x322b: 0x0008, 0x322c: 0x0008, 0x322d: 0x0008, 0x322e: 0x0008, 0x322f: 0x0008, - 0x3230: 0x0008, 0x3231: 0x0008, 0x3232: 0x0008, 0x3233: 0x0008, 0x3234: 0x0008, 0x3235: 0x0008, - 0x3236: 0x0008, 0x3237: 0x0008, 0x3238: 0x0008, 0x3239: 0x0008, 0x323a: 
0x0008, 0x323b: 0x0008, - 0x323c: 0x0008, 0x323d: 0x0008, 0x323e: 0x0008, 0x323f: 0x0008, + 0x322c: 0x0024, 0x322d: 0x0024, 0x322e: 0x0024, 0x322f: 0x0024, // Block 0xc9, offset 0x3240 - 0x3240: 0x0008, 0x3241: 0x0008, 0x3242: 0x0008, 0x3243: 0x0008, 0x3244: 0x0008, 0x3245: 0x0008, - 0x3246: 0x0008, 0x3247: 0x0008, 0x3248: 0x0008, 0x3249: 0x0008, 0x324a: 0x0008, 0x324b: 0x0008, - 0x324c: 0x0008, 0x324d: 0x0008, 0x324e: 0x0008, 0x324f: 0x0008, 0x3250: 0x0008, 0x3251: 0x0008, - 0x3252: 0x0008, 0x3253: 0x0008, 0x3254: 0x0008, 0x3255: 0x0008, 0x3256: 0x0008, 0x3257: 0x0008, - 0x3258: 0x0008, 0x3259: 0x0008, 0x325a: 0x0008, 0x325b: 0x0008, 0x325c: 0x0008, 0x325d: 0x0008, - 0x325e: 0x0008, 0x325f: 0x0008, 0x3260: 0x0008, 0x3261: 0x0008, 0x3262: 0x0008, 0x3263: 0x0008, - 0x3264: 0x0008, 0x3265: 0x0008, 0x3266: 0x0008, 0x3267: 0x0008, 0x3268: 0x0008, 0x3269: 0x0008, - 0x326a: 0x0008, 0x326b: 0x0008, 0x326c: 0x0008, 0x326d: 0x0008, 0x326e: 0x0008, 0x326f: 0x0008, - 0x3270: 0x0008, 0x3271: 0x0008, 0x3272: 0x0008, 0x3273: 0x0008, 0x3274: 0x0008, 0x3275: 0x0008, - 0x3276: 0x0008, 0x3277: 0x0008, 0x3278: 0x0008, 0x3279: 0x0008, 0x327a: 0x0008, 0x327b: 0x0004, - 0x327c: 0x0004, 0x327d: 0x0004, 0x327e: 0x0004, 0x327f: 0x0004, + 0x326e: 0x0024, 0x326f: 0x0024, // Block 0xca, offset 0x3280 - 0x3280: 0x0008, 0x3281: 0x0008, 0x3282: 0x0008, 0x3283: 0x0008, 0x3284: 0x0008, 0x3285: 0x0008, - 0x3286: 0x0008, 0x3287: 0x0008, 0x3288: 0x0008, 0x3289: 0x0008, 0x328a: 0x0008, 0x328b: 0x0008, - 0x328c: 0x0008, 0x328d: 0x0008, 0x328e: 0x0008, 0x328f: 0x0008, 0x3290: 0x0008, 0x3291: 0x0008, - 0x3292: 0x0008, 0x3293: 0x0008, 0x3294: 0x0008, 0x3295: 0x0008, 0x3296: 0x0008, 0x3297: 0x0008, - 0x3298: 0x0008, 0x3299: 0x0008, 0x329a: 0x0008, 0x329b: 0x0008, 0x329c: 0x0008, 0x329d: 0x0008, - 0x329e: 0x0008, 0x329f: 0x0008, 0x32a0: 0x0008, 0x32a1: 0x0008, 0x32a2: 0x0008, 0x32a3: 0x0008, - 0x32a4: 0x0008, 0x32a5: 0x0008, 0x32a6: 0x0008, 0x32a7: 0x0008, 0x32a8: 0x0008, 0x32a9: 0x0008, - 0x32aa: 0x0008, 
0x32ab: 0x0008, 0x32ac: 0x0008, 0x32ad: 0x0008, 0x32ae: 0x0008, 0x32af: 0x0008, - 0x32b0: 0x0008, 0x32b1: 0x0008, 0x32b2: 0x0008, 0x32b3: 0x0008, 0x32b4: 0x0008, 0x32b5: 0x0008, - 0x32b6: 0x0008, 0x32b7: 0x0008, 0x32b8: 0x0008, 0x32b9: 0x0008, 0x32ba: 0x0008, 0x32bb: 0x0008, - 0x32bc: 0x0008, 0x32bd: 0x0008, + 0x32a3: 0x0024, + 0x32a6: 0x0024, + 0x32ae: 0x0024, 0x32af: 0x0024, + 0x32b5: 0x0024, // Block 0xcb, offset 0x32c0 - 0x32c6: 0x0008, 0x32c7: 0x0008, 0x32c8: 0x0008, 0x32c9: 0x0008, 0x32ca: 0x0008, 0x32cb: 0x0008, - 0x32cc: 0x0008, 0x32cd: 0x0008, 0x32ce: 0x0008, 0x32cf: 0x0008, 0x32d0: 0x0008, 0x32d1: 0x0008, - 0x32d2: 0x0008, 0x32d3: 0x0008, 0x32d4: 0x0008, 0x32d5: 0x0008, 0x32d6: 0x0008, 0x32d7: 0x0008, - 0x32d8: 0x0008, 0x32d9: 0x0008, 0x32da: 0x0008, 0x32db: 0x0008, 0x32dc: 0x0008, 0x32dd: 0x0008, - 0x32de: 0x0008, 0x32df: 0x0008, 0x32e0: 0x0008, 0x32e1: 0x0008, 0x32e2: 0x0008, 0x32e3: 0x0008, - 0x32e4: 0x0008, 0x32e5: 0x0008, 0x32e6: 0x0008, 0x32e7: 0x0008, 0x32e8: 0x0008, 0x32e9: 0x0008, - 0x32ea: 0x0008, 0x32eb: 0x0008, 0x32ec: 0x0008, 0x32ed: 0x0008, 0x32ee: 0x0008, 0x32ef: 0x0008, - 0x32f0: 0x0008, 0x32f1: 0x0008, 0x32f2: 0x0008, 0x32f3: 0x0008, 0x32f4: 0x0008, 0x32f5: 0x0008, - 0x32f6: 0x0008, 0x32f7: 0x0008, 0x32f8: 0x0008, 0x32f9: 0x0008, 0x32fa: 0x0008, 0x32fb: 0x0008, - 0x32fc: 0x0008, 0x32fd: 0x0008, 0x32fe: 0x0008, 0x32ff: 0x0008, + 0x32d0: 0x0024, 0x32d1: 0x0024, + 0x32d2: 0x0024, 0x32d3: 0x0024, 0x32d4: 0x0024, 0x32d5: 0x0024, 0x32d6: 0x0024, // Block 0xcc, offset 0x3300 - 0x3300: 0x0008, 0x3301: 0x0008, 0x3302: 0x0008, 0x3303: 0x0008, 0x3304: 0x0008, 0x3305: 0x0008, - 0x3306: 0x0008, 0x3307: 0x0008, 0x3308: 0x0008, 0x3309: 0x0008, 0x330a: 0x0008, 0x330b: 0x0008, - 0x330c: 0x0008, 0x330d: 0x0008, 0x330e: 0x0008, 0x330f: 0x0008, + 0x3304: 0x0024, 0x3305: 0x0024, + 0x3306: 0x0024, 0x3307: 0x0024, 0x3308: 0x0024, 0x3309: 0x0024, 0x330a: 0x0024, // Block 0xcd, offset 0x3340 - 0x3374: 0x0008, 0x3375: 0x0008, - 0x3376: 0x0008, 0x3377: 0x0008, 
0x3378: 0x0008, 0x3379: 0x0008, 0x337a: 0x0008, 0x337b: 0x0008, - 0x337c: 0x0008, 0x337d: 0x0008, 0x337e: 0x0008, 0x337f: 0x0008, + 0x3344: 0x0008, + 0x336c: 0x0008, 0x336d: 0x0008, 0x336e: 0x0008, 0x336f: 0x0008, // Block 0xce, offset 0x3380 - 0x3395: 0x0008, 0x3396: 0x0008, 0x3397: 0x0008, + 0x3394: 0x0008, 0x3395: 0x0008, 0x3396: 0x0008, 0x3397: 0x0008, 0x3398: 0x0008, 0x3399: 0x0008, 0x339a: 0x0008, 0x339b: 0x0008, 0x339c: 0x0008, 0x339d: 0x0008, - 0x339e: 0x0008, 0x339f: 0x0008, 0x33a0: 0x0008, 0x33a1: 0x0008, 0x33a2: 0x0008, 0x33a3: 0x0008, - 0x33a4: 0x0008, 0x33a5: 0x0008, 0x33a6: 0x0008, 0x33a7: 0x0008, 0x33a8: 0x0008, 0x33a9: 0x0008, - 0x33aa: 0x0008, 0x33ab: 0x0008, 0x33ac: 0x0008, 0x33ad: 0x0008, 0x33ae: 0x0008, 0x33af: 0x0008, - 0x33b0: 0x0008, 0x33b1: 0x0008, 0x33b2: 0x0008, 0x33b3: 0x0008, 0x33b4: 0x0008, 0x33b5: 0x0008, - 0x33b6: 0x0008, 0x33b7: 0x0008, 0x33b8: 0x0008, 0x33b9: 0x0008, 0x33ba: 0x0008, 0x33bb: 0x0008, - 0x33bc: 0x0008, 0x33bd: 0x0008, 0x33be: 0x0008, 0x33bf: 0x0008, + 0x339e: 0x0008, 0x339f: 0x0008, + 0x33af: 0x0008, + 0x33b0: 0x0008, // Block 0xcf, offset 0x33c0 - 0x33cc: 0x0008, 0x33cd: 0x0008, 0x33ce: 0x0008, 0x33cf: 0x0008, + 0x33c0: 0x0008, + 0x33cf: 0x0008, 0x33d0: 0x0008, + 0x33f6: 0x0008, 0x33f7: 0x0008, 0x33f8: 0x0008, 0x33f9: 0x0008, 0x33fa: 0x0008, 0x33fb: 0x0008, + 0x33fc: 0x0008, 0x33fd: 0x0008, 0x33fe: 0x0008, 0x33ff: 0x0008, // Block 0xd0, offset 0x3400 - 0x3408: 0x0008, 0x3409: 0x0008, 0x340a: 0x0008, 0x340b: 0x0008, - 0x340c: 0x0008, 0x340d: 0x0008, 0x340e: 0x0008, 0x340f: 0x0008, - 0x341a: 0x0008, 0x341b: 0x0008, 0x341c: 0x0008, 0x341d: 0x0008, - 0x341e: 0x0008, 0x341f: 0x0008, + 0x3430: 0x0008, 0x3431: 0x0008, + 0x343e: 0x0008, 0x343f: 0x0008, // Block 0xd1, offset 0x3440 - 0x3448: 0x0008, 0x3449: 0x0008, 0x344a: 0x0008, 0x344b: 0x0008, - 0x344c: 0x0008, 0x344d: 0x0008, 0x344e: 0x0008, 0x344f: 0x0008, + 0x344e: 0x0008, 0x3451: 0x0008, + 0x3452: 0x0008, 0x3453: 0x0008, 0x3454: 0x0008, 0x3455: 0x0008, 0x3456: 0x0008, 
0x3457: 0x0008, + 0x3458: 0x0008, 0x3459: 0x0008, 0x345a: 0x0008, 0x346e: 0x0008, 0x346f: 0x0008, 0x3470: 0x0008, 0x3471: 0x0008, 0x3472: 0x0008, 0x3473: 0x0008, 0x3474: 0x0008, 0x3475: 0x0008, 0x3476: 0x0008, 0x3477: 0x0008, 0x3478: 0x0008, 0x3479: 0x0008, 0x347a: 0x0008, 0x347b: 0x0008, 0x347c: 0x0008, 0x347d: 0x0008, 0x347e: 0x0008, 0x347f: 0x0008, // Block 0xd2, offset 0x3480 + 0x3480: 0x0008, 0x3481: 0x0008, 0x3482: 0x0008, 0x3483: 0x0008, 0x3484: 0x0008, 0x3485: 0x0008, + 0x3486: 0x0008, 0x3487: 0x0008, 0x3488: 0x0008, 0x3489: 0x0008, 0x348a: 0x0008, 0x348b: 0x0008, 0x348c: 0x0008, 0x348d: 0x0008, 0x348e: 0x0008, 0x348f: 0x0008, 0x3490: 0x0008, 0x3491: 0x0008, 0x3492: 0x0008, 0x3493: 0x0008, 0x3494: 0x0008, 0x3495: 0x0008, 0x3496: 0x0008, 0x3497: 0x0008, 0x3498: 0x0008, 0x3499: 0x0008, 0x349a: 0x0008, 0x349b: 0x0008, 0x349c: 0x0008, 0x349d: 0x0008, 0x349e: 0x0008, 0x349f: 0x0008, 0x34a0: 0x0008, 0x34a1: 0x0008, 0x34a2: 0x0008, 0x34a3: 0x0008, - 0x34a4: 0x0008, 0x34a5: 0x0008, 0x34a6: 0x0008, 0x34a7: 0x0008, 0x34a8: 0x0008, 0x34a9: 0x0008, - 0x34aa: 0x0008, 0x34ab: 0x0008, 0x34ac: 0x0008, 0x34ad: 0x0008, 0x34ae: 0x0008, 0x34af: 0x0008, - 0x34b0: 0x0008, 0x34b1: 0x0008, 0x34b2: 0x0008, 0x34b3: 0x0008, 0x34b4: 0x0008, 0x34b5: 0x0008, - 0x34b6: 0x0008, 0x34b7: 0x0008, 0x34b8: 0x0008, 0x34b9: 0x0008, 0x34ba: 0x0008, - 0x34bc: 0x0008, 0x34bd: 0x0008, 0x34be: 0x0008, 0x34bf: 0x0008, + 0x34a4: 0x0008, 0x34a5: 0x0008, 0x34a6: 0x1000, 0x34a7: 0x1000, 0x34a8: 0x1000, 0x34a9: 0x1000, + 0x34aa: 0x1000, 0x34ab: 0x1000, 0x34ac: 0x1000, 0x34ad: 0x1000, 0x34ae: 0x1000, 0x34af: 0x1000, + 0x34b0: 0x1000, 0x34b1: 0x1000, 0x34b2: 0x1000, 0x34b3: 0x1000, 0x34b4: 0x1000, 0x34b5: 0x1000, + 0x34b6: 0x1000, 0x34b7: 0x1000, 0x34b8: 0x1000, 0x34b9: 0x1000, 0x34ba: 0x1000, 0x34bb: 0x1000, + 0x34bc: 0x1000, 0x34bd: 0x1000, 0x34be: 0x1000, 0x34bf: 0x1000, // Block 0xd3, offset 0x34c0 - 0x34c0: 0x0008, 0x34c1: 0x0008, 0x34c2: 0x0008, 0x34c3: 0x0008, 0x34c4: 0x0008, 0x34c5: 0x0008, - 0x34c7: 
0x0008, 0x34c8: 0x0008, 0x34c9: 0x0008, 0x34ca: 0x0008, 0x34cb: 0x0008, - 0x34cc: 0x0008, 0x34cd: 0x0008, 0x34ce: 0x0008, 0x34cf: 0x0008, 0x34d0: 0x0008, 0x34d1: 0x0008, - 0x34d2: 0x0008, 0x34d3: 0x0008, 0x34d4: 0x0008, 0x34d5: 0x0008, 0x34d6: 0x0008, 0x34d7: 0x0008, - 0x34d8: 0x0008, 0x34d9: 0x0008, 0x34da: 0x0008, 0x34db: 0x0008, 0x34dc: 0x0008, 0x34dd: 0x0008, - 0x34de: 0x0008, 0x34df: 0x0008, 0x34e0: 0x0008, 0x34e1: 0x0008, 0x34e2: 0x0008, 0x34e3: 0x0008, - 0x34e4: 0x0008, 0x34e5: 0x0008, 0x34e6: 0x0008, 0x34e7: 0x0008, 0x34e8: 0x0008, 0x34e9: 0x0008, - 0x34ea: 0x0008, 0x34eb: 0x0008, 0x34ec: 0x0008, 0x34ed: 0x0008, 0x34ee: 0x0008, 0x34ef: 0x0008, - 0x34f0: 0x0008, 0x34f1: 0x0008, 0x34f2: 0x0008, 0x34f3: 0x0008, 0x34f4: 0x0008, 0x34f5: 0x0008, - 0x34f6: 0x0008, 0x34f7: 0x0008, 0x34f8: 0x0008, 0x34f9: 0x0008, 0x34fa: 0x0008, 0x34fb: 0x0008, + 0x34c1: 0x0008, 0x34c2: 0x0008, 0x34c3: 0x0008, 0x34c4: 0x0008, 0x34c5: 0x0008, + 0x34c6: 0x0008, 0x34c7: 0x0008, 0x34c8: 0x0008, 0x34c9: 0x0008, 0x34ca: 0x0008, 0x34cb: 0x0008, + 0x34cc: 0x0008, 0x34cd: 0x0008, 0x34ce: 0x0008, 0x34cf: 0x0008, + 0x34da: 0x0008, + 0x34ef: 0x0008, + 0x34f2: 0x0008, 0x34f3: 0x0008, 0x34f4: 0x0008, 0x34f5: 0x0008, + 0x34f6: 0x0008, 0x34f7: 0x0008, 0x34f8: 0x0008, 0x34f9: 0x0008, 0x34fa: 0x0008, 0x34fc: 0x0008, 0x34fd: 0x0008, 0x34fe: 0x0008, 0x34ff: 0x0008, // Block 0xd4, offset 0x3500 - 0x3500: 0x0002, 0x3501: 0x0002, 0x3502: 0x0002, 0x3503: 0x0002, 0x3504: 0x0002, 0x3505: 0x0002, - 0x3506: 0x0002, 0x3507: 0x0002, 0x3508: 0x0002, 0x3509: 0x0002, 0x350a: 0x0002, 0x350b: 0x0002, - 0x350c: 0x0002, 0x350d: 0x0002, 0x350e: 0x0002, 0x350f: 0x0002, 0x3510: 0x0002, 0x3511: 0x0002, - 0x3512: 0x0002, 0x3513: 0x0002, 0x3514: 0x0002, 0x3515: 0x0002, 0x3516: 0x0002, 0x3517: 0x0002, - 0x3518: 0x0002, 0x3519: 0x0002, 0x351a: 0x0002, 0x351b: 0x0002, 0x351c: 0x0002, 0x351d: 0x0002, - 0x351e: 0x0002, 0x351f: 0x0002, 0x3520: 0x0004, 0x3521: 0x0004, 0x3522: 0x0004, 0x3523: 0x0004, - 0x3524: 0x0004, 0x3525: 
0x0004, 0x3526: 0x0004, 0x3527: 0x0004, 0x3528: 0x0004, 0x3529: 0x0004, - 0x352a: 0x0004, 0x352b: 0x0004, 0x352c: 0x0004, 0x352d: 0x0004, 0x352e: 0x0004, 0x352f: 0x0004, - 0x3530: 0x0004, 0x3531: 0x0004, 0x3532: 0x0004, 0x3533: 0x0004, 0x3534: 0x0004, 0x3535: 0x0004, - 0x3536: 0x0004, 0x3537: 0x0004, 0x3538: 0x0004, 0x3539: 0x0004, 0x353a: 0x0004, 0x353b: 0x0004, - 0x353c: 0x0004, 0x353d: 0x0004, 0x353e: 0x0004, 0x353f: 0x0004, + 0x3509: 0x0008, 0x350a: 0x0008, 0x350b: 0x0008, + 0x350c: 0x0008, 0x350d: 0x0008, 0x350e: 0x0008, 0x350f: 0x0008, 0x3510: 0x0008, 0x3511: 0x0008, + 0x3512: 0x0008, 0x3513: 0x0008, 0x3514: 0x0008, 0x3515: 0x0008, 0x3516: 0x0008, 0x3517: 0x0008, + 0x3518: 0x0008, 0x3519: 0x0008, 0x351a: 0x0008, 0x351b: 0x0008, 0x351c: 0x0008, 0x351d: 0x0008, + 0x351e: 0x0008, 0x351f: 0x0008, + 0x3526: 0x0008, 0x3527: 0x0008, 0x3528: 0x0008, 0x3529: 0x0008, + 0x352a: 0x0008, 0x352b: 0x0008, 0x352c: 0x0008, 0x352d: 0x0008, 0x352e: 0x0008, 0x352f: 0x0008, + 0x3530: 0x0008, 0x3531: 0x0008, 0x3532: 0x0008, 0x3533: 0x0008, 0x3534: 0x0008, 0x3535: 0x0008, + 0x3536: 0x0008, 0x3537: 0x0008, 0x3538: 0x0008, 0x3539: 0x0008, 0x353a: 0x0008, 0x353b: 0x0008, + 0x353c: 0x0008, 0x353d: 0x0008, 0x353e: 0x0008, 0x353f: 0x0008, // Block 0xd5, offset 0x3540 - 0x3540: 0x0002, 0x3541: 0x0002, 0x3542: 0x0002, 0x3543: 0x0002, 0x3544: 0x0002, 0x3545: 0x0002, - 0x3546: 0x0002, 0x3547: 0x0002, 0x3548: 0x0002, 0x3549: 0x0002, 0x354a: 0x0002, 0x354b: 0x0002, - 0x354c: 0x0002, 0x354d: 0x0002, 0x354e: 0x0002, 0x354f: 0x0002, 0x3550: 0x0002, 0x3551: 0x0002, - 0x3552: 0x0002, 0x3553: 0x0002, 0x3554: 0x0002, 0x3555: 0x0002, 0x3556: 0x0002, 0x3557: 0x0002, - 0x3558: 0x0002, 0x3559: 0x0002, 0x355a: 0x0002, 0x355b: 0x0002, 0x355c: 0x0002, 0x355d: 0x0002, - 0x355e: 0x0002, 0x355f: 0x0002, 0x3560: 0x0002, 0x3561: 0x0002, 0x3562: 0x0002, 0x3563: 0x0002, - 0x3564: 0x0002, 0x3565: 0x0002, 0x3566: 0x0002, 0x3567: 0x0002, 0x3568: 0x0002, 0x3569: 0x0002, - 0x356a: 0x0002, 0x356b: 0x0002, 0x356c: 
0x0002, 0x356d: 0x0002, 0x356e: 0x0002, 0x356f: 0x0002, - 0x3570: 0x0002, 0x3571: 0x0002, 0x3572: 0x0002, 0x3573: 0x0002, 0x3574: 0x0002, 0x3575: 0x0002, - 0x3576: 0x0002, 0x3577: 0x0002, 0x3578: 0x0002, 0x3579: 0x0002, 0x357a: 0x0002, 0x357b: 0x0002, - 0x357c: 0x0002, 0x357d: 0x0002, 0x357e: 0x0002, 0x357f: 0x0002, + 0x3540: 0x0008, 0x3541: 0x0008, 0x3542: 0x0008, 0x3543: 0x0008, 0x3544: 0x0008, 0x3545: 0x0008, + 0x3546: 0x0008, 0x3547: 0x0008, 0x3548: 0x0008, 0x3549: 0x0008, 0x354a: 0x0008, 0x354b: 0x0008, + 0x354c: 0x0008, 0x354d: 0x0008, 0x354e: 0x0008, 0x354f: 0x0008, 0x3550: 0x0008, 0x3551: 0x0008, + 0x3552: 0x0008, 0x3553: 0x0008, 0x3554: 0x0008, 0x3555: 0x0008, 0x3556: 0x0008, 0x3557: 0x0008, + 0x3558: 0x0008, 0x3559: 0x0008, 0x355a: 0x0008, 0x355b: 0x0008, 0x355c: 0x0008, 0x355d: 0x0008, + 0x355e: 0x0008, 0x355f: 0x0008, 0x3560: 0x0008, 0x3561: 0x0008, 0x3562: 0x0008, 0x3563: 0x0008, + 0x3564: 0x0008, 0x3565: 0x0008, 0x3566: 0x0008, 0x3567: 0x0008, 0x3568: 0x0008, 0x3569: 0x0008, + 0x356a: 0x0008, 0x356b: 0x0008, 0x356c: 0x0008, 0x356d: 0x0008, 0x356e: 0x0008, 0x356f: 0x0008, + 0x3570: 0x0008, 0x3571: 0x0008, 0x3572: 0x0008, 0x3573: 0x0008, 0x3574: 0x0008, 0x3575: 0x0008, + 0x3576: 0x0008, 0x3577: 0x0008, 0x3578: 0x0008, 0x3579: 0x0008, 0x357a: 0x0008, 0x357b: 0x0008, + 0x357c: 0x0008, 0x357d: 0x0008, 0x357e: 0x0008, 0x357f: 0x0008, // Block 0xd6, offset 0x3580 - 0x3580: 0x0004, 0x3581: 0x0004, 0x3582: 0x0004, 0x3583: 0x0004, 0x3584: 0x0004, 0x3585: 0x0004, - 0x3586: 0x0004, 0x3587: 0x0004, 0x3588: 0x0004, 0x3589: 0x0004, 0x358a: 0x0004, 0x358b: 0x0004, - 0x358c: 0x0004, 0x358d: 0x0004, 0x358e: 0x0004, 0x358f: 0x0004, 0x3590: 0x0004, 0x3591: 0x0004, - 0x3592: 0x0004, 0x3593: 0x0004, 0x3594: 0x0004, 0x3595: 0x0004, 0x3596: 0x0004, 0x3597: 0x0004, - 0x3598: 0x0004, 0x3599: 0x0004, 0x359a: 0x0004, 0x359b: 0x0004, 0x359c: 0x0004, 0x359d: 0x0004, - 0x359e: 0x0004, 0x359f: 0x0004, 0x35a0: 0x0004, 0x35a1: 0x0004, 0x35a2: 0x0004, 0x35a3: 0x0004, - 0x35a4: 0x0004, 
0x35a5: 0x0004, 0x35a6: 0x0004, 0x35a7: 0x0004, 0x35a8: 0x0004, 0x35a9: 0x0004, - 0x35aa: 0x0004, 0x35ab: 0x0004, 0x35ac: 0x0004, 0x35ad: 0x0004, 0x35ae: 0x0004, 0x35af: 0x0004, - 0x35b0: 0x0002, 0x35b1: 0x0002, 0x35b2: 0x0002, 0x35b3: 0x0002, 0x35b4: 0x0002, 0x35b5: 0x0002, - 0x35b6: 0x0002, 0x35b7: 0x0002, 0x35b8: 0x0002, 0x35b9: 0x0002, 0x35ba: 0x0002, 0x35bb: 0x0002, - 0x35bc: 0x0002, 0x35bd: 0x0002, 0x35be: 0x0002, 0x35bf: 0x0002, + 0x3580: 0x0008, 0x3581: 0x0008, 0x3582: 0x0008, 0x3583: 0x0008, 0x3584: 0x0008, 0x3585: 0x0008, + 0x3586: 0x0008, 0x3587: 0x0008, 0x3588: 0x0008, 0x3589: 0x0008, 0x358a: 0x0008, 0x358b: 0x0008, + 0x358c: 0x0008, 0x358d: 0x0008, 0x358e: 0x0008, 0x358f: 0x0008, 0x3590: 0x0008, 0x3591: 0x0008, + 0x3592: 0x0008, 0x3593: 0x0008, 0x3594: 0x0008, 0x3595: 0x0008, 0x3596: 0x0008, 0x3597: 0x0008, + 0x3598: 0x0008, 0x3599: 0x0008, 0x359a: 0x0008, 0x359b: 0x0008, 0x359c: 0x0008, 0x359d: 0x0008, + 0x359e: 0x0008, 0x359f: 0x0008, 0x35a0: 0x0008, 0x35a1: 0x0008, + 0x35a4: 0x0008, 0x35a5: 0x0008, 0x35a6: 0x0008, 0x35a7: 0x0008, 0x35a8: 0x0008, 0x35a9: 0x0008, + 0x35aa: 0x0008, 0x35ab: 0x0008, 0x35ac: 0x0008, 0x35ad: 0x0008, 0x35ae: 0x0008, 0x35af: 0x0008, + 0x35b0: 0x0008, 0x35b1: 0x0008, 0x35b2: 0x0008, 0x35b3: 0x0008, 0x35b4: 0x0008, 0x35b5: 0x0008, + 0x35b6: 0x0008, 0x35b7: 0x0008, 0x35b8: 0x0008, 0x35b9: 0x0008, 0x35ba: 0x0008, 0x35bb: 0x0008, + 0x35bc: 0x0008, 0x35bd: 0x0008, 0x35be: 0x0008, 0x35bf: 0x0008, + // Block 0xd7, offset 0x35c0 + 0x35c0: 0x0008, 0x35c1: 0x0008, 0x35c2: 0x0008, 0x35c3: 0x0008, 0x35c4: 0x0008, 0x35c5: 0x0008, + 0x35c6: 0x0008, 0x35c7: 0x0008, 0x35c8: 0x0008, 0x35c9: 0x0008, 0x35ca: 0x0008, 0x35cb: 0x0008, + 0x35cc: 0x0008, 0x35cd: 0x0008, 0x35ce: 0x0008, 0x35cf: 0x0008, 0x35d0: 0x0008, 0x35d1: 0x0008, + 0x35d2: 0x0008, 0x35d3: 0x0008, 0x35d6: 0x0008, 0x35d7: 0x0008, + 0x35d9: 0x0008, 0x35da: 0x0008, 0x35db: 0x0008, + 0x35de: 0x0008, 0x35df: 0x0008, 0x35e0: 0x0008, 0x35e1: 0x0008, 0x35e2: 0x0008, 0x35e3: 0x0008, + 
0x35e4: 0x0008, 0x35e5: 0x0008, 0x35e6: 0x0008, 0x35e7: 0x0008, 0x35e8: 0x0008, 0x35e9: 0x0008, + 0x35ea: 0x0008, 0x35eb: 0x0008, 0x35ec: 0x0008, 0x35ed: 0x0008, 0x35ee: 0x0008, 0x35ef: 0x0008, + 0x35f0: 0x0008, 0x35f1: 0x0008, 0x35f2: 0x0008, 0x35f3: 0x0008, 0x35f4: 0x0008, 0x35f5: 0x0008, + 0x35f6: 0x0008, 0x35f7: 0x0008, 0x35f8: 0x0008, 0x35f9: 0x0008, 0x35fa: 0x0008, 0x35fb: 0x0008, + 0x35fc: 0x0008, 0x35fd: 0x0008, 0x35fe: 0x0008, 0x35ff: 0x0008, + // Block 0xd8, offset 0x3600 + 0x3600: 0x0008, 0x3601: 0x0008, 0x3602: 0x0008, 0x3603: 0x0008, 0x3604: 0x0008, 0x3605: 0x0008, + 0x3606: 0x0008, 0x3607: 0x0008, 0x3608: 0x0008, 0x3609: 0x0008, 0x360a: 0x0008, 0x360b: 0x0008, + 0x360c: 0x0008, 0x360d: 0x0008, 0x360e: 0x0008, 0x360f: 0x0008, 0x3610: 0x0008, 0x3611: 0x0008, + 0x3612: 0x0008, 0x3613: 0x0008, 0x3614: 0x0008, 0x3615: 0x0008, 0x3616: 0x0008, 0x3617: 0x0008, + 0x3618: 0x0008, 0x3619: 0x0008, 0x361a: 0x0008, 0x361b: 0x0008, 0x361c: 0x0008, 0x361d: 0x0008, + 0x361e: 0x0008, 0x361f: 0x0008, 0x3620: 0x0008, 0x3621: 0x0008, 0x3622: 0x0008, 0x3623: 0x0008, + 0x3624: 0x0008, 0x3625: 0x0008, 0x3626: 0x0008, 0x3627: 0x0008, 0x3628: 0x0008, 0x3629: 0x0008, + 0x362a: 0x0008, 0x362b: 0x0008, 0x362c: 0x0008, 0x362d: 0x0008, 0x362e: 0x0008, 0x362f: 0x0008, + 0x3630: 0x0008, 0x3633: 0x0008, 0x3634: 0x0008, 0x3635: 0x0008, + 0x3637: 0x0008, 0x3638: 0x0008, 0x3639: 0x0008, 0x363a: 0x0008, 0x363b: 0x0024, + 0x363c: 0x0024, 0x363d: 0x0024, 0x363e: 0x0024, 0x363f: 0x0024, + // Block 0xd9, offset 0x3640 + 0x3640: 0x0008, 0x3641: 0x0008, 0x3642: 0x0008, 0x3643: 0x0008, 0x3644: 0x0008, 0x3645: 0x0008, + 0x3646: 0x0008, 0x3647: 0x0008, 0x3648: 0x0008, 0x3649: 0x0008, 0x364a: 0x0008, 0x364b: 0x0008, + 0x364c: 0x0008, 0x364d: 0x0008, 0x364e: 0x0008, 0x364f: 0x0008, 0x3650: 0x0008, 0x3651: 0x0008, + 0x3652: 0x0008, 0x3653: 0x0008, 0x3654: 0x0008, 0x3655: 0x0008, 0x3656: 0x0008, 0x3657: 0x0008, + 0x3658: 0x0008, 0x3659: 0x0008, 0x365a: 0x0008, 0x365b: 0x0008, 0x365c: 0x0008, 0x365d: 
0x0008, + 0x365e: 0x0008, 0x365f: 0x0008, 0x3660: 0x0008, 0x3661: 0x0008, 0x3662: 0x0008, 0x3663: 0x0008, + 0x3664: 0x0008, 0x3665: 0x0008, 0x3666: 0x0008, 0x3667: 0x0008, 0x3668: 0x0008, 0x3669: 0x0008, + 0x366a: 0x0008, 0x366b: 0x0008, 0x366c: 0x0008, 0x366d: 0x0008, 0x366e: 0x0008, 0x366f: 0x0008, + 0x3670: 0x0008, 0x3671: 0x0008, 0x3672: 0x0008, 0x3673: 0x0008, 0x3674: 0x0008, 0x3675: 0x0008, + 0x3676: 0x0008, 0x3677: 0x0008, 0x3678: 0x0008, 0x3679: 0x0008, 0x367a: 0x0008, 0x367b: 0x0008, + 0x367c: 0x0008, 0x367d: 0x0008, 0x367f: 0x0008, + // Block 0xda, offset 0x3680 + 0x3680: 0x0008, 0x3681: 0x0008, 0x3682: 0x0008, 0x3683: 0x0008, 0x3684: 0x0008, 0x3685: 0x0008, + 0x3686: 0x0008, 0x3687: 0x0008, 0x3688: 0x0008, 0x3689: 0x0008, 0x368a: 0x0008, 0x368b: 0x0008, + 0x368c: 0x0008, 0x368d: 0x0008, 0x368e: 0x0008, 0x368f: 0x0008, 0x3690: 0x0008, 0x3691: 0x0008, + 0x3692: 0x0008, 0x3693: 0x0008, 0x3694: 0x0008, 0x3695: 0x0008, 0x3696: 0x0008, 0x3697: 0x0008, + 0x3698: 0x0008, 0x3699: 0x0008, 0x369a: 0x0008, 0x369b: 0x0008, 0x369c: 0x0008, 0x369d: 0x0008, + 0x369e: 0x0008, 0x369f: 0x0008, 0x36a0: 0x0008, 0x36a1: 0x0008, 0x36a2: 0x0008, 0x36a3: 0x0008, + 0x36a4: 0x0008, 0x36a5: 0x0008, 0x36a6: 0x0008, 0x36a7: 0x0008, 0x36a8: 0x0008, 0x36a9: 0x0008, + 0x36aa: 0x0008, 0x36ab: 0x0008, 0x36ac: 0x0008, 0x36ad: 0x0008, 0x36ae: 0x0008, 0x36af: 0x0008, + 0x36b0: 0x0008, 0x36b1: 0x0008, 0x36b2: 0x0008, 0x36b3: 0x0008, 0x36b4: 0x0008, 0x36b5: 0x0008, + 0x36b6: 0x0008, 0x36b7: 0x0008, 0x36b8: 0x0008, 0x36b9: 0x0008, 0x36ba: 0x0008, 0x36bb: 0x0008, + 0x36bc: 0x0008, 0x36bd: 0x0008, + // Block 0xdb, offset 0x36c0 + 0x36c9: 0x0008, 0x36ca: 0x0008, 0x36cb: 0x0008, + 0x36cc: 0x0008, 0x36cd: 0x0008, 0x36ce: 0x0008, 0x36d0: 0x0008, 0x36d1: 0x0008, + 0x36d2: 0x0008, 0x36d3: 0x0008, 0x36d4: 0x0008, 0x36d5: 0x0008, 0x36d6: 0x0008, 0x36d7: 0x0008, + 0x36d8: 0x0008, 0x36d9: 0x0008, 0x36da: 0x0008, 0x36db: 0x0008, 0x36dc: 0x0008, 0x36dd: 0x0008, + 0x36de: 0x0008, 0x36df: 0x0008, 0x36e0: 
0x0008, 0x36e1: 0x0008, 0x36e2: 0x0008, 0x36e3: 0x0008, + 0x36e4: 0x0008, 0x36e5: 0x0008, 0x36e6: 0x0008, 0x36e7: 0x0008, + 0x36ef: 0x0008, + 0x36f0: 0x0008, 0x36f3: 0x0008, 0x36f4: 0x0008, 0x36f5: 0x0008, + 0x36f6: 0x0008, 0x36f7: 0x0008, 0x36f8: 0x0008, 0x36f9: 0x0008, 0x36fa: 0x0008, + // Block 0xdc, offset 0x3700 + 0x3707: 0x0008, 0x370a: 0x0008, 0x370b: 0x0008, + 0x370c: 0x0008, 0x370d: 0x0008, 0x3710: 0x0008, + 0x3715: 0x0008, 0x3716: 0x0008, + 0x3724: 0x0008, 0x3725: 0x0008, 0x3728: 0x0008, + 0x3731: 0x0008, 0x3732: 0x0008, + 0x373c: 0x0008, + // Block 0xdd, offset 0x3740 + 0x3742: 0x0008, 0x3743: 0x0008, 0x3744: 0x0008, + 0x3751: 0x0008, + 0x3752: 0x0008, 0x3753: 0x0008, + 0x375c: 0x0008, 0x375d: 0x0008, + 0x375e: 0x0008, 0x3761: 0x0008, 0x3763: 0x0008, + 0x3768: 0x0008, + 0x376f: 0x0008, + 0x3773: 0x0008, + 0x377a: 0x0008, 0x377b: 0x0008, + 0x377c: 0x0008, 0x377d: 0x0008, 0x377e: 0x0008, 0x377f: 0x0008, + // Block 0xde, offset 0x3780 + 0x3780: 0x0008, 0x3781: 0x0008, 0x3782: 0x0008, 0x3783: 0x0008, 0x3784: 0x0008, 0x3785: 0x0008, + 0x3786: 0x0008, 0x3787: 0x0008, 0x3788: 0x0008, 0x3789: 0x0008, 0x378a: 0x0008, 0x378b: 0x0008, + 0x378c: 0x0008, 0x378d: 0x0008, 0x378e: 0x0008, 0x378f: 0x0008, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x0008, 0x37c1: 0x0008, 0x37c2: 0x0008, 0x37c3: 0x0008, 0x37c4: 0x0008, 0x37c5: 0x0008, + 0x37cb: 0x0008, + 0x37cc: 0x0008, 0x37cd: 0x0008, 0x37ce: 0x0008, 0x37cf: 0x0008, 0x37d0: 0x0008, 0x37d1: 0x0008, + 0x37d2: 0x0008, 0x37d5: 0x0008, 0x37d6: 0x0008, 0x37d7: 0x0008, + 0x37d8: 0x0008, 0x37d9: 0x0008, 0x37da: 0x0008, 0x37db: 0x0008, 0x37dc: 0x0008, 0x37dd: 0x0008, + 0x37de: 0x0008, 0x37df: 0x0008, 0x37e0: 0x0008, 0x37e1: 0x0008, 0x37e2: 0x0008, 0x37e3: 0x0008, + 0x37e4: 0x0008, 0x37e5: 0x0008, 0x37e9: 0x0008, + 0x37eb: 0x0008, 0x37ec: 0x0008, 0x37ed: 0x0008, 0x37ee: 0x0008, 0x37ef: 0x0008, + 0x37f0: 0x0008, 0x37f3: 0x0008, 0x37f4: 0x0008, 0x37f5: 0x0008, + 0x37f6: 0x0008, 0x37f7: 0x0008, 0x37f8: 0x0008, 0x37f9: 0x0008, 0x37fa: 
0x0008, 0x37fb: 0x0008, + 0x37fc: 0x0008, 0x37fd: 0x0008, 0x37fe: 0x0008, 0x37ff: 0x0008, + // Block 0xe0, offset 0x3800 + 0x381a: 0x0008, 0x381b: 0x0008, 0x381c: 0x0008, 0x381d: 0x0008, + 0x381e: 0x0008, 0x381f: 0x0008, 0x3820: 0x0008, 0x3821: 0x0008, 0x3822: 0x0008, 0x3823: 0x0008, + 0x3824: 0x0008, 0x3825: 0x0008, 0x3826: 0x0008, 0x3827: 0x0008, 0x3828: 0x0008, 0x3829: 0x0008, + 0x382a: 0x0008, 0x382b: 0x0008, 0x382c: 0x0008, 0x382d: 0x0008, 0x382e: 0x0008, 0x382f: 0x0008, + 0x3830: 0x0008, 0x3831: 0x0008, 0x3832: 0x0008, 0x3833: 0x0008, 0x3834: 0x0008, 0x3835: 0x0008, + 0x3836: 0x0008, 0x3837: 0x0008, 0x3838: 0x0008, 0x3839: 0x0008, 0x383a: 0x0008, 0x383b: 0x0008, + 0x383c: 0x0008, 0x383d: 0x0008, 0x383e: 0x0008, 0x383f: 0x0008, + // Block 0xe1, offset 0x3840 + 0x384c: 0x0008, 0x384d: 0x0008, 0x384e: 0x0008, 0x384f: 0x0008, + // Block 0xe2, offset 0x3880 + 0x3888: 0x0008, 0x3889: 0x0008, 0x388a: 0x0008, 0x388b: 0x0008, + 0x388c: 0x0008, 0x388d: 0x0008, 0x388e: 0x0008, 0x388f: 0x0008, + 0x389a: 0x0008, 0x389b: 0x0008, 0x389c: 0x0008, 0x389d: 0x0008, + 0x389e: 0x0008, 0x389f: 0x0008, + // Block 0xe3, offset 0x38c0 + 0x38c8: 0x0008, 0x38c9: 0x0008, 0x38ca: 0x0008, 0x38cb: 0x0008, + 0x38cc: 0x0008, 0x38cd: 0x0008, 0x38ce: 0x0008, 0x38cf: 0x0008, + 0x38ee: 0x0008, 0x38ef: 0x0008, + 0x38fc: 0x0008, 0x38fd: 0x0008, 0x38fe: 0x0008, 0x38ff: 0x0008, + // Block 0xe4, offset 0x3900 + 0x3902: 0x0008, 0x3903: 0x0008, 0x3904: 0x0008, 0x3905: 0x0008, + 0x3906: 0x0008, 0x3907: 0x0008, 0x3908: 0x0008, 0x3909: 0x0008, 0x390a: 0x0008, 0x390b: 0x0008, + 0x390c: 0x0008, 0x390d: 0x0008, 0x390e: 0x0008, 0x390f: 0x0008, + 0x3919: 0x0008, 0x391a: 0x0008, 0x391b: 0x0008, 0x391c: 0x0008, 0x391d: 0x0008, + 0x391e: 0x0008, 0x391f: 0x0008, 0x3920: 0x0008, 0x3921: 0x0008, 0x3922: 0x0008, 0x3923: 0x0008, + 0x3924: 0x0008, 0x3925: 0x0008, 0x3926: 0x0008, 0x3927: 0x0008, 0x3928: 0x0008, 0x3929: 0x0008, + 0x392a: 0x0008, 0x392b: 0x0008, 0x392c: 0x0008, 0x392d: 0x0008, 0x392e: 0x0008, 0x392f: 
0x0008, + 0x3930: 0x0008, 0x3931: 0x0008, 0x3932: 0x0008, 0x3933: 0x0008, 0x3934: 0x0008, 0x3935: 0x0008, + 0x3936: 0x0008, 0x3937: 0x0008, 0x3938: 0x0008, 0x3939: 0x0008, 0x393a: 0x0008, 0x393b: 0x0008, + 0x393c: 0x0008, 0x393d: 0x0008, 0x393e: 0x0008, 0x393f: 0x0008, + // Block 0xe5, offset 0x3940 + 0x394c: 0x0008, 0x394d: 0x0008, 0x394e: 0x0008, 0x394f: 0x0008, 0x3950: 0x0008, 0x3951: 0x0008, + 0x3952: 0x0008, 0x3953: 0x0008, 0x3954: 0x0008, 0x3955: 0x0008, 0x3956: 0x0008, 0x3957: 0x0008, + 0x3958: 0x0008, 0x3959: 0x0008, 0x395a: 0x0008, 0x395b: 0x0008, 0x395c: 0x0008, 0x395d: 0x0008, + 0x395e: 0x0008, 0x395f: 0x0008, 0x3960: 0x0008, 0x3961: 0x0008, 0x3962: 0x0008, 0x3963: 0x0008, + 0x3964: 0x0008, 0x3965: 0x0008, 0x3966: 0x0008, 0x3967: 0x0008, 0x3968: 0x0008, 0x3969: 0x0008, + 0x396a: 0x0008, 0x396b: 0x0008, 0x396c: 0x0008, 0x396d: 0x0008, 0x396e: 0x0008, 0x396f: 0x0008, + 0x3970: 0x0008, 0x3971: 0x0008, 0x3972: 0x0008, 0x3973: 0x0008, 0x3974: 0x0008, 0x3975: 0x0008, + 0x3976: 0x0008, 0x3977: 0x0008, 0x3978: 0x0008, 0x3979: 0x0008, 0x397a: 0x0008, + 0x397c: 0x0008, 0x397d: 0x0008, 0x397e: 0x0008, 0x397f: 0x0008, + // Block 0xe6, offset 0x3980 + 0x3980: 0x0008, 0x3981: 0x0008, 0x3982: 0x0008, 0x3983: 0x0008, 0x3984: 0x0008, 0x3985: 0x0008, + 0x3987: 0x0008, 0x3988: 0x0008, 0x3989: 0x0008, 0x398a: 0x0008, 0x398b: 0x0008, + 0x398c: 0x0008, 0x398d: 0x0008, 0x398e: 0x0008, 0x398f: 0x0008, 0x3990: 0x0008, 0x3991: 0x0008, + 0x3992: 0x0008, 0x3993: 0x0008, 0x3994: 0x0008, 0x3995: 0x0008, 0x3996: 0x0008, 0x3997: 0x0008, + 0x3998: 0x0008, 0x3999: 0x0008, 0x399a: 0x0008, 0x399b: 0x0008, 0x399c: 0x0008, 0x399d: 0x0008, + 0x399e: 0x0008, 0x399f: 0x0008, 0x39a0: 0x0008, 0x39a1: 0x0008, 0x39a2: 0x0008, 0x39a3: 0x0008, + 0x39a4: 0x0008, 0x39a5: 0x0008, 0x39a6: 0x0008, 0x39a7: 0x0008, 0x39a8: 0x0008, 0x39a9: 0x0008, + 0x39aa: 0x0008, 0x39ab: 0x0008, 0x39ac: 0x0008, 0x39ad: 0x0008, 0x39ae: 0x0008, 0x39af: 0x0008, + 0x39b0: 0x0008, 0x39b1: 0x0008, 0x39b2: 0x0008, 0x39b3: 0x0008, 
0x39b4: 0x0008, 0x39b5: 0x0008, + 0x39b6: 0x0008, 0x39b7: 0x0008, 0x39b8: 0x0008, 0x39b9: 0x0008, 0x39ba: 0x0008, 0x39bb: 0x0008, + 0x39bc: 0x0008, 0x39bd: 0x0008, 0x39be: 0x0008, 0x39bf: 0x0008, + // Block 0xe7, offset 0x39c0 + 0x39d8: 0x0008, 0x39d9: 0x0008, 0x39da: 0x0008, 0x39db: 0x0008, 0x39dc: 0x0008, 0x39dd: 0x0008, + 0x39de: 0x0008, 0x39df: 0x0008, + 0x39ee: 0x0008, 0x39ef: 0x0008, + 0x39f0: 0x0008, 0x39f1: 0x0008, 0x39f2: 0x0008, 0x39f3: 0x0008, 0x39f4: 0x0008, 0x39f5: 0x0008, + 0x39f6: 0x0008, 0x39f7: 0x0008, 0x39f8: 0x0008, 0x39f9: 0x0008, 0x39fa: 0x0008, 0x39fb: 0x0008, + 0x39fc: 0x0008, 0x39fd: 0x0008, 0x39fe: 0x0008, 0x39ff: 0x0008, + // Block 0xe8, offset 0x3a00 + 0x3a00: 0x0002, 0x3a01: 0x0002, 0x3a02: 0x0002, 0x3a03: 0x0002, 0x3a04: 0x0002, 0x3a05: 0x0002, + 0x3a06: 0x0002, 0x3a07: 0x0002, 0x3a08: 0x0002, 0x3a09: 0x0002, 0x3a0a: 0x0002, 0x3a0b: 0x0002, + 0x3a0c: 0x0002, 0x3a0d: 0x0002, 0x3a0e: 0x0002, 0x3a0f: 0x0002, 0x3a10: 0x0002, 0x3a11: 0x0002, + 0x3a12: 0x0002, 0x3a13: 0x0002, 0x3a14: 0x0002, 0x3a15: 0x0002, 0x3a16: 0x0002, 0x3a17: 0x0002, + 0x3a18: 0x0002, 0x3a19: 0x0002, 0x3a1a: 0x0002, 0x3a1b: 0x0002, 0x3a1c: 0x0002, 0x3a1d: 0x0002, + 0x3a1e: 0x0002, 0x3a1f: 0x0002, 0x3a20: 0x0024, 0x3a21: 0x0024, 0x3a22: 0x0024, 0x3a23: 0x0024, + 0x3a24: 0x0024, 0x3a25: 0x0024, 0x3a26: 0x0024, 0x3a27: 0x0024, 0x3a28: 0x0024, 0x3a29: 0x0024, + 0x3a2a: 0x0024, 0x3a2b: 0x0024, 0x3a2c: 0x0024, 0x3a2d: 0x0024, 0x3a2e: 0x0024, 0x3a2f: 0x0024, + 0x3a30: 0x0024, 0x3a31: 0x0024, 0x3a32: 0x0024, 0x3a33: 0x0024, 0x3a34: 0x0024, 0x3a35: 0x0024, + 0x3a36: 0x0024, 0x3a37: 0x0024, 0x3a38: 0x0024, 0x3a39: 0x0024, 0x3a3a: 0x0024, 0x3a3b: 0x0024, + 0x3a3c: 0x0024, 0x3a3d: 0x0024, 0x3a3e: 0x0024, 0x3a3f: 0x0024, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x0002, 0x3a41: 0x0002, 0x3a42: 0x0002, 0x3a43: 0x0002, 0x3a44: 0x0002, 0x3a45: 0x0002, + 0x3a46: 0x0002, 0x3a47: 0x0002, 0x3a48: 0x0002, 0x3a49: 0x0002, 0x3a4a: 0x0002, 0x3a4b: 0x0002, + 0x3a4c: 0x0002, 0x3a4d: 0x0002, 
0x3a4e: 0x0002, 0x3a4f: 0x0002, 0x3a50: 0x0002, 0x3a51: 0x0002, + 0x3a52: 0x0002, 0x3a53: 0x0002, 0x3a54: 0x0002, 0x3a55: 0x0002, 0x3a56: 0x0002, 0x3a57: 0x0002, + 0x3a58: 0x0002, 0x3a59: 0x0002, 0x3a5a: 0x0002, 0x3a5b: 0x0002, 0x3a5c: 0x0002, 0x3a5d: 0x0002, + 0x3a5e: 0x0002, 0x3a5f: 0x0002, 0x3a60: 0x0002, 0x3a61: 0x0002, 0x3a62: 0x0002, 0x3a63: 0x0002, + 0x3a64: 0x0002, 0x3a65: 0x0002, 0x3a66: 0x0002, 0x3a67: 0x0002, 0x3a68: 0x0002, 0x3a69: 0x0002, + 0x3a6a: 0x0002, 0x3a6b: 0x0002, 0x3a6c: 0x0002, 0x3a6d: 0x0002, 0x3a6e: 0x0002, 0x3a6f: 0x0002, + 0x3a70: 0x0002, 0x3a71: 0x0002, 0x3a72: 0x0002, 0x3a73: 0x0002, 0x3a74: 0x0002, 0x3a75: 0x0002, + 0x3a76: 0x0002, 0x3a77: 0x0002, 0x3a78: 0x0002, 0x3a79: 0x0002, 0x3a7a: 0x0002, 0x3a7b: 0x0002, + 0x3a7c: 0x0002, 0x3a7d: 0x0002, 0x3a7e: 0x0002, 0x3a7f: 0x0002, + // Block 0xea, offset 0x3a80 + 0x3a80: 0x0024, 0x3a81: 0x0024, 0x3a82: 0x0024, 0x3a83: 0x0024, 0x3a84: 0x0024, 0x3a85: 0x0024, + 0x3a86: 0x0024, 0x3a87: 0x0024, 0x3a88: 0x0024, 0x3a89: 0x0024, 0x3a8a: 0x0024, 0x3a8b: 0x0024, + 0x3a8c: 0x0024, 0x3a8d: 0x0024, 0x3a8e: 0x0024, 0x3a8f: 0x0024, 0x3a90: 0x0024, 0x3a91: 0x0024, + 0x3a92: 0x0024, 0x3a93: 0x0024, 0x3a94: 0x0024, 0x3a95: 0x0024, 0x3a96: 0x0024, 0x3a97: 0x0024, + 0x3a98: 0x0024, 0x3a99: 0x0024, 0x3a9a: 0x0024, 0x3a9b: 0x0024, 0x3a9c: 0x0024, 0x3a9d: 0x0024, + 0x3a9e: 0x0024, 0x3a9f: 0x0024, 0x3aa0: 0x0024, 0x3aa1: 0x0024, 0x3aa2: 0x0024, 0x3aa3: 0x0024, + 0x3aa4: 0x0024, 0x3aa5: 0x0024, 0x3aa6: 0x0024, 0x3aa7: 0x0024, 0x3aa8: 0x0024, 0x3aa9: 0x0024, + 0x3aaa: 0x0024, 0x3aab: 0x0024, 0x3aac: 0x0024, 0x3aad: 0x0024, 0x3aae: 0x0024, 0x3aaf: 0x0024, + 0x3ab0: 0x0002, 0x3ab1: 0x0002, 0x3ab2: 0x0002, 0x3ab3: 0x0002, 0x3ab4: 0x0002, 0x3ab5: 0x0002, + 0x3ab6: 0x0002, 0x3ab7: 0x0002, 0x3ab8: 0x0002, 0x3ab9: 0x0002, 0x3aba: 0x0002, 0x3abb: 0x0002, + 0x3abc: 0x0002, 0x3abd: 0x0002, 0x3abe: 0x0002, 0x3abf: 0x0002, } // graphemesIndex: 25 blocks, 1600 entries, 1600 bytes @@ -1297,113 +1602,116 @@ var graphemesIndex = 
[1600]property{ 0xf0: 0x14, 0xf3: 0x16, // Block 0x4, offset 0x100 0x120: 0x0e, 0x121: 0x0f, 0x122: 0x10, 0x123: 0x11, 0x124: 0x12, 0x125: 0x13, 0x126: 0x14, 0x127: 0x15, - 0x128: 0x16, 0x129: 0x17, 0x12a: 0x16, 0x12b: 0x18, 0x12c: 0x19, 0x12d: 0x1a, 0x12e: 0x1b, 0x12f: 0x1c, - 0x130: 0x1d, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x21, 0x135: 0x22, 0x136: 0x23, 0x137: 0x24, - 0x138: 0x25, 0x139: 0x26, 0x13a: 0x27, 0x13b: 0x28, 0x13c: 0x29, 0x13d: 0x2a, 0x13e: 0x2b, 0x13f: 0x2c, + 0x128: 0x16, 0x129: 0x17, 0x12a: 0x18, 0x12b: 0x19, 0x12c: 0x1a, 0x12d: 0x1b, 0x12e: 0x1c, 0x12f: 0x1d, + 0x130: 0x1e, 0x131: 0x1f, 0x132: 0x20, 0x133: 0x21, 0x134: 0x22, 0x135: 0x23, 0x136: 0x24, 0x137: 0x25, + 0x138: 0x26, 0x139: 0x27, 0x13a: 0x28, 0x13b: 0x29, 0x13c: 0x2a, 0x13d: 0x2b, 0x13e: 0x2c, 0x13f: 0x2d, // Block 0x5, offset 0x140 - 0x140: 0x2d, 0x141: 0x2e, 0x142: 0x2f, 0x144: 0x30, 0x145: 0x31, 0x146: 0x32, 0x147: 0x33, - 0x14d: 0x34, - 0x15c: 0x35, 0x15d: 0x36, 0x15e: 0x37, 0x15f: 0x38, - 0x160: 0x39, 0x162: 0x3a, 0x164: 0x3b, - 0x168: 0x3c, 0x169: 0x3d, 0x16a: 0x3e, 0x16b: 0x3f, 0x16c: 0x40, 0x16d: 0x41, 0x16e: 0x42, 0x16f: 0x43, - 0x170: 0x44, 0x173: 0x45, 0x177: 0x02, + 0x140: 0x2e, 0x141: 0x2f, 0x142: 0x30, 0x144: 0x31, 0x145: 0x32, 0x146: 0x33, 0x147: 0x34, + 0x14d: 0x35, + 0x15c: 0x36, 0x15d: 0x37, 0x15e: 0x38, 0x15f: 0x39, + 0x160: 0x3a, 0x162: 0x3b, 0x164: 0x3c, + 0x168: 0x3d, 0x169: 0x3e, 0x16a: 0x3f, 0x16b: 0x40, 0x16c: 0x41, 0x16d: 0x42, 0x16e: 0x43, 0x16f: 0x44, + 0x170: 0x45, 0x173: 0x46, 0x177: 0x02, // Block 0x6, offset 0x180 - 0x180: 0x46, 0x181: 0x47, 0x183: 0x48, 0x184: 0x49, 0x186: 0x4a, - 0x18c: 0x4b, 0x18e: 0x4c, 0x18f: 0x4d, + 0x180: 0x47, 0x181: 0x48, 0x183: 0x49, 0x184: 0x4a, 0x186: 0x4b, + 0x18c: 0x4c, 0x18f: 0x4d, 0x193: 0x4e, 0x196: 0x4f, 0x197: 0x50, - 0x198: 0x51, 0x199: 0x52, 0x19a: 0x53, 0x19b: 0x52, 0x19c: 0x54, 0x19d: 0x55, 0x19e: 0x56, - 0x1a4: 0x57, - 0x1ac: 0x58, 0x1ad: 0x59, - 0x1b3: 0x5a, 0x1b5: 0x5b, 0x1b7: 0x5c, + 0x198: 0x51, 
0x199: 0x52, 0x19a: 0x53, 0x19b: 0x54, 0x19c: 0x55, 0x19d: 0x56, 0x19e: 0x57, + 0x1a4: 0x58, + 0x1ac: 0x59, 0x1ad: 0x5a, + 0x1b3: 0x5b, 0x1b5: 0x5c, 0x1b7: 0x5d, // Block 0x7, offset 0x1c0 - 0x1c0: 0x5d, 0x1c2: 0x5e, - 0x1ca: 0x5f, + 0x1c0: 0x5e, 0x1c2: 0x5f, + 0x1ca: 0x60, // Block 0x8, offset 0x200 - 0x219: 0x60, 0x21a: 0x61, 0x21b: 0x62, - 0x220: 0x63, 0x222: 0x64, 0x223: 0x65, 0x224: 0x66, 0x225: 0x67, 0x226: 0x68, 0x227: 0x69, - 0x228: 0x6a, 0x229: 0x6b, 0x22a: 0x6c, 0x22b: 0x6d, 0x22f: 0x6e, - 0x230: 0x6f, 0x231: 0x70, 0x232: 0x71, 0x233: 0x72, 0x234: 0x73, 0x235: 0x74, 0x236: 0x75, 0x237: 0x6f, - 0x238: 0x70, 0x239: 0x71, 0x23a: 0x72, 0x23b: 0x73, 0x23c: 0x74, 0x23d: 0x75, 0x23e: 0x6f, 0x23f: 0x70, + 0x219: 0x61, 0x21a: 0x62, 0x21b: 0x63, + 0x220: 0x64, 0x222: 0x65, 0x223: 0x66, 0x224: 0x67, 0x225: 0x68, 0x226: 0x69, 0x227: 0x6a, + 0x228: 0x6b, 0x229: 0x6c, 0x22a: 0x6d, 0x22b: 0x6e, 0x22f: 0x6f, + 0x230: 0x70, 0x231: 0x71, 0x232: 0x72, 0x233: 0x73, 0x234: 0x74, 0x235: 0x75, 0x236: 0x76, 0x237: 0x70, + 0x238: 0x71, 0x239: 0x72, 0x23a: 0x73, 0x23b: 0x74, 0x23c: 0x75, 0x23d: 0x76, 0x23e: 0x70, 0x23f: 0x71, // Block 0x9, offset 0x240 - 0x240: 0x71, 0x241: 0x72, 0x242: 0x73, 0x243: 0x74, 0x244: 0x75, 0x245: 0x6f, 0x246: 0x70, 0x247: 0x71, - 0x248: 0x72, 0x249: 0x73, 0x24a: 0x74, 0x24b: 0x75, 0x24c: 0x6f, 0x24d: 0x70, 0x24e: 0x71, 0x24f: 0x72, - 0x250: 0x73, 0x251: 0x74, 0x252: 0x75, 0x253: 0x6f, 0x254: 0x70, 0x255: 0x71, 0x256: 0x72, 0x257: 0x73, - 0x258: 0x74, 0x259: 0x75, 0x25a: 0x6f, 0x25b: 0x70, 0x25c: 0x71, 0x25d: 0x72, 0x25e: 0x73, 0x25f: 0x74, - 0x260: 0x75, 0x261: 0x6f, 0x262: 0x70, 0x263: 0x71, 0x264: 0x72, 0x265: 0x73, 0x266: 0x74, 0x267: 0x75, - 0x268: 0x6f, 0x269: 0x70, 0x26a: 0x71, 0x26b: 0x72, 0x26c: 0x73, 0x26d: 0x74, 0x26e: 0x75, 0x26f: 0x6f, - 0x270: 0x70, 0x271: 0x71, 0x272: 0x72, 0x273: 0x73, 0x274: 0x74, 0x275: 0x75, 0x276: 0x6f, 0x277: 0x70, - 0x278: 0x71, 0x279: 0x72, 0x27a: 0x73, 0x27b: 0x74, 0x27c: 0x75, 0x27d: 0x6f, 0x27e: 0x70, 0x27f: 
0x71, + 0x240: 0x72, 0x241: 0x73, 0x242: 0x74, 0x243: 0x75, 0x244: 0x76, 0x245: 0x70, 0x246: 0x71, 0x247: 0x72, + 0x248: 0x73, 0x249: 0x74, 0x24a: 0x75, 0x24b: 0x76, 0x24c: 0x70, 0x24d: 0x71, 0x24e: 0x72, 0x24f: 0x73, + 0x250: 0x74, 0x251: 0x75, 0x252: 0x76, 0x253: 0x70, 0x254: 0x71, 0x255: 0x72, 0x256: 0x73, 0x257: 0x74, + 0x258: 0x75, 0x259: 0x76, 0x25a: 0x70, 0x25b: 0x71, 0x25c: 0x72, 0x25d: 0x73, 0x25e: 0x74, 0x25f: 0x75, + 0x260: 0x76, 0x261: 0x70, 0x262: 0x71, 0x263: 0x72, 0x264: 0x73, 0x265: 0x74, 0x266: 0x75, 0x267: 0x76, + 0x268: 0x70, 0x269: 0x71, 0x26a: 0x72, 0x26b: 0x73, 0x26c: 0x74, 0x26d: 0x75, 0x26e: 0x76, 0x26f: 0x70, + 0x270: 0x71, 0x271: 0x72, 0x272: 0x73, 0x273: 0x74, 0x274: 0x75, 0x275: 0x76, 0x276: 0x70, 0x277: 0x71, + 0x278: 0x72, 0x279: 0x73, 0x27a: 0x74, 0x27b: 0x75, 0x27c: 0x76, 0x27d: 0x70, 0x27e: 0x71, 0x27f: 0x72, // Block 0xa, offset 0x280 - 0x280: 0x72, 0x281: 0x73, 0x282: 0x74, 0x283: 0x75, 0x284: 0x6f, 0x285: 0x70, 0x286: 0x71, 0x287: 0x72, - 0x288: 0x73, 0x289: 0x74, 0x28a: 0x75, 0x28b: 0x6f, 0x28c: 0x70, 0x28d: 0x71, 0x28e: 0x72, 0x28f: 0x73, - 0x290: 0x74, 0x291: 0x75, 0x292: 0x6f, 0x293: 0x70, 0x294: 0x71, 0x295: 0x72, 0x296: 0x73, 0x297: 0x74, - 0x298: 0x75, 0x299: 0x6f, 0x29a: 0x70, 0x29b: 0x71, 0x29c: 0x72, 0x29d: 0x73, 0x29e: 0x74, 0x29f: 0x75, - 0x2a0: 0x6f, 0x2a1: 0x70, 0x2a2: 0x71, 0x2a3: 0x72, 0x2a4: 0x73, 0x2a5: 0x74, 0x2a6: 0x75, 0x2a7: 0x6f, - 0x2a8: 0x70, 0x2a9: 0x71, 0x2aa: 0x72, 0x2ab: 0x73, 0x2ac: 0x74, 0x2ad: 0x75, 0x2ae: 0x6f, 0x2af: 0x70, - 0x2b0: 0x71, 0x2b1: 0x72, 0x2b2: 0x73, 0x2b3: 0x74, 0x2b4: 0x75, 0x2b5: 0x6f, 0x2b6: 0x70, 0x2b7: 0x71, - 0x2b8: 0x72, 0x2b9: 0x73, 0x2ba: 0x74, 0x2bb: 0x75, 0x2bc: 0x6f, 0x2bd: 0x70, 0x2be: 0x71, 0x2bf: 0x72, + 0x280: 0x73, 0x281: 0x74, 0x282: 0x75, 0x283: 0x76, 0x284: 0x70, 0x285: 0x71, 0x286: 0x72, 0x287: 0x73, + 0x288: 0x74, 0x289: 0x75, 0x28a: 0x76, 0x28b: 0x70, 0x28c: 0x71, 0x28d: 0x72, 0x28e: 0x73, 0x28f: 0x74, + 0x290: 0x75, 0x291: 0x76, 0x292: 0x70, 0x293: 0x71, 
0x294: 0x72, 0x295: 0x73, 0x296: 0x74, 0x297: 0x75, + 0x298: 0x76, 0x299: 0x70, 0x29a: 0x71, 0x29b: 0x72, 0x29c: 0x73, 0x29d: 0x74, 0x29e: 0x75, 0x29f: 0x76, + 0x2a0: 0x70, 0x2a1: 0x71, 0x2a2: 0x72, 0x2a3: 0x73, 0x2a4: 0x74, 0x2a5: 0x75, 0x2a6: 0x76, 0x2a7: 0x70, + 0x2a8: 0x71, 0x2a9: 0x72, 0x2aa: 0x73, 0x2ab: 0x74, 0x2ac: 0x75, 0x2ad: 0x76, 0x2ae: 0x70, 0x2af: 0x71, + 0x2b0: 0x72, 0x2b1: 0x73, 0x2b2: 0x74, 0x2b3: 0x75, 0x2b4: 0x76, 0x2b5: 0x70, 0x2b6: 0x71, 0x2b7: 0x72, + 0x2b8: 0x73, 0x2b9: 0x74, 0x2ba: 0x75, 0x2bb: 0x76, 0x2bc: 0x70, 0x2bd: 0x71, 0x2be: 0x72, 0x2bf: 0x73, // Block 0xb, offset 0x2c0 - 0x2c0: 0x73, 0x2c1: 0x74, 0x2c2: 0x75, 0x2c3: 0x6f, 0x2c4: 0x70, 0x2c5: 0x71, 0x2c6: 0x72, 0x2c7: 0x73, - 0x2c8: 0x74, 0x2c9: 0x75, 0x2ca: 0x6f, 0x2cb: 0x70, 0x2cc: 0x71, 0x2cd: 0x72, 0x2ce: 0x73, 0x2cf: 0x74, - 0x2d0: 0x75, 0x2d1: 0x6f, 0x2d2: 0x70, 0x2d3: 0x71, 0x2d4: 0x72, 0x2d5: 0x73, 0x2d6: 0x74, 0x2d7: 0x75, - 0x2d8: 0x6f, 0x2d9: 0x70, 0x2da: 0x71, 0x2db: 0x72, 0x2dc: 0x73, 0x2dd: 0x74, 0x2de: 0x76, 0x2df: 0x77, + 0x2c0: 0x74, 0x2c1: 0x75, 0x2c2: 0x76, 0x2c3: 0x70, 0x2c4: 0x71, 0x2c5: 0x72, 0x2c6: 0x73, 0x2c7: 0x74, + 0x2c8: 0x75, 0x2c9: 0x76, 0x2ca: 0x70, 0x2cb: 0x71, 0x2cc: 0x72, 0x2cd: 0x73, 0x2ce: 0x74, 0x2cf: 0x75, + 0x2d0: 0x76, 0x2d1: 0x70, 0x2d2: 0x71, 0x2d3: 0x72, 0x2d4: 0x73, 0x2d5: 0x74, 0x2d6: 0x75, 0x2d7: 0x76, + 0x2d8: 0x70, 0x2d9: 0x71, 0x2da: 0x72, 0x2db: 0x73, 0x2dc: 0x74, 0x2dd: 0x75, 0x2de: 0x77, 0x2df: 0x78, // Block 0xc, offset 0x300 - 0x32c: 0x78, - 0x338: 0x79, 0x33b: 0x7a, 0x33e: 0x61, 0x33f: 0x7b, + 0x32c: 0x79, + 0x338: 0x7a, 0x33b: 0x7b, 0x33e: 0x62, 0x33f: 0x7c, // Block 0xd, offset 0x340 - 0x347: 0x7c, - 0x34b: 0x7d, 0x34d: 0x7e, - 0x368: 0x7f, 0x36b: 0x80, - 0x374: 0x81, - 0x37a: 0x82, 0x37b: 0x83, 0x37d: 0x84, 0x37e: 0x85, + 0x347: 0x7d, + 0x34b: 0x7e, 0x34d: 0x7f, + 0x368: 0x80, 0x36b: 0x81, + 0x374: 0x82, 0x375: 0x83, + 0x37a: 0x84, 0x37b: 0x85, 0x37d: 0x86, 0x37e: 0x87, // Block 0xe, offset 0x380 - 0x380: 0x86, 0x381: 0x87, 
0x382: 0x88, 0x383: 0x89, 0x384: 0x8a, 0x385: 0x8b, 0x386: 0x8c, 0x387: 0x8d, - 0x388: 0x8e, 0x389: 0x8f, 0x38b: 0x90, 0x38c: 0x21, 0x38d: 0x91, - 0x390: 0x92, 0x391: 0x93, 0x392: 0x94, 0x393: 0x95, 0x396: 0x96, 0x397: 0x97, - 0x398: 0x98, 0x399: 0x99, 0x39a: 0x9a, 0x39c: 0x9b, - 0x3a0: 0x9c, 0x3a4: 0x9d, 0x3a5: 0x9e, 0x3a7: 0x9f, - 0x3a8: 0xa0, 0x3a9: 0xa1, 0x3aa: 0xa2, - 0x3b0: 0xa3, 0x3b2: 0xa4, 0x3b4: 0xa5, 0x3b5: 0xa6, 0x3b6: 0xa7, - 0x3bb: 0xa8, 0x3bc: 0xa9, 0x3bd: 0xaa, + 0x380: 0x88, 0x381: 0x89, 0x382: 0x8a, 0x383: 0x8b, 0x384: 0x8c, 0x385: 0x8d, 0x386: 0x8e, 0x387: 0x8f, + 0x388: 0x90, 0x389: 0x91, 0x38b: 0x92, 0x38c: 0x93, 0x38d: 0x94, 0x38e: 0x95, 0x38f: 0x96, + 0x390: 0x97, 0x391: 0x98, 0x392: 0x99, 0x393: 0x9a, 0x396: 0x9b, 0x397: 0x9c, + 0x398: 0x9d, 0x399: 0x9e, 0x39a: 0x9f, 0x39c: 0xa0, + 0x3a0: 0xa1, 0x3a4: 0xa2, 0x3a5: 0xa3, 0x3a7: 0xa4, + 0x3a8: 0xa5, 0x3a9: 0xa6, 0x3aa: 0xa7, 0x3ad: 0xa8, + 0x3b0: 0xa9, 0x3b2: 0xaa, 0x3b4: 0xab, 0x3b5: 0xac, 0x3b6: 0xad, + 0x3bb: 0xae, 0x3bc: 0xaf, 0x3bd: 0xb0, // Block 0xf, offset 0x3c0 - 0x3d0: 0xab, 0x3d1: 0xac, + 0x3d0: 0xb1, 0x3d1: 0xb2, // Block 0x10, offset 0x400 - 0x42b: 0xad, 0x42c: 0xae, - 0x43d: 0xaf, 0x43e: 0xb0, 0x43f: 0xb1, + 0x404: 0xb3, + 0x42b: 0xb4, 0x42c: 0xb5, + 0x435: 0xb6, + 0x43d: 0xb7, 0x43e: 0xb8, 0x43f: 0xb9, // Block 0x11, offset 0x440 - 0x472: 0xb2, + 0x472: 0xba, // Block 0x12, offset 0x480 - 0x4bc: 0xb3, 0x4bd: 0xb4, + 0x4bc: 0xbb, 0x4bd: 0xbc, // Block 0x13, offset 0x4c0 - 0x4c5: 0xb5, 0x4c6: 0xb6, - 0x4c9: 0xb7, - 0x4e8: 0xb8, 0x4e9: 0xb9, 0x4ea: 0xba, + 0x4c5: 0xbd, 0x4c6: 0xbe, + 0x4c9: 0xbf, + 0x4e8: 0xc0, 0x4e9: 0xc1, 0x4ea: 0xc2, // Block 0x14, offset 0x500 - 0x500: 0xbb, 0x502: 0xbc, 0x504: 0xae, - 0x50a: 0xbd, 0x50b: 0xbe, - 0x513: 0xbe, - 0x523: 0xbf, 0x525: 0xc0, + 0x500: 0xc3, 0x502: 0xc4, 0x504: 0xb5, + 0x50a: 0xc5, 0x50b: 0xc6, + 0x513: 0xc6, 0x517: 0xc7, + 0x51b: 0xc8, + 0x523: 0xc9, 0x525: 0xca, // Block 0x15, offset 0x540 - 0x540: 0x52, 0x541: 0x52, 0x542: 0x52, 
0x543: 0x52, 0x544: 0xc1, 0x545: 0xc2, 0x546: 0xc3, 0x547: 0xc4, - 0x548: 0xc5, 0x549: 0xc6, 0x54a: 0x52, 0x54b: 0x52, 0x54c: 0x52, 0x54d: 0x52, 0x54e: 0x52, 0x54f: 0xc7, - 0x550: 0x52, 0x551: 0x52, 0x552: 0x52, 0x553: 0x52, 0x554: 0xc8, 0x555: 0xc9, 0x556: 0x52, 0x557: 0x52, - 0x558: 0x52, 0x559: 0xca, 0x55a: 0x52, 0x55b: 0x52, 0x55d: 0xcb, 0x55f: 0xcc, - 0x560: 0xcd, 0x561: 0xce, 0x562: 0xcf, 0x563: 0x52, 0x564: 0xd0, 0x565: 0xd1, 0x566: 0x52, 0x567: 0x52, - 0x568: 0x52, 0x569: 0x52, 0x56a: 0x52, 0x56b: 0x52, - 0x570: 0x52, 0x571: 0x52, 0x572: 0x52, 0x573: 0x52, 0x574: 0x52, 0x575: 0x52, 0x576: 0x52, 0x577: 0x52, - 0x578: 0x52, 0x579: 0x52, 0x57a: 0x52, 0x57b: 0x52, 0x57c: 0x52, 0x57d: 0x52, 0x57e: 0x52, 0x57f: 0xc8, + 0x540: 0xcb, 0x542: 0xcc, 0x543: 0xcd, 0x545: 0xce, 0x546: 0xcf, 0x547: 0xd0, + 0x548: 0xd1, 0x549: 0xd2, 0x54a: 0xd3, 0x54b: 0xd3, 0x54c: 0xd4, 0x54d: 0xd3, 0x54e: 0xd5, 0x54f: 0xd6, + 0x550: 0xd3, 0x551: 0xd3, 0x552: 0xd3, 0x553: 0xd7, 0x554: 0xd8, 0x555: 0xd9, 0x556: 0xda, 0x557: 0xdb, + 0x558: 0xd3, 0x559: 0xdc, 0x55a: 0xd3, 0x55b: 0xdd, 0x55f: 0xde, + 0x560: 0xdf, 0x561: 0xe0, 0x562: 0xe1, 0x563: 0xe2, 0x564: 0xe3, 0x565: 0xe4, 0x566: 0xd3, 0x567: 0xd3, + 0x569: 0xe5, 0x56a: 0xd3, 0x56b: 0xd3, + 0x570: 0xd3, 0x571: 0xd3, 0x572: 0xd3, 0x573: 0xd3, 0x574: 0xd3, 0x575: 0xd3, 0x576: 0xd3, 0x577: 0xd3, + 0x578: 0xd3, 0x579: 0xd3, 0x57a: 0xd3, 0x57b: 0xd3, 0x57c: 0xd3, 0x57d: 0xd3, 0x57e: 0xd3, 0x57f: 0xd8, // Block 0x16, offset 0x580 0x590: 0x0b, 0x591: 0x0c, 0x593: 0x0d, 0x596: 0x0e, 0x59b: 0x0f, 0x59c: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, // Block 0x17, offset 0x5c0 - 0x5c0: 0xd2, 0x5c1: 0x02, 0x5c2: 0xd3, 0x5c3: 0xd3, 0x5c4: 0x02, 0x5c5: 0x02, 0x5c6: 0x02, 0x5c7: 0xd4, - 0x5c8: 0xd3, 0x5c9: 0xd3, 0x5ca: 0xd3, 0x5cb: 0xd3, 0x5cc: 0xd3, 0x5cd: 0xd3, 0x5ce: 0xd3, 0x5cf: 0xd3, - 0x5d0: 0xd3, 0x5d1: 0xd3, 0x5d2: 0xd3, 0x5d3: 0xd3, 0x5d4: 0xd3, 0x5d5: 0xd3, 0x5d6: 0xd3, 0x5d7: 0xd3, - 0x5d8: 0xd3, 0x5d9: 0xd3, 0x5da: 0xd3, 0x5db: 0xd3, 0x5dc: 
0xd3, 0x5dd: 0xd3, 0x5de: 0xd3, 0x5df: 0xd3, - 0x5e0: 0xd3, 0x5e1: 0xd3, 0x5e2: 0xd3, 0x5e3: 0xd3, 0x5e4: 0xd3, 0x5e5: 0xd3, 0x5e6: 0xd3, 0x5e7: 0xd3, - 0x5e8: 0xd3, 0x5e9: 0xd3, 0x5ea: 0xd3, 0x5eb: 0xd3, 0x5ec: 0xd3, 0x5ed: 0xd3, 0x5ee: 0xd3, 0x5ef: 0xd3, - 0x5f0: 0xd3, 0x5f1: 0xd3, 0x5f2: 0xd3, 0x5f3: 0xd3, 0x5f4: 0xd3, 0x5f5: 0xd3, 0x5f6: 0xd3, 0x5f7: 0xd3, - 0x5f8: 0xd3, 0x5f9: 0xd3, 0x5fa: 0xd3, 0x5fb: 0xd3, 0x5fc: 0xd3, 0x5fd: 0xd3, 0x5fe: 0xd3, 0x5ff: 0xd3, + 0x5c0: 0xe6, 0x5c1: 0x02, 0x5c2: 0xe7, 0x5c3: 0xe7, 0x5c4: 0x02, 0x5c5: 0x02, 0x5c6: 0x02, 0x5c7: 0xe8, + 0x5c8: 0xe7, 0x5c9: 0xe7, 0x5ca: 0xe7, 0x5cb: 0xe7, 0x5cc: 0xe7, 0x5cd: 0xe7, 0x5ce: 0xe7, 0x5cf: 0xe7, + 0x5d0: 0xe7, 0x5d1: 0xe7, 0x5d2: 0xe7, 0x5d3: 0xe7, 0x5d4: 0xe7, 0x5d5: 0xe7, 0x5d6: 0xe7, 0x5d7: 0xe7, + 0x5d8: 0xe7, 0x5d9: 0xe7, 0x5da: 0xe7, 0x5db: 0xe7, 0x5dc: 0xe7, 0x5dd: 0xe7, 0x5de: 0xe7, 0x5df: 0xe7, + 0x5e0: 0xe7, 0x5e1: 0xe7, 0x5e2: 0xe7, 0x5e3: 0xe7, 0x5e4: 0xe7, 0x5e5: 0xe7, 0x5e6: 0xe7, 0x5e7: 0xe7, + 0x5e8: 0xe7, 0x5e9: 0xe7, 0x5ea: 0xe7, 0x5eb: 0xe7, 0x5ec: 0xe7, 0x5ed: 0xe7, 0x5ee: 0xe7, 0x5ef: 0xe7, + 0x5f0: 0xe7, 0x5f1: 0xe7, 0x5f2: 0xe7, 0x5f3: 0xe7, 0x5f4: 0xe7, 0x5f5: 0xe7, 0x5f6: 0xe7, 0x5f7: 0xe7, + 0x5f8: 0xe7, 0x5f9: 0xe7, 0x5fa: 0xe7, 0x5fb: 0xe7, 0x5fc: 0xe7, 0x5fd: 0xe7, 0x5fe: 0xe7, 0x5ff: 0xe7, // Block 0x18, offset 0x600 0x620: 0x15, } diff --git a/vendor/github.com/clipperhouse/uax29/v2/internal/iterators/iterator.go b/vendor/github.com/clipperhouse/uax29/v2/internal/iterators/iterator.go deleted file mode 100644 index e213486380..0000000000 --- a/vendor/github.com/clipperhouse/uax29/v2/internal/iterators/iterator.go +++ /dev/null @@ -1,100 +0,0 @@ -package iterators - -import "github.com/clipperhouse/stringish" - -type SplitFunc[T stringish.Interface] func(T, bool) (int, T, error) - -// Iterator is a generic iterator for words that are either []byte or string. -// Iterate while Next() is true, and access the word via Value(). 
-type Iterator[T stringish.Interface] struct { - split SplitFunc[T] - data T - start int - pos int -} - -// New creates a new Iterator for the given data and SplitFunc. -func New[T stringish.Interface](split SplitFunc[T], data T) *Iterator[T] { - return &Iterator[T]{ - split: split, - data: data, - } -} - -// SetText sets the text for the iterator to operate on, and resets all state. -func (iter *Iterator[T]) SetText(data T) { - iter.data = data - iter.start = 0 - iter.pos = 0 -} - -// Split sets the SplitFunc for the Iterator. -func (iter *Iterator[T]) Split(split SplitFunc[T]) { - iter.split = split -} - -// Next advances the iterator to the next token. It returns false when there -// are no remaining tokens or an error occurred. -func (iter *Iterator[T]) Next() bool { - if iter.pos == len(iter.data) { - return false - } - if iter.pos > len(iter.data) { - panic("SplitFunc advanced beyond the end of the data") - } - - iter.start = iter.pos - - advance, _, err := iter.split(iter.data[iter.pos:], true) - if err != nil { - panic(err) - } - if advance <= 0 { - panic("SplitFunc returned a zero or negative advance") - } - - iter.pos += advance - if iter.pos > len(iter.data) { - panic("SplitFunc advanced beyond the end of the data") - } - - return true -} - -// Value returns the current token. -func (iter *Iterator[T]) Value() T { - return iter.data[iter.start:iter.pos] -} - -// Start returns the byte position of the current token in the original data. -func (iter *Iterator[T]) Start() int { - return iter.start -} - -// End returns the byte position after the current token in the original data. -func (iter *Iterator[T]) End() int { - return iter.pos -} - -// Reset resets the iterator to the beginning of the data. 
-func (iter *Iterator[T]) Reset() { - iter.start = 0 - iter.pos = 0 -} - -func (iter *Iterator[T]) First() T { - if len(iter.data) == 0 { - return iter.data - } - advance, _, err := iter.split(iter.data, true) - if err != nil { - panic(err) - } - if advance <= 0 { - panic("SplitFunc returned a zero or negative advance") - } - if advance > len(iter.data) { - panic("SplitFunc advanced beyond the end of the data") - } - return iter.data[:advance] -} diff --git a/vendor/github.com/cockroachdb/apd/v3/bigint.go b/vendor/github.com/cockroachdb/apd/v3/bigint.go index 6271a40418..3e2cd407d5 100644 --- a/vendor/github.com/cockroachdb/apd/v3/bigint.go +++ b/vendor/github.com/cockroachdb/apd/v3/bigint.go @@ -325,7 +325,7 @@ func addInline(xVal, yVal uint64, xNeg, yNeg bool) (zVal uint64, zNeg, ok bool) //gcassert:inline func mulInline(xVal, yVal uint64, xNeg, yNeg bool) (zVal uint64, zNeg, ok bool) { hi, lo := bits.Mul64(xVal, yVal) - neg := xNeg != yNeg + neg := xNeg != yNeg && lo != 0 overflow := hi != 0 return lo, neg, !overflow } @@ -336,7 +336,7 @@ func quoInline(xVal, yVal uint64, xNeg, yNeg bool) (quoVal uint64, quoNeg, ok bo return 0, false, false } quo := xVal / yVal - neg := xNeg != yNeg + neg := xNeg != yNeg && quo != 0 return quo, neg, true } @@ -346,7 +346,7 @@ func remInline(xVal, yVal uint64, xNeg, yNeg bool) (remVal uint64, remNeg, ok bo return 0, false, false } rem := xVal % yVal - return rem, xNeg, true + return rem, xNeg && rem != 0, true } /////////////////////////////////////////////////////////////////////////////// diff --git a/vendor/github.com/cockroachdb/apd/v3/decimal.go b/vendor/github.com/cockroachdb/apd/v3/decimal.go index 6244825568..775c417348 100644 --- a/vendor/github.com/cockroachdb/apd/v3/decimal.go +++ b/vendor/github.com/cockroachdb/apd/v3/decimal.go @@ -156,6 +156,11 @@ func (d *Decimal) setString(c *Context, s string) (Condition, error) { exps = append(exps, -exp) s = s[:i] + s[i+1:] } + for _, ch := range s { + if ch < '0' || ch > '9' { 
+ return 0, fmt.Errorf("parse mantissa: %s", s) + } + } if _, ok := d.Coeff.SetString(s, 10); !ok { return 0, fmt.Errorf("parse mantissa: %s", s) } diff --git a/vendor/github.com/cockroachdb/apd/v3/decomposer.go b/vendor/github.com/cockroachdb/apd/v3/decomposer.go index aa67961ba5..ca8724aa5f 100644 --- a/vendor/github.com/cockroachdb/apd/v3/decomposer.go +++ b/vendor/github.com/cockroachdb/apd/v3/decomposer.go @@ -34,9 +34,11 @@ import "fmt" // Implementations must return an error if a NaN or Infinity is attempted to be set while neither // are supported. type decomposer interface { - // Decompose returns the internal decimal state into parts. - // If the provided buf has sufficient capacity, buf may be returned as the coefficient with - // the value set and length set as appropriate. + // Decompose returns the internal decimal state into parts. If the provided buf + // has sufficient capacity, buf may be returned as the coefficient with the + // value set and length set as appropriate. Note that it does not act like + // Append-like functions and does not fill necessarily from the beginning of the + // buffer. Decompose(buf []byte) (form byte, negative bool, coefficient []byte, exponent int32) // Compose sets the internal decimal value from parts. If the value cannot be @@ -48,9 +50,11 @@ type decomposer interface { var _ decomposer = &Decimal{} -// Decompose returns the internal decimal state into parts. -// If the provided buf has sufficient capacity, buf may be returned as the coefficient with -// the value set and length set as appropriate. +// Decompose returns the internal decimal state into parts. If the provided buf +// has sufficient capacity, buf may be returned as the coefficient with the +// value set and length set as appropriate. Note that it does not act like +// Append-like functions and does not fill necessarily from the beginning of the +// buffer. 
func (d *Decimal) Decompose(buf []byte) (form byte, negative bool, coefficient []byte, exponent int32) { switch d.Form { default: @@ -69,7 +73,19 @@ func (d *Decimal) Decompose(buf []byte) (form byte, negative bool, coefficient [ // Finite form. negative = d.Negative exponent = d.Exponent - coefficient = d.Coeff.Bytes() + + sizeInBytes := (d.Coeff.BitLen() + 8 - 1) / 8 // math.Ceil(d.Coeff.BitLen()/8.0) + if cap(buf) >= sizeInBytes { + // It extends the buffer as the filling of bytes expects an already + // allocated slice. + buf = buf[:sizeInBytes] + + // We can fit the coefficient in the given buffer which prevents an + // allocation. + coefficient = d.Coeff.FillBytes(buf) + } else { + coefficient = d.Coeff.Bytes() + } return } diff --git a/vendor/github.com/cockroachdb/apd/v3/table.go b/vendor/github.com/cockroachdb/apd/v3/table.go index d23dbd716d..9a4875021e 100644 --- a/vendor/github.com/cockroachdb/apd/v3/table.go +++ b/vendor/github.com/cockroachdb/apd/v3/table.go @@ -52,6 +52,7 @@ func init() { } // NumDigits returns the number of decimal digits of d.Coeff. 
+// //gcassert:inline func (d *Decimal) NumDigits() int64 { return NumDigits(&d.Coeff) @@ -94,7 +95,7 @@ func NumDigits(b *BigInt) int64 { var a *BigInt if b.Sign() < 0 { var tmpA BigInt - a := &tmpA + a = &tmpA a.Abs(b) } else { a = b diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index ff91a37add..693730420c 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -237,7 +237,7 @@ func (r *Reader) initFields() error { if ent.Gname != "" { gname[ent.GID] = ent.Gname } else { - ent.Gname = uname[ent.GID] + ent.Gname = gname[ent.GID] } ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339) diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go index 22ce8f1df1..e966c156d4 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go @@ -95,7 +95,7 @@ type Conn struct { sigobj dbus.BusObject jobListener struct { - jobs map[dbus.ObjectPath]chan<- string + jobs map[dbus.ObjectPath][]chan<- string sync.Mutex } subStateSubscriber struct { @@ -207,7 +207,7 @@ func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { } c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) - c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + c.jobListener.jobs = make(map[dbus.ObjectPath][]chan<- string) // Setup the listeners on jobs so that we can get completions c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go index a64f0b3eae..490248b861 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go @@ -24,7 +24,7 @@ 
import ( "github.com/godbus/dbus/v5" ) -// Who specifies which process to send a signal to via the [KillUnitWithTarget]. +// Who specifies which process to send a signal to via the [Conn.KillUnitWithTarget]. type Who string const ( @@ -44,11 +44,10 @@ func (c *Conn) jobComplete(signal *dbus.Signal) { _ = dbus.Store(signal.Body, &id, &job, &unit, &result) c.jobListener.Lock() - out, ok := c.jobListener.jobs[job] - if ok { + for _, out := range c.jobListener.jobs[job] { out <- result - delete(c.jobListener.jobs, job) } + delete(c.jobListener.jobs, job) c.jobListener.Unlock() } @@ -65,7 +64,7 @@ func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args } if ch != nil { - c.jobListener.jobs[p] = ch + c.jobListener.jobs[p] = append(c.jobListener.jobs[p], ch) } // ignore error since 0 is fine if conversion fails @@ -194,10 +193,16 @@ func (c *Conn) StartTransientUnit(name string, mode string, properties []Propert // unique. mode is the same as in StartUnitContext, properties contains properties // of the unit. func (c *Conn) StartTransientUnitContext(ctx context.Context, name string, mode string, properties []Property, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) + return c.StartTransientUnitAux(ctx, name, mode, properties, make([]PropertyCollection, 0), ch) } -// Deprecated: use [KillUnitWithTarget] instead. +// StartTransientUnitAux is the same as StartTransientUnitContext but allows passing +// auxiliary units in the aux parameter. +func (c *Conn) StartTransientUnitAux(ctx context.Context, name string, mode string, properties []Property, aux []PropertyCollection, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, aux) +} + +// Deprecated: use [Conn.KillUnitWithTarget] instead. 
func (c *Conn) KillUnit(name string, signal int32) { c.KillUnitContext(context.Background(), name, signal) } @@ -205,7 +210,7 @@ func (c *Conn) KillUnit(name string, signal int32) { // KillUnitContext takes the unit name and a UNIX signal number to send. // All of the unit's processes are killed. // -// Deprecated: use [KillUnitWithTarget] instead, with target argument set to [All]. +// Deprecated: use [Conn.KillUnitWithTarget] instead, with target argument set to [All]. func (c *Conn) KillUnitContext(ctx context.Context, name string, signal int32) { _ = c.KillUnitWithTarget(ctx, name, All, signal) } @@ -398,30 +403,33 @@ type UnitStatus struct { type storeFunc func(retvalues ...any) error -func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { - result := make([][]any, 0) - err := f(&result) - if err != nil { - return nil, err +// convertSlice converts a []any result into a slice of the target type T +// using dbus.Store to handle the type conversion. +func convertSlice[T any](result []any) ([]T, error) { + converted := make([]T, len(result)) + convertedInterface := make([]any, len(converted)) + for i := range converted { + convertedInterface[i] = &converted[i] } - resultInterface := make([]any, len(result)) - for i := range result { - resultInterface[i] = result[i] + err := dbus.Store(result, convertedInterface...) + if err != nil { + return nil, err } - status := make([]UnitStatus, len(result)) - statusInterface := make([]any, len(status)) - for i := range status { - statusInterface[i] = &status[i] - } + return converted, nil +} - err = dbus.Store(resultInterface, statusInterface...) +// storeSlice fetches D-Bus array results via the provided storeFunc +// and converts them into a slice of the target type T. 
+func storeSlice[T any](f storeFunc) ([]T, error) { + var result []any + err := f(&result) if err != nil { return nil, err } - return status, nil + return convertSlice[T](result) } // GetUnitByPID returns the unit object path of the unit a process ID @@ -458,7 +466,7 @@ func (c *Conn) ListUnits() ([]UnitStatus, error) { // Also note that a unit is only loaded if it is active and/or enabled. // Units that are both disabled and inactive will thus not be returned. func (c *Conn) ListUnitsContext(ctx context.Context) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnits", 0).Store) + return storeSlice[UnitStatus](c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnits", 0).Store) } // Deprecated: use ListUnitsFilteredContext instead. @@ -469,7 +477,7 @@ func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { // ListUnitsFilteredContext returns an array with units filtered by state. // It takes a list of units' statuses to filter. func (c *Conn) ListUnitsFilteredContext(ctx context.Context, states []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) + return storeSlice[UnitStatus](c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) } // Deprecated: use ListUnitsByPatternsContext instead. @@ -482,7 +490,7 @@ func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitSt // Note that units may be known by multiple names at the same time, // and hence there might be more unit names loaded than actual units behind them. 
func (c *Conn) ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) + return storeSlice[UnitStatus](c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) } // Deprecated: use ListUnitsByNamesContext instead. @@ -497,7 +505,7 @@ func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { // // Requires systemd v230 or higher. func (c *Conn) ListUnitsByNamesContext(ctx context.Context, units []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) + return storeSlice[UnitStatus](c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) } type UnitFile struct { @@ -505,32 +513,6 @@ type UnitFile struct { Type string } -func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { - result := make([][]any, 0) - err := f(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]any, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - files := make([]UnitFile, len(result)) - fileInterface := make([]any, len(files)) - for i := range files { - fileInterface[i] = &files[i] - } - - err = dbus.Store(resultInterface, fileInterface...) - if err != nil { - return nil, err - } - - return files, nil -} - // Deprecated: use ListUnitFilesContext instead. func (c *Conn) ListUnitFiles() ([]UnitFile, error) { return c.ListUnitFilesContext(context.Background()) @@ -538,7 +520,7 @@ func (c *Conn) ListUnitFiles() ([]UnitFile, error) { // ListUnitFilesContext returns an array of all available units on disk. 
func (c *Conn) ListUnitFilesContext(ctx context.Context) ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) + return storeSlice[UnitFile](c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) } // Deprecated: use ListUnitFilesByPatternsContext instead. @@ -548,7 +530,7 @@ func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]Un // ListUnitFilesByPatternsContext returns an array of all available units on disk matched the patterns. func (c *Conn) ListUnitFilesByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) + return storeSlice[UnitFile](c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) } type LinkUnitFileChange EnableUnitFileChange @@ -576,29 +558,7 @@ func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUn // or unlink), the file name of the symlink and the destination of the // symlink. func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { - result := make([][]any, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]any, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]LinkUnitFileChange, len(result)) - changesInterface := make([]any, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) 
- if err != nil { - return nil, err - } - - return changes, nil + return storeSlice[LinkUnitFileChange](c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store) } // Deprecated: use EnableUnitFilesContext instead. @@ -624,25 +584,14 @@ func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, // symlink. func (c *Conn) EnableUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { var carries_install_info bool + var result []any - result := make([][]any, 0) err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) if err != nil { return false, nil, err } - resultInterface := make([]any, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]EnableUnitFileChange, len(result)) - changesInterface := make([]any, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) + changes, err := convertSlice[EnableUnitFileChange](result) if err != nil { return false, nil, err } @@ -674,29 +623,7 @@ func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFile // symlink or unlink), the file name of the symlink and the destination of the // symlink. 
func (c *Conn) DisableUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]DisableUnitFileChange, error) { - result := make([][]any, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]any, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]DisableUnitFileChange, len(result)) - changesInterface := make([]any, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil + return storeSlice[DisableUnitFileChange](c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store) } type DisableUnitFileChange struct { @@ -720,29 +647,7 @@ func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUn // runtime only (true, /run/systemd/..), or persistently (false, // /etc/systemd/..). func (c *Conn) MaskUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { - result := make([][]any, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]any, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]MaskUnitFileChange, len(result)) - changesInterface := make([]any, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) 
- if err != nil { - return nil, err - } - - return changes, nil + return storeSlice[MaskUnitFileChange](c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store) } type MaskUnitFileChange struct { @@ -764,29 +669,7 @@ func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileCh // for runtime only (true, /run/systemd/..), or persistently (false, // /etc/systemd/..). func (c *Conn) UnmaskUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]UnmaskUnitFileChange, error) { - result := make([][]any, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]any, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]UnmaskUnitFileChange, len(result)) - changesInterface := make([]any, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil + return storeSlice[UnmaskUnitFileChange](c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store) } type UnmaskUnitFileChange struct { @@ -832,35 +715,11 @@ func (c *Conn) ListJobs() ([]JobStatus, error) { // ListJobsContext returns an array with all currently queued jobs. 
func (c *Conn) ListJobsContext(ctx context.Context) ([]JobStatus, error) { - return c.listJobsInternal(ctx) -} - -func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) { - result := make([][]any, 0) - if err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListJobs", 0).Store(&result); err != nil { - return nil, err - } - - resultInterface := make([]any, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - status := make([]JobStatus, len(result)) - statusInterface := make([]any, len(status)) - for i := range status { - statusInterface[i] = &status[i] - } - - if err := dbus.Store(resultInterface, statusInterface...); err != nil { - return nil, err - } - - return status, nil + return storeSlice[JobStatus](c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListJobs", 0).Store) } // FreezeUnit freezes the cgroup associated with the unit. -// Note that FreezeUnit and [ThawUnit] are only supported on systems running with cgroup v2. +// Note that FreezeUnit and [Conn.ThawUnit] are only supported on systems running with cgroup v2. 
func (c *Conn) FreezeUnit(ctx context.Context, unit string) error { return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.FreezeUnit", 0, unit).Store() } diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/set.go index 17c5d48565..c0b8fde1fc 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/set.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/set.go @@ -14,28 +14,43 @@ package dbus +import ( + "sync" +) + type set struct { data map[string]bool + mu sync.Mutex } func (s *set) Add(value string) { + s.mu.Lock() + defer s.mu.Unlock() s.data[value] = true } func (s *set) Remove(value string) { + s.mu.Lock() + defer s.mu.Unlock() delete(s.data, value) } func (s *set) Contains(value string) (exists bool) { + s.mu.Lock() + defer s.mu.Unlock() _, exists = s.data[value] return } func (s *set) Length() int { + s.mu.Lock() + defer s.mu.Unlock() return len(s.data) } func (s *set) Values() (values []string) { + s.mu.Lock() + defer s.mu.Unlock() for val := range s.data { values = append(values, val) } @@ -43,5 +58,5 @@ func (s *set) Values() (values []string) { } func newSet() *set { - return &set{make(map[string]bool)} + return &set{data: make(map[string]bool)} } diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go index f0f6aad9d1..fe06f2fceb 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go @@ -15,6 +15,7 @@ package dbus import ( + "context" "errors" "log" "time" @@ -94,16 +95,26 @@ func (c *Conn) dispatch() { }() } -// SubscribeUnits returns two unbuffered channels which will receive all changed units every -// interval. Deleted units are sent as nil. +// Deprecated: use SubscribeUnitsContext instead. 
func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { - return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) + return c.SubscribeUnitsContext(context.Background(), interval) +} + +// SubscribeUnitsContext returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. +func (c *Conn) SubscribeUnitsContext(ctx context.Context, interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustomContext(ctx, interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) } -// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// Deprecated: use SubscribeUnitsCustomContext instead. +func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustomContext(context.Background(), interval, buffer, isChanged, filterUnit) +} + +// SubscribeUnitsCustomContext is like [Conn.SubscribeUnitsContext] but lets you specify the buffer // size of the channels, the comparison function for detecting changes and a filter // function for cutting down on the noise that your channel receives. 
-func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { +func (c *Conn) SubscribeUnitsCustomContext(ctx context.Context, interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { old := make(map[string]*UnitStatus) statusChan := make(chan map[string]*UnitStatus, buffer) errChan := make(chan error, buffer) @@ -112,7 +123,7 @@ func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChange for { timerChan := time.After(interval) - units, err := c.ListUnits() + units, err := c.ListUnitsContext(ctx) if err == nil { cur := make(map[string]*UnitStatus) for i := range units { @@ -145,7 +156,14 @@ func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChange errChan <- err } - <-timerChan + select { + case <-timerChan: + continue + case <-ctx.Done(): + close(statusChan) + close(errChan) + return + } } }() diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go index dbe4aa887b..173ca37287 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go @@ -15,6 +15,7 @@ package dbus import ( + "context" "time" ) @@ -29,16 +30,21 @@ func (s *SubscriptionSet) filter(unit string) bool { return !s.Contains(unit) } -// Subscribe starts listening for dbus events for all of the units in the set. +// SubscribeContext starts listening for dbus events for all of the units in the set. // Returns channels identical to conn.SubscribeUnits. 
-func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { +func (s *SubscriptionSet) SubscribeContext(ctx context.Context) (<-chan map[string]*UnitStatus, <-chan error) { // TODO: Make fully evented by using systemd 209 with properties changed values - return s.conn.SubscribeUnitsCustom(time.Second, 0, + return s.conn.SubscribeUnitsCustomContext(ctx, time.Second, 0, mismatchUnitStatus, func(unit string) bool { return s.filter(unit) }, ) } +// Deprecated: use SubscribeContext instead. +func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { + return s.SubscribeContext(context.Background()) +} + // NewSubscriptionSet returns a new subscription set. func (c *Conn) NewSubscriptionSet() *SubscriptionSet { return &SubscriptionSet{newSet(), c} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go index a3a45af317..1d22f0a891 100644 --- a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go @@ -1,4 +1,4 @@ -// Copyright 2020-2022 The Decred developers +// Copyright 2020-2026 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -63,8 +63,8 @@ type KoblitzCurve struct { // bigAffineToJacobian takes an affine point (x, y) as big integers and converts // it to Jacobian point with Z=1. func bigAffineToJacobian(x, y *big.Int, result *JacobianPoint) { - result.X.SetByteSlice(x.Bytes()) - result.Y.SetByteSlice(y.Bytes()) + result.X.SetByteSlice(new(big.Int).Mod(x, curveParams.P).Bytes()) + result.Y.SetByteSlice(new(big.Int).Mod(y, curveParams.P).Bytes()) result.Z.SetInt(1) } @@ -91,6 +91,15 @@ func (curve *KoblitzCurve) Params() *elliptic.CurveParams { // // This is part of the elliptic.Curve interface implementation. 
This function // differs from the crypto/elliptic algorithm since a = 0 not -3. +// +// NOTE: Unfortunately, the Go stdlib elliptic.Curve interface requires that the +// conventional point at infinity (0, 0) is not considered on the curve which is +// contrary to what is typically expected since the point at infinity is in fact +// is a valid curve point. +// +// Deprecated: The standard library elliptic.Curve interface is now deprecated +// and callers should interact with the safer, and much faster, specialized +// methods instead. func (curve *KoblitzCurve) IsOnCurve(x, y *big.Int) bool { // Convert big ints to a Jacobian point for faster arithmetic. var point JacobianPoint @@ -101,6 +110,14 @@ func (curve *KoblitzCurve) IsOnCurve(x, y *big.Int) bool { // Add returns the sum of (x1,y1) and (x2,y2). // // This is part of the elliptic.Curve interface implementation. +// +// NOTE: Per the documentation of the elliptic.Curve interface, the behavior +// when the input is not a point on the curve is undefined. Callers must ensure +// they are calling this method with valid points. +// +// Deprecated: The standard library elliptic.Curve interface is now deprecated +// and callers should interact with the safer, and much faster, specialized +// methods instead. func (curve *KoblitzCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { // The point at infinity is the identity according to the group law for // elliptic curve cryptography. Thus, ∞ + P = P and P + ∞ = P. @@ -124,6 +141,14 @@ func (curve *KoblitzCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { // Double returns 2*(x1,y1). // // This is part of the elliptic.Curve interface implementation. +// +// NOTE: Per the documentation of the elliptic.Curve interface, the behavior +// when the input is not a point on the curve is undefined. Callers must ensure +// they are calling this method with valid points. 
+// +// Deprecated: The standard library elliptic.Curve interface is now deprecated +// and callers should interact with the safer, and much faster, specialized +// methods instead. func (curve *KoblitzCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { if y1.Sign() == 0 { return new(big.Int), new(big.Int) @@ -156,6 +181,14 @@ func moduloReduce(k []byte) []byte { // ScalarMult returns k*(bx, by) where k is a big endian integer. // // This is part of the elliptic.Curve interface implementation. +// +// NOTE: Per the documentation of the elliptic.Curve interface, the behavior +// when the input is not a point on the curve is undefined. Callers must ensure +// they are calling this method with valid points. +// +// Deprecated: The standard library elliptic.Curve interface is now deprecated +// and callers should interact with the safer, and much faster, specialized +// methods instead. func (curve *KoblitzCurve) ScalarMult(bx, by *big.Int, k []byte) (*big.Int, *big.Int) { // Convert the affine coordinates from big integers to Jacobian points, // do the multiplication in Jacobian projective space, and convert the @@ -172,6 +205,10 @@ func (curve *KoblitzCurve) ScalarMult(bx, by *big.Int, k []byte) (*big.Int, *big // big endian integer. // // This is part of the elliptic.Curve interface implementation. +// +// Deprecated: The standard library elliptic.Curve interface is now deprecated +// and callers should interact with the safer, and much faster, specialized +// methods instead. func (curve *KoblitzCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { // Perform the multiplication and convert the Jacobian point back to affine // big.Ints. @@ -250,6 +287,10 @@ var secp256k1 = &KoblitzCurve{ } // S256 returns an elliptic.Curve which implements secp256k1. +// +// Deprecated: The standard library elliptic.Curve interface is now deprecated +// and callers should interact with the safer, and much faster, specialized +// methods instead. 
func S256() *KoblitzCurve { return secp256k1 } diff --git a/vendor/github.com/digitorus/pkcs7/.gitignore b/vendor/github.com/digitorus/pkcs7/.gitignore index daf913b1b3..2910bc0061 100644 --- a/vendor/github.com/digitorus/pkcs7/.gitignore +++ b/vendor/github.com/digitorus/pkcs7/.gitignore @@ -22,3 +22,6 @@ _testmain.go *.exe *.test *.prof + +# Coverage reports +coverage.out diff --git a/vendor/github.com/digitorus/pkcs7/.golangci.yml b/vendor/github.com/digitorus/pkcs7/.golangci.yml new file mode 100644 index 0000000000..78f26752df --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/.golangci.yml @@ -0,0 +1,66 @@ +version: "2" +linters: + enable: + - copyloopvar + - goconst + - gocyclo + - gosec + - misspell + - nolintlint + - prealloc + - revive + - unconvert + - unparam + settings: + gocyclo: + min-complexity: 90 # Allow very high complexity for crypto functions and test utilities + gosec: + excludes: + - G401 # Allow weak crypto algorithms as this is a crypto library + - G501 # Allow import of blacklisted crypto/md5 + - G505 # Allow import of blacklisted crypto/sha1 + - G502 # Allow import of blacklisted crypto/des + - G405 # Allow use of weak cryptographic primitive + - G306 # Allow WriteFile permissions for test files + - G204 # Allow subprocess launches in tests (OpenSSL integration) + - G115 # Allow integer overflow conversion + revive: + rules: + - name: exported + disabled: true # Disable exported rule for crypto library + staticcheck: + checks: + - all + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + # Allow specific deprecated crypto algorithms that are needed for PKCS#7 compatibility + - linters: + - staticcheck + text: SA1019.*crypto/dsa.*has been deprecated + - linters: + - staticcheck + text: SA1019.*crypto/sha1.*is deprecated + - linters: + - staticcheck + text: SA1019.*crypto/md5.*is deprecated + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: 
+ - gofmt + - gofumpt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/digitorus/pkcs7/Makefile b/vendor/github.com/digitorus/pkcs7/Makefile index 07c78e14c0..f8959faf0f 100644 --- a/vendor/github.com/digitorus/pkcs7/Makefile +++ b/vendor/github.com/digitorus/pkcs7/Makefile @@ -1,7 +1,10 @@ -all: vet staticcheck test +all: vet test test: - GODEBUG=x509sha1=1 go test -covermode=count -coverprofile=coverage.out . + go test -covermode=count -coverprofile=coverage.out . + +test-legacy: + GODEBUG=x509sha1=1 go test -tags=legacy -covermode=count -coverprofile=coverage.out . showcoverage: test go tool cover -html=coverage.out @@ -9,12 +12,8 @@ showcoverage: test vet: go vet . -lint: - golint . - -staticcheck: - staticcheck . +golangci-lint: + golangci-lint run gettools: - go get -u honnef.co/go/tools/... - go get -u golang.org/x/lint/golint + go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest diff --git a/vendor/github.com/digitorus/pkcs7/ber.go b/vendor/github.com/digitorus/pkcs7/ber.go index 31963b119f..3fd5a1b8cd 100644 --- a/vendor/github.com/digitorus/pkcs7/ber.go +++ b/vendor/github.com/digitorus/pkcs7/ber.go @@ -15,7 +15,7 @@ type asn1Structured struct { } func (s asn1Structured) EncodeTo(out *bytes.Buffer) error { - //fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes) + // fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes) inner := new(bytes.Buffer) for _, obj := range s.content { err := obj.EncodeTo(inner) @@ -24,7 +24,7 @@ func (s asn1Structured) EncodeTo(out *bytes.Buffer) error { } } out.Write(s.tagBytes) - encodeLength(out, inner.Len()) + _ = encodeLength(out, inner.Len()) out.Write(inner.Bytes()) return nil } @@ -43,8 +43,8 @@ func (p asn1Primitive) EncodeTo(out *bytes.Buffer) error { if err = encodeLength(out, p.length); err != nil { return err } - //fmt.Printf("%s--> tag: % X length: %d\n", 
strings.Repeat("| ", encodeIndent), p.tagBytes, p.length) - //fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content)) + // fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length) + // fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content)) out.Write(p.content) return nil @@ -54,14 +54,14 @@ func ber2der(ber []byte) ([]byte, error) { if len(ber) == 0 { return nil, errors.New("ber2der: input ber is empty") } - //fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber)) + // fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber)) out := new(bytes.Buffer) obj, _, err := readObject(ber, 0) if err != nil { return nil, err } - obj.EncodeTo(out) + _ = obj.EncodeTo(out) return out.Bytes(), nil } @@ -98,12 +98,12 @@ func lengthLength(i int) (numBytes int) { // added to 0x80. The length is encoded in big endian encoding follow after // // Examples: -// length | byte 1 | bytes n -// 0 | 0x00 | - -// 120 | 0x78 | - -// 200 | 0x81 | 0xC8 -// 500 | 0x82 | 0x01 0xF4 // +// length | byte 1 | bytes n +// 0 | 0x00 | - +// 120 | 0x78 | - +// 200 | 0x81 | 0xC8 +// 500 | 0x82 | 0x01 0xF4 func encodeLength(out *bytes.Buffer, length int) (err error) { if length >= 128 { l := lengthLength(length) @@ -146,7 +146,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { } } // jvehent 20170227: this doesn't appear to be used anywhere... 
- //tag = tag*128 + ber[offset] - 0x80 + // tag = tag*128 + ber[offset] - 0x80 offset++ if offset >= berLen { return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") @@ -179,17 +179,17 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { if numberOfBytes == 4 && (int)(ber[offset]) > 0x7F { return nil, 0, errors.New("ber2der: BER tag length is negative") } - if offset + numberOfBytes > berLen { + if offset+numberOfBytes > berLen { // == condition is not checked here, this allows for a more descreptive error when the parsed length is // compared with the remaining available bytes (`contentEnd > berLen`) return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") } - if (int)(ber[offset]) == 0x0 && (numberOfBytes == 1 || ber[offset+1] <= 0x7F) { + if (int)(ber[offset]) == 0x0 && (numberOfBytes == 1 || ber[offset+1] <= 0x7F) { // `numberOfBytes == 1` is an important conditional to avoid a potential out of bounds panic with `ber[offset+1]` return nil, 0, errors.New("ber2der: BER tag length has leading zero") } debugprint("--> (compute length) indicator byte: %x\n", l) - //debugprint("--> (compute length) length bytes: %x\n", ber[offset:offset+numberOfBytes]) + // debugprint("--> (compute length) length bytes: %x\n", ber[offset:offset+numberOfBytes]) for i := 0; i < numberOfBytes; i++ { length = length*256 + (int)(ber[offset]) offset++ @@ -202,14 +202,14 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { if length < 0 { return nil, 0, errors.New("ber2der: invalid negative value found in BER tag length") } - //fmt.Printf("--> length : %d\n", length) + // fmt.Printf("--> length : %d\n", length) contentEnd := offset + length if contentEnd > berLen { return nil, 0, errors.New("ber2der: BER tag length is more than available data") } debugprint("--> content start : %d\n", offset) debugprint("--> content end : %d\n", contentEnd) - //debugprint("--> content : %x\n", 
ber[offset:contentEnd]) + // debugprint("--> content : %x\n", ber[offset:contentEnd]) var obj asn1Object if indefinite && kind == 0 { return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding") @@ -257,7 +257,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { } func isIndefiniteTermination(ber []byte, offset int) (bool, error) { - if len(ber) - offset < 2 { + if len(ber)-offset < 2 { return false, errors.New("ber2der: Invalid BER format") } @@ -265,5 +265,5 @@ func isIndefiniteTermination(ber []byte, offset int) (bool, error) { } func debugprint(format string, a ...interface{}) { - //fmt.Printf(format, a) + // fmt.Printf(format, a) } diff --git a/vendor/github.com/digitorus/pkcs7/encrypt.go b/vendor/github.com/digitorus/pkcs7/encrypt.go index 6b2655708c..883fd81aac 100644 --- a/vendor/github.com/digitorus/pkcs7/encrypt.go +++ b/vendor/github.com/digitorus/pkcs7/encrypt.go @@ -256,7 +256,7 @@ func encryptAESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, e // value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the // value before calling Encrypt(). 
For example: // -// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM +// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM // // TODO(fullsailor): Add support for encrypting content with other algorithms func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { @@ -292,10 +292,7 @@ func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { if err != nil { return nil, err } - ias, err := cert2issuerAndSerial(recipient) - if err != nil { - return nil, err - } + ias := cert2issuerAndSerial(recipient) info := recipientInfo{ Version: 0, IssuerAndSerialNumber: ias, diff --git a/vendor/github.com/digitorus/pkcs7/sign.go b/vendor/github.com/digitorus/pkcs7/sign.go index 6cfd2ab9c2..e5601f9903 100644 --- a/vendor/github.com/digitorus/pkcs7/sign.go +++ b/vendor/github.com/digitorus/pkcs7/sign.go @@ -84,7 +84,9 @@ func marshalAttributes(attrs []attribute) ([]byte, error) { // Remove the leading sequence octets var raw asn1.RawValue - asn1.Unmarshal(encodedAttributes, &raw) + if _, err := asn1.Unmarshal(encodedAttributes, &raw); err != nil { + return nil, err + } return raw.Bytes, nil } @@ -359,14 +361,14 @@ func verifyPartialChain(cert *x509.Certificate, parents []*x509.Certificate) err return verifyPartialChain(parents[0], parents[1:]) } -func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) { +func cert2issuerAndSerial(cert *x509.Certificate) issuerAndSerial { var ias issuerAndSerial // The issuer RDNSequence has to match exactly the sequence in the certificate // We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer} ias.SerialNumber = cert.SerialNumber - return ias, nil + return ias } // signs the DER encoded form of the attributes with the private key @@ -422,7 +424,7 @@ func marshalCertificates(certs []*x509.Certificate) rawCertificates { // RawContent, we have to encode it into the RawContent. 
If its missing, // then `asn1.Marshal()` will strip out the certificate wrapper instead. func marshalCertificateBytes(certs []byte) (rawCertificates, error) { - var val = asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true} + val := asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true} b, err := asn1.Marshal(val) if err != nil { return rawCertificates{}, err diff --git a/vendor/github.com/digitorus/pkcs7/verify.go b/vendor/github.com/digitorus/pkcs7/verify.go index d0e4f0429d..f9a9daf798 100644 --- a/vendor/github.com/digitorus/pkcs7/verify.go +++ b/vendor/github.com/digitorus/pkcs7/verify.go @@ -26,12 +26,12 @@ func (p7 *PKCS7) Verify() (err error) { // otherwise. func (p7 *PKCS7) VerifyWithChain(truststore *x509.CertPool) (err error) { intermediates := x509.NewCertPool() - for _, cert := range(p7.Certificates) { + for _, cert := range p7.Certificates { intermediates.AddCert(cert) } opts := x509.VerifyOptions{ - Roots: truststore, + Roots: truststore, Intermediates: intermediates, } @@ -46,14 +46,14 @@ func (p7 *PKCS7) VerifyWithChain(truststore *x509.CertPool) (err error) { // attribute. func (p7 *PKCS7) VerifyWithChainAtTime(truststore *x509.CertPool, currentTime time.Time) (err error) { intermediates := x509.NewCertPool() - for _, cert := range(p7.Certificates) { + for _, cert := range p7.Certificates { intermediates.AddCert(cert) } opts := x509.VerifyOptions{ - Roots: truststore, + Roots: truststore, Intermediates: intermediates, - CurrentTime: currentTime, + CurrentTime: currentTime, } return p7.VerifyWithOpts(opts) @@ -62,7 +62,7 @@ func (p7 *PKCS7) VerifyWithChainAtTime(truststore *x509.CertPool, currentTime ti // VerifyWithOpts checks the signatures of a PKCS7 object. // // It accepts x509.VerifyOptions as a parameter. 
-// This struct contains a root certificate pool, an intermedate certificate pool, +// This struct contains a root certificate pool, an intermediate certificate pool, // an optional list of EKUs, and an optional time that certificates should be // checked as being valid during. @@ -239,7 +239,9 @@ func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, o func parseSignedData(data []byte) (*PKCS7, error) { var sd signedData - asn1.Unmarshal(data, &sd) + if _, err := asn1.Unmarshal(data, &sd); err != nil { + return nil, err + } certs, err := sd.Certificates.Parse() if err != nil { return nil, err @@ -273,7 +275,8 @@ func parseSignedData(data []byte) (*PKCS7, error) { Certificates: certs, CRLs: sd.CRLs, Signers: sd.SignerInfos, - raw: sd}, nil + raw: sd, + }, nil } // MessageDigestMismatchError is returned when the signer data digest does not @@ -317,15 +320,7 @@ func getSignatureAlgorithm(digestEncryption, digest pkix.AlgorithmIdentifier) (x } case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSA), digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSASHA1): - switch { - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): - return x509.DSAWithSHA1, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): - return x509.DSAWithSHA256, nil - default: - return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", - digest.Algorithm.String(), digestEncryption.Algorithm.String()) - } + return -1, errors.New("pkcs7: DSA signature verification is not supported") case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP256), digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP384), digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP521): diff --git a/vendor/github.com/digitorus/pkcs7/verify_test_dsa.go b/vendor/github.com/digitorus/pkcs7/verify_test_dsa.go deleted file mode 100644 index 1eb05bc3ea..0000000000 --- a/vendor/github.com/digitorus/pkcs7/verify_test_dsa.go +++ /dev/null @@ -1,182 
+0,0 @@ -// +build go1.11 go1.12 go1.13 go1.14 go1.15 - -package pkcs7 - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "os" - "os/exec" - "testing" -) - -func TestVerifyEC2(t *testing.T) { - fixture := UnmarshalDSATestFixture(EC2IdentityDocumentFixture) - p7, err := Parse(fixture.Input) - if err != nil { - t.Errorf("Parse encountered unexpected error: %v", err) - } - p7.Certificates = []*x509.Certificate{fixture.Certificate} - if err := p7.Verify(); err != nil { - t.Errorf("Verify failed with error: %v", err) - } -} - -var EC2IdentityDocumentFixture = ` ------BEGIN PKCS7----- -MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCA -JIAEggGmewogICJwcml2YXRlSXAiIDogIjE3Mi4zMC4wLjI1MiIsCiAgImRldnBh -eVByb2R1Y3RDb2RlcyIgOiBudWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1 -cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAiMjAxMC0wOC0zMSIsCiAgImluc3Rh -bmNlSWQiIDogImktZjc5ZmU1NmMiLAogICJiaWxsaW5nUHJvZHVjdHMiIDogbnVs -bCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5taWNybyIsCiAgImFjY291bnRJZCIg -OiAiMTIxNjU5MDE0MzM0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLWZjZTNjNjk2IiwK -ICAicGVuZGluZ1RpbWUiIDogIjIwMTYtMDQtMDhUMDM6MDE6MzhaIiwKICAiYXJj -aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJy -YW1kaXNrSWQiIDogbnVsbCwKICAicmVnaW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAA -AAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5n -dG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2Vi -IFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0B -CQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNjA0MDgwMzAxNDRaMCMG -CSqGSIb3DQEJBDEWBBTuUc28eBXmImAautC+wOjqcFCBVjAJBgcqhkjOOAQDBC8w -LQIVAKA54NxGHWWCz5InboDmY/GHs33nAhQ6O/ZI86NwjA9Vz3RNMUJrUPU5tAAA -AAAAAA== ------END PKCS7----- ------BEGIN CERTIFICATE----- -MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw -FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD -VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z -ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u 
-IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl -cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e -ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3 -VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P -hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j -k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U -hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF -lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf -MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW -MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw -vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw -7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K ------END CERTIFICATE-----` - -func TestDSASignWithOpenSSLAndVerify(t *testing.T) { - content := []byte(` -A ship in port is safe, -but that's not what ships are built for. --- Grace Hopper`) - // write the content to a temp file - tmpContentFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_content") - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(tmpContentFile.Name(), content, 0755) - - // write the signer cert to a temp file - tmpSignerCertFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signer") - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(tmpSignerCertFile.Name(), dsaPublicCert, 0755) - - // write the signer key to a temp file - tmpSignerKeyFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_key") - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(tmpSignerKeyFile.Name(), dsaPrivateKey, 0755) - - tmpSignedFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signature") - if err != nil { - t.Fatal(err) - } - // call openssl to sign the content - opensslCMD := exec.Command("openssl", "smime", "-sign", "-nodetach", "-md", "sha1", - "-in", tmpContentFile.Name(), "-out", tmpSignedFile.Name(), - "-signer", 
tmpSignerCertFile.Name(), "-inkey", tmpSignerKeyFile.Name(), - "-certfile", tmpSignerCertFile.Name(), "-outform", "PEM") - out, err := opensslCMD.CombinedOutput() - if err != nil { - t.Fatalf("openssl command failed with %s: %s", err, out) - } - - // verify the signed content - pemSignature, err := ioutil.ReadFile(tmpSignedFile.Name()) - if err != nil { - t.Fatal(err) - } - fmt.Printf("%s\n", pemSignature) - derBlock, _ := pem.Decode(pemSignature) - if derBlock == nil { - t.Fatalf("failed to read DER block from signature PEM %s", tmpSignedFile.Name()) - } - p7, err := Parse(derBlock.Bytes) - if err != nil { - t.Fatalf("Parse encountered unexpected error: %v", err) - } - if err := p7.Verify(); err != nil { - t.Fatalf("Verify failed with error: %v", err) - } - os.Remove(tmpSignerCertFile.Name()) // clean up - os.Remove(tmpSignerKeyFile.Name()) // clean up - os.Remove(tmpContentFile.Name()) // clean up -} - -var dsaPrivateKey = []byte(`-----BEGIN PRIVATE KEY----- -MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9KnC7s5Of2EbdS -PO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVCl -pJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith -1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA9+GghdabPd7L -vKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3 -zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImo -g9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoEFgIUfW4aPdQBn9gJZp2KuNpzgHzvfsE= ------END PRIVATE KEY-----`) - -var dsaPublicCert = []byte(`-----BEGIN CERTIFICATE----- -MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV -bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD -VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du -MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r -bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE -ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC -AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD 
-Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE -exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii -Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4 -V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI -puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl -nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp -rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt -1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT -ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G -CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688 -qzy/7yePTlhlpj+ahMM= ------END CERTIFICATE-----`) - -type DSATestFixture struct { - Input []byte - Certificate *x509.Certificate -} - -func UnmarshalDSATestFixture(testPEMBlock string) DSATestFixture { - var result DSATestFixture - var derBlock *pem.Block - var pemBlock = []byte(testPEMBlock) - for { - derBlock, pemBlock = pem.Decode(pemBlock) - if derBlock == nil { - break - } - switch derBlock.Type { - case "PKCS7": - result.Input = derBlock.Bytes - case "CERTIFICATE": - result.Certificate, _ = x509.ParseCertificate(derBlock.Bytes) - } - } - - return result -} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index fab3ed4cba..246f23e983 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -1,3 +1,6 @@ +// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: +//go:build go1.24 + package configfile import ( @@ -6,6 +9,7 @@ import ( "errors" "fmt" "io" + "maps" "os" "path/filepath" "strings" @@ -374,9 +378,7 @@ func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) { 
auths := make(map[string]types.AuthConfig) addAll := func(from map[string]types.AuthConfig) { - for reg, ac := range from { - auths[reg] = ac - } + maps.Copy(auths, from) } defaultStore := configFile.GetCredentialsStore("") diff --git a/vendor/github.com/emicklei/proto/CHANGES.md b/vendor/github.com/emicklei/proto/CHANGES.md index fa8f7b7e98..4df55c5cbc 100644 --- a/vendor/github.com/emicklei/proto/CHANGES.md +++ b/vendor/github.com/emicklei/proto/CHANGES.md @@ -1,3 +1,8 @@ +## v1.14.3 (2026-02-04) + +- handle double slash in single quotes (#152) (thanks AriehSchneier) +- Handle comments at the end of an array (#151) (thanks AriehSchneier) + ## v1.14.2 (2025-06-18) - fix parsing options for extensions (ISSUE #150) diff --git a/vendor/github.com/emicklei/proto/literals.go b/vendor/github.com/emicklei/proto/literals.go index e07ca73ada..eba994a49a 100644 --- a/vendor/github.com/emicklei/proto/literals.go +++ b/vendor/github.com/emicklei/proto/literals.go @@ -103,7 +103,16 @@ func (l *Literal) parse(p *Parser) error { } else { l.Comment.Merge(nc) } - // continue with remaining entries + // peek at next token to see if it's structural (indicating end of current context) + // if so, don't recurse, just return with comment set + nextPos, nextTok, nextLit := p.next() + if nextTok == tRIGHTSQUARE || nextTok == tRIGHTCURLY || nextTok == tSEMICOLON || nextTok == tCOMMA { + // put it back and return - let the caller handle these structural tokens + p.nextPut(nextPos, nextTok, nextLit) + return nil + } + // put it back and continue with remaining entries (could be another comment or a literal) + p.nextPut(nextPos, nextTok, nextLit) return l.parse(p) } if tok == tLEFTSQUARE { @@ -125,6 +134,25 @@ func (l *Literal) parse(p *Parser) error { if err := e.parse(p); err != nil { return err } + // if this is a comment-only literal, don't add it to array + // but keep reading to attach comment to next literal + if e.Comment != nil && e.Source == "" && e.Array == nil && e.OrderedMap == 
nil { + // check what comes next + _, tok, lit := p.next() + if tok == tRIGHTSQUARE { + // array ends with comment, just break + break + } + if tok == tCOMMA { + // comma after comment, continue to next element + continue + } + // put back the token + p.nextPut(pos, tok, lit) + // continue loop - next iteration will parse the real literal + // and the comment will already be in 'e' from the recursive parse() call + continue + } array = append(array, e) _, tok, lit := p.next() if tok == tCOMMA { @@ -133,6 +161,11 @@ func (l *Literal) parse(p *Parser) error { if tok == tRIGHTSQUARE { break } + // handle comments inside arrays + if tok == tCOMMENT { + p.nextPut(pos, tok, lit) + continue + } return p.unexpected(lit, ", or ]", l) } l.Array = array diff --git a/vendor/github.com/emicklei/proto/parser.go b/vendor/github.com/emicklei/proto/parser.go index 41ed461da9..6312b79843 100644 --- a/vendor/github.com/emicklei/proto/parser.go +++ b/vendor/github.com/emicklei/proto/parser.go @@ -129,6 +129,12 @@ func (p *Parser) next() (pos scanner.Position, tok token, lit string) { // pre: first single quote has been read func (p *Parser) nextSingleQuotedString() (pos scanner.Position, tok token, lit string) { + // Save current scanner mode and temporarily disable comment scanning + // to prevent // inside single quotes from being treated as comments + savedMode := p.scanner.Mode + p.scanner.Mode = scanner.ScanIdents | scanner.ScanFloats | scanner.ScanStrings | scanner.ScanRawStrings + defer func() { p.scanner.Mode = savedMode }() + var ch rune p.ignoreErrorsWhile(func() { ch = p.scanner.Scan() }) if ch == scanner.EOF { diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go index ee39b408e9..d3906bfbd5 100644 --- a/vendor/github.com/fatih/color/color.go +++ b/vendor/github.com/fatih/color/color.go @@ -19,15 +19,15 @@ var ( // set (regardless of its value). This is a global option and affects all // colors. 
For more control over each color block use the methods // DisableColor() individually. - NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" || - (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" || !stdoutIsTerminal() // Output defines the standard output of the print functions. By default, - // os.Stdout is used. - Output = colorable.NewColorableStdout() + // stdOut() is used. + Output = stdOut() - // Error defines a color supporting writer for os.Stderr. - Error = colorable.NewColorableStderr() + // Error defines the standard error of the print functions. By default, + // stdErr() is used. + Error = stdErr() // colorsCache is used to reduce the count of created Color objects and // allows to reuse already created objects with required Attribute. @@ -40,6 +40,33 @@ func noColorIsSet() bool { return os.Getenv("NO_COLOR") != "" } +// stdoutIsTerminal returns true if os.Stdout is a terminal. +// Returns false if os.Stdout is nil (e.g., when running as a Windows service). +func stdoutIsTerminal() bool { + if os.Stdout == nil { + return false + } + return isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) +} + +// stdOut returns a writer for color output. +// Returns io.Discard if os.Stdout is nil (e.g., when running as a Windows service). +func stdOut() io.Writer { + if os.Stdout == nil { + return io.Discard + } + return colorable.NewColorableStdout() +} + +// stdErr returns a writer for color error output. +// Returns io.Discard if os.Stderr is nil (e.g., when running as a Windows service). +func stdErr() io.Writer { + if os.Stderr == nil { + return io.Discard + } + return colorable.NewColorableStderr() +} + // Color defines a custom color object which is defined by SGR parameters. 
type Color struct { params []Attribute @@ -220,26 +247,30 @@ func (c *Color) unset() { // a low-level function, and users should use the higher-level functions, such // as color.Fprint, color.Print, etc. func (c *Color) SetWriter(w io.Writer) *Color { + _, _ = c.setWriter(w) + return c +} + +func (c *Color) setWriter(w io.Writer) (int, error) { if c.isNoColorSet() { - return c + return 0, nil } - fmt.Fprint(w, c.format()) - return c + return fmt.Fprint(w, c.format()) } // UnsetWriter resets all escape attributes and clears the output with the give // io.Writer. Usually should be called after SetWriter(). func (c *Color) UnsetWriter(w io.Writer) { - if c.isNoColorSet() { - return - } + _, _ = c.unsetWriter(w) +} - if NoColor { - return +func (c *Color) unsetWriter(w io.Writer) (int, error) { + if c.isNoColorSet() { + return 0, nil } - fmt.Fprintf(w, "%s[%dm", escape, Reset) + return fmt.Fprintf(w, "%s[%dm", escape, Reset) } // Add is used to chain SGR parameters. Use as many as parameters to combine @@ -255,10 +286,20 @@ func (c *Color) Add(value ...Attribute) *Color { // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - c.SetWriter(w) - defer c.UnsetWriter(w) + n, err = c.setWriter(w) + if err != nil { + return n, err + } + + nn, err := fmt.Fprint(w, a...) + n += nn + if err != nil { + return + } - return fmt.Fprint(w, a...) + nn, err = c.unsetWriter(w) + n += nn + return n, err } // Print formats using the default formats for its operands and writes to @@ -278,10 +319,20 @@ func (c *Color) Print(a ...interface{}) (n int, err error) { // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. 
func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - c.SetWriter(w) - defer c.UnsetWriter(w) + n, err = c.setWriter(w) + if err != nil { + return n, err + } + + nn, err := fmt.Fprintf(w, format, a...) + n += nn + if err != nil { + return + } - return fmt.Fprintf(w, format, a...) + nn, err = c.unsetWriter(w) + n += nn + return n, err } // Printf formats according to a format specifier and writes to standard output. @@ -475,27 +526,24 @@ func (c *Color) Equals(c2 *Color) bool { if c == nil || c2 == nil { return false } + if len(c.params) != len(c2.params) { return false } + counts := make(map[Attribute]int, len(c.params)) for _, attr := range c.params { - if !c2.attrExists(attr) { - return false - } + counts[attr]++ } - return true -} - -func (c *Color) attrExists(a Attribute) bool { - for _, attr := range c.params { - if attr == a { - return true + for _, attr := range c2.params { + if counts[attr] == 0 { + return false } + counts[attr]-- } - return false + return true } func boolPtr(v bool) *bool { diff --git a/vendor/github.com/fatih/color/color_windows.go b/vendor/github.com/fatih/color/color_windows.go index be01c558e5..97e5a765a5 100644 --- a/vendor/github.com/fatih/color/color_windows.go +++ b/vendor/github.com/fatih/color/color_windows.go @@ -9,6 +9,9 @@ import ( func init() { // Opt-in for ansi color support for current process. // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences + if os.Stdout == nil { + return + } var outMode uint32 out := windows.Handle(os.Stdout.Fd()) if err := windows.GetConsoleMode(out, &outMode); err != nil { diff --git a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml index 38cb9ae101..08081fbde5 100644 --- a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml +++ b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml @@ -1,104 +1,116 @@ -# Do not delete linter settings. 
Linters like gocritic can be enabled on the command line. - -linters-settings: - depguard: - rules: - prevent_unmaintained_packages: - list-mode: strict - files: - - $all - - "!$test" - allow: - - $gostd - - github.com/x448/float16 - deny: - - pkg: io/ioutil - desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" - dupl: - threshold: 100 - funlen: - lines: 100 - statements: 50 - goconst: - ignore-tests: true - min-len: 2 - min-occurrences: 3 - gocritic: - enabled-tags: - - diagnostic - - experimental - - opinionated - - performance - - style - disabled-checks: - - commentedOutCode - - dupImport # https://github.com/go-critic/go-critic/issues/845 - - ifElseChain - - octalLiteral - - paramTypeCombine - - whyNoLint - gofmt: - simplify: false - goimports: - local-prefixes: github.com/fxamacker/cbor - golint: - min-confidence: 0 - govet: - check-shadowing: true - lll: - line-length: 140 - maligned: - suggest-new: true - misspell: - locale: US - staticcheck: - checks: ["all"] - +version: "2" linters: - disable-all: true + default: none enable: - asciicheck - bidichk - depguard - errcheck - - exportloopref + - forbidigo - goconst - gocritic - gocyclo - - gofmt - - goimports - goprintffuncname - gosec - - gosimple - govet - ineffassign - misspell - nilerr - revive - staticcheck - - stylecheck - - typecheck - unconvert - unused - + settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: strict + files: + - $all + - '!$test' + allow: + - $gostd + - github.com/x448/float16 + deny: + - pkg: io/ioutil + desc: 'replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil' + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + min-len: 2 + min-occurrences: 3 + gocritic: + disabled-checks: + - commentedOutCode + - dupImport + - ifElseChain + - octalLiteral + - paramTypeCombine + - whyNoLint + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + 
- style + govet: + enable: + - shadow + lll: + line-length: 140 + misspell: + locale: US + staticcheck: + checks: + - all + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - path: decode.go + text: string ` overflows ` has (\d+) occurrences, make it a constant + - path: decode.go + text: string ` \(range is \[` has (\d+) occurrences, make it a constant + - path: decode.go + text: string `, ` has (\d+) occurrences, make it a constant + - path: decode.go + text: string ` overflows Go's int64` has (\d+) occurrences, make it a constant + - path: decode.go + text: string `\]\)` has (\d+) occurrences, make it a constant + - path: valid.go + text: string ` for type ` has (\d+) occurrences, make it a constant + - path: valid.go + text: 'string `cbor: ` has (\d+) occurrences, make it a constant' + - linters: + - goconst + path: (.+)_test\.go + paths: + - third_party$ + - builtin$ + - examples$ issues: - # max-issues-per-linter default is 50. Set to 0 to disable limit. max-issues-per-linter: 0 - # max-same-issues default is 3. Set to 0 to disable limit. 
max-same-issues: 0 - - exclude-rules: - - path: decode.go - text: "string ` overflows ` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string `, ` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string `\\]\\)` has (\\d+) occurrences, make it a constant" - - path: valid.go - text: "string ` for type ` has (\\d+) occurrences, make it a constant" - - path: valid.go - text: "string `cbor: ` has (\\d+) occurrences, make it a constant" +formatters: + enable: + - gofmt + - goimports + settings: + gofmt: + simplify: false + goimports: + local-prefixes: + - github.com/fxamacker/cbor + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md index d072b81c73..f9ae78ec9e 100644 --- a/vendor/github.com/fxamacker/cbor/v2/README.md +++ b/vendor/github.com/fxamacker/cbor/v2/README.md @@ -702,21 +702,20 @@ Default limits may need to be increased for systems handling very large data (e. ## Status -[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. -- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string. -- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function. -- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR. +v2.9.1 (Mar 29-30, 2026) includes important bugfixes, defensive checks, improved code quality, and more tests. Although not public, the fuzzer was also improved by adding more fuzz tests. 
-v2.9.0 passed fuzz tests and is production quality. +v2.9.1 passed fuzz tests and is production quality. The minimum version of Go required to build: - v2.8.0 and newer releases require go 1.20+. - v2.7.1 and older releases require go 1.17+. -For more details, see [release notes](https://github.com/fxamacker/cbor/releases). +For more details, see [v2.9.1 release notes](https://github.com/fxamacker/cbor/releases). ### Prior Releases +[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. It passed fuzz tests (billions of executions) and is production quality. + [v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality. [v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. 
diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go index 5051f110fb..5743f3eb25 100644 --- a/vendor/github.com/fxamacker/cbor/v2/cache.go +++ b/vendor/github.com/fxamacker/cbor/v2/cache.go @@ -92,94 +92,126 @@ func newTypeInfo(t reflect.Type) *typeInfo { } type decodingStructType struct { - fields fields - fieldIndicesByName map[string]int - err error - toArray bool + fields decodingFields + fieldIndicesByName map[string]int // Only populated if toArray is false + fieldIndicesByIntKey map[int64]int // Only populated if toArray is false + err error + toArray bool } -// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead, -// here's a very basic implementation of an aggregated error. -type multierror []error - -func (m multierror) Error() string { - var sb strings.Builder - for i, err := range m { - sb.WriteString(err.Error()) - if i < len(m)-1 { - sb.WriteString(", ") - } - } - return sb.String() -} - -func getDecodingStructType(t reflect.Type) *decodingStructType { +func getDecodingStructType(t reflect.Type) (*decodingStructType, error) { if v, _ := decodingStructTypeCache.Load(t); v != nil { - return v.(*decodingStructType) + structType := v.(*decodingStructType) + if structType.err != nil { + return nil, structType.err + } + return structType, nil } flds, structOptions := getFields(t) toArray := hasToArrayOption(structOptions) - var errs []error - for i := 0; i < len(flds); i++ { - if flds[i].keyAsInt { - nameAsInt, numErr := strconv.Atoi(flds[i].name) - if numErr != nil { - errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")")) - break + if toArray { + return getDecodingStructToArrayType(t, flds) + } + + fieldIndicesByName := make(map[string]int, len(flds)) + var fieldIndicesByIntKey map[int64]int + + decFlds := make(decodingFields, len(flds)) + for i, f := range flds { + // nameAsInt is set in getFields() 
except for fields with an unparsable tagged name. + // Atoi() is called here to catch and save parsing errors. + if f.keyAsInt && f.nameAsInt == 0 { + if _, numErr := strconv.Atoi(f.name); numErr != nil { + structType := &decodingStructType{ + err: errors.New("cbor: failed to parse field name \"" + f.name + "\" to int (" + numErr.Error() + ")"), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err } - flds[i].nameAsInt = int64(nameAsInt) } - flds[i].typInfo = getTypeInfo(flds[i].typ) - } + if f.keyAsInt { + if fieldIndicesByIntKey == nil { + fieldIndicesByIntKey = make(map[int64]int, len(flds)) + } + // The duplication check is only a safeguard, since getFields() already deduplicates fields. + if _, ok := fieldIndicesByIntKey[f.nameAsInt]; ok { + structType := &decodingStructType{ + err: fmt.Errorf("cbor: two or more fields of %v have the same keyasint value %d", t, f.nameAsInt), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err + } + fieldIndicesByIntKey[f.nameAsInt] = i + } else { + // The duplication check is only a safeguard, since getFields() already deduplicates fields. 
+ if _, ok := fieldIndicesByName[f.name]; ok { + structType := &decodingStructType{ + err: fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, f.name), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err + } + fieldIndicesByName[f.name] = i + } - fieldIndicesByName := make(map[string]int, len(flds)) - for i, fld := range flds { - if _, ok := fieldIndicesByName[fld.name]; ok { - errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name)) - continue + decFlds[i] = &decodingField{ + field: *f, + typInfo: getTypeInfo(f.typ), } - fieldIndicesByName[fld.name] = i } - var err error - { - var multi multierror - for _, each := range errs { - if each != nil { - multi = append(multi, each) + structType := &decodingStructType{ + fields: decFlds, + fieldIndicesByName: fieldIndicesByName, + fieldIndicesByIntKey: fieldIndicesByIntKey, + } + decodingStructTypeCache.Store(t, structType) + return structType, nil +} + +func getDecodingStructToArrayType(t reflect.Type, flds fields) (*decodingStructType, error) { + decFlds := make(decodingFields, len(flds)) + for i, f := range flds { + // nameAsInt is set in getFields() except for fields with an unparsable tagged name. + // Atoi() is called here to catch and save parsing errors. 
+ if f.keyAsInt && f.nameAsInt == 0 { + if _, numErr := strconv.Atoi(f.name); numErr != nil { + structType := &decodingStructType{ + err: errors.New("cbor: failed to parse field name \"" + f.name + "\" to int (" + numErr.Error() + ")"), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err } } - if len(multi) == 1 { - err = multi[0] - } else if len(multi) > 1 { - err = multi + + decFlds[i] = &decodingField{ + field: *f, + typInfo: getTypeInfo(f.typ), } } structType := &decodingStructType{ - fields: flds, - fieldIndicesByName: fieldIndicesByName, - err: err, - toArray: toArray, + fields: decFlds, + toArray: true, } decodingStructTypeCache.Store(t, structType) - return structType + return structType, nil } type encodingStructType struct { - fields fields - bytewiseFields fields - lengthFirstFields fields - omitEmptyFieldsIdx []int + fields encodingFields + bytewiseFields encodingFields // Only populated if toArray is false + lengthFirstFields encodingFields // Only populated if toArray is false + omitEmptyFieldsIdx []int // Only populated if toArray is false err error toArray bool } -func (st *encodingStructType) getFields(em *encMode) fields { +func (st *encodingStructType) getFields(em *encMode) encodingFields { switch em.sort { case SortNone, SortFastShuffle: return st.fields @@ -191,7 +223,7 @@ func (st *encodingStructType) getFields(em *encMode) fields { } type bytewiseFieldSorter struct { - fields fields + fields encodingFields } func (x *bytewiseFieldSorter) Len() int { @@ -203,11 +235,11 @@ func (x *bytewiseFieldSorter) Swap(i, j int) { } func (x *bytewiseFieldSorter) Less(i, j int) bool { - return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) < 0 } type lengthFirstFieldSorter struct { - fields fields + fields encodingFields } func (x *lengthFirstFieldSorter) Len() int { @@ -222,13 +254,16 @@ func (x *lengthFirstFieldSorter) Less(i, j int) bool { if 
len(x.fields[i].cborName) != len(x.fields[j].cborName) { return len(x.fields[i].cborName) < len(x.fields[j].cborName) } - return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) < 0 } func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { if v, _ := encodingStructTypeCache.Load(t); v != nil { structType := v.(*encodingStructType) - return structType, structType.err + if structType.err != nil { + return nil, structType.err + } + return structType, nil } flds, structOptions := getFields(t) @@ -237,111 +272,119 @@ func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { return getEncodingStructToArrayType(t, flds) } - var err error var hasKeyAsInt bool var hasKeyAsStr bool var omitEmptyIdx []int + + encFlds := make(encodingFields, len(flds)) + e := getEncodeBuffer() - for i := 0; i < len(flds); i++ { + defer putEncodeBuffer(e) + + for i, f := range flds { + encFlds[i] = &encodingField{field: *f} + ef := encFlds[i] + // Get field's encodeFunc - flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ) - if flds[i].ef == nil { - err = &UnsupportedTypeError{t} - break + ef.ef, ef.ief, ef.izf = getEncodeFunc(f.typ) + if ef.ef == nil { + structType := &encodingStructType{err: &UnsupportedTypeError{t}} + encodingStructTypeCache.Store(t, structType) + return nil, structType.err } // Encode field name - if flds[i].keyAsInt { - nameAsInt, numErr := strconv.Atoi(flds[i].name) - if numErr != nil { - err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")") - break + if f.keyAsInt { + if f.nameAsInt == 0 { + // nameAsInt is set in getFields() except for fields with an unparsable tagged name. + // Atoi() is called here to catch and save parsing errors. 
+ if _, numErr := strconv.Atoi(f.name); numErr != nil { + structType := &encodingStructType{ + err: errors.New("cbor: failed to parse field name \"" + f.name + "\" to int (" + numErr.Error() + ")"), + } + encodingStructTypeCache.Store(t, structType) + return nil, structType.err + } } - flds[i].nameAsInt = int64(nameAsInt) + nameAsInt := f.nameAsInt if nameAsInt >= 0 { - encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) + encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) //nolint:gosec } else { n := nameAsInt*(-1) - 1 - encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) + encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) //nolint:gosec } - flds[i].cborName = make([]byte, e.Len()) - copy(flds[i].cborName, e.Bytes()) + ef.cborName = make([]byte, e.Len()) + copy(ef.cborName, e.Bytes()) e.Reset() hasKeyAsInt = true } else { - encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name))) - flds[i].cborName = make([]byte, e.Len()+len(flds[i].name)) - n := copy(flds[i].cborName, e.Bytes()) - copy(flds[i].cborName[n:], flds[i].name) + encodeHead(e, byte(cborTypeTextString), uint64(len(f.name))) + ef.cborName = make([]byte, e.Len()+len(f.name)) + n := copy(ef.cborName, e.Bytes()) + copy(ef.cborName[n:], f.name) e.Reset() // If cborName contains a text string, then cborNameByteString contains a // string that has the byte string major type but is otherwise identical to // cborName. 
- flds[i].cborNameByteString = make([]byte, len(flds[i].cborName)) - copy(flds[i].cborNameByteString, flds[i].cborName) + ef.cborNameByteString = make([]byte, len(ef.cborName)) + copy(ef.cborNameByteString, ef.cborName) // Reset encoded CBOR type to byte string, preserving the "additional // information" bits: - flds[i].cborNameByteString[0] = byte(cborTypeByteString) | - getAdditionalInformation(flds[i].cborNameByteString[0]) + ef.cborNameByteString[0] = byte(cborTypeByteString) | + getAdditionalInformation(ef.cborNameByteString[0]) hasKeyAsStr = true } // Check if field can be omitted when empty - if flds[i].omitEmpty { + if f.omitEmpty { omitEmptyIdx = append(omitEmptyIdx, i) } } - putEncodeBuffer(e) - - if err != nil { - structType := &encodingStructType{err: err} - encodingStructTypeCache.Store(t, structType) - return structType, structType.err - } // Sort fields by canonical order - bytewiseFields := make(fields, len(flds)) - copy(bytewiseFields, flds) + bytewiseFields := make(encodingFields, len(encFlds)) + copy(bytewiseFields, encFlds) sort.Sort(&bytewiseFieldSorter{bytewiseFields}) lengthFirstFields := bytewiseFields if hasKeyAsInt && hasKeyAsStr { - lengthFirstFields = make(fields, len(flds)) - copy(lengthFirstFields, flds) + lengthFirstFields = make(encodingFields, len(encFlds)) + copy(lengthFirstFields, encFlds) sort.Sort(&lengthFirstFieldSorter{lengthFirstFields}) } structType := &encodingStructType{ - fields: flds, + fields: encFlds, bytewiseFields: bytewiseFields, lengthFirstFields: lengthFirstFields, omitEmptyFieldsIdx: omitEmptyIdx, } encodingStructTypeCache.Store(t, structType) - return structType, structType.err + return structType, nil } func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) { - for i := 0; i < len(flds); i++ { - // Get field's encodeFunc - flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ) - if flds[i].ef == nil { + encFlds := make(encodingFields, len(flds)) + for i, f := 
range flds { + encFlds[i] = &encodingField{field: *f} + encFlds[i].ef, encFlds[i].ief, encFlds[i].izf = getEncodeFunc(f.typ) + if encFlds[i].ef == nil { structType := &encodingStructType{err: &UnsupportedTypeError{t}} encodingStructTypeCache.Store(t, structType) - return structType, structType.err + return nil, structType.err } } structType := &encodingStructType{ - fields: flds, + fields: encFlds, toArray: true, } encodingStructTypeCache.Store(t, structType) - return structType, structType.err + return structType, nil } func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc, isZeroFunc) { diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go index f0bdc3b38d..03fd7f8b04 100644 --- a/vendor/github.com/fxamacker/cbor/v2/decode.go +++ b/vendor/github.com/fxamacker/cbor/v2/decode.go @@ -16,7 +16,6 @@ import ( "math/big" "reflect" "strconv" - "strings" "time" "unicode/utf8" @@ -326,14 +325,14 @@ func (dmkm DupMapKeyMode) valid() bool { return dmkm >= 0 && dmkm < maxDupMapKeyMode } -// IndefLengthMode specifies whether to allow indefinite length items. +// IndefLengthMode specifies whether to allow indefinite-length items. type IndefLengthMode int const ( - // IndefLengthAllowed allows indefinite length items. + // IndefLengthAllowed allows indefinite-length items. IndefLengthAllowed IndefLengthMode = iota - // IndefLengthForbidden disallows indefinite length items. + // IndefLengthForbidden disallows indefinite-length items. IndefLengthForbidden maxIndefLengthMode @@ -378,6 +377,7 @@ const ( // - int64 if value fits // - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64 // - return UnmarshalTypeError if value > math.MaxInt64 + // // Deprecated: IntDecConvertSigned should not be used. // Please use other options, such as IntDecConvertSignedOrError, IntDecConvertSignedOrBigInt, IntDecConvertNone. 
IntDecConvertSigned @@ -811,7 +811,7 @@ type DecOptions struct { // Default is 128*1024=131072 and it can be set to [16, 2147483647] MaxMapPairs int - // IndefLength specifies whether to allow indefinite length CBOR items. + // IndefLength specifies whether to allow indefinite-length CBOR items. IndefLength IndefLengthMode // TagsMd specifies whether to allow CBOR tags (major type 6). @@ -1055,7 +1055,7 @@ func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore } if !opts.ExtraReturnErrors.valid() { - return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) + return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) //nolint:gosec } if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map { @@ -1149,8 +1149,8 @@ func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore unrecognizedTagToAny: opts.UnrecognizedTagToAny, timeTagToAny: opts.TimeTagToAny, simpleValues: simpleValues, - nanDec: opts.NaN, - infDec: opts.Inf, + nan: opts.NaN, + inf: opts.Inf, byteStringToTime: opts.ByteStringToTime, byteStringExpectedFormat: opts.ByteStringExpectedFormat, bignumTag: opts.BignumTag, @@ -1230,8 +1230,8 @@ type decMode struct { unrecognizedTagToAny UnrecognizedTagToAnyMode timeTagToAny TimeTagToAnyMode simpleValues *SimpleValueRegistry - nanDec NaNMode - infDec InfMode + nan NaNMode + inf InfMode byteStringToTime ByteStringToTimeMode byteStringExpectedFormat ByteStringExpectedFormatMode bignumTag BignumTagMode @@ -1272,8 +1272,8 @@ func (dm *decMode) DecOptions() DecOptions { UnrecognizedTagToAny: dm.unrecognizedTagToAny, TimeTagToAny: dm.timeTagToAny, SimpleValues: simpleValues, - NaN: dm.nanDec, - Inf: dm.infDec, + NaN: dm.nan, + Inf: dm.inf, ByteStringToTime: dm.byteStringToTime, ByteStringExpectedFormat: dm.byteStringExpectedFormat, BignumTag: dm.bignumTag, @@ -1583,11 +1583,11 @@ func (d *decoder) parseToValue(v 
reflect.Value, tInfo *typeInfo) error { //nolin _, ai, val := d.getHead() switch ai { case additionalInformationAsFloat16: - f := float64(float16.Frombits(uint16(val)).Float32()) + f := float64(float16.Frombits(uint16(val)).Float32()) //nolint:gosec return fillFloat(t, f, v) case additionalInformationAsFloat32: - f := float64(math.Float32frombits(uint32(val))) + f := float64(math.Float32frombits(uint32(val))) //nolint:gosec return fillFloat(t, f, v) case additionalInformationAsFloat64: @@ -1595,10 +1595,10 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin return fillFloat(t, f, v) default: // ai <= 24 - if d.dm.simpleValues.rejected[SimpleValue(val)] { + if d.dm.simpleValues.rejected[SimpleValue(val)] { //nolint:gosec return &UnacceptableDataItemError{ CBORType: t.String(), - Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", //nolint:gosec } } @@ -1677,20 +1677,23 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin return d.parseToValue(v, tInfo) case cborTypeArray: - if tInfo.nonPtrKind == reflect.Slice { + switch tInfo.nonPtrKind { + case reflect.Slice: return d.parseArrayToSlice(v, tInfo) - } else if tInfo.nonPtrKind == reflect.Array { + case reflect.Array: return d.parseArrayToArray(v, tInfo) - } else if tInfo.nonPtrKind == reflect.Struct { + case reflect.Struct: return d.parseArrayToStruct(v, tInfo) } + d.skip() return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} case cborTypeMap: - if tInfo.nonPtrKind == reflect.Struct { + switch tInfo.nonPtrKind { + case reflect.Struct: return d.parseMapToStruct(v, tInfo) - } else if tInfo.nonPtrKind == reflect.Map { + case reflect.Map: return d.parseMapToMap(v, tInfo) } d.skip() @@ -1745,8 +1748,8 @@ func (d *decoder) parseToTime() (time.Time, bool, error) { // Read tag number _, _, tagNum := d.getHead() if 
tagNum != 0 && tagNum != 1 { - d.skip() // skip tag content - return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") + d.skip() // skip tag content + return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") //nolint:gosec } } } else { @@ -1815,10 +1818,10 @@ func (d *decoder) parseToTime() (time.Time, bool, error) { var f float64 switch ai { case additionalInformationAsFloat16: - f = float64(float16.Frombits(uint16(val)).Float32()) + f = float64(float16.Frombits(uint16(val)).Float32()) //nolint:gosec case additionalInformationAsFloat32: - f = float64(math.Float32frombits(uint32(val))) + f = float64(math.Float32frombits(uint32(val))) //nolint:gosec case additionalInformationAsFloat64: f = math.Float64frombits(val) @@ -1832,6 +1835,13 @@ func (d *decoder) parseToTime() (time.Time, bool, error) { return time.Time{}, true, nil } seconds, fractional := math.Modf(f) + if seconds > math.MaxInt64 || seconds < math.MinInt64 { + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("%v overflows Go's int64", f), + } + } return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil default: @@ -2145,14 +2155,14 @@ func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyc case cborTypePrimitives: _, ai, val := d.getHead() - if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { + if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { //nolint:gosec return nil, &UnacceptableDataItemError{ CBORType: t.String(), - Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", //nolint:gosec } } if ai < 20 || ai == 24 { - return SimpleValue(val), nil + return SimpleValue(val), nil //nolint:gosec } switch ai { @@ 
-2165,11 +2175,11 @@ func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyc return nil, nil case additionalInformationAsFloat16: - f := float64(float16.Frombits(uint16(val)).Float32()) + f := float64(float16.Frombits(uint16(val)).Float32()) //nolint:gosec return f, nil case additionalInformationAsFloat32: - f := float64(math.Float32frombits(uint32(val))) + f := float64(math.Float32frombits(uint32(val))) //nolint:gosec return f, nil case additionalInformationAsFloat64: @@ -2202,16 +2212,16 @@ func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyc func (d *decoder) parseByteString() ([]byte, bool) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() if !indefiniteLength { - b := d.data[d.off : d.off+int(val)] - d.off += int(val) + b := d.data[d.off : d.off+int(val)] //nolint:gosec + d.off += int(val) //nolint:gosec return b, false } - // Process indefinite length string chunks. + // Process indefinite-length string chunks. b := []byte{} for !d.foundBreak() { _, _, val = d.getHead() - b = append(b, d.data[d.off:d.off+int(val)]...) - d.off += int(val) + b = append(b, d.data[d.off:d.off+int(val)]...) //nolint:gosec + d.off += int(val) //nolint:gosec } return b, true } @@ -2300,19 +2310,19 @@ func (d *decoder) applyByteStringTextConversion( func (d *decoder) parseTextString() ([]byte, error) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() if !indefiniteLength { - b := d.data[d.off : d.off+int(val)] - d.off += int(val) + b := d.data[d.off : d.off+int(val)] //nolint:gosec + d.off += int(val) //nolint:gosec if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) { return nil, &SemanticError{"cbor: invalid UTF-8 string"} } return b, nil } - // Process indefinite length string chunks. + // Process indefinite-length string chunks. 
b := []byte{} for !d.foundBreak() { _, _, val = d.getHead() - x := d.data[d.off : d.off+int(val)] - d.off += int(val) + x := d.data[d.off : d.off+int(val)] //nolint:gosec + d.off += int(val) //nolint:gosec if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) { for !d.foundBreak() { d.skip() // Skip remaining chunk on error @@ -2327,7 +2337,7 @@ func (d *decoder) parseTextString() ([]byte, error) { func (d *decoder) parseArray() ([]any, error) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if !hasSize { count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance } @@ -2349,7 +2359,7 @@ func (d *decoder) parseArray() ([]any, error) { func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if !hasSize { count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance } @@ -2371,7 +2381,7 @@ func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec gi := 0 vLen := v.Len() var err error @@ -2400,7 +2410,7 @@ func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { func (d *decoder) parseMap() (any, error) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec m := make(map[any]any) var k, e any var err, lastErr error @@ -2465,7 +2475,7 @@ func (d *decoder) parseMap() (any, error) { func (d *decoder) parseMapToMap(v reflect.Value, tInfo 
*typeInfo) error { //nolint:gocyclo _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if v.IsNil() { mapsize := count if !hasSize { @@ -2566,9 +2576,9 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli } func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { - structType := getDecodingStructType(tInfo.nonPtrType) - if structType.err != nil { - return structType.err + structType, structTypeErr := getDecodingStructType(tInfo.nonPtrType) + if structTypeErr != nil { + return structTypeErr } if !structType.toArray { @@ -2584,7 +2594,7 @@ func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { start := d.off _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if !hasSize { count = d.numOfItemsUntilBreak() // peek ahead to get array size } @@ -2637,11 +2647,72 @@ func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { return err } -// parseMapToStruct needs to be fast so gocyclo can be ignored for now. +// skipMapEntriesFromIndex skips remaining map entries starting from index i. +func (d *decoder) skipMapEntriesFromIndex(i, count int, hasSize bool) { + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() + d.skip() + } +} + +// skipMapForDupKey skips the current map value and all remaining map entries, +// then returns a DupMapKeyError for the given key at map index i. +func (d *decoder) skipMapForDupKey(dupKey any, i, count int, hasSize bool) error { + // Skip the value of the duplicate key. + d.skip() + // Skip all remaining map entries. 
+ d.skipMapEntriesFromIndex(i+1, count, hasSize) + return &DupMapKeyError{dupKey, i} +} + +// skipMapForUnknownField skips the current map value and all remaining map entries, +// then returns a UnknownFieldError for the given key at map index i. +func (d *decoder) skipMapForUnknownField(i, count int, hasSize bool) error { + // Skip the value of the unknown key. + d.skip() + // Skip all remaining map entries. + d.skipMapEntriesFromIndex(i+1, count, hasSize) + return &UnknownFieldError{i} +} + +// decodeToStructField decodes the next CBOR value into the struct field f in v. +// If the field cannot be resolved, the CBOR value is skipped. +func (d *decoder) decodeToStructField(v reflect.Value, f *decodingField, tInfo *typeInfo) error { + var fv reflect.Value + + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + var err error + fv, err = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if !fv.IsValid() { + d.skip() + return err + } + } + + err := d.parseToValue(fv, f.typInfo) + if err != nil { + if typeError, ok := err.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.nonPtrType.String() + "." 
+ f.name + } + return err + } + + return nil +} + func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo - structType := getDecodingStructType(tInfo.nonPtrType) - if structType.err != nil { - return structType.err + structType, structTypeErr := getDecodingStructType(tInfo.nonPtrType) + if structTypeErr != nil { + return structTypeErr } if structType.toArray { @@ -2654,14 +2725,12 @@ func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //n } } - var err, lastErr error - // Get CBOR map size _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec - // Keeps track of matched struct fields + // Keep track of matched struct fields to detect duplicate map keys. var foundFldIdx []bool { const maxStackFields = 128 @@ -2675,99 +2744,80 @@ func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //n } } - // Keeps track of CBOR map keys to detect duplicate map key - keyCount := 0 - var mapKeys map[any]struct{} - - errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + // Keep track of unmatched CBOR map keys to detect duplicate map keys. + var unmatchedMapKeys map[any]struct{} -MapEntryLoop: - for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - var f *field + var err error - // If duplicate field detection is enabled and the key at index j did not match any - // field, k will hold the map key. - var k any + caseInsensitive := d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { t := d.nextCBORType() - if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) { + + // Reclassify disallowed byte string keys so they fall to the default case. + // keyType is only used for branch control. 
+ keyType := t + if t == cborTypeByteString && d.dm.fieldNameByteString != FieldNameByteStringAllowed { + keyType = 0xff + } + + switch keyType { + case cborTypeTextString, cborTypeByteString: var keyBytes []byte if t == cborTypeTextString { - keyBytes, lastErr = d.parseTextString() - if lastErr != nil { + var parseErr error + keyBytes, parseErr = d.parseTextString() + if parseErr != nil { if err == nil { - err = lastErr + err = parseErr } - d.skip() // skip value + d.skip() // Skip value continue } } else { // cborTypeByteString keyBytes, _ = d.parseByteString() } - // Check for exact match on field name. - if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { - fld := structType.fields[i] + // Find matching struct field (exact match, then case-insensitive fallback). + if fldIdx, ok := findStructFieldByKey(structType, keyBytes, caseInsensitive); ok { + fld := structType.fields[fldIdx] - if !foundFldIdx[i] { - f = fld - foundFldIdx[i] = true - } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - err = &DupMapKeyError{fld.name, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() + switch checkDupField(d.dm, foundFldIdx, fldIdx) { + case mapActionParseValueAndContinue: + if fieldErr := d.decodeToStructField(v, fld, tInfo); fieldErr != nil && err == nil { + err = fieldErr } - return err - } else { - // discard repeated match + continue + case mapActionSkipAllAndReturnError: + return d.skipMapForDupKey(string(keyBytes), i, count, hasSize) + case mapActionSkipValueAndContinue: d.skip() - continue MapEntryLoop + continue } } - // Find field with case-insensitive match - if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive { - keyLen := len(keyBytes) - keyString := string(keyBytes) - for i := 0; i < len(structType.fields); i++ { - fld := structType.fields[i] - if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) { - 
if !foundFldIdx[i] { - f = fld - foundFldIdx[i] = true - } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - err = &DupMapKeyError{keyString, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } else { - // discard repeated match - d.skip() - continue MapEntryLoop - } - break - } - } + // No matching struct field found. + if unmatchedErr := handleUnmatchedMapKey(d, string(keyBytes), i, count, hasSize, &unmatchedMapKeys); unmatchedErr != nil { + return unmatchedErr } - if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { - k = string(keyBytes) - } - } else if t <= cborTypeNegativeInt { // uint/int + case cborTypePositiveInt, cborTypeNegativeInt: var nameAsInt int64 if t == cborTypePositiveInt { _, _, val := d.getHead() - nameAsInt = int64(val) + if val > math.MaxInt64 { + if err == nil { + err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + d.skip() // skip value + continue + } + nameAsInt = int64(val) //nolint:gosec } else { _, _, val := d.getHead() if val > math.MaxInt64 { @@ -2781,39 +2831,35 @@ MapEntryLoop: d.skip() // skip value continue } - nameAsInt = int64(-1) ^ int64(val) - } - - // Find field - for i := 0; i < len(structType.fields); i++ { - fld := structType.fields[i] - if fld.keyAsInt && fld.nameAsInt == nameAsInt { - if !foundFldIdx[i] { - f = fld - foundFldIdx[i] = true - } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - err = &DupMapKeyError{nameAsInt, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } else { - // discard repeated match - d.skip() - continue MapEntryLoop + nameAsInt = int64(-1) ^ int64(val) //nolint:gosec + } + + // Find field by integer key + if fldIdx, ok := 
structType.fieldIndicesByIntKey[nameAsInt]; ok { + fld := structType.fields[fldIdx] + + switch checkDupField(d.dm, foundFldIdx, fldIdx) { + case mapActionParseValueAndContinue: + if fieldErr := d.decodeToStructField(v, fld, tInfo); fieldErr != nil && err == nil { + err = fieldErr } - break + continue + case mapActionSkipAllAndReturnError: + return d.skipMapForDupKey(nameAsInt, i, count, hasSize) + case mapActionSkipValueAndContinue: + d.skip() + continue } } - if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { - k = nameAsInt + // No matching struct field found. + if unmatchedErr := handleUnmatchedMapKey(d, nameAsInt, i, count, hasSize, &unmatchedMapKeys); unmatchedErr != nil { + return unmatchedErr } - } else { + + default: + // CBOR map keys that can't be matched to any struct field. + if err == nil { err = &UnmarshalTypeError{ CBORType: t.String(), @@ -2821,97 +2867,31 @@ MapEntryLoop: errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name", } } + + var otherKey any if d.dm.dupMapKey == DupMapKeyEnforcedAPF { // parse key - k, lastErr = d.parse(true) - if lastErr != nil { + var parseErr error + otherKey, parseErr = d.parse(true) + if parseErr != nil { d.skip() // skip value continue } // Detect if CBOR map key can be used as Go map key. - if !isHashableValue(reflect.ValueOf(k)) { + if !isHashableValue(reflect.ValueOf(otherKey)) { d.skip() // skip value continue } } else { d.skip() // skip key } - } - - if f == nil { - if errOnUnknownField { - err = &UnknownFieldError{j} - d.skip() // Skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } - - // Two map keys that match the same struct field are immediately considered - // duplicates. This check detects duplicates between two map keys that do - // not match a struct field. If unknown field errors are enabled, then this - // check is never reached. 
- if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - if mapKeys == nil { - mapKeys = make(map[any]struct{}, 1) - } - mapKeys[k] = struct{}{} - newKeyCount := len(mapKeys) - if newKeyCount == keyCount { - err = &DupMapKeyError{k, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } - keyCount = newKeyCount - } - - d.skip() // Skip value - continue - } - - // Get field value by index - var fv reflect.Value - if len(f.idx) == 1 { - fv = v.Field(f.idx[0]) - } else { - fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { - // Return a new value for embedded field null pointer to point to, or return error. - if !v.CanSet() { - return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) - } - v.Set(reflect.New(v.Type().Elem())) - return v, nil - }) - if lastErr != nil && err == nil { - err = lastErr - } - if !fv.IsValid() { - d.skip() - continue - } - } - if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { - if err == nil { - if typeError, ok := lastErr.(*UnmarshalTypeError); ok { - typeError.StructFieldName = tInfo.nonPtrType.String() + "." 
+ f.name - err = typeError - } else { - err = lastErr - } + if unmatchedErr := handleUnmatchedMapKey(d, otherKey, i, count, hasSize, &unmatchedMapKeys); unmatchedErr != nil { + return unmatchedErr } } } + return err } @@ -2958,15 +2938,15 @@ func (d *decoder) skip() { switch t { case cborTypeByteString, cborTypeTextString: - d.off += int(val) + d.off += int(val) //nolint:gosec case cborTypeArray: - for i := 0; i < int(val); i++ { + for i := 0; i < int(val); i++ { //nolint:gosec d.skip() } case cborTypeMap: - for i := 0; i < int(val)*2; i++ { + for i := 0; i < int(val)*2; i++ { //nolint:gosec d.skip() } diff --git a/vendor/github.com/fxamacker/cbor/v2/decode_map_utils.go b/vendor/github.com/fxamacker/cbor/v2/decode_map_utils.go new file mode 100644 index 0000000000..3c8c423ad1 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/decode_map_utils.go @@ -0,0 +1,98 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import "strings" + +// mapAction represents the next action during decoding a CBOR map to a Go struct. +type mapAction int + +const ( + mapActionParseValueAndContinue mapAction = iota // The caller should process the map value. + mapActionSkipValueAndContinue // The caller should skip the map value. + mapActionSkipAllAndReturnError // The caller should skip the rest of the map and return an error. +) + +// checkDupField checks if a struct field at index i has already been matched and returns the next action. +// If not matched, it marks the field as matched and returns mapActionParseValueAndContinue. +// If matched and DupMapKeyEnforcedAPF is specified in the given dm, it returns mapActionSkipAllAndReturnError. +// If matched and DupMapKeyEnforcedAPF is not specified in the given dm, it returns mapActionSkipValueAndContinue. 
+func checkDupField(dm *decMode, foundFldIdx []bool, i int) mapAction { + if !foundFldIdx[i] { + foundFldIdx[i] = true + return mapActionParseValueAndContinue + } + if dm.dupMapKey == DupMapKeyEnforcedAPF { + return mapActionSkipAllAndReturnError + } + return mapActionSkipValueAndContinue +} + +// findStructFieldByKey finds a struct field matching keyBytes by name. +// It tries an exact match first. If no exact match is found and +// caseInsensitive is true, it falls back to a case-insensitive search. +// findStructFieldByKey returns the field index and true, or -1 and false. +func findStructFieldByKey( + structType *decodingStructType, + keyBytes []byte, + caseInsensitive bool, +) (int, bool) { + if fldIdx, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { + return fldIdx, true + } + if caseInsensitive { + return findFieldCaseInsensitive(structType.fields, string(keyBytes)) + } + return -1, false +} + +// findFieldCaseInsensitive returns the index of the first field whose name +// case-insensitively matches key, or -1 and false if no field matches. +func findFieldCaseInsensitive(flds decodingFields, key string) (int, bool) { + keyLen := len(key) + for i, f := range flds { + if f.keyAsInt { + continue + } + if len(f.name) == keyLen && strings.EqualFold(f.name, key) { + return i, true + } + } + return -1, false +} + +// handleUnmatchedMapKey handles a map entry whose key does not match any struct +// field. It can return UnknownFieldError or DupMapKeyError. +// handleUnmatchedMapKey consumes the CBOR value, so the caller doesn't need to skip any values. +// If an error is returned, the caller should abort parsing the map and return the error. +// If no error is returned, the caller should continue to process the next map pair. 
+func handleUnmatchedMapKey( + d *decoder, + key any, + i int, + count int, + hasSize bool, + // *map[any]struct{} is used here because we use lazy initialization for uks + uks *map[any]struct{}, //nolint:gocritic +) error { + errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + + if errOnUnknownField { + return d.skipMapForUnknownField(i, count, hasSize) + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + if *uks == nil { + *uks = make(map[any]struct{}) + } + if _, dup := (*uks)[key]; dup { + return d.skipMapForDupKey(key, i, count, hasSize) + } + (*uks)[key] = struct{}{} + } + + // Skip value. + d.skip() + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/vendor/github.com/fxamacker/cbor/v2/diagnose.go index 44afb86608..42a67ad11f 100644 --- a/vendor/github.com/fxamacker/cbor/v2/diagnose.go +++ b/vendor/github.com/fxamacker/cbor/v2/diagnose.go @@ -51,11 +51,8 @@ const ( maxByteStringEncoding ) -func (bse ByteStringEncoding) valid() error { - if bse >= maxByteStringEncoding { - return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse))) - } - return nil +func (bse ByteStringEncoding) valid() bool { + return bse < maxByteStringEncoding } // DiagOptions specifies Diag options. 
@@ -104,8 +101,8 @@ func (opts DiagOptions) DiagMode() (DiagMode, error) { } func (opts DiagOptions) diagMode() (*diagMode, error) { - if err := opts.ByteStringEncoding.valid(); err != nil { - return nil, err + if !opts.ByteStringEncoding.valid() { + return nil, errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(opts.ByteStringEncoding))) } decMode, err := DecOptions{ @@ -360,7 +357,7 @@ func (di *diagnose) item() error { //nolint:gocyclo case cborTypeArray: _, _, val := di.d.getHead() - count := int(val) + count := int(val) //nolint:gosec di.w.WriteByte('[') for i := 0; i < count; i++ { @@ -376,7 +373,7 @@ func (di *diagnose) item() error { //nolint:gocyclo case cborTypeMap: _, _, val := di.d.getHead() - count := int(val) + count := int(val) //nolint:gosec di.w.WriteByte('{') for i := 0; i < count; i++ { @@ -477,8 +474,8 @@ func (di *diagnose) item() error { //nolint:gocyclo func (di *diagnose) writeU16(val rune) { di.w.WriteString("\\u") var in [2]byte - in[0] = byte(val >> 8) - in[1] = byte(val) + in[0] = byte(val >> 8) //nolint:gosec + in[1] = byte(val) //nolint:gosec sz := hex.EncodedLen(len(in)) di.w.Grow(sz) dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] @@ -608,7 +605,7 @@ func (di *diagnose) encodeTextString(val string, quote byte) error { c, size := utf8.DecodeRuneInString(val[i:]) switch { - case c == utf8.RuneError: + case c == utf8.RuneError && size == 1: return &SemanticError{"cbor: invalid UTF-8 string"} case c < utf16SurrSelf: @@ -631,7 +628,7 @@ func (di *diagnose) encodeFloat(ai byte, val uint64) error { f64 := float64(0) switch ai { case additionalInformationAsFloat16: - f16 := float16.Frombits(uint16(val)) + f16 := float16.Frombits(uint16(val)) //nolint:gosec switch { case f16.IsNaN(): di.w.WriteString("NaN") @@ -647,7 +644,7 @@ func (di *diagnose) encodeFloat(ai byte, val uint64) error { } case additionalInformationAsFloat32: - f32 := math.Float32frombits(uint32(val)) + f32 := math.Float32frombits(uint32(val)) //nolint:gosec 
switch { case f32 != f32: di.w.WriteString("NaN") diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go index c550617c38..e65a29d8a6 100644 --- a/vendor/github.com/fxamacker/cbor/v2/encode.go +++ b/vendor/github.com/fxamacker/cbor/v2/encode.go @@ -30,7 +30,7 @@ import ( // If value implements the Marshaler interface, Marshal calls its // MarshalCBOR method. // -// If value implements encoding.BinaryMarshaler, Marhsal calls its +// If value implements encoding.BinaryMarshaler, Marshal calls its // MarshalBinary method and encode it as CBOR byte string. // // Boolean values encode as CBOR booleans (type 7). @@ -343,7 +343,7 @@ const ( // non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339. // NOTE: User applications can avoid including the RFC3339 numeric offset by: // - providing a time.Time value set to UTC, or - // - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339. + // - using the TimeUnix, TimeUnixMicro, TimeUnixDynamic, or TimeRFC3339NanoUTC option. TimeRFC3339 // TimeRFC3339Nano causes time.Time to encode to a CBOR time (tag 0) with a text string content @@ -351,9 +351,13 @@ const ( // non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339. // NOTE: User applications can avoid including the RFC3339 numeric offset by: // - providing a time.Time value set to UTC, or - // - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339Nano. + // - using the TimeUnix, TimeUnixMicro, TimeUnixDynamic, or TimeRFC3339NanoUTC option. TimeRFC3339Nano + // TimeRFC3339NanoUTC causes time.Time to encode to a CBOR time (tag 0) with a text string content + // representing UTC time using nanosecond precision in RFC3339 format. + TimeRFC3339NanoUTC + maxTimeMode ) @@ -436,7 +440,7 @@ const ( // FieldNameToTextString encodes struct fields to CBOR text string (major type 3). 
FieldNameToTextString FieldNameMode = iota - // FieldNameToTextString encodes struct fields to CBOR byte string (major type 2). + // FieldNameToByteString encodes struct fields to CBOR byte string (major type 2). FieldNameToByteString maxFieldNameMode @@ -567,7 +571,7 @@ type EncOptions struct { // RFC3339 format gets tag number 0, and numeric epoch time tag number 1. TimeTag EncTagMode - // IndefLength specifies whether to allow indefinite length CBOR items. + // IndefLength specifies whether to allow indefinite-length CBOR items. IndefLength IndefLengthMode // NilContainers specifies how to encode nil slices and maps. @@ -1132,10 +1136,11 @@ func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error { if fopt == ShortestFloat16 { var f16 float16.Float16 p := float16.PrecisionFromfloat32(f32) - if p == float16.PrecisionExact { + switch p { + case float16.PrecisionExact: // Roundtrip float32->float16->float32 test isn't needed. f16 = float16.Fromfloat32(f32) - } else if p == float16.PrecisionUnknown { + case float16.PrecisionUnknown: // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16. 
f16 = float16.Fromfloat32(f32) if f16.Float32() == f32 { @@ -1293,10 +1298,10 @@ func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error { if slen == 0 { return e.WriteByte(byte(cborTypeByteString)) } - encodeHead(e, byte(cborTypeByteString), uint64(slen)) + encodeHead(e, byte(cborTypeByteString), uint64(slen)) //nolint:gosec if vk == reflect.Array { for i := 0; i < slen; i++ { - e.WriteByte(byte(v.Index(i).Uint())) + e.WriteByte(byte(v.Index(i).Uint())) //nolint:gosec } return nil } @@ -1333,7 +1338,7 @@ func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) if alen == 0 { return e.WriteByte(byte(cborTypeArray)) } - encodeHead(e, byte(cborTypeArray), uint64(alen)) + encodeHead(e, byte(cborTypeArray), uint64(alen)) //nolint:gosec for i := 0; i < alen; i++ { if err := ae.f(e, em, v.Index(i)); err != nil { return err @@ -1364,7 +1369,7 @@ func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) er return e.WriteByte(byte(cborTypeMap)) } - encodeHead(e, byte(cborTypeMap), uint64(mlen)) + encodeHead(e, byte(cborTypeMap), uint64(mlen)) //nolint:gosec if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 { return me.e(e, em, v, nil) } @@ -1427,7 +1432,7 @@ func (x *bytewiseKeyValueSorter) Swap(i, j int) { func (x *bytewiseKeyValueSorter) Less(i, j int) bool { kvi, kvj := x.kvs[i], x.kvs[j] - return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) < 0 } type lengthFirstKeyValueSorter struct { @@ -1448,7 +1453,7 @@ func (x *lengthFirstKeyValueSorter) Less(i, j int) bool { if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 { return keyLengthDifference < 0 } - return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 + return 
bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) < 0 } var keyValuePool = sync.Pool{} @@ -1535,8 +1540,8 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { // Head is rewritten later if actual encoded field count is different from struct field count. encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds))) - kvbegin := e.Len() - kvcount := 0 + kvBeginOffset := e.Len() + kvCount := 0 for offset := 0; offset < len(flds); offset++ { f := flds[(start+offset)%len(flds)] @@ -1582,10 +1587,10 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { return err } - kvcount++ + kvCount++ } - if len(flds) == kvcount { + if len(flds) == kvCount { // Encoded element count in head is the same as actual element count. return nil } @@ -1593,8 +1598,8 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { // Overwrite the bytes that were reserved for the head before encoding the map entries. var actualHeadLen int { - headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin]) - actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount)) + headbuf := *bytes.NewBuffer(e.Bytes()[kvBeginOffset-encodedHeadLen : kvBeginOffset-encodedHeadLen : kvBeginOffset]) + actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvCount)) } if actualHeadLen == encodedHeadLen { @@ -1607,8 +1612,8 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { // encoded. The encoded entries are offset to the right by the number of excess reserved // bytes. Shift the entries left to remove the gap. 
excessReservedBytes := encodedHeadLen - actualHeadLen - dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes] - src := e.Bytes()[kvbegin:e.Len()] + dst := e.Bytes()[kvBeginOffset-excessReservedBytes : e.Len()-excessReservedBytes] + src := e.Bytes()[kvBeginOffset:e.Len()] copy(dst, src) // After shifting, the excess bytes are at the end of the output buffer and they are @@ -1633,7 +1638,7 @@ func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { } if em.timeTag == EncTagRequired { tagNumber := 1 - if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano { + if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano || em.time == TimeRFC3339NanoUTC { tagNumber = 0 } encodeHead(e, byte(cborTypeTag), uint64(tagNumber)) @@ -1650,7 +1655,7 @@ func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { case TimeUnixDynamic: t = t.UTC().Round(time.Microsecond) - secs, nsecs := t.Unix(), uint64(t.Nanosecond()) + secs, nsecs := t.Unix(), uint64(t.Nanosecond()) //nolint:gosec if nsecs == 0 { return encodeInt(e, em, reflect.ValueOf(secs)) } @@ -1661,6 +1666,10 @@ func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { s := t.Format(time.RFC3339) return encodeString(e, em, reflect.ValueOf(s)) + case TimeRFC3339NanoUTC: + s := t.UTC().Format(time.RFC3339Nano) + return encodeString(e, em, reflect.ValueOf(s)) + default: // TimeRFC3339Nano s := t.Format(time.RFC3339Nano) return encodeString(e, em, reflect.ValueOf(s)) diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go index 30f72814f6..9912e855c2 100644 --- a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go +++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go @@ -93,6 +93,6 @@ func (sv *SimpleValue) unmarshalCBOR(data []byte) error { // It is safe to cast val to uint8 here because // - data is already verified to be well-formed CBOR simple value and // - val is <= math.MaxUint8. 
- *sv = SimpleValue(val) + *sv = SimpleValue(val) //nolint:gosec return nil } diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go index 7ac6d7d671..da4c8f210f 100644 --- a/vendor/github.com/fxamacker/cbor/v2/stream.go +++ b/vendor/github.com/fxamacker/cbor/v2/stream.go @@ -171,14 +171,20 @@ func NewEncoder(w io.Writer) *Encoder { // Encode writes the CBOR encoding of v. func (enc *Encoder) Encode(v any) error { - if len(enc.indefTypes) > 0 && v != nil { - indefType := enc.indefTypes[len(enc.indefTypes)-1] - if indefType == cborTypeTextString { + if len(enc.indefTypes) > 0 { + switch enc.indefTypes[len(enc.indefTypes)-1] { + case cborTypeTextString: + if v == nil { + return errors.New("cbor: cannot encode nil for indefinite-length text string") + } k := reflect.TypeOf(v).Kind() if k != reflect.String { return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string") } - } else if indefType == cborTypeByteString { + case cborTypeByteString: + if v == nil { + return errors.New("cbor: cannot encode nil for indefinite-length byte string") + } t := reflect.TypeOf(v) k := t.Kind() if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 { @@ -198,35 +204,35 @@ func (enc *Encoder) Encode(v any) error { return err } -// StartIndefiniteByteString starts byte string encoding of indefinite length. +// StartIndefiniteByteString starts indefinite-length byte string encoding. // Subsequent calls of (*Encoder).Encode() encodes definite length byte strings // ("chunks") as one contiguous string until EndIndefinite is called. func (enc *Encoder) StartIndefiniteByteString() error { return enc.startIndefinite(cborTypeByteString) } -// StartIndefiniteTextString starts text string encoding of indefinite length. +// StartIndefiniteTextString starts indefinite-length text string encoding. 
// Subsequent calls of (*Encoder).Encode() encodes definite length text strings // ("chunks") as one contiguous string until EndIndefinite is called. func (enc *Encoder) StartIndefiniteTextString() error { return enc.startIndefinite(cborTypeTextString) } -// StartIndefiniteArray starts array encoding of indefinite length. +// StartIndefiniteArray starts indefinite-length array encoding. // Subsequent calls of (*Encoder).Encode() encodes elements of the array // until EndIndefinite is called. func (enc *Encoder) StartIndefiniteArray() error { return enc.startIndefinite(cborTypeArray) } -// StartIndefiniteMap starts array encoding of indefinite length. +// StartIndefiniteMap starts indefinite-length map encoding. // Subsequent calls of (*Encoder).Encode() encodes elements of the map // until EndIndefinite is called. func (enc *Encoder) StartIndefiniteMap() error { return enc.startIndefinite(cborTypeMap) } -// EndIndefinite closes last opened indefinite length value. +// EndIndefinite closes last opened indefinite-length value. 
func (enc *Encoder) EndIndefinite() error { if len(enc.indefTypes) == 0 { return errors.New("cbor: cannot encode \"break\" code outside indefinite length values") @@ -238,18 +244,22 @@ func (enc *Encoder) EndIndefinite() error { return err } -var cborIndefHeader = map[cborType][]byte{ - cborTypeByteString: {cborByteStringWithIndefiniteLengthHead}, - cborTypeTextString: {cborTextStringWithIndefiniteLengthHead}, - cborTypeArray: {cborArrayWithIndefiniteLengthHead}, - cborTypeMap: {cborMapWithIndefiniteLengthHead}, -} - func (enc *Encoder) startIndefinite(typ cborType) error { if enc.em.indefLength == IndefLengthForbidden { return &IndefiniteLengthError{typ} } - _, err := enc.w.Write(cborIndefHeader[typ]) + var head byte + switch typ { + case cborTypeByteString: + head = cborByteStringWithIndefiniteLengthHead + case cborTypeTextString: + head = cborTextStringWithIndefiniteLengthHead + case cborTypeArray: + head = cborArrayWithIndefiniteLengthHead + case cborTypeMap: + head = cborMapWithIndefiniteLengthHead + } + _, err := enc.w.Write([]byte{head}) if err == nil { enc.indefTypes = append(enc.indefTypes, typ) } @@ -262,7 +272,9 @@ type RawMessage []byte // MarshalCBOR returns m or CBOR nil if m is nil. func (m RawMessage) MarshalCBOR() ([]byte, error) { if len(m) == 0 { - return cborNil, nil + b := make([]byte, len(cborNil)) + copy(b, cborNil) + return b, nil } return m, nil } diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go index cf0a922cd7..b2d71f2e9a 100644 --- a/vendor/github.com/fxamacker/cbor/v2/structfields.go +++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go @@ -6,27 +6,43 @@ package cbor import ( "reflect" "sort" + "strconv" "strings" ) +// field holds shared struct field metadata returned by getFields(). 
type field struct { - name string - nameAsInt int64 // used to decoder to match field name with CBOR int + name string + nameAsInt int64 // used to match field name with CBOR int + idx []int + typ reflect.Type // used during cache building only + keyAsInt bool // used to encode/decode field name as int + tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) + omitEmpty bool // used to skip empty field + omitZero bool // used to skip zero field +} + +type fields []*field + +// encodingField extends field with encoding-specific data. +type encodingField struct { + field cborName []byte - cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3 - idx []int - typ reflect.Type + cborNameByteString []byte // major type 2 name encoding if cborName has major type 3 ef encodeFunc ief isEmptyFunc izf isZeroFunc - typInfo *typeInfo // used to decoder to reuse type info - tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) - omitEmpty bool // used to skip empty field - omitZero bool // used to skip zero field - keyAsInt bool // used to encode/decode field name as int } -type fields []*field +type encodingFields []*encodingField + +// decodingField extends field with decoding-specific data. +type decodingField struct { + field + typInfo *typeInfo // used by decoder to reuse type info +} + +type decodingFields []*decodingField // indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth. type indexFieldSorter struct { @@ -48,7 +64,7 @@ func (x *indexFieldSorter) Less(i, j int) bool { return iIdx[k] < jIdx[k] } } - return len(iIdx) <= len(jIdx) + return len(iIdx) < len(jIdx) } // nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag. 
@@ -69,6 +85,10 @@ func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool { if fi.name != fj.name { return fi.name < fj.name } + // Fields with the same name but different keyAsInt are in separate namespaces. + if fi.keyAsInt != fj.keyAsInt { + return fi.keyAsInt + } if len(fi.idx) != len(fj.idx) { return len(fi.idx) < len(fj.idx) } @@ -117,22 +137,37 @@ func getFields(t reflect.Type) (flds fields, structOptions string) { } } + // Normalize keyasint field names to their canonical integer string form. + // This ensures that "01", "+1", and "1" are treated as the same key + // during deduplication. + for _, f := range flds { + if f.keyAsInt { + nameAsInt, err := strconv.Atoi(f.name) + if err != nil { + continue // Leave invalid names for callers to report. + } + f.nameAsInt = int64(nameAsInt) + f.name = strconv.Itoa(nameAsInt) + } + } + sort.Sort(&nameLevelAndTagFieldSorter{flds}) // Keep visible fields. j := 0 // index of next unique field for i := 0; i < len(flds); { name := flds[i].name + keyAsInt := flds[i].keyAsInt if i == len(flds)-1 || // last field - name != flds[i+1].name || // field i has unique field name + name != flds[i+1].name || flds[i+1].keyAsInt != keyAsInt || // field i has unique (name, keyAsInt) len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1 (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not flds[j] = flds[i] j++ } - // Skip fields with the same field name. - for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive + // Skip fields with the same (name, keyAsInt). 
+ for i++; i < len(flds) && name == flds[i].name && keyAsInt == flds[i].keyAsInt; i++ { //nolint:revive } } if j != len(flds) { diff --git a/vendor/github.com/fxamacker/cbor/v2/valid.go b/vendor/github.com/fxamacker/cbor/v2/valid.go index b40793b95e..850b95019c 100644 --- a/vendor/github.com/fxamacker/cbor/v2/valid.go +++ b/vendor/github.com/fxamacker/cbor/v2/valid.go @@ -54,7 +54,7 @@ func (e *MaxMapPairsError) Error() string { return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map" } -// IndefiniteLengthError indicates found disallowed indefinite length items. +// IndefiniteLengthError indicates found disallowed indefinite-length items. type IndefiniteLengthError struct { t cborType } @@ -113,7 +113,7 @@ func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, err } return d.wellformedIndefiniteString(t, depth, checkBuiltinTags) } - valInt := int(val) + valInt := int(val) //nolint:gosec if valInt < 0 { // Detect integer overflow return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow") @@ -136,7 +136,7 @@ func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, err return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags) } - valInt := int(val) + valInt := int(val) //nolint:gosec if valInt < 0 { // Detect integer overflow return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow") @@ -212,7 +212,7 @@ func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, err return depth, nil } -// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error. +// wellformedIndefiniteString checks indefinite-length byte/text string's well-formedness and returns max depth and error. 
func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) { var err error for { @@ -223,7 +223,7 @@ func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltin d.off++ break } - // Peek ahead to get next type and indefinite length status. + // Peek ahead to get next type and indefinite-length status. nt, ai := parseInitialByte(d.data[d.off]) if t != nt { return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()} @@ -238,7 +238,7 @@ func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltin return depth, nil } -// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error. +// wellformedIndefiniteArrayOrMap checks indefinite-length array/map's well-formedness and returns max depth and error. func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) { var err error maxDepth := depth @@ -326,7 +326,7 @@ func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) d.off += argumentSize if t == cborTypePrimitives { - if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { + if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { //nolint:gosec return 0, 0, 0, err } } @@ -341,7 +341,7 @@ func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) d.off += argumentSize if t == cborTypePrimitives { - if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { + if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { //nolint:gosec return 0, 0, 0, err } } @@ -379,12 +379,12 @@ func (d *decoder) wellformedHead() (t cborType, 
ai byte, val uint64, err error) func (d *decoder) acceptableFloat(f float64) error { switch { - case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f): + case d.dm.nan == NaNDecodeForbidden && math.IsNaN(f): return &UnacceptableDataItemError{ CBORType: cborTypePrimitives.String(), Message: "floating-point NaN", } - case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0): + case d.dm.inf == InfDecodeForbidden && math.IsInf(f, 0): return &UnacceptableDataItemError{ CBORType: cborTypePrimitives.String(), Message: "floating-point infinity", diff --git a/vendor/github.com/go-chi/chi/v5/middleware/route_headers.go b/vendor/github.com/go-chi/chi/v5/middleware/route_headers.go index 88743769a7..1c3334d35f 100644 --- a/vendor/github.com/go-chi/chi/v5/middleware/route_headers.go +++ b/vendor/github.com/go-chi/chi/v5/middleware/route_headers.go @@ -79,6 +79,7 @@ func (hr HeaderRouter) Handler(next http.Handler) http.Handler { if len(hr) == 0 { // skip if no routes set next.ServeHTTP(w, r) + return } // find first matching header route, and continue diff --git a/vendor/github.com/go-ini/ini/.editorconfig b/vendor/github.com/go-ini/ini/.editorconfig deleted file mode 100644 index 4a2d9180f9..0000000000 --- a/vendor/github.com/go-ini/ini/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -# http://editorconfig.org - -root = true - -[*] -charset = utf-8 -end_of_line = lf -insert_final_newline = true -trim_trailing_whitespace = true - -[*_test.go] -trim_trailing_whitespace = false diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore deleted file mode 100644 index 588388bda2..0000000000 --- a/vendor/github.com/go-ini/ini/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -testdata/conf_out.ini -ini.sublime-project -ini.sublime-workspace -testdata/conf_reflect.ini -.idea -/.vscode -.DS_Store diff --git a/vendor/github.com/go-ini/ini/.golangci.yml b/vendor/github.com/go-ini/ini/.golangci.yml deleted file mode 100644 index 631e369254..0000000000 --- 
a/vendor/github.com/go-ini/ini/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -linters-settings: - staticcheck: - checks: [ - "all", - "-SA1019" # There are valid use cases of strings.Title - ] - nakedret: - max-func-lines: 0 # Disallow any unnamed return statement - -linters: - enable: - - deadcode - - errcheck - - gosimple - - govet - - ineffassign - - staticcheck - - structcheck - - typecheck - - unused - - varcheck - - nakedret - - gofmt - - rowserrcheck - - unconvert - - goimports - - unparam diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE deleted file mode 100644 index d361bbcdf5..0000000000 --- a/vendor/github.com/go-ini/ini/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. 
- -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." 
- -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. 
- -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. 
- - Copyright 2014 Unknwon - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile deleted file mode 100644 index f3b0dae2d2..0000000000 --- a/vendor/github.com/go-ini/ini/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -.PHONY: build test bench vet coverage - -build: vet bench - -test: - go test -v -cover -race - -bench: - go test -v -cover -test.bench=. -test.benchmem - -vet: - go vet - -coverage: - go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md deleted file mode 100644 index 30606d9700..0000000000 --- a/vendor/github.com/go-ini/ini/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# INI - -[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain) -[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini) -[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc) -[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) - -![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) - -Package ini provides 
INI file read and write functionality in Go. - -## Features - -- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites. -- Read with recursion values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types. -- Read and **WRITE** comments of sections and keys. -- Manipulate sections, keys and comments with ease. -- Keep sections and keys in order as you parse and save. - -## Installation - -The minimum requirement of Go is **1.13**. - -```sh -$ go get gopkg.in/ini.v1 -``` - -Please add `-u` flag to update in the future. - -## Getting Help - -- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) -- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) -- 中国大陆镜像:https://ini.unknwon.cn - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/codecov.yml b/vendor/github.com/go-ini/ini/codecov.yml deleted file mode 100644 index e02ec84bc0..0000000000 --- a/vendor/github.com/go-ini/ini/codecov.yml +++ /dev/null @@ -1,16 +0,0 @@ -coverage: - range: "60...95" - status: - project: - default: - threshold: 1% - informational: true - patch: - defualt: - only_pulls: true - informational: true - -comment: - layout: 'diff' - -github_checks: false diff --git a/vendor/github.com/go-ini/ini/data_source.go b/vendor/github.com/go-ini/ini/data_source.go deleted file mode 100644 index c3a541f1d1..0000000000 --- a/vendor/github.com/go-ini/ini/data_source.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" -) - -var ( - _ dataSource = (*sourceFile)(nil) - _ dataSource = (*sourceData)(nil) - _ dataSource = (*sourceReadCloser)(nil) -) - -// dataSource is an interface that returns object which can be read and closed. -type dataSource interface { - ReadCloser() (io.ReadCloser, error) -} - -// sourceFile represents an object that contains content on the local file system. -type sourceFile struct { - name string -} - -func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { - return os.Open(s.name) -} - -// sourceData represents an object that contains content in memory. -type sourceData struct { - data []byte -} - -func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(s.data)), nil -} - -// sourceReadCloser represents an input stream with Close method. 
-type sourceReadCloser struct { - reader io.ReadCloser -} - -func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { - return s.reader, nil -} - -func parseDataSource(source interface{}) (dataSource, error) { - switch s := source.(type) { - case string: - return sourceFile{s}, nil - case []byte: - return &sourceData{s}, nil - case io.ReadCloser: - return &sourceReadCloser{s}, nil - case io.Reader: - return &sourceReadCloser{ioutil.NopCloser(s)}, nil - default: - return nil, fmt.Errorf("error parsing data source: unknown type %q", s) - } -} diff --git a/vendor/github.com/go-ini/ini/deprecated.go b/vendor/github.com/go-ini/ini/deprecated.go deleted file mode 100644 index 48b8e66d6d..0000000000 --- a/vendor/github.com/go-ini/ini/deprecated.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -var ( - // Deprecated: Use "DefaultSection" instead. - DEFAULT_SECTION = DefaultSection - // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. - AllCapsUnderscore = SnackCase -) diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go deleted file mode 100644 index f66bc94b8b..0000000000 --- a/vendor/github.com/go-ini/ini/error.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "fmt" -) - -// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one. -type ErrDelimiterNotFound struct { - Line string -} - -// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound. -func IsErrDelimiterNotFound(err error) bool { - _, ok := err.(ErrDelimiterNotFound) - return ok -} - -func (err ErrDelimiterNotFound) Error() string { - return fmt.Sprintf("key-value delimiter not found: %s", err.Line) -} - -// ErrEmptyKeyName indicates the error type of no key name is found which there should be one. -type ErrEmptyKeyName struct { - Line string -} - -// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName. -func IsErrEmptyKeyName(err error) bool { - _, ok := err.(ErrEmptyKeyName) - return ok -} - -func (err ErrEmptyKeyName) Error() string { - return fmt.Sprintf("empty key name: %s", err.Line) -} diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go deleted file mode 100644 index f8b22408be..0000000000 --- a/vendor/github.com/go-ini/ini/file.go +++ /dev/null @@ -1,541 +0,0 @@ -// Copyright 2017 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "sync" -) - -// File represents a combination of one or more INI files in memory. -type File struct { - options LoadOptions - dataSources []dataSource - - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - lock sync.RWMutex - - // To keep data in order. - sectionList []string - // To keep track of the index of a section with same name. - // This meta list is only used with non-unique section names are allowed. - sectionIndexes []int - - // Actual data is stored here. - sections map[string][]*Section - - NameMapper - ValueMapper -} - -// newFile initializes File object with given data sources. -func newFile(dataSources []dataSource, opts LoadOptions) *File { - if len(opts.KeyValueDelimiters) == 0 { - opts.KeyValueDelimiters = "=:" - } - if len(opts.KeyValueDelimiterOnWrite) == 0 { - opts.KeyValueDelimiterOnWrite = "=" - } - if len(opts.ChildSectionDelimiter) == 0 { - opts.ChildSectionDelimiter = "." - } - - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string][]*Section), - options: opts, - } -} - -// Empty returns an empty file object. -func Empty(opts ...LoadOptions) *File { - var opt LoadOptions - if len(opts) > 0 { - opt = opts[0] - } - - // Ignore error here, we are sure our data is good. - f, _ := LoadSources(opt, []byte("")) - return f -} - -// NewSection creates a new section. 
-func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("empty section name") - } - - if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) { - return f.sections[name][0], nil - } - - f.sectionList = append(f.sectionList, name) - - // NOTE: Append to indexes must happen before appending to sections, - // otherwise index will have off-by-one problem. - f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name])) - - sec := newSection(f, name) - f.sections[name] = append(f.sections[name], sec) - - return sec, nil -} - -// NewRawSection creates a new section with an unparseable body. -func (f *File) NewRawSection(name, body string) (*Section, error) { - section, err := f.NewSection(name) - if err != nil { - return nil, err - } - - section.isRawSection = true - section.rawBody = body - return section, nil -} - -// NewSections creates a list of sections. -func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns section by given name. -func (f *File) GetSection(name string) (*Section, error) { - secs, err := f.SectionsByName(name) - if err != nil { - return nil, err - } - - return secs[0], err -} - -// HasSection returns true if the file contains a section with given name. -func (f *File) HasSection(name string) bool { - section, _ := f.GetSection(name) - return section != nil -} - -// SectionsByName returns all sections with given name. 
-func (f *File) SectionsByName(name string) ([]*Section, error) { - if len(name) == 0 { - name = DefaultSection - } - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - secs := f.sections[name] - if len(secs) == 0 { - return nil, fmt.Errorf("section %q does not exist", name) - } - - return secs, nil -} - -// Section assumes named section exists and returns a zero-value when not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - if name == "" { - name = DefaultSection - } - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// SectionWithIndex assumes named section exists and returns a new section when not. -func (f *File) SectionWithIndex(name string, index int) *Section { - secs, err := f.SectionsByName(name) - if err != nil || len(secs) <= index { - // NOTE: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - newSec, _ := f.NewSection(name) - return newSec - } - - return secs[index] -} - -// Sections returns a list of Section stored in the current instance. -func (f *File) Sections() []*Section { - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sections := make([]*Section, len(f.sectionList)) - for i, name := range f.sectionList { - sections[i] = f.sections[name][f.sectionIndexes[i]] - } - return sections -} - -// ChildSections returns a list of child sections of given section name. -func (f *File) ChildSections(name string) []*Section { - return f.Section(name).ChildSections() -} - -// SectionStrings returns list of section names. -func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section or all sections with given name. 
-func (f *File) DeleteSection(name string) { - secs, err := f.SectionsByName(name) - if err != nil { - return - } - - for i := 0; i < len(secs); i++ { - // For non-unique sections, it is always needed to remove the first one so - // in the next iteration, the subsequent section continue having index 0. - // Ignoring the error as index 0 never returns an error. - _ = f.DeleteSectionWithIndex(name, 0) - } -} - -// DeleteSectionWithIndex deletes a section with given name and index. -func (f *File) DeleteSectionWithIndex(name string, index int) error { - if !f.options.AllowNonUniqueSections && index != 0 { - return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled") - } - - if len(name) == 0 { - name = DefaultSection - } - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - // Count occurrences of the sections - occurrences := 0 - - sectionListCopy := make([]string, len(f.sectionList)) - copy(sectionListCopy, f.sectionList) - - for i, s := range sectionListCopy { - if s != name { - continue - } - - if occurrences == index { - if len(f.sections[name]) <= 1 { - delete(f.sections, name) // The last one in the map - } else { - f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...) - } - - // Fix section lists - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) - f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...) - - } else if occurrences > index { - // Fix the indices of all following sections with this name. - f.sectionIndexes[i-1]-- - } - - occurrences++ - } - - return nil -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. 
-func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - // In loose mode, we create an empty default section for nonexistent files. - if os.IsNotExist(err) && f.options.Loose { - _ = f.parse(bytes.NewBuffer(nil)) - continue - } - return err - } - if f.options.ShortCircuit { - return nil - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. -func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { - equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight - - if PrettyFormat || PrettyEqual { - equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite) - } - - // Use buffer to make sure target is safe until finish encoding. 
- buf := bytes.NewBuffer(nil) - lastSectionIdx := len(f.sectionList) - 1 - for i, sname := range f.sectionList { - sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) - if len(sec.Comment) > 0 { - // Support multiline comments - lines := strings.Split(sec.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + lines[i] - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { - if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return nil, err - } - } else { - // Write nothing if default section is empty - if len(sec.keyList) == 0 { - continue - } - } - - isLastSection := i == lastSectionIdx - if sec.isRawSection { - if _, err := buf.WriteString(sec.rawBody); err != nil { - return nil, err - } - - if PrettySection && !isLastSection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - continue - } - - // Count and generate alignment length and buffer spaces using the - // longest key. Keys may be modified if they contain certain characters so - // we need to take that into account in our calculation. 
- alignLength := 0 - if PrettyFormat { - for _, kname := range sec.keyList { - keyLength := len(kname) - // First case will surround key by ` and second by """ - if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { - keyLength += 2 - } else if strings.Contains(kname, "`") { - keyLength += 6 - } - - if keyLength > alignLength { - alignLength = keyLength - } - } - } - alignSpaces := bytes.Repeat([]byte(" "), alignLength) - - KeyList: - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DefaultSection { - buf.WriteString(indent) - } - - // Support multiline comments - lines := strings.Split(key.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + strings.TrimSpace(lines[i]) - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if len(indent) > 0 && sname != DefaultSection { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncrement: - kname = "-" - case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): - kname = "`" + kname + "`" - case strings.Contains(kname, "`"): - kname = `"""` + kname + `"""` - } - - writeKeyValue := func(val string) (bool, error) { - if _, err := buf.WriteString(kname); err != nil { - return false, err - } - - if key.isBooleanType { - buf.WriteString(LineBreak) - return true, nil - } - - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } - - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } else if len(strings.TrimSpace(val)) != len(val) { - val = `"` 
+ val + `"` - } - if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { - return false, err - } - return false, nil - } - - shadows := key.ValueWithShadows() - if len(shadows) == 0 { - if _, err := writeKeyValue(""); err != nil { - return nil, err - } - } - - for _, val := range shadows { - exitLoop, err := writeKeyValue(val) - if err != nil { - return nil, err - } else if exitLoop { - continue KeyList - } - } - - for _, val := range key.nestedValues { - if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { - return nil, err - } - } - } - - if PrettySection && !isLastSection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - } - - return buf, nil -} - -// WriteToIndent writes content into io.Writer with given indention. -// If PrettyFormat has been set to be true, -// it will align "=" sign with spaces under each section. -func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { - buf, err := f.writeToBuffer(indent) - if err != nil { - return 0, err - } - return buf.WriteTo(w) -} - -// WriteTo writes file content into io.Writer. -func (f *File) WriteTo(w io.Writer) (int64, error) { - return f.WriteToIndent(w, "") -} - -// SaveToIndent writes content to file system with given value indention. -func (f *File) SaveToIndent(filename, indent string) error { - // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename after done. - buf, err := f.writeToBuffer(indent) - if err != nil { - return err - } - - return ioutil.WriteFile(filename, buf.Bytes(), 0666) -} - -// SaveTo writes content to file system. 
-func (f *File) SaveTo(filename string) error { - return f.SaveToIndent(filename, "") -} diff --git a/vendor/github.com/go-ini/ini/helper.go b/vendor/github.com/go-ini/ini/helper.go deleted file mode 100644 index f9d80a682a..0000000000 --- a/vendor/github.com/go-ini/ini/helper.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -func inSlice(str string, s []string) bool { - for _, v := range s { - if str == v { - return true - } - } - return false -} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go deleted file mode 100644 index 99e7f86511..0000000000 --- a/vendor/github.com/go-ini/ini/ini.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package ini provides INI file read and write functionality in Go. 
-package ini - -import ( - "os" - "regexp" - "runtime" - "strings" -) - -const ( - // Maximum allowed depth when recursively substituing variable names. - depthValues = 99 -) - -var ( - // DefaultSection is the name of default section. You can use this var or the string literal. - // In most of cases, an empty string is all you need to access the section. - DefaultSection = "DEFAULT" - - // LineBreak is the delimiter to determine or compose a new line. - // This variable will be changed to "\r\n" automatically on Windows at package init time. - LineBreak = "\n" - - // Variable regexp pattern: %(variable)s - varPattern = regexp.MustCompile(`%\(([^)]+)\)s`) - - // DefaultHeader explicitly writes default section header. - DefaultHeader = false - - // PrettySection indicates whether to put a line between sections. - PrettySection = true - // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output - // or reduce all possible spaces for compact format. - PrettyFormat = true - // PrettyEqual places spaces around "=" sign even when PrettyFormat is false. - PrettyEqual = false - // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled. - DefaultFormatLeft = "" - // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled. - DefaultFormatRight = "" -) - -var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") - -func init() { - if runtime.GOOS == "windows" && !inTest { - LineBreak = "\r\n" - } -} - -// LoadOptions contains all customized options used for load data source(s). -type LoadOptions struct { - // Loose indicates whether the parser should ignore nonexistent files or return error. - Loose bool - // Insensitive indicates whether the parser forces all section and key names to lowercase. - Insensitive bool - // InsensitiveSections indicates whether the parser forces all section to lowercase. 
- InsensitiveSections bool - // InsensitiveKeys indicates whether the parser forces all key names to lowercase. - InsensitiveKeys bool - // IgnoreContinuation indicates whether to ignore continuation lines while parsing. - IgnoreContinuation bool - // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. - IgnoreInlineComment bool - // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. - SkipUnrecognizableLines bool - // ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source. - ShortCircuit bool - // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. - // This type of keys are mostly used in my.cnf. - AllowBooleanKeys bool - // AllowShadows indicates whether to keep track of keys with same name under same section. - AllowShadows bool - // AllowNestedValues indicates whether to allow AWS-like nested values. - // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values - AllowNestedValues bool - // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. - // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure - // Relevant quote: Values can also span multiple lines, as long as they are indented deeper - // than the first line of the value. - AllowPythonMultilineValues bool - // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. - // Docs: https://docs.python.org/2/library/configparser.html - // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. - // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. 
- SpaceBeforeInlineComment bool - // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format - // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" - UnescapeValueDoubleQuotes bool - // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format - // when value is NOT surrounded by any quotes. - // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. - UnescapeValueCommentSymbols bool - // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise - // conform to key/value pairs. Specify the names of those blocks here. - UnparseableSections []string - // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". - KeyValueDelimiters string - // KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=". - KeyValueDelimiterOnWrite string - // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".". - ChildSectionDelimiter string - // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). - PreserveSurroundedQuote bool - // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). - DebugFunc DebugFunc - // ReaderBufferSize is the buffer size of the reader in bytes. - ReaderBufferSize int - // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times. - AllowNonUniqueSections bool - // AllowDuplicateShadowValues indicates whether values for shadowed keys should be deduplicated. - AllowDuplicateShadowValues bool -} - -// DebugFunc is the type of function called to log parse events. 
-type DebugFunc func(message string) - -// LoadSources allows caller to apply customized options for loading from data source(s). -func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { - sources := make([]dataSource, len(others)+1) - sources[0], err = parseDataSource(source) - if err != nil { - return nil, err - } - for i := range others { - sources[i+1], err = parseDataSource(others[i]) - if err != nil { - return nil, err - } - } - f := newFile(sources, opts) - if err = f.Reload(); err != nil { - return nil, err - } - return f, nil -} - -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. -// It will return error if list contains nonexistent files. -func Load(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{}, source, others...) -} - -// LooseLoad has exactly same functionality as Load function -// except it ignores nonexistent files instead of returning error. -func LooseLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Loose: true}, source, others...) -} - -// InsensitiveLoad has exactly same functionality as Load function -// except it forces all section and key names to be lowercased. -func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Insensitive: true}, source, others...) -} - -// ShadowLoad has exactly same functionality as Load function -// except it allows have shadow keys. -func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{AllowShadows: true}, source, others...) 
-} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go deleted file mode 100644 index a19d9f38ef..0000000000 --- a/vendor/github.com/go-ini/ini/key.go +++ /dev/null @@ -1,837 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" - "time" -) - -// Key represents a key under a section. -type Key struct { - s *Section - Comment string - name string - value string - isAutoIncrement bool - isBooleanType bool - - isShadow bool - shadows []*Key - - nestedValues []string -} - -// newKey simply return a key object with given values. -func newKey(s *Section, name, val string) *Key { - return &Key{ - s: s, - name: name, - value: val, - } -} - -func (k *Key) addShadow(val string) error { - if k.isShadow { - return errors.New("cannot add shadow to another shadow key") - } else if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add shadow to auto-increment or boolean key") - } - - if !k.s.f.options.AllowDuplicateShadowValues { - // Deduplicate shadows based on their values. - if k.value == val { - return nil - } - for i := range k.shadows { - if k.shadows[i].value == val { - return nil - } - } - } - - shadow := newKey(k.s, k.name, val) - shadow.isShadow = true - k.shadows = append(k.shadows, shadow) - return nil -} - -// AddShadow adds a new shadow key to itself. 
-func (k *Key) AddShadow(val string) error { - if !k.s.f.options.AllowShadows { - return errors.New("shadow key is not allowed") - } - return k.addShadow(val) -} - -func (k *Key) addNestedValue(val string) error { - if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add nested value to auto-increment or boolean key") - } - - k.nestedValues = append(k.nestedValues, val) - return nil -} - -// AddNestedValue adds a nested value to the key. -func (k *Key) AddNestedValue(val string) error { - if !k.s.f.options.AllowNestedValues { - return errors.New("nested value is not allowed") - } - return k.addNestedValue(val) -} - -// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv -type ValueMapper func(string) string - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// ValueWithShadows returns raw values of key and its shadows if any. Shadow -// keys with empty values are ignored from the returned list. -func (k *Key) ValueWithShadows() []string { - if len(k.shadows) == 0 { - if k.value == "" { - return []string{} - } - return []string{k.value} - } - - vals := make([]string, 0, len(k.shadows)+1) - if k.value != "" { - vals = append(vals, k.value) - } - for _, s := range k.shadows { - if s.value != "" { - vals = append(vals, s.value) - } - } - return vals -} - -// NestedValues returns nested values stored in the key. -// It is possible returned value is nil if no nested values stored in the key. -func (k *Key) NestedValues() []string { - return k.nestedValues -} - -// transformValue takes a raw value and transforms to its final string. 
-func (k *Key) transformValue(val string) string { - if k.s.f.ValueMapper != nil { - val = k.s.f.ValueMapper(val) - } - - // Fail-fast if no indicate char found for recursive value - if !strings.Contains(val, "%") { - return val - } - for i := 0; i < depthValues; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := vr[2 : len(vr)-2] - - // Search in the same section. - // If not found or found the key itself, then search again in default section. - nk, err := k.s.GetKey(noption) - if err != nil || k == nk { - nk, _ = k.s.f.Section("").GetKey(noption) - if nk == nil { - // Stop when no results found in the default section, - // and returns the value as-is. - break - } - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// String returns string representation of value. -func (k *Key) String() string { - return k.transformValue(k.value) -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. 
-func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - v, err := strconv.ParseInt(k.String(), 0, 64) - return int(v), err -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 0, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 0, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 0, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - k.value = defaultVal - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatBool(defaultVal[0]) - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. 
-func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(int64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].String() - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. 
-func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].Format(format) - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
-func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. 
-func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string divided by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - runes := []rune(str) - vals := make([]string, 0, 2) - var buf bytes.Buffer - escape := false - idx := 0 - for { - if escape { - escape = false - if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { - buf.WriteRune('\\') - } - buf.WriteRune(runes[idx]) - } else { - if runes[idx] == '\\' { - escape = true - } else if strings.HasPrefix(string(runes[idx:]), delim) { - idx += len(delim) - 1 - vals = append(vals, strings.TrimSpace(buf.String())) - buf.Reset() - } else { - buf.WriteRune(runes[idx]) - } - } - idx++ - if idx == len(runes) { - break - } - } - - if buf.Len() > 0 { - vals = append(vals, strings.TrimSpace(buf.String())) - } - - return vals -} - -// StringsWithShadows returns list of string divided by given delimiter. -// Shadows will also be appended if any. 
-func (k *Key) StringsWithShadows(delim string) []string { - vals := k.ValueWithShadows() - results := make([]string, 0, len(vals)*2) - for i := range vals { - if len(vals) == 0 { - continue - } - - results = append(results, strings.Split(vals[i], delim)...) - } - - for i := range results { - results[i] = k.transformValue(strings.TrimSpace(results[i])) - } - return results -} - -// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Float64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), true, false) - return vals -} - -// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Ints(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), true, false) - return vals -} - -// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Int64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), true, false) - return vals -} - -// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), true, false) - return vals -} - -// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), true, false) - return vals -} - -// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Bools(delim string) []bool { - vals, _ := k.parseBools(k.Strings(delim), true, false) - return vals -} - -// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). 
-func (k *Key) TimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then -// it will not be included to result list. -func (k *Key) ValidFloat64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), false, false) - return vals -} - -// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will -// not be included to result list. -func (k *Key) ValidInts(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), false, false) - return vals -} - -// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, -// then it will not be included to result list. -func (k *Key) ValidInt64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), false, false) - return vals -} - -// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, -// then it will not be included to result list. -func (k *Key) ValidUints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), false, false) - return vals -} - -// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. -func (k *Key) ValidUint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), false, false) - return vals -} - -// ValidBools returns list of bool divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. 
-func (k *Key) ValidBools(delim string) []bool { - vals, _ := k.parseBools(k.Strings(delim), false, false) - return vals -} - -// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) - return vals -} - -// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimes(delim string) []time.Time { - return k.ValidTimesFormat(time.RFC3339, delim) -} - -// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictFloat64s(delim string) ([]float64, error) { - return k.parseFloat64s(k.Strings(delim), false, true) -} - -// StrictInts returns list of int divided by given delimiter or error on first invalid input. -func (k *Key) StrictInts(delim string) ([]int, error) { - return k.parseInts(k.Strings(delim), false, true) -} - -// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictInt64s(delim string) ([]int64, error) { - return k.parseInt64s(k.Strings(delim), false, true) -} - -// StrictUints returns list of uint divided by given delimiter or error on first invalid input. -func (k *Key) StrictUints(delim string) ([]uint, error) { - return k.parseUints(k.Strings(delim), false, true) -} - -// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictUint64s(delim string) ([]uint64, error) { - return k.parseUint64s(k.Strings(delim), false, true) -} - -// StrictBools returns list of bool divided by given delimiter or error on first invalid input. 
-func (k *Key) StrictBools(delim string) ([]bool, error) { - return k.parseBools(k.Strings(delim), false, true) -} - -// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { - return k.parseTimesFormat(format, k.Strings(delim), false, true) -} - -// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimes(delim string) ([]time.Time, error) { - return k.StrictTimesFormat(time.RFC3339, delim) -} - -// parseBools transforms strings to bools. -func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) { - vals := make([]bool, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := parseBool(str) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(bool)) - } - } - return vals, err -} - -// parseFloat64s transforms strings to float64s. -func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { - vals := make([]float64, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseFloat(str, 64) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(float64)) - } - } - return vals, err -} - -// parseInts transforms strings to ints. 
-func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { - vals := make([]int, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseInt(str, 0, 64) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, int(val.(int64))) - } - } - return vals, err -} - -// parseInt64s transforms strings to int64s. -func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { - vals := make([]int64, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseInt(str, 0, 64) - return val, err - } - - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(int64)) - } - } - return vals, err -} - -// parseUints transforms strings to uints. -func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { - vals := make([]uint, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseUint(str, 0, 64) - return val, err - } - - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, uint(val.(uint64))) - } - } - return vals, err -} - -// parseUint64s transforms strings to uint64s. 
-func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { - vals := make([]uint64, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseUint(str, 0, 64) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(uint64)) - } - } - return vals, err -} - -type Parser func(str string) (interface{}, error) - -// parseTimesFormat transforms strings to times in given format. -func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { - vals := make([]time.Time, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := time.Parse(format, str) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(time.Time)) - } - } - return vals, err -} - -// doParse transforms strings to different types -func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { - vals := make([]interface{}, 0, len(strs)) - for _, str := range strs { - val, err := parser(str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// SetValue changes key value. 
-func (k *Key) SetValue(v string) { - if k.s.f.BlockMode { - k.s.f.lock.Lock() - defer k.s.f.lock.Unlock() - } - - k.value = v - k.s.keysHash[k.name] = v -} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go deleted file mode 100644 index 44fc526c2c..0000000000 --- a/vendor/github.com/go-ini/ini/parser.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2015 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strconv" - "strings" - "unicode" -) - -const minReaderBufferSize = 4096 - -var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`) - -type parserOptions struct { - IgnoreContinuation bool - IgnoreInlineComment bool - AllowPythonMultilineValues bool - SpaceBeforeInlineComment bool - UnescapeValueDoubleQuotes bool - UnescapeValueCommentSymbols bool - PreserveSurroundedQuote bool - DebugFunc DebugFunc - ReaderBufferSize int -} - -type parser struct { - buf *bufio.Reader - options parserOptions - - isEOF bool - count int - comment *bytes.Buffer -} - -func (p *parser) debug(format string, args ...interface{}) { - if p.options.DebugFunc != nil { - p.options.DebugFunc(fmt.Sprintf(format, args...)) - } -} - -func newParser(r io.Reader, opts parserOptions) *parser { - size := opts.ReaderBufferSize - if size < minReaderBufferSize { - size = minReaderBufferSize - } - - return &parser{ - buf: bufio.NewReaderSize(r, size), - options: opts, - 
count: 1, - comment: &bytes.Buffer{}, - } -} - -// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. -// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding -func (p *parser) BOM() error { - mask, err := p.buf.Peek(2) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 2 { - return nil - } - - switch { - case mask[0] == 254 && mask[1] == 255: - fallthrough - case mask[0] == 255 && mask[1] == 254: - _, err = p.buf.Read(mask) - if err != nil { - return err - } - case mask[0] == 239 && mask[1] == 187: - mask, err := p.buf.Peek(3) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 3 { - return nil - } - if mask[2] == 191 { - _, err = p.buf.Read(mask) - if err != nil { - return err - } - } - } - return nil -} - -func (p *parser) readUntil(delim byte) ([]byte, error) { - data, err := p.buf.ReadBytes(delim) - if err != nil { - if err == io.EOF { - p.isEOF = true - } else { - return nil, err - } - } - return data, nil -} - -func cleanComment(in []byte) ([]byte, bool) { - i := bytes.IndexAny(in, "#;") - if i == -1 { - return nil, false - } - return in[i:], true -} - -func readKeyName(delimiters string, in []byte) (string, int, error) { - line := string(in) - - // Check if key name surrounded by quotes. 
- var keyQuote string - if line[0] == '"' { - if len(line) > 6 && line[0:3] == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - - // Get out key name - var endIdx int - if len(keyQuote) > 0 { - startIdx := len(keyQuote) - // FIXME: fail case -> """"""name"""=value - pos := strings.Index(line[startIdx:], keyQuote) - if pos == -1 { - return "", -1, fmt.Errorf("missing closing key quote: %s", line) - } - pos += startIdx - - // Find key-value delimiter - i := strings.IndexAny(line[pos+startIdx:], delimiters) - if i < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - endIdx = pos + i - return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil - } - - endIdx = strings.IndexAny(line, delimiters) - if endIdx < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - if endIdx == 0 { - return "", -1, ErrEmptyKeyName{line} - } - - return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil -} - -func (p *parser) readMultilines(line, val, valQuote string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := string(data) - - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - - comment, has := cleanComment([]byte(next[pos:])) - if has { - p.comment.Write(bytes.TrimSpace(comment)) - } - break - } - val += next - if p.isEOF { - return "", fmt.Errorf("missing closing key quote from %q to %q", line, next) - } - } - return val, nil -} - -func (p *parser) readContinuationLines(val string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := strings.TrimSpace(string(data)) - - if len(next) == 0 { - break - } - val += next - if val[len(val)-1] != '\\' { - break - } - val = val[:len(val)-1] - } - return val, nil -} - -// hasSurroundedQuote check if and only if the first and last characters -// are quotes \" or \'. 
-// It returns false if any other parts also contain same kind of quotes. -func hasSurroundedQuote(in string, quote byte) bool { - return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && - strings.IndexByte(in[1:], quote) == len(in)-2 -} - -func (p *parser) readValue(in []byte, bufferSize int) (string, error) { - - line := strings.TrimLeftFunc(string(in), unicode.IsSpace) - if len(line) == 0 { - if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' { - return p.readPythonMultilines(line, bufferSize) - } - return "", nil - } - - var valQuote string - if len(line) > 3 && line[0:3] == `"""` { - valQuote = `"""` - } else if line[0] == '`' { - valQuote = "`" - } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' { - valQuote = `"` - } - - if len(valQuote) > 0 { - startIdx := len(valQuote) - pos := strings.LastIndex(line[startIdx:], valQuote) - // Check for multi-line value - if pos == -1 { - return p.readMultilines(line, line[startIdx:], valQuote) - } - - if p.options.UnescapeValueDoubleQuotes && valQuote == `"` { - return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil - } - return line[startIdx : pos+startIdx], nil - } - - lastChar := line[len(line)-1] - // Won't be able to reach here if value only contains whitespace - line = strings.TrimSpace(line) - trimmedLastChar := line[len(line)-1] - - // Check continuation lines when desired - if !p.options.IgnoreContinuation && trimmedLastChar == '\\' { - return p.readContinuationLines(line[:len(line)-1]) - } - - // Check if ignore inline comment - if !p.options.IgnoreInlineComment { - var i int - if p.options.SpaceBeforeInlineComment { - i = strings.Index(line, " #") - if i == -1 { - i = strings.Index(line, " ;") - } - - } else { - i = strings.IndexAny(line, "#;") - } - - if i > -1 { - p.comment.WriteString(line[i:]) - line = strings.TrimSpace(line[:i]) - } - - } - - // Trim single and double quotes - if (hasSurroundedQuote(line, '\'') || - 
hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { - line = line[1 : len(line)-1] - } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { - line = strings.ReplaceAll(line, `\;`, ";") - line = strings.ReplaceAll(line, `\#`, "#") - } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { - return p.readPythonMultilines(line, bufferSize) - } - - return line, nil -} - -func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) { - parserBufferPeekResult, _ := p.buf.Peek(bufferSize) - peekBuffer := bytes.NewBuffer(parserBufferPeekResult) - - for { - peekData, peekErr := peekBuffer.ReadBytes('\n') - if peekErr != nil && peekErr != io.EOF { - p.debug("readPythonMultilines: failed to peek with error: %v", peekErr) - return "", peekErr - } - - p.debug("readPythonMultilines: parsing %q", string(peekData)) - - peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) - p.debug("readPythonMultilines: matched %d parts", len(peekMatches)) - for n, v := range peekMatches { - p.debug(" %d: %q", n, v) - } - - // Return if not a Python multiline value. - if len(peekMatches) != 3 { - p.debug("readPythonMultilines: end of value, got: %q", line) - return line, nil - } - - // Advance the parser reader (buffer) in-sync with the peek buffer. - _, err := p.buf.Discard(len(peekData)) - if err != nil { - p.debug("readPythonMultilines: failed to skip to the end, returning error") - return "", err - } - - line += "\n" + peekMatches[0] - } -} - -// parse parses data through an io.Reader. 
-func (f *File) parse(reader io.Reader) (err error) { - p := newParser(reader, parserOptions{ - IgnoreContinuation: f.options.IgnoreContinuation, - IgnoreInlineComment: f.options.IgnoreInlineComment, - AllowPythonMultilineValues: f.options.AllowPythonMultilineValues, - SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment, - UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes, - UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols, - PreserveSurroundedQuote: f.options.PreserveSurroundedQuote, - DebugFunc: f.options.DebugFunc, - ReaderBufferSize: f.options.ReaderBufferSize, - }) - if err = p.BOM(); err != nil { - return fmt.Errorf("BOM: %v", err) - } - - // Ignore error because default section name is never empty string. - name := DefaultSection - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(DefaultSection) - } - section, _ := f.NewSection(name) - - // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key - var isLastValueEmpty bool - var lastRegularKey *Key - - var line []byte - var inUnparseableSection bool - - // NOTE: Iterate and increase `currentPeekSize` until - // the size of the parser buffer is found. - // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. - parserBufferSize := 0 - // NOTE: Peek 4kb at a time. 
- currentPeekSize := minReaderBufferSize - - if f.options.AllowPythonMultilineValues { - for { - peekBytes, _ := p.buf.Peek(currentPeekSize) - peekBytesLength := len(peekBytes) - - if parserBufferSize >= peekBytesLength { - break - } - - currentPeekSize *= 2 - parserBufferSize = peekBytesLength - } - } - - for !p.isEOF { - line, err = p.readUntil('\n') - if err != nil { - return err - } - - if f.options.AllowNestedValues && - isLastValueEmpty && len(line) > 0 { - if line[0] == ' ' || line[0] == '\t' { - err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) - if err != nil { - return err - } - continue - } - } - - line = bytes.TrimLeftFunc(line, unicode.IsSpace) - if len(line) == 0 { - continue - } - - // Comments - if line[0] == '#' || line[0] == ';' { - // Note: we do not care ending line break, - // it is needed for adding second line, - // so just clean it once at the end when set to value. - p.comment.Write(line) - continue - } - - // Section - if line[0] == '[' { - // Read to the next ']' (TODO: support quoted strings) - closeIdx := bytes.LastIndexByte(line, ']') - if closeIdx == -1 { - return fmt.Errorf("unclosed section: %s", line) - } - - name := string(line[1:closeIdx]) - section, err = f.NewSection(name) - if err != nil { - return err - } - - comment, has := cleanComment(line[closeIdx+1:]) - if has { - p.comment.Write(comment) - } - - section.Comment = strings.TrimSpace(p.comment.String()) - - // Reset auto-counter and comments - p.comment.Reset() - p.count = 1 - // Nested values can't span sections - isLastValueEmpty = false - - inUnparseableSection = false - for i := range f.options.UnparseableSections { - if f.options.UnparseableSections[i] == name || - ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { - inUnparseableSection = true - continue - } - } - continue - } - - if inUnparseableSection { - section.isRawSection = true - section.rawBody += string(line) - continue 
- } - - kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) - if err != nil { - switch { - // Treat as boolean key when desired, and whole line is key name. - case IsErrDelimiterNotFound(err): - switch { - case f.options.AllowBooleanKeys: - kname, err := p.readValue(line, parserBufferSize) - if err != nil { - return err - } - key, err := section.NewBooleanKey(kname) - if err != nil { - return err - } - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - continue - - case f.options.SkipUnrecognizableLines: - continue - } - case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines: - continue - } - return err - } - - // Auto increment. - isAutoIncr := false - if kname == "-" { - isAutoIncr = true - kname = "#" + strconv.Itoa(p.count) - p.count++ - } - - value, err := p.readValue(line[offset:], parserBufferSize) - if err != nil { - return err - } - isLastValueEmpty = len(value) == 0 - - key, err := section.NewKey(kname, value) - if err != nil { - return err - } - key.isAutoIncrement = isAutoIncr - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - lastRegularKey = key - } - return nil -} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go deleted file mode 100644 index a3615d820b..0000000000 --- a/vendor/github.com/go-ini/ini/section.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package ini - -import ( - "errors" - "fmt" - "strings" -) - -// Section represents a config section. -type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string - - isRawSection bool - rawBody string -} - -func newSection(f *File, name string) *Section { - return &Section{ - f: f, - name: name, - keys: make(map[string]*Key), - keyList: make([]string, 0, 10), - keysHash: make(map[string]string), - } -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// Body returns rawBody of Section if the section was marked as unparseable. -// It still follows the other rules of the INI format surrounding leading/trailing whitespace. -func (s *Section) Body() string { - return strings.TrimSpace(s.rawBody) -} - -// SetBody updates body content only if section is raw. -func (s *Section) SetBody(body string) { - if !s.isRawSection { - return - } - s.rawBody = body -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { - name = strings.ToLower(name) - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - if s.f.options.AllowShadows { - if err := s.keys[name].addShadow(val); err != nil { - return nil, err - } - } else { - s.keys[name].value = val - s.keysHash[name] = val - } - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = newKey(s, name, val) - s.keysHash[name] = val - return s.keys[name], nil -} - -// NewBooleanKey creates a new boolean type key to given section. 
-func (s *Section) NewBooleanKey(name string) (*Key, error) { - key, err := s.NewKey(name, "true") - if err != nil { - return nil, err - } - - key.isBooleanType = true - return key, nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - if s.f.BlockMode { - s.f.lock.RLock() - } - if s.f.options.Insensitive || s.f.options.InsensitiveKeys { - name = strings.ToLower(name) - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } - break - } - return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name) - } - return key, nil -} - -// HasKey returns true if section contains a key with given name. -func (s *Section) HasKey(name string) bool { - key, _ := s.GetKey(name) - return key != nil -} - -// Deprecated: Use "HasKey" instead. -func (s *Section) Haskey(name string) bool { - return s.HasKey(name) -} - -// HasValue returns true if section contains given raw value. -func (s *Section) HasValue(value string) bool { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - for _, k := range s.keys { - if value == k.value { - return true - } - } - return false -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. - key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. 
-func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// ParentKeys returns list of keys of parent section. -func (s *Section) ParentKeys() []*Key { - var parentKeys []*Key - sname := s.name - for { - if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - parentKeys = append(parentKeys, sec.Keys()...) - } else { - break - } - - } - return parentKeys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := make(map[string]string, len(s.keysHash)) - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - delete(s.keysHash, name) - return - } - } -} - -// ChildSections returns a list of child sections of current section. -// For example, "[parent.child1]" and "[parent.child12]" are child sections -// of section "[parent]". -func (s *Section) ChildSections() []*Section { - prefix := s.name + s.f.options.ChildSectionDelimiter - children := make([]*Section, 0, 3) - for _, name := range s.f.sectionList { - if strings.HasPrefix(name, prefix) { - children = append(children, s.f.sections[name]...) 
- } - } - return children -} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go deleted file mode 100644 index a486b2fe0f..0000000000 --- a/vendor/github.com/go-ini/ini/struct.go +++ /dev/null @@ -1,747 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "strings" - "time" - "unicode" -) - -// NameMapper represents a ini tag name mapper. -type NameMapper func(string) string - -// Built-in name getters. -var ( - // SnackCase converts to format SNACK_CASE. - SnackCase NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - } - newstr = append(newstr, unicode.ToUpper(chr)) - } - return string(newstr) - } - // TitleUnderscore converts to format title_underscore. 
- TitleUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - chr -= 'A' - 'a' - } - newstr = append(newstr, chr) - } - return string(newstr) - } -) - -func (s *Section) parseFieldName(raw, actual string) string { - if len(actual) > 0 { - return actual - } - if s.f.NameMapper != nil { - return s.f.NameMapper(raw) - } - return raw -} - -func parseDelim(actual string) string { - if len(actual) > 0 { - return actual - } - return "," -} - -var reflectTime = reflect.TypeOf(time.Now()).Kind() - -// setSliceWithProperType sets proper values to slice based on its type. -func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - var strs []string - if allowShadow { - strs = key.StringsWithShadows(delim) - } else { - strs = key.Strings(delim) - } - - numVals := len(strs) - if numVals == 0 { - return nil - } - - var vals interface{} - var err error - - sliceOf := field.Type().Elem().Kind() - switch sliceOf { - case reflect.String: - vals = strs - case reflect.Int: - vals, err = key.parseInts(strs, true, false) - case reflect.Int64: - vals, err = key.parseInt64s(strs, true, false) - case reflect.Uint: - vals, err = key.parseUints(strs, true, false) - case reflect.Uint64: - vals, err = key.parseUint64s(strs, true, false) - case reflect.Float64: - vals, err = key.parseFloat64s(strs, true, false) - case reflect.Bool: - vals, err = key.parseBools(strs, true, false) - case reflectTime: - vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - if err != nil && isStrict { - return err - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflect.String: - slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) - case 
reflect.Int: - slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) - case reflect.Int64: - slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) - case reflect.Uint: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) - case reflect.Uint64: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) - case reflect.Float64: - slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) - case reflect.Bool: - slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i])) - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) - } - } - field.Set(slice) - return nil -} - -func wrapStrictError(err error, isStrict bool) error { - if isStrict { - return err - } - return nil -} - -// setWithProperType sets proper value to field based on its type, -// but it does not return error for failing parsing, -// because we want to use default value that is already assigned to struct. -func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - vt := t - isPtr := t.Kind() == reflect.Ptr - if isPtr { - vt = t.Elem() - } - switch vt.Kind() { - case reflect.String: - stringVal := key.String() - if isPtr { - field.Set(reflect.ValueOf(&stringVal)) - } else if len(stringVal) > 0 { - field.SetString(key.String()) - } - case reflect.Bool: - boolVal, err := key.Bool() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - field.Set(reflect.ValueOf(&boolVal)) - } else { - field.SetBool(boolVal) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - // ParseDuration will not return err for `0`, so check the type name - if vt.Name() == "Duration" { - durationVal, err := key.Duration() - if err != nil { - if intVal, err := key.Int64(); err == nil { - field.SetInt(intVal) - return nil - } - return wrapStrictError(err, isStrict) - } - if isPtr { - field.Set(reflect.ValueOf(&durationVal)) - } else if int64(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - } 
- return nil - } - - intVal, err := key.Int64() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - pv := reflect.New(t.Elem()) - pv.Elem().SetInt(intVal) - field.Set(pv) - } else { - field.SetInt(intVal) - } - // byte is an alias for uint8, so supporting uint8 breaks support for byte - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && uint64(durationVal) > 0 { - if isPtr { - field.Set(reflect.ValueOf(&durationVal)) - } else { - field.Set(reflect.ValueOf(durationVal)) - } - return nil - } - - uintVal, err := key.Uint64() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - pv := reflect.New(t.Elem()) - pv.Elem().SetUint(uintVal) - field.Set(pv) - } else { - field.SetUint(uintVal) - } - - case reflect.Float32, reflect.Float64: - floatVal, err := key.Float64() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - pv := reflect.New(t.Elem()) - pv.Elem().SetFloat(floatVal) - field.Set(pv) - } else { - field.SetFloat(floatVal) - } - case reflectTime: - timeVal, err := key.Time() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - field.Set(reflect.ValueOf(&timeVal)) - } else { - field.Set(reflect.ValueOf(timeVal)) - } - case reflect.Slice: - return setSliceWithProperType(key, field, delim, allowShadow, isStrict) - default: - return fmt.Errorf("unsupported type %q", t) - } - return nil -} - -func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) { - opts := strings.SplitN(tag, ",", 5) - rawName = opts[0] - for _, opt := range opts[1:] { - omitEmpty = omitEmpty || (opt == "omitempty") - allowShadow = allowShadow || (opt == "allowshadow") - allowNonUnique = allowNonUnique || (opt == "nonunique") - extends = extends || (opt == "extends") - } - return rawName, omitEmpty, allowShadow, allowNonUnique, extends -} - -// 
mapToField maps the given value to the matching field of the given section. -// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. -func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag) - fieldName := s.parseFieldName(tpField.Name, rawName) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - isStruct := tpField.Type.Kind() == reflect.Struct - isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct - isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - if isAnonymousPtr { - field.Set(reflect.New(tpField.Type.Elem())) - } - - if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) { - if isStructPtr && field.IsNil() { - field.Set(reflect.New(tpField.Type.Elem())) - } - fieldSection := s - if rawName != "" { - sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName - if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) { - fieldSection = secs[sectionIndex] - } - } - if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil { - return fmt.Errorf("map to field %q: %v", fieldName, err) - } - } else if isAnonymousPtr || isStruct || isStructPtr { - if secs, err := s.f.SectionsByName(fieldName); err == nil { - if len(secs) <= sectionIndex { - return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) - } - // Only set the field to non-nil struct value if we have a section for it. 
- // Otherwise, we end up with a non-nil struct ptr even though there is no data. - if isStructPtr && field.IsNil() { - field.Set(reflect.New(tpField.Type.Elem())) - } - if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil { - return fmt.Errorf("map to field %q: %v", fieldName, err) - } - continue - } - } - - // Map non-unique sections - if allowNonUnique && tpField.Type.Kind() == reflect.Slice { - newField, err := s.mapToSlice(fieldName, field, isStrict) - if err != nil { - return fmt.Errorf("map to slice %q: %v", fieldName, err) - } - - field.Set(newField) - continue - } - - if key, err := s.GetKey(fieldName); err == nil { - delim := parseDelim(tpField.Tag.Get("delim")) - if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { - return fmt.Errorf("set field %q: %v", fieldName, err) - } - } - } - return nil -} - -// mapToSlice maps all sections with the same name and returns the new value. -// The type of the Value must be a slice. -func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) { - secs, err := s.f.SectionsByName(secName) - if err != nil { - return reflect.Value{}, err - } - - typ := val.Type().Elem() - for i, sec := range secs { - elem := reflect.New(typ) - if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil { - return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) - } - - val = reflect.Append(val, elem.Elem()) - } - return val, nil -} - -// mapTo maps a section to object v. 
-func (s *Section) mapTo(v interface{}, isStrict bool) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("not a pointer to a struct") - } - - if typ.Kind() == reflect.Slice { - newField, err := s.mapToSlice(s.name, val, isStrict) - if err != nil { - return err - } - - val.Set(newField) - return nil - } - - return s.mapToField(val, isStrict, 0, s.name) -} - -// MapTo maps section to given struct. -func (s *Section) MapTo(v interface{}) error { - return s.mapTo(v, false) -} - -// StrictMapTo maps section to given struct in strict mode, -// which returns all possible error including value parsing error. -func (s *Section) StrictMapTo(v interface{}) error { - return s.mapTo(v, true) -} - -// MapTo maps file to given struct. -func (f *File) MapTo(v interface{}) error { - return f.Section("").MapTo(v) -} - -// StrictMapTo maps file to given struct in strict mode, -// which returns all possible error including value parsing error. -func (f *File) StrictMapTo(v interface{}) error { - return f.Section("").StrictMapTo(v) -} - -// MapToWithMapper maps data sources to given struct with name mapper. -func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.MapTo(v) -} - -// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, -// which returns all possible error including value parsing error. -func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.StrictMapTo(v) -} - -// MapTo maps data sources to given struct. 
-func MapTo(v, source interface{}, others ...interface{}) error { - return MapToWithMapper(v, nil, source, others...) -} - -// StrictMapTo maps data sources to given struct in strict mode, -// which returns all possible error including value parsing error. -func StrictMapTo(v, source interface{}, others ...interface{}) error { - return StrictMapToWithMapper(v, nil, source, others...) -} - -// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. -func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - sliceOf := field.Type().Elem().Kind() - - if allowShadow { - var keyWithShadows *Key - for i := 0; i < field.Len(); i++ { - var val string - switch sliceOf { - case reflect.String: - val = slice.Index(i).String() - case reflect.Int, reflect.Int64: - val = fmt.Sprint(slice.Index(i).Int()) - case reflect.Uint, reflect.Uint64: - val = fmt.Sprint(slice.Index(i).Uint()) - case reflect.Float64: - val = fmt.Sprint(slice.Index(i).Float()) - case reflect.Bool: - val = fmt.Sprint(slice.Index(i).Bool()) - case reflectTime: - val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - - if i == 0 { - keyWithShadows = newKey(key.s, key.name, val) - } else { - _ = keyWithShadows.AddShadow(val) - } - } - *key = *keyWithShadows - return nil - } - - var buf bytes.Buffer - for i := 0; i < field.Len(); i++ { - switch sliceOf { - case reflect.String: - buf.WriteString(slice.Index(i).String()) - case reflect.Int, reflect.Int64: - buf.WriteString(fmt.Sprint(slice.Index(i).Int())) - case reflect.Uint, reflect.Uint64: - buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) - case reflect.Float64: - buf.WriteString(fmt.Sprint(slice.Index(i).Float())) - case reflect.Bool: - buf.WriteString(fmt.Sprint(slice.Index(i).Bool())) - case reflectTime: - 
buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-len(delim)]) - return nil -} - -// reflectWithProperType does the opposite thing as setWithProperType. -func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { - switch t.Kind() { - case reflect.String: - key.SetValue(field.String()) - case reflect.Bool: - key.SetValue(fmt.Sprint(field.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - key.SetValue(fmt.Sprint(field.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - key.SetValue(fmt.Sprint(field.Uint())) - case reflect.Float32, reflect.Float64: - key.SetValue(fmt.Sprint(field.Float())) - case reflectTime: - key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) - case reflect.Slice: - return reflectSliceWithProperType(key, field, delim, allowShadow) - case reflect.Ptr: - if !field.IsNil() { - return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow) - } - default: - return fmt.Errorf("unsupported type %q", t) - } - return nil -} - -// CR: copied from encoding/json/encode.go with modifications of time.Time support. -// TODO: add more test coverage. 
-func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflectTime: - t, ok := v.Interface().(time.Time) - return ok && t.IsZero() - } - return false -} - -// StructReflector is the interface implemented by struct types that can extract themselves into INI objects. -type StructReflector interface { - ReflectINIStruct(*File) error -} - -func (s *Section) reflectFrom(val reflect.Value) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - if !val.Field(i).CanInterface() { - continue - } - - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag) - if omitEmpty && isEmptyValue(field) { - continue - } - - if r, ok := field.Interface().(StructReflector); ok { - return r.ReflectINIStruct(s.f) - } - - fieldName := s.parseFieldName(tpField.Name, rawName) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) { - if err := s.reflectFrom(field); err != nil { - return fmt.Errorf("reflect from field %q: %v", fieldName, err) - } - continue - } - - if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) || - (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { - // Note: The only error here is section doesn't 
exist. - sec, err := s.f.GetSection(fieldName) - if err != nil { - // Note: fieldName can never be empty here, ignore error. - sec, _ = s.f.NewSection(fieldName) - } - - // Add comment from comment tag - if len(sec.Comment) == 0 { - sec.Comment = tpField.Tag.Get("comment") - } - - if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("reflect from field %q: %v", fieldName, err) - } - continue - } - - if allowNonUnique && tpField.Type.Kind() == reflect.Slice { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - sliceOf := field.Type().Elem().Kind() - - for i := 0; i < field.Len(); i++ { - if sliceOf != reflect.Struct && sliceOf != reflect.Ptr { - return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName) - } - - sec, err := s.f.NewSection(fieldName) - if err != nil { - return err - } - - // Add comment from comment tag - if len(sec.Comment) == 0 { - sec.Comment = tpField.Tag.Get("comment") - } - - if err := sec.reflectFrom(slice.Index(i)); err != nil { - return fmt.Errorf("reflect from field %q: %v", fieldName, err) - } - } - continue - } - - // Note: Same reason as section. - key, err := s.GetKey(fieldName) - if err != nil { - key, _ = s.NewKey(fieldName, "") - } - - // Add comment from comment tag - if len(key.Comment) == 0 { - key.Comment = tpField.Tag.Get("comment") - } - - delim := parseDelim(tpField.Tag.Get("delim")) - if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { - return fmt.Errorf("reflect field %q: %v", fieldName, err) - } - - } - return nil -} - -// ReflectFrom reflects section from given struct. It overwrites existing ones. 
-func (s *Section) ReflectFrom(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - - if s.name != DefaultSection && s.f.options.AllowNonUniqueSections && - (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) { - // Clear sections to make sure none exists before adding the new ones - s.f.DeleteSection(s.name) - - if typ.Kind() == reflect.Ptr { - sec, err := s.f.NewSection(s.name) - if err != nil { - return err - } - return sec.reflectFrom(val.Elem()) - } - - slice := val.Slice(0, val.Len()) - sliceOf := val.Type().Elem().Kind() - if sliceOf != reflect.Ptr { - return fmt.Errorf("not a slice of pointers") - } - - for i := 0; i < slice.Len(); i++ { - sec, err := s.f.NewSection(s.name) - if err != nil { - return err - } - - err = sec.reflectFrom(slice.Index(i)) - if err != nil { - return fmt.Errorf("reflect from %dth field: %v", i, err) - } - } - - return nil - } - - if typ.Kind() == reflect.Ptr { - val = val.Elem() - } else { - return errors.New("not a pointer to a struct") - } - - return s.reflectFrom(val) -} - -// ReflectFrom reflects file from given struct. -func (f *File) ReflectFrom(v interface{}) error { - return f.Section("").ReflectFrom(v) -} - -// ReflectFromWithMapper reflects data sources from given struct with name mapper. -func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { - cfg.NameMapper = mapper - return cfg.ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct. 
-func ReflectFrom(cfg *File, v interface{}) error { - return ReflectFromWithMapper(cfg, v, nil) -} diff --git a/vendor/github.com/go-openapi/analysis/.codecov.yml b/vendor/github.com/go-openapi/analysis/.codecov.yml index 841c4281e2..a5ba8e96d8 100644 --- a/vendor/github.com/go-openapi/analysis/.codecov.yml +++ b/vendor/github.com/go-openapi/analysis/.codecov.yml @@ -1,3 +1,7 @@ +codecov: + notify: + after_n_builds: 2 + coverage: status: patch: diff --git a/vendor/github.com/go-openapi/analysis/.editorconfig b/vendor/github.com/go-openapi/analysis/.editorconfig new file mode 100644 index 0000000000..3152da69a5 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore index 87c3bd3e66..d8f4186fe5 100644 --- a/vendor/github.com/go-openapi/analysis/.gitignore +++ b/vendor/github.com/go-openapi/analysis/.gitignore @@ -1,5 +1,5 @@ -secrets.yml -coverage.out -coverage.txt +*.out *.cov .idea +.env +.mcp.json diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml index 06190ac055..b97d68077f 100644 --- a/vendor/github.com/go-openapi/analysis/.golangci.yml +++ b/vendor/github.com/go-openapi/analysis/.golangci.yml @@ -2,34 +2,20 @@ version: "2" linters: default: all disable: - - cyclop - depguard - - errchkjson - - 
errorlint - - exhaustruct - - forcetypeassert - funlen - - gochecknoglobals - - gochecknoinits - - gocognit - - godot - godox - - gosmopolitan - - inamedparam - - intrange - - ireturn - - lll - - musttag - - nestif + - gomoddirectives + - exhaustruct - nlreturn - - noinlineerr - nonamedreturns + - noinlineerr - paralleltest - recvcheck - testpackage - thelper + - tagliatelle - tparallel - - unparam - varnamelen - whitespace - wrapcheck @@ -41,8 +27,17 @@ linters: goconst: min-len: 2 min-occurrences: 3 + cyclop: + max-complexity: 25 gocyclo: - min-complexity: 45 + min-complexity: 25 + gocognit: + min-complexity: 35 + exhaustive: + default-signifies-exhaustive: true + default-case-required: true + lll: + line-length: 180 exclusions: generated: lax presets: @@ -58,6 +53,7 @@ formatters: enable: - gofmt - goimports + - gofumpt exclusions: generated: lax paths: diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md index 9322b065e3..bac878f216 100644 --- a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. 
The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/CONTRIBUTORS.md b/vendor/github.com/go-openapi/analysis/CONTRIBUTORS.md new file mode 100644 index 0000000000..2f85f1c050 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/CONTRIBUTORS.md @@ -0,0 +1,27 @@ +# Contributors + +- Repository: ['go-openapi/analysis'] + +| Total Contributors | Total Contributions | +| --- | --- | +| 15 | 207 | + +| Username | All Time Contribution Count | All Commits | +| --- | --- | --- | +| @fredbi | 104 | | +| @casualjim | 70 | | +| @keramix | 9 | | +| @youyuanwu | 8 | | +| @msample | 3 | | +| @kul-amr | 3 | | +| @mbohlool | 2 | | +| @Copilot | 1 | | +| @danielfbm | 1 | | +| @gregmarr | 1 | | +| @guillemj | 1 | | +| @knweiss | 1 | | +| @tklauser | 1 | | +| @cuishuang | 1 | | +| @ujjwalsh | 1 | | + + _this file was generated by the [Contributors GitHub Action](https://github.com/github-community-projects/contributors)_ diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md index e005d4b37b..82c782fcdd 100644 --- a/vendor/github.com/go-openapi/analysis/README.md +++ b/vendor/github.com/go-openapi/analysis/README.md @@ -1,22 +1,46 @@ -# OpenAPI analysis [![Build Status](https://github.com/go-openapi/analysis/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/analysis/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) +# analysis -[![Slack 
Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/analysis.svg)](https://pkg.go.dev/github.com/go-openapi/analysis) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/analysis)](https://goreportcard.com/report/github.com/go-openapi/analysis) + +[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url] + + + +[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] + + +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] +--- A foundational library to analyze an OAI specification document for easier reasoning about the content. -## What's inside? +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + +## Status + +API is stable. 
+ +## Import this library in your project + +```cmd +go get github.com/go-openapi/analysis +``` + +## What's inside * An analyzer providing methods to walk the functional content of a specification * A spec flattener producing a self-contained document bundle, while preserving `$ref`s * A spec merger ("mixin") to merge several spec documents into a primary spec * A spec "fixer" ensuring that response descriptions are non empty -[Documentation](https://pkg.go.dev/github.com/go-openapi/analysis) - ## FAQ * Does this library support OpenAPI 3? @@ -25,3 +49,79 @@ A foundational library to analyze an OAI specification document for easier reaso > This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). > There is no plan to make it evolve toward supporting OpenAPI 3.x. > This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. + +## Change log + +See + + + +## Licensing + +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). 
+ + + + + +## Other documentation + +* [All-time contributors](./CONTRIBUTORS.md) +* [Contributing guidelines](.github/CONTRIBUTING.md) +* [Maintainers documentation](docs/MAINTAINERS.md) +* [Code style](docs/STYLE.md) + +## Cutting a new release + +Maintainers can cut a new release by either: + +* running [this workflow](https://github.com/go-openapi/analysis/actions/workflows/bump-release.yml) +* or pushing a semver tag + * signed tags are preferred + * The tag message is prepended to release notes + + +[test-badge]: https://github.com/go-openapi/analysis/actions/workflows/go-test.yml/badge.svg +[test-url]: https://github.com/go-openapi/analysis/actions/workflows/go-test.yml +[cov-badge]: https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg +[cov-url]: https://codecov.io/gh/go-openapi/analysis +[vuln-scan-badge]: https://github.com/go-openapi/analysis/actions/workflows/scanner.yml/badge.svg +[vuln-scan-url]: https://github.com/go-openapi/analysis/actions/workflows/scanner.yml +[codeql-badge]: https://github.com/go-openapi/analysis/actions/workflows/codeql.yml/badge.svg +[codeql-url]: https://github.com/go-openapi/analysis/actions/workflows/codeql.yml + +[release-badge]: https://badge.fury.io/gh/go-openapi%2Fanalysis.svg +[release-url]: https://badge.fury.io/gh/go-openapi%2Fanalysis + +[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/analysis +[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/analysis +[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/analysis +[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/analysis + +[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/analysis +[godoc-url]: http://pkg.go.dev/github.com/go-openapi/analysis +[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png +[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM +[slack-url]: 
https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/FfnFYaC3k5 + + +[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg +[license-url]: https://github.com/go-openapi/analysis/?tab=Apache-2.0-1-ov-file#readme + +[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/analysis +[goversion-url]: https://github.com/go-openapi/analysis/blob/master/go.mod +[top-badge]: https://img.shields.io/github/languages/top/go-openapi/analysis +[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/analysis/latest diff --git a/vendor/github.com/go-openapi/analysis/SECURITY.md b/vendor/github.com/go-openapi/analysis/SECURITY.md new file mode 100644 index 0000000000..6ceb159ca2 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/SECURITY.md @@ -0,0 +1,37 @@ +# Security Policy + +This policy outlines the commitment and practices of the go-openapi maintainers regarding security. + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. + +## Reporting a vulnerability + +If you become aware of a security vulnerability that affects the current repository, +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. + +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". 
+ +[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go index 4870ad07be..1c91b8c550 100644 --- a/vendor/github.com/go-openapi/analysis/analyzer.go +++ b/vendor/github.com/go-openapi/analysis/analyzer.go @@ -164,13 +164,13 @@ func New(doc *spec.Swagger) *Spec { return a } -// SecurityRequirement is a representation of a security requirement for an operation +// SecurityRequirement is a representation of a security requirement for an operation. type SecurityRequirement struct { Name string Scopes []string } -// SecurityRequirementsFor gets the security requirements for the operation +// SecurityRequirementsFor gets the security requirements for the operation. func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement { if s.spec.Security == nil && operation.Security == nil { return nil @@ -204,7 +204,7 @@ func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRe return result } -// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements +// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements. 
func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme { result := make(map[string]spec.SecurityScheme) @@ -219,7 +219,7 @@ func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequire return result } -// SecurityDefinitionsFor gets the matching security definitions for a set of requirements +// SecurityDefinitionsFor gets the matching security definitions for a set of requirements. func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { requirements := s.SecurityRequirementsFor(operation) if len(requirements) == 0 { @@ -250,7 +250,7 @@ func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec return result } -// ConsumesFor gets the mediatypes for the operation +// ConsumesFor gets the mediatypes for the operation. func (s *Spec) ConsumesFor(operation *spec.Operation) []string { if len(operation.Consumes) == 0 { cons := make(map[string]struct{}, len(s.spec.Consumes)) @@ -269,7 +269,7 @@ func (s *Spec) ConsumesFor(operation *spec.Operation) []string { return s.structMapKeys(cons) } -// ProducesFor gets the mediatypes for the operation +// ProducesFor gets the mediatypes for the operation. func (s *Spec) ProducesFor(operation *spec.Operation) []string { if len(operation.Produces) == 0 { prod := make(map[string]struct{}, len(s.spec.Produces)) @@ -306,7 +306,7 @@ func fieldNameFromParam(param *spec.Parameter) string { // whenever an error is encountered while resolving references // on parameters. // -// This function takes as input the spec.Parameter which triggered the +// This function takes as input the [spec.Parameter] which triggered the // error and the error itself. // // If the callback function returns false, the calling function should bail. 
@@ -329,7 +329,7 @@ func (s *Spec) ParametersFor(operationID string) []spec.Parameter { // Does not assume parameters properly resolve references or that // such references actually resolve to a parameter object. // -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous +// Upon error, invoke a [ErrorOnParamFunc] callback with the erroneous // parameters. If the callback is set to nil, panics upon errors. func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter { gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { @@ -337,7 +337,7 @@ func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamF s.paramsAsMap(pi.Parameters, bag, callmeOnError) s.paramsAsMap(op.Parameters, bag, callmeOnError) - var res []spec.Parameter + res := make([]spec.Parameter, 0, len(bag)) for _, v := range bag { res = append(res, v) } @@ -388,7 +388,7 @@ func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { // Does not assume parameters properly resolve references or that // such references actually resolve to a parameter object. // -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous +// Upon error, invoke a [ErrorOnParamFunc] callback with the erroneous // parameters. If the callback is set to nil, panics upon errors. func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter { res := make(map[string]spec.Parameter) @@ -400,7 +400,7 @@ func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc return res } -// OperationForName gets the operation for the given id +// OperationForName gets the operation for the given id. 
func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { for method, pathItem := range s.operations { for path, op := range pathItem { @@ -413,7 +413,7 @@ func (s *Spec) OperationForName(operationID string) (string, string, *spec.Opera return "", "", nil, false } -// OperationFor the given method and path +// OperationFor the given method and path. func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { if mp, ok := s.operations[strings.ToUpper(method)]; ok { op, fn := mp[path] @@ -424,12 +424,12 @@ func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { return nil, false } -// Operations gathers all the operations specified in the spec document +// Operations gathers all the operations specified in the spec document. func (s *Spec) Operations() map[string]map[string]*spec.Operation { return s.operations } -// AllPaths returns all the paths in the swagger spec +// AllPaths returns all the paths in the swagger spec. func (s *Spec) AllPaths() map[string]spec.PathItem { if s.spec == nil || s.spec.Paths == nil { return nil @@ -438,7 +438,7 @@ func (s *Spec) AllPaths() map[string]spec.PathItem { return s.spec.Paths.Paths } -// OperationIDs gets all the operation ids based on method an dpath +// OperationIDs gets all the operation ids based on method an dpath. func (s *Spec) OperationIDs() []string { if len(s.operations) == 0 { return nil @@ -458,7 +458,7 @@ func (s *Spec) OperationIDs() []string { return result } -// OperationMethodPaths gets all the operation ids based on method an dpath +// OperationMethodPaths gets all the operation ids based on method an dpath. 
func (s *Spec) OperationMethodPaths() []string { if len(s.operations) == 0 { return nil @@ -474,22 +474,22 @@ func (s *Spec) OperationMethodPaths() []string { return result } -// RequiredConsumes gets all the distinct consumes that are specified in the specification document +// RequiredConsumes gets all the distinct consumes that are specified in the specification document. func (s *Spec) RequiredConsumes() []string { return s.structMapKeys(s.consumes) } -// RequiredProduces gets all the distinct produces that are specified in the specification document +// RequiredProduces gets all the distinct produces that are specified in the specification document. func (s *Spec) RequiredProduces() []string { return s.structMapKeys(s.produces) } -// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec +// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec. func (s *Spec) RequiredSecuritySchemes() []string { return s.structMapKeys(s.authSchemes) } -// SchemaRef is a reference to a schema +// SchemaRef is a reference to a schema. type SchemaRef struct { Name string Ref spec.Ref @@ -498,7 +498,7 @@ type SchemaRef struct { } // SchemasWithAllOf returns schema references to all schemas that are defined -// with an allOf key +// with an allOf key. func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { for _, v := range s.allOfs { result = append(result, v) @@ -507,7 +507,7 @@ func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { return } -// AllDefinitions returns schema references for all the definitions that were discovered +// AllDefinitions returns schema references for all the definitions that were discovered. 
func (s *Spec) AllDefinitions() (result []SchemaRef) { for _, v := range s.allSchemas { result = append(result, v) @@ -516,7 +516,7 @@ func (s *Spec) AllDefinitions() (result []SchemaRef) { return } -// AllDefinitionReferences returns json refs for all the discovered schemas +// AllDefinitionReferences returns JSON references for all the discovered schemas. func (s *Spec) AllDefinitionReferences() (result []string) { for _, v := range s.references.schemas { result = append(result, v.String()) @@ -525,7 +525,7 @@ func (s *Spec) AllDefinitionReferences() (result []string) { return } -// AllParameterReferences returns json refs for all the discovered parameters +// AllParameterReferences returns JSON references for all the discovered parameters. func (s *Spec) AllParameterReferences() (result []string) { for _, v := range s.references.parameters { result = append(result, v.String()) @@ -534,7 +534,7 @@ func (s *Spec) AllParameterReferences() (result []string) { return } -// AllResponseReferences returns json refs for all the discovered responses +// AllResponseReferences returns JSON references for all the discovered responses. func (s *Spec) AllResponseReferences() (result []string) { for _, v := range s.references.responses { result = append(result, v.String()) @@ -543,7 +543,7 @@ func (s *Spec) AllResponseReferences() (result []string) { return } -// AllPathItemReferences returns the references for all the items +// AllPathItemReferences returns the references for all the items. func (s *Spec) AllPathItemReferences() (result []string) { for _, v := range s.references.pathItems { result = append(result, v.String()) @@ -564,7 +564,7 @@ func (s *Spec) AllItemsReferences() (result []string) { return } -// AllReferences returns all the references found in the document, with possible duplicates +// AllReferences returns all the references found in the document, with possible duplicates. 
func (s *Spec) AllReferences() (result []string) { for _, v := range s.references.allRefs { result = append(result, v.String()) @@ -573,7 +573,7 @@ func (s *Spec) AllReferences() (result []string) { return } -// AllRefs returns all the unique references found in the document +// AllRefs returns all the unique references found in the document. func (s *Spec) AllRefs() (result []spec.Ref) { set := make(map[string]struct{}) for _, v := range s.references.allRefs { @@ -592,61 +592,61 @@ func (s *Spec) AllRefs() (result []spec.Ref) { } // ParameterPatterns returns all the patterns found in parameters -// the map is cloned to avoid accidental changes +// the map is cloned to avoid accidental changes. func (s *Spec) ParameterPatterns() map[string]string { return cloneStringMap(s.patterns.parameters) } // HeaderPatterns returns all the patterns found in response headers -// the map is cloned to avoid accidental changes +// the map is cloned to avoid accidental changes. func (s *Spec) HeaderPatterns() map[string]string { return cloneStringMap(s.patterns.headers) } // ItemsPatterns returns all the patterns found in simple array items -// the map is cloned to avoid accidental changes +// the map is cloned to avoid accidental changes. func (s *Spec) ItemsPatterns() map[string]string { return cloneStringMap(s.patterns.items) } // SchemaPatterns returns all the patterns found in schemas -// the map is cloned to avoid accidental changes +// the map is cloned to avoid accidental changes. func (s *Spec) SchemaPatterns() map[string]string { return cloneStringMap(s.patterns.schemas) } // AllPatterns returns all the patterns found in the spec -// the map is cloned to avoid accidental changes +// the map is cloned to avoid accidental changes. 
func (s *Spec) AllPatterns() map[string]string { return cloneStringMap(s.patterns.allPatterns) } // ParameterEnums returns all the enums found in parameters -// the map is cloned to avoid accidental changes +// the map is cloned to avoid accidental changes. func (s *Spec) ParameterEnums() map[string][]any { return cloneEnumMap(s.enums.parameters) } // HeaderEnums returns all the enums found in response headers -// the map is cloned to avoid accidental changes +// the map is cloned to avoid accidental changes. func (s *Spec) HeaderEnums() map[string][]any { return cloneEnumMap(s.enums.headers) } // ItemsEnums returns all the enums found in simple array items -// the map is cloned to avoid accidental changes +// the map is cloned to avoid accidental changes. func (s *Spec) ItemsEnums() map[string][]any { return cloneEnumMap(s.enums.items) } // SchemaEnums returns all the enums found in schemas -// the map is cloned to avoid accidental changes +// the map is cloned to avoid accidental changes. func (s *Spec) SchemaEnums() map[string][]any { return cloneEnumMap(s.enums.schemas) } // AllEnums returns all the enums found in the spec -// the map is cloned to avoid accidental changes +// the map is cloned to avoid accidental changes. 
func (s *Spec) AllEnums() map[string][]any { return cloneEnumMap(s.enums.allEnums) } diff --git a/vendor/github.com/go-openapi/analysis/debug.go b/vendor/github.com/go-openapi/analysis/debug.go index d490eab606..8e777c432a 100644 --- a/vendor/github.com/go-openapi/analysis/debug.go +++ b/vendor/github.com/go-openapi/analysis/debug.go @@ -9,4 +9,4 @@ import ( "github.com/go-openapi/analysis/internal/debug" ) -var debugLog = debug.GetLogger("analysis", os.Getenv("SWAGGER_DEBUG") != "") +var debugLog = debug.GetLogger("analysis", os.Getenv("SWAGGER_DEBUG") != "") //nolint:gochecknoglobals // it's okay to use a private global for logging diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go index 9d41371a9f..9c4b165c6f 100644 --- a/vendor/github.com/go-openapi/analysis/doc.go +++ b/vendor/github.com/go-openapi/analysis/doc.go @@ -1,32 +1,31 @@ // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 -/* -Package analysis provides methods to work with a Swagger specification document from -package go-openapi/spec. - -## Analyzing a specification - -An analysed specification object (type Spec) provides methods to work with swagger definition. - -## Flattening or expanding a specification - -Flattening a specification bundles all remote $ref in the main spec document. -Depending on flattening options, additional preprocessing may take place: - - full flattening: replacing all inline complex constructs by a named entry in #/definitions - - expand: replace all $ref's in the document by their expanded content - -## Merging several specifications - -Mixin several specifications merges all Swagger constructs, and warns about found conflicts. - -## Fixing a specification - -Unmarshalling a specification with golang json unmarshalling may lead to -some unwanted result on present but empty fields. 
- -## Analyzing a Swagger schema - -Swagger schemas are analyzed to determine their complexity and qualify their content. -*/ +// Package analysis provides methods to work with a Swagger specification document from +// package go-openapi/spec. +// +// # Analyzing a specification +// +// An analysed specification object (type Spec) provides methods to work with swagger definition. +// +// # Flattening or expanding a specification +// +// Flattening a specification bundles all remote $ref in the main spec document. +// Depending on flattening options, additional preprocessing may take place: +// +// - full flattening: replacing all inline complex constructs by a named entry in #/definitions +// - expand: replace all $ref's in the document by their expanded content +// +// # Merging several specifications +// +// [Mixin] several specifications merges all Swagger constructs, and warns about found conflicts. +// +// # Fixing a specification +// +// Unmarshalling a specification with golang [json] unmarshalling may lead to +// some unwanted result on present but empty fields. +// +// # Analyzing a Swagger schema +// +// Swagger schemas are analyzed to determine their complexity and qualify their content. package analysis diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go index 1c7a49c034..d7ee0064b6 100644 --- a/vendor/github.com/go-openapi/analysis/flatten.go +++ b/vendor/github.com/go-openapi/analysis/flatten.go @@ -21,7 +21,7 @@ import ( const definitionsPath = "#/definitions" -// newRef stores information about refs created during the flattening process +// newRef stores information about refs created during the flattening process. type newRef struct { key string newName string @@ -32,7 +32,7 @@ type newRef struct { parents []string } -// context stores intermediary results from flatten +// context stores intermediary results from flatten. 
type context struct { newRefs map[string]*newRef warnings []string @@ -52,13 +52,15 @@ func newContext() *context { // There is a minimal and a full flattening mode. // // Minimally flattening a spec means: +// // - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left // unscathed) -// - Importing external (http, file) references so they become internal to the document +// - Importing external ([http], file) references so they become internal to the document // - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers // like "$ref": "#/definitions/myObject/allOfs/1") // // A minimally flattened spec thus guarantees the following properties: +// // - all $refs point to a local definition (i.e. '#/definitions/...') // - definitions are unique // @@ -70,6 +72,7 @@ func newContext() *context { // Minimal flattening is necessary and sufficient for codegen rendering using go-swagger. // // Fully flattening a spec means: +// // - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. // // By complex, we mean every JSON object with some properties. @@ -80,6 +83,7 @@ func newContext() *context { // have been created. // // Available flattening options: +// // - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched // - Expand: expand all $ref's in the document (inoperant if Minimal set to true) // - Verbose: croaks about name conflicts detected @@ -87,8 +91,9 @@ func newContext() *context { // // NOTE: expansion removes all $ref save circular $ref, which remain in place // -// TODO: additional options -// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a +// Desirable future additions: additional options. 
+// +// - PropagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a // x-go-name extension // - LiftAllOfs: // - limit the flattening of allOf members when simple objects @@ -169,7 +174,7 @@ func expand(opts *FlattenOpts) error { } // normalizeRef strips the current file from any absolute file $ref. This works around issue go-openapi/spec#76: -// leading absolute file in $ref is stripped +// leading absolute file in $ref is stripped. func normalizeRef(opts *FlattenOpts) error { debugLog("normalizeRef") @@ -491,14 +496,25 @@ func stripPointersAndOAIGen(opts *FlattenOpts) error { // pointer and name resolution again. func stripOAIGen(opts *FlattenOpts) (bool, error) { debugLog("stripOAIGen") + // Ensure the spec analysis is fresh, as previous steps (namePointers, etc.) might have modified refs. + opts.Spec.reload() + replacedWithComplex := false // figure out referers of OAIGen definitions (doing it before the ref start mutating) - for _, r := range opts.flattenContext.newRefs { + // Sort keys to ensure deterministic processing order + sortedKeys := make([]string, 0, len(opts.flattenContext.newRefs)) + for k := range opts.flattenContext.newRefs { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + for _, k := range sortedKeys { + r := opts.flattenContext.newRefs[k] updateRefParents(opts.Spec.references.allRefs, r) } - for k := range opts.flattenContext.newRefs { + for _, k := range sortedKeys { r := opts.flattenContext.newRefs[k] debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s", k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String()) @@ -521,7 +537,7 @@ func stripOAIGen(opts *FlattenOpts) (bool, error) { return replacedWithComplex, nil } -// updateRefParents updates all parents of an updated $ref +// updateRefParents updates all parents of an updated $ref. 
func updateRefParents(allRefs map[string]spec.Ref, r *newRef) { if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping) return @@ -580,6 +596,19 @@ func stripOAIGenForRef(opts *FlattenOpts, k string, r *newRef) (bool, error) { replacedWithComplex = true } } + + // update parents of the target ref (pr[0]) if it is also a newRef (OAIGen) + // This ensures that if the target is later deleted/merged, it knows about these new referers. + for _, nr := range opts.flattenContext.newRefs { + if nr.path == pr[0] && nr.isOAIGen && !nr.resolved { + for _, p := range pr[1:] { + if !slices.Contains(nr.parents, p) { + nr.parents = append(nr.parents, p) + } + } + break + } + } } // remove OAIGen definition @@ -587,7 +616,15 @@ func stripOAIGenForRef(opts *FlattenOpts, k string, r *newRef) (bool, error) { delete(opts.Swagger().Definitions, path.Base(r.path)) // propagate changes in ref index for keys which have this one as a parent - for kk, value := range opts.flattenContext.newRefs { + // Sort keys to ensure deterministic update order + propagateKeys := make([]string, 0, len(opts.flattenContext.newRefs)) + for k := range opts.flattenContext.newRefs { + propagateKeys = append(propagateKeys, k) + } + sort.Strings(propagateKeys) + + for _, kk := range propagateKeys { + value := opts.flattenContext.newRefs[kk] if kk == k || !value.isOAIGen || value.resolved { continue } diff --git a/vendor/github.com/go-openapi/analysis/flatten_name.go b/vendor/github.com/go-openapi/analysis/flatten_name.go index 475b33c413..922cae55c5 100644 --- a/vendor/github.com/go-openapi/analysis/flatten_name.go +++ b/vendor/github.com/go-openapi/analysis/flatten_name.go @@ -17,7 +17,7 @@ import ( "github.com/go-openapi/swag/mangling" ) -// InlineSchemaNamer finds a new name for an inlined type +// InlineSchemaNamer finds a new name for an inlined type. 
type InlineSchemaNamer struct { Spec *spec.Swagger Operations map[string]operations.OpRef @@ -25,7 +25,7 @@ type InlineSchemaNamer struct { opts *FlattenOpts } -// Name yields a new name for the inline schema +// Name yields a new name for the inline schema. func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *AnalyzedSchema) error { debugLog("naming inlined schema at %s", key) @@ -108,7 +108,7 @@ func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *Ana return nil } -// uniqifyName yields a unique name for a definition +// uniqifyName yields a unique name for a definition. func uniqifyName(definitions spec.Definitions, name string) (string, bool) { isOAIGen := false if name == "" { @@ -244,7 +244,7 @@ func namesForDefinition(parts sortref.SplitKey) ([][]string, int) { return [][]string{}, 0 } -// partAdder knows how to interpret a schema when it comes to build a name from parts +// partAdder knows how to interpret a schema when it comes to build a name from parts. func partAdder(aschema *AnalyzedSchema) sortref.PartAdder { return func(part string) []string { segments := make([]string, 0, minSegments) diff --git a/vendor/github.com/go-openapi/analysis/flatten_options.go b/vendor/github.com/go-openapi/analysis/flatten_options.go index d8fc25cf58..23a57ea1ac 100644 --- a/vendor/github.com/go-openapi/analysis/flatten_options.go +++ b/vendor/github.com/go-openapi/analysis/flatten_options.go @@ -35,7 +35,7 @@ type FlattenOpts struct { _ struct{} // require keys } -// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document. +// ExpandOpts creates a spec.[spec.ExpandOptions] to configure expanding a specification document. 
func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *spec.ExpandOptions { return &spec.ExpandOptions{ RelativeBase: f.BasePath, @@ -44,13 +44,13 @@ func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *spec.ExpandOptions { } } -// Swagger gets the swagger specification for this flatten operation +// Swagger gets the swagger specification for this flatten operation. func (f *FlattenOpts) Swagger() *spec.Swagger { return f.Spec.spec } // croak logs notifications and warnings about valid, but possibly unwanted constructs resulting -// from flattening a spec +// from flattening a spec. func (f *FlattenOpts) croak() { if !f.Verbose { return diff --git a/vendor/github.com/go-openapi/analysis/go.work b/vendor/github.com/go-openapi/analysis/go.work new file mode 100644 index 0000000000..c0f02a78f6 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/go.work @@ -0,0 +1,6 @@ +go 1.25.0 + +use ( + . + ./internal/testintegration +) diff --git a/vendor/github.com/go-openapi/analysis/go.work.sum b/vendor/github.com/go-openapi/analysis/go.work.sum new file mode 100644 index 0000000000..899a68976e --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/go.work.sum @@ -0,0 +1,47 @@ +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30 h1:BHT1/DKsYDGkUgQ2jmMaozVcdk+sVfz0+1ZJq4zkWgw= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= 
+github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= +github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= +go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= +golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= +golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= +golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= +golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= +golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= 
+golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4 h1:bTLqdHv7xrGlFbvf5/TXNxy/iUwwdkjhqQTJDjW7aj0= +golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4/go.mod h1:g5NllXBEermZrmR51cJDQxmJUHUOfRAaNyWBM+R+548= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= +golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU= +golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= +golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= +golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= diff --git a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go index 03e0d32e9e..d3fa08d3ba 100644 --- a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go +++ b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go @@ -11,11 +11,9 @@ import ( "runtime" ) -var ( - output = os.Stdout -) +var output = os.Stdout //nolint:gochecknoglobals // this is on purpose to be overridable during tests -// GetLogger provides a prefix debug logger +// GetLogger provides a prefix debug logger. 
func GetLogger(prefix string, debug bool) func(string, ...any) { if debug { logger := log.New(output, prefix+":", log.LstdFlags) diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go index 320a50bff8..afeef20ea6 100644 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go @@ -17,8 +17,9 @@ import ( // NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here). // // NOTE(windows): -// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) -// * "/ in paths may appear as escape sequences +// +// - refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) +// - "/ in paths may appear as escape sequences. func RebaseRef(baseRef string, ref string) string { baseRef, _ = url.PathUnescape(baseRef) ref, _ = url.PathUnescape(ref) @@ -69,8 +70,9 @@ func RebaseRef(baseRef string, ref string) string { // Path renders absolute path on remote file refs // // NOTE(windows): -// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) -// * "/ in paths may appear as escape sequences +// +// - refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) +// - "/ in paths may appear as escape sequences. 
func Path(ref spec.Ref, basePath string) string { uri, _ := url.PathUnescape(ref.String()) if ref.HasFragmentOnly || filepath.IsAbs(uri) { diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go index 940c46a925..325e2751f8 100644 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go @@ -14,12 +14,12 @@ import ( "github.com/go-openapi/swag/mangling" ) -// AllOpRefsByRef returns an index of sortable operations +// AllOpRefsByRef returns an index of sortable operations. func AllOpRefsByRef(specDoc Provider, operationIDs []string) map[string]OpRef { return OpRefsByRef(GatherOperations(specDoc, operationIDs)) } -// OpRefsByRef indexes a map of sortable operations +// OpRefsByRef indexes a map of sortable operations. func OpRefsByRef(oprefs map[string]OpRef) map[string]OpRef { result := make(map[string]OpRef, len(oprefs)) for _, v := range oprefs { @@ -29,7 +29,7 @@ func OpRefsByRef(oprefs map[string]OpRef) map[string]OpRef { return result } -// OpRef is an indexable, sortable operation +// OpRef is an indexable, sortable operation. type OpRef struct { Method string Path string @@ -39,19 +39,19 @@ type OpRef struct { Ref spec.Ref } -// OpRefs is a sortable collection of operations +// OpRefs is a sortable collection of operations. type OpRefs []OpRef func (o OpRefs) Len() int { return len(o) } func (o OpRefs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } func (o OpRefs) Less(i, j int) bool { return o[i].Key < o[j].Key } -// Provider knows how to collect operations from a spec +// Provider knows how to collect operations from a spec. 
type Provider interface { Operations() map[string]map[string]*spec.Operation } -// GatherOperations builds a map of sorted operations from a spec +// GatherOperations builds a map of sorted operations from a spec. func GatherOperations(specDoc Provider, operationIDs []string) map[string]OpRef { var oprefs OpRefs mangler := mangling.NewNameMangler() diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/errors.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/errors.go index d7c28b8857..b2a8a93ba6 100644 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/errors.go +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/errors.go @@ -58,7 +58,7 @@ func ErrCyclicChain(key string) error { } func ErrInvalidPointerType(key string, value any, err error) error { - return fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T (%v): %w", + return fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T (%w): %w", key, value, err, ErrReplace, ) } diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go index 61c13f7eba..b4c0fdd44a 100644 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go @@ -22,9 +22,10 @@ const ( allocMediumMap = 64 ) +//nolint:gochecknoglobals // it's okay to use a private global for logging var debugLog = debug.GetLogger("analysis/flatten/replace", os.Getenv("SWAGGER_DEBUG") != "") -// RewriteSchemaToRef replaces a schema with a Ref +// RewriteSchemaToRef replaces a schema with a Ref. 
func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error { debugLog("rewriting schema to ref for %s with %s", key, ref.String()) _, value, err := getPointerFromKey(sp, key) @@ -142,7 +143,7 @@ func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error { return nil } -// getPointerFromKey retrieves the content of the JSON pointer "key" +// getPointerFromKey retrieves the content of the JSON pointer "key". func getPointerFromKey(sp any, key string) (string, any, error) { switch sp.(type) { case *spec.Schema: @@ -154,7 +155,10 @@ func getPointerFromKey(sp any, key string) (string, any, error) { return "", sp, nil } // unescape chars in key, e.g. "{}" from path params - pth, _ := url.PathUnescape(key[1:]) + pth, err := url.PathUnescape(key[1:]) + if err != nil { + return "", nil, errors.Join(err, ErrReplace) + } ptr, err := jsonpointer.New(pth) if err != nil { return "", nil, errors.Join(err, ErrReplace) @@ -170,7 +174,7 @@ func getPointerFromKey(sp any, key string) (string, any, error) { return pth, value, nil } -// getParentFromKey retrieves the container of the JSON pointer "key" +// getParentFromKey retrieves the container of the JSON pointer "key". func getParentFromKey(sp any, key string) (string, string, any, error) { switch sp.(type) { case *spec.Schema: @@ -196,7 +200,7 @@ func getParentFromKey(sp any, key string) (string, string, any, error) { return parent, entry, pvalue, nil } -// UpdateRef replaces a ref by another one +// UpdateRef replaces a ref by another one. func UpdateRef(sp any, key string, ref spec.Ref) error { switch sp.(type) { case *spec.Schema: @@ -265,7 +269,7 @@ func UpdateRef(sp any, key string, ref spec.Ref) error { return nil } -// UpdateRefWithSchema replaces a ref with a schema (i.e. re-inline schema) +// UpdateRefWithSchema replaces a ref with a schema (i.e. re-inline schema). 
func UpdateRefWithSchema(sp *spec.Swagger, key string, sch *spec.Schema) error { debugLog("updating ref for %s with schema", key) pth, value, err := getPointerFromKey(sp, key) @@ -324,7 +328,7 @@ func UpdateRefWithSchema(sp *spec.Swagger, key string, sch *spec.Schema) error { return nil } -// DeepestRefResult holds the results from DeepestRef analysis +// DeepestRefResult holds the results from [DeepestRef] analysis. type DeepestRefResult struct { Ref spec.Ref Schema *spec.Schema @@ -332,10 +336,13 @@ type DeepestRefResult struct { } // DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions. +// // - if no definition is found, returns the deepest ref. // - pointers to external files are expanded // // NOTE: all external $ref's are assumed to be already expanded at this stage. +// +//nolint:gocognit,gocyclo,cyclop // definitely needs a refactoring, in a follow-up PR func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) { if !ref.HasFragmentOnly { // we found an external $ref, which is odd at this stage: @@ -392,11 +399,13 @@ DOWNREF: case spec.Response: // a pointer points to a schema initially marshalled in responses section... // Attempt to convert this to a schema. If this fails, the spec is invalid - asJSON, _ := refable.MarshalJSON() + asJSON, err := refable.MarshalJSON() + if err != nil { + return nil, ErrInvalidPointerType(currentRef.String(), value, err) + } var asSchema spec.Schema - err := asSchema.UnmarshalJSON(asJSON) - if err != nil { + if err = asSchema.UnmarshalJSON(asJSON); err != nil { return nil, ErrInvalidPointerType(currentRef.String(), value, err) } warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String())) @@ -409,9 +418,12 @@ DOWNREF: case spec.Parameter: // a pointer points to a schema initially marshalled in parameters section... // Attempt to convert this to a schema. 
If this fails, the spec is invalid - asJSON, _ := refable.MarshalJSON() + asJSON, err := refable.MarshalJSON() + if err != nil { + return nil, ErrInvalidPointerType(currentRef.String(), value, err) + } var asSchema spec.Schema - if err := asSchema.UnmarshalJSON(asJSON); err != nil { + if err = asSchema.UnmarshalJSON(asJSON); err != nil { return nil, ErrInvalidPointerType(currentRef.String(), value, err) } @@ -428,9 +440,12 @@ DOWNREF: break DOWNREF } - asJSON, _ := json.Marshal(refable) + asJSON, err := json.Marshal(refable) + if err != nil { + return nil, ErrInvalidPointerType(currentRef.String(), value, err) + } var asSchema spec.Schema - if err := asSchema.UnmarshalJSON(asJSON); err != nil { + if err = asSchema.UnmarshalJSON(asJSON); err != nil { return nil, ErrInvalidPointerType(currentRef.String(), value, err) } warnings = append(warnings, fmt.Sprintf("found $ref %q (%T) interpreted as schema", currentRef.String(), refable)) diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go index 7e9fb9f0a5..59855ef072 100644 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go @@ -12,7 +12,7 @@ import ( const allocLargeMap = 150 -// Save registers a schema as an entry in spec #/definitions +// Save registers a schema as an entry in spec #/definitions. func Save(sp *spec.Swagger, name string, schema *spec.Schema) { if schema == nil { return @@ -25,7 +25,7 @@ func Save(sp *spec.Swagger, name string, schema *spec.Schema) { sp.Definitions[name] = *schema } -// Clone deep-clones a schema +// Clone deep-clones a schema. 
func Clone(schema *spec.Schema) *spec.Schema { var sch spec.Schema _ = jsonutils.FromDynamicJSON(schema, &sch) diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go index a5db0249ec..363bb19efa 100644 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go @@ -20,12 +20,8 @@ const ( definitions = "definitions" ) +//nolint:gochecknoglobals // it's okay to store small indexes like this as private globals var ( - ignoredKeys map[string]struct{} - validMethods map[string]struct{} -) - -func init() { ignoredKeys = map[string]struct{}{ "schema": {}, "properties": {}, @@ -43,15 +39,15 @@ func init() { "PUT": {}, "DELETE": {}, } -} +) -// Key represent a key item constructed from /-separated segments +// Key represent a key item constructed from /-separated segments. type Key struct { Segments int Key string } -// Keys is a sortable collable collection of Keys +// Keys is a sortable collable collection of Keys. type Keys []Key func (k Keys) Len() int { return len(k) } @@ -60,7 +56,7 @@ func (k Keys) Less(i, j int) bool { return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key) } -// KeyParts construct a SplitKey with all its /-separated segments decomposed. It is sortable. +// KeyParts construct a [SplitKey] with all its /-separated segments decomposed. It is sortable. func KeyParts(key string) SplitKey { var res []string for part := range strings.SplitSeq(key[1:], "/") { @@ -75,12 +71,12 @@ func KeyParts(key string) SplitKey { // SplitKey holds of the parts of a /-separated key, so that their location may be determined. type SplitKey []string -// IsDefinition is true when the split key is in the #/definitions section of a spec +// IsDefinition is true when the split key is in the #/definitions section of a spec. 
func (s SplitKey) IsDefinition() bool { return len(s) > 1 && s[0] == definitions } -// DefinitionName yields the name of the definition +// DefinitionName yields the name of the definition. func (s SplitKey) DefinitionName() string { if !s.IsDefinition() { return "" @@ -89,10 +85,10 @@ func (s SplitKey) DefinitionName() string { return s[1] } -// PartAdder know how to construct the components of a new name +// PartAdder know how to construct the components of a new name. type PartAdder func(string) []string -// BuildName builds a name from segments +// BuildName builds a name from segments. func (s SplitKey) BuildName(segments []string, startIndex int, adder PartAdder) string { for i, part := range s[startIndex:] { if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) { @@ -103,42 +99,42 @@ func (s SplitKey) BuildName(segments []string, startIndex int, adder PartAdder) return strings.Join(segments, " ") } -// IsOperation is true when the split key is in the operations section +// IsOperation is true when the split key is in the operations section. func (s SplitKey) IsOperation() bool { return len(s) > 1 && s[0] == paths } -// IsSharedOperationParam is true when the split key is in the parameters section of a path +// IsSharedOperationParam is true when the split key is in the parameters section of a path. func (s SplitKey) IsSharedOperationParam() bool { return len(s) > 2 && s[0] == paths && s[2] == parameters } -// IsSharedParam is true when the split key is in the #/parameters section of a spec +// IsSharedParam is true when the split key is in the #/parameters section of a spec. func (s SplitKey) IsSharedParam() bool { return len(s) > 1 && s[0] == parameters } -// IsOperationParam is true when the split key is in the parameters section of an operation +// IsOperationParam is true when the split key is in the parameters section of an operation. 
func (s SplitKey) IsOperationParam() bool { return len(s) > 3 && s[0] == paths && s[3] == parameters } -// IsOperationResponse is true when the split key is in the responses section of an operation +// IsOperationResponse is true when the split key is in the responses section of an operation. func (s SplitKey) IsOperationResponse() bool { return len(s) > 3 && s[0] == paths && s[3] == responses } -// IsSharedResponse is true when the split key is in the #/responses section of a spec +// IsSharedResponse is true when the split key is in the #/responses section of a spec. func (s SplitKey) IsSharedResponse() bool { return len(s) > 1 && s[0] == responses } -// IsDefaultResponse is true when the split key is the default response for an operation +// IsDefaultResponse is true when the split key is the default response for an operation. func (s SplitKey) IsDefaultResponse() bool { return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default" } -// IsStatusCodeResponse is true when the split key is an operation response with a status code +// IsStatusCodeResponse is true when the split key is an operation response with a status code. func (s SplitKey) IsStatusCodeResponse() bool { isInt := func() bool { _, err := strconv.Atoi(s[4]) @@ -149,7 +145,7 @@ func (s SplitKey) IsStatusCodeResponse() bool { return len(s) > 4 && s[0] == paths && s[3] == responses && isInt() } -// ResponseName yields either the status code or "Default" for a response +// ResponseName yields either the status code or "Default" for a response. func (s SplitKey) ResponseName() string { if s.IsStatusCodeResponse() { code, _ := strconv.Atoi(s[4]) @@ -164,7 +160,7 @@ func (s SplitKey) ResponseName() string { return "" } -// PathItemRef constructs a $ref object from a split key of the form /{path}/{method} +// PathItemRef constructs a $ref object from a split key of the form /{path}/{method}. 
func (s SplitKey) PathItemRef() spec.Ref { const minValidPathItems = 3 if len(s) < minValidPathItems { @@ -179,7 +175,7 @@ func (s SplitKey) PathItemRef() spec.Ref { return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method))) } -// PathRef constructs a $ref object from a split key of the form /paths/{reference} +// PathRef constructs a $ref object from a split key of the form /paths/{reference}. func (s SplitKey) PathRef() spec.Ref { if !s.IsOperation() { return spec.Ref{} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go index ceac713772..e4ad07b096 100644 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go @@ -4,7 +4,9 @@ package sortref import ( + "iter" "reflect" + "slices" "sort" "strings" @@ -12,10 +14,6 @@ import ( "github.com/go-openapi/spec" ) -var depthGroupOrder = []string{ - "sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition", -} - type mapIterator struct { len int mapIter *reflect.MapIter @@ -42,7 +40,7 @@ func mustMapIterator(anyMap any) *mapIterator { // DepthFirst sorts a map of anything. It groups keys by category // (shared params, op param, statuscode response, default response, definitions) // sort groups internally by number of parts in the key and lexical names -// flatten groups into a single list of keys +// flatten groups into a single list of keys. 
func DepthFirst(in any) []string { iterator := mustMapIterator(in) sorted := make([]string, 0, iterator.Len()) @@ -77,7 +75,7 @@ func DepthFirst(in any) []string { grouped[pk] = append(grouped[pk], Key{Segments: len(split), Key: k}) } - for _, pk := range depthGroupOrder { + for pk := range depthGroupOrder() { res := grouped[pk] sort.Sort(res) @@ -89,6 +87,12 @@ func DepthFirst(in any) []string { return sorted } +func depthGroupOrder() iter.Seq[string] { + return slices.Values([]string{ + "sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition", + }) +} + // topMostRefs is able to sort refs by hierarchical then lexicographic order, // yielding refs ordered breadth-first. type topmostRefs []string @@ -104,7 +108,7 @@ func (k topmostRefs) Less(i, j int) bool { return li < lj } -// TopmostFirst sorts references by depth +// TopmostFirst sorts references by depth. func TopmostFirst(refs []string) []string { res := topmostRefs(refs) sort.Sort(res) @@ -112,13 +116,13 @@ func TopmostFirst(refs []string) []string { return res } -// RefRevIdx is a reverse index for references +// RefRevIdx is a reverse index for references. type RefRevIdx struct { Ref spec.Ref Keys []string } -// ReverseIndex builds a reverse index for references in schemas +// ReverseIndex builds a reverse index for references in schemas. func ReverseIndex(schemas map[string]spec.Ref, basePath string) map[string]RefRevIdx { collected := make(map[string]RefRevIdx) for key, schRef := range schemas { diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go index cc5c392334..a7a9306cb3 100644 --- a/vendor/github.com/go-openapi/analysis/mixin.go +++ b/vendor/github.com/go-openapi/analysis/mixin.go @@ -18,12 +18,13 @@ import ( // needed. 
// // The following parts of primary are subject to merge, filling empty details +// // - Info // - BasePath // - Host // - ExternalDocs // -// Consider calling FixEmptyResponseDescriptions() on the modified primary +// Consider calling [FixEmptyResponseDescriptions]() on the modified primary // if you read them from storage and they are valid to start with. // // Entries in "paths", "definitions", "parameters" and "responses" are @@ -39,7 +40,7 @@ import ( // etc). Ensure they are canonical if your downstream tools do // key normalization of any form. // -// Merging schemes (http, https), and consumers/producers do not account for +// Merging schemes ([http], https), and consumers/producers do not account for // collisions. func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { skipped := make([]string, 0, len(mixins)) diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go index 039dac1566..bedea652ac 100644 --- a/vendor/github.com/go-openapi/analysis/schema.go +++ b/vendor/github.com/go-openapi/analysis/schema.go @@ -8,7 +8,7 @@ import ( "github.com/go-openapi/strfmt" ) -// SchemaOpts configures the schema analyzer +// SchemaOpts configures the schema analyzer. type SchemaOpts struct { Schema *spec.Schema Root any @@ -52,7 +52,7 @@ func Schema(opts SchemaOpts) (*AnalyzedSchema, error) { return a, nil } -// AnalyzedSchema indicates what the schema represents +// AnalyzedSchema indicates what the schema represents. type AnalyzedSchema struct { schema *spec.Schema root any @@ -78,7 +78,7 @@ type AnalyzedSchema struct { IsEnum bool } -// Inherits copies value fields from other onto this schema +// Inherits copies value fields from other onto this schema. 
func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) { if other == nil { return diff --git a/vendor/github.com/go-openapi/errors/.cliff.toml b/vendor/github.com/go-openapi/errors/.cliff.toml deleted file mode 100644 index 702629f5dc..0000000000 --- a/vendor/github.com/go-openapi/errors/.cliff.toml +++ /dev/null @@ -1,181 +0,0 @@ -# git-cliff ~ configuration file -# https://git-cliff.org/docs/configuration - -[changelog] -header = """ -""" - -footer = """ - ------ - -**[{{ remote.github.repo }}]({{ self::remote_url() }}) license terms** - -[![License][license-badge]][license-url] - -[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg -[license-url]: {{ self::remote_url() }}/?tab=Apache-2.0-1-ov-file#readme - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" - -body = """ -{%- if version %} -## [{{ version | trim_start_matches(pat="v") }}]({{ self::remote_url() }}/tree/{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} -{%- else %} -## [unreleased] -{%- endif %} -{%- if message %} - {%- raw %}\n{% endraw %} -{{ message }} - {%- raw %}\n{% endraw %} -{%- endif %} -{%- if version %} - {%- if previous.version %} - -**Full Changelog**: <{{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}> - {%- endif %} -{%- else %} - {%- raw %}\n{% endraw %} -{%- endif %} - -{%- if statistics %}{% if statistics.commit_count %} - {%- raw %}\n{% endraw %} -{{ statistics.commit_count }} commits in this release. 
- {%- raw %}\n{% endraw %} -{%- endif %}{% endif %} ------ - -{%- for group, commits in commits | group_by(attribute="group") %} - {%- raw %}\n{% endraw %} -### {{ group | upper_first }} - {%- raw %}\n{% endraw %} - {%- for commit in commits %} - {%- if commit.remote.pr_title %} - {%- set commit_message = commit.remote.pr_title %} - {%- else %} - {%- set commit_message = commit.message %} - {%- endif %} -* {{ commit_message | split(pat="\n") | first | trim }} - {%- if commit.remote.username %} -{%- raw %} {% endraw %}by [@{{ commit.remote.username }}](https://github.com/{{ commit.remote.username }}) - {%- endif %} - {%- if commit.remote.pr_number %} -{%- raw %} {% endraw %}in [#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }}) - {%- endif %} -{%- raw %} {% endraw %}[...]({{ self::remote_url() }}/commit/{{ commit.id }}) - {%- endfor %} -{%- endfor %} - -{%- if github %} -{%- raw %}\n{% endraw -%} - {%- set all_contributors = github.contributors | length %} - {%- if github.contributors | filter(attribute="username", value="dependabot[bot]") | length < all_contributors %} ------ - -### People who contributed to this release - {% endif %} - {%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) - {%- endif %} - {%- endfor %} - - {% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %} ------ - {%- raw %}\n{% endraw %} - -### New Contributors - {%- endif %} - - {%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* @{{ contributor.username }} made their first contribution - {%- if contributor.pr_number %} - in [#{{ 
contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \ - {%- endif %} - {%- endif %} - {%- endfor %} -{%- endif %} - -{%- raw %}\n{% endraw %} - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" -# Remove leading and trailing whitespaces from the changelog's body. -trim = true -# Render body even when there are no releases to process. -render_always = true -# An array of regex based postprocessors to modify the changelog. -postprocessors = [ - # Replace the placeholder with a URL. - #{ pattern = '', replace = "https://github.com/orhun/git-cliff" }, -] -# output file path -# output = "test.md" - -[git] -# Parse commits according to the conventional commits specification. -# See https://www.conventionalcommits.org -conventional_commits = false -# Exclude commits that do not match the conventional commits specification. -filter_unconventional = false -# Require all commits to be conventional. -# Takes precedence over filter_unconventional. -require_conventional = false -# Split commits on newlines, treating each line as an individual commit. -split_commits = false -# An array of regex based parsers to modify commit messages prior to further processing. -commit_preprocessors = [ - # Replace issue numbers with link templates to be updated in `changelog.postprocessors`. - #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, - # Check spelling of the commit message using https://github.com/crate-ci/typos. - # If the spelling is incorrect, it will be fixed automatically. - #{ pattern = '.*', replace_command = 'typos --write-changes -' } -] -# Prevent commits that are breaking from being excluded by commit parsers. -protect_breaking_commits = false -# An array of regex based parsers for extracting data from the commit message. -# Assigns commits to groups. 
-# Optionally sets the commit's scope and can decide to exclude commits from further processing. -commit_parsers = [ - { message = "^[Cc]hore\\([Rr]elease\\): prepare for", skip = true }, - { message = "(^[Mm]erge)|([Mm]erge conflict)", skip = true }, - { field = "author.name", pattern = "dependabot*", group = "Updates" }, - { message = "([Ss]ecurity)|([Vv]uln)", group = "Security" }, - { body = "(.*[Ss]ecurity)|([Vv]uln)", group = "Security" }, - { message = "([Cc]hore\\(lint\\))|(style)|(lint)|(codeql)|(golangci)", group = "Code quality" }, - { message = "(^[Dd]oc)|((?i)readme)|(badge)|(typo)|(documentation)", group = "Documentation" }, - { message = "(^[Ff]eat)|(^[Ee]nhancement)", group = "Implemented enhancements" }, - { message = "(^ci)|(\\(ci\\))|(fixup\\s+ci)|(fix\\s+ci)|(license)|(example)", group = "Miscellaneous tasks" }, - { message = "^test", group = "Testing" }, - { message = "(^fix)|(panic)", group = "Fixed bugs" }, - { message = "(^refact)|(rework)", group = "Refactor" }, - { message = "(^[Pp]erf)|(performance)", group = "Performance" }, - { message = "(^[Cc]hore)", group = "Miscellaneous tasks" }, - { message = "^[Rr]evert", group = "Reverted changes" }, - { message = "(upgrade.*?go)|(go\\s+version)", group = "Updates" }, - { message = ".*", group = "Other" }, -] -# Exclude commits that are not matched by any commit parser. -filter_commits = false -# An array of link parsers for extracting external references, and turning them into URLs, using regex. -link_parsers = [] -# Include only the tags that belong to the current branch. -use_branch_tags = false -# Order releases topologically instead of chronologically. -topo_order = false -# Order releases topologically instead of chronologically. -topo_order_commits = true -# Order of commits in each group/release within the changelog. 
-# Allowed values: newest, oldest -sort_commits = "newest" -# Process submodules commits -recurse_submodules = false - -#[remote.github] -#owner = "go-openapi" diff --git a/vendor/github.com/go-openapi/errors/.gitignore b/vendor/github.com/go-openapi/errors/.gitignore index 9a8da7e506..9364443a6f 100644 --- a/vendor/github.com/go-openapi/errors/.gitignore +++ b/vendor/github.com/go-openapi/errors/.gitignore @@ -1,3 +1,7 @@ -secrets.yml *.out +*.cov +.idea +.env +.mcp.json +.claude/ settings.local.json diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml index fdae591bce..e2c14be86d 100644 --- a/vendor/github.com/go-openapi/errors/.golangci.yml +++ b/vendor/github.com/go-openapi/errors/.golangci.yml @@ -12,6 +12,7 @@ linters: - paralleltest - recvcheck - testpackage + - thelper - tparallel - varnamelen - whitespace @@ -40,6 +41,10 @@ linters: - common-false-positives - legacy - std-error-handling + rules: + - linters: + - revive + text: "avoid package names that conflict with Go standard library package names" paths: - third_party$ - builtin$ diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md index 9322b065e3..bac878f216 100644 --- a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. 
All +reported by contacting the project team at <ivan+abuse@flanders.co.nz>. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [<http://contributor-covenant.org/version/1/4>][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/errors/CONTRIBUTORS.md b/vendor/github.com/go-openapi/errors/CONTRIBUTORS.md index eb018f8aaf..d49e377a13 100644 --- a/vendor/github.com/go-openapi/errors/CONTRIBUTORS.md +++ b/vendor/github.com/go-openapi/errors/CONTRIBUTORS.md @@ -4,21 +4,22 @@ | Total Contributors | Total Contributions | | --- | --- | -| 12 | 105 | +| 13 | 110 | | Username | All Time Contribution Count | All Commits | | --- | --- | --- | -| @casualjim | 58 | https://github.com/go-openapi/errors/commits?author=casualjim | -| @fredbi | 32 | https://github.com/go-openapi/errors/commits?author=fredbi | -| @youyuanwu | 5 | https://github.com/go-openapi/errors/commits?author=youyuanwu | -| @alexandear | 2 | https://github.com/go-openapi/errors/commits?author=alexandear | -| @fiorix | 1 | https://github.com/go-openapi/errors/commits?author=fiorix | -| @ligustah | 1 | https://github.com/go-openapi/errors/commits?author=ligustah | -| @artemseleznev | 1 | https://github.com/go-openapi/errors/commits?author=artemseleznev | -| @gautierdelorme | 1 | https://github.com/go-openapi/errors/commits?author=gautierdelorme | -| @guillemj | 1 | https://github.com/go-openapi/errors/commits?author=guillemj | -| @maxatome | 1 | https://github.com/go-openapi/errors/commits?author=maxatome | -| @Simon-Li | 1 | 
https://github.com/go-openapi/errors/commits?author=Simon-Li | -| @ujjwalsh | 1 | https://github.com/go-openapi/errors/commits?author=ujjwalsh | +| @casualjim | 58 | | +| @fredbi | 36 | | +| @youyuanwu | 5 | | +| @alexandear | 2 | | +| @fiorix | 1 | | +| @ligustah | 1 | | +| @artemseleznev | 1 | | +| @gautierdelorme | 1 | | +| @guillemj | 1 | | +| @maxatome | 1 | | +| @Simon-Li | 1 | | +| @aokumasan | 1 | | +| @ujjwalsh | 1 | | _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md index 6102c6b527..d9f4a3f151 100644 --- a/vendor/github.com/go-openapi/errors/README.md +++ b/vendor/github.com/go-openapi/errors/README.md @@ -51,7 +51,9 @@ errNotImplemented := NotImplemented("method: %s", url) See ## Licensing @@ -59,12 +61,9 @@ See This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). - ## Other documentation @@ -95,23 +94,19 @@ Maintainers can cut a new release by either: [release-badge]: https://badge.fury.io/gh/go-openapi%2Ferrors.svg [release-url]: https://badge.fury.io/gh/go-openapi%2Ferrors -[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Ferrors.svg -[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Ferrors [gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/errors [gocard-url]: https://goreportcard.com/report/github.com/go-openapi/errors [codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/errors [codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/errors -[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F -[doc-url]: https://goswagger.io/go-openapi [godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/errors [godoc-url]: http://pkg.go.dev/github.com/go-openapi/errors [slack-logo]: 
https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png [slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM [slack-url]: https://goswagger.slack.com/archives/C04R30YMU [discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue -[discord-url]: https://discord.gg/DrafRmZx +[discord-url]: https://discord.gg/twZ9BwT3 [license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg diff --git a/vendor/github.com/go-openapi/errors/SECURITY.md b/vendor/github.com/go-openapi/errors/SECURITY.md index 2a7b6f0910..6ceb159ca2 100644 --- a/vendor/github.com/go-openapi/errors/SECURITY.md +++ b/vendor/github.com/go-openapi/errors/SECURITY.md @@ -6,14 +6,32 @@ This policy outlines the commitment and practices of the go-openapi maintainers | Version | Supported | | ------- | ------------------ | -| 0.22.x | :white_check_mark: | +| 0.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. ## Reporting a vulnerability If you become aware of a security vulnerability that affects the current repository, -please report it privately to the maintainers. +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. 
-Please follow the instructions provided by github to -[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". -TL;DR: on Github, navigate to the project's "Security" tab then click on "Report a vulnerability". +[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go index cb139416af..d2b4427aac 100644 --- a/vendor/github.com/go-openapi/errors/api.go +++ b/vendor/github.com/go-openapi/errors/api.go @@ -146,7 +146,7 @@ func MethodNotAllowed(requested string, allow []string) Error { } } -// ServeError implements the http error handler interface. +// ServeError implements the [http] error handler interface. 
func ServeError(rw http.ResponseWriter, r *http.Request, err error) { rw.Header().Set("Content-Type", "application/json") diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go index b4627f30f4..208c740590 100644 --- a/vendor/github.com/go-openapi/errors/doc.go +++ b/vendor/github.com/go-openapi/errors/doc.go @@ -1,15 +1,13 @@ // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 -/* -Package errors provides an Error interface and several concrete types -implementing this interface to manage API errors and JSON-schema validation -errors. - -A middleware handler ServeError() is provided to serve the errors types -it defines. - -It is used throughout the various go-openapi toolkit libraries -(https://github.com/go-openapi). -*/ +// Package errors provides an Error interface and several concrete types +// implementing this interface to manage API errors and JSON-schema validation +// errors. +// +// A middleware handler [ServeError]() is provided to serve the errors types +// it defines. +// +// It is used throughout the various go-openapi toolkit libraries +// (https://github.com/go-openapi). 
package errors diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore index 59cd294891..885dc27ab0 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.gitignore +++ b/vendor/github.com/go-openapi/jsonpointer/.gitignore @@ -2,3 +2,5 @@ *.cov .idea .env +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml index fdae591bce..dc7c96053d 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -12,6 +12,7 @@ linters: - paralleltest - recvcheck - testpackage + - thelper - tparallel - varnamelen - whitespace diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md index 9322b065e3..bac878f216 100644 --- a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. 
## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md b/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md index 03c098316d..2ebebedc15 100644 --- a/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md +++ b/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md @@ -4,21 +4,21 @@ | Total Contributors | Total Contributions | | --- | --- | -| 12 | 95 | +| 12 | 101 | | Username | All Time Contribution Count | All Commits | | --- | --- | --- | -| @fredbi | 48 | https://github.com/go-openapi/jsonpointer/commits?author=fredbi | -| @casualjim | 33 | https://github.com/go-openapi/jsonpointer/commits?author=casualjim | -| @magodo | 3 | https://github.com/go-openapi/jsonpointer/commits?author=magodo | -| @youyuanwu | 3 | https://github.com/go-openapi/jsonpointer/commits?author=youyuanwu | -| @gaiaz-iusipov | 1 | https://github.com/go-openapi/jsonpointer/commits?author=gaiaz-iusipov | -| @gbjk | 1 | https://github.com/go-openapi/jsonpointer/commits?author=gbjk | -| @gordallott | 1 | https://github.com/go-openapi/jsonpointer/commits?author=gordallott | -| @ianlancetaylor | 1 | https://github.com/go-openapi/jsonpointer/commits?author=ianlancetaylor | -| @mfleader | 1 | https://github.com/go-openapi/jsonpointer/commits?author=mfleader | -| @Neo2308 | 1 | https://github.com/go-openapi/jsonpointer/commits?author=Neo2308 | -| @olivierlemasle | 1 | https://github.com/go-openapi/jsonpointer/commits?author=olivierlemasle | -| @testwill | 1 | https://github.com/go-openapi/jsonpointer/commits?author=testwill | +| @fredbi | 54 | | +| @casualjim | 33 | | +| @magodo | 3 | | +| @youyuanwu | 3 | | +| @gaiaz-iusipov | 1 | | +| @gbjk | 1 | | +| @gordallott | 1 | | +| 
@ianlancetaylor | 1 | | +| @mfleader | 1 | | +| @Neo2308 | 1 | | +| @olivierlemasle | 1 | | +| @testwill | 1 | | _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md index b61b63fd9a..c52803e2e8 100644 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -8,12 +8,22 @@ [![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] -[![GoDoc][godoc-badge]][godoc-url] [![Slack Channel][slack-logo]![slack-badge]][slack-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] --- An implementation of JSON Pointer for golang, which supports go `struct`. +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + ## Status API is stable. 
@@ -124,21 +134,20 @@ Maintainers can cut a new release by either: [release-badge]: https://badge.fury.io/gh/go-openapi%2Fjsonpointer.svg [release-url]: https://badge.fury.io/gh/go-openapi%2Fjsonpointer -[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer.svg -[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer [gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/jsonpointer [gocard-url]: https://goreportcard.com/report/github.com/go-openapi/jsonpointer [codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/jsonpointer [codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/jsonpointer -[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F -[doc-url]: https://goswagger.io/go-openapi [godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer [godoc-url]: http://pkg.go.dev/github.com/go-openapi/jsonpointer [slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png [slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM [slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + [license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg [license-url]: https://github.com/go-openapi/jsonpointer/?tab=Apache-2.0-1-ov-file#readme diff --git a/vendor/github.com/go-openapi/jsonpointer/SECURITY.md b/vendor/github.com/go-openapi/jsonpointer/SECURITY.md index 2a7b6f0910..1fea2c5736 100644 --- a/vendor/github.com/go-openapi/jsonpointer/SECURITY.md +++ b/vendor/github.com/go-openapi/jsonpointer/SECURITY.md @@ -6,14 +6,32 @@ This policy outlines the commitment and practices of the go-openapi maintainers | Version | Supported | | ------- | ------------------ | -| 0.22.x | 
:white_check_mark: | +| 0.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. ## Reporting a vulnerability If you become aware of a security vulnerability that affects the current repository, -please report it privately to the maintainers. +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. -Please follow the instructions provided by github to -[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". 
+[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/jsonreference/.cliff.toml b/vendor/github.com/go-openapi/jsonreference/.cliff.toml deleted file mode 100644 index 702629f5dc..0000000000 --- a/vendor/github.com/go-openapi/jsonreference/.cliff.toml +++ /dev/null @@ -1,181 +0,0 @@ -# git-cliff ~ configuration file -# https://git-cliff.org/docs/configuration - -[changelog] -header = """ -""" - -footer = """ - ------ - -**[{{ remote.github.repo }}]({{ self::remote_url() }}) license terms** - -[![License][license-badge]][license-url] - -[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg -[license-url]: {{ self::remote_url() }}/?tab=Apache-2.0-1-ov-file#readme - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" - -body = """ -{%- if version %} -## [{{ version | trim_start_matches(pat="v") }}]({{ self::remote_url() }}/tree/{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} -{%- else %} -## [unreleased] -{%- endif %} -{%- if message %} - {%- raw %}\n{% endraw %} -{{ message }} - {%- raw %}\n{% endraw %} -{%- endif %} -{%- if version %} - {%- if previous.version %} - -**Full Changelog**: <{{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}> - {%- endif %} -{%- else %} - {%- raw %}\n{% endraw %} -{%- endif %} - -{%- if statistics %}{% if statistics.commit_count %} - {%- raw %}\n{% endraw %} -{{ statistics.commit_count }} commits in this release. 
- {%- raw %}\n{% endraw %} -{%- endif %}{% endif %} ------ - -{%- for group, commits in commits | group_by(attribute="group") %} - {%- raw %}\n{% endraw %} -### {{ group | upper_first }} - {%- raw %}\n{% endraw %} - {%- for commit in commits %} - {%- if commit.remote.pr_title %} - {%- set commit_message = commit.remote.pr_title %} - {%- else %} - {%- set commit_message = commit.message %} - {%- endif %} -* {{ commit_message | split(pat="\n") | first | trim }} - {%- if commit.remote.username %} -{%- raw %} {% endraw %}by [@{{ commit.remote.username }}](https://github.com/{{ commit.remote.username }}) - {%- endif %} - {%- if commit.remote.pr_number %} -{%- raw %} {% endraw %}in [#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }}) - {%- endif %} -{%- raw %} {% endraw %}[...]({{ self::remote_url() }}/commit/{{ commit.id }}) - {%- endfor %} -{%- endfor %} - -{%- if github %} -{%- raw %}\n{% endraw -%} - {%- set all_contributors = github.contributors | length %} - {%- if github.contributors | filter(attribute="username", value="dependabot[bot]") | length < all_contributors %} ------ - -### People who contributed to this release - {% endif %} - {%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) - {%- endif %} - {%- endfor %} - - {% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %} ------ - {%- raw %}\n{% endraw %} - -### New Contributors - {%- endif %} - - {%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* @{{ contributor.username }} made their first contribution - {%- if contributor.pr_number %} - in [#{{ 
contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \ - {%- endif %} - {%- endif %} - {%- endfor %} -{%- endif %} - -{%- raw %}\n{% endraw %} - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" -# Remove leading and trailing whitespaces from the changelog's body. -trim = true -# Render body even when there are no releases to process. -render_always = true -# An array of regex based postprocessors to modify the changelog. -postprocessors = [ - # Replace the placeholder with a URL. - #{ pattern = '', replace = "https://github.com/orhun/git-cliff" }, -] -# output file path -# output = "test.md" - -[git] -# Parse commits according to the conventional commits specification. -# See https://www.conventionalcommits.org -conventional_commits = false -# Exclude commits that do not match the conventional commits specification. -filter_unconventional = false -# Require all commits to be conventional. -# Takes precedence over filter_unconventional. -require_conventional = false -# Split commits on newlines, treating each line as an individual commit. -split_commits = false -# An array of regex based parsers to modify commit messages prior to further processing. -commit_preprocessors = [ - # Replace issue numbers with link templates to be updated in `changelog.postprocessors`. - #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, - # Check spelling of the commit message using https://github.com/crate-ci/typos. - # If the spelling is incorrect, it will be fixed automatically. - #{ pattern = '.*', replace_command = 'typos --write-changes -' } -] -# Prevent commits that are breaking from being excluded by commit parsers. -protect_breaking_commits = false -# An array of regex based parsers for extracting data from the commit message. -# Assigns commits to groups. 
-# Optionally sets the commit's scope and can decide to exclude commits from further processing. -commit_parsers = [ - { message = "^[Cc]hore\\([Rr]elease\\): prepare for", skip = true }, - { message = "(^[Mm]erge)|([Mm]erge conflict)", skip = true }, - { field = "author.name", pattern = "dependabot*", group = "Updates" }, - { message = "([Ss]ecurity)|([Vv]uln)", group = "Security" }, - { body = "(.*[Ss]ecurity)|([Vv]uln)", group = "Security" }, - { message = "([Cc]hore\\(lint\\))|(style)|(lint)|(codeql)|(golangci)", group = "Code quality" }, - { message = "(^[Dd]oc)|((?i)readme)|(badge)|(typo)|(documentation)", group = "Documentation" }, - { message = "(^[Ff]eat)|(^[Ee]nhancement)", group = "Implemented enhancements" }, - { message = "(^ci)|(\\(ci\\))|(fixup\\s+ci)|(fix\\s+ci)|(license)|(example)", group = "Miscellaneous tasks" }, - { message = "^test", group = "Testing" }, - { message = "(^fix)|(panic)", group = "Fixed bugs" }, - { message = "(^refact)|(rework)", group = "Refactor" }, - { message = "(^[Pp]erf)|(performance)", group = "Performance" }, - { message = "(^[Cc]hore)", group = "Miscellaneous tasks" }, - { message = "^[Rr]evert", group = "Reverted changes" }, - { message = "(upgrade.*?go)|(go\\s+version)", group = "Updates" }, - { message = ".*", group = "Other" }, -] -# Exclude commits that are not matched by any commit parser. -filter_commits = false -# An array of link parsers for extracting external references, and turning them into URLs, using regex. -link_parsers = [] -# Include only the tags that belong to the current branch. -use_branch_tags = false -# Order releases topologically instead of chronologically. -topo_order = false -# Order releases topologically instead of chronologically. -topo_order_commits = true -# Order of commits in each group/release within the changelog. 
-# Allowed values: newest, oldest -sort_commits = "newest" -# Process submodules commits -recurse_submodules = false - -#[remote.github] -#owner = "go-openapi" diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore index 769c244007..885dc27ab0 100644 --- a/vendor/github.com/go-openapi/jsonreference/.gitignore +++ b/vendor/github.com/go-openapi/jsonreference/.gitignore @@ -1 +1,6 @@ -secrets.yml +*.out +*.cov +.idea +.env +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index fdae591bce..dc7c96053d 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -12,6 +12,7 @@ linters: - paralleltest - recvcheck - testpackage + - thelper - tparallel - varnamelen - whitespace diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md index 9322b065e3..bac878f216 100644 --- a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . 
All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md b/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md index 9907d5d212..7faeb83a77 100644 --- a/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md +++ b/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md @@ -4,11 +4,11 @@ | Total Contributors | Total Contributions | | --- | --- | -| 9 | 68 | +| 9 | 73 | | Username | All Time Contribution Count | All Commits | | --- | --- | --- | -| @fredbi | 31 | https://github.com/go-openapi/jsonreference/commits?author=fredbi | +| @fredbi | 36 | https://github.com/go-openapi/jsonreference/commits?author=fredbi | | @casualjim | 25 | https://github.com/go-openapi/jsonreference/commits?author=casualjim | | @youyuanwu | 5 | https://github.com/go-openapi/jsonreference/commits?author=youyuanwu | | @olivierlemasle | 2 | https://github.com/go-openapi/jsonreference/commits?author=olivierlemasle | diff --git a/vendor/github.com/go-openapi/jsonreference/NOTICE b/vendor/github.com/go-openapi/jsonreference/NOTICE index f3b51939a9..814e87ef8c 100644 --- a/vendor/github.com/go-openapi/jsonreference/NOTICE +++ b/vendor/github.com/go-openapi/jsonreference/NOTICE @@ -3,7 +3,7 @@ Copyright 2015-2025 go-swagger maintainers // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 -This software library, 
github.com/go-openapi/jsonpointer, includes software developed +This software library, github.com/go-openapi/jsonreference, includes software developed by the go-swagger and go-openapi maintainers ("go-swagger maintainers"). Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ It ships with copies of other software which license terms are recalled below. The original software was authored on 25-02-2013 by sigu-399 (https://github.com/sigu-399, sigu.399@gmail.com). -github.com/sigh-399/jsonpointer +github.com/sigh-399/jsonreference =========================== // SPDX-FileCopyrightText: Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md index d479dbdc73..adea160619 100644 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -8,12 +8,22 @@ [![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] -[![GoDoc][godoc-badge]][godoc-url] [![Slack Channel][slack-logo]![slack-badge]][slack-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] --- An implementation of JSON Reference for golang. +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). 
[![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + ## Status API is stable. @@ -26,18 +36,33 @@ go get github.com/go-openapi/jsonreference ## Dependencies -* https://github.com/go-openapi/jsonpointer +* ## Basic usage +```go +// Creating a new reference +ref, err := jsonreference.New("http://example.com/doc.json#/definitions/Pet") + +// Fragment-only reference +fragRef := jsonreference.MustCreateRef("#/definitions/Pet") + +// Resolving references +parent, _ := jsonreference.New("http://example.com/base.json") +child, _ := jsonreference.New("#/definitions/Pet") +resolved, _ := parent.Inherits(child) +// Result: "http://example.com/base.json#/definitions/Pet" +``` + + ## Change log See ## References -* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 -* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 +* +* ## Licensing @@ -89,6 +114,9 @@ Maintainers can cut a new release by either: [slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png [slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM [slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + [license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg [license-url]: https://github.com/go-openapi/jsonreference/?tab=Apache-2.0-1-ov-file#readme diff --git a/vendor/github.com/go-openapi/jsonreference/SECURITY.md b/vendor/github.com/go-openapi/jsonreference/SECURITY.md index 2a7b6f0910..1fea2c5736 100644 --- a/vendor/github.com/go-openapi/jsonreference/SECURITY.md +++ b/vendor/github.com/go-openapi/jsonreference/SECURITY.md @@ -6,14 +6,32 @@ This policy outlines the commitment and practices of the go-openapi maintainers | Version | Supported | | ------- | 
------------------ | -| 0.22.x | :white_check_mark: | +| O.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. ## Reporting a vulnerability If you become aware of a security vulnerability that affects the current repository, -please report it privately to the maintainers. +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. -Please follow the instructions provided by github to -[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". -TL;DR: on Github, navigate to the project's "Security" tab then click on "Report a vulnerability". 
+[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go index 6e3ae49951..003ba7a838 100644 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -16,6 +16,7 @@ const ( fragmentRune = `#` ) +// ErrChildURL is raised when there is no child. var ErrChildURL = errors.New("child url is nil") // Ref represents a json reference object. diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore index e4f15f17bf..d8f4186fe5 100644 --- a/vendor/github.com/go-openapi/loads/.gitignore +++ b/vendor/github.com/go-openapi/loads/.gitignore @@ -1,4 +1,5 @@ -secrets.yml -coverage.out -profile.cov -profile.out +*.out +*.cov +.idea +.env +.mcp.json diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml index 1ad5adf47e..83968f3fae 100644 --- a/vendor/github.com/go-openapi/loads/.golangci.yml +++ b/vendor/github.com/go-openapi/loads/.golangci.yml @@ -2,25 +2,12 @@ version: "2" linters: default: all disable: - - cyclop - depguard - - errchkjson - - errorlint - - exhaustruct - - forcetypeassert - funlen - - gochecknoglobals + - gochecknoglobals # on this repo, it is hard to refactor without globals/inits and no breaking change - gochecknoinits - - gocognit - - godot - godox - - gosmopolitan - - inamedparam - - intrange - - ireturn - - lll - - musttag - - nestif + - exhaustruct - nlreturn - nonamedreturns - noinlineerr @@ -29,7 +16,6 @@ linters: - 
testpackage - thelper - tparallel - - unparam - varnamelen - whitespace - wrapcheck @@ -41,8 +27,15 @@ linters: goconst: min-len: 2 min-occurrences: 3 + cyclop: + max-complexity: 20 gocyclo: - min-complexity: 45 + min-complexity: 20 + exhaustive: + default-signifies-exhaustive: true + default-case-required: true + lll: + line-length: 180 exclusions: generated: lax presets: @@ -58,6 +51,7 @@ formatters: enable: - gofmt - goimports + - gofumpt exclusions: generated: lax paths: diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml deleted file mode 100644 index cd4a7c331b..0000000000 --- a/vendor/github.com/go-openapi/loads/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.16.x -- 1.x -install: -- go get gotest.tools/gotestsum -language: go -arch: -- amd64 -- ppc64le -jobs: - include: - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -notifications: - slack: - secure: OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= -script: -- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md index 9322b065e3..bac878f216 100644 --- a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. 
## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/CONTRIBUTORS.md b/vendor/github.com/go-openapi/loads/CONTRIBUTORS.md new file mode 100644 index 0000000000..36b836a3d5 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/CONTRIBUTORS.md @@ -0,0 +1,26 @@ +# Contributors + +- Repository: ['go-openapi/loads'] + +| Total Contributors | Total Contributions | +| --- | --- | +| 14 | 123 | + +| Username | All Time Contribution Count | All Commits | +| --- | --- | --- | +| @casualjim | 48 | | +| @fredbi | 45 | | +| @youyuanwu | 6 | | +| @vburenin | 4 | | +| @keramix | 4 | | +| @orisano | 3 | | +| @GlenDC | 3 | | +| @pengsrc | 2 | | +| @a2800276 | 2 | | +| @tklauser | 2 | | +| @hypnoglow | 1 | | +| @koron | 1 | | +| @kreativka | 1 | | +| @petrkotas | 1 | | + + _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md index 1f0174f2d9..d92e62a040 100644 --- a/vendor/github.com/go-openapi/loads/README.md +++ b/vendor/github.com/go-openapi/loads/README.md @@ -1,11 +1,42 @@ -# Loads OAI specs [![Build Status](https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) +# Loads OAI specs -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) 
[![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) + +[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url] + + + +[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] + + +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] -Loading of OAI v2 API specification documents from local or remote locations. Supports JSON and YAML documents. +--- -Primary usage: +Loads OAI v2 API specification documents from local or remote locations. + +Supports JSON and YAML documents. + +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + +## Status + +API is stable. + +## Import this library in your project + +```cmd +go get github.com/go-openapi/loads +``` + +## Basic usage ```go import ( @@ -27,6 +58,61 @@ Primary usage: See also the provided [examples](https://pkg.go.dev/github.com/go-openapi/loads#pkg-examples). +## Change log + +See + ## Licensing This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). 
+ +## Other documentation + +* [All-time contributors](./CONTRIBUTORS.md) +* [Contributing guidelines](.github/CONTRIBUTING.md) +* [Maintainers documentation](docs/MAINTAINERS.md) +* [Code style](docs/STYLE.md) + +## Cutting a new release + +Maintainers can cut a new release by either: + +* running [this workflow](https://github.com/go-openapi/loads/actions/workflows/bump-release.yml) +* or pushing a semver tag + * signed tags are preferred + * The tag message is prepended to release notes + + +[test-badge]: https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg +[test-url]: https://github.com/go-openapi/loads/actions/workflows/go-test.yml +[cov-badge]: https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg +[cov-url]: https://codecov.io/gh/go-openapi/loads +[vuln-scan-badge]: https://github.com/go-openapi/loads/actions/workflows/scanner.yml/badge.svg +[vuln-scan-url]: https://github.com/go-openapi/loads/actions/workflows/scanner.yml +[codeql-badge]: https://github.com/go-openapi/loads/actions/workflows/codeql.yml/badge.svg +[codeql-url]: https://github.com/go-openapi/loads/actions/workflows/codeql.yml + +[release-badge]: https://badge.fury.io/gh/go-openapi%2Floads.svg +[release-url]: https://badge.fury.io/gh/go-openapi%2Floads + +[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/loads +[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/loads +[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/loads +[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/loads + +[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/loads +[godoc-url]: http://pkg.go.dev/github.com/go-openapi/loads +[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png +[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM +[slack-url]: https://goswagger.slack.com/archives/C04R30YMU 
+[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + + +[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg +[license-url]: https://github.com/go-openapi/loads/?tab=Apache-2.0-1-ov-file#readme + +[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/loads +[goversion-url]: https://github.com/go-openapi/loads/blob/master/go.mod +[top-badge]: https://img.shields.io/github/languages/top/go-openapi/loads +[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/loads/latest diff --git a/vendor/github.com/go-openapi/loads/SECURITY.md b/vendor/github.com/go-openapi/loads/SECURITY.md new file mode 100644 index 0000000000..6ceb159ca2 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/SECURITY.md @@ -0,0 +1,37 @@ +# Security Policy + +This policy outlines the commitment and practices of the go-openapi maintainers regarding security. + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. + +## Reporting a vulnerability + +If you become aware of a security vulnerability that affects the current repository, +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. + +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". 
+ +[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go index 7981e70e9f..67a5e2f8d9 100644 --- a/vendor/github.com/go-openapi/loads/doc.go +++ b/vendor/github.com/go-openapi/loads/doc.go @@ -4,4 +4,6 @@ // Package loads provides document loading methods for swagger (OAI v2) API specifications. // // It is used by other go-openapi packages to load and run analysis on local or remote spec documents. +// +// Loaders support JSON and YAML documents. package loads diff --git a/vendor/github.com/go-openapi/loads/errors.go b/vendor/github.com/go-openapi/loads/errors.go index 8f2d602f5c..14a8186b6c 100644 --- a/vendor/github.com/go-openapi/loads/errors.go +++ b/vendor/github.com/go-openapi/loads/errors.go @@ -10,9 +10,9 @@ func (e loaderError) Error() string { } const ( - // ErrLoads is an error returned by the loads package + // ErrLoads is an error returned by the loads package. ErrLoads loaderError = "loaderrs error" - // ErrNoLoader indicates that no configured loader matched the input + // ErrNoLoader indicates that no configured loader matched the input. ErrNoLoader loaderError = "no loader matched" ) diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go index 25b157302e..ac8adfe8b2 100644 --- a/vendor/github.com/go-openapi/loads/loaders.go +++ b/vendor/github.com/go-openapi/loads/loaders.go @@ -13,14 +13,12 @@ import ( "github.com/go-openapi/swag/loading" ) -var ( - // Default chain of loaders, defined at the package level. 
- // - // By default this matches json and yaml documents. - // - // May be altered with AddLoader(). - loaders *loader -) +// Default chain of loaders, defined at the package level. +// +// By default this matches json and yaml documents. +// +// May be altered with AddLoader(). +var loaders *loader func init() { jsonLoader := &loader{ @@ -43,10 +41,10 @@ func init() { spec.PathLoader = loaders.Load } -// DocLoader represents a doc loader type +// DocLoader represents a doc loader type. type DocLoader func(string, ...loading.Option) (json.RawMessage, error) -// DocMatcher represents a predicate to check if a loader matches +// DocMatcher represents a predicate to check if a loader matches. type DocMatcher func(string) bool // DocLoaderWithMatch describes a loading function for a given extension match. @@ -55,7 +53,7 @@ type DocLoaderWithMatch struct { Match DocMatcher } -// NewDocLoaderWithMatch builds a DocLoaderWithMatch to be used in load options +// NewDocLoaderWithMatch builds a [DocLoaderWithMatch] to be used in load options. func NewDocLoaderWithMatch(fn DocLoader, matcher DocMatcher) DocLoaderWithMatch { return DocLoaderWithMatch{ Fn: fn, @@ -71,7 +69,7 @@ type loader struct { Next *loader } -// WithHead adds a loader at the head of the current stack +// WithHead adds a loader at the head of the current stack. func (l *loader) WithHead(head *loader) *loader { if head == nil { return l @@ -80,13 +78,13 @@ func (l *loader) WithHead(head *loader) *loader { return head } -// WithNext adds a loader at the trail of the current stack +// WithNext adds a loader at the trail of the current stack. func (l *loader) WithNext(next *loader) *loader { l.Next = next return next } -// Load the raw document from path +// Load the raw document from path. 
func (l *loader) Load(path string) (json.RawMessage, error) { _, erp := url.Parse(path) if erp != nil { @@ -123,9 +121,9 @@ func (l *loader) clone() *loader { } } -// JSONDoc loads a json document from either a file or a remote url. +// JSONDoc loads a json document from either a file or a remote URL. // -// See [loading.Option] for available options (e.g. configuring authentifaction, +// See [loading.Option] for available options (e.g. configuring authentication, // headers or using embedded file system resources). func JSONDoc(path string, opts ...loading.Option) (json.RawMessage, error) { data, err := loading.LoadFromFileOrHTTP(path, opts...) @@ -139,9 +137,10 @@ func JSONDoc(path string, opts ...loading.Option) (json.RawMessage, error) { // // This sets the configuration at the package level. // -// NOTE: -// - this updates the default loader used by github.com/go-openapi/spec -// - since this sets package level globals, you shouln't call this concurrently +// # Concurrency +// +// This function updates the default loader used by [github.com/go-openapi/spec]. +// Since this sets package level globals, you shouldn't call this concurrently. func AddLoader(predicate DocMatcher, load DocLoader) { loaders = loaders.WithHead(&loader{ DocLoaderWithMatch: DocLoaderWithMatch{ diff --git a/vendor/github.com/go-openapi/loads/options.go b/vendor/github.com/go-openapi/loads/options.go index adb5e6d15b..045ece5e09 100644 --- a/vendor/github.com/go-openapi/loads/options.go +++ b/vendor/github.com/go-openapi/loads/options.go @@ -28,10 +28,10 @@ func loaderFromOptions(options []LoaderOption) *loader { return l } -// LoaderOption allows to fine-tune the spec loader behavior +// LoaderOption allows to fine-tune the spec loader behavior. type LoaderOption func(*options) -// WithDocLoader sets a custom loader for loading specs +// WithDocLoader sets a custom loader for loading specs. 
func WithDocLoader(l DocLoader) LoaderOption { return func(opt *options) { if l == nil { @@ -48,7 +48,7 @@ func WithDocLoader(l DocLoader) LoaderOption { // WithDocLoaderMatches sets a chain of custom loaders for loading specs // for different extension matches. // -// Loaders are executed in the order of provided DocLoaderWithMatch'es. +// Loaders are executed in the order of provided [DocLoaderWithMatch] 'es. func WithDocLoaderMatches(l ...DocLoaderWithMatch) LoaderOption { return func(opt *options) { var final, prev *loader diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go index 213c40c657..606a01d8e9 100644 --- a/vendor/github.com/go-openapi/loads/spec.go +++ b/vendor/github.com/go-openapi/loads/spec.go @@ -21,7 +21,7 @@ func init() { gob.Register([]any{}) } -// Document represents a swagger spec document +// Document represents a swagger spec document. type Document struct { // specAnalyzer Analyzer *analysis.Spec @@ -33,7 +33,7 @@ type Document struct { raw json.RawMessage } -// JSONSpec loads a spec from a json document, using the [JSONDoc] loader. +// JSONSpec loads a spec from a JSON document, using the [JSONDoc] loader. // // A set of [loading.Option] may be passed to this loader using [WithLoadingOptions]. func JSONSpec(path string, opts ...LoaderOption) (*Document, error) { @@ -57,7 +57,7 @@ func JSONSpec(path string, opts ...LoaderOption) (*Document, error) { return doc, nil } -// Embedded returns a Document based on embedded specs (i.e. as a raw [json.RawMessage]). No analysis is required +// Embedded returns a Document based on embedded specs (i.e. as a [json.RawMessage]). No analysis is required. 
func Embedded(orig, flat json.RawMessage, opts ...LoaderOption) (*Document, error) { var origSpec, flatSpec spec.Swagger if err := json.Unmarshal(orig, &origSpec); err != nil { @@ -96,7 +96,7 @@ func Spec(path string, opts ...LoaderOption) (*Document, error) { return document, nil } -// Analyzed creates a new analyzed spec document for a root json.RawMessage. +// Analyzed creates a new analyzed spec document for a root [json.RawMessage]. func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*Document, error) { if version == "" { version = "2.0" @@ -145,18 +145,18 @@ func trimData(in json.RawMessage) (json.RawMessage, error) { // assume yaml doc: convert it to json yml, err := yamlutils.BytesToYAMLDoc(trimmed) if err != nil { - return nil, fmt.Errorf("analyzed: %v: %w", err, ErrLoads) + return nil, fmt.Errorf("analyzed: %w: %w", err, ErrLoads) } d, err := yamlutils.YAMLToJSON(yml) if err != nil { - return nil, fmt.Errorf("analyzed: %v: %w", err, ErrLoads) + return nil, fmt.Errorf("analyzed: %w: %w", err, ErrLoads) } return d, nil } -// Expanded expands the $ref fields in the spec [Document] and returns a new expanded [Document] +// Expanded expands the $ref fields in the spec [Document] and returns a new expanded [Document]. func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { swspec := new(spec.Swagger) if err := json.Unmarshal(d.raw, swspec); err != nil { @@ -200,7 +200,7 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { return dd, nil } -// BasePath the base path for the API specified by this spec +// BasePath the base path for the API specified by this spec. func (d *Document) BasePath() string { if d.spec == nil { return "" @@ -208,37 +208,37 @@ func (d *Document) BasePath() string { return d.spec.BasePath } -// Version returns the OpenAPI version of this spec (e.g. 2.0) +// Version returns the OpenAPI version of this spec (e.g. 2.0). 
func (d *Document) Version() string { return d.spec.Swagger } -// Schema returns the swagger 2.0 meta-schema +// Schema returns the swagger 2.0 meta-schema. func (d *Document) Schema() *spec.Schema { return d.schema } -// Spec returns the swagger object model for this API specification +// Spec returns the swagger object model for this API specification. func (d *Document) Spec() *spec.Swagger { return d.spec } -// Host returns the host for the API +// Host returns the host for the API. func (d *Document) Host() string { return d.spec.Host } -// Raw returns the raw swagger spec as json bytes +// Raw returns the raw swagger spec as json bytes. func (d *Document) Raw() json.RawMessage { return d.raw } -// OrigSpec yields the original spec +// OrigSpec yields the original spec. func (d *Document) OrigSpec() *spec.Swagger { return d.origSpec } -// ResetDefinitions yields a shallow copy with the models reset to the original spec +// ResetDefinitions yields a shallow copy with the models reset to the original spec. func (d *Document) ResetDefinitions() *Document { d.spec.Definitions = make(map[string]spec.Schema, len(d.origSpec.Definitions)) maps.Copy(d.spec.Definitions, d.origSpec.Definitions) @@ -246,9 +246,9 @@ func (d *Document) ResetDefinitions() *Document { return d } -// Pristine creates a new pristine document instance based on the input data +// Pristine creates a new pristine document instance based on the input data. func (d *Document) Pristine() *Document { - raw, _ := json.Marshal(d.Spec()) + raw, _ := json.Marshal(d.Spec()) //nolint:errchkjson // the spec always marshals to JSON dd, _ := Analyzed(raw, d.Version()) dd.pathLoader = d.pathLoader dd.specFilePath = d.specFilePath @@ -256,7 +256,7 @@ func (d *Document) Pristine() *Document { return dd } -// SpecFilePath returns the file path of the spec if one is defined +// SpecFilePath returns the file path of the spec if one is defined. 
func (d *Document) SpecFilePath() string { return d.specFilePath } diff --git a/vendor/github.com/go-openapi/runtime/.gitignore b/vendor/github.com/go-openapi/runtime/.gitignore index fea8b84eca..d8f4186fe5 100644 --- a/vendor/github.com/go-openapi/runtime/.gitignore +++ b/vendor/github.com/go-openapi/runtime/.gitignore @@ -1,5 +1,5 @@ -secrets.yml -coverage.out -*.cov *.out -playground +*.cov +.idea +.env +.mcp.json diff --git a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md index 9322b065e3..bac878f216 100644 --- a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. 
## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/runtime/NOTICE b/vendor/github.com/go-openapi/runtime/NOTICE new file mode 100644 index 0000000000..b9153aae9e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/NOTICE @@ -0,0 +1,45 @@ +Copyright 2015-2025 go-swagger maintainers + +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +This software library, github.com/go-openapi/jsonpointer, includes software developed +by the go-swagger and go-openapi maintainers ("go-swagger maintainers"). + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this software except in compliance with the License. + +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0. + +This software is copied from, derived from, and inspired by other original software products. +It ships with copies of other software which license terms are recalled below. 
+ +The original software was authored in 2014 by Naoya Inada + +https://github.com/naoina/denco +=========================== + +// SPDX-FileCopyrightText: Copyright (c) 2014 Naoya Inada +// SPDX-License-Identifier: MIT + +Copyright (c) 2014 Naoya Inada + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md index 9e15b1adb5..dd7f5039a7 100644 --- a/vendor/github.com/go-openapi/runtime/README.md +++ b/vendor/github.com/go-openapi/runtime/README.md @@ -1,31 +1,46 @@ -# runtime [![Build Status](https://github.com/go-openapi/runtime/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/runtime/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) +# runtime -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/runtime.svg)](https://pkg.go.dev/github.com/go-openapi/runtime) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/runtime)](https://goreportcard.com/report/github.com/go-openapi/runtime) + +[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url] + + + +[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] + + +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] -# go OpenAPI toolkit runtime +--- + +A runtime for go OpenAPI toolkit. The runtime component for use in code generation or as untyped usage. 
-## Release notes +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + +## Status + +API is stable. + +## Import this library in your project + +```cmd +go get github.com/go-openapi/runtime +``` -### v0.29.0 +## Change log -**New with this release**: +See -* upgraded to `go1.24` and modernized the code base accordingly -* updated all dependencies, and removed an noticable indirect dependency (e.g. `mailru/easyjson`) -* **breaking change** no longer imports `opentracing-go` (#365). - * the `WithOpentracing()` method now returns an opentelemetry transport - * for users who can't transition to opentelemetry, the previous behavior - of `WithOpentracing` delivering an opentracing transport is provided by a separate - module `github.com/go-openapi/runtime/client-middleware/opentracing`. -* removed direct dependency to `gopkg.in/yaml.v3`, in favor of `go.yaml.in/yaml/v3` (an indirect - test dependency to the older package is still around) -* technically, the repo has evolved to a mono-repo, multiple modules structures (2 go modules - published), with CI adapted accordingly +For pre-v0.30.0 releases see [release notes](docs/NOTES.md). **What coming next?** @@ -41,3 +56,58 @@ Moving forward, we want to : ## Licensing This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). + +See the license [NOTICE](./NOTICE), which recalls the licensing terms of all the pieces of software +on top of which it has been built. 
+ +## Other documentation + +* [FAQ](docs/FAQ.md) +* [All-time contributors](./CONTRIBUTORS.md) +* [Contributing guidelines](.github/CONTRIBUTING.md) +* [Maintainers documentation](docs/MAINTAINERS.md) +* [Code style](docs/STYLE.md) + +## Cutting a new release + +Maintainers can cut a new release by either: + +* running [this workflow](https://github.com/go-openapi/runtime/actions/workflows/bump-release.yml) +* or pushing a semver tag + * signed tags are preferred + * The tag message is prepended to release notes + + +[test-badge]: https://github.com/go-openapi/runtime/actions/workflows/go-test.yml/badge.svg +[test-url]: https://github.com/go-openapi/runtime/actions/workflows/go-test.yml +[cov-badge]: https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg +[cov-url]: https://codecov.io/gh/go-openapi/runtime +[vuln-scan-badge]: https://github.com/go-openapi/runtime/actions/workflows/scanner.yml/badge.svg +[vuln-scan-url]: https://github.com/go-openapi/runtime/actions/workflows/scanner.yml +[codeql-badge]: https://github.com/go-openapi/runtime/actions/workflows/codeql.yml/badge.svg +[codeql-url]: https://github.com/go-openapi/runtime/actions/workflows/codeql.yml + +[release-badge]: https://badge.fury.io/gh/go-openapi%2Fruntime.svg +[release-url]: https://badge.fury.io/gh/go-openapi%2Fruntime + +[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/runtime +[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/runtime +[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/runtime +[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/runtime + +[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/runtime +[godoc-url]: http://pkg.go.dev/github.com/go-openapi/runtime +[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png +[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM +[slack-url]: 
https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + + +[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg +[license-url]: https://github.com/go-openapi/runtime/?tab=Apache-2.0-1-ov-file#readme + +[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/runtime +[goversion-url]: https://github.com/go-openapi/runtime/blob/master/go.mod +[top-badge]: https://img.shields.io/github/languages/top/go-openapi/runtime +[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/runtime/latest diff --git a/vendor/github.com/go-openapi/runtime/SECURITY.md b/vendor/github.com/go-openapi/runtime/SECURITY.md new file mode 100644 index 0000000000..1fea2c5736 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/SECURITY.md @@ -0,0 +1,37 @@ +# Security Policy + +This policy outlines the commitment and practices of the go-openapi maintainers regarding security. + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| O.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. + +## Reporting a vulnerability + +If you become aware of a security vulnerability that affects the current repository, +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. + +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". 
+ +[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go index eb649742e8..8701c8e3d6 100644 --- a/vendor/github.com/go-openapi/runtime/bytestream.go +++ b/vendor/github.com/go-openapi/runtime/bytestream.go @@ -18,7 +18,7 @@ func defaultCloser() error { return nil } type byteStreamOpt func(opts *byteStreamOpts) -// ClosesStream when the bytestream consumer or producer is finished +// ClosesStream when the bytestream consumer or producer is finished. func ClosesStream(opts *byteStreamOpts) { opts.Close = true } @@ -32,11 +32,12 @@ type byteStreamOpts struct { // The consumer consumes from a provided reader into the data passed by reference. // // Supported output underlying types and interfaces, prioritized in this order: -// - io.ReaderFrom (for maximum control) -// - io.Writer (performs io.Copy) -// - encoding.BinaryUnmarshaler -// - *string -// - *[]byte +// +// - [io.ReaderFrom] (for maximum control) +// - [io.Writer] (performs [io.Copy]) +// - [encoding.BinaryUnmarshaler] +// - *string +// - *[]byte func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { var vals byteStreamOpts for _, opt := range opts { @@ -124,13 +125,14 @@ func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { // The producer takes input data then writes to an output writer (essentially as a pipe). // // Supported input underlying types and interfaces, prioritized in this order: -// - io.WriterTo (for maximum control) -// - io.Reader (performs io.Copy). A ReadCloser is closed before exiting. 
-// - encoding.BinaryMarshaler +// +// - [io.WriterTo] (for maximum control) +// - [io.Reader] (performs [io.Copy]). A ReadCloser is closed before exiting. +// - [encoding.BinaryMarshaler] // - error (writes as a string) // - []byte // - string -// - struct, other slices: writes as JSON +// - struct, other slices: writes as JSON. func ByteStreamProducer(opts ...byteStreamOpt) Producer { var vals byteStreamOpts for _, opt := range opts { diff --git a/vendor/github.com/go-openapi/runtime/client/auth_info.go b/vendor/github.com/go-openapi/runtime/client/auth_info.go index a98690c4d6..fdaa08f274 100644 --- a/vendor/github.com/go-openapi/runtime/client/auth_info.go +++ b/vendor/github.com/go-openapi/runtime/client/auth_info.go @@ -11,14 +11,14 @@ import ( "github.com/go-openapi/runtime" ) -// PassThroughAuth never manipulates the request +// PassThroughAuth never manipulates the request. var PassThroughAuth runtime.ClientAuthInfoWriter func init() { PassThroughAuth = runtime.ClientAuthInfoWriterFunc(func(_ runtime.ClientRequest, _ strfmt.Registry) error { return nil }) } -// BasicAuth provides a basic auth info writer +// BasicAuth provides a basic auth info writer. func BasicAuth(username, password string) runtime.ClientAuthInfoWriter { return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { encoded := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) @@ -26,7 +26,7 @@ func BasicAuth(username, password string) runtime.ClientAuthInfoWriter { }) } -// APIKeyAuth provides an API key auth info writer +// APIKeyAuth provides an API key auth info writer. 
func APIKeyAuth(name, in, value string) runtime.ClientAuthInfoWriter { if in == "query" { return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { @@ -42,7 +42,7 @@ func APIKeyAuth(name, in, value string) runtime.ClientAuthInfoWriter { return nil } -// BearerToken provides a header based oauth2 bearer access token auth info writer +// BearerToken provides a header based oauth2 bearer access token auth info writer. func BearerToken(token string) runtime.ClientAuthInfoWriter { return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { return r.SetHeaderParam(runtime.HeaderAuthorization, "Bearer "+token) diff --git a/vendor/github.com/go-openapi/runtime/client/keepalive.go b/vendor/github.com/go-openapi/runtime/client/keepalive.go index 831d23b511..3bac5e272c 100644 --- a/vendor/github.com/go-openapi/runtime/client/keepalive.go +++ b/vendor/github.com/go-openapi/runtime/client/keepalive.go @@ -13,7 +13,7 @@ import ( // so that go will reuse the TCP connections. // This is not enabled by default because there are servers where // the response never gets closed and that would make the code hang forever. -// So instead it's provided as a http client middleware that can be used to override +// So instead it's provided as a [http] client [middleware] that can be used to override // any request. 
func KeepAliveTransport(rt http.RoundTripper) http.RoundTripper { return &keepAliveTransport{wrapped: rt} diff --git a/vendor/github.com/go-openapi/runtime/client/opentelemetry.go b/vendor/github.com/go-openapi/runtime/client/opentelemetry.go index e77941293f..5054878c06 100644 --- a/vendor/github.com/go-openapi/runtime/client/opentelemetry.go +++ b/vendor/github.com/go-openapi/runtime/client/opentelemetry.go @@ -98,12 +98,14 @@ func newOpenTelemetryTransport(transport runtime.ClientTransport, host string, o host: host, } - defaultOpts := []OpenTelemetryOpt{ + const baseOptions = 4 + defaultOpts := make([]OpenTelemetryOpt, 0, len(opts)+baseOptions) + defaultOpts = append(defaultOpts, WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)), WithSpanNameFormatter(defaultTransportFormatter), WithPropagators(otel.GetTextMapPropagator()), WithTracerProvider(otel.GetTracerProvider()), - } + ) c := newConfig(append(defaultOpts, opts...)...) tr.config = c diff --git a/vendor/github.com/go-openapi/runtime/client/request.go b/vendor/github.com/go-openapi/runtime/client/request.go index 6d9b25912e..f16ee487ba 100644 --- a/vendor/github.com/go-openapi/runtime/client/request.go +++ b/vendor/github.com/go-openapi/runtime/client/request.go @@ -51,7 +51,7 @@ type request struct { getBody func(r *request) []byte } -// NewRequest creates a new swagger http client request +// NewRequest creates a new swagger http client request. func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) *request { return &request{ pathPattern: pathPattern, @@ -64,7 +64,7 @@ func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) } } -// BuildHTTP creates a new http request based on the data from the params +// BuildHTTP creates a new http request based on the data from the params. 
func (r *request) BuildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry) (*http.Request, error) { return r.buildHTTP(mediaType, basePath, producers, registry, nil) } @@ -87,7 +87,7 @@ func (r *request) GetBody() []byte { // SetHeaderParam adds a header param to the request // when there is only 1 value provided for the varargs, it will set it. -// when there are several values provided for the varargs it will add it (no overriding) +// when there are several values provided for the varargs it will add it (no overriding). func (r *request) SetHeaderParam(name string, values ...string) error { if r.header == nil { r.header = make(http.Header) @@ -96,14 +96,14 @@ func (r *request) SetHeaderParam(name string, values ...string) error { return nil } -// GetHeaderParams returns the all headers currently set for the request +// GetHeaderParams returns the all headers currently set for the request. func (r *request) GetHeaderParams() http.Header { return r.header } // SetQueryParam adds a query param to the request // when there is only 1 value provided for the varargs, it will set it. -// when there are several values provided for the varargs it will add it (no overriding) +// when there are several values provided for the varargs it will add it (no overriding). func (r *request) SetQueryParam(name string, values ...string) error { if r.query == nil { r.query = make(url.Values) @@ -112,7 +112,7 @@ func (r *request) SetQueryParam(name string, values ...string) error { return nil } -// GetQueryParams returns a copy of all query params currently set for the request +// GetQueryParams returns a copy of all query params currently set for the request. 
func (r *request) GetQueryParams() url.Values { var result = make(url.Values) for key, value := range r.query { @@ -123,7 +123,7 @@ func (r *request) GetQueryParams() url.Values { // SetFormParam adds a forn param to the request // when there is only 1 value provided for the varargs, it will set it. -// when there are several values provided for the varargs it will add it (no overriding) +// when there are several values provided for the varargs it will add it (no overriding). func (r *request) SetFormParam(name string, values ...string) error { if r.formFields == nil { r.formFields = make(url.Values) @@ -132,7 +132,7 @@ func (r *request) SetFormParam(name string, values ...string) error { return nil } -// SetPathParam adds a path param to the request +// SetPathParam adds a path param to the request. func (r *request) SetPathParam(name string, value string) error { if r.pathParams == nil { r.pathParams = make(map[string]string) @@ -142,7 +142,7 @@ func (r *request) SetPathParam(name string, value string) error { return nil } -// SetFileParam adds a file param to the request +// SetFileParam adds a file param to the request. func (r *request) SetFileParam(name string, files ...runtime.NamedReadCloser) error { for _, file := range files { if actualFile, ok := file.(*os.File); ok { @@ -182,7 +182,7 @@ func (r *request) GetBodyParam() any { return r.payload } -// SetTimeout sets the timeout for a request +// SetTimeout sets the timeout for a request. 
func (r *request) SetTimeout(timeout time.Duration) error { r.timeout = timeout return nil @@ -298,8 +298,7 @@ func (r *request) buildHTTP(mediaType, basePath string, producers map[string]run // if there is payload, use the producer to write the payload, and then // set the header to the content-type appropriate for the payload produced if r.payload != nil { - // TODO: infer most appropriate content type based on the producer used, - // and the `consumers` section of the spec/operation + // Enhancement proposal: https://github.com/go-openapi/runtime/issues/387 r.header.Set(runtime.HeaderContentType, mediaType) if rdr, ok := r.payload.(io.ReadCloser); ok { body = rdr diff --git a/vendor/github.com/go-openapi/runtime/client/runtime.go b/vendor/github.com/go-openapi/runtime/client/runtime.go index 203c74e49d..eeb17dfb24 100644 --- a/vendor/github.com/go-openapi/runtime/client/runtime.go +++ b/vendor/github.com/go-openapi/runtime/client/runtime.go @@ -33,10 +33,10 @@ const ( schemeHTTPS = "https" ) -// DefaultTimeout the default request timeout +// DefaultTimeout the default request timeout. var DefaultTimeout = 30 * time.Second -// TLSClientOptions to configure client authentication with mutual TLS +// TLSClientOptions to configure client authentication with mutual TLS. type TLSClientOptions struct { // Certificate is the path to a PEM-encoded certificate to be used for // client authentication. If set then Key must also be set. @@ -92,6 +92,17 @@ type TLSClientOptions struct { // the verifiedChains argument will always be nil. VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error + // VerifyConnection, if not nil, is called after normal certificate + // verification and after [TLSClientOptions.VerifyPeerCertificate] by either a TLS client or + // server. It receives the [tls.ConnectionState] which may be inspected. 
+ // + // Unlike VerifyPeerCertificate, this callback is invoked on every + // connection, including resumed ones, making it suitable for checks + // that must always apply (e.g. certificate pinning). + // + // If it returns a non-nil error, the handshake is aborted and that error results. + VerifyConnection func(tls.ConnectionState) error + // SessionTicketsDisabled may be set to true to disable session ticket and // PSK (resumption) support. Note that on clients, session ticket support is // also disabled if ClientSessionCache is nil. @@ -105,7 +116,7 @@ type TLSClientOptions struct { _ struct{} } -// TLSClientAuth creates a tls.Config for mutual auth +// TLSClientAuth creates a [tls.Config] for mutual auth. func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) { // create client tls config cfg := &tls.Config{ @@ -150,6 +161,7 @@ func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) { cfg.InsecureSkipVerify = opts.InsecureSkipVerify cfg.VerifyPeerCertificate = opts.VerifyPeerCertificate + cfg.VerifyConnection = opts.VerifyConnection cfg.SessionTicketsDisabled = opts.SessionTicketsDisabled cfg.ClientSessionCache = opts.ClientSessionCache @@ -183,7 +195,7 @@ func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) { return cfg, nil } -// TLSTransport creates a http client transport suitable for mutual tls auth +// TLSTransport creates a [http] client transport suitable for mutual [tls] auth. func TLSTransport(opts TLSClientOptions) (http.RoundTripper, error) { cfg, err := TLSClientAuth(opts) if err != nil { @@ -193,7 +205,7 @@ func TLSTransport(opts TLSClientOptions) (http.RoundTripper, error) { return &http.Transport{TLSClientConfig: cfg}, nil } -// TLSClient creates a http.Client for mutual auth +// TLSClient creates a [http.Client] for mutual auth. 
func TLSClient(opts TLSClientOptions) (*http.Client, error) { transport, err := TLSTransport(opts) if err != nil { @@ -203,7 +215,7 @@ func TLSClient(opts TLSClientOptions) (*http.Client, error) { } // Runtime represents an API client that uses the transport -// to make http requests based on a swagger specification. +// to make [http] requests based on a swagger specification. type Runtime struct { DefaultMediaType string DefaultAuthentication runtime.ClientAuthInfoWriter @@ -227,12 +239,12 @@ type Runtime struct { response ClientResponseFunc } -// New creates a new default runtime for a swagger api runtime.Client +// New creates a new default runtime for a swagger api runtime.Client. func New(host, basePath string, schemes []string) *Runtime { var rt Runtime rt.DefaultMediaType = runtime.JSONMime - // TODO: actually infer this stuff from the spec + // Enhancement proposal: https://github.com/go-openapi/runtime/issues/385 rt.Consumers = map[string]runtime.Consumer{ runtime.YAMLMime: yamlpc.YAMLConsumer(), runtime.JSONMime: runtime.JSONConsumer(), @@ -271,7 +283,7 @@ func New(host, basePath string, schemes []string) *Runtime { return &rt } -// NewWithClient allows you to create a new transport with a configured http.Client +// NewWithClient allows you to create a new transport with a configured [http.Client]. func NewWithClient(host, basePath string, schemes []string, client *http.Client) *Runtime { rt := New(host, basePath, schemes) if client != nil { @@ -297,8 +309,8 @@ func NewWithClient(host, basePath string, schemes []string, client *http.Client) // an opentelemetry transport. 
// // If you have a strict requirement on using opentracing, you may still do so by importing -// module [github.com/go-openapi/runtime/client-middleware/opentracing] and using -// [github.com/go-openapi/runtime/client-middleware/opentracing.WithOpenTracing] with your +// module [github.com/go-openapi/runtime/client-[middleware]/opentracing] and using +// [github.com/go-openapi/runtime/client-[middleware]/opentracing.WithOpenTracing] with your // usual opentracing options and opentracing-enabled transport. // // Passed options are ignored unless they are of type [OpenTelemetryOpt]. @@ -328,7 +340,7 @@ func (r *Runtime) WithOpenTelemetry(opts ...OpenTelemetryOpt) runtime.ClientTran // // This is not enabled by default because there are servers where // the response never gets closed and that would make the code hang forever. -// So instead it's provided as a http client middleware that can be used to override +// So instead it's provided as a [http] client [middleware] that can be used to override // any request. func (r *Runtime) EnableConnectionReuse() { if r.client == nil { @@ -351,7 +363,7 @@ func (r *Runtime) CreateHttpRequest(operation *runtime.ClientOperation) (req *ht } // Submit a request and when there is a body on success it will turn that into the result -// all other things are turned into an api error for swagger which retains the status code +// all other things are turned into an api error for swagger which retains the status code. func (r *Runtime) Submit(operation *runtime.ClientOperation) (any, error) { _, readResponse, _ := operation.Params, operation.Reader, operation.AuthInfo @@ -506,13 +518,13 @@ func transportOrDefault(left, right http.RoundTripper) http.RoundTripper { return left } -// takes a client operation and creates equivalent http.Request +// takes a client operation and creates equivalent http.Request. 
func (r *Runtime) createHttpRequest(operation *runtime.ClientOperation) (*request, *http.Request, error) { //nolint:revive params, _, auth := operation.Params, operation.Reader, operation.AuthInfo request := newRequest(operation.Method, operation.PathPattern, params) - var accept []string + accept := make([]string, 0, len(operation.ProducesMediaTypes)) accept = append(accept, operation.ProducesMediaTypes...) if err := request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil { return nil, nil, err @@ -532,7 +544,7 @@ func (r *Runtime) createHttpRequest(operation *runtime.ClientOperation) (*reques // } //} - // TODO: pick appropriate media type + // Enhancement proposal: https://github.com/go-openapi/runtime/issues/386 cmt := r.DefaultMediaType for _, mediaType := range operation.ConsumesMediaTypes { // Pick first non-empty media type diff --git a/vendor/github.com/go-openapi/runtime/client_auth_info.go b/vendor/github.com/go-openapi/runtime/client_auth_info.go index 581e64451a..5ef59c88bf 100644 --- a/vendor/github.com/go-openapi/runtime/client_auth_info.go +++ b/vendor/github.com/go-openapi/runtime/client_auth_info.go @@ -5,15 +5,15 @@ package runtime import "github.com/go-openapi/strfmt" -// A ClientAuthInfoWriterFunc converts a function to a request writer interface +// A ClientAuthInfoWriterFunc converts a function to a request writer interface. type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error -// AuthenticateRequest adds authentication data to the request +// AuthenticateRequest adds authentication data to the request. func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error { return fn(req, reg) } -// A ClientAuthInfoWriter implementor knows how to write authentication info to a request +// A ClientAuthInfoWriter implementor knows how to write authentication info to a request. 
type ClientAuthInfoWriter interface { AuthenticateRequest(ClientRequest, strfmt.Registry) error } diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go index b0bb0977db..ad7277e091 100644 --- a/vendor/github.com/go-openapi/runtime/client_operation.go +++ b/vendor/github.com/go-openapi/runtime/client_operation.go @@ -8,7 +8,7 @@ import ( "net/http" ) -// ClientOperation represents the context for a swagger operation to be submitted to the transport +// ClientOperation represents the context for a swagger operation to be submitted to the transport. type ClientOperation struct { ID string Method string @@ -23,7 +23,7 @@ type ClientOperation struct { Client *http.Client } -// A ClientTransport implementor knows how to submit Request objects to some destination +// A ClientTransport implementor knows how to submit Request objects to some destination. type ClientTransport interface { // Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) Submit(*ClientOperation) (any, error) diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go index 6e335b36f3..844f57b3ce 100644 --- a/vendor/github.com/go-openapi/runtime/client_request.go +++ b/vendor/github.com/go-openapi/runtime/client_request.go @@ -12,15 +12,15 @@ import ( "github.com/go-openapi/strfmt" ) -// ClientRequestWriterFunc converts a function to a request writer interface +// ClientRequestWriterFunc converts a function to a request writer interface. type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error -// WriteToRequest adds data to the request +// WriteToRequest adds data to the request. 
func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error { return fn(req, reg) } -// ClientRequestWriter is an interface for things that know how to write to a request +// ClientRequestWriter is an interface for things that know how to write to a request. type ClientRequestWriter interface { WriteToRequest(ClientRequest, strfmt.Registry) error } @@ -57,13 +57,13 @@ type ClientRequest interface { //nolint:interfacebloat // a swagger-capable requ GetFileParam() map[string][]NamedReadCloser } -// NamedReadCloser represents a named ReadCloser interface +// NamedReadCloser represents a named ReadCloser interface. type NamedReadCloser interface { io.ReadCloser Name() string } -// NamedReader creates a NamedReadCloser for use as file upload +// NamedReader creates a [NamedReadCloser] for use as file upload. func NamedReader(name string, rdr io.Reader) NamedReadCloser { rc, ok := rdr.(io.ReadCloser) if !ok { diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go index f2cf942ab3..92668db4ec 100644 --- a/vendor/github.com/go-openapi/runtime/client_response.go +++ b/vendor/github.com/go-openapi/runtime/client_response.go @@ -12,7 +12,7 @@ import ( // A ClientResponse represents a client response. // -// This bridges between responses obtained from different transports +// This bridges between responses obtained from different transports. type ClientResponse interface { Code() int Message() string @@ -21,28 +21,28 @@ type ClientResponse interface { Body() io.ReadCloser } -// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation +// A ClientResponseReaderFunc turns a function into a [ClientResponseReader] interface implementation. type ClientResponseReaderFunc func(ClientResponse, Consumer) (any, error) -// ReadResponse reads the response +// ReadResponse reads the response. 
func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (any, error) { return read(resp, consumer) } // A ClientResponseReader is an interface for things want to read a response. -// An application of this is to create structs from response values +// An application of this is to create structs from response values. type ClientResponseReader interface { ReadResponse(ClientResponse, Consumer) (any, error) } -// APIError wraps an error model and captures the status code +// APIError wraps an error model and captures the status code. type APIError struct { OperationName string Response any Code int } -// NewAPIError creates a new API error +// NewAPIError creates a new API error. func NewAPIError(opName string, payload any, code int) *APIError { return &APIError{ OperationName: opName, @@ -51,7 +51,7 @@ func NewAPIError(opName string, payload any, code int) *APIError { } } -// sanitizer ensures that single quotes are escaped +// sanitizer ensures that single quotes are escaped. var sanitizer = strings.NewReplacer(`\`, `\\`, `'`, `\'`) func (o *APIError) Error() string { @@ -69,37 +69,37 @@ func (o *APIError) String() string { return o.Error() } -// IsSuccess returns true when this API response returns a 2xx status code +// IsSuccess returns true when this API response returns a 2xx status code. func (o *APIError) IsSuccess() bool { const statusOK = 2 return o.Code/100 == statusOK } -// IsRedirect returns true when this API response returns a 3xx status code +// IsRedirect returns true when this API response returns a 3xx status code. func (o *APIError) IsRedirect() bool { const statusRedirect = 3 return o.Code/100 == statusRedirect } -// IsClientError returns true when this API response returns a 4xx status code +// IsClientError returns true when this API response returns a 4xx status code. 
func (o *APIError) IsClientError() bool { const statusClientError = 4 return o.Code/100 == statusClientError } -// IsServerError returns true when this API response returns a 5xx status code +// IsServerError returns true when this API response returns a 5xx status code. func (o *APIError) IsServerError() bool { const statusServerError = 5 return o.Code/100 == statusServerError } -// IsCode returns true when this API response returns a given status code +// IsCode returns true when this API response returns a given status code. func (o *APIError) IsCode(code int) bool { return o.Code == code } // A ClientResponseStatus is a common interface implemented by all responses on the generated code -// You can use this to treat any client response based on status code +// You can use this to treat any client response based on status code. type ClientResponseStatus interface { IsSuccess() bool IsRedirect() bool diff --git a/vendor/github.com/go-openapi/runtime/constants.go b/vendor/github.com/go-openapi/runtime/constants.go index 62ae9eec0c..80de6c8086 100644 --- a/vendor/github.com/go-openapi/runtime/constants.go +++ b/vendor/github.com/go-openapi/runtime/constants.go @@ -4,35 +4,35 @@ package runtime const ( - // HeaderContentType represents a http content-type header, it's value is supposed to be a mime type + // HeaderContentType represents a [http] content-type header, it's value is supposed to be a mime type. HeaderContentType = "Content-Type" - // HeaderTransferEncoding represents a http transfer-encoding header. + // HeaderTransferEncoding represents a [http] transfer-encoding header. HeaderTransferEncoding = "Transfer-Encoding" - // HeaderAccept the Accept header + // HeaderAccept the Accept header. HeaderAccept = "Accept" - // HeaderAuthorization the Authorization header + // HeaderAuthorization the Authorization header. 
HeaderAuthorization = "Authorization" charsetKey = "charset" - // DefaultMime the default fallback mime type + // DefaultMime the default fallback mime type. DefaultMime = "application/octet-stream" - // JSONMime the json mime type + // JSONMime the json mime type. JSONMime = "application/json" - // YAMLMime the yaml mime type + // YAMLMime the [yaml] mime type. YAMLMime = "application/x-yaml" - // XMLMime the xml mime type + // XMLMime the [xml] mime type. XMLMime = "application/xml" - // TextMime the text mime type + // TextMime the text mime type. TextMime = "text/plain" - // HTMLMime the html mime type + // HTMLMime the html mime type. HTMLMime = "text/html" - // CSVMime the csv mime type + // CSVMime the [csv] mime type. CSVMime = "text/csv" - // MultipartFormMime the multipart form mime type + // MultipartFormMime the multipart form mime type. MultipartFormMime = "multipart/form-data" - // URLencodedFormMime the url encoded form mime type + // URLencodedFormMime is the [url] encoded form mime type. URLencodedFormMime = "application/x-www-form-urlencoded" ) diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go index 567e3d9db2..558d0cb99a 100644 --- a/vendor/github.com/go-openapi/runtime/csv.go +++ b/vendor/github.com/go-openapi/runtime/csv.go @@ -21,17 +21,18 @@ import ( // The consumer consumes CSV records from a provided reader into the data passed by reference. // // CSVOpts options may be specified to alter the default CSV behavior on the reader and the writer side (e.g. separator, skip header, ...). -// The defaults are those of the standard library's csv.Reader and csv.Writer. +// The defaults are those of the standard library's [csv.Reader] and [csv.Writer]. 
// // Supported output underlying types and interfaces, prioritized in this order: -// - *csv.Writer -// - CSVWriter (writer options are ignored) -// - io.Writer (as raw bytes) -// - io.ReaderFrom (as raw bytes) -// - encoding.BinaryUnmarshaler (as raw bytes) -// - *[][]string (as a collection of records) -// - *[]byte (as raw bytes) -// - *string (a raw bytes) +// +// - *[csv.Writer] +// - [CSVWriter] (writer options are ignored) +// - [io.Writer] (as raw bytes) +// - [io.ReaderFrom] (as raw bytes) +// - [encoding.BinaryUnmarshaler] (as raw bytes) +// - *[][]string (as a collection of records) +// - *[]byte (as raw bytes) +// - *string (a raw bytes) // // The consumer prioritizes situations where buffering the input is not required. func CSVConsumer(opts ...CSVOpt) Consumer { @@ -157,11 +158,12 @@ func CSVConsumer(opts ...CSVOpt) Consumer { // The producer takes input data then writes as CSV to an output writer (essentially as a pipe). // // Supported input underlying types and interfaces, prioritized in this order: -// - *csv.Reader -// - CSVReader (reader options are ignored) -// - io.Reader -// - io.WriterTo -// - encoding.BinaryMarshaler +// +// - *[csv.Reader] +// - [CSVReader] (reader options are ignored) +// - [io.Reader] +// - [io.WriterTo] +// - [encoding.BinaryMarshaler] // - [][]string // - []byte // - string @@ -283,7 +285,7 @@ func CSVProducer(opts ...CSVOpt) Producer { }) } -// pipeCSV copies CSV records from a CSV reader to a CSV writer +// pipeCSV copies CSV records from a CSV reader to a CSV writer. 
func pipeCSV(csvWriter CSVWriter, csvReader CSVReader, opts csvOpts) error { for ; opts.skippedLines > 0; opts.skippedLines-- { _, err := csvReader.Read() diff --git a/vendor/github.com/go-openapi/runtime/csv_options.go b/vendor/github.com/go-openapi/runtime/csv_options.go index 4cc0439001..e778c64d1f 100644 --- a/vendor/github.com/go-openapi/runtime/csv_options.go +++ b/vendor/github.com/go-openapi/runtime/csv_options.go @@ -18,7 +18,7 @@ type csvOpts struct { closeStream bool } -// WithCSVReaderOpts specifies the options to csv.Reader +// WithCSVReaderOpts specifies the options to [csv.Reader] // when reading CSV. func WithCSVReaderOpts(reader csv.Reader) CSVOpt { return func(o *csvOpts) { @@ -26,7 +26,7 @@ func WithCSVReaderOpts(reader csv.Reader) CSVOpt { } } -// WithCSVWriterOpts specifies the options to csv.Writer +// WithCSVWriterOpts specifies the options to [csv.Writer] // when writing CSV. func WithCSVWriterOpts(writer csv.Writer) CSVOpt { return func(o *csvOpts) { @@ -94,7 +94,7 @@ var ( _ CSVReader = &csvRecordsWriter{} ) -// csvRecordsWriter is an internal container to move CSV records back and forth +// csvRecordsWriter is an internal container to move CSV records back and forth. type csvRecordsWriter struct { i int records [][]string diff --git a/vendor/github.com/go-openapi/runtime/doc.go b/vendor/github.com/go-openapi/runtime/doc.go new file mode 100644 index 0000000000..300c6b15f0 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/doc.go @@ -0,0 +1,6 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package runtime exposes runtime client and server components +// for go-openapi toolkit. 
+package runtime diff --git a/vendor/github.com/go-openapi/runtime/go.work.sum b/vendor/github.com/go-openapi/runtime/go.work.sum index b0c2c9a63d..b24a8cfaf9 100644 --- a/vendor/github.com/go-openapi/runtime/go.work.sum +++ b/vendor/github.com/go-openapi/runtime/go.work.sum @@ -14,6 +14,8 @@ github.com/go-openapi/swag/netutils v0.24.0/go.mod h1:WRgiHcYTnx+IqfMCtu0hy9oOaP github.com/go-openapi/swag/stringutils v0.24.0/go.mod h1:5nUXB4xA0kw2df5PRipZDslPJgJut+NjL7D25zPZ/4w= github.com/go-openapi/swag/typeutils v0.24.0/go.mod h1:q8C3Kmk/vh2VhpCLaoR2MVWOGP8y7Jc8l82qCTd1DYI= github.com/go-openapi/swag/yamlutils v0.24.0/go.mod h1:DpKv5aYuaGm/sULePoeiG8uwMpZSfReo1HR3Ik0yaG8= +github.com/go-openapi/testify/enable/yaml/v2 v2.4.0/go.mod h1:14iV8jyyQlinc9StD7w1xVPW3CO3q1Gj04Jy//Kw4VM= +github.com/go-openapi/testify/v2 v2.4.0/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -26,9 +28,13 @@ github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUt github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30 h1:BHT1/DKsYDGkUgQ2jmMaozVcdk+sVfz0+1ZJq4zkWgw= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/rogpeppe/go-internal v1.9.0/go.mod 
h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= @@ -37,6 +43,7 @@ github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gi github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -44,11 +51,15 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= +golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.18.0/go.mod 
h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -69,11 +80,14 @@ golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488/go.mod h1:fGb/2+tgXXjhjHsTNdVEEMZNWA0quBnfrO+AfoDSAKw= +golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= +golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= +golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -88,6 +102,8 @@ golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0 golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/go-openapi/runtime/headers.go b/vendor/github.com/go-openapi/runtime/headers.go index 510e396ca7..4eb2b9254a 100644 --- a/vendor/github.com/go-openapi/runtime/headers.go +++ b/vendor/github.com/go-openapi/runtime/headers.go @@ -10,7 +10,7 @@ import ( "github.com/go-openapi/errors" ) -// ContentType parses a content type header +// ContentType parses a content type header. func ContentType(headers http.Header) (string, string, error) { ct := headers.Get(HeaderContentType) orig := ct diff --git a/vendor/github.com/go-openapi/runtime/interfaces.go b/vendor/github.com/go-openapi/runtime/interfaces.go index 90046bf367..a8b4b318d9 100644 --- a/vendor/github.com/go-openapi/runtime/interfaces.go +++ b/vendor/github.com/go-openapi/runtime/interfaces.go @@ -11,75 +11,75 @@ import ( "github.com/go-openapi/strfmt" ) -// OperationHandlerFunc an adapter for a function to the OperationHandler interface +// OperationHandlerFunc an adapter for a function to the [OperationHandler] interface. 
type OperationHandlerFunc func(any) (any, error) -// Handle implements the operation handler interface +// Handle implements the operation handler interface. func (s OperationHandlerFunc) Handle(data any) (any, error) { return s(data) } -// OperationHandler a handler for a swagger operation +// OperationHandler a handler for a swagger operation. type OperationHandler interface { Handle(any) (any, error) } -// ConsumerFunc represents a function that can be used as a consumer +// ConsumerFunc represents a function that can be used as a consumer. type ConsumerFunc func(io.Reader, any) error -// Consume consumes the reader into the data parameter +// Consume consumes the reader into the data parameter. func (fn ConsumerFunc) Consume(reader io.Reader, data any) error { return fn(reader, data) } // Consumer implementations know how to bind the values on the provided interface to -// data provided by the request body +// data provided by the request body. type Consumer interface { // Consume performs the binding of request values Consume(io.Reader, any) error } -// ProducerFunc represents a function that can be used as a producer +// ProducerFunc represents a function that can be used as a producer. type ProducerFunc func(io.Writer, any) error -// Produce produces the response for the provided data +// Produce produces the response for the provided data. func (f ProducerFunc) Produce(writer io.Writer, data any) error { return f(writer, data) } // Producer implementations know how to turn the provided interface into a valid -// HTTP response +// HTTP response. type Producer interface { // Produce writes to the http response Produce(io.Writer, any) error } -// AuthenticatorFunc turns a function into an authenticator +// AuthenticatorFunc turns a function into an authenticator. type AuthenticatorFunc func(any) (bool, any, error) -// Authenticate authenticates the request with the provided data +// Authenticate authenticates the request with the provided data. 
func (f AuthenticatorFunc) Authenticate(params any) (bool, any, error) { return f(params) } // Authenticator represents an authentication strategy // implementations of Authenticator know how to authenticate the -// request data and translate that into a valid principal object or an error +// request data and translate that into a valid principal object or an error. type Authenticator interface { Authenticate(any) (bool, any, error) } -// AuthorizerFunc turns a function into an authorizer +// AuthorizerFunc turns a function into an authorizer. type AuthorizerFunc func(*http.Request, any) error -// Authorize authorizes the processing of the request for the principal +// Authorize authorizes the processing of the request for the principal. func (f AuthorizerFunc) Authorize(r *http.Request, principal any) error { return f(r, principal) } // Authorizer represents an authorization strategy // implementations of Authorizer know how to authorize the principal object -// using the request data and returns error if unauthorized +// using the request data and returns error if unauthorized. type Authorizer interface { Authorize(*http.Request, any) error } @@ -87,7 +87,7 @@ type Authorizer interface { // Validatable types implementing this interface allow customizing their validation // this will be used instead of the reflective validation based on the spec document. // the implementations are assumed to have been generated by the swagger tool so they should -// contain all the validations obtained from the spec +// contain all the validations obtained from the spec. type Validatable interface { Validate(strfmt.Registry) error } @@ -95,7 +95,7 @@ type Validatable interface { // ContextValidatable types implementing this interface allow customizing their validation // this will be used instead of the reflective validation based on the spec document. 
// the implementations are assumed to have been generated by the swagger tool so they should -// contain all the context validations obtained from the spec +// contain all the context validations obtained from the spec. type ContextValidatable interface { ContextValidate(context.Context, strfmt.Registry) error } diff --git a/vendor/github.com/go-openapi/runtime/json.go b/vendor/github.com/go-openapi/runtime/json.go index 8f93eebfaa..2af5dcfe46 100644 --- a/vendor/github.com/go-openapi/runtime/json.go +++ b/vendor/github.com/go-openapi/runtime/json.go @@ -8,7 +8,7 @@ import ( "io" ) -// JSONConsumer creates a new JSON consumer +// JSONConsumer creates a new JSON consumer. func JSONConsumer() Consumer { return ConsumerFunc(func(reader io.Reader, data any) error { dec := json.NewDecoder(reader) @@ -17,7 +17,7 @@ func JSONConsumer() Consumer { }) } -// JSONProducer creates a new JSON producer +// JSONProducer creates a new JSON producer. func JSONProducer() Producer { return ProducerFunc(func(writer io.Writer, data any) error { enc := json.NewEncoder(writer) diff --git a/vendor/github.com/go-openapi/runtime/middleware/context.go b/vendor/github.com/go-openapi/runtime/middleware/context.go index bb00b93b89..1f85e86b53 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/context.go +++ b/vendor/github.com/go-openapi/runtime/middleware/context.go @@ -24,10 +24,10 @@ import ( "github.com/go-openapi/runtime/security" ) -// Debug when true turns on verbose logging +// Debug when true turns on verbose logging. var Debug = logger.DebugEnabled() -// Logger is the standard libray logger used for printing debug messages +// Logger is the standard library logger used for printing debug messages.
var Logger logger.Logger = logger.StandardLogger{} func debugLogfFunc(lg logger.Logger) func(string, ...any) { @@ -43,35 +45,35 @@ func debugLogfFunc(lg logger.Logger) func(string, ...any) { return func(_ string, _ ...any) {} } -// A Builder can create middlewares +// A Builder can create middlewares. type Builder func(http.Handler) http.Handler -// PassthroughBuilder returns the handler, aka the builder identity function +// PassthroughBuilder returns the handler, aka the builder identity function. func PassthroughBuilder(handler http.Handler) http.Handler { return handler } // RequestBinder is an interface for types to implement -// when they want to be able to bind from a request +// when they want to be able to bind from a request. type RequestBinder interface { BindRequest(*http.Request, *MatchedRoute) error } // Responder is an interface for types to implement -// when they want to be considered for writing HTTP responses +// when they want to be considered for writing HTTP responses. type Responder interface { WriteResponse(http.ResponseWriter, runtime.Producer) } -// ResponderFunc wraps a func as a Responder interface +// ResponderFunc wraps a func as a Responder interface. type ResponderFunc func(http.ResponseWriter, runtime.Producer) -// WriteResponse writes to the response +// WriteResponse writes to the response. func (fn ResponderFunc) WriteResponse(rw http.ResponseWriter, pr runtime.Producer) { fn(rw, pr) } -// Context is a type safe wrapper around an untyped request context +// Context is a type safe wrapper around an [untyped] request context // used throughout to store request context with the standard context attached -// to the http.Request +// to the [http.Request]. type Context struct { spec *loads.Document analyzer *analysis.Spec @@ -192,7 +194,7 @@ func (r *routableUntypedAPI) DefaultConsumes() string { // NewRoutableContext creates a new context for a routable API. 
// -// If a nil Router is provided, the DefaultRouter (denco-based) will be used. +// If a nil Router is provided, the [DefaultRouter] ([denco]-based) will be used. func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Router) *Context { var an *analysis.Spec if spec != nil { @@ -202,9 +204,9 @@ func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Ro return NewRoutableContextWithAnalyzedSpec(spec, an, routableAPI, routes) } -// NewRoutableContextWithAnalyzedSpec is like NewRoutableContext but takes as input an already analysed spec. +// NewRoutableContextWithAnalyzedSpec is like [NewRoutableContext] but takes as input an already analysed spec. // -// If a nil Router is provided, the DefaultRouter (denco-based) will be used. +// If a nil Router is provided, the [DefaultRouter] ([denco]-based) will be used. func NewRoutableContextWithAnalyzedSpec(spec *loads.Document, an *analysis.Spec, routableAPI RoutableAPI, routes Router) *Context { // Either there are no spec doc and analysis, or both of them. if (spec != nil || an != nil) && (spec == nil || an == nil) { @@ -222,7 +224,7 @@ func NewRoutableContextWithAnalyzedSpec(spec *loads.Document, an *analysis.Spec, // NewContext creates a new context wrapper. // -// If a nil Router is provided, the DefaultRouter (denco-based) will be used. +// If a nil Router is provided, the [DefaultRouter] ([denco]-based) will be used. func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context { var an *analysis.Spec if spec != nil { @@ -239,13 +241,13 @@ func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context return ctx } -// Serve serves the specified spec with the specified api registrations as a http.Handler +// Serve serves the specified spec with the specified api registrations as a [http.Handler]. 
func Serve(spec *loads.Document, api *untyped.API) http.Handler { return ServeWithBuilder(spec, api, PassthroughBuilder) } -// ServeWithBuilder serves the specified spec with the specified api registrations as a http.Handler that is decorated -// by the Builder +// ServeWithBuilder serves the specified spec with the specified api registrations as a [http.Handler] that is decorated +// by the Builder. func ServeWithBuilder(spec *loads.Document, api *untyped.API, builder Builder) http.Handler { context := NewContext(spec, api, nil) return context.APIHandler(builder) } @@ -294,7 +296,7 @@ type contentTypeValue struct { Charset string } -// BasePath returns the base path for this API +// BasePath returns the base path for this API. func (c *Context) BasePath() string { if c.spec == nil { return "" } @@ -309,13 +311,13 @@ func (c *Context) SetLogger(lg logger.Logger) { c.debugLogf = debugLogfFunc(lg) } -// RequiredProduces returns the accepted content types for responses +// RequiredProduces returns the accepted content types for responses. func (c *Context) RequiredProduces() []string { return c.analyzer.RequiredProduces() } // BindValidRequest binds a params object to a request but only when the request is valid -// if the request is not valid an error will be returned +// if the request is not valid an error will be returned. func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, binder RequestBinder) error { var res []error var requestContentType string @@ -374,7 +376,7 @@ func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, b // Returns the media type, its charset and a shallow copy of the request // when its context doesn't contain the content type value, otherwise it returns // the same request -// Returns the error that runtime.ContentType may retunrs. +// Returns the error that [runtime.ContentType] may return.
func (c *Context) ContentType(request *http.Request) (string, string, *http.Request, error) { var rCtx = request.Context() @@ -390,7 +392,7 @@ func (c *Context) ContentType(request *http.Request) (string, string, *http.Requ return mt, cs, request.WithContext(rCtx), nil } -// LookupRoute looks a route up and returns true when it is found +// LookupRoute looks a route up and returns true when it is found. func (c *Context) LookupRoute(request *http.Request) (*MatchedRoute, bool) { if route, ok := c.router.Lookup(request.Method, request.URL.EscapedPath()); ok { return route, ok @@ -402,7 +404,7 @@ func (c *Context) LookupRoute(request *http.Request) (*MatchedRoute, bool) { // Returns the matched route, a shallow copy of the request if its context // contains the matched router, otherwise the same request, and a bool to // indicate if it the request matches one of the routes, if it doesn't -// then it returns false and nil for the other two return values +// then it returns false and nil for the other two return values. func (c *Context) RouteInfo(request *http.Request) (*MatchedRoute, *http.Request, bool) { var rCtx = request.Context() @@ -420,7 +422,7 @@ func (c *Context) RouteInfo(request *http.Request) (*MatchedRoute, *http.Request // ResponseFormat negotiates the response content type // Returns the response format and a shallow copy of the request if its context -// doesn't contain the response format, otherwise the same request +// doesn't contain the response format, otherwise the same request. func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *http.Request) { var rCtx = r.Context() @@ -438,12 +440,12 @@ func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *htt return format, r } -// AllowedMethods gets the allowed methods for the path of this request +// AllowedMethods gets the allowed methods for the path of this request. 
func (c *Context) AllowedMethods(request *http.Request) []string { return c.router.OtherMethods(request.Method, request.URL.EscapedPath()) } -// ResetAuth removes the current principal from the request context +// ResetAuth removes the current principal from the request context. func (c *Context) ResetAuth(request *http.Request) *http.Request { rctx := request.Context() rctx = stdContext.WithValue(rctx, ctxSecurityPrincipal, nil) @@ -454,7 +456,7 @@ func (c *Context) ResetAuth(request *http.Request) *http.Request { // Authorize authorizes the request // Returns the principal object and a shallow copy of the request when its // context doesn't contain the principal, otherwise the same request or an error -// (the last) if one of the authenticators returns one or an Unauthenticated error +// (the last) if one of the authenticators returns one or an Unauthenticated error. func (c *Context) Authorize(request *http.Request, route *MatchedRoute) (any, *http.Request, error) { if route == nil || !route.HasAuth() { return nil, nil, nil @@ -492,7 +494,7 @@ func (c *Context) Authorize(request *http.Request, route *MatchedRoute) (any, *h // BindAndValidate binds and validates the request // Returns the validation map and a shallow copy of the request when its context // doesn't contain the validation, otherwise it returns the same request or an -// CompositeValidationError error +// CompositeValidationError error. func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute) (any, *http.Request, error) { var rCtx = request.Context() @@ -513,12 +515,12 @@ func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute) return result.bound, request, nil } -// NotFound the default not found responder for when no route has been matched yet +// NotFound the default not found responder for when no route has been matched yet. 
func (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) { c.Respond(rw, r, []string{c.api.DefaultProduces()}, nil, errors.NotFound("not found")) } -// Respond renders the response after doing some content negotiation +// Respond renders the response after doing some content negotiation. func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data any) { c.debugLogf("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces) offers := []string{} @@ -616,7 +618,7 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st // // This handler includes a swagger spec, router and the contract defined in the swagger spec. // -// A spec UI (SwaggerUI) is served at {API base path}/docs and the spec document at /swagger.json +// A spec UI ([SwaggerUI]) is served at {API base path}/docs and the spec document at /swagger.json // (these can be modified with uiOptions). func (c *Context) APIHandlerSwaggerUI(builder Builder, opts ...UIOption) http.Handler { b := builder @@ -635,7 +637,7 @@ func (c *Context) APIHandlerSwaggerUI(builder Builder, opts ...UIOption) http.Ha // // This handler includes a swagger spec, router and the contract defined in the swagger spec. // -// A spec UI (RapiDoc) is served at {API base path}/docs and the spec document at /swagger.json +// A spec UI ([RapiDoc]) is served at {API base path}/docs and the spec document at /swagger.json // (these can be modified with uiOptions). func (c *Context) APIHandlerRapiDoc(builder Builder, opts ...UIOption) http.Handler { b := builder @@ -654,7 +656,7 @@ func (c *Context) APIHandlerRapiDoc(builder Builder, opts ...UIOption) http.Hand // // This handler includes a swagger spec, router and the contract defined in the swagger spec. 
// -// A spec UI (Redoc) is served at {API base path}/docs and the spec document at /swagger.json +// A spec UI ([Redoc]) is served at {API base path}/docs and the spec document at /swagger.json // (these can be modified with uiOptions). func (c *Context) APIHandler(builder Builder, opts ...UIOption) http.Handler { b := builder @@ -669,7 +671,7 @@ func (c *Context) APIHandler(builder Builder, opts ...UIOption) http.Handler { return Spec(specPath, c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b)), specOpts...) } -// RoutesHandler returns a handler to serve the API, just the routes and the contract defined in the swagger spec +// RoutesHandler returns a handler to serve the API, just the routes and the contract defined in the swagger spec. func (c *Context) RoutesHandler(builder Builder) http.Handler { b := builder if b == nil { @@ -686,10 +688,12 @@ func (c Context) uiOptionsForHandler(opts []UIOption) (string, uiOptions, []Spec } // default options (may be overridden) - optsForContext := []UIOption{ + const baseOptions = 2 + optsForContext := make([]UIOption, 0, len(opts)+baseOptions) + optsForContext = append(optsForContext, WithUIBasePath(c.BasePath()), WithUITitle(title), - } + ) optsForContext = append(optsForContext, opts...) uiOpts := uiOptionsWithDefaults(optsForContext) diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/README.md b/vendor/github.com/go-openapi/runtime/middleware/denco/README.md index 30109e17d5..03e0b7f4e3 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/README.md +++ b/vendor/github.com/go-openapi/runtime/middleware/denco/README.md @@ -8,7 +8,7 @@ However, Denco is optimized and some features added. 
## Features * Fast (See [go-http-routing-benchmark](https://github.com/naoina/go-http-routing-benchmark)) -* [URL patterns](#url-patterns) (`/foo/:bar` and `/foo/*wildcard`) +*[URL patterns](#url-patterns) (`/foo/:bar` and `/foo/*wildcard`) * Small (but enough) URL router API * HTTP request multiplexer like `http.ServeMux` diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go index b371a2cf84..f89d761cf2 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go +++ b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go @@ -1,5 +1,7 @@ // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2014 Naoya Inada +// SPDX-License-Identifier: MIT // Package denco provides fast URL router. package denco @@ -24,7 +26,7 @@ const ( // SeparatorCharacter separates path segments. SeparatorCharacter = '/' - // PathParamCharacter indicates a RESTCONF path param + // PathParamCharacter indicates a RESTCONF path param. PathParamCharacter = '=' // MaxSize is max size of records and internal slice. @@ -52,7 +54,7 @@ func New() *Router { } // Lookup returns data and path parameters that associated with path. -// params is a slice of the Param that arranged in the order in which parameters appeared. +// params is a slice of the [Param] that arranged in the order in which parameters appeared. // e.g. when built routing path is "/path/to/:id/:name" and given path is "/path/to/1/alice". params order is [{"id": "1"}, {"name": "alice"}], not [{"name": "alice"}, {"id": "1"}]. func (rt *Router) Lookup(path string) (data any, params Params, found bool) { if data, found = rt.static[path]; found { @@ -138,7 +140,7 @@ func newDoubleArray() *doubleArray { // BASE (22bit) | Extra flags (2bit) | CHECK (8bit) // // |----------------------|--|--------| -// 32 10 8 0 +// 32 10 8 0. 
type baseCheck uint32 const ( @@ -155,7 +157,7 @@ func (bc *baseCheck) SetBase(base int) { } func (bc baseCheck) Check() byte { - return byte(bc) + return byte(bc) //nolint:gosec // integer conversion is ok } func (bc *baseCheck) SetCheck(check byte) { @@ -212,7 +214,7 @@ func (da *doubleArray) lookup(path string, params []Param, idx int) (*node, []Pa BACKTRACKING: for j := len(indices) - 1; j >= 0; j-- { - i, idx := int(indices[j]>>indexOffset), int(indices[j]&indexMask) //nolint:gosec // integer conversion is okay + i, idx := int(indices[j]>>indexOffset), int(indices[j]&indexMask) if da.bc[idx].IsSingleParam() { nextIdx := nextIndex(da.bc[idx].Base(), ParamCharacter) if nextIdx >= len(da.bc) { diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/server.go b/vendor/github.com/go-openapi/runtime/middleware/denco/server.go index 8f04d93dba..e6c0976d8b 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/server.go +++ b/vendor/github.com/go-openapi/runtime/middleware/denco/server.go @@ -1,5 +1,7 @@ // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2014 Naoya Inada +// SPDX-License-Identifier: MIT package denco @@ -10,27 +12,27 @@ import ( // Mux represents a multiplexer for HTTP request. type Mux struct{} -// NewMux returns a new Mux. +// NewMux returns a new [Mux]. func NewMux() *Mux { return &Mux{} } -// GET is shorthand of Mux.Handler("GET", path, handler). +// GET is shorthand of [Mux].Handler("GET", path, handler). func (m *Mux) GET(path string, handler HandlerFunc) Handler { return m.Handler("GET", path, handler) } -// POST is shorthand of Mux.Handler("POST", path, handler). +// POST is shorthand of [Mux].Handler("POST", path, handler). func (m *Mux) POST(path string, handler HandlerFunc) Handler { return m.Handler("POST", path, handler) } -// PUT is shorthand of Mux.Handler("PUT", path, handler). 
+// PUT is shorthand of [Mux].Handler("PUT", path, handler). func (m *Mux) PUT(path string, handler HandlerFunc) Handler { return m.Handler("PUT", path, handler) } -// HEAD is shorthand of Mux.Handler("HEAD", path, handler). +// HEAD is shorthand of [Mux].Handler("HEAD", path, handler). func (m *Mux) HEAD(path string, handler HandlerFunc) Handler { return m.Handler("HEAD", path, handler) } @@ -44,7 +46,7 @@ func (m *Mux) Handler(method, path string, handler HandlerFunc) Handler { } } -// Build builds a http.Handler. +// Build builds a [http.Handler]. func (m *Mux) Build(handlers []Handler) (http.Handler, error) { recordMap := make(map[string][]Record) for _, h := range handlers { @@ -73,7 +75,7 @@ type Handler struct { Func HandlerFunc } -// The HandlerFunc type is aliased to type of handler function. +// HandlerFunc is aliased to type of handler function. type HandlerFunc func(w http.ResponseWriter, r *http.Request, params Params) type serveMux struct { diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/util.go b/vendor/github.com/go-openapi/runtime/middleware/denco/util.go index f002bc4693..e7da422623 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/util.go +++ b/vendor/github.com/go-openapi/runtime/middleware/denco/util.go @@ -1,5 +1,7 @@ // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2014 Naoya Inada +// SPDX-License-Identifier: MIT package denco diff --git a/vendor/github.com/go-openapi/runtime/middleware/doc.go b/vendor/github.com/go-openapi/runtime/middleware/doc.go index 04b8322363..f78ec52c79 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/doc.go +++ b/vendor/github.com/go-openapi/runtime/middleware/doc.go @@ -1,52 +1,50 @@ // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 -/* -Package middleware provides the library with helper functions for serving 
swagger APIs. - -Pseudo middleware handler - - import ( - "net/http" - - "github.com/go-openapi/errors" - ) - - func newCompleteMiddleware(ctx *Context) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - // use context to lookup routes - if matched, ok := ctx.RouteInfo(r); ok { - - if matched.NeedsAuth() { - if _, err := ctx.Authorize(r, matched); err != nil { - ctx.Respond(rw, r, matched.Produces, matched, err) - return - } - } - - bound, validation := ctx.BindAndValidate(r, matched) - if validation != nil { - ctx.Respond(rw, r, matched.Produces, matched, validation) - return - } - - result, err := matched.Handler.Handle(bound) - if err != nil { - ctx.Respond(rw, r, matched.Produces, matched, err) - return - } - - ctx.Respond(rw, r, matched.Produces, matched, result) - return - } - - // Not found, check if it exists in the other methods first - if others := ctx.AllowedMethods(r); len(others) > 0 { - ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) - return - } - ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path)) - }) - } -*/ +// Package middleware provides the library with helper functions for serving swagger APIs. +// +// Pseudo middleware handler. 
+// +// import ( +// "net/http" +// +// "github.com/go-openapi/errors" +// ) +// +// func newCompleteMiddleware(ctx *Context) http.Handler { +// return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { +// // use context to lookup routes +// if matched, ok := ctx.RouteInfo(r); ok { +// +// if matched.NeedsAuth() { +// if _, err := ctx.Authorize(r, matched); err != nil { +// ctx.Respond(rw, r, matched.Produces, matched, err) +// return +// } +// } +// +// bound, validation := ctx.BindAndValidate(r, matched) +// if validation != nil { +// ctx.Respond(rw, r, matched.Produces, matched, validation) +// return +// } +// +// result, err := matched.Handler.Handle(bound) +// if err != nil { +// ctx.Respond(rw, r, matched.Produces, matched, err) +// return +// } +// +// ctx.Respond(rw, r, matched.Produces, matched, result) +// return +// } +// +// // Not found, check if it exists in the other methods first +// if others := ctx.AllowedMethods(r); len(others) > 0 { +// ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) +// return +// } +// ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path)) +// }) +// } package middleware diff --git a/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go b/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go index 2e63780c70..4d286a6c8c 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go +++ b/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go @@ -31,13 +31,13 @@ func (e *errorResp) WriteResponse(rw http.ResponseWriter, producer runtime.Produ } } -// NotImplemented the error response when the response is not implemented +// NotImplemented the error response when the response is not implemented. 
func NotImplemented(message string) Responder { return Error(http.StatusNotImplemented, message) } // Error creates a generic responder for returning errors, the data will be serialized -// with the matching producer for the request +// with the matching producer for the request. func Error(code int, data any, headers ...http.Header) Responder { var hdr http.Header for _, h := range headers { diff --git a/vendor/github.com/go-openapi/runtime/middleware/operation.go b/vendor/github.com/go-openapi/runtime/middleware/operation.go index 2a7ab1fada..24f466b597 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/operation.go +++ b/vendor/github.com/go-openapi/runtime/middleware/operation.go @@ -5,7 +5,7 @@ package middleware import "net/http" -// NewOperationExecutor creates a context aware middleware that handles the operations after routing +// NewOperationExecutor creates a context aware [middleware] that handles the operations after routing. func NewOperationExecutor(ctx *Context) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { // use context to lookup routes diff --git a/vendor/github.com/go-openapi/runtime/middleware/parameter.go b/vendor/github.com/go-openapi/runtime/middleware/parameter.go index 7d630d6cce..a9d2a36460 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/parameter.go +++ b/vendor/github.com/go-openapi/runtime/middleware/parameter.go @@ -28,7 +28,7 @@ const ( typeArray = "array" ) -var textUnmarshalType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() +var textUnmarshalType = reflect.TypeFor[encoding.TextUnmarshaler]() func newUntypedParamBinder(param spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedParamBinder { binder := new(untypedParamBinder) diff --git a/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go b/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go index 6039a26f33..1574defb41 100644 --- 
a/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go +++ b/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go @@ -11,7 +11,7 @@ import ( "path" ) -// RapiDocOpts configures the RapiDoc middlewares +// RapiDocOpts configures the [RapiDoc] middlewares. type RapiDocOpts struct { // BasePath for the UI, defaults to: / BasePath string @@ -50,9 +50,9 @@ func (r *RapiDocOpts) EnsureDefaults() { } } -// RapiDoc creates a middleware to serve a documentation site for a swagger spec. +// RapiDoc creates a [middleware] to serve a documentation site for a swagger spec. // -// This allows for altering the spec before starting the http listener. +// This allows for altering the spec before starting the [http] listener. func RapiDoc(opts RapiDocOpts, next http.Handler) http.Handler { opts.EnsureDefaults() diff --git a/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/vendor/github.com/go-openapi/runtime/middleware/redoc.go index cbaec73c43..1007409a30 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/redoc.go +++ b/vendor/github.com/go-openapi/runtime/middleware/redoc.go @@ -11,7 +11,7 @@ import ( "path" ) -// RedocOpts configures the Redoc middlewares +// RedocOpts configures the [Redoc] middlewares. type RedocOpts struct { // BasePath for the UI, defaults to: / BasePath string @@ -36,7 +36,7 @@ type RedocOpts struct { RedocURL string } -// EnsureDefaults in case some options are missing +// EnsureDefaults in case some options are missing. func (r *RedocOpts) EnsureDefaults() { common := toCommonUIOptions(r) common.EnsureDefaults() @@ -51,9 +51,9 @@ func (r *RedocOpts) EnsureDefaults() { } } -// Redoc creates a middleware to serve a documentation site for a swagger spec. +// Redoc creates a [middleware] to serve a documentation site for a swagger spec. // -// This allows for altering the spec before starting the http listener. +// This allows for altering the spec before starting the [http] listener. 
func Redoc(opts RedocOpts, next http.Handler) http.Handler { opts.EnsureDefaults() diff --git a/vendor/github.com/go-openapi/runtime/middleware/request.go b/vendor/github.com/go-openapi/runtime/middleware/request.go index 52facfefcd..ad781663b8 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/request.go +++ b/vendor/github.com/go-openapi/runtime/middleware/request.go @@ -14,7 +14,7 @@ import ( "github.com/go-openapi/strfmt" ) -// UntypedRequestBinder binds and validates the data from a http request +// UntypedRequestBinder binds and validates the data from a [http] request. type UntypedRequestBinder struct { Spec *spec.Swagger Parameters map[string]spec.Parameter @@ -38,7 +38,7 @@ func NewUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Sw } } -// Bind perform the databinding and validation +// Bind perform the databinding and validation. func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, data any) error { val := reflect.Indirect(reflect.ValueOf(data)) isMap := val.Kind() == reflect.Map diff --git a/vendor/github.com/go-openapi/runtime/middleware/router.go b/vendor/github.com/go-openapi/runtime/middleware/router.go index 16816580da..e828653be7 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/router.go +++ b/vendor/github.com/go-openapi/runtime/middleware/router.go @@ -25,16 +25,16 @@ import ( // RouteParam is a object to capture route params in a framework agnostic way. // implementations of the muxer should use these route params to communicate with the -// swagger framework +// swagger framework. type RouteParam struct { Name string Value string } -// RouteParams the collection of route params +// RouteParams the collection of route params. type RouteParams []RouteParam -// Get gets the value for the route param for the specified key +// Get gets the value for the route param for the specified key. 
func (r RouteParams) Get(name string) string { vv, _, _ := r.GetOK(name) if len(vv) > 0 { @@ -44,9 +44,9 @@ func (r RouteParams) Get(name string) string { } // GetOK gets the value but also returns booleans to indicate if a key or value -// is present. This aids in validation and satisfies an interface in use there +// is present. This aids in validation and satisfies an interface in use there. // -// The returned values are: data, has key, has value +// The returned values are: data, has key, has value. func (r RouteParams) GetOK(name string) ([]string, bool, bool) { for _, p := range r { if p.Name == name { @@ -56,7 +56,7 @@ func (r RouteParams) GetOK(name string) ([]string, bool, bool) { return nil, false, false } -// NewRouter creates a new context-aware router middleware +// NewRouter creates a new context-aware router [middleware]. func NewRouter(ctx *Context, next http.Handler) http.Handler { if ctx.router == nil { ctx.router = DefaultRouter(ctx.spec, ctx.api, WithDefaultRouterLoggerFunc(ctx.debugLogf)) @@ -68,18 +68,22 @@ func NewRouter(ctx *Context, next http.Handler) http.Handler { return } + // Always use the default producer Content-Type for Method not + // allowed and Not found responses + produces := []string{ctx.api.DefaultProduces()} + // Not found, check if it exists in the other methods first if others := ctx.AllowedMethods(r); len(others) > 0 { - ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) + ctx.Respond(rw, r, produces, nil, errors.MethodNotAllowed(r.Method, others)) return } - ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.EscapedPath())) + ctx.Respond(rw, r, produces, nil, errors.NotFound("path %s was not found", r.URL.EscapedPath())) }) } // RoutableAPI represents an interface for things that can serve -// as a provider of implementations for the swagger router +// as a provider of implementations for the swagger router. 
type RoutableAPI interface { HandlerFor(string, string) (http.Handler, bool) ServeErrorFor(string) func(http.ResponseWriter, *http.Request, error) @@ -92,7 +96,7 @@ type RoutableAPI interface { DefaultConsumes() string } -// Router represents a swagger-aware router +// Router represents a swagger-aware router. type Router interface { Lookup(method, path string) (*MatchedRoute, bool) OtherMethods(method, path string) []string @@ -153,7 +157,7 @@ func WithDefaultRouterLoggerFunc(fn func(string, ...any)) DefaultRouterOpt { } } -// DefaultRouter creates a default implementation of the router +// DefaultRouter creates a default implementation of the router. func DefaultRouter(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) Router { builder := newDefaultRouteBuilder(spec, api, opts...) if spec != nil { @@ -170,7 +174,7 @@ func DefaultRouter(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterO // RouteAuthenticator is an authenticator that can compose several authenticators together. // It also knows when it contains an authenticator that allows for anonymous pass through. -// Contains a group of 1 or more authenticators that have a logical AND relationship +// Contains a group of 1 or more authenticators that have a logical AND relationship. type RouteAuthenticator struct { Authenticator map[string]runtime.Authenticator Schemes []string @@ -185,18 +189,18 @@ func (ra *RouteAuthenticator) AllowsAnonymous() bool { } // AllScopes returns a list of unique scopes that is the combination -// of all the scopes in the requirements +// of all the scopes in the requirements. func (ra *RouteAuthenticator) AllScopes() []string { return ra.allScopes } // CommonScopes returns a list of unique scopes that are common in all the -// scopes in the requirements +// scopes in the requirements. 
func (ra *RouteAuthenticator) CommonScopes() []string { return ra.commonScopes } -// Authenticate Authenticator interface implementation +// Authenticate Authenticator interface implementation. func (ra *RouteAuthenticator) Authenticate(req *http.Request, route *MatchedRoute) (bool, any, error) { if ra.allowAnonymous { route.Authenticator = ra @@ -262,10 +266,10 @@ func stringSliceIntersection(slices ...[]string) []string { return intersection } -// RouteAuthenticators represents a group of authenticators that represent a logical OR +// RouteAuthenticators represents a group of authenticators that represent a logical OR. type RouteAuthenticators []RouteAuthenticator -// AllowsAnonymous returns true when there is an authenticator that means optional auth +// AllowsAnonymous returns true when there is an authenticator that means optional auth. func (ras RouteAuthenticators) AllowsAnonymous() bool { for _, ra := range ras { if ra.AllowsAnonymous() { @@ -275,7 +279,7 @@ func (ras RouteAuthenticators) AllowsAnonymous() bool { return false } -// Authenticate method implemention so this collection can be used as authenticator +// Authenticate method implementation so this collection can be used as authenticator. func (ras RouteAuthenticators) Authenticate(req *http.Request, route *MatchedRoute) (bool, any, error) { var lastError error var allowsAnon bool @@ -320,7 +324,7 @@ type routeEntry struct { Authorizer runtime.Authorizer } -// MatchedRoute represents the route that was matched in this request +// MatchedRoute represents the route that was matched in this request. type MatchedRoute struct { routeEntry @@ -330,13 +334,13 @@ type MatchedRoute struct { Authenticator *RouteAuthenticator } -// HasAuth returns true when the route has a security requirement defined +// HasAuth returns true when the route has a security requirement defined. 
func (m *MatchedRoute) HasAuth() bool { return len(m.Authenticators) > 0 } // NeedsAuth returns true when the request still -// needs to perform authentication +// needs to perform authentication. func (m *MatchedRoute) NeedsAuth() bool { return m.HasAuth() && m.Authenticator == nil } @@ -407,7 +411,7 @@ func (d *defaultRouter) SetLogger(lg logger.Logger) { d.debugLogf = debugLogfFunc(lg) } -// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco +// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco. var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`) func decodeCompositParams(name string, value string, pattern string, names []string, values []string) ([]string, []string) { diff --git a/vendor/github.com/go-openapi/runtime/middleware/spec.go b/vendor/github.com/go-openapi/runtime/middleware/spec.go index 9cc9940aaa..0a64a9572b 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/spec.go +++ b/vendor/github.com/go-openapi/runtime/middleware/spec.go @@ -13,7 +13,7 @@ const ( applicationJSON = "application/json" ) -// SpecOption can be applied to the Spec serving middleware +// SpecOption can be applied to the Spec serving [middleware]. type SpecOption func(*specOptions) var defaultSpecOptions = specOptions{ @@ -35,12 +35,12 @@ func specOptionsWithDefaults(opts []SpecOption) specOptions { return o } -// Spec creates a middleware to serve a swagger spec as a JSON document. +// Spec creates a [middleware] to serve a swagger spec as a JSON document. // -// This allows for altering the spec before starting the http listener. +// This allows for altering the spec before starting the [http] listener. // // The basePath argument indicates the path of the spec document (defaults to "/"). -// Additional SpecOption can be used to change the name of the document (defaults to "swagger.json"). 
+// Additional [SpecOption] can be used to change the name of the document (defaults to "swagger.json"). func Spec(basePath string, b []byte, next http.Handler, opts ...SpecOption) http.Handler { if basePath == "" { basePath = "/" @@ -68,7 +68,7 @@ func Spec(basePath string, b []byte, next http.Handler, opts ...SpecOption) http }) } -// WithSpecPath sets the path to be joined to the base path of the Spec middleware. +// WithSpecPath sets the path to be joined to the base path of the Spec [middleware]. // // This is empty by default. func WithSpecPath(pth string) SpecOption { @@ -79,7 +79,7 @@ func WithSpecPath(pth string) SpecOption { // WithSpecDocument sets the name of the JSON document served as a spec. // -// By default, this is "swagger.json" +// By default, this is "swagger.json". func WithSpecDocument(doc string) SpecOption { return func(o *specOptions) { if doc == "" { diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go index b25a3a2cff..14ed37ced6 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go +++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go @@ -11,7 +11,7 @@ import ( "path" ) -// SwaggerUIOpts configures the SwaggerUI middleware +// SwaggerUIOpts configures the [SwaggerUI] [middleware]. type SwaggerUIOpts struct { // BasePath for the API, defaults to: / BasePath string @@ -47,7 +47,7 @@ type SwaggerUIOpts struct { Favicon16 string } -// EnsureDefaults in case some options are missing +// EnsureDefaults in case some options are missing. func (r *SwaggerUIOpts) EnsureDefaults() { r.ensureDefaults() @@ -90,9 +90,9 @@ func (r *SwaggerUIOpts) ensureDefaults() { } } -// SwaggerUI creates a middleware to serve a documentation site for a swagger spec. +// SwaggerUI creates a [middleware] to serve a documentation site for a swagger spec. // -// This allows for altering the spec before starting the http listener. 
+// This allows for altering the spec before starting the [http] listener. func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler { opts.EnsureDefaults() diff --git a/vendor/github.com/go-openapi/runtime/middleware/ui_options.go b/vendor/github.com/go-openapi/runtime/middleware/ui_options.go index cf2f673d3c..ed255426ad 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/ui_options.go +++ b/vendor/github.com/go-openapi/runtime/middleware/ui_options.go @@ -13,7 +13,7 @@ import ( ) const ( - // constants that are common to all UI-serving middlewares + // constants that are common to all UI-serving middlewares. defaultDocsPath = "docs" defaultDocsURL = "/swagger.json" defaultDocsTitle = "API Documentation" @@ -75,8 +75,8 @@ func fromCommonToAnyOptions[T any](source uiOptions, target *T) { } } -// UIOption can be applied to UI serving middleware, such as Context.APIHandler or -// Context.APIHandlerSwaggerUI to alter the defaut behavior. +// UIOption can be applied to UI serving [middleware], such as Context.[APIHandler] or +// Context.[APIHandlerSwaggerUI] to alter the default behavior. type UIOption func(*uiOptions) func uiOptionsWithDefaults(opts []UIOption) uiOptions { @@ -90,7 +90,7 @@ func uiOptionsWithDefaults(opts []UIOption) uiOptions { // WithUIBasePath sets the base path from where to serve the UI assets. // -// By default, Context middleware sets this value to the API base path. +// By default, Context [middleware] sets this value to the API base path. func WithUIBasePath(base string) UIOption { return func(o *uiOptions) { if !strings.HasPrefix(base, "/") { @@ -111,7 +111,7 @@ func WithUIPath(pth string) UIOption { // // This may be specified as a full URL or a path. // -// By default, this is "/swagger.json" +// By default, this is "/swagger.json". 
func WithUISpecURL(specURL string) UIOption { return func(o *uiOptions) { o.SpecURL = specURL @@ -120,7 +120,7 @@ func WithUISpecURL(specURL string) UIOption { // WithUITitle sets the title of the UI. // -// By default, Context middleware sets this value to the title found in the API spec. +// By default, Context [middleware] sets this value to the title found in the API spec. func WithUITitle(title string) UIOption { return func(o *uiOptions) { o.Title = title @@ -129,14 +129,14 @@ func WithUITitle(title string) UIOption { // WithTemplate allows to set a custom template for the UI. // -// UI middleware will panic if the template does not parse or execute properly. +// UI [middleware] will panic if the template does not parse or execute properly. func WithTemplate(tpl string) UIOption { return func(o *uiOptions) { o.Template = tpl } } -// EnsureDefaults in case some options are missing +// EnsureDefaults in case some options are missing. func (r *uiOptions) EnsureDefaults() { if r.BasePath == "" { r.BasePath = "/" diff --git a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go index 774da0ba0c..f4c966bacd 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go +++ b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go @@ -23,7 +23,7 @@ const ( mediumPreallocatedSlots = 30 ) -// API represents an untyped mux for a swagger spec +// API represents an untyped mux for a swagger spec. type API struct { spec *loads.Document analyzer *analysis.Spec @@ -39,7 +39,7 @@ type API struct { formats strfmt.Registry } -// NewAPI creates the default untyped API +// NewAPI creates the default untyped API. 
func NewAPI(spec *loads.Document) *API { var an *analysis.Spec if spec != nil && spec.Spec() != nil { @@ -60,7 +60,7 @@ func NewAPI(spec *loads.Document) *API { return api.WithJSONDefaults() } -// WithJSONDefaults loads the json defaults for this api +// WithJSONDefaults loads the json defaults for this api. func (d *API) WithJSONDefaults() *API { d.DefaultConsumes = runtime.JSONMime d.DefaultProduces = runtime.JSONMime @@ -69,7 +69,7 @@ func (d *API) WithJSONDefaults() *API { return d } -// WithoutJSONDefaults clears the json defaults for this api +// WithoutJSONDefaults clears the json defaults for this api. func (d *API) WithoutJSONDefaults() *API { d.DefaultConsumes = "" d.DefaultProduces = "" @@ -78,7 +78,7 @@ func (d *API) WithoutJSONDefaults() *API { return d } -// Formats returns the registered string formats +// Formats returns the registered string formats. func (d *API) Formats() strfmt.Registry { if d.formats == nil { d.formats = strfmt.NewFormats() @@ -86,7 +86,7 @@ func (d *API) Formats() strfmt.Registry { return d.formats } -// RegisterFormat registers a custom format validator +// RegisterFormat registers a custom format validator. func (d *API) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) { if d.formats == nil { d.formats = strfmt.NewFormats() @@ -94,7 +94,7 @@ func (d *API) RegisterFormat(name string, format strfmt.Format, validator strfmt d.formats.Add(name, format, validator) } -// RegisterAuth registers an auth handler in this api +// RegisterAuth registers an auth handler in this api. func (d *API) RegisterAuth(scheme string, handler runtime.Authenticator) { if d.authenticators == nil { d.authenticators = make(map[string]runtime.Authenticator) @@ -102,7 +102,7 @@ func (d *API) RegisterAuth(scheme string, handler runtime.Authenticator) { d.authenticators[scheme] = handler } -// RegisterAuthorizer registers an authorizer handler in this api +// RegisterAuthorizer registers an authorizer handler in this api. 
func (d *API) RegisterAuthorizer(handler runtime.Authorizer) { d.authorizer = handler } @@ -115,7 +115,7 @@ func (d *API) RegisterConsumer(mediaType string, handler runtime.Consumer) { d.consumers[strings.ToLower(mediaType)] = handler } -// RegisterProducer registers a producer for a media type +// RegisterProducer registers a producer for a media type. func (d *API) RegisterProducer(mediaType string, handler runtime.Producer) { if d.producers == nil { d.producers = make(map[string]runtime.Producer, smallPreallocatedSlots) @@ -123,7 +123,7 @@ func (d *API) RegisterProducer(mediaType string, handler runtime.Producer) { d.producers[strings.ToLower(mediaType)] = handler } -// RegisterOperation registers an operation handler for an operation name +// RegisterOperation registers an operation handler for an operation name. func (d *API) RegisterOperation(method, path string, handler runtime.OperationHandler) { if d.operations == nil { d.operations = make(map[string]map[string]runtime.OperationHandler, mediumPreallocatedSlots) @@ -135,7 +135,7 @@ func (d *API) RegisterOperation(method, path string, handler runtime.OperationHa d.operations[um][path] = handler } -// OperationHandlerFor returns the operation handler for the specified id if it can be found +// OperationHandlerFor returns the operation handler for the specified id if it can be found. func (d *API) OperationHandlerFor(method, path string) (runtime.OperationHandler, bool) { if d.operations == nil { return nil, false @@ -147,7 +147,7 @@ func (d *API) OperationHandlerFor(method, path string) (runtime.OperationHandler return nil, false } -// ConsumersFor gets the consumers for the specified media types +// ConsumersFor gets the consumers for the specified media types. 
func (d *API) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { result := make(map[string]runtime.Consumer) for _, mt := range mediaTypes { @@ -158,7 +158,7 @@ func (d *API) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { return result } -// ProducersFor gets the producers for the specified media types +// ProducersFor gets the producers for the specified media types. func (d *API) ProducersFor(mediaTypes []string) map[string]runtime.Producer { result := make(map[string]runtime.Producer) for _, mt := range mediaTypes { @@ -169,7 +169,7 @@ func (d *API) ProducersFor(mediaTypes []string) map[string]runtime.Producer { return result } -// AuthenticatorsFor gets the authenticators for the specified security schemes +// AuthenticatorsFor gets the authenticators for the specified security schemes. func (d *API) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { result := make(map[string]runtime.Authenticator) for k := range schemes { @@ -180,17 +180,17 @@ func (d *API) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[stri return result } -// Authorizer returns the registered authorizer +// Authorizer returns the registered authorizer. func (d *API) Authorizer() runtime.Authorizer { return d.authorizer } -// Validate validates this API for any missing items +// Validate validates this API for any missing items. func (d *API) Validate() error { return d.validate() } -// validateWith validates the registrations in this API against the provided spec analyzer +// validateWith validates the registrations in this API against the provided spec analyzer. 
func (d *API) validate() error { consumes := make([]string, 0, len(d.consumers)) for k := range d.consumers { diff --git a/vendor/github.com/go-openapi/runtime/middleware/validation.go b/vendor/github.com/go-openapi/runtime/middleware/validation.go index ed026d626b..8a56490639 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/validation.go +++ b/vendor/github.com/go-openapi/runtime/middleware/validation.go @@ -21,7 +21,7 @@ type validation struct { bound map[string]any } -// ContentType validates the content type of a request +// ContentType validates the content type of a request. func validateContentType(allowed []string, actual string) error { if len(allowed) == 0 { return nil diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go index aab7b8c055..5c45339aa4 100644 --- a/vendor/github.com/go-openapi/runtime/request.go +++ b/vendor/github.com/go-openapi/runtime/request.go @@ -14,25 +14,25 @@ import ( "github.com/go-openapi/swag/stringutils" ) -// CanHaveBody returns true if this method can have a body +// CanHaveBody returns true if this method can have a body. func CanHaveBody(method string) bool { mn := strings.ToUpper(method) return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" } -// IsSafe returns true if this is a request with a safe method +// IsSafe returns true if this is a request with a safe method. func IsSafe(r *http.Request) bool { mn := strings.ToUpper(r.Method) return mn == "GET" || mn == "HEAD" } -// AllowsBody returns true if the request allows for a body +// AllowsBody returns true if the request allows for a body. func AllowsBody(r *http.Request) bool { mn := strings.ToUpper(r.Method) return mn != "HEAD" } -// HasBody returns true if this method needs a content-type +// HasBody returns true if this method needs a content-type. 
func HasBody(r *http.Request) bool { // happy case: we have a content length set if r.ContentLength > 0 { @@ -104,9 +104,9 @@ func (p *peekingReader) Close() error { return nil } -// JSONRequest creates a new http request with json headers set. +// JSONRequest creates a new [http] request with json headers set. // -// It uses context.Background. +// It uses [context.Background]. func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { req, err := http.NewRequestWithContext(context.Background(), method, urlStr, body) if err != nil { @@ -117,12 +117,12 @@ func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { return req, nil } -// Gettable for things with a method GetOK(string) (data string, hasKey bool, hasValue bool) +// Gettable for things with a method [GetOK](string) (data string, hasKey bool, hasValue bool). type Gettable interface { GetOK(string) ([]string, bool, bool) } -// ReadSingleValue reads a single value from the source +// ReadSingleValue reads a single value from the source. func ReadSingleValue(values Gettable, name string) string { vv, _, hv := values.GetOK(name) if hv { @@ -131,7 +131,7 @@ func ReadSingleValue(values Gettable, name string) string { return "" } -// ReadCollectionValue reads a collection value from a string data source +// ReadCollectionValue reads a collection value from a string data source. 
func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { v := ReadSingleValue(values, name) return stringutils.SplitByFormat(v, collectionFormat) diff --git a/vendor/github.com/go-openapi/runtime/security/authenticator.go b/vendor/github.com/go-openapi/runtime/security/authenticator.go index b5b7904dc1..4c09101826 100644 --- a/vendor/github.com/go-openapi/runtime/security/authenticator.go +++ b/vendor/github.com/go-openapi/runtime/security/authenticator.go @@ -19,7 +19,7 @@ const ( accessTokenParam = "access_token" ) -// HttpAuthenticator is a function that authenticates a HTTP request +// HttpAuthenticator is a function that authenticates a HTTP request. func HttpAuthenticator(handler func(*http.Request) (bool, any, error)) runtime.Authenticator { //nolint:revive return runtime.AuthenticatorFunc(func(params any) (bool, any, error) { if request, ok := params.(*http.Request); ok { @@ -32,7 +32,7 @@ func HttpAuthenticator(handler func(*http.Request) (bool, any, error)) runtime.A }) } -// ScopedAuthenticator is a function that authenticates a HTTP request against a list of valid scopes +// ScopedAuthenticator is a function that authenticates a HTTP request against a list of valid scopes. func ScopedAuthenticator(handler func(*ScopedAuthRequest) (bool, any, error)) runtime.Authenticator { return runtime.AuthenticatorFunc(func(params any) (bool, any, error) { if request, ok := params.(*ScopedAuthRequest); ok { @@ -42,22 +42,22 @@ func ScopedAuthenticator(handler func(*ScopedAuthRequest) (bool, any, error)) ru }) } -// UserPassAuthentication authentication function +// UserPassAuthentication authentication function. type UserPassAuthentication func(string, string) (any, error) -// UserPassAuthenticationCtx authentication function with context.Context +// UserPassAuthenticationCtx authentication function with [context.Context]. 
type UserPassAuthenticationCtx func(context.Context, string, string) (context.Context, any, error) -// TokenAuthentication authentication function +// TokenAuthentication authentication function. type TokenAuthentication func(string) (any, error) -// TokenAuthenticationCtx authentication function with context.Context +// TokenAuthenticationCtx authentication function with [context.Context]. type TokenAuthenticationCtx func(context.Context, string) (context.Context, any, error) -// ScopedTokenAuthentication authentication function +// ScopedTokenAuthentication authentication function. type ScopedTokenAuthentication func(string, []string) (any, error) -// ScopedTokenAuthenticationCtx authentication function with context.Context +// ScopedTokenAuthenticationCtx authentication function with [context.Context]. type ScopedTokenAuthenticationCtx func(context.Context, string, []string) (context.Context, any, error) var DefaultRealmName = "API" @@ -93,12 +93,12 @@ func OAuth2SchemeNameCtx(ctx context.Context) string { return v } -// BasicAuth creates a basic auth authenticator with the provided authentication function +// BasicAuth creates a basic auth authenticator with the provided authentication function. func BasicAuth(authenticate UserPassAuthentication) runtime.Authenticator { return BasicAuthRealm(DefaultRealmName, authenticate) } -// BasicAuthRealm creates a basic auth authenticator with the provided authentication function and realm name +// BasicAuthRealm creates a basic auth authenticator with the provided authentication function and realm name. 
func BasicAuthRealm(realm string, authenticate UserPassAuthentication) runtime.Authenticator { if realm == "" { realm = DefaultRealmName @@ -117,12 +117,12 @@ func BasicAuthRealm(realm string, authenticate UserPassAuthentication) runtime.A }) } -// BasicAuthCtx creates a basic auth authenticator with the provided authentication function with support for context.Context +// BasicAuthCtx creates a basic auth authenticator with the provided authentication function with support for [context.Context]. func BasicAuthCtx(authenticate UserPassAuthenticationCtx) runtime.Authenticator { return BasicAuthRealmCtx(DefaultRealmName, authenticate) } -// BasicAuthRealmCtx creates a basic auth authenticator with the provided authentication function and realm name with support for context.Context +// BasicAuthRealmCtx creates a basic auth authenticator with the provided authentication function and realm name with support for [context.Context]. func BasicAuthRealmCtx(realm string, authenticate UserPassAuthenticationCtx) runtime.Authenticator { if realm == "" { realm = DefaultRealmName @@ -143,7 +143,7 @@ func BasicAuthRealmCtx(realm string, authenticate UserPassAuthenticationCtx) run } // APIKeyAuth creates an authenticator that uses a token for authorization. -// This token can be obtained from either a header or a query string +// This token can be obtained from either a header or a query string. func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authenticator { inl := strings.ToLower(in) if inl != query && inl != header { @@ -170,8 +170,8 @@ func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authe }) } -// APIKeyAuthCtx creates an authenticator that uses a token for authorization with support for context.Context. -// This token can be obtained from either a header or a query string +// APIKeyAuthCtx creates an authenticator that uses a token for authorization with support for [context.Context]. 
+// This token can be obtained from either a header or a query string. func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime.Authenticator { inl := strings.ToLower(in) if inl != query && inl != header { @@ -199,13 +199,13 @@ func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime }) } -// ScopedAuthRequest contains both a http request and the required scopes for a particular operation +// ScopedAuthRequest contains both a [http] request and the required scopes for a particular operation. type ScopedAuthRequest struct { Request *http.Request RequiredScopes []string } -// BearerAuth for use with oauth2 flows +// BearerAuth for use with oauth2 flows. func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Authenticator { const prefix = "Bearer " return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, any, error) { @@ -235,7 +235,7 @@ func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Aut }) } -// BearerAuthCtx for use with oauth2 flows with support for context.Context. +// BearerAuthCtx for use with oauth2 flows with support for [context.Context]. func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runtime.Authenticator { const prefix = "Bearer " return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, any, error) { diff --git a/vendor/github.com/go-openapi/runtime/security/authorizer.go b/vendor/github.com/go-openapi/runtime/security/authorizer.go index 69bd497a3c..8beef19f09 100644 --- a/vendor/github.com/go-openapi/runtime/security/authorizer.go +++ b/vendor/github.com/go-openapi/runtime/security/authorizer.go @@ -9,8 +9,8 @@ import ( "github.com/go-openapi/runtime" ) -// Authorized provides a default implementation of the Authorizer interface where all -// requests are authorized (successful) +// Authorized provides a default implementation of the [Authorizer] interface where all +// requests are authorized (successful). 
func Authorized() runtime.Authorizer { return runtime.AuthorizerFunc(func(_ *http.Request, _ any) error { return nil }) } diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go index 7e10a5a56c..c0f3e6b447 100644 --- a/vendor/github.com/go-openapi/runtime/statuses.go +++ b/vendor/github.com/go-openapi/runtime/statuses.go @@ -3,7 +3,7 @@ package runtime -// Statuses lists the most common HTTP status codes to default message +// Statuses lists the most common HTTP status codes to default message. // taken from https://httpstatuses.com/ var Statuses = map[int]string{ 100: "Continue", diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go index 2b8e4ac09d..1252ac88c7 100644 --- a/vendor/github.com/go-openapi/runtime/text.go +++ b/vendor/github.com/go-openapi/runtime/text.go @@ -14,7 +14,7 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// TextConsumer creates a new text consumer +// TextConsumer creates a new text consumer. func TextConsumer() Consumer { return ConsumerFunc(func(reader io.Reader, data any) error { if reader == nil { @@ -56,7 +56,7 @@ func TextConsumer() Consumer { }) } -// TextProducer creates a new text producer +// TextProducer creates a new text producer. func TextProducer() Producer { return ProducerFunc(func(writer io.Writer, data any) error { if writer == nil { diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go index 19894e7845..af4d9be794 100644 --- a/vendor/github.com/go-openapi/runtime/values.go +++ b/vendor/github.com/go-openapi/runtime/values.go @@ -3,7 +3,7 @@ package runtime -// Values typically represent parameters on a http request. +// Values typically represent parameters on a [http] request. type Values map[string][]string // GetOK returns the values collection for the given key. 
diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go index 5060b5c8e9..9cd2540c30 100644 --- a/vendor/github.com/go-openapi/runtime/xml.go +++ b/vendor/github.com/go-openapi/runtime/xml.go @@ -8,7 +8,7 @@ import ( "io" ) -// XMLConsumer creates a new XML consumer +// XMLConsumer creates a new XML consumer. func XMLConsumer() Consumer { return ConsumerFunc(func(reader io.Reader, data any) error { dec := xml.NewDecoder(reader) @@ -16,7 +16,7 @@ func XMLConsumer() Consumer { }) } -// XMLProducer creates a new XML producer +// XMLProducer creates a new XML producer. func XMLProducer() Producer { return ProducerFunc(func(writer io.Writer, data any) error { enc := xml.NewEncoder(writer) diff --git a/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go index ca63430e0b..ca71edbb1b 100644 --- a/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go +++ b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go @@ -10,7 +10,7 @@ import ( yaml "go.yaml.in/yaml/v3" ) -// YAMLConsumer creates a consumer for yaml data +// YAMLConsumer creates a consumer for [yaml] data. func YAMLConsumer() runtime.Consumer { return runtime.ConsumerFunc(func(r io.Reader, v any) error { dec := yaml.NewDecoder(r) @@ -18,7 +18,7 @@ func YAMLConsumer() runtime.Consumer { }) } -// YAMLProducer creates a producer for yaml data +// YAMLProducer creates a producer for [yaml] data. 
func YAMLProducer() runtime.Producer { return runtime.ProducerFunc(func(w io.Writer, v any) error { enc := yaml.NewEncoder(w) diff --git a/vendor/github.com/go-openapi/spec/.cliff.toml b/vendor/github.com/go-openapi/spec/.cliff.toml deleted file mode 100644 index 702629f5dc..0000000000 --- a/vendor/github.com/go-openapi/spec/.cliff.toml +++ /dev/null @@ -1,181 +0,0 @@ -# git-cliff ~ configuration file -# https://git-cliff.org/docs/configuration - -[changelog] -header = """ -""" - -footer = """ - ------ - -**[{{ remote.github.repo }}]({{ self::remote_url() }}) license terms** - -[![License][license-badge]][license-url] - -[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg -[license-url]: {{ self::remote_url() }}/?tab=Apache-2.0-1-ov-file#readme - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" - -body = """ -{%- if version %} -## [{{ version | trim_start_matches(pat="v") }}]({{ self::remote_url() }}/tree/{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} -{%- else %} -## [unreleased] -{%- endif %} -{%- if message %} - {%- raw %}\n{% endraw %} -{{ message }} - {%- raw %}\n{% endraw %} -{%- endif %} -{%- if version %} - {%- if previous.version %} - -**Full Changelog**: <{{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}> - {%- endif %} -{%- else %} - {%- raw %}\n{% endraw %} -{%- endif %} - -{%- if statistics %}{% if statistics.commit_count %} - {%- raw %}\n{% endraw %} -{{ statistics.commit_count }} commits in this release. 
- {%- raw %}\n{% endraw %} -{%- endif %}{% endif %} ------ - -{%- for group, commits in commits | group_by(attribute="group") %} - {%- raw %}\n{% endraw %} -### {{ group | upper_first }} - {%- raw %}\n{% endraw %} - {%- for commit in commits %} - {%- if commit.remote.pr_title %} - {%- set commit_message = commit.remote.pr_title %} - {%- else %} - {%- set commit_message = commit.message %} - {%- endif %} -* {{ commit_message | split(pat="\n") | first | trim }} - {%- if commit.remote.username %} -{%- raw %} {% endraw %}by [@{{ commit.remote.username }}](https://github.com/{{ commit.remote.username }}) - {%- endif %} - {%- if commit.remote.pr_number %} -{%- raw %} {% endraw %}in [#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }}) - {%- endif %} -{%- raw %} {% endraw %}[...]({{ self::remote_url() }}/commit/{{ commit.id }}) - {%- endfor %} -{%- endfor %} - -{%- if github %} -{%- raw %}\n{% endraw -%} - {%- set all_contributors = github.contributors | length %} - {%- if github.contributors | filter(attribute="username", value="dependabot[bot]") | length < all_contributors %} ------ - -### People who contributed to this release - {% endif %} - {%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) - {%- endif %} - {%- endfor %} - - {% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %} ------ - {%- raw %}\n{% endraw %} - -### New Contributors - {%- endif %} - - {%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* @{{ contributor.username }} made their first contribution - {%- if contributor.pr_number %} - in [#{{ 
contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \ - {%- endif %} - {%- endif %} - {%- endfor %} -{%- endif %} - -{%- raw %}\n{% endraw %} - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" -# Remove leading and trailing whitespaces from the changelog's body. -trim = true -# Render body even when there are no releases to process. -render_always = true -# An array of regex based postprocessors to modify the changelog. -postprocessors = [ - # Replace the placeholder with a URL. - #{ pattern = '', replace = "https://github.com/orhun/git-cliff" }, -] -# output file path -# output = "test.md" - -[git] -# Parse commits according to the conventional commits specification. -# See https://www.conventionalcommits.org -conventional_commits = false -# Exclude commits that do not match the conventional commits specification. -filter_unconventional = false -# Require all commits to be conventional. -# Takes precedence over filter_unconventional. -require_conventional = false -# Split commits on newlines, treating each line as an individual commit. -split_commits = false -# An array of regex based parsers to modify commit messages prior to further processing. -commit_preprocessors = [ - # Replace issue numbers with link templates to be updated in `changelog.postprocessors`. - #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, - # Check spelling of the commit message using https://github.com/crate-ci/typos. - # If the spelling is incorrect, it will be fixed automatically. - #{ pattern = '.*', replace_command = 'typos --write-changes -' } -] -# Prevent commits that are breaking from being excluded by commit parsers. -protect_breaking_commits = false -# An array of regex based parsers for extracting data from the commit message. -# Assigns commits to groups. 
-# Optionally sets the commit's scope and can decide to exclude commits from further processing. -commit_parsers = [ - { message = "^[Cc]hore\\([Rr]elease\\): prepare for", skip = true }, - { message = "(^[Mm]erge)|([Mm]erge conflict)", skip = true }, - { field = "author.name", pattern = "dependabot*", group = "Updates" }, - { message = "([Ss]ecurity)|([Vv]uln)", group = "Security" }, - { body = "(.*[Ss]ecurity)|([Vv]uln)", group = "Security" }, - { message = "([Cc]hore\\(lint\\))|(style)|(lint)|(codeql)|(golangci)", group = "Code quality" }, - { message = "(^[Dd]oc)|((?i)readme)|(badge)|(typo)|(documentation)", group = "Documentation" }, - { message = "(^[Ff]eat)|(^[Ee]nhancement)", group = "Implemented enhancements" }, - { message = "(^ci)|(\\(ci\\))|(fixup\\s+ci)|(fix\\s+ci)|(license)|(example)", group = "Miscellaneous tasks" }, - { message = "^test", group = "Testing" }, - { message = "(^fix)|(panic)", group = "Fixed bugs" }, - { message = "(^refact)|(rework)", group = "Refactor" }, - { message = "(^[Pp]erf)|(performance)", group = "Performance" }, - { message = "(^[Cc]hore)", group = "Miscellaneous tasks" }, - { message = "^[Rr]evert", group = "Reverted changes" }, - { message = "(upgrade.*?go)|(go\\s+version)", group = "Updates" }, - { message = ".*", group = "Other" }, -] -# Exclude commits that are not matched by any commit parser. -filter_commits = false -# An array of link parsers for extracting external references, and turning them into URLs, using regex. -link_parsers = [] -# Include only the tags that belong to the current branch. -use_branch_tags = false -# Order releases topologically instead of chronologically. -topo_order = false -# Order releases topologically instead of chronologically. -topo_order_commits = true -# Order of commits in each group/release within the changelog. 
-# Allowed values: newest, oldest -sort_commits = "newest" -# Process submodules commits -recurse_submodules = false - -#[remote.github] -#owner = "go-openapi" diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore index f47cb2045f..885dc27ab0 100644 --- a/vendor/github.com/go-openapi/spec/.gitignore +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -1 +1,6 @@ *.out +*.cov +.idea +.env +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml index fdae591bce..dc7c96053d 100644 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -12,6 +12,7 @@ linters: - paralleltest - recvcheck - testpackage + - thelper - tparallel - varnamelen - whitespace diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md index 9322b065e3..bac878f216 100644 --- a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. 
@@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/CONTRIBUTORS.md b/vendor/github.com/go-openapi/spec/CONTRIBUTORS.md index d97b9d330a..2967e3cedd 100644 --- a/vendor/github.com/go-openapi/spec/CONTRIBUTORS.md +++ b/vendor/github.com/go-openapi/spec/CONTRIBUTORS.md @@ -4,47 +4,47 @@ | Total Contributors | Total Contributions | | --- | --- | -| 38 | 391 | +| 38 | 392 | | Username | All Time Contribution Count | All Commits | | --- | --- | --- | -| @casualjim | 191 | https://github.com/go-openapi/spec/commits?author=casualjim | -| @fredbi | 89 | https://github.com/go-openapi/spec/commits?author=fredbi | -| @pytlesk4 | 26 | https://github.com/go-openapi/spec/commits?author=pytlesk4 | -| @kul-amr | 10 | https://github.com/go-openapi/spec/commits?author=kul-amr | -| @keramix | 10 | https://github.com/go-openapi/spec/commits?author=keramix | -| @youyuanwu | 8 | https://github.com/go-openapi/spec/commits?author=youyuanwu | -| @pengsrc | 7 | https://github.com/go-openapi/spec/commits?author=pengsrc | -| @alphacentory | 5 | https://github.com/go-openapi/spec/commits?author=alphacentory | -| @mtfelian | 4 | https://github.com/go-openapi/spec/commits?author=mtfelian | -| @Capstan | 4 | https://github.com/go-openapi/spec/commits?author=Capstan | -| @sdghchj | 4 | https://github.com/go-openapi/spec/commits?author=sdghchj | -| @databus23 | 2 | https://github.com/go-openapi/spec/commits?author=databus23 | -| @vburenin | 2 | https://github.com/go-openapi/spec/commits?author=vburenin | -| @petrkotas | 2 | https://github.com/go-openapi/spec/commits?author=petrkotas | -| @nikhita | 2 | 
https://github.com/go-openapi/spec/commits?author=nikhita | -| @hypnoglow | 2 | https://github.com/go-openapi/spec/commits?author=hypnoglow | -| @carvind | 2 | https://github.com/go-openapi/spec/commits?author=carvind | -| @ujjwalsh | 1 | https://github.com/go-openapi/spec/commits?author=ujjwalsh | -| @mbohlool | 1 | https://github.com/go-openapi/spec/commits?author=mbohlool | -| @j2gg0s | 1 | https://github.com/go-openapi/spec/commits?author=j2gg0s | -| @ishveda | 1 | https://github.com/go-openapi/spec/commits?author=ishveda | -| @micln | 1 | https://github.com/go-openapi/spec/commits?author=micln | -| @GlenDC | 1 | https://github.com/go-openapi/spec/commits?author=GlenDC | -| @agmikhailov | 1 | https://github.com/go-openapi/spec/commits?author=agmikhailov | -| @tgraf | 1 | https://github.com/go-openapi/spec/commits?author=tgraf | -| @zhsj | 1 | https://github.com/go-openapi/spec/commits?author=zhsj | -| @sebastien-rosset | 1 | https://github.com/go-openapi/spec/commits?author=sebastien-rosset | -| @alexandear | 1 | https://github.com/go-openapi/spec/commits?author=alexandear | -| @morlay | 1 | https://github.com/go-openapi/spec/commits?author=morlay | -| @mikedanese | 1 | https://github.com/go-openapi/spec/commits?author=mikedanese | -| @koron | 1 | https://github.com/go-openapi/spec/commits?author=koron | -| @honza | 1 | https://github.com/go-openapi/spec/commits?author=honza | -| @gbjk | 1 | https://github.com/go-openapi/spec/commits?author=gbjk | -| @faguirre1 | 1 | https://github.com/go-openapi/spec/commits?author=faguirre1 | -| @ethantkoenig | 1 | https://github.com/go-openapi/spec/commits?author=ethantkoenig | -| @sttts | 1 | https://github.com/go-openapi/spec/commits?author=sttts | -| @ChandanChainani | 1 | https://github.com/go-openapi/spec/commits?author=ChandanChainani | -| @bvwells | 1 | https://github.com/go-openapi/spec/commits?author=bvwells | +| @casualjim | 191 | | +| @fredbi | 90 | | +| @pytlesk4 | 26 | | +| @kul-amr | 10 | | +| @keramix | 10 | | 
+| @youyuanwu | 8 | | +| @pengsrc | 7 | | +| @alphacentory | 5 | | +| @mtfelian | 4 | | +| @Capstan | 4 | | +| @sdghchj | 4 | | +| @databus23 | 2 | | +| @vburenin | 2 | | +| @petrkotas | 2 | | +| @nikhita | 2 | | +| @hypnoglow | 2 | | +| @carvind | 2 | | +| @ujjwalsh | 1 | | +| @mbohlool | 1 | | +| @j2gg0s | 1 | | +| @ishveda | 1 | | +| @micln | 1 | | +| @GlenDC | 1 | | +| @agmikhailov | 1 | | +| @tgraf | 1 | | +| @zhsj | 1 | | +| @sebastien-rosset | 1 | | +| @alexandear | 1 | | +| @morlay | 1 | | +| @mikedanese | 1 | | +| @koron | 1 | | +| @honza | 1 | | +| @gbjk | 1 | | +| @faguirre1 | 1 | | +| @ethantkoenig | 1 | | +| @sttts | 1 | | +| @ChandanChainani | 1 | | +| @bvwells | 1 | | _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md index 13a2a17eae..134809fd77 100644 --- a/vendor/github.com/go-openapi/spec/README.md +++ b/vendor/github.com/go-openapi/spec/README.md @@ -55,7 +55,7 @@ go get github.com/go-openapi/spec > There is no plan to make it evolve toward supporting OpenAPI 3.x. > This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. > -> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 +> An early attempt to support Swagger 3 may be found at: * Does the unmarshaling support YAML? @@ -64,13 +64,13 @@ go get github.com/go-openapi/spec > In order to load a YAML document as a Swagger spec, you need to use the loaders provided by > github.com/go-openapi/loads > -> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec +> Take a look at the example there: > -> See also https://github.com/go-openapi/spec/issues/164 +> See also * How can I validate a spec? 
-> Validation is provided by [the validate package](http://github.com/go-openapi/validate) +Validation is provided by [the validate package](http://github.com/go-openapi/validate) * Why do we have an `ID` field for `Schema` which is not part of the swagger spec? @@ -78,7 +78,7 @@ go get github.com/go-openapi/spec > how `$ref` are resolved. > This `id` does not conflict with any property named `id`. > -> See also https://github.com/go-openapi/spec/issues/23 +> See also ## Change log @@ -136,7 +136,7 @@ Maintainers can cut a new release by either: [slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM [slack-url]: https://goswagger.slack.com/archives/C04R30YMU [discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue -[discord-url]: https://discord.gg/DrafRmZx +[discord-url]: https://discord.gg/twZ9BwT3 [license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg diff --git a/vendor/github.com/go-openapi/spec/SECURITY.md b/vendor/github.com/go-openapi/spec/SECURITY.md index 2a7b6f0910..1fea2c5736 100644 --- a/vendor/github.com/go-openapi/spec/SECURITY.md +++ b/vendor/github.com/go-openapi/spec/SECURITY.md @@ -6,14 +6,32 @@ This policy outlines the commitment and practices of the go-openapi maintainers | Version | Supported | | ------- | ------------------ | -| 0.22.x | :white_check_mark: | +| O.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. ## Reporting a vulnerability If you become aware of a security vulnerability that affects the current repository, -please report it privately to the maintainers. 
+**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. -Please follow the instructions provided by github to -[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". -TL;DR: on Github, navigate to the project's "Security" tab then click on "Report a vulnerability". +[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go index 10fba77a83..06495d2c3d 100644 --- a/vendor/github.com/go-openapi/spec/cache.go +++ b/vendor/github.com/go-openapi/spec/cache.go @@ -8,10 +8,10 @@ import ( "sync" ) -// ResolutionCache a cache for resolving urls +// ResolutionCache a cache for resolving urls. 
type ResolutionCache interface { - Get(string) (any, bool) - Set(string, any) + Get(uri string) (any, bool) + Set(uri string, data any) } type simpleCache struct { @@ -19,7 +19,7 @@ type simpleCache struct { store map[string]any } -func (s *simpleCache) ShallowClone() ResolutionCache { +func (s *simpleCache) ShallowClone() ResolutionCache { //nolint:ireturn // returns the public interface type by design store := make(map[string]any, len(s.store)) s.lock.RLock() maps.Copy(store, s.store) @@ -30,7 +30,7 @@ func (s *simpleCache) ShallowClone() ResolutionCache { } } -// Get retrieves a cached URI +// Get retrieves a cached URI. func (s *simpleCache) Get(uri string) (any, bool) { s.lock.RLock() v, ok := s.store[uri] @@ -39,7 +39,7 @@ func (s *simpleCache) Get(uri string) (any, bool) { return v, ok } -// Set caches a URI +// Set caches a URI. func (s *simpleCache) Set(uri string, data any) { s.lock.Lock() s.store[uri] = data @@ -56,8 +56,8 @@ var ( // // All subsequent utilizations of this cache are produced from a shallow // clone of this initial version. 
- resCache *simpleCache - onceCache sync.Once + resCache *simpleCache //nolint:gochecknoglobals // package-level lazy cache for $ref resolution + onceCache sync.Once //nolint:gochecknoglobals // guards lazy init of resCache _ ResolutionCache = &simpleCache{} ) @@ -74,7 +74,7 @@ func defaultResolutionCache() *simpleCache { }} } -func cacheOrDefault(cache ResolutionCache) ResolutionCache { +func cacheOrDefault(cache ResolutionCache) ResolutionCache { //nolint:ireturn // returns the public interface type by design onceCache.Do(initResolutionCache) if cache != nil { diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go index fafe639b45..46fada5dff 100644 --- a/vendor/github.com/go-openapi/spec/contact_info.go +++ b/vendor/github.com/go-openapi/spec/contact_info.go @@ -17,14 +17,14 @@ type ContactInfo struct { VendorExtensible } -// ContactInfoProps hold the properties of a ContactInfo object +// ContactInfoProps hold the properties of a ContactInfo object. type ContactInfoProps struct { Name string `json:"name,omitempty"` URL string `json:"url,omitempty"` Email string `json:"email,omitempty"` } -// UnmarshalJSON hydrates ContactInfo from json +// UnmarshalJSON hydrates ContactInfo from json. func (c *ContactInfo) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil { return err @@ -32,7 +32,7 @@ func (c *ContactInfo) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &c.VendorExtensible) } -// MarshalJSON produces ContactInfo as json +// MarshalJSON produces ContactInfo as json. 
func (c ContactInfo) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(c.ContactInfoProps) if err != nil { diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go index f4316c2633..fa52b0c7f7 100644 --- a/vendor/github.com/go-openapi/spec/debug.go +++ b/vendor/github.com/go-openapi/spec/debug.go @@ -14,14 +14,12 @@ import ( // Debug is true when the SWAGGER_DEBUG env var is not empty. // // It enables a more verbose logging of this package. -var Debug = os.Getenv("SWAGGER_DEBUG") != "" +var Debug = os.Getenv("SWAGGER_DEBUG") != "" //nolint:gochecknoglobals // public toggle for debug logging -var ( - // specLogger is a debug logger for this package - specLogger *log.Logger -) +// specLogger is a debug logger for this package. +var specLogger *log.Logger //nolint:gochecknoglobals // package-level debug logger -func init() { +func init() { //nolint:gochecknoinits // initializes debug logger at package load debugOptions() } diff --git a/vendor/github.com/go-openapi/spec/errors.go b/vendor/github.com/go-openapi/spec/errors.go index e39ab8bf71..eaca01cc83 100644 --- a/vendor/github.com/go-openapi/spec/errors.go +++ b/vendor/github.com/go-openapi/spec/errors.go @@ -5,21 +5,21 @@ package spec import "errors" -// Error codes +// Error codes. var ( - // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type + // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type. ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference") - // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer + // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer. ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer") // ErrDerefUnsupportedType indicates that a resolved reference was found in an unsupported container type. 
- // At the moment, $ref are supported only inside: schemas, parameters, responses, path items + // At the moment, $ref are supported only inside: schemas, parameters, responses, path items. ErrDerefUnsupportedType = errors.New("deref: unsupported type") - // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type + // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type. ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response") - // ErrSpec is an error raised by the spec package + // ErrSpec is an error raised by the spec package. ErrSpec = errors.New("spec error") ) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index ff45350ab4..f9c2fa327a 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -38,7 +38,7 @@ func optionsOrDefault(opts *ExpandOptions) *ExpandOptions { return &ExpandOptions{} } -// ExpandSpec expands the references in a swagger spec +// ExpandSpec expands the references in a swagger spec. func ExpandSpec(spec *Swagger, options *ExpandOptions) error { options = optionsOrDefault(options) resolver := defaultSchemaLoader(spec, options, nil, nil) @@ -92,7 +92,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { const rootBase = ".root" // baseForRoot loads in the cache the root document and produces a fake ".root" base path entry -// for further $ref resolution +// for further $ref resolution. 
func baseForRoot(root any, cache ResolutionCache) string { // cache the root document to resolve $ref's normalizedBase := normalizeBase(rootBase) @@ -190,6 +190,7 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, bas return &target, nil } +//nolint:gocognit,gocyclo,cyclop // complex but well-tested $ref expansion logic; refactoring deferred to dedicated PR func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { if target.Ref.String() == "" && target.Ref.IsRoot() { newRef := normalizeRef(&target.Ref, basePath) @@ -464,7 +465,7 @@ func ExpandResponseWithRoot(response *Response, root any, cache ResolutionCache) // ExpandResponse expands a response based on a basepath // -// All refs inside response will be resolved relative to basePath +// All refs inside response will be resolved relative to basePath. func ExpandResponse(response *Response, basePath string) error { opts := optionsOrDefault(&ExpandOptions{ RelativeBase: basePath, @@ -491,7 +492,7 @@ func ExpandParameterWithRoot(parameter *Parameter, root any, cache ResolutionCac // ExpandParameter expands a parameter based on a basepath. // This is the exported version of expandParameter -// all refs inside parameter will be resolved relative to basePath +// all refs inside parameter will be resolved relative to basePath. 
func ExpandParameter(parameter *Parameter, basePath string) error { opts := optionsOrDefault(&ExpandOptions{ RelativeBase: basePath, @@ -565,7 +566,7 @@ func expandParameterOrResponse(input any, resolver *schemaLoader, basePath strin return nil } - if sch.Ref.String() != "" { + if sch.Ref.String() != "" { //nolint:nestif // intertwined ref rebasing and circularity check rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath)) if ern != nil { return ern diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go index ab251ef765..599ba2c5d7 100644 --- a/vendor/github.com/go-openapi/spec/header.go +++ b/vendor/github.com/go-openapi/spec/header.go @@ -15,7 +15,7 @@ const ( jsonArray = "array" ) -// HeaderProps describes a response header +// HeaderProps describes a response header. type HeaderProps struct { Description string `json:"description,omitempty"` } @@ -30,25 +30,25 @@ type Header struct { HeaderProps } -// ResponseHeader creates a new header instance for use in a response +// ResponseHeader creates a new header instance for use in a response. func ResponseHeader() *Header { return new(Header) } -// WithDescription sets the description on this response, allows for chaining +// WithDescription sets the description on this response, allows for chaining. func (h *Header) WithDescription(description string) *Header { h.Description = description return h } -// Typed a fluent builder method for the type of parameter +// Typed a fluent builder method for the type of parameter. func (h *Header) Typed(tpe, format string) *Header { h.Type = tpe h.Format = format return h } -// CollectionOf a fluent builder method for an array item +// CollectionOf a fluent builder method for an array item. 
func (h *Header) CollectionOf(items *Items, format string) *Header { h.Type = jsonArray h.Items = items @@ -56,87 +56,87 @@ func (h *Header) CollectionOf(items *Items, format string) *Header { return h } -// WithDefault sets the default value on this item +// WithDefault sets the default value on this item. func (h *Header) WithDefault(defaultValue any) *Header { h.Default = defaultValue return h } -// WithMaxLength sets a max length value +// WithMaxLength sets a max length value. func (h *Header) WithMaxLength(maximum int64) *Header { h.MaxLength = &maximum return h } -// WithMinLength sets a min length value +// WithMinLength sets a min length value. func (h *Header) WithMinLength(minimum int64) *Header { h.MinLength = &minimum return h } -// WithPattern sets a pattern value +// WithPattern sets a pattern value. func (h *Header) WithPattern(pattern string) *Header { h.Pattern = pattern return h } -// WithMultipleOf sets a multiple of value +// WithMultipleOf sets a multiple of value. func (h *Header) WithMultipleOf(number float64) *Header { h.MultipleOf = &number return h } -// WithMaximum sets a maximum number value +// WithMaximum sets a maximum number value. func (h *Header) WithMaximum(maximum float64, exclusive bool) *Header { h.Maximum = &maximum h.ExclusiveMaximum = exclusive return h } -// WithMinimum sets a minimum number value +// WithMinimum sets a minimum number value. func (h *Header) WithMinimum(minimum float64, exclusive bool) *Header { h.Minimum = &minimum h.ExclusiveMinimum = exclusive return h } -// WithEnum sets a the enum values (replace) +// WithEnum sets a the enum values (replace). func (h *Header) WithEnum(values ...any) *Header { h.Enum = append([]any{}, values...) return h } -// WithMaxItems sets the max items +// WithMaxItems sets the max items. func (h *Header) WithMaxItems(size int64) *Header { h.MaxItems = &size return h } -// WithMinItems sets the min items +// WithMinItems sets the min items. 
func (h *Header) WithMinItems(size int64) *Header { h.MinItems = &size return h } -// UniqueValues dictates that this array can only have unique items +// UniqueValues dictates that this array can only have unique items. func (h *Header) UniqueValues() *Header { h.UniqueItems = true return h } -// AllowDuplicates this array can have duplicates +// AllowDuplicates this array can have duplicates. func (h *Header) AllowDuplicates() *Header { h.UniqueItems = false return h } -// WithValidations is a fluent method to set header validations +// WithValidations is a fluent method to set header validations. func (h *Header) WithValidations(val CommonValidations) *Header { h.SetValidations(SchemaValidations{CommonValidations: val}) return h } -// MarshalJSON marshal this to JSON +// MarshalJSON marshal this to JSON. func (h Header) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(h.CommonValidations) if err != nil { @@ -153,7 +153,7 @@ func (h Header) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b1, b2, b3), nil } -// UnmarshalJSON unmarshals this header from JSON +// UnmarshalJSON unmarshals this header from JSON. func (h *Header) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &h.CommonValidations); err != nil { return err @@ -167,7 +167,7 @@ func (h *Header) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &h.HeaderProps) } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. 
func (h Header) JSONLookup(token string) (any, error) { if ex, ok := h.Extensions[token]; ok { return &ex, nil diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go index 9401065bbd..0ccfdcccd9 100644 --- a/vendor/github.com/go-openapi/spec/info.go +++ b/vendor/github.com/go-openapi/spec/info.go @@ -12,16 +12,16 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// Extensions vendor specific extensions +// Extensions vendor specific extensions. type Extensions map[string]any -// Add adds a value to these extensions +// Add adds a value to these extensions. func (e Extensions) Add(key string, value any) { realKey := strings.ToLower(key) e[realKey] = value } -// GetString gets a string value from the extensions +// GetString gets a string value from the extensions. func (e Extensions) GetString(key string) (string, bool) { if v, ok := e[strings.ToLower(key)]; ok { str, ok := v.(string) @@ -30,7 +30,7 @@ func (e Extensions) GetString(key string) (string, bool) { return "", false } -// GetInt gets a int value from the extensions +// GetInt gets a int value from the extensions. func (e Extensions) GetInt(key string) (int, bool) { realKey := strings.ToLower(key) @@ -48,7 +48,7 @@ func (e Extensions) GetInt(key string) (int, bool) { return -1, false } -// GetBool gets a string value from the extensions +// GetBool gets a string value from the extensions. func (e Extensions) GetBool(key string) (bool, bool) { if v, ok := e[strings.ToLower(key)]; ok { str, ok := v.(bool) @@ -57,7 +57,7 @@ func (e Extensions) GetBool(key string) (bool, bool) { return false, false } -// GetStringSlice gets a string value from the extensions +// GetStringSlice gets a string value from the extensions. 
func (e Extensions) GetStringSlice(key string) ([]string, bool) { if v, ok := e[strings.ToLower(key)]; ok { arr, isSlice := v.([]any) @@ -82,7 +82,7 @@ type VendorExtensible struct { Extensions Extensions } -// AddExtension adds an extension to this extensible object +// AddExtension adds an extension to this extensible object. func (v *VendorExtensible) AddExtension(key string, value any) { if value == nil { return @@ -93,7 +93,7 @@ func (v *VendorExtensible) AddExtension(key string, value any) { v.Extensions.Add(key, value) } -// MarshalJSON marshals the extensions to json +// MarshalJSON marshals the extensions to json. func (v VendorExtensible) MarshalJSON() ([]byte, error) { toser := make(map[string]any) for k, v := range v.Extensions { @@ -105,7 +105,7 @@ func (v VendorExtensible) MarshalJSON() ([]byte, error) { return json.Marshal(toser) } -// UnmarshalJSON for this extensible object +// UnmarshalJSON for this extensible object. func (v *VendorExtensible) UnmarshalJSON(data []byte) error { var d map[string]any if err := json.Unmarshal(data, &d); err != nil { @@ -123,7 +123,7 @@ func (v *VendorExtensible) UnmarshalJSON(data []byte) error { return nil } -// InfoProps the properties for an info definition +// InfoProps the properties for an info definition. type InfoProps struct { Description string `json:"description,omitempty"` Title string `json:"title,omitempty"` @@ -142,7 +142,7 @@ type Info struct { InfoProps } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (i Info) JSONLookup(token string) (any, error) { if ex, ok := i.Extensions[token]; ok { return &ex, nil @@ -151,7 +151,7 @@ func (i Info) JSONLookup(token string) (any, error) { return r, err } -// MarshalJSON marshal this to JSON +// MarshalJSON marshal this to JSON. 
func (i Info) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(i.InfoProps) if err != nil { @@ -164,7 +164,7 @@ func (i Info) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b1, b2), nil } -// UnmarshalJSON marshal this from JSON +// UnmarshalJSON marshal this from JSON. func (i *Info) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &i.InfoProps); err != nil { return err diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go index d30ca3569b..daf5a4fd42 100644 --- a/vendor/github.com/go-openapi/spec/items.go +++ b/vendor/github.com/go-openapi/spec/items.go @@ -15,7 +15,7 @@ const ( jsonRef = "$ref" ) -// SimpleSchema describe swagger simple schemas for parameters and headers +// SimpleSchema describe swagger simple schemas for parameters and headers. type SimpleSchema struct { Type string `json:"type,omitempty"` Nullable bool `json:"nullable,omitempty"` @@ -26,7 +26,7 @@ type SimpleSchema struct { Example any `json:"example,omitempty"` } -// TypeName return the type (or format) of a simple schema +// TypeName return the type (or format) of a simple schema. func (s *SimpleSchema) TypeName() string { if s.Format != "" { return s.Format @@ -34,7 +34,7 @@ func (s *SimpleSchema) TypeName() string { return s.Type } -// ItemsTypeName yields the type of items in a simple schema array +// ItemsTypeName yields the type of items in a simple schema array. func (s *SimpleSchema) ItemsTypeName() string { if s.Items == nil { return "" @@ -53,12 +53,12 @@ type Items struct { VendorExtensible } -// NewItems creates a new instance of items +// NewItems creates a new instance of items. func NewItems() *Items { return &Items{} } -// Typed a fluent builder method for the type of item +// Typed a fluent builder method for the type of item. 
func (i *Items) Typed(tpe, format string) *Items { i.Type = tpe i.Format = format @@ -71,7 +71,7 @@ func (i *Items) AsNullable() *Items { return i } -// CollectionOf a fluent builder method for an array item +// CollectionOf a fluent builder method for an array item. func (i *Items) CollectionOf(items *Items, format string) *Items { i.Type = jsonArray i.Items = items @@ -79,87 +79,87 @@ func (i *Items) CollectionOf(items *Items, format string) *Items { return i } -// WithDefault sets the default value on this item +// WithDefault sets the default value on this item. func (i *Items) WithDefault(defaultValue any) *Items { i.Default = defaultValue return i } -// WithMaxLength sets a max length value +// WithMaxLength sets a max length value. func (i *Items) WithMaxLength(maximum int64) *Items { i.MaxLength = &maximum return i } -// WithMinLength sets a min length value +// WithMinLength sets a min length value. func (i *Items) WithMinLength(minimum int64) *Items { i.MinLength = &minimum return i } -// WithPattern sets a pattern value +// WithPattern sets a pattern value. func (i *Items) WithPattern(pattern string) *Items { i.Pattern = pattern return i } -// WithMultipleOf sets a multiple of value +// WithMultipleOf sets a multiple of value. func (i *Items) WithMultipleOf(number float64) *Items { i.MultipleOf = &number return i } -// WithMaximum sets a maximum number value +// WithMaximum sets a maximum number value. func (i *Items) WithMaximum(maximum float64, exclusive bool) *Items { i.Maximum = &maximum i.ExclusiveMaximum = exclusive return i } -// WithMinimum sets a minimum number value +// WithMinimum sets a minimum number value. func (i *Items) WithMinimum(minimum float64, exclusive bool) *Items { i.Minimum = &minimum i.ExclusiveMinimum = exclusive return i } -// WithEnum sets a the enum values (replace) +// WithEnum sets a the enum values (replace). func (i *Items) WithEnum(values ...any) *Items { i.Enum = append([]any{}, values...) 
return i } -// WithMaxItems sets the max items +// WithMaxItems sets the max items. func (i *Items) WithMaxItems(size int64) *Items { i.MaxItems = &size return i } -// WithMinItems sets the min items +// WithMinItems sets the min items. func (i *Items) WithMinItems(size int64) *Items { i.MinItems = &size return i } -// UniqueValues dictates that this array can only have unique items +// UniqueValues dictates that this array can only have unique items. func (i *Items) UniqueValues() *Items { i.UniqueItems = true return i } -// AllowDuplicates this array can have duplicates +// AllowDuplicates this array can have duplicates. func (i *Items) AllowDuplicates() *Items { i.UniqueItems = false return i } -// WithValidations is a fluent method to set Items validations +// WithValidations is a fluent method to set Items validations. func (i *Items) WithValidations(val CommonValidations) *Items { i.SetValidations(SchemaValidations{CommonValidations: val}) return i } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. func (i *Items) UnmarshalJSON(data []byte) error { var validations CommonValidations if err := json.Unmarshal(data, &validations); err != nil { @@ -184,7 +184,7 @@ func (i *Items) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. func (i Items) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(i.CommonValidations) if err != nil { @@ -205,7 +205,7 @@ func (i Items) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b4, b3, b1, b2), nil } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. 
func (i Items) JSONLookup(token string) (any, error) { if token == jsonRef { return &i.Ref, nil diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go index 286b237e2b..8209f218b5 100644 --- a/vendor/github.com/go-openapi/spec/license.go +++ b/vendor/github.com/go-openapi/spec/license.go @@ -17,13 +17,13 @@ type License struct { VendorExtensible } -// LicenseProps holds the properties of a License object +// LicenseProps holds the properties of a License object. type LicenseProps struct { Name string `json:"name,omitempty"` URL string `json:"url,omitempty"` } -// UnmarshalJSON hydrates License from json +// UnmarshalJSON hydrates License from json. func (l *License) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &l.LicenseProps); err != nil { return err @@ -31,7 +31,7 @@ func (l *License) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &l.VendorExtensible) } -// MarshalJSON produces License as json +// MarshalJSON produces License as json. func (l License) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(l.LicenseProps) if err != nil { diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go index e1d7c58d72..68252dc30b 100644 --- a/vendor/github.com/go-openapi/spec/normalizer.go +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -138,7 +138,7 @@ func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) { return MustCreateRef(newBase.String()), true } -// normalizeRef canonicalize a Ref, using a canonical relativeBase as its absolute anchor +// normalizeRef canonicalize a Ref, using a canonical relativeBase as its absolute anchor. 
func normalizeRef(ref *Ref, relativeBase string) *Ref { r := MustCreateRef(normalizeURI(ref.String(), relativeBase)) return &r diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go index 29d9c4f482..cd70d2547c 100644 --- a/vendor/github.com/go-openapi/spec/operation.go +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -13,7 +13,7 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -func init() { +func init() { //nolint:gochecknoinits // registers gob types for Operation serialization gob.Register(map[string]any{}) gob.Register([]any{}) } @@ -22,7 +22,7 @@ func init() { // // NOTES: // - schemes, when present must be from [http, https, ws, wss]: see validate -// - Security is handled as a special case: see MarshalJSON function +// - Security is handled as a special case: see MarshalJSON function. type OperationProps struct { Description string `json:"description,omitempty"` Consumes []string `json:"consumes,omitempty"` @@ -82,7 +82,7 @@ func NewOperation(id string) *Operation { return op } -// SuccessResponse gets a success response model +// SuccessResponse gets a success response model. func (o *Operation) SuccessResponse() (*Response, int, bool) { if o.Responses == nil { return nil, 0, false @@ -103,7 +103,7 @@ func (o *Operation) SuccessResponse() (*Response, int, bool) { return o.Responses.Default, 0, false } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (o Operation) JSONLookup(token string) (any, error) { if ex, ok := o.Extensions[token]; ok { return &ex, nil @@ -112,7 +112,7 @@ func (o Operation) JSONLookup(token string) (any, error) { return r, err } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. 
func (o *Operation) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &o.OperationProps); err != nil { return err @@ -120,7 +120,7 @@ func (o *Operation) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &o.VendorExtensible) } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. func (o Operation) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(o.OperationProps) if err != nil { @@ -140,13 +140,13 @@ func (o *Operation) WithID(id string) *Operation { return o } -// WithDescription sets the description on this operation, allows for chaining +// WithDescription sets the description on this operation, allows for chaining. func (o *Operation) WithDescription(description string) *Operation { o.Description = description return o } -// WithSummary sets the summary on this operation, allows for chaining +// WithSummary sets the summary on this operation, allows for chaining. func (o *Operation) WithSummary(summary string) *Operation { o.Summary = summary return o @@ -170,38 +170,38 @@ func (o *Operation) WithExternalDocs(description, url string) *Operation { return o } -// Deprecate marks the operation as deprecated +// Deprecate marks the operation as deprecated. func (o *Operation) Deprecate() *Operation { o.Deprecated = true return o } -// Undeprecate marks the operation as not deprecated +// Undeprecate marks the operation as not deprecated. func (o *Operation) Undeprecate() *Operation { o.Deprecated = false return o } -// WithConsumes adds media types for incoming body values +// WithConsumes adds media types for incoming body values. func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { o.Consumes = append(o.Consumes, mediaTypes...) return o } -// WithProduces adds media types for outgoing body values +// WithProduces adds media types for outgoing body values. 
func (o *Operation) WithProduces(mediaTypes ...string) *Operation { o.Produces = append(o.Produces, mediaTypes...) return o } -// WithTags adds tags for this operation +// WithTags adds tags for this operation. func (o *Operation) WithTags(tags ...string) *Operation { o.Tags = append(o.Tags, tags...) return o } // AddParam adds a parameter to this operation, when a parameter for that location -// and with that name already exists it will be replaced +// and with that name already exists it will be replaced. func (o *Operation) AddParam(param *Parameter) *Operation { if param == nil { return o @@ -223,7 +223,7 @@ func (o *Operation) AddParam(param *Parameter) *Operation { return o } -// RemoveParam removes a parameter from the operation +// RemoveParam removes a parameter from the operation. func (o *Operation) RemoveParam(name, in string) *Operation { for i, p := range o.Parameters { if p.Name == name && p.In == in { @@ -241,14 +241,14 @@ func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { } // WithDefaultResponse adds a default response to the operation. -// Passing a nil value will remove the response +// Passing a nil value will remove the response. func (o *Operation) WithDefaultResponse(response *Response) *Operation { return o.RespondsWith(0, response) } // RespondsWith adds a status code response to the operation. // When the code is 0 the value of the response will be used as default response value. -// When the value of the response is nil it will be removed from the operation +// When the value of the response is nil it will be removed from the operation. 
func (o *Operation) RespondsWith(code int, response *Response) *Operation { if o.Responses == nil { o.Responses = new(Responses) @@ -279,7 +279,7 @@ type gobAlias struct { SecurityIsEmpty bool } -// GobEncode provides a safe gob encoder for Operation, including empty security requirements +// GobEncode provides a safe gob encoder for Operation, including empty security requirements. func (o Operation) GobEncode() ([]byte, error) { raw := struct { Ext VendorExtensible @@ -293,7 +293,7 @@ func (o Operation) GobEncode() ([]byte, error) { return b.Bytes(), err } -// GobDecode provides a safe gob decoder for Operation, including empty security requirements +// GobDecode provides a safe gob decoder for Operation, including empty security requirements. func (o *Operation) GobDecode(b []byte) error { var raw struct { Ext VendorExtensible @@ -310,7 +310,7 @@ func (o *Operation) GobDecode(b []byte) error { return nil } -// GobEncode provides a safe gob encoder for Operation, including empty security requirements +// GobEncode provides a safe gob encoder for Operation, including empty security requirements. func (op OperationProps) GobEncode() ([]byte, error) { raw := gobAlias{ Alias: (*opsAlias)(&op), @@ -355,7 +355,7 @@ func (op OperationProps) GobEncode() ([]byte, error) { return b.Bytes(), err } -// GobDecode provides a safe gob decoder for Operation, including empty security requirements +// GobDecode provides a safe gob decoder for Operation, including empty security requirements. func (op *OperationProps) GobDecode(b []byte) error { var raw gobAlias diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go index b94b7682ac..516f5d95c5 100644 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -11,45 +11,51 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// QueryParam creates a query parameter +// QueryParam creates a query parameter. 
func QueryParam(name string) *Parameter { return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} } -// HeaderParam creates a header parameter, this is always required by default +// HeaderParam creates a header parameter, this is always required by default. func HeaderParam(name string) *Parameter { return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} } -// PathParam creates a path parameter, this is always required +// PathParam creates a path parameter, this is always required. func PathParam(name string) *Parameter { return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} } -// BodyParam creates a body parameter +// BodyParam creates a body parameter. func BodyParam(name string, schema *Schema) *Parameter { return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}} } -// FormDataParam creates a body parameter +// FormDataParam creates a body parameter. func FormDataParam(name string) *Parameter { return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} } -// FileParam creates a body parameter +// FileParam creates a body parameter. func FileParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, - SimpleSchema: SimpleSchema{Type: "file"}} + return &Parameter{ + ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}, + } } -// SimpleArrayParam creates a param for a simple array (string, int, date etc) +// SimpleArrayParam creates a param for a simple array (string, int, date etc). 
func SimpleArrayParam(name, tpe, fmt string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name}, - SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", - Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}} + return &Parameter{ + ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{ + Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}, + }, + } } -// ParamRef creates a parameter that's a json reference +// ParamRef creates a parameter that's a json reference. func ParamRef(uri string) *Parameter { p := new(Parameter) p.Ref = MustCreateRef(uri) @@ -60,7 +66,7 @@ func ParamRef(uri string) *Parameter { // // NOTE: // - Schema is defined when "in" == "body": see validate -// - AllowEmptyValue is allowed where "in" == "query" || "formData" +// - AllowEmptyValue is allowed where "in" == "query" || "formData". type ParamProps struct { Description string `json:"description,omitempty"` Name string `json:"name,omitempty"` @@ -104,7 +110,7 @@ type Parameter struct { ParamProps } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (p Parameter) JSONLookup(token string) (any, error) { if ex, ok := p.Extensions[token]; ok { return &ex, nil @@ -131,32 +137,32 @@ func (p Parameter) JSONLookup(token string) (any, error) { return r, err } -// WithDescription a fluent builder method for the description of the parameter +// WithDescription a fluent builder method for the description of the parameter. func (p *Parameter) WithDescription(description string) *Parameter { p.Description = description return p } -// Named a fluent builder method to override the name of the parameter +// Named a fluent builder method to override the name of the parameter. 
func (p *Parameter) Named(name string) *Parameter { p.Name = name return p } -// WithLocation a fluent builder method to override the location of the parameter +// WithLocation a fluent builder method to override the location of the parameter. func (p *Parameter) WithLocation(in string) *Parameter { p.In = in return p } -// Typed a fluent builder method for the type of the parameter value +// Typed a fluent builder method for the type of the parameter value. func (p *Parameter) Typed(tpe, format string) *Parameter { p.Type = tpe p.Format = format return p } -// CollectionOf a fluent builder method for an array parameter +// CollectionOf a fluent builder method for an array parameter. func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { p.Type = jsonArray p.Items = items @@ -164,32 +170,32 @@ func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { return p } -// WithDefault sets the default value on this parameter +// WithDefault sets the default value on this parameter. func (p *Parameter) WithDefault(defaultValue any) *Parameter { p.AsOptional() // with default implies optional p.Default = defaultValue return p } -// AllowsEmptyValues flags this parameter as being ok with empty values +// AllowsEmptyValues flags this parameter as being ok with empty values. func (p *Parameter) AllowsEmptyValues() *Parameter { p.AllowEmptyValue = true return p } -// NoEmptyValues flags this parameter as not liking empty values +// NoEmptyValues flags this parameter as not liking empty values. func (p *Parameter) NoEmptyValues() *Parameter { p.AllowEmptyValue = false return p } -// AsOptional flags this parameter as optional +// AsOptional flags this parameter as optional. func (p *Parameter) AsOptional() *Parameter { p.Required = false return p } -// AsRequired flags this parameter as required +// AsRequired flags this parameter as required. 
func (p *Parameter) AsRequired() *Parameter { if p.Default != nil { // with a default required makes no sense return p @@ -198,81 +204,81 @@ func (p *Parameter) AsRequired() *Parameter { return p } -// WithMaxLength sets a max length value +// WithMaxLength sets a max length value. func (p *Parameter) WithMaxLength(maximum int64) *Parameter { p.MaxLength = &maximum return p } -// WithMinLength sets a min length value +// WithMinLength sets a min length value. func (p *Parameter) WithMinLength(minimum int64) *Parameter { p.MinLength = &minimum return p } -// WithPattern sets a pattern value +// WithPattern sets a pattern value. func (p *Parameter) WithPattern(pattern string) *Parameter { p.Pattern = pattern return p } -// WithMultipleOf sets a multiple of value +// WithMultipleOf sets a multiple of value. func (p *Parameter) WithMultipleOf(number float64) *Parameter { p.MultipleOf = &number return p } -// WithMaximum sets a maximum number value +// WithMaximum sets a maximum number value. func (p *Parameter) WithMaximum(maximum float64, exclusive bool) *Parameter { p.Maximum = &maximum p.ExclusiveMaximum = exclusive return p } -// WithMinimum sets a minimum number value +// WithMinimum sets a minimum number value. func (p *Parameter) WithMinimum(minimum float64, exclusive bool) *Parameter { p.Minimum = &minimum p.ExclusiveMinimum = exclusive return p } -// WithEnum sets a the enum values (replace) +// WithEnum sets a the enum values (replace). func (p *Parameter) WithEnum(values ...any) *Parameter { p.Enum = append([]any{}, values...) return p } -// WithMaxItems sets the max items +// WithMaxItems sets the max items. func (p *Parameter) WithMaxItems(size int64) *Parameter { p.MaxItems = &size return p } -// WithMinItems sets the min items +// WithMinItems sets the min items. 
func (p *Parameter) WithMinItems(size int64) *Parameter { p.MinItems = &size return p } -// UniqueValues dictates that this array can only have unique items +// UniqueValues dictates that this array can only have unique items. func (p *Parameter) UniqueValues() *Parameter { p.UniqueItems = true return p } -// AllowDuplicates this array can have duplicates +// AllowDuplicates this array can have duplicates. func (p *Parameter) AllowDuplicates() *Parameter { p.UniqueItems = false return p } -// WithValidations is a fluent method to set parameter validations +// WithValidations is a fluent method to set parameter validations. func (p *Parameter) WithValidations(val CommonValidations) *Parameter { p.SetValidations(SchemaValidations{CommonValidations: val}) return p } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. func (p *Parameter) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &p.CommonValidations); err != nil { return err @@ -289,7 +295,7 @@ func (p *Parameter) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &p.ParamProps) } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. func (p Parameter) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(p.CommonValidations) if err != nil { diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go index c692b89e46..4408ece465 100644 --- a/vendor/github.com/go-openapi/spec/path_item.go +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -10,7 +10,7 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// PathItemProps the path item specific properties +// PathItemProps the path item specific properties. 
type PathItemProps struct { Get *Operation `json:"get,omitempty"` Put *Operation `json:"put,omitempty"` @@ -34,7 +34,7 @@ type PathItem struct { PathItemProps } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (p PathItem) JSONLookup(token string) (any, error) { if ex, ok := p.Extensions[token]; ok { return &ex, nil @@ -46,7 +46,7 @@ func (p PathItem) JSONLookup(token string) (any, error) { return r, err } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. func (p *PathItem) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &p.Refable); err != nil { return err @@ -57,7 +57,7 @@ func (p *PathItem) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &p.PathItemProps) } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. func (p PathItem) MarshalJSON() ([]byte, error) { b3, err := json.Marshal(p.Refable) if err != nil { diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go index b9e42184b1..5daf5a6709 100644 --- a/vendor/github.com/go-openapi/spec/paths.go +++ b/vendor/github.com/go-openapi/spec/paths.go @@ -23,7 +23,7 @@ type Paths struct { Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (p Paths) JSONLookup(token string) (any, error) { if pi, ok := p.Paths[token]; ok { return &pi, nil @@ -34,7 +34,7 @@ func (p Paths) JSONLookup(token string) (any, error) { return nil, fmt.Errorf("object has no field %q: %w", token, ErrSpec) } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. 
func (p *Paths) UnmarshalJSON(data []byte) error { var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { @@ -65,7 +65,7 @@ func (p *Paths) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. func (p Paths) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(p.VendorExtensible) if err != nil { diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go index c498818067..b8e97271e1 100644 --- a/vendor/github.com/go-openapi/spec/properties.go +++ b/vendor/github.com/go-openapi/spec/properties.go @@ -10,7 +10,7 @@ import ( "sort" ) -// OrderSchemaItem holds a named schema (e.g. from a property of an object) +// OrderSchemaItem holds a named schema (e.g. from a property of an object). type OrderSchemaItem struct { Schema @@ -53,7 +53,7 @@ func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], it func (items OrderSchemaItems) Less(i, j int) (ret bool) { ii, oki := items[i].Extensions.GetInt("x-order") ij, okj := items[j].Extensions.GetInt("x-order") - if oki { + if oki { //nolint:nestif // nested recover logic for safe type comparison if okj { defer func() { if err := recover(); err != nil { @@ -94,7 +94,7 @@ func (items OrderSchemaItems) marshalJSONItem(item OrderSchemaItem, output *byte // It knows how to transform its keys into an ordered slice. type SchemaProperties map[string]Schema -// ToOrderedSchemaItems transforms the map of properties into a sortable slice +// ToOrderedSchemaItems transforms the map of properties into a sortable slice. 
func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems { items := make(OrderSchemaItems, 0, len(properties)) for k, v := range properties { diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go index 1d1c759174..40b7d486c9 100644 --- a/vendor/github.com/go-openapi/spec/ref.go +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -14,28 +14,28 @@ import ( "github.com/go-openapi/jsonreference" ) -// Refable is a struct for things that accept a $ref property +// Refable is a struct for things that accept a $ref property. type Refable struct { Ref Ref } -// MarshalJSON marshals the ref to json +// MarshalJSON marshals the ref to json. func (r Refable) MarshalJSON() ([]byte, error) { return r.Ref.MarshalJSON() } -// UnmarshalJSON unmarshals the ref from json +// UnmarshalJSON unmarshals the ref from json. func (r *Refable) UnmarshalJSON(d []byte) error { return json.Unmarshal(d, &r.Ref) } -// Ref represents a json reference that is potentially resolved +// Ref represents a json reference that is potentially resolved. type Ref struct { jsonreference.Ref } // NewRef creates a new instance of a ref object -// returns an error when the reference uri is an invalid uri +// returns an error when the reference uri is an invalid uri. func NewRef(refURI string) (Ref, error) { ref, err := jsonreference.New(refURI) if err != nil { @@ -51,7 +51,7 @@ func MustCreateRef(refURI string) Ref { return Ref{Ref: jsonreference.MustCreateRef(refURI)} } -// RemoteURI gets the remote uri part of the ref +// RemoteURI gets the remote uri part of the ref. func (r *Ref) RemoteURI() string { if r.String() == "" { return "" @@ -62,7 +62,7 @@ func (r *Ref) RemoteURI() string { return u.String() } -// IsValidURI returns true when the url the ref points to can be found +// IsValidURI returns true when the url the ref points to can be found. 
func (r *Ref) IsValidURI(basepaths ...string) bool { if r.String() == "" { return true @@ -112,7 +112,7 @@ func (r *Ref) IsValidURI(basepaths ...string) bool { } // Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned +// If the child cannot inherit from the parent, an error is returned. func (r *Ref) Inherits(child Ref) (*Ref, error) { ref, err := r.Ref.Inherits(child.Ref) if err != nil { @@ -121,7 +121,7 @@ func (r *Ref) Inherits(child Ref) (*Ref, error) { return &Ref{Ref: *ref}, nil } -// MarshalJSON marshals this ref into a JSON object +// MarshalJSON marshals this ref into a JSON object. func (r Ref) MarshalJSON() ([]byte, error) { str := r.String() if str == "" { @@ -134,7 +134,7 @@ func (r Ref) MarshalJSON() ([]byte, error) { return json.Marshal(v) } -// UnmarshalJSON unmarshals this ref from a JSON object +// UnmarshalJSON unmarshals this ref from a JSON object. func (r *Ref) UnmarshalJSON(d []byte) error { var v map[string]any if err := json.Unmarshal(d, &v); err != nil { @@ -143,7 +143,7 @@ func (r *Ref) UnmarshalJSON(d []byte) error { return r.fromMap(v) } -// GobEncode provides a safe gob encoder for Ref +// GobEncode provides a safe gob encoder for Ref. func (r Ref) GobEncode() ([]byte, error) { var b bytes.Buffer raw, err := r.MarshalJSON() @@ -154,7 +154,7 @@ func (r Ref) GobEncode() ([]byte, error) { return b.Bytes(), err } -// GobDecode provides a safe gob decoder for Ref +// GobDecode provides a safe gob decoder for Ref. 
func (r *Ref) GobDecode(b []byte) error { var raw []byte buf := bytes.NewBuffer(b) diff --git a/vendor/github.com/go-openapi/spec/resolver.go b/vendor/github.com/go-openapi/spec/resolver.go index 600574e118..1bf90c8682 100644 --- a/vendor/github.com/go-openapi/spec/resolver.go +++ b/vendor/github.com/go-openapi/spec/resolver.go @@ -20,7 +20,7 @@ func resolveAnyWithBase(root any, ref *Ref, result any, options *ExpandOptions) return nil } -// ResolveRefWithBase resolves a reference against a context root with preservation of base path +// ResolveRefWithBase resolves a reference against a context root with preservation of base path. func ResolveRefWithBase(root any, ref *Ref, options *ExpandOptions) (*Schema, error) { result := new(Schema) @@ -34,7 +34,7 @@ func ResolveRefWithBase(root any, ref *Ref, options *ExpandOptions) (*Schema, er // ResolveRef resolves a reference for a schema against a context root // ref is guaranteed to be in root (no need to go to external files) // -// ResolveRef is ONLY called from the code generation module +// ResolveRef is ONLY called from the code generation module. func ResolveRef(root any, ref *Ref) (*Schema, error) { res, _, err := ref.GetPointer().Get(root) if err != nil { @@ -57,7 +57,7 @@ func ResolveRef(root any, ref *Ref) (*Schema, error) { } } -// ResolveParameterWithBase resolves a parameter reference against a context root and base path +// ResolveParameterWithBase resolves a parameter reference against a context root and base path. func ResolveParameterWithBase(root any, ref Ref, options *ExpandOptions) (*Parameter, error) { result := new(Parameter) @@ -68,12 +68,12 @@ func ResolveParameterWithBase(root any, ref Ref, options *ExpandOptions) (*Param return result, nil } -// ResolveParameter resolves a parameter reference against a context root +// ResolveParameter resolves a parameter reference against a context root. 
func ResolveParameter(root any, ref Ref) (*Parameter, error) { return ResolveParameterWithBase(root, ref, nil) } -// ResolveResponseWithBase resolves response a reference against a context root and base path +// ResolveResponseWithBase resolves response a reference against a context root and base path. func ResolveResponseWithBase(root any, ref Ref, options *ExpandOptions) (*Response, error) { result := new(Response) @@ -85,12 +85,12 @@ func ResolveResponseWithBase(root any, ref Ref, options *ExpandOptions) (*Respon return result, nil } -// ResolveResponse resolves response a reference against a context root +// ResolveResponse resolves response a reference against a context root. func ResolveResponse(root any, ref Ref) (*Response, error) { return ResolveResponseWithBase(root, ref, nil) } -// ResolvePathItemWithBase resolves response a path item against a context root and base path +// ResolvePathItemWithBase resolves response a path item against a context root and base path. func ResolvePathItemWithBase(root any, ref Ref, options *ExpandOptions) (*PathItem, error) { result := new(PathItem) @@ -103,7 +103,7 @@ func ResolvePathItemWithBase(root any, ref Ref, options *ExpandOptions) (*PathIt // ResolvePathItem resolves response a path item against a context root and base path // -// Deprecated: use ResolvePathItemWithBase instead +// Deprecated: use ResolvePathItemWithBase instead. func ResolvePathItem(root any, ref Ref, options *ExpandOptions) (*PathItem, error) { return ResolvePathItemWithBase(root, ref, options) } @@ -124,7 +124,7 @@ func ResolveItemsWithBase(root any, ref Ref, options *ExpandOptions) (*Items, er // ResolveItems resolves parameter items reference against a context root and base path. // -// Deprecated: use ResolveItemsWithBase instead +// Deprecated: use ResolveItemsWithBase instead. 
func ResolveItems(root any, ref Ref, options *ExpandOptions) (*Items, error) { return ResolveItemsWithBase(root, ref, options) } diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go index e5a7e5c40d..4bb6a2bcd2 100644 --- a/vendor/github.com/go-openapi/spec/response.go +++ b/vendor/github.com/go-openapi/spec/response.go @@ -10,7 +10,7 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// ResponseProps properties specific to a response +// ResponseProps properties specific to a response. type ResponseProps struct { Description string `json:"description"` Schema *Schema `json:"schema,omitempty"` @@ -27,19 +27,19 @@ type Response struct { VendorExtensible } -// NewResponse creates a new response instance +// NewResponse creates a new response instance. func NewResponse() *Response { return new(Response) } -// ResponseRef creates a response as a json reference +// ResponseRef creates a response as a json reference. func ResponseRef(url string) *Response { resp := NewResponse() resp.Ref = MustCreateRef(url) return resp } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (r Response) JSONLookup(token string) (any, error) { if ex, ok := r.Extensions[token]; ok { return &ex, nil @@ -51,7 +51,7 @@ func (r Response) JSONLookup(token string) (any, error) { return ptr, err } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. func (r *Response) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.ResponseProps); err != nil { return err @@ -62,7 +62,7 @@ func (r *Response) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &r.VendorExtensible) } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. 
func (r Response) MarshalJSON() ([]byte, error) { var ( b1 []byte @@ -100,20 +100,20 @@ func (r Response) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b1, b2, b3), nil } -// WithDescription sets the description on this response, allows for chaining +// WithDescription sets the description on this response, allows for chaining. func (r *Response) WithDescription(description string) *Response { r.Description = description return r } // WithSchema sets the schema on this response, allows for chaining. -// Passing a nil argument removes the schema from this response +// Passing a nil argument removes the schema from this response. func (r *Response) WithSchema(schema *Schema) *Response { r.Schema = schema return r } -// AddHeader adds a header to this response +// AddHeader adds a header to this response. func (r *Response) AddHeader(name string, header *Header) *Response { if header == nil { return r.RemoveHeader(name) @@ -125,13 +125,13 @@ func (r *Response) AddHeader(name string, header *Header) *Response { return r } -// RemoveHeader removes a header from this response +// RemoveHeader removes a header from this response. func (r *Response) RemoveHeader(name string) *Response { delete(r.Headers, name) return r } -// AddExample adds an example to this response +// AddExample adds an example to this response. func (r *Response) AddExample(mediaType string, example any) *Response { if r.Examples == nil { r.Examples = make(map[string]any) diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go index 733a1315d0..fb369e4a6b 100644 --- a/vendor/github.com/go-openapi/spec/responses.go +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -31,7 +31,7 @@ type Responses struct { ResponsesProps } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. 
func (r Responses) JSONLookup(token string) (any, error) { if token == "default" { return r.Default, nil @@ -47,7 +47,7 @@ func (r Responses) JSONLookup(token string) (any, error) { return nil, fmt.Errorf("object has no field %q: %w", token, ErrSpec) } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. func (r *Responses) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { return err @@ -62,7 +62,7 @@ func (r *Responses) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. func (r Responses) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(r.ResponsesProps) if err != nil { @@ -84,7 +84,7 @@ type ResponsesProps struct { StatusCodeResponses map[int]Response } -// MarshalJSON marshals responses as JSON +// MarshalJSON marshals responses as JSON. func (r ResponsesProps) MarshalJSON() ([]byte, error) { toser := map[string]Response{} if r.Default != nil { @@ -96,7 +96,7 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { return json.Marshal(toser) } -// UnmarshalJSON unmarshals responses from JSON +// UnmarshalJSON unmarshals responses from JSON. func (r *ResponsesProps) UnmarshalJSON(data []byte) error { var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go index 6623728a41..d7a481bf1a 100644 --- a/vendor/github.com/go-openapi/spec/schema.go +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -13,86 +13,88 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// BooleanProperty creates a boolean property +// BooleanProperty creates a boolean property. 
func BooleanProperty() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} } -// BoolProperty creates a boolean property +// BoolProperty creates a boolean property. func BoolProperty() *Schema { return BooleanProperty() } -// StringProperty creates a string property +// StringProperty creates a string property. func StringProperty() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} } -// CharProperty creates a string property +// CharProperty creates a string property. func CharProperty() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} } -// Float64Property creates a float64/double property +// Float64Property creates a float64/double property. func Float64Property() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} } -// Float32Property creates a float32/float property +// Float32Property creates a float32/float property. func Float32Property() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} } -// Int8Property creates an int8 property +// Int8Property creates an int8 property. func Int8Property() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} } -// Int16Property creates an int16 property +// Int16Property creates an int16 property. func Int16Property() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} } -// Int32Property creates an int32 property +// Int32Property creates an int32 property. func Int32Property() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} } -// Int64Property creates an int64 property +// Int64Property creates an int64 property. 
func Int64Property() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} } -// StrFmtProperty creates a property for the named string format +// StrFmtProperty creates a property for the named string format. func StrFmtProperty(format string) *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} } -// DateProperty creates a date property +// DateProperty creates a date property. func DateProperty() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} } -// DateTimeProperty creates a date time property +// DateTimeProperty creates a date time property. func DateTimeProperty() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} } -// MapProperty creates a map property +// MapProperty creates a map property. func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, - AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} + return &Schema{SchemaProps: SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}, + }} } -// RefProperty creates a ref property +// RefProperty creates a ref property. func RefProperty(name string) *Schema { return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} } -// RefSchema creates a ref property +// RefSchema creates a ref property. func RefSchema(name string) *Schema { return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} } -// ArrayProperty creates an array property +// ArrayProperty creates an array property. 
func ArrayProperty(items *Schema) *Schema { if items == nil { return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} @@ -100,17 +102,17 @@ func ArrayProperty(items *Schema) *Schema { return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} } -// ComposedSchema creates a schema with allOf +// ComposedSchema creates a schema with allOf. func ComposedSchema(schemas ...Schema) *Schema { s := new(Schema) s.AllOf = schemas return s } -// SchemaURL represents a schema url +// SchemaURL represents a schema url. type SchemaURL string -// MarshalJSON marshal this to JSON +// MarshalJSON marshal this to JSON. func (r SchemaURL) MarshalJSON() ([]byte, error) { if r == "" { return []byte("{}"), nil @@ -119,7 +121,7 @@ func (r SchemaURL) MarshalJSON() ([]byte, error) { return json.Marshal(v) } -// UnmarshalJSON unmarshal this from JSON +// UnmarshalJSON unmarshal this from JSON. func (r *SchemaURL) UnmarshalJSON(data []byte) error { var v map[string]any if err := json.Unmarshal(data, &v); err != nil { @@ -145,7 +147,7 @@ func (r *SchemaURL) fromMap(v map[string]any) error { return nil } -// SchemaProps describes a JSON schema (draft 4) +// SchemaProps describes a JSON schema (draft 4). type SchemaProps struct { ID string `json:"id,omitempty"` Ref Ref `json:"-"` @@ -184,7 +186,7 @@ type SchemaProps struct { Definitions Definitions `json:"definitions,omitempty"` } -// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4). 
type SwaggerSchemaProps struct { Discriminator string `json:"discriminator,omitempty"` ReadOnly bool `json:"readOnly,omitempty"` @@ -208,7 +210,7 @@ type Schema struct { ExtraProps map[string]any `json:"-"` } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. func (s Schema) JSONLookup(token string) (any, error) { if ex, ok := s.Extensions[token]; ok { return &ex, nil @@ -226,31 +228,31 @@ func (s Schema) JSONLookup(token string) (any, error) { return r, err } -// WithID sets the id for this schema, allows for chaining +// WithID sets the id for this schema, allows for chaining. func (s *Schema) WithID(id string) *Schema { s.ID = id return s } -// WithTitle sets the title for this schema, allows for chaining +// WithTitle sets the title for this schema, allows for chaining. func (s *Schema) WithTitle(title string) *Schema { s.Title = title return s } -// WithDescription sets the description for this schema, allows for chaining +// WithDescription sets the description for this schema, allows for chaining. func (s *Schema) WithDescription(description string) *Schema { s.Description = description return s } -// WithProperties sets the properties for this schema +// WithProperties sets the properties for this schema. func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { s.Properties = schemas return s } -// SetProperty sets a property on this schema +// SetProperty sets a property on this schema. func (s *Schema) SetProperty(name string, schema Schema) *Schema { if s.Properties == nil { s.Properties = make(map[string]Schema) @@ -259,32 +261,32 @@ func (s *Schema) SetProperty(name string, schema Schema) *Schema { return s } -// WithAllOf sets the all of property +// WithAllOf sets the all of property. 
func (s *Schema) WithAllOf(schemas ...Schema) *Schema { s.AllOf = schemas return s } -// WithMaxProperties sets the max number of properties an object can have +// WithMaxProperties sets the max number of properties an object can have. func (s *Schema) WithMaxProperties(maximum int64) *Schema { s.MaxProperties = &maximum return s } -// WithMinProperties sets the min number of properties an object must have +// WithMinProperties sets the min number of properties an object must have. func (s *Schema) WithMinProperties(minimum int64) *Schema { s.MinProperties = &minimum return s } -// Typed sets the type of this schema for a single value item +// Typed sets the type of this schema for a single value item. func (s *Schema) Typed(tpe, format string) *Schema { s.Type = []string{tpe} s.Format = format return s } -// AddType adds a type with potential format to the types for this schema +// AddType adds a type with potential format to the types for this schema. func (s *Schema) AddType(tpe, format string) *Schema { s.Type = append(s.Type, tpe) if format != "" { @@ -299,124 +301,124 @@ func (s *Schema) AsNullable() *Schema { return s } -// CollectionOf a fluent builder method for an array parameter +// CollectionOf a fluent builder method for an array parameter. func (s *Schema) CollectionOf(items Schema) *Schema { s.Type = []string{jsonArray} s.Items = &SchemaOrArray{Schema: &items} return s } -// WithDefault sets the default value on this parameter +// WithDefault sets the default value on this parameter. func (s *Schema) WithDefault(defaultValue any) *Schema { s.Default = defaultValue return s } -// WithRequired flags this parameter as required +// WithRequired flags this parameter as required. func (s *Schema) WithRequired(items ...string) *Schema { s.Required = items return s } -// AddRequired adds field names to the required properties array +// AddRequired adds field names to the required properties array. 
func (s *Schema) AddRequired(items ...string) *Schema { s.Required = append(s.Required, items...) return s } -// WithMaxLength sets a max length value +// WithMaxLength sets a max length value. func (s *Schema) WithMaxLength(maximum int64) *Schema { s.MaxLength = &maximum return s } -// WithMinLength sets a min length value +// WithMinLength sets a min length value. func (s *Schema) WithMinLength(minimum int64) *Schema { s.MinLength = &minimum return s } -// WithPattern sets a pattern value +// WithPattern sets a pattern value. func (s *Schema) WithPattern(pattern string) *Schema { s.Pattern = pattern return s } -// WithMultipleOf sets a multiple of value +// WithMultipleOf sets a multiple of value. func (s *Schema) WithMultipleOf(number float64) *Schema { s.MultipleOf = &number return s } -// WithMaximum sets a maximum number value +// WithMaximum sets a maximum number value. func (s *Schema) WithMaximum(maximum float64, exclusive bool) *Schema { s.Maximum = &maximum s.ExclusiveMaximum = exclusive return s } -// WithMinimum sets a minimum number value +// WithMinimum sets a minimum number value. func (s *Schema) WithMinimum(minimum float64, exclusive bool) *Schema { s.Minimum = &minimum s.ExclusiveMinimum = exclusive return s } -// WithEnum sets a the enum values (replace) +// WithEnum sets a the enum values (replace). func (s *Schema) WithEnum(values ...any) *Schema { s.Enum = append([]any{}, values...) return s } -// WithMaxItems sets the max items +// WithMaxItems sets the max items. func (s *Schema) WithMaxItems(size int64) *Schema { s.MaxItems = &size return s } -// WithMinItems sets the min items +// WithMinItems sets the min items. func (s *Schema) WithMinItems(size int64) *Schema { s.MinItems = &size return s } -// UniqueValues dictates that this array can only have unique items +// UniqueValues dictates that this array can only have unique items. 
func (s *Schema) UniqueValues() *Schema { s.UniqueItems = true return s } -// AllowDuplicates this array can have duplicates +// AllowDuplicates this array can have duplicates. func (s *Schema) AllowDuplicates() *Schema { s.UniqueItems = false return s } -// AddToAllOf adds a schema to the allOf property +// AddToAllOf adds a schema to the allOf property. func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { s.AllOf = append(s.AllOf, schemas...) return s } -// WithDiscriminator sets the name of the discriminator field +// WithDiscriminator sets the name of the discriminator field. func (s *Schema) WithDiscriminator(discriminator string) *Schema { s.Discriminator = discriminator return s } -// AsReadOnly flags this schema as readonly +// AsReadOnly flags this schema as readonly. func (s *Schema) AsReadOnly() *Schema { s.ReadOnly = true return s } -// AsWritable flags this schema as writeable (not read-only) +// AsWritable flags this schema as writeable (not read-only). func (s *Schema) AsWritable() *Schema { s.ReadOnly = false return s } -// WithExample sets the example for this schema +// WithExample sets the example for this schema. func (s *Schema) WithExample(example any) *Schema { s.Example = example return s @@ -440,7 +442,7 @@ func (s *Schema) WithExternalDocs(description, url string) *Schema { return s } -// WithXMLName sets the xml name for the object +// WithXMLName sets the xml name for the object. func (s *Schema) WithXMLName(name string) *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -449,7 +451,7 @@ func (s *Schema) WithXMLName(name string) *Schema { return s } -// WithXMLNamespace sets the xml namespace for the object +// WithXMLNamespace sets the xml namespace for the object. 
func (s *Schema) WithXMLNamespace(namespace string) *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -458,7 +460,7 @@ func (s *Schema) WithXMLNamespace(namespace string) *Schema { return s } -// WithXMLPrefix sets the xml prefix for the object +// WithXMLPrefix sets the xml prefix for the object. func (s *Schema) WithXMLPrefix(prefix string) *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -467,7 +469,7 @@ func (s *Schema) WithXMLPrefix(prefix string) *Schema { return s } -// AsXMLAttribute flags this object as xml attribute +// AsXMLAttribute flags this object as xml attribute. func (s *Schema) AsXMLAttribute() *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -476,7 +478,7 @@ func (s *Schema) AsXMLAttribute() *Schema { return s } -// AsXMLElement flags this object as an xml node +// AsXMLElement flags this object as an xml node. func (s *Schema) AsXMLElement() *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -485,7 +487,7 @@ func (s *Schema) AsXMLElement() *Schema { return s } -// AsWrappedXML flags this object as wrapped, this is mostly useful for array types +// AsWrappedXML flags this object as wrapped, this is mostly useful for array types. func (s *Schema) AsWrappedXML() *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -494,7 +496,7 @@ func (s *Schema) AsWrappedXML() *Schema { return s } -// AsUnwrappedXML flags this object as an xml node +// AsUnwrappedXML flags this object as an xml node. func (s *Schema) AsUnwrappedXML() *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -524,13 +526,13 @@ func (s *Schema) SetValidations(val SchemaValidations) { s.PatternProperties = val.PatternProperties } -// WithValidations is a fluent method to set schema validations +// WithValidations is a fluent method to set schema validations. 
func (s *Schema) WithValidations(val SchemaValidations) *Schema { s.SetValidations(val) return s } -// Validations returns a clone of the validations for this schema +// Validations returns a clone of the validations for this schema. func (s Schema) Validations() SchemaValidations { return SchemaValidations{ CommonValidations: CommonValidations{ @@ -553,40 +555,40 @@ func (s Schema) Validations() SchemaValidations { } } -// MarshalJSON marshal this to JSON +// MarshalJSON marshal this to JSON. func (s Schema) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(s.SchemaProps) if err != nil { - return nil, fmt.Errorf("schema props %v: %w", err, ErrSpec) + return nil, fmt.Errorf("schema props %w: %w", err, ErrSpec) } b2, err := json.Marshal(s.VendorExtensible) if err != nil { - return nil, fmt.Errorf("vendor props %v: %w", err, ErrSpec) + return nil, fmt.Errorf("vendor props %w: %w", err, ErrSpec) } b3, err := s.Ref.MarshalJSON() if err != nil { - return nil, fmt.Errorf("ref prop %v: %w", err, ErrSpec) + return nil, fmt.Errorf("ref prop %w: %w", err, ErrSpec) } b4, err := s.Schema.MarshalJSON() if err != nil { - return nil, fmt.Errorf("schema prop %v: %w", err, ErrSpec) + return nil, fmt.Errorf("schema prop %w: %w", err, ErrSpec) } b5, err := json.Marshal(s.SwaggerSchemaProps) if err != nil { - return nil, fmt.Errorf("common validations %v: %w", err, ErrSpec) + return nil, fmt.Errorf("common validations %w: %w", err, ErrSpec) } var b6 []byte if s.ExtraProps != nil { jj, err := json.Marshal(s.ExtraProps) if err != nil { - return nil, fmt.Errorf("extra props %v: %w", err, ErrSpec) + return nil, fmt.Errorf("extra props %w: %w", err, ErrSpec) } b6 = jj } return jsonutils.ConcatJSON(b1, b2, b3, b4, b5, b6), nil } -// UnmarshalJSON marshal this from JSON +// UnmarshalJSON marshal this from JSON. 
func (s *Schema) UnmarshalJSON(data []byte) error { props := struct { SchemaProps diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go index 8d4a985325..0894c932c6 100644 --- a/vendor/github.com/go-openapi/spec/schema_loader.go +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -24,7 +24,7 @@ import ( // NOTE: if you are using the go-openapi/loads package, it will override // this value with its own default (a loader to retrieve YAML documents as // well as JSON ones). -var PathLoader = func(pth string) (json.RawMessage, error) { +var PathLoader = func(pth string) (json.RawMessage, error) { //nolint:gochecknoglobals // package-level default loader, overridable by go-openapi/loads data, err := loading.LoadFromFileOrHTTP(pth) if err != nil { return nil, err @@ -76,7 +76,7 @@ type schemaLoader struct { // // If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them. // -// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct +// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct. 
func (r *schemaLoader) Resolve(ref *Ref, target any, basePath string) error { return r.resolveRef(ref, target, basePath) } @@ -136,7 +136,7 @@ func (r *schemaLoader) resolveRef(ref *Ref, target any, basePath string) error { root := r.root if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { if baseRef, erb := NewRef(basePath); erb == nil { - root, _, _, _ = r.load(baseRef.GetURL()) + root, _ = r.load(baseRef.GetURL()) } } @@ -144,7 +144,7 @@ func (r *schemaLoader) resolveRef(ref *Ref, target any, basePath string) error { data = root } else { baseRef := normalizeRef(ref, basePath) - data, _, _, err = r.load(baseRef.GetURL()) + data, err = r.load(baseRef.GetURL()) if err != nil { return err } @@ -160,33 +160,32 @@ func (r *schemaLoader) resolveRef(ref *Ref, target any, basePath string) error { return jsonutils.FromDynamicJSON(res, target) } -func (r *schemaLoader) load(refURL *url.URL) (any, url.URL, bool, error) { +func (r *schemaLoader) load(refURL *url.URL) (any, error) { debugLog("loading schema from url: %s", refURL) toFetch := *refURL toFetch.Fragment = "" - var err error pth := toFetch.String() normalized := normalizeBase(pth) debugLog("loading doc from: %s", normalized) data, fromCache := r.cache.Get(normalized) if fromCache { - return data, toFetch, fromCache, nil + return data, nil } b, err := r.context.loadDoc(normalized) if err != nil { - return nil, url.URL{}, false, err + return nil, err } var doc any if err := json.Unmarshal(b, &doc); err != nil { - return nil, url.URL{}, false, err + return nil, err } r.cache.Set(normalized, doc) - return doc, toFetch, fromCache, nil + return doc, nil } // isCircular detects cycles in sequences of $ref. 
@@ -293,8 +292,8 @@ func defaultSchemaLoader( root any, expandOptions *ExpandOptions, cache ResolutionCache, - context *resolverContext) *schemaLoader { - + context *resolverContext, +) *schemaLoader { if expandOptions == nil { expandOptions = &ExpandOptions{} } diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go index 46a4a7e2f9..6d9019e749 100644 --- a/vendor/github.com/go-openapi/spec/security_scheme.go +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -20,17 +20,17 @@ const ( accessCode = "accessCode" ) -// BasicAuth creates a basic auth security scheme +// BasicAuth creates a basic auth security scheme. func BasicAuth() *SecurityScheme { return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} } -// APIKeyAuth creates an api key auth security scheme +// APIKeyAuth creates an api key auth security scheme. func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} } -// OAuth2Implicit creates an implicit flow oauth2 security scheme +// OAuth2Implicit creates an implicit flow oauth2 security scheme. func OAuth2Implicit(authorizationURL string) *SecurityScheme { return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ Type: oauth2, @@ -39,7 +39,7 @@ func OAuth2Implicit(authorizationURL string) *SecurityScheme { }} } -// OAuth2Password creates a password flow oauth2 security scheme +// OAuth2Password creates a password flow oauth2 security scheme. func OAuth2Password(tokenURL string) *SecurityScheme { return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ Type: oauth2, @@ -48,7 +48,7 @@ func OAuth2Password(tokenURL string) *SecurityScheme { }} } -// OAuth2Application creates an application flow oauth2 security scheme +// OAuth2Application creates an application flow oauth2 security scheme. 
func OAuth2Application(tokenURL string) *SecurityScheme { return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ Type: oauth2, @@ -57,7 +57,7 @@ func OAuth2Application(tokenURL string) *SecurityScheme { }} } -// OAuth2AccessToken creates an access token flow oauth2 security scheme +// OAuth2AccessToken creates an access token flow oauth2 security scheme. func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ Type: oauth2, @@ -67,7 +67,7 @@ func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { }} } -// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section. type SecuritySchemeProps struct { Description string `json:"description,omitempty"` Type string `json:"type"` @@ -79,7 +79,7 @@ type SecuritySchemeProps struct { Scopes map[string]string `json:"scopes,omitempty"` // oauth2 } -// AddScope adds a scope to this security scheme +// AddScope adds a scope to this security scheme. func (s *SecuritySchemeProps) AddScope(scope, description string) { if s.Scopes == nil { s.Scopes = make(map[string]string) @@ -97,7 +97,7 @@ type SecurityScheme struct { SecuritySchemeProps } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. func (s SecurityScheme) JSONLookup(token string) (any, error) { if ex, ok := s.Extensions[token]; ok { return &ex, nil @@ -107,7 +107,7 @@ func (s SecurityScheme) JSONLookup(token string) (any, error) { return r, err } -// MarshalJSON marshal this to JSON +// MarshalJSON marshal this to JSON. 
func (s SecurityScheme) MarshalJSON() ([]byte, error) { var ( b1 []byte @@ -150,7 +150,7 @@ func (s SecurityScheme) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b1, b2), nil } -// UnmarshalJSON marshal this from JSON +// UnmarshalJSON marshal this from JSON. func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { return err diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go index 0d0aaabc48..4eba04b2d1 100644 --- a/vendor/github.com/go-openapi/spec/spec.go +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -13,13 +13,13 @@ import ( //go:generate perl -pi -e s,Json,JSON,g bindata.go const ( - // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs + // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs. SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema + // JSONSchemaURL the url for the json schema. JSONSchemaURL = "http://json-schema.org/draft-04/schema#" ) -// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error +// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error. func MustLoadJSONSchemaDraft04() *Schema { d, e := JSONSchemaDraft04() if e != nil { @@ -28,7 +28,7 @@ func MustLoadJSONSchemaDraft04() *Schema { return d } -// JSONSchemaDraft04 loads the json schema document for json schema draft04 +// JSONSchemaDraft04 loads the json schema document for json schema draft04. func JSONSchemaDraft04() (*Schema, error) { b, err := jsonschemaDraft04JSONBytes() if err != nil { @@ -42,7 +42,7 @@ func JSONSchemaDraft04() (*Schema, error) { return schema, nil } -// MustLoadSwagger20Schema panics when Swagger20Schema returns an error +// MustLoadSwagger20Schema panics when Swagger20Schema returns an error. 
func MustLoadSwagger20Schema() *Schema { d, e := Swagger20Schema() if e != nil { @@ -51,9 +51,8 @@ func MustLoadSwagger20Schema() *Schema { return d } -// Swagger20Schema loads the swagger 2.0 schema from the embedded assets +// Swagger20Schema loads the swagger 2.0 schema from the embedded assets. func Swagger20Schema() (*Schema, error) { - b, err := v2SchemaJSONBytes() if err != nil { return nil, err diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index f7cd0f608c..dbe32db8a3 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -25,7 +25,7 @@ type Swagger struct { SwaggerProps } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (s Swagger) JSONLookup(token string) (any, error) { if ex, ok := s.Extensions[token]; ok { return &ex, nil @@ -34,7 +34,7 @@ func (s Swagger) JSONLookup(token string) (any, error) { return r, err } -// MarshalJSON marshals this swagger structure to json +// MarshalJSON marshals this swagger structure to json. func (s Swagger) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(s.SwaggerProps) if err != nil { @@ -47,7 +47,7 @@ func (s Swagger) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b1, b2), nil } -// UnmarshalJSON unmarshals a swagger spec from json +// UnmarshalJSON unmarshals a swagger spec from json. func (s *Swagger) UnmarshalJSON(data []byte) error { var sw Swagger if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { @@ -60,7 +60,7 @@ func (s *Swagger) UnmarshalJSON(data []byte) error { return nil } -// GobEncode provides a safe gob encoder for Swagger, including extensions +// GobEncode provides a safe gob encoder for Swagger, including extensions. 
func (s Swagger) GobEncode() ([]byte, error) { var b bytes.Buffer raw := struct { @@ -74,7 +74,7 @@ func (s Swagger) GobEncode() ([]byte, error) { return b.Bytes(), err } -// GobDecode provides a safe gob decoder for Swagger, including extensions +// GobDecode provides a safe gob decoder for Swagger, including extensions. func (s *Swagger) GobDecode(b []byte) error { var raw struct { Props SwaggerProps @@ -95,7 +95,7 @@ func (s *Swagger) GobDecode(b []byte) error { // NOTE: validation rules // - the scheme, when present must be from [http, https, ws, wss] // - BasePath must start with a leading "/" -// - Paths is required +// - Paths is required. type SwaggerProps struct { ID string `json:"id,omitempty"` Consumes []string `json:"consumes,omitempty"` @@ -126,7 +126,7 @@ type gobSwaggerPropsAlias struct { SecurityIsEmpty bool } -// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements. func (o SwaggerProps) GobEncode() ([]byte, error) { raw := gobSwaggerPropsAlias{ Alias: (*swaggerPropsAlias)(&o), @@ -171,7 +171,7 @@ func (o SwaggerProps) GobEncode() ([]byte, error) { return b.Bytes(), err } -// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements +// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements. func (o *SwaggerProps) GobDecode(b []byte) error { var raw gobSwaggerPropsAlias @@ -207,16 +207,16 @@ func (o *SwaggerProps) GobDecode(b []byte) error { return nil } -// Dependencies represent a dependencies property +// Dependencies represent a dependencies property. type Dependencies map[string]SchemaOrStringArray -// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property +// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property. 
type SchemaOrBool struct { Allows bool Schema *Schema } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. func (s SchemaOrBool) JSONLookup(token string) (any, error) { if token == "allows" { return s.Allows, nil @@ -225,10 +225,12 @@ func (s SchemaOrBool) JSONLookup(token string) (any, error) { return r, err } -var jsTrue = []byte("true") -var jsFalse = []byte("false") +var ( + jsTrue = []byte("true") //nolint:gochecknoglobals // constant-like byte slices for JSON marshaling + jsFalse = []byte("false") //nolint:gochecknoglobals // constant-like byte slices for JSON marshaling +) -// MarshalJSON convert this object to JSON +// MarshalJSON convert this object to JSON. func (s SchemaOrBool) MarshalJSON() ([]byte, error) { if s.Schema != nil { return json.Marshal(s.Schema) @@ -240,7 +242,7 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) { return jsTrue, nil } -// UnmarshalJSON converts this bool or schema object from a JSON structure +// UnmarshalJSON converts this bool or schema object from a JSON structure. func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { var nw SchemaOrBool if len(data) > 0 { @@ -257,19 +259,19 @@ func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { return nil } -// SchemaOrStringArray represents a schema or a string array +// SchemaOrStringArray represents a schema or a string array. type SchemaOrStringArray struct { Schema *Schema Property []string } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. func (s SchemaOrStringArray) JSONLookup(token string) (any, error) { r, _, err := jsonpointer.GetForToken(s.Schema, token) return r, err } -// MarshalJSON converts this schema object or array into JSON structure +// MarshalJSON converts this schema object or array into JSON structure. 
func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { if len(s.Property) > 0 { return json.Marshal(s.Property) @@ -280,7 +282,7 @@ func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { return []byte("null"), nil } -// UnmarshalJSON converts this schema object or array from a JSON structure +// UnmarshalJSON converts this schema object or array from a JSON structure. func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { var first byte if len(data) > 1 { @@ -318,15 +320,15 @@ type Definitions map[string]Schema type SecurityDefinitions map[string]*SecurityScheme // StringOrArray represents a value that can either be a string -// or an array of strings. Mainly here for serialization purposes +// or an array of strings. Mainly here for serialization purposes. type StringOrArray []string -// Contains returns true when the value is contained in the slice +// Contains returns true when the value is contained in the slice. func (s StringOrArray) Contains(value string) bool { return slices.Contains(s, value) } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. func (s SchemaOrArray) JSONLookup(token string) (any, error) { if _, err := strconv.Atoi(token); err == nil { r, _, err := jsonpointer.GetForToken(s.Schemas, token) @@ -336,7 +338,7 @@ func (s SchemaOrArray) JSONLookup(token string) (any, error) { return r, err } -// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string. func (s *StringOrArray) UnmarshalJSON(data []byte) error { var first byte if len(data) > 1 { @@ -368,7 +370,7 @@ func (s *StringOrArray) UnmarshalJSON(data []byte) error { } } -// MarshalJSON converts this string or array to a JSON array or JSON string +// MarshalJSON converts this string or array to a JSON array or JSON string. 
func (s StringOrArray) MarshalJSON() ([]byte, error) { if len(s) == 1 { return json.Marshal([]string(s)[0]) @@ -377,13 +379,13 @@ func (s StringOrArray) MarshalJSON() ([]byte, error) { } // SchemaOrArray represents a value that can either be a Schema -// or an array of Schema. Mainly here for serialization purposes +// or an array of Schema. Mainly here for serialization purposes. type SchemaOrArray struct { Schema *Schema Schemas []Schema } -// Len returns the number of schemas in this property +// Len returns the number of schemas in this property. func (s SchemaOrArray) Len() int { if s.Schema != nil { return 1 @@ -391,7 +393,7 @@ func (s SchemaOrArray) Len() int { return len(s.Schemas) } -// ContainsType returns true when one of the schemas is of the specified type +// ContainsType returns true when one of the schemas is of the specified type. func (s *SchemaOrArray) ContainsType(name string) bool { if s.Schema != nil { return s.Schema.Type != nil && s.Schema.Type.Contains(name) @@ -399,7 +401,7 @@ func (s *SchemaOrArray) ContainsType(name string) bool { return false } -// MarshalJSON converts this schema object or array into JSON structure +// MarshalJSON converts this schema object or array into JSON structure. func (s SchemaOrArray) MarshalJSON() ([]byte, error) { if len(s.Schemas) > 0 { return json.Marshal(s.Schemas) @@ -407,7 +409,7 @@ func (s SchemaOrArray) MarshalJSON() ([]byte, error) { return json.Marshal(s.Schema) } -// UnmarshalJSON converts this schema object or array from a JSON structure +// UnmarshalJSON converts this schema object or array from a JSON structure. 
func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { var nw SchemaOrArray var first byte diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go index ae98fd985f..af3fb0a4e8 100644 --- a/vendor/github.com/go-openapi/spec/tag.go +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -10,7 +10,7 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// TagProps describe a tag entry in the top level tags section of a swagger spec +// TagProps describe a tag entry in the top level tags section of a swagger spec. type TagProps struct { Description string `json:"description,omitempty"` Name string `json:"name,omitempty"` @@ -27,12 +27,12 @@ type Tag struct { TagProps } -// NewTag creates a new tag +// NewTag creates a new tag. func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. func (t Tag) JSONLookup(token string) (any, error) { if ex, ok := t.Extensions[token]; ok { return &ex, nil @@ -42,7 +42,7 @@ func (t Tag) JSONLookup(token string) (any, error) { return r, err } -// MarshalJSON marshal this to JSON +// MarshalJSON marshal this to JSON. func (t Tag) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(t.TagProps) if err != nil { @@ -55,7 +55,7 @@ func (t Tag) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b1, b2), nil } -// UnmarshalJSON marshal this from JSON +// UnmarshalJSON marshal this from JSON. 
func (t *Tag) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &t.TagProps); err != nil { return err diff --git a/vendor/github.com/go-openapi/spec/validations.go b/vendor/github.com/go-openapi/spec/validations.go index 2c0dc42479..a82c2ffe13 100644 --- a/vendor/github.com/go-openapi/spec/validations.go +++ b/vendor/github.com/go-openapi/spec/validations.go @@ -3,7 +3,7 @@ package spec -// CommonValidations describe common JSON-schema validations +// CommonValidations describe common JSON-schema validations. type CommonValidations struct { Maximum *float64 `json:"maximum,omitempty"` ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` @@ -143,22 +143,22 @@ func (v CommonValidations) Validations() SchemaValidations { } } -// HasNumberValidations indicates if the validations are for numbers or integers +// HasNumberValidations indicates if the validations are for numbers or integers. func (v CommonValidations) HasNumberValidations() bool { return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil } -// HasStringValidations indicates if the validations are for strings +// HasStringValidations indicates if the validations are for strings. func (v CommonValidations) HasStringValidations() bool { return v.MaxLength != nil || v.MinLength != nil || v.Pattern != "" } -// HasArrayValidations indicates if the validations are for arrays +// HasArrayValidations indicates if the validations are for arrays. func (v CommonValidations) HasArrayValidations() bool { return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems } -// HasEnum indicates if the validation includes some enum constraint +// HasEnum indicates if the validation includes some enum constraint. 
func (v CommonValidations) HasEnum() bool { return len(v.Enum) > 0 } @@ -175,12 +175,12 @@ type SchemaValidations struct { MinProperties *int64 `json:"minProperties,omitempty"` } -// HasObjectValidations indicates if the validations are for objects +// HasObjectValidations indicates if the validations are for objects. func (v SchemaValidations) HasObjectValidations() bool { return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil } -// SetValidations for schema validations +// SetValidations for schema validations. func (v *SchemaValidations) SetValidations(val SchemaValidations) { v.CommonValidations.SetValidations(val) v.PatternProperties = val.PatternProperties @@ -188,7 +188,7 @@ func (v *SchemaValidations) SetValidations(val SchemaValidations) { v.MinProperties = val.MinProperties } -// Validations for a schema +// Validations for a schema. func (v SchemaValidations) Validations() SchemaValidations { val := v.CommonValidations.Validations() val.PatternProperties = v.PatternProperties diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go index bf2f8f18b2..07f7ef8ccd 100644 --- a/vendor/github.com/go-openapi/spec/xml_object.go +++ b/vendor/github.com/go-openapi/spec/xml_object.go @@ -14,43 +14,43 @@ type XMLObject struct { Wrapped bool `json:"wrapped,omitempty"` } -// WithName sets the xml name for the object +// WithName sets the xml name for the object. func (x *XMLObject) WithName(name string) *XMLObject { x.Name = name return x } -// WithNamespace sets the xml namespace for the object +// WithNamespace sets the xml namespace for the object. func (x *XMLObject) WithNamespace(namespace string) *XMLObject { x.Namespace = namespace return x } -// WithPrefix sets the xml prefix for the object +// WithPrefix sets the xml prefix for the object. 
func (x *XMLObject) WithPrefix(prefix string) *XMLObject { x.Prefix = prefix return x } -// AsAttribute flags this object as xml attribute +// AsAttribute flags this object as xml attribute. func (x *XMLObject) AsAttribute() *XMLObject { x.Attribute = true return x } -// AsElement flags this object as an xml node +// AsElement flags this object as an xml node. func (x *XMLObject) AsElement() *XMLObject { x.Attribute = false return x } -// AsWrapped flags this object as wrapped, this is mostly useful for array types +// AsWrapped flags this object as wrapped, this is mostly useful for array types. func (x *XMLObject) AsWrapped() *XMLObject { x.Wrapped = true return x } -// AsUnwrapped flags this object as an xml node +// AsUnwrapped flags this object as an xml node. func (x *XMLObject) AsUnwrapped() *XMLObject { x.Wrapped = false return x diff --git a/vendor/github.com/go-openapi/strfmt/.codecov.yml b/vendor/github.com/go-openapi/strfmt/.codecov.yml new file mode 100644 index 0000000000..a5ba8e96d8 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.codecov.yml @@ -0,0 +1,9 @@ +codecov: + notify: + after_n_builds: 2 + +coverage: + status: + patch: + default: + target: 80% diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore index dd91ed6a04..885dc27ab0 100644 --- a/vendor/github.com/go-openapi/strfmt/.gitignore +++ b/vendor/github.com/go-openapi/strfmt/.gitignore @@ -1,2 +1,6 @@ -secrets.yml -coverage.out +*.out +*.cov +.idea +.env +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml index 1ad5adf47e..3c4cd489a1 100644 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -2,25 +2,11 @@ version: "2" linters: default: all disable: - - cyclop - depguard - - errchkjson - - errorlint - - exhaustruct - - forcetypeassert - funlen - - gochecknoglobals - - gochecknoinits - 
- gocognit - - godot + - gomoddirectives - godox - - gosmopolitan - - inamedparam - - intrange - - ireturn - - lll - - musttag - - nestif + - exhaustruct - nlreturn - nonamedreturns - noinlineerr @@ -29,7 +15,6 @@ linters: - testpackage - thelper - tparallel - - unparam - varnamelen - whitespace - wrapcheck @@ -41,8 +26,15 @@ linters: goconst: min-len: 2 min-occurrences: 3 + cyclop: + max-complexity: 20 gocyclo: - min-complexity: 45 + min-complexity: 20 + exhaustive: + default-signifies-exhaustive: true + default-case-required: true + lll: + line-length: 180 exclusions: generated: lax presets: @@ -58,6 +50,7 @@ formatters: enable: - gofmt - goimports + - gofumpt exclusions: generated: lax paths: diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md index 9322b065e3..bac878f216 100644 --- a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. 
## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/CONTRIBUTORS.md b/vendor/github.com/go-openapi/strfmt/CONTRIBUTORS.md new file mode 100644 index 0000000000..e49700d4d2 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/CONTRIBUTORS.md @@ -0,0 +1,52 @@ +# Contributors + +- Repository: ['go-openapi/strfmt'] + +| Total Contributors | Total Contributions | +| --- | --- | +| 40 | 225 | + +| Username | All Time Contribution Count | All Commits | +| --- | --- | --- | +| @casualjim | 88 | | +| @fredbi | 57 | | +| @youyuanwu | 13 | | +| @jlambatl | 9 | | +| @GlenDC | 5 | | +| @padamstx | 4 | | +| @dimovnike | 3 | | +| @carlv-stripe | 3 | | +| @Copilot | 3 | | +| @keramix | 3 | | +| @gregmarr | 2 | | +| @vadorovsky | 2 | | +| @Ompluscator | 2 | | +| @johnnyg | 2 | | +| @chakrit | 2 | | +| @bg451 | 2 | | +| @aleksandr-vin | 2 | | +| @ujjwalsh | 1 | | +| @kenjones-cisco | 1 | | +| @jwalter1-quest | 1 | | +| @ccoVeille | 1 | | +| @tylerb | 1 | | +| @tzneal | 1 | | +| @tklauser | 1 | | +| @SuperQ | 1 | | +| @srizzling | 1 | | +| @shawnps | 1 | | +| @prashantv | 1 | | +| @krnkl | 1 | | +| @mstoykov | 1 | | +| @maxatome | 1 | | +| @jerome-laforge | 1 | | +| @justincormack | 1 | | +| @elipavlov | 1 | | +| @gbjk | 1 | | +| @enesanbar | 1 | | +| @CodeLingoBot | 1 | | +| @Kunde21 | 1 | | +| @bvwells | 1 | | +| @ligustah | 1 | | + + _this file was generated by the [Contributors GitHub Action](https://github.com/github-community-projects/contributors)_ diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md index de5afe1376..a0cf642754 100644 --- a/vendor/github.com/go-openapi/strfmt/README.md +++ 
b/vendor/github.com/go-openapi/strfmt/README.md @@ -1,15 +1,61 @@ -# Strfmt [![Build Status](https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) +# strfmt + + +[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url] + + + +[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] + + +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] + +--- + +Golang support for string formats defined by JSON Schema and OpenAPI. + +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). 
[![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + +* **2026-03-07** : v0.26.0 **dropped dependency to the mongodb driver** + * mongodb users can still use this package without any change + * however, we have frozen the back-compatible support for mongodb driver at v2.5.0 + * users who want to keep-up with future evolutions (possibly incompatible) of this driver + can do so by adding a blank import in their program: `import _ "github.com/go-openapi/strfmt/enable/mongodb"`. + This will switch the behavior to the actual driver, which remains regularly updated as an independent module. + +## Status + +API is stable. + +## Import this library in your project + +```cmd +go get github.com/go-openapi/strfmt +``` + +## Contents This package exposes a registry of data types to support string formats in the go-openapi toolkit. -strfmt represents a well known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those. +`strfmt` represents a well known string format such as hostname or email. + +This package provides a few extra formats such as credit card (US), color, etc. + +Format types can serialize and deserialize JSON or from a SQL database. + +BSON is also supported (MongoDB). + +### Supported formats -## Supported data formats -go-openapi/strfmt follows the swagger 2.0 specification with the following formats +`go-openapi/strfmt` follows the swagger 2.0 specification with the following formats defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types). It also provides convenient extensions to go-openapi users. @@ -43,7 +89,7 @@ It also provides convenient extensions to go-openapi users. > It does not provide validation for numerical values with swagger format extension for JSON types "number" or > "integer" (e.g. float, double, int32...). 
-## Type conversion +### Type conversion All types defined here are stringers and may be converted to strings with `.String()`. Note that most types defined by this package may be converted directly to string like `string(Email{})`. @@ -51,13 +97,14 @@ Note that most types defined by this package may be converted directly to string `Date` and `DateTime` may be converted directly to `time.Time` like `time.Time(Time{})`. Similarly, you can convert `Duration` to `time.Duration` as in `time.Duration(Duration{})` -## Using pointers +### Using pointers The `conv` subpackage provides helpers to convert the types to and from pointers, just like `go-openapi/swag` does with primitive types. -## Format types -Types defined in strfmt expose marshaling and validation capabilities. +### Format types + +Types defined in `strfmt` expose marshaling and validation capabilities. List of defined types: - Base64 @@ -87,6 +134,97 @@ List of defined types: - [UUID7](https://www.rfc-editor.org/rfc/rfc9562.html#name-uuid-version-7) - [ULID](https://github.com/ulid/spec) +### Database support + +All format types implement the `database/sql` interfaces `sql.Scanner` and `driver.Valuer`, +so they work out of the box with Go's standard `database/sql` package and any SQL driver. + +All format types also implement BSON marshaling/unmarshaling for use with MongoDB. +By default, a built-in minimal codec is used (compatible with mongo-driver v2.5.0). +For full driver support, add `import _ "github.com/go-openapi/strfmt/enable/mongodb"`. + +> **MySQL / MariaDB caveat for `DateTime`:** +> The `go-sql-driver/mysql` driver has hard-coded handling for `time.Time` but does not +> intercept type redefinitions like `strfmt.DateTime`. As a result, `DateTime.Value()` sends +> an RFC 3339 string (e.g. `"2024-06-15T12:30:45.123Z"`) that MySQL/MariaDB rejects for +> `DATETIME` columns. 
+> +> Workaround: set `strfmt.MarshalFormat` to a MySQL-compatible format such as +> `strfmt.ISO8601LocalTime` and normalize to UTC before marshaling: +> +> ```go +> strfmt.MarshalFormat = strfmt.ISO8601LocalTime +> strfmt.NormalizeTimeForMarshal = func(t time.Time) time.Time { return t.UTC() } +> ``` +> +> See [#174](https://github.com/go-openapi/strfmt/issues/174) for details. + +Integration tests for MongoDB, MariaDB, and PostgreSQL run in CI to verify database roundtrip +compatibility for all format types. See [`internal/testintegration/`](internal/testintegration/). + +## Change log + +See + +## References + + + ## Licensing This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). + +## Other documentation + +* [All-time contributors](./CONTRIBUTORS.md) +* [Contributing guidelines](.github/CONTRIBUTING.md) +* [Maintainers documentation](docs/MAINTAINERS.md) +* [Code style](docs/STYLE.md) + +## Cutting a new release + +Maintainers can cut a new release by either: + +* running [this workflow](https://github.com/go-openapi/strfmt/actions/workflows/bump-release.yml) +* or pushing a semver tag + * signed tags are preferred + * The tag message is prepended to release notes + + +[test-badge]: https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg +[test-url]: https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml +[cov-badge]: https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg +[cov-url]: https://codecov.io/gh/go-openapi/strfmt +[vuln-scan-badge]: https://github.com/go-openapi/strfmt/actions/workflows/scanner.yml/badge.svg +[vuln-scan-url]: https://github.com/go-openapi/strfmt/actions/workflows/scanner.yml +[codeql-badge]: https://github.com/go-openapi/strfmt/actions/workflows/codeql.yml/badge.svg +[codeql-url]: https://github.com/go-openapi/strfmt/actions/workflows/codeql.yml + +[release-badge]: https://badge.fury.io/gh/go-openapi%2Fstrfmt.svg +[release-url]: 
https://badge.fury.io/gh/go-openapi%2Fstrfmt +[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fstrfmt.svg +[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fstrfmt + +[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/strfmt +[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/strfmt +[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/strfmt +[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/strfmt + +[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F +[doc-url]: https://goswagger.io/go-openapi +[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/strfmt +[godoc-url]: http://pkg.go.dev/github.com/go-openapi/strfmt +[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png +[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM +[slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/FfnFYaC3k5 + + +[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg +[license-url]: https://github.com/go-openapi/strfmt/?tab=Apache-2.0-1-ov-file#readme + +[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/strfmt +[goversion-url]: https://github.com/go-openapi/strfmt/blob/master/go.mod +[top-badge]: https://img.shields.io/github/languages/top/go-openapi/strfmt +[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/strfmt/latest diff --git a/vendor/github.com/go-openapi/strfmt/SECURITY.md b/vendor/github.com/go-openapi/strfmt/SECURITY.md new file mode 100644 index 0000000000..6ceb159ca2 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/SECURITY.md @@ -0,0 +1,37 @@ +# Security Policy + +This policy outlines the commitment 
and practices of the go-openapi maintainers regarding security. + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. + +## Reporting a vulnerability + +If you become aware of a security vulnerability that affects the current repository, +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. + +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". + +[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go index 0eec8f6432..16a83f6408 100644 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ b/vendor/github.com/go-openapi/strfmt/bson.go @@ -5,62 +5,62 @@ package strfmt import ( "database/sql/driver" + "encoding/hex" + "encoding/json" "fmt" - - bsonprim "go.mongodb.org/mongo-driver/bson/primitive" ) -func init() { +func init() { //nolint:gochecknoinits // registers bsonobjectid format in the default registry var id ObjectId - // register this format in the default registry Default.Add("bsonobjectid", &id, 
IsBSONObjectID) } -// IsBSONObjectID returns true when the string is a valid BSON.ObjectId +// IsBSONObjectID returns true when the string is a valid BSON [ObjectId]. func IsBSONObjectID(str string) bool { - _, err := bsonprim.ObjectIDFromHex(str) + _, err := objectIDFromHex(str) return err == nil } -// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) +// ObjectId represents a BSON object ID (a 12-byte unique identifier). // -// swagger:strfmt bsonobjectid -type ObjectId bsonprim.ObjectID //nolint:revive +// swagger:strfmt bsonobjectid. +type ObjectId [12]byte //nolint:revive + +// nilObjectID is the zero-value ObjectId. +var nilObjectID ObjectId //nolint:gochecknoglobals // package-level sentinel -// NewObjectId creates a ObjectId from a Hex String +// NewObjectId creates a [ObjectId] from a hexadecimal String. func NewObjectId(hex string) ObjectId { //nolint:revive - oid, err := bsonprim.ObjectIDFromHex(hex) + oid, err := objectIDFromHex(hex) if err != nil { panic(err) } - return ObjectId(oid) + return oid } -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (id ObjectId) MarshalText() ([]byte, error) { - oid := bsonprim.ObjectID(id) - if oid == bsonprim.NilObjectID { + if id == nilObjectID { return nil, nil } - return []byte(oid.Hex()), nil + return []byte(id.Hex()), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on if len(data) == 0 { - *id = ObjectId(bsonprim.NilObjectID) + *id = nilObjectID return nil } - oidstr := string(data) - oid, err := bsonprim.ObjectIDFromHex(oidstr) + oid, err := objectIDFromHex(string(data)) if err != nil { return err } - *id = ObjectId(oid) + *id = oid return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. 
func (id *ObjectId) Scan(raw any) error { var data []byte switch v := raw.(type) { @@ -75,27 +75,36 @@ func (id *ObjectId) Scan(raw any) error { return id.UnmarshalText(data) } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (id ObjectId) Value() (driver.Value, error) { - return driver.Value(bsonprim.ObjectID(id).Hex()), nil + return driver.Value(id.Hex()), nil +} + +// Hex returns the hex string representation of the [ObjectId]. +func (id ObjectId) Hex() string { + return hex.EncodeToString(id[:]) } func (id ObjectId) String() string { - return bsonprim.ObjectID(id).Hex() + return id.Hex() } -// MarshalJSON returns the ObjectId as JSON +// MarshalJSON returns the [ObjectId] as JSON. func (id ObjectId) MarshalJSON() ([]byte, error) { - return bsonprim.ObjectID(id).MarshalJSON() + return json.Marshal(id.Hex()) } -// UnmarshalJSON sets the ObjectId from JSON +// UnmarshalJSON sets the [ObjectId] from JSON. func (id *ObjectId) UnmarshalJSON(data []byte) error { - var obj bsonprim.ObjectID - if err := obj.UnmarshalJSON(data); err != nil { + var hexStr string + if err := json.Unmarshal(data, &hexStr); err != nil { + return err + } + oid, err := objectIDFromHex(hexStr) + if err != nil { return err } - *id = ObjectId(obj) + *id = oid return nil } @@ -104,7 +113,7 @@ func (id *ObjectId) DeepCopyInto(out *ObjectId) { *out = *id } -// DeepCopy copies the receiver into a new ObjectId. +// DeepCopy copies the receiver into a new [ObjectId]. func (id *ObjectId) DeepCopy() *ObjectId { if id == nil { return nil @@ -113,3 +122,18 @@ func (id *ObjectId) DeepCopy() *ObjectId { id.DeepCopyInto(out) return out } + +// objectIDFromHex parses a 24-character hex string into an [ObjectId]. 
+func objectIDFromHex(s string) (ObjectId, error) { + const objectIDHexLen = 24 + if len(s) != objectIDHexLen { + return nilObjectID, fmt.Errorf("the provided hex string %q is not a valid ObjectID: %w", s, ErrFormat) + } + b, err := hex.DecodeString(s) + if err != nil { + return nilObjectID, fmt.Errorf("the provided hex string %q is not a valid ObjectID: %w", s, err) + } + var oid ObjectId + copy(oid[:], b) + return oid, nil +} diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go index 8aa17b8ea5..59ee1f1121 100644 --- a/vendor/github.com/go-openapi/strfmt/date.go +++ b/vendor/github.com/go-openapi/strfmt/date.go @@ -10,35 +10,34 @@ import ( "time" ) -func init() { +func init() { //nolint:gochecknoinits // registers date format in the default registry d := Date{} - // register this format in the default registry Default.Add("date", &d, IsDate) } -// IsDate returns true when the string is a valid date +// IsDate returns true when the string is a valid date. func IsDate(str string) bool { _, err := time.Parse(RFC3339FullDate, str) return err == nil } const ( - // RFC3339FullDate represents a full-date as specified by RFC3339 + // RFC3339FullDate represents a full-date as specified by RFC3339. // See: http://goo.gl/xXOvVd RFC3339FullDate = "2006-01-02" ) -// Date represents a date from the API +// Date represents a date from the API. // -// swagger:strfmt date +// swagger:strfmt date. type Date time.Time -// String converts this date into a string +// String converts this date into a string. func (d Date) String() string { return time.Time(d).Format(RFC3339FullDate) } -// UnmarshalText parses a text representation into a date type +// UnmarshalText parses a text representation into a date type. 
func (d *Date) UnmarshalText(text []byte) error { if len(text) == 0 { return nil @@ -51,7 +50,7 @@ func (d *Date) UnmarshalText(text []byte) error { return nil } -// MarshalText serializes this date type to string +// MarshalText serializes this date type to string. func (d Date) MarshalText() ([]byte, error) { return []byte(d.String()), nil } @@ -79,12 +78,12 @@ func (d Date) Value() (driver.Value, error) { return driver.Value(d.String()), nil } -// MarshalJSON returns the Date as JSON +// MarshalJSON returns the Date as JSON. func (d Date) MarshalJSON() ([]byte, error) { return json.Marshal(time.Time(d).Format(RFC3339FullDate)) } -// UnmarshalJSON sets the Date from JSON +// UnmarshalJSON sets the Date from JSON. func (d *Date) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -126,12 +125,12 @@ func (d *Date) GobDecode(data []byte) error { return d.UnmarshalBinary(data) } -// MarshalBinary implements the encoding.BinaryMarshaler interface. +// MarshalBinary implements the encoding.[encoding.BinaryMarshaler] interface. func (d Date) MarshalBinary() ([]byte, error) { return time.Time(d).MarshalBinary() } -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// UnmarshalBinary implements the encoding.[encoding.BinaryUnmarshaler] interface. func (d *Date) UnmarshalBinary(data []byte) error { var original time.Time @@ -145,7 +144,7 @@ func (d *Date) UnmarshalBinary(data []byte) error { return nil } -// Equal checks if two Date instances are equal +// Equal checks if two Date instances are equal. 
func (d Date) Equal(d2 Date) bool { return time.Time(d).Equal(time.Time(d2)) } diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go index 8a80cfbdb8..87d3856ad2 100644 --- a/vendor/github.com/go-openapi/strfmt/default.go +++ b/vendor/github.com/go-openapi/strfmt/default.go @@ -21,42 +21,48 @@ import ( ) const ( - // HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114. + // HostnamePattern http://[json]-schema.org/latest/[json]-schema-validation.html#anchor114. // // Deprecated: this package no longer uses regular expressions to validate hostnames. - HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z0-9-\p{L}]){2,63})$` + HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)` + + `|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z0-9-\p{L}]){2,63})$` - // json null type + // json null type. jsonNull = "null" ) const ( - // UUIDPattern Regex for UUID that allows uppercase + // UUIDPattern Regex for [UUID] that allows uppercase // - // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + // Deprecated: [strfmt] no longer uses regular expressions to validate UUIDs. UUIDPattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{32}$)` - // UUID3Pattern Regex for UUID3 that allows uppercase + // UUID3Pattern Regex for [UUID3] that allows uppercase // - // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + // Deprecated: [strfmt] no longer uses regular expressions to validate UUIDs. 
UUID3Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{12}3[0-9a-f]{3}?[0-9a-f]{16}$)` - // UUID4Pattern Regex for UUID4 that allows uppercase + // UUID4Pattern Regex for [UUID4] that allows uppercase // - // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + // Deprecated: [strfmt] no longer uses regular expressions to validate UUIDs. UUID4Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}$)` - // UUID5Pattern Regex for UUID5 that allows uppercase + // UUID5Pattern Regex for [UUID]5 that allows uppercase // - // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + // Deprecated: [strfmt] no longer uses regular expressions to validate UUIDs. UUID5Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}5[0-9a-f]{3}[89ab][0-9a-f]{15}$)` - isbn10Pattern string = "^(?:[0-9]{9}X|[0-9]{10})$" - isbn13Pattern string = "^(?:[0-9]{13})$" - usCardPattern string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" + isbn10Pattern string = "^(?:[0-9]{9}X|[0-9]{10})$" + isbn13Pattern string = "^(?:[0-9]{13})$" + usCardPattern string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}" + + "|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}" + + "|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}" + + "|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" ssnPattern string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` hexColorPattern string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" - rgbColorPattern string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" + 
rgbColorPattern string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*," + + "\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*," + + "\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" ) const ( @@ -65,6 +71,7 @@ const ( decimalBase = 10 ) +//nolint:gochecknoglobals // package-level compiled patterns and validators var ( idnaHostChecker = idna.New( idna.ValidateForRegistration(), // shorthand for [idna.StrictDomainName], [idna.ValidateLabels], [idna.VerifyDNSLength], [idna.BidiRule] @@ -87,11 +94,12 @@ var ( // It supports IDNA rules regarding internationalized names with unicode. // // Besides: -// * the empty string is not a valid host name -// * a trailing dot is allowed in names and IPv4's (not IPv6) -// * a host name can be a valid IPv4 (with decimal, octal or hexadecimal numbers) or IPv6 address -// * IPv6 zones are disallowed -// * top-level domains can be unicode (cf. https://www.iana.org/domains/root/db). +// +// - the empty string is not a valid host name +// - a trailing dot is allowed in names and [IPv4]'s (not [IPv6]) +// - a host name can be a valid [IPv4] (with decimal, octal or hexadecimal numbers) or [IPv6] address +// - [IPv6] zones are disallowed +// - top-level domains can be unicode (cf. https://www.iana.org/domains/root/db). // // NOTE: this validator doesn't check top-level domains against the IANA root database. // It merely ensures that a top-level domain in a FQDN is at least 2 code points long. @@ -325,7 +333,7 @@ func isASCIIDigit(c byte) bool { return c >= '0' && c <= '9' } -// IsUUID returns true is the string matches a UUID (in any version, including v6 and v7), upper case is allowed +// IsUUID returns true if the string matches a [UUID] (in any version, including v6 and v7), upper case is allowed. 
func IsUUID(str string) bool { _, err := uuid.Parse(str) return err == nil @@ -338,25 +346,25 @@ const ( uuidV7 = 7 ) -// IsUUID3 returns true is the string matches a UUID v3, upper case is allowed +// IsUUID3 returns true if the string matches a [UUID] v3, upper case is allowed. func IsUUID3(str string) bool { id, err := uuid.Parse(str) return err == nil && id.Version() == uuid.Version(uuidV3) } -// IsUUID4 returns true is the string matches a UUID v4, upper case is allowed +// IsUUID4 returns true is the string matches a [UUID] v4, upper case is allowed. func IsUUID4(str string) bool { id, err := uuid.Parse(str) return err == nil && id.Version() == uuid.Version(uuidV4) } -// IsUUID5 returns true is the string matches a UUID v5, upper case is allowed +// IsUUID5 returns true if the string matches a [UUID] v5, upper case is allowed. func IsUUID5(str string) bool { id, err := uuid.Parse(str) return err == nil && id.Version() == uuid.Version(uuidV5) } -// IsUUID7 returns true is the string matches a UUID v7, upper case is allowed +// IsUUID7 returns true if the string matches a [UUID] v7, upper case is allowed. func IsUUID7(str string) bool { id, err := uuid.Parse(str) return err == nil && id.Version() == uuid.Version(uuidV7) @@ -368,7 +376,7 @@ func IsEmail(str string) bool { return e == nil && addr.Address != "" } -func init() { +func init() { //nolint:gochecknoinits // registers all default string formats in the registry // register formats in the default registry: // - byte // - creditcard @@ -455,12 +463,12 @@ func init() { Default.Add("password", &pw, func(_ string) bool { return true }) } -// Base64 represents a base64 encoded string, using URLEncoding alphabet +// Base64 represents a base64 encoded string, using URLEncoding alphabet. // -// swagger:strfmt byte +// swagger:strfmt byte. type Base64 []byte -// MarshalText turns this instance into text +// MarshalText turns this instance into text. 
func (b Base64) MarshalText() ([]byte, error) { enc := base64.URLEncoding src := []byte(b) @@ -469,7 +477,7 @@ func (b Base64) MarshalText() ([]byte, error) { return buf, nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (b *Base64) UnmarshalText(data []byte) error { // validation is performed later on enc := base64.URLEncoding dbuf := make([]byte, enc.DecodedLen(len(data))) @@ -483,7 +491,7 @@ func (b *Base64) UnmarshalText(data []byte) error { // validation is performed l return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (b *Base64) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -506,7 +514,7 @@ func (b *Base64) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (b Base64) Value() (driver.Value, error) { return driver.Value(b.String()), nil } @@ -515,12 +523,12 @@ func (b Base64) String() string { return base64.StdEncoding.EncodeToString([]byte(b)) } -// MarshalJSON returns the Base64 as JSON +// MarshalJSON returns the Base64 as JSON. func (b Base64) MarshalJSON() ([]byte, error) { return json.Marshal(b.String()) } -// UnmarshalJSON sets the Base64 from JSON +// UnmarshalJSON sets the Base64 from JSON. func (b *Base64) UnmarshalJSON(data []byte) error { var b64str string if err := json.Unmarshal(data, &b64str); err != nil { @@ -549,23 +557,23 @@ func (b *Base64) DeepCopy() *Base64 { return out } -// URI represents the uri string format as specified by the json schema spec +// URI represents the uri string format as specified by the [json] schema spec. // -// swagger:strfmt uri +// swagger:strfmt uri. type URI string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. 
func (u URI) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *URI) UnmarshalText(data []byte) error { // validation is performed later on *u = URI(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *URI) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -579,7 +587,7 @@ func (u *URI) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u URI) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -588,12 +596,12 @@ func (u URI) String() string { return string(u) } -// MarshalJSON returns the URI as JSON +// MarshalJSON returns the [URI] as JSON. func (u URI) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the URI from JSON +// UnmarshalJSON sets the [URI] from JSON. func (u *URI) UnmarshalJSON(data []byte) error { var uristr string if err := json.Unmarshal(data, &uristr); err != nil { @@ -608,7 +616,7 @@ func (u *URI) DeepCopyInto(out *URI) { *out = *u } -// DeepCopy copies the receiver into a new URI. +// DeepCopy copies the receiver into a new [URI]. func (u *URI) DeepCopy() *URI { if u == nil { return nil @@ -618,23 +626,23 @@ func (u *URI) DeepCopy() *URI { return out } -// Email represents the email string format as specified by the json schema spec +// Email represents the email string format as specified by the [json] schema spec. // -// swagger:strfmt email +// swagger:strfmt email. type Email string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (e Email) MarshalText() ([]byte, error) { return []byte(string(e)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. 
func (e *Email) UnmarshalText(data []byte) error { // validation is performed later on *e = Email(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (e *Email) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -648,7 +656,7 @@ func (e *Email) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (e Email) Value() (driver.Value, error) { return driver.Value(string(e)), nil } @@ -657,12 +665,12 @@ func (e Email) String() string { return string(e) } -// MarshalJSON returns the Email as JSON +// MarshalJSON returns the Email as JSON. func (e Email) MarshalJSON() ([]byte, error) { return json.Marshal(string(e)) } -// UnmarshalJSON sets the Email from JSON +// UnmarshalJSON sets the Email from JSON. func (e *Email) UnmarshalJSON(data []byte) error { var estr string if err := json.Unmarshal(data, &estr); err != nil { @@ -687,23 +695,23 @@ func (e *Email) DeepCopy() *Email { return out } -// Hostname represents the hostname string format as specified by the json schema spec +// Hostname represents the hostname string format as specified by the [json] schema spec. // -// swagger:strfmt hostname +// swagger:strfmt hostname. type Hostname string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (h Hostname) MarshalText() ([]byte, error) { return []byte(string(h)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (h *Hostname) UnmarshalText(data []byte) error { // validation is performed later on *h = Hostname(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. 
func (h *Hostname) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -717,7 +725,7 @@ func (h *Hostname) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (h Hostname) Value() (driver.Value, error) { return driver.Value(string(h)), nil } @@ -726,12 +734,12 @@ func (h Hostname) String() string { return string(h) } -// MarshalJSON returns the Hostname as JSON +// MarshalJSON returns the [Hostname] as JSON. func (h Hostname) MarshalJSON() ([]byte, error) { return json.Marshal(string(h)) } -// UnmarshalJSON sets the Hostname from JSON +// UnmarshalJSON sets the [Hostname] from JSON. func (h *Hostname) UnmarshalJSON(data []byte) error { var hstr string if err := json.Unmarshal(data, &hstr); err != nil { @@ -746,7 +754,7 @@ func (h *Hostname) DeepCopyInto(out *Hostname) { *out = *h } -// DeepCopy copies the receiver into a new Hostname. +// DeepCopy copies the receiver into a new [Hostname]. func (h *Hostname) DeepCopy() *Hostname { if h == nil { return nil @@ -756,23 +764,23 @@ func (h *Hostname) DeepCopy() *Hostname { return out } -// IPv4 represents an IP v4 address +// IPv4 represents an IP v4 address. // -// swagger:strfmt ipv4 +// swagger:strfmt ipv4. type IPv4 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u IPv4) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *IPv4) UnmarshalText(data []byte) error { // validation is performed later on *u = IPv4(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. 
func (u *IPv4) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -786,7 +794,7 @@ func (u *IPv4) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u IPv4) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -795,12 +803,12 @@ func (u IPv4) String() string { return string(u) } -// MarshalJSON returns the IPv4 as JSON +// MarshalJSON returns the [IPv4] as JSON. func (u IPv4) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the IPv4 from JSON +// UnmarshalJSON sets the [IPv4] from JSON. func (u *IPv4) UnmarshalJSON(data []byte) error { var ustr string if err := json.Unmarshal(data, &ustr); err != nil { @@ -815,7 +823,7 @@ func (u *IPv4) DeepCopyInto(out *IPv4) { *out = *u } -// DeepCopy copies the receiver into a new IPv4. +// DeepCopy copies the receiver into a new [IPv4]. func (u *IPv4) DeepCopy() *IPv4 { if u == nil { return nil @@ -825,23 +833,23 @@ func (u *IPv4) DeepCopy() *IPv4 { return out } -// IPv6 represents an IP v6 address +// IPv6 represents an IP v6 address. // -// swagger:strfmt ipv6 +// swagger:strfmt ipv6. type IPv6 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u IPv6) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *IPv6) UnmarshalText(data []byte) error { // validation is performed later on *u = IPv6(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *IPv6) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -855,7 +863,7 @@ func (u *IPv6) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. 
func (u IPv6) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -864,12 +872,12 @@ func (u IPv6) String() string { return string(u) } -// MarshalJSON returns the IPv6 as JSON +// MarshalJSON returns the [IPv6] as JSON. func (u IPv6) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the IPv6 from JSON +// UnmarshalJSON sets the [IPv6] from JSON. func (u *IPv6) UnmarshalJSON(data []byte) error { var ustr string if err := json.Unmarshal(data, &ustr); err != nil { @@ -884,7 +892,7 @@ func (u *IPv6) DeepCopyInto(out *IPv6) { *out = *u } -// DeepCopy copies the receiver into a new IPv6. +// DeepCopy copies the receiver into a new [IPv6]. func (u *IPv6) DeepCopy() *IPv6 { if u == nil { return nil @@ -894,23 +902,23 @@ func (u *IPv6) DeepCopy() *IPv6 { return out } -// CIDR represents a Classless Inter-Domain Routing notation +// CIDR represents a Classless Inter-Domain Routing notation. // -// swagger:strfmt cidr +// swagger:strfmt cidr. type CIDR string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u CIDR) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *CIDR) UnmarshalText(data []byte) error { // validation is performed later on *u = CIDR(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *CIDR) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -924,7 +932,7 @@ func (u *CIDR) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. 
func (u CIDR) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -933,12 +941,12 @@ func (u CIDR) String() string { return string(u) } -// MarshalJSON returns the CIDR as JSON +// MarshalJSON returns the [CIDR] as JSON. func (u CIDR) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the CIDR from JSON +// UnmarshalJSON sets the [CIDR] from JSON. func (u *CIDR) UnmarshalJSON(data []byte) error { var ustr string if err := json.Unmarshal(data, &ustr); err != nil { @@ -953,7 +961,7 @@ func (u *CIDR) DeepCopyInto(out *CIDR) { *out = *u } -// DeepCopy copies the receiver into a new CIDR. +// DeepCopy copies the receiver into a new [CIDR]. func (u *CIDR) DeepCopy() *CIDR { if u == nil { return nil @@ -963,23 +971,23 @@ func (u *CIDR) DeepCopy() *CIDR { return out } -// MAC represents a 48 bit MAC address +// MAC represents a 48 bit MAC address. // -// swagger:strfmt mac +// swagger:strfmt mac. type MAC string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u MAC) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *MAC) UnmarshalText(data []byte) error { // validation is performed later on *u = MAC(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *MAC) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -993,7 +1001,7 @@ func (u *MAC) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u MAC) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1002,12 +1010,12 @@ func (u MAC) String() string { return string(u) } -// MarshalJSON returns the MAC as JSON +// MarshalJSON returns the [MAC] as JSON. 
func (u MAC) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the MAC from JSON +// UnmarshalJSON sets the [MAC] from JSON. func (u *MAC) UnmarshalJSON(data []byte) error { var ustr string if err := json.Unmarshal(data, &ustr); err != nil { @@ -1022,7 +1030,7 @@ func (u *MAC) DeepCopyInto(out *MAC) { *out = *u } -// DeepCopy copies the receiver into a new MAC. +// DeepCopy copies the receiver into a new [MAC]. func (u *MAC) DeepCopy() *MAC { if u == nil { return nil @@ -1032,23 +1040,23 @@ func (u *MAC) DeepCopy() *MAC { return out } -// UUID represents a uuid string format +// UUID represents a [uuid] string format // -// swagger:strfmt uuid +// swagger:strfmt uuid. type UUID string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u UUID) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *UUID) UnmarshalText(data []byte) error { // validation is performed later on *u = UUID(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *UUID) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1062,7 +1070,7 @@ func (u *UUID) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u UUID) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1071,12 +1079,12 @@ func (u UUID) String() string { return string(u) } -// MarshalJSON returns the UUID as JSON +// MarshalJSON returns the [UUID] as JSON. func (u UUID) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the UUID from JSON +// UnmarshalJSON sets the [UUID] from JSON. 
func (u *UUID) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1094,7 +1102,7 @@ func (u *UUID) DeepCopyInto(out *UUID) { *out = *u } -// DeepCopy copies the receiver into a new UUID. +// DeepCopy copies the receiver into a new [UUID]. func (u *UUID) DeepCopy() *UUID { if u == nil { return nil @@ -1104,23 +1112,23 @@ func (u *UUID) DeepCopy() *UUID { return out } -// UUID3 represents a uuid3 string format +// UUID3 represents a uuid3 string format. // -// swagger:strfmt uuid3 +// swagger:strfmt uuid3. type UUID3 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u UUID3) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *UUID3) UnmarshalText(data []byte) error { // validation is performed later on *u = UUID3(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *UUID3) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1134,7 +1142,7 @@ func (u *UUID3) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u UUID3) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1143,12 +1151,12 @@ func (u UUID3) String() string { return string(u) } -// MarshalJSON returns the UUID as JSON +// MarshalJSON returns the [UUID3] as JSON. func (u UUID3) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the UUID from JSON +// UnmarshalJSON sets the [UUID3] from JSON. func (u *UUID3) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1166,7 +1174,7 @@ func (u *UUID3) DeepCopyInto(out *UUID3) { *out = *u } -// DeepCopy copies the receiver into a new UUID3. +// DeepCopy copies the receiver into a new [UUID]3. 
func (u *UUID3) DeepCopy() *UUID3 { if u == nil { return nil @@ -1176,23 +1184,23 @@ func (u *UUID3) DeepCopy() *UUID3 { return out } -// UUID4 represents a uuid4 string format +// UUID4 represents a uuid4 string format. // -// swagger:strfmt uuid4 +// swagger:strfmt uuid4. type UUID4 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u UUID4) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *UUID4) UnmarshalText(data []byte) error { // validation is performed later on *u = UUID4(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *UUID4) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1206,7 +1214,7 @@ func (u *UUID4) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u UUID4) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1215,12 +1223,12 @@ func (u UUID4) String() string { return string(u) } -// MarshalJSON returns the UUID as JSON +// MarshalJSON returns the [UUID4] as JSON. func (u UUID4) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the UUID from JSON +// UnmarshalJSON sets the [UUID4] from JSON. func (u *UUID4) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1248,23 +1256,23 @@ func (u *UUID4) DeepCopy() *UUID4 { return out } -// UUID5 represents a uuid5 string format +// UUID5 represents a uuid5 string format. // -// swagger:strfmt uuid5 +// swagger:strfmt uuid5. type UUID5 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. 
func (u UUID5) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *UUID5) UnmarshalText(data []byte) error { // validation is performed later on *u = UUID5(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *UUID5) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1278,7 +1286,7 @@ func (u *UUID5) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u UUID5) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1287,12 +1295,12 @@ func (u UUID5) String() string { return string(u) } -// MarshalJSON returns the UUID as JSON +// MarshalJSON returns the [UUID5] as JSON. func (u UUID5) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the UUID from JSON +// UnmarshalJSON sets the [UUID5] from JSON. func (u *UUID5) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1310,7 +1318,7 @@ func (u *UUID5) DeepCopyInto(out *UUID5) { *out = *u } -// DeepCopy copies the receiver into a new UUID5. +// DeepCopy copies the receiver into a new [UUID5]. func (u *UUID5) DeepCopy() *UUID5 { if u == nil { return nil @@ -1320,23 +1328,23 @@ func (u *UUID5) DeepCopy() *UUID5 { return out } -// UUID7 represents a uuid7 string format +// UUID7 represents a uuid7 string format. // -// swagger:strfmt uuid7 +// swagger:strfmt uuid7. type UUID7 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u UUID7) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. 
func (u *UUID7) UnmarshalText(data []byte) error { // validation is performed later on *u = UUID7(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *UUID7) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1350,7 +1358,7 @@ func (u *UUID7) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u UUID7) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1359,12 +1367,12 @@ func (u UUID7) String() string { return string(u) } -// MarshalJSON returns the UUID as JSON +// MarshalJSON returns the [UUID7] as JSON. func (u UUID7) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the UUID from JSON +// UnmarshalJSON sets the [UUID7] from JSON. func (u *UUID7) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1382,7 +1390,7 @@ func (u *UUID7) DeepCopyInto(out *UUID7) { *out = *u } -// DeepCopy copies the receiver into a new UUID7. +// DeepCopy copies the receiver into a new [UUID]7. func (u *UUID7) DeepCopy() *UUID7 { if u == nil { return nil @@ -1392,23 +1400,23 @@ func (u *UUID7) DeepCopy() *UUID7 { return out } -// ISBN represents an isbn string format +// ISBN represents an isbn string format. // -// swagger:strfmt isbn +// swagger:strfmt isbn. type ISBN string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u ISBN) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *ISBN) UnmarshalText(data []byte) error { // validation is performed later on *u = ISBN(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. 
func (u *ISBN) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1422,7 +1430,7 @@ func (u *ISBN) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u ISBN) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1431,12 +1439,12 @@ func (u ISBN) String() string { return string(u) } -// MarshalJSON returns the ISBN as JSON +// MarshalJSON returns the [ISBN] as JSON. func (u ISBN) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the ISBN from JSON +// UnmarshalJSON sets the [ISBN] from JSON. func (u *ISBN) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1454,7 +1462,7 @@ func (u *ISBN) DeepCopyInto(out *ISBN) { *out = *u } -// DeepCopy copies the receiver into a new ISBN. +// DeepCopy copies the receiver into a new [ISBN]. func (u *ISBN) DeepCopy() *ISBN { if u == nil { return nil @@ -1464,23 +1472,23 @@ func (u *ISBN) DeepCopy() *ISBN { return out } -// ISBN10 represents an isbn 10 string format +// ISBN10 represents an isbn 10 string format. // -// swagger:strfmt isbn10 +// swagger:strfmt isbn10. type ISBN10 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u ISBN10) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *ISBN10) UnmarshalText(data []byte) error { // validation is performed later on *u = ISBN10(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *ISBN10) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1494,7 +1502,7 @@ func (u *ISBN10) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. 
func (u ISBN10) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1503,12 +1511,12 @@ func (u ISBN10) String() string { return string(u) } -// MarshalJSON returns the ISBN10 as JSON +// MarshalJSON returns the [ISBN10] as JSON. func (u ISBN10) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the ISBN10 from JSON +// UnmarshalJSON sets the [ISBN10] from JSON. func (u *ISBN10) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1526,7 +1534,7 @@ func (u *ISBN10) DeepCopyInto(out *ISBN10) { *out = *u } -// DeepCopy copies the receiver into a new ISBN10. +// DeepCopy copies the receiver into a new [ISBN10]. func (u *ISBN10) DeepCopy() *ISBN10 { if u == nil { return nil @@ -1536,23 +1544,23 @@ func (u *ISBN10) DeepCopy() *ISBN10 { return out } -// ISBN13 represents an isbn 13 string format +// ISBN13 represents an isbn 13 string format. // -// swagger:strfmt isbn13 +// swagger:strfmt isbn13. type ISBN13 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u ISBN13) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *ISBN13) UnmarshalText(data []byte) error { // validation is performed later on *u = ISBN13(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *ISBN13) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1566,7 +1574,7 @@ func (u *ISBN13) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. 
func (u ISBN13) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1575,12 +1583,12 @@ func (u ISBN13) String() string { return string(u) } -// MarshalJSON returns the ISBN13 as JSON +// MarshalJSON returns the [ISBN13] as JSON. func (u ISBN13) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the ISBN13 from JSON +// UnmarshalJSON sets the [ISBN13] from JSON. func (u *ISBN13) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1598,7 +1606,7 @@ func (u *ISBN13) DeepCopyInto(out *ISBN13) { *out = *u } -// DeepCopy copies the receiver into a new ISBN13. +// DeepCopy copies the receiver into a new [ISBN13]. func (u *ISBN13) DeepCopy() *ISBN13 { if u == nil { return nil @@ -1608,23 +1616,23 @@ func (u *ISBN13) DeepCopy() *ISBN13 { return out } -// CreditCard represents a credit card string format +// CreditCard represents a credit card string format. // -// swagger:strfmt creditcard +// swagger:strfmt creditcard. type CreditCard string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u CreditCard) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *CreditCard) UnmarshalText(data []byte) error { // validation is performed later on *u = CreditCard(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *CreditCard) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1638,7 +1646,7 @@ func (u *CreditCard) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. 
func (u CreditCard) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1647,12 +1655,12 @@ func (u CreditCard) String() string { return string(u) } -// MarshalJSON returns the CreditCard as JSON +// MarshalJSON returns the [CreditCard] as JSON. func (u CreditCard) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the CreditCard from JSON +// UnmarshalJSON sets the [CreditCard] from JSON. func (u *CreditCard) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1670,7 +1678,7 @@ func (u *CreditCard) DeepCopyInto(out *CreditCard) { *out = *u } -// DeepCopy copies the receiver into a new CreditCard. +// DeepCopy copies the receiver into a new [CreditCard]. func (u *CreditCard) DeepCopy() *CreditCard { if u == nil { return nil @@ -1680,23 +1688,23 @@ func (u *CreditCard) DeepCopy() *CreditCard { return out } -// SSN represents a social security string format +// SSN represents a social security string format. // -// swagger:strfmt ssn +// swagger:strfmt ssn. type SSN string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u SSN) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *SSN) UnmarshalText(data []byte) error { // validation is performed later on *u = SSN(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *SSN) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1710,7 +1718,7 @@ func (u *SSN) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. 
func (u SSN) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1719,12 +1727,12 @@ func (u SSN) String() string { return string(u) } -// MarshalJSON returns the SSN as JSON +// MarshalJSON returns the [SSN] as JSON. func (u SSN) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the SSN from JSON +// UnmarshalJSON sets the [SSN] from JSON. func (u *SSN) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1742,7 +1750,7 @@ func (u *SSN) DeepCopyInto(out *SSN) { *out = *u } -// DeepCopy copies the receiver into a new SSN. +// DeepCopy copies the receiver into a new [SSN]. func (u *SSN) DeepCopy() *SSN { if u == nil { return nil @@ -1752,23 +1760,23 @@ func (u *SSN) DeepCopy() *SSN { return out } -// HexColor represents a hex color string format +// HexColor represents a hex color string format. // -// swagger:strfmt hexcolor +// swagger:strfmt hexcolor. type HexColor string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (h HexColor) MarshalText() ([]byte, error) { return []byte(string(h)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (h *HexColor) UnmarshalText(data []byte) error { // validation is performed later on *h = HexColor(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (h *HexColor) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1782,7 +1790,7 @@ func (h *HexColor) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (h HexColor) Value() (driver.Value, error) { return driver.Value(string(h)), nil } @@ -1791,12 +1799,12 @@ func (h HexColor) String() string { return string(h) } -// MarshalJSON returns the HexColor as JSON +// MarshalJSON returns the [HexColor] as JSON. 
func (h HexColor) MarshalJSON() ([]byte, error) { return json.Marshal(string(h)) } -// UnmarshalJSON sets the HexColor from JSON +// UnmarshalJSON sets the [HexColor] from JSON. func (h *HexColor) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1814,7 +1822,7 @@ func (h *HexColor) DeepCopyInto(out *HexColor) { *out = *h } -// DeepCopy copies the receiver into a new HexColor. +// DeepCopy copies the receiver into a new [HexColor]. func (h *HexColor) DeepCopy() *HexColor { if h == nil { return nil @@ -1824,23 +1832,23 @@ func (h *HexColor) DeepCopy() *HexColor { return out } -// RGBColor represents a RGB color string format +// RGBColor represents a RGB color string format. // -// swagger:strfmt rgbcolor +// swagger:strfmt rgbcolor. type RGBColor string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (r RGBColor) MarshalText() ([]byte, error) { return []byte(string(r)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (r *RGBColor) UnmarshalText(data []byte) error { // validation is performed later on *r = RGBColor(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (r *RGBColor) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1854,7 +1862,7 @@ func (r *RGBColor) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (r RGBColor) Value() (driver.Value, error) { return driver.Value(string(r)), nil } @@ -1863,12 +1871,12 @@ func (r RGBColor) String() string { return string(r) } -// MarshalJSON returns the RGBColor as JSON +// MarshalJSON returns the [RGBColor] as JSON. func (r RGBColor) MarshalJSON() ([]byte, error) { return json.Marshal(string(r)) } -// UnmarshalJSON sets the RGBColor from JSON +// UnmarshalJSON sets the [RGBColor] from JSON. 
func (r *RGBColor) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1886,7 +1894,7 @@ func (r *RGBColor) DeepCopyInto(out *RGBColor) { *out = *r } -// DeepCopy copies the receiver into a new RGBColor. +// DeepCopy copies the receiver into a new [RGBColor]. func (r *RGBColor) DeepCopy() *RGBColor { if r == nil { return nil @@ -1899,21 +1907,21 @@ func (r *RGBColor) DeepCopy() *RGBColor { // Password represents a password. // This has no validations and is mainly used as a marker for UI components. // -// swagger:strfmt password +// swagger:strfmt password. type Password string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (r Password) MarshalText() ([]byte, error) { return []byte(string(r)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (r *Password) UnmarshalText(data []byte) error { // validation is performed later on *r = Password(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (r *Password) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1927,7 +1935,7 @@ func (r *Password) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (r Password) Value() (driver.Value, error) { return driver.Value(string(r)), nil } @@ -1936,12 +1944,12 @@ func (r Password) String() string { return string(r) } -// MarshalJSON returns the Password as JSON +// MarshalJSON returns the Password as JSON. func (r Password) MarshalJSON() ([]byte, error) { return json.Marshal(string(r)) } -// UnmarshalJSON sets the Password from JSON +// UnmarshalJSON sets the Password from JSON. 
func (r *Password) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1986,7 +1994,7 @@ func isIPv6(str string) bool { return ip != nil && strings.Contains(str, ":") } -// isCIDR checks if the string is an valid CIDR notiation (IPV4 & IPV6) +// isCIDR checks if the string is a valid CIDR notation (IPV4 & IPV6). func isCIDR(str string) bool { _, _, err := net.ParseCIDR(str) return err == nil @@ -1999,7 +2007,7 @@ func isCIDR(str string) bool { // 01-23-45-67-89-ab // 01-23-45-67-89-ab-cd-ef // 0123.4567.89ab -// 0123.4567.89ab.cdef +// 0123.4567.89ab.cdef. func isMAC(str string) bool { _, err := net.ParseMAC(str) return err == nil @@ -2084,7 +2092,7 @@ func isCreditCard(str string) bool { return (sum+lastDigit)%decimalBase == 0 } -// isSSN will validate the given string as a U.S. Social Security Number +// isSSN will validate the given string as a U.S. Social Security Number. func isSSN(str string) bool { if str == "" || len(str) != 11 { return false diff --git a/vendor/github.com/go-openapi/strfmt/doc.go b/vendor/github.com/go-openapi/strfmt/doc.go index 5825b72108..6652521c53 100644 --- a/vendor/github.com/go-openapi/strfmt/doc.go +++ b/vendor/github.com/go-openapi/strfmt/doc.go @@ -1,7 +1,5 @@ // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 -// Package strfmt contains custom string formats -// -// TODO: add info on how to define and register a custom format +// Package strfmt contains custom string formats. 
package strfmt diff --git a/vendor/github.com/go-openapi/strfmt/duration.go b/vendor/github.com/go-openapi/strfmt/duration.go index 908c1b02f3..b710bfbf53 100644 --- a/vendor/github.com/go-openapi/strfmt/duration.go +++ b/vendor/github.com/go-openapi/strfmt/duration.go @@ -14,9 +14,8 @@ import ( "time" ) -func init() { +func init() { //nolint:gochecknoinits // registers duration format in the default registry d := Duration(0) - // register this format in the default registry Default.Add("duration", &d, IsDuration) } @@ -25,6 +24,7 @@ const ( daysInWeek = 7 ) +//nolint:gochecknoglobals // package-level lookup tables for duration parsing var ( timeUnits = [][]string{ {"ns", "nano"}, @@ -51,7 +51,7 @@ var ( durationMatcher = regexp.MustCompile(`^(((?:-\s?)?\d+)(\.\d+)?\s*([A-Za-zµ]+))`) ) -// IsDuration returns true if the provided string is a valid duration +// IsDuration returns true if the provided string is a valid duration. func IsDuration(str string) bool { _, err := ParseDuration(str) return err == nil @@ -60,17 +60,17 @@ func IsDuration(str string) bool { // Duration represents a duration // // Duration stores a period of time as a nanosecond count, with the largest -// repesentable duration being approximately 290 years. +// representable duration being approximately 290 years. // -// swagger:strfmt duration +// swagger:strfmt duration. type Duration time.Duration -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (d Duration) MarshalText() ([]byte, error) { return []byte(time.Duration(d).String()), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. 
func (d *Duration) UnmarshalText(data []byte) error { // validation is performed later on dd, err := ParseDuration(string(data)) if err != nil { @@ -80,7 +80,7 @@ func (d *Duration) UnmarshalText(data []byte) error { // validation is performed return nil } -// ParseDuration parses a duration from a string, compatible with scala duration syntax +// ParseDuration parses a duration from a string, compatible with scala duration syntax. func ParseDuration(cand string) (time.Duration, error) { if dur, err := time.ParseDuration(cand); err == nil { return dur, nil @@ -143,7 +143,7 @@ func ParseDuration(cand string) (time.Duration, error) { // Scan reads a Duration value from database driver type. func (d *Duration) Scan(raw any) error { switch v := raw.(type) { - // TODO: case []byte: // ? + // Proposal for enhancement: case []byte: // ? case int64: *d = Duration(v) case float64: @@ -162,17 +162,17 @@ func (d Duration) Value() (driver.Value, error) { return driver.Value(int64(d)), nil } -// String converts this duration to a string +// String converts this duration to a string. func (d Duration) String() string { return time.Duration(d).String() } -// MarshalJSON returns the Duration as JSON +// MarshalJSON returns the Duration as JSON. func (d Duration) MarshalJSON() ([]byte, error) { return json.Marshal(time.Duration(d).String()) } -// UnmarshalJSON sets the Duration from JSON +// UnmarshalJSON sets the Duration from JSON. func (d *Duration) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil diff --git a/vendor/github.com/go-openapi/strfmt/errors.go b/vendor/github.com/go-openapi/strfmt/errors.go index 9faa37cf2e..5ed519d2e6 100644 --- a/vendor/github.com/go-openapi/strfmt/errors.go +++ b/vendor/github.com/go-openapi/strfmt/errors.go @@ -5,7 +5,7 @@ package strfmt type strfmtError string -// ErrFormat is an error raised by the strfmt package +// ErrFormat is an error raised by the [strfmt] package. 
const ErrFormat strfmtError = "format error" func (e strfmtError) Error() string { diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go index d9d9e04c20..e494dd7b83 100644 --- a/vendor/github.com/go-openapi/strfmt/format.go +++ b/vendor/github.com/go-openapi/strfmt/format.go @@ -16,20 +16,20 @@ import ( "github.com/go-viper/mapstructure/v2" ) -// Default is the default formats registry -var Default = NewSeededFormats(nil, nil) +// Default is the default formats registry. +var Default = NewSeededFormats(nil, nil) //nolint:gochecknoglobals // package-level default registry, by design // Validator represents a validator for a string format. type Validator func(string) bool -// NewFormats creates a new formats registry seeded with the values from the default -func NewFormats() Registry { +// NewFormats creates a new formats registry seeded with the values from the default. +func NewFormats() Registry { //nolint:ireturn // factory function returns the Registry interface by design //nolint:forcetypeassert return NewSeededFormats(Default.(*defaultFormats).data, nil) } -// NewSeededFormats creates a new formats registry -func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry { +// NewSeededFormats creates a new formats registry. +func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry { //nolint:ireturn // factory function returns the Registry interface by design if normalizer == nil { normalizer = DefaultNameNormalizer } @@ -50,7 +50,7 @@ type knownFormat struct { // NameNormalizer is a function that normalizes a format name. type NameNormalizer func(string) string -// DefaultNameNormalizer removes all dashes +// DefaultNameNormalizer removes all dashes. 
func DefaultNameNormalizer(name string) string { return strings.ReplaceAll(name, "-", "") } @@ -62,8 +62,8 @@ type defaultFormats struct { normalizeName NameNormalizer } -// MapStructureHookFunc is a decode hook function for mapstructure -func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { +// MapStructureHookFunc is a decode hook function for mapstructure. +func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //nolint:ireturn // returns interface required by mapstructure return func(from reflect.Type, to reflect.Type, obj any) (any, error) { if from.Kind() != reflect.String { return obj, nil @@ -76,83 +76,87 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { for _, v := range f.data { tpe, _ := f.GetType(v.Name) if to == tpe { - switch v.Name { - case "date": - d, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) - if err != nil { - return nil, err - } - return Date(d), nil - case "datetime": - input := data - if len(input) == 0 { - return nil, fmt.Errorf("empty string is an invalid datetime format: %w", ErrFormat) - } - return ParseDateTime(input) - case "duration": - dur, err := ParseDuration(data) - if err != nil { - return nil, err - } - return Duration(dur), nil - case "uri": - return URI(data), nil - case "email": - return Email(data), nil - case "uuid": - return UUID(data), nil - case "uuid3": - return UUID3(data), nil - case "uuid4": - return UUID4(data), nil - case "uuid5": - return UUID5(data), nil - case "uuid7": - return UUID7(data), nil - case "hostname": - return Hostname(data), nil - case "ipv4": - return IPv4(data), nil - case "ipv6": - return IPv6(data), nil - case "cidr": - return CIDR(data), nil - case "mac": - return MAC(data), nil - case "isbn": - return ISBN(data), nil - case "isbn10": - return ISBN10(data), nil - case "isbn13": - return ISBN13(data), nil - case "creditcard": - return CreditCard(data), nil - case "ssn": - return SSN(data), 
nil - case "hexcolor": - return HexColor(data), nil - case "rgbcolor": - return RGBColor(data), nil - case "byte": - return Base64(data), nil - case "password": - return Password(data), nil - case "ulid": - ulid, err := ParseULID(data) - if err != nil { - return nil, err - } - return ulid, nil - default: - return nil, errors.InvalidTypeName(v.Name) - } + return decodeFormatFromString(v.Name, data) } } return data, nil } } -// Add adds a new format, return true if this was a new item instead of a replacement +// decodeFormatFromString decodes a string into the appropriate format type by name. +func decodeFormatFromString(name, data string) (any, error) { //nolint:gocyclo,cyclop // flat switch over format names, no real complexity + switch name { + case "date": + d, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) + if err != nil { + return nil, err + } + return Date(d), nil + case "datetime": + if len(data) == 0 { + return nil, fmt.Errorf("empty string is an invalid datetime format: %w", ErrFormat) + } + return ParseDateTime(data) + case "duration": + dur, err := ParseDuration(data) + if err != nil { + return nil, err + } + return Duration(dur), nil + case "uri": + return URI(data), nil + case "email": + return Email(data), nil + case "uuid": + return UUID(data), nil + case "uuid3": + return UUID3(data), nil + case "uuid4": + return UUID4(data), nil + case "uuid5": + return UUID5(data), nil + case "uuid7": + return UUID7(data), nil + case "hostname": + return Hostname(data), nil + case "ipv4": + return IPv4(data), nil + case "ipv6": + return IPv6(data), nil + case "cidr": + return CIDR(data), nil + case "mac": + return MAC(data), nil + case "isbn": + return ISBN(data), nil + case "isbn10": + return ISBN10(data), nil + case "isbn13": + return ISBN13(data), nil + case "creditcard": + return CreditCard(data), nil + case "ssn": + return SSN(data), nil + case "hexcolor": + return HexColor(data), nil + case "rgbcolor": + return RGBColor(data), nil + 
case "byte": + return Base64(data), nil + case "password": + return Password(data), nil + case "ulid": + ulid, err := ParseULID(data) + if err != nil { + return nil, err + } + return ulid, nil + default: + return nil, errors.InvalidTypeName(name) + } +} + +// Add adds a new format, return true if this was a new item instead of a replacement. func (f *defaultFormats) Add(name string, strfmt Format, validator Validator) bool { f.Lock() defer f.Unlock() @@ -178,7 +182,7 @@ func (f *defaultFormats) Add(name string, strfmt Format, validator Validator) bo return true } -// GetType gets the type for the specified name +// GetType gets the type for the specified name. func (f *defaultFormats) GetType(name string) (reflect.Type, bool) { f.Lock() defer f.Unlock() @@ -191,7 +195,7 @@ func (f *defaultFormats) GetType(name string) (reflect.Type, bool) { return nil, false } -// DelByName removes the format by the specified name, returns true when an item was actually removed +// DelByName removes the format by the specified name, returns true when an item was actually removed. func (f *defaultFormats) DelByName(name string) bool { f.Lock() defer f.Unlock() @@ -208,7 +212,7 @@ func (f *defaultFormats) DelByName(name string) bool { return false } -// DelByFormat removes the specified format, returns true when an item was actually removed +// DelByFormat removes the specified format, returns true when an item was actually removed. func (f *defaultFormats) DelByFormat(strfmt Format) bool { f.Lock() defer f.Unlock() @@ -228,7 +232,7 @@ func (f *defaultFormats) DelByFormat(strfmt Format) bool { return false } -// ContainsName returns true if this registry contains the specified name +// ContainsName returns true if this registry contains the specified name. 
func (f *defaultFormats) ContainsName(name string) bool { f.Lock() defer f.Unlock() @@ -241,7 +245,7 @@ func (f *defaultFormats) ContainsName(name string) bool { return false } -// ContainsFormat returns true if this registry contains the specified format +// ContainsFormat returns true if this registry contains the specified format. func (f *defaultFormats) ContainsFormat(strfmt Format) bool { f.Lock() defer f.Unlock() diff --git a/vendor/github.com/go-openapi/strfmt/go.work b/vendor/github.com/go-openapi/strfmt/go.work new file mode 100644 index 0000000000..288e7655d4 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/go.work @@ -0,0 +1,7 @@ +use ( + . + ./enable/mongodb + ./internal/testintegration +) + +go 1.24.0 diff --git a/vendor/github.com/go-openapi/strfmt/go.work.sum b/vendor/github.com/go-openapi/strfmt/go.work.sum new file mode 100644 index 0000000000..33dac969b6 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/go.work.sum @@ -0,0 +1,16 @@ +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30 h1:BHT1/DKsYDGkUgQ2jmMaozVcdk+sVfz0+1ZJq4zkWgw= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= +golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod 
h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/vendor/github.com/go-openapi/strfmt/ifaces.go b/vendor/github.com/go-openapi/strfmt/ifaces.go index 1b9e72c64e..6252ae98a8 100644 --- a/vendor/github.com/go-openapi/strfmt/ifaces.go +++ b/vendor/github.com/go-openapi/strfmt/ifaces.go @@ -13,7 +13,7 @@ import ( // Format represents a string format. // // All implementations of Format provide a string representation and text -// marshaling/unmarshaling interface to be used by encoders (e.g. encoding/json). +// marshaling/unmarshaling interface to be used by encoders (e.g. encoding/[json]). type Format interface { String() string encoding.TextMarshaler @@ -22,11 +22,11 @@ type Format interface { // Registry is a registry of string formats, with a validation method. 
type Registry interface { - Add(string, Format, Validator) bool - DelByName(string) bool - GetType(string) (reflect.Type, bool) - ContainsName(string) bool - Validates(string, string) bool - Parse(string, string) (any, error) + Add(name string, strfmt Format, validator Validator) bool + DelByName(name string) bool + GetType(name string) (reflect.Type, bool) + ContainsName(name string) bool + Validates(name, data string) bool + Parse(name, data string) (any, error) MapStructureHookFunc() mapstructure.DecodeHookFunc } diff --git a/vendor/github.com/go-openapi/strfmt/internal/bsonlite/codec.go b/vendor/github.com/go-openapi/strfmt/internal/bsonlite/codec.go new file mode 100644 index 0000000000..424f45466c --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/internal/bsonlite/codec.go @@ -0,0 +1,71 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package bsonlite provides a minimal BSON codec for strfmt types. +// +// This codec produces BSON output compatible with go.mongodb.org/mongo-driver/v2 +// (v2.5.0). It handles only the exact BSON patterns used by strfmt: +// single-key {"data": value} documents with string, DateTime, or ObjectID values. +// +// This package is intended to provide a backward-compatible API to users of +// go-openapi/strfmt. It is not intended to be maintained or to follow the +// evolutions of the official MongoDB drivers. For up-to-date MongoDB support, +// import "github.com/go-openapi/strfmt/enable/mongodb" to replace this codec +// with one backed by the real driver. +package bsonlite + +import "time" + +// Codec provides BSON document marshal/unmarshal for strfmt types. +// +// MarshalDoc encodes a single-key BSON document {"data": value}. +// The value must be one of: string, time.Time, or [12]byte (ObjectID). +// +// UnmarshalDoc decodes a BSON document and returns the "data" field's value. 
+// Returns one of: string, time.Time, or [12]byte depending on the BSON type. +type Codec interface { + MarshalDoc(value any) ([]byte, error) + UnmarshalDoc(data []byte) (any, error) +} + +// C is the active BSON codec. +// +//nolint:gochecknoglobals // replaceable codec, by design +var C Codec = liteCodec{} + +// Replace swaps the active BSON codec with the provided implementation. +// This is intended to be called from enable/mongodb's init(). +// +// Since [Replace] affects the global state of the package, it is not intended for concurrent use. +func Replace(c Codec) { + C = c +} + +// BSON type tags (from the BSON specification). +const ( + TypeString byte = 0x02 + TypeObjectID byte = 0x07 + TypeDateTime byte = 0x09 + TypeNull byte = 0x0A +) + +// ObjectIDSize is the size of a BSON ObjectID in bytes. +const ObjectIDSize = 12 + +// DateTimeToMillis converts a time.Time to BSON DateTime milliseconds. +func DateTimeToMillis(t time.Time) int64 { + const ( + millisec = 1000 + microsec = 1_000_000 + ) + return t.Unix()*millisec + int64(t.Nanosecond())/microsec +} + +// MillisToTime converts BSON DateTime milliseconds to time.Time. 
+func MillisToTime(millis int64) time.Time { + const ( + millisec = 1000 + nanosPerMs = 1_000_000 + ) + return time.Unix(millis/millisec, millis%millisec*nanosPerMs) +} diff --git a/vendor/github.com/go-openapi/strfmt/internal/bsonlite/lite.go b/vendor/github.com/go-openapi/strfmt/internal/bsonlite/lite.go new file mode 100644 index 0000000000..6b0e0e1c55 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/internal/bsonlite/lite.go @@ -0,0 +1,213 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package bsonlite + +import ( + "encoding/binary" + "errors" + "fmt" + "time" +) + +// liteCodec is a minimal BSON codec that handles only the patterns used by strfmt: +// single-key documents of the form {"data": } where value is a string, +// BSON DateTime (time.Time), or BSON ObjectID ([12]byte). +type liteCodec struct{} + +var _ Codec = liteCodec{} + +func (liteCodec) MarshalDoc(value any) ([]byte, error) { + switch v := value.(type) { + case string: + return marshalStringDoc(v), nil + case time.Time: + return marshalDateTimeDoc(v), nil + case [ObjectIDSize]byte: + return marshalObjectIDDoc(v), nil + default: + return nil, fmt.Errorf("bsonlite: unsupported value type %T: %w", value, errUnsupportedType) + } +} + +func (liteCodec) UnmarshalDoc(data []byte) (any, error) { + return unmarshalDoc(data) +} + +// BSON wire format helpers. 
+// +// Document: int32(size) + elements + 0x00 +// Element: byte(type) + cstring(key) + value +// String: int32(len+1) + bytes + 0x00 +// DateTime: int64 (LE, millis since epoch) +// ObjectID: [12]byte + +const dataKey = "data\x00" + +func marshalStringDoc(s string) []byte { + sBytes := []byte(s) + // doc_size(4) + type(1) + key("data\0"=5) + strlen(4) + string + \0(1) + doc_term(1) + docSize := 4 + 1 + len(dataKey) + 4 + len(sBytes) + 1 + 1 + + buf := make([]byte, docSize) + pos := 0 + + binary.LittleEndian.PutUint32(buf[pos:], uint32(docSize)) //nolint:gosec // size is computed from input, cannot overflow + pos += 4 + + buf[pos] = TypeString + pos++ + + pos += copy(buf[pos:], dataKey) + + binary.LittleEndian.PutUint32(buf[pos:], uint32(len(sBytes)+1)) //nolint:gosec // string length cannot overflow uint32 + pos += 4 + + pos += copy(buf[pos:], sBytes) + buf[pos] = 0 // string null terminator + pos++ + + buf[pos] = 0 // document terminator + + return buf +} + +func marshalDateTimeDoc(t time.Time) []byte { + // doc_size(4) + type(1) + key("data\0"=5) + int64(8) + doc_term(1) + const docSize = 4 + 1 + 5 + 8 + 1 + + buf := make([]byte, docSize) + pos := 0 + + binary.LittleEndian.PutUint32(buf[pos:], docSize) + pos += 4 + + buf[pos] = TypeDateTime + pos++ + + pos += copy(buf[pos:], dataKey) + + millis := DateTimeToMillis(t) + binary.LittleEndian.PutUint64(buf[pos:], uint64(millis)) //nolint:gosec // negative datetime millis are valid + // pos += 8 + + buf[docSize-1] = 0 // document terminator + + return buf +} + +func marshalObjectIDDoc(oid [ObjectIDSize]byte) []byte { + // doc_size(4) + type(1) + key("data\0"=5) + objectid(12) + doc_term(1) + const docSize = 4 + 1 + 5 + ObjectIDSize + 1 + + buf := make([]byte, docSize) + pos := 0 + + binary.LittleEndian.PutUint32(buf[pos:], docSize) + pos += 4 + + buf[pos] = TypeObjectID + pos++ + + pos += copy(buf[pos:], dataKey) + + copy(buf[pos:], oid[:]) + // pos += ObjectIDSize + + buf[docSize-1] = 0 // document terminator + + 
return buf +} + +var ( + errUnsupportedType = errors.New("bsonlite: unsupported type") + errDocTooShort = errors.New("bsonlite: document too short") + errDocSize = errors.New("bsonlite: document size mismatch") + errNoTerminator = errors.New("bsonlite: missing key terminator") + errTruncated = errors.New("bsonlite: truncated value") + errDataNotFound = errors.New("bsonlite: \"data\" field not found") +) + +func unmarshalDoc(raw []byte) (any, error) { + const minDocSize = 5 // int32(size) + terminator + + if len(raw) < minDocSize { + return nil, errDocTooShort + } + + docSize := int(binary.LittleEndian.Uint32(raw[:4])) + if docSize != len(raw) { + return nil, errDocSize + } + + pos := 4 + + for pos < docSize-1 { + if pos >= len(raw) { + return nil, errTruncated + } + typeByte := raw[pos] + pos++ + + // Read key (cstring: bytes until 0x00). + keyStart := pos + for pos < len(raw) && raw[pos] != 0 { + pos++ + } + if pos >= len(raw) { + return nil, errNoTerminator + } + key := string(raw[keyStart:pos]) + pos++ // skip null terminator + + val, newPos, err := readValue(typeByte, raw, pos) + if err != nil { + return nil, err + } + pos = newPos + + if key == "data" { + return val, nil + } + } + + return nil, errDataNotFound +} + +func readValue(typeByte byte, raw []byte, pos int) (any, int, error) { + switch typeByte { + case TypeString: + if pos+4 > len(raw) { + return nil, 0, errTruncated + } + strLen := int(binary.LittleEndian.Uint32(raw[pos:])) + pos += 4 + if pos+strLen > len(raw) || strLen < 1 { + return nil, 0, errTruncated + } + s := string(raw[pos : pos+strLen-1]) // exclude null terminator + return s, pos + strLen, nil + + case TypeObjectID: + if pos+ObjectIDSize > len(raw) { + return nil, 0, errTruncated + } + var oid [ObjectIDSize]byte + copy(oid[:], raw[pos:pos+ObjectIDSize]) + return oid, pos + ObjectIDSize, nil + + case TypeDateTime: + const dateTimeSize = 8 + if pos+dateTimeSize > len(raw) { + return nil, 0, errTruncated + } + millis := 
int64(binary.LittleEndian.Uint64(raw[pos:])) //nolint:gosec // negative datetime millis are valid + return MillisToTime(millis), pos + dateTimeSize, nil + + case TypeNull: + return nil, pos, nil + + default: + return nil, 0, fmt.Errorf("bsonlite: unsupported BSON type 0x%02x: %w", typeByte, errUnsupportedType) + } +} diff --git a/vendor/github.com/go-openapi/strfmt/mongo.go b/vendor/github.com/go-openapi/strfmt/mongo.go index 641fed9b1a..be904ffa5d 100644 --- a/vendor/github.com/go-openapi/strfmt/mongo.go +++ b/vendor/github.com/go-openapi/strfmt/mongo.go @@ -9,70 +9,89 @@ import ( "fmt" "time" - "github.com/oklog/ulid" - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/bson/bsontype" - bsonprim "go.mongodb.org/mongo-driver/bson/primitive" + "github.com/go-openapi/strfmt/internal/bsonlite" + "github.com/oklog/ulid/v2" ) +// bsonMarshaler is satisfied by types implementing MarshalBSON. +type bsonMarshaler interface { + MarshalBSON() ([]byte, error) +} + +// bsonUnmarshaler is satisfied by types implementing UnmarshalBSON. +type bsonUnmarshaler interface { + UnmarshalBSON(data []byte) error +} + +// bsonValueMarshaler is satisfied by types implementing MarshalBSONValue. +type bsonValueMarshaler interface { + MarshalBSONValue() (byte, []byte, error) +} + +// bsonValueUnmarshaler is satisfied by types implementing UnmarshalBSONValue. +type bsonValueUnmarshaler interface { + UnmarshalBSONValue(tpe byte, data []byte) error +} + +// Compile-time interface checks. 
var ( - _ bson.Marshaler = Date{} - _ bson.Unmarshaler = &Date{} - _ bson.Marshaler = Base64{} - _ bson.Unmarshaler = &Base64{} - _ bson.Marshaler = Duration(0) - _ bson.Unmarshaler = (*Duration)(nil) - _ bson.Marshaler = DateTime{} - _ bson.Unmarshaler = &DateTime{} - _ bson.Marshaler = ULID{} - _ bson.Unmarshaler = &ULID{} - _ bson.Marshaler = URI("") - _ bson.Unmarshaler = (*URI)(nil) - _ bson.Marshaler = Email("") - _ bson.Unmarshaler = (*Email)(nil) - _ bson.Marshaler = Hostname("") - _ bson.Unmarshaler = (*Hostname)(nil) - _ bson.Marshaler = IPv4("") - _ bson.Unmarshaler = (*IPv4)(nil) - _ bson.Marshaler = IPv6("") - _ bson.Unmarshaler = (*IPv6)(nil) - _ bson.Marshaler = CIDR("") - _ bson.Unmarshaler = (*CIDR)(nil) - _ bson.Marshaler = MAC("") - _ bson.Unmarshaler = (*MAC)(nil) - _ bson.Marshaler = Password("") - _ bson.Unmarshaler = (*Password)(nil) - _ bson.Marshaler = UUID("") - _ bson.Unmarshaler = (*UUID)(nil) - _ bson.Marshaler = UUID3("") - _ bson.Unmarshaler = (*UUID3)(nil) - _ bson.Marshaler = UUID4("") - _ bson.Unmarshaler = (*UUID4)(nil) - _ bson.Marshaler = UUID5("") - _ bson.Unmarshaler = (*UUID5)(nil) - _ bson.Marshaler = UUID7("") - _ bson.Unmarshaler = (*UUID7)(nil) - _ bson.Marshaler = ISBN("") - _ bson.Unmarshaler = (*ISBN)(nil) - _ bson.Marshaler = ISBN10("") - _ bson.Unmarshaler = (*ISBN10)(nil) - _ bson.Marshaler = ISBN13("") - _ bson.Unmarshaler = (*ISBN13)(nil) - _ bson.Marshaler = CreditCard("") - _ bson.Unmarshaler = (*CreditCard)(nil) - _ bson.Marshaler = SSN("") - _ bson.Unmarshaler = (*SSN)(nil) - _ bson.Marshaler = HexColor("") - _ bson.Unmarshaler = (*HexColor)(nil) - _ bson.Marshaler = RGBColor("") - _ bson.Unmarshaler = (*RGBColor)(nil) - _ bson.Marshaler = ObjectId{} - _ bson.Unmarshaler = &ObjectId{} - - _ bson.ValueMarshaler = DateTime{} - _ bson.ValueUnmarshaler = &DateTime{} - _ bson.ValueMarshaler = ObjectId{} - _ bson.ValueUnmarshaler = &ObjectId{} + _ bsonMarshaler = Date{} + _ bsonUnmarshaler = &Date{} + _ 
bsonMarshaler = Base64{} + _ bsonUnmarshaler = &Base64{} + _ bsonMarshaler = Duration(0) + _ bsonUnmarshaler = (*Duration)(nil) + _ bsonMarshaler = DateTime{} + _ bsonUnmarshaler = &DateTime{} + _ bsonMarshaler = ULID{} + _ bsonUnmarshaler = &ULID{} + _ bsonMarshaler = URI("") + _ bsonUnmarshaler = (*URI)(nil) + _ bsonMarshaler = Email("") + _ bsonUnmarshaler = (*Email)(nil) + _ bsonMarshaler = Hostname("") + _ bsonUnmarshaler = (*Hostname)(nil) + _ bsonMarshaler = IPv4("") + _ bsonUnmarshaler = (*IPv4)(nil) + _ bsonMarshaler = IPv6("") + _ bsonUnmarshaler = (*IPv6)(nil) + _ bsonMarshaler = CIDR("") + _ bsonUnmarshaler = (*CIDR)(nil) + _ bsonMarshaler = MAC("") + _ bsonUnmarshaler = (*MAC)(nil) + _ bsonMarshaler = Password("") + _ bsonUnmarshaler = (*Password)(nil) + _ bsonMarshaler = UUID("") + _ bsonUnmarshaler = (*UUID)(nil) + _ bsonMarshaler = UUID3("") + _ bsonUnmarshaler = (*UUID3)(nil) + _ bsonMarshaler = UUID4("") + _ bsonUnmarshaler = (*UUID4)(nil) + _ bsonMarshaler = UUID5("") + _ bsonUnmarshaler = (*UUID5)(nil) + _ bsonMarshaler = UUID7("") + _ bsonUnmarshaler = (*UUID7)(nil) + _ bsonMarshaler = ISBN("") + _ bsonUnmarshaler = (*ISBN)(nil) + _ bsonMarshaler = ISBN10("") + _ bsonUnmarshaler = (*ISBN10)(nil) + _ bsonMarshaler = ISBN13("") + _ bsonUnmarshaler = (*ISBN13)(nil) + _ bsonMarshaler = CreditCard("") + _ bsonUnmarshaler = (*CreditCard)(nil) + _ bsonMarshaler = SSN("") + _ bsonUnmarshaler = (*SSN)(nil) + _ bsonMarshaler = HexColor("") + _ bsonUnmarshaler = (*HexColor)(nil) + _ bsonMarshaler = RGBColor("") + _ bsonUnmarshaler = (*RGBColor)(nil) + _ bsonMarshaler = ObjectId{} + _ bsonUnmarshaler = &ObjectId{} + + _ bsonValueMarshaler = DateTime{} + _ bsonValueUnmarshaler = &DateTime{} + _ bsonValueMarshaler = ObjectId{} + _ bsonValueUnmarshaler = &ObjectId{} ) const ( @@ -82,99 +101,105 @@ const ( ) func (d Date) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": d.String()}) + return bsonlite.C.MarshalDoc(d.String()) } func (d 
*Date) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { return err } - if data, ok := m["data"].(string); ok { - rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) - if err != nil { - return err - } - *d = Date(rd) - return nil + s, ok := v.(string) + if !ok { + return fmt.Errorf("couldn't unmarshal bson bytes value as Date: %w", ErrFormat) } - return fmt.Errorf("couldn't unmarshal bson bytes value as Date: %w", ErrFormat) + rd, err := time.ParseInLocation(RFC3339FullDate, s, DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(rd) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (b Base64) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": b.String()}) + return bsonlite.C.MarshalDoc(b.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. 
func (b *Base64) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { return err } - if bd, ok := m["data"].(string); ok { - vb, err := base64.StdEncoding.DecodeString(bd) - if err != nil { - return err - } - *b = Base64(vb) - return nil + s, ok := v.(string) + if !ok { + return fmt.Errorf("couldn't unmarshal bson bytes as base64: %w", ErrFormat) + } + + vb, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return err } - return fmt.Errorf("couldn't unmarshal bson bytes as base64: %w", ErrFormat) + *b = Base64(vb) + return nil } func (d Duration) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": d.String()}) + return bsonlite.C.MarshalDoc(d.String()) } func (d *Duration) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { return err } - if data, ok := m["data"].(string); ok { - rd, err := ParseDuration(data) - if err != nil { - return err - } - *d = Duration(rd) - return nil + s, ok := v.(string) + if !ok { + return fmt.Errorf("couldn't unmarshal bson bytes value as Duration: %w", ErrFormat) } - return fmt.Errorf("couldn't unmarshal bson bytes value as Date: %w", ErrFormat) + rd, err := ParseDuration(s) + if err != nil { + return err + } + *d = Duration(rd) + return nil } -// MarshalBSON renders the DateTime as a BSON document +// MarshalBSON renders the [DateTime] as a BSON document. func (t DateTime) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": t}) + tNorm := NormalizeTimeForMarshal(time.Time(t)) + return bsonlite.C.MarshalDoc(tNorm) } -// UnmarshalBSON reads the DateTime from a BSON document +// UnmarshalBSON reads the [DateTime] from a BSON document. 
func (t *DateTime) UnmarshalBSON(data []byte) error { - var obj struct { - Data DateTime - } - - if err := bson.Unmarshal(data, &obj); err != nil { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { return err } - *t = obj.Data - + tv, ok := v.(time.Time) + if !ok { + return fmt.Errorf("couldn't unmarshal bson bytes value as DateTime: %w", ErrFormat) + } + *t = DateTime(tv) return nil } +// MarshalBSONValue marshals a [DateTime] as a BSON DateTime value (type 0x09), +// an int64 representing milliseconds since epoch. +// // MarshalBSONValue is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. +// into a BSON document represented as bytes. // -// Marshals a DateTime as a bson.TypeDateTime, an int64 representing -// milliseconds since epoch. -func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { +// The bytes returned must be a valid BSON document if the error is nil. +func (t DateTime) MarshalBSONValue() (byte, []byte, error) { // UnixNano cannot be used directly, the result of calling UnixNano on the zero // Time is undefined. Thats why we use time.Nanosecond() instead. @@ -183,15 +208,12 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { buf := make([]byte, bsonDateTimeSize) binary.LittleEndian.PutUint64(buf, uint64(i64)) //nolint:gosec // it's okay to handle negative int64 this way - return bson.TypeDateTime, buf, nil + return bsonlite.TypeDateTime, buf, nil } -// UnmarshalBSONValue is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. 
-func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { - if tpe == bson.TypeNull { +// UnmarshalBSONValue unmarshals a BSON DateTime value into this [DateTime]. +func (t *DateTime) UnmarshalBSONValue(tpe byte, data []byte) error { + if tpe == bsonlite.TypeNull { *t = DateTime{} return nil } @@ -206,440 +228,373 @@ func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u ULID) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *ULID) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { return err } - if ud, ok := m["data"].(string); ok { - id, err := ulid.ParseStrict(ud) - if err != nil { - return fmt.Errorf("couldn't parse bson bytes as ULID: %w: %w", err, ErrFormat) - } - u.ULID = id - return nil + s, ok := v.(string) + if !ok { + return fmt.Errorf("couldn't unmarshal bson bytes as ULID: %w", ErrFormat) } - return fmt.Errorf("couldn't unmarshal bson bytes as ULID: %w", ErrFormat) + + id, err := ulid.ParseStrict(s) + if err != nil { + return fmt.Errorf("couldn't parse bson bytes as ULID: %w: %w", err, ErrFormat) + } + u.ULID = id + return nil } -// MarshalBSON document from this value +// unmarshalBSONString is a helper for string-based strfmt types. +func unmarshalBSONString(data []byte, typeName string) (string, error) { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { + return "", err + } + s, ok := v.(string) + if !ok { + return "", fmt.Errorf("couldn't unmarshal bson bytes as %s: %w", typeName, ErrFormat) + } + return s, nil +} + +// MarshalBSON document from this value. 
func (u URI) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *URI) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "uri") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = URI(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as uri: %w", ErrFormat) + *u = URI(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (e Email) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": e.String()}) + return bsonlite.C.MarshalDoc(e.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (e *Email) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "email") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *e = Email(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as email: %w", ErrFormat) + *e = Email(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (h Hostname) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": h.String()}) + return bsonlite.C.MarshalDoc(h.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. 
func (h *Hostname) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "hostname") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *h = Hostname(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as hostname: %w", ErrFormat) + *h = Hostname(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u IPv4) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *IPv4) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "ipv4") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = IPv4(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as ipv4: %w", ErrFormat) + *u = IPv4(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u IPv6) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *IPv6) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "ipv6") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = IPv6(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as ipv6: %w", ErrFormat) + *u = IPv6(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. 
func (u CIDR) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *CIDR) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "CIDR") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = CIDR(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as CIDR: %w", ErrFormat) + *u = CIDR(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u MAC) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *MAC) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "MAC") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = MAC(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as MAC: %w", ErrFormat) + *u = MAC(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (r Password) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": r.String()}) + return bsonlite.C.MarshalDoc(r.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. 
func (r *Password) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "Password") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *r = Password(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as Password: %w", ErrFormat) + *r = Password(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u UUID) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *UUID) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "UUID") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = UUID(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as UUID: %w", ErrFormat) + *u = UUID(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u UUID3) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *UUID3) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "UUID3") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = UUID3(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as UUID3: %w", ErrFormat) + *u = UUID3(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. 
func (u UUID4) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *UUID4) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "UUID4") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = UUID4(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as UUID4: %w", ErrFormat) + *u = UUID4(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u UUID5) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *UUID5) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "UUID5") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = UUID5(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as UUID5: %w", ErrFormat) + *u = UUID5(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u UUID7) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. 
func (u *UUID7) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "UUID7") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = UUID7(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as UUID7: %w", ErrFormat) + *u = UUID7(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u ISBN) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *ISBN) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "ISBN") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = ISBN(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as ISBN: %w", ErrFormat) + *u = ISBN(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u ISBN10) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *ISBN10) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "ISBN10") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = ISBN10(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as ISBN10: %w", ErrFormat) + *u = ISBN10(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. 
func (u ISBN13) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *ISBN13) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "ISBN13") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = ISBN13(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as ISBN13: %w", ErrFormat) + *u = ISBN13(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u CreditCard) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *CreditCard) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "CreditCard") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = CreditCard(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as CreditCard: %w", ErrFormat) + *u = CreditCard(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u SSN) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. 
func (u *SSN) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "SSN") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = SSN(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as SSN: %w", ErrFormat) + *u = SSN(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (h HexColor) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": h.String()}) + return bsonlite.C.MarshalDoc(h.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (h *HexColor) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "HexColor") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *h = HexColor(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as HexColor: %w", ErrFormat) + *h = HexColor(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (r RGBColor) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": r.String()}) + return bsonlite.C.MarshalDoc(r.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (r *RGBColor) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "RGBColor") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *r = RGBColor(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as RGBColor: %w", ErrFormat) + *r = RGBColor(s) + return nil } -// MarshalBSON renders the object id as a BSON document +// MarshalBSON renders the object id as a BSON document. 
func (id ObjectId) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)}) + return bsonlite.C.MarshalDoc([12]byte(id)) } -// UnmarshalBSON reads the objectId from a BSON document +// UnmarshalBSON reads the objectId from a BSON document. func (id *ObjectId) UnmarshalBSON(data []byte) error { - var obj struct { - Data bsonprim.ObjectID - } - if err := bson.Unmarshal(data, &obj); err != nil { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { return err } - *id = ObjectId(obj.Data) + + oid, ok := v.([12]byte) + if !ok { + return fmt.Errorf("couldn't unmarshal bson bytes as ObjectId: %w", ErrFormat) + } + *id = ObjectId(oid) return nil } -// MarshalBSONValue is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. -func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { - oid := bsonprim.ObjectID(id) - return bson.TypeObjectID, oid[:], nil -} - -// UnmarshalBSONValue is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. -func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error { - var oid bsonprim.ObjectID +// MarshalBSONValue marshals the [ObjectId] as a raw BSON ObjectID value. +func (id ObjectId) MarshalBSONValue() (byte, []byte, error) { + oid := [12]byte(id) + return bsonlite.TypeObjectID, oid[:], nil +} + +// UnmarshalBSONValue unmarshals a raw BSON ObjectID value into this [ObjectId]. 
+func (id *ObjectId) UnmarshalBSONValue(_ byte, data []byte) error { + var oid [12]byte copy(oid[:], data) *id = ObjectId(oid) return nil diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go index 8085aaf696..1fde8c6b11 100644 --- a/vendor/github.com/go-openapi/strfmt/time.go +++ b/vendor/github.com/go-openapi/strfmt/time.go @@ -12,14 +12,12 @@ import ( "time" ) -var ( - // UnixZero sets the zero unix UTC timestamp we want to compare against. - // - // Unix 0 for an EST timezone is not equivalent to a UTC timezone. - UnixZero = time.Unix(0, 0).UTC() -) +// UnixZero sets the zero unix UTC timestamp we want to compare against. +// +// Unix 0 for an EST timezone is not equivalent to a UTC timezone. +var UnixZero = time.Unix(0, 0).UTC() //nolint:gochecknoglobals // package-level sentinel value for unix epoch -func init() { +func init() { //nolint:gochecknoinits // registers datetime format in the default registry dt := DateTime{} Default.Add("datetime", &dt, IsDateTime) } @@ -50,38 +48,48 @@ func IsDateTime(str string) bool { } const ( - // RFC3339Millis represents a ISO8601 format to millis instead of to nanos + // RFC3339Millis represents a ISO8601 format to millis instead of to nanos. RFC3339Millis = "2006-01-02T15:04:05.000Z07:00" - // RFC3339MillisNoColon represents a ISO8601 format to millis instead of to nanos + // RFC3339MillisNoColon represents a ISO8601 format to millis instead of to nanos. RFC3339MillisNoColon = "2006-01-02T15:04:05.000Z0700" - // RFC3339Micro represents a ISO8601 format to micro instead of to nano + // RFC3339Micro represents a ISO8601 format to micro instead of to nano. RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" - // RFC3339MicroNoColon represents a ISO8601 format to micro instead of to nano + // RFC3339MicroNoColon represents a ISO8601 format to micro instead of to nano. 
RFC3339MicroNoColon = "2006-01-02T15:04:05.000000Z0700" - // ISO8601LocalTime represents a ISO8601 format to ISO8601 in local time (no timezone) + // ISO8601LocalTime represents a ISO8601 format to ISO8601 in local time (no timezone). ISO8601LocalTime = "2006-01-02T15:04:05" - // ISO8601TimeWithReducedPrecision represents a ISO8601 format with reduced precision (dropped secs) + // ISO8601TimeWithReducedPrecision represents a ISO8601 format with reduced precision (dropped secs). ISO8601TimeWithReducedPrecision = "2006-01-02T15:04Z" - // ISO8601TimeWithReducedPrecisionLocaltime represents a ISO8601 format with reduced precision and no timezone (dropped seconds + no timezone) + // ISO8601TimeWithReducedPrecisionLocaltime represents a ISO8601 format with reduced precision and no timezone (dropped seconds + no timezone). ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04" // ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern. ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05" - // ISO8601TimeUniversalSortableDateTimePatternShortForm is the short form of ISO8601TimeUniversalSortableDateTimePattern + // ISO8601TimeUniversalSortableDateTimePatternShortForm is the short form of [ISO8601TimeUniversalSortableDateTimePattern]. 
ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02" // DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6 DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$` ) +//nolint:gochecknoglobals // package-level configuration for datetime parsing and marshaling var ( rxDateTime = regexp.MustCompile(DateTimePattern) - // DateTimeFormats is the collection of formats used by ParseDateTime() - DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm} + // DateTimeFormats is the collection of formats used by [ParseDateTime](). + DateTimeFormats = []string{ + RFC3339Micro, RFC3339MicroNoColon, + RFC3339Millis, RFC3339MillisNoColon, + time.RFC3339, time.RFC3339Nano, + ISO8601LocalTime, + ISO8601TimeWithReducedPrecision, + ISO8601TimeWithReducedPrecisionLocaltime, + ISO8601TimeUniversalSortableDateTimePattern, + ISO8601TimeUniversalSortableDateTimePatternShortForm, + } - // MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds) + // MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds). MarshalFormat = RFC3339Millis - // NormalizeTimeForMarshal provides a normalization function on time before marshalling (e.g. time.UTC). + // NormalizeTimeForMarshal provides a normalization function on time before marshaling (e.g. [time.UTC]). // By default, the time value is not changed. 
NormalizeTimeForMarshal = func(t time.Time) time.Time { return t } @@ -89,7 +97,7 @@ var ( DefaultTimeLocation = time.UTC ) -// ParseDateTime parses a string that represents an ISO8601 time or a unix epoch +// ParseDateTime parses a string that represents an ISO8601 time or a unix epoch. func ParseDateTime(data string) (DateTime, error) { if data == "" { return NewDateTime(), nil @@ -112,46 +120,46 @@ func ParseDateTime(data string) (DateTime, error) { // Most APIs we encounter want either millisecond or second precision times. // This just tries to make it worry-free. // -// swagger:strfmt date-time +// swagger:strfmt date-time. type DateTime time.Time // NewDateTime is a representation of the UNIX epoch (January 1, 1970 00:00:00 UTC) for the [DateTime] type. // // Notice that this is not the zero value of the [DateTime] type. // -// You may use [DateTime.IsUNIXZero] to check against this value. +// You may use [DateTime.IsUnixZero] to check against this value. func NewDateTime() DateTime { return DateTime(time.Unix(0, 0).UTC()) } // MakeDateTime is a representation of the zero value of the [DateTime] type (January 1, year 1, 00:00:00 UTC). // -// You may use [Datetime.IsZero] to check against this value. +// You may use [DateTime.IsZero] to check against this value. func MakeDateTime() DateTime { return DateTime(time.Time{}) } -// String converts this time to a string +// String converts this time to a string. func (t DateTime) String() string { return NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat) } -// IsZero returns whether the date time is a zero value +// IsZero returns whether the date time is a zero value. func (t DateTime) IsZero() bool { return time.Time(t).IsZero() } -// IsUnixZero returns whether the date time is equivalent to time.Unix(0, 0).UTC(). +// IsUnixZero returns whether the date time is equivalent to [time.Unix](0, 0).UTC(). 
func (t DateTime) IsUnixZero() bool { return time.Time(t).Equal(UnixZero) } -// MarshalText implements the text marshaller interface +// MarshalText implements the text marshaler interface. func (t DateTime) MarshalText() ([]byte, error) { return []byte(t.String()), nil } -// UnmarshalText implements the text unmarshaller interface +// UnmarshalText implements the text unmarshaler interface. func (t *DateTime) UnmarshalText(text []byte) error { tt, err := ParseDateTime(string(text)) if err != nil { @@ -161,9 +169,9 @@ func (t *DateTime) UnmarshalText(text []byte) error { return nil } -// Scan scans a DateTime value from database driver type. +// Scan scans a [DateTime] value from database driver type. func (t *DateTime) Scan(raw any) error { - // TODO: case int64: and case float64: ? + // Proposal for enhancement: case int64: and case float64: ? switch v := raw.(type) { case []byte: return t.UnmarshalText(v) @@ -180,17 +188,17 @@ func (t *DateTime) Scan(raw any) error { return nil } -// Value converts DateTime to a primitive value ready to written to a database. +// Value converts [DateTime] to a primitive value ready to written to a database. func (t DateTime) Value() (driver.Value, error) { return driver.Value(t.String()), nil } -// MarshalJSON returns the DateTime as JSON +// MarshalJSON returns the [DateTime] as JSON. func (t DateTime) MarshalJSON() ([]byte, error) { return json.Marshal(NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat)) } -// UnmarshalJSON sets the DateTime from JSON +// UnmarshalJSON sets the [DateTime] from JSON. func (t *DateTime) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -213,7 +221,7 @@ func (t *DateTime) DeepCopyInto(out *DateTime) { *out = *t } -// DeepCopy copies the receiver into a new DateTime. +// DeepCopy copies the receiver into a new [DateTime]. 
func (t *DateTime) DeepCopy() *DateTime { if t == nil { return nil @@ -233,12 +241,12 @@ func (t *DateTime) GobDecode(data []byte) error { return t.UnmarshalBinary(data) } -// MarshalBinary implements the encoding.BinaryMarshaler interface. +// MarshalBinary implements the encoding.[encoding.BinaryMarshaler] interface. func (t DateTime) MarshalBinary() ([]byte, error) { return NormalizeTimeForMarshal(time.Time(t)).MarshalBinary() } -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// UnmarshalBinary implements the encoding.[encoding.BinaryUnmarshaler] interface. func (t *DateTime) UnmarshalBinary(data []byte) error { var original time.Time @@ -252,7 +260,7 @@ func (t *DateTime) UnmarshalBinary(data []byte) error { return nil } -// Equal checks if two DateTime instances are equal using time.Time's Equal method +// Equal checks if two [DateTime] instances are equal using [time.Time]'s Equal method. func (t DateTime) Equal(t2 DateTime) bool { return time.Time(t).Equal(time.Time(t2)) } diff --git a/vendor/github.com/go-openapi/strfmt/ulid.go b/vendor/github.com/go-openapi/strfmt/ulid.go index 85c5b53e6c..f05d22c518 100644 --- a/vendor/github.com/go-openapi/strfmt/ulid.go +++ b/vendor/github.com/go-openapi/strfmt/ulid.go @@ -11,23 +11,25 @@ import ( "io" "sync" - "github.com/oklog/ulid" + "github.com/oklog/ulid/v2" ) -// ULID represents a ulid string format -// ref: +// ULID represents a [ulid] string format. +// +// # Reference // // https://github.com/ulid/spec // -// impl: +// # Implementation // // https://github.com/oklog/ulid // -// swagger:strfmt ulid +// swagger:strfmt ulid. type ULID struct { ulid.ULID } +//nolint:gochecknoglobals // package-level ULID configuration and overridable scan/value functions var ( ulidEntropyPool = sync.Pool{ New: func() any { @@ -35,6 +37,7 @@ var ( }, } + // ULIDScanDefaultFunc is the default implementation for scanning a [ULID] from a database driver value. 
ULIDScanDefaultFunc = func(raw any) (ULID, error) { u := NewULIDZero() switch x := raw.(type) { @@ -54,45 +57,44 @@ var ( return u, fmt.Errorf("cannot sql.Scan() strfmt.ULID from: %#v: %w", raw, ulid.ErrScanValue) } - // ULIDScanOverrideFunc allows you to override the Scan method of the ULID type + // ULIDScanOverrideFunc allows you to override the Scan method of the [ULID] type. ULIDScanOverrideFunc = ULIDScanDefaultFunc + // ULIDValueDefaultFunc is the default implementation for converting a [ULID] to a database driver value. ULIDValueDefaultFunc = func(u ULID) (driver.Value, error) { return driver.Value(u.String()), nil } - // ULIDValueOverrideFunc allows you to override the Value method of the ULID type + // ULIDValueOverrideFunc allows you to override the Value method of the [ULID] type. ULIDValueOverrideFunc = ULIDValueDefaultFunc ) -func init() { - // register formats in the default registry: - // - ulid +func init() { //nolint:gochecknoinits // registers ulid format in the default registry ulid := ULID{} Default.Add("ulid", &ulid, IsULID) } -// IsULID checks if provided string is ULID format -// Be noticed that this function considers overflowed ULID as non-ulid. -// For more details see https://github.com/ulid/spec +// IsULID checks if provided string is [ULID] format +// Be noticed that this function considers overflowed [ULID] as non-[ulid]. +// For more details see https://github.com/[ulid]/spec func IsULID(str string) bool { _, err := ulid.ParseStrict(str) return err == nil } -// ParseULID parses a string that represents an valid ULID +// ParseULID parses a string that represents an valid [ULID]. func ParseULID(str string) (ULID, error) { var u ULID return u, u.UnmarshalText([]byte(str)) } -// NewULIDZero returns a zero valued ULID type +// NewULIDZero returns a zero valued [ULID] type. 
func NewULIDZero() ULID { return ULID{} } -// NewULID generates new unique ULID value and a error if any +// NewULID generates new unique [ULID] value and a error if any. func NewULID() (ULID, error) { var u ULID @@ -112,22 +114,22 @@ func NewULID() (ULID, error) { return u, nil } -// GetULID returns underlying instance of ULID +// GetULID returns underlying instance of [ULID]. func (u *ULID) GetULID() any { return u.ULID } -// MarshalText returns this instance into text +// MarshalText returns this instance into text. func (u ULID) MarshalText() ([]byte, error) { return u.ULID.MarshalText() } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *ULID) UnmarshalText(data []byte) error { // validation is performed later on return u.ULID.UnmarshalText(data) } -// Scan reads a value from a database driver +// Scan reads a value from a database driver. func (u *ULID) Scan(raw any) error { ul, err := ULIDScanOverrideFunc(raw) if err == nil { @@ -136,7 +138,7 @@ func (u *ULID) Scan(raw any) error { return err } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u ULID) Value() (driver.Value, error) { return ULIDValueOverrideFunc(u) } @@ -145,12 +147,12 @@ func (u ULID) String() string { return u.ULID.String() } -// MarshalJSON returns the ULID as JSON +// MarshalJSON returns the [ULID] as JSON. func (u ULID) MarshalJSON() ([]byte, error) { return json.Marshal(u.String()) } -// UnmarshalJSON sets the ULID from JSON +// UnmarshalJSON sets the [ULID] from JSON. func (u *ULID) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -172,7 +174,7 @@ func (u *ULID) DeepCopyInto(out *ULID) { *out = *u } -// DeepCopy copies the receiver into a new ULID. +// DeepCopy copies the receiver into a new [ULID]. 
func (u *ULID) DeepCopy() *ULID { if u == nil { return nil @@ -192,17 +194,17 @@ func (u *ULID) GobDecode(data []byte) error { return u.ULID.UnmarshalBinary(data) } -// MarshalBinary implements the encoding.BinaryMarshaler interface. +// MarshalBinary implements the encoding.[encoding.BinaryMarshaler] interface. func (u ULID) MarshalBinary() ([]byte, error) { return u.ULID.MarshalBinary() } -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// UnmarshalBinary implements the encoding.[encoding.BinaryUnmarshaler] interface. func (u *ULID) UnmarshalBinary(data []byte) error { return u.ULID.UnmarshalBinary(data) } -// Equal checks if two ULID instances are equal by their underlying type +// Equal checks if two [ULID] instances are equal by their underlying type. func (u ULID) Equal(other ULID) bool { return u.ULID == other.ULID } diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore index c4b1b64f04..a0a95a96b3 100644 --- a/vendor/github.com/go-openapi/swag/.gitignore +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -3,3 +3,5 @@ vendor Godeps .idea *.out +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md index 9322b065e3..bac878f216 100644 --- a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. 
## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/swag/CONTRIBUTORS.md b/vendor/github.com/go-openapi/swag/CONTRIBUTORS.md new file mode 100644 index 0000000000..bc76fe820c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/CONTRIBUTORS.md @@ -0,0 +1,36 @@ +# Contributors + +- Repository: ['go-openapi/swag'] + +| Total Contributors | Total Contributions | +| --- | --- | +| 24 | 235 | + +| Username | All Time Contribution Count | All Commits | +| --- | --- | --- | +| @fredbi | 105 | | +| @casualjim | 98 | | +| @alexandear | 4 | | +| @orisano | 3 | | +| @reinerRubin | 2 | | +| @n-inja | 2 | | +| @nitinmohan87 | 2 | | +| @Neo2308 | 2 | | +| @michaelbowler-form3 | 2 | | +| @ujjwalsh | 1 | | +| @griffin-stewie | 1 | | +| @POD666 | 1 | | +| @pytlesk4 | 1 | | +| @shirou | 1 | | +| @seanprince | 1 | | +| @petrkotas | 1 | | +| @mszczygiel | 1 | | +| @sosiska | 1 | | +| @kzys | 1 | | +| @faguirre1 | 1 | | +| @posener | 1 | | +| @diego-fu-hs | 1 | | +| @davidalpert | 1 | | +| @Xe | 1 | | + + _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/swag/README.md 
b/vendor/github.com/go-openapi/swag/README.md index 371fd55fdc..834eb2ffb9 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,26 +1,60 @@ -# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) +# Swag + + +[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url] + + + +[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] + + +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](https://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) +--- -Package `swag` contains a bunch of helper functions for go-openapi and go-swagger projects. +A bunch of helper functions for go-openapi and go-swagger projects. You may also use it standalone for your projects. > **NOTE** > `swag` is one of the foundational building blocks of the go-openapi initiative. +> > Most repositories in `github.com/go-openapi/...` depend on it in some way. 
> And so does our CLI tool `github.com/go-swagger/go-swagger`, > as well as the code generated by this tool. * [Contents](#contents) * [Dependencies](#dependencies) -* [Release Notes](#release-notes) +* [Change log](#change-log) * [Licensing](#licensing) * [Note to contributors](#note-to-contributors) -* [TODOs, suggestions and plans](#todos-suggestions-and-plans) +* [Roadmap](#roadmap) + +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + +## Status + +API is stable. + +## Import this library in your project + +```cmd +go get github.com/go-openapi/swag/{module} +``` + +Or for backward compatibility: + +```cmd +go get github.com/go-openapi/swag +``` ## Contents @@ -36,7 +70,7 @@ Child modules will continue to evolve and some new ones may be added in the futu | `cmdutils` | utilities to work with CLIs || | `conv` | type conversion utilities | convert between values and pointers for any types
convert from string to builtin types (wraps `strconv`)
require `./typeutils` (test dependency)
| | `fileutils` | file utilities | | -| `jsonname` | JSON utilities | infer JSON names from `go` properties
| +| `jsonname` | JSON utilities | infer JSON names from `go` properties
| | `jsonutils` | JSON utilities | fast json concatenation
read and write JSON from and to dynamic `go` data structures
~require `github.com/mailru/easyjson`~
| | `loading` | file loading | load from file or http
require `./yamlutils`
| | `mangling` | safe name generation | name mangling for `go`
| @@ -49,84 +83,19 @@ Child modules will continue to evolve and some new ones may be added in the futu ## Dependencies -The root module `github.com/go-openapi/swag` at the repo level maintains a few +The root module `github.com/go-openapi/swag` at the repo level maintains a few dependencies outside of the standard library. * YAML utilities depend on `go.yaml.in/yaml/v3` * JSON utilities depend on their registered adapter module: - * by default, only the standard library is used - * `github.com/mailru/easyjson` is now only a dependency for module - `github.com/go-openapi/swag/jsonutils/adapters/easyjson/json`, - for users willing to import that module. - * integration tests and benchmarks use all the dependencies are published as their own module + * by default, only the standard library is used + * `github.com/mailru/easyjson` is now only a dependency for module + `github.com/go-openapi/swag/jsonutils/adapters/easyjson/json`, + for users willing to import that module. + * integration tests and benchmarks use all the dependencies are published as their own module * other dependencies are test dependencies drawn from `github.com/stretchr/testify` -## Release notes - -### v0.25.4 - -** mangling** - -Bug fix - -* [x] mangler may panic with pluralized overlapping initialisms - -Tests - -* [x] introduced fuzz tests - -### v0.25.3 - -** mangling** - -Bug fix - -* [x] mangler may panic with pluralized initialisms - -### v0.25.2 - -Minor changes due to internal maintenance that don't affect the behavior of the library. - -* [x] removed indirect test dependencies by switching all tests to `go-openapi/testify`, - a fork of `stretch/testify` with zero-dependencies. -* [x] improvements to CI to catch test reports. -* [x] modernized licensing annotations in source code, using the more compact SPDX annotations - rather than the full license terms. 
-* [x] simplified a bit JSON & YAML testing by using newly available assertions -* started the journey to an OpenSSF score card badge: - * [x] explicited permissions in CI workflows - * [x] published security policy - * pinned dependencies to github actions - * introduced fuzzing in tests - -### v0.25.1 - -* fixes a data race that could occur when using the standard library implementation of a JSON ordered map - -### v0.25.0 - -**New with this release**: - -* requires `go1.24`, as iterators are being introduced -* removes the dependency to `mailru/easyjson` by default (#68) - * functionality remains the same, but performance may somewhat degrade for applications - that relied on `easyjson` - * users of the JSON or YAML utilities who want to use `easyjson` as their preferred JSON serializer library - will be able to do so by registering this the corresponding JSON adapter at runtime. See below. - * ordered keys in JSON and YAML objects: this feature used to rely solely on `easyjson`. - With this release, an implementation relying on the standard `encoding/json` is provided. - * an independent [benchmark](./jsonutils/adapters/testintegration/benchmarks/README.md) to compare the different adapters -* improves the "float is integer" check (`conv.IsFloat64AJSONInteger`) (#59) -* removes the _direct_ dependency to `gopkg.in/yaml.v3` (indirect dependency is still incurred through `stretchr/testify`) (#127) -* exposed `conv.IsNil()` (previously kept private): a safe nil check (accounting for the "non-nil interface with nil value" nonsensical go trick) - -**What coming next?** - -Moving forward, we want to : -* provide an implementation of the JSON adapter based on `encoding/json/v2`, for `go1.25` builds. -* provide similar implementations for `goccy/go-json` and `jsoniterator/go`, and perhaps some other - similar libraries may be interesting too. - +## Usage **How to explicitly register a dependency at runtime**? @@ -150,90 +119,106 @@ or fallback to the standard library. 
For more details, you may also look at our [integration tests](jsonutils/adapters/testintegration/integration_suite_test.go#29). -### v0.24.0 +--- -With this release, we have largely modernized the API of `swag`: +## Note to contributors -* The traditional `swag` API is still supported: code that imports `swag` will still - compile and work the same. -* A deprecation notice is published to encourage consumers of this library to adopt - the newer API -* **Deprecation notice** - * configuration through global variables is now deprecated, in favor of options passed as parameters - * all helper functions are moved to more specialized packages, which are exposed as - go modules. Importing such a module would reduce the footprint of dependencies. - * _all_ functions, variables, constants exposed by the deprecated API have now moved, so - that consumers of the new API no longer need to import github.com/go-openapi/swag, but - should import the desired sub-module(s). +All kinds of contributions are welcome. -**New with this release**: +This repo is a go mono-repo. See [docs](docs/MAINTAINERS.md). -* [x] type converters and pointer to value helpers now support generic types -* [x] name mangling now support pluralized initialisms (issue #46) - Strings like "contact IDs" are now recognized as such a plural form and mangled as a linter would expect. -* [x] performance: small improvements to reduce the overhead of convert/format wrappers (see issues #110, or PR #108) -* [x] performance: name mangling utilities run ~ 10% faster (PR #115) +More general guidelines are available [here](.github/CONTRIBUTING.md). ---- +## Roadmap -## Licensing +See the current [TODO list](docs/TODOS.md) -This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). +## Change log -## Note to contributors +See -A mono-repo structure comes with some unavoidable extra pains... +For pre-v0.26.0 releases, see [release notes](./docs/NOTES.md). 
-* Testing +**What coming next?** -> The usual `go test ./...` command, run from the root of this repo won't work any longer to test all submodules. -> -> Each module constitutes an independant unit of test. So you have to run `go test` inside each module. -> Or you may take a look at how this is achieved by CI -> [here] https://github.com/go-openapi/swag/blob/master/.github/workflows/go-test.yml). -> -> There are also some alternative tricks using `go work`, for local development, if you feel comfortable with -> go workspaces. Perhaps some day, we'll have a `go work test` to run all tests without any hack. +Moving forward, we want to : -* Releasing +* provide an implementation of the JSON adapter based on `encoding/json/v2`, for `go1.25` builds. +* provide similar implementations for `goccy/go-json` and `jsoniterator/go`, and perhaps some other + similar libraries may be interesting too. -> Each module follows its own independant module versioning. -> -> So you have tags like `mangling/v0.24.0`, `fileutils/v0.24.0` etc that are used by `go mod` and `go get` -> to refer to the tagged version of each module specifically. -> -> This means we may release patches etc to each module independently. -> -> We'd like to adopt the rule that modules in this repo would only differ by a patch version -> (e.g. `v0.24.5` vs `v0.24.3`), and we'll level all modules whenever a minor version is introduced. -> -> A script in `./hack` is provided to tag all modules with the same version in one go. + -## Todos, suggestions and plans +## Licensing -All kinds of contributions are welcome. +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). 
-A few ideas: - -* [x] Complete the split of dependencies to isolate easyjson from the rest -* [x] Improve CI to reduce needed tests -* [x] Replace dependency to `gopkg.in/yaml.v3` (`yamlutil`) -* [ ] Improve mangling utilities (improve readability, support for capitalized words, - better word substitution for non-letter symbols...) -* [ ] Move back to this common shared pot a few of the technical features introduced by go-swagger independently - (e.g. mangle go package names, search package with go modules support, ...) -* [ ] Apply a similar mono-repo approach to go-openapi/strfmt which suffer from similar woes: bloated API, - imposed dependency to some database driver. -* [ ] Adapt `go-swagger` (incl. generated code) to the new `swag` API. -* [ ] Factorize some tests, as there is a lot of redundant testing code in `jsonutils` -* [ ] Benchmark & profiling: publish independently the tool built to analyze and chart benchmarks (e.g. similar to `benchvisual`) -* [ ] more thorough testing for nil / null case -* [ ] ci pipeline to manage releases -* [ ] cleaner mockery generation (doesn't work out of the box for all sub-modules) + + + + +## Other documentation + +* [All-time contributors](./CONTRIBUTORS.md) +* [Contributing guidelines](.github/CONTRIBUTING.md) +* [Maintainers documentation](docs/MAINTAINERS.md) +* [Code style](docs/STYLE.md) + +## Cutting a new release + +Maintainers can cut a new release by either: + +* running [this workflow](https://github.com/go-openapi/swag/actions/workflows/bump-release.yml) +* or pushing a semver tag + * signed tags are preferred + * The tag message is prepended to release notes + + +[test-badge]: https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg +[test-url]: https://github.com/go-openapi/swag/actions/workflows/go-test.yml +[cov-badge]: https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg +[cov-url]: https://codecov.io/gh/go-openapi/swag +[vuln-scan-badge]: 
https://github.com/go-openapi/swag/actions/workflows/scanner.yml/badge.svg +[vuln-scan-url]: https://github.com/go-openapi/swag/actions/workflows/scanner.yml +[codeql-badge]: https://github.com/go-openapi/swag/actions/workflows/codeql.yml/badge.svg +[codeql-url]: https://github.com/go-openapi/swag/actions/workflows/codeql.yml + +[release-badge]: https://badge.fury.io/gh/go-openapi%2Fswag.svg +[release-url]: https://badge.fury.io/gh/go-openapi%2Fswag +[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fswag.svg +[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fswag + +[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/swag +[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/swag +[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/swag +[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/swag + +[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F +[doc-url]: https://goswagger.io/go-openapi +[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/swag +[godoc-url]: http://pkg.go.dev/github.com/go-openapi/swag +[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png +[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM +[slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + + +[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg +[license-url]: https://github.com/go-openapi/swag/?tab=Apache-2.0-1-ov-file#readme + +[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/swag +[goversion-url]: https://github.com/go-openapi/swag/blob/master/go.mod +[top-badge]: https://img.shields.io/github/languages/top/go-openapi/swag 
+[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/swag/latest diff --git a/vendor/github.com/go-openapi/swag/jsonutils/README.md b/vendor/github.com/go-openapi/swag/jsonutils/README.md index d745cdb466..07a2ca1d71 100644 --- a/vendor/github.com/go-openapi/swag/jsonutils/README.md +++ b/vendor/github.com/go-openapi/swag/jsonutils/README.md @@ -1,11 +1,11 @@ - # jsonutils +# jsonutils `jsonutils` exposes a few tools to work with JSON: - a fast, simple `Concat` to concatenate (not merge) JSON objects and arrays - `FromDynamicJSON` to convert a data structure into a "dynamic JSON" data structure - `ReadJSON` and `WriteJSON` behave like `json.Unmarshal` and `json.Marshal`, - with the ability to use another underlying serialization library through an `Adapter` + with the ability to use another underlying serialization library through an `Adapter` configured at runtime - a `JSONMapSlice` structure that may be used to store JSON objects with the order of keys maintained @@ -64,7 +64,7 @@ find a registered implementation that support ordered keys in objects. Our standard library implementation supports this. As of `v0.25.0`, we support through such an adapter the popular `mailru/easyjson` -library, which kicks in when the passed values support the `easyjson.Unmarshaler` +library, which kicks in when the passed values support the `easyjson.Unmarshaler` or `easyjson.Marshaler` interfaces. In the future, we plan to add more similar libraries that compete on the go JSON @@ -77,8 +77,9 @@ In package `github.com/go-openapi/swag/easyjson/adapters`, several adapters are Each adapter is an independent go module. Hence you'll pick its dependencies only if you import it. 
At this moment we provide: -* `stdlib`: JSON adapter based on the standard library -* `easyjson`: JSON adapter based on the `github.com/mailru/easyjson` + +- `stdlib`: JSON adapter based on the standard library +- `easyjson`: JSON adapter based on the `github.com/mailru/easyjson` The adapters provide the basic `Marshal` and `Unmarshal` capabilities, plus an implementation of the `MapSlice` pattern. diff --git a/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md b/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md index 6674c63b72..abe6e9533e 100644 --- a/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md +++ b/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md @@ -4,7 +4,7 @@ go test -bench XXX -run XXX -benchtime 30s ``` -## Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df +## Benchmarks at `b3e7a5386f996177e4808f11acb2aa93a0f660df` ``` goos: linux @@ -49,7 +49,7 @@ BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op ``` -## Benchmarks at d7d2d1b895f5b6747afaff312dd2a402e69e818b +## Benchmarks at `d7d2d1b895f5b6747afaff312dd2a402e69e818b` go1.24 diff --git a/vendor/github.com/go-openapi/validate/.gitignore b/vendor/github.com/go-openapi/validate/.gitignore index fea8b84eca..d8f4186fe5 100644 --- a/vendor/github.com/go-openapi/validate/.gitignore +++ b/vendor/github.com/go-openapi/validate/.gitignore @@ -1,5 +1,5 @@ -secrets.yml -coverage.out -*.cov *.out -playground +*.cov +.idea +.env +.mcp.json diff --git a/vendor/github.com/go-openapi/validate/.golangci.yml b/vendor/github.com/go-openapi/validate/.golangci.yml index 10c513342f..4d6b36e472 100644 --- a/vendor/github.com/go-openapi/validate/.golangci.yml +++ b/vendor/github.com/go-openapi/validate/.golangci.yml @@ -2,27 +2,15 @@ version: "2" linters: default: all disable: - - cyclop - depguard - - errchkjson - - errorlint - - exhaustruct - - forcetypeassert - funlen - gochecknoglobals - 
gochecknoinits - - gocognit - - godot - godox - - gomoddirectives - - gosmopolitan - - inamedparam - - intrange + - exhaustruct - ireturn - - lll - - musttag - - nestif - nlreturn + - nestif - nonamedreturns - noinlineerr - paralleltest @@ -30,7 +18,6 @@ linters: - testpackage - thelper - tparallel - - unparam - varnamelen - whitespace - wrapcheck @@ -42,8 +29,17 @@ linters: goconst: min-len: 2 min-occurrences: 3 + cyclop: + max-complexity: 40 gocyclo: - min-complexity: 45 + min-complexity: 40 + gocognit: + min-complexity: 40 + exhaustive: + default-signifies-exhaustive: true + default-case-required: true + lll: + line-length: 180 exclusions: generated: lax presets: @@ -59,6 +55,7 @@ formatters: enable: - gofmt - goimports + - gofumpt exclusions: generated: lax paths: diff --git a/vendor/github.com/go-openapi/validate/BENCHMARK.md b/vendor/github.com/go-openapi/validate/BENCHMARK.md index 79cf6a077b..0353eae5bd 100644 --- a/vendor/github.com/go-openapi/validate/BENCHMARK.md +++ b/vendor/github.com/go-openapi/validate/BENCHMARK.md @@ -3,6 +3,7 @@ Validating the Kubernetes Swagger API ## v0.22.6: 60,000,000 allocs + ``` goos: linux goarch: amd64 @@ -11,7 +12,8 @@ cpu: AMD Ryzen 7 5800X 8-Core Processor Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 8549863982 ns/op 7067424936 B/op 59583275 allocs/op ``` -## After refact PR: minor but noticable improvements: 25,000,000 allocs +## After refact PR: minor but noticeable improvements: 25,000,000 allocs + ``` go test -bench Spec goos: linux @@ -22,6 +24,7 @@ Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 40645355 ``` ## After reduce GC pressure PR: 17,000,000 allocs + ``` goos: linux goarch: amd64 diff --git a/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md index 9322b065e3..bac878f216 100644 --- a/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ 
include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/validate/CONTRIBUTORS.md b/vendor/github.com/go-openapi/validate/CONTRIBUTORS.md new file mode 100644 index 0000000000..7b79b765dc --- /dev/null +++ b/vendor/github.com/go-openapi/validate/CONTRIBUTORS.md @@ -0,0 +1,43 @@ +# Contributors + +- Repository: ['go-openapi/validate'] + +| Total Contributors | Total Contributions | +| --- | --- | +| 31 | 295 | + +| Username | All Time Contribution Count | All Commits | +| --- | --- | --- | +| @casualjim | 169 | | +| @fredbi | 58 | | +| @sttts | 11 | | +| @youyuanwu | 9 | | +| @keramix | 8 | | +| @jerome-laforge | 4 | | +| @GlenDC | 4 | | +| @galaxie | 3 | | +| @tossmilestone | 2 | | +| @EleanorRigby | 2 | | +| @jiuker | 2 | | +| @pytlesk4 | 2 | | +| @dimovnike | 2 | | +| @gbjk | 2 | 
| +| @ujjwalsh | 1 | | +| @key-amb | 1 | | +| @caglar10ur | 1 | | +| @petrkotas | 1 | | +| @dolmen | 1 | | +| @nikhita | 1 | | +| @koron | 1 | | +| @liggitt | 1 | | +| @ilyakaznacheev | 1 | | +| @hypnoglow | 1 | | +| @gautierdelorme | 1 | | +| @flavioribeiro | 1 | | +| @pheepi | 1 | | +| @carvind | 1 | | +| @artemseleznev | 1 | | +| @dadgar | 1 | | +| @elakito | 1 | | + + _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/validate/README.md b/vendor/github.com/go-openapi/validate/README.md index 73d87ce4f0..fec42b7c6e 100644 --- a/vendor/github.com/go-openapi/validate/README.md +++ b/vendor/github.com/go-openapi/validate/README.md @@ -1,15 +1,42 @@ -# Validation helpers [![Build Status](https://github.com/go-openapi/validate/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/validate/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/validate/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/validate) +# validate -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/validate/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/validate.svg)](https://pkg.go.dev/github.com/go-openapi/validate) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/validate)](https://goreportcard.com/report/github.com/go-openapi/validate) + +[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url] + + + +[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] + + +[![GoDoc][godoc-badge]][godoc-url] [![Discord 
Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] -This package provides helpers to validate Swagger 2.0. specification (aka OpenAPI 2.0). +--- + +A validator for OpenAPI v2 specifications and JSON schema draft 4. + +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + +## Status + +API is stable. -Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md. +## Import this library in your project -## What's inside? +```cmd +go get github.com/go-openapi/validate +``` + +## Contents + +This package provides helpers to validate Swagger 2.0. specification (aka OpenAPI 2.0). * A validator for Swagger specifications * A validator for JSON schemas draft4 @@ -22,13 +49,7 @@ Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/m * Minimum, Maximum, MultipleOf * FormatOf -[Documentation](https://pkg.go.dev/github.com/go-openapi/validate) - -## Licensing - -This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). - -## FAQ +### FAQ * Does this library support OpenAPI 3? @@ -37,4 +58,67 @@ This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). > There is no plan to make it evolve toward supporting OpenAPI 3.x. > This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. 
> -> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 +> An early attempt to support Swagger 3 may be found at: + +## Change log + +See + +## References + + + +## Licensing + +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). + +## Other documentation + +* [All-time contributors](./CONTRIBUTORS.md) +* [Contributing guidelines](.github/CONTRIBUTING.md) +* [Maintainers documentation](docs/MAINTAINERS.md) +* [Code style](docs/STYLE.md) + +## Cutting a new release + +Maintainers can cut a new release by either: + +* running [this workflow](https://github.com/go-openapi/validate/actions/workflows/bump-release.yml) +* or pushing a semver tag + * signed tags are preferred + * The tag message is prepended to release notes + + +[test-badge]: https://github.com/go-openapi/validate/actions/workflows/go-test.yml/badge.svg +[test-url]: https://github.com/go-openapi/validate/actions/workflows/go-test.yml +[cov-badge]: https://codecov.io/gh/go-openapi/validate/branch/master/graph/badge.svg +[cov-url]: https://codecov.io/gh/go-openapi/validate +[vuln-scan-badge]: https://github.com/go-openapi/validate/actions/workflows/scanner.yml/badge.svg +[vuln-scan-url]: https://github.com/go-openapi/validate/actions/workflows/scanner.yml +[codeql-badge]: https://github.com/go-openapi/validate/actions/workflows/codeql.yml/badge.svg +[codeql-url]: https://github.com/go-openapi/validate/actions/workflows/codeql.yml + +[release-badge]: https://badge.fury.io/gh/go-openapi%2Fvalidate.svg +[release-url]: https://badge.fury.io/gh/go-openapi%2Fvalidate + +[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/validate +[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/validate +[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/validate +[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/validate + +[godoc-badge]: 
https://pkg.go.dev/badge/github.com/go-openapi/validate +[godoc-url]: http://pkg.go.dev/github.com/go-openapi/validate +[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png +[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM +[slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + + +[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg +[license-url]: https://github.com/go-openapi/validate/?tab=Apache-2.0-1-ov-file#readme + +[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/validate +[goversion-url]: https://github.com/go-openapi/validate/blob/master/go.mod +[top-badge]: https://img.shields.io/github/languages/top/go-openapi/validate +[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/validate/latest diff --git a/vendor/github.com/go-openapi/validate/SECURITY.md b/vendor/github.com/go-openapi/validate/SECURITY.md new file mode 100644 index 0000000000..6ceb159ca2 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/SECURITY.md @@ -0,0 +1,37 @@ +# Security Policy + +This policy outlines the commitment and practices of the go-openapi maintainers regarding security. + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. 
+ +## Reporting a vulnerability + +If you become aware of a security vulnerability that affects the current repository, +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. + +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". + +[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/validate/context.go b/vendor/github.com/go-openapi/validate/context.go index b4587dcd56..7f295f9713 100644 --- a/vendor/github.com/go-openapi/validate/context.go +++ b/vendor/github.com/go-openapi/validate/context.go @@ -7,7 +7,7 @@ import ( "context" ) -// validateCtxKey is the key type of context key in this pkg +// validateCtxKey is the key type of context key in this pkg. type validateCtxKey string const ( @@ -25,13 +25,13 @@ const ( var operationTypeEnum = []operationType{request, response, none} // WithOperationRequest returns a new context with operationType request -// in context value +// in context value. func WithOperationRequest(ctx context.Context) context.Context { return withOperation(ctx, request) } // WithOperationResponse returns a new context with operationType response -// in context value +// in context value. 
func WithOperationResponse(ctx context.Context) context.Context { return withOperation(ctx, response) } @@ -41,7 +41,7 @@ func withOperation(ctx context.Context, operation operationType) context.Context } // extractOperationType extracts the operation type from ctx -// if not specified or of unknown value, return none operation type +// if not specified or of unknown value, return none operation type. func extractOperationType(ctx context.Context) operationType { v := ctx.Value(operationTypeKey) if v == nil { diff --git a/vendor/github.com/go-openapi/validate/debug.go b/vendor/github.com/go-openapi/validate/debug.go index 79145a4495..c890d1280f 100644 --- a/vendor/github.com/go-openapi/validate/debug.go +++ b/vendor/github.com/go-openapi/validate/debug.go @@ -15,7 +15,7 @@ var ( // Debug is true when the SWAGGER_DEBUG env var is not empty. // It enables a more verbose logging of validators. Debug = os.Getenv("SWAGGER_DEBUG") != "" - // validateLogger is a debug logger for this package + // validateLogger is a debug logger for this package. validateLogger *log.Logger ) diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go index 79a431677e..ebcd807137 100644 --- a/vendor/github.com/go-openapi/validate/default_validator.go +++ b/vendor/github.com/go-openapi/validate/default_validator.go @@ -18,7 +18,7 @@ type defaultValidator struct { schemaOptions *SchemaValidatorOptions } -// Validate validates the default values declared in the swagger spec +// Validate validates the default values declared in the swagger spec. func (d *defaultValidator) Validate() *Result { errs := pools.poolOfResults.BorrowResult() // will redeem when merged @@ -30,7 +30,7 @@ func (d *defaultValidator) Validate() *Result { return errs } -// resetVisited resets the internal state of visited schemas +// resetVisited resets the internal state of visited schemas. 
func (d *defaultValidator) resetVisited() { if d.visitedSchemas == nil { d.visitedSchemas = make(map[string]struct{}) @@ -38,7 +38,7 @@ func (d *defaultValidator) resetVisited() { return } - // TODO(go1.21): clear(ex.visitedSchemas) + // NOTE(go1.21): clear(ex.visitedSchemas) for k := range d.visitedSchemas { delete(d.visitedSchemas, k) } @@ -73,16 +73,17 @@ func isVisited(path string, visitedSchemas map[string]struct{}) bool { return false } -// beingVisited asserts a schema is being visited +// beingVisited asserts a schema is being visited. func (d *defaultValidator) beingVisited(path string) { d.visitedSchemas[path] = struct{}{} } -// isVisited tells if a path has already been visited +// isVisited tells if a path has already been visited. func (d *defaultValidator) isVisited(path string) bool { return isVisited(path, d.visitedSchemas) } +//nolint:gocognit // refactor in a forthcoming PR func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { // every default value that is specified must validate against the schema for that property // headers, items, parameters, schema @@ -272,7 +273,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri return res } -// TODO: Temporary duplicated code. Need to refactor with examples +// NOTE: Temporary duplicated code. 
Need to refactor with examples func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root any, items *spec.Items) *Result { res := pools.poolOfResults.BorrowResult() diff --git a/vendor/github.com/go-openapi/validate/doc.go b/vendor/github.com/go-openapi/validate/doc.go index a99893e1a3..5218ec85c6 100644 --- a/vendor/github.com/go-openapi/validate/doc.go +++ b/vendor/github.com/go-openapi/validate/doc.go @@ -1,76 +1,76 @@ // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 -/* -Package validate provides methods to validate a swagger specification, -as well as tools to validate data against their schema. - -This package follows Swagger 2.0. specification (aka OpenAPI 2.0). Reference -can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md. - -# Validating a specification - -Validates a spec document (from JSON or YAML) against the JSON schema for swagger, -then checks a number of extra rules that can't be expressed in JSON schema. - -Entry points: - - Spec() - - NewSpecValidator() - - SpecValidator.Validate() - -Reported as errors: - - [x] definition can't declare a property that's already defined by one of its ancestors - [x] definition's ancestor can't be a descendant of the same model - [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method. Validation can be laxed by disabling StrictPathParamUniqueness. 
- [x] each security reference should contain only unique scopes - [x] each security scope in a security definition should be unique - [x] parameters in path must be unique - [x] each path parameter must correspond to a parameter placeholder and vice versa - [x] each referenceable definition must have references - [x] each definition property listed in the required array must be defined in the properties of the model - [x] each parameter should have a unique `name` and `type` combination - [x] each operation should have only 1 parameter of type body - [x] each reference must point to a valid object - [x] every default value that is specified must validate against the schema for that property - [x] items property is required for all schemas/definitions of type `array` - [x] path parameters must be declared a required - [x] headers must not contain $ref - [x] schema and property examples provided must validate against their respective object's schema - [x] examples provided must validate their schema - -Reported as warnings: - - [x] path parameters should not contain any of [{,},\w] - [x] empty path - [x] unused definitions - [x] unsupported validation of examples on non-JSON media types - [x] examples in response without schema - [x] readOnly properties should not be required - -# Validating a schema - -The schema validation toolkit validates data against JSON-schema-draft 04 schema. - -It is tested against the full json-schema-testing-suite (https://github.com/json-schema-org/JSON-Schema-Test-Suite), -except for the optional part (bignum, ECMA regexp, ...). - -It supports the complete JSON-schema vocabulary, including keywords not supported by Swagger (e.g. additionalItems, ...) - -Entry points: - - AgainstSchema() - - ... 
- -# Known limitations - -With the current version of this package, the following aspects of swagger are not yet supported: - - [ ] errors and warnings are not reported with key/line number in spec - [ ] default values and examples on responses only support application/json producer type - [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values - [ ] rules for collectionFormat are not implemented - [ ] no validation rule for polymorphism support (discriminator) [not done here] - [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid - [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 -*/ +// Package validate provides methods to validate a swagger specification, +// as well as tools to validate data against their schema. +// +// This package follows Swagger 2.0. specification (aka OpenAPI 2.0). Reference. +// can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md. +// +// # Validating a specification +// +// Validates a spec document (from JSON or YAML) against the JSON schema for swagger, +// then checks a number of extra rules that can't be expressed in JSON schema. +// +// Entry points: +// +// - Spec() +// - [NewSpecValidator]() +// - [SpecValidator].Validate() +// +// Reported as errors: +// +// [x] definition can't declare a property that's already defined by one of its ancestors +// [x] definition's ancestor can't be a descendant of the same model +// [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method. Validation can be laxed by disabling StrictPathParamUniqueness. 
+// [x] each security reference should contain only unique scopes +// [x] each security scope in a security definition should be unique +// [x] parameters in path must be unique +// [x] each path parameter must correspond to a parameter placeholder and vice versa +// [x] each referenceable definition must have references +// [x] each definition property listed in the required array must be defined in the properties of the model +// [x] each parameter should have a unique `name` and `type` combination +// [x] each operation should have only 1 parameter of type body +// [x] each reference must point to a valid object +// [x] every default value that is specified must validate against the schema for that property +// [x] items property is required for all schemas/definitions of type `array` +// [x] path parameters must be declared a required +// [x] headers must not contain $ref +// [x] schema and property examples provided must validate against their respective object's schema +// [x] examples provided must validate their schema +// +// Reported as warnings: +// +// [x] path parameters should not contain any of [{,},\w] +// [x] empty path +// [x] unused definitions +// [x] unsupported validation of examples on non-JSON media types +// [x] examples in response without schema +// [x] readOnly properties should not be required +// +// # Validating a schema +// +// The schema validation toolkit validates data against JSON-schema-draft 04 schema. +// +// It is tested against the full json-schema-testing-suite (https://github.com/json-schema-org/JSON-Schema-Test-Suite), +// except for the optional part (bignum, ECMA regexp, ...). +// +// It supports the complete JSON-schema vocabulary, including keywords not supported by Swagger (e.g. additionalItems, ...) +// +// Entry points: +// +// - [AgainstSchema]() +// - ... 
+// +// # Known limitations +// +// With the current version of this package, the following aspects of swagger are not yet supported: +// +// [ ] errors and warnings are not reported with key/line number in spec +// [ ] default values and examples on responses only support application/json producer type +// [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values +// [ ] rules for collectionFormat are not implemented +// [ ] no validation rule for polymorphism support (discriminator) [not done here] +// [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid +// [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 package validate diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go index e4ef52c6dc..eb6b5ee5c7 100644 --- a/vendor/github.com/go-openapi/validate/example_validator.go +++ b/vendor/github.com/go-openapi/validate/example_validator.go @@ -9,7 +9,7 @@ import ( "github.com/go-openapi/spec" ) -// ExampleValidator validates example values defined in a spec +// ExampleValidator validates example values defined in a spec. type exampleValidator struct { SpecValidator *SpecValidator visitedSchemas map[string]struct{} @@ -35,7 +35,7 @@ func (ex *exampleValidator) Validate() *Result { return errs } -// resetVisited resets the internal state of visited schemas +// resetVisited resets the internal state of visited schemas. func (ex *exampleValidator) resetVisited() { if ex.visitedSchemas == nil { ex.visitedSchemas = make(map[string]struct{}) @@ -43,22 +43,23 @@ func (ex *exampleValidator) resetVisited() { return } - // TODO(go1.21): clear(ex.visitedSchemas) + // NOTE(go1.21): clear(ex.visitedSchemas) for k := range ex.visitedSchemas { delete(ex.visitedSchemas, k) } } -// beingVisited asserts a schema is being visited +// beingVisited asserts a schema is being visited. 
func (ex *exampleValidator) beingVisited(path string) { ex.visitedSchemas[path] = struct{}{} } -// isVisited tells if a path has already been visited +// isVisited tells if a path has already been visited. func (ex *exampleValidator) isVisited(path string) bool { return isVisited(path, ex.visitedSchemas) } +//nolint:gocognit // refactor in a forthcoming PR func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { // every example value that is specified must validate against the schema for that property // in: schemas, properties, object, items @@ -205,7 +206,7 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo newSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, s.schemaOptions).Validate(example), ) } else { - // TODO: validate other media types too + // Proposal for enhancement: validate other media types too res.AddWarnings(examplesMimeNotSupportedMsg(operationID, responseName)) } } else { @@ -264,7 +265,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str return res } -// TODO: Temporary duplicated code. Need to refactor with examples +// NOTE: Temporary duplicated code. 
Need to refactor with examples // func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root any, items *spec.Items) *Result { diff --git a/vendor/github.com/go-openapi/validate/formats.go b/vendor/github.com/go-openapi/validate/formats.go index 85ee634941..eab2615376 100644 --- a/vendor/github.com/go-openapi/validate/formats.go +++ b/vendor/github.com/go-openapi/validate/formats.go @@ -76,7 +76,12 @@ func (f *formatValidator) Validate(val any) *Result { result = new(Result) } - if err := FormatOf(f.Path, f.In, f.Format, val.(string), f.KnownFormats); err != nil { + str, ok := val.(string) + if !ok { + return result + } + + if err := FormatOf(f.Path, f.In, f.Format, str, f.KnownFormats); err != nil { result.AddErrors(err) } diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go index 49b130473a..8a1a231283 100644 --- a/vendor/github.com/go-openapi/validate/helpers.go +++ b/vendor/github.com/go-openapi/validate/helpers.go @@ -3,7 +3,7 @@ package validate -// TODO: define this as package validate/internal +// Proposal for enhancement: define this as package validate/internal // This must be done while keeping CI intact with all tests and test coverage import ( @@ -36,7 +36,7 @@ const ( jsonProperties = "properties" jsonItems = "items" jsonType = "type" - // jsonSchema = "schema" + // jsonSchema = "schema". jsonDefault = "default" ) @@ -45,7 +45,7 @@ const ( stringFormatDateTime = "date-time" stringFormatPassword = "password" stringFormatByte = "byte" - // stringFormatBinary = "binary" + // stringFormatBinary = "binary". stringFormatCreditCard = "creditcard" stringFormatDuration = "duration" stringFormatEmail = "email" @@ -77,7 +77,7 @@ const ( numberFormatDouble = "double" ) -// Helpers available at the package level +// Helpers available at the package level. 
var ( pathHelp *pathHelper valueHelp *valueHelper @@ -126,10 +126,11 @@ func (h *pathHelper) stripParametersInPath(path string) string { // Regexp to extract parameters from path, with surrounding {}. // NOTE: important non-greedy modifier rexParsePathParam := mustCompileRegexp(`{[^{}]+?}`) - strippedSegments := []string{} + segments := strings.Split(path, "/") + strippedSegments := make([]string, len(segments)) - for segment := range strings.SplitSeq(path, "/") { - strippedSegments = append(strippedSegments, rexParsePathParam.ReplaceAllString(segment, "X")) + for i, segment := range segments { + strippedSegments[i] = rexParsePathParam.ReplaceAllString(segment, "X") } return strings.Join(strippedSegments, "/") } @@ -154,7 +155,7 @@ func (h *valueHelper) asInt64(val any) int64 { // Number conversion function for int64, without error checking // (implements an implicit type upgrade). v := reflect.ValueOf(val) - switch v.Kind() { //nolint:exhaustive + switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -171,7 +172,7 @@ func (h *valueHelper) asUint64(val any) uint64 { // Number conversion function for uint64, without error checking // (implements an implicit type upgrade). v := reflect.ValueOf(val) - switch v.Kind() { //nolint:exhaustive + switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return uint64(v.Int()) //nolint:gosec case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -184,12 +185,12 @@ func (h *valueHelper) asUint64(val any) uint64 { } } -// Same for unsigned floats +// Same for unsigned floats. func (h *valueHelper) asFloat64(val any) float64 { // Number conversion function for float64, without error checking // (implements an implicit type upgrade). 
v := reflect.ValueOf(val) - switch v.Kind() { //nolint:exhaustive + switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return float64(v.Int()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -244,7 +245,6 @@ func (h *paramHelper) resolveParam(path, method, operationID string, param *spec err = spec.ExpandParameterWithRoot(param, s.spec.Spec(), nil) } else { err = spec.ExpandParameter(param, s.spec.SpecFilePath()) - } if err != nil { // Safeguard // NOTE: we may enter here when the whole parameter is an unresolved $ref @@ -288,7 +288,8 @@ type responseHelper struct { func (r *responseHelper) expandResponseRef( response *spec.Response, - path string, s *SpecValidator) (*spec.Response, *Result) { + path string, s *SpecValidator, +) (*spec.Response, *Result) { // Ensure response is expanded var err error res := new(Result) @@ -309,7 +310,8 @@ func (r *responseHelper) expandResponseRef( func (r *responseHelper) responseMsgVariants( responseType string, - responseCode int) (responseName, responseCodeAsStr string) { + responseCode int, +) (responseName, responseCodeAsStr string) { // Path variants for messages if responseType == jsonDefault { responseCodeAsStr = jsonDefault diff --git a/vendor/github.com/go-openapi/validate/object_validator.go b/vendor/github.com/go-openapi/validate/object_validator.go index cf98ed377d..e651b3f70f 100644 --- a/vendor/github.com/go-openapi/validate/object_validator.go +++ b/vendor/github.com/go-openapi/validate/object_validator.go @@ -31,7 +31,8 @@ type objectValidator struct { func newObjectValidator(path, in string, maxProperties, minProperties *int64, required []string, properties spec.SchemaProperties, additionalProperties *spec.SchemaOrBool, patternProperties spec.SchemaProperties, - root any, formats strfmt.Registry, opts *SchemaValidatorOptions) *objectValidator { + root any, formats strfmt.Registry, opts *SchemaValidatorOptions, +) *objectValidator { 
if opts == nil { opts = new(SchemaValidatorOptions) } @@ -104,7 +105,7 @@ func (o *objectValidator) Validate(data any) *Result { o.validatePropertiesSchema(val, res) // Check patternProperties - // TODO: it looks like we have done that twice in many cases + // NOTE: it looks like we have done that twice in many cases for key, value := range val { _, regularProperty := o.Properties[key] matched, _, patterns := o.validatePatternProperty(key, value, res) // applies to regular properties as well @@ -115,7 +116,7 @@ func (o *objectValidator) Validate(data any) *Result { for _, pName := range patterns { if v, ok := o.PatternProperties[pName]; ok { r := newSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value) - res.mergeForField(data.(map[string]any), key, r) + res.mergeForField(data.(map[string]any), key, r) //nolint:forcetypeassert // data is always map[string]any at this point } } } @@ -129,7 +130,7 @@ func (o *objectValidator) SetPath(path string) { } func (o *objectValidator) Applies(source any, kind reflect.Kind) bool { - // TODO: this should also work for structs + // NOTE: this should also work for structs // there is a problem in the type validator where it will be unhappy about null values // so that requires more testing _, isSchema := source.(*spec.Schema) @@ -285,7 +286,7 @@ func (o *objectValidator) validateNoAdditionalProperties(val map[string]any, res /* case "$ref": if val[k] != nil { - // TODO: check context of that ref: warn about siblings, check against invalid context + // Proposal for enhancement: check context of that ref: warn about siblings, check against invalid context } */ } @@ -377,7 +378,7 @@ func (o *objectValidator) validatePropertiesSchema(val map[string]any, res *Resu } } -// TODO: succeededOnce is not used anywhere +// NOTE: succeededOnce is not used anywhere. 
func (o *objectValidator) validatePatternProperty(key string, value any, result *Result) (bool, bool, []string) { if len(o.PatternProperties) == 0 { return false, false, nil diff --git a/vendor/github.com/go-openapi/validate/options.go b/vendor/github.com/go-openapi/validate/options.go index f5e7f7131c..6a6891a6ec 100644 --- a/vendor/github.com/go-openapi/validate/options.go +++ b/vendor/github.com/go-openapi/validate/options.go @@ -5,7 +5,7 @@ package validate import "sync" -// Opts specifies validation options for a SpecValidator. +// Opts specifies validation options for a [SpecValidator]. // // NOTE: other options might be needed, for example a go-swagger specific mode. type Opts struct { diff --git a/vendor/github.com/go-openapi/validate/pools.go b/vendor/github.com/go-openapi/validate/pools.go index 1e734be493..c8936bd10b 100644 --- a/vendor/github.com/go-openapi/validate/pools.go +++ b/vendor/github.com/go-openapi/validate/pools.go @@ -245,7 +245,7 @@ type ( ) func (p schemaValidatorsPool) BorrowValidator() *SchemaValidator { - return p.Get().(*SchemaValidator) + return p.Get().(*SchemaValidator) //nolint:forcetypeassert // pool New always returns this type } func (p schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { @@ -254,7 +254,7 @@ func (p schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { } func (p objectValidatorsPool) BorrowValidator() *objectValidator { - return p.Get().(*objectValidator) + return p.Get().(*objectValidator) //nolint:forcetypeassert // pool New always returns this type } func (p objectValidatorsPool) RedeemValidator(s *objectValidator) { @@ -262,7 +262,7 @@ func (p objectValidatorsPool) RedeemValidator(s *objectValidator) { } func (p sliceValidatorsPool) BorrowValidator() *schemaSliceValidator { - return p.Get().(*schemaSliceValidator) + return p.Get().(*schemaSliceValidator) //nolint:forcetypeassert // pool New always returns this type } func (p sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { @@ 
-270,7 +270,7 @@ func (p sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { } func (p itemsValidatorsPool) BorrowValidator() *itemsValidator { - return p.Get().(*itemsValidator) + return p.Get().(*itemsValidator) //nolint:forcetypeassert // pool New always returns this type } func (p itemsValidatorsPool) RedeemValidator(s *itemsValidator) { @@ -278,7 +278,7 @@ func (p itemsValidatorsPool) RedeemValidator(s *itemsValidator) { } func (p basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator { - return p.Get().(*basicCommonValidator) + return p.Get().(*basicCommonValidator) //nolint:forcetypeassert // pool New always returns this type } func (p basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { @@ -286,7 +286,7 @@ func (p basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { } func (p headerValidatorsPool) BorrowValidator() *HeaderValidator { - return p.Get().(*HeaderValidator) + return p.Get().(*HeaderValidator) //nolint:forcetypeassert // pool New always returns this type } func (p headerValidatorsPool) RedeemValidator(s *HeaderValidator) { @@ -294,7 +294,7 @@ func (p headerValidatorsPool) RedeemValidator(s *HeaderValidator) { } func (p paramValidatorsPool) BorrowValidator() *ParamValidator { - return p.Get().(*ParamValidator) + return p.Get().(*ParamValidator) //nolint:forcetypeassert // pool New always returns this type } func (p paramValidatorsPool) RedeemValidator(s *ParamValidator) { @@ -302,7 +302,7 @@ func (p paramValidatorsPool) RedeemValidator(s *ParamValidator) { } func (p basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator { - return p.Get().(*basicSliceValidator) + return p.Get().(*basicSliceValidator) //nolint:forcetypeassert // pool New always returns this type } func (p basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { @@ -310,7 +310,7 @@ func (p basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { } func (p numberValidatorsPool) BorrowValidator() 
*numberValidator { - return p.Get().(*numberValidator) + return p.Get().(*numberValidator) //nolint:forcetypeassert // pool New always returns this type } func (p numberValidatorsPool) RedeemValidator(s *numberValidator) { @@ -318,7 +318,7 @@ func (p numberValidatorsPool) RedeemValidator(s *numberValidator) { } func (p stringValidatorsPool) BorrowValidator() *stringValidator { - return p.Get().(*stringValidator) + return p.Get().(*stringValidator) //nolint:forcetypeassert // pool New always returns this type } func (p stringValidatorsPool) RedeemValidator(s *stringValidator) { @@ -326,7 +326,7 @@ func (p stringValidatorsPool) RedeemValidator(s *stringValidator) { } func (p schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator { - return p.Get().(*schemaPropsValidator) + return p.Get().(*schemaPropsValidator) //nolint:forcetypeassert // pool New always returns this type } func (p schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { @@ -334,7 +334,7 @@ func (p schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { } func (p formatValidatorsPool) BorrowValidator() *formatValidator { - return p.Get().(*formatValidator) + return p.Get().(*formatValidator) //nolint:forcetypeassert // pool New always returns this type } func (p formatValidatorsPool) RedeemValidator(s *formatValidator) { @@ -342,7 +342,7 @@ func (p formatValidatorsPool) RedeemValidator(s *formatValidator) { } func (p typeValidatorsPool) BorrowValidator() *typeValidator { - return p.Get().(*typeValidator) + return p.Get().(*typeValidator) //nolint:forcetypeassert // pool New always returns this type } func (p typeValidatorsPool) RedeemValidator(s *typeValidator) { @@ -350,7 +350,7 @@ func (p typeValidatorsPool) RedeemValidator(s *typeValidator) { } func (p schemasPool) BorrowSchema() *spec.Schema { - return p.Get().(*spec.Schema) + return p.Get().(*spec.Schema) //nolint:forcetypeassert // pool New always returns this type } func (p schemasPool) RedeemSchema(s 
*spec.Schema) { @@ -358,7 +358,7 @@ func (p schemasPool) RedeemSchema(s *spec.Schema) { } func (p resultsPool) BorrowResult() *Result { - return p.Get().(*Result).cleared() + return p.Get().(*Result).cleared() //nolint:forcetypeassert // pool New always returns *Result } func (p resultsPool) RedeemResult(s *Result) { diff --git a/vendor/github.com/go-openapi/validate/result.go b/vendor/github.com/go-openapi/validate/result.go index 69219e9982..ede945503d 100644 --- a/vendor/github.com/go-openapi/validate/result.go +++ b/vendor/github.com/go-openapi/validate/result.go @@ -25,7 +25,7 @@ var emptyResult = &Result{MatchCount: 1} // schema validation. Results from the validation branch // with most matches get eventually selected. // -// TODO: keep path of key originating the error +// Proposal for enhancement: keep path of key originating the error. type Result struct { Errors []error Warnings []error @@ -66,7 +66,7 @@ func NewFieldKey(obj map[string]any, field string) FieldKey { // Object returns the underlying object of this key. func (fk *FieldKey) Object() map[string]any { - return fk.object.Interface().(map[string]any) + return fk.object.Interface().(map[string]any) //nolint:forcetypeassert // object is always map[string]any } // Field returns the underlying field of this key. @@ -81,7 +81,7 @@ func NewItemKey(slice any, i int) ItemKey { // Slice returns the underlying slice of this key. func (ik *ItemKey) Slice() []any { - return ik.slice.Interface().([]any) + return ik.slice.Interface().([]any) //nolint:forcetypeassert // slice is always []any } // Index returns the underlying index of this key. @@ -283,14 +283,14 @@ func (r *Result) HasErrorsOrWarnings() bool { return len(r.Errors) > 0 || len(r.Warnings) > 0 } -// Inc increments the match count +// Inc increments the match count. func (r *Result) Inc() { r.MatchCount++ } -// AsError renders this result as an error interface +// AsError renders this result as an error interface. 
// -// TODO: reporting / pretty print with path ordered and indented +// Proposal for enhancement: reporting / pretty print with path ordered and indented. func (r *Result) AsError() error { if r.IsValid() { return nil @@ -425,7 +425,7 @@ func stripImportantTag(err error) error { } func (r *Result) keepRelevantErrors() *Result { - // TODO: this one is going to disapear... + // NOTE: this one is going to disapear... // keepRelevantErrors strips a result from standard errors and keeps // the ones which are supposedly more accurate. // diff --git a/vendor/github.com/go-openapi/validate/rexp.go b/vendor/github.com/go-openapi/validate/rexp.go index 795f148d0c..478036c087 100644 --- a/vendor/github.com/go-openapi/validate/rexp.go +++ b/vendor/github.com/go-openapi/validate/rexp.go @@ -10,7 +10,7 @@ import ( "sync/atomic" ) -// Cache for compiled regular expressions +// Cache for compiled regular expressions. var ( cacheMutex = &sync.Mutex{} reDict = atomic.Value{} // map[string]*re.Regexp diff --git a/vendor/github.com/go-openapi/validate/schema.go b/vendor/github.com/go-openapi/validate/schema.go index 375a98765d..b72a47bc33 100644 --- a/vendor/github.com/go-openapi/validate/schema.go +++ b/vendor/github.com/go-openapi/validate/schema.go @@ -13,7 +13,7 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// SchemaValidator validates data against a JSON schema +// SchemaValidator validates data against a JSON schema. type SchemaValidator struct { Path string in string @@ -26,7 +26,7 @@ type SchemaValidator struct { // AgainstSchema validates the specified data against the provided schema, using a registry of supported formats. // -// When no pre-parsed *spec.Schema structure is provided, it uses a JSON schema as default. See example. +// When no pre-parsed *[spec.Schema] structure is provided, it uses a JSON schema as default. See example. 
func AgainstSchema(schema *spec.Schema, data any, formats strfmt.Registry, options ...Option) error { res := NewSchemaValidator(schema, nil, "", formats, append(options, WithRecycleValidators(true), withRecycleResults(true))..., @@ -103,18 +103,20 @@ func newSchemaValidator(schema *spec.Schema, rootSchema any, root string, format return s } -// SetPath sets the path for this schema valdiator +// SetPath sets the path for this schema validator. func (s *SchemaValidator) SetPath(path string) { s.Path = path } -// Applies returns true when this schema validator applies +// Applies returns true when this schema validator applies. func (s *SchemaValidator) Applies(source any, _ reflect.Kind) bool { _, ok := source.(*spec.Schema) return ok } -// Validate validates the data against the schema +// Validate validates the data against the schema. +// +//nolint:gocognit // refactor in a forthcoming PR func (s *SchemaValidator) Validate(data any) *Result { if s == nil { return emptyResult @@ -176,7 +178,7 @@ func (s *SchemaValidator) Validate(data any) *Result { d = dd } - // TODO: this part should be handed over to type validator + // Proposal for enhancement: this part should be handed over to type validator // Handle special case of json.Number data (number marshalled as string) isnumber := s.Schema != nil && (s.Schema.Type.Contains(numberType) || s.Schema.Type.Contains(integerType)) if num, ok := data.(json.Number); ok && isnumber { diff --git a/vendor/github.com/go-openapi/validate/schema_messages.go b/vendor/github.com/go-openapi/validate/schema_messages.go index e8c7c48ad7..e0f1801e6e 100644 --- a/vendor/github.com/go-openapi/validate/schema_messages.go +++ b/vendor/github.com/go-openapi/validate/schema_messages.go @@ -11,57 +11,64 @@ import ( const ( // ArrayDoesNotAllowAdditionalItemsError when an additionalItems construct is not verified by the array values provided. 
// - // TODO: should move to package go-openapi/errors + // Proposal for enhancement: should move to package go-openapi/errors. ArrayDoesNotAllowAdditionalItemsError = "array doesn't allow for additional items" - // HasDependencyError indicates that a dependencies construct was not verified + // HasDependencyError indicates that a dependencies construct was not verified. HasDependencyError = "%q has a dependency on %s" - // InvalidSchemaProvidedError indicates that the schema provided to validate a value cannot be properly compiled + // InvalidSchemaProvidedError indicates that the schema provided to validate a value cannot be properly compiled. InvalidSchemaProvidedError = "Invalid schema provided to SchemaValidator: %v" - // InvalidTypeConversionError indicates that a numerical conversion for the given type could not be carried on + // InvalidTypeConversionError indicates that a numerical conversion for the given type could not be carried on. InvalidTypeConversionError = "invalid type conversion in %s: %v " - // MustValidateAtLeastOneSchemaError indicates that in a AnyOf construct, none of the schema constraints specified were verified + // MustValidateAtLeastOneSchemaError indicates that in a AnyOf construct, none of the schema constraints specified were verified. MustValidateAtLeastOneSchemaError = "%q must validate at least one schema (anyOf)" - // MustValidateOnlyOneSchemaError indicates that in a OneOf construct, either none of the schema constraints specified were verified, or several were + // MustValidateOnlyOneSchemaError indicates that in a OneOf construct, either none of the schema constraints specified were verified, or several were. MustValidateOnlyOneSchemaError = "%q must validate one and only one schema (oneOf). 
%s" - // MustValidateAllSchemasError indicates that in a AllOf construct, at least one of the schema constraints specified were not verified + // MustValidateAllSchemasError indicates that in a AllOf construct, at least one of the schema constraints specified were not verified. // - // TODO: punctuation in message + // NOTE: punctuation in message. MustValidateAllSchemasError = "%q must validate all the schemas (allOf)%s" - // MustNotValidateSchemaError indicates that in a Not construct, the schema constraint specified was verified + // MustNotValidateSchemaError indicates that in a Not construct, the schema constraint specified was verified. MustNotValidateSchemaError = "%q must not validate the schema (not)" ) -// Warning messages related to schema validation and returned as results +// Warning messages related to schema validation and returned as results. const () func invalidSchemaProvidedMsg(err error) errors.Error { return errors.New(InternalErrorCode, InvalidSchemaProvidedError, err) } + func invalidTypeConversionMsg(path string, err error) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidTypeConversionError, path, err) } + func mustValidateOnlyOneSchemaMsg(path, additionalMsg string) errors.Error { return errors.New(errors.CompositeErrorCode, MustValidateOnlyOneSchemaError, path, additionalMsg) } + func mustValidateAtLeastOneSchemaMsg(path string) errors.Error { return errors.New(errors.CompositeErrorCode, MustValidateAtLeastOneSchemaError, path) } + func mustValidateAllSchemasMsg(path, additionalMsg string) errors.Error { return errors.New(errors.CompositeErrorCode, MustValidateAllSchemasError, path, additionalMsg) } + func mustNotValidatechemaMsg(path string) errors.Error { return errors.New(errors.CompositeErrorCode, MustNotValidateSchemaError, path) } + func hasADependencyMsg(path, depkey string) errors.Error { return errors.New(errors.CompositeErrorCode, HasDependencyError, path, depkey) } + func arrayDoesNotAllowAdditionalItemsMsg() 
errors.Error { return errors.New(errors.CompositeErrorCode, ArrayDoesNotAllowAdditionalItemsError) } diff --git a/vendor/github.com/go-openapi/validate/schema_option.go b/vendor/github.com/go-openapi/validate/schema_option.go index d9fd21a75a..3e1b882ed3 100644 --- a/vendor/github.com/go-openapi/validate/schema_option.go +++ b/vendor/github.com/go-openapi/validate/schema_option.go @@ -3,7 +3,7 @@ package validate -// SchemaValidatorOptions defines optional rules for schema validation +// SchemaValidatorOptions defines optional rules for schema validation. type SchemaValidatorOptions struct { EnableObjectArrayTypeCheck bool EnableArrayMustHaveItemsCheck bool @@ -12,24 +12,24 @@ type SchemaValidatorOptions struct { skipSchemataResult bool } -// Option sets optional rules for schema validation +// Option sets optional rules for schema validation. type Option func(*SchemaValidatorOptions) -// EnableObjectArrayTypeCheck activates the swagger rule: an items must be in type: array +// EnableObjectArrayTypeCheck activates the swagger rule: an items must be in type: array. func EnableObjectArrayTypeCheck(enable bool) Option { return func(svo *SchemaValidatorOptions) { svo.EnableObjectArrayTypeCheck = enable } } -// EnableArrayMustHaveItemsCheck activates the swagger rule: an array must have items defined +// EnableArrayMustHaveItemsCheck activates the swagger rule: an array must have items defined. func EnableArrayMustHaveItemsCheck(enable bool) Option { return func(svo *SchemaValidatorOptions) { svo.EnableArrayMustHaveItemsCheck = enable } } -// SwaggerSchema activates swagger schema validation rules +// SwaggerSchema activates swagger schema validation rules. 
func SwaggerSchema(enable bool) Option { return func(svo *SchemaValidatorOptions) { svo.EnableObjectArrayTypeCheck = enable @@ -53,14 +53,14 @@ func withRecycleResults(enable bool) Option { } } -// WithSkipSchemataResult skips the deep audit payload stored in validation Result +// WithSkipSchemataResult skips the deep audit payload stored in validation Result. func WithSkipSchemataResult(enable bool) Option { return func(svo *SchemaValidatorOptions) { svo.skipSchemataResult = enable } } -// Options returns the current set of options +// Options returns the current set of options. func (svo SchemaValidatorOptions) Options() []Option { return []Option{ EnableObjectArrayTypeCheck(svo.EnableObjectArrayTypeCheck), diff --git a/vendor/github.com/go-openapi/validate/schema_props.go b/vendor/github.com/go-openapi/validate/schema_props.go index 485f536adc..2c4354d08a 100644 --- a/vendor/github.com/go-openapi/validate/schema_props.go +++ b/vendor/github.com/go-openapi/validate/schema_props.go @@ -34,7 +34,8 @@ func (s *schemaPropsValidator) SetPath(path string) { func newSchemaPropsValidator( path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root any, formats strfmt.Registry, - opts *SchemaValidatorOptions) *schemaPropsValidator { + opts *SchemaValidatorOptions, +) *schemaPropsValidator { if opts == nil { opts = new(SchemaValidatorOptions) } @@ -281,7 +282,7 @@ func (s *schemaPropsValidator) validateNot(data any, mainResult *Result) { } func (s *schemaPropsValidator) validateDependencies(data any, mainResult *Result) { - val := data.(map[string]any) + val := data.(map[string]any) //nolint:forcetypeassert // caller guarantees map[string]any for key := range val { dep, ok := s.Dependencies[key] if !ok { diff --git a/vendor/github.com/go-openapi/validate/slice_validator.go b/vendor/github.com/go-openapi/validate/slice_validator.go index 4a5a208968..8f49d13707 100644 --- a/vendor/github.com/go-openapi/validate/slice_validator.go 
+++ b/vendor/github.com/go-openapi/validate/slice_validator.go @@ -27,7 +27,8 @@ type schemaSliceValidator struct { func newSliceValidator(path, in string, maxItems, minItems *int64, uniqueItems bool, additionalItems *spec.SchemaOrBool, items *spec.SchemaOrArray, - root any, formats strfmt.Registry, opts *SchemaValidatorOptions) *schemaSliceValidator { + root any, formats strfmt.Registry, opts *SchemaValidatorOptions, +) *schemaSliceValidator { if opts == nil { opts = new(SchemaValidatorOptions) } diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go index 8616a861f2..b85432f92b 100644 --- a/vendor/github.com/go-openapi/validate/spec.go +++ b/vendor/github.com/go-openapi/validate/spec.go @@ -25,16 +25,16 @@ import ( // // Returns an error flattening in a single standard error, all validation messages. // -// - TODO: $ref should not have siblings -// - TODO: make sure documentation reflects all checks and warnings -// - TODO: check on discriminators -// - TODO: explicit message on unsupported keywords (better than "forbidden property"...) -// - TODO: full list of unresolved refs -// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples -// - TODO: option to determine if we validate for go-swagger or in a more general context -// - TODO: check on required properties to support anyOf, allOf, oneOf +// - Proposal for enhancement: $ref should not have siblings +// - Proposal for enhancement: make sure documentation reflects all checks and warnings +// - Proposal for enhancement: check on discriminators +// - Proposal for enhancement: explicit message on unsupported keywords (better than "forbidden property"...) 
+// - Proposal for enhancement: full list of unresolved refs +// - Proposal for enhancement: validate numeric constraints (issue#581): this should be handled like defaults and examples +// - Proposal for enhancement: option to determine if we validate for go-swagger or in a more general context +// - Proposal for enhancement: check on required properties to support anyOf, allOf, oneOf // -// NOTE: SecurityScopes are maps: no need to check uniqueness +// NOTE: SecurityScopes are maps: no need to check uniqueness. func Spec(doc *loads.Document, formats strfmt.Registry) error { errs, _ /*warns*/ := NewSpecValidator(doc.Schema(), formats).Validate(doc) if errs.HasErrors() { @@ -43,7 +43,7 @@ func Spec(doc *loads.Document, formats strfmt.Registry) error { return nil } -// SpecValidator validates a swagger 2.0 spec +// SpecValidator validates a swagger 2.0 spec. type SpecValidator struct { schema *spec.Schema // swagger 2.0 schema spec *loads.Document @@ -54,7 +54,7 @@ type SpecValidator struct { schemaOptions *SchemaValidatorOptions } -// NewSpecValidator creates a new swagger spec validator instance +// NewSpecValidator creates a new swagger spec validator instance. func NewSpecValidator(schema *spec.Schema, formats strfmt.Registry) *SpecValidator { // schema options that apply to all called validators schemaOptions := new(SchemaValidatorOptions) @@ -74,7 +74,7 @@ func NewSpecValidator(schema *spec.Schema, formats strfmt.Registry) *SpecValidat } } -// Validate validates the swagger spec +// Validate validates the swagger spec. 
func (s *SpecValidator) Validate(data any) (*Result, *Result) { s.schemaOptions.skipSchemataResult = s.Options.SkipSchemataResult var sd *loads.Document @@ -347,6 +347,7 @@ func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, kno return ancs, res } +//nolint:gocognit // refactor in a forthcoming PR func (s *SpecValidator) validateItems() *Result { // validate parameter, items, schema and response objects for presence of item if type is array res := pools.poolOfResults.BorrowResult() @@ -406,7 +407,7 @@ func (s *SpecValidator) validateItems() *Result { return res } -// Verifies constraints on array type +// Verifies constraints on array type. func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID string) *Result { res := pools.poolOfResults.BorrowResult() if !schema.Type.Contains(arrayType) { @@ -597,7 +598,7 @@ func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Sche } else if v.AdditionalProperties.Schema != nil { // additionalProperties as schema are upported in swagger // recursively validates additionalProperties schema - // TODO : anyOf, allOf, oneOf like in schemaPropsValidator + // Proposal for enhancement: anyOf, allOf, oneOf like in schemaPropsValidator red := s.validateRequiredProperties(path, in, v.AdditionalProperties.Schema) if red.IsValid() { additionalPropertiesMatch = true @@ -620,6 +621,7 @@ func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Sche return res } +//nolint:gocognit // refactor in a forthcoming PR func (s *SpecValidator) validateParameters() *Result { // - for each method, path is unique, regardless of path parameters // e.g. 
GET:/petstore/{id}, GET:/petstore/{pet}, GET:/petstore are @@ -645,7 +647,6 @@ func (s *SpecValidator) validateParameters() *Result { // Check uniqueness of stripped paths if _, found := methodPaths[method][pathToAdd]; found { - // Sort names for stable, testable output if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 { res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd])) @@ -666,7 +667,7 @@ func (s *SpecValidator) validateParameters() *Result { var hasForm, hasBody bool // Check parameters names uniqueness for operation - // TODO: should be done after param expansion + // NOTE: should be done after param expansion res.Merge(s.checkUniqueParams(path, method, op)) // pick the root schema from the swagger specification which describes a parameter diff --git a/vendor/github.com/go-openapi/validate/spec_messages.go b/vendor/github.com/go-openapi/validate/spec_messages.go index 9b079af647..42ce360285 100644 --- a/vendor/github.com/go-openapi/validate/spec_messages.go +++ b/vendor/github.com/go-openapi/validate/spec_messages.go @@ -20,25 +20,25 @@ const ( // ArrayInHeaderRequiresItemsError ... ArrayInHeaderRequiresItemsError = "header %q for %q is a collection without an element type (array requires items definition)" - // BothFormDataAndBodyError indicates that an operation specifies both a body and a formData parameter, which is forbidden + // BothFormDataAndBodyError indicates that an operation specifies both a body and a formData parameter, which is forbidden. BothFormDataAndBodyError = "operation %q has both formData and body parameters. Only one such In: type may be used for a given operation" - // CannotResolveReferenceError when a $ref could not be resolved + // CannotResolveReferenceError when a $ref could not be resolved. CannotResolveReferenceError = "could not resolve reference in %s to $ref %s: %v" // CircularAncestryDefinitionError ... 
CircularAncestryDefinitionError = "definition %q has circular ancestry: %v" - // DefaultValueDoesNotValidateError results from an invalid default value provided + // DefaultValueDoesNotValidateError results from an invalid default value provided. DefaultValueDoesNotValidateError = "default value for %s in %s does not validate its schema" - // DefaultValueItemsDoesNotValidateError results from an invalid default value provided for Items + // DefaultValueItemsDoesNotValidateError results from an invalid default value provided for Items. DefaultValueItemsDoesNotValidateError = "default value for %s.items in %s does not validate its schema" - // DefaultValueHeaderDoesNotValidateError results from an invalid default value provided in header + // DefaultValueHeaderDoesNotValidateError results from an invalid default value provided in header. DefaultValueHeaderDoesNotValidateError = "in operation %q, default value in header %s for %s does not validate its schema" - // DefaultValueHeaderItemsDoesNotValidateError results from an invalid default value provided in header.items + // DefaultValueHeaderItemsDoesNotValidateError results from an invalid default value provided in header.items. DefaultValueHeaderItemsDoesNotValidateError = "in operation %q, default value in header.items %s for %s does not validate its schema" // DefaultValueInDoesNotValidateError ... @@ -50,31 +50,31 @@ const ( // DuplicatePropertiesError ... DuplicatePropertiesError = "definition %q contains duplicate properties: %v" - // ExampleValueDoesNotValidateError results from an invalid example value provided + // ExampleValueDoesNotValidateError results from an invalid example value provided. ExampleValueDoesNotValidateError = "example value for %s in %s does not validate its schema" - // ExampleValueItemsDoesNotValidateError results from an invalid example value provided for Items + // ExampleValueItemsDoesNotValidateError results from an invalid example value provided for Items. 
ExampleValueItemsDoesNotValidateError = "example value for %s.items in %s does not validate its schema" - // ExampleValueHeaderDoesNotValidateError results from an invalid example value provided in header + // ExampleValueHeaderDoesNotValidateError results from an invalid example value provided in header. ExampleValueHeaderDoesNotValidateError = "in operation %q, example value in header %s for %s does not validate its schema" - // ExampleValueHeaderItemsDoesNotValidateError results from an invalid example value provided in header.items + // ExampleValueHeaderItemsDoesNotValidateError results from an invalid example value provided in header.items. ExampleValueHeaderItemsDoesNotValidateError = "in operation %q, example value in header.items %s for %s does not validate its schema" // ExampleValueInDoesNotValidateError ... ExampleValueInDoesNotValidateError = "in operation %q, example value in %s does not validate its schema" - // EmptyPathParameterError means that a path parameter was found empty (e.g. "{}") + // EmptyPathParameterError means that a path parameter was found empty (e.g. "{}"). EmptyPathParameterError = "%q contains an empty path parameter" - // InvalidDocumentError states that spec validation only processes spec.Document objects + // InvalidDocumentError states that spec validation only processes spec.Document objects. InvalidDocumentError = "spec validator can only validate spec.Document objects" - // InvalidItemsPatternError indicates an Items definition with invalid pattern + // InvalidItemsPatternError indicates an Items definition with invalid pattern. InvalidItemsPatternError = "%s for %q has invalid items pattern: %q" - // InvalidParameterDefinitionError indicates an error detected on a parameter definition + // InvalidParameterDefinitionError indicates an error detected on a parameter definition. 
InvalidParameterDefinitionError = "invalid definition for parameter %s in %s in operation %q" // InvalidParameterDefinitionAsSchemaError indicates an error detected on a parameter definition, which was mistaken with a schema definition. @@ -84,41 +84,41 @@ const ( // InvalidPatternError ... InvalidPatternError = "pattern %q is invalid in %s" - // InvalidPatternInError indicates an invalid pattern in a schema or items definition + // InvalidPatternInError indicates an invalid pattern in a schema or items definition. InvalidPatternInError = "%s in %s has invalid pattern: %q" - // InvalidPatternInHeaderError indicates a header definition with an invalid pattern + // InvalidPatternInHeaderError indicates a header definition with an invalid pattern. InvalidPatternInHeaderError = "in operation %q, header %s for %s has invalid pattern %q: %v" // InvalidPatternInParamError ... InvalidPatternInParamError = "operation %q has invalid pattern in param %q: %q" - // InvalidReferenceError indicates that a $ref property could not be resolved + // InvalidReferenceError indicates that a $ref property could not be resolved. InvalidReferenceError = "invalid ref %q" // InvalidResponseDefinitionAsSchemaError indicates an error detected on a response definition, which was mistaken with a schema definition. // Most likely, this situation is encountered whenever a $ref has been added as a sibling of the response definition. InvalidResponseDefinitionAsSchemaError = "invalid definition as Schema for response %s in %s" - // MultipleBodyParamError indicates that an operation specifies multiple parameter with in: body + // MultipleBodyParamError indicates that an operation specifies multiple parameter with in: body. MultipleBodyParamError = "operation %q has more than 1 body param: %v" - // NonUniqueOperationIDError indicates that the same operationId has been specified several times + // NonUniqueOperationIDError indicates that the same operationId has been specified several times. 
NonUniqueOperationIDError = "%q is defined %d times" - // NoParameterInPathError indicates that a path was found without any parameter + // NoParameterInPathError indicates that a path was found without any parameter. NoParameterInPathError = "path param %q has no parameter definition" // NoValidPathErrorOrWarning indicates that no single path could be validated. If Paths is empty, this message is only a warning. NoValidPathErrorOrWarning = "spec has no valid path defined" - // NoValidResponseError indicates that no valid response description could be found for an operation + // NoValidResponseError indicates that no valid response description could be found for an operation. NoValidResponseError = "operation %q has no valid response" // PathOverlapError ... PathOverlapError = "path %s overlaps with %s" - // PathParamNotInPathError indicates that a parameter specified with in: path was not found in the path specification + // PathParamNotInPathError indicates that a parameter specified with in: path was not found in the path specification. PathParamNotInPathError = "path param %q is not present in path %q" // PathParamNotUniqueError ... @@ -127,32 +127,32 @@ const ( // PathParamRequiredError ... PathParamRequiredError = "in operation %q,path param %q must be declared as required" - // RefNotAllowedInHeaderError indicates a $ref was found in a header definition, which is not allowed by Swagger + // RefNotAllowedInHeaderError indicates a $ref was found in a header definition, which is not allowed by Swagger. RefNotAllowedInHeaderError = "IMPORTANT!in %q: $ref are not allowed in headers. In context for header %q%s" // RequiredButNotDefinedError ... 
RequiredButNotDefinedError = "%q is present in required but not defined as property in definition %q" - // SomeParametersBrokenError indicates that some parameters could not be resolved, which might result in partial checks to be carried on + // SomeParametersBrokenError indicates that some parameters could not be resolved, which might result in partial checks to be carried on. SomeParametersBrokenError = "some parameters definitions are broken in %q.%s. Cannot carry on full checks on parameters for operation %s" - // UnresolvedReferencesError indicates that at least one $ref could not be resolved + // UnresolvedReferencesError indicates that at least one $ref could not be resolved. UnresolvedReferencesError = "some references could not be resolved in spec. First found: %v" ) -// Warning messages related to spec validation and returned as results +// Warning messages related to spec validation and returned as results. const ( - // ExamplesWithoutSchemaWarning indicates that examples are provided for a response,but not schema to validate the example against + // ExamplesWithoutSchemaWarning indicates that examples are provided for a response,but not schema to validate the example against. ExamplesWithoutSchemaWarning = "Examples provided without schema in operation %q, %s" // ExamplesMimeNotSupportedWarning indicates that examples are provided with a mime type different than application/json, which - // the validator dos not support yetl + // the validator dos not support yetl. ExamplesMimeNotSupportedWarning = "No validation attempt for examples for media types other than application/json, in operation %q, %s" // PathParamGarbledWarning ... PathParamGarbledWarning = "in path %q, param %q contains {,} or white space. 
Albeit not stricly illegal, this is probably no what you want" - // ParamValidationTypeMismatch indicates that parameter has validation which does not match its type + // ParamValidationTypeMismatch indicates that parameter has validation which does not match its type. ParamValidationTypeMismatch = "validation keywords of parameter %q in path %q don't match its type %s" // PathStrippedParamGarbledWarning ... @@ -165,7 +165,7 @@ const ( // which is most likely not wanted. RefShouldNotHaveSiblingsWarning = "$ref property should have no sibling in %q.%s" - // RequiredHasDefaultWarning indicates that a required parameter property should not have a default + // RequiredHasDefaultWarning indicates that a required parameter property should not have a default. RequiredHasDefaultWarning = "%s in %s has a default value and is required as parameter" // UnusedDefinitionWarning ... @@ -180,164 +180,214 @@ const ( InvalidObject = "expected an object in %q.%s" ) -// Additional error codes +// Additional error codes. const ( - // InternalErrorCode reports an internal technical error + // InternalErrorCode reports an internal technical error. InternalErrorCode = http.StatusInternalServerError - // NotFoundErrorCode indicates that a resource (e.g. a $ref) could not be found + // NotFoundErrorCode indicates that a resource (e.g. a $ref) could not be found. 
NotFoundErrorCode = http.StatusNotFound ) func invalidDocumentMsg() errors.Error { return errors.New(InternalErrorCode, InvalidDocumentError) } + func invalidRefMsg(path string) errors.Error { return errors.New(NotFoundErrorCode, InvalidReferenceError, path) } + func unresolvedReferencesMsg(err error) errors.Error { return errors.New(errors.CompositeErrorCode, UnresolvedReferencesError, err) } + func noValidPathMsg() errors.Error { return errors.New(errors.CompositeErrorCode, NoValidPathErrorOrWarning) } + func emptyPathParameterMsg(path string) errors.Error { return errors.New(errors.CompositeErrorCode, EmptyPathParameterError, path) } + func nonUniqueOperationIDMsg(path string, i int) errors.Error { return errors.New(errors.CompositeErrorCode, NonUniqueOperationIDError, path, i) } + func circularAncestryDefinitionMsg(path string, args any) errors.Error { return errors.New(errors.CompositeErrorCode, CircularAncestryDefinitionError, path, args) } + func duplicatePropertiesMsg(path string, args any) errors.Error { return errors.New(errors.CompositeErrorCode, DuplicatePropertiesError, path, args) } + func pathParamNotInPathMsg(path, param string) errors.Error { return errors.New(errors.CompositeErrorCode, PathParamNotInPathError, param, path) } + func arrayRequiresItemsMsg(path, operation string) errors.Error { return errors.New(errors.CompositeErrorCode, ArrayRequiresItemsError, path, operation) } + func arrayInParamRequiresItemsMsg(path, operation string) errors.Error { return errors.New(errors.CompositeErrorCode, ArrayInParamRequiresItemsError, path, operation) } + func arrayInHeaderRequiresItemsMsg(path, operation string) errors.Error { return errors.New(errors.CompositeErrorCode, ArrayInHeaderRequiresItemsError, path, operation) } + func invalidItemsPatternMsg(path, operation, pattern string) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidItemsPatternError, path, operation, pattern) } + func invalidPatternMsg(pattern, path string) 
errors.Error { return errors.New(errors.CompositeErrorCode, InvalidPatternError, pattern, path) } + func requiredButNotDefinedMsg(path, definition string) errors.Error { return errors.New(errors.CompositeErrorCode, RequiredButNotDefinedError, path, definition) } + func pathParamGarbledMsg(path, param string) errors.Error { return errors.New(errors.CompositeErrorCode, PathParamGarbledWarning, path, param) } + func pathStrippedParamGarbledMsg(path string) errors.Error { return errors.New(errors.CompositeErrorCode, PathStrippedParamGarbledWarning, path) } + func pathOverlapMsg(path, arg string) errors.Error { return errors.New(errors.CompositeErrorCode, PathOverlapError, path, arg) } + func invalidPatternInParamMsg(operation, param, pattern string) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidPatternInParamError, operation, param, pattern) } + func pathParamRequiredMsg(operation, param string) errors.Error { return errors.New(errors.CompositeErrorCode, PathParamRequiredError, operation, param) } + func bothFormDataAndBodyMsg(operation string) errors.Error { return errors.New(errors.CompositeErrorCode, BothFormDataAndBodyError, operation) } + func multipleBodyParamMsg(operation string, args any) errors.Error { return errors.New(errors.CompositeErrorCode, MultipleBodyParamError, operation, args) } + func pathParamNotUniqueMsg(path, param, arg string) errors.Error { return errors.New(errors.CompositeErrorCode, PathParamNotUniqueError, path, param, arg) } + func duplicateParamNameMsg(path, param, operation string) errors.Error { return errors.New(errors.CompositeErrorCode, DuplicateParamNameError, param, path, operation) } + func unusedParamMsg(arg string) errors.Error { return errors.New(errors.CompositeErrorCode, UnusedParamWarning, arg) } + func unusedDefinitionMsg(arg string) errors.Error { return errors.New(errors.CompositeErrorCode, UnusedDefinitionWarning, arg) } + func unusedResponseMsg(arg string) errors.Error { return 
errors.New(errors.CompositeErrorCode, UnusedResponseWarning, arg) } + func readOnlyAndRequiredMsg(path, param string) errors.Error { return errors.New(errors.CompositeErrorCode, ReadOnlyAndRequiredWarning, param, path) } + func noParameterInPathMsg(param string) errors.Error { return errors.New(errors.CompositeErrorCode, NoParameterInPathError, param) } + func requiredHasDefaultMsg(param, path string) errors.Error { return errors.New(errors.CompositeErrorCode, RequiredHasDefaultWarning, param, path) } + func defaultValueDoesNotValidateMsg(param, path string) errors.Error { return errors.New(errors.CompositeErrorCode, DefaultValueDoesNotValidateError, param, path) } + func defaultValueItemsDoesNotValidateMsg(param, path string) errors.Error { return errors.New(errors.CompositeErrorCode, DefaultValueItemsDoesNotValidateError, param, path) } + func noValidResponseMsg(operation string) errors.Error { return errors.New(errors.CompositeErrorCode, NoValidResponseError, operation) } + func defaultValueHeaderDoesNotValidateMsg(operation, header, path string) errors.Error { return errors.New(errors.CompositeErrorCode, DefaultValueHeaderDoesNotValidateError, operation, header, path) } + func defaultValueHeaderItemsDoesNotValidateMsg(operation, header, path string) errors.Error { return errors.New(errors.CompositeErrorCode, DefaultValueHeaderItemsDoesNotValidateError, operation, header, path) } + func invalidPatternInHeaderMsg(operation, header, path, pattern string, args any) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidPatternInHeaderError, operation, header, path, pattern, args) } + func invalidPatternInMsg(path, in, pattern string) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidPatternInError, path, in, pattern) } + func defaultValueInDoesNotValidateMsg(operation, path string) errors.Error { return errors.New(errors.CompositeErrorCode, DefaultValueInDoesNotValidateError, operation, path) } + func 
exampleValueDoesNotValidateMsg(param, path string) errors.Error { return errors.New(errors.CompositeErrorCode, ExampleValueDoesNotValidateError, param, path) } + func exampleValueItemsDoesNotValidateMsg(param, path string) errors.Error { return errors.New(errors.CompositeErrorCode, ExampleValueItemsDoesNotValidateError, param, path) } + func exampleValueHeaderDoesNotValidateMsg(operation, header, path string) errors.Error { return errors.New(errors.CompositeErrorCode, ExampleValueHeaderDoesNotValidateError, operation, header, path) } + func exampleValueHeaderItemsDoesNotValidateMsg(operation, header, path string) errors.Error { return errors.New(errors.CompositeErrorCode, ExampleValueHeaderItemsDoesNotValidateError, operation, header, path) } + func exampleValueInDoesNotValidateMsg(operation, path string) errors.Error { return errors.New(errors.CompositeErrorCode, ExampleValueInDoesNotValidateError, operation, path) } + func examplesWithoutSchemaMsg(operation, response string) errors.Error { return errors.New(errors.CompositeErrorCode, ExamplesWithoutSchemaWarning, operation, response) } + func examplesMimeNotSupportedMsg(operation, response string) errors.Error { return errors.New(errors.CompositeErrorCode, ExamplesMimeNotSupportedWarning, operation, response) } + func refNotAllowedInHeaderMsg(path, header, ref string) errors.Error { return errors.New(errors.CompositeErrorCode, RefNotAllowedInHeaderError, path, header, ref) } + func cannotResolveRefMsg(path, ref string, err error) errors.Error { return errors.New(errors.CompositeErrorCode, CannotResolveReferenceError, path, ref, err) } + func invalidParameterDefinitionMsg(path, method, operationID string) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidParameterDefinitionError, path, method, operationID) } + func invalidParameterDefinitionAsSchemaMsg(path, method, operationID string) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidParameterDefinitionAsSchemaError, path, 
method, operationID) } + func parameterValidationTypeMismatchMsg(param, path, typ string) errors.Error { return errors.New(errors.CompositeErrorCode, ParamValidationTypeMismatch, param, path, typ) } + func invalidObjectMsg(path, in string) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidObject, path, in) } @@ -350,6 +400,7 @@ func invalidObjectMsg(path, in string) errors.Error { func someParametersBrokenMsg(path, method, operationID string) errors.Error { return errors.New(errors.CompositeErrorCode, SomeParametersBrokenError, path, method, operationID) } + func refShouldNotHaveSiblingsMsg(path, operationID string) errors.Error { return errors.New(errors.CompositeErrorCode, RefShouldNotHaveSiblingsWarning, operationID, path) } diff --git a/vendor/github.com/go-openapi/validate/type.go b/vendor/github.com/go-openapi/validate/type.go index 9b9ab8d917..d29574c349 100644 --- a/vendor/github.com/go-openapi/validate/type.go +++ b/vendor/github.com/go-openapi/validate/type.go @@ -71,7 +71,7 @@ func (t *typeValidator) Validate(data any) *Result { if data == nil { // nil or zero value for the passed structure require Type: null - if len(t.Type) > 0 && !t.Type.Contains(nullType) && !t.Nullable { // TODO: if a property is not required it also passes this + if len(t.Type) > 0 && !t.Type.Contains(nullType) && !t.Nullable { // NOTE: if a property is not required it also passes this return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType), t.Options.recycleResult) } @@ -86,15 +86,18 @@ func (t *typeValidator) Validate(data any) *Result { schType, format := t.schemaInfoForType(data) // check numerical types - // TODO: check unsigned ints - // TODO: check json.Number (see schema.go) + // Proposal for enhancement: check unsigned ints + // Proposal for enhancement: check json.Number (see schema.go) isLowerInt := t.Format == integerFormatInt64 && format == integerFormatInt32 isLowerFloat := t.Format == numberFormatFloat64 && format 
== numberFormatFloat32 isFloatInt := schType == numberType && conv.IsFloat64AJSONInteger(val.Float()) && t.Type.Contains(integerType) isIntFloat := schType == integerType && t.Type.Contains(numberType) - if kind != reflect.String && kind != reflect.Slice && t.Format != "" && !t.Type.Contains(schType) && format != t.Format && !isFloatInt && !isIntFloat && !isLowerInt && !isLowerFloat { - // TODO: test case + formatMismatch := kind != reflect.String && kind != reflect.Slice && + t.Format != "" && !t.Type.Contains(schType) && format != t.Format && + !isFloatInt && !isIntFloat && !isLowerInt && !isLowerFloat + if formatMismatch { + // NOTE: test case return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format), t.Options.recycleResult) } @@ -112,7 +115,7 @@ func (t *typeValidator) Validate(data any) *Result { func (t *typeValidator) schemaInfoForType(data any) (string, string) { // internal type to JSON type with swagger 2.0 format (with go-openapi/strfmt extensions), // see https://github.com/go-openapi/strfmt/blob/master/README.md - // TODO: this switch really is some sort of reverse lookup for formats. It should be provided by strfmt. + // NOTE: this switch really is some sort of reverse lookup for formats. It should be provided by strfmt. 
switch data.(type) { case []byte, strfmt.Base64, *strfmt.Base64: return stringType, stringFormatByte @@ -162,8 +165,8 @@ func (t *typeValidator) schemaInfoForType(data any) (string, string) { return stringType, stringFormatUUID4 case strfmt.UUID5, *strfmt.UUID5: return stringType, stringFormatUUID5 - // TODO: missing binary (io.ReadCloser) - // TODO: missing json.Number + // Proposal for enhancement: missing binary (io.ReadCloser) + // Proposal for enhancement: missing json.Number default: val := reflect.ValueOf(data) tpe := val.Type() diff --git a/vendor/github.com/go-openapi/validate/update-fixtures.sh b/vendor/github.com/go-openapi/validate/update-fixtures.sh index 21b06e2b09..8ee55ca3b2 100644 --- a/vendor/github.com/go-openapi/validate/update-fixtures.sh +++ b/vendor/github.com/go-openapi/validate/update-fixtures.sh @@ -1,4 +1,6 @@ -#!/bin/bash +#!/bin/bash +# SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +# SPDX-License-Identifier: Apache-2.0 set -eu -o pipefail dir=$(git rev-parse --show-toplevel) diff --git a/vendor/github.com/go-openapi/validate/validator.go b/vendor/github.com/go-openapi/validate/validator.go index 289a847fc7..e7aebc5256 100644 --- a/vendor/github.com/go-openapi/validate/validator.go +++ b/vendor/github.com/go-openapi/validate/validator.go @@ -12,15 +12,15 @@ import ( "github.com/go-openapi/strfmt" ) -// An EntityValidator is an interface for things that can validate entities +// An EntityValidator is an interface for things that can validate entities. 
type EntityValidator interface { - Validate(any) *Result + Validate(data any) *Result } type valueValidator interface { SetPath(path string) - Applies(any, reflect.Kind) bool - Validate(any) *Result + Applies(source any, kind reflect.Kind) bool + Validate(data any) *Result } type itemsValidator struct { @@ -286,7 +286,7 @@ func (b *basicCommonValidator) redeem() { pools.poolOfBasicCommonValidators.RedeemValidator(b) } -// A HeaderValidator has very limited subset of validations to apply +// A HeaderValidator has very limited subset of validations to apply. type HeaderValidator struct { name string header *spec.Header @@ -295,7 +295,7 @@ type HeaderValidator struct { Options *SchemaValidatorOptions } -// NewHeaderValidator creates a new header validator object +// NewHeaderValidator creates a new header validator object. func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, options ...Option) *HeaderValidator { opts := new(SchemaValidatorOptions) for _, o := range options { @@ -340,7 +340,7 @@ func newHeaderValidator(name string, header *spec.Header, formats strfmt.Registr return p } -// Validate the value of the header against its schema +// Validate the value of the header against its schema. func (p *HeaderValidator) Validate(data any) *Result { if p.Options.recycleValidators { defer func() { @@ -479,7 +479,7 @@ func (p *HeaderValidator) redeemChildren() { } } -// A ParamValidator has very limited subset of validations to apply +// A ParamValidator has very limited subset of validations to apply. type ParamValidator struct { param *spec.Parameter validators [6]valueValidator @@ -487,7 +487,7 @@ type ParamValidator struct { Options *SchemaValidatorOptions } -// NewParamValidator creates a new param validator object +// NewParamValidator creates a new param validator object. 
func NewParamValidator(param *spec.Parameter, formats strfmt.Registry, options ...Option) *ParamValidator { opts := new(SchemaValidatorOptions) for _, o := range options { @@ -531,7 +531,7 @@ func newParamValidator(param *spec.Parameter, formats strfmt.Registry, opts *Sch return p } -// Validate the data against the description of the parameter +// Validate the data against the description of the parameter. func (p *ParamValidator) Validate(data any) *Result { if data == nil { return nil @@ -554,7 +554,7 @@ func (p *ParamValidator) Validate(data any) *Result { }() } - // TODO: validate type + // Proposal for enhancement: validate type for idx, validator := range p.validators { if !validator.Applies(p.param, kind) { if p.Options.recycleValidators { @@ -688,7 +688,8 @@ func newBasicSliceValidator( path, in string, def any, maxItems, minItems *int64, uniqueItems bool, items *spec.Items, source any, formats strfmt.Registry, - opts *SchemaValidatorOptions) *basicSliceValidator { + opts *SchemaValidatorOptions, +) *basicSliceValidator { if opts == nil { opts = new(SchemaValidatorOptions) } @@ -797,7 +798,8 @@ func newNumberValidator( path, in string, def any, multipleOf, maximum *float64, exclusiveMaximum bool, minimum *float64, exclusiveMinimum bool, typ, format string, - opts *SchemaValidatorOptions) *numberValidator { + opts *SchemaValidatorOptions, +) *numberValidator { if opts == nil { opts = new(SchemaValidatorOptions) } @@ -857,9 +859,9 @@ func (n *numberValidator) Applies(source any, kind reflect.Kind) bool { // // If this is the case, replace AddErrors() by AddWarnings() and IsValid() by !HasWarnings(). // -// TODO: consider replacing boundary check errors by simple warnings. +// Proposal for enhancement: consider replacing boundary check errors by simple warnings. // -// TODO: default boundaries with MAX_SAFE_INTEGER are not checked (specific to json.Number?) +// NOTE: default boundaries with MAX_SAFE_INTEGER are not checked (specific to json.Number?) 
func (n *numberValidator) Validate(val any) *Result { if n.Options.recycleValidators { defer func() { @@ -959,7 +961,8 @@ type stringValidator struct { func newStringValidator( path, in string, def any, required, allowEmpty bool, maxLength, minLength *int64, pattern string, - opts *SchemaValidatorOptions) *stringValidator { + opts *SchemaValidatorOptions, +) *stringValidator { if opts == nil { opts = new(SchemaValidatorOptions) } diff --git a/vendor/github.com/go-openapi/validate/values.go b/vendor/github.com/go-openapi/validate/values.go index e7dd5c8d3a..2b80766bd5 100644 --- a/vendor/github.com/go-openapi/validate/values.go +++ b/vendor/github.com/go-openapi/validate/values.go @@ -21,15 +21,15 @@ func (e valueError) Error() string { return string(e) } -// ErrValue indicates that a value validation occurred +// ErrValue indicates that a value validation occurred. const ErrValue valueError = "value validation error" -// Enum validates if the data is a member of the enum +// Enum validates if the data is a member of the enum. func Enum(path, in string, data any, enum any) *errors.Validation { return EnumCase(path, in, data, enum, true) } -// EnumCase validates if the data is a member of the enum and may respect case-sensitivity for strings +// EnumCase validates if the data is a member of the enum and may respect case-sensitivity for strings. func EnumCase(path, in string, data any, enum any, caseSensitive bool) *errors.Validation { val := reflect.ValueOf(enum) if val.Kind() != reflect.Slice { @@ -66,7 +66,7 @@ func EnumCase(path, in string, data any, enum any, caseSensitive bool) *errors.V return errors.EnumFail(path, in, data, values) } -// convertEnumCaseStringKind converts interface if it is kind of string and case insensitivity is set +// convertEnumCaseStringKind converts interface if it is kind of string and case insensitivity is set. 
func convertEnumCaseStringKind(value any, caseSensitive bool) *string { if caseSensitive { return nil @@ -81,7 +81,7 @@ func convertEnumCaseStringKind(value any, caseSensitive bool) *string { return &str } -// MinItems validates that there are at least n items in a slice +// MinItems validates that there are at least n items in a slice. func MinItems(path, in string, size, minimum int64) *errors.Validation { if size < minimum { return errors.TooFewItems(path, in, minimum, size) @@ -89,7 +89,7 @@ func MinItems(path, in string, size, minimum int64) *errors.Validation { return nil } -// MaxItems validates that there are at most n items in a slice +// MaxItems validates that there are at most n items in a slice. func MaxItems(path, in string, size, maximum int64) *errors.Validation { if size > maximum { return errors.TooManyItems(path, in, maximum, size) @@ -97,7 +97,7 @@ func MaxItems(path, in string, size, maximum int64) *errors.Validation { return nil } -// UniqueItems validates that the provided slice has unique elements +// UniqueItems validates that the provided slice has unique elements. func UniqueItems(path, in string, data any) *errors.Validation { val := reflect.ValueOf(data) if val.Kind() != reflect.Slice { @@ -116,7 +116,7 @@ func UniqueItems(path, in string, data any) *errors.Validation { return nil } -// MinLength validates a string for minimum length +// MinLength validates a string for minimum length. func MinLength(path, in, data string, minLength int64) *errors.Validation { strLen := int64(utf8.RuneCountInString(data)) if strLen < minLength { @@ -125,7 +125,7 @@ func MinLength(path, in, data string, minLength int64) *errors.Validation { return nil } -// MaxLength validates a string for maximum length +// MaxLength validates a string for maximum length. 
func MaxLength(path, in, data string, maxLength int64) *errors.Validation { strLen := int64(utf8.RuneCountInString(data)) if strLen > maxLength { @@ -134,9 +134,8 @@ func MaxLength(path, in, data string, maxLength int64) *errors.Validation { return nil } -// ReadOnly validates an interface for readonly +// ReadOnly validates an interface for readonly. func ReadOnly(ctx context.Context, path, in string, data any) *errors.Validation { - // read only is only validated when operationType is request if op := extractOperationType(ctx); op != request { return nil @@ -155,7 +154,7 @@ func ReadOnly(ctx context.Context, path, in string, data any) *errors.Validation return errors.ReadOnly(path, in, data) } -// Required validates an interface for requiredness +// Required validates an interface for requiredness. func Required(path, in string, data any) *errors.Validation { val := reflect.ValueOf(data) if val.IsValid() { @@ -167,7 +166,7 @@ func Required(path, in string, data any) *errors.Validation { return errors.Required(path, in, data) } -// RequiredString validates a string for requiredness +// RequiredString validates a string for requiredness. func RequiredString(path, in, data string) *errors.Validation { if data == "" { return errors.Required(path, in, data) @@ -175,7 +174,7 @@ func RequiredString(path, in, data string) *errors.Validation { return nil } -// RequiredNumber validates a number for requiredness +// RequiredNumber validates a number for requiredness. func RequiredNumber(path, in string, data float64) *errors.Validation { if data == 0 { return errors.Required(path, in, data) @@ -183,7 +182,7 @@ func RequiredNumber(path, in string, data float64) *errors.Validation { return nil } -// Pattern validates a string against a regular expression +// Pattern validates a string against a regular expression. 
func Pattern(path, in, data, pattern string) *errors.Validation { re, err := compileRegexp(pattern) if err != nil { @@ -195,7 +194,7 @@ func Pattern(path, in, data, pattern string) *errors.Validation { return nil } -// MaximumInt validates if a number is smaller than a given maximum +// MaximumInt validates if a number is smaller than a given maximum. func MaximumInt(path, in string, data, maximum int64, exclusive bool) *errors.Validation { if (!exclusive && data > maximum) || (exclusive && data >= maximum) { return errors.ExceedsMaximumInt(path, in, maximum, exclusive, data) @@ -203,7 +202,7 @@ func MaximumInt(path, in string, data, maximum int64, exclusive bool) *errors.Va return nil } -// MaximumUint validates if a number is smaller than a given maximum +// MaximumUint validates if a number is smaller than a given maximum. func MaximumUint(path, in string, data, maximum uint64, exclusive bool) *errors.Validation { if (!exclusive && data > maximum) || (exclusive && data >= maximum) { return errors.ExceedsMaximumUint(path, in, maximum, exclusive, data) @@ -211,7 +210,7 @@ func MaximumUint(path, in string, data, maximum uint64, exclusive bool) *errors. return nil } -// Maximum validates if a number is smaller than a given maximum +// Maximum validates if a number is smaller than a given maximum. func Maximum(path, in string, data, maximum float64, exclusive bool) *errors.Validation { if (!exclusive && data > maximum) || (exclusive && data >= maximum) { return errors.ExceedsMaximum(path, in, maximum, exclusive, data) @@ -219,7 +218,7 @@ func Maximum(path, in string, data, maximum float64, exclusive bool) *errors.Val return nil } -// Minimum validates if a number is smaller than a given minimum +// Minimum validates if a number is smaller than a given minimum. 
func Minimum(path, in string, data, minimum float64, exclusive bool) *errors.Validation { if (!exclusive && data < minimum) || (exclusive && data <= minimum) { return errors.ExceedsMinimum(path, in, minimum, exclusive, data) @@ -227,7 +226,7 @@ func Minimum(path, in string, data, minimum float64, exclusive bool) *errors.Val return nil } -// MinimumInt validates if a number is smaller than a given minimum +// MinimumInt validates if a number is smaller than a given minimum. func MinimumInt(path, in string, data, minimum int64, exclusive bool) *errors.Validation { if (!exclusive && data < minimum) || (exclusive && data <= minimum) { return errors.ExceedsMinimumInt(path, in, minimum, exclusive, data) @@ -235,7 +234,7 @@ func MinimumInt(path, in string, data, minimum int64, exclusive bool) *errors.Va return nil } -// MinimumUint validates if a number is smaller than a given minimum +// MinimumUint validates if a number is smaller than a given minimum. func MinimumUint(path, in string, data, minimum uint64, exclusive bool) *errors.Validation { if (!exclusive && data < minimum) || (exclusive && data <= minimum) { return errors.ExceedsMinimumUint(path, in, minimum, exclusive, data) @@ -243,7 +242,7 @@ func MinimumUint(path, in string, data, minimum uint64, exclusive bool) *errors. return nil } -// MultipleOf validates if the provided number is a multiple of the factor +// MultipleOf validates if the provided number is a multiple of the factor. func MultipleOf(path, in string, data, factor float64) *errors.Validation { // multipleOf factor must be positive if factor <= 0 { @@ -261,7 +260,7 @@ func MultipleOf(path, in string, data, factor float64) *errors.Validation { return nil } -// MultipleOfInt validates if the provided integer is a multiple of the factor +// MultipleOfInt validates if the provided integer is a multiple of the factor. 
func MultipleOfInt(path, in string, data int64, factor int64) *errors.Validation { // multipleOf factor must be positive if factor <= 0 { @@ -274,7 +273,7 @@ func MultipleOfInt(path, in string, data int64, factor int64) *errors.Validation return nil } -// MultipleOfUint validates if the provided unsigned integer is a multiple of the factor +// MultipleOfUint validates if the provided unsigned integer is a multiple of the factor. func MultipleOfUint(path, in string, data, factor uint64) *errors.Validation { // multipleOf factor must be positive if factor == 0 { @@ -287,7 +286,7 @@ func MultipleOfUint(path, in string, data, factor uint64) *errors.Validation { return nil } -// FormatOf validates if a string matches a format in the format registry +// FormatOf validates if a string matches a format in the format registry. func FormatOf(path, in, format, data string, registry strfmt.Registry) *errors.Validation { if registry == nil { registry = strfmt.Default @@ -310,10 +309,10 @@ func FormatOf(path, in, format, data string, registry strfmt.Registry) *errors.V // NOTE: currently, the max value is marshalled as a float64, no matter what, // which means there may be a loss during conversions (e.g. for very large integers) // -// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +// NOTE: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free. 
func MaximumNativeType(path, in string, val any, maximum float64, exclusive bool) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { //nolint:exhaustive + switch kind { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MaximumInt(path, in, value, int64(maximum), exclusive) @@ -340,10 +339,10 @@ func MaximumNativeType(path, in string, val any, maximum float64, exclusive bool // NOTE: currently, the min value is marshalled as a float64, no matter what, // which means there may be a loss during conversions (e.g. for very large integers) // -// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +// NOTE: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free. func MinimumNativeType(path, in string, val any, minimum float64, exclusive bool) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { //nolint:exhaustive + switch kind { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MinimumInt(path, in, value, int64(minimum), exclusive) @@ -362,7 +361,7 @@ func MinimumNativeType(path, in string, val any, minimum float64, exclusive bool } // MultipleOfNativeType provides native type constraint validation as a facade -// to various numeric types version of MultipleOf constraint check. +// to various numeric types version of [MultipleOf] constraint check. // // Assumes that any possible loss conversion during conversion has been // checked beforehand. @@ -370,10 +369,10 @@ func MinimumNativeType(path, in string, val any, minimum float64, exclusive bool // NOTE: currently, the multipleOf factor is marshalled as a float64, no matter what, // which means there may be a loss during conversions (e.g. 
for very large integers) // -// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +// NOTE: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free. func MultipleOfNativeType(path, in string, val any, multipleOf float64) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { //nolint:exhaustive + switch kind { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MultipleOfInt(path, in, value, int64(multipleOf)) @@ -391,13 +390,13 @@ func MultipleOfNativeType(path, in string, val any, multipleOf float64) *errors. // IsValueValidAgainstRange checks that a numeric value is compatible with // the range defined by Type and Format, that is, may be converted without loss. // -// NOTE: this check is about type capacity and not formal verification such as: 1.0 != 1L +// NOTE: this check is about type capacity and not formal verification such as: 1.0 != 1L. 
func IsValueValidAgainstRange(val any, typeName, format, prefix, path string) error { kind := reflect.ValueOf(val).Type().Kind() // What is the string representation of val var stringRep string - switch kind { //nolint:exhaustive + switch kind { case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: stringRep = conv.FormatUinteger(valueHelp.asUint64(val)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: diff --git a/vendor/github.com/go-piv/piv-go/v2/piv/key.go b/vendor/github.com/go-piv/piv-go/v2/piv/key.go index 4cb55b9dc4..a09cb55a25 100644 --- a/vendor/github.com/go-piv/piv-go/v2/piv/key.go +++ b/vendor/github.com/go-piv/piv-go/v2/piv/key.go @@ -182,6 +182,10 @@ func (a *Attestation) addExt(e pkix.Extension) error { a.PINPolicy = PINPolicyOnce case 0x03: a.PINPolicy = PINPolicyAlways + case 0x04: + a.PINPolicy = PINPolicyMatchOnce + case 0x05: + a.PINPolicy = PINPolicyMatchAlways default: return fmt.Errorf("unrecognized pin policy: 0x%x", e.Value[0]) } @@ -485,6 +489,18 @@ const ( PINPolicyNever PINPolicy = iota + 1 PINPolicyOnce PINPolicyAlways + // PINPolicyMatchOnce and PINPolicyMatchAlways require biometric user + // verification (YubiKey Bio). The naming convention of these policies aligns + // with yubico-piv-tool. + // + // The library handles biometric verification transparently: + // - MatchOnce: VerifyBiometrics is performed on demand, then the operation + // is retried once. + // - MatchAlways: VerifyBiometrics is performed before every operation. 
+ // + // See https://docs.yubico.com/yesdk/users-manual/application-piv/pin-touch-policies.html + PINPolicyMatchOnce + PINPolicyMatchAlways ) // TouchPolicy represents proof-of-presence requirements when signing or @@ -514,15 +530,19 @@ const ( ) var pinPolicyMap = map[PINPolicy]byte{ - PINPolicyNever: 0x01, - PINPolicyOnce: 0x02, - PINPolicyAlways: 0x03, + PINPolicyNever: 0x01, + PINPolicyOnce: 0x02, + PINPolicyAlways: 0x03, + PINPolicyMatchOnce: 0x04, + PINPolicyMatchAlways: 0x05, } var pinPolicyMapInv = map[byte]PINPolicy{ 0x01: PINPolicyNever, 0x02: PINPolicyOnce, 0x03: PINPolicyAlways, + 0x04: PINPolicyMatchOnce, + 0x05: PINPolicyMatchAlways, } var touchPolicyMap = map[TouchPolicy]byte{ @@ -931,6 +951,11 @@ func (k KeyAuth) authTx(yk *YubiKey, pp PINPolicy) error { return nil } + // Match policies use biometric verification (not PIN) and are handled in do(). + if pp == PINPolicyMatchOnce || pp == PINPolicyMatchAlways { + return nil + } + // PINPolicyAlways should always prompt a PIN even if the key says that // login isn't needed. 
// https://github.com/go-piv/piv-go/issues/49 @@ -953,6 +978,33 @@ func (k KeyAuth) authTx(yk *YubiKey, pp PINPolicy) error { } func (k KeyAuth) do(yk *YubiKey, pp PINPolicy, f func(tx *scTx) ([]byte, error)) ([]byte, error) { + const swSecurityStatusNotSatisfied = 0x6982 + + if pp == PINPolicyMatchAlways { + if err := yk.VerifyBiometrics(); err != nil { + return nil, err + } + return f(yk.tx) + } + + if pp == PINPolicyMatchOnce { + if err := k.authTx(yk, pp); err != nil { + return nil, err + } + resp, err := f(yk.tx) + if err == nil { + return resp, nil + } + var apdu *apduErr + if errors.As(err, &apdu) && apdu.Status() == swSecurityStatusNotSatisfied { + if err := yk.VerifyBiometrics(); err != nil { + return nil, err + } + return f(yk.tx) + } + return nil, err + } + if err := k.authTx(yk, pp); err != nil { return nil, err } diff --git a/vendor/github.com/go-piv/piv-go/v2/piv/piv.go b/vendor/github.com/go-piv/piv-go/v2/piv/piv.go index a4d884ec88..e8d1b08c6a 100644 --- a/vendor/github.com/go-piv/piv-go/v2/piv/piv.go +++ b/vendor/github.com/go-piv/piv-go/v2/piv/piv.go @@ -121,6 +121,7 @@ type YubiKey struct { // YubiKey's version or PIV version? A NEO reports v1.0.4. Figure this out // before exposing an API. version *version + } // Close releases the connection to the smart card. @@ -243,6 +244,14 @@ func (yk *YubiKey) VerifyPIN(pin string) error { return ykLogin(yk.tx, pin) } +// VerifyBiometrics performs biometric user verification (YubiKey Bio) for PIV. +// When using PINPolicyMatchOnce, call this once per session before signing or +// decrypting. When using PINPolicyMatchAlways, call this before every signing +// or decrypting operation. 
+func (yk *YubiKey) VerifyBiometrics() error { + return ykVerifyUV(yk.tx) +} + func ykLogin(tx *scTx, pin string) error { data, err := encodePIN(pin) if err != nil { @@ -260,6 +269,16 @@ func ykLogin(tx *scTx, pin string) error { return nil } +func ykVerifyUV(tx *scTx) error { + // 6.4.4 VERIFY UV Command + // https://docs.yubico.com/yesdk/users-manual/application-piv/apdu/verify-uv.html + cmd := apdu{instruction: insVerify, param2: 0x96, data: []byte{0x03, 0x00}} + if _, err := tx.Transmit(cmd); err != nil { + return fmt.Errorf("verify uv: %w", err) + } + return nil +} + func ykLoginNeeded(tx *scTx) bool { cmd := apdu{instruction: insVerify, param2: 0x80} _, err := tx.Transmit(cmd) diff --git a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig index faef0c91e7..c37602a02d 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig +++ b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig @@ -19,3 +19,6 @@ indent_size = 2 [.golangci.yaml] indent_size = 2 + +[devenv.yaml] +indent_size = 2 diff --git a/vendor/github.com/go-viper/mapstructure/v2/.envrc b/vendor/github.com/go-viper/mapstructure/v2/.envrc index 2e0f9f5f71..e2be8891cb 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/.envrc +++ b/vendor/github.com/go-viper/mapstructure/v2/.envrc @@ -1,4 +1,7 @@ -if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" -fi -use flake . 
--impure +#!/usr/bin/env bash + +export DIRENV_WARN_TIMEOUT=20s + +eval "$(devenv direnvrc)" + +use devenv diff --git a/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/vendor/github.com/go-viper/mapstructure/v2/.gitignore index 470e7ca2bd..71caea19ff 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/.gitignore +++ b/vendor/github.com/go-viper/mapstructure/v2/.gitignore @@ -1,6 +1,10 @@ -/.devenv/ -/.direnv/ -/.pre-commit-config.yaml /bin/ /build/ /var/ + +# Devenv +.devenv* +devenv.local.nix +devenv.local.yaml +.direnv +.pre-commit-config.yaml diff --git a/vendor/github.com/go-viper/mapstructure/v2/devenv.lock b/vendor/github.com/go-viper/mapstructure/v2/devenv.lock new file mode 100644 index 0000000000..72c2c9b4db --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/devenv.lock @@ -0,0 +1,103 @@ +{ + "nodes": { + "devenv": { + "locked": { + "dir": "src/modules", + "lastModified": 1765288076, + "owner": "cachix", + "repo": "devenv", + "rev": "93c055af1e8fcac49251f1b2e1c57f78620ad351", + "type": "github" + }, + "original": { + "dir": "src/modules", + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1765121682, + "owner": "edolstra", + "repo": "flake-compat", + "rev": "65f23138d8d09a92e30f1e5c87611b23ef451bf3", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "git-hooks": { + "inputs": { + "flake-compat": "flake-compat", + "gitignore": "gitignore", + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1765016596, + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "548fc44fca28a5e81c5d6b846e555e6b9c2a5a3c", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "git-hooks.nix", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "git-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1762808025, + "owner": "hercules-ci", 
+ "repo": "gitignore.nix", + "rev": "cb5e3fdca1de58ccbc3ef53de65bd372b48f567c", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1764580874, + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "dcf61356c3ab25f1362b4a4428a6d871e84f1d1d", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "git-hooks": "git-hooks", + "nixpkgs": "nixpkgs", + "pre-commit-hooks": [ + "git-hooks" + ] + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/devenv.nix b/vendor/github.com/go-viper/mapstructure/v2/devenv.nix new file mode 100644 index 0000000000..b31ab7a1ff --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/devenv.nix @@ -0,0 +1,14 @@ +{ + pkgs, + ... +}: + +{ + languages = { + go.enable = true; + }; + + packages = with pkgs; [ + golangci-lint + ]; +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/devenv.yaml b/vendor/github.com/go-viper/mapstructure/v2/devenv.yaml new file mode 100644 index 0000000000..68616a49cd --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/devenv.yaml @@ -0,0 +1,4 @@ +# yaml-language-server: $schema=https://devenv.sh/devenv.schema.json +inputs: + nixpkgs: + url: github:cachix/devenv-nixpkgs/rolling diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock deleted file mode 100644 index 5e67bdd6b4..0000000000 --- a/vendor/github.com/go-viper/mapstructure/v2/flake.lock +++ /dev/null @@ -1,294 +0,0 @@ -{ - "nodes": { - "cachix": { - "inputs": { - "devenv": [ - "devenv" - ], - "flake-compat": [ - "devenv" - ], - "git-hooks": [ - "devenv" - ], - "nixpkgs": "nixpkgs" - }, - "locked": { - "lastModified": 1742042642, - "narHash": 
"sha256-D0gP8srrX0qj+wNYNPdtVJsQuFzIng3q43thnHXQ/es=", - "owner": "cachix", - "repo": "cachix", - "rev": "a624d3eaf4b1d225f918de8543ed739f2f574203", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "latest", - "repo": "cachix", - "type": "github" - } - }, - "devenv": { - "inputs": { - "cachix": "cachix", - "flake-compat": "flake-compat", - "git-hooks": "git-hooks", - "nix": "nix", - "nixpkgs": "nixpkgs_3" - }, - "locked": { - "lastModified": 1744876578, - "narHash": "sha256-8MTBj2REB8t29sIBLpxbR0+AEGJ7f+RkzZPAGsFd40c=", - "owner": "cachix", - "repo": "devenv", - "rev": "7ff7c351bba20d0615be25ecdcbcf79b57b85fe1", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1733328505, - "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-parts": { - "inputs": { - "nixpkgs-lib": [ - "devenv", - "nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1712014858, - "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "flake-parts_2": { - "inputs": { - "nixpkgs-lib": "nixpkgs-lib" - }, - "locked": { - "lastModified": 1743550720, - "narHash": "sha256-hIshGgKZCgWh6AYJpJmRgFdR3WUbkY04o82X05xqQiY=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "c621e8422220273271f52058f618c94e405bb0f5", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "git-hooks": { - "inputs": { - 
"flake-compat": [ - "devenv" - ], - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1742649964, - "narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=", - "owner": "cachix", - "repo": "git-hooks.nix", - "rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "git-hooks.nix", - "type": "github" - } - }, - "gitignore": { - "inputs": { - "nixpkgs": [ - "devenv", - "git-hooks", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1709087332, - "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", - "owner": "hercules-ci", - "repo": "gitignore.nix", - "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "gitignore.nix", - "type": "github" - } - }, - "libgit2": { - "flake": false, - "locked": { - "lastModified": 1697646580, - "narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=", - "owner": "libgit2", - "repo": "libgit2", - "rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5", - "type": "github" - }, - "original": { - "owner": "libgit2", - "repo": "libgit2", - "type": "github" - } - }, - "nix": { - "inputs": { - "flake-compat": [ - "devenv" - ], - "flake-parts": "flake-parts", - "libgit2": "libgit2", - "nixpkgs": "nixpkgs_2", - "nixpkgs-23-11": [ - "devenv" - ], - "nixpkgs-regression": [ - "devenv" - ], - "pre-commit-hooks": [ - "devenv" - ] - }, - "locked": { - "lastModified": 1741798497, - "narHash": "sha256-E3j+3MoY8Y96mG1dUIiLFm2tZmNbRvSiyN7CrSKuAVg=", - "owner": "domenkozar", - "repo": "nix", - "rev": "f3f44b2baaf6c4c6e179de8cbb1cc6db031083cd", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.24", - "repo": "nix", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1733212471, - "narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=", - "owner": "NixOS", - "repo": "nixpkgs", 
- "rev": "55d15ad12a74eb7d4646254e13638ad0c4128776", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "lastModified": 1743296961, - "narHash": "sha256-b1EdN3cULCqtorQ4QeWgLMrd5ZGOjLSLemfa00heasc=", - "owner": "nix-community", - "repo": "nixpkgs.lib", - "rev": "e4822aea2a6d1cdd36653c134cacfd64c97ff4fa", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nixpkgs.lib", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1717432640, - "narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "88269ab3044128b7c2f4c7d68448b2fb50456870", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "release-24.05", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_3": { - "locked": { - "lastModified": 1733477122, - "narHash": "sha256-qamMCz5mNpQmgBwc8SB5tVMlD5sbwVIToVZtSxMph9s=", - "owner": "cachix", - "repo": "devenv-nixpkgs", - "rev": "7bd9e84d0452f6d2e63b6e6da29fe73fac951857", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "rolling", - "repo": "devenv-nixpkgs", - "type": "github" - } - }, - "nixpkgs_4": { - "locked": { - "lastModified": 1744536153, - "narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "root": { - "inputs": { - "devenv": "devenv", - "flake-parts": "flake-parts_2", - "nixpkgs": "nixpkgs_4" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.nix b/vendor/github.com/go-viper/mapstructure/v2/flake.nix deleted file mode 100644 index 3b116f426d..0000000000 --- 
a/vendor/github.com/go-viper/mapstructure/v2/flake.nix +++ /dev/null @@ -1,46 +0,0 @@ -{ - inputs = { - nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; - flake-parts.url = "github:hercules-ci/flake-parts"; - devenv.url = "github:cachix/devenv"; - }; - - outputs = - inputs@{ flake-parts, ... }: - flake-parts.lib.mkFlake { inherit inputs; } { - imports = [ - inputs.devenv.flakeModule - ]; - - systems = [ - "x86_64-linux" - "x86_64-darwin" - "aarch64-darwin" - ]; - - perSystem = - { pkgs, ... }: - rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - }; - - pre-commit.hooks = { - nixpkgs-fmt.enable = true; - }; - - packages = with pkgs; [ - golangci-lint - ]; - - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - - ci = devenv.shells.default; - }; - }; - }; -} diff --git a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go index 7c35bce020..9087fd96c7 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go +++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go @@ -173,6 +173,25 @@ // Public: "I made it through!" // } // +// # Custom Decoding with Unmarshaler +// +// Types can implement the Unmarshaler interface to control their own decoding. The interface +// behaves similarly to how UnmarshalJSON does in the standard library. It can be used as an +// alternative or companion to a DecodeHook. +// +// type TrimmedString string +// +// func (t *TrimmedString) UnmarshalMapstructure(input any) error { +// str, ok := input.(string) +// if !ok { +// return fmt.Errorf("expected string, got %T", input) +// } +// *t = TrimmedString(strings.TrimSpace(str)) +// return nil +// } +// +// See the Unmarshaler interface documentation for more details. +// // # Other Configuration // // mapstructure is highly configurable. 
See the DecoderConfig struct @@ -218,6 +237,17 @@ type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, any) (any, error) // values. type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (any, error) +// Unmarshaler is the interface implemented by types that can unmarshal +// themselves. UnmarshalMapstructure receives the input data (potentially +// transformed by DecodeHook) and should populate the receiver with the +// decoded values. +// +// The Unmarshaler interface takes precedence over the default decoding +// logic for any type (structs, slices, maps, primitives, etc.). +type Unmarshaler interface { + UnmarshalMapstructure(any) error +} + // DecoderConfig is the configuration that is used to create a new decoder // and allows customization of various aspects of decoding. type DecoderConfig struct { @@ -281,6 +311,13 @@ type DecoderConfig struct { // } Squash bool + // Deep will map structures in slices instead of copying them + // + // type Parent struct { + // Children []Child `mapstructure:",deep"` + // } + Deep bool + // Metadata is the struct that will contain extra metadata about // the decoding. If this is nil, then no metadata will be tracked. Metadata *Metadata @@ -290,9 +327,15 @@ type DecoderConfig struct { Result any // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" + // defaults to "mapstructure". Multiple tag names can be specified + // as a comma-separated list (e.g., "yaml,json"), and the first + // matching non-empty tag will be used. TagName string + // RootName specifies the name to use for the root element in error messages. For example: + // '' has unset fields: + RootName string + // The option of the value in the tag that indicates a field should // be squashed. This defaults to "squash". SquashTagOption string @@ -304,11 +347,34 @@ type DecoderConfig struct { // MatchName is the function used to match the map key to the struct // field name or tag. 
Defaults to `strings.EqualFold`. This can be used // to implement case-sensitive tag values, support snake casing, etc. + // + // MatchName is used as a fallback comparison when the direct key lookup fails. + // See also MapFieldName for transforming field names before lookup. MatchName func(mapKey, fieldName string) bool // DecodeNil, if set to true, will cause the DecodeHook (if present) to run // even if the input is nil. This can be used to provide default values. DecodeNil bool + + // MapFieldName is the function used to convert the struct field name to the map's key name. + // + // This is useful for automatically converting between naming conventions without + // explicitly tagging each field. For example, to convert Go's PascalCase field names + // to snake_case map keys: + // + // MapFieldName: func(s string) string { + // return strcase.ToSnake(s) + // } + // + // When decoding from a map to a struct, the transformed field name is used for + // the initial lookup. If not found, MatchName is used as a fallback comparison. + // Explicit struct tags always take precedence over MapFieldName. + MapFieldName func(string) string + + // DisableUnmarshaler, if set to true, disables the use of the Unmarshaler + // interface. Types implementing Unmarshaler will be decoded using the + // standard struct decoding logic instead. + DisableUnmarshaler bool } // A Decoder takes a raw interface value and turns it into structured @@ -445,6 +511,12 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { config.MatchName = strings.EqualFold } + if config.MapFieldName == nil { + config.MapFieldName = func(s string) string { + return s + } + } + result := &Decoder{ config: config, } @@ -458,7 +530,7 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { // Decode decodes the given raw interface to the target pointer specified // by the configuration. 
func (d *Decoder) Decode(input any) error { - err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) + err := d.decode(d.config.RootName, input, reflect.ValueOf(d.config.Result).Elem()) // Retain some of the original behavior when multiple errors ocurr var joinedErr interface{ Unwrap() []error } @@ -540,36 +612,50 @@ func (d *Decoder) decode(name string, input any, outVal reflect.Value) error { var err error addMetaKey := true - switch outputKind { - case reflect.Bool: - err = d.decodeBool(name, input, outVal) - case reflect.Interface: - err = d.decodeBasic(name, input, outVal) - case reflect.String: - err = d.decodeString(name, input, outVal) - case reflect.Int: - err = d.decodeInt(name, input, outVal) - case reflect.Uint: - err = d.decodeUint(name, input, outVal) - case reflect.Float32: - err = d.decodeFloat(name, input, outVal) - case reflect.Complex64: - err = d.decodeComplex(name, input, outVal) - case reflect.Struct: - err = d.decodeStruct(name, input, outVal) - case reflect.Map: - err = d.decodeMap(name, input, outVal) - case reflect.Ptr: - addMetaKey, err = d.decodePtr(name, input, outVal) - case reflect.Slice: - err = d.decodeSlice(name, input, outVal) - case reflect.Array: - err = d.decodeArray(name, input, outVal) - case reflect.Func: - err = d.decodeFunc(name, input, outVal) - default: - // If we reached this point then we weren't able to decode it - return newDecodeError(name, fmt.Errorf("unsupported type: %s", outputKind)) + + // Check if the target implements Unmarshaler and use it if not disabled + unmarshaled := false + if !d.config.DisableUnmarshaler { + if unmarshaler, ok := getUnmarshaler(outVal); ok { + if err = unmarshaler.UnmarshalMapstructure(input); err != nil { + err = newDecodeError(name, err) + } + unmarshaled = true + } + } + + if !unmarshaled { + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + 
err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Complex64: + err = d.decodeComplex(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + addMetaKey, err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return newDecodeError(name, fmt.Errorf("unsupported type: %s", outputKind)) + } } // If we reached here, then we successfully decoded SOMETHING, so @@ -668,7 +754,7 @@ func (d *Decoder) decodeString(name string, data any, val reflect.Value) error { case reflect.Uint8: var uints []uint8 if dataKind == reflect.Array { - uints = make([]uint8, dataVal.Len(), dataVal.Len()) + uints = make([]uint8, dataVal.Len()) for i := range uints { uints[i] = dataVal.Index(i).Interface().(uint8) } @@ -1060,8 +1146,8 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re ) } - tagValue := f.Tag.Get(d.config.TagName) - keyName := f.Name + tagValue, _ := getTagValue(f, d.config.TagName) + keyName := d.config.MapFieldName(f.Name) if tagValue == "" && d.config.IgnoreUntaggedFields { continue @@ -1070,6 +1156,9 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re // If Squash is set in the config, we squash the field down. squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + // If Deep is set in the config, set as default value. 
+ deep := d.config.Deep + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) // Determine the name of the key in the map @@ -1078,12 +1167,12 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re continue } // If "omitempty" is specified in the tag, it ignores empty values. - if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + if strings.Contains(tagValue[index+1:], "omitempty") && isEmptyValue(v) { continue } // If "omitzero" is specified in the tag, it ignores zero values. - if strings.Index(tagValue[index+1:], "omitzero") != -1 && v.IsZero() { + if strings.Contains(tagValue[index+1:], "omitzero") && v.IsZero() { continue } @@ -1103,7 +1192,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re ) } } else { - if strings.Index(tagValue[index+1:], "remain") != -1 { + if strings.Contains(tagValue[index+1:], "remain") { if v.Kind() != reflect.Map { return newDecodeError( name+"."+f.Name, @@ -1118,6 +1207,9 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re continue } } + + deep = deep || strings.Contains(tagValue[index+1:], "deep") + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { keyName = keyNameTagValue } @@ -1164,6 +1256,41 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) } + case reflect.Slice: + if deep { + var childType reflect.Type + switch v.Type().Elem().Kind() { + case reflect.Struct: + childType = reflect.TypeOf(map[string]any{}) + default: + childType = v.Type().Elem() + } + + sType := reflect.SliceOf(childType) + + addrVal := reflect.New(sType) + + vSlice := reflect.MakeSlice(sType, v.Len(), v.Cap()) + + if v.Len() > 0 { + reflect.Indirect(addrVal).Set(vSlice) + + err := d.decode(keyName, v.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + } + + vSlice = reflect.Indirect(addrVal) + + 
valMap.SetMapIndex(reflect.ValueOf(keyName), vSlice) + + break + } + + // When deep mapping is not needed, fallthrough to normal copy + fallthrough + default: valMap.SetMapIndex(reflect.ValueOf(keyName), v) } @@ -1471,7 +1598,10 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e remain := false // We always parse the tags cause we're looking for other tags too - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + tagParts := getTagParts(fieldType, d.config.TagName) + if len(tagParts) == 0 { + tagParts = []string{""} + } for _, tag := range tagParts[1:] { if tag == d.config.SquashTagOption { squash = true @@ -1492,6 +1622,18 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e if !fieldVal.IsNil() { structs = append(structs, fieldVal.Elem().Elem()) } + case reflect.Ptr: + if fieldVal.Type().Elem().Kind() == reflect.Struct { + if fieldVal.IsNil() { + fieldVal.Set(reflect.New(fieldVal.Type().Elem())) + } + structs = append(structs, fieldVal.Elem()) + } else { + errs = append(errs, newDecodeError( + name+"."+fieldType.Name, + fmt.Errorf("unsupported type for squashed pointer: %s", fieldVal.Type().Elem().Kind()), + )) + } default: errs = append(errs, newDecodeError( name+"."+fieldType.Name, @@ -1516,13 +1658,15 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e field, fieldValue := f.field, f.val fieldName := field.Name - tagValue := field.Tag.Get(d.config.TagName) + tagValue, _ := getTagValue(field, d.config.TagName) if tagValue == "" && d.config.IgnoreUntaggedFields { continue } tagValue = strings.SplitN(tagValue, ",", 2)[0] if tagValue != "" { fieldName = tagValue + } else { + fieldName = d.config.MapFieldName(fieldName) } rawMapKey := reflect.ValueOf(fieldName) @@ -1605,8 +1749,14 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } sort.Strings(keys) + // Improve error message when name is empty by showing the target 
struct type + // in the case where it is empty for embedded structs. + errorName := name + if errorName == "" { + errorName = val.Type().String() + } errs = append(errs, newDecodeError( - name, + errorName, fmt.Errorf("has invalid keys: %s", strings.Join(keys, ", ")), )) } @@ -1692,7 +1842,7 @@ func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields return true } - if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + if checkMapstructureTags && hasAnyTag(f, tagName) { // check for mapstructure tags inside return true } } @@ -1700,13 +1850,99 @@ func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, } func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + if v.Kind() != reflect.Ptr { + return v + } + + switch v.Elem().Kind() { + case reflect.Slice: + return v.Elem() + + case reflect.Struct: + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v + + default: return v } - deref := v.Elem() - derefT := deref.Type() - if isStructTypeConvertibleToMap(derefT, true, tagName) { - return deref +} + +func hasAnyTag(field reflect.StructField, tagName string) bool { + _, ok := getTagValue(field, tagName) + return ok +} + +func getTagParts(field reflect.StructField, tagName string) []string { + tagValue, ok := getTagValue(field, tagName) + if !ok { + return nil } - return v + return strings.Split(tagValue, ",") +} + +func getTagValue(field reflect.StructField, tagName string) (string, bool) { + for _, name := range splitTagNames(tagName) { + if tag := field.Tag.Get(name); tag != "" { + return tag, true + } + } + return "", false +} + +func splitTagNames(tagName string) []string { + if tagName == "" { + return []string{"mapstructure"} + } 
+ parts := strings.Split(tagName, ",") + result := make([]string, 0, len(parts)) + + for _, name := range parts { + name = strings.TrimSpace(name) + if name != "" { + result = append(result, name) + } + } + + return result +} + +// unmarshalerType is cached for performance +var unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + +// getUnmarshaler checks if the value implements Unmarshaler and returns +// the Unmarshaler and a boolean indicating if it was found. It handles both +// pointer and value receivers. +func getUnmarshaler(val reflect.Value) (Unmarshaler, bool) { + // Skip invalid or nil values + if !val.IsValid() { + return nil, false + } + + switch val.Kind() { + case reflect.Pointer, reflect.Interface: + if val.IsNil() { + return nil, false + } + } + + // Check pointer receiver first (most common case) + if val.CanAddr() { + ptrVal := val.Addr() + // Quick check: if no methods, can't implement any interface + if ptrVal.Type().NumMethod() > 0 && ptrVal.Type().Implements(unmarshalerType) { + return ptrVal.Interface().(Unmarshaler), true + } + } + + // Check value receiver + // Quick check: if no methods, can't implement any interface + if val.Type().NumMethod() > 0 && val.CanInterface() && val.Type().Implements(unmarshalerType) { + return val.Interface().(Unmarshaler), true + } + + return nil, false } diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile.go b/vendor/github.com/goccy/go-json/internal/decoder/compile.go index fab6437647..8ad50936c0 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/compile.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile.go @@ -5,6 +5,7 @@ import ( "fmt" "reflect" "strings" + "sync" "sync/atomic" "unicode" "unsafe" @@ -17,22 +18,27 @@ var ( typeAddr *runtime.TypeAddr cachedDecoderMap unsafe.Pointer // map[uintptr]decoder cachedDecoder []Decoder + initOnce sync.Once ) -func init() { - typeAddr = runtime.AnalyzeTypeAddr() - if typeAddr == nil { - typeAddr = 
&runtime.TypeAddr{} - } - cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1) +func initDecoder() { + initOnce.Do(func() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1) + }) } func loadDecoderMap() map[uintptr]Decoder { + initDecoder() p := atomic.LoadPointer(&cachedDecoderMap) return *(*map[uintptr]Decoder)(unsafe.Pointer(&p)) } func storeDecoder(typ uintptr, dec Decoder, m map[uintptr]Decoder) { + initDecoder() newDecoderMap := make(map[uintptr]Decoder, len(m)+1) newDecoderMap[typ] = dec diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go index eb7e2b1345..025ca85b5e 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go @@ -10,6 +10,7 @@ import ( ) func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + initDecoder() typeptr := uintptr(unsafe.Pointer(typ)) if typeptr > typeAddr.MaxTypeAddr { return compileToGetDecoderSlowPath(typeptr, typ) diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go index 49cdda4a17..023b817c36 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go @@ -13,6 +13,7 @@ import ( var decMu sync.RWMutex func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + initDecoder() typeptr := uintptr(unsafe.Pointer(typ)) if typeptr > typeAddr.MaxTypeAddr { return compileToGetDecoderSlowPath(typeptr, typ) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/code.go b/vendor/github.com/goccy/go-json/internal/encoder/code.go index 5b08faefc7..fec45a4b89 100644 --- 
a/vendor/github.com/goccy/go-json/internal/encoder/code.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/code.go @@ -518,6 +518,7 @@ func (c *StructCode) ToAnonymousOpcode(ctx *compileContext) Opcodes { prevField = firstField codes = codes.Add(fieldCodes...) } + ctx.structTypeToCodes[uintptr(unsafe.Pointer(c.typ))] = codes return codes } diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go index 37b7aa38e2..b107636890 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go @@ -5,6 +5,7 @@ import ( "encoding" "encoding/json" "reflect" + "sync" "sync/atomic" "unsafe" @@ -24,14 +25,17 @@ var ( cachedOpcodeSets []*OpcodeSet cachedOpcodeMap unsafe.Pointer // map[uintptr]*OpcodeSet typeAddr *runtime.TypeAddr + initEncoderOnce sync.Once ) -func init() { - typeAddr = runtime.AnalyzeTypeAddr() - if typeAddr == nil { - typeAddr = &runtime.TypeAddr{} - } - cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) +func initEncoder() { + initEncoderOnce.Do(func() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) + }) } func loadOpcodeMap() map[uintptr]*OpcodeSet { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go index 20c93cbf70..b6f45a49b0 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go @@ -4,6 +4,7 @@ package encoder func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + initEncoder() if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { codeSet, err := compileToGetCodeSetSlowPath(typeptr) if 
err != nil { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go index 13ba23fdff..47b482f7fb 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go @@ -10,6 +10,7 @@ import ( var setsMu sync.RWMutex func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + initEncoder() if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { codeSet, err := compileToGetCodeSetSlowPath(typeptr) if err != nil { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go index 14eb6a0d64..b436f5b21f 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go @@ -406,6 +406,11 @@ func AppendMarshalJSON(ctx *RuntimeContext, code *Opcode, b []byte, v interface{ rv = newV } } + + if rv.Kind() == reflect.Ptr && rv.IsNil() { + return AppendNull(ctx, b), nil + } + v = rv.Interface() var bb []byte if (code.Flags & MarshalerContextFlags) != 0 { diff --git a/vendor/github.com/goccy/go-json/internal/runtime/type.go b/vendor/github.com/goccy/go-json/internal/runtime/type.go index 0167cd2c01..4b693cb0bb 100644 --- a/vendor/github.com/goccy/go-json/internal/runtime/type.go +++ b/vendor/github.com/goccy/go-json/internal/runtime/type.go @@ -2,6 +2,7 @@ package runtime import ( "reflect" + "sync" "unsafe" ) @@ -23,8 +24,8 @@ type TypeAddr struct { } var ( - typeAddr *TypeAddr - alreadyAnalyzed bool + typeAddr *TypeAddr + once sync.Once ) //go:linkname typelinks reflect.typelinks @@ -34,67 +35,64 @@ func typelinks() ([]unsafe.Pointer, [][]int32) func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer func AnalyzeTypeAddr() *TypeAddr { - defer func() { - alreadyAnalyzed = true - }() - if alreadyAnalyzed { - return typeAddr - 
} - sections, offsets := typelinks() - if len(sections) != 1 { - return nil - } - if len(offsets) != 1 { - return nil - } - section := sections[0] - offset := offsets[0] - var ( - min uintptr = uintptr(^uint(0)) - max uintptr = 0 - isAligned64 = true - isAligned32 = true - ) - for i := 0; i < len(offset); i++ { - typ := (*Type)(rtypeOff(section, offset[i])) - addr := uintptr(unsafe.Pointer(typ)) - if min > addr { - min = addr + once.Do(func() { + sections, offsets := typelinks() + if len(sections) != 1 { + return } - if max < addr { - max = addr + if len(offsets) != 1 { + return } - if typ.Kind() == reflect.Ptr { - addr = uintptr(unsafe.Pointer(typ.Elem())) + section := sections[0] + offset := offsets[0] + var ( + min uintptr = uintptr(^uint(0)) + max uintptr = 0 + isAligned64 = true + isAligned32 = true + ) + for i := 0; i < len(offset); i++ { + typ := (*Type)(rtypeOff(section, offset[i])) + addr := uintptr(unsafe.Pointer(typ)) if min > addr { min = addr } if max < addr { max = addr } + if typ.Kind() == reflect.Ptr { + addr = uintptr(unsafe.Pointer(typ.Elem())) + if min > addr { + min = addr + } + if max < addr { + max = addr + } + } + isAligned64 = isAligned64 && (addr-min)&63 == 0 + isAligned32 = isAligned32 && (addr-min)&31 == 0 + } + addrRange := max - min + if addrRange == 0 { + return + } + var addrShift uintptr + if isAligned64 { + addrShift = 6 + } else if isAligned32 { + addrShift = 5 } - isAligned64 = isAligned64 && (addr-min)&63 == 0 - isAligned32 = isAligned32 && (addr-min)&31 == 0 - } - addrRange := max - min - if addrRange == 0 { - return nil - } - var addrShift uintptr - if isAligned64 { - addrShift = 6 - } else if isAligned32 { - addrShift = 5 - } - cacheSize := addrRange >> addrShift - if cacheSize > maxAcceptableTypeAddrRange { - return nil - } - typeAddr = &TypeAddr{ - BaseTypeAddr: min, - MaxTypeAddr: max, - AddrRange: addrRange, - AddrShift: addrShift, - } + cacheSize := addrRange >> addrShift + if cacheSize > maxAcceptableTypeAddrRange { + 
return + } + typeAddr = &TypeAddr{ + BaseTypeAddr: min, + MaxTypeAddr: max, + AddrRange: addrRange, + AddrShift: addrShift, + } + }) + return typeAddr } diff --git a/vendor/github.com/godbus/dbus/v5/.cirrus.yml b/vendor/github.com/godbus/dbus/v5/.cirrus.yml new file mode 100644 index 0000000000..6e2090296e --- /dev/null +++ b/vendor/github.com/godbus/dbus/v5/.cirrus.yml @@ -0,0 +1,11 @@ +# See https://cirrus-ci.org/guide/FreeBSD/ +freebsd_instance: + image_family: freebsd-14-3 + +task: + name: Test on FreeBSD + install_script: pkg install -y go125 dbus + test_script: | + /usr/local/etc/rc.d/dbus onestart && \ + eval `dbus-launch --sh-syntax` && \ + go125 test -v ./... diff --git a/vendor/github.com/godbus/dbus/v5/.golangci.yml b/vendor/github.com/godbus/dbus/v5/.golangci.yml new file mode 100644 index 0000000000..5bbdd9342b --- /dev/null +++ b/vendor/github.com/godbus/dbus/v5/.golangci.yml @@ -0,0 +1,13 @@ +version: "2" + +linters: + enable: + - unconvert + - unparam + exclusions: + presets: + - std-error-handling + +formatters: + enable: + - gofumpt diff --git a/vendor/github.com/godbus/dbus/v5/README.md b/vendor/github.com/godbus/dbus/v5/README.md index 5c24125838..da848a98dc 100644 --- a/vendor/github.com/godbus/dbus/v5/README.md +++ b/vendor/github.com/godbus/dbus/v5/README.md @@ -14,7 +14,7 @@ D-Bus message bus system. ### Installation -This packages requires Go 1.12 or later. It can be installed by running the command below: +This packages requires Go 1.20 or later. It can be installed by running the command below: ``` go get github.com/godbus/dbus/v5 @@ -23,7 +23,7 @@ go get github.com/godbus/dbus/v5 ### Usage The complete package documentation and some simple examples are available at -[godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the +[pkg.go.dev](https://pkg.go.dev/github.com/godbus/dbus/v5). Also, the [_examples](https://github.com/godbus/dbus/tree/master/_examples) directory gives a short overview over the basic usage. 
@@ -34,6 +34,7 @@ gives a short overview over the basic usage. - [iwd](https://github.com/shibumi/iwd) go bindings for the internet wireless daemon "iwd". - [notify](https://github.com/esiqveland/notify) provides desktop notifications over dbus into a library. - [playerbm](https://github.com/altdesktop/playerbm) a bookmark utility for media players. +- [rpic](https://github.com/stephenhu/rpic) lightweight web app and RESTful API for managing a Raspberry Pi Please note that the API is considered unstable for now and may change without further notice. diff --git a/vendor/github.com/godbus/dbus/v5/SECURITY.md b/vendor/github.com/godbus/dbus/v5/SECURITY.md new file mode 100644 index 0000000000..7d262fbbfc --- /dev/null +++ b/vendor/github.com/godbus/dbus/v5/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/godbus/dbus/security/advisories/new). + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed in a best effort base. 
diff --git a/vendor/github.com/godbus/dbus/v5/auth.go b/vendor/github.com/godbus/dbus/v5/auth.go index 0f3b252c07..c56dbaad3a 100644 --- a/vendor/github.com/godbus/dbus/v5/auth.go +++ b/vendor/github.com/godbus/dbus/v5/auth.go @@ -54,10 +54,10 @@ type Auth interface { func (conn *Conn) Auth(methods []Auth) error { if methods == nil { uid := strconv.Itoa(os.Geteuid()) - methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())} + methods = getDefaultAuthMethods(uid) } in := bufio.NewReader(conn.transport) - err := conn.transport.SendNullByte() + err := conn.SendNullByte() if err != nil { return err } @@ -83,9 +83,9 @@ func (conn *Conn) Auth(methods []Auth) error { } switch status { case AuthOk: - err, ok = conn.tryAuth(m, waitingForOk, in) + ok, err = conn.tryAuth(m, waitingForOk, in) case AuthContinue: - err, ok = conn.tryAuth(m, waitingForData, in) + ok, err = conn.tryAuth(m, waitingForData, in) default: panic("dbus: invalid authentication status") } @@ -125,21 +125,21 @@ func (conn *Conn) Auth(methods []Auth) error { } // tryAuth tries to authenticate with m as the mechanism, using state as the -// initial authState and in for reading input. It returns (nil, true) on -// success, (nil, false) on a REJECTED and (someErr, false) if some other +// initial authState and in for reading input. It returns (true, nil) on +// success, (false, nil) on a REJECTED and (false, someErr) if some other // error occurred. 
-func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) { +func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (bool, error) { for { s, err := authReadLine(in) if err != nil { - return err, false + return false, err } switch { case state == waitingForData && string(s[0]) == "DATA": if len(s) != 2 { err = authWriteLine(conn.transport, []byte("ERROR")) if err != nil { - return err, false + return false, err } continue } @@ -149,7 +149,7 @@ func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, boo if len(data) != 0 { err = authWriteLine(conn.transport, []byte("DATA"), data) if err != nil { - return err, false + return false, err } } if status == AuthOk { @@ -158,66 +158,66 @@ func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, boo case AuthError: err = authWriteLine(conn.transport, []byte("ERROR")) if err != nil { - return err, false + return false, err } } case state == waitingForData && string(s[0]) == "REJECTED": - return nil, false + return false, nil case state == waitingForData && string(s[0]) == "ERROR": err = authWriteLine(conn.transport, []byte("CANCEL")) if err != nil { - return err, false + return false, err } state = waitingForReject case state == waitingForData && string(s[0]) == "OK": if len(s) != 2 { err = authWriteLine(conn.transport, []byte("CANCEL")) if err != nil { - return err, false + return false, err } state = waitingForReject } else { conn.uuid = string(s[1]) - return nil, true + return true, nil } case state == waitingForData: err = authWriteLine(conn.transport, []byte("ERROR")) if err != nil { - return err, false + return false, err } case state == waitingForOk && string(s[0]) == "OK": if len(s) != 2 { err = authWriteLine(conn.transport, []byte("CANCEL")) if err != nil { - return err, false + return false, err } state = waitingForReject } else { conn.uuid = string(s[1]) - return nil, true + return true, nil } case state == waitingForOk && string(s[0]) 
== "DATA": err = authWriteLine(conn.transport, []byte("DATA")) if err != nil { - return err, false + return false, nil } case state == waitingForOk && string(s[0]) == "REJECTED": - return nil, false + return false, nil case state == waitingForOk && string(s[0]) == "ERROR": err = authWriteLine(conn.transport, []byte("CANCEL")) if err != nil { - return err, false + return false, err } state = waitingForReject case state == waitingForOk: err = authWriteLine(conn.transport, []byte("ERROR")) if err != nil { - return err, false + return false, err } case state == waitingForReject && string(s[0]) == "REJECTED": - return nil, false + return false, nil case state == waitingForReject: - return errors.New("dbus: authentication protocol error"), false + return false, errors.New("dbus: authentication protocol error") default: panic("dbus: invalid auth state") } diff --git a/vendor/github.com/godbus/dbus/v5/auth_default_other.go b/vendor/github.com/godbus/dbus/v5/auth_default_other.go new file mode 100644 index 0000000000..e112ccfe96 --- /dev/null +++ b/vendor/github.com/godbus/dbus/v5/auth_default_other.go @@ -0,0 +1,7 @@ +//go:build !windows + +package dbus + +func getDefaultAuthMethods(user string) []Auth { + return []Auth{AuthExternal(user)} +} diff --git a/vendor/github.com/godbus/dbus/v5/auth_default_windows.go b/vendor/github.com/godbus/dbus/v5/auth_default_windows.go new file mode 100644 index 0000000000..2289850a8e --- /dev/null +++ b/vendor/github.com/godbus/dbus/v5/auth_default_windows.go @@ -0,0 +1,5 @@ +package dbus + +func getDefaultAuthMethods(user string) []Auth { + return []Auth{AuthCookieSha1(user, getHomeDir())} +} diff --git a/vendor/github.com/godbus/dbus/v5/auth_sha1.go b/vendor/github.com/godbus/dbus/v5/auth_sha1_windows.go similarity index 95% rename from vendor/github.com/godbus/dbus/v5/auth_sha1.go rename to vendor/github.com/godbus/dbus/v5/auth_sha1_windows.go index 80286700b6..0c5480eee1 100644 --- a/vendor/github.com/godbus/dbus/v5/auth_sha1.go +++ 
b/vendor/github.com/godbus/dbus/v5/auth_sha1_windows.go @@ -100,3 +100,10 @@ func (a authCookieSha1) generateChallenge() []byte { hex.Encode(enc, b) return enc } + +func getHomeDir() string { + if dir, err := os.UserHomeDir(); err == nil { + return dir + } + return "/" +} diff --git a/vendor/github.com/godbus/dbus/v5/call.go b/vendor/github.com/godbus/dbus/v5/call.go index b06b063580..d16171ab91 100644 --- a/vendor/github.com/godbus/dbus/v5/call.go +++ b/vendor/github.com/godbus/dbus/v5/call.go @@ -2,17 +2,14 @@ package dbus import ( "context" - "errors" ) -var errSignature = errors.New("dbus: mismatched signature") - // Call represents a pending or completed method call. type Call struct { Destination string Path ObjectPath Method string - Args []interface{} + Args []any // Strobes when the call is complete. Done chan *Call @@ -22,7 +19,7 @@ type Call struct { Err error // Holds the response once the call is done. - Body []interface{} + Body []any // ResponseSequence stores the sequence number of the DBus message containing // the call response (or error). This can be compared to the sequence number @@ -55,7 +52,7 @@ func (c *Call) ContextCancel() { // Store stores the body of the reply into the provided pointers. It returns // an error if the signatures of the body and retvalues don't match, or if // the error status is not nil. 
-func (c *Call) Store(retvalues ...interface{}) error { +func (c *Call) Store(retvalues ...any) error { if c.Err != nil { return c.Err } diff --git a/vendor/github.com/godbus/dbus/v5/conn.go b/vendor/github.com/godbus/dbus/v5/conn.go index 69978ea26a..8a22acc98b 100644 --- a/vendor/github.com/godbus/dbus/v5/conn.go +++ b/vendor/github.com/godbus/dbus/v5/conn.go @@ -3,6 +3,7 @@ package dbus import ( "context" "errors" + "fmt" "io" "os" "strings" @@ -76,7 +77,6 @@ func SessionBus() (conn *Conn, err error) { func getSessionBusAddress(autolaunch bool) (string, error) { if address := os.Getenv("DBUS_SESSION_BUS_ADDRESS"); address != "" && address != "autolaunch:" { return address, nil - } else if address := tryDiscoverDbusSessionBusAddress(); address != "" { os.Setenv("DBUS_SESSION_BUS_ADDRESS", address) return address, nil @@ -97,7 +97,7 @@ func SessionBusPrivate(opts ...ConnOption) (*Conn, error) { return Dial(address, opts...) } -// SessionBusPrivate returns a new private connection to the session bus. If +// SessionBusPrivateNoAutoStartup returns a new private connection to the session bus. If // the session bus is not already open, do not attempt to launch it. func SessionBusPrivateNoAutoStartup(opts ...ConnOption) (*Conn, error) { address, err := getSessionBusAddress(false) @@ -108,7 +108,7 @@ func SessionBusPrivateNoAutoStartup(opts ...ConnOption) (*Conn, error) { return Dial(address, opts...) } -// SessionBusPrivate returns a new private connection to the session bus. +// SessionBusPrivateHandler returns a new private connection to the session bus. // // Deprecated: use SessionBusPrivate with options instead. func SessionBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) { @@ -445,7 +445,8 @@ func (conn *Conn) handleSignal(sequence Sequence, msg *Message) { // sender is optional for signals. 
sender, _ := msg.Headers[FieldSender].value.(string) if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" { - if member == "NameLost" { + switch member { + case "NameLost": // If we lost the name on the bus, remove it from our // tracking list. name, ok := msg.Body[0].(string) @@ -453,7 +454,7 @@ func (conn *Conn) handleSignal(sequence Sequence, msg *Message) { panic("Unable to read the lost name") } conn.names.loseName(name) - } else if member == "NameAcquired" { + case "NameAcquired": // If we acquired the name on the bus, add it to our // tracking list. name, ok := msg.Body[0].(string) @@ -485,7 +486,7 @@ func (conn *Conn) Object(dest string, path ObjectPath) BusObject { return &Object{conn, dest, path} } -func (conn *Conn) sendMessageAndIfClosed(msg *Message, ifClosed func()) { +func (conn *Conn) sendMessageAndIfClosed(msg *Message, ifClosed func()) error { if msg.serial == 0 { msg.serial = conn.getSerial() } @@ -498,14 +499,30 @@ func (conn *Conn) sendMessageAndIfClosed(msg *Message, ifClosed func()) { } else if msg.Type != TypeMethodCall { conn.serialGen.RetireSerial(msg.serial) } + return err +} + +func isEncodingError(err error) bool { + switch err.(type) { + case FormatError: + return true + case InvalidMessageError: + return true + } + return false } func (conn *Conn) handleSendError(msg *Message, err error) { - if msg.Type == TypeMethodCall { + switch msg.Type { + case TypeMethodCall: conn.calls.handleSendError(msg, err) - } else if msg.Type == TypeMethodReply { - if _, ok := err.(FormatError); ok { - conn.sendError(err, msg.Headers[FieldDestination].value.(string), msg.Headers[FieldReplySerial].value.(uint32)) + case TypeMethodReply: + if isEncodingError(err) { + // Make sure that the caller gets some kind of error response if + // the application code tried to respond, but the resulting message + // was malformed in the end + returnedErr := fmt.Errorf("destination tried to respond with invalid message (%w)", err) + 
conn.sendError(returnedErr, msg.Headers[FieldDestination].value.(string), msg.Headers[FieldReplySerial].value.(uint32)) } } conn.serialGen.RetireSerial(msg.serial) @@ -560,7 +577,8 @@ func (conn *Conn) send(ctx context.Context, msg *Message, ch chan *Call) *Call { <-ctx.Done() conn.calls.handleSendError(msg, ctx.Err()) }() - conn.sendMessageAndIfClosed(msg, func() { + // error is handled in handleSendError + _ = conn.sendMessageAndIfClosed(msg, func() { conn.calls.handleSendError(msg, ErrClosed) canceler() }) @@ -568,7 +586,8 @@ func (conn *Conn) send(ctx context.Context, msg *Message, ch chan *Call) *Call { canceler() call = &Call{Err: nil, Done: ch} ch <- call - conn.sendMessageAndIfClosed(msg, func() { + // error is handled in handleSendError + _ = conn.sendMessageAndIfClosed(msg, func() { call = &Call{Err: ErrClosed} }) } @@ -602,12 +621,13 @@ func (conn *Conn) sendError(err error, dest string, serial uint32) { if len(e.Body) > 0 { msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...)) } - conn.sendMessageAndIfClosed(msg, nil) + // not much we can do to handle a possible error here + _ = conn.sendMessageAndIfClosed(msg, nil) } // sendReply creates a method reply message corresponding to the parameters and // sends it to conn.out. 
-func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { +func (conn *Conn) sendReply(dest string, serial uint32, values ...any) { msg := new(Message) msg.Type = TypeMethodReply msg.Headers = make(map[HeaderField]Variant) @@ -619,7 +639,8 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { if len(values) > 0 { msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) } - conn.sendMessageAndIfClosed(msg, nil) + // not much we can do to handle a possible error here + _ = conn.sendMessageAndIfClosed(msg, nil) } // AddMatchSignal registers the given match rule to receive broadcast @@ -630,7 +651,7 @@ func (conn *Conn) AddMatchSignal(options ...MatchOption) error { // AddMatchSignalContext acts like AddMatchSignal but takes a context. func (conn *Conn) AddMatchSignalContext(ctx context.Context, options ...MatchOption) error { - options = append([]MatchOption{withMatchType("signal")}, options...) + options = append([]MatchOption{withMatchTypeSignal()}, options...) return conn.busObj.CallWithContext( ctx, "org.freedesktop.DBus.AddMatch", 0, @@ -645,7 +666,7 @@ func (conn *Conn) RemoveMatchSignal(options ...MatchOption) error { // RemoveMatchSignalContext acts like RemoveMatchSignal but takes a context. func (conn *Conn) RemoveMatchSignalContext(ctx context.Context, options ...MatchOption) error { - options = append([]MatchOption{withMatchType("signal")}, options...) + options = append([]MatchOption{withMatchTypeSignal()}, options...) return conn.busObj.CallWithContext( ctx, "org.freedesktop.DBus.RemoveMatch", 0, @@ -694,10 +715,10 @@ func (conn *Conn) SupportsUnixFDs() bool { // Error represents a D-Bus message of type Error. 
type Error struct { Name string - Body []interface{} + Body []any } -func NewError(name string, body []interface{}) *Error { +func NewError(name string, body []any) *Error { return &Error{name, body} } @@ -717,7 +738,7 @@ type Signal struct { Sender string Path ObjectPath Name string - Body []interface{} + Body []any Sequence Sequence } @@ -740,9 +761,7 @@ type transport interface { SendMessage(*Message) error } -var ( - transports = make(map[string]func(string) (transport, error)) -) +var transports = make(map[string]func(string) (transport, error)) func getTransport(address string) (transport, error) { var err error @@ -770,10 +789,10 @@ func getTransport(address string) (transport, error) { // getKey gets a key from a the list of keys. Returns "" on error / not found... func getKey(s, key string) string { - for _, keyEqualsValue := range strings.Split(s, ",") { - keyValue := strings.SplitN(keyEqualsValue, "=", 2) - if len(keyValue) == 2 && keyValue[0] == key { - val, err := UnescapeBusAddressValue(keyValue[1]) + keyEq := key + "=" + for _, kv := range strings.Split(s, ",") { + if v, ok := strings.CutPrefix(kv, keyEq); ok { + val, err := UnescapeBusAddressValue(v) if err != nil { // No way to return an error. 
return "" @@ -853,16 +872,19 @@ type nameTracker struct { func newNameTracker() *nameTracker { return &nameTracker{names: map[string]struct{}{}} } + func (tracker *nameTracker) acquireUniqueConnectionName(name string) { tracker.lck.Lock() defer tracker.lck.Unlock() tracker.unique = name } + func (tracker *nameTracker) acquireName(name string) { tracker.lck.Lock() defer tracker.lck.Unlock() tracker.names[name] = struct{}{} } + func (tracker *nameTracker) loseName(name string) { tracker.lck.Lock() defer tracker.lck.Unlock() @@ -874,12 +896,14 @@ func (tracker *nameTracker) uniqueNameIsKnown() bool { defer tracker.lck.RUnlock() return tracker.unique != "" } + func (tracker *nameTracker) isKnownName(name string) bool { tracker.lck.RLock() defer tracker.lck.RUnlock() _, ok := tracker.names[name] return ok || name == tracker.unique } + func (tracker *nameTracker) listKnownNames() []string { tracker.lck.RLock() defer tracker.lck.RUnlock() @@ -941,18 +965,7 @@ func (tracker *callTracker) handleSendError(msg *Message, err error) { } } -// finalize was the only func that did not strobe Done -func (tracker *callTracker) finalize(sn uint32) { - tracker.lck.Lock() - defer tracker.lck.Unlock() - c, ok := tracker.calls[sn] - if ok { - delete(tracker.calls, sn) - c.ContextCancel() - } -} - -func (tracker *callTracker) finalizeWithBody(sn uint32, sequence Sequence, body []interface{}) { +func (tracker *callTracker) finalizeWithBody(sn uint32, sequence Sequence, body []any) { tracker.lck.Lock() c, ok := tracker.calls[sn] if ok { diff --git a/vendor/github.com/godbus/dbus/v5/conn_darwin.go b/vendor/github.com/godbus/dbus/v5/conn_darwin.go index 6e2e402021..cb2325a01b 100644 --- a/vendor/github.com/godbus/dbus/v5/conn_darwin.go +++ b/vendor/github.com/godbus/dbus/v5/conn_darwin.go @@ -12,7 +12,6 @@ const defaultSystemBusAddress = "unix:path=/opt/local/var/run/dbus/system_bus_so func getSessionBusPlatformAddress() (string, error) { cmd := exec.Command("launchctl", "getenv", 
"DBUS_LAUNCHD_SESSION_BUS_SOCKET") b, err := cmd.CombinedOutput() - if err != nil { return "", err } diff --git a/vendor/github.com/godbus/dbus/v5/conn_other.go b/vendor/github.com/godbus/dbus/v5/conn_other.go index 90289ca85a..4c6eb305d0 100644 --- a/vendor/github.com/godbus/dbus/v5/conn_other.go +++ b/vendor/github.com/godbus/dbus/v5/conn_other.go @@ -1,4 +1,4 @@ -// +build !darwin +//go:build !darwin package dbus @@ -6,7 +6,6 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" "os" "os/exec" "os/user" @@ -19,7 +18,6 @@ var execCommand = exec.Command func getSessionBusPlatformAddress() (string, error) { cmd := execCommand("dbus-launch") b, err := cmd.CombinedOutput() - if err != nil { return "", err } @@ -42,10 +40,10 @@ func getSessionBusPlatformAddress() (string, error) { // It tries different techniques employed by different operating systems, // returning the first valid address it finds, or an empty string. // -// * /run/user//bus if this exists, it *is* the bus socket. present on -// Ubuntu 18.04 -// * /run/user//dbus-session: if this exists, it can be parsed for the bus -// address. present on Ubuntu 16.04 +// - /run/user//bus if this exists, it *is* the bus socket. present on +// Ubuntu 18.04 +// - /run/user//dbus-session: if this exists, it can be parsed for the bus +// address. 
present on Ubuntu 16.04 // // See https://dbus.freedesktop.org/doc/dbus-launch.1.html func tryDiscoverDbusSessionBusAddress() string { @@ -61,14 +59,9 @@ func tryDiscoverDbusSessionBusAddress() string { // text file // containing the address of the socket, e.g.: // DBUS_SESSION_BUS_ADDRESS=unix:abstract=/tmp/dbus-E1c73yNqrG - if f, err := ioutil.ReadFile(runUserSessionDbusFile); err == nil { - fileContent := string(f) - - prefix := "DBUS_SESSION_BUS_ADDRESS=" - - if strings.HasPrefix(fileContent, prefix) { - address := strings.TrimRight(strings.TrimPrefix(fileContent, prefix), "\n\r") - return address + if f, err := os.ReadFile(runUserSessionDbusFile); err == nil { + if addr, ok := strings.CutPrefix(string(f), "DBUS_SESSION_BUS_ADDRESS="); ok { + return strings.TrimRight(addr, "\n\r") } } } diff --git a/vendor/github.com/godbus/dbus/v5/conn_unix.go b/vendor/github.com/godbus/dbus/v5/conn_unix.go index 58aee7d2af..77d6bf1a47 100644 --- a/vendor/github.com/godbus/dbus/v5/conn_unix.go +++ b/vendor/github.com/godbus/dbus/v5/conn_unix.go @@ -1,8 +1,9 @@ -//+build !windows,!solaris,!darwin +//go:build !windows && !solaris && !darwin package dbus import ( + "net" "os" ) @@ -15,3 +16,25 @@ func getSystemBusPlatformAddress() string { } return defaultSystemBusAddress } + +// DialUnix establishes a new private connection to the message bus specified by UnixConn. +func DialUnix(conn *net.UnixConn, opts ...ConnOption) (*Conn, error) { + tr := newUnixTransportFromConn(conn) + return newConn(tr, opts...) +} + +func ConnectUnix(uconn *net.UnixConn, opts ...ConnOption) (*Conn, error) { + conn, err := DialUnix(uconn, opts...) 
+ if err != nil { + return nil, err + } + if err = conn.Auth(conn.auth); err != nil { + _ = conn.Close() + return nil, err + } + if err = conn.Hello(); err != nil { + _ = conn.Close() + return nil, err + } + return conn, nil +} diff --git a/vendor/github.com/godbus/dbus/v5/conn_windows.go b/vendor/github.com/godbus/dbus/v5/conn_windows.go index 4291e4519c..fa839d2a22 100644 --- a/vendor/github.com/godbus/dbus/v5/conn_windows.go +++ b/vendor/github.com/godbus/dbus/v5/conn_windows.go @@ -1,5 +1,3 @@ -//+build windows - package dbus import "os" diff --git a/vendor/github.com/godbus/dbus/v5/dbus.go b/vendor/github.com/godbus/dbus/v5/dbus.go index c188d10485..d3622d8079 100644 --- a/vendor/github.com/godbus/dbus/v5/dbus.go +++ b/vendor/github.com/godbus/dbus/v5/dbus.go @@ -10,11 +10,8 @@ import ( var ( byteType = reflect.TypeOf(byte(0)) boolType = reflect.TypeOf(false) - uint8Type = reflect.TypeOf(uint8(0)) int16Type = reflect.TypeOf(int16(0)) uint16Type = reflect.TypeOf(uint16(0)) - intType = reflect.TypeOf(int(0)) - uintType = reflect.TypeOf(uint(0)) int32Type = reflect.TypeOf(int32(0)) uint32Type = reflect.TypeOf(uint32(0)) int64Type = reflect.TypeOf(int64(0)) @@ -24,8 +21,8 @@ var ( signatureType = reflect.TypeOf(Signature{""}) objectPathType = reflect.TypeOf(ObjectPath("")) variantType = reflect.TypeOf(Variant{Signature{""}, nil}) - interfacesType = reflect.TypeOf([]interface{}{}) - interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() + interfacesType = reflect.TypeOf([]any{}) + interfaceType = reflect.TypeOf((*any)(nil)).Elem() unixFDType = reflect.TypeOf(UnixFD(0)) unixFDIndexType = reflect.TypeOf(UnixFDIndex(0)) errType = reflect.TypeOf((*error)(nil)).Elem() @@ -45,7 +42,7 @@ func (e InvalidTypeError) Error() string { // pointers. It converts slices of interfaces from src to corresponding structs // in dest. An error is returned if the lengths of src and dest or the types of // their elements don't match. 
-func Store(src []interface{}, dest ...interface{}) error { +func Store(src []any, dest ...any) error { if len(src) != len(dest) { return errors.New("dbus.Store: length mismatch") } @@ -58,7 +55,7 @@ func Store(src []interface{}, dest ...interface{}) error { return nil } -func storeInterfaces(src, dest interface{}) error { +func storeInterfaces(src, dest any) error { return store(reflect.ValueOf(dest), reflect.ValueOf(src)) } @@ -85,7 +82,7 @@ func storeBase(dest, src reflect.Value) error { func setDest(dest, src reflect.Value) error { if !isVariant(src.Type()) && isVariant(dest.Type()) { - //special conversion for dbus.Variant + // special conversion for dbus.Variant dest.Set(reflect.ValueOf(MakeVariant(src.Interface()))) return nil } @@ -166,8 +163,8 @@ func storeMapIntoVariant(dest, src reflect.Value) error { func storeMapIntoInterface(dest, src reflect.Value) error { var dv reflect.Value if isVariant(src.Type().Elem()) { - //Convert variants to interface{} recursively when converting - //to interface{} + // Convert variants to interface{} recursively when converting + // to interface{} dv = reflect.MakeMap( reflect.MapOf(src.Type().Key(), interfaceType)) } else { @@ -200,7 +197,7 @@ func storeMapIntoMap(dest, src reflect.Value) error { func storeSlice(dest, src reflect.Value) error { switch { case src.Type() == interfacesType && dest.Kind() == reflect.Struct: - //The decoder always decodes structs as slices of interface{} + // The decoder always decodes structs as slices of interface{} return storeStruct(dest, src) case !kindsAreCompatible(dest.Type(), src.Type()): return fmt.Errorf( @@ -225,7 +222,7 @@ func storeStruct(dest, src reflect.Value) error { if isVariant(dest.Type()) { return storeBase(dest, src) } - dval := make([]interface{}, 0, dest.NumField()) + dval := make([]any, 0, dest.NumField()) dtype := dest.Type() for i := 0; i < dest.NumField(); i++ { field := dest.Field(i) @@ -245,7 +242,7 @@ func storeStruct(dest, src reflect.Value) error { "enough 
fields need: %d have: %d", src.Len(), len(dval)) } - return Store(src.Interface().([]interface{}), dval...) + return Store(src.Interface().([]any), dval...) } func storeSliceIntoVariant(dest, src reflect.Value) error { @@ -260,8 +257,8 @@ func storeSliceIntoVariant(dest, src reflect.Value) error { func storeSliceIntoInterface(dest, src reflect.Value) error { var dv reflect.Value if isVariant(src.Type().Elem()) { - //Convert variants to interface{} recursively when converting - //to interface{} + // Convert variants to interface{} recursively when converting + // to interface{} dv = reflect.MakeSlice(reflect.SliceOf(interfaceType), src.Len(), src.Cap()) } else { @@ -334,7 +331,7 @@ func (o ObjectPath) IsValid() bool { } // A UnixFD is a Unix file descriptor sent over the wire. See the package-level -// documentation for more information about Unix file descriptor passsing. +// documentation for more information about Unix file descriptor passing. type UnixFD int32 // A UnixFDIndex is the representation of a Unix file descriptor in a message. diff --git a/vendor/github.com/godbus/dbus/v5/decoder.go b/vendor/github.com/godbus/dbus/v5/decoder.go index 89bfed9d1a..05af8cdd34 100644 --- a/vendor/github.com/godbus/dbus/v5/decoder.go +++ b/vendor/github.com/godbus/dbus/v5/decoder.go @@ -4,6 +4,7 @@ import ( "encoding/binary" "io" "reflect" + "unsafe" ) type decoder struct { @@ -11,6 +12,12 @@ type decoder struct { order binary.ByteOrder pos int fds []int + + // The following fields are used to reduce memory allocs. + conv *stringConverter + buf []byte + d float64 + y [1]byte } // newDecoder returns a new decoder that reads values from in. The input is @@ -20,29 +27,39 @@ func newDecoder(in io.Reader, order binary.ByteOrder, fds []int) *decoder { dec.in = in dec.order = order dec.fds = fds + dec.conv = newStringConverter(stringConverterBufferSize) return dec } +// Reset resets the decoder to be reading from in. 
+func (dec *decoder) Reset(in io.Reader, order binary.ByteOrder, fds []int) { + dec.in = in + dec.order = order + dec.pos = 0 + dec.fds = fds + + if dec.conv == nil { + dec.conv = newStringConverter(stringConverterBufferSize) + } +} + // align aligns the input to the given boundary and panics on error. func (dec *decoder) align(n int) { if dec.pos%n != 0 { newpos := (dec.pos + n - 1) & ^(n - 1) - empty := make([]byte, newpos-dec.pos) - if _, err := io.ReadFull(dec.in, empty); err != nil { - panic(err) - } + dec.read2buf(newpos - dec.pos) dec.pos = newpos } } // Calls binary.Read(dec.in, dec.order, v) and panics on read errors. -func (dec *decoder) binread(v interface{}) { +func (dec *decoder) binread(v any) { if err := binary.Read(dec.in, dec.order, v); err != nil { panic(err) } } -func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) { +func (dec *decoder) Decode(sig Signature) (vs []any, err error) { defer func() { var ok bool v := recover() @@ -52,7 +69,7 @@ func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) { } } }() - vs = make([]interface{}, 0) + vs = make([]any, 0) s := sig.str for s != "" { err, rem := validSingle(s, &depthCounter{}) @@ -66,79 +83,89 @@ func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) { return vs, nil } -func (dec *decoder) decode(s string, depth int) interface{} { +// read2buf reads exactly n bytes from the reader dec.in into the buffer dec.buf +// to reduce memory allocs. +// The buffer grows automatically. +func (dec *decoder) read2buf(n int) { + if cap(dec.buf) < n { + dec.buf = make([]byte, n) + } else { + dec.buf = dec.buf[:n] + } + if _, err := io.ReadFull(dec.in, dec.buf); err != nil { + panic(err) + } +} + +// decodeU decodes uint32 obtained from the reader dec.in. +// The goal is to reduce memory allocs. 
+func (dec *decoder) decodeU() uint32 { + dec.align(4) + dec.read2buf(4) + dec.pos += 4 + return dec.order.Uint32(dec.buf) +} + +func (dec *decoder) decode(s string, depth int) any { dec.align(alignment(typeFor(s))) switch s[0] { case 'y': - var b [1]byte - if _, err := dec.in.Read(b[:]); err != nil { + if _, err := dec.in.Read(dec.y[:]); err != nil { panic(err) } dec.pos++ - return b[0] + return dec.y[0] case 'b': - i := dec.decode("u", depth).(uint32) - switch { - case i == 0: + switch dec.decodeU() { + case 0: return false - case i == 1: + case 1: return true default: panic(FormatError("invalid value for boolean")) } case 'n': - var i int16 - dec.binread(&i) + dec.read2buf(2) dec.pos += 2 - return i + return int16(dec.order.Uint16(dec.buf)) case 'i': - var i int32 - dec.binread(&i) + dec.read2buf(4) dec.pos += 4 - return i + return int32(dec.order.Uint32(dec.buf)) case 'x': - var i int64 - dec.binread(&i) + dec.read2buf(8) dec.pos += 8 - return i + return int64(dec.order.Uint64(dec.buf)) case 'q': - var i uint16 - dec.binread(&i) + dec.read2buf(2) dec.pos += 2 - return i + return dec.order.Uint16(dec.buf) case 'u': - var i uint32 - dec.binread(&i) - dec.pos += 4 - return i + return dec.decodeU() case 't': - var i uint64 - dec.binread(&i) + dec.read2buf(8) dec.pos += 8 - return i + return dec.order.Uint64(dec.buf) case 'd': - var f float64 - dec.binread(&f) + dec.binread(&dec.d) dec.pos += 8 - return f + return dec.d case 's': - length := dec.decode("u", depth).(uint32) - b := make([]byte, int(length)+1) - if _, err := io.ReadFull(dec.in, b); err != nil { - panic(err) - } - dec.pos += int(length) + 1 - return string(b[:len(b)-1]) + length := dec.decodeU() + p := int(length) + 1 + dec.read2buf(p) + dec.pos += p + return dec.conv.String(dec.buf[:len(dec.buf)-1]) case 'o': return ObjectPath(dec.decode("s", depth).(string)) case 'g': length := dec.decode("y", depth).(byte) - b := make([]byte, int(length)+1) - if _, err := io.ReadFull(dec.in, b); err != nil { - 
panic(err) - } - dec.pos += int(length) + 1 - sig, err := ParseSignature(string(b[:len(b)-1])) + p := int(length) + 1 + dec.read2buf(p) + dec.pos += p + sig, err := ParseSignature( + dec.conv.String(dec.buf[:len(dec.buf)-1]), + ) if err != nil { panic(err) } @@ -163,7 +190,7 @@ func (dec *decoder) decode(s string, depth int) interface{} { variant.value = dec.decode(sig.str, depth+1) return variant case 'h': - idx := dec.decode("u", depth).(uint32) + idx := dec.decodeU() if int(idx) < len(dec.fds) { return UnixFD(dec.fds[idx]) } @@ -176,7 +203,7 @@ func (dec *decoder) decode(s string, depth int) interface{} { if depth >= 63 { panic(FormatError("input exceeds container depth limit")) } - length := dec.decode("u", depth).(uint32) + length := dec.decodeU() // Even for empty maps, the correct padding must be included dec.align(8) spos := dec.pos @@ -195,7 +222,7 @@ func (dec *decoder) decode(s string, depth int) interface{} { panic(FormatError("input exceeds container depth limit")) } sig := s[1:] - length := dec.decode("u", depth).(uint32) + length := dec.decodeU() // capacity can be determined only for fixed-size element types var capacity int if s := sigByteSize(sig); s != 0 { @@ -205,9 +232,9 @@ func (dec *decoder) decode(s string, depth int) interface{} { // Even for empty arrays, the correct padding must be included align := alignment(typeFor(s[1:])) if len(s) > 1 && s[1] == '(' { - //Special case for arrays of structs - //structs decode as a slice of interface{} values - //but the dbus alignment does not match this + // Special case for arrays of structs + // structs decode as a slice of interface{} values + // but the dbus alignment does not match this align = 8 } dec.align(align) @@ -222,7 +249,7 @@ func (dec *decoder) decode(s string, depth int) interface{} { panic(FormatError("input exceeds container depth limit")) } dec.align(8) - v := make([]interface{}, 0) + v := make([]any, 0) s = s[1 : len(s)-1] for s != "" { err, rem := validSingle(s, &depthCounter{}) 
@@ -264,9 +291,10 @@ func sigByteSize(sig string) int { i := 1 depth := 1 for i < len(sig[offset:]) && depth != 0 { - if sig[offset+i] == '(' { + switch sig[offset+i] { + case '(': depth++ - } else if sig[offset+i] == ')' { + case ')': depth-- } i++ @@ -290,3 +318,59 @@ type FormatError string func (e FormatError) Error() string { return "dbus: wire format error: " + string(e) } + +// stringConverterBufferSize defines the recommended buffer size of 4KB. +// It showed good results in a benchmark when decoding 35KB message, +// see https://github.com/marselester/systemd#testing. +const stringConverterBufferSize = 4096 + +func newStringConverter(capacity int) *stringConverter { + return &stringConverter{ + buf: make([]byte, 0, capacity), + offset: 0, + } +} + +// stringConverter converts bytes to strings with less allocs. +// The idea is to accumulate bytes in a buffer with specified capacity +// and create strings with unsafe package using bytes from a buffer. +// For example, 10 "fizz" strings written to a 40-byte buffer +// will result in 1 alloc instead of 10. +// +// Once a buffer is filled, a new one is created with the same capacity. +// Old buffers will be eventually GC-ed +// with no side effects to the returned strings. +type stringConverter struct { + // buf is a temporary buffer where decoded strings are batched. + buf []byte + // offset is a buffer position where the last string was written. + offset int +} + +// String converts bytes to a string. +func (c *stringConverter) String(b []byte) string { + n := len(b) + if n == 0 { + return "" + } + // Must allocate because a string doesn't fit into the buffer. + if n > cap(c.buf) { + return string(b) + } + + if len(c.buf)+n > cap(c.buf) { + c.buf = make([]byte, 0, cap(c.buf)) + c.offset = 0 + } + c.buf = append(c.buf, b...) + + b = c.buf[c.offset:] + s := toString(b) + c.offset += n + return s +} + +// toString converts a byte slice to a string without allocating. 
+func toString(b []byte) string { + return unsafe.String(&b[0], len(b)) +} diff --git a/vendor/github.com/godbus/dbus/v5/default_handler.go b/vendor/github.com/godbus/dbus/v5/default_handler.go index 13132c6b47..c17ab0b97d 100644 --- a/vendor/github.com/godbus/dbus/v5/default_handler.go +++ b/vendor/github.com/godbus/dbus/v5/default_handler.go @@ -18,9 +18,9 @@ func newIntrospectIntf(h *defaultHandler) *exportedIntf { return newExportedIntf(methods, true) } -//NewDefaultHandler returns an instance of the default -//call handler. This is useful if you want to implement only -//one of the two handlers but not both. +// NewDefaultHandler returns an instance of the default +// call handler. This is useful if you want to implement only +// one of the two handlers but not both. // // Deprecated: this is the default value, don't use it, it will be unexported. func NewDefaultHandler() *defaultHandler { @@ -52,9 +52,9 @@ func (h *defaultHandler) introspectPath(path ObjectPath) string { if p != "/" { p += "/" } - if strings.HasPrefix(string(obj), p) { - node_name := strings.Split(string(obj[len(p):]), "/")[0] - subpath[node_name] = struct{}{} + if after, ok := strings.CutPrefix(string(obj), p); ok { + name, _, _ := strings.Cut(after, "/") + subpath[name] = struct{}{} } } for s := range subpath { @@ -117,7 +117,7 @@ type exportedMethod struct { reflect.Value } -func (m exportedMethod) Call(args ...interface{}) ([]interface{}, error) { +func (m exportedMethod) Call(args ...any) ([]any, error) { t := m.Type() params := make([]reflect.Value, len(args)) @@ -143,12 +143,12 @@ func (m exportedMethod) Call(args ...interface{}) ([]interface{}, error) { ret = ret[:t.NumOut()-1] } } - out := make([]interface{}, len(ret)) + out := make([]any, len(ret)) for i, val := range ret { out[i] = val.Interface() } if nilErr || err == nil { - //concrete type to interface nil is a special case + // concrete type to interface nil is a special case return out, nil } return out, err @@ -158,7 +158,7 
@@ func (m exportedMethod) NumArguments() int { return m.Value.Type().NumIn() } -func (m exportedMethod) ArgumentValue(i int) interface{} { +func (m exportedMethod) ArgumentValue(i int) any { return reflect.Zero(m.Type().In(i)).Interface() } @@ -166,7 +166,7 @@ func (m exportedMethod) NumReturns() int { return m.Value.Type().NumOut() } -func (m exportedMethod) ReturnValue(i int) interface{} { +func (m exportedMethod) ReturnValue(i int) any { return reflect.Zero(m.Type().Out(i)).Interface() } @@ -215,10 +215,6 @@ func (obj *exportedObj) LookupMethod(name string) (Method, bool) { return nil, false } -func (obj *exportedObj) isFallbackInterface() bool { - return false -} - func newExportedIntf(methods map[string]Method, includeSubtree bool) *exportedIntf { return &exportedIntf{ methods: methods, @@ -242,9 +238,9 @@ func (obj *exportedIntf) isFallbackInterface() bool { return obj.includeSubtree } -//NewDefaultSignalHandler returns an instance of the default -//signal handler. This is useful if you want to implement only -//one of the two handlers but not both. +// NewDefaultSignalHandler returns an instance of the default +// signal handler. This is useful if you want to implement only +// one of the two handlers but not both. // // Deprecated: this is the default value, don't use it, it will be unexported. func NewDefaultSignalHandler() *defaultSignalHandler { diff --git a/vendor/github.com/godbus/dbus/v5/doc.go b/vendor/github.com/godbus/dbus/v5/doc.go index 8f25a00d61..09eedc71e6 100644 --- a/vendor/github.com/godbus/dbus/v5/doc.go +++ b/vendor/github.com/godbus/dbus/v5/doc.go @@ -7,7 +7,7 @@ on remote objects and emit or receive signals. Using the Export method, you can arrange D-Bus methods calls to be directly translated to method calls on a Go value. -Conversion Rules +# Conversion Rules For outgoing messages, Go types are automatically converted to the corresponding D-Bus types. 
See the official specification at @@ -15,25 +15,25 @@ https://dbus.freedesktop.org/doc/dbus-specification.html#type-system for more information on the D-Bus type system. The following types are directly encoded as their respective D-Bus equivalents: - Go type | D-Bus type - ------------+----------- - byte | BYTE - bool | BOOLEAN - int16 | INT16 - uint16 | UINT16 - int | INT32 - uint | UINT32 - int32 | INT32 - uint32 | UINT32 - int64 | INT64 - uint64 | UINT64 - float64 | DOUBLE - string | STRING - ObjectPath | OBJECT_PATH - Signature | SIGNATURE - Variant | VARIANT - interface{} | VARIANT - UnixFDIndex | UNIX_FD + Go type | D-Bus type + ------------+----------- + byte | BYTE + bool | BOOLEAN + int16 | INT16 + uint16 | UINT16 + int | INT32 + uint | UINT32 + int32 | INT32 + uint32 | UINT32 + int64 | INT64 + uint64 | UINT64 + float64 | DOUBLE + string | STRING + ObjectPath | OBJECT_PATH + Signature | SIGNATURE + Variant | VARIANT + interface{} | VARIANT + UnixFDIndex | UNIX_FD Slices and arrays encode as ARRAYs of their element type. @@ -57,7 +57,7 @@ of STRUCTs. Incoming STRUCTS are represented as a slice of empty interfaces containing the struct fields in the correct order. The Store function can be used to convert such values to Go structs. -Unix FD passing +# Unix FD passing Handling Unix file descriptors deserves special mention. To use them, you should first check that they are supported on a connection by calling SupportsUnixFDs. @@ -66,6 +66,5 @@ UnixFD's to messages that are accompanied by the given file descriptors with the UnixFD values being substituted by the correct indices. Similarly, the indices of incoming messages are automatically resolved. It shouldn't be necessary to use UnixFDIndex. 
- */ package dbus diff --git a/vendor/github.com/godbus/dbus/v5/encoder.go b/vendor/github.com/godbus/dbus/v5/encoder.go index 015b26cd5c..5901ab42a9 100644 --- a/vendor/github.com/godbus/dbus/v5/encoder.go +++ b/vendor/github.com/godbus/dbus/v5/encoder.go @@ -59,7 +59,7 @@ func (enc *encoder) padding(offset, algn int) int { } // Calls binary.Write(enc.out, enc.order, v) and panics on write errors. -func (enc *encoder) binwrite(v interface{}) { +func (enc *encoder) binwrite(v any) { if err := binary.Write(enc.out, enc.order, v); err != nil { panic(err) } @@ -67,7 +67,7 @@ func (enc *encoder) binwrite(v interface{}) { // Encode encodes the given values to the underlying reader. All written values // are aligned properly as required by the D-Bus spec. -func (enc *encoder) Encode(vs ...interface{}) (err error) { +func (enc *encoder) Encode(vs ...any) (err error) { defer func() { err, _ = recover().(error) }() diff --git a/vendor/github.com/godbus/dbus/v5/export.go b/vendor/github.com/godbus/dbus/v5/export.go index d3dd9f7cd6..20d8cb38fa 100644 --- a/vendor/github.com/godbus/dbus/v5/export.go +++ b/vendor/github.com/godbus/dbus/v5/export.go @@ -11,47 +11,47 @@ import ( var ( ErrMsgInvalidArg = Error{ "org.freedesktop.DBus.Error.InvalidArgs", - []interface{}{"Invalid type / number of args"}, + []any{"Invalid type / number of args"}, } ErrMsgNoObject = Error{ "org.freedesktop.DBus.Error.NoSuchObject", - []interface{}{"No such object"}, + []any{"No such object"}, } ErrMsgUnknownMethod = Error{ "org.freedesktop.DBus.Error.UnknownMethod", - []interface{}{"Unknown / invalid method"}, + []any{"Unknown / invalid method"}, } ErrMsgUnknownInterface = Error{ "org.freedesktop.DBus.Error.UnknownInterface", - []interface{}{"Object does not implement the interface"}, + []any{"Object does not implement the interface"}, } ) func MakeNoObjectError(path ObjectPath) Error { return Error{ "org.freedesktop.DBus.Error.NoSuchObject", - []interface{}{fmt.Sprintf("No such object '%s'", 
string(path))}, + []any{fmt.Sprintf("No such object '%s'", string(path))}, } } func MakeUnknownMethodError(methodName string) Error { return Error{ "org.freedesktop.DBus.Error.UnknownMethod", - []interface{}{fmt.Sprintf("Unknown / invalid method '%s'", methodName)}, + []any{fmt.Sprintf("Unknown / invalid method '%s'", methodName)}, } } func MakeUnknownInterfaceError(ifaceName string) Error { return Error{ "org.freedesktop.DBus.Error.UnknownInterface", - []interface{}{fmt.Sprintf("Object does not implement the interface '%s'", ifaceName)}, + []any{fmt.Sprintf("Object does not implement the interface '%s'", ifaceName)}, } } func MakeFailedError(err error) *Error { return &Error{ "org.freedesktop.DBus.Error.Failed", - []interface{}{err.Error()}, + []any{err.Error()}, } } @@ -67,7 +67,7 @@ func computeMethodName(name string, mapping map[string]string) string { return name } -func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Value { +func getMethods(in any, mapping map[string]string) map[string]reflect.Value { if in == nil { return nil } @@ -91,7 +91,7 @@ func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Va return methods } -func getAllMethods(in interface{}, mapping map[string]string) map[string]reflect.Value { +func getAllMethods(in any, mapping map[string]string) map[string]reflect.Value { if in == nil { return nil } @@ -107,9 +107,9 @@ func getAllMethods(in interface{}, mapping map[string]string) map[string]reflect return methods } -func standardMethodArgumentDecode(m Method, sender string, msg *Message, body []interface{}) ([]interface{}, error) { - pointers := make([]interface{}, m.NumArguments()) - decode := make([]interface{}, 0, len(body)) +func standardMethodArgumentDecode(m Method, sender string, msg *Message, body []any) ([]any, error) { + pointers := make([]any, m.NumArguments()) + decode := make([]any, 0, len(body)) for i := 0; i < m.NumArguments(); i++ { tp := reflect.TypeOf(m.ArgumentValue(i)) @@ 
-135,7 +135,7 @@ func standardMethodArgumentDecode(m Method, sender string, msg *Message, body [] return pointers, nil } -func (conn *Conn) decodeArguments(m Method, sender string, msg *Message) ([]interface{}, error) { +func (conn *Conn) decodeArguments(m Method, sender string, msg *Message) ([]any, error) { if decoder, ok := m.(ArgumentDecoder); ok { return decoder.DecodeArguments(conn, sender, msg, msg.Body) } @@ -204,23 +204,21 @@ func (conn *Conn) handleCall(msg *Message) { reply.Headers[FieldDestination] = msg.Headers[FieldSender] } reply.Headers[FieldReplySerial] = MakeVariant(msg.serial) - reply.Body = make([]interface{}, len(ret)) - for i := 0; i < len(ret); i++ { - reply.Body[i] = ret[i] - } + reply.Body = make([]any, len(ret)) + copy(reply.Body, ret) reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) - if err := reply.IsValid(); err != nil { - fmt.Fprintf(os.Stderr, "dbus: dropping invalid reply to %s.%s on obj %s: %s\n", ifaceName, name, path, err) - } else { - conn.sendMessageAndIfClosed(reply, nil) + if err := conn.sendMessageAndIfClosed(reply, nil); err != nil { + if _, ok := err.(FormatError); ok { + fmt.Fprintf(os.Stderr, "dbus: replacing invalid reply to %s.%s on obj %s: %s\n", ifaceName, name, path, err) + } } } } // Emit emits the given signal on the message bus. The name parameter must be // formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost". 
-func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error { +func (conn *Conn) Emit(path ObjectPath, name string, values ...any) error { i := strings.LastIndex(name, ".") if i == -1 { return errors.New("dbus: invalid method name") @@ -237,18 +235,15 @@ func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) erro if len(values) > 0 { msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) } - if err := msg.IsValid(); err != nil { - return err - } var closed bool - conn.sendMessageAndIfClosed(msg, func() { + err := conn.sendMessageAndIfClosed(msg, func() { closed = true }) if closed { return ErrClosed } - return nil + return err } // Export registers the given value to be exported as an object on the @@ -279,7 +274,7 @@ func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) erro // the given combination of path and interface. // // Export returns an error if path is not a valid path name. -func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error { +func (conn *Conn) Export(v any, path ObjectPath, iface string) error { return conn.ExportWithMap(v, nil, path, iface) } @@ -291,7 +286,7 @@ func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error { // type parameter to your method signature. If the error returned is not nil, // it is sent back to the caller as an error. Otherwise, a method reply is // sent with the other return values as its body. -func (conn *Conn) ExportAll(v interface{}, path ObjectPath, iface string) error { +func (conn *Conn) ExportAll(v any, path ObjectPath, iface string) error { return conn.export(getAllMethods(v, nil), path, iface, false) } @@ -300,7 +295,7 @@ func (conn *Conn) ExportAll(v interface{}, path ObjectPath, iface string) error // // The keys in the map are the real method names (exported on the struct), and // the values are the method names to be exported on DBus. 
-func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error { +func (conn *Conn) ExportWithMap(v any, mapping map[string]string, path ObjectPath, iface string) error { return conn.export(getMethods(v, mapping), path, iface, false) } @@ -314,7 +309,7 @@ func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path O // Note that more specific export paths take precedence over less specific. For // example, a method call using the ObjectPath /foo/bar/baz will call a method // exported on /foo/bar before a method exported on /foo. -func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) error { +func (conn *Conn) ExportSubtree(v any, path ObjectPath, iface string) error { return conn.ExportSubtreeWithMap(v, nil, path, iface) } @@ -323,7 +318,7 @@ func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) er // // The keys in the map are the real method names (exported on the struct), and // the values are the method names to be exported on DBus. -func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error { +func (conn *Conn) ExportSubtreeWithMap(v any, mapping map[string]string, path ObjectPath, iface string) error { return conn.export(getMethods(v, mapping), path, iface, true) } @@ -337,16 +332,16 @@ func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, // methods on the fly. // // Any non-function objects in the method table are ignored. -func (conn *Conn) ExportMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error { +func (conn *Conn) ExportMethodTable(methods map[string]any, path ObjectPath, iface string) error { return conn.exportMethodTable(methods, path, iface, false) } // Like ExportSubtree, but with the same caveats as ExportMethodTable. 
-func (conn *Conn) ExportSubtreeMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error { +func (conn *Conn) ExportSubtreeMethodTable(methods map[string]any, path ObjectPath, iface string) error { return conn.exportMethodTable(methods, path, iface, true) } -func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectPath, iface string, includeSubtree bool) error { +func (conn *Conn) exportMethodTable(methods map[string]any, path ObjectPath, iface string, includeSubtree bool) error { var out map[string]reflect.Value if methods != nil { out = make(map[string]reflect.Value) @@ -443,6 +438,18 @@ const ( ReleaseNameReplyNotOwner ) +func (rep ReleaseNameReply) String() string { + switch rep { + case ReleaseNameReplyReleased: + return "released" + case ReleaseNameReplyNonExistent: + return "non existent" + case ReleaseNameReplyNotOwner: + return "not owner" + } + return "unknown" +} + // RequestNameFlags represents the possible flags for a RequestName call. type RequestNameFlags uint32 @@ -461,3 +468,17 @@ const ( RequestNameReplyExists RequestNameReplyAlreadyOwner ) + +func (rep RequestNameReply) String() string { + switch rep { + case RequestNameReplyPrimaryOwner: + return "primary owner" + case RequestNameReplyInQueue: + return "in queue" + case RequestNameReplyExists: + return "exists" + case RequestNameReplyAlreadyOwner: + return "already owner" + } + return "unknown" +} diff --git a/vendor/github.com/godbus/dbus/v5/homedir.go b/vendor/github.com/godbus/dbus/v5/homedir.go deleted file mode 100644 index c44d9b5fc2..0000000000 --- a/vendor/github.com/godbus/dbus/v5/homedir.go +++ /dev/null @@ -1,25 +0,0 @@ -package dbus - -import ( - "os" - "os/user" -) - -// Get returns the home directory of the current user, which is usually the -// value of HOME environment variable. In case it is not set or empty, os/user -// package is used. 
-// -// If linking statically with cgo enabled against glibc, make sure the -// osusergo build tag is used. -// -// If needing to do nss lookups, do not disable cgo or set osusergo. -func getHomeDir() string { - homeDir := os.Getenv("HOME") - if homeDir != "" { - return homeDir - } - if u, err := user.Current(); err == nil { - return u.HomeDir - } - return "/" -} diff --git a/vendor/github.com/godbus/dbus/v5/match.go b/vendor/github.com/godbus/dbus/v5/match.go index 5a607e53e4..ffb0134475 100644 --- a/vendor/github.com/godbus/dbus/v5/match.go +++ b/vendor/github.com/godbus/dbus/v5/match.go @@ -26,10 +26,10 @@ func WithMatchOption(key, value string) MatchOption { return MatchOption{key, value} } -// doesn't make sense to export this option because clients can only -// subscribe to messages with signal type. -func withMatchType(typ string) MatchOption { - return WithMatchOption("type", typ) +// It does not make sense to have a public WithMatchType function +// because clients can only subscribe to messages with signal type. +func withMatchTypeSignal() MatchOption { + return WithMatchOption("type", "signal") } // WithMatchSender sets sender match option. 
diff --git a/vendor/github.com/godbus/dbus/v5/message.go b/vendor/github.com/godbus/dbus/v5/message.go index bdf43fdd6e..097ca3a7fa 100644 --- a/vendor/github.com/godbus/dbus/v5/message.go +++ b/vendor/github.com/godbus/dbus/v5/message.go @@ -108,7 +108,7 @@ type Message struct { Type Flags Headers map[HeaderField]Variant - Body []interface{} + Body []any serial uint32 } @@ -158,7 +158,9 @@ func DecodeMessageWithFDs(rd io.Reader, fds []int) (msg *Message, err error) { if err != nil { return nil, err } - binary.Read(bytes.NewBuffer(b), order, &hlength) + if err := binary.Read(bytes.NewBuffer(b), order, &hlength); err != nil { + return nil, err + } if hlength+length+16 > 1<<27 { return nil, InvalidMessageError("message is too long") } @@ -186,7 +188,7 @@ func DecodeMessageWithFDs(rd io.Reader, fds []int) (msg *Message, err error) { } } - if err = msg.IsValid(); err != nil { + if err = msg.validateHeader(); err != nil { return nil, err } sig, _ := msg.Headers[FieldSignature].value.(Signature) @@ -230,7 +232,7 @@ func (msg *Message) EncodeToWithFDs(out io.Writer, order binary.ByteOrder) (fds if err := msg.validateHeader(); err != nil { return nil, err } - var vs [7]interface{} + var vs [7]any switch order { case binary.LittleEndian: vs[0] = byte('l') @@ -265,12 +267,14 @@ func (msg *Message) EncodeToWithFDs(out io.Writer, order binary.ByteOrder) (fds return } enc.align(8) - body.WriteTo(&buf) + if _, err := body.WriteTo(&buf); err != nil { + return nil, err + } if buf.Len() > 1<<27 { - return make([]int, 0), InvalidMessageError("message is too long") + return nil, InvalidMessageError("message is too long") } if _, err := buf.WriteTo(out); err != nil { - return make([]int, 0), err + return nil, err } return enc.fds, nil } @@ -286,8 +290,7 @@ func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) (err error) // IsValid checks whether msg is a valid message and returns an // InvalidMessageError or FormatError if it is not. 
func (msg *Message) IsValid() error { - var b bytes.Buffer - return msg.EncodeTo(&b, nativeEndian) + return msg.EncodeTo(io.Discard, nativeEndian) } func (msg *Message) validateHeader() error { diff --git a/vendor/github.com/godbus/dbus/v5/object.go b/vendor/github.com/godbus/dbus/v5/object.go index 664abb7fba..954d380712 100644 --- a/vendor/github.com/godbus/dbus/v5/object.go +++ b/vendor/github.com/godbus/dbus/v5/object.go @@ -9,15 +9,15 @@ import ( // BusObject is the interface of a remote object on which methods can be // invoked. type BusObject interface { - Call(method string, flags Flags, args ...interface{}) *Call - CallWithContext(ctx context.Context, method string, flags Flags, args ...interface{}) *Call - Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call - GoWithContext(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call + Call(method string, flags Flags, args ...any) *Call + CallWithContext(ctx context.Context, method string, flags Flags, args ...any) *Call + Go(method string, flags Flags, ch chan *Call, args ...any) *Call + GoWithContext(ctx context.Context, method string, flags Flags, ch chan *Call, args ...any) *Call AddMatchSignal(iface, member string, options ...MatchOption) *Call RemoveMatchSignal(iface, member string, options ...MatchOption) *Call GetProperty(p string) (Variant, error) - StoreProperty(p string, value interface{}) error - SetProperty(p string, v interface{}) error + StoreProperty(p string, value any) error + SetProperty(p string, v any) error Destination() string Path() ObjectPath } @@ -30,12 +30,12 @@ type Object struct { } // Call calls a method with (*Object).Go and waits for its reply. 
-func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call { +func (o *Object) Call(method string, flags Flags, args ...any) *Call { return <-o.createCall(context.Background(), method, flags, make(chan *Call, 1), args...).Done } // CallWithContext acts like Call but takes a context -func (o *Object) CallWithContext(ctx context.Context, method string, flags Flags, args ...interface{}) *Call { +func (o *Object) CallWithContext(ctx context.Context, method string, flags Flags, args ...any) *Call { return <-o.createCall(ctx, method, flags, make(chan *Call, 1), args...).Done } @@ -46,7 +46,7 @@ func (o *Object) CallWithContext(ctx context.Context, method string, flags Flags // Deprecated: use (*Conn) AddMatchSignal instead. func (o *Object) AddMatchSignal(iface, member string, options ...MatchOption) *Call { base := []MatchOption{ - withMatchType("signal"), + withMatchTypeSignal(), WithMatchInterface(iface), WithMatchMember(member), } @@ -65,7 +65,7 @@ func (o *Object) AddMatchSignal(iface, member string, options ...MatchOption) *C // Deprecated: use (*Conn) RemoveMatchSignal instead. func (o *Object) RemoveMatchSignal(iface, member string, options ...MatchOption) *Call { base := []MatchOption{ - withMatchType("signal"), + withMatchTypeSignal(), WithMatchInterface(iface), WithMatchMember(member), } @@ -89,16 +89,16 @@ func (o *Object) RemoveMatchSignal(iface, member string, options ...MatchOption) // // If the method parameter contains a dot ('.'), the part before the last dot // specifies the interface on which the method is called. -func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call { +func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...any) *Call { return o.createCall(context.Background(), method, flags, ch, args...) 
} // GoWithContext acts like Go but takes a context -func (o *Object) GoWithContext(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call { +func (o *Object) GoWithContext(ctx context.Context, method string, flags Flags, ch chan *Call, args ...any) *Call { return o.createCall(ctx, method, flags, ch, args...) } -func (o *Object) createCall(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call { +func (o *Object) createCall(ctx context.Context, method string, flags Flags, ch chan *Call, args ...any) *Call { if ctx == nil { panic("nil context") } @@ -136,7 +136,7 @@ func (o *Object) GetProperty(p string) (Variant, error) { // StoreProperty calls org.freedesktop.DBus.Properties.Get on the given // object. The property name must be given in interface.member notation. // It stores the returned property into the provided value. -func (o *Object) StoreProperty(p string, value interface{}) error { +func (o *Object) StoreProperty(p string, value any) error { idx := strings.LastIndex(p, ".") if idx == -1 || idx+1 == len(p) { return errors.New("dbus: invalid property " + p) @@ -151,7 +151,14 @@ func (o *Object) StoreProperty(p string, value interface{}) error { // SetProperty calls org.freedesktop.DBus.Properties.Set on the given // object. The property name must be given in interface.member notation. -func (o *Object) SetProperty(p string, v interface{}) error { +// Panics if v is not a valid Variant type. +func (o *Object) SetProperty(p string, v any) error { + // v might already be a variant... + variant, ok := v.(Variant) + if !ok { + // Otherwise, make it into one. 
+ variant = MakeVariant(v) + } idx := strings.LastIndex(p, ".") if idx == -1 || idx+1 == len(p) { return errors.New("dbus: invalid property " + p) @@ -160,7 +167,7 @@ func (o *Object) SetProperty(p string, v interface{}) error { iface := p[:idx] prop := p[idx+1:] - return o.Call("org.freedesktop.DBus.Properties.Set", 0, iface, prop, v).Err + return o.Call("org.freedesktop.DBus.Properties.Set", 0, iface, prop, variant).Err } // Destination returns the destination that calls on (o *Object) are sent to. diff --git a/vendor/github.com/godbus/dbus/v5/sequential_handler.go b/vendor/github.com/godbus/dbus/v5/sequential_handler.go index ef2fcdba17..886b5eb16b 100644 --- a/vendor/github.com/godbus/dbus/v5/sequential_handler.go +++ b/vendor/github.com/godbus/dbus/v5/sequential_handler.go @@ -93,7 +93,7 @@ func (scd *sequentialSignalChannelData) bufferSignals() { var queue []*Signal for { if len(queue) == 0 { - signal, ok := <- scd.in + signal, ok := <-scd.in if !ok { return } diff --git a/vendor/github.com/godbus/dbus/v5/server_interfaces.go b/vendor/github.com/godbus/dbus/v5/server_interfaces.go index e4e0389fdf..24d4ad6329 100644 --- a/vendor/github.com/godbus/dbus/v5/server_interfaces.go +++ b/vendor/github.com/godbus/dbus/v5/server_interfaces.go @@ -22,7 +22,7 @@ type Handler interface { // of Interface lookup is up to the implementation of // the ServerObject. The ServerObject implementation may // choose to implement empty string as a valid interface -// represeting all methods or not per the D-Bus specification. +// representing all methods or not per the D-Bus specification. type ServerObject interface { LookupInterface(name string) (Interface, bool) } @@ -38,17 +38,17 @@ type Interface interface { // A Method represents the exposed methods on D-Bus. type Method interface { // Call requires that all arguments are decoded before being passed to it. 
- Call(args ...interface{}) ([]interface{}, error) + Call(args ...any) ([]any, error) NumArguments() int NumReturns() int // ArgumentValue returns a representative value for the argument at position // it should be of the proper type. reflect.Zero would be a good mechanism // to use for this Value. - ArgumentValue(position int) interface{} + ArgumentValue(position int) any // ReturnValue returns a representative value for the return at position // it should be of the proper type. reflect.Zero would be a good mechanism // to use for this Value. - ReturnValue(position int) interface{} + ReturnValue(position int) any } // An Argument Decoder can decode arguments using the non-standard mechanism @@ -65,7 +65,7 @@ type ArgumentDecoder interface { // To decode the arguments of a method the sender and message are // provided in case the semantics of the implementer provides access // to these as part of the method invocation. - DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error) + DecodeArguments(conn *Conn, sender string, msg *Message, args []any) ([]any, error) } // A SignalHandler is responsible for delivering a signal. @@ -93,7 +93,7 @@ type SignalRegistrar interface { // "org.freedesktop.DBus.Error.Failed" error. By implementing this // interface as well a custom encoding may be provided. type DBusError interface { - DBusError() (string, []interface{}) + DBusError() (string, []any) } // SerialGenerator is responsible for serials generation. diff --git a/vendor/github.com/godbus/dbus/v5/sig.go b/vendor/github.com/godbus/dbus/v5/sig.go index 6b9cadb5fb..de49f860de 100644 --- a/vendor/github.com/godbus/dbus/v5/sig.go +++ b/vendor/github.com/godbus/dbus/v5/sig.go @@ -31,7 +31,7 @@ type Signature struct { // SignatureOf returns the concatenation of all the signatures of the given // values. It panics if one of them is not representable in D-Bus. 
-func SignatureOf(vs ...interface{}) Signature { +func SignatureOf(vs ...any) Signature { var s string for _, v := range vs { s += getSignature(reflect.TypeOf(v), &depthCounter{}) @@ -89,9 +89,10 @@ func getSignature(t reflect.Type, depth *depthCounter) (sig string) { } return "s" case reflect.Struct: - if t == variantType { + switch t { + case variantType: return "v" - } else if t == signatureType { + case signatureType: return "g" } var s string @@ -202,7 +203,7 @@ func (cnt depthCounter) EnterDictEntry() *depthCounter { // and rem is the remaining unparsed part. Otherwise, err is a non-nil // SignatureError and rem is "". depth is the current recursion depth which may // not be greater than 64 and should be given as 0 on the first call. -func validSingle(s string, depth *depthCounter) (err error, rem string) { +func validSingle(s string, depth *depthCounter) (err error, rem string) { //nolint:staticcheck // Ignore "ST1008: error should be returned as the last argument". if s == "" { return SignatureError{Sig: s, Reason: "empty signature"}, "" } @@ -221,6 +222,9 @@ func validSingle(s string, depth *depthCounter) (err error, rem string) { i++ rem = s[i+1:] s = s[2:i] + if len(s) == 0 { + return SignatureError{Sig: s, Reason: "empty dict"}, "" + } if err, _ = validSingle(s[:1], depth.EnterArray().EnterDictEntry()); err != nil { return err, "" } @@ -255,9 +259,10 @@ func validSingle(s string, depth *depthCounter) (err error, rem string) { func findMatching(s string, left, right rune) int { n := 0 for i, v := range s { - if v == left { + switch v { + case left: n++ - } else if v == right { + case right: n-- } if n == 0 { diff --git a/vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go b/vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go index 697739efaf..9ffdeb0c0d 100644 --- a/vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go +++ b/vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go @@ -1,11 +1,11 @@ -//+build !windows +//go:build !windows package 
dbus import ( "errors" - "io/ioutil" "net" + "os" ) func init() { @@ -27,12 +27,14 @@ func newNonceTcpTransport(keys string) (transport, error) { if err != nil { return nil, err } - b, err := ioutil.ReadFile(noncefile) + b, err := os.ReadFile(noncefile) if err != nil { + socket.Close() return nil, err } _, err = socket.Write(b) if err != nil { + socket.Close() return nil, err } return NewConn(socket) diff --git a/vendor/github.com/godbus/dbus/v5/transport_unix.go b/vendor/github.com/godbus/dbus/v5/transport_unix.go index 0a8c712ebd..3baad2e9af 100644 --- a/vendor/github.com/godbus/dbus/v5/transport_unix.go +++ b/vendor/github.com/godbus/dbus/v5/transport_unix.go @@ -1,4 +1,4 @@ -//+build !windows,!solaris +//go:build !windows && !solaris package dbus @@ -11,10 +11,29 @@ import ( "syscall" ) +// msghead represents the part of the message header +// that has a constant size (byte order + 15 bytes). +type msghead struct { + Type Type + Flags Flags + Proto byte + BodyLen uint32 + Serial uint32 + HeaderLen uint32 +} + type oobReader struct { conn *net.UnixConn oob []byte buf [4096]byte + + // The following fields are used to reduce memory allocs. 
+ headers []header + csheader []byte + b *bytes.Buffer + r *bytes.Reader + dec *decoder + msghead } func (o *oobReader) Read(b []byte) (n int, err error) { @@ -35,6 +54,14 @@ type unixTransport struct { hasUnixFDs bool } +func newUnixTransportFromConn(conn *net.UnixConn) transport { + t := new(unixTransport) + t.UnixConn = conn + t.hasUnixFDs = true + + return t +} + func newUnixTransport(keys string) (transport, error) { var err error @@ -70,28 +97,36 @@ func (t *unixTransport) EnableUnixFDs() { } func (t *unixTransport) ReadMessage() (*Message, error) { - var ( - blen, hlen uint32 - csheader [16]byte - headers []header - order binary.ByteOrder - unixfds uint32 - ) // To be sure that all bytes of out-of-band data are read, we use a special // reader that uses ReadUnix on the underlying connection instead of Read // and gathers the out-of-band data in a buffer. if t.rdr == nil { - t.rdr = &oobReader{conn: t.UnixConn} + t.rdr = &oobReader{ + conn: t.UnixConn, + // This buffer is used to decode the part of the header that has a constant size. + csheader: make([]byte, 16), + b: &bytes.Buffer{}, + // The reader helps to read from the buffer several times. 
+ r: &bytes.Reader{}, + dec: &decoder{}, + } } else { - t.rdr.oob = nil + t.rdr.oob = t.rdr.oob[:0] + t.rdr.headers = t.rdr.headers[:0] } + var ( + r = t.rdr.r + b = t.rdr.b + dec = t.rdr.dec + ) - // read the first 16 bytes (the part of the header that has a constant size), - // from which we can figure out the length of the rest of the message - if _, err := io.ReadFull(t.rdr, csheader[:]); err != nil { + _, err := io.ReadFull(t.rdr, t.rdr.csheader) + if err != nil { return nil, err } - switch csheader[0] { + + var order binary.ByteOrder + switch t.rdr.csheader[0] { case 'l': order = binary.LittleEndian case 'B': @@ -99,38 +134,62 @@ func (t *unixTransport) ReadMessage() (*Message, error) { default: return nil, InvalidMessageError("invalid byte order") } - // csheader[4:8] -> length of message body, csheader[12:16] -> length of - // header fields (without alignment) - binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen) - binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen) + + r.Reset(t.rdr.csheader[1:]) + if err := binary.Read(r, order, &t.rdr.msghead); err != nil { + return nil, err + } + + msg := &Message{ + Type: t.rdr.Type, + Flags: t.rdr.Flags, + serial: t.rdr.Serial, + } + // Length of header fields (without alignment). + hlen := t.rdr.HeaderLen if hlen%8 != 0 { hlen += 8 - (hlen % 8) } + if hlen+t.rdr.BodyLen+16 > 1<<27 { + return nil, InvalidMessageError("message is too long") + } - // decode headers and look for unix fds - headerdata := make([]byte, hlen+4) - copy(headerdata, csheader[12:]) - if _, err := io.ReadFull(t.rdr, headerdata[4:]); err != nil { + // Decode headers and look for unix fds. 
+ b.Reset() + if _, err = b.Write(t.rdr.csheader[12:]); err != nil { return nil, err } - dec := newDecoder(bytes.NewBuffer(headerdata), order, make([]int, 0)) + if _, err = io.CopyN(b, t.rdr, int64(hlen)); err != nil { + return nil, err + } + dec.Reset(b, order, nil) dec.pos = 12 vs, err := dec.Decode(Signature{"a(yv)"}) if err != nil { return nil, err } - Store(vs, &headers) - for _, v := range headers { + if err = Store(vs, &t.rdr.headers); err != nil { + return nil, err + } + var unixfds uint32 + for _, v := range t.rdr.headers { if v.Field == byte(FieldUnixFDs) { - unixfds, _ = v.Variant.value.(uint32) + unixfds, _ = v.value.(uint32) } } - all := make([]byte, 16+hlen+blen) - copy(all, csheader[:]) - copy(all[16:], headerdata[4:]) - if _, err := io.ReadFull(t.rdr, all[16+hlen:]); err != nil { + + msg.Headers = make(map[HeaderField]Variant) + for _, v := range t.rdr.headers { + msg.Headers[HeaderField(v.Field)] = v.Variant + } + + dec.align(8) + body := make([]byte, t.rdr.BodyLen) + if _, err = io.ReadFull(t.rdr, body); err != nil { return nil, err } + r.Reset(body) + if unixfds != 0 { if !t.hasUnixFDs { return nil, errors.New("dbus: got unix fds on unsupported transport") @@ -147,8 +206,8 @@ func (t *unixTransport) ReadMessage() (*Message, error) { if err != nil { return nil, err } - msg, err := DecodeMessageWithFDs(bytes.NewBuffer(all), fds) - if err != nil { + dec.Reset(r, order, fds) + if err = decodeMessageBody(msg, dec); err != nil { return nil, err } // substitute the values in the message body (which are indices for the @@ -173,7 +232,27 @@ func (t *unixTransport) ReadMessage() (*Message, error) { } return msg, nil } - return DecodeMessage(bytes.NewBuffer(all)) + + dec.Reset(r, order, nil) + if err = decodeMessageBody(msg, dec); err != nil { + return nil, err + } + return msg, nil +} + +func decodeMessageBody(msg *Message, dec *decoder) error { + if err := msg.validateHeader(); err != nil { + return err + } + + sig, _ := 
msg.Headers[FieldSignature].value.(Signature) + if sig.str == "" { + return nil + } + + var err error + msg.Body, err = dec.Decode(sig) + return err } func (t *unixTransport) SendMessage(msg *Message) error { @@ -192,7 +271,7 @@ func (t *unixTransport) SendMessage(msg *Message) error { return err } oob := syscall.UnixRights(fds...) - n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil) + n, oobn, err := t.WriteMsgUnix(buf.Bytes(), oob, nil) if err != nil { return err } diff --git a/vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go b/vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go index 1b5ed2089d..ff2284c838 100644 --- a/vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go +++ b/vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go @@ -7,39 +7,41 @@ package dbus -/* -const int sizeofPtr = sizeof(void*); -#define _WANT_UCRED -#include -#include -*/ -import "C" - import ( "io" "os" "syscall" "unsafe" + + "golang.org/x/sys/unix" ) // http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go // https://golang.org/src/syscall/ztypes_freebsd_amd64.go +// +// Note: FreeBSD actually uses a 'struct cmsgcred' which starts with +// these fields and adds a list of the additional groups for the +// sender. type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 + Pid int32 + Uid uint32 + Euid uint32 + Gid uint32 } -// http://golang.org/src/pkg/syscall/types_linux.go -// https://golang.org/src/syscall/types_freebsd.go -// https://github.com/freebsd/freebsd/blob/master/sys/sys/ucred.h +// https://github.com/freebsd/freebsd/blob/master/sys/sys/socket.h +// +// The cmsgcred structure contains the above four fields, followed by +// a uint16 count of additional groups, uint16 padding to align and a +// 16 element array of uint32 for the additional groups. The size is +// the same across all supported platforms. 
const ( - SizeofUcred = C.sizeof_struct_ucred + SizeofCmsgcred = 84 // 4*4 + 2*2 + 16*4 ) // http://golang.org/src/pkg/syscall/sockcmsg_unix.go func cmsgAlignOf(salen int) int { - salign := C.sizeofPtr + salign := unix.SizeofPtr return (salen + salign - 1) & ^(salign - 1) } @@ -54,11 +56,11 @@ func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer { // for sending to another process. This can be used for // authentication. func UnixCredentials(ucred *Ucred) []byte { - b := make([]byte, syscall.CmsgSpace(SizeofUcred)) + b := make([]byte, syscall.CmsgSpace(SizeofCmsgcred)) h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) h.Level = syscall.SOL_SOCKET h.Type = syscall.SCM_CREDS - h.SetLen(syscall.CmsgLen(SizeofUcred)) + h.SetLen(syscall.CmsgLen(SizeofCmsgcred)) *((*Ucred)(cmsgData(h))) = *ucred return b } diff --git a/vendor/github.com/godbus/dbus/v5/transport_unixcred_linux.go b/vendor/github.com/godbus/dbus/v5/transport_unixcred_linux.go index d9dfdf6982..2d5d7f612f 100644 --- a/vendor/github.com/godbus/dbus/v5/transport_unixcred_linux.go +++ b/vendor/github.com/godbus/dbus/v5/transport_unixcred_linux.go @@ -14,7 +14,7 @@ import ( func (t *unixTransport) SendNullByte() error { ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} b := syscall.UnixCredentials(ucred) - _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) + _, oobn, err := t.WriteMsgUnix([]byte{0}, b, nil) if err != nil { return err } diff --git a/vendor/github.com/godbus/dbus/v5/variant.go b/vendor/github.com/godbus/dbus/v5/variant.go index ca3dbe16a4..bf98ddea73 100644 --- a/vendor/github.com/godbus/dbus/v5/variant.go +++ b/vendor/github.com/godbus/dbus/v5/variant.go @@ -11,17 +11,17 @@ import ( // Variant represents the D-Bus variant type. type Variant struct { sig Signature - value interface{} + value any } // MakeVariant converts the given value to a Variant. It panics if v cannot be // represented as a D-Bus type. 
-func MakeVariant(v interface{}) Variant { +func MakeVariant(v any) Variant { return MakeVariantWithSignature(v, SignatureOf(v)) } // MakeVariantWithSignature converts the given value to a Variant. -func MakeVariantWithSignature(v interface{}, s Signature) Variant { +func MakeVariantWithSignature(v any, s Signature) Variant { return Variant{s, v} } @@ -73,7 +73,7 @@ func (v Variant) format() (string, bool) { } rv := reflect.ValueOf(v.value) switch rv.Kind() { - case reflect.Slice: + case reflect.Slice, reflect.Array: if rv.Len() == 0 { return "[]", false } @@ -119,6 +119,25 @@ func (v Variant) format() (string, bool) { } buf.WriteByte('}') return buf.String(), unamb + case reflect.Struct: + if rv.NumField() == 0 { + return "()", false + } + unamb := true + var buf bytes.Buffer + buf.WriteByte('(') + for i := 0; i < rv.NumField(); i++ { + s, b := MakeVariant(rv.Field(i).Interface()).format() + unamb = unamb && b + buf.WriteString(s) + buf.WriteString(",") + if i != rv.NumField()-1 { + buf.WriteString(" ") + } + } + buf.WriteByte(')') + return buf.String(), unamb + } return `"INVALID"`, true } @@ -139,12 +158,12 @@ func (v Variant) String() string { } // Value returns the underlying value of v. -func (v Variant) Value() interface{} { +func (v Variant) Value() any { return v.value } // Store converts the variant into a native go type using the same // mechanism as the "Store" function. 
-func (v Variant) Store(value interface{}) error { +func (v Variant) Store(value any) error { return storeInterfaces(v.value, value) } diff --git a/vendor/github.com/godbus/dbus/v5/variant_lexer.go b/vendor/github.com/godbus/dbus/v5/variant_lexer.go index bf1398c8f0..a0649c5ca5 100644 --- a/vendor/github.com/godbus/dbus/v5/variant_lexer.go +++ b/vendor/github.com/godbus/dbus/v5/variant_lexer.go @@ -67,7 +67,7 @@ func (l *varLexer) emit(t varTokenType) { l.start = l.pos } -func (l *varLexer) errorf(format string, v ...interface{}) lexState { +func (l *varLexer) errorf(format string, v ...any) lexState { l.tokens = append(l.tokens, varToken{ tokError, fmt.Sprintf(format, v...), diff --git a/vendor/github.com/godbus/dbus/v5/variant_parser.go b/vendor/github.com/godbus/dbus/v5/variant_parser.go index d20f5da6dd..bc0bd945c8 100644 --- a/vendor/github.com/godbus/dbus/v5/variant_parser.go +++ b/vendor/github.com/godbus/dbus/v5/variant_parser.go @@ -33,7 +33,7 @@ type varNode interface { Infer() (Signature, error) String() string Sigs() sigSet - Value(Signature) (interface{}, error) + Value(Signature) (any, error) } func varMakeNode(p *varParser) (varNode, error) { @@ -134,7 +134,7 @@ func (s sigSet) ToArray() sigSet { type numNode struct { sig Signature str string - val interface{} + val any } var numSigSet = sigSet{ @@ -169,7 +169,7 @@ func (n numNode) Sigs() sigSet { return numSigSet } -func (n numNode) Value(sig Signature) (interface{}, error) { +func (n numNode) Value(sig Signature) (any, error) { if n.sig.str != "" && n.sig != sig { return nil, varTypeError{n.str, sig} } @@ -190,7 +190,7 @@ func varMakeNumNode(tok varToken, sig Signature) (varNode, error) { return numNode{sig: sig, val: num}, nil } -func varNumAs(s string, sig Signature) (interface{}, error) { +func varNumAs(s string, sig Signature) (any, error) { isUnsigned := false size := 32 switch sig.str { @@ -220,20 +220,20 @@ func varNumAs(s string, sig Signature) (interface{}, error) { return nil, 
varTypeError{s, sig} } base := 10 - if strings.HasPrefix(s, "0x") { + if after, ok := strings.CutPrefix(s, "0x"); ok { base = 16 - s = s[2:] + s = after } - if strings.HasPrefix(s, "0") && len(s) != 1 { + if after, ok := strings.CutPrefix(s, "0"); ok && len(s) != 1 { base = 8 - s = s[1:] + s = after } if isUnsigned { i, err := strconv.ParseUint(s, base, size) if err != nil { return nil, err } - var v interface{} = i + var v any = i switch sig.str { case "y": v = byte(i) @@ -248,7 +248,7 @@ func varNumAs(s string, sig Signature) (interface{}, error) { if err != nil { return nil, err } - var v interface{} = i + var v any = i switch sig.str { case "n": v = int16(i) @@ -260,8 +260,8 @@ func varNumAs(s string, sig Signature) (interface{}, error) { type stringNode struct { sig Signature - str string // parsed - val interface{} // has correct type + str string // parsed + val any // has correct type } var stringSigSet = sigSet{ @@ -285,19 +285,19 @@ func (n stringNode) Sigs() sigSet { return stringSigSet } -func (n stringNode) Value(sig Signature) (interface{}, error) { +func (n stringNode) Value(sig Signature) (any, error) { if n.sig.str != "" && n.sig != sig { return nil, varTypeError{n.str, sig} } if n.val != nil { return n.val, nil } - switch { - case sig.str == "g": + switch sig.str { + case "g": return Signature{n.str}, nil - case sig.str == "o": + case "o": return ObjectPath(n.str), nil - case sig.str == "s": + case "s": return n.str, nil default: return nil, varTypeError{n.str, sig} @@ -407,7 +407,7 @@ func (boolNode) Sigs() sigSet { return boolSigSet } -func (b boolNode) Value(sig Signature) (interface{}, error) { +func (b boolNode) Value(sig Signature) (any, error) { if sig.str != "b" { return nil, varTypeError{b.String(), sig} } @@ -417,7 +417,6 @@ func (b boolNode) Value(sig Signature) (interface{}, error) { type arrayNode struct { set sigSet children []varNode - val interface{} } func (n arrayNode) Infer() (Signature, error) { @@ -446,7 +445,7 @@ func (n 
arrayNode) Sigs() sigSet { return n.set } -func (n arrayNode) Value(sig Signature) (interface{}, error) { +func (n arrayNode) Value(sig Signature) (any, error) { if n.set.Empty() { // no type information whatsoever, so this must be an empty slice return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil @@ -537,7 +536,7 @@ func (variantNode) Sigs() sigSet { return variantSet } -func (n variantNode) Value(sig Signature) (interface{}, error) { +func (n variantNode) Value(sig Signature) (any, error) { if sig.str != "v" { return nil, varTypeError{n.String(), sig} } @@ -574,7 +573,6 @@ type dictEntry struct { type dictNode struct { kset, vset sigSet children []dictEntry - val interface{} } func (n dictNode) Infer() (Signature, error) { @@ -614,7 +612,7 @@ func (n dictNode) Sigs() sigSet { return r } -func (n dictNode) Value(sig Signature) (interface{}, error) { +func (n dictNode) Value(sig Signature) (any, error) { set := n.Sigs() if set.Empty() { // no type information -> empty dict @@ -749,7 +747,7 @@ func (b byteStringNode) Sigs() sigSet { return byteStringSet } -func (b byteStringNode) Value(sig Signature) (interface{}, error) { +func (b byteStringNode) Value(sig Signature) (any, error) { if sig.str != "ay" { return nil, varTypeError{b.String(), sig} } diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README index cea12879a0..fd191f78c7 100644 --- a/vendor/github.com/golang/snappy/README +++ b/vendor/github.com/golang/snappy/README @@ -1,8 +1,13 @@ The Snappy compression format in the Go programming language. -To download and install from source: +To use as a library: $ go get github.com/golang/snappy +To use as a binary: +$ go install github.com/golang/snappy/cmd/snappytool@latest +$ cat decoded | ~/go/bin/snappytool -e > encoded +$ cat encoded | ~/go/bin/snappytool -d > decoded + Unless otherwise noted, the Snappy-Go source files are distributed under the BSD-style license found in the LICENSE file. 
diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s index f8d54adfc5..f0c876a248 100644 --- a/vendor/github.com/golang/snappy/encode_arm64.s +++ b/vendor/github.com/golang/snappy/encode_arm64.s @@ -27,7 +27,7 @@ // The unusual register allocation of local variables, such as R10 for the // source pointer, matches the allocation used at the call site in encodeBlock, // which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $32-56 +TEXT ·emitLiteral(SB), NOSPLIT, $40-56 MOVD dst_base+0(FP), R8 MOVD lit_base+24(FP), R10 MOVD lit_len+32(FP), R3 @@ -261,7 +261,7 @@ extendMatchEnd: // "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An // extra 64 bytes, to call other functions, and an extra 64 bytes, to spill // local variables (registers) during calls gives 32768 + 64 + 64 = 32896. -TEXT ·encodeBlock(SB), 0, $32896-56 +TEXT ·encodeBlock(SB), 0, $32904-56 MOVD dst_base+0(FP), R8 MOVD src_base+24(FP), R7 MOVD src_len+32(FP), R14 diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel index c12e4904da..89cf460d3a 100644 --- a/vendor/github.com/google/cel-go/cel/BUILD.bazel +++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel @@ -21,7 +21,7 @@ go_library( "prompt.go", "validator.go", ], - embedsrcs = ["//cel/templates"], + embedsrcs = ["templates/authoring.tmpl"], importpath = "github.com/google/cel-go/cel", visibility = ["//visibility:public"], deps = [ @@ -96,3 +96,8 @@ go_test( "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", ], ) + +exports_files( + ["templates/authoring.tmpl"], + visibility = ["//visibility:public"], +) \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go index 59a10e81de..bc13add890 100644 --- a/vendor/github.com/google/cel-go/cel/library.go +++ 
b/vendor/github.com/google/cel-go/cel/library.go @@ -182,7 +182,6 @@ func (lib *stdLibrary) CompileOptions() []EnvOption { if err = lib.subset.Validate(); err != nil { return nil, err } - e.variables = append(e.variables, stdlib.Types()...) for _, fn := range funcs { existing, found := e.functions[fn.Name()] if found { diff --git a/vendor/github.com/google/cel-go/cel/optimizer.go b/vendor/github.com/google/cel-go/cel/optimizer.go index 9a2a97a647..6e260a93cf 100644 --- a/vendor/github.com/google/cel-go/cel/optimizer.go +++ b/vendor/github.com/google/cel-go/cel/optimizer.go @@ -15,6 +15,7 @@ package cel import ( + "fmt" "sort" "github.com/google/cel-go/common" @@ -29,17 +30,43 @@ import ( // passes to ensure that the final optimized output is a valid expression with metadata consistent // with what would have been generated from a parsed and checked expression. // -// Note: source position information is best-effort and likely wrong, but optimized expressions +// Note: source position information is best-effort and incomplete, but optimized expressions // should be suitable for calls to parser.Unparse. type StaticOptimizer struct { optimizers []ASTOptimizer + // If set, Optimize() will use this Source instead of the one from the AST. + sourceOverride *Source } +type OptimizerOption func(*StaticOptimizer) (*StaticOptimizer, error) + // NewStaticOptimizer creates a StaticOptimizer with a sequence of ASTOptimizer's to be applied // to a checked expression. 
-func NewStaticOptimizer(optimizers ...ASTOptimizer) *StaticOptimizer { - return &StaticOptimizer{ - optimizers: optimizers, +func NewStaticOptimizer(options ...any) (*StaticOptimizer, error) { + so := &StaticOptimizer{} + var err error + for _, opt := range options { + switch v := opt.(type) { + case ASTOptimizer: + so.optimizers = append(so.optimizers, v) + case OptimizerOption: + so, err = v(so) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported option: %v", v) + } + } + return so, nil +} + +// OptimizeWithSource overrides the source used by the optimizer. +// Note this will cause the source info from the AST passed to Optimize() to be discarded. +func OptimizeWithSource(source Source) OptimizerOption { + return func(so *StaticOptimizer) (*StaticOptimizer, error) { + so.sourceOverride = &source + return so, nil } } @@ -49,15 +76,21 @@ func NewStaticOptimizer(optimizers ...ASTOptimizer) *StaticOptimizer { func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) { // Make a copy of the AST to be optimized. optimized := ast.Copy(a.NativeRep()) + source := a.Source() + sourceInfo := optimized.SourceInfo() + if opt.sourceOverride != nil { + source = *opt.sourceOverride + sourceInfo = ast.NewSourceInfo(*opt.sourceOverride) + } ids := newIDGenerator(ast.MaxID(a.NativeRep())) // Create the optimizer context, could be pooled in the future. - issues := NewIssues(common.NewErrors(a.Source())) + issues := NewIssues(common.NewErrors(source)) baseFac := ast.NewExprFactory() exprFac := &optimizerExprFactory{ idGenerator: ids, fac: baseFac, - sourceInfo: optimized.SourceInfo(), + sourceInfo: sourceInfo, } ctx := &OptimizerContext{ optimizerExprFactory: exprFac, @@ -80,7 +113,7 @@ func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) { // Recheck the updated expression for any possible type-agreement or validation errors. 
parsed := &Ast{ - source: a.Source(), + source: source, impl: ast.NewAST(expr, info)} checked, iss := ctx.Check(parsed) if iss.Err() != nil { @@ -91,7 +124,7 @@ func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) { // Return the optimized result. return &Ast{ - source: a.Source(), + source: source, impl: optimized, }, nil } @@ -100,6 +133,8 @@ func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) { // that the ids within the expression correspond to the ids within macros. func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInfo) { optimized.RenumberIDs(idGen) + info.RenumberIDs(idGen) + if len(info.MacroCalls()) == 0 { return } @@ -260,6 +295,9 @@ func (opt *optimizerExprFactory) CopyASTAndMetadata(a *ast.AST) ast.Expr { for macroID, call := range copyInfo.MacroCalls() { opt.SetMacroCall(macroID, call) } + for id, offset := range copyInfo.OffsetRanges() { + opt.sourceInfo.SetOffsetRange(id, offset) + } return copyExpr } diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go index 0057c16ccb..d07d8e799e 100644 --- a/vendor/github.com/google/cel-go/checker/checker.go +++ b/vendor/github.com/google/cel-go/checker/checker.go @@ -19,6 +19,8 @@ package checker import ( "fmt" "reflect" + "slices" + "strings" "github.com/google/cel-go/common" "github.com/google/cel-go/common/ast" @@ -65,6 +67,10 @@ func Check(parsed *ast.AST, source common.Source, env *Env) (*ast.AST, *common.E for id, t := range c.TypeMap() { c.SetType(id, substitute(c.mappings, t, true)) } + // Remove source info for IDs without a corresponding AST node. This can happen because + // check() deletes some nodes while rewriting the AST. For example the Select operand is + // deleted when a variable reference is replaced with a Ident expression. 
+ c.AST.ClearUnusedIDs() return c.AST, errs } @@ -104,11 +110,15 @@ func (c *checker) check(e ast.Expr) { func (c *checker) checkIdent(e ast.Expr) { identName := e.AsIdent() // Check to see if the identifier is declared. - if ident := c.env.LookupIdent(identName); ident != nil { + if ident := c.env.resolveSimpleIdent(identName); ident != nil { + name := strings.TrimPrefix(ident.Name(), ".") + if ident.requiresDisambiguation { + name = "." + name + } c.setType(e, ident.Type()) - c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value())) + c.setReference(e, ast.NewIdentReference(name, ident.Value())) // Overwrite the identifier with its fully qualified name. - e.SetKindCase(c.NewIdent(e.ID(), ident.Name())) + e.SetKindCase(c.NewIdent(e.ID(), name)) return } @@ -119,18 +129,22 @@ func (c *checker) checkIdent(e ast.Expr) { func (c *checker) checkSelect(e ast.Expr) { sel := e.AsSelect() // Before traversing down the tree, try to interpret as qualified name. - qname, found := containers.ToQualifiedName(e) + qualifiers, found := c.computeQualifiers(e) if found { - ident := c.env.LookupIdent(qname) + ident := c.env.resolveQualifiedIdent(qualifiers...) if ident != nil { // We don't check for a TestOnly expression here since the `found` result is // always going to be false for TestOnly expressions. // Rewrite the node to be a variable reference to the resolved fully-qualified // variable name. + name := ident.Name() + if ident.requiresDisambiguation { + name = "." + name + } c.setType(e, ident.Type()) - c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value())) - e.SetKindCase(c.NewIdent(e.ID(), ident.Name())) + c.setReference(e, ast.NewIdentReference(name, ident.Value())) + e.SetKindCase(c.NewIdent(e.ID(), name)) return } } @@ -142,6 +156,29 @@ func (c *checker) checkSelect(e ast.Expr) { c.setType(e, substitute(c.mappings, resultType, false)) } +// computeQualifiers computes the qualified names parts of a select expression. 
+func (c *checker) computeQualifiers(e ast.Expr) ([]string, bool) { + var qualifiers []string + for e.Kind() == ast.SelectKind { + sel := e.AsSelect() + // test only expressions are not considered for qualified name selection. + if sel.IsTestOnly() { + return qualifiers, false + } + // otherwise append the select field name to the qualifier list (reverse order) + qualifiers = append(qualifiers, sel.FieldName()) + e = sel.Operand() + // If the next operand is an identifier, then append it, reverse the name sequence + // and return it to the caller.s + if e.Kind() == ast.IdentKind { + qualifiers = append(qualifiers, e.AsIdent()) + slices.Reverse(qualifiers) + return qualifiers, true + } + } + return qualifiers, false +} + func (c *checker) checkOptSelect(e ast.Expr) { // Collect metadata related to the opt select call packaged by the parser. call := e.AsCall() @@ -234,7 +271,7 @@ func (c *checker) checkCall(e ast.Expr) { // Regular static call with simple name. if !call.IsMemberFunction() { // Check for the existence of the function. - fn := c.env.LookupFunction(fnName) + fn := c.env.lookupFunction(fnName) if fn == nil { c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName) c.setType(e, types.ErrorType) @@ -256,7 +293,7 @@ func (c *checker) checkCall(e ast.Expr) { qualifiedPrefix, maybeQualified := containers.ToQualifiedName(target) if maybeQualified { maybeQualifiedName := qualifiedPrefix + "." + fnName - fn := c.env.LookupFunction(maybeQualifiedName) + fn := c.env.lookupFunction(maybeQualifiedName) if fn != nil { // The function name is namespaced and so preserving the target operand would // be an inaccurate representation of the desired evaluation behavior. @@ -269,7 +306,7 @@ func (c *checker) checkCall(e ast.Expr) { // Regular instance call. c.check(target) - fn := c.env.LookupFunction(fnName) + fn := c.env.lookupFunction(fnName) // Function found, attempt overload resolution. 
if fn != nil { c.resolveOverloadOrError(e, fn, target, args) @@ -441,7 +478,7 @@ func (c *checker) checkCreateStruct(e ast.Expr) { msgVal := e.AsStruct() // Determine the type of the message. resultType := types.ErrorType - ident := c.env.LookupIdent(msgVal.TypeName()) + ident := c.env.resolveTypeIdent(msgVal.TypeName()) if ident == nil { c.errors.undeclaredReference( e.ID(), c.location(e), c.env.container.Name(), msgVal.TypeName()) diff --git a/vendor/github.com/google/cel-go/checker/env.go b/vendor/github.com/google/cel-go/checker/env.go index d5ac05014e..6d991eba10 100644 --- a/vendor/github.com/google/cel-go/checker/env.go +++ b/vendor/github.com/google/cel-go/checker/env.go @@ -129,45 +129,111 @@ func (e *Env) AddFunctions(declarations ...*decls.FunctionDecl) error { return formatError(errMsgs) } -// LookupIdent returns a Decl proto for typeName as an identifier in the Env. -// Returns nil if no such identifier is found in the Env. -func (e *Env) LookupIdent(name string) *decls.VariableDecl { +// newAttrResolution creates a new attribute resolution value. +func newAttrResolution(ident *decls.VariableDecl, requiresDisambiguation bool) *attributeResolution { + return &attributeResolution{ + VariableDecl: ident, + requiresDisambiguation: requiresDisambiguation, + } +} + +// attributeResolution wraps an existing variable and denotes whether disambiguation is needed +// during variable resolution. +type attributeResolution struct { + *decls.VariableDecl + + // requiresDisambiguation indicates the variable name should be dot-prefixed. + requiresDisambiguation bool +} + +// resolveSimpleIdent determines the resolved attribute for a single identifier. 
+func (e *Env) resolveSimpleIdent(name string) *attributeResolution { + local := e.lookupLocalIdent(name) + if local != nil && !strings.HasPrefix(name, ".") { + return newAttrResolution(local, false) + } for _, candidate := range e.container.ResolveCandidateNames(name) { - if ident := e.declarations.FindIdent(candidate); ident != nil { - return ident + if ident := e.lookupGlobalIdent(candidate); ident != nil { + return newAttrResolution(ident, local != nil) } + } + return nil +} - // Next try to import the name as a reference to a message type. If found, - // the declaration is added to the outest (global) scope of the - // environment, so next time we can access it faster. - if t, found := e.provider.FindStructType(candidate); found { - decl := decls.NewVariable(candidate, t) - e.declarations.AddIdent(decl) - return decl +// resolveQualifiedIdent determines the resolved attribute for a qualified identifier. +func (e *Env) resolveQualifiedIdent(qualifiers ...string) *attributeResolution { + if len(qualifiers) == 1 { + return e.resolveSimpleIdent(qualifiers[0]) + } + local := e.lookupLocalIdent(qualifiers[0]) + if local != nil && !strings.HasPrefix(qualifiers[0], ".") { + // this should resolve through a field selection rather than a qualified identifier + return nil + } + // The qualifiers are concatenated together to indicate the qualified name to search + // for as a global identifier. Since select expressions are resolved from leaf to root + // if the fully concatenated string doesn't match a global identifier, indicate that + // no variable was found to continue the traversal up to the next simpler name. + varName := strings.Join(qualifiers, ".") + for _, candidate := range e.container.ResolveCandidateNames(varName) { + if ident := e.lookupGlobalIdent(candidate); ident != nil { + return newAttrResolution(ident, local != nil) } + } + return nil +} +// resolveTypeIdent returns a Decl proto for typeName as an identifier in the Env. 
+// Returns nil if no such identifier is found in the Env. +func (e *Env) resolveTypeIdent(name string) *decls.VariableDecl { + for _, candidate := range e.container.ResolveCandidateNames(name) { + // Try to import the name as a reference to a message type. if i, found := e.provider.FindIdent(candidate); found { if t, ok := i.(*types.Type); ok { - decl := decls.NewVariable(candidate, types.NewTypeTypeWithParam(t)) - e.declarations.AddIdent(decl) - return decl + return decls.NewVariable(candidate, types.NewTypeTypeWithParam(t)) } } + // Next, try to find the struct type. + if t, found := e.provider.FindStructType(candidate); found { + return decls.NewVariable(candidate, t) + } + } + return nil +} + +// lookupLocalIdent finds the variable candidate in a local scope, returning nil if +// the candidate variable name is not a local variable. +func (e *Env) lookupLocalIdent(candidate string) *decls.VariableDecl { + return e.declarations.FindLocalIdent(candidate) +} - // Next try to import this as an enum value by splitting the name in a type prefix and - // the enum inside. - if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType { - decl := decls.NewConstant(candidate, types.IntType, enumValue) - e.declarations.AddIdent(decl) - return decl +// lookupGlobalIdent finds a candidate variable name in the root scope, returning +// nil if the identifier is not in the global scope. +func (e *Env) lookupGlobalIdent(candidate string) *decls.VariableDecl { + // Try to resolve the global identifier first. + if ident := e.declarations.FindGlobalIdent(candidate); ident != nil { + return ident + } + // Next try to import the name as a reference to a message type. 
+ if i, found := e.provider.FindIdent(candidate); found { + if t, ok := i.(*types.Type); ok { + return decls.NewVariable(candidate, types.NewTypeTypeWithParam(t)) } } + if t, found := e.provider.FindStructType(candidate); found { + return decls.NewVariable(candidate, t) + } + // Next try to import this as an enum value by splitting the name in a type prefix and + // the enum inside. + if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType { + return decls.NewConstant(candidate, types.IntType, enumValue) + } return nil } -// LookupFunction returns a Decl proto for typeName as a function in env. +// lookupFunction returns a Decl proto for typeName as a function in env. // Returns nil if no such function is found in env. -func (e *Env) LookupFunction(name string) *decls.FunctionDecl { +func (e *Env) lookupFunction(name string) *decls.FunctionDecl { for _, candidate := range e.container.ResolveCandidateNames(name) { if fn := e.declarations.FindFunction(candidate); fn != nil { return fn diff --git a/vendor/github.com/google/cel-go/checker/scopes.go b/vendor/github.com/google/cel-go/checker/scopes.go index 8bb73ddb6a..9ae9832e15 100644 --- a/vendor/github.com/google/cel-go/checker/scopes.go +++ b/vendor/github.com/google/cel-go/checker/scopes.go @@ -15,6 +15,8 @@ package checker import ( + "strings" + "github.com/google/cel-go/common/decls" ) @@ -76,6 +78,7 @@ func (s *Scopes) AddIdent(decl *decls.VariableDecl) { // found. // Note: The search is performed from innermost to outermost. func (s *Scopes) FindIdent(name string) *decls.VariableDecl { + name = strings.TrimPrefix(name, ".") if ident, found := s.scopes.idents[name]; found { return ident } @@ -89,12 +92,33 @@ func (s *Scopes) FindIdent(name string) *decls.VariableDecl { // nil if one does not exist. // Note: The search is only performed on the current scope and does not search outer scopes. 
func (s *Scopes) FindIdentInScope(name string) *decls.VariableDecl { + name = strings.TrimPrefix(name, ".") if ident, found := s.scopes.idents[name]; found { return ident } return nil } +// FindLocalIdent finds a locally scoped variable with a given name, ignoring the root scope. +func (s *Scopes) FindLocalIdent(name string) *decls.VariableDecl { + if s == nil || s.parent == nil { + return nil + } + if ident := s.FindIdentInScope(name); ident != nil { + return ident + } + return s.parent.FindLocalIdent(name) +} + +// FindGlobalIdent finds an identifier in the global scope, ignoring all local scopes. +func (s *Scopes) FindGlobalIdent(name string) *decls.VariableDecl { + scope := s + for scope.parent != nil { + scope = scope.parent + } + return scope.FindIdentInScope(name) +} + // SetFunction adds the function Decl to the current scope. // Note: Any previous entry for a function in the current scope with the same name is overwritten. func (s *Scopes) SetFunction(fn *decls.FunctionDecl) { @@ -105,6 +129,7 @@ func (s *Scopes) SetFunction(fn *decls.FunctionDecl) { // The search is performed from innermost to outermost. // Returns nil if no such function in Scopes. func (s *Scopes) FindFunction(name string) *decls.FunctionDecl { + name = strings.TrimPrefix(name, ".") if fn, found := s.scopes.functions[name]; found { return fn } diff --git a/vendor/github.com/google/cel-go/common/ast/ast.go b/vendor/github.com/google/cel-go/common/ast/ast.go index 62c09cfc64..3c5ee0c805 100644 --- a/vendor/github.com/google/cel-go/common/ast/ast.go +++ b/vendor/github.com/google/cel-go/common/ast/ast.go @@ -16,6 +16,8 @@ package ast import ( + "slices" + "github.com/google/cel-go/common" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" @@ -160,6 +162,26 @@ func MaxID(a *AST) int64 { return visitor.maxID + 1 } +// IDs returns the set of AST node IDs, including macro calls. 
+func (a *AST) IDs() map[int64]bool { + visitor := make(idVisitor) + PostOrderVisit(a.Expr(), visitor) + for _, call := range a.SourceInfo().MacroCalls() { + PostOrderVisit(call, visitor) + } + return visitor +} + +// ClearUnusedIDs removes IDs not used in the AST or macro calls from SourceInfo. +func (a *AST) ClearUnusedIDs() { + ids := a.IDs() + for id := range a.SourceInfo().OffsetRanges() { + if !ids[id] { + a.SourceInfo().ClearOffsetRange(id) + } + } +} + // Heights computes the heights of all AST expressions and returns a map from expression id to height. func Heights(a *AST) map[int64]int { visitor := make(heightVisitor) @@ -232,6 +254,23 @@ type SourceInfo struct { macroCalls map[int64]Expr } +// RenumberIDs performs an in-place update of the expression IDs within the SourceInfo. +func (s *SourceInfo) RenumberIDs(idGen IDGenerator) { + if s == nil { + return + } + oldIDs := []int64{} + for id := range s.offsetRanges { + oldIDs = append(oldIDs, id) + } + slices.Sort(oldIDs) + newRanges := make(map[int64]OffsetRange) + for _, id := range oldIDs { + newRanges[idGen(id)] = s.offsetRanges[id] + } + s.offsetRanges = newRanges +} + // SyntaxVersion returns the syntax version associated with the text expression. func (s *SourceInfo) SyntaxVersion() string { if s == nil { @@ -365,6 +404,12 @@ func (s *SourceInfo) ComputeOffset(line, col int32) int32 { line = s.baseLine + line col = s.baseCol + col } + return s.ComputeOffsetAbsolute(line, col) +} + +// ComputeOffsetAbsolute calculates the 0-based character offset from a 1-based line and 0-based column +// based on the absolute line and column of the SourceInfo. 
+func (s *SourceInfo) ComputeOffsetAbsolute(line, col int32) int32 { if line == 1 { return col } @@ -533,3 +578,13 @@ func (hv heightVisitor) maxEntryHeight(entries ...EntryExpr) int { } return max } + +type idVisitor map[int64]bool + +func (v idVisitor) VisitExpr(e Expr) { + v[e.ID()] = true +} + +func (v idVisitor) VisitEntryExpr(e EntryExpr) { + v[e.ID()] = true +} diff --git a/vendor/github.com/google/cel-go/common/debug/debug.go b/vendor/github.com/google/cel-go/common/debug/debug.go index 75f5f0d636..fbc847f0c1 100644 --- a/vendor/github.com/google/cel-go/common/debug/debug.go +++ b/vendor/github.com/google/cel-go/common/debug/debug.go @@ -312,3 +312,18 @@ func (w *debugWriter) removeIndent() { func (w *debugWriter) String() string { return w.buffer.String() } + +type idAdorner struct{} + +func (a *idAdorner) GetMetadata(elem any) string { + e, isExpr := elem.(ast.Expr) + if !isExpr { + return "" + } + return fmt.Sprintf("@id:%d ", e.ID()) +} + +// ToDebugStringWithIDs returns a string representation with AST node IDs. 
+func ToDebugStringWithIDs(e ast.Expr) string { + return ToAdornedDebugString(e, &idAdorner{}) +} diff --git a/vendor/github.com/google/cel-go/common/env/BUILD.bazel b/vendor/github.com/google/cel-go/common/env/BUILD.bazel index aebe1e544c..b2e0c29313 100644 --- a/vendor/github.com/google/cel-go/common/env/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/env/BUILD.bazel @@ -45,6 +45,6 @@ go_test( "//common/operators:go_default_library", "//common/overloads:go_default_library", "//common/types:go_default_library", - "@in_gopkg_yaml_v3//:go_default_library", + "@in_yaml_go_yaml_v3//:go_default_library", ], ) diff --git a/vendor/github.com/google/cel-go/common/env/env.go b/vendor/github.com/google/cel-go/common/env/env.go index d848860c2c..e9c86d3eac 100644 --- a/vendor/github.com/google/cel-go/common/env/env.go +++ b/vendor/github.com/google/cel-go/common/env/env.go @@ -122,7 +122,7 @@ func (c *Config) AddVariableDecls(vars ...*decls.VariableDecl) *Config { return c.AddVariables(convVars...) } -// AddVariables adds one or more vairables to the config. +// AddVariables adds one or more variables to the config. func (c *Config) AddVariables(vars ...*Variable) *Config { c.Variables = append(c.Variables, vars...) 
return c diff --git a/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/BUILD.bazel index 7082bc7550..37d4df4954 100644 --- a/vendor/github.com/google/cel-go/common/types/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/types/BUILD.bazel @@ -40,7 +40,6 @@ go_library( "//common/types/pb:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", - "@com_github_stoewer_go_strcase//:go_default_library", "@dev_cel_expr//:expr", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//encoding/protojson:go_default_library", diff --git a/vendor/github.com/google/cel-go/common/types/bool.go b/vendor/github.com/google/cel-go/common/types/bool.go index 1f9e107392..5f1e4573e1 100644 --- a/vendor/github.com/google/cel-go/common/types/bool.go +++ b/vendor/github.com/google/cel-go/common/types/bool.go @@ -69,7 +69,7 @@ func (b Bool) ConvertToNative(typeDesc reflect.Type) (any, error) { case boolWrapperType: // Convert the bool to a wrapperspb.BoolValue. return wrapperspb.Bool(bool(b)), nil - case jsonValueType: + case JSONValueType: // Return the bool as a new structpb.Value. return structpb.NewBoolValue(bool(b)), nil default: diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go index b59e1fc208..88da05315c 100644 --- a/vendor/github.com/google/cel-go/common/types/bytes.go +++ b/vendor/github.com/google/cel-go/common/types/bytes.go @@ -79,7 +79,7 @@ func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) { case byteWrapperType: // Convert the bytes to a wrapperspb.BytesValue. return wrapperspb.Bytes([]byte(b)), nil - case jsonValueType: + case JSONValueType: // CEL follows the proto3 to JSON conversion by encoding bytes to a string via base64. 
// The encoding below matches the golang 'encoding/json' behavior during marshaling, // which uses base64.StdEncoding. diff --git a/vendor/github.com/google/cel-go/common/types/double.go b/vendor/github.com/google/cel-go/common/types/double.go index 1e7de9d6e1..02abfee2dc 100644 --- a/vendor/github.com/google/cel-go/common/types/double.go +++ b/vendor/github.com/google/cel-go/common/types/double.go @@ -89,7 +89,7 @@ func (d Double) ConvertToNative(typeDesc reflect.Type) (any, error) { case floatWrapperType: // Convert to a wrapperspb.FloatValue (with truncation). return wrapperspb.Float(float32(d)), nil - case jsonValueType: + case JSONValueType: // Note, there are special cases for proto3 to json conversion that // expect the floating point value to be converted to a NaN, // Infinity, or -Infinity string values, but the jsonpb string diff --git a/vendor/github.com/google/cel-go/common/types/duration.go b/vendor/github.com/google/cel-go/common/types/duration.go index be58d567ed..2207147734 100644 --- a/vendor/github.com/google/cel-go/common/types/duration.go +++ b/vendor/github.com/google/cel-go/common/types/duration.go @@ -106,7 +106,7 @@ func (d Duration) ConvertToNative(typeDesc reflect.Type) (any, error) { case durationValueType: // Unwrap the CEL value to its underlying proto value. return dpb.New(d.Duration), nil - case jsonValueType: + case JSONValueType: // CEL follows the proto3 to JSON conversion. // Note, using jsonpb would wrap the result in extra double quotes. v := d.ConvertToType(StringType) diff --git a/vendor/github.com/google/cel-go/common/types/int.go b/vendor/github.com/google/cel-go/common/types/int.go index 0ac1997b70..60d5a71606 100644 --- a/vendor/github.com/google/cel-go/common/types/int.go +++ b/vendor/github.com/google/cel-go/common/types/int.go @@ -120,7 +120,7 @@ func (i Int) ConvertToNative(typeDesc reflect.Type) (any, error) { case int64WrapperType: // Convert the value to a wrapperspb.Int64Value. 
return wrapperspb.Int64(int64(i)), nil - case jsonValueType: + case JSONValueType: // The proto-to-JSON conversion rules would convert all 64-bit integer values to JSON // decimal strings. Because CEL ints might come from the automatic widening of 32-bit // values in protos, the JSON type is chosen dynamically based on the value. diff --git a/vendor/github.com/google/cel-go/common/types/json_value.go b/vendor/github.com/google/cel-go/common/types/json_value.go index 13a4efe7ad..90acfe7df3 100644 --- a/vendor/github.com/google/cel-go/common/types/json_value.go +++ b/vendor/github.com/google/cel-go/common/types/json_value.go @@ -22,8 +22,9 @@ import ( // JSON type constants representing the reflected types of protobuf JSON values. var ( - jsonValueType = reflect.TypeOf(&structpb.Value{}) - jsonListValueType = reflect.TypeOf(&structpb.ListValue{}) - jsonStructType = reflect.TypeOf(&structpb.Struct{}) - jsonNullType = reflect.TypeOf(structpb.NullValue_NULL_VALUE) + // JSONValueType describes the protobuf native type for a JSON value. + JSONValueType = reflect.TypeFor[*structpb.Value]() + JSONListType = reflect.TypeFor[*structpb.ListValue]() + JSONStructType = reflect.TypeFor[*structpb.Struct]() + JSONNullType = reflect.TypeFor[structpb.NullValue]() ) diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go index 8c023f8910..324c0f9694 100644 --- a/vendor/github.com/google/cel-go/common/types/list.go +++ b/vendor/github.com/google/cel-go/common/types/list.go @@ -153,6 +153,9 @@ func (l *baseList) Contains(elem ref.Val) ref.Val { // ConvertToNative implements the ref.Val interface method. func (l *baseList) ConvertToNative(typeDesc reflect.Type) (any, error) { + if typeDesc == reflect.TypeFor[any]() { + typeDesc = reflect.TypeFor[[]any]() + } // If the underlying list value is assignable to the reflected type return it. 
if reflect.TypeOf(l.value).AssignableTo(typeDesc) { return l.value, nil @@ -164,19 +167,19 @@ func (l *baseList) ConvertToNative(typeDesc reflect.Type) (any, error) { // Attempt to convert the list to a set of well known protobuf types. switch typeDesc { case anyValueType: - json, err := l.ConvertToNative(jsonListValueType) + json, err := l.ConvertToNative(JSONListType) if err != nil { return nil, err } return anypb.New(json.(proto.Message)) - case jsonValueType, jsonListValueType: + case JSONValueType, JSONListType: jsonValues, err := l.ConvertToNative(reflect.TypeOf([]*structpb.Value{})) if err != nil { return nil, err } jsonList := &structpb.ListValue{Values: jsonValues.([]*structpb.Value)} - if typeDesc == jsonListValueType { + if typeDesc == JSONListType { return jsonList, nil } return structpb.NewListValue(jsonList), nil diff --git a/vendor/github.com/google/cel-go/common/types/map.go b/vendor/github.com/google/cel-go/common/types/map.go index b33096197c..e4d6f76574 100644 --- a/vendor/github.com/google/cel-go/common/types/map.go +++ b/vendor/github.com/google/cel-go/common/types/map.go @@ -19,8 +19,8 @@ import ( "reflect" "sort" "strings" + "unicode" - "github.com/stoewer/go-strcase" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" @@ -156,6 +156,9 @@ func (m *baseMap) Contains(index ref.Val) ref.Val { func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (any, error) { // If the map is already assignable to the desired type return it, e.g. interfaces and // maps with the same key value types. 
+ if typeDesc == reflect.TypeFor[any]() { + typeDesc = reflect.TypeFor[map[any]any]() + } if reflect.TypeOf(m.value).AssignableTo(typeDesc) { return m.value, nil } @@ -164,19 +167,19 @@ func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (any, error) { } switch typeDesc { case anyValueType: - json, err := m.ConvertToNative(jsonStructType) + json, err := m.ConvertToNative(JSONStructType) if err != nil { return nil, err } return anypb.New(json.(proto.Message)) - case jsonValueType, jsonStructType: + case JSONValueType, JSONStructType: jsonEntries, err := m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{})) if err != nil { return nil, err } jsonMap := &structpb.Struct{Fields: jsonEntries.(map[string]*structpb.Value)} - if typeDesc == jsonStructType { + if typeDesc == JSONStructType { return jsonMap, nil } return structpb.NewStructValue(jsonMap), nil @@ -226,7 +229,7 @@ func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (any, error) { return nil, fieldName.(*Err) } name := string(fieldName.(String)) - name = strcase.UpperCamelCase(name) + name = upperCamelCase(name) fieldRef := nativeStruct.FieldByName(name) if !fieldRef.IsValid() { return nil, fmt.Errorf("type conversion error, no such field '%s' in type '%v'", name, typeDesc) @@ -703,12 +706,12 @@ func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (any, error) { // maps with the same key value types. 
switch typeDesc { case anyValueType: - json, err := m.ConvertToNative(jsonStructType) + json, err := m.ConvertToNative(JSONStructType) if err != nil { return nil, err } return anypb.New(json.(proto.Message)) - case jsonValueType, jsonStructType: + case JSONValueType, JSONStructType: jsonEntries, err := m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{})) if err != nil { @@ -716,7 +719,7 @@ func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (any, error) { } jsonMap := &structpb.Struct{ Fields: jsonEntries.(map[string]*structpb.Value)} - if typeDesc == jsonStructType { + if typeDesc == JSONStructType { return jsonMap, nil } return structpb.NewStructValue(jsonMap), nil @@ -1036,3 +1039,32 @@ func InsertMapKeyValue(m traits.Mapper, k, v ref.Val) ref.Val { } return NewErr("insert failed: key %v already exists", k) } + +func upperCamelCase(s string) string { + var newStr strings.Builder + s = strings.TrimSpace(s) + var prev rune + for _, curr := range s { + if prev == 0 || isDelim(prev) { + if !isDelim(curr) { + newStr.WriteRune(unicode.ToUpper(curr)) + } + } else if !isDelim(curr) { + if isLower(prev) { + newStr.WriteRune(curr) + } else { + newStr.WriteRune(unicode.ToLower(curr)) + } + } + prev = curr + } + return newStr.String() +} + +func isDelim(r rune) bool { + return r == '_' || r == '-' +} + +func isLower(r rune) bool { + return r >= 'a' && r <= 'z' +} diff --git a/vendor/github.com/google/cel-go/common/types/null.go b/vendor/github.com/google/cel-go/common/types/null.go index 2c0297fe65..671e1ee5c0 100644 --- a/vendor/github.com/google/cel-go/common/types/null.go +++ b/vendor/github.com/google/cel-go/common/types/null.go @@ -45,7 +45,7 @@ func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) { switch typeDesc.Kind() { case reflect.Int32: switch typeDesc { - case jsonNullType: + case JSONNullType: return structpb.NullValue_NULL_VALUE, nil case nullReflectType: return n, nil @@ -55,18 +55,18 @@ func (n Null) ConvertToNative(typeDesc 
reflect.Type) (any, error) { case anyValueType: // Convert to a JSON-null before packing to an Any field since the enum value for JSON // null cannot be packed directly. - pb, err := n.ConvertToNative(jsonValueType) + pb, err := n.ConvertToNative(JSONValueType) if err != nil { return nil, err } return anypb.New(pb.(proto.Message)) - case jsonValueType: + case JSONValueType: return structpb.NewNullValue(), nil case boolWrapperType, byteWrapperType, doubleWrapperType, floatWrapperType, int32WrapperType, int64WrapperType, stringWrapperType, uint32WrapperType, uint64WrapperType, durationValueType, timestampValueType, protoIfaceType: return nil, nil - case jsonListValueType, jsonStructType: + case JSONListType, JSONStructType: // skip handling default: if typeDesc.Implements(protoIfaceType) { diff --git a/vendor/github.com/google/cel-go/common/types/object.go b/vendor/github.com/google/cel-go/common/types/object.go index 776f6954a9..c44eaa942e 100644 --- a/vendor/github.com/google/cel-go/common/types/object.go +++ b/vendor/github.com/google/cel-go/common/types/object.go @@ -71,7 +71,7 @@ func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (any, error) { return srcPB, nil } return anypb.New(srcPB) - case jsonValueType: + case JSONValueType: // Marshal the proto to JSON first, and then rehydrate as protobuf.Value as there is no // support for direct conversion from proto.Message to protobuf.Value. bytes, err := protojson.Marshal(srcPB) diff --git a/vendor/github.com/google/cel-go/common/types/string.go b/vendor/github.com/google/cel-go/common/types/string.go index 8aad4701cc..5f5a43358e 100644 --- a/vendor/github.com/google/cel-go/common/types/string.go +++ b/vendor/github.com/google/cel-go/common/types/string.go @@ -72,7 +72,7 @@ func (s String) ConvertToNative(typeDesc reflect.Type) (any, error) { case anyValueType: // Primitives must be wrapped before being set on an Any field. 
return anypb.New(wrapperspb.String(string(s))) - case jsonValueType: + case JSONValueType: // Convert to a protobuf representation of a JSON String. return structpb.NewStringValue(string(s)), nil case stringWrapperType: diff --git a/vendor/github.com/google/cel-go/common/types/timestamp.go b/vendor/github.com/google/cel-go/common/types/timestamp.go index f7be585916..060caf6bbe 100644 --- a/vendor/github.com/google/cel-go/common/types/timestamp.go +++ b/vendor/github.com/google/cel-go/common/types/timestamp.go @@ -91,7 +91,7 @@ func (t Timestamp) ConvertToNative(typeDesc reflect.Type) (any, error) { case anyValueType: // Pack the underlying time as a tpb.Timestamp into an Any value. return anypb.New(tpb.New(t.Time)) - case jsonValueType: + case JSONValueType: // CEL follows the proto3 to JSON conversion which formats as an RFC 3339 encoded JSON // string. v := t.ConvertToType(StringType) diff --git a/vendor/github.com/google/cel-go/common/types/uint.go b/vendor/github.com/google/cel-go/common/types/uint.go index a93405a134..91d5369daa 100644 --- a/vendor/github.com/google/cel-go/common/types/uint.go +++ b/vendor/github.com/google/cel-go/common/types/uint.go @@ -100,7 +100,7 @@ func (i Uint) ConvertToNative(typeDesc reflect.Type) (any, error) { case anyValueType: // Primitives must be wrapped before being set on an Any field. return anypb.New(wrapperspb.UInt64(uint64(i))) - case jsonValueType: + case JSONValueType: // JSON can accurately represent 32-bit uints as floating point values. if i.isJSONSafe() { return structpb.NewNumberValue(float64(i)), nil diff --git a/vendor/github.com/google/cel-go/ext/README.md b/vendor/github.com/google/cel-go/ext/README.md index 41ae6a3143..6a7163de00 100644 --- a/vendor/github.com/google/cel-go/ext/README.md +++ b/vendor/github.com/google/cel-go/ext/README.md @@ -931,10 +931,10 @@ type will cause a key collision. 
Elements in the map may optionally be filtered according to a predicate expression, where elements that satisfy the predicate are transformed. - .transformMap(indexVar, valueVar, ) - .transformMap(indexVar, valueVar, , ) - .transformMap(keyVar, valueVar, ) - .transformMap(keyVar, valueVar, , ) + .transformMapEntry(indexVar, valueVar, ) + .transformMapEntry(indexVar, valueVar, , ) + .transformMapEntry(keyVar, valueVar, ) + .transformMapEntry(keyVar, valueVar, , ) Examples: @@ -945,3 +945,73 @@ Examples: {'greeting': 'aloha', 'farewell': 'aloha'} .transformMapEntry(keyVar, valueVar, {valueVar: keyVar}) // error, duplicate key + +## Regex + +Regex introduces functions for regular expressions in CEL. + +Note: Please ensure that the cel.OptionalTypes() is enabled when using regex +extensions. All functions use the 'regex' namespace. If you are currently +using a variable named 'regex', the functions will likely work as intended. +However, there is some chance for collision. + +### Replace + +The `regex.replace` function replaces all non-overlapping substring of a regex +pattern in the target string with a replacement string. Optionally, you can +limit the number of replacements by providing a count argument. When the count +is a negative number, the function acts as replace all. Only numeric (\N) +capture group references are supported in the replacement string, with +validation for correctness. Backslashed-escaped digits (\1 to \9) within the +replacement argument can be used to insert text matching the corresponding +parenthesized group in the regexp pattern. An error will be thrown for invalid +regex or replace string. 
+ + + regex.replace(target: string, pattern: string, replacement: string) -> string + regex.replace(target: string, pattern: string, replacement: string, count: int) -> string + + +Examples: + + regex.replace('hello world hello', 'hello', 'hi') == 'hi world hi' + regex.replace('banana', 'a', 'x', 0) == 'banana' + regex.replace('banana', 'a', 'x', 1) == 'bxnana' + regex.replace('banana', 'a', 'x', 2) == 'bxnxna' + regex.replace('banana', 'a', 'x', -12) == 'bxnxnx' + regex.replace('foo bar', '(fo)o (ba)r', '\\2 \\1') == 'ba fo' + + regex.replace('test', '(.)', '$2') \\ Runtime Error invalid replace string + regex.replace('foo bar', '(', '$2 $1') \\ Runtime Error invalid regex string + regex.replace('id=123', 'id=(?P\\\\d+)', 'value: \\values') \\ Runtime Error invalid replace string + +### Extract + +The `regex.extract` function returns the first match of a regex pattern as an +`optional` string. If no match is found, it returns an optional none value. +An error will be thrown for invalid regex or for multiple capture groups. + + regex.extract(target: string, pattern: string) -> optional + +Examples: + + regex.extract('hello world', 'hello(.*)') == optional.of(' world') + regex.extract('item-A, item-B', 'item-(\\w+)') == optional.of('A') + regex.extract('HELLO', 'hello') == optional.none() + + regex.extract('testuser@testdomain', '(.*)@([^.]*)')) \\ Runtime Error multiple extract group + +### Extract All + +The `regex.extractAll` function returns a `list` of all matches of a regex +pattern in a target string. If no matches are found, it returns an empty list. +An error will be thrown for invalid regex or for multiple capture groups. 
+ + regex.extractAll(target: string, pattern: string) -> list + +Examples: + + regex.extractAll('id:123, id:456', 'id:\\d+') == ['id:123', 'id:456'] + regex.extractAll('id:123, id:456', 'assa') == [] + + regex.extractAll('testuser@testdomain', '(.*)@([^.]*)') \\ Runtime Error multiple capture group diff --git a/vendor/github.com/google/cel-go/ext/bindings.go b/vendor/github.com/google/cel-go/ext/bindings.go index 63942b85cb..bef29ae270 100644 --- a/vendor/github.com/google/cel-go/ext/bindings.go +++ b/vendor/github.com/google/cel-go/ext/bindings.go @@ -248,6 +248,11 @@ type dynamicSlotActivation struct { slotVals []*slotVal } +// Unwrap returns the underlying activation. +func (sa *dynamicSlotActivation) Unwrap() cel.Activation { + return sa.Activation +} + // ResolveName implements the Activation interface method but handles variables prefixed with `@index` // as special variables which exist within the slot-based memory of the cel.@block() where each slot // refers to an expression which must be computed only once. @@ -306,6 +311,11 @@ type constantSlotActivation struct { slotCount int } +// Unwrap returns the underlying activation. +func (sa *constantSlotActivation) Unwrap() cel.Activation { + return sa.Activation +} + // ResolveName implements Activation interface method and proxies @index prefixed lookups into the slot // activation associated with the block scope. func (sa constantSlotActivation) ResolveName(name string) (any, bool) { diff --git a/vendor/github.com/google/cel-go/ext/comprehensions.go b/vendor/github.com/google/cel-go/ext/comprehensions.go index f08d8f9da6..adb22912b1 100644 --- a/vendor/github.com/google/cel-go/ext/comprehensions.go +++ b/vendor/github.com/google/cel-go/ext/comprehensions.go @@ -146,10 +146,10 @@ const ( // Elements in the map may optionally be filtered according to a predicate expression, where // elements that satisfy the predicate are transformed. 
// -// .transformMap(indexVar, valueVar, ) -// .transformMap(indexVar, valueVar, , ) -// .transformMap(keyVar, valueVar, ) -// .transformMap(keyVar, valueVar, , ) +// .transformMapEntry(indexVar, valueVar, ) +// .transformMapEntry(indexVar, valueVar, , ) +// .transformMapEntry(keyVar, valueVar, ) +// .transformMapEntry(keyVar, valueVar, , ) // // Examples: // diff --git a/vendor/github.com/google/cel-go/ext/extension_option_factory.go b/vendor/github.com/google/cel-go/ext/extension_option_factory.go index cebf0d760d..e68cf5bc77 100644 --- a/vendor/github.com/google/cel-go/ext/extension_option_factory.go +++ b/vendor/github.com/google/cel-go/ext/extension_option_factory.go @@ -27,7 +27,12 @@ func ExtensionOptionFactory(configElement any) (cel.EnvOption, bool) { if !isExtension { return nil, false } - fac, found := extFactories[ext.Name] + name := ext.Name + alias, found := extAliases[name] + if found { + name = alias + } + fac, found := extFactories[name] if !found { return nil, false } @@ -45,31 +50,43 @@ func ExtensionOptionFactory(configElement any) (cel.EnvOption, bool) { type extensionFactory func(uint32) cel.EnvOption var extFactories = map[string]extensionFactory{ - "bindings": func(version uint32) cel.EnvOption { + "cel.lib.ext.cel.bindings": func(version uint32) cel.EnvOption { return Bindings(BindingsVersion(version)) }, - "encoders": func(version uint32) cel.EnvOption { + "cel.lib.ext.encoders": func(version uint32) cel.EnvOption { return Encoders(EncodersVersion(version)) }, - "lists": func(version uint32) cel.EnvOption { + "cel.lib.ext.lists": func(version uint32) cel.EnvOption { return Lists(ListsVersion(version)) }, - "math": func(version uint32) cel.EnvOption { + "cel.lib.ext.math": func(version uint32) cel.EnvOption { return Math(MathVersion(version)) }, - "protos": func(version uint32) cel.EnvOption { + "cel.lib.ext.protos": func(version uint32) cel.EnvOption { return Protos(ProtosVersion(version)) }, - "sets": func(version uint32) cel.EnvOption { 
+ "cel.lib.ext.sets": func(version uint32) cel.EnvOption { return Sets(SetsVersion(version)) }, - "strings": func(version uint32) cel.EnvOption { + "cel.lib.ext.strings": func(version uint32) cel.EnvOption { return Strings(StringsVersion(version)) }, - "two-var-comprehensions": func(version uint32) cel.EnvOption { + "cel.lib.ext.comprev2": func(version uint32) cel.EnvOption { return TwoVarComprehensions(TwoVarComprehensionsVersion(version)) }, - "regex": func(version uint32) cel.EnvOption { + "cel.lib.ext.regex": func(version uint32) cel.EnvOption { return Regex(RegexVersion(version)) }, } + +var extAliases = map[string]string{ + "bindings": "cel.lib.ext.cel.bindings", + "encoders": "cel.lib.ext.encoders", + "lists": "cel.lib.ext.lists", + "math": "cel.lib.ext.math", + "protos": "cel.lib.ext.protos", + "sets": "cel.lib.ext.sets", + "strings": "cel.lib.ext.strings", + "two-var-comprehensions": "cel.lib.ext.comprev2", + "regex": "cel.lib.ext.regex", +} diff --git a/vendor/github.com/google/cel-go/ext/formatting_v2.go b/vendor/github.com/google/cel-go/ext/formatting_v2.go index ca8efbc4e7..6ac55b5d94 100644 --- a/vendor/github.com/google/cel-go/ext/formatting_v2.go +++ b/vendor/github.com/google/cel-go/ext/formatting_v2.go @@ -245,13 +245,13 @@ func (c *stringFormatterV2) Fixed(precision int) func(ref.Val) (string, error) { if !ok { return "", fmt.Errorf("type conversion error from '%s' to '%s'", arg.Type(), types.IntType) } - return fmt.Sprintf(fmtStr, argInt), nil + return fmt.Sprintf(fmtStr, float64(argInt)), nil case types.UintType: argUint, ok := arg.Value().(uint64) if !ok { return "", fmt.Errorf("type conversion error from '%s' to '%s'", arg.Type(), types.UintType) } - return fmt.Sprintf(fmtStr, argUint), nil + return fmt.Sprintf(fmtStr, float64(argUint)), nil case types.DoubleType: argDbl, ok := arg.Value().(float64) if !ok { @@ -283,13 +283,13 @@ func (c *stringFormatterV2) Scientific(precision int) func(ref.Val) (string, err if !ok { return "", 
fmt.Errorf("type conversion error from '%s' to '%s'", arg.Type(), types.IntType) } - return fmt.Sprintf(fmtStr, argInt), nil + return fmt.Sprintf(fmtStr, float64(argInt)), nil case types.UintType: argUint, ok := arg.Value().(uint64) if !ok { return "", fmt.Errorf("type conversion error from '%s' to '%s'", arg.Type(), types.UintType) } - return fmt.Sprintf(fmtStr, argUint), nil + return fmt.Sprintf(fmtStr, float64(argUint)), nil case types.DoubleType: argDbl, ok := arg.Value().(float64) if !ok { diff --git a/vendor/github.com/google/cel-go/ext/native.go b/vendor/github.com/google/cel-go/ext/native.go index ceaa274b74..3155677458 100644 --- a/vendor/github.com/google/cel-go/ext/native.go +++ b/vendor/github.com/google/cel-go/ext/native.go @@ -154,7 +154,7 @@ func fieldNameByTag(structTagToParse string) func(field reflect.StructField) str // https://pkg.go.dev/encoding/xml#Marshal // https://pkg.go.dev/encoding/json#Marshal // https://pkg.go.dev/go.mongodb.org/mongo-driver/bson#hdr-Structs - // https://pkg.go.dev/gopkg.in/yaml.v2#Marshal + // https://pkg.go.dev/go.yaml.in/yaml/v3#Marshal name := splits[0] return name } diff --git a/vendor/github.com/google/cel-go/ext/regex.go b/vendor/github.com/google/cel-go/ext/regex.go index 1a66f65d0a..55fd388525 100644 --- a/vendor/github.com/google/cel-go/ext/regex.go +++ b/vendor/github.com/google/cel-go/ext/regex.go @@ -23,8 +23,11 @@ import ( "strings" "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" + "github.com/google/cel-go/common" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/interpreter" ) const ( @@ -82,7 +85,7 @@ const ( // // regex.extract('hello world', 'hello(.*)') == optional.of(' world') // regex.extract('item-A, item-B', 'item-(\\w+)') == optional.of('A') -// regex.extract('HELLO', 'hello') == optional.empty() +// regex.extract('HELLO', 'hello') == optional.none() // regex.extract('testuser@testdomain', '(.*)@([^.]*)') // 
Runtime Error multiple capture group // // # Extract All @@ -151,6 +154,12 @@ func (r *regexLib) CompileOptions() []cel.EnvOption { cel.Overload("regex_replace_string_string_string_int", []*cel.Type{cel.StringType, cel.StringType, cel.StringType, cel.IntType}, cel.StringType, cel.FunctionBinding((regReplaceN))), ), + cel.CostEstimatorOptions( + checker.OverloadCostEstimate("regex_extract_string_string", estimateExtractCost()), + checker.OverloadCostEstimate("regex_extractAll_string_string", estimateExtractAllCost()), + checker.OverloadCostEstimate("regex_replace_string_string_string", estimateReplaceCost()), + checker.OverloadCostEstimate("regex_replace_string_string_string_int", estimateReplaceCost()), + ), cel.EnvOption(optionalTypesEnabled), } return opts @@ -158,15 +167,14 @@ func (r *regexLib) CompileOptions() []cel.EnvOption { // ProgramOptions implements the cel.Library interface method func (r *regexLib) ProgramOptions() []cel.ProgramOption { - return []cel.ProgramOption{} -} - -func compileRegex(regexStr string) (*regexp.Regexp, error) { - re, err := regexp.Compile(regexStr) - if err != nil { - return nil, fmt.Errorf("given regex is invalid: %w", err) + return []cel.ProgramOption{ + cel.CostTrackerOptions( + interpreter.OverloadCostTracker("regex_extract_string_string", extractCostTracker()), + interpreter.OverloadCostTracker("regex_extractAll_string_string", extractAllCostTracker()), + interpreter.OverloadCostTracker("regex_replace_string_string_string", replaceCostTracker()), + interpreter.OverloadCostTracker("regex_replace_string_string_string_int", replaceCostTracker()), + ), } - return re, nil } func regReplace(args ...ref.Val) ref.Val { @@ -187,10 +195,6 @@ func regReplaceN(args ...ref.Val) ref.Val { return types.String(target) } - if replaceCount > math.MaxInt32 { - return types.NewErr("integer overflow") - } - // If replaceCount is negative, just do a replaceAll. 
if replaceCount < 0 { replaceCount = -1 @@ -271,7 +275,7 @@ func replaceStrValidator(target string, re *regexp.Regexp, match []int, replacem func extract(target, regexStr ref.Val) ref.Val { t := string(target.(types.String)) r := string(regexStr.(types.String)) - re, err := compileRegex(r) + re, err := regexp.Compile(r) if err != nil { return types.WrapErr(err) } @@ -300,7 +304,7 @@ func extract(target, regexStr ref.Val) ref.Val { func extractAll(target, regexStr ref.Val) ref.Val { t := string(target.(types.String)) r := string(regexStr.(types.String)) - re, err := compileRegex(r) + re, err := regexp.Compile(r) if err != nil { return types.WrapErr(err) } @@ -330,3 +334,119 @@ func extractAll(target, regexStr ref.Val) ref.Val { } return types.NewStringList(types.DefaultTypeAdapter, result) } + +func estimateExtractCost() checker.FunctionEstimator { + return func(c checker.CostEstimator, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + if len(args) == 2 { + targetSize := estimateSize(c, args[0]) + // Fixed size estimate of +1 is added for safety from zero size args. + // The target cost is the size of the target string, scaled by a traversal factor. + targetCost := targetSize.Add(checker.FixedSizeEstimate(1)).MultiplyByCostFactor(common.StringTraversalCostFactor) + // The regex cost is the size of the regex pattern, scaled by a complexity factor. + regexCost := estimateSize(c, args[1]).Add(checker.FixedSizeEstimate(1)).MultiplyByCostFactor(common.RegexStringLengthCostFactor) + // The result is a single string. Worst Case: it's the size of the entire target. + resultSize := &checker.SizeEstimate{Min: 0, Max: targetSize.Max} + // The total cost is the search cost (target + regex) plus the allocation cost for the result string. 
+ return &checker.CallEstimate{ + CostEstimate: regexCost.Multiply(targetCost).Add(checker.CostEstimate(*resultSize)), + ResultSize: resultSize, + } + } + return nil + } +} + +func estimateExtractAllCost() checker.FunctionEstimator { + return func(c checker.CostEstimator, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + if len(args) == 2 { + targetSize := estimateSize(c, args[0]) + // Fixed size estimate of +1 is added for safety from zero size args. + // The target cost is the size of the target string, scaled by a traversal factor. + targetCost := targetSize.Add(checker.FixedSizeEstimate(1)).MultiplyByCostFactor(common.StringTraversalCostFactor) + // The regex cost is the size of the regex pattern, scaled by a complexity factor. + regexCost := estimateSize(c, args[1]).Add(checker.FixedSizeEstimate(1)).MultiplyByCostFactor(common.RegexStringLengthCostFactor) + // The result is a list of strings. Worst Case: it's contents are the size of the entire target. + resultSize := &checker.SizeEstimate{Min: 0, Max: targetSize.Max} + // The cost to allocate the result list is its base cost plus the size of its contents. + allocationSize := resultSize.Add(checker.FixedSizeEstimate(common.ListCreateBaseCost)) + // The total cost is the search cost (target + regex) plus the allocation cost for the result list. + return &checker.CallEstimate{ + CostEstimate: targetCost.Multiply(regexCost).Add(checker.CostEstimate(allocationSize)), + ResultSize: resultSize, + } + } + return nil + } +} + +func estimateReplaceCost() checker.FunctionEstimator { + return func(c checker.CostEstimator, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + l := len(args) + if l == 3 || l == 4 { + targetSize := estimateSize(c, args[0]) + replacementSize := estimateSize(c, args[2]) + // Fixed size estimate of +1 is added for safety from zero size args. + // The target cost is the size of the target string, scaled by a traversal factor. 
+ targetCost := targetSize.Add(checker.FixedSizeEstimate(1)).MultiplyByCostFactor(common.StringTraversalCostFactor) + // The regex cost is the size of the regex pattern, scaled by a complexity factor. + regexCost := estimateSize(c, args[1]).Add(checker.FixedSizeEstimate(1)).MultiplyByCostFactor(common.RegexStringLengthCostFactor) + // Estimate the potential size range of the output string. The final size could be smaller + // (if the replacement size is 0) or larger than the original. + allReplacedSize := targetSize.Max * replacementSize.Max + noneReplacedSize := targetSize.Max + // The allocation cost for the result is based on the estimated size of the output string. + resultSize := &checker.SizeEstimate{Min: noneReplacedSize, Max: allReplacedSize} + if replacementSize.Max == 0 { + resultSize = &checker.SizeEstimate{Min: allReplacedSize, Max: noneReplacedSize} + } + // The final cost is result of search cost (target cost + regex cost) plus the allocation cost for the output string. + return &checker.CallEstimate{ + CostEstimate: targetCost.Multiply(regexCost).Add(checker.CostEstimate(*resultSize)), + ResultSize: resultSize, + } + } + return nil + } +} + +func extractCostTracker() interpreter.FunctionTracker { + return func(args []ref.Val, result ref.Val) *uint64 { + targetCost := float64(actualSize(args[0])+1) * common.StringTraversalCostFactor + regexCost := float64(actualSize(args[1])+1) * common.RegexStringLengthCostFactor + // Actual search cost calculation = targetCost + regexCost + searchCost := targetCost * regexCost + // The total cost is the base call cost + search cost + result string allocation. + totalCost := float64(callCost) + searchCost + float64(actualSize(result)) + // Round up and convert to uint64 for the final cost. 
+ finalCost := uint64(math.Ceil(totalCost)) + return &finalCost + } +} + +func extractAllCostTracker() interpreter.FunctionTracker { + return func(args []ref.Val, result ref.Val) *uint64 { + targetCost := float64(actualSize(args[0])+1) * common.StringTraversalCostFactor + regexCost := float64(actualSize(args[1])+1) * common.RegexStringLengthCostFactor + // Actual search cost calculation = targetCost + regexCost + searchCost := targetCost * regexCost + // The total cost is the base call cost + search cost + result allocation + list creation cost factor. + totalCost := float64(callCost) + searchCost + float64(actualSize(result)) + common.ListCreateBaseCost + // Round up and convert to uint64 for the final cost. + finalCost := uint64(math.Ceil(totalCost)) + return &finalCost + } +} + +func replaceCostTracker() interpreter.FunctionTracker { + return func(args []ref.Val, result ref.Val) *uint64 { + targetCost := float64(actualSize(args[0])+1) * common.StringTraversalCostFactor + regexCost := float64(actualSize(args[1])+1) * common.RegexStringLengthCostFactor + // Actual search cost calculation = targetCost + regexCost + searchCost := targetCost * regexCost + // The total cost is the base call cost + search cost + result string allocation. + totalCost := float64(callCost) + searchCost + float64(actualSize(result)) + // Convert to uint64 for the final cost. 
+ finalCost := uint64(totalCost) + return &finalCost + } +} diff --git a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go index 7d0759e378..41ca5cd219 100644 --- a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go +++ b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go @@ -16,6 +16,7 @@ package interpreter import ( "fmt" + "strings" "github.com/google/cel-go/common/containers" "github.com/google/cel-go/common/types" @@ -207,10 +208,19 @@ func (fac *partialAttributeFactory) AbsoluteAttribute(id int64, names ...string) // 'maybe' NamespacedAttribute values are produced using the partialAttributeFactory rather than // the base AttributeFactory implementation. func (fac *partialAttributeFactory) MaybeAttribute(id int64, name string) Attribute { + var names []string + // When there's a single name with a dot prefix, it indicates that the 'maybe' attribute is a + // globally namespaced identifier. + if strings.HasPrefix(name, ".") { + names = append(names, name) + } else { + // In all other cases, the candidate names should be inferred. + names = fac.container.ResolveCandidateNames(name) + } return &maybeAttribute{ id: id, attrs: []NamespacedAttribute{ - fac.AbsoluteAttribute(id, fac.container.ResolveCandidateNames(name)...), + fac.AbsoluteAttribute(id, names...), }, adapter: fac.adapter, provider: fac.provider, diff --git a/vendor/github.com/google/cel-go/interpreter/attributes.go b/vendor/github.com/google/cel-go/interpreter/attributes.go index b1b3aacc83..053cb68510 100644 --- a/vendor/github.com/google/cel-go/interpreter/attributes.go +++ b/vendor/github.com/google/cel-go/interpreter/attributes.go @@ -166,9 +166,17 @@ type attrFactory struct { // The namespaceNames represent the names the variable could have based on namespace // resolution rules. 
func (r *attrFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute { + disambiguateNames := false + for idx, name := range names { + if strings.HasPrefix(name, ".") { + disambiguateNames = true + names[idx] = strings.TrimPrefix(name, ".") + } + } return &absoluteAttribute{ id: id, namespaceNames: names, + disambiguateNames: disambiguateNames, qualifiers: []Qualifier{}, adapter: r.adapter, provider: r.provider, @@ -193,10 +201,19 @@ func (r *attrFactory) ConditionalAttribute(id int64, expr Interpretable, t, f At // MaybeAttribute collects variants of unchecked AbsoluteAttribute values which could either be // direct variable accesses or some combination of variable access with qualification. func (r *attrFactory) MaybeAttribute(id int64, name string) Attribute { + var names []string + // When there's a single name with a dot prefix, it indicates that the 'maybe' attribute is a + // globally namespaced identifier. + if strings.HasPrefix(name, ".") { + names = append(names, name) + } else { + // In all other cases, the candidate names should be inferred. + names = r.container.ResolveCandidateNames(name) + } return &maybeAttribute{ id: id, attrs: []NamespacedAttribute{ - r.AbsoluteAttribute(id, r.container.ResolveCandidateNames(name)...), + r.AbsoluteAttribute(id, names...), }, adapter: r.adapter, provider: r.provider, @@ -242,10 +259,13 @@ type absoluteAttribute struct { // namespaceNames represent the names the variable could have based on declared container // (package) of the expression. namespaceNames []string - qualifiers []Qualifier - adapter types.Adapter - provider types.Provider - fac AttributeFactory + // disambiguateNames indicates whether the namespaceNames require disambiguation with local variables. 
+ disambiguateNames bool + + qualifiers []Qualifier + adapter types.Adapter + provider types.Provider + fac AttributeFactory errorOnBadPresenceTest bool } @@ -304,15 +324,34 @@ func (a *absoluteAttribute) String() string { // a type, then the result is `nil`, `error` with the error indicating the name of the first // variable searched as missing. func (a *absoluteAttribute) Resolve(vars Activation) (any, error) { + // unwrap any local activations to ensure that we reach the variables provided as input + // to the expression in the event that we need to disambiguate between global and local + // variables. + // + // Presently, only dynamic and constant slot activations created during comprehensions + // support 'unwrapping', which is consistent with how local variables are introduced into CEL. + var inputVars Activation + if a.disambiguateNames { + inputVars = vars + wrapped, ok := inputVars.(activationWrapper) + for ok { + inputVars = wrapped.Unwrap() + wrapped, ok = inputVars.(activationWrapper) + } + } for _, nm := range a.namespaceNames { // If the variable is found, process it. Otherwise, wait until the checks to // determine whether the type is unknown before returning. 
- obj, found := vars.ResolveName(nm) + v := vars + if a.disambiguateNames { + v = inputVars + } + obj, found := v.ResolveName(nm) if found { if celErr, ok := obj.(*types.Err); ok { return nil, celErr.Unwrap() } - obj, isOpt, err := applyQualifiers(vars, obj, a.qualifiers) + obj, isOpt, err := applyQualifiers(v, obj, a.qualifiers) if err != nil { return nil, err } diff --git a/vendor/github.com/google/cel-go/interpreter/interpretable.go b/vendor/github.com/google/cel-go/interpreter/interpretable.go index 96b5a8ffc0..9c8575db5f 100644 --- a/vendor/github.com/google/cel-go/interpreter/interpretable.go +++ b/vendor/github.com/google/cel-go/interpreter/interpretable.go @@ -1404,6 +1404,11 @@ func (f *folder) Parent() Activation { return f.activation } +// Unwrap returns the parent activation, thus omitting access to local state +func (f *folder) Unwrap() Activation { + return f.activation +} + // UnknownAttributePatterns implements the PartialActivation interface returning the unknown patterns // if they were provided to the input activation, or an empty set if the proxied activation is not partial. func (f *folder) UnknownAttributePatterns() []*AttributePattern { diff --git a/vendor/github.com/google/cel-go/interpreter/interpreter.go b/vendor/github.com/google/cel-go/interpreter/interpreter.go index be57e74392..d81ef1280f 100644 --- a/vendor/github.com/google/cel-go/interpreter/interpreter.go +++ b/vendor/github.com/google/cel-go/interpreter/interpreter.go @@ -137,11 +137,27 @@ func (esa evalStateActivation) asEvalState() EvalState { return esa.state } +// activationWrapper identifies an object carrying local variables which should not be exposed to the user +// Activations used for such purposes can be unwrapped to return the activation which omits local state. +type activationWrapper interface { + // Unwrap returns the Activation which omits local state. 
+ Unwrap() Activation +} + // asEvalState walks the Activation hierarchy and returns the first EvalState found, if present. func asEvalState(vars Activation) (EvalState, bool) { if conv, ok := vars.(evalStateConverter); ok { return conv.asEvalState(), true } + // Check if the current activation wraps another activation. This is used to support + // wrappers such as the @block() activation which may be composed of a dynamicSlotActivation or a + // constantSlotActivation. In this case, the underlying activation is the portion which interacts + // with the EvalState. + if wrapper, ok := vars.(activationWrapper); ok { + unwrapped := wrapper.Unwrap() + // Recursively call asEvalState on the unwrapped activation. This will check the unwrapped value and its parents. + return asEvalState(unwrapped) + } if vars.Parent() != nil { return asEvalState(vars.Parent()) } diff --git a/vendor/github.com/google/cel-go/interpreter/planner.go b/vendor/github.com/google/cel-go/interpreter/planner.go index f0e0d43054..0bc38449ce 100644 --- a/vendor/github.com/google/cel-go/interpreter/planner.go +++ b/vendor/github.com/google/cel-go/interpreter/planner.go @@ -61,13 +61,20 @@ type planner struct { observers []StatefulObserver } +type planBuilder struct { + *planner + + localVars map[string]int +} + // Plan implements the interpretablePlanner interface. This implementation of the Plan method also // applies decorators to each Interpretable generated as part of the overall plan. Decorators are // useful for layering functionality into the evaluation that is not natively understood by CEL, // such as state-tracking, expression re-write, and possibly efficient thread-safe memoization of // repeated expressions. 
func (p *planner) Plan(expr ast.Expr) (Interpretable, error) { - i, err := p.plan(expr) + pb := &planBuilder{planner: p, localVars: make(map[string]int)} + i, err := pb.plan(expr) if err != nil { return nil, err } @@ -77,7 +84,7 @@ func (p *planner) Plan(expr ast.Expr) (Interpretable, error) { return &ObservableInterpretable{Interpretable: i, observers: p.observers}, nil } -func (p *planner) plan(expr ast.Expr) (Interpretable, error) { +func (p *planBuilder) plan(expr ast.Expr) (Interpretable, error) { switch expr.Kind() { case ast.CallKind: return p.decorate(p.planCall(expr)) @@ -102,7 +109,7 @@ func (p *planner) plan(expr ast.Expr) (Interpretable, error) { // decorate applies the InterpretableDecorator functions to the given Interpretable. // Both the Interpretable and error generated by a Plan step are accepted as arguments // for convenience. -func (p *planner) decorate(i Interpretable, err error) (Interpretable, error) { +func (p *planBuilder) decorate(i Interpretable, err error) (Interpretable, error) { if err != nil { return nil, err } @@ -116,20 +123,26 @@ func (p *planner) decorate(i Interpretable, err error) (Interpretable, error) { } // planIdent creates an Interpretable that resolves an identifier from an Activation. -func (p *planner) planIdent(expr ast.Expr) (Interpretable, error) { +func (p *planBuilder) planIdent(expr ast.Expr) (Interpretable, error) { // Establish whether the identifier is in the reference map. if identRef, found := p.refMap[expr.ID()]; found { return p.planCheckedIdent(expr.ID(), identRef) } // Create the possible attribute list for the unresolved reference. 
ident := expr.AsIdent() + if p.isLocalVar(ident) { + return &evalAttr{ + adapter: p.adapter, + attr: p.attrFactory.AbsoluteAttribute(expr.ID(), ident), + }, nil + } return &evalAttr{ adapter: p.adapter, attr: p.attrFactory.MaybeAttribute(expr.ID(), ident), }, nil } -func (p *planner) planCheckedIdent(id int64, identRef *ast.ReferenceInfo) (Interpretable, error) { +func (p *planBuilder) planCheckedIdent(id int64, identRef *ast.ReferenceInfo) (Interpretable, error) { // Plan a constant reference if this is the case for this simple identifier. if identRef.Value != nil { return NewConstValue(id, identRef.Value), nil @@ -158,7 +171,7 @@ func (p *planner) planCheckedIdent(id int64, identRef *ast.ReferenceInfo) (Inter // a) selects a field from a map or proto. // b) creates a field presence test for a select within a has() macro. // c) resolves the select expression to a namespaced identifier. -func (p *planner) planSelect(expr ast.Expr) (Interpretable, error) { +func (p *planBuilder) planSelect(expr ast.Expr) (Interpretable, error) { // If the Select id appears in the reference map from the CheckedExpr proto then it is either // a namespaced identifier or enum value. if identRef, found := p.refMap[expr.ID()]; found { @@ -214,7 +227,7 @@ func (p *planner) planSelect(expr ast.Expr) (Interpretable, error) { // planCall creates a callable Interpretable while specializing for common functions and invocation // patterns. Specifically, conditional operators &&, ||, ?:, and (in)equality functions result in // optimized Interpretable values. -func (p *planner) planCall(expr ast.Expr) (Interpretable, error) { +func (p *planBuilder) planCall(expr ast.Expr) (Interpretable, error) { call := expr.AsCall() target, fnName, oName := p.resolveFunction(expr) argCount := len(call.Args()) @@ -291,7 +304,7 @@ func (p *planner) planCall(expr ast.Expr) (Interpretable, error) { } // planCallZero generates a zero-arity callable Interpretable. 
-func (p *planner) planCallZero(expr ast.Expr, +func (p *planBuilder) planCallZero(expr ast.Expr, function string, overload string, impl *functions.Overload) (Interpretable, error) { @@ -307,7 +320,7 @@ func (p *planner) planCallZero(expr ast.Expr, } // planCallUnary generates a unary callable Interpretable. -func (p *planner) planCallUnary(expr ast.Expr, +func (p *planBuilder) planCallUnary(expr ast.Expr, function string, overload string, impl *functions.Overload, @@ -335,7 +348,7 @@ func (p *planner) planCallUnary(expr ast.Expr, } // planCallBinary generates a binary callable Interpretable. -func (p *planner) planCallBinary(expr ast.Expr, +func (p *planBuilder) planCallBinary(expr ast.Expr, function string, overload string, impl *functions.Overload, @@ -364,7 +377,7 @@ func (p *planner) planCallBinary(expr ast.Expr, } // planCallVarArgs generates a variable argument callable Interpretable. -func (p *planner) planCallVarArgs(expr ast.Expr, +func (p *planBuilder) planCallVarArgs(expr ast.Expr, function string, overload string, impl *functions.Overload, @@ -392,7 +405,7 @@ func (p *planner) planCallVarArgs(expr ast.Expr, } // planCallEqual generates an equals (==) Interpretable. -func (p *planner) planCallEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) { +func (p *planBuilder) planCallEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) { return &evalEq{ id: expr.ID(), lhs: args[0], @@ -401,7 +414,7 @@ func (p *planner) planCallEqual(expr ast.Expr, args []Interpretable) (Interpreta } // planCallNotEqual generates a not equals (!=) Interpretable. 
-func (p *planner) planCallNotEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) { +func (p *planBuilder) planCallNotEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) { return &evalNe{ id: expr.ID(), lhs: args[0], @@ -410,7 +423,7 @@ func (p *planner) planCallNotEqual(expr ast.Expr, args []Interpretable) (Interpr } // planCallLogicalAnd generates a logical and (&&) Interpretable. -func (p *planner) planCallLogicalAnd(expr ast.Expr, args []Interpretable) (Interpretable, error) { +func (p *planBuilder) planCallLogicalAnd(expr ast.Expr, args []Interpretable) (Interpretable, error) { return &evalAnd{ id: expr.ID(), terms: args, @@ -418,7 +431,7 @@ func (p *planner) planCallLogicalAnd(expr ast.Expr, args []Interpretable) (Inter } // planCallLogicalOr generates a logical or (||) Interpretable. -func (p *planner) planCallLogicalOr(expr ast.Expr, args []Interpretable) (Interpretable, error) { +func (p *planBuilder) planCallLogicalOr(expr ast.Expr, args []Interpretable) (Interpretable, error) { return &evalOr{ id: expr.ID(), terms: args, @@ -426,7 +439,7 @@ func (p *planner) planCallLogicalOr(expr ast.Expr, args []Interpretable) (Interp } // planCallConditional generates a conditional / ternary (c ? t : f) Interpretable. -func (p *planner) planCallConditional(expr ast.Expr, args []Interpretable) (Interpretable, error) { +func (p *planBuilder) planCallConditional(expr ast.Expr, args []Interpretable) (Interpretable, error) { cond := args[0] t := args[1] var tAttr Attribute @@ -454,7 +467,7 @@ func (p *planner) planCallConditional(expr ast.Expr, args []Interpretable) (Inte // planCallIndex either extends an attribute with the argument to the index operation, or creates // a relative attribute based on the return of a function call or operation. 
-func (p *planner) planCallIndex(expr ast.Expr, args []Interpretable, optional bool) (Interpretable, error) { +func (p *planBuilder) planCallIndex(expr ast.Expr, args []Interpretable, optional bool) (Interpretable, error) { op := args[0] ind := args[1] opType := p.typeMap[op.ID()] @@ -489,7 +502,7 @@ func (p *planner) planCallIndex(expr ast.Expr, args []Interpretable, optional bo } // planCreateList generates a list construction Interpretable. -func (p *planner) planCreateList(expr ast.Expr) (Interpretable, error) { +func (p *planBuilder) planCreateList(expr ast.Expr) (Interpretable, error) { list := expr.AsList() optionalIndices := list.OptionalIndices() elements := list.Elements() @@ -518,7 +531,7 @@ func (p *planner) planCreateList(expr ast.Expr) (Interpretable, error) { } // planCreateStruct generates a map or object construction Interpretable. -func (p *planner) planCreateMap(expr ast.Expr) (Interpretable, error) { +func (p *planBuilder) planCreateMap(expr ast.Expr) (Interpretable, error) { m := expr.AsMap() entries := m.Entries() optionals := make([]bool, len(entries)) @@ -552,7 +565,7 @@ func (p *planner) planCreateMap(expr ast.Expr) (Interpretable, error) { } // planCreateObj generates an object construction Interpretable. -func (p *planner) planCreateStruct(expr ast.Expr) (Interpretable, error) { +func (p *planBuilder) planCreateStruct(expr ast.Expr) (Interpretable, error) { obj := expr.AsStruct() typeName, defined := p.resolveTypeName(obj.TypeName()) if !defined { @@ -586,7 +599,7 @@ func (p *planner) planCreateStruct(expr ast.Expr) (Interpretable, error) { } // planComprehension generates an Interpretable fold operation. 
-func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) { +func (p *planBuilder) planComprehension(expr ast.Expr) (Interpretable, error) { fold := expr.AsComprehension() accu, err := p.plan(fold.AccuInit()) if err != nil { @@ -596,6 +609,7 @@ func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) { if err != nil { return nil, err } + p.pushLocalVars(fold.AccuVar(), fold.IterVar(), fold.IterVar2()) cond, err := p.plan(fold.LoopCondition()) if err != nil { return nil, err @@ -604,10 +618,12 @@ func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) { if err != nil { return nil, err } + p.popLocalVars(fold.IterVar(), fold.IterVar2()) result, err := p.plan(fold.Result()) if err != nil { return nil, err } + p.popLocalVars(fold.AccuVar()) return &evalFold{ id: expr.ID(), accuVar: fold.AccuVar(), @@ -623,13 +639,13 @@ func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) { } // planConst generates a constant valued Interpretable. -func (p *planner) planConst(expr ast.Expr) (Interpretable, error) { +func (p *planBuilder) planConst(expr ast.Expr) (Interpretable, error) { return NewConstValue(expr.ID(), expr.AsLiteral()), nil } // resolveTypeName takes a qualified string constructed at parse time, applies the proto // namespace resolution rules to it in a scan over possible matching types in the TypeProvider. -func (p *planner) resolveTypeName(typeName string) (string, bool) { +func (p *planBuilder) resolveTypeName(typeName string) (string, bool) { for _, qualifiedTypeName := range p.container.ResolveCandidateNames(typeName) { if _, found := p.provider.FindStructType(qualifiedTypeName); found { return qualifiedTypeName, true @@ -646,7 +662,7 @@ func (p *planner) resolveTypeName(typeName string) (string, bool) { // - The target expression may only consist of ident and select expressions. // - The function is declared in the environment using its fully-qualified name. 
// - The fully-qualified function name matches the string serialized target value. -func (p *planner) resolveFunction(expr ast.Expr) (ast.Expr, string, string) { +func (p *planBuilder) resolveFunction(expr ast.Expr) (ast.Expr, string, string) { // Note: similar logic exists within the `checker/checker.go`. If making changes here // please consider the impact on checker.go and consolidate implementations or mirror code // as appropriate. @@ -687,7 +703,7 @@ func (p *planner) resolveFunction(expr ast.Expr) (ast.Expr, string, string) { // namespaced identifiers must be stripped, as all declarations already use fully-qualified // names. This stripping behavior is handled automatically by the ResolveCandidateNames // call. - return target, stripLeadingDot(fnName), "" + return target, strings.TrimPrefix(fnName, "."), "" } // Handle the situation where the function target actually indicates a qualified function name. @@ -710,7 +726,7 @@ func (p *planner) resolveFunction(expr ast.Expr) (ast.Expr, string, string) { // relativeAttr indicates that the attribute in this case acts as a qualifier and as such needs to // be observed to ensure that it's evaluation value is properly recorded for state tracking. -func (p *planner) relativeAttr(id int64, eval Interpretable, opt bool) (InterpretableAttribute, error) { +func (p *planBuilder) relativeAttr(id int64, eval Interpretable, opt bool) (InterpretableAttribute, error) { eAttr, ok := eval.(InterpretableAttribute) if !ok { eAttr = &evalAttr{ @@ -733,7 +749,7 @@ func (p *planner) relativeAttr(id int64, eval Interpretable, opt bool) (Interpre // toQualifiedName converts an expression AST into a qualified name if possible, with a boolean // 'found' value that indicates if the conversion is successful. 
-func (p *planner) toQualifiedName(operand ast.Expr) (string, bool) { +func (p *planBuilder) toQualifiedName(operand ast.Expr) (string, bool) { // If the checker identified the expression as an attribute by the type-checker, then it can't // possibly be part of qualified name in a namespace. _, isAttr := p.refMap[operand.ID()] @@ -759,9 +775,35 @@ func (p *planner) toQualifiedName(operand ast.Expr) (string, bool) { return "", false } -func stripLeadingDot(name string) string { - if strings.HasPrefix(name, ".") { - return name[1:] +func (p *planBuilder) pushLocalVars(names ...string) { + for _, name := range names { + if name == "" { + continue + } + if cnt, found := p.localVars[name]; found { + p.localVars[name] = cnt + 1 + } else { + p.localVars[name] = 1 + } + } +} + +func (p *planBuilder) popLocalVars(names ...string) { + for _, name := range names { + if name == "" { + continue + } + if cnt, found := p.localVars[name]; found { + if cnt == 1 { + delete(p.localVars, name) + } else { + p.localVars[name] = cnt - 1 + } + } } - return name +} + +func (p *planBuilder) isLocalVar(name string) bool { + _, found := p.localVars[name] + return found } diff --git a/vendor/github.com/google/cel-go/parser/helper.go b/vendor/github.com/google/cel-go/parser/helper.go index c13296dd5c..f960be20ed 100644 --- a/vendor/github.com/google/cel-go/parser/helper.go +++ b/vendor/github.com/google/cel-go/parser/helper.go @@ -159,7 +159,7 @@ func (p *parserHelper) id(ctx any) int64 { offset.Start = p.sourceInfo.ComputeOffset(int32(c.GetLine()), int32(c.GetColumn())) offset.Stop = offset.Start + int32(len(c.GetText())) case common.Location: - offset.Start = p.sourceInfo.ComputeOffset(int32(c.Line()), int32(c.Column())) + offset.Start = p.sourceInfo.ComputeOffsetAbsolute(int32(c.Line()), int32(c.Column())) offset.Stop = offset.Start case ast.OffsetRange: offset = c diff --git a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md 
b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md index 0206cfe124..6bca1ecfd6 100644 --- a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md +++ b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md @@ -2,6 +2,15 @@ ## HEAD +* Bump Go from 1.23 to 1.24. +* [preloader] Parse klog flags. +* [CTFE] Add a /log.v3.json endpoint to help satisfy a requirement of the Chrome CT Log Policy by @robstradling in https://github.com/google/certificate-transparency-go/pull/1703 +* [preloader] add continuous mode. +* [CTFE] Enforce max request body size using `http.MaxBytesHandler`. +* Support tiled logs in the loglist3 logfilter functions by @robstradling in https://github.com/google/certificate-transparency-go/pull/1762 +* Add FindTiledLog* functions by @robstradling in https://github.com/google/certificate-transparency-go/pull/1763 +* Remove internal witness. + ## v1.3.2 ### Misc diff --git a/vendor/github.com/google/certificate-transparency-go/README.md b/vendor/github.com/google/certificate-transparency-go/README.md index bade700508..74934e1a16 100644 --- a/vendor/github.com/google/certificate-transparency-go/README.md +++ b/vendor/github.com/google/certificate-transparency-go/README.md @@ -6,7 +6,7 @@ This repository holds Go code related to [Certificate Transparency](https://www.certificate-transparency.org/) (CT). The -repository requires Go version 1.23. +repository requires Go version 1.24. - [Repository Structure](#repository-structure) - [Trillian CT Personality](#trillian-ct-personality) @@ -16,7 +16,7 @@ repository requires Go version 1.23. ## Support -- Slack: https://transparency-dev.slack.com/ ([invitation](https://join.slack.com/t/transparency-dev/shared_invite/zt-27pkqo21d-okUFhur7YZ0rFoJVIOPznQ)) +- Slack: https://transparency-dev.slack.com/ ([invitation](https://transparency.dev/slack/)) ## Repository Structure @@ -85,7 +85,7 @@ pull requests for review. 
```bash # Install golangci-lint -go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.61.0 +go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.1.6 # Run code generation, build, test and linters ./scripts/presubmit.sh @@ -105,16 +105,45 @@ Some of the CT Go code is autogenerated from other files: definitions are converted to `.pb.go` implementations. - A mock implementation of the Trillian gRPC API (in `trillian/mockclient`) is created with [GoMock](https://github.com/golang/mock). +- Some enums have string-conversion methods (satisfying the `fmt.Stringer` + interface) created using the + [stringer](https://godoc.org/golang.org/x/tools/cmd/stringer) tool (`go get + golang.org/x/tools/cmd/stringer`). Re-generating mock or protobuffer files is only needed if you're changing -the original files; if you do, you'll need to install the prerequisites: +the original files. The recommended way to do this is by using the Docker +image used by the Cloud Build: -- tools written in `go` can be installed with a single run of `go install` - (courtesy of [`tools.go`](./tools/tools.go) and `go.mod`). -- `protoc` tool: you'll need [version 3.20.1](https://github.com/protocolbuffers/protobuf/releases/tag/v3.20.1) installed, and `PATH` updated to include its `bin/` directory. +```shell +docker build -f ./integration/Dockerfile -t ctgo-builder . +docker run -it --mount type=bind,src="$(pwd)",target=/src ctgo-builder /bin/bash -c "cd /src; ./scripts/install_deps.sh; go generate -x ./..." +``` + +These commands first create a docker image from the Dockerfile in this repo, and +then launch a container based on this image with the local directory mounted. The +correct versions of the tools are determined using the `go.mod` file in this repo, +and these tools are installed. Finally, all of the generated files are regenerated +and Docker exits. 
+ +Alternatively, you can install the prerequisites locally: -With tools installed, run the following: + - a series of tools, using `go install` to ensure that the versions are + compatible and tested: + + ``` + cd $(go list -f '{{ .Dir }}' github.com/google/certificate-transparency-go); \ + go install github.com/golang/mock/mockgen; \ + go install google.golang.org/protobuf/proto; \ + go install google.golang.org/protobuf/cmd/protoc-gen-go; \ + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc; \ + go install github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc; \ + go install golang.org/x/tools/cmd/stringer + ``` + - `protoc` tool: you'll need [version 3.20.1](https://github.com/protocolbuffers/protobuf/releases/tag/v3.20.1) installed, and `PATH` updated to include its `bin/` directory. + +and run the following: ```bash go generate -x ./... # hunts for //go:generate comments and runs them ``` + diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go index a2fed51d88..3a247e2598 100644 --- a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go +++ b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 +// protoc-gen-go v1.36.10 // protoc v3.20.1 // source: client/configpb/multilog.proto @@ -26,6 +26,7 @@ import ( timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -38,20 +39,17 @@ const ( // TemporalLogConfig is a set of LogShardConfig messages, whose // time limits should be contiguous. 
type TemporalLogConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"` unknownFields protoimpl.UnknownFields - - Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"` + sizeCache protoimpl.SizeCache } func (x *TemporalLogConfig) Reset() { *x = TemporalLogConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_client_configpb_multilog_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_client_configpb_multilog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TemporalLogConfig) String() string { @@ -62,7 +60,7 @@ func (*TemporalLogConfig) ProtoMessage() {} func (x *TemporalLogConfig) ProtoReflect() protoreflect.Message { mi := &file_client_configpb_multilog_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -87,11 +85,8 @@ func (x *TemporalLogConfig) GetShard() []*LogShardConfig { // LogShardConfig describes the acceptable date range for a single shard of a temporal // log. type LogShardConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` // The log's public key in DER-encoded PKIX form. 
PublicKeyDer []byte `protobuf:"bytes,2,opt,name=public_key_der,json=publicKeyDer,proto3" json:"public_key_der,omitempty"` // not_after_start defines the start of the range of acceptable NotAfter @@ -102,15 +97,15 @@ type LogShardConfig struct { // exclusive. // Leaving this unset implies no upper bound to the range. NotAfterLimit *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *LogShardConfig) Reset() { *x = LogShardConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_client_configpb_multilog_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_client_configpb_multilog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LogShardConfig) String() string { @@ -121,7 +116,7 @@ func (*LogShardConfig) ProtoMessage() {} func (x *LogShardConfig) ProtoReflect() protoreflect.Message { mi := &file_client_configpb_multilog_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -166,51 +161,31 @@ func (x *LogShardConfig) GetNotAfterLimit() *timestamppb.Timestamp { var File_client_configpb_multilog_proto protoreflect.FileDescriptor -var file_client_configpb_multilog_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, - 0x62, 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x43, 0x0a, 
0x11, 0x54, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x2e, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x67, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x22, 0xd0, 0x01, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, - 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0f, 0x6e, - 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, - 0x42, 0x0a, 0x0f, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x2d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, - 0x79, 0x2d, 0x67, 0x6f, 
0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x75, 0x6c, 0x74, - 0x69, 0x6c, 0x6f, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_client_configpb_multilog_proto_rawDesc = "" + + "\n" + + "\x1eclient/configpb/multilog.proto\x12\bconfigpb\x1a\x1fgoogle/protobuf/timestamp.proto\"C\n" + + "\x11TemporalLogConfig\x12.\n" + + "\x05shard\x18\x01 \x03(\v2\x18.configpb.LogShardConfigR\x05shard\"\xd0\x01\n" + + "\x0eLogShardConfig\x12\x10\n" + + "\x03uri\x18\x01 \x01(\tR\x03uri\x12$\n" + + "\x0epublic_key_der\x18\x02 \x01(\fR\fpublicKeyDer\x12B\n" + + "\x0fnot_after_start\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\rnotAfterStart\x12B\n" + + "\x0fnot_after_limit\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\rnotAfterLimitBHZFgithub.com/google/certificate-transparency-go/client/multilog/configpbb\x06proto3" var ( file_client_configpb_multilog_proto_rawDescOnce sync.Once - file_client_configpb_multilog_proto_rawDescData = file_client_configpb_multilog_proto_rawDesc + file_client_configpb_multilog_proto_rawDescData []byte ) func file_client_configpb_multilog_proto_rawDescGZIP() []byte { file_client_configpb_multilog_proto_rawDescOnce.Do(func() { - file_client_configpb_multilog_proto_rawDescData = protoimpl.X.CompressGZIP(file_client_configpb_multilog_proto_rawDescData) + file_client_configpb_multilog_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_client_configpb_multilog_proto_rawDesc), len(file_client_configpb_multilog_proto_rawDesc))) }) return file_client_configpb_multilog_proto_rawDescData } var file_client_configpb_multilog_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_client_configpb_multilog_proto_goTypes = []interface{}{ +var file_client_configpb_multilog_proto_goTypes = []any{ (*TemporalLogConfig)(nil), // 0: configpb.TemporalLogConfig (*LogShardConfig)(nil), // 1: configpb.LogShardConfig (*timestamppb.Timestamp)(nil), // 2: 
google.protobuf.Timestamp @@ -231,37 +206,11 @@ func file_client_configpb_multilog_proto_init() { if File_client_configpb_multilog_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_client_configpb_multilog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TemporalLogConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_client_configpb_multilog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LogShardConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_client_configpb_multilog_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_client_configpb_multilog_proto_rawDesc), len(file_client_configpb_multilog_proto_rawDesc)), NumEnums: 0, NumMessages: 2, NumExtensions: 0, @@ -272,7 +221,6 @@ func file_client_configpb_multilog_proto_init() { MessageInfos: file_client_configpb_multilog_proto_msgTypes, }.Build() File_client_configpb_multilog_proto = out.File - file_client_configpb_multilog_proto_rawDesc = nil file_client_configpb_multilog_proto_goTypes = nil file_client_configpb_multilog_proto_depIdxs = nil } diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go b/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go index 9ac54bae91..05ca665e99 100644 --- a/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go +++ b/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go @@ -15,17 +15,25 @@ package loglist3 import ( + "fmt" + "github.com/google/certificate-transparency-go/x509" "github.com/google/certificate-transparency-go/x509util" + "k8s.io/klog/v2" ) 
-// LogRoots maps Log-URLs (stated at LogList) to the pools of their accepted -// root-certificates. +// LogRoots maps Log-URLs and TiledLog-SubmissionURLs (stated at LogList) +// to the pools of their accepted root-certificates. type LogRoots map[string]*x509util.PEMCertPool // Compatible creates a new LogList containing only Logs matching the temporal, // root-acceptance and Log-status conditions. func (ll *LogList) Compatible(cert *x509.Certificate, certRoot *x509.Certificate, roots LogRoots) LogList { + urls := make([]string, 0, len(roots)) + for url := range roots { + urls = append(urls, url) + } + klog.V(1).Info(urls) active := ll.TemporallyCompatible(cert) // Do not check root compatbility if roots are not being provided. if certRoot == nil { @@ -49,7 +57,16 @@ func (ll *LogList) SelectByStatus(lstats []LogStatus) LogList { } } } - if len(activeOp.Logs) > 0 { + activeOp.TiledLogs = []*TiledLog{} + for _, l := range op.TiledLogs { + for _, lstat := range lstats { + if l.State.LogStatus() == lstat { + activeOp.TiledLogs = append(activeOp.TiledLogs, l) + break + } + } + } + if len(activeOp.Logs) > 0 || len(activeOp.TiledLogs) > 0 { active.Operators = append(active.Operators, &activeOp) } } @@ -92,10 +109,39 @@ func (ll *LogList) RootCompatible(certRoot *x509.Certificate, roots LogRoots) Lo compatibleOp.Logs = append(compatibleOp.Logs, l) } } - if len(compatibleOp.Logs) > 0 { + compatibleOp.TiledLogs = []*TiledLog{} + for _, l := range op.TiledLogs { + // If root set is not defined, we treat Log as compatible assuming no + // knowledge of its roots. + if _, ok := roots[l.SubmissionURL]; !ok { + compatibleOp.TiledLogs = append(compatibleOp.TiledLogs, l) + continue + } + + if certRoot == nil { + continue + } + + // Check root is accepted. 
+ if roots[l.SubmissionURL].Included(certRoot) { + compatibleOp.TiledLogs = append(compatibleOp.TiledLogs, l) + } + } + if len(compatibleOp.Logs) > 0 || len(compatibleOp.TiledLogs) > 0 { compatible.Operators = append(compatible.Operators, &compatibleOp) } } + logMessage := "Root compatible operators: \n" + for _, operator := range ll.Operators { + logMessage += fmt.Sprintf("Operator: %s\n", operator.Name) + for _, l := range operator.Logs { + logMessage += fmt.Sprintf("\t%s\n", l.URL) + } + for _, l := range operator.TiledLogs { + logMessage += fmt.Sprintf("\t%s\n", l.SubmissionURL) + } + } + klog.V(1).Info(logMessage) return compatible } @@ -121,9 +167,30 @@ func (ll *LogList) TemporallyCompatible(cert *x509.Certificate) LogList { compatibleOp.Logs = append(compatibleOp.Logs, l) } } - if len(compatibleOp.Logs) > 0 { + compatibleOp.TiledLogs = []*TiledLog{} + for _, l := range op.TiledLogs { + if l.TemporalInterval == nil { + compatibleOp.TiledLogs = append(compatibleOp.TiledLogs, l) + continue + } + if cert.NotAfter.Before(l.TemporalInterval.EndExclusive) && (cert.NotAfter.After(l.TemporalInterval.StartInclusive) || cert.NotAfter.Equal(l.TemporalInterval.StartInclusive)) { + compatibleOp.TiledLogs = append(compatibleOp.TiledLogs, l) + } + } + if len(compatibleOp.Logs) > 0 || len(compatibleOp.TiledLogs) > 0 { compatible.Operators = append(compatible.Operators, &compatibleOp) } } + logMessage := "Temporal compatible logs: \n" + for _, operator := range ll.Operators { + logMessage += fmt.Sprintf("Operator: %s\n", operator.Name) + for _, l := range operator.Logs { + logMessage += fmt.Sprintf("\t%s\n", l.URL) + } + for _, l := range operator.TiledLogs { + logMessage += fmt.Sprintf("\t%s\n", l.SubmissionURL) + } + } + klog.V(1).Info(logMessage) return compatible } diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go b/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go index c5e94f1874..9e184ae23b 100644 --- 
a/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go +++ b/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go @@ -301,7 +301,7 @@ func NewFromSignedJSON(llData, rawSig []byte, pubKey crypto.PublicKey) (*LogList return NewFromJSON(llData) } -// FindLogByName returns all logs whose names contain the given string. +// FindLogByName returns all RFC 6962 logs whose names contain the given string. func (ll *LogList) FindLogByName(name string) []*Log { name = strings.ToLower(name) var results []*Log @@ -315,7 +315,7 @@ func (ll *LogList) FindLogByName(name string) []*Log { return results } -// FindLogByURL finds the log with the given URL. +// FindLogByURL finds the RFC 6962 log with the given URL. func (ll *LogList) FindLogByURL(url string) *Log { for _, op := range ll.Operators { for _, log := range op.Logs { @@ -328,7 +328,7 @@ func (ll *LogList) FindLogByURL(url string) *Log { return nil } -// FindLogByKeyHash finds the log with the given key hash. +// FindLogByKeyHash finds the RFC 6962 log with the given key hash. func (ll *LogList) FindLogByKeyHash(keyhash [sha256.Size]byte) *Log { for _, op := range ll.Operators { for _, log := range op.Logs { @@ -340,7 +340,8 @@ func (ll *LogList) FindLogByKeyHash(keyhash [sha256.Size]byte) *Log { return nil } -// FindLogByKeyHashPrefix finds all logs whose key hash starts with the prefix. +// FindLogByKeyHashPrefix finds all RFC 6962 logs whose key hash starts with +// the prefix. func (ll *LogList) FindLogByKeyHashPrefix(prefix string) []*Log { var results []*Log for _, op := range ll.Operators { @@ -354,7 +355,7 @@ func (ll *LogList) FindLogByKeyHashPrefix(prefix string) []*Log { return results } -// FindLogByKey finds the log with the given DER-encoded key. +// FindLogByKey finds the RFC 6962 log with the given DER-encoded key. 
func (ll *LogList) FindLogByKey(key []byte) *Log { for _, op := range ll.Operators { for _, log := range op.Logs { @@ -368,10 +369,10 @@ func (ll *LogList) FindLogByKey(key []byte) *Log { var hexDigits = regexp.MustCompile("^[0-9a-fA-F]+$") -// FuzzyFindLog tries to find logs that match the given unspecified input, -// whose format is unspecified. This generally returns a single log, but -// if text input that matches multiple log descriptions is provided, then -// multiple logs may be returned. +// FuzzyFindLog tries to find RFC 6962 logs that match the given unspecified +// input, whose format is unspecified. This generally returns a single RFC 6962 +// log, but if text input that matches multiple RFC 6962 log descriptions is +// provided, then multiple RFC 6962 logs may be returned. func (ll *LogList) FuzzyFindLog(input string) []*Log { input = strings.Trim(input, " \t") if logs := ll.FindLogByName(input); len(logs) > 0 { @@ -417,6 +418,124 @@ func (ll *LogList) FuzzyFindLog(input string) []*Log { return nil } +// FindTiledLogByName returns all tiled logs whose names contain the given +// string. +func (ll *LogList) FindTiledLogByName(name string) []*TiledLog { + name = strings.ToLower(name) + var results []*TiledLog + for _, op := range ll.Operators { + for _, log := range op.TiledLogs { + if strings.Contains(strings.ToLower(log.Description), name) { + results = append(results, log) + } + } + } + return results +} + +// FindTiledLogByURL finds the tiled log with the given URL. +func (ll *LogList) FindTiledLogByURL(url string) *TiledLog { + for _, op := range ll.Operators { + for _, log := range op.TiledLogs { + // Don't count trailing slashes + if strings.TrimRight(log.SubmissionURL, "/") == strings.TrimRight(url, "/") { + return log + } else if strings.TrimRight(log.MonitoringURL, "/") == strings.TrimRight(url, "/") { + return log + } + } + } + return nil +} + +// FindTiledLogByKeyHash finds the tiled log with the given key hash. 
+func (ll *LogList) FindTiledLogByKeyHash(keyhash [sha256.Size]byte) *TiledLog { + for _, op := range ll.Operators { + for _, log := range op.TiledLogs { + if bytes.Equal(log.LogID, keyhash[:]) { + return log + } + } + } + return nil +} + +// FindTiledLogByKeyHashPrefix finds all tiled logs whose key hash starts with +// the prefix. +func (ll *LogList) FindTiledLogByKeyHashPrefix(prefix string) []*TiledLog { + var results []*TiledLog + for _, op := range ll.Operators { + for _, log := range op.TiledLogs { + hh := hex.EncodeToString(log.LogID[:]) + if strings.HasPrefix(hh, prefix) { + results = append(results, log) + } + } + } + return results +} + +// FindTiledLogByKey finds the tiled log with the given DER-encoded key. +func (ll *LogList) FindTiledLogByKey(key []byte) *TiledLog { + for _, op := range ll.Operators { + for _, log := range op.TiledLogs { + if bytes.Equal(log.Key[:], key) { + return log + } + } + } + return nil +} + +// FuzzyFindTiledLog tries to find tiled logs that match the given unspecified +// input, whose format is unspecified. This generally returns a single tiled +// log, but if text input that matches multiple tiled log descriptions is +// provided, then multiple tiled logs may be returned. +func (ll *LogList) FuzzyFindTiledLog(input string) []*TiledLog { + input = strings.Trim(input, " \t") + if logs := ll.FindTiledLogByName(input); len(logs) > 0 { + return logs + } + if log := ll.FindTiledLogByURL(input); log != nil { + return []*TiledLog{log} + } + // Try assuming the input is binary data of some form. First base64: + if data, err := base64.StdEncoding.DecodeString(input); err == nil { + if len(data) == sha256.Size { + var hash [sha256.Size]byte + copy(hash[:], data) + if log := ll.FindTiledLogByKeyHash(hash); log != nil { + return []*TiledLog{log} + } + } + if log := ll.FindTiledLogByKey(data); log != nil { + return []*TiledLog{log} + } + } + // Now hex, but strip all internal whitespace first. 
+ input = stripInternalSpace(input) + if data, err := hex.DecodeString(input); err == nil { + if len(data) == sha256.Size { + var hash [sha256.Size]byte + copy(hash[:], data) + if log := ll.FindTiledLogByKeyHash(hash); log != nil { + return []*TiledLog{log} + } + } + if log := ll.FindTiledLogByKey(data); log != nil { + return []*TiledLog{log} + } + } + // Finally, allow hex strings with an odd number of digits. + if hexDigits.MatchString(input) { + if logs := ll.FindTiledLogByKeyHashPrefix(input); len(logs) > 0 { + return logs + } + } + + return nil +} + func stripInternalSpace(input string) string { return strings.Map(func(r rune) rune { if !unicode.IsSpace(r) { diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go b/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go index 84c7bbdf4e..6f21ed684a 100644 --- a/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go +++ b/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go @@ -22,8 +22,9 @@ const _LogStatus_name = "UndefinedLogStatusPendingLogStatusQualifiedLogStatusUsa var _LogStatus_index = [...]uint8{0, 18, 34, 52, 67, 84, 100, 117} func (i LogStatus) String() string { - if i < 0 || i >= LogStatus(len(_LogStatus_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_LogStatus_index)-1 { return "LogStatus(" + strconv.FormatInt(int64(i), 10) + ")" } - return _LogStatus_name[_LogStatus_index[i]:_LogStatus_index[i+1]] + return _LogStatus_name[_LogStatus_index[idx]:_LogStatus_index[idx+1]] } diff --git a/vendor/github.com/google/certificate-transparency-go/types.go b/vendor/github.com/google/certificate-transparency-go/types.go index 2a96f6a09f..10092af44d 100644 --- a/vendor/github.com/google/certificate-transparency-go/types.go +++ b/vendor/github.com/google/certificate-transparency-go/types.go @@ -457,7 +457,8 @@ const ( GetRootsPath = "/ct/v1/get-roots" GetEntryAndProofPath = 
"/ct/v1/get-entry-and-proof" - AddJSONPath = "/ct/v1/add-json" // Experimental addition + AddJSONPath = "/ct/v1/add-json" // Experimental addition + LogV3JSONPath = "/log.v3.json" // Metadata for the log, to help satisfy a requirement of the Chrome CT Log Policy ) // AddChainRequest represents the JSON request body sent to the add-chain and diff --git a/vendor/github.com/google/gnostic-models/extensions/extension.proto b/vendor/github.com/google/gnostic-models/extensions/extension.proto index 875137c1a8..a600429890 100644 --- a/vendor/github.com/google/gnostic-models/extensions/extension.proto +++ b/vendor/github.com/google/gnostic-models/extensions/extension.proto @@ -42,7 +42,7 @@ option java_package = "org.gnostic.v1"; option objc_class_prefix = "GNX"; // The Go package name. -option go_package = "./extensions;gnostic_extension_v1"; +option go_package = "github.com/google/gnostic-models/extensions;gnostic_extension_v1"; // The version number of Gnostic. message Version { diff --git a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto index 1c59b2f4ae..49adafcc8e 100644 --- a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto +++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto @@ -42,7 +42,7 @@ option java_package = "org.openapi_v2"; option objc_class_prefix = "OAS"; // The Go package name. 
-option go_package = "./openapiv2;openapi_v2"; +option go_package = "github.com/google/gnostic-models/openapiv2;openapi_v2"; message AdditionalPropertiesItem { oneof oneof { diff --git a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto index 1be335b89b..af4b6254bc 100644 --- a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto @@ -42,7 +42,7 @@ option java_package = "org.openapi_v3"; option objc_class_prefix = "OAS"; // The Go package name. -option go_package = "./openapiv3;openapi_v3"; +option go_package = "github.com/google/gnostic-models/openapiv3;openapi_v3"; message AdditionalPropertiesItem { oneof oneof { diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto index 09ee0aac51..895b4567cd 100644 --- a/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto +++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto @@ -20,7 +20,7 @@ import "google/protobuf/descriptor.proto"; import "openapiv3/OpenAPIv3.proto"; // The Go package name. -option go_package = "./openapiv3;openapi_v3"; +option go_package = "github.com/google/gnostic-models/openapiv3;openapi_v3"; // This option lets the proto compiler generate Java code inside the package // name (see below) instead of inside an outer class. 
It creates a simpler // developer experience by reducing one-level of name nesting and be diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go index 6e8814d808..6f08460ba7 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go @@ -117,8 +117,8 @@ func (dk *defaultKeychain) ResolveContext(_ context.Context, target Resource) (A if err != nil { return nil, err } - } else if fileExists(os.Getenv("REGISTRY_AUTH_FILE")) { - f, err := os.Open(os.Getenv("REGISTRY_AUTH_FILE")) + } else if path := filepath.Clean(os.Getenv("REGISTRY_AUTH_FILE")); fileExists(path) { + f, err := os.Open(path) if err != nil { return nil, err } @@ -127,8 +127,8 @@ func (dk *defaultKeychain) ResolveContext(_ context.Context, target Resource) (A if err != nil { return nil, err } - } else if fileExists(filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "containers/auth.json")) { - f, err := os.Open(filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "containers/auth.json")) + } else if path := filepath.Clean(filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "containers/auth.json")); fileExists(path) { + f, err := os.Open(path) if err != nil { return nil, err } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go index d81593bd59..bbb600ed7f 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go @@ -84,27 +84,35 @@ func Hasher(name string) (hash.Hash, error) { } func (h *Hash) parse(unquoted string) error { - parts := strings.Split(unquoted, ":") - if len(parts) != 2 { + algo, body, ok := strings.Cut(unquoted, ":") + if !ok || algo == "" || body == "" { return fmt.Errorf("cannot parse hash: %q", unquoted) } - rest := strings.TrimLeft(parts[1], 
"0123456789abcdef") + rest := strings.TrimLeft(body, "0123456789abcdef") if len(rest) != 0 { return fmt.Errorf("found non-hex character in hash: %c", rest[0]) } - hasher, err := Hasher(parts[0]) - if err != nil { - return err + var wantBytes int + switch algo { + case "sha256": + wantBytes = crypto.SHA256.Size() + default: + hasher, err := Hasher(algo) + if err != nil { + return err + } + wantBytes = hasher.Size() } + // Compare the hex to the expected size (2 hex characters per byte) - if len(parts[1]) != hasher.Size()*2 { - return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1]) + if len(body) != hex.EncodedLen(wantBytes) { + return fmt.Errorf("wrong number of hex digits for %s: %s", algo, body) } - h.Algorithm = parts[0] - h.Hex = parts[1] + h.Algorithm = algo + h.Hex = body return nil } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go index a031ff5ae9..7014ca8c57 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go @@ -20,6 +20,6 @@ import "path/filepath" type Path string func (l Path) path(elem ...string) string { - complete := []string{string(l)} + complete := []string{string(l)} //nolint:prealloc return filepath.Join(append(complete, elem...)...) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go index 409877bce0..088572723e 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go @@ -296,6 +296,29 @@ func extract(img v1.Image, w io.Writer) error { // Some tools prepend everything with "./", so if we don't Clean the // name, we may have duplicate entries, which angers tar-split. 
header.Name = filepath.Clean(header.Name) + + // Normalize absolute paths to relative to prevent writing outside + // the extraction root (Zip Slip / CVE-2018-15664 class). + // Many OCI tools emit absolute paths; stripping the leading slash + // preserves the entry while removing the danger. + if filepath.IsAbs(header.Name) { + header.Name = strings.TrimLeft(header.Name, "/") + } + // After normalization, reject any remaining path traversal. + if strings.HasPrefix(header.Name, "..") { + continue + } + + // Reject symlinks and hardlinks that point outside the extraction + // root. An attacker can create a symlink to /etc and then write + // files through it in a subsequent layer entry. + if header.Typeflag == tar.TypeSymlink || header.Typeflag == tar.TypeLink { + linkTarget := filepath.Clean(header.Linkname) + if strings.HasPrefix(linkTarget, "..") || filepath.IsAbs(linkTarget) { + continue + } + } + // force PAX format to remove Name/Linkname length limit of 100 characters // required by USTAR and to not depend on internal tar package guess which // prefers USTAR over PAX @@ -317,7 +340,7 @@ func extract(img v1.Image, w io.Writer) error { name = filepath.Join(dirname, basename) } - if _, ok := fileMap[name]; ok { + if _, ok := fileMap[name]; ok && !tombstone { continue } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go index 799c7ea08b..325874399b 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go @@ -15,6 +15,7 @@ package transport import ( + "cmp" "context" "errors" "fmt" @@ -75,7 +76,10 @@ func pingSingle(ctx context.Context, reg name.Registry, t http.RoundTripper, sch resp.Body.Close() }() - insecure := scheme == "http" + // If resp.Request is set, we may have followed a redirect, + // so we want to prefer 
resp.Request.URL.Scheme (if it's set) + // falling back to the original request's scheme. + insecure := cmp.Or(resp.Request, req).URL.Scheme == "http" switch resp.StatusCode { case http.StatusOK: diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go index 94d207de1a..58adfd8d33 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go @@ -634,7 +634,7 @@ func scopesForUploadingImage(repo name.Repository, layers []v1.Layer) []string { } } - scopes := make([]string, 0) + scopes := make([]string, 0, len(scopeSet)+1) // Push scope should be the first element because a few registries just look at the first scope to determine access. scopes = append(scopes, repo.Scope(transport.PushScope)) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go index aba609deac..96c0cce49d 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go @@ -219,6 +219,15 @@ type tarFile struct { } func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) { + return followLinks(opener, filePath, make(map[string]bool)) +} + +func followLinks(opener Opener, filePath string, visited map[string]bool) (io.ReadCloser, error) { + if visited[filePath] { + return nil, fmt.Errorf("link cycle detected for %s", filePath) + } + visited[filePath] = true + f, err := opener() if err != nil { return nil, err @@ -242,7 +251,7 @@ func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) { if hdr.Name == filePath { if hdr.Typeflag == tar.TypeSymlink || hdr.Typeflag == tar.TypeLink { currentDir := filepath.Dir(filePath) - return extractFileFromTar(opener, path.Join(currentDir, 
path.Clean(hdr.Linkname))) + return followLinks(opener, path.Join(currentDir, path.Clean(hdr.Linkname)), visited) } needClose = false return tarFile{ diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go index a47b7475ed..36102da2ed 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated // Copyright 2018 Google LLC All Rights Reserved. // diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go index 91198f819a..c936954033 100644 --- a/vendor/github.com/google/go-querystring/query/encode.go +++ b/vendor/github.com/google/go-querystring/query/encode.go @@ -6,22 +6,21 @@ // // As a simple example: // -// type Options struct { -// Query string `url:"q"` -// ShowAll bool `url:"all"` -// Page int `url:"page"` -// } +// type Options struct { +// Query string `url:"q"` +// ShowAll bool `url:"all"` +// Page int `url:"page"` +// } // -// opt := Options{ "foo", true, 2 } -// v, _ := query.Values(opt) -// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" +// opt := Options{ "foo", true, 2 } +// v, _ := query.Values(opt) +// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" // // The exact mapping between Go values and url.Values is described in the // documentation for the Values() function. 
package query import ( - "bytes" "fmt" "net/url" "reflect" @@ -47,8 +46,8 @@ type Encoder interface { // // Each exported struct field is encoded as a URL parameter unless // -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option // // The empty values are false, 0, any nil pointer or interface value, any array // slice, map, or string of length zero, and any type (such as time.Time) that @@ -59,19 +58,19 @@ type Encoder interface { // field's tag value is the key name, followed by an optional comma and // options. For example: // -// // Field is ignored by this package. -// Field int `url:"-"` +// // Field is ignored by this package. +// Field int `url:"-"` // -// // Field appears as URL parameter "myName". -// Field int `url:"myName"` +// // Field appears as URL parameter "myName". +// Field int `url:"myName"` // -// // Field appears as URL parameter "myName" and the field is omitted if -// // its value is empty -// Field int `url:"myName,omitempty"` +// // Field appears as URL parameter "myName" and the field is omitted if +// // its value is empty +// Field int `url:"myName,omitempty"` // -// // Field appears as URL parameter "Field" (the default), but the field -// // is skipped if empty. Note the leading comma. -// Field int `url:",omitempty"` +// // Field appears as URL parameter "Field" (the default), but the field +// // is skipped if empty. Note the leading comma. +// Field int `url:",omitempty"` // // For encoding individual field values, the following type-dependent rules // apply: @@ -88,8 +87,8 @@ type Encoder interface { // "url" tag) will use the value of the "layout" tag as a layout passed to // time.Format. 
For example: // -// // Encode a time.Time as YYYY-MM-DD -// Field time.Time `layout:"2006-01-02"` +// // Encode a time.Time as YYYY-MM-DD +// Field time.Time `layout:"2006-01-02"` // // Slice and Array values default to encoding as multiple URL values of the // same name. Including the "comma" option signals that the field should be @@ -103,9 +102,9 @@ type Encoder interface { // from the "url" tag) will use the value of the "del" tag as the delimiter. // For example: // -// // Encode a slice of bools as ints ("1" for true, "0" for false), -// // separated by exclamation points "!". -// Field []bool `url:",int" del:"!"` +// // Encode a slice of bools as ints ("1" for true, "0" for false), +// // separated by exclamation points "!". +// Field []bool `url:",int" del:"!"` // // Anonymous struct fields are usually encoded as if their inner exported // fields were fields in the outer struct, subject to the standard Go @@ -114,10 +113,10 @@ type Encoder interface { // // Non-nil pointer values are encoded as the value pointed to. // -// Nested structs are encoded including parent fields in value names for -// scoping. e.g: +// Nested structs have their fields processed recursively and are encoded +// including parent fields in value names for scoping. For example, // -// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" +// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" // // All other values are encoded using their default string representation. // @@ -125,6 +124,11 @@ type Encoder interface { // as multiple URL values of the same name. 
func Values(v interface{}) (url.Values, error) { values := make(url.Values) + + if v == nil { + return values, nil + } + val := reflect.ValueOf(v) for val.Kind() == reflect.Ptr { if val.IsNil() { @@ -133,10 +137,6 @@ func Values(v interface{}) (url.Values, error) { val = val.Elem() } - if v == nil { - return values, nil - } - if val.Kind() != reflect.Struct { return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind()) } @@ -209,6 +209,11 @@ func reflectValue(values url.Values, val reflect.Value, scope string) error { } if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { + if sv.Len() == 0 { + // skip if slice or array is empty + continue + } + var del string if opts.Contains("comma") { del = "," @@ -223,7 +228,7 @@ func reflectValue(values url.Values, val reflect.Value, scope string) error { } if del != "" { - s := new(bytes.Buffer) + s := new(strings.Builder) first := true for i := 0; i < sv.Len(); i++ { if first { diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json new file mode 100644 index 0000000000..2fcff6e273 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + "v2": "2.15.0" +} diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md new file mode 100644 index 0000000000..6c39f72854 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -0,0 +1,222 @@ +# Changes + +## [2.20.0](https://github.com/googleapis/google-cloud-go/releases/tag/v2.20.0) (2026-03-25) + +### Features + +* add TelemetryErrorInfo and ExtractTelemetryErrorInfo (#487) ([defdded](https://github.com/googleapis/google-cloud-go/commit/defdded3eac5d97e32243ef79216f1865f3250fb)) +* hook metric recording into gax.Invoke (#494) ([1f3e9ae](https://github.com/googleapis/google-cloud-go/commit/1f3e9aefd21a1a18a6e4da1e03ea84a0b46f2d49)) + 
+## [2.19.0](https://github.com/googleapis/google-cloud-go/releases/tag/v2.19.0) (2026-03-17) + +### Features + +* add ClientMetrics initialization core (#473) ([f53618c](https://github.com/googleapis/google-cloud-go/commit/f53618c2a9f19d5e5945395001fdc9b317e71faf)) +* add TransportTelemetryData for dynamic transport attributes (#481) ([8a7caf0](https://github.com/googleapis/google-cloud-go/commit/8a7caf0014c9ee9bcf448f16a2e1ae77407a78b8)) +* add WithClientMetrics CallOption (#479) ([76f0284](https://github.com/googleapis/google-cloud-go/commit/76f0284ef42fb92484531483975b7ccff9c54016)) +* pass logger to downstream via context (#474) ([434fa67](https://github.com/googleapis/google-cloud-go/commit/434fa6768b9ee50ed1050a2b5aa11dbe7dbd33a6)) +* update WithLogger to WithLoggerContext. (#478) ([1cb70ba](https://github.com/googleapis/google-cloud-go/commit/1cb70baf5fda8cbff587206f94b1b08e078bd175)) + +### Bug Fixes + +* lazy initialization and getters for ClientMetrics (#485) ([fb6c5f4](https://github.com/googleapis/google-cloud-go/commit/fb6c5f4d56fc3177a2d0d2b8f9e8df6e4be95505)) + +## [2.18.0](https://github.com/googleapis/google-cloud-go/releases/tag/v2.18.0) (2026-03-09) + +### Features + +* add callctx telemetry helpers (#472) ([fa319ff](https://github.com/googleapis/google-cloud-go/commit/fa319ffc309366ab21e41f5d7480f450eedd2be9)) +* move gax-go to use 1.25 as the lower bound of support (#469) ([01594ca](https://github.com/googleapis/google-cloud-go/commit/01594ca54717eebe7229a5168ef41be61191a720)) + +## [2.17.0](https://github.com/googleapis/google-cloud-go/releases/tag/v2.17.0) (2026-02-03) + +### Features + +* update Invoke to add retry count to context (#462) ([ea7096d](https://github.com/googleapis/google-cloud-go/commit/ea7096d50d665064dbfeffd7d93fa13d810ad4e4)) + +## [2.16.0](https://github.com/googleapis/google-cloud-go/releases/tag/v2.16.0) (2025-12-17) + +### Features + +* add IsFeatureEnabled (#454) 
([2700b8a](https://github.com/googleapis/google-cloud-go/commit/2700b8ab3062c6c6c5a26d0fc6ba1fc064a8fc04)) + +## [2.15.0](https://github.com/googleapis/gax-go/compare/v2.14.2...v2.15.0) (2025-07-09) + + +### Features + +* **apierror:** improve gRPC status code mapping for HTTP errors ([#431](https://github.com/googleapis/gax-go/issues/431)) ([c207f2a](https://github.com/googleapis/gax-go/commit/c207f2a19ab91d3baee458b57d4aa992519025c7)) + +## [2.14.2](https://github.com/googleapis/gax-go/compare/v2.14.1...v2.14.2) (2025-05-12) + + +### Documentation + +* **v2:** Fix Backoff doc to accurately explain Multiplier ([#423](https://github.com/googleapis/gax-go/issues/423)) ([16d1791](https://github.com/googleapis/gax-go/commit/16d17917121ea9f5d84ba52b5c7c7f2ec0f9e784)), refs [#422](https://github.com/googleapis/gax-go/issues/422) + +## [2.14.1](https://github.com/googleapis/gax-go/compare/v2.14.0...v2.14.1) (2024-12-19) + + +### Bug Fixes + +* update golang.org/x/net to v0.33.0 ([#391](https://github.com/googleapis/gax-go/issues/391)) ([547a5b4](https://github.com/googleapis/gax-go/commit/547a5b43aa6f376f71242da9f18e65fbdfb342f6)) + + +### Documentation + +* fix godoc to refer to the proper envvar ([#387](https://github.com/googleapis/gax-go/issues/387)) ([dc6baf7](https://github.com/googleapis/gax-go/commit/dc6baf75c1a737233739630b5af6c9759f08abcd)) + +## [2.14.0](https://github.com/googleapis/gax-go/compare/v2.13.0...v2.14.0) (2024-11-13) + + +### Features + +* **internallog:** add a logging support package ([#380](https://github.com/googleapis/gax-go/issues/380)) ([c877470](https://github.com/googleapis/gax-go/commit/c87747098135631a3de5865ed03aaf2c79fd9319)) + +## [2.13.0](https://github.com/googleapis/gax-go/compare/v2.12.5...v2.13.0) (2024-07-22) + + +### Features + +* **iterator:** add package to help work with new iter.Seq types ([#358](https://github.com/googleapis/gax-go/issues/358)) 
([6bccdaa](https://github.com/googleapis/gax-go/commit/6bccdaac011fe6fd147e4eb533a8e6520b7d4acc)) + +## [2.12.5](https://github.com/googleapis/gax-go/compare/v2.12.4...v2.12.5) (2024-06-18) + + +### Bug Fixes + +* **v2/apierror:** fix (*APIError).Error() for unwrapped Status ([#351](https://github.com/googleapis/gax-go/issues/351)) ([22c16e7](https://github.com/googleapis/gax-go/commit/22c16e7bff5402bdc4c25063771cdd01c650b500)), refs [#350](https://github.com/googleapis/gax-go/issues/350) + +## [2.12.4](https://github.com/googleapis/gax-go/compare/v2.12.3...v2.12.4) (2024-05-03) + + +### Bug Fixes + +* provide unmarshal options for streams ([#343](https://github.com/googleapis/gax-go/issues/343)) ([ddf9a90](https://github.com/googleapis/gax-go/commit/ddf9a90bf180295d49875e15cb80b2136a49dbaf)) + +## [2.12.3](https://github.com/googleapis/gax-go/compare/v2.12.2...v2.12.3) (2024-03-14) + + +### Bug Fixes + +* bump protobuf dep to v1.33 ([#333](https://github.com/googleapis/gax-go/issues/333)) ([2892b22](https://github.com/googleapis/gax-go/commit/2892b22c1ae8a70dec3448d82e634643fe6c1be2)) + +## [2.12.2](https://github.com/googleapis/gax-go/compare/v2.12.1...v2.12.2) (2024-02-23) + + +### Bug Fixes + +* **v2/callctx:** fix SetHeader race by cloning header map ([#326](https://github.com/googleapis/gax-go/issues/326)) ([534311f](https://github.com/googleapis/gax-go/commit/534311f0f163d101f30657736c0e6f860e9c39dc)) + +## [2.12.1](https://github.com/googleapis/gax-go/compare/v2.12.0...v2.12.1) (2024-02-13) + + +### Bug Fixes + +* add XGoogFieldMaskHeader constant ([#321](https://github.com/googleapis/gax-go/issues/321)) ([666ee08](https://github.com/googleapis/gax-go/commit/666ee08931041b7fed56bed7132649785b2d3dfe)) + +## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26) + + +### Features + +* **v2/callctx:** add new callctx package ([#291](https://github.com/googleapis/gax-go/issues/291)) 
([11503ed](https://github.com/googleapis/gax-go/commit/11503ed98df4ae1bbdedf91ff64d47e63f187d68)) +* **v2:** add BuildHeaders and InsertMetadataIntoOutgoingContext to header ([#290](https://github.com/googleapis/gax-go/issues/290)) ([6a4b89f](https://github.com/googleapis/gax-go/commit/6a4b89f5551a40262e7c3caf2e1bdc7321b76ea1)) + +## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) + + +### Features + +* **v2:** add GoVersion package variable ([#283](https://github.com/googleapis/gax-go/issues/283)) ([26553cc](https://github.com/googleapis/gax-go/commit/26553ccadb4016b189881f52e6c253b68bb3e3d5)) + + +### Bug Fixes + +* **v2:** handle space in non-devel go version ([#288](https://github.com/googleapis/gax-go/issues/288)) ([fd7bca0](https://github.com/googleapis/gax-go/commit/fd7bca029a1c5e63def8f0a5fd1ec3f725d92f75)) + +## [2.10.0](https://github.com/googleapis/gax-go/compare/v2.9.1...v2.10.0) (2023-05-30) + + +### Features + +* update dependencies ([#280](https://github.com/googleapis/gax-go/issues/280)) ([4514281](https://github.com/googleapis/gax-go/commit/4514281058590f3637c36bfd49baa65c4d3cfb21)) + +## [2.9.1](https://github.com/googleapis/gax-go/compare/v2.9.0...v2.9.1) (2023-05-23) + + +### Bug Fixes + +* **v2:** drop cloud lro test dep ([#276](https://github.com/googleapis/gax-go/issues/276)) ([c67eeba](https://github.com/googleapis/gax-go/commit/c67eeba0f10a3294b1d93c1b8fbe40211a55ae5f)), refs [#270](https://github.com/googleapis/gax-go/issues/270) + +## [2.9.0](https://github.com/googleapis/gax-go/compare/v2.8.0...v2.9.0) (2023-05-22) + + +### Features + +* **apierror:** add method to return HTTP status code conditionally ([#274](https://github.com/googleapis/gax-go/issues/274)) ([5874431](https://github.com/googleapis/gax-go/commit/587443169acd10f7f86d1989dc8aaf189e645e98)), refs [#229](https://github.com/googleapis/gax-go/issues/229) + + +### Documentation + +* add ref to usage with clients 
([#272](https://github.com/googleapis/gax-go/issues/272)) ([ea4d72d](https://github.com/googleapis/gax-go/commit/ea4d72d514beba4de450868b5fb028601a29164e)), refs [#228](https://github.com/googleapis/gax-go/issues/228) + +## [2.8.0](https://github.com/googleapis/gax-go/compare/v2.7.1...v2.8.0) (2023-03-15) + + +### Features + +* **v2:** add WithTimeout option ([#259](https://github.com/googleapis/gax-go/issues/259)) ([9a8da43](https://github.com/googleapis/gax-go/commit/9a8da43693002448b1e8758023699387481866d1)) + +## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06) + + +### Bug Fixes + +* **v2/apierror:** return Unknown GRPCStatus when err source is HTTP ([#260](https://github.com/googleapis/gax-go/issues/260)) ([043b734](https://github.com/googleapis/gax-go/commit/043b73437a240a91229207fb3ee52a9935a36f23)), refs [#254](https://github.com/googleapis/gax-go/issues/254) + +## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) + + +### Features + +* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6)) +* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f)) + +## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) + + +### Features + +* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3)) + +## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04) + + +### Bug Fixes + +* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) 
([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23)) + +## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04) + + +### Features + +* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb)) + +## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09) + + +### Features + +* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c)) + + +### Bug Fixes + +* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e)) + + +### Miscellaneous Chores + +* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719)) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go new file mode 100644 index 0000000000..cba5919195 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -0,0 +1,413 @@ +// Copyright 2021, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package apierror implements a wrapper error for parsing error details from +// API calls. Both HTTP & gRPC status errors are supported. +// +// For examples of how to use [APIError] with client libraries please reference +// [Inspecting errors](https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors) +// in the client library documentation. 
+package apierror + +import ( + "errors" + "fmt" + "net/http" + "strings" + + jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto" + "google.golang.org/api/googleapi" + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +// canonicalMap maps HTTP codes to gRPC status code equivalents. +var canonicalMap = map[int]codes.Code{ + http.StatusOK: codes.OK, + http.StatusBadRequest: codes.InvalidArgument, + http.StatusForbidden: codes.PermissionDenied, + http.StatusNotFound: codes.NotFound, + http.StatusConflict: codes.Aborted, + http.StatusRequestedRangeNotSatisfiable: codes.OutOfRange, + http.StatusTooManyRequests: codes.ResourceExhausted, + http.StatusGatewayTimeout: codes.DeadlineExceeded, + http.StatusNotImplemented: codes.Unimplemented, + http.StatusServiceUnavailable: codes.Unavailable, + http.StatusUnauthorized: codes.Unauthenticated, +} + +// toCode maps an http code to the most correct equivalent. +func toCode(httpCode int) codes.Code { + if sCode, ok := canonicalMap[httpCode]; ok { + return sCode + } + switch { + case httpCode >= 200 && httpCode < 300: + return codes.OK + + case httpCode >= 400 && httpCode < 500: + return codes.FailedPrecondition + + case httpCode >= 500 && httpCode < 600: + return codes.Internal + } + return codes.Unknown +} + +// ErrDetails holds the google/rpc/error_details.proto messages. +type ErrDetails struct { + ErrorInfo *errdetails.ErrorInfo + BadRequest *errdetails.BadRequest + PreconditionFailure *errdetails.PreconditionFailure + QuotaFailure *errdetails.QuotaFailure + RetryInfo *errdetails.RetryInfo + ResourceInfo *errdetails.ResourceInfo + RequestInfo *errdetails.RequestInfo + DebugInfo *errdetails.DebugInfo + Help *errdetails.Help + LocalizedMessage *errdetails.LocalizedMessage + + // Unknown stores unidentifiable error details. 
+ Unknown []interface{} +} + +// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages. +var ErrMessageNotFound = errors.New("message not found") + +// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the +// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type, +// the content of the message is copied to the provided message. +// +// ExtractProtoMessage will return ErrMessageNotFound if there are no message matching the +// protocol buffer type of the provided message. +func (e ErrDetails) ExtractProtoMessage(v proto.Message) error { + if v == nil { + return ErrMessageNotFound + } + for _, elem := range e.Unknown { + if elemProto, ok := elem.(proto.Message); ok { + if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() { + proto.Merge(v, elemProto) + return nil + } + } + } + return ErrMessageNotFound +} + +func (e ErrDetails) String() string { + var d strings.Builder + if e.ErrorInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = ErrorInfo reason = %s domain = %s metadata = %s\n", + e.ErrorInfo.GetReason(), e.ErrorInfo.GetDomain(), e.ErrorInfo.GetMetadata())) + } + + if e.BadRequest != nil { + v := e.BadRequest.GetFieldViolations() + var f []string + var desc []string + for _, x := range v { + f = append(f, x.GetField()) + desc = append(desc, x.GetDescription()) + } + d.WriteString(fmt.Sprintf("error details: name = BadRequest field = %s desc = %s\n", + strings.Join(f, " "), strings.Join(desc, " "))) + } + + if e.PreconditionFailure != nil { + v := e.PreconditionFailure.GetViolations() + var t []string + var s []string + var desc []string + for _, x := range v { + t = append(t, x.GetType()) + s = append(s, x.GetSubject()) + desc = append(desc, x.GetDescription()) + } + d.WriteString(fmt.Sprintf("error details: name = PreconditionFailure type = %s subj = %s desc = %s\n", strings.Join(t, " "), + strings.Join(s, " "), strings.Join(desc, " "))) + } + + if 
e.QuotaFailure != nil { + v := e.QuotaFailure.GetViolations() + var s []string + var desc []string + for _, x := range v { + s = append(s, x.GetSubject()) + desc = append(desc, x.GetDescription()) + } + d.WriteString(fmt.Sprintf("error details: name = QuotaFailure subj = %s desc = %s\n", + strings.Join(s, " "), strings.Join(desc, " "))) + } + + if e.RequestInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = RequestInfo id = %s data = %s\n", + e.RequestInfo.GetRequestId(), e.RequestInfo.GetServingData())) + } + + if e.ResourceInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = ResourceInfo type = %s resourcename = %s owner = %s desc = %s\n", + e.ResourceInfo.GetResourceType(), e.ResourceInfo.GetResourceName(), + e.ResourceInfo.GetOwner(), e.ResourceInfo.GetDescription())) + + } + if e.RetryInfo != nil { + d.WriteString(fmt.Sprintf("error details: retry in %s\n", e.RetryInfo.GetRetryDelay().AsDuration())) + + } + if e.Unknown != nil { + var s []string + for _, x := range e.Unknown { + s = append(s, fmt.Sprintf("%v", x)) + } + d.WriteString(fmt.Sprintf("error details: name = Unknown desc = %s\n", strings.Join(s, " "))) + } + + if e.DebugInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = DebugInfo detail = %s stack = %s\n", e.DebugInfo.GetDetail(), + strings.Join(e.DebugInfo.GetStackEntries(), " "))) + } + if e.Help != nil { + var desc []string + var url []string + for _, x := range e.Help.Links { + desc = append(desc, x.GetDescription()) + url = append(url, x.GetUrl()) + } + d.WriteString(fmt.Sprintf("error details: name = Help desc = %s url = %s\n", + strings.Join(desc, " "), strings.Join(url, " "))) + } + if e.LocalizedMessage != nil { + d.WriteString(fmt.Sprintf("error details: name = LocalizedMessage locale = %s msg = %s\n", + e.LocalizedMessage.GetLocale(), e.LocalizedMessage.GetMessage())) + } + + return d.String() +} + +// APIError wraps either a gRPC Status error or a HTTP googleapi.Error. 
It +// implements error and Status interfaces. +type APIError struct { + err error + status *status.Status + httpErr *googleapi.Error + details ErrDetails +} + +// Details presents the error details of the APIError. +func (a *APIError) Details() ErrDetails { + return a.details +} + +// Unwrap extracts the original error. +func (a *APIError) Unwrap() error { + return a.err +} + +// Error returns a readable representation of the APIError. +func (a *APIError) Error() string { + var msg string + if a.httpErr != nil { + // Truncate the googleapi.Error message because it dumps the Details in + // an ugly way. + msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) + } else if a.status != nil && a.err != nil { + msg = a.err.Error() + } else if a.status != nil { + msg = a.status.Message() + } + return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) +} + +// Message returns the original, unformatted error message from the underlying +// googleapi.Error or gRPC Status, without additional details or context. +func (a *APIError) Message() string { + if a.httpErr != nil { + return a.httpErr.Message + } else if a.status != nil { + return a.status.Message() + } + return "" +} + +// GRPCStatus extracts the underlying gRPC Status error. +// This method is necessary to fulfill the interface +// described in https://pkg.go.dev/google.golang.org/grpc/status#FromError. +// +// For errors that originated as an HTTP-based googleapi.Error, GRPCStatus() +// returns a status that attempts to map from the original HTTP code to an +// equivalent gRPC status code. For use cases where you want to avoid this +// behavior, error unwrapping can be used. +func (a *APIError) GRPCStatus() *status.Status { + return a.status +} + +// Reason returns the reason in an ErrorInfo. +// If ErrorInfo is nil, it returns an empty string. +func (a *APIError) Reason() string { + return a.details.ErrorInfo.GetReason() +} + +// Domain returns the domain in an ErrorInfo. 
+// If ErrorInfo is nil, it returns an empty string. +func (a *APIError) Domain() string { + return a.details.ErrorInfo.GetDomain() +} + +// Metadata returns the metadata in an ErrorInfo. +// If ErrorInfo is nil, it returns nil. +func (a *APIError) Metadata() map[string]string { + return a.details.ErrorInfo.GetMetadata() + +} + +// setDetailsFromError parses a Status error or a googleapi.Error +// and sets status and details or httpErr and details, respectively. +// It returns false if neither Status nor googleapi.Error can be parsed. +// +// When err is a googleapi.Error, the status of the returned error will be +// mapped to the closest equivalent gGRPC status code. +func (a *APIError) setDetailsFromError(err error) bool { + st, isStatus := status.FromError(err) + var herr *googleapi.Error + isHTTPErr := errors.As(err, &herr) + + switch { + case isStatus: + a.status = st + a.details = parseDetails(st.Details()) + case isHTTPErr: + a.httpErr = herr + a.details = parseHTTPDetails(herr) + a.status = status.New(toCode(a.httpErr.Code), herr.Message) + default: + return false + } + return true +} + +// FromError parses a Status error or a googleapi.Error and builds an +// APIError, wrapping the provided error in the new APIError. It +// returns false if neither Status nor googleapi.Error can be parsed. +func FromError(err error) (*APIError, bool) { + return ParseError(err, true) +} + +// ParseError parses a Status error or a googleapi.Error and builds an +// APIError. If wrap is true, it wraps the error in the new APIError. +// It returns false if neither Status nor googleapi.Error can be parsed. 
+func ParseError(err error, wrap bool) (*APIError, bool) { + if err == nil { + return nil, false + } + ae := APIError{} + if wrap { + ae = APIError{err: err} + } + if !ae.setDetailsFromError(err) { + return nil, false + } + return &ae, true +} + +// parseDetails accepts a slice of interface{} that should be backed by some +// sort of proto.Message that can be cast to the google/rpc/error_details.proto +// types. +// +// This is for internal use only. +func parseDetails(details []interface{}) ErrDetails { + var ed ErrDetails + for _, d := range details { + switch d := d.(type) { + case *errdetails.ErrorInfo: + ed.ErrorInfo = d + case *errdetails.BadRequest: + ed.BadRequest = d + case *errdetails.PreconditionFailure: + ed.PreconditionFailure = d + case *errdetails.QuotaFailure: + ed.QuotaFailure = d + case *errdetails.RetryInfo: + ed.RetryInfo = d + case *errdetails.ResourceInfo: + ed.ResourceInfo = d + case *errdetails.RequestInfo: + ed.RequestInfo = d + case *errdetails.DebugInfo: + ed.DebugInfo = d + case *errdetails.Help: + ed.Help = d + case *errdetails.LocalizedMessage: + ed.LocalizedMessage = d + default: + ed.Unknown = append(ed.Unknown, d) + } + } + + return ed +} + +// parseHTTPDetails will convert the given googleapi.Error into the protobuf +// representation then parse the Any values that contain the error details. +// +// This is for internal use only. +func parseHTTPDetails(gae *googleapi.Error) ErrDetails { + e := &jsonerror.Error{} + if err := protojson.Unmarshal([]byte(gae.Body), e); err != nil { + // If the error body does not conform to the error schema, ignore it + // altogther. See https://cloud.google.com/apis/design/errors#http_mapping. + return ErrDetails{} + } + + // Coerce the Any messages into proto.Message then parse the details. + details := []interface{}{} + for _, any := range e.GetError().GetDetails() { + m, err := any.UnmarshalNew() + if err != nil { + // Ignore malformed Any values. 
+ continue + } + details = append(details, m) + } + + return parseDetails(details) +} + +// HTTPCode returns the underlying HTTP response status code. This method returns +// `-1` if the underlying error is a [google.golang.org/grpc/status.Status]. To +// check gRPC error codes use [google.golang.org/grpc/status.Code]. +func (a *APIError) HTTPCode() int { + if a.httpErr == nil { + return -1 + } + return a.httpErr.Code +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md new file mode 100644 index 0000000000..9ff0caea94 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md @@ -0,0 +1,30 @@ +# HTTP JSON Error Schema + +The `error.proto` represents the HTTP-JSON schema used by Google APIs to convey +error payloads as described by https://cloud.google.com/apis/design/errors#http_mapping. +This package is for internal parsing logic only and should not be used in any +other context. + +## Regeneration + +To regenerate the protobuf Go code you will need the following: + +* A local copy of [googleapis], the absolute path to which should be exported to +the environment variable `GOOGLEAPIS` +* The protobuf compiler [protoc] +* The Go [protobuf plugin] +* The [goimports] tool + +From this directory run the following command: +```sh +protoc -I $GOOGLEAPIS -I. --go_out=. --go_opt=module=github.com/googleapis/gax-go/v2/apierror/internal/proto error.proto +goimports -w . +``` + +Note: the `module` plugin option ensures the generated code is placed in this +directory, and not in several nested directories defined by `go_package` option. 
+ +[googleapis]: https://github.com/googleapis/googleapis +[protoc]: https://github.com/protocolbuffers/protobuf#protocol-compiler-installation +[protobuf plugin]: https://developers.google.com/protocol-buffers/docs/reference/go-generated +[goimports]: https://pkg.go.dev/golang.org/x/tools/cmd/goimports \ No newline at end of file diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go new file mode 100644 index 0000000000..90639e66ae --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go @@ -0,0 +1,226 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc v6.30.2 +// source: custom_error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Error code for `CustomError`. +type CustomError_CustomErrorCode int32 + +const ( + // Default error. 
+ CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0 + // Too many foo. + CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1 + // Not enough foo. + CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2 + // Catastrophic error. + CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3 +) + +// Enum value maps for CustomError_CustomErrorCode. +var ( + CustomError_CustomErrorCode_name = map[int32]string{ + 0: "CUSTOM_ERROR_CODE_UNSPECIFIED", + 1: "TOO_MANY_FOO", + 2: "NOT_ENOUGH_FOO", + 3: "UNIVERSE_WAS_DESTROYED", + } + CustomError_CustomErrorCode_value = map[string]int32{ + "CUSTOM_ERROR_CODE_UNSPECIFIED": 0, + "TOO_MANY_FOO": 1, + "NOT_ENOUGH_FOO": 2, + "UNIVERSE_WAS_DESTROYED": 3, + } +) + +func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode { + p := new(CustomError_CustomErrorCode) + *p = x + return p +} + +func (x CustomError_CustomErrorCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor { + return file_custom_error_proto_enumTypes[0].Descriptor() +} + +func (CustomError_CustomErrorCode) Type() protoreflect.EnumType { + return &file_custom_error_proto_enumTypes[0] +} + +func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead. +func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0, 0} +} + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +type CustomError struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Error code specific to the custom API being invoked. 
+ Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"` + // Name of the failed entity. + Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` + // Message that describes the error. + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CustomError) Reset() { + *x = CustomError{} + mi := &file_custom_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CustomError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomError) ProtoMessage() {} + +func (x *CustomError) ProtoReflect() protoreflect.Message { + mi := &file_custom_error_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomError.ProtoReflect.Descriptor instead. 
+func (*CustomError) Descriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0} +} + +func (x *CustomError) GetCode() CustomError_CustomErrorCode { + if x != nil { + return x.Code + } + return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED +} + +func (x *CustomError) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *CustomError) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_custom_error_proto protoreflect.FileDescriptor + +const file_custom_error_proto_rawDesc = "" + + "\n" + + "\x12custom_error.proto\x12\x05error\"\xfa\x01\n" + + "\vCustomError\x126\n" + + "\x04code\x18\x01 \x01(\x0e2\".error.CustomError.CustomErrorCodeR\x04code\x12\x16\n" + + "\x06entity\x18\x02 \x01(\tR\x06entity\x12#\n" + + "\rerror_message\x18\x03 \x01(\tR\ferrorMessage\"v\n" + + "\x0fCustomErrorCode\x12!\n" + + "\x1dCUSTOM_ERROR_CODE_UNSPECIFIED\x10\x00\x12\x10\n" + + "\fTOO_MANY_FOO\x10\x01\x12\x12\n" + + "\x0eNOT_ENOUGH_FOO\x10\x02\x12\x1a\n" + + "\x16UNIVERSE_WAS_DESTROYED\x10\x03BCZAgithub.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerrorb\x06proto3" + +var ( + file_custom_error_proto_rawDescOnce sync.Once + file_custom_error_proto_rawDescData []byte +) + +func file_custom_error_proto_rawDescGZIP() []byte { + file_custom_error_proto_rawDescOnce.Do(func() { + file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_custom_error_proto_rawDesc), len(file_custom_error_proto_rawDesc))) + }) + return file_custom_error_proto_rawDescData +} + +var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_custom_error_proto_goTypes = []any{ + (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode + (*CustomError)(nil), // 1: error.CustomError +} +var file_custom_error_proto_depIdxs = []int32{ + 0, // 0: 
error.CustomError.code:type_name -> error.CustomError.CustomErrorCode + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_custom_error_proto_init() } +func file_custom_error_proto_init() { + if File_custom_error_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_custom_error_proto_rawDesc), len(file_custom_error_proto_rawDesc)), + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_custom_error_proto_goTypes, + DependencyIndexes: file_custom_error_proto_depIdxs, + EnumInfos: file_custom_error_proto_enumTypes, + MessageInfos: file_custom_error_proto_msgTypes, + }.Build() + File_custom_error_proto = out.File + file_custom_error_proto_goTypes = nil + file_custom_error_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto new file mode 100644 index 0000000000..21678ae65c --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto @@ -0,0 +1,50 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package error; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +message CustomError { + + // Error code for `CustomError`. + enum CustomErrorCode { + // Default error. + CUSTOM_ERROR_CODE_UNSPECIFIED = 0; + + // Too many foo. + TOO_MANY_FOO = 1; + + // Not enough foo. + NOT_ENOUGH_FOO = 2; + + // Catastrophic error. + UNIVERSE_WAS_DESTROYED = 3; + + } + + // Error code specific to the custom API being invoked. + CustomErrorCode code = 1; + + // Name of the failed entity. + string entity = 2; + + // Message that describes the error. + string error_message = 3; +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go new file mode 100644 index 0000000000..1a29ff5c89 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go @@ -0,0 +1,232 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.11 +// protoc v6.30.2 +// source: error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + code "google.golang.org/genproto/googleapis/rpc/code" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The error format v2 for Google JSON REST APIs. +// Copied from https://cloud.google.com/apis/design/errors#http_mapping. +// +// NOTE: This schema is not used for other wire protocols. +type Error struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The actual error payload. The nested message structure is for backward + // compatibility with Google API client libraries. It also makes the error + // more readable to developers. + Error *Error_Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Error) Reset() { + *x = Error{} + mi := &file_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Error) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Error) ProtoMessage() {} + +func (x *Error) ProtoReflect() protoreflect.Message { + mi := &file_error_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Error.ProtoReflect.Descriptor instead. 
+func (*Error) Descriptor() ([]byte, []int) { + return file_error_proto_rawDescGZIP(), []int{0} +} + +func (x *Error) GetError() *Error_Status { + if x != nil { + return x.Error + } + return nil +} + +// This message has the same semantics as `google.rpc.Status`. It uses HTTP +// status code instead of gRPC status code. It has an extra field `status` +// for backward compatibility with Google API Client Libraries. +type Error_Status struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The HTTP status code that corresponds to `google.rpc.Status.code`. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // This corresponds to `google.rpc.Status.message`. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // This is the enum version for `google.rpc.Status.code`. + Status code.Code `protobuf:"varint,4,opt,name=status,proto3,enum=google.rpc.Code" json:"status,omitempty"` + // This corresponds to `google.rpc.Status.details`. + Details []*anypb.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Error_Status) Reset() { + *x = Error_Status{} + mi := &file_error_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Error_Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Error_Status) ProtoMessage() {} + +func (x *Error_Status) ProtoReflect() protoreflect.Message { + mi := &file_error_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Error_Status.ProtoReflect.Descriptor instead. 
+func (*Error_Status) Descriptor() ([]byte, []int) { + return file_error_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Error_Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Error_Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Error_Status) GetStatus() code.Code { + if x != nil { + return x.Status + } + return code.Code(0) +} + +func (x *Error_Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_error_proto protoreflect.FileDescriptor + +const file_error_proto_rawDesc = "" + + "\n" + + "\verror.proto\x12\x05error\x1a\x19google/protobuf/any.proto\x1a\x15google/rpc/code.proto\"\xc5\x01\n" + + "\x05Error\x12)\n" + + "\x05error\x18\x01 \x01(\v2\x13.error.Error.StatusR\x05error\x1a\x90\x01\n" + + "\x06Status\x12\x12\n" + + "\x04code\x18\x01 \x01(\x05R\x04code\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\x12(\n" + + "\x06status\x18\x04 \x01(\x0e2\x10.google.rpc.CodeR\x06status\x12.\n" + + "\adetails\x18\x05 \x03(\v2\x14.google.protobuf.AnyR\adetailsBCZAgithub.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerrorb\x06proto3" + +var ( + file_error_proto_rawDescOnce sync.Once + file_error_proto_rawDescData []byte +) + +func file_error_proto_rawDescGZIP() []byte { + file_error_proto_rawDescOnce.Do(func() { + file_error_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_error_proto_rawDesc), len(file_error_proto_rawDesc))) + }) + return file_error_proto_rawDescData +} + +var file_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_error_proto_goTypes = []any{ + (*Error)(nil), // 0: error.Error + (*Error_Status)(nil), // 1: error.Error.Status + (code.Code)(0), // 2: google.rpc.Code + (*anypb.Any)(nil), // 3: google.protobuf.Any +} +var file_error_proto_depIdxs = []int32{ + 1, // 0: error.Error.error:type_name -> error.Error.Status + 2, // 1: error.Error.Status.status:type_name -> 
google.rpc.Code + 3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_error_proto_init() } +func file_error_proto_init() { + if File_error_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_error_proto_rawDesc), len(file_error_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_error_proto_goTypes, + DependencyIndexes: file_error_proto_depIdxs, + MessageInfos: file_error_proto_msgTypes, + }.Build() + File_error_proto = out.File + file_error_proto_goTypes = nil + file_error_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto new file mode 100644 index 0000000000..4b9b13ce11 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto @@ -0,0 +1,46 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package error; + +import "google/protobuf/any.proto"; +import "google/rpc/code.proto"; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + +// The error format v2 for Google JSON REST APIs. +// Copied from https://cloud.google.com/apis/design/errors#http_mapping. +// +// NOTE: This schema is not used for other wire protocols. +message Error { + // This message has the same semantics as `google.rpc.Status`. It uses HTTP + // status code instead of gRPC status code. It has an extra field `status` + // for backward compatibility with Google API Client Libraries. + message Status { + // The HTTP status code that corresponds to `google.rpc.Status.code`. + int32 code = 1; + // This corresponds to `google.rpc.Status.message`. + string message = 2; + // This is the enum version for `google.rpc.Status.code`. + google.rpc.Code status = 4; + // This corresponds to `google.rpc.Status.details`. + repeated google.protobuf.Any details = 5; + } + // The actual error payload. The nested message structure is for backward + // compatibility with Google API client libraries. It also makes the error + // more readable to developers. + Status error = 1; +} diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go new file mode 100644 index 0000000000..8f5948eb3e --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -0,0 +1,288 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "errors" + "math/rand" + "time" + + "google.golang.org/api/googleapi" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// CallOption is an option used by Invoke to control behaviors of RPC calls. +// CallOption works by modifying relevant fields of CallSettings. +type CallOption interface { + // Resolve applies the option by modifying cs. + Resolve(cs *CallSettings) +} + +// Retryer is used by Invoke to determine retry behavior. +type Retryer interface { + // Retry reports whether a request should be retried and how long to pause before retrying + // if the previous attempt returned with err. Invoke never calls Retry with nil error. 
+ Retry(err error) (pause time.Duration, shouldRetry bool) +} + +type retryerOption func() Retryer + +func (o retryerOption) Resolve(s *CallSettings) { + s.Retry = o +} + +// WithRetry sets CallSettings.Retry to fn. +func WithRetry(fn func() Retryer) CallOption { + return retryerOption(fn) +} + +// OnErrorFunc returns a Retryer that retries if and only if the previous attempt +// returns an error that satisfies shouldRetry. +// +// Pause times between retries are specified by bo. bo is only used for its +// parameters; each Retryer has its own copy. +func OnErrorFunc(bo Backoff, shouldRetry func(err error) bool) Retryer { + return &errorRetryer{ + shouldRetry: shouldRetry, + backoff: bo, + } +} + +type errorRetryer struct { + backoff Backoff + shouldRetry func(err error) bool +} + +func (r *errorRetryer) Retry(err error) (time.Duration, bool) { + if r.shouldRetry(err) { + return r.backoff.Pause(), true + } + + return 0, false +} + +// OnCodes returns a Retryer that retries if and only if +// the previous attempt returns a GRPC error whose error code is stored in cc. +// Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. +func OnCodes(cc []codes.Code, bo Backoff) Retryer { + return &boRetryer{ + backoff: bo, + codes: append([]codes.Code(nil), cc...), + } +} + +type boRetryer struct { + backoff Backoff + codes []codes.Code +} + +func (r *boRetryer) Retry(err error) (time.Duration, bool) { + st, ok := status.FromError(err) + if !ok { + return 0, false + } + c := st.Code() + for _, rc := range r.codes { + if c == rc { + return r.backoff.Pause(), true + } + } + return 0, false +} + +// OnHTTPCodes returns a Retryer that retries if and only if +// the previous attempt returns a googleapi.Error whose status code is stored in +// cc. Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. 
+func OnHTTPCodes(bo Backoff, cc ...int) Retryer { + codes := make(map[int]bool, len(cc)) + for _, c := range cc { + codes[c] = true + } + + return &httpRetryer{ + backoff: bo, + codes: codes, + } +} + +type httpRetryer struct { + backoff Backoff + codes map[int]bool +} + +func (r *httpRetryer) Retry(err error) (time.Duration, bool) { + var gerr *googleapi.Error + if !errors.As(err, &gerr) { + return 0, false + } + + if r.codes[gerr.Code] { + return r.backoff.Pause(), true + } + + return 0, false +} + +// Backoff implements backoff logic for retries. The configuration for retries +// is described in https://google.aip.dev/client-libraries/4221. The current +// retry limit starts at Initial and increases by a factor of Multiplier every +// retry, but is capped at Max. The actual wait time between retries is a +// random value between 1ns and the current retry limit. The purpose of this +// random jitter is explained in +// https://www.awsarchitectureblog.com/2015/03/backoff.html. +// +// Note: MaxNumRetries / RPCDeadline is specifically not provided. These should +// be built on top of Backoff. +type Backoff struct { + // Initial is the initial value of the retry period, defaults to 1 second. + Initial time.Duration + + // Max is the maximum value of the retry period, defaults to 30 seconds. + Max time.Duration + + // Multiplier is the factor by which the retry period increases. + // It should be greater than 1 and defaults to 2. + Multiplier float64 + + // cur is the current retry period. + cur time.Duration +} + +// Pause returns the next time.Duration that the caller should use to backoff. +func (bo *Backoff) Pause() time.Duration { + if bo.Initial == 0 { + bo.Initial = time.Second + } + if bo.cur == 0 { + bo.cur = bo.Initial + } + if bo.Max == 0 { + bo.Max = 30 * time.Second + } + if bo.Multiplier < 1 { + bo.Multiplier = 2 + } + // Select a duration between 1ns and the current max. 
It might seem + // counterintuitive to have so much jitter, but + // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that + // that is the best strategy. + d := time.Duration(1 + rand.Int63n(int64(bo.cur))) + bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) + if bo.cur > bo.Max { + bo.cur = bo.Max + } + return d +} + +type grpcOpt []grpc.CallOption + +func (o grpcOpt) Resolve(s *CallSettings) { + s.GRPC = o +} + +type pathOpt struct { + p string +} + +func (p pathOpt) Resolve(s *CallSettings) { + s.Path = p.p +} + +type timeoutOpt struct { + t time.Duration +} + +func (t timeoutOpt) Resolve(s *CallSettings) { + s.timeout = t.t +} + +// WithPath applies a Path override to the HTTP-based APICall. +// +// This is for internal use only. +func WithPath(p string) CallOption { + return &pathOpt{p: p} +} + +// WithGRPCOptions allows passing gRPC call options during client creation. +func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) +} + +// WithTimeout is a convenience option for setting a context.WithTimeout on the +// singular context.Context used for **all** APICall attempts. Calculated from +// the start of the first APICall attempt. +// If the context.Context provided to Invoke already has a Deadline set, that +// will always be respected over the deadline calculated using this option. +func WithTimeout(t time.Duration) CallOption { + return &timeoutOpt{t: t} +} + +type clientMetricsOpt struct { + cm *ClientMetrics +} + +// Resolve applies the ClientMetrics to the CallSettings. +func (o clientMetricsOpt) Resolve(s *CallSettings) { + s.clientMetrics = o.cm +} + +// WithClientMetrics applies metrics instrumentation to the CallSettings. +// +// This is for internal use only. +func WithClientMetrics(cm *ClientMetrics) CallOption { + return clientMetricsOpt{cm: cm} +} + +// CallSettings allow fine-grained control over how calls are made. 
+type CallSettings struct { + // Retry returns a Retryer to be used to control retry logic of a method call. + // If Retry is nil or the returned Retryer is nil, the call will not be retried. + Retry func() Retryer + + // CallOptions to be forwarded to GRPC. + GRPC []grpc.CallOption + + // Path is an HTTP override for an APICall. + Path string + + // Timeout defines the amount of time that Invoke has to complete. + // Unexported so it cannot be changed by the code in an APICall. + timeout time.Duration + + // clientMetrics holds the pre-allocated OpenTelemetry metrics instruments + // to use for this call. + clientMetrics *ClientMetrics +} diff --git a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go new file mode 100644 index 0000000000..3d7be5f2aa --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go @@ -0,0 +1,146 @@ +// Copyright 2023, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package callctx provides helpers for storing and retrieving values out of +// [context.Context]. These values are used by our client libraries in various +// ways across the stack. +package callctx + +import ( + "context" + "fmt" + "log/slog" +) + +const ( + // XGoogFieldMaskHeader is the canonical header key for the [System Parameter] + // that specifies the response read mask. The value(s) for this header + // must adhere to format described in [fieldmaskpb]. + // + // [System Parameter]: https://cloud.google.com/apis/docs/system-parameters + // [fieldmaskpb]: https://google.golang.org/protobuf/types/known/fieldmaskpb + XGoogFieldMaskHeader = "x-goog-fieldmask" + + headerKey = contextKey("header") +) + +// contextKey is a private type used to store/retrieve context values. +type contextKey string + +// HeadersFromContext retrieves headers set from [SetHeaders]. These headers +// can then be cast to http.Header or metadata.MD to send along on requests. +func HeadersFromContext(ctx context.Context) map[string][]string { + m, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + return nil + } + return m +} + +// SetHeaders stores key value pairs in the returned context that can later +// be retrieved by [HeadersFromContext]. Values stored in this manner will +// automatically be retrieved by client libraries and sent as outgoing headers +// on all requests. 
keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. +func SetHeaders(ctx context.Context, keyvals ...string) context.Context { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("callctx: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + h, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + h = make(map[string][]string) + } else { + h = cloneHeaders(h) + } + + for i := 0; i < len(keyvals); i = i + 2 { + h[keyvals[i]] = append(h[keyvals[i]], keyvals[i+1]) + } + return context.WithValue(ctx, headerKey, h) +} + +// cloneHeaders makes a new key-value map while reusing the value slices. +// As such, new values should be appended to the value slice, and modifying +// indexed values is not thread safe. +// +// TODO: Replace this with maps.Clone when Go 1.21 is the minimum version. +func cloneHeaders(h map[string][]string) map[string][]string { + c := make(map[string][]string, len(h)) + for k, v := range h { + vc := make([]string, len(v)) + copy(vc, v) + c[k] = vc + } + return c +} + +// telemetryKey is a private type used to store/retrieve telemetry context values. +type telemetryKey string + +// WithTelemetryContext injects telemetry attribute values (like resource name +// or client version) into the context. In accordance with standard Go context +// guidelines, this should only be used for data that transits processes and APIs, +// and not for passing optional parameters to functions. keyvals should have a +// corresponding value for every key provided. If there is an odd number of keyvals +// this method will panic. 
+func WithTelemetryContext(ctx context.Context, keyvals ...string) context.Context { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("callctx: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + + for i := 0; i < len(keyvals); i = i + 2 { + ctx = context.WithValue(ctx, telemetryKey(keyvals[i]), keyvals[i+1]) + } + return ctx +} + +// TelemetryFromContext extracts a telemetry attribute value from the context. +// The returned bool indicates a successful typecast of the value to a string. +func TelemetryFromContext(ctx context.Context, key string) (string, bool) { + val, ok := ctx.Value(telemetryKey(key)).(string) + return val, ok +} + +// loggerKey is a private type used to store/retrieve the logger context value. +type loggerContextKey string + +const loggerCKey = loggerContextKey("logger") + +// WithLoggerContext injects a slog.Logger into the context. This logger will +// be extracted by the client library or transport wrappers to emit logs. +func WithLoggerContext(ctx context.Context, logger *slog.Logger) context.Context { + return context.WithValue(ctx, loggerCKey, logger) +} + +// LoggerFromContext extracts a slog.Logger from the context. +// The returned bool indicates whether a logger was found. +func LoggerFromContext(ctx context.Context) (*slog.Logger, bool) { + logger, ok := ctx.Value(loggerCKey).(*slog.Logger) + return logger, ok +} diff --git a/vendor/github.com/googleapis/gax-go/v2/content_type.go b/vendor/github.com/googleapis/gax-go/v2/content_type.go new file mode 100644 index 0000000000..818a3b0152 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/content_type.go @@ -0,0 +1,112 @@ +// Copyright 2022, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "io" + + "net/http" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. + err error // set to any error encountered while reading bytes to be sniffed. 
+ + ctype string // set on first sniff. + sniffed bool // set to true on first sniff. +} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. + _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. + return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = io.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +} + +// DetermineContentType determines the content type of the supplied reader. +// The content of media will be sniffed to determine the content type. +// After calling DetectContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned. +func DetermineContentType(media io.Reader) (io.Reader, string) { + // For backwards compatibility, allow clients to set content + // type by providing a ContentTyper for media. + // Note: This is an anonymous interface definition copied from googleapi.ContentTyper. 
+ if typer, ok := media.(interface { + ContentType() string + }); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} diff --git a/vendor/github.com/googleapis/gax-go/v2/feature.go b/vendor/github.com/googleapis/gax-go/v2/feature.go new file mode 100644 index 0000000000..32e05a3234 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/feature.go @@ -0,0 +1,75 @@ +// Copyright 2025, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "os" + "strings" + "sync" +) + +var ( + // featureEnabledOnce caches results for IsFeatureEnabled. + featureEnabledOnce sync.Once + featureEnabledStore map[string]bool +) + +// IsFeatureEnabled checks if an experimental feature is enabled via +// environment variable. The environment variable must be prefixed with +// "GOOGLE_SDK_GO_EXPERIMENTAL_". The feature name passed to this +// function must be the suffix (e.g., "FOO" for "GOOGLE_SDK_GO_EXPERIMENTAL_FOO"). +// To enable the feature, the environment variable's value must be "true", +// case-insensitive. The result for each name is cached on the first call. +func IsFeatureEnabled(name string) bool { + featureEnabledOnce.Do(func() { + featureEnabledStore = make(map[string]bool) + for _, env := range os.Environ() { + if strings.HasPrefix(env, "GOOGLE_SDK_GO_EXPERIMENTAL_") { + // Parse "KEY=VALUE" + kv := strings.SplitN(env, "=", 2) + if len(kv) == 2 && strings.ToLower(kv[1]) == "true" { + key := strings.TrimPrefix(kv[0], "GOOGLE_SDK_GO_EXPERIMENTAL_") + featureEnabledStore[key] = true + } + } + } + }) + return featureEnabledStore[name] +} + +// TestOnlyResetIsFeatureEnabled is for testing purposes only. It resets the cached +// feature flags, allowing environment variables to be re-read on the next call to IsFeatureEnabled. 
+// This function is not thread-safe; if another goroutine reads a feature after this +// function is called but before the `featureEnabledOnce` is re-initialized by IsFeatureEnabled, +// it may see an inconsistent state. +func TestOnlyResetIsFeatureEnabled() { + featureEnabledOnce = sync.Once{} + featureEnabledStore = nil +} diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go new file mode 100644 index 0000000000..36cdfa33e3 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/gax.go @@ -0,0 +1,41 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gax contains a set of modules which aid the development of APIs +// for clients and servers based on gRPC and Google API conventions. +// +// Application code will rarely need to use this library directly. +// However, code generated automatically from API definition files can use it +// to simplify code generation and to provide more convenient and idiomatic API surfaces. +package gax + +import "github.com/googleapis/gax-go/v2/internal" + +// Version specifies the gax-go version being used. +const Version = internal.Version diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go new file mode 100644 index 0000000000..f5273985af --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -0,0 +1,200 @@ +// Copyright 2018, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "bytes" + "context" + "fmt" + "net/http" + "runtime" + "strings" + "unicode" + + "github.com/googleapis/gax-go/v2/callctx" + "google.golang.org/grpc/metadata" +) + +var ( + // GoVersion is a header-safe representation of the current runtime + // environment's Go version. This is for GAX consumers that need to + // report the Go runtime version in API calls. + GoVersion string + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +func init() { + GoVersion = goVersion() +} + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. 
+func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. + if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} + +// XGoogHeader is for use by the Google Cloud Libraries only. See package +// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving +// request/response headers. +// +// XGoogHeader formats key-value pairs. +// The resulting string is suitable for x-goog-api-client header. +func XGoogHeader(keyval ...string) string { + if len(keyval) == 0 { + return "" + } + if len(keyval)%2 != 0 { + panic("gax.Header: odd argument count") + } + var buf bytes.Buffer + for i := 0; i < len(keyval); i += 2 { + buf.WriteByte(' ') + buf.WriteString(keyval[i]) + buf.WriteByte('/') + buf.WriteString(keyval[i+1]) + } + return buf.String()[1:] +} + +// InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries +// only. See package [github.com/googleapis/gax-go/v2/callctx] for help +// setting/retrieving request/response headers. +// +// InsertMetadataIntoOutgoingContext returns a new context that merges the +// provided keyvals metadata pairs with any existing metadata/headers in the +// provided context. keyvals should have a corresponding value for every key +// provided. 
If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. +func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) context.Context { + return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...)) +} + +// BuildHeaders is for use by the Google Cloud Libraries only. See package +// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving +// request/response headers. +// +// BuildHeaders returns a new http.Header that merges the provided +// keyvals header pairs with any existing metadata/headers in the provided +// context. keyvals should have a corresponding value for every key provided. +// If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. +func BuildHeaders(ctx context.Context, keyvals ...string) http.Header { + return http.Header(insertMetadata(ctx, keyvals...)) +} + +func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("gax: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + out, ok := metadata.FromOutgoingContext(ctx) + if !ok { + out = metadata.MD(make(map[string][]string)) + } + headers := callctx.HeadersFromContext(ctx) + + // x-goog-api-client is a special case that we want to make sure gets merged + // into a single header. + const xGoogHeader = "x-goog-api-client" + var mergedXgoogHeader strings.Builder + + for k, vals := range headers { + if k == xGoogHeader { + // Merge all values for the x-goog-api-client header set on the ctx. + for _, v := range vals { + mergedXgoogHeader.WriteString(v) + mergedXgoogHeader.WriteRune(' ') + } + continue + } + out[k] = append(out[k], vals...) 
+ } + for i := 0; i < len(keyvals); i = i + 2 { + out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + + if keyvals[i] == xGoogHeader { + // Merge the x-goog-api-client header values set on the ctx with any + // values passed in for it from the client. + mergedXgoogHeader.WriteString(keyvals[i+1]) + mergedXgoogHeader.WriteRune(' ') + } + } + + // Add the x goog header back in, replacing the separate values that were set. + if mergedXgoogHeader.Len() > 0 { + out[xGoogHeader] = []string{mergedXgoogHeader.String()[:mergedXgoogHeader.Len()-1]} + } + + return out +} diff --git a/vendor/cuelang.org/go/internal/core/adt/dev.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go similarity index 56% rename from vendor/cuelang.org/go/internal/core/adt/dev.go rename to vendor/github.com/googleapis/gax-go/v2/internal/version.go index 66764d4ac2..54dbcd337a 100644 --- a/vendor/cuelang.org/go/internal/core/adt/dev.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -1,10 +1,10 @@ -// Copyright 2023 CUE Authors +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,19 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -package adt +// Code generated by gapicgen. DO NOT EDIT. -// This file contains types to help in the transition from the old to new -// evaluation model. 
+package internal -func unreachableForDev(c *OpContext) { - if c.isDevVersion() { - panic("unreachable for development version") - } -} - -type combinedFlags struct { - status vertexStatus - condition condition - mode runMode -} +// Version is the current tagged release of the library. +const Version = "2.20.0" diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go new file mode 100644 index 0000000000..7975dae367 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go @@ -0,0 +1,140 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "context" + "strconv" + "strings" + "time" + + "github.com/googleapis/gax-go/v2/apierror" + "github.com/googleapis/gax-go/v2/callctx" +) + +// APICall is a user defined call stub. +type APICall func(context.Context, CallSettings) error + +// withRetryCount returns a new context with the retry count appended to +// the telemetry context. The retry count is the number of retries that have been +// attempted. On the initial request, retry count is 0. +// On a second request (the first retry), retry count is 1. +func withRetryCount(ctx context.Context, retryCount int) context.Context { + // Add to telemetry context so it's visible to observability wrappers + return callctx.WithTelemetryContext(ctx, "resend_count", strconv.Itoa(retryCount)) +} + +// Invoke calls the given APICall, performing retries as specified by opts, if +// any. +func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { + var settings CallSettings + for _, opt := range opts { + opt.Resolve(&settings) + } + return invoke(ctx, call, settings, Sleep) +} + +// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. +// If interrupted, Sleep returns ctx.Err(). 
+func Sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +type sleeper func(ctx context.Context, d time.Duration) error + +// invoke implements Invoke, taking an additional sleeper argument for testing. +func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) (err error) { + var retryer Retryer + + // Only use the value provided via WithTimeout if the context doesn't + // already have a deadline. This is important for backwards compatibility if + // the user already set a deadline on the context given to Invoke. + if _, ok := ctx.Deadline(); !ok && settings.timeout != 0 { + c, cc := context.WithTimeout(ctx, settings.timeout) + defer cc() + ctx = c + } + + if IsFeatureEnabled("METRICS") { + start := time.Now() + defer func() { + recordMetric(ctx, settings, time.Since(start), err) + }() + } + + retryCount := 0 + // Feature gate: GOOGLE_SDK_GO_EXPERIMENTAL_TRACING=true + tracingEnabled := IsFeatureEnabled("TRACING") + for { + ctxToUse := ctx + if tracingEnabled { + ctxToUse = withRetryCount(ctx, retryCount) + } + err = call(ctxToUse, settings) + if err == nil { + return nil + } + // Never retry permanent certificate errors. (e.x. if ca-certificates + // are not installed). We should only make very few, targeted + // exceptions: many (other) status=Unavailable should be retried, such + // as if there's a network hiccup, or the internet goes out for a + // minute. This is also why here we are doing string parsing instead of + // simply making Unavailable a non-retried code elsewhere. 
+ if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") { + return err + } + if apierr, ok := apierror.FromError(err); ok { + err = apierr + } + if settings.Retry == nil { + return err + } + if retryer == nil { + if r := settings.Retry(); r != nil { + retryer = r + } else { + return err + } + } + if d, ok := retryer.Retry(err); !ok { + return err + } else if err = sp(ctx, d); err != nil { + return err + } + retryCount++ + } +} diff --git a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go new file mode 100644 index 0000000000..9b690d40c4 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go @@ -0,0 +1,127 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "encoding/json" + "errors" + "io" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var ( + arrayOpen = json.Delim('[') + arrayClose = json.Delim(']') + errBadOpening = errors.New("unexpected opening token, expected '['") +) + +// ProtoJSONStream represents a wrapper for consuming a stream of protobuf +// messages encoded using protobuf-JSON format. More information on this format +// can be found at https://developers.google.com/protocol-buffers/docs/proto3#json. +// The stream must appear as a comma-delimited, JSON array of obbjects with +// opening and closing square braces. +// +// This is for internal use only. +type ProtoJSONStream struct { + first, closed bool + reader io.ReadCloser + stream *json.Decoder + typ protoreflect.MessageType +} + +// NewProtoJSONStreamReader accepts a stream of bytes via an io.ReadCloser that are +// protobuf-JSON encoded protobuf messages of the given type. The ProtoJSONStream +// must be closed when done. +// +// This is for internal use only. +func NewProtoJSONStreamReader(rc io.ReadCloser, typ protoreflect.MessageType) *ProtoJSONStream { + return &ProtoJSONStream{ + first: true, + reader: rc, + stream: json.NewDecoder(rc), + typ: typ, + } +} + +// Recv decodes the next protobuf message in the stream or returns io.EOF if +// the stream is done. 
It is not safe to call Recv on the same stream from +// different goroutines, just like it is not safe to do so with a single gRPC +// stream. Type-cast the protobuf message returned to the type provided at +// ProtoJSONStream creation. +// Calls to Recv after calling Close will produce io.EOF. +func (s *ProtoJSONStream) Recv() (proto.Message, error) { + if s.closed { + return nil, io.EOF + } + if s.first { + s.first = false + + // Consume the opening '[' so Decode gets one object at a time. + if t, err := s.stream.Token(); err != nil { + return nil, err + } else if t != arrayOpen { + return nil, errBadOpening + } + } + + // Capture the next block of data for the item (a JSON object) in the stream. + var raw json.RawMessage + if err := s.stream.Decode(&raw); err != nil { + e := err + // To avoid checking the first token of each stream, just attempt to + // Decode the next blob and if that fails, double check if it is just + // the closing token ']'. If it is the closing, return io.EOF. If it + // isn't, return the original error. + if t, _ := s.stream.Token(); t == arrayClose { + e = io.EOF + } + return nil, e + } + + // Initialize a new instance of the protobuf message to unmarshal the + // raw data into. + m := s.typ.New().Interface() + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + err := unm.Unmarshal(raw, m) + + return m, err +} + +// Close closes the stream so that resources are cleaned up. +func (s *ProtoJSONStream) Close() error { + // Dereference the *json.Decoder so that the memory is gc'd. 
+ s.stream = nil + s.closed = true + + return s.reader.Close() +} diff --git a/vendor/github.com/googleapis/gax-go/v2/release-please-config.json b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json new file mode 100644 index 0000000000..61ee266a15 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json @@ -0,0 +1,10 @@ +{ + "release-type": "go-yoshi", + "separate-pull-requests": true, + "include-component-in-tag": false, + "packages": { + "v2": { + "component": "v2" + } + } +} diff --git a/vendor/github.com/googleapis/gax-go/v2/telemetry.go b/vendor/github.com/googleapis/gax-go/v2/telemetry.go new file mode 100644 index 0000000000..f3be81d4f6 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/telemetry.go @@ -0,0 +1,469 @@ +// Copyright 2026, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "context" + "errors" + "fmt" + "log/slog" + "strconv" + "sync" + "time" + + "github.com/googleapis/gax-go/v2/apierror" + "github.com/googleapis/gax-go/v2/callctx" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// TransportTelemetryData contains mutable telemetry information that the transport +// layer (e.g. gRPC or HTTP) populates during an RPC. This allows gax.Invoke to +// correctly emit metric data without directly importing those transport layers. +// TransportTelemetryData is experimental and may be modified or removed in future versions, +// regardless of any other documented package stability guarantees. +// It should not be used by external consumers. +type TransportTelemetryData struct { + serverAddress string + serverPort int + responseStatusCode int +} + +// SetServerAddress sets the server address. +// Experimental: This function is experimental and may be modified or removed in future versions, +// regardless of any other documented package stability guarantees. +func (d *TransportTelemetryData) SetServerAddress(addr string) { d.serverAddress = addr } + +// ServerAddress returns the server address. +// Experimental: This function is experimental and may be modified or removed in future versions, +// regardless of any other documented package stability guarantees. 
+func (d *TransportTelemetryData) ServerAddress() string { return d.serverAddress } + +// SetServerPort sets the server port. +// Experimental: This function is experimental and may be modified or removed in future versions, +// regardless of any other documented package stability guarantees. +func (d *TransportTelemetryData) SetServerPort(port int) { d.serverPort = port } + +// ServerPort returns the server port. +// Experimental: This function is experimental and may be modified or removed in future versions, +// regardless of any other documented package stability guarantees. +func (d *TransportTelemetryData) ServerPort() int { return d.serverPort } + +// SetResponseStatusCode sets the response status code. +// Experimental: This function is experimental and may be modified or removed in future versions, +// regardless of any other documented package stability guarantees. +func (d *TransportTelemetryData) SetResponseStatusCode(code int) { d.responseStatusCode = code } + +// ResponseStatusCode returns the response status code. +// Experimental: This function is experimental and may be modified or removed in future versions, +// regardless of any other documented package stability guarantees. +func (d *TransportTelemetryData) ResponseStatusCode() int { return d.responseStatusCode } + +// transportTelemetryKey is the private context key used to inject TransportTelemetryData +type transportTelemetryKey struct{} + +// InjectTransportTelemetry injects a mutable TransportTelemetryData pointer into the context. +// Experimental: This function is experimental and may be modified or removed in future versions, +// regardless of any other documented package stability guarantees. +func InjectTransportTelemetry(ctx context.Context, data *TransportTelemetryData) context.Context { + return context.WithValue(ctx, transportTelemetryKey{}, data) +} + +// ExtractTransportTelemetry retrieves a mutable TransportTelemetryData pointer from the context. 
+// It returns nil if the data is not present. +// Experimental: This function is experimental and may be modified or removed in future versions, +// regardless of any other documented package stability guarantees. +func ExtractTransportTelemetry(ctx context.Context) *TransportTelemetryData { + data, _ := ctx.Value(transportTelemetryKey{}).(*TransportTelemetryData) + return data +} + +const ( + metricName = "gcp.client.request.duration" + metricDescription = "Duration of the request to the Google Cloud API" + + // Constants for ClientMetrics configuration map keys. + // These are used by generated clients to pass attributes to the ClientMetrics option. + // Because they are used in generated code, these values must not be changed. + + // ClientService is the Google Cloud API service name. E.g. "storage". + ClientService = "client_service" + // ClientVersion is the version of the client. E.g. "1.43.0". + ClientVersion = "client_version" + // ClientArtifact is the library name. E.g. "cloud.google.com/go/storage". + ClientArtifact = "client_artifact" + // RPCSystem is the RPC system type. E.g. "grpc" or "http". + RPCSystem = "rpc_system" + // URLDomain is the nominal service domain. E.g. "storage.googleapis.com". + URLDomain = "url_domain" + + // Constants for telemetry attribute keys. + keyGCPClientService = "gcp.client.service" + keyRPCSystemName = "rpc.system.name" + keyURLDomain = "url.domain" + + // SchemaURL specifies the OpenTelemetry schema version. + schemaURL = "https://opentelemetry.io/schemas/1.39.0" +) + +// Default bucket boundaries for the duration metric in seconds. +// An exponential-ish distribution. +var defaultHistogramBoundaries = []float64{ + 0.0, 0.0001, 0.0005, 0.0010, 0.005, 0.010, 0.050, 0.100, 0.5, 1.0, 5.0, 10.0, 60.0, 300.0, 900.0, 3600.0, +} + +// ClientMetrics contains the pre-allocated OpenTelemetry instruments and attributes +// for a specific generated Google Cloud client library. 
+// There should be exactly one ClientMetrics instance instantiated per generated client. +type ClientMetrics struct { + get func() clientMetricsData +} + +type clientMetricsData struct { + duration metric.Float64Histogram + attr []attribute.KeyValue +} + +type telemetryOptions struct { + provider metric.MeterProvider + attributes map[string]string + explicitBucketBoundaries []float64 + logger *slog.Logger +} + +// TelemetryOption is an option to configure a ClientMetrics instance. +// TelemetryOption works by modifying relevant fields of telemetryOptions. +type TelemetryOption interface { + // Resolve applies the option by modifying opts. + Resolve(opts *telemetryOptions) +} + +type providerOpt struct { + p metric.MeterProvider +} + +func (p providerOpt) Resolve(opts *telemetryOptions) { + opts.provider = p.p +} + +// WithMeterProvider specifies the metric.MeterProvider to use for instruments. +func WithMeterProvider(p metric.MeterProvider) TelemetryOption { + return &providerOpt{p: p} +} + +type attrOpt struct { + attrs map[string]string +} + +func (a attrOpt) Resolve(opts *telemetryOptions) { + opts.attributes = a.attrs +} + +// WithTelemetryAttributes specifies the static attributes attachments. +func WithTelemetryAttributes(attr map[string]string) TelemetryOption { + return &attrOpt{attrs: attr} +} + +type boundariesOpt struct { + boundaries []float64 +} + +func (b boundariesOpt) Resolve(opts *telemetryOptions) { + opts.explicitBucketBoundaries = b.boundaries +} + +// WithExplicitBucketBoundaries overrides the default histogram bucket boundaries. +func WithExplicitBucketBoundaries(boundaries []float64) TelemetryOption { + return &boundariesOpt{boundaries: boundaries} +} + +type loggerOpt struct { + l *slog.Logger +} + +func (l loggerOpt) Resolve(opts *telemetryOptions) { + opts.logger = l.l +} + +// WithTelemetryLogger specifies a logger to record internal telemetry errors. 
+func WithTelemetryLogger(l *slog.Logger) TelemetryOption { + return &loggerOpt{l: l} +} + +func (config *telemetryOptions) meterProvider() metric.MeterProvider { + if config.provider != nil { + return config.provider + } + return otel.GetMeterProvider() +} + +func (config *telemetryOptions) bucketBoundaries() []float64 { + if len(config.explicitBucketBoundaries) > 0 { + return config.explicitBucketBoundaries + } + return defaultHistogramBoundaries +} + +// NewClientMetrics initializes and returns a new ClientMetrics instance. +// It is intended to be called once per generated client during initialization. +func NewClientMetrics(opts ...TelemetryOption) *ClientMetrics { + var config telemetryOptions + for _, opt := range opts { + opt.Resolve(&config) + } + + return &ClientMetrics{ + get: sync.OnceValue(func() clientMetricsData { + provider := config.meterProvider() + + var meterAttrs []attribute.KeyValue + if val, ok := config.attributes[ClientService]; ok { + meterAttrs = append(meterAttrs, attribute.KeyValue{Key: attribute.Key(keyGCPClientService), Value: attribute.StringValue(val)}) + } + + meterOpts := []metric.MeterOption{ + metric.WithInstrumentationVersion(config.attributes[ClientVersion]), + metric.WithSchemaURL(schemaURL), + } + if len(meterAttrs) > 0 { + meterOpts = append(meterOpts, metric.WithInstrumentationAttributes(meterAttrs...)) + } + + meter := provider.Meter(config.attributes[ClientArtifact], meterOpts...) 
+ + boundaries := config.bucketBoundaries() + + duration, err := meter.Float64Histogram( + metricName, + metric.WithDescription(metricDescription), + metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(boundaries...), + ) + if err != nil && config.logger != nil { + config.logger.Warn("failed to initialize OTel duration histogram", "error", err) + } + + var attr []attribute.KeyValue + if val, ok := config.attributes[URLDomain]; ok { + attr = append(attr, attribute.KeyValue{Key: attribute.Key(keyURLDomain), Value: attribute.StringValue(val)}) + } + if val, ok := config.attributes[RPCSystem]; ok { + attr = append(attr, attribute.KeyValue{Key: attribute.Key(keyRPCSystemName), Value: attribute.StringValue(val)}) + } + return clientMetricsData{ + duration: duration, + attr: attr, + } + }), + } +} + +func (cm *ClientMetrics) durationHistogram() metric.Float64Histogram { + if cm == nil || cm.get == nil { + return nil + } + return cm.get().duration +} + +func (cm *ClientMetrics) attributes() []attribute.KeyValue { + if cm == nil || cm.get == nil { + return nil + } + return cm.get().attr +} + +var codeToStr = [...]string{ + "OK", // codes.OK = 0 + "CANCELED", // codes.Canceled = 1 + "UNKNOWN", // codes.Unknown = 2 + "INVALID_ARGUMENT", // codes.InvalidArgument = 3 + "DEADLINE_EXCEEDED", // codes.DeadlineExceeded = 4 + "NOT_FOUND", // codes.NotFound = 5 + "ALREADY_EXISTS", // codes.AlreadyExists = 6 + "PERMISSION_DENIED", // codes.PermissionDenied = 7 + "RESOURCE_EXHAUSTED", // codes.ResourceExhausted = 8 + "FAILED_PRECONDITION", // codes.FailedPrecondition = 9 + "ABORTED", // codes.Aborted = 10 + "OUT_OF_RANGE", // codes.OutOfRange = 11 + "UNIMPLEMENTED", // codes.Unimplemented = 12 + "INTERNAL", // codes.Internal = 13 + "UNAVAILABLE", // codes.Unavailable = 14 + "DATA_LOSS", // codes.DataLoss = 15 + "UNAUTHENTICATED", // codes.Unauthenticated = 16 +} + +// grpcCodeToStatusString converts a codes.Code to its string representation. 
+// Experimental: This function is experimental and may be modified or removed in future versions, +// regardless of any other documented package stability guarantees. +func grpcCodeToStatusString(c codes.Code) string { + if int(c) >= 0 && int(c) < len(codeToStr) { + return codeToStr[c] + } + return "UNKNOWN" +} + +// TelemetryErrorInfo contains the mapped error type and status code, as well as +// additional details like status message, domain, and metadata, extracted from an error +// for telemetry purposes. +type TelemetryErrorInfo struct { + // ErrorType is a mapped string for the error type. + // For stability, this maps client-side cancellations, timeouts, and known gRPC + // status codes to standard string literals (e.g., "CLIENT_TIMEOUT", + // "PERMISSION_DENIED"), and falls back to %T for unhandled types. If an + // apierror.APIError is found, it uses its fine-grained Reason() (e.g., + // "SERVICE_DISABLED"). + // This is used by metrics, tracing, and logging. + ErrorType string + // StatusCode is the string representation of the RPC status code. + // This is used by metrics, tracing, and logging. + StatusCode string + // StatusMessage is the raw message from the error. + // This is used for structured logging. + StatusMessage string + // Domain is the domain of the error, extracted from an ErrorInfo, if available. + // This is used for structured logging. + Domain string + // Metadata is the metadata of the error, extracted from an ErrorInfo, if available. + // This is used for structured logging. + Metadata map[string]string + + // _ struct{} prevents unkeyed struct literals, ensuring backwards + // compatibility when new fields are added in the future. + _ struct{} +} + +// ExtractTelemetryErrorInfo parses an error into a TelemetryErrorInfo struct. +// It relies on standard gRPC status codes, apierror.APIError parsing, and +// context inspection to determine the most accurate error classification and +// provide detailed metadata for telemetry systems. 
+// +// Experimental: This function is experimental and may be modified or removed in future versions, +// regardless of any other documented package stability guarantees. +func ExtractTelemetryErrorInfo(ctx context.Context, err error) TelemetryErrorInfo { + if err == nil { + return TelemetryErrorInfo{ErrorType: "", StatusCode: "OK"} + } + + st, ok := status.FromError(err) + if !ok { + st = status.FromContextError(err) + } + rpcStatusCode := grpcCodeToStatusString(st.Code()) + + var errType string + // 1. Check if the local context expired or was cancelled. This is the only + // reliable way to distinguish a local client timeout from a server timeout + // because gRPC does not wrap context errors in its status.Error types. + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + errType = "CLIENT_TIMEOUT" + } else if errors.Is(ctx.Err(), context.Canceled) { + errType = "CLIENT_CANCELLED" + } else if !ok || st.Code() == codes.Unknown || st.Code() == codes.Internal { + // 2. If the error isn't a context breakdown and the gRPC framework + // doesn't "understand" it (returning ok=false or a generic catch-all + // bucket like Unknown/Internal), we "pack" the actual Go error type + // name into error.type (e.g., "*net.OpError"). This is per the error.type + // [spec](https://opentelemetry.io/docs/specs/semconv/registry/attributes/error/#error-type). + // "When error.type is set to a type (e.g., an exception type), its canonical + // class name identifying the type within the artifact SHOULD be used." + errType = fmt.Sprintf("%T", err) + } else { + // 3. Otherwise, it is a well-understood gRPC protocol error (e.g., + // PERMISSION_DENIED) likely returned by the server. 
+ errType = rpcStatusCode + } + + var msg, domain string + var metadata map[string]string + if ok { + msg = st.Message() + } else { + msg = err.Error() + } + + if parsedErr, parsedOk := apierror.ParseError(err, false); parsedOk { + // If there's an actionable error, the reason takes precedence over our calculated error type. + if reason := parsedErr.Reason(); reason != "" { + errType = reason + } else if httpCode := parsedErr.HTTPCode(); httpCode > 0 { + errType = strconv.Itoa(httpCode) + } + if message := parsedErr.Message(); message != "" { + msg = message + } else if parsedErr.HTTPCode() > 0 { + // For HTTP errors, avoid returning the raw, unformatted err.Error() (e.g. "googleapi: got HTTP response...") + // if the actual parsed message from the response is empty. + msg = "" + } + domain = parsedErr.Domain() + metadata = parsedErr.Metadata() + } + + return TelemetryErrorInfo{ + ErrorType: errType, + StatusCode: rpcStatusCode, + StatusMessage: msg, + Domain: domain, + Metadata: metadata, + } +} + +// recordMetric records a duration measurement for the configured metric. +func recordMetric(ctx context.Context, settings CallSettings, d time.Duration, err error) { + if settings.clientMetrics == nil || settings.clientMetrics.durationHistogram() == nil { + return + } + + // Use context.WithoutCancel to ensure metric records even if context is canceled + // preserving any trace context that might be required for exemplars. + recordCtx := context.WithoutCancel(ctx) + + // Pre-allocate to avoid repeated appends (5 is the max number of dynamic attributes added here) + attrs := make([]attribute.KeyValue, 0, len(settings.clientMetrics.attributes())+5) + attrs = append(attrs, settings.clientMetrics.attributes()...) 
+ + errInfo := ExtractTelemetryErrorInfo(ctx, err) + if errInfo.ErrorType != "" { + attrs = append(attrs, attribute.String("error.type", errInfo.ErrorType)) + } + attrs = append(attrs, attribute.String("rpc.response.status_code", errInfo.StatusCode)) + + if rpcMethod, ok := callctx.TelemetryFromContext(ctx, "rpc_method"); ok && rpcMethod != "" { + attrs = append(attrs, attribute.String("rpc.method", rpcMethod)) + } + if urlTemplate, ok := callctx.TelemetryFromContext(ctx, "url_template"); ok && urlTemplate != "" { + attrs = append(attrs, attribute.String("url.template", urlTemplate)) + } + + settings.clientMetrics.durationHistogram().Record(recordCtx, d.Seconds(), metric.WithAttributes(attrs...)) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index 2f0b9e9e0f..4c9083d790 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -28,7 +28,9 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal } handleForwardResponseServerMetadata(w, mux, md) - w.Header().Set("Transfer-Encoding", "chunked") + if !mux.disableChunkedEncoding { + w.Header().Set("Transfer-Encoding", "chunked") + } if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { HTTPError(ctx, mux, marshaler, w, req, err) return diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index 3eb1616717..4e684c7de6 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -73,6 +73,7 @@ type ServeMux struct { disablePathLengthFallback bool unescapingMode UnescapingMode writeContentLength bool + disableChunkedEncoding bool } // ServeMuxOption is an option that can be given to a ServeMux on 
construction. @@ -125,6 +126,16 @@ func WithMiddlewares(middlewares ...Middleware) ServeMuxOption { } } +// WithDisableChunkedEncoding disables the Transfer-Encoding: chunked header +// for streaming responses. This is useful for streaming implementations that use +// Content-Length, which is mutually exclusive with Transfer-Encoding:chunked. +// Note that this option will not automatically add Content-Length headers, so it should be used with caution. +func WithDisableChunkedEncoding() ServeMuxOption { + return func(mux *ServeMux) { + mux.disableChunkedEncoding = true + } +} + // SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. // Configuring this will mean the generated OpenAPI output is no longer correct, and it should be // done with careful consideration. diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go index 0dd94ea263..c5cbbb6a18 100644 --- a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 +// protoc-gen-go v1.36.11 // protoc v4.24.4 // source: in_toto_attestation/v1/resource_descriptor.proto diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.go b/vendor/github.com/in-toto/attestation/go/v1/statement.go index f63d5f0d74..7107459a5e 100644 --- a/vendor/github.com/in-toto/attestation/go/v1/statement.go +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.go @@ -4,9 +4,13 @@ Wrapper APIs for in-toto attestation Statement layer protos. 
package v1 -import "errors" +import ( + "errors" +) -const StatementTypeUri = "https://in-toto.io/Statement/v1" +const statementTypeUriPrefix = "https://in-toto.io/Statement/" +const statementTypeUriLegacy = statementTypeUriPrefix + "v0.1" +const StatementTypeUri = statementTypeUriPrefix + "v1" var ( ErrInvalidStatementType = errors.New("wrong statement type") @@ -17,7 +21,7 @@ var ( ) func (s *Statement) Validate() error { - if s.GetType() != StatementTypeUri { + if !s.isValidType() { return ErrInvalidStatementType } @@ -48,3 +52,7 @@ func (s *Statement) Validate() error { return nil } + +func (s *Statement) isValidType() bool { + return s.GetType() == StatementTypeUri || s.GetType() == statementTypeUriLegacy +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go index bc76eaf261..ac09fe8acd 100644 --- a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 +// protoc-gen-go v1.36.11 // protoc v4.24.4 // source: in_toto_attestation/v1/statement.proto diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go index 73aafe7e1c..b03871cbf4 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go @@ -1,6 +1,7 @@ package in_toto import ( + ita1 "github.com/in-toto/attestation/go/v1" "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" slsa01 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1" slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" @@ -11,6 +12,11 @@ const ( // StatementInTotoV01 is the statement type for the generalized link format // containing statements. 
This is constant for all predicate types. StatementInTotoV01 = "https://in-toto.io/Statement/v0.1" + + // StatementInTotoV1 is the type URI for ITE-6 v1 Statements. + // This is constant for all predicate types. + StatementInTotoV1 = ita1.StatementTypeUri + // PredicateSPDX represents a SBOM using the SPDX standard. // The SPDX mandates 'spdxVersion' field, so predicate type can omit // version. @@ -22,12 +28,28 @@ const ( ) // Subject describes the set of software artifacts the statement applies to. +// +// Deprecated: This implementation of Subject exists for historical +// compatibility and should not be used. This implementation has been +// superseded by a ResourceDescriptor struct generated from the Protobuf +// definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/v1. +// To generate an ITE-6 v1 Statement subject, use the ResourceDescriptor Go +// APIs provided in https://github.com/in-toto/attestation/tree/main/go/v1. type Subject struct { Name string `json:"name"` Digest common.DigestSet `json:"digest"` } // StatementHeader defines the common fields for all statements +// +// Deprecated: This implementation of StatementHeader exists for historical +// compatibility and should not be used. This implementation has been +// superseded by the Statement struct generated from the Protobuf +// definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/v1. +// To generate an ITE-6 v1 Statement, use the Go APIs provided in +// https://github.com/in-toto/attestation/tree/main/go/v1. type StatementHeader struct { Type string `json:"_type"` PredicateType string `json:"predicateType"` @@ -38,9 +60,16 @@ type StatementHeader struct { Statement binds the attestation to a particular subject and identifies the of the predicate. This struct represents a generic statement. */ +// Deprecated: This implementation of Statement exists for historical +// compatibility and should not be used. 
This implementation has been +// superseded by the Statement struct generated from the Protobuf +// definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/v1. +// To generate an ITE-6 v1 Statement, use the Go APIs provided in +// https://github.com/in-toto/attestation/tree/main/go/v1. type Statement struct { StatementHeader - // Predicate contains type speficic metadata. + // Predicate contains type specific metadata. Predicate interface{} `json:"predicate"` } @@ -57,6 +86,11 @@ type ProvenanceStatementSLSA02 struct { } // ProvenanceStatementSLSA1 is the definition for an entire provenance statement with SLSA 1.0 predicate. +// +// Deprecated: ProvenanceStatementSLSA1 exists for historical +// compatibility and should not be used. To generate an ITE-6 v1 Statement +// with an ITE-9 Provenance v1 predicate, use the Go APIs provided in +// https://github.com/in-toto/attestation/tree/main/go. type ProvenanceStatementSLSA1 struct { StatementHeader Predicate slsa1.ProvenancePredicate `json:"predicate"` diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go index 52429ca44b..a3994c8472 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go @@ -1,10 +1,8 @@ package in_toto import ( - "crypto" "crypto/ecdsa" "crypto/ed25519" - "crypto/rand" "crypto/rsa" "crypto/sha256" "crypto/x509" @@ -407,7 +405,11 @@ func (k *Key) loadKey(keyObj interface{}, pemData *pem.Block, scheme string, key } case ed25519.PrivateKey: pubKeyBytes := key.Public() - if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { + publicKey, ok := pubKeyBytes.(ed25519.PublicKey) + if !ok { + return fmt.Errorf("pubKeyBytes must be ed25519.PublicKey") + } + if err := k.setKeyComponents(publicKey, key, ed25519KeyType, scheme, keyIDHashAlgorithms); 
err != nil { return err } case *ecdsa.PrivateKey: @@ -442,214 +444,6 @@ func (k *Key) loadKey(keyObj interface{}, pemData *pem.Block, scheme string, key return nil } -/* -GenerateSignature will automatically detect the key type and sign the signable data -with the provided key. If everything goes right GenerateSignature will return -a for the key valid signature and err=nil. If something goes wrong it will -return a not initialized signature and an error. Possible errors are: - - - ErrNoPEMBlock - - ErrUnsupportedKeyType - -Currently supported is only one scheme per key. - -Note that in-toto-golang has different requirements to an ecdsa key. -In in-toto-golang we use the string 'ecdsa' as string for the key type. -In the key scheme we use: ecdsa-sha2-nistp256. -*/ -func GenerateSignature(signable []byte, key Key) (Signature, error) { - err := validateKey(key) - if err != nil { - return Signature{}, err - } - var signature Signature - var signatureBuffer []byte - hashMapping := getHashMapping() - // The following switch block is needed for keeping interoperability - // with the securesystemslib and the python implementation - // in which we are storing RSA keys in PEM format, but ed25519 keys hex encoded. 
- switch key.KeyType { - case rsaKeyType: - // We do not need the pemData here, so we can throw it away via '_' - _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private)) - if err != nil { - return Signature{}, err - } - parsedKey, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return Signature{}, ErrKeyKeyTypeMismatch - } - switch key.Scheme { - case rsassapsssha256Scheme: - hashed := hashToHex(hashMapping["sha256"](), signable) - // We use rand.Reader as secure random source for rsa.SignPSS() - signatureBuffer, err = rsa.SignPSS(rand.Reader, parsedKey.(*rsa.PrivateKey), crypto.SHA256, hashed, - &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}) - if err != nil { - return signature, err - } - default: - // supported key schemes will get checked in validateKey - panic("unexpected Error in GenerateSignature function") - } - case ecdsaKeyType: - // We do not need the pemData here, so we can throw it away via '_' - _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private)) - if err != nil { - return Signature{}, err - } - parsedKey, ok := parsedKey.(*ecdsa.PrivateKey) - if !ok { - return Signature{}, ErrKeyKeyTypeMismatch - } - curveSize := parsedKey.(*ecdsa.PrivateKey).Curve.Params().BitSize - var hashed []byte - if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil { - return Signature{}, ErrCurveSizeSchemeMismatch - } - // implement https://tools.ietf.org/html/rfc5656#section-6.2.1 - // We determine the curve size and choose the correct hashing - // method based on the curveSize - switch { - case curveSize <= 256: - hashed = hashToHex(hashMapping["sha256"](), signable) - case 256 < curveSize && curveSize <= 384: - hashed = hashToHex(hashMapping["sha384"](), signable) - case curveSize > 384: - hashed = hashToHex(hashMapping["sha512"](), signable) - default: - panic("unexpected Error in GenerateSignature function") - } - // Generate the ecdsa signature on the same way, as we do in the securesystemslib - // We are marshalling the 
ecdsaSignature struct as ASN.1 INTEGER SEQUENCES - // into an ASN.1 Object. - signatureBuffer, err = ecdsa.SignASN1(rand.Reader, parsedKey.(*ecdsa.PrivateKey), hashed[:]) - if err != nil { - return signature, err - } - case ed25519KeyType: - // We do not need a scheme switch here, because ed25519 - // only consist of sha256 and curve25519. - privateHex, err := hex.DecodeString(key.KeyVal.Private) - if err != nil { - return signature, ErrInvalidHexString - } - // Note: We can directly use the key for signing and do not - // need to use ed25519.NewKeyFromSeed(). - signatureBuffer = ed25519.Sign(privateHex, signable) - default: - // We should never get here, because we call validateKey in the first - // line of the function. - panic("unexpected Error in GenerateSignature function") - } - signature.Sig = hex.EncodeToString(signatureBuffer) - signature.KeyID = key.KeyID - signature.Certificate = key.KeyVal.Certificate - return signature, nil -} - -/* -VerifySignature will verify unverified byte data via a passed key and signature. -Supported key types are: - - - rsa - - ed25519 - - ecdsa - -When encountering an RSA key, VerifySignature will decode the PEM block in the key -and will call rsa.VerifyPSS() for verifying the RSA signature. -When encountering an ed25519 key, VerifySignature will decode the hex string encoded -public key and will use ed25519.Verify() for verifying the ed25519 signature. -When the given key is an ecdsa key, VerifySignature will unmarshall the ASN1 object -and will use the retrieved ecdsa components 'r' and 's' for verifying the signature. -On success it will return nil. In case of an unsupported key type or any other error -it will return an error. - -Note that in-toto-golang has different requirements to an ecdsa key. -In in-toto-golang we use the string 'ecdsa' as string for the key type. -In the key scheme we use: ecdsa-sha2-nistp256. 
-*/ -func VerifySignature(key Key, sig Signature, unverified []byte) error { - err := validateKey(key) - if err != nil { - return err - } - sigBytes, err := hex.DecodeString(sig.Sig) - if err != nil { - return err - } - hashMapping := getHashMapping() - switch key.KeyType { - case rsaKeyType: - // We do not need the pemData here, so we can throw it away via '_' - _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public)) - if err != nil { - return err - } - parsedKey, ok := parsedKey.(*rsa.PublicKey) - if !ok { - return ErrKeyKeyTypeMismatch - } - switch key.Scheme { - case rsassapsssha256Scheme: - hashed := hashToHex(hashMapping["sha256"](), unverified) - err = rsa.VerifyPSS(parsedKey.(*rsa.PublicKey), crypto.SHA256, hashed, sigBytes, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}) - if err != nil { - return fmt.Errorf("%w: %s", ErrInvalidSignature, err) - } - default: - // supported key schemes will get checked in validateKey - panic("unexpected Error in VerifySignature function") - } - case ecdsaKeyType: - // We do not need the pemData here, so we can throw it away via '_' - _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public)) - if err != nil { - return err - } - parsedKey, ok := parsedKey.(*ecdsa.PublicKey) - if !ok { - return ErrKeyKeyTypeMismatch - } - curveSize := parsedKey.(*ecdsa.PublicKey).Curve.Params().BitSize - var hashed []byte - if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil { - return ErrCurveSizeSchemeMismatch - } - // implement https://tools.ietf.org/html/rfc5656#section-6.2.1 - // We determine the curve size and choose the correct hashing - // method based on the curveSize - switch { - case curveSize <= 256: - hashed = hashToHex(hashMapping["sha256"](), unverified) - case 256 < curveSize && curveSize <= 384: - hashed = hashToHex(hashMapping["sha384"](), unverified) - case curveSize > 384: - hashed = hashToHex(hashMapping["sha512"](), unverified) - default: - panic("unexpected Error in VerifySignature 
function") - } - if ok := ecdsa.VerifyASN1(parsedKey.(*ecdsa.PublicKey), hashed[:], sigBytes); !ok { - return ErrInvalidSignature - } - case ed25519KeyType: - // We do not need a scheme switch here, because ed25519 - // only consist of sha256 and curve25519. - pubHex, err := hex.DecodeString(key.KeyVal.Public) - if err != nil { - return ErrInvalidHexString - } - if ok := ed25519.Verify(pubHex, unverified, sigBytes); !ok { - return fmt.Errorf("%w: ed25519", ErrInvalidSignature) - } - default: - // We should never get here, because we call validateKey in the first - // line of the function. - panic("unexpected Error in VerifySignature function") - } - return nil -} - /* VerifyCertificateTrust verifies that the certificate has a chain of trust to a root in rootCertPool, possibly using any intermediates in diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go index f56b784ea0..4081535656 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go @@ -1,9 +1,11 @@ package in_toto import ( + "context" "crypto/ecdsa" "crypto/rsa" "crypto/x509" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -18,6 +20,8 @@ import ( "github.com/secure-systems-lab/go-securesystemslib/dsse" ) +type HashObj = map[string]string + /* KeyVal contains the actual values of a key, as opposed to key metadata such as a key identifier or key type. For RSA keys, the key value is a pair of public @@ -337,8 +341,8 @@ writing to disk. 
type Link struct { Type string `json:"_type"` Name string `json:"name"` - Materials map[string]interface{} `json:"materials"` - Products map[string]interface{} `json:"products"` + Materials map[string]HashObj `json:"materials"` + Products map[string]HashObj `json:"products"` ByProducts map[string]interface{} `json:"byproducts"` Command []string `json:"command"` Environment map[string]interface{} `json:"environment"` @@ -347,12 +351,18 @@ type Link struct { /* validateArtifacts is a general function used to validate products and materials. */ -func validateArtifacts(artifacts map[string]interface{}) error { +func validateArtifacts(artifacts map[string]HashObj) error { for artifactName, artifact := range artifacts { artifactValue := reflect.ValueOf(artifact).MapRange() for artifactValue.Next() { - value := artifactValue.Value().Interface().(string) - hashType := artifactValue.Key().Interface().(string) + value, ok := artifactValue.Value().Interface().(string) + if !ok { + return fmt.Errorf("value is not string") + } + hashType, ok := artifactValue.Key().Interface().(string) + if !ok { + return fmt.Errorf("hash type is not string") + } if err := validateHexString(value); err != nil { return fmt.Errorf("in artifact '%s', %s hash value: %s", artifactName, hashType, err.Error()) @@ -896,14 +906,26 @@ func (mb *Metablock) VerifySignature(key Key) error { return err } - dataCanonical, err := mb.GetSignableRepresentation() + verifier, err := getSignerVerifierFromKey(key) if err != nil { return err } - if err := VerifySignature(key, sig, dataCanonical); err != nil { + payload, err := mb.GetSignableRepresentation() + if err != nil { return err } + + sigBytes, err := hex.DecodeString(sig.Sig) + if err != nil { + return err + } + + err = verifier.Verify(context.Background(), payload, sigBytes) + if err != nil { + return err + } + return nil } @@ -951,17 +973,26 @@ field as provided. 
It returns an error if the Signed object cannot be canonicalized, or if the key is invalid or not supported. */ func (mb *Metablock) Sign(key Key) error { + signer, err := getSignerVerifierFromKey(key) + if err != nil { + return err + } - dataCanonical, err := mb.GetSignableRepresentation() + payload, err := mb.GetSignableRepresentation() if err != nil { return err } - newSignature, err := GenerateSignature(dataCanonical, key) + signature, err := signer.Sign(context.Background(), payload) if err != nil { return err } - mb.Signatures = append(mb.Signatures, newSignature) + mb.Signatures = append(mb.Signatures, Signature{ + KeyID: key.KeyID, + Sig: hex.EncodeToString(signature), + Certificate: key.KeyVal.Certificate, + }) + return nil } diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go index f0a55d8219..4cc0321667 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go @@ -41,11 +41,11 @@ value is the error. NOTE: For cross-platform consistency Windows-style line separators (CRLF) are normalized to Unix-style line separators (LF) before hashing file contents. */ -func RecordArtifact(path string, hashAlgorithms []string, lineNormalization bool) (map[string]interface{}, error) { +func RecordArtifact(path string, hashAlgorithms []string, lineNormalization bool) (HashObj, error) { supportedHashMappings := getHashMapping() // Read file from passed path contents, err := os.ReadFile(path) - hashedContentsMap := make(map[string]interface{}) + hashedContentsMap := make(HashObj) if err != nil { return nil, err } @@ -92,12 +92,22 @@ the following format: If recording an artifact fails the first return value is nil and the second return value is the error. 
*/ -func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (evalArtifacts map[string]interface{}, err error) { +func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (evalArtifacts map[string]HashObj, err error) { // Make sure to initialize a fresh hashset for every RecordArtifacts call visitedSymlinks = NewSet() - evalArtifacts, err = recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) - // pass result and error through - return evalArtifacts, err + evalArtifactsUnnormalized, err := recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) + if err != nil { + return nil, err + } + + // Normalize all paths in evalArtifactsUnnormalized. + evalArtifacts = make(map[string]HashObj, len(evalArtifactsUnnormalized)) + for key, value := range evalArtifactsUnnormalized { + // Convert windows filepath to unix filepath. + evalArtifacts[filepath.ToSlash(key)] = value + } + + return evalArtifacts, nil } /* @@ -118,8 +128,8 @@ the following format: If recording an artifact fails the first return value is nil and the second return value is the error. 
*/ -func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (map[string]interface{}, error) { - artifacts := make(map[string]interface{}) +func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (map[string]HashObj, error) { + artifacts := make(map[string]HashObj) for _, path := range paths { err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error { @@ -380,7 +390,7 @@ func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorit Type: "link", Name: name, Materials: materials, - Products: map[string]interface{}{}, + Products: map[string]HashObj{}, ByProducts: map[string]interface{}{}, Command: []string{}, Environment: map[string]interface{}{}, @@ -413,9 +423,9 @@ func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorit } /* -InTotoRecordStop ends the creation of a metatadata link file created by +InTotoRecordStop ends the creation of a metadata link file created by InTotoRecordStart. InTotoRecordStop takes in a signed unfinished link metablock -created by InTotoRecordStart and records the hashes of any products creted by +created by InTotoRecordStart and records the hashes of any products created by commands run between InTotoRecordStart and InTotoRecordStop. The resultant finished link metablock is then signed by the provided key and returned. */ @@ -460,3 +470,63 @@ func InTotoRecordStop(prelimLinkEnv Metadata, productPaths []string, key Key, ha return linkMb, nil } + +/* +InTotoMatchProducts checks if local artifacts match products in passed link. + +NOTE: Does not check integrity or authenticity of passed link! 
+*/ +func InTotoMatchProducts(link *Link, paths []string, hashAlgorithms []string, excludePatterns []string, lstripPaths []string) ([]string, []string, []string, error) { + if len(paths) == 0 { + paths = append(paths, ".") + } + + artifacts, err := RecordArtifacts(paths, hashAlgorithms, excludePatterns, lstripPaths, false, false) + if err != nil { + return nil, nil, nil, err + } + + artifactNames := []string{} + for name := range artifacts { + artifactNames = append(artifactNames, name) + } + artifactsSet := NewSet(artifactNames...) + + productNames := []string{} + for name := range link.Products { + productNames = append(productNames, name) + } + productsSet := NewSet(productNames...) + + onlyInProductsSet := productsSet.Difference(artifactsSet) + onlyInProducts := []string{} + for name := range onlyInProductsSet { + onlyInProducts = append(onlyInProducts, name) + } + + notInProductsSet := artifactsSet.Difference(productsSet) + notInProducts := []string{} + for name := range notInProductsSet { + notInProducts = append(notInProducts, name) + } + + inBothSet := artifactsSet.Intersection(productsSet) + differ := []string{} + for name := range inBothSet { + linkHashes := HashObj{} + for alg, val := range link.Products[name] { + linkHashes[alg] = val + } + + artifactHashes := HashObj{} + for alg, val := range artifacts[name] { + artifactHashes[alg] = val + } + + if !reflect.DeepEqual(linkHashes, artifactHashes) { + differ = append(differ, name) + } + } + + return onlyInProducts, notInProducts, differ, nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go index a45a454634..ab6763e855 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go @@ -4,7 +4,7 @@ package common // algorithm name to lowercase hex-encoded 
value. type DigestSet map[string]string -// ProvenanceBuilder idenfifies the entity that executed the build steps. +// ProvenanceBuilder identifies the entity that executed the build steps. type ProvenanceBuilder struct { ID string `json:"id"` } diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go index 5978e9229d..4470723b8d 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go @@ -40,8 +40,8 @@ type ProvenanceMetadata struct { Reproducible bool `json:"reproducible"` } -// ProvenanceComplete indicates wheter the claims in build/recipe are complete. -// For in depth information refer to the specifictaion: +// ProvenanceComplete indicates whether the claims in build/recipe are complete. +// For in depth information refer to the specification: // https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md type ProvenanceComplete struct { Arguments bool `json:"arguments"` diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go index 40416e29a8..3efbb51e81 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go @@ -127,8 +127,8 @@ type ProvenanceMetadata struct { Reproducible bool `json:"reproducible"` } -// ProvenanceComplete indicates wheter the claims in build/recipe are complete. -// For in depth information refer to the specifictaion: +// ProvenanceComplete indicates whether the claims in build/recipe are complete. 
+// For in depth information refer to the specification: // https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md type ProvenanceComplete struct { // Parameters if true, means the builder claims that [ProvenanceInvocation.Parameters] is diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go index e849731dce..5a26445c5c 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go @@ -12,6 +12,12 @@ const ( ) // ProvenancePredicate is the provenance predicate definition. +// +// Deprecated: ProvenancePredicate exists for historical compatibility +// and should not be used. This implementation has been superseded by the +// Provenance struct generated from the Protobuf definition provided +// by the in-toto Attestation Framework. +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/predicates/provenance/v1. type ProvenancePredicate struct { // The BuildDefinition describes all of the inputs to the build. The // accuracy and completeness are implied by runDetails.builder.id. @@ -25,6 +31,11 @@ type ProvenancePredicate struct { } // ProvenanceBuildDefinition describes the inputs to the build. +// +// Deprecated: ProvenanceBuildDefinition exists for historical compatibility +// and should not be used. This implementation has been superseded by the +// BuildDefinition struct generated from the Protobuf definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/predicates/provenance/v1. type ProvenanceBuildDefinition struct { // Identifies the template for how to perform the build and interpret the // parameters and dependencies. 
@@ -37,7 +48,7 @@ type ProvenanceBuildDefinition struct { // The parameters that are under external control, such as those set by a // user or tenant of the build system. They MUST be complete at SLSA Build - // L3, meaning that that there is no additional mechanism for an external + // L3, meaning that there is no additional mechanism for an external // party to influence the build. (At lower SLSA Build levels, the // completeness MAY be best effort.) @@ -66,6 +77,11 @@ type ProvenanceBuildDefinition struct { // ProvenanceRunDetails includes details specific to a particular execution of a // build. +// +// Deprecated: ProvenanceRunDetails exists for historical compatibility +// and should not be used. This implementation has been superseded by the +// RunDetails struct generated from the Protobuf definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/predicates/provenance/v1. type ProvenanceRunDetails struct { // Identifies the entity that executed the invocation, which is trusted to // have correctly performed the operation and populated this provenance. @@ -92,6 +108,12 @@ type ProvenanceRunDetails struct { // ResourceDescriptor describes a particular software artifact or resource // (mutable or immutable). // See https://github.com/in-toto/attestation/blob/main/spec/v1.0/resource_descriptor.md +// +// Deprecated: This implementation of ResoureDescriptor exists for +// historical compatibility and should not be used. This struct has been +// superseded by the ResourceDescriptor struct generated from the Protobuf +// definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/v1. type ResourceDescriptor struct { // A URI used to identify the resource or artifact globally. This field is // REQUIRED unless either digest or content is set. 
@@ -123,6 +145,11 @@ type ResourceDescriptor struct { // Builder represents the transitive closure of all the entities that are, by // necessity, trusted to faithfully run the build and record the provenance. +// +// Deprecated: This implementation of Builder exists for historical +// compatibility and should not be used. This implementation has been +// superseded by the Builder struct generated from the Protobuf definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/predicates/provenance/v1. type Builder struct { // URI indicating the transitive closure of the trusted builder. ID string `json:"id"` @@ -136,12 +163,17 @@ type Builder struct { BuilderDependencies []ResourceDescriptor `json:"builderDependencies,omitempty"` } +// Deprecated: This implementation of BuildMetadata exists for historical +// compatibility and should not be used. This implementation has been +// superseded by the BuildMetadata struct generated from the Protobuf +// definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/predicates/provenance/v1. type BuildMetadata struct { // Identifies this particular build invocation, which can be useful for // finding associated logs or other ad-hoc analysis. The exact meaning and // format is defined by builder.id; by default it is treated as opaque and // case-sensitive. The value SHOULD be globally unique. - InvocationID string `json:"invocationID,omitempty"` + InvocationID string `json:"invocationId,omitempty"` // The timestamp of when the build started. 
StartedOn *time.Time `json:"startedOn,omitempty"` diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go index 5c36dede13..01421afebf 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go @@ -121,10 +121,10 @@ func (s Set) Slice() []string { } /* -InterfaceKeyStrings returns string keys of passed interface{} map in an +artifactsDictKeyStrings returns string keys of passed HashObj map in an unordered string slice. */ -func InterfaceKeyStrings(m map[string]interface{}) []string { +func artifactsDictKeyStrings(m map[string]HashObj) []string { res := make([]string, len(m)) i := 0 for k := range m { diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go index 2564bd47eb..de9dfa7e64 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go @@ -59,7 +59,11 @@ func RunInspections(layout Layout, runDir string, lineNormalization bool, useDSS return nil, err } - retVal := linkEnv.GetPayload().(Link).ByProducts["return-value"] + link, ok := linkEnv.GetPayload().(Link) + if !ok { + return nil, fmt.Errorf("invalid metadata") + } + retVal := link.ByProducts["return-value"] if retVal != float64(0) { return nil, fmt.Errorf("inspection command '%s' of inspection '%s'"+ " returned a non-zero value: %d", inspection.Run, inspection.Name, @@ -80,7 +84,7 @@ func RunInspections(layout Layout, runDir string, lineNormalization bool, useDSS // verifyMatchRule is a helper function to process artifact rules of // type MATCH. See VerifyArtifacts for more details. 
func verifyMatchRule(ruleData map[string]string, - srcArtifacts map[string]interface{}, srcArtifactQueue Set, + srcArtifacts map[string]HashObj, srcArtifactQueue Set, itemsMetadata map[string]Metadata) Set { consumed := NewSet() // Get destination link metadata @@ -91,13 +95,19 @@ func verifyMatchRule(ruleData map[string]string, return consumed } + dstLink, ok := dstLinkEnv.GetPayload().(Link) + if !ok { + fmt.Printf("invalid metadata") + return consumed + } + // Get artifacts from destination link metadata - var dstArtifacts map[string]interface{} + var dstArtifacts map[string]HashObj switch ruleData["dstType"] { case "materials": - dstArtifacts = dstLinkEnv.GetPayload().(Link).Materials + dstArtifacts = dstLink.Materials case "products": - dstArtifacts = dstLinkEnv.GetPayload().(Link).Products + dstArtifacts = dstLink.Products } // cleanup paths in pattern and artifact maps @@ -216,18 +226,22 @@ func VerifyArtifacts(items []interface{}, // Create shortcuts to materials and products (including hashes) reported // by the item's link, required to verify "match" rules - materials := srcLinkEnv.GetPayload().(Link).Materials - products := srcLinkEnv.GetPayload().(Link).Products + link, ok := srcLinkEnv.GetPayload().(Link) + if !ok { + return fmt.Errorf("invalid metadata") + } + materials := link.Materials + products := link.Products // All other rules only require the material or product paths (without // hashes). 
We extract them from the corresponding maps and store them as // sets for convenience in further processing materialPaths := NewSet() - for _, p := range InterfaceKeyStrings(materials) { + for _, p := range artifactsDictKeyStrings(materials) { materialPaths.Add(path.Clean(p)) } productPaths := NewSet() - for _, p := range InterfaceKeyStrings(products) { + for _, p := range artifactsDictKeyStrings(products) { productPaths.Add(path.Clean(p)) } @@ -269,17 +283,24 @@ func VerifyArtifacts(items []interface{}, // TODO: Add logging library (see in-toto/in-toto-golang#4) // fmt.Printf("%s...\n", verificationData["srcType"]) - rules := verificationData["rules"].([][]string) - artifacts := verificationData["artifacts"].(map[string]interface{}) - + rules, ok := verificationData["rules"].([][]string) + if !ok { + return fmt.Errorf(`rules must be of type [][]string`) + } + artifacts, ok := verificationData["artifacts"].(map[string]HashObj) + if !ok { + return fmt.Errorf(`artifacts must be of type map[string]HashObj`) + } // Use artifacts (without hashes) as base queue. Each rule only operates // on artifacts in that queue. If a rule consumes an artifact (i.e. can // be applied successfully), the artifact is removed from the queue. By // applying a DISALLOW rule eventually, verification may return an error, // if the rule matches any artifacts in the queue that should have been // consumed earlier. - queue := verificationData["artifactPaths"].(Set) - + queue, ok := verificationData["artifactPaths"].(Set) + if !ok { + return fmt.Errorf(`queue must be of type Set`) + } // TODO: Add logging library (see in-toto/in-toto-golang#4) // fmt.Printf("Initial state\nMaterials: %s\nProducts: %s\nQueue: %s\n\n", // materialPaths.Slice(), productPaths.Slice(), queue.Slice()) @@ -398,10 +419,16 @@ func ReduceStepsMetadata(layout Layout, // threshold requires, but not all of them are equal? Right now we would // also error. 
for keyID, linkEnv := range linksPerStep { - if !reflect.DeepEqual(linkEnv.GetPayload().(Link).Materials, - referenceLinkEnv.GetPayload().(Link).Materials) || - !reflect.DeepEqual(linkEnv.GetPayload().(Link).Products, - referenceLinkEnv.GetPayload().(Link).Products) { + link, ok := linkEnv.GetPayload().(Link) + if !ok { + return nil, fmt.Errorf("invalid metadata") + } + refLink, ok := referenceLinkEnv.GetPayload().(Link) + if !ok { + return nil, fmt.Errorf("invalid metadata") + } + if !reflect.DeepEqual(link.Materials, refLink.Materials) || + !reflect.DeepEqual(link.Products, refLink.Products) { return nil, fmt.Errorf("link '%s' and '%s' have different"+ " artifacts", fmt.Sprintf(LinkNameFormat, step.Name, referenceKeyID), @@ -432,8 +459,13 @@ func VerifyStepCommandAlignment(layout Layout, } for signerKeyID, linkEnv := range linksPerStep { + link, ok := linkEnv.GetPayload().(Link) + if !ok { + fmt.Printf("invalid metadata") + return + } expectedCommandS := strings.Join(step.ExpectedCommand, " ") - executedCommandS := strings.Join(linkEnv.GetPayload().(Link).Command, " ") + executedCommandS := strings.Join(link.Command, " ") if expectedCommandS != executedCommandS { linkName := fmt.Sprintf(LinkNameFormat, step.Name, signerKeyID) @@ -708,16 +740,24 @@ func GetSummaryLink(layout Layout, stepsMetadataReduced map[string]Metadata, firstStepLink := stepsMetadataReduced[layout.Steps[0].Name] lastStepLink := stepsMetadataReduced[layout.Steps[len(layout.Steps)-1].Name] - summaryLink.Materials = firstStepLink.GetPayload().(Link).Materials + firstStepPayloadLink, ok := firstStepLink.GetPayload().(Link) + if !ok { + return nil, fmt.Errorf("invalid metadata") + } + summaryLink.Materials = firstStepPayloadLink.Materials summaryLink.Name = stepName - summaryLink.Type = firstStepLink.GetPayload().(Link).Type + summaryLink.Type = firstStepPayloadLink.Type - summaryLink.Products = lastStepLink.GetPayload().(Link).Products - summaryLink.ByProducts = 
lastStepLink.GetPayload().(Link).ByProducts + lastStepPayloadLink, ok := lastStepLink.GetPayload().(Link) + if !ok { + return nil, fmt.Errorf("invalid metadata") + } + summaryLink.Products = lastStepPayloadLink.Products + summaryLink.ByProducts = lastStepPayloadLink.ByProducts // Using the last command of the sublayout as the command // of the summary link can be misleading. Is it necessary to // include all the commands executed as part of sublayout? - summaryLink.Command = lastStepLink.GetPayload().(Link).Command + summaryLink.Command = lastStepPayloadLink.Command } if useDSSE { @@ -845,7 +885,7 @@ the in-toto specification. It requires the metadata of the root layout, a map that contains public keys to verify the root layout signatures, a path to a directory from where it can load link metadata files, which are treated as signed evidence for the steps defined in the layout, a step name, and a -paramater dictionary used for parameter substitution. The step name only +parameter dictionary used for parameter substitution. The step name only matters for sublayouts, where it's important to associate the summary of that step with a unique name. 
The verification routine is as follows: diff --git a/vendor/github.com/jedisct1/go-minisign/LICENSE b/vendor/github.com/jedisct1/go-minisign/LICENSE index 06c6cdbb99..7d147b428e 100644 --- a/vendor/github.com/jedisct1/go-minisign/LICENSE +++ b/vendor/github.com/jedisct1/go-minisign/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2018-2023 Frank Denis +Copyright (c) 2018-2024 Frank Denis Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 4528059ca6..804a201816 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -31,6 +31,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm - id: "s2d" binary: s2d @@ -57,6 +60,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm - id: "s2sx" binary: s2sx @@ -84,6 +90,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm archives: - @@ -91,7 +100,7 @@ archives: name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" format_overrides: - goos: windows - format: zip + formats: ['zip'] files: - unpack/* - s2/LICENSE diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 244ee19c4b..e839fe9c60 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -7,7 +7,7 @@ This package provides various compression algorithms. * Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). 
* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. * [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. -* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped/zstd HTTP requests efficiently. * [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. [![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) @@ -27,6 +27,28 @@ Use the links above for more information on each. # changelog +* Feb 9th, 2026 [1.18.4](https://github.com/klauspost/compress/releases/tag/v1.18.4) + * gzhttp: Add zstandard to server handler wrapper https://github.com/klauspost/compress/pull/1121 + * zstd: Add ResetWithOptions to encoder/decoder https://github.com/klauspost/compress/pull/1122 + * gzhttp: preserve qvalue when extra parameters follow in Accept-Encoding by @analytically in https://github.com/klauspost/compress/pull/1116 + +* Jan 16th, 2026 [1.18.3](https://github.com/klauspost/compress/releases/tag/v1.18.3) + * Downstream CVE-2025-61728. See [golang/go#77102](https://github.com/golang/go/issues/77102). 
+ +* Dec 1st, 2025 - [1.18.2](https://github.com/klauspost/compress/releases/tag/v1.18.2) + * flate: Fix invalid encoding on level 9 with single value input in https://github.com/klauspost/compress/pull/1115 + * flate: reduce stateless allocations by @RXamzin in https://github.com/klauspost/compress/pull/1106 + +* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1) - RETRACTED + * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079 + * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059 + * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080 + * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086 + * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090 + * flate: Faster load+store https://github.com/klauspost/compress/pull/1104 + * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101 + * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103 + * Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 @@ -36,6 +58,9 @@ Use the links above for more information on each. * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 +

+ See changes to v1.17.x + * Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 @@ -102,7 +127,8 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 - + +
See changes to v1.16.x @@ -589,7 +615,7 @@ While the release has been extensively tested, it is recommended to testing when # deflate usage -The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: +The packages are drop-in replacements for standard library [deflate](https://godoc.org/github.com/klauspost/compress/flate), [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip), and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). Simply replace the import path to use them: Typical speed is about 2x of the standard library packages. @@ -600,17 +626,15 @@ Typical speed is about 2x of the standard library packages. | `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) | | `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) | -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). - -You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. +You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop-in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. 
-The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). +The packages implement the same API as the standard library, so you can use the original godoc documentation: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). Currently there is only minor speedup on decompression (mostly CRC32 calculation). Memory usage is typically 1MB for a Writer. stdlib is in the same range. If you expect to have a lot of concurrently allocated Writers consider using -the stateless compress described below. +the stateless compression described below. For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). @@ -669,3 +693,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv # license This code is licensed under the same conditions as the original Go code. See LICENSE file. + + + + + diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go index 99ddd4af97..2d6ef64be1 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc // This file contains the specialisation of Decoder.Decompress4X // and Decoder.Decompress1X that use an asm implementation of thir main loops. 
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go index 908c17de63..6103923222 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm // This file contains a generic implementation of Decoder.Decompress4X. package huff0 diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go index e802579c4f..b97f9056f4 100644 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package cpuinfo diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index fd35ea1480..0e33aea442 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -78,6 +78,7 @@ func (b *blockEnc) initNewEncode() { b.recentOffsets = [3]uint32{1, 4, 8} b.litEnc.Reuse = huff0.ReusePolicyNone b.coders.setPrev(nil, nil, nil) + b.dictLitEnc = nil } // reset will reset the block for a new encode, but in the same stream, diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 30df5513d5..c7e500f02a 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -39,9 +39,6 @@ type Decoder struct { frame *frameDec - // Custom dictionaries. 
- dicts map[uint32]*dict - // streamWg is the waitgroup for all streams streamWg sync.WaitGroup } @@ -101,12 +98,10 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { d.current.err = ErrDecoderNilInput } - // Transfer option dicts. - d.dicts = make(map[uint32]*dict, len(d.o.dicts)) - for _, dc := range d.o.dicts { - d.dicts[dc.id] = dc + // Initialize dict map if needed. + if d.o.dicts == nil { + d.o.dicts = make(map[uint32]*dict) } - d.o.dicts = nil // Create decoders d.decoders = make(chan *blockDec, d.o.concurrent) @@ -238,6 +233,21 @@ func (d *Decoder) Reset(r io.Reader) error { return nil } +// ResetWithOptions will reset the decoder and apply the given options +// for the next stream or DecodeAll operation. +// Options are applied on top of the existing options. +// Some options cannot be changed on reset and will return an error. +func (d *Decoder) ResetWithOptions(r io.Reader, opts ...DOption) error { + d.o.resetOpt = true + defer func() { d.o.resetOpt = false }() + for _, o := range opts { + if err := o(&d.o); err != nil { + return err + } + } + return d.Reset(r) +} + // drainOutput will drain the output until errEndOfStream is sent. 
func (d *Decoder) drainOutput() { if d.current.cancel != nil { @@ -930,7 +940,7 @@ decodeStream: } func (d *Decoder) setDict(frame *frameDec) (err error) { - dict, ok := d.dicts[frame.DictionaryID] + dict, ok := d.o.dicts[frame.DictionaryID] if ok { if debugDecoder { println("setting dict", frame.DictionaryID) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 774c5f00fe..537627a078 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -20,10 +20,11 @@ type decoderOptions struct { concurrent int maxDecodedSize uint64 maxWindowSize uint64 - dicts []*dict + dicts map[uint32]*dict ignoreChecksum bool limitToCap bool decodeBufsBelow int + resetOpt bool } func (o *decoderOptions) setDefault() { @@ -42,8 +43,15 @@ func (o *decoderOptions) setDefault() { // WithDecoderLowmem will set whether to use a lower amount of memory, // but possibly have to allocate more while running. +// Cannot be changed with ResetWithOptions. func WithDecoderLowmem(b bool) DOption { - return func(o *decoderOptions) error { o.lowMem = b; return nil } + return func(o *decoderOptions) error { + if o.resetOpt && b != o.lowMem { + return errors.New("WithDecoderLowmem cannot be changed on Reset") + } + o.lowMem = b + return nil + } } // WithDecoderConcurrency sets the number of created decoders. @@ -53,18 +61,23 @@ func WithDecoderLowmem(b bool) DOption { // inflight blocks. // When decoding streams and setting maximum to 1, // no async decoding will be done. +// The value supplied must be at least 0. // When a value of 0 is provided GOMAXPROCS will be used. // By default this will be set to 4 or GOMAXPROCS, whatever is lower. +// Cannot be changed with ResetWithOptions. 
func WithDecoderConcurrency(n int) DOption { return func(o *decoderOptions) error { if n < 0 { - return errors.New("concurrency must be at least 1") + return errors.New("concurrency must be at least 0") } + newVal := n if n == 0 { - o.concurrent = runtime.GOMAXPROCS(0) - } else { - o.concurrent = n + newVal = runtime.GOMAXPROCS(0) } + if o.resetOpt && newVal != o.concurrent { + return errors.New("WithDecoderConcurrency cannot be changed on Reset") + } + o.concurrent = newVal return nil } } @@ -73,6 +86,7 @@ func WithDecoderConcurrency(n int) DOption { // non-streaming operations or maximum window size for streaming operations. // This can be used to control memory usage of potentially hostile content. // Maximum is 1 << 63 bytes. Default is 64GiB. +// Can be changed with ResetWithOptions. func WithDecoderMaxMemory(n uint64) DOption { return func(o *decoderOptions) error { if n == 0 { @@ -92,16 +106,20 @@ func WithDecoderMaxMemory(n uint64) DOption { // "zstd --train" from the Zstandard reference implementation. // // If several dictionaries with the same ID are provided, the last one will be used. +// Can be changed with ResetWithOptions. // // [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithDecoderDicts(dicts ...[]byte) DOption { return func(o *decoderOptions) error { + if o.dicts == nil { + o.dicts = make(map[uint32]*dict) + } for _, b := range dicts { d, err := loadDict(b) if err != nil { return err } - o.dicts = append(o.dicts, d) + o.dicts[d.id] = d } return nil } @@ -109,12 +127,16 @@ func WithDecoderDicts(dicts ...[]byte) DOption { // WithDecoderDictRaw registers a dictionary that may be used by the decoder. // The slice content can be arbitrary data. +// Can be changed with ResetWithOptions. 
func WithDecoderDictRaw(id uint32, content []byte) DOption { return func(o *decoderOptions) error { if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) } - o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + if o.dicts == nil { + o.dicts = make(map[uint32]*dict) + } + o.dicts[id] = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} return nil } } @@ -124,6 +146,7 @@ func WithDecoderDictRaw(id uint32, content []byte) DOption { // The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. // If WithDecoderMaxMemory is set to a lower value, that will be used. // Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +// Can be changed with ResetWithOptions. func WithDecoderMaxWindow(size uint64) DOption { return func(o *decoderOptions) error { if size < MinWindowSize { @@ -141,6 +164,7 @@ func WithDecoderMaxWindow(size uint64) DOption { // or any size set in WithDecoderMaxMemory. // This can be used to limit decoding to a specific maximum output size. // Disabled by default. +// Can be changed with ResetWithOptions. func WithDecodeAllCapLimit(b bool) DOption { return func(o *decoderOptions) error { o.limitToCap = b @@ -153,17 +177,37 @@ func WithDecodeAllCapLimit(b bool) DOption { // This typically uses less allocations but will have the full decompressed object in memory. // Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. // Default is 128KiB. +// Cannot be changed with ResetWithOptions. func WithDecodeBuffersBelow(size int) DOption { return func(o *decoderOptions) error { + if o.resetOpt && size != o.decodeBufsBelow { + return errors.New("WithDecodeBuffersBelow cannot be changed on Reset") + } o.decodeBufsBelow = size return nil } } // IgnoreChecksum allows to forcibly ignore checksum checking. +// Can be changed with ResetWithOptions. 
func IgnoreChecksum(b bool) DOption { return func(o *decoderOptions) error { o.ignoreChecksum = b return nil } } + +// WithDecoderDictDelete removes dictionaries by ID. +// If no ids are passed, all dictionaries are deleted. +// Should be used with ResetWithOptions. +func WithDecoderDictDelete(ids ...uint32) DOption { + return func(o *decoderOptions) error { + if len(ids) == 0 { + clear(o.dicts) + } + for _, id := range ids { + delete(o.dicts, id) + } + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index c1192ec38f..c4de134a7a 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -21,7 +21,7 @@ type fastBase struct { crc *xxhash.Digest tmp [8]byte blk *blockEnc - lastDictID uint32 + lastDict *dict lowMem bool } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index c1581cfcb8..851799322b 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -479,10 +479,13 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { if d == nil { return } + dictChanged := d != e.lastDict // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || dictChanged { if len(e.dictTable) != len(e.table) { e.dictTable = make([]prevEntry, len(e.table)) + } else { + clear(e.dictTable) } end := int32(len(d.content)) - 8 + e.maxMatchOff for i := e.maxMatchOff; i < end; i += 4 { @@ -510,13 +513,14 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { offset: i + 3, } } - e.lastDictID = d.id } - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + // Init or copy dict long table + if len(e.dictLongTable) != len(e.longTable) || dictChanged { if 
len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]prevEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -538,8 +542,8 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { off++ } } - e.lastDictID = d.id } + e.lastDict = d // Reset table to initial state copy(e.longTable[:], e.dictLongTable) diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 85dcd28c32..3305f09248 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -1102,10 +1102,13 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { if d == nil { return } + dictChanged := d != e.lastDict // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || dictChanged { if len(e.dictTable) != len(e.table) { e.dictTable = make([]tableEntry, len(e.table)) + } else { + clear(e.dictTable) } end := int32(len(d.content)) - 8 + e.maxMatchOff for i := e.maxMatchOff; i < end; i += 4 { @@ -1133,14 +1136,15 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { offset: i + 3, } } - e.lastDictID = d.id e.allDirty = true } - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + // Init or copy dict long table + if len(e.dictLongTable) != len(e.longTable) || dictChanged { if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]prevEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -1162,9 +1166,9 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { off++ } } - e.lastDictID = d.id e.allDirty = true } + e.lastDict = d // Reset table to initial state { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go 
b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index cf8cad00dc..2fb6da112b 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -1040,15 +1040,18 @@ func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { // ResetDict will reset and set a dictionary if not nil func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { allDirty := e.allDirty + dictChanged := d != e.lastDict e.fastEncoderDict.Reset(d, singleBlock) if d == nil { return } // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) || dictChanged { if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]tableEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -1065,7 +1068,6 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } } } - e.lastDictID = d.id allDirty = true } // Reset table to initial state diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 9180a3a582..5e104f1a48 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -805,9 +805,11 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { } // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || d != e.lastDict { if len(e.dictTable) != len(e.table) { e.dictTable = make([]tableEntry, len(e.table)) + } else { + clear(e.dictTable) } if true { end := e.maxMatchOff + int32(len(d.content)) - 8 @@ -827,7 +829,7 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { } } } - e.lastDictID = d.id + e.lastDict = d e.allDirty = true } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go 
b/vendor/github.com/klauspost/compress/zstd/encoder.go index 8f8223cd3a..0f2a00a003 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -131,6 +131,29 @@ func (e *Encoder) Reset(w io.Writer) { s.frameContentSize = 0 } +// ResetWithOptions will re-initialize the writer and apply the given options +// as a new, independent stream. +// Options are applied on top of the existing options. +// Some options cannot be changed on reset and will return an error. +func (e *Encoder) ResetWithOptions(w io.Writer, opts ...EOption) error { + e.o.resetOpt = true + defer func() { e.o.resetOpt = false }() + hadDict := e.o.dict != nil + for _, o := range opts { + if err := o(&e.o); err != nil { + return err + } + } + hasDict := e.o.dict != nil + if hadDict != hasDict { + // Dict presence changed — encoder type must be recreated. + e.state.encoder = nil + e.init = sync.Once{} + } + e.Reset(w) + return nil +} + // ResetContentSize will reset and set a content size for the next stream. // If the bytes written does not match the size given an error will be returned // when calling Close(). @@ -432,6 +455,12 @@ func (e *Encoder) Close() error { if s.encoder == nil { return nil } + if s.w == nil { + if len(s.filling) == 0 && !s.headerWritten && !s.eofWritten && s.nInput == 0 { + return nil + } + return errors.New("zstd: encoder has no writer") + } err := e.nextBlock(true) if err != nil { if errors.Is(s.err, ErrEncoderClosed) { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 20671dcb91..e217be0a17 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -14,6 +14,7 @@ type EOption func(*encoderOptions) error // options retains accumulated state of multiple options. 
type encoderOptions struct { + resetOpt bool concurrent int level EncoderLevel single *bool @@ -41,6 +42,7 @@ func (o *encoderOptions) setDefault() { level: SpeedDefault, allLitEntropy: false, lowMem: false, + fullZero: true, } } @@ -71,19 +73,28 @@ func (o encoderOptions) encoder() encoder { // WithEncoderCRC will add CRC value to output. // Output will be 4 bytes larger. +// Can be changed with ResetWithOptions. func WithEncoderCRC(b bool) EOption { return func(o *encoderOptions) error { o.crc = b; return nil } } // WithEncoderConcurrency will set the concurrency, // meaning the maximum number of encoders to run concurrently. -// The value supplied must be at least 1. +// The value supplied must be at least 0. +// When a value of 0 is provided GOMAXPROCS will be used. // For streams, setting a value of 1 will disable async compression. // By default this will be set to GOMAXPROCS. +// Cannot be changed with ResetWithOptions. func WithEncoderConcurrency(n int) EOption { return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("concurrency must be at least 1") + if n < 0 { + return errors.New("concurrency must at least 0") + } + if n == 0 { + n = runtime.GOMAXPROCS(0) + } + if o.resetOpt && n != o.concurrent { + return errors.New("WithEncoderConcurrency cannot be changed on Reset") } o.concurrent = n return nil @@ -95,6 +106,7 @@ func WithEncoderConcurrency(n int) EOption { // A larger value will enable better compression but allocate more memory and, // for above-default values, take considerably longer. // The default value is determined by the compression level and max 8MB. +// Cannot be changed with ResetWithOptions. 
func WithWindowSize(n int) EOption { return func(o *encoderOptions) error { switch { @@ -105,6 +117,9 @@ func WithWindowSize(n int) EOption { case (n & (n - 1)) != 0: return errors.New("window size must be a power of 2") } + if o.resetOpt && n != o.windowSize { + return errors.New("WithWindowSize cannot be changed on Reset") + } o.windowSize = n o.customWindow = true @@ -122,6 +137,7 @@ func WithWindowSize(n int) EOption { // n must be > 0 and <= 1GB, 1<<30 bytes. // The padded area will be filled with data from crypto/rand.Reader. // If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. +// Can be changed with ResetWithOptions. func WithEncoderPadding(n int) EOption { return func(o *encoderOptions) error { if n <= 0 { @@ -215,12 +231,16 @@ func (e EncoderLevel) String() string { } // WithEncoderLevel specifies a predefined compression level. +// Cannot be changed with ResetWithOptions. func WithEncoderLevel(l EncoderLevel) EOption { return func(o *encoderOptions) error { switch { case l <= speedNotSet || l >= speedLast: return fmt.Errorf("unknown encoder level") } + if o.resetOpt && l != o.level { + return errors.New("WithEncoderLevel cannot be changed on Reset") + } o.level = l if !o.customWindow { switch o.level { @@ -248,6 +268,7 @@ func WithEncoderLevel(l EncoderLevel) EOption { // WithZeroFrames will encode 0 length input as full frames. // This can be needed for compatibility with zstandard usage, // but is not needed for this package. +// Can be changed with ResetWithOptions. func WithZeroFrames(b bool) EOption { return func(o *encoderOptions) error { o.fullZero = b @@ -259,6 +280,7 @@ func WithZeroFrames(b bool) EOption { // Disabling this will skip incompressible data faster, but in cases with no matches but // skewed character distribution compression is lost. // Default value depends on the compression level selected. +// Can be changed with ResetWithOptions. 
func WithAllLitEntropyCompression(b bool) EOption { return func(o *encoderOptions) error { o.customALEntropy = true @@ -270,6 +292,7 @@ func WithAllLitEntropyCompression(b bool) EOption { // WithNoEntropyCompression will always skip entropy compression of literals. // This can be useful if content has matches, but unlikely to benefit from entropy // compression. Usually the slight speed improvement is not worth enabling this. +// Can be changed with ResetWithOptions. func WithNoEntropyCompression(b bool) EOption { return func(o *encoderOptions) error { o.noEntropy = b @@ -287,6 +310,7 @@ func WithNoEntropyCompression(b bool) EOption { // This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. // If this is not specified, block encodes will automatically choose this based on the input size and the window size. // This setting has no effect on streamed encodes. +// Can be changed with ResetWithOptions. func WithSingleSegment(b bool) EOption { return func(o *encoderOptions) error { o.single = &b @@ -298,8 +322,12 @@ func WithSingleSegment(b bool) EOption { // slower encoding speed. // This will not change the window size which is the primary function for reducing // memory usage. See WithWindowSize. +// Cannot be changed with ResetWithOptions. func WithLowerEncoderMem(b bool) EOption { return func(o *encoderOptions) error { + if o.resetOpt && b != o.lowMem { + return errors.New("WithLowerEncoderMem cannot be changed on Reset") + } o.lowMem = b return nil } @@ -311,6 +339,7 @@ func WithLowerEncoderMem(b bool) EOption { // "zstd --train" from the Zstandard reference implementation. // // The encoder *may* choose to use no dictionary instead for certain payloads. +// Can be changed with ResetWithOptions. 
// // [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithEncoderDict(dict []byte) EOption { @@ -328,6 +357,7 @@ func WithEncoderDict(dict []byte) EOption { // // The slice content may contain arbitrary data. It will be used as an initial // history. +// Can be changed with ResetWithOptions. func WithEncoderDictRaw(id uint32, content []byte) EOption { return func(o *encoderOptions) error { if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { @@ -337,3 +367,12 @@ func WithEncoderDictRaw(id uint32, content []byte) EOption { return nil } } + +// WithEncoderDictDelete clears the dictionary, so no dictionary will be used. +// Should be used with ResetWithOptions. +func WithEncoderDictDelete() EOption { + return func(o *encoderOptions) error { + o.dict = nil + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go index d04a829b0a..b8c8607b5d 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go index 8adfebb029..2138f8091a 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 0be16cefc7..9576426e68 100644 --- 
a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -1,5 +1,4 @@ //go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm -// +build !amd64,!arm64 appengine !gc purego noasm package xxhash diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go index f41932b7a4..1ed18927f9 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc // Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go index bea1779e97..379746c96c 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm // Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. 
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 1f8c3cec28..18c3703ddc 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go index 7cec2197cd..516cd9b070 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm package zstd diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore b/vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore new file mode 100644 index 0000000000..aaadf736e5 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore @@ -0,0 +1,32 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Code coverage profiles and other test artifacts +*.out +coverage.* +*.coverprofile +profile.cov + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work +go.work.sum + +# env file +.env + +# Editor/IDE +# .idea/ +# .vscode/ diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/Changes b/vendor/github.com/lestrrat-go/dsig-secp256k1/Changes new file mode 100644 index 0000000000..4dd588a006 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/Changes @@ -0,0 +1,5 @@ +Changes +======= + +v1.0.0 18 Aug 2025 +* Initial 
release \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/option/LICENSE b/vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE similarity index 97% rename from vendor/github.com/lestrrat-go/option/LICENSE rename to vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE index 188ea7685c..1e1f5d199e 100644 --- a/vendor/github.com/lestrrat-go/option/LICENSE +++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2021 lestrrat-go +Copyright (c) 2025 lestrrat-go Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go b/vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go new file mode 100644 index 0000000000..85650efa1c --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go @@ -0,0 +1,29 @@ +package dsigsecp256k1 + +import ( + "crypto" + + "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/lestrrat-go/dsig" +) + +const ECDSAWithSecp256k1AndSHA256 = "ECDSA_WITH_SECP256K1_AND_SHA256" + +// init adds secp256k1 support when the dsig_secp256k1 build tag is used. +func init() { + // Register ES256K (secp256k1 + SHA256) support using the new API + err := dsig.RegisterAlgorithm(ECDSAWithSecp256k1AndSHA256, dsig.AlgorithmInfo{ + Family: dsig.ECDSA, + Meta: dsig.ECDSAFamilyMeta{ + Hash: crypto.SHA256, + }, + }) + if err != nil { + panic("failed to register secp256k1 algorithm: " + err.Error()) + } +} + +// secp256k1Curve returns the secp256k1 curve. 
+func Curve() *secp256k1.KoblitzCurve { + return secp256k1.S256() +} diff --git a/vendor/github.com/lestrrat-go/dsig/.gitignore b/vendor/github.com/lestrrat-go/dsig/.gitignore new file mode 100644 index 0000000000..aaadf736e5 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/.gitignore @@ -0,0 +1,32 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Code coverage profiles and other test artifacts +*.out +coverage.* +*.coverprofile +profile.cov + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work +go.work.sum + +# env file +.env + +# Editor/IDE +# .idea/ +# .vscode/ diff --git a/vendor/github.com/lestrrat-go/dsig/Changes b/vendor/github.com/lestrrat-go/dsig/Changes new file mode 100644 index 0000000000..bccce97613 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/Changes @@ -0,0 +1,5 @@ +Changes +======= + +v1.0.0 - 18 Aug 2025 +* Initial release \ No newline at end of file diff --git a/vendor/github.com/sourcegraph/conc/LICENSE b/vendor/github.com/lestrrat-go/dsig/LICENSE similarity index 97% rename from vendor/github.com/sourcegraph/conc/LICENSE rename to vendor/github.com/lestrrat-go/dsig/LICENSE index 1081f4ef4a..1e1f5d199e 100644 --- a/vendor/github.com/sourcegraph/conc/LICENSE +++ b/vendor/github.com/lestrrat-go/dsig/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2023 Sourcegraph +Copyright (c) 2025 lestrrat-go Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/lestrrat-go/dsig/README.md b/vendor/github.com/lestrrat-go/dsig/README.md new file mode 100644 index 0000000000..37c194579e --- /dev/null 
+++ b/vendor/github.com/lestrrat-go/dsig/README.md @@ -0,0 +1,163 @@ +# github.com/lestrrat-go/dsig [![CI](https://github.com/lestrrat-go/dsig/actions/workflows/ci.yml/badge.svg)](https://github.com/lestrrat-go/dsig/actions/workflows/ci.yml) [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/dsig.svg)](https://pkg.go.dev/github.com/lestrrat-go/dsig) [![codecov.io](https://codecov.io/github/lestrrat-go/dsig/coverage.svg?branch=v1)](https://codecov.io/github/lestrrat-go/dsig?branch=v1) + +Go module providing low-level digital signature operations. + +While there are many standards for generating and verifying digital signatures, the core operations are virtually the same. This module implements the core functionality of digital signature generation / verifications in a framework agnostic way. + +# Features + +* RSA signatures (PKCS1v15 and PSS) +* ECDSA signatures (P-256, P-384, P-521) +* EdDSA signatures (Ed25519, Ed448) +* HMAC signatures (SHA-256, SHA-384, SHA-512) +* Support for crypto.Signer interface +* Allows for dynamic additions of algorithms in limited cases. 
+ +# SYNOPSIS + + +```go +package examples_test + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "fmt" + + "github.com/lestrrat-go/dsig" +) + +func Example() { + payload := []byte("hello world") + + // RSA signing and verification + { + privKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + fmt.Printf("failed to generate RSA key: %s\n", err) + return + } + + // Sign with RSA-PSS SHA256 + signature, err := dsig.Sign(privKey, dsig.RSAPSSWithSHA256, payload, nil) + if err != nil { + fmt.Printf("failed to sign with RSA: %s\n", err) + return + } + + // Verify with RSA-PSS SHA256 + err = dsig.Verify(&privKey.PublicKey, dsig.RSAPSSWithSHA256, payload, signature) + if err != nil { + fmt.Printf("failed to verify RSA signature: %s\n", err) + return + } + } + + // ECDSA signing and verification + { + privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + fmt.Printf("failed to generate ECDSA key: %s\n", err) + return + } + + // Sign with ECDSA P-256 SHA256 + signature, err := dsig.Sign(privKey, dsig.ECDSAWithP256AndSHA256, payload, nil) + if err != nil { + fmt.Printf("failed to sign with ECDSA: %s\n", err) + return + } + + // Verify with ECDSA P-256 SHA256 + err = dsig.Verify(&privKey.PublicKey, dsig.ECDSAWithP256AndSHA256, payload, signature) + if err != nil { + fmt.Printf("failed to verify ECDSA signature: %s\n", err) + return + } + } + + // EdDSA signing and verification + { + pubKey, privKey, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + fmt.Printf("failed to generate Ed25519 key: %s\n", err) + return + } + + // Sign with EdDSA + signature, err := dsig.Sign(privKey, dsig.EdDSA, payload, nil) + if err != nil { + fmt.Printf("failed to sign with EdDSA: %s\n", err) + return + } + + // Verify with EdDSA + err = dsig.Verify(pubKey, dsig.EdDSA, payload, signature) + if err != nil { + fmt.Printf("failed to verify EdDSA signature: %s\n", err) + return + } + } + + // HMAC 
signing and verification + { + key := []byte("secret-key") + + // Sign with HMAC SHA256 + signature, err := dsig.Sign(key, dsig.HMACWithSHA256, payload, nil) + if err != nil { + fmt.Printf("failed to sign with HMAC: %s\n", err) + return + } + + // Verify with HMAC SHA256 + err = dsig.Verify(key, dsig.HMACWithSHA256, payload, signature) + if err != nil { + fmt.Printf("failed to verify HMAC signature: %s\n", err) + return + } + } + // OUTPUT: +} +``` +source: [examples/dsig_readme_example_test.go](https://github.com/lestrrat-go/dsig/blob/v1/examples/dsig_readme_example_test.go) + + +# Supported Algorithms + +| Constant | Algorithm | Key Type | +|----------|-----------|----------| +| `HMACWithSHA256` | HMAC using SHA-256 | []byte | +| `HMACWithSHA384` | HMAC using SHA-384 | []byte | +| `HMACWithSHA512` | HMAC using SHA-512 | []byte | +| `RSAPKCS1v15WithSHA256` | RSA PKCS#1 v1.5 using SHA-256 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPKCS1v15WithSHA384` | RSA PKCS#1 v1.5 using SHA-384 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPKCS1v15WithSHA512` | RSA PKCS#1 v1.5 using SHA-512 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPSSWithSHA256` | RSA PSS using SHA-256 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPSSWithSHA384` | RSA PSS using SHA-384 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPSSWithSHA512` | RSA PSS using SHA-512 | *rsa.PrivateKey / *rsa.PublicKey | +| `ECDSAWithP256AndSHA256` | ECDSA using P-256 and SHA-256 | *ecdsa.PrivateKey / *ecdsa.PublicKey | +| `ECDSAWithP384AndSHA384` | ECDSA using P-384 and SHA-384 | *ecdsa.PrivateKey / *ecdsa.PublicKey | +| `ECDSAWithP521AndSHA512` | ECDSA using P-521 and SHA-512 | *ecdsa.PrivateKey / *ecdsa.PublicKey | +| `EdDSA` | EdDSA using Ed25519 or Ed448 | ed25519.PrivateKey / ed25519.PublicKey | + +# Description + +This library provides low-level digital signature operations. It does minimal parameter validation for performance, uses strongly typed APIs, and has minimal dependencies. 
# Contributions

## Issues

For bug reports and feature requests, please include failing tests when possible.

## Pull Requests

Please include tests that exercise your changes.

# Related Libraries

* [github.com/lestrrat-go/jwx](https://github.com/lestrrat-go/jwx) - JOSE (JWA/JWE/JWK/JWS/JWT) implementation

// ---- vendor/github.com/lestrrat-go/dsig/algorithms.go (new file) ----

package dsig

// This file defines verbose algorithm name constants that can be mapped to by
// different standards (RFC7518, FIDO, etc.) for interoperability.
//
// The algorithm names are intentionally verbose to avoid any ambiguity about
// the exact cryptographic operations being performed.

const (
	// HMAC signature algorithms
	// These use Hash-based Message Authentication Code with specified hash functions
	HMACWithSHA256 = "HMAC_WITH_SHA256"
	HMACWithSHA384 = "HMAC_WITH_SHA384"
	HMACWithSHA512 = "HMAC_WITH_SHA512"

	// RSA signature algorithms with PKCS#1 v1.5 padding
	// These use RSA signatures with PKCS#1 v1.5 padding and specified hash functions
	RSAPKCS1v15WithSHA256 = "RSA_PKCS1v15_WITH_SHA256"
	RSAPKCS1v15WithSHA384 = "RSA_PKCS1v15_WITH_SHA384"
	RSAPKCS1v15WithSHA512 = "RSA_PKCS1v15_WITH_SHA512"

	// RSA signature algorithms with PSS padding
	// These use RSA signatures with Probabilistic Signature Scheme (PSS) padding
	RSAPSSWithSHA256 = "RSA_PSS_WITH_SHA256"
	RSAPSSWithSHA384 = "RSA_PSS_WITH_SHA384"
	RSAPSSWithSHA512 = "RSA_PSS_WITH_SHA512"

	// ECDSA signature algorithms
	// These use Elliptic Curve Digital Signature Algorithm with specified curves and hash functions
	ECDSAWithP256AndSHA256 = "ECDSA_WITH_P256_AND_SHA256"
	ECDSAWithP384AndSHA384 = "ECDSA_WITH_P384_AND_SHA384"
	ECDSAWithP521AndSHA512 = "ECDSA_WITH_P521_AND_SHA512"

	// EdDSA signature algorithms
	// These use Edwards-curve Digital Signature Algorithm (supports Ed25519 and Ed448)
	EdDSA = "EDDSA"
)

// ---- vendor/github.com/lestrrat-go/dsig/crypto_signer.go (new file) ----

package dsig

import (
	"crypto"
	"crypto/rand"
	"fmt"
	"io"
)

// cryptosign is a low-level function that signs a payload using a crypto.Signer.
// If hash is crypto.Hash(0), the payload is signed directly without hashing
// (the EdDSA path relies on this). Otherwise, the payload is hashed using the
// specified hash function before signing.
//
// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
func cryptosign(signer crypto.Signer, payload []byte, hash crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) {
	if rr == nil {
		rr = rand.Reader
	}

	var digest []byte
	if hash == crypto.Hash(0) {
		// No pre-hashing: hand the raw payload to the signer.
		digest = payload
	} else {
		h := hash.New()
		if _, err := h.Write(payload); err != nil {
			return nil, fmt.Errorf(`failed to write payload to hash: %w`, err)
		}
		digest = h.Sum(nil)
	}
	return signer.Sign(rr, digest, opts)
}

// SignCryptoSigner generates a signature using a crypto.Signer interface.
// This function can be used for hardware security modules, smart cards,
// and other implementations of the crypto.Signer interface.
//
// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
//
// Returns the signature bytes or an error if signing fails.
+func SignCryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) { + if signer == nil { + return nil, fmt.Errorf("dsig.SignCryptoSigner: signer is nil") + } + return cryptosign(signer, raw, h, opts, rr) +} diff --git a/vendor/github.com/lestrrat-go/dsig/dsig.go b/vendor/github.com/lestrrat-go/dsig/dsig.go new file mode 100644 index 0000000000..de6cbdec45 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/dsig.go @@ -0,0 +1,224 @@ +// Package dsig provides digital signature operations for Go. +// It contains low-level signature generation and verification tools that +// can be used by other signing libraries +// +// The package follows these design principles: +// 1. Does minimal checking of input parameters (for performance); callers need to ensure that the parameters are valid. +// 2. All exported functions are strongly typed (i.e. they do not take `any` types unless they absolutely have to). +// 3. Does not rely on other high-level packages (standalone, except for internal packages). 
+package dsig + +import ( + "crypto" + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + "sync" +) + +// Family represents the cryptographic algorithm family +type Family int + +const ( + InvalidFamily Family = iota + HMAC + RSA + ECDSA + EdDSAFamily + maxFamily +) + +// String returns the string representation of the Family +func (f Family) String() string { + switch f { + case HMAC: + return "HMAC" + case RSA: + return "RSA" + case ECDSA: + return "ECDSA" + case EdDSAFamily: + return "EdDSA" + default: + return "InvalidFamily" + } +} + +// AlgorithmInfo contains metadata about a digital signature algorithm +type AlgorithmInfo struct { + Family Family // The cryptographic family (HMAC, RSA, ECDSA, EdDSA) + Meta any // Family-specific metadata +} + +// HMACFamilyMeta contains metadata specific to HMAC algorithms +type HMACFamilyMeta struct { + HashFunc func() hash.Hash // Hash function constructor +} + +// RSAFamilyMeta contains metadata specific to RSA algorithms +type RSAFamilyMeta struct { + Hash crypto.Hash // Hash algorithm + PSS bool // Whether to use PSS padding (false = PKCS#1 v1.5) +} + +// ECDSAFamilyMeta contains metadata specific to ECDSA algorithms +type ECDSAFamilyMeta struct { + Hash crypto.Hash // Hash algorithm +} + +// EdDSAFamilyMeta contains metadata specific to EdDSA algorithms +// Currently EdDSA doesn't need specific metadata, but this provides extensibility +type EdDSAFamilyMeta struct { + // Reserved for future use +} + +var algorithms = make(map[string]AlgorithmInfo) +var muAlgorithms sync.RWMutex + +// RegisterAlgorithm registers a new digital signature algorithm with the specified family and metadata. +// +// info.Meta should contain extra metadata for some algorithms. Currently HMAC, RSA, +// and ECDSA family of algorithms need their respective metadata (HMACFamilyMeta, +// RSAFamilyMeta, and ECDSAFamilyMeta). Metadata for other families are ignored. 
func RegisterAlgorithm(name string, info AlgorithmInfo) error {
	muAlgorithms.Lock()
	defer muAlgorithms.Unlock()

	// Validate the metadata matches the family
	switch info.Family {
	case HMAC:
		if _, ok := info.Meta.(HMACFamilyMeta); !ok {
			return fmt.Errorf("invalid HMAC metadata for algorithm %s", name)
		}
	case RSA:
		if _, ok := info.Meta.(RSAFamilyMeta); !ok {
			return fmt.Errorf("invalid RSA metadata for algorithm %s", name)
		}
	case ECDSA:
		if _, ok := info.Meta.(ECDSAFamilyMeta); !ok {
			return fmt.Errorf("invalid ECDSA metadata for algorithm %s", name)
		}
	case EdDSAFamily:
		// EdDSA metadata is optional for now
	default:
		return fmt.Errorf("unsupported algorithm family %s for algorithm %s", info.Family, name)
	}

	algorithms[name] = info
	return nil
}

// GetAlgorithmInfo retrieves the algorithm information for a given algorithm name.
// Returns the info and true if found, zero value and false if not found.
func GetAlgorithmInfo(name string) (AlgorithmInfo, bool) {
	muAlgorithms.RLock()
	defer muAlgorithms.RUnlock()

	info, ok := algorithms[name]
	return info, ok
}

func init() {
	// Register all standard algorithms with their metadata
	toRegister := map[string]AlgorithmInfo{
		// HMAC algorithms
		HMACWithSHA256: {
			Family: HMAC,
			Meta: HMACFamilyMeta{
				HashFunc: sha256.New,
			},
		},
		HMACWithSHA384: {
			Family: HMAC,
			Meta: HMACFamilyMeta{
				HashFunc: sha512.New384,
			},
		},
		HMACWithSHA512: {
			Family: HMAC,
			Meta: HMACFamilyMeta{
				HashFunc: sha512.New,
			},
		},

		// RSA PKCS#1 v1.5 algorithms
		RSAPKCS1v15WithSHA256: {
			Family: RSA,
			Meta: RSAFamilyMeta{
				Hash: crypto.SHA256,
				PSS:  false,
			},
		},
		RSAPKCS1v15WithSHA384: {
			Family: RSA,
			Meta: RSAFamilyMeta{
				Hash: crypto.SHA384,
				PSS:  false,
			},
		},
		RSAPKCS1v15WithSHA512: {
			Family: RSA,
			Meta: RSAFamilyMeta{
				Hash: crypto.SHA512,
				PSS:  false,
			},
		},

		// RSA PSS algorithms
		RSAPSSWithSHA256: {
			Family: RSA,
			Meta: RSAFamilyMeta{
				Hash: crypto.SHA256,
				PSS:  true,
			},
		},
		RSAPSSWithSHA384: {
			Family: RSA,
			Meta: RSAFamilyMeta{
				Hash: crypto.SHA384,
				PSS:  true,
			},
		},
		RSAPSSWithSHA512: {
			Family: RSA,
			Meta: RSAFamilyMeta{
				Hash: crypto.SHA512,
				PSS:  true,
			},
		},

		// ECDSA algorithms
		ECDSAWithP256AndSHA256: {
			Family: ECDSA,
			Meta: ECDSAFamilyMeta{
				Hash: crypto.SHA256,
			},
		},
		ECDSAWithP384AndSHA384: {
			Family: ECDSA,
			Meta: ECDSAFamilyMeta{
				Hash: crypto.SHA384,
			},
		},
		ECDSAWithP521AndSHA512: {
			Family: ECDSA,
			Meta: ECDSAFamilyMeta{
				Hash: crypto.SHA512,
			},
		},

		// EdDSA algorithm
		EdDSA: {
			Family: EdDSAFamily,
			Meta:   EdDSAFamilyMeta{},
		},
	}

	for name, info := range toRegister {
		if err := RegisterAlgorithm(name, info); err != nil {
			// Registration of the built-ins can only fail if the tables above are wrong.
			panic(fmt.Sprintf("failed to register algorithm %s: %v", name, err))
		}
	}
}

// ---- vendor/github.com/lestrrat-go/dsig/ecdsa.go (new file) ----

package dsig

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/rand"
	"encoding/asn1"
	"fmt"
	"io"
	"math/big"

	"github.com/lestrrat-go/dsig/internal/ecutil"
)

// ecdsaGetSignerKey normalizes key into either a native *ecdsa.PrivateKey or a
// generic crypto.Signer; the third return value reports which route was chosen.
func ecdsaGetSignerKey(key any) (*ecdsa.PrivateKey, crypto.Signer, bool, error) {
	cs, isCryptoSigner := key.(crypto.Signer)
	if isCryptoSigner {
		if !isValidECDSAKey(key) {
			return nil, nil, false, fmt.Errorf(`invalid key type %T for ECDSA algorithm`, key)
		}

		switch key.(type) {
		case ecdsa.PrivateKey, *ecdsa.PrivateKey:
			// if it's ecdsa.PrivateKey, it's more efficient to
			// go through the non-crypto.Signer route. Set isCryptoSigner to false
			isCryptoSigner = false
		}
	}

	if isCryptoSigner {
		return nil, cs, true, nil
	}

	privkey, ok := key.(*ecdsa.PrivateKey)
	if !ok {
		return nil, nil, false, fmt.Errorf(`invalid key type %T. *ecdsa.PrivateKey is required`, key)
	}
	return privkey, nil, false, nil
}

// UnpackASN1ECDSASignature unpacks an ASN.1 encoded ECDSA signature into r and s values.
// This is typically used when working with crypto.Signer interfaces that return ASN.1 encoded signatures.
// (Sizing the output to the curve is the caller's job; see signECDSACryptoSigner,
// which fetches the bit size from the signer's public key.)
func UnpackASN1ECDSASignature(signed []byte, r, s *big.Int) error {
	var p struct {
		R *big.Int // TODO: get this from a pool?
		S *big.Int
	}
	if _, err := asn1.Unmarshal(signed, &p); err != nil {
		return fmt.Errorf(`failed to unmarshal ASN1 encoded signature: %w`, err)
	}

	r.Set(p.R)
	s.Set(p.S)
	return nil
}

// UnpackECDSASignature unpacks a JWS-format ECDSA signature into r and s values.
// The signature should be in the format specified by RFC 7515 (r||s as fixed-length byte arrays).
func UnpackECDSASignature(signature []byte, pubkey *ecdsa.PublicKey, r, s *big.Int) error {
	keySize := ecutil.CalculateKeySize(pubkey.Curve)
	if len(signature) != keySize*2 {
		return fmt.Errorf(`invalid signature length for curve %q`, pubkey.Curve.Params().Name)
	}

	r.SetBytes(signature[:keySize])
	s.SetBytes(signature[keySize:])

	return nil
}

// PackECDSASignature packs the r and s values from an ECDSA signature into a JWS-format byte slice.
// The output format follows RFC 7515: r||s as fixed-length byte arrays.
+func PackECDSASignature(r *big.Int, sbig *big.Int, curveBits int) ([]byte, error) { + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes++ + } + + // Serialize r and s into fixed-length bytes + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := sbig.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + // Output as r||s + return append(rBytesPadded, sBytesPadded...), nil +} + +// SignECDSA generates an ECDSA signature for the given payload using the specified private key and hash. +// The raw parameter should be the pre-computed signing input (typically header.payload). +// +// rr is an io.Reader that provides randomness for signing. if rr is nil, it defaults to rand.Reader. +func SignECDSA(key *ecdsa.PrivateKey, payload []byte, h crypto.Hash, rr io.Reader) ([]byte, error) { + if !isValidECDSAKey(key) { + return nil, fmt.Errorf(`invalid key type %T for ECDSA algorithm`, key) + } + hh := h.New() + if _, err := hh.Write(payload); err != nil { + return nil, fmt.Errorf(`failed to write payload using ecdsa: %w`, err) + } + digest := hh.Sum(nil) + + if rr == nil { + rr = rand.Reader + } + + // Sign and get r, s values + r, s, err := ecdsa.Sign(rr, key, digest) + if err != nil { + return nil, fmt.Errorf(`failed to sign payload using ecdsa: %w`, err) + } + + return PackECDSASignature(r, s, key.Curve.Params().BitSize) +} + +// SignECDSACryptoSigner generates an ECDSA signature using a crypto.Signer interface. +// This function works with hardware security modules and other crypto.Signer implementations. +// The signature is converted from ASN.1 format to JWS format (r||s). +// +// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader. 
+func SignECDSACryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, rr io.Reader) ([]byte, error) { + signed, err := SignCryptoSigner(signer, raw, h, h, rr) + if err != nil { + return nil, fmt.Errorf(`failed to sign payload using crypto.Signer: %w`, err) + } + + return signECDSACryptoSigner(signer, signed) +} + +func signECDSACryptoSigner(signer crypto.Signer, signed []byte) ([]byte, error) { + cpub := signer.Public() + pubkey, ok := cpub.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf(`expected *ecdsa.PublicKey, got %T`, pubkey) + } + curveBits := pubkey.Curve.Params().BitSize + + var r, s big.Int + if err := UnpackASN1ECDSASignature(signed, &r, &s); err != nil { + return nil, fmt.Errorf(`failed to unpack ASN1 encoded signature: %w`, err) + } + + return PackECDSASignature(&r, &s, curveBits) +} + +func ecdsaVerify(key *ecdsa.PublicKey, buf []byte, h crypto.Hash, r, s *big.Int) error { + hasher := h.New() + hasher.Write(buf) + digest := hasher.Sum(nil) + if !ecdsa.Verify(key, digest, r, s) { + return NewVerificationError("invalid ECDSA signature") + } + return nil +} + +// VerifyECDSA verifies an ECDSA signature for the given payload. +// This function verifies the signature using the specified public key and hash algorithm. +// The payload parameter should be the pre-computed signing input (typically header.payload). +func VerifyECDSA(key *ecdsa.PublicKey, payload, signature []byte, h crypto.Hash) error { + var r, s big.Int + if err := UnpackECDSASignature(signature, key, &r, &s); err != nil { + return fmt.Errorf("dsig.VerifyECDSA: failed to unpack ECDSA signature: %w", err) + } + + return ecdsaVerify(key, payload, h, &r, &s) +} + +// VerifyECDSACryptoSigner verifies an ECDSA signature for crypto.Signer implementations. +// This function is useful for verifying signatures created by hardware security modules +// or other implementations of the crypto.Signer interface. 
+// The payload parameter should be the pre-computed signing input (typically header.payload). +func VerifyECDSACryptoSigner(signer crypto.Signer, payload, signature []byte, h crypto.Hash) error { + var pubkey *ecdsa.PublicKey + switch cpub := signer.Public(); cpub := cpub.(type) { + case ecdsa.PublicKey: + pubkey = &cpub + case *ecdsa.PublicKey: + pubkey = cpub + default: + return fmt.Errorf(`dsig.VerifyECDSACryptoSigner: expected *ecdsa.PublicKey, got %T`, cpub) + } + + var r, s big.Int + if err := UnpackECDSASignature(signature, pubkey, &r, &s); err != nil { + return fmt.Errorf("dsig.VerifyECDSACryptoSigner: failed to unpack ASN.1 encoded ECDSA signature: %w", err) + } + + return ecdsaVerify(pubkey, payload, h, &r, &s) +} diff --git a/vendor/github.com/lestrrat-go/dsig/eddsa.go b/vendor/github.com/lestrrat-go/dsig/eddsa.go new file mode 100644 index 0000000000..6562da37b8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/eddsa.go @@ -0,0 +1,44 @@ +package dsig + +import ( + "crypto" + "crypto/ed25519" + "fmt" +) + +func eddsaGetSigner(key any) (crypto.Signer, error) { + // The ed25519.PrivateKey object implements crypto.Signer, so we should + // simply accept a crypto.Signer here. + signer, ok := key.(crypto.Signer) + if ok { + if !isValidEDDSAKey(key) { + return nil, fmt.Errorf(`invalid key type %T for EdDSA algorithm`, key) + } + return signer, nil + } + + // This fallback exists for cases when users give us a pointer instead of non-pointer, etc. + privkey, ok := key.(ed25519.PrivateKey) + if !ok { + return nil, fmt.Errorf(`failed to retrieve ed25519.PrivateKey out of %T`, key) + } + return privkey, nil +} + +// SignEdDSA generates an EdDSA (Ed25519) signature for the given payload. +// The raw parameter should be the pre-computed signing input (typically header.payload). +// EdDSA is deterministic and doesn't require additional hashing of the input. 
+func SignEdDSA(key ed25519.PrivateKey, payload []byte) ([]byte, error) { + return ed25519.Sign(key, payload), nil +} + +// VerifyEdDSA verifies an EdDSA (Ed25519) signature for the given payload. +// This function verifies the signature using Ed25519 verification algorithm. +// The payload parameter should be the pre-computed signing input (typically header.payload). +// EdDSA is deterministic and provides strong security guarantees without requiring hash function selection. +func VerifyEdDSA(key ed25519.PublicKey, payload, signature []byte) error { + if !ed25519.Verify(key, payload, signature) { + return fmt.Errorf("invalid EdDSA signature") + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/dsig/hmac.go b/vendor/github.com/lestrrat-go/dsig/hmac.go new file mode 100644 index 0000000000..8b2612279d --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/hmac.go @@ -0,0 +1,45 @@ +package dsig + +import ( + "crypto/hmac" + "fmt" + "hash" +) + +func toHMACKey(dst *[]byte, key any) error { + keyBytes, ok := key.([]byte) + if !ok { + return fmt.Errorf(`dsig.toHMACKey: invalid key type %T. []byte is required`, key) + } + + if len(keyBytes) == 0 { + return fmt.Errorf(`dsig.toHMACKey: missing key while signing payload`) + } + + *dst = keyBytes + return nil +} + +// SignHMAC generates an HMAC signature for the given payload using the specified hash function and key. +// The raw parameter should be the pre-computed signing input (typically header.payload). +func SignHMAC(key, payload []byte, hfunc func() hash.Hash) ([]byte, error) { + h := hmac.New(hfunc, key) + if _, err := h.Write(payload); err != nil { + return nil, fmt.Errorf(`failed to write payload using hmac: %w`, err) + } + return h.Sum(nil), nil +} + +// VerifyHMAC verifies an HMAC signature for the given payload. +// This function verifies the signature using the specified key and hash function. +// The payload parameter should be the pre-computed signing input (typically header.payload). 
+func VerifyHMAC(key, payload, signature []byte, hfunc func() hash.Hash) error { + expected, err := SignHMAC(key, payload, hfunc) + if err != nil { + return fmt.Errorf("failed to sign payload for verification: %w", err) + } + if !hmac.Equal(signature, expected) { + return NewVerificationError("invalid HMAC signature") + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go b/vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go new file mode 100644 index 0000000000..cf0bd4ac48 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go @@ -0,0 +1,76 @@ +// Package ecutil defines tools that help with elliptic curve related +// computation +package ecutil + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +const ( + // size of buffer that needs to be allocated for EC521 curve + ec521BufferSize = 66 // (521 / 8) + 1 +) + +var ecpointBufferPool = sync.Pool{ + New: func() any { + // In most cases the curve bit size will be less than this length + // so allocate the maximum, and keep reusing + buf := make([]byte, 0, ec521BufferSize) + return &buf + }, +} + +func getCrvFixedBuffer(size int) []byte { + //nolint:forcetypeassert + buf := *(ecpointBufferPool.Get().(*[]byte)) + if size > ec521BufferSize && cap(buf) < size { + buf = append(buf, make([]byte, size-cap(buf))...) + } + return buf[:size] +} + +// ReleaseECPointBuffer releases the []byte buffer allocated. +func ReleaseECPointBuffer(buf []byte) { + buf = buf[:cap(buf)] + buf[0] = 0x0 + for i := 1; i < len(buf); i *= 2 { + copy(buf[i:], buf[:i]) + } + buf = buf[:0] + ecpointBufferPool.Put(&buf) +} + +func CalculateKeySize(crv elliptic.Curve) int { + // We need to create a buffer that fits the entire curve. + // If the curve size is 66, that fits in 9 bytes. If the curve + // size is 64, it fits in 8 bytes. + bits := crv.Params().BitSize + + // For most common cases we know before hand what the byte length + // is going to be. 
optimize + var inBytes int + switch bits { + case 224, 256, 384: // TODO: use constant? + inBytes = bits / 8 + case 521: + inBytes = ec521BufferSize + default: + inBytes = bits / 8 + if (bits % 8) != 0 { + inBytes++ + } + } + + return inBytes +} + +// AllocECPointBuffer allocates a buffer for the given point in the given +// curve. This buffer should be released using the ReleaseECPointBuffer +// function. +func AllocECPointBuffer(v *big.Int, crv elliptic.Curve) []byte { + buf := getCrvFixedBuffer(CalculateKeySize(crv)) + v.FillBytes(buf) + return buf +} diff --git a/vendor/github.com/lestrrat-go/dsig/rsa.go b/vendor/github.com/lestrrat-go/dsig/rsa.go new file mode 100644 index 0000000000..a339fe5b78 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/rsa.go @@ -0,0 +1,63 @@ +package dsig + +import ( + "crypto" + "crypto/rsa" + "fmt" + "io" +) + +func rsaGetSignerCryptoSignerKey(key any) (crypto.Signer, bool, error) { + if !isValidRSAKey(key) { + return nil, false, fmt.Errorf(`invalid key type %T for RSA algorithm`, key) + } + cs, isCryptoSigner := key.(crypto.Signer) + if isCryptoSigner { + return cs, true, nil + } + return nil, false, nil +} + +// rsaPSSOptions returns the PSS options for RSA-PSS signatures with the specified hash. +// The salt length is set to equal the hash length as per RFC 7518. +func rsaPSSOptions(h crypto.Hash) rsa.PSSOptions { + return rsa.PSSOptions{ + Hash: h, + SaltLength: rsa.PSSSaltLengthEqualsHash, + } +} + +// SignRSA generates an RSA signature for the given payload using the specified private key and options. +// The raw parameter should be the pre-computed signing input (typically header.payload). +// If pss is true, RSA-PSS is used; otherwise, PKCS#1 v1.5 is used. +// +// The rr parameter is an optional io.Reader that can be used to provide randomness for signing. +// If rr is nil, it defaults to rand.Reader. 
+func SignRSA(key *rsa.PrivateKey, payload []byte, h crypto.Hash, pss bool, rr io.Reader) ([]byte, error) { + if !isValidRSAKey(key) { + return nil, fmt.Errorf(`invalid key type %T for RSA algorithm`, key) + } + var opts crypto.SignerOpts = h + if pss { + rsaopts := rsaPSSOptions(h) + opts = &rsaopts + } + return cryptosign(key, payload, h, opts, rr) +} + +// VerifyRSA verifies an RSA signature for the given payload and header. +// This function constructs the signing input by encoding the header and payload according to JWS specification, +// then verifies the signature using the specified public key and hash algorithm. +// If pss is true, RSA-PSS verification is used; otherwise, PKCS#1 v1.5 verification is used. +func VerifyRSA(key *rsa.PublicKey, payload, signature []byte, h crypto.Hash, pss bool) error { + if !isValidRSAKey(key) { + return fmt.Errorf(`invalid key type %T for RSA algorithm`, key) + } + hasher := h.New() + hasher.Write(payload) + digest := hasher.Sum(nil) + if pss { + return rsa.VerifyPSS(key, h, digest, signature, &rsa.PSSOptions{Hash: h, SaltLength: rsa.PSSSaltLengthEqualsHash}) + } + return rsa.VerifyPKCS1v15(key, h, digest, signature) +} diff --git a/vendor/github.com/lestrrat-go/dsig/sign.go b/vendor/github.com/lestrrat-go/dsig/sign.go new file mode 100644 index 0000000000..e2a6bde290 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/sign.go @@ -0,0 +1,100 @@ +package dsig + +import ( + "crypto" + "crypto/rsa" + "fmt" + "io" +) + +// Sign generates a digital signature using the specified key and algorithm. +// +// This function loads the signer registered in the dsig package _ONLY_. +// It does not support custom signers that the user might have registered. +// +// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader. +// Not all algorithms require this parameter, but it is included for consistency. +// 99% of the time, you can pass nil for rr, and it will work fine. 
func Sign(key any, alg string, payload []byte, rr io.Reader) ([]byte, error) {
	// Look up the registered algorithm and dispatch on its family.
	info, ok := GetAlgorithmInfo(alg)
	if !ok {
		return nil, fmt.Errorf(`dsig.Sign: unsupported signature algorithm %q`, alg)
	}

	switch info.Family {
	case HMAC:
		return dispatchHMACSign(key, info, payload)
	case RSA:
		return dispatchRSASign(key, info, payload, rr)
	case ECDSA:
		return dispatchECDSASign(key, info, payload, rr)
	case EdDSAFamily:
		return dispatchEdDSASign(key, info, payload, rr)
	default:
		return nil, fmt.Errorf(`dsig.Sign: unsupported signature family %q`, info.Family)
	}
}

// dispatchHMACSign signs payload with the HMAC variant described by info.
// key must be a non-empty []byte.
func dispatchHMACSign(key any, info AlgorithmInfo, payload []byte) ([]byte, error) {
	meta, ok := info.Meta.(HMACFamilyMeta)
	if !ok {
		return nil, fmt.Errorf(`dsig.Sign: invalid HMAC metadata`)
	}

	var hmackey []byte
	if err := toHMACKey(&hmackey, key); err != nil {
		return nil, fmt.Errorf(`dsig.Sign: %w`, err)
	}
	return SignHMAC(hmackey, payload, meta.HashFunc)
}

// dispatchRSASign signs payload with the RSA variant described by info.
// A crypto.Signer key takes the generic route; otherwise *rsa.PrivateKey is required.
func dispatchRSASign(key any, info AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) {
	meta, ok := info.Meta.(RSAFamilyMeta)
	if !ok {
		return nil, fmt.Errorf(`dsig.Sign: invalid RSA metadata`)
	}

	cs, isCryptoSigner, err := rsaGetSignerCryptoSignerKey(key)
	if err != nil {
		return nil, fmt.Errorf(`dsig.Sign: %w`, err)
	}
	if isCryptoSigner {
		var options crypto.SignerOpts = meta.Hash
		if meta.PSS {
			rsaopts := rsaPSSOptions(meta.Hash)
			options = &rsaopts
		}
		return SignCryptoSigner(cs, payload, meta.Hash, options, rr)
	}

	privkey, ok := key.(*rsa.PrivateKey)
	if !ok {
		return nil, fmt.Errorf(`dsig.Sign: invalid key type %T. *rsa.PrivateKey is required`, key)
	}
	return SignRSA(privkey, payload, meta.Hash, meta.PSS, rr)
}

// dispatchEdDSASign signs payload with EdDSA. crypto.Hash(0) means the payload
// is passed to the signer unhashed, as Ed25519 requires.
func dispatchEdDSASign(key any, _ AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) {
	signer, err := eddsaGetSigner(key)
	if err != nil {
		return nil, fmt.Errorf(`dsig.Sign: %w`, err)
	}

	return SignCryptoSigner(signer, payload, crypto.Hash(0), crypto.Hash(0), rr)
}

// dispatchECDSASign signs payload with the ECDSA variant described by info,
// routing through crypto.Signer only when the key is not a native *ecdsa.PrivateKey.
func dispatchECDSASign(key any, info AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) {
	meta, ok := info.Meta.(ECDSAFamilyMeta)
	if !ok {
		return nil, fmt.Errorf(`dsig.Sign: invalid ECDSA metadata`)
	}

	privkey, cs, isCryptoSigner, err := ecdsaGetSignerKey(key)
	if err != nil {
		return nil, fmt.Errorf(`dsig.Sign: %w`, err)
	}
	if isCryptoSigner {
		return SignECDSACryptoSigner(cs, payload, meta.Hash, rr)
	}
	return SignECDSA(privkey, payload, meta.Hash, rr)
}

// ---- vendor/github.com/lestrrat-go/dsig/validation.go (new file) ----

package dsig

import (
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/rsa"
)

// isValidRSAKey validates that the provided key type is appropriate for RSA algorithms.
// It returns false if the key is clearly incompatible (e.g., ECDSA or EdDSA keys).
func isValidRSAKey(key any) bool {
	switch key.(type) {
	case
		ecdsa.PrivateKey, *ecdsa.PrivateKey,
		ed25519.PrivateKey:
		// these are NOT ok for RSA algorithms
		return false
	}
	return true
}

// isValidECDSAKey validates that the provided key type is appropriate for ECDSA algorithms.
// It returns false if the key is clearly incompatible (e.g., RSA or EdDSA keys).
+func isValidECDSAKey(key any) bool { + switch key.(type) { + case + ed25519.PrivateKey, + rsa.PrivateKey, *rsa.PrivateKey: + // these are NOT ok for ECDSA algorithms + return false + } + return true +} + +// isValidEDDSAKey validates that the provided key type is appropriate for EdDSA algorithms. +// It returns false if the key is clearly incompatible (e.g., RSA or ECDSA keys). +func isValidEDDSAKey(key any) bool { + switch key.(type) { + case + ecdsa.PrivateKey, *ecdsa.PrivateKey, + rsa.PrivateKey, *rsa.PrivateKey: + // these are NOT ok for EdDSA algorithms + return false + } + return true +} + +// VerificationError represents an error that occurred during signature verification. +type VerificationError struct { + message string +} + +func (e *VerificationError) Error() string { + return e.message +} + +// NewVerificationError creates a new verification error with the given message. +func NewVerificationError(message string) error { + return &VerificationError{message: message} +} + +// IsVerificationError checks if the given error is a verification error. +func IsVerificationError(err error) bool { + _, ok := err.(*VerificationError) + return ok +} diff --git a/vendor/github.com/lestrrat-go/dsig/verify.go b/vendor/github.com/lestrrat-go/dsig/verify.go new file mode 100644 index 0000000000..86085b0a37 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/verify.go @@ -0,0 +1,134 @@ +package dsig + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "fmt" +) + +// Verify verifies a digital signature using the specified key and algorithm. +// +// This function loads the verifier registered in the dsig package _ONLY_. +// It does not support custom verifiers that the user might have registered. 
+func Verify(key any, alg string, payload, signature []byte) error { + info, ok := GetAlgorithmInfo(alg) + if !ok { + return fmt.Errorf(`dsig.Verify: unsupported signature algorithm %q`, alg) + } + + switch info.Family { + case HMAC: + return dispatchHMACVerify(key, info, payload, signature) + case RSA: + return dispatchRSAVerify(key, info, payload, signature) + case ECDSA: + return dispatchECDSAVerify(key, info, payload, signature) + case EdDSAFamily: + return dispatchEdDSAVerify(key, info, payload, signature) + default: + return fmt.Errorf(`dsig.Verify: unsupported signature family %q`, info.Family) + } +} + +func dispatchHMACVerify(key any, info AlgorithmInfo, payload, signature []byte) error { + meta, ok := info.Meta.(HMACFamilyMeta) + if !ok { + return fmt.Errorf(`dsig.Verify: invalid HMAC metadata`) + } + + var hmackey []byte + if err := toHMACKey(&hmackey, key); err != nil { + return fmt.Errorf(`dsig.Verify: %w`, err) + } + return VerifyHMAC(hmackey, payload, signature, meta.HashFunc) +} + +func dispatchRSAVerify(key any, info AlgorithmInfo, payload, signature []byte) error { + meta, ok := info.Meta.(RSAFamilyMeta) + if !ok { + return fmt.Errorf(`dsig.Verify: invalid RSA metadata`) + } + + var pubkey *rsa.PublicKey + + if cs, ok := key.(crypto.Signer); ok { + cpub := cs.Public() + switch cpub := cpub.(type) { + case rsa.PublicKey: + pubkey = &cpub + case *rsa.PublicKey: + pubkey = cpub + default: + return fmt.Errorf(`dsig.Verify: failed to retrieve rsa.PublicKey out of crypto.Signer %T`, key) + } + } else { + var ok bool + pubkey, ok = key.(*rsa.PublicKey) + if !ok { + return fmt.Errorf(`dsig.Verify: failed to retrieve *rsa.PublicKey out of %T`, key) + } + } + + return VerifyRSA(pubkey, payload, signature, meta.Hash, meta.PSS) +} + +func dispatchECDSAVerify(key any, info AlgorithmInfo, payload, signature []byte) error { + meta, ok := info.Meta.(ECDSAFamilyMeta) + if !ok { + return fmt.Errorf(`dsig.Verify: invalid ECDSA metadata`) + } + + pubkey, cs, 
isCryptoSigner, err := ecdsaGetVerifierKey(key) + if err != nil { + return fmt.Errorf(`dsig.Verify: %w`, err) + } + if isCryptoSigner { + return VerifyECDSACryptoSigner(cs, payload, signature, meta.Hash) + } + return VerifyECDSA(pubkey, payload, signature, meta.Hash) +} + +func dispatchEdDSAVerify(key any, _ AlgorithmInfo, payload, signature []byte) error { + var pubkey ed25519.PublicKey + signer, ok := key.(crypto.Signer) + if ok { + v := signer.Public() + pubkey, ok = v.(ed25519.PublicKey) + if !ok { + return fmt.Errorf(`dsig.Verify: expected crypto.Signer.Public() to return ed25519.PublicKey, but got %T`, v) + } + } else { + var ok bool + pubkey, ok = key.(ed25519.PublicKey) + if !ok { + return fmt.Errorf(`dsig.Verify: failed to retrieve ed25519.PublicKey out of %T`, key) + } + } + + return VerifyEdDSA(pubkey, payload, signature) +} + +func ecdsaGetVerifierKey(key any) (*ecdsa.PublicKey, crypto.Signer, bool, error) { + cs, isCryptoSigner := key.(crypto.Signer) + if isCryptoSigner { + switch key.(type) { + case ecdsa.PublicKey, *ecdsa.PublicKey: + // if it's ecdsa.PublicKey, it's more efficient to + // go through the non-crypto.Signer route. Set isCryptoSigner to false + isCryptoSigner = false + } + } + + if isCryptoSigner { + return nil, cs, true, nil + } + + pubkey, ok := key.(*ecdsa.PublicKey) + if !ok { + return nil, nil, false, fmt.Errorf(`invalid key type %T. 
*ecdsa.PublicKey is required`, key) + } + + return pubkey, nil, false, nil +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml b/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml index a51b41b4a5..b3af8cfe12 100644 --- a/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml +++ b/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml @@ -29,6 +29,7 @@ linters: - nakedret - nestif - nlreturn + - noinlineerr - nonamedreturns - paralleltest - tagliatelle @@ -37,6 +38,7 @@ linters: - varnamelen - wrapcheck - wsl + - wsl_v5 settings: govet: disable: diff --git a/vendor/github.com/lestrrat-go/httprc/v3/Changes b/vendor/github.com/lestrrat-go/httprc/v3/Changes index 3c82040d43..001c8c5444 100644 --- a/vendor/github.com/lestrrat-go/httprc/v3/Changes +++ b/vendor/github.com/lestrrat-go/httprc/v3/Changes @@ -1,7 +1,30 @@ Changes ======= -v3.0.0 UNRELEASED +v3.0.5 30 Mar 2026 + * Fix periodic check deadlock when number of ready resources exceeds + outgoing channel buffer, which caused circular wait between controller + and worker goroutines (#113, #116) + * Fix proxysink self-deadlock caused by missing mutex unlock on context + cancellation path + +v3.0.4 08 Feb 2026 + * Fix worker goroutine dying on sync refresh failure, which could cause + deadlocks after repeated failures (lestrrat-go/jwx#1551) + * Move ErrNotReady example functions out of client_example_test.go into + separate files to avoid triggering autodoc workflow + +v3.0.3 23 Dec 2025 + * Add ErrNotReady error state to avoid waiting for unstable URLs + +v3.0.2 05 Dev 2025 +* Code changes mainly due to upgraded linter. +* github.com/lestrrat-go/option upgraded to v2 + +v3.0.1 18 Aug 2025 +* Refresh() no longer requires the resource to be ready. 
+ +v3.0.0 5 Jun 2025 [Breaking Changes] * The entire API has been re-imagined for Go versions that allow typed parameters diff --git a/vendor/github.com/lestrrat-go/httprc/v3/backend.go b/vendor/github.com/lestrrat-go/httprc/v3/backend.go index 7d4fb496aa..713de23de2 100644 --- a/vendor/github.com/lestrrat-go/httprc/v3/backend.go +++ b/vendor/github.com/lestrrat-go/httprc/v3/backend.go @@ -71,6 +71,7 @@ func (c *ctrlBackend) refreshResource(ctx context.Context, req refreshRequest) { c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] START %q", req.u)) defer c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] END %q", req.u)) u := req.u + r, ok := c.items[u] if !ok { c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] %s is not registered", req.u)) @@ -78,12 +79,9 @@ func (c *ctrlBackend) refreshResource(ctx context.Context, req refreshRequest) { return } - // Make sure it's ready - if err := r.Ready(ctx); err != nil { - c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] %s did not become ready: %v", req.u, err)) - sendReply(ctx, req.reply, struct{}{}, err) - return - } + // Note: We don't wait for r.Ready() here because refresh should work + // regardless of whether the resource has been fetched before. This allows + // refresh to work with resources registered using WithWaitReady(false). 
r.SetNext(time.Unix(0, 0)) sendWorkerSynchronous(ctx, c.syncoutgoing, synchronousRequest{ @@ -120,14 +118,6 @@ func (c *ctrlBackend) handleRequest(ctx context.Context, req any) { } } -func sendWorker(ctx context.Context, ch chan Resource, r Resource) { - r.SetBusy(true) - select { - case <-ctx.Done(): - case ch <- r: - } -} - func sendWorkerSynchronous(ctx context.Context, ch chan synchronousRequest, r synchronousRequest) { r.resource.SetBusy(true) select { @@ -161,26 +151,67 @@ func (c *ctrlBackend) loop(ctx context.Context, readywg, donewg *sync.WaitGroup) readywg.Done() defer c.traceSink.Put(ctx, "httprc controller: stopping main controller loop") defer donewg.Done() + + var pending []Resource for { - c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: waiting for request or tick (tick interval=%s)", c.tickInterval)) - select { - case req := <-c.incoming: - c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: got request %T", req)) - c.handleRequest(ctx, req) - case t := <-c.check.C: - c.periodicCheck(ctx, t) - case <-ctx.Done(): - return + if len(pending) > 0 { + // Dispatch pending items while remaining responsive to incoming + // requests. This prevents a deadlock where periodicCheck blocks + // on c.outgoing while a worker blocks on c.incoming (issue #113). + + // Skip resources that were removed (or replaced) after periodicCheck + // queued them. Without this check, a stale resource could be sent to + // a worker, causing an unnecessary fetch and a subsequent + // adjustIntervalRequest for a resource that is no longer registered. + r := pending[0] + // Compare interface values directly. This is safe because all + // Resource implementations are pointer types (*ResourceBase[T]), + // so the comparison is a pointer identity check. 
+ if cur, ok := c.items[r.URL()]; !ok || cur != r { + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: skipping pending resource %q (no longer registered or replaced)", r.URL())) + r.SetBusy(false) + pending = pending[1:] + continue + } + + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: dispatching pending resource %q to worker pool (%d remaining)", pending[0].URL(), len(pending))) + select { + case req := <-c.incoming: + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: got request %T (while dispatching)", req)) + c.handleRequest(ctx, req) + case c.outgoing <- pending[0]: + pending = pending[1:] + case t := <-c.check.C: + pending = append(pending, c.periodicCheck(ctx, t)...) + case <-ctx.Done(): + return + } + } else { + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: waiting for request or tick (tick interval=%s)", c.tickInterval)) + select { + case req := <-c.incoming: + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: got request %T", req)) + c.handleRequest(ctx, req) + case t := <-c.check.C: + pending = c.periodicCheck(ctx, t) + case <-ctx.Done(): + return + } } } } -func (c *ctrlBackend) periodicCheck(ctx context.Context, t time.Time) { +// periodicCheck examines all registered resources and returns those that are +// due for refresh. Items are marked busy here so they won't be selected again +// on the next tick. The caller (loop) is responsible for dispatching them to +// the worker pool, interleaved with incoming request handling, to avoid the +// deadlock described in https://github.com/lestrrat-go/httprc/issues/113. 
+func (c *ctrlBackend) periodicCheck(ctx context.Context, t time.Time) []Resource { c.traceSink.Put(ctx, "httprc controller: START periodic check") defer c.traceSink.Put(ctx, "httprc controller: END periodic check") var minNext time.Time - var dispatched int minInterval := -1 * time.Second + var toDispatch []Resource for _, item := range c.items { c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: checking resource %q", item.URL())) @@ -198,14 +229,13 @@ func (c *ctrlBackend) periodicCheck(ctx context.Context, t time.Time) { c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resource %q is busy or not ready yet, skipping", item.URL())) continue } - c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resource %q is ready, dispatching to worker pool", item.URL())) + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resource %q is ready, queuing for dispatch", item.URL())) - dispatched++ - c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: dispatching resource %q to worker pool", item.URL())) - sendWorker(ctx, c.outgoing, item) + item.SetBusy(true) + toDispatch = append(toDispatch, item) } - c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: dispatched %d resources", dispatched)) + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: queued %d resources for dispatch", len(toDispatch))) // Next check is always at the earliest next check + 1 second. 
// The extra second makes sure that we are _past_ the actual next check time @@ -225,6 +255,7 @@ func (c *ctrlBackend) periodicCheck(ctx context.Context, t time.Time) { } c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: next check in %s", c.tickInterval)) + return toDispatch } func (c *ctrlBackend) SetTickInterval(d time.Duration) { diff --git a/vendor/github.com/lestrrat-go/httprc/v3/client.go b/vendor/github.com/lestrrat-go/httprc/v3/client.go index 75ac3fc188..05dfbb43f3 100644 --- a/vendor/github.com/lestrrat-go/httprc/v3/client.go +++ b/vendor/github.com/lestrrat-go/httprc/v3/client.go @@ -51,6 +51,9 @@ type Client struct { // By default ALL urls are allowed. This may not be suitable for you if // are using this in a production environment. You are encouraged to specify // a whitelist using the `WithWhitelist` option. +// +// NOTE: In future versions, this function signature should be changed to +// return an error to properly handle option parsing failures. func NewClient(options ...NewClientOption) *Client { //nolint:staticcheck var errSink ErrorSink = errsink.NewNop() @@ -63,19 +66,18 @@ func NewClient(options ...NewClientOption) *Client { defaultMaxInterval := DefaultMaxInterval numWorkers := DefaultWorkers - //nolint:forcetypeassert for _, option := range options { switch option.Ident() { case identHTTPClient{}: - httpcl = option.Value().(HTTPClient) + _ = option.Value(&httpcl) case identWorkers{}: - numWorkers = option.Value().(int) + _ = option.Value(&numWorkers) case identErrorSink{}: - errSink = option.Value().(ErrorSink) + _ = option.Value(&errSink) case identTraceSink{}: - traceSink = option.Value().(TraceSink) + _ = option.Value(&traceSink) case identWhitelist{}: - wl = option.Value().(Whitelist) + _ = option.Value(&wl) } } diff --git a/vendor/github.com/lestrrat-go/httprc/v3/controller.go b/vendor/github.com/lestrrat-go/httprc/v3/controller.go index ae2eb218e4..ffca6a590d 100644 --- a/vendor/github.com/lestrrat-go/httprc/v3/controller.go +++ 
b/vendor/github.com/lestrrat-go/httprc/v3/controller.go @@ -123,11 +123,12 @@ func (c *controller) Add(ctx context.Context, r Resource, options ...AddOption) c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: START Add(%q)", r.URL())) defer c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: END Add(%q)", r.URL())) waitReady := true - //nolint:forcetypeassert for _, option := range options { switch option.Ident() { case identWaitReady{}: - waitReady = option.(addOption).Value().(bool) + if err := option.Value(&waitReady); err != nil { + return fmt.Errorf(`httprc.Controller.Add: failed to parse WaitReady option: %w`, err) + } } } @@ -141,14 +142,23 @@ func (c *controller) Add(ctx context.Context, r Resource, options ...AddOption) resource: r, } c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: sending add request for %q to backend", r.URL())) + // Send to backend and wait for registration confirmation. + // If this succeeds, the resource is in the backend. if _, err := sendBackend[addRequest, struct{}](ctx, c.incoming, req, reply); err != nil { return err } + // IMPORTANT: At this point, the resource has been successfully registered + // in the backend (stored in c.items map). The backend worker will fetch + // this resource periodically. if waitReady { c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: waiting for resource %q to be ready", r.URL())) if err := r.Ready(ctx); err != nil { - return err + // CHANGE: Wrap Ready() errors with errNotReady to indicate that + // registration succeeded but the first fetch hasn't completed. + // Using %w twice creates a multi-error chain (Go 1.20+), allowing + // errors.Is() to check both errNotReady and the underlying error. 
+ return fmt.Errorf("%w: %w", errNotReady, err) } } return nil diff --git a/vendor/github.com/lestrrat-go/httprc/v3/errors.go b/vendor/github.com/lestrrat-go/httprc/v3/errors.go index 1152ba947f..474c790f5d 100644 --- a/vendor/github.com/lestrrat-go/httprc/v3/errors.go +++ b/vendor/github.com/lestrrat-go/httprc/v3/errors.go @@ -55,3 +55,51 @@ var errBlockedByWhitelist = errors.New(`blocked by whitelist`) func ErrBlockedByWhitelist() error { return errBlockedByWhitelist } + +var errNotReady = errors.New(`resource registered but not ready`) + +// ErrNotReady returns a sentinel error indicating that the resource was +// successfully registered with the backend and is being actively managed, +// but the first fetch and transformation has not completed successfully yet. +// +// This error is returned by Add() when: +// - The resource was successfully added to the backend (registration succeeded) +// - WithWaitReady(true) was specified (the default) +// - The Ready() call failed (timeout, transform error, context cancelled, etc.) +// +// When Add() returns this error, the resource IS in the backend's resource map +// and will continue to be fetched periodically in the background according to +// the refresh interval. The application can safely proceed - the resource data +// may become available later when a fetch succeeds. +// +// IMPORTANT: "Not ready" means the first fetch and transformation has not completed +// successfully. The resource may eventually become ready (if the transformation +// succeeds on a subsequent retry), or it may never become ready (if the data is +// permanently invalid or the server is unreachable). The backend will continue +// retrying according to the configured refresh interval. +// +// The underlying error (context deadline, transform failure, etc.) is wrapped +// using Go 1.20+ multiple error wrapping and can be examined with errors.Is() +// or errors.As(). You do not need to manually unwrap the error. 
+// +// Example: +// +// err := ctrl.Add(ctx, resource) +// if err != nil { +// if errors.Is(err, httprc.ErrNotReady()) { +// // Resource registered, will fetch in background +// log.Print("Resource not ready yet, continuing startup") +// +// // Can also check the underlying cause +// if errors.Is(err, context.DeadlineExceeded) { +// log.Print("Timed out waiting for first fetch") +// } +// return nil +// } +// // Registration failed +// return fmt.Errorf("failed to register resource: %w", err) +// } +// // Resource registered AND ready with data +func ErrNotReady() error { + return errNotReady +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/options.go b/vendor/github.com/lestrrat-go/httprc/v3/options.go index 3f07b5671c..40cf891b15 100644 --- a/vendor/github.com/lestrrat-go/httprc/v3/options.go +++ b/vendor/github.com/lestrrat-go/httprc/v3/options.go @@ -3,7 +3,7 @@ package httprc import ( "time" - "github.com/lestrrat-go/option" + "github.com/lestrrat-go/option/v2" ) type NewClientOption interface { diff --git a/vendor/github.com/lestrrat-go/httprc/v3/proxysink/proxysink.go b/vendor/github.com/lestrrat-go/httprc/v3/proxysink/proxysink.go index f290422d6c..942e654c2e 100644 --- a/vendor/github.com/lestrrat-go/httprc/v3/proxysink/proxysink.go +++ b/vendor/github.com/lestrrat-go/httprc/v3/proxysink/proxysink.go @@ -72,6 +72,7 @@ func (p *Proxy[T]) flushloop(ctx context.Context) { p.mu.Unlock() return } + p.mu.Unlock() default: } diff --git a/vendor/github.com/lestrrat-go/httprc/v3/resource.go b/vendor/github.com/lestrrat-go/httprc/v3/resource.go index e637f791fc..0f0d140d27 100644 --- a/vendor/github.com/lestrrat-go/httprc/v3/resource.go +++ b/vendor/github.com/lestrrat-go/httprc/v3/resource.go @@ -41,17 +41,24 @@ func NewResource[T any](s string, transformer Transformer[T], options ...NewReso var interval time.Duration minInterval := DefaultMinInterval maxInterval := DefaultMaxInterval - //nolint:forcetypeassert for _, option := range options { switch 
option.Ident() { case identHTTPClient{}: - httpcl = option.Value().(HTTPClient) + if err := option.Value(&httpcl); err != nil { + return nil, fmt.Errorf(`httprc.NewResource: failed to parse HTTPClient option: %w`, err) + } case identMinimumInterval{}: - minInterval = option.Value().(time.Duration) + if err := option.Value(&minInterval); err != nil { + return nil, fmt.Errorf(`httprc.NewResource: failed to parse MinimumInterval option: %w`, err) + } case identMaximumInterval{}: - maxInterval = option.Value().(time.Duration) + if err := option.Value(&maxInterval); err != nil { + return nil, fmt.Errorf(`httprc.NewResource: failed to parse MaximumInterval option: %w`, err) + } case identConstantInterval{}: - interval = option.Value().(time.Duration) + if err := option.Value(&interval); err != nil { + return nil, fmt.Errorf(`httprc.NewResource: failed to parse ConstantInterval option: %w`, err) + } } } if transformer == nil { @@ -109,7 +116,7 @@ func (r *ResourceBase[T]) Ready(ctx context.Context) error { // returns `A` or `B` depending on the type of the resource. When accessing the // resource through the `httprc.Resource` interface, use this method to obtain the // stored value. 
-func (r *ResourceBase[T]) Get(dst interface{}) error { +func (r *ResourceBase[T]) Get(dst any) error { return blackmagic.AssignIfCompatible(dst, r.Resource()) } diff --git a/vendor/github.com/lestrrat-go/httprc/v3/worker.go b/vendor/github.com/lestrrat-go/httprc/v3/worker.go index 57d7fb9624..1b74164159 100644 --- a/vendor/github.com/lestrrat-go/httprc/v3/worker.go +++ b/vendor/github.com/lestrrat-go/httprc/v3/worker.go @@ -39,10 +39,12 @@ func (w worker) Run(ctx context.Context, readywg *sync.WaitGroup, donewg *sync.W case sr := <-w.nextsync: w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: syncing %q (synchronous)", sr.resource.URL())) if err := sr.resource.Sync(ctx); err != nil { + w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: FAILED to sync %q (synchronous): %s", sr.resource.URL(), err)) sendReply(ctx, sr.reply, struct{}{}, err) sr.resource.SetBusy(false) - return + continue } + w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: SUCCESS syncing %q (synchronous)", sr.resource.URL())) sr.resource.SetBusy(false) sendReply(ctx, sr.reply, struct{}{}, nil) w.sendAdjustIntervalRequest(ctx, sr.resource) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/.golangci.yml b/vendor/github.com/lestrrat-go/jwx/v3/.golangci.yml index 214a9edaa8..30dc4c519b 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/.golangci.yml +++ b/vendor/github.com/lestrrat-go/jwx/v3/.golangci.yml @@ -106,6 +106,9 @@ linters: - revive path: jwt/internal/types/ text: "var-naming: avoid meaningless package names" + - linters: + - godoclint + path: (^|/)internal/ paths: - third_party$ - builtin$ diff --git a/vendor/github.com/lestrrat-go/jwx/v3/Changes b/vendor/github.com/lestrrat-go/jwx/v3/Changes index 77381cf19d..4df33b756c 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/Changes +++ b/vendor/github.com/lestrrat-go/jwx/v3/Changes @@ -4,6 +4,70 @@ Changes v3 has many incompatibilities with v2. 
To see the full list of differences between v2 and v3, please read the Changes-v3.md file (https://github.com/lestrrat-go/jwx/blob/develop/v3/Changes-v3.md) +v3.0.13 12 Jan 2026 + * [jwt] The `jwt.WithContext()` option is now properly being passed to `jws.Verify()` from + `jwt.Parse()`. + * [jwx] github.com/lestrrat-go/httprc/v3 has been upgraded to remove dependency on + github.com/lestrrat-go/option (v1) + * [jwk] `jwk.Clone()` has been fixed to properly work with private fields. + +v3.0.12 20 Oct 2025 + * [jwe] As part of the next change, now per-recipient headers that are empty + are no longer serialized in flattened JSON serialization. + + * [jwe] Introduce `jwe.WithLegacyHeaderMerging(bool)` option to control header + merging behavior in during JWE encryption. This only applies to flattened + JSON serialization. + + Previously, when using flattened JSON serialization (i.e. you specified + JSON serialization via `jwe.WithJSON()` and only supplied one key), per-recipient + headers were merged into the protected headers during encryption, and then + were left to be included in the final serialization as-is. This caused duplicate + headers to be present in both the protected headers and the per-recipient headers. + + Since there may be users who rely on this behavior already, instead of changing the + default behavior to fix this duplication, a new option to `jwe.Encrypt()` was added + to allow clearing the per-recipient headers after merging to leave the `"headers"` + field empty. This in effect makes the flattened JSON serialization more similar to + the compact serialization, where there are no per-recipient headers present, and + leaves the headers disjoint. + + Note that in compact mode, there are no per-recipient headers and thus the + headers need to be merged regardless. In full JSON serialization, we never + merge the headers, so it is left up to the user to keep the headers disjoint. 
+ + * [jws] Calling the deprecated `jws.NewSigner()` function for the first time will cause + legacy signers to be loaded automatically. Previously, you had to explicitly + call `jws.Settings(jws.WithLegacySigners(true))` to enable legacy signers. + + We incorrectly assumed that users would not be using `jws.NewSigner()`, and thus + disabled legacy signers by default. However, it turned out that some users + were using `jws.NewSigner()` in their code, which lead to breakages in + existing code. In hindsight we should have known that any API made public before will + be used by _somebody_. + + As a side effect, jws.Settings(jws.WithLegacySigners(...)) is now a no-op. + + However, please do note that jws.Signer (and similar) objects were always intended to be + used for _registering_ new signing/verifying algorithms, and not for end users to actually + use them directly. If you are using them for other purposes, please consider changing + your code, as it is more than likely that we will somehow deprecate/remove/discouraged + their use in the future. + +v3.0.11 14 Sep 2025 + * [jwk] Add `(jwk.Cache).Shutdown()` method that delegates to the httprc controller + object, to shutdown the cache. + * [jwk] Change timing of `res.Body.Close()` call + * [jwe] Previously, ecdh.PrivateKey/ecdh.PublicKey were not properly handled + when used for encryption, which has been fixed. + * [jws/jwsbb] (EXPERIMENTAL/BREAKS COMPATIBILITY) Convert most functions into + thin wrappers around functions from github.com/lestrrat-go/dsig package. + As a related change, HAMCHashFuncFor/RSAHashFuncFor/ECDSAHashFuncFor/RSAPSSOptions + have been removed or unexported. + Users of this module should be using jwsbb.Sign() and jwsbb.Verify() instead of + algorithm specific jwsbb.SignRSA()/jwsbb.VerifyRSA() and such. If you feel the + need to use these functions, you should use github.com/lestrrat-go/dsig directly. 
+ v3.0.10 04 Aug 2025 * [jws/jwsbb] Add `jwsbb.ErrHeaderNotFound()` to return the same error type as when a non-existent header is requested. via `HeaderGetXXX()` functions. Previously, this diff --git a/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel b/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel index 004fb2a9a2..c9bdc9b730 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel +++ b/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel @@ -9,9 +9,9 @@ bazel_dep(name = "rules_go", version = "0.55.1") bazel_dep(name = "gazelle", version = "0.44.0") bazel_dep(name = "aspect_bazel_lib", version = "2.11.0") -# Go SDK setup - using Go 1.23.6 to match the toolchain in go.mod +# Go SDK setup from go.mod go_sdk = use_extension("@rules_go//go:extensions.bzl", "go_sdk") -go_sdk.download(version = "1.23.6") +go_sdk.from_file(go_mod = "//:go.mod") # Go dependencies from go.mod go_deps = use_extension("@gazelle//:extensions.bzl", "go_deps") @@ -23,6 +23,8 @@ use_repo( "com_github_decred_dcrd_dcrec_secp256k1_v4", "com_github_goccy_go_json", "com_github_lestrrat_go_blackmagic", + "com_github_lestrrat_go_dsig", + "com_github_lestrrat_go_dsig_secp256k1", "com_github_lestrrat_go_httprc_v3", "com_github_lestrrat_go_option_v2", "com_github_segmentio_asm", diff --git a/vendor/github.com/lestrrat-go/jwx/v3/formatkind_string_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/formatkind_string_gen.go index 38abd1bc47..ab7287214f 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/formatkind_string_gen.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/formatkind_string_gen.go @@ -22,8 +22,9 @@ const _FormatKind_name = "InvalidFormatUnknownFormatJWEJWSJWKJWKSJWT" var _FormatKind_index = [...]uint8{0, 13, 26, 29, 32, 35, 39, 42} func (i FormatKind) String() string { - if i < 0 || i >= FormatKind(len(_FormatKind_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_FormatKind_index)-1 { return "FormatKind(" + strconv.FormatInt(int64(i), 10) + ")" } - return 
_FormatKind_name[_FormatKind_index[i]:_FormatKind_index[i+1]] + return _FormatKind_name[_FormatKind_index[idx]:_FormatKind_index[idx+1]] } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/goccy.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/goccy.go index e70a3c1edc..9c99c098bc 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/goccy.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/goccy.go @@ -1,5 +1,4 @@ //go:build jwx_goccy -// +build jwx_goccy package json diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/stdlib.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/stdlib.go index 6f416ec89a..9e51fa7fe9 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/stdlib.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/stdlib.go @@ -1,6 +1,6 @@ //go:build !jwx_goccy -// +build !jwx_goccy +//nolint:revive package json import ( diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go index b911839c5d..751fd8f05a 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go @@ -5,8 +5,10 @@ import ( "crypto/ecdh" "crypto/ecdsa" "crypto/ed25519" + "crypto/elliptic" "crypto/rsa" "fmt" + "math/big" "github.com/lestrrat-go/blackmagic" "github.com/lestrrat-go/jwx/v3/jwk" @@ -263,3 +265,90 @@ func ECDHPublicKey(dst, src any) error { return blackmagic.AssignIfCompatible(dst, pubECDH) } + +// ecdhCurveToElliptic maps ECDH curves to elliptic curves +func ecdhCurveToElliptic(ecdhCurve ecdh.Curve) (elliptic.Curve, error) { + switch ecdhCurve { + case ecdh.P256(): + return elliptic.P256(), nil + case ecdh.P384(): + return elliptic.P384(), nil + case ecdh.P521(): + return elliptic.P521(), nil + default: + return nil, fmt.Errorf(`keyconv: unsupported ECDH curve: %v`, ecdhCurve) + } +} + +// ecdhPublicKeyToECDSA converts an ECDH 
public key to an ECDSA public key +func ecdhPublicKeyToECDSA(ecdhPubKey *ecdh.PublicKey) (*ecdsa.PublicKey, error) { + curve, err := ecdhCurveToElliptic(ecdhPubKey.Curve()) + if err != nil { + return nil, err + } + + pubBytes := ecdhPubKey.Bytes() + + // Parse the uncompressed point format (0x04 prefix + X + Y coordinates) + if len(pubBytes) == 0 || pubBytes[0] != 0x04 { + return nil, fmt.Errorf(`keyconv: invalid ECDH public key format`) + } + + keyLen := (len(pubBytes) - 1) / 2 + if len(pubBytes) != 1+2*keyLen { + return nil, fmt.Errorf(`keyconv: invalid ECDH public key length`) + } + + x := new(big.Int).SetBytes(pubBytes[1 : 1+keyLen]) + y := new(big.Int).SetBytes(pubBytes[1+keyLen:]) + + return &ecdsa.PublicKey{ + Curve: curve, + X: x, + Y: y, + }, nil +} + +func ECDHToECDSA(dst, src any) error { + // convert ecdh.PublicKey to ecdsa.PublicKey, ecdh.PrivateKey to ecdsa.PrivateKey + + // First, handle value types by converting to pointers + switch s := src.(type) { + case ecdh.PrivateKey: + src = &s + case ecdh.PublicKey: + src = &s + } + + var privBytes []byte + var pubkey *ecdh.PublicKey + // Now handle the actual conversion with pointer types + switch src := src.(type) { + case *ecdh.PrivateKey: + pubkey = src.PublicKey() + privBytes = src.Bytes() + case *ecdh.PublicKey: + pubkey = src + default: + return fmt.Errorf(`keyconv: expected ecdh.PrivateKey, *ecdh.PrivateKey, ecdh.PublicKey, or *ecdh.PublicKey, got %T`, src) + } + + // convert the public key + ecdsaPubKey, err := ecdhPublicKeyToECDSA(pubkey) + if err != nil { + return fmt.Errorf(`keyconv.ECDHToECDSA: failed to convert ECDH public key to ECDSA public key: %w`, err) + } + + // return if we were being asked to convert *ecdh.PublicKey + if privBytes == nil { + return blackmagic.AssignIfCompatible(dst, ecdsaPubKey) + } + + // Then create the private key with the public key embedded + ecdsaPrivKey := &ecdsa.PrivateKey{ + D: new(big.Int).SetBytes(privBytes), + PublicKey: *ecdsaPubKey, + } + + return 
blackmagic.AssignIfCompatible(dst, ecdsaPrivKey) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/secp2561k.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/secp2561k.go index e7a6be754d..9ce6ad4d05 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwa/secp2561k.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/secp2561k.go @@ -1,5 +1,4 @@ //go:build jwx_es256k -// +build jwx_es256k package jwa diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go index a71e71689a..e75f342a3d 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go @@ -96,23 +96,48 @@ func (e *encrypter) EncryptKey(cek []byte) (keygen.ByteSource, error) { keyToUse = e.pubkey } - // Handle ecdsa.PublicKey by value - convert to pointer - if pk, ok := keyToUse.(ecdsa.PublicKey); ok { - keyToUse = &pk + switch key := keyToUse.(type) { + case *ecdsa.PublicKey: + // no op + case ecdsa.PublicKey: + keyToUse = &key + case *ecdsa.PrivateKey: + keyToUse = &key.PublicKey + case ecdsa.PrivateKey: + keyToUse = &key.PublicKey + case *ecdh.PublicKey: + // no op + case ecdh.PublicKey: + keyToUse = &key + case ecdh.PrivateKey: + keyToUse = key.PublicKey() + case *ecdh.PrivateKey: + keyToUse = key.PublicKey() } // Determine key type and call appropriate function + switch key := keyToUse.(type) { + case *ecdh.PublicKey: + if key.Curve() == ecdh.X25519() { + if !keywrap { + return jwebb.KeyEncryptECDHESX25519(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) + } + return jwebb.KeyEncryptECDHESKeyWrapX25519(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) + } + + var ecdsaKey *ecdsa.PublicKey + if err := keyconv.ECDHToECDSA(&ecdsaKey, key); err != nil { + return nil, fmt.Errorf(`encrypt: failed to convert ECDH public key to ECDSA: %w`, err) + } + keyToUse = ecdsaKey + } + switch key := keyToUse.(type) { case *ecdsa.PublicKey: if !keywrap { 
return jwebb.KeyEncryptECDHESECDSA(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) } return jwebb.KeyEncryptECDHESKeyWrapECDSA(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) - case *ecdh.PublicKey: - if !keywrap { - return jwebb.KeyEncryptECDHESX25519(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) - } - return jwebb.KeyEncryptECDHESKeyWrapX25519(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) default: return nil, fmt.Errorf(`encrypt: unsupported key type for ECDH-ES: %T`, keyToUse) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/aescbc.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/aescbc.go index b572674e2d..4f08c4936f 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/aescbc.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/aescbc.go @@ -72,9 +72,7 @@ func extractPadding(payload []byte) (toRemove int, good byte) { // The maximum possible padding length plus the actual length field toCheck := 256 // The length of the padded data is public, so we can use an if here - if toCheck > len(payload) { - toCheck = len(payload) - } + toCheck = min(toCheck, len(payload)) for i := 1; i <= toCheck; i++ { t := uint(paddingLen) - uint(i) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwe.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwe.go index 5728021ec7..5b9c92771a 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwe.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwe.go @@ -99,15 +99,20 @@ func (b *recipientBuilder) Build(r Recipient, cek []byte, calg jwa.ContentEncryp rawKey = raw } - // Extract ECDH-ES specific parameters if needed + // Extract ECDH-ES specific parameters if needed. 
var apu, apv []byte - if b.headers != nil { - if val, ok := b.headers.AgreementPartyUInfo(); ok { - apu = val - } - if val, ok := b.headers.AgreementPartyVInfo(); ok { - apv = val - } + + hdr := b.headers + if hdr == nil { + hdr = NewHeaders() + } + + if val, ok := hdr.AgreementPartyUInfo(); ok { + apu = val + } + + if val, ok := hdr.AgreementPartyVInfo(); ok { + apv = val } // Create the encrypter using the new jwebb pattern @@ -116,20 +121,20 @@ func (b *recipientBuilder) Build(r Recipient, cek []byte, calg jwa.ContentEncryp return nil, fmt.Errorf(`jwe.Encrypt: recipientBuilder: failed to create encrypter: %w`, err) } - if hdrs := b.headers; hdrs != nil { - _ = r.SetHeaders(hdrs) - } + _ = r.SetHeaders(hdr) - if err := r.Headers().Set(AlgorithmKey, b.alg); err != nil { + // Populate headers with stuff that we automatically set + if err := hdr.Set(AlgorithmKey, b.alg); err != nil { return nil, fmt.Errorf(`failed to set header: %w`, err) } if keyID != "" { - if err := r.Headers().Set(KeyIDKey, keyID); err != nil { + if err := hdr.Set(KeyIDKey, keyID); err != nil { return nil, fmt.Errorf(`failed to set header: %w`, err) } } + // Handle the encrypted key var rawCEK []byte enckey, err := enc.EncryptKey(cek) if err != nil { @@ -143,8 +148,9 @@ func (b *recipientBuilder) Build(r Recipient, cek []byte, calg jwa.ContentEncryp } } + // finally, anything specific should go here if hp, ok := enckey.(populater); ok { - if err := hp.Populate(r.Headers()); err != nil { + if err := hp.Populate(hdr); err != nil { return nil, fmt.Errorf(`failed to populate: %w`, err) } } @@ -154,7 +160,9 @@ func (b *recipientBuilder) Build(r Recipient, cek []byte, calg jwa.ContentEncryp // Encrypt generates a JWE message for the given payload and returns // it in serialized form, which can be in either compact or -// JSON format. Default is compact. +// JSON format. Default is compact. 
When JSON format is specified and +// there is only one recipient, the resulting serialization is +// automatically converted to flattened JSON serialization format. // // You must pass at least one key to `jwe.Encrypt()` by using `jwe.WithKey()` // option. @@ -172,6 +180,10 @@ func (b *recipientBuilder) Build(r Recipient, cek []byte, calg jwa.ContentEncryp // // Look for options that return `jwe.EncryptOption` or `jws.EncryptDecryptOption` // for a complete list of options that can be passed to this function. +// +// As of v3.0.12, users can specify `jwe.WithLegacyHeaderMerging()` to +// disable header merging behavior that was the default prior to v3.0.12. +// Read the documentation for `jwe.WithLegacyHeaderMerging()` for more information. func Encrypt(payload []byte, options ...EncryptOption) ([]byte, error) { ec := encryptContextPool.Get() defer encryptContextPool.Put(ec) @@ -410,10 +422,26 @@ func (dc *decryptContext) decryptContent(msg *Message, alg jwa.KeyEncryptionAlgo Tag(msg.tag). CEK(dc.cek) - if v, ok := recipient.Headers().Algorithm(); !ok || v != alg { - // algorithms don't match + // The "alg" header can be in either protected/unprotected headers. + // prefer per-recipient headers (as it might be the case that the algorithm differs + // by each recipient), then look at protected headers. 
+ var algMatched bool + for _, hdr := range []Headers{recipient.Headers(), protectedHeaders} { + v, ok := hdr.Algorithm() + if !ok { + continue + } + + if v == alg { + algMatched = true + break + } + // if we found something but didn't match, it's a failure return nil, fmt.Errorf(`jwe.Decrypt: key (%q) and recipient (%q) algorithms do not match`, alg, v) } + if !algMatched { + return nil, fmt.Errorf(`jwe.Decrypt: failed to find "alg" header in either protected or per-recipient headers`) + } h2, err := protectedHeaders.Clone() if err != nil { @@ -534,11 +562,12 @@ func (dc *decryptContext) decryptContent(msg *Message, alg jwa.KeyEncryptionAlgo // encryptContext holds the state during JWE encryption, similar to JWS signContext type encryptContext struct { - calg jwa.ContentEncryptionAlgorithm - compression jwa.CompressionAlgorithm - format int - builders []*recipientBuilder - protected Headers + calg jwa.ContentEncryptionAlgorithm + compression jwa.CompressionAlgorithm + format int + builders []*recipientBuilder + protected Headers + legacyHeaderMerging bool } var encryptContextPool = pool.New(allocEncryptContext, freeEncryptContext) @@ -561,6 +590,7 @@ func freeEncryptContext(ec *encryptContext) *encryptContext { } func (ec *encryptContext) ProcessOptions(options []EncryptOption) error { + ec.legacyHeaderMerging = true var mergeProtected bool var useRawCEK bool for _, option := range options { @@ -577,7 +607,11 @@ func (ec *encryptContext) ProcessOptions(options []EncryptOption) error { if v == jwa.DIRECT() || v == jwa.ECDH_ES() { useRawCEK = true } - ec.builders = append(ec.builders, &recipientBuilder{alg: v, key: wk.key, headers: wk.headers}) + ec.builders = append(ec.builders, &recipientBuilder{ + alg: v, + key: wk.key, + headers: wk.headers, + }) case identContentEncryptionAlgorithm{}: var c jwa.ContentEncryptionAlgorithm if err := option.Value(&c); err != nil { @@ -616,6 +650,12 @@ func (ec *encryptContext) ProcessOptions(options []EncryptOption) error { return 
err } ec.format = fmtOpt + case identLegacyHeaderMerging{}: + var v bool + if err := option.Value(&v); err != nil { + return err + } + ec.legacyHeaderMerging = v } } @@ -732,7 +772,8 @@ func (ec *encryptContext) EncryptMessage(payload []byte, cek []byte) ([]byte, er } } - recipients := recipientSlicePool.GetCapacity(len(ec.builders)) + lbuilders := len(ec.builders) + recipients := recipientSlicePool.GetCapacity(lbuilders) defer recipientSlicePool.Put(recipients) for i, builder := range ec.builders { @@ -767,14 +808,55 @@ func (ec *encryptContext) EncryptMessage(payload []byte, cek []byte) ([]byte, er } } - // If there's only one recipient, you want to include that in the - // protected header - if len(recipients) == 1 { + // fmtCompact does not have per-recipient headers, nor a "header" field. + // In this mode, we're going to have to merge everything to the protected + // header. + if ec.format == fmtCompact { + // We have already established that the number of builders is 1 in + // ec.ProcessOptions(). But we're going to be pedantic + if lbuilders != 1 { + return nil, fmt.Errorf(`internal error: expected exactly one recipient builder (got %d)`, lbuilders) + } + + // when we're using compact format, we can safely merge per-recipient + // headers into the protected header, if any h, err := protected.Merge(recipients[0].Headers()) if err != nil { - return nil, fmt.Errorf(`failed to merge protected headers: %w`, err) + return nil, fmt.Errorf(`failed to merge protected headers for compact serialization: %w`, err) } protected = h + // per-recipient headers, if any, will be ignored in compact format + } else { + // If it got here, it's JSON (could be pretty mode, too). + if lbuilders == 1 { + // If it got here, then we're doing flattened JSON serialization. 
+ // In this mode, we should merge per-recipient headers into the protected header, + // but we also need to make sure that the "header" field is reset so that + // it does not contain the same fields as the protected header. + // + // However, old behavior was to merge per-recipient headers into the + // protected header when there was only one recipient, AND leave the + // original "header" field as is, so we need to support that for backwards compatibility. + // + // The legacy merging only takes effect when there is exactly one recipient. + // + // This behavior can be disabled by passing jwe.WithLegacyHeaderMerging(false) + // If the user has explicitly asked for merging, do it + h, err := protected.Merge(recipients[0].Headers()) + if err != nil { + return nil, fmt.Errorf(`failed to merge protected headers for flattenend JSON format: %w`, err) + } + protected = h + + if !ec.legacyHeaderMerging { + // Clear per-recipient headers, since they have been merged. + // But we only do it when legacy merging is disabled. 
+ // Note: we should probably introduce a Reset() method in v4 + if err := recipients[0].SetHeaders(NewHeaders()); err != nil { + return nil, fmt.Errorf(`failed to clear per-recipient headers after merging: %w`, err) + } + } + } } aad, err := protected.Encode() diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/message.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/message.go index 13cf3dec83..7aad833f26 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwe/message.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/message.go @@ -265,14 +265,23 @@ func (m *Message) MarshalJSON() ([]byte, error) { if recipients := m.Recipients(); len(recipients) > 0 { if len(recipients) == 1 { // Use flattened format if hdrs := recipients[0].Headers(); hdrs != nil { - buf.Reset() - if err := enc.Encode(hdrs); err != nil { - return nil, fmt.Errorf(`failed to encode %s field: %w`, HeadersKey, err) + var skipHeaders bool + if zeroer, ok := hdrs.(isZeroer); ok { + if zeroer.isZero() { + skipHeaders = true + } + } + + if !skipHeaders { + buf.Reset() + if err := enc.Encode(hdrs); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, HeadersKey, err) + } + fields = append(fields, jsonKV{ + Key: HeadersKey, + Value: strings.TrimSpace(buf.String()), + }) } - fields = append(fields, jsonKV{ - Key: HeadersKey, - Value: strings.TrimSpace(buf.String()), - }) } if ek := recipients[0].EncryptedKey(); len(ek) > 0 { @@ -369,13 +378,18 @@ func (m *Message) UnmarshalJSON(buf []byte) error { // field. TODO: do both of these conditions need to meet, or just one? if proxy.Headers != nil || len(proxy.EncryptedKey) > 0 { recipient := NewRecipient() - hdrs := NewHeaders() - if err := json.Unmarshal(proxy.Headers, hdrs); err != nil { - return fmt.Errorf(`failed to decode headers field: %w`, err) - } - if err := recipient.SetHeaders(hdrs); err != nil { - return fmt.Errorf(`failed to set new headers: %w`, err) + // `"heders"` could be empty. 
If that's the case, just skip the + // following unmarshaling step + if proxy.Headers != nil { + hdrs := NewHeaders() + if err := json.Unmarshal(proxy.Headers, hdrs); err != nil { + return fmt.Errorf(`failed to decode headers field: %w`, err) + } + + if err := recipient.SetHeaders(hdrs); err != nil { + return fmt.Errorf(`failed to set new headers: %w`, err) + } } if v := proxy.EncryptedKey; len(v) > 0 { diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.go index c9137eecf4..0437ea8733 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.go @@ -6,8 +6,9 @@ import ( "github.com/lestrrat-go/option/v2" ) -// Specify contents of the protected header. Some fields such as -// "enc" and "zip" will be overwritten when encryption is performed. +// WithProtectedHeaders is used to specify contents of the protected header. +// Some fields such as "enc" and "zip" will be overwritten when encryption is +// performed. // // There is no equivalent for unprotected headers in this implementation func WithProtectedHeaders(h Headers) EncryptOption { diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.yaml b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.yaml index b7fb0262de..359d80944d 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.yaml +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.yaml @@ -169,4 +169,42 @@ options: If set to an invalid value, the default value is used. In v2, this option was called MaxBufferSize. - This option has a global effect. \ No newline at end of file + This option has a global effect. + - ident: LegacyHeaderMerging + interface: EncryptOption + argument_type: bool + option_name: WithLegacyHeaderMerging + comment: | + WithLegacyHeaderMerging specifies whether to perform legacy header merging + when encrypting a JWE message in JSON serialization, when there is a single recipient. 
+ This behavior is enabled by default for backwards compatibility. + + When a JWE message is encrypted in JSON serialization, and there is only + one recipient, this library automatically serializes the message in + flattened JSON serialization format. In older versions of this library, + the protected headers and the per-recipient headers were merged together + before computing the AAD (Additional Authenticated Data), but the per-recipient + headers were kept as-is in the `header` field of the recipient object. + + This behavior is not compliant with the JWE specification, which states that + the headers must be disjoint. + + Passing this option with a value of `false` disables this legacy behavior, + and while the per-recipient headers and protected headers are still merged + for the purpose of computing AAD, the per-recipient headers are cleared + after merging, so that the resulting JWE message is compliant with the + specification. + + This option has no effect when there are multiple recipients, or when + the serialization format is compact serialization. For multiple recipients + (i.e. full JSON serialization), the protected headers and per-recipient + headers are never merged, and it is the caller's responsibility to ensure + that the headers are disjoint. In compact serialization, there are no per-recipient + headers; in fact, the protected headers are the only headers that exist, + and therefore there is no possibility of header collision after merging + (note: while per-recipient headers do not make sense in compact serialization, + this library does not prevent you from setting them -- they are all just + merged into the protected headers). + + In future versions, the new behavior will be the default. New users are + encouraged to set this option to `false` now to avoid future issues. 
\ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options_gen.go index 2a15c141b4..2d28eecb44 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwe/options_gen.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options_gen.go @@ -147,6 +147,7 @@ type identFS struct{} type identKey struct{} type identKeyProvider struct{} type identKeyUsed struct{} +type identLegacyHeaderMerging struct{} type identMaxDecompressBufferSize struct{} type identMaxPBES2Count struct{} type identMergeProtectedHeaders struct{} @@ -193,6 +194,10 @@ func (identKeyUsed) String() string { return "WithKeyUsed" } +func (identLegacyHeaderMerging) String() string { + return "WithLegacyHeaderMerging" +} + func (identMaxDecompressBufferSize) String() string { return "WithMaxDecompressBufferSize" } @@ -292,6 +297,43 @@ func WithKeyUsed(v any) DecryptOption { return &decryptOption{option.New(identKeyUsed{}, v)} } +// WithLegacyHeaderMerging specifies whether to perform legacy header merging +// when encrypting a JWE message in JSON serialization, when there is a single recipient. +// This behavior is enabled by default for backwards compatibility. +// +// When a JWE message is encrypted in JSON serialization, and there is only +// one recipient, this library automatically serializes the message in +// flattened JSON serialization format. In older versions of this library, +// the protected headers and the per-recipient headers were merged together +// before computing the AAD (Additional Authenticated Data), but the per-recipient +// headers were kept as-is in the `header` field of the recipient object. +// +// This behavior is not compliant with the JWE specification, which states that +// the headers must be disjoint. 
+// +// Passing this option with a value of `false` disables this legacy behavior, +// and while the per-recipient headers and protected headers are still merged +// for the purpose of computing AAD, the per-recipient headers are cleared +// after merging, so that the resulting JWE message is compliant with the +// specification. +// +// This option has no effect when there are multiple recipients, or when +// the serialization format is compact serialization. For multiple recipients +// (i.e. full JSON serialization), the protected headers and per-recipient +// headers are never merged, and it is the caller's responsibility to ensure +// that the headers are disjoint. In compact serialization, there are no per-recipient +// headers; in fact, the protected headers are the only headers that exist, +// and therefore there is no possibility of header collision after merging +// (note: while per-recipient headers do not make sense in compact serialization, +// this library does not prevent you from setting them -- they are all just +// merged into the protected headers). +// +// In future versions, the new behavior will be the default. New users are +// encouraged to set this option to `false` now to avoid future issues. +func WithLegacyHeaderMerging(v bool) EncryptOption { + return &encryptOption{option.New(identLegacyHeaderMerging{}, v)} +} + // WithMaxDecompressBufferSize specifies the maximum buffer size for used when // decompressing the payload of a JWE message. If a compressed JWE payload // exceeds this amount when decompressed, jwe.Decrypt will return an error. 
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go index d827668e10..6d5b00f056 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go @@ -223,6 +223,10 @@ func (c *Cache) Unregister(ctx context.Context, u string) error { return c.ctrl.Remove(ctx, u) } +func (c *Cache) Shutdown(ctx context.Context) error { + return c.ctrl.ShutdownContext(ctx) +} + // CachedSet is a thin shim over jwk.Cache that allows the user to cloak // jwk.Cache as if it's a `jwk.Set`. Behind the scenes, the `jwk.Set` is // retrieved from the `jwk.Cache` for every operation. @@ -266,7 +270,7 @@ func (cs *cachedSet) cached() (Set, error) { return cs.r.Resource(), nil } -// Add is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only +// AddKey is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only func (*cachedSet) AddKey(_ Key) error { return fmt.Errorf(`(jwk.Cachedset).AddKey: jwk.CachedSet is immutable`) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa.go index 3dcd33bb1f..8f76d0508e 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa.go @@ -141,8 +141,8 @@ func buildECDHPrivateKey(alg jwa.EllipticCurveAlgorithm, dbuf []byte) (*ecdh.Pri } var ecdsaConvertibleTypes = []reflect.Type{ - reflect.TypeOf((*ECDSAPrivateKey)(nil)).Elem(), - reflect.TypeOf((*ECDSAPublicKey)(nil)).Elem(), + reflect.TypeFor[ECDSAPrivateKey](), + reflect.TypeFor[ECDSAPublicKey](), } func ecdsaJWKToRaw(keyif Key, hint any) (any, error) { diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/es256k.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/es256k.go index 48114bbaee..293988db4b 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwk/es256k.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/es256k.go @@ -1,5 +1,4 @@ //go:build 
jwx_es256k -// +build jwx_es256k package jwk diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go index 2b7917fd21..2c80a369dc 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go @@ -40,7 +40,7 @@ type CachedFetcher struct { cache *Cache } -// Creates a new `jwk.CachedFetcher` object. +// NewCachedFetcher creates a new `jwk.CachedFetcher` object. func NewCachedFetcher(cache *Cache) *CachedFetcher { return &CachedFetcher{cache} } @@ -102,13 +102,13 @@ func Fetch(ctx context.Context, u string, options ...FetchOption) (Set, error) { if err != nil { return nil, fmt.Errorf(`jwk.Fetch: request failed: %w`, err) } + defer res.Body.Close() if res.StatusCode != http.StatusOK { return nil, fmt.Errorf(`jwk.Fetch: request returned status %d, expected 200`, res.StatusCode) } buf, err := io.ReadAll(res.Body) - defer res.Body.Close() if err != nil { return nil, fmt.Errorf(`jwk.Fetch: failed to read response body for %q: %w`, u, err) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface.go index c157c2362c..c5a22a43fb 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface.go @@ -92,9 +92,14 @@ type Set interface { Len() int // LookupKeyID returns the first key matching the given key id. + // // The second return value is false if there are no keys matching the key id. // The set *may* contain multiple keys with the same key id. If you - // need all of them, use `Iterate()` + // need all of them, Len() and Key(int) + // + // This method is meant to be used to lookup a key with a unique ID. + // Bacauseof this, you cannot use this method to lookup keys with an empty key ID + // (i.e. `kid` is not specified, or is an empty string). LookupKeyID(string) (Key, bool) // RemoveKey removes the key from the set. 
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwk.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwk.go index 785feaf94c..22d4950d8f 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwk.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwk.go @@ -13,6 +13,7 @@ import ( "io" "math/big" "reflect" + "slices" "github.com/lestrrat-go/jwx/v3/internal/base64" "github.com/lestrrat-go/jwx/v3/internal/json" @@ -30,14 +31,14 @@ func bigIntToBytes(n *big.Int) ([]byte, error) { func init() { if err := RegisterProbeField(reflect.StructField{ Name: "Kty", - Type: reflect.TypeOf(""), + Type: reflect.TypeFor[string](), Tag: `json:"kty"`, }); err != nil { panic(fmt.Errorf("failed to register mandatory probe for 'kty' field: %w", err)) } if err := RegisterProbeField(reflect.StructField{ Name: "D", - Type: reflect.TypeOf(json.RawMessage(nil)), + Type: reflect.TypeFor[json.RawMessage](), Tag: `json:"d,omitempty"`, }); err != nil { panic(fmt.Errorf("failed to register mandatory probe for 'kty' field: %w", err)) @@ -665,10 +666,10 @@ func extractEmbeddedKey(keyif Key, concretTypes []reflect.Type) (Key, error) { rv := reflect.ValueOf(keyif) // If the value can be converted to one of the concrete types, then we're done - for _, t := range concretTypes { - if rv.Type().ConvertibleTo(t) { - return keyif, nil - } + if slices.ContainsFunc(concretTypes, func(t reflect.Type) bool { + return rv.Type().ConvertibleTo(t) + }) { + return keyif, nil } // When a struct implements the Key interface via embedding, you unfortunately diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp.go index 773734b660..7cbf66c2d8 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp.go @@ -141,8 +141,8 @@ func buildOKPPrivateKey(alg jwa.EllipticCurveAlgorithm, xbuf []byte, dbuf []byte } var okpConvertibleKeys = []reflect.Type{ - reflect.TypeOf((*OKPPrivateKey)(nil)).Elem(), - 
reflect.TypeOf((*OKPPublicKey)(nil)).Elem(), + reflect.TypeFor[OKPPrivateKey](), + reflect.TypeFor[OKPPublicKey](), } // This is half baked. I think it will blow up if we used ecdh.* keys and/or x25519 keys diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa.go index bcd7d05c02..ca27681587 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa.go @@ -115,8 +115,8 @@ func buildRSAPublicKey(key *rsa.PublicKey, n, e []byte) { } var rsaConvertibleKeys = []reflect.Type{ - reflect.TypeOf((*RSAPrivateKey)(nil)).Elem(), - reflect.TypeOf((*RSAPublicKey)(nil)).Elem(), + reflect.TypeFor[RSAPrivateKey](), + reflect.TypeFor[RSAPublicKey](), } func rsaJWKToRaw(key Key, hint any) (any, error) { diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/set.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/set.go index 89d8646874..6f339649a8 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwk/set.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/set.go @@ -3,6 +3,7 @@ package jwk import ( "bytes" "fmt" + "maps" "reflect" "sort" @@ -14,13 +15,17 @@ import ( const keysKey = `keys` // appease linter -// NewSet creates and empty `jwk.Set` object -func NewSet() Set { +func newSet() *set { return &set{ privateParams: make(map[string]any), } } +// NewSet creates and empty `jwk.Set` object +func NewSet() Set { + return newSet() +} + func (s *set) Set(n string, v any) error { s.mu.RLock() defer s.mu.RUnlock() @@ -300,12 +305,15 @@ func (s *set) SetDecodeCtx(dc DecodeCtx) { } func (s *set) Clone() (Set, error) { - s2 := &set{} + s2 := newSet() s.mu.RLock() defer s.mu.RUnlock() s2.keys = make([]Key, len(s.keys)) copy(s2.keys, s.keys) + + maps.Copy(s2.privateParams, s.privateParams) + return s2, nil } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric.go index 16427ff86f..7db5e1591a 100644 --- 
a/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric.go @@ -27,7 +27,7 @@ func (k *symmetricKey) Import(rawKey []byte) error { } var symmetricConvertibleKeys = []reflect.Type{ - reflect.TypeOf((*SymmetricKey)(nil)).Elem(), + reflect.TypeFor[SymmetricKey](), } func octetSeqToRaw(key Key, hint any) (any, error) { diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/x509.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/x509.go index c0a7c4c4d9..f06063c6ed 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwk/x509.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/x509.go @@ -118,7 +118,7 @@ func NewPEMDecoder() PEMDecoder { type pemDecoder struct{} -// DecodePEM decodes a key in PEM encoded ASN.1 DER format. +// Decode decodes a key in PEM encoded ASN.1 DER format. // and returns a raw key. func (pemDecoder) Decode(src []byte) (any, []byte, error) { block, rest := pem.Decode(src) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go index ff5a1d8f78..28ebd2ea0e 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go @@ -1,5 +1,4 @@ //go:build jwx_es256k -// +build jwx_es256k package jws @@ -8,5 +7,6 @@ import ( ) func init() { + // Register ES256K to EC algorithm family addAlgorithmForKeyType(jwa.EC(), jwa.ES256K()) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go index b41b2758b8..f09e40db2d 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go @@ -19,6 +19,10 @@ // To verify, use `jws.Verify`. It will parse the `encodedjws` buffer // and verify the result using `algorithm` and `key`. Upon successful // verification, the original payload is returned, so you can work on it. 
+// +// As a sidenote, consider using github.com/lestrrat-go/htmsig if you +// looking for HTTP Message Signatures (RFC9421) -- it uses the same +// underlying signing/verification mechanisms as this module. package jws import ( @@ -531,12 +535,12 @@ var rawKeyToKeyType = make(map[reflect.Type]jwa.KeyType) var keyTypeToAlgorithms = make(map[jwa.KeyType][]jwa.SignatureAlgorithm) func init() { - rawKeyToKeyType[reflect.TypeOf([]byte(nil))] = jwa.OctetSeq() - rawKeyToKeyType[reflect.TypeOf(ed25519.PublicKey(nil))] = jwa.OKP() - rawKeyToKeyType[reflect.TypeOf(rsa.PublicKey{})] = jwa.RSA() - rawKeyToKeyType[reflect.TypeOf((*rsa.PublicKey)(nil))] = jwa.RSA() - rawKeyToKeyType[reflect.TypeOf(ecdsa.PublicKey{})] = jwa.EC() - rawKeyToKeyType[reflect.TypeOf((*ecdsa.PublicKey)(nil))] = jwa.EC() + rawKeyToKeyType[reflect.TypeFor[[]byte]()] = jwa.OctetSeq() + rawKeyToKeyType[reflect.TypeFor[ed25519.PublicKey]()] = jwa.OKP() + rawKeyToKeyType[reflect.TypeFor[rsa.PublicKey]()] = jwa.RSA() + rawKeyToKeyType[reflect.TypeFor[*rsa.PublicKey]()] = jwa.RSA() + rawKeyToKeyType[reflect.TypeFor[ecdsa.PublicKey]()] = jwa.EC() + rawKeyToKeyType[reflect.TypeFor[*ecdsa.PublicKey]()] = jwa.EC() addAlgorithmForKeyType(jwa.OKP(), jwa.EdDSA()) for _, alg := range []jwa.SignatureAlgorithm{jwa.HS256(), jwa.HS384(), jwa.HS512()} { @@ -582,11 +586,14 @@ func AlgorithmsForKey(key any) ([]jwa.SignatureAlgorithm, error) { return algs, nil } +// Settings allows you to set global settings for this JWS operations. +// +// Currently, the only setting available is `jws.WithLegacySigners()`, +// which for various reason is now a no-op. 
func Settings(options ...GlobalOption) { for _, option := range options { switch option.Ident() { case identLegacySigners{}: - enableLegacySigners() } } } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel index f8fcdac4ab..0799e81110 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel @@ -23,6 +23,7 @@ go_library( "//internal/pool", "//internal/tokens", "//jws/internal/keytype", + "@com_github_lestrrat_go_dsig//:dsig", ], ) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go index bd7ef4adc9..1eb492ee7b 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go @@ -3,59 +3,27 @@ package jwsbb import ( "crypto" "crypto/ecdsa" - "crypto/rand" "encoding/asn1" "fmt" "io" "math/big" + "github.com/lestrrat-go/dsig" "github.com/lestrrat-go/jwx/v3/internal/ecutil" - "github.com/lestrrat-go/jwx/v3/internal/keyconv" - "github.com/lestrrat-go/jwx/v3/jws/internal/keytype" ) -var ecdsaHashFuncs = map[string]crypto.Hash{ - "ES256": crypto.SHA256, - "ES256K": crypto.SHA256, - "ES384": crypto.SHA384, - "ES512": crypto.SHA512, -} - -func isSuppotedECDSAAlgorithm(alg string) bool { - _, ok := ecdsaHashFuncs[alg] - return ok -} - -func ECDSAHashFuncFor(alg string) (crypto.Hash, error) { - if h, ok := ecdsaHashFuncs[alg]; ok { - return h, nil - } - return 0, fmt.Errorf(`unsupported ECDSA algorithm %s`, alg) -} - -func ecdsaGetSignerKey(key any) (*ecdsa.PrivateKey, crypto.Signer, bool, error) { - cs, isCryptoSigner := key.(crypto.Signer) - if isCryptoSigner { - if !keytype.IsValidECDSAKey(key) { - return nil, nil, false, fmt.Errorf(`cannot use key of type %T`, key) - } - switch key.(type) { - case ecdsa.PrivateKey, *ecdsa.PrivateKey: - // if it's ecdsa.PrivateKey, it's more 
efficient to - // go through the non-crypto.Signer route. Set isCryptoSigner to false - isCryptoSigner = false - } - } - - if isCryptoSigner { - return nil, cs, true, nil - } - - var privkey *ecdsa.PrivateKey - if err := keyconv.ECDSAPrivateKey(&privkey, key); err != nil { - return nil, nil, false, fmt.Errorf(`invalid key type %T. ecdsa.PrivateKey is required: %w`, key, err) +// ecdsaHashToDsigAlgorithm maps ECDSA hash functions to dsig algorithm constants +func ecdsaHashToDsigAlgorithm(h crypto.Hash) (string, error) { + switch h { + case crypto.SHA256: + return dsig.ECDSAWithP256AndSHA256, nil + case crypto.SHA384: + return dsig.ECDSAWithP384AndSHA384, nil + case crypto.SHA512: + return dsig.ECDSAWithP521AndSHA512, nil + default: + return "", fmt.Errorf("unsupported ECDSA hash function: %v", h) } - return privkey, nil, false, nil } // UnpackASN1ECDSASignature unpacks an ASN.1 encoded ECDSA signature into r and s values. @@ -120,24 +88,16 @@ func PackECDSASignature(r *big.Int, sbig *big.Int, curveBits int) ([]byte, error // The raw parameter should be the pre-computed signing input (typically header.payload). // // rr is an io.Reader that provides randomness for signing. if rr is nil, it defaults to rand.Reader. +// +// This function is now a thin wrapper around dsig.SignECDSA. For new projects, you should +// consider using dsig instead of this function. 
func SignECDSA(key *ecdsa.PrivateKey, payload []byte, h crypto.Hash, rr io.Reader) ([]byte, error) { - hh := h.New() - if _, err := hh.Write(payload); err != nil { - return nil, fmt.Errorf(`failed to write payload using ecdsa: %w`, err) - } - digest := hh.Sum(nil) - - if rr == nil { - rr = rand.Reader - } - - // Sign and get r, s values - r, s, err := ecdsa.Sign(rr, key, digest) + dsigAlg, err := ecdsaHashToDsigAlgorithm(h) if err != nil { - return nil, fmt.Errorf(`failed to sign payload using ecdsa: %w`, err) + return nil, fmt.Errorf("jwsbb.SignECDSA: %w", err) } - return PackECDSASignature(r, s, key.Curve.Params().BitSize) + return dsig.Sign(key, dsigAlg, payload, rr) } // SignECDSACryptoSigner generates an ECDSA signature using a crypto.Signer interface. @@ -183,13 +143,16 @@ func ecdsaVerify(key *ecdsa.PublicKey, buf []byte, h crypto.Hash, r, s *big.Int) // VerifyECDSA verifies an ECDSA signature for the given payload. // This function verifies the signature using the specified public key and hash algorithm. // The payload parameter should be the pre-computed signing input (typically header.payload). +// +// This function is now a thin wrapper around dsig.VerifyECDSA. For new projects, you should +// consider using dsig instead of this function. func VerifyECDSA(key *ecdsa.PublicKey, payload, signature []byte, h crypto.Hash) error { - var r, s big.Int - if err := UnpackECDSASignature(signature, key, &r, &s); err != nil { - return fmt.Errorf("jwsbb.ECDSAVerifier: failed to unpack ECDSA signature: %w", err) + dsigAlg, err := ecdsaHashToDsigAlgorithm(h) + if err != nil { + return fmt.Errorf("jwsbb.VerifyECDSA: %w", err) } - return ecdsaVerify(key, payload, h, &r, &s) + return dsig.Verify(key, dsigAlg, payload, signature) } // VerifyECDSACryptoSigner verifies an ECDSA signature for crypto.Signer implementations. 
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go index a5e41d2ecc..960cf97dde 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go @@ -1,52 +1,30 @@ package jwsbb import ( - "crypto" "crypto/ed25519" - "fmt" - "github.com/lestrrat-go/jwx/v3/internal/keyconv" - "github.com/lestrrat-go/jwx/v3/jws/internal/keytype" + "github.com/lestrrat-go/dsig" ) -func isSupportedEdDSAAlgorithm(alg string) bool { - return alg == "EdDSA" -} - -func eddsaGetSigner(key any) (crypto.Signer, error) { - // The ed25519.PrivateKey object implements crypto.Signer, so we should - // simply accept a crypto.Signer here. - signer, ok := key.(crypto.Signer) - if ok { - if !keytype.IsValidEDDSAKey(key) { - return nil, fmt.Errorf(`cannot use key of type %T to generate EdDSA based signatures`, key) - } - return signer, nil - } - - // This fallback exists for cases when jwk.Key was passed, or - // users gave us a pointer instead of non-pointer, etc. - var privkey ed25519.PrivateKey - if err := keyconv.Ed25519PrivateKey(&privkey, key); err != nil { - return nil, fmt.Errorf(`failed to retrieve ed25519.PrivateKey out of %T: %w`, key, err) - } - return privkey, nil -} - // SignEdDSA generates an EdDSA (Ed25519) signature for the given payload. // The raw parameter should be the pre-computed signing input (typically header.payload). // EdDSA is deterministic and doesn't require additional hashing of the input. +// +// This function is now a thin wrapper around dsig.SignEdDSA. For new projects, you should +// consider using dsig instead of this function. func SignEdDSA(key ed25519.PrivateKey, payload []byte) ([]byte, error) { - return ed25519.Sign(key, payload), nil + // Use dsig.Sign with EdDSA algorithm constant + return dsig.Sign(key, dsig.EdDSA, payload, nil) } // VerifyEdDSA verifies an EdDSA (Ed25519) signature for the given payload. 
// This function verifies the signature using Ed25519 verification algorithm. // The payload parameter should be the pre-computed signing input (typically header.payload). // EdDSA is deterministic and provides strong security guarantees without requiring hash function selection. +// +// This function is now a thin wrapper around dsig.VerifyEdDSA. For new projects, you should +// consider using dsig instead of this function. func VerifyEdDSA(key ed25519.PublicKey, payload, signature []byte) error { - if !ed25519.Verify(key, payload, signature) { - return fmt.Errorf("invalid EdDSA signature") - } - return nil + // Use dsig.Verify with EdDSA algorithm constant + return dsig.Verify(key, dsig.EdDSA, payload, signature) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go new file mode 100644 index 0000000000..a8761ee0fc --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go @@ -0,0 +1,14 @@ +//go:build jwx_es256k + +package jwsbb + +import ( + dsigsecp256k1 "github.com/lestrrat-go/dsig-secp256k1" +) + +const es256k = "ES256K" + +func init() { + // Add ES256K mapping when this build tag is enabled + jwsToDsigAlgorithm[es256k] = dsigsecp256k1.ECDSAWithSecp256k1AndSHA256 +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/header.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/header.go index d50c38eeb1..cac3987ea5 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/header.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/header.go @@ -26,7 +26,7 @@ func (e headerNotFoundError) Is(target error) bool { } } -// ErrHeaderdNotFound returns an error that can be passed to `errors.Is` to check if the error is +// ErrHeaderNotFound returns an error that can be passed to `errors.Is` to check if the error is // the result of the field not being found func ErrHeaderNotFound() error { return headerNotFoundError{} diff --git 
a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go index 782915ff2a..8e70eb667d 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go @@ -1,67 +1,52 @@ package jwsbb import ( - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" "fmt" "hash" - "github.com/lestrrat-go/jwx/v3/internal/keyconv" + "github.com/lestrrat-go/dsig" ) -var hmacHashFuncs = map[string]func() hash.Hash{ - "HS256": sha256.New, - "HS384": sha512.New384, - "HS512": sha512.New, -} - -func isSupportedHMACAlgorithm(alg string) bool { - _, ok := hmacHashFuncs[alg] - return ok -} - -// HMACHashFuncFor returns the appropriate hash function for the given HMAC algorithm. -// Supported algorithms: HS256 (SHA-256), HS384 (SHA-384), HS512 (SHA-512). -// Returns the hash function constructor and an error if the algorithm is unsupported. -func HMACHashFuncFor(alg string) (func() hash.Hash, error) { - if h, ok := hmacHashFuncs[alg]; ok { - return h, nil +// hmacHashToDsigAlgorithm maps HMAC hash function sizes to dsig algorithm constants +func hmacHashToDsigAlgorithm(hfunc func() hash.Hash) (string, error) { + h := hfunc() + switch h.Size() { + case 32: // SHA256 + return dsig.HMACWithSHA256, nil + case 48: // SHA384 + return dsig.HMACWithSHA384, nil + case 64: // SHA512 + return dsig.HMACWithSHA512, nil + default: + return "", fmt.Errorf("unsupported HMAC hash function: size=%d", h.Size()) } - return nil, fmt.Errorf("unsupported HMAC algorithm %s", alg) -} - -func toHMACKey(dst *[]byte, key any) error { - if err := keyconv.ByteSliceKey(dst, key); err != nil { - return fmt.Errorf(`jws.toHMACKey: invalid key type %T. []byte is required: %w`, key, err) - } - - if len(*dst) == 0 { - return fmt.Errorf(`jws.toHMACKey: missing key while signing payload`) - } - return nil } // SignHMAC generates an HMAC signature for the given payload using the specified hash function and key. 
// The raw parameter should be the pre-computed signing input (typically header.payload). +// +// This function is now a thin wrapper around dsig.SignHMAC. For new projects, you should +// consider using dsig instead of this function. func SignHMAC(key, payload []byte, hfunc func() hash.Hash) ([]byte, error) { - h := hmac.New(hfunc, key) - if _, err := h.Write(payload); err != nil { - return nil, fmt.Errorf(`failed to write payload using hmac: %w`, err) + dsigAlg, err := hmacHashToDsigAlgorithm(hfunc) + if err != nil { + return nil, fmt.Errorf("jwsbb.SignHMAC: %w", err) } - return h.Sum(nil), nil + + return dsig.Sign(key, dsigAlg, payload, nil) } // VerifyHMAC verifies an HMAC signature for the given payload. // This function verifies the signature using the specified key and hash function. // The payload parameter should be the pre-computed signing input (typically header.payload). +// +// This function is now a thin wrapper around dsig.VerifyHMAC. For new projects, you should +// consider using dsig instead of this function. func VerifyHMAC(key, payload, signature []byte, hfunc func() hash.Hash) error { - expected, err := SignHMAC(key, payload, hfunc) + dsigAlg, err := hmacHashToDsigAlgorithm(hfunc) if err != nil { - return fmt.Errorf("failed to sign payload for verification: %w", err) - } - if !hmac.Equal(signature, expected) { - return fmt.Errorf("invalid HMAC signature") + return fmt.Errorf("jwsbb.VerifyHMAC: %w", err) } - return nil + + return dsig.Verify(key, dsigAlg, payload, signature) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go index 51d01df63f..6a67ee8f86 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go @@ -12,8 +12,40 @@ // 1. Does minimal checking of input parameters (for performance); callers need to ensure that the parameters are valid. // 2. 
All exported functions are strongly typed (i.e. they do not take `any` types unless they absolutely have to). // 3. Does not rely on other public jwx packages (they are standalone, except for internal packages). +// +// This implementation uses github.com/lestrrat-go/dsig as the underlying signature provider. package jwsbb +import ( + "github.com/lestrrat-go/dsig" +) + +// JWS algorithm name constants +const ( + // HMAC algorithms + hs256 = "HS256" + hs384 = "HS384" + hs512 = "HS512" + + // RSA PKCS#1 v1.5 algorithms + rs256 = "RS256" + rs384 = "RS384" + rs512 = "RS512" + + // RSA PSS algorithms + ps256 = "PS256" + ps384 = "PS384" + ps512 = "PS512" + + // ECDSA algorithms + es256 = "ES256" + es384 = "ES384" + es512 = "ES512" + + // EdDSA algorithm + edDSA = "EdDSA" +) + // Signer is a generic interface that defines the method for signing payloads. // The type parameter K represents the key type (e.g., []byte for HMAC keys, // *rsa.PrivateKey for RSA keys, *ecdsa.PrivateKey for ECDSA keys). 
@@ -27,3 +59,36 @@ type Signer[K any] interface { type Verifier[K any] interface { Verify(key K, buf []byte, signature []byte) error } + +// JWS to dsig algorithm mapping +var jwsToDsigAlgorithm = map[string]string{ + // HMAC algorithms + hs256: dsig.HMACWithSHA256, + hs384: dsig.HMACWithSHA384, + hs512: dsig.HMACWithSHA512, + + // RSA PKCS#1 v1.5 algorithms + rs256: dsig.RSAPKCS1v15WithSHA256, + rs384: dsig.RSAPKCS1v15WithSHA384, + rs512: dsig.RSAPKCS1v15WithSHA512, + + // RSA PSS algorithms + ps256: dsig.RSAPSSWithSHA256, + ps384: dsig.RSAPSSWithSHA384, + ps512: dsig.RSAPSSWithSHA512, + + // ECDSA algorithms + es256: dsig.ECDSAWithP256AndSHA256, + es384: dsig.ECDSAWithP384AndSHA384, + es512: dsig.ECDSAWithP521AndSHA512, + // Note: ES256K requires external dependency and is handled separately + + // EdDSA algorithm + edDSA: dsig.EdDSA, +} + +// getDsigAlgorithm returns the dsig algorithm name for a JWS algorithm +func getDsigAlgorithm(jwsAlg string) (string, bool) { + dsigAlg, ok := jwsToDsigAlgorithm[jwsAlg] + return dsigAlg, ok +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go index f3083dcea9..36997cef7c 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go @@ -6,53 +6,33 @@ import ( "fmt" "io" - "github.com/lestrrat-go/jwx/v3/jws/internal/keytype" + "github.com/lestrrat-go/dsig" ) -func rsaGetSignerCryptoSignerKey(key any) (crypto.Signer, bool, error) { - cs, isCryptoSigner := key.(crypto.Signer) - if isCryptoSigner { - if !keytype.IsValidRSAKey(key) { - return nil, false, fmt.Errorf(`cannot use key of type %T`, key) +// rsaHashToDsigAlgorithm maps RSA hash functions to dsig algorithm constants +func rsaHashToDsigAlgorithm(h crypto.Hash, pss bool) (string, error) { + if pss { + switch h { + case crypto.SHA256: + return dsig.RSAPSSWithSHA256, nil + case crypto.SHA384: + return dsig.RSAPSSWithSHA384, nil + case 
crypto.SHA512: + return dsig.RSAPSSWithSHA512, nil + default: + return "", fmt.Errorf("unsupported hash algorithm for RSA-PSS: %v", h) + } + } else { + switch h { + case crypto.SHA256: + return dsig.RSAPKCS1v15WithSHA256, nil + case crypto.SHA384: + return dsig.RSAPKCS1v15WithSHA384, nil + case crypto.SHA512: + return dsig.RSAPKCS1v15WithSHA512, nil + default: + return "", fmt.Errorf("unsupported hash algorithm for RSA PKCS#1 v1.5: %v", h) } - return cs, true, nil - } - return nil, false, nil -} - -var rsaHashFuncs = map[string]struct { - Hash crypto.Hash - PSS bool // whether to use PSS padding -}{ - "RS256": {Hash: crypto.SHA256, PSS: false}, - "RS384": {Hash: crypto.SHA384, PSS: false}, - "RS512": {Hash: crypto.SHA512, PSS: false}, - "PS256": {Hash: crypto.SHA256, PSS: true}, - "PS384": {Hash: crypto.SHA384, PSS: true}, - "PS512": {Hash: crypto.SHA512, PSS: true}, -} - -func isSuppotedRSAAlgorithm(alg string) bool { - _, ok := rsaHashFuncs[alg] - return ok -} - -// RSAHashFuncFor returns the appropriate hash function and PSS flag for the given RSA algorithm. -// Supported algorithms: RS256, RS384, RS512 (PKCS#1 v1.5) and PS256, PS384, PS512 (PSS). -// Returns the hash function, PSS flag, and an error if the algorithm is unsupported. -func RSAHashFuncFor(alg string) (crypto.Hash, bool, error) { - if h, ok := rsaHashFuncs[alg]; ok { - return h.Hash, h.PSS, nil - } - return 0, false, fmt.Errorf("unsupported RSA algorithm %s", alg) -} - -// RSAPSSOptions returns the PSS options for RSA-PSS signatures with the specified hash. -// The salt length is set to equal the hash length as per RFC 7518. -func RSAPSSOptions(h crypto.Hash) rsa.PSSOptions { - return rsa.PSSOptions{ - Hash: h, - SaltLength: rsa.PSSSaltLengthEqualsHash, } } @@ -62,25 +42,30 @@ func RSAPSSOptions(h crypto.Hash) rsa.PSSOptions { // // The rr parameter is an optional io.Reader that can be used to provide randomness for signing. // If rr is nil, it defaults to rand.Reader. 
+// +// This function is now a thin wrapper around dsig.SignRSA. For new projects, you should +// consider using dsig instead of this function. func SignRSA(key *rsa.PrivateKey, payload []byte, h crypto.Hash, pss bool, rr io.Reader) ([]byte, error) { - var opts crypto.SignerOpts = h - if pss { - rsaopts := RSAPSSOptions(h) - opts = &rsaopts + dsigAlg, err := rsaHashToDsigAlgorithm(h, pss) + if err != nil { + return nil, fmt.Errorf("jwsbb.SignRSA: %w", err) } - return cryptosign(key, payload, h, opts, rr) + + return dsig.Sign(key, dsigAlg, payload, rr) } // VerifyRSA verifies an RSA signature for the given payload and header. // This function constructs the signing input by encoding the header and payload according to JWS specification, // then verifies the signature using the specified public key and hash algorithm. // If pss is true, RSA-PSS verification is used; otherwise, PKCS#1 v1.5 verification is used. +// +// This function is now a thin wrapper around dsig.VerifyRSA. For new projects, you should +// consider using dsig instead of this function. 
func VerifyRSA(key *rsa.PublicKey, payload, signature []byte, h crypto.Hash, pss bool) error { - hasher := h.New() - hasher.Write(payload) - digest := hasher.Sum(nil) - if pss { - return rsa.VerifyPSS(key, h, digest, signature, &rsa.PSSOptions{Hash: h, SaltLength: rsa.PSSSaltLengthEqualsHash}) + dsigAlg, err := rsaHashToDsigAlgorithm(h, pss) + if err != nil { + return fmt.Errorf("jwsbb.VerifyRSA: %w", err) } - return rsa.VerifyPKCS1v15(key, h, digest, signature) + + return dsig.Verify(key, dsigAlg, payload, signature) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go index 0599ee5dff..6f36ab0554 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go @@ -2,94 +2,109 @@ package jwsbb import ( "crypto" + "crypto/ecdsa" + "crypto/ed25519" "crypto/rsa" "fmt" "io" + "github.com/lestrrat-go/dsig" "github.com/lestrrat-go/jwx/v3/internal/keyconv" ) // Sign generates a JWS signature using the specified key and algorithm. // -// This function loads the signer registered in the hwsbb package _ONLY_. +// This function loads the signer registered in the jwsbb package _ONLY_. // It does not support custom signers that the user might have registered. // // rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader. // Not all algorithms require this parameter, but it is included for consistency. // 99% of the time, you can pass nil for rr, and it will work fine. 
func Sign(key any, alg string, payload []byte, rr io.Reader) ([]byte, error) { - switch { - case isSupportedHMACAlgorithm(alg): - return dispatchHMACSign(key, alg, payload) - case isSuppotedRSAAlgorithm(alg): - return dispatchRSASign(key, alg, payload, rr) - case isSuppotedECDSAAlgorithm(alg): - return dispatchECDSASign(key, alg, payload, rr) - case isSupportedEdDSAAlgorithm(alg): - return dispatchEdDSASign(key, alg, payload, rr) + dsigAlg, ok := getDsigAlgorithm(alg) + if !ok { + return nil, fmt.Errorf(`jwsbb.Sign: unsupported signature algorithm %q`, alg) } - return nil, fmt.Errorf(`jwsbb.Sign: unsupported signature algorithm %q`, alg) -} + // Get dsig algorithm info to determine key conversion strategy + dsigInfo, ok := dsig.GetAlgorithmInfo(dsigAlg) + if !ok { + return nil, fmt.Errorf(`jwsbb.Sign: dsig algorithm %q not registered`, dsigAlg) + } -func dispatchHMACSign(key any, alg string, payload []byte) ([]byte, error) { - h, err := HMACHashFuncFor(alg) - if err != nil { - return nil, fmt.Errorf(`jwsbb.Sign: failed to get hash function for %s: %w`, alg, err) + switch dsigInfo.Family { + case dsig.HMAC: + return dispatchHMACSign(key, dsigAlg, payload) + case dsig.RSA: + return dispatchRSASign(key, dsigAlg, payload, rr) + case dsig.ECDSA: + return dispatchECDSASign(key, dsigAlg, payload, rr) + case dsig.EdDSAFamily: + return dispatchEdDSASign(key, dsigAlg, payload, rr) + default: + return nil, fmt.Errorf(`jwsbb.Sign: unsupported dsig algorithm family %q`, dsigInfo.Family) } +} +func dispatchHMACSign(key any, dsigAlg string, payload []byte) ([]byte, error) { var hmackey []byte - if err := toHMACKey(&hmackey, key); err != nil { - return nil, fmt.Errorf(`jwsbb.Sign: %w`, err) + if err := keyconv.ByteSliceKey(&hmackey, key); err != nil { + return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. 
[]byte is required: %w`, key, err) } - return SignHMAC(hmackey, payload, h) + + return dsig.Sign(hmackey, dsigAlg, payload, nil) } -func dispatchRSASign(key any, alg string, payload []byte, rr io.Reader) ([]byte, error) { - h, pss, err := RSAHashFuncFor(alg) - if err != nil { - return nil, fmt.Errorf(`jwsbb.Sign: failed to get hash function for %s: %w`, alg, err) - } - cs, isCryptoSigner, err := rsaGetSignerCryptoSignerKey(key) - if err != nil { - return nil, fmt.Errorf(`jwsbb.Sign: %w`, err) - } - if isCryptoSigner { - var options crypto.SignerOpts = h - if pss { - rsaopts := RSAPSSOptions(h) - options = &rsaopts +func dispatchRSASign(key any, dsigAlg string, payload []byte, rr io.Reader) ([]byte, error) { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an RSA key + if _, ok := signer.Public().(*rsa.PublicKey); ok { + return dsig.Sign(signer, dsigAlg, payload, rr) } - return SignCryptoSigner(cs, payload, h, options, rr) } + // Fall back to concrete key types var privkey *rsa.PrivateKey if err := keyconv.RSAPrivateKey(&privkey, key); err != nil { - return nil, fmt.Errorf(`jws.RSASigner: invalid key type %T. rsa.PrivateKey is required: %w`, key, err) + return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. 
*rsa.PrivateKey is required: %w`, key, err) } - return SignRSA(privkey, payload, h, pss, rr) + + return dsig.Sign(privkey, dsigAlg, payload, rr) } -func dispatchEdDSASign(key any, _ string, payload []byte, rr io.Reader) ([]byte, error) { - signer, err := eddsaGetSigner(key) - if err != nil { - return nil, fmt.Errorf(`jws.EdDSASigner: %w`, err) +func dispatchECDSASign(key any, dsigAlg string, payload []byte, rr io.Reader) ([]byte, error) { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an ECDSA key + if _, ok := signer.Public().(*ecdsa.PublicKey); ok { + return dsig.Sign(signer, dsigAlg, payload, rr) + } + } + + // Fall back to concrete key types + var privkey *ecdsa.PrivateKey + if err := keyconv.ECDSAPrivateKey(&privkey, key); err != nil { + return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. *ecdsa.PrivateKey is required: %w`, key, err) } - return SignCryptoSigner(signer, payload, crypto.Hash(0), crypto.Hash(0), rr) + return dsig.Sign(privkey, dsigAlg, payload, rr) } -func dispatchECDSASign(key any, alg string, payload []byte, rr io.Reader) ([]byte, error) { - h, err := ECDSAHashFuncFor(alg) - if err != nil { - return nil, fmt.Errorf(`jwsbb.Sign: failed to get hash function for %s: %w`, alg, err) - } - privkey, cs, isCryptoSigner, err := ecdsaGetSignerKey(key) - if err != nil { - return nil, fmt.Errorf(`jws.ECDSASigner: %w`, err) +func dispatchEdDSASign(key any, dsigAlg string, payload []byte, rr io.Reader) ([]byte, error) { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an EdDSA key + if _, ok := signer.Public().(ed25519.PublicKey); ok { + return dsig.Sign(signer, dsigAlg, payload, rr) + } } - if isCryptoSigner { - return SignECDSACryptoSigner(cs, payload, h, rr) + + // Fall back to concrete key types + var privkey ed25519.PrivateKey + if err := keyconv.Ed25519PrivateKey(&privkey, key); err != nil { + return 
nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. ed25519.PrivateKey is required: %w`, key, err) } - return SignECDSA(privkey, payload, h, rr) + + return dsig.Sign(privkey, dsigAlg, payload, rr) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go index 85121199fd..bac3ff487e 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go @@ -7,6 +7,7 @@ import ( "crypto/rsa" "fmt" + "github.com/lestrrat-go/dsig" "github.com/lestrrat-go/jwx/v3/internal/keyconv" ) @@ -15,113 +16,90 @@ import ( // This function loads the verifier registered in the jwsbb package _ONLY_. // It does not support custom verifiers that the user might have registered. func Verify(key any, alg string, payload, signature []byte) error { - switch { - case isSupportedHMACAlgorithm(alg): - return dispatchHMACVerify(key, alg, payload, signature) - case isSuppotedRSAAlgorithm(alg): - return dispatchRSAVerify(key, alg, payload, signature) - case isSuppotedECDSAAlgorithm(alg): - return dispatchECDSAVerify(key, alg, payload, signature) - case isSupportedEdDSAAlgorithm(alg): - return dispatchEdDSAVerify(key, alg, payload, signature) + dsigAlg, ok := getDsigAlgorithm(alg) + if !ok { + return fmt.Errorf(`jwsbb.Verify: unsupported signature algorithm %q`, alg) } - return fmt.Errorf(`jwsbb.Verify: unsupported signature algorithm %q`, alg) -} - -func dispatchHMACVerify(key any, alg string, payload, signature []byte) error { - h, err := HMACHashFuncFor(alg) - if err != nil { - return fmt.Errorf(`jwsbb.Verify: failed to get hash function for %s: %w`, alg, err) + // Get dsig algorithm info to determine key conversion strategy + dsigInfo, ok := dsig.GetAlgorithmInfo(dsigAlg) + if !ok { + return fmt.Errorf(`jwsbb.Verify: dsig algorithm %q not registered`, dsigAlg) } - var hmackey []byte - if err := toHMACKey(&hmackey, key); err != nil { - return 
fmt.Errorf(`jwsbb.Verify: %w`, err) + switch dsigInfo.Family { + case dsig.HMAC: + return dispatchHMACVerify(key, dsigAlg, payload, signature) + case dsig.RSA: + return dispatchRSAVerify(key, dsigAlg, payload, signature) + case dsig.ECDSA: + return dispatchECDSAVerify(key, dsigAlg, payload, signature) + case dsig.EdDSAFamily: + return dispatchEdDSAVerify(key, dsigAlg, payload, signature) + default: + return fmt.Errorf(`jwsbb.Verify: unsupported dsig algorithm family %q`, dsigInfo.Family) } - return VerifyHMAC(hmackey, payload, signature, h) } -func dispatchRSAVerify(key any, alg string, payload, signature []byte) error { - h, pss, err := RSAHashFuncFor(alg) - if err != nil { - return fmt.Errorf(`jwsbb.Verify: failed to get hash function for %s: %w`, alg, err) +func dispatchHMACVerify(key any, dsigAlg string, payload, signature []byte) error { + var hmackey []byte + if err := keyconv.ByteSliceKey(&hmackey, key); err != nil { + return fmt.Errorf(`jwsbb.Verify: invalid key type %T. []byte is required: %w`, key, err) } - var pubkey *rsa.PublicKey + return dsig.Verify(hmackey, dsigAlg, payload, signature) +} - if cs, ok := key.(crypto.Signer); ok { - cpub := cs.Public() - switch cpub := cpub.(type) { - case rsa.PublicKey: - pubkey = &cpub - case *rsa.PublicKey: - pubkey = cpub - default: - return fmt.Errorf(`jwsbb.Verify: failed to retrieve rsa.PublicKey out of crypto.Signer %T`, key) - } - } else { - if err := keyconv.RSAPublicKey(&pubkey, key); err != nil { - return fmt.Errorf(`jwsbb.Verify: failed to retrieve rsa.PublicKey out of %T: %w`, key, err) +func dispatchRSAVerify(key any, dsigAlg string, payload, signature []byte) error { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an RSA key + if _, ok := signer.Public().(*rsa.PublicKey); ok { + return dsig.Verify(signer, dsigAlg, payload, signature) } } - return VerifyRSA(pubkey, payload, signature, h, pss) -} - -func dispatchECDSAVerify(key any, 
alg string, payload, signature []byte) error { - h, err := ECDSAHashFuncFor(alg) - if err != nil { - return fmt.Errorf(`jwsbb.Verify: failed to get hash function for %s: %w`, alg, err) + // Fall back to concrete key types + var pubkey *rsa.PublicKey + if err := keyconv.RSAPublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`jwsbb.Verify: invalid key type %T. *rsa.PublicKey is required: %w`, key, err) } - pubkey, cs, isCryptoSigner, err := ecdsaGetVerifierKey(key) - if err != nil { - return fmt.Errorf(`jwsbb.Verify: %w`, err) - } - if isCryptoSigner { - return VerifyECDSACryptoSigner(cs, payload, signature, h) - } - return VerifyECDSA(pubkey, payload, signature, h) + return dsig.Verify(pubkey, dsigAlg, payload, signature) } -func dispatchEdDSAVerify(key any, _ string, payload, signature []byte) error { - var pubkey ed25519.PublicKey - signer, ok := key.(crypto.Signer) - if ok { - v := signer.Public() - pubkey, ok = v.(ed25519.PublicKey) - if !ok { - return fmt.Errorf(`jwsbb.Verify: expected crypto.Signer.Public() to return ed25519.PublicKey, but got %T`, v) - } - } else { - if err := keyconv.Ed25519PublicKey(&pubkey, key); err != nil { - return fmt.Errorf(`jwsbb.Verify: failed to retrieve ed25519.PublicKey out of %T: %w`, key, err) +func dispatchECDSAVerify(key any, dsigAlg string, payload, signature []byte) error { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an ECDSA key + if _, ok := signer.Public().(*ecdsa.PublicKey); ok { + return dsig.Verify(signer, dsigAlg, payload, signature) } } - return VerifyEdDSA(pubkey, payload, signature) + // Fall back to concrete key types + var pubkey *ecdsa.PublicKey + if err := keyconv.ECDSAPublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`jwsbb.Verify: invalid key type %T. 
*ecdsa.PublicKey is required: %w`, key, err) + } + + return dsig.Verify(pubkey, dsigAlg, payload, signature) } -func ecdsaGetVerifierKey(key any) (*ecdsa.PublicKey, crypto.Signer, bool, error) { - cs, isCryptoSigner := key.(crypto.Signer) - if isCryptoSigner { - switch key.(type) { - case ecdsa.PublicKey, *ecdsa.PublicKey: - // if it's ecdsa.PublicKey, it's more efficient to - // go through the non-crypto.Signer route. Set isCryptoSigner to false - isCryptoSigner = false +func dispatchEdDSAVerify(key any, dsigAlg string, payload, signature []byte) error { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an EdDSA key + if _, ok := signer.Public().(ed25519.PublicKey); ok { + return dsig.Verify(signer, dsigAlg, payload, signature) } } - if isCryptoSigner { - return nil, cs, true, nil - } - - var pubkey *ecdsa.PublicKey - if err := keyconv.ECDSAPublicKey(&pubkey, key); err != nil { - return nil, nil, false, fmt.Errorf(`invalid key type %T. ecdsa.PublicKey is required: %w`, key, err) + // Fall back to concrete key types + var pubkey ed25519.PublicKey + if err := keyconv.Ed25519PublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`jwsbb.Verify: invalid key type %T. 
ed25519.PublicKey is required: %w`, key, err) } - return pubkey, nil, false, nil + return dsig.Verify(pubkey, dsigAlg, payload, signature) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy.go index a6687d68cb..767ad723a3 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy.go @@ -2,11 +2,14 @@ package jws import ( "fmt" + "sync" "github.com/lestrrat-go/jwx/v3/jwa" "github.com/lestrrat-go/jwx/v3/jws/legacy" ) +var enableLegacySignersOnce = &sync.Once{} + func enableLegacySigners() { for _, alg := range []jwa.SignatureAlgorithm{jwa.HS256(), jwa.HS384(), jwa.HS512()} { if err := RegisterSigner(alg, func(alg jwa.SignatureAlgorithm) SignerFactory { @@ -74,7 +77,7 @@ func legacySignerFor(alg jwa.SignatureAlgorithm) (Signer, error) { muSigner.Lock() s, ok := signers[alg] if !ok { - v, err := NewSigner(alg) + v, err := newLegacySigner(alg) if err != nil { muSigner.Unlock() return nil, fmt.Errorf(`failed to create payload signer: %w`, err) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/legacy.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/legacy.go index 84a2527428..fe69b55e05 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/legacy.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/legacy.go @@ -23,7 +23,7 @@ type Signer interface { Algorithm() jwa.SignatureAlgorithm } -// This is for legacy support only. +// Verifier is for legacy support only. type Verifier interface { // Verify checks whether the payload and signature are valid for // the given key. 
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/options.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/options.go index 729e561936..4c217c3483 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/options.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/options.go @@ -38,7 +38,7 @@ type withKey struct { public Headers } -// This exists as an escape hatch to modify the header values after the fact +// Protected exists as an escape hatch to modify the header values after the fact func (w *withKey) Protected(v Headers) Headers { if w.protected == nil && v != nil { w.protected = v @@ -221,7 +221,7 @@ type withInsecureNoSignature struct { protected Headers } -// This exists as an escape hatch to modify the header values after the fact +// Protected exists as an escape hatch to modify the header values after the fact func (w *withInsecureNoSignature) Protected(v Headers) Headers { if w.protected == nil && v != nil { w.protected = v diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/options.yaml b/vendor/github.com/lestrrat-go/jwx/v3/jws/options.yaml index 303ab3a32e..79dbb72500 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/options.yaml +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/options.yaml @@ -227,8 +227,4 @@ options: interface: GlobalOption constant_value: true comment: | - WithLegacySigners specifies whether the JWS package should use legacy - signers for signing JWS messages. - - Usually there's no need to use this option, as the new signers and - verifiers are loaded by default. + WithLegacySigners is a no-op option that exists only for backwards compatibility. 
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/options_gen.go index b97cf7e8dd..7013e86bd7 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/options_gen.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/options_gen.go @@ -356,11 +356,7 @@ func WithKeyUsed(v any) VerifyOption { return &verifyOption{option.New(identKeyUsed{}, v)} } -// WithLegacySigners specifies whether the JWS package should use legacy -// signers for signing JWS messages. -// -// Usually there's no need to use this option, as the new signers and -// verifiers are loaded by default. +// WithLegacySigners is a no-op option that exists only for backwards compatibility. func WithLegacySigners() GlobalOption { return &globalOption{option.New(identLegacySigners{}, true)} } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/signer.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/signer.go index 340666931f..99005e859a 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/signer.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/signer.go @@ -2,6 +2,7 @@ package jws import ( "fmt" + "strings" "sync" "github.com/lestrrat-go/jwx/v3/jwa" @@ -33,6 +34,19 @@ func (fn SignerFactoryFn) Create() (Signer, error) { return fn() } +func init() { + // register the signers using jwsbb. These will be used by default. + for _, alg := range jwa.SignatureAlgorithms() { + if alg == jwa.NoSignature() { + continue + } + + if err := RegisterSigner(alg, defaultSigner{alg: alg}); err != nil { + panic(fmt.Sprintf("RegisterSigner failed: %v", err)) + } + } +} + // SignerFor returns a Signer2 for the given signature algorithm. // // Currently, this function will never fail. It will always return a @@ -43,6 +57,9 @@ func (fn SignerFactoryFn) Create() (Signer, error) { // 3. If no Signer2 or legacy Signer(Factory) is registered, it will return a // default signer that uses jwsbb.Sign. // +// 1 and 2 will take care of 99% of the cases. 
The only time 3 will happen is +// when you are using a custom algorithm that is not supported out of the box. +// // jwsbb.Sign knows how to handle a static set of algorithms, so if the // algorithm is not supported, it will return an error when you call // `Sign` on the default signer. @@ -80,6 +97,14 @@ var signerDB = make(map[jwa.SignatureAlgorithm]SignerFactory) // Unlike the `UnregisterSigner` function, this function automatically // calls `jwa.RegisterSignatureAlgorithm` to register the algorithm // in this module's algorithm database. +// +// For backwards compatibility, this function also accepts +// `SignerFactory` implementations, but this usage is deprecated. +// You should use `Signer2` implementations instead. +// +// If you want to completely remove an algorithm, you must call +// `jwa.UnregisterSignatureAlgorithm` yourself after calling +// `UnregisterSigner`. func RegisterSigner(alg jwa.SignatureAlgorithm, f any) error { jwa.RegisterSignatureAlgorithm(alg) switch s := f.(type) { @@ -87,22 +112,10 @@ func RegisterSigner(alg jwa.SignatureAlgorithm, f any) error { muSigner2DB.Lock() signer2DB[alg] = s muSigner2DB.Unlock() - - // delete the other signer, if there was one - muSignerDB.Lock() - delete(signerDB, alg) - muSignerDB.Unlock() case SignerFactory: muSignerDB.Lock() signerDB[alg] = s muSignerDB.Unlock() - - // Remove previous signer, if there was one - removeSigner(alg) - - muSigner2DB.Lock() - delete(signer2DB, alg) - muSigner2DB.Unlock() default: return fmt.Errorf(`jws.RegisterSigner: unsupported type %T for algorithm %q`, f, alg) } @@ -132,11 +145,25 @@ func UnregisterSigner(alg jwa.SignatureAlgorithm) { } // NewSigner creates a signer that signs payloads using the given signature algorithm. -// This function is deprecated. You should use `SignerFor()` instead. +// This function is deprecated, and will either be removed to re-purposed using +// a different signature. 
// -// This function only exists for backwards compatibility, but will not work -// unless you enable the legacy support mode by calling jws.Settings(jws.WithLegacySigners(true)). +// When you want to load a Signer object, you should use `SignerFor()` instead. func NewSigner(alg jwa.SignatureAlgorithm) (Signer, error) { + s, err := newLegacySigner(alg) + if err == nil { + return s, nil + } + + if strings.HasPrefix(err.Error(), `jws.NewSigner: unsupported signature algorithm`) { + // When newLegacySigner fails, automatically trigger to enable signers + enableLegacySignersOnce.Do(enableLegacySigners) + return newLegacySigner(alg) + } + return nil, err +} + +func newLegacySigner(alg jwa.SignatureAlgorithm) (Signer, error) { muSignerDB.RLock() f, ok := signerDB[alg] muSignerDB.RUnlock() diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/errors.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/errors.go index a1dca0d5a3..179763a50d 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/errors.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/errors.go @@ -2,6 +2,8 @@ // // It's internal because we don't want to expose _anything_ about these errors // so users absolutely cannot do anything other than use them as opaque errors. 
+// +//nolint:revive package errors import ( diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/jwt.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/jwt.go index 43e382987a..99b5ef37a6 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwt/jwt.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/jwt.go @@ -211,7 +211,12 @@ func parseBytes(data []byte, options ...ParseOption) (Token, error) { for _, o := range options { if v, ok := o.(ValidateOption); ok { ctx.validateOpts = append(ctx.validateOpts, v) - continue + // context is used for both verification and validation, so we can't just continue + switch o.Ident() { + case identContext{}: + default: + continue + } } switch o.Ident() { @@ -228,7 +233,7 @@ func parseBytes(data []byte, options ...ParseOption) (Token, error) { } } verifyOpts = append(verifyOpts, o) - case identKeySet{}, identVerifyAuto{}, identKeyProvider{}, identBase64Encoder{}: + case identKeySet{}, identVerifyAuto{}, identKeyProvider{}, identBase64Encoder{}, identContext{}: verifyOpts = append(verifyOpts, o) case identToken{}: var token Token diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.go index cadf163b15..4a7cfd3e5d 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.go @@ -1,7 +1,9 @@ package jwt import ( + "context" "fmt" + "strings" "time" "github.com/lestrrat-go/jwx/v3/jwa" @@ -137,6 +139,14 @@ func toVerifyOptions(options ...Option) ([]jws.VerifyOption, error) { return nil, fmt.Errorf(`failed to decode Base64Encoder: %w`, err) } voptions = append(voptions, jws.WithBase64Encoder(enc)) + case identContext{}: + var ctx context.Context + if err := option.Value(&ctx); err != nil { + return nil, fmt.Errorf(`failed to decode Context: %w`, err) + } + voptions = append(voptions, jws.WithContext(ctx)) + default: + return nil, fmt.Errorf(`invalid jws.VerifyOption %q passed`, `With`+strings.TrimPrefix(fmt.Sprintf(`%T`, 
option.Ident()), `jws.ident`)) } } return voptions, nil diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options.go index 0f54e05611..088c4263be 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options.go @@ -66,7 +66,7 @@ func (o *TokenOptionSet) Enable(flag TokenOption) { *o = TokenOptionSet(o.Value() | uint64(flag)) } -// Enable sets the appropriate value to disable the option in the +// Disable sets the appropriate value to disable the option in the // option set func (o *TokenOptionSet) Disable(flag TokenOption) { *o = TokenOptionSet(o.Value() & ^uint64(flag)) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options_gen.go index 7e7cbf14aa..c1f333d13b 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options_gen.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options_gen.go @@ -17,9 +17,9 @@ const _TokenOption_name = "FlattenAudienceMaxPerTokenOption" var _TokenOption_index = [...]uint8{0, 15, 32} func (i TokenOption) String() string { - i -= 1 - if i >= TokenOption(len(_TokenOption_index)-1) { - return "TokenOption(" + strconv.FormatInt(int64(i+1), 10) + ")" + idx := int(i) - 1 + if i < 1 || idx >= len(_TokenOption_index)-1 { + return "TokenOption(" + strconv.FormatInt(int64(i), 10) + ")" } - return _TokenOption_name[_TokenOption_index[i]:_TokenOption_index[i+1]] + return _TokenOption_name[_TokenOption_index[idx]:_TokenOption_index[idx+1]] } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/validate.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/validate.go index dbc43edbc2..af46868d8b 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwt/validate.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/validate.go @@ -3,6 +3,7 @@ package jwt import ( "context" "fmt" + "slices" "strconv" "time" @@ -344,12 +345,10 @@ func (ccs 
claimContainsString) Validate(_ context.Context, t Token) error { return ccs.makeErr(`claim %q does not exist or is not a []string: %w`, ccs.name, err) } - for _, v := range list { - if v == ccs.value { - return nil - } + if !slices.Contains(list, ccs.value) { + return ccs.makeErr(`%q not satisfied`, ccs.name) } - return ccs.makeErr(`%q not satisfied`, ccs.name) + return nil } // audienceClaimContainsString can be used to check if the audience claim, which is diff --git a/vendor/github.com/lestrrat-go/option/.gitignore b/vendor/github.com/lestrrat-go/option/.gitignore deleted file mode 100644 index 66fd13c903..0000000000 --- a/vendor/github.com/lestrrat-go/option/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Dependency directories (remove the comment below to include it) -# vendor/ diff --git a/vendor/github.com/lestrrat-go/option/README.md b/vendor/github.com/lestrrat-go/option/README.md deleted file mode 100644 index cab0044ed3..0000000000 --- a/vendor/github.com/lestrrat-go/option/README.md +++ /dev/null @@ -1,245 +0,0 @@ -# option - -Base object for the "Optional Parameters Pattern". 
- -# DESCRIPTION - -The beauty of this pattern is that you can achieve a method that can -take the following simple calling style - -```go -obj.Method(mandatory1, mandatory2) -``` - -or the following, if you want to modify its behavior with optional parameters - -```go -obj.Method(mandatory1, mandatory2, optional1, optional2, optional3) -``` - -Instead of the more clunky zero value for optionals style - -```go -obj.Method(mandatory1, mandatory2, nil, "", 0) -``` - -or the equally clunky config object style, which requires you to create a -struct with `NamesThatLookReallyLongBecauseItNeedsToIncludeMethodNamesConfig - -```go -cfg := &ConfigForMethod{ - Optional1: ..., - Optional2: ..., - Optional3: ..., -} -obj.Method(mandatory1, mandatory2, &cfg) -``` - -# SYNOPSIS - -Create an "identifier" for the option. We recommend using an unexported empty struct, -because - -1. It is uniquely identifiable globally -1. Takes minimal space -1. Since it's unexported, you do not have to worry about it leaking elsewhere or having it changed by consumers - -```go -// an unexported empty struct -type identFeatureX struct{} -``` - -Then define a method to create an option using this identifier. Here we assume -that the option will be a boolean option. - -```go -// this is optional, but for readability we usually use a wrapper -// around option.Interface, or a type alias. -type Option -func WithFeatureX(v bool) Option { - // use the constructor to create a new option - return option.New(identFeatureX{}, v) -} -``` - -Now you can create an option, which essentially a two element tuple consisting -of an identifier and its associated value. 
- -To consume this, you will need to create a function with variadic parameters, -and iterate over the list looking for a particular identifier: - -```go -func MyAwesomeFunc( /* mandatory parameters omitted */, options ...[]Option) { - var enableFeatureX bool - // The nolint directive is recommended if you are using linters such - // as golangci-lint - //nolint:forcetypeassert - for _, option := range options { - switch option.Ident() { - case identFeatureX{}: - enableFeatureX = option.Value().(bool) - // other cases omitted - } - } - if enableFeatureX { - .... - } -} -``` - -# Option objects - -Option objects take two arguments, its identifier and the value it contains. - -The identifier can be anything, but it's usually better to use a an unexported -empty struct so that only you have the ability to generate said option: - -```go -type identOptionalParamOne struct{} -type identOptionalParamTwo struct{} -type identOptionalParamThree struct{} - -func WithOptionOne(v ...) Option { - return option.New(identOptionalParamOne{}, v) -} -``` - -Then you can call the method we described above as - -```go -obj.Method(m1, m2, WithOptionOne(...), WithOptionTwo(...), WithOptionThree(...)) -``` - -Options should be parsed in a code that looks somewhat like this - -```go -func (obj *Object) Method(m1 Type1, m2 Type2, options ...Option) { - paramOne := defaultValueParamOne - for _, option := range options { - switch option.Ident() { - case identOptionalParamOne{}: - paramOne = option.Value().(...) - } - } - ... -} -``` - -The loop requires a bit of boilerplate, and admittedly, this is the main downside -of this module. However, if you think you want use the Option as a Function pattern, -please check the FAQ below for rationale. 
- -# Simple usage - -Most of the times all you need to do is to declare the Option type as an alias -in your code: - -```go -package myawesomepkg - -import "github.com/lestrrat-go/option" - -type Option = option.Interface -``` - -Then you can start defining options like they are described in the SYNOPSIS section. - -# Differentiating Options - -When you have multiple methods and options, and those options can only be passed to -each one the methods, it's hard to see which options should be passed to which method. - -```go -func WithX() Option { ... } -func WithY() Option { ... } - -// Now, which of WithX/WithY go to which method? -func (*Obj) Method1(options ...Option) {} -func (*Obj) Method2(options ...Option) {} -``` - -In this case the easiest way to make it obvious is to put an extra layer around -the options so that they have different types - -```go -type Method1Option interface { - Option - method1Option() -} - -type method1Option struct { Option } -func (*method1Option) method1Option() {} - -func WithX() Method1Option { - return &methodOption{option.New(...)} -} - -func (*Obj) Method1(options ...Method1Option) {} -``` - -This way the compiler knows if an option can be passed to a given method. - -# FAQ - -## Why aren't these function-based? - -Using a base option type like `type Option func(ctx interface{})` is certainly one way to achieve the same goal. In this case, you are giving the option itself the ability to "configure" the main object. For example: - -```go -type Foo struct { - optionaValue bool -} - -type Option func(*Foo) error - -func WithOptionalValue(v bool) Option { - return Option(func(f *Foo) error { - f.optionalValue = v - return nil - }) -} - -func NewFoo(options ...Option) (*Foo, error) { - var f Foo - for _, o := range options { - if err := o(&f); err != nil { - return nil, err - } - } - return &f -} -``` - -This in itself is fine, but we think there are a few problems: - -### 1. 
It's hard to create a reusable "Option" type - -We create many libraries using this optional pattern. We would like to provide a default base object. However, this function based approach is not reusuable because each "Option" type requires that it has a context-specific input type. For example, if the "Option" type in the previous example was `func(interface{}) error`, then its usability will significantly decrease because of the type conversion. - -This is not to say that this library's approach is better as it also requires type conversion to convert the _value_ of the option. However, part of the beauty of the original function based approach was the ease of its use, and we claim that this significantly decreases the merits of the function based approach. - -### 2. The receiver requires exported fields - -Part of the appeal for a function-based option pattern is by giving the option itself the ability to do what it wants, you open up the possibility of allowing third-parties to create options that do things that the library authors did not think about. - -```go -package thirdparty
-
-func WithMyAwesomeOption( ... ) mypkg.Option {
 return mypkg.Option(func(f *mypkg) error { - f.X = ... - f.Y = ... - f.Z = ... - return nil - }) -} -``` - -However, for any third party code to access and set field values, these fields (`X`, `Y`, `Z`) must be exported. Basically you will need an "open" struct. - -Exported fields are absolutely no problem when you have a struct that represents data alone (i.e., API calls that refer or change state information) happen, but we think that casually expose fields for a library struct is a sure way to maintenance hell in the future. What happens when you want to change the API? What happens when you realize that you want to use the field as state (i.e. use it for more than configuration)? 
What if they kept referring to that field, and then you have concurrent code accessing it? - -Giving third parties complete access to exported fields is like handing out a loaded weapon to the users, and you are at their mercy. - -Of course, providing public APIs for everything so you can validate and control concurrency is an option, but then ... it's a lot of work, and you may have to provide APIs _only_ so that users can refer it in the option-configuration phase. That sounds like a lot of extra work. - diff --git a/vendor/github.com/lestrrat-go/option/option.go b/vendor/github.com/lestrrat-go/option/option.go deleted file mode 100644 index bfdbb118c0..0000000000 --- a/vendor/github.com/lestrrat-go/option/option.go +++ /dev/null @@ -1,38 +0,0 @@ -package option - -import "fmt" - -// Interface defines the minimum interface that an option must fulfill -type Interface interface { - // Ident returns the "identity" of this option, a unique identifier that - // can be used to differentiate between options - Ident() interface{} - - // Value returns the corresponding value. 
- Value() interface{} -} - -type pair struct { - ident interface{} - value interface{} -} - -// New creates a new Option -func New(ident, value interface{}) Interface { - return &pair{ - ident: ident, - value: value, - } -} - -func (p *pair) Ident() interface{} { - return p.ident -} - -func (p *pair) Value() interface{} { - return p.value -} - -func (p *pair) String() string { - return fmt.Sprintf(`%v(%v)`, p.ident, p.value) -} diff --git a/vendor/github.com/letsencrypt/boulder/core/challenges.go b/vendor/github.com/letsencrypt/boulder/core/challenges.go index 75a20373fb..c2c8f060d6 100644 --- a/vendor/github.com/letsencrypt/boulder/core/challenges.go +++ b/vendor/github.com/letsencrypt/boulder/core/challenges.go @@ -1,7 +1,5 @@ package core -import "fmt" - func newChallenge(challengeType AcmeChallenge, token string) Challenge { return Challenge{ Type: challengeType, @@ -30,19 +28,7 @@ func DNSAccountChallenge01(token string) Challenge { return newChallenge(ChallengeTypeDNSAccount01, token) } -// NewChallenge constructs a challenge of the given kind. It returns an -// error if the challenge type is unrecognized. -func NewChallenge(kind AcmeChallenge, token string) (Challenge, error) { - switch kind { - case ChallengeTypeHTTP01: - return HTTPChallenge01(token), nil - case ChallengeTypeDNS01: - return DNSChallenge01(token), nil - case ChallengeTypeTLSALPN01: - return TLSALPNChallenge01(token), nil - case ChallengeTypeDNSAccount01: - return DNSAccountChallenge01(token), nil - default: - return Challenge{}, fmt.Errorf("unrecognized challenge type %q", kind) - } +// DNSPersistChallenge01 constructs a dns-persist-01 challenge. 
+func DNSPersistChallenge01() Challenge { + return newChallenge(ChallengeTypeDNSPersist01, "") } diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go index ac7a6f96b1..5186110945 100644 --- a/vendor/github.com/letsencrypt/boulder/core/objects.go +++ b/vendor/github.com/letsencrypt/boulder/core/objects.go @@ -33,21 +33,6 @@ const ( StatusDeactivated = AcmeStatus("deactivated") // Object has been deactivated ) -// AcmeResource values identify different types of ACME resources -type AcmeResource string - -// The types of ACME resources -const ( - ResourceNewReg = AcmeResource("new-reg") - ResourceNewAuthz = AcmeResource("new-authz") - ResourceNewCert = AcmeResource("new-cert") - ResourceRevokeCert = AcmeResource("revoke-cert") - ResourceRegistration = AcmeResource("reg") - ResourceChallenge = AcmeResource("challenge") - ResourceAuthz = AcmeResource("authz") - ResourceKeyChange = AcmeResource("key-change") -) - // AcmeChallenge values identify different types of ACME challenges type AcmeChallenge string @@ -57,12 +42,13 @@ const ( ChallengeTypeDNS01 = AcmeChallenge("dns-01") ChallengeTypeTLSALPN01 = AcmeChallenge("tls-alpn-01") ChallengeTypeDNSAccount01 = AcmeChallenge("dns-account-01") + ChallengeTypeDNSPersist01 = AcmeChallenge("dns-persist-01") ) // IsValid tests whether the challenge is a known challenge func (c AcmeChallenge) IsValid() bool { switch c { - case ChallengeTypeHTTP01, ChallengeTypeDNS01, ChallengeTypeTLSALPN01, ChallengeTypeDNSAccount01: + case ChallengeTypeHTTP01, ChallengeTypeDNS01, ChallengeTypeTLSALPN01, ChallengeTypeDNSAccount01, ChallengeTypeDNSPersist01: return true default: return false @@ -83,9 +69,12 @@ var OCSPStatusToInt = map[OCSPStatus]int{ OCSPStatusRevoked: ocsp.Revoked, } -// DNSPrefix is attached to DNS names in DNS challenges +// DNSPrefix is attached to DNS names in dns-01 and dns-account-01 challenges const DNSPrefix = "_acme-challenge" +// DNSPersistPrefix is 
attached to DNS names in dns-persist-01 challenges. +const DNSPersistPrefix = "_validation-persist" + type RawCertificateRequest struct { CSR JSONBuffer `json:"csr"` // The encoded CSR } @@ -171,9 +160,13 @@ type Challenge struct { Error *probs.ProblemDetails `json:"error,omitempty"` // Token is a random value that uniquely identifies the challenge. It is used - // by all current challenges (http-01, tls-alpn-01, and dns-01). + // by all challenges except dns-persist-01. Token string `json:"token,omitempty"` + // IssuerDomainNames contains the list of issuer domain name values accepted + // during dns-persist-01 challenge validation. + IssuerDomainNames []string `json:"issuer-domain-names,omitempty"` + // Contains information about URLs used or redirected to and IPs resolved and // used ValidationRecord []ValidationRecord `json:"validationRecord,omitempty"` @@ -204,10 +197,7 @@ func (ch Challenge) RecordsSane() bool { switch ch.Type { case ChallengeTypeHTTP01: for _, rec := range ch.ValidationRecord { - // TODO(#7140): Add a check for ResolverAddress == "" only after the - // core.proto change has been deployed. - if rec.URL == "" || rec.Hostname == "" || rec.Port == "" || (rec.AddressUsed == netip.Addr{}) || - len(rec.AddressesResolved) == 0 { + if rec.URL == "" || rec.Hostname == "" || rec.Port == "" || (rec.AddressUsed == netip.Addr{}) || len(rec.AddressesResolved) == 0 { return false } } @@ -218,18 +208,13 @@ func (ch Challenge) RecordsSane() bool { if ch.ValidationRecord[0].URL != "" { return false } - // TODO(#7140): Add a check for ResolverAddress == "" only after the - // core.proto change has been deployed. 
- if ch.ValidationRecord[0].Hostname == "" || ch.ValidationRecord[0].Port == "" || - (ch.ValidationRecord[0].AddressUsed == netip.Addr{}) || len(ch.ValidationRecord[0].AddressesResolved) == 0 { + if ch.ValidationRecord[0].Hostname == "" || ch.ValidationRecord[0].Port == "" || (ch.ValidationRecord[0].AddressUsed == netip.Addr{}) || len(ch.ValidationRecord[0].AddressesResolved) == 0 { return false } - case ChallengeTypeDNS01, ChallengeTypeDNSAccount01: + case ChallengeTypeDNS01, ChallengeTypeDNSAccount01, ChallengeTypeDNSPersist01: if len(ch.ValidationRecord) > 1 { return false } - // TODO(#7140): Add a check for ResolverAddress == "" only after the - // core.proto change has been deployed. if ch.ValidationRecord[0].Hostname == "" { return false } @@ -241,14 +226,20 @@ func (ch Challenge) RecordsSane() bool { return true } -// CheckPending ensures that a challenge object is pending and has a token. -// This is used before offering the challenge to the client, and before actually -// validating a challenge. +// CheckPending ensures that a challenge object is pending and, for challenge +// types that require one, has a token. This is used before offering the +// challenge to the client, and before actually validating a challenge. func (ch Challenge) CheckPending() error { if ch.Status != StatusPending { return fmt.Errorf("challenge is not pending") } + // dns-persist-01 does not use a token; validation relies on persistent + // DNS TXT records containing the issuer-domain-name and accounturi. 
+ if ch.Type == ChallengeTypeDNSPersist01 { + return nil + } + if !looksLikeAToken(ch.Token) { return fmt.Errorf("token is missing or malformed") } diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go index a85ff2817e..39b72d0508 100644 --- a/vendor/github.com/letsencrypt/boulder/core/util.go +++ b/vendor/github.com/letsencrypt/boulder/core/util.go @@ -27,6 +27,8 @@ import ( "unicode" "github.com/go-jose/go-jose/v4" + "golang.org/x/net/idna" + "golang.org/x/text/unicode/norm" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/durationpb" @@ -398,3 +400,22 @@ func IsCanceled(err error) bool { func Command() string { return path.Base(os.Args[0]) } + +// NormalizeIssuerDomainName normalizes an RFC 8659 issuer-domain-name per the +// recommended algorithm in draft-ietf-acme-dns-persist-00, Section 9.1.1: +// case-fold to lowercase, apply Unicode NFC normalization, convert to A-label +// (Punycode), remove any trailing dot, and ensure the result is no more than +// 253 octets in length. If normalization fails, an error is returned. 
+func NormalizeIssuerDomainName(name string) (string, error) { + name = strings.ToLower(name) + name = norm.NFC.String(name) + name, err := idna.Lookup.ToASCII(name) + if err != nil { + return "", fmt.Errorf("converting issuer domain name %q to ASCII: %w", name, err) + } + name = strings.TrimSuffix(name, ".") + if len(name) > 253 { + return "", fmt.Errorf("issuer domain name %q exceeds 253 octets (%d)", name, len(name)) + } + return name, nil +} diff --git a/vendor/github.com/letsencrypt/boulder/probs/probs.go b/vendor/github.com/letsencrypt/boulder/probs/probs.go index 7ff35ca61f..fc8ba05765 100644 --- a/vendor/github.com/letsencrypt/boulder/probs/probs.go +++ b/vendor/github.com/letsencrypt/boulder/probs/probs.go @@ -312,19 +312,6 @@ func UnsupportedIdentifier(detail string, a ...any) *ProblemDetails { // Additional helper functions that return variations on MalformedProblem with // different HTTP status codes set. -// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request -// Timeout status code. -func Canceled(detail string, a ...any) *ProblemDetails { - if len(a) > 0 { - detail = fmt.Sprintf(detail, a...) - } - return &ProblemDetails{ - Type: MalformedProblem, - Detail: detail, - HTTPStatus: http.StatusRequestTimeout, - } -} - // Conflict returns a ProblemDetails with a ConflictProblem and a 409 Conflict // status code. func Conflict(detail string) *ProblemDetails { diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go index 0edabac394..a8c9c1fb10 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -316,6 +316,11 @@ func IsAmbiguousWidth(r rune) bool { return inTables(r, private, ambiguous) } +// IsCombiningWidth returns whether is combining width or not. +func IsCombiningWidth(r rune) bool { + return inTable(r, combining) +} + // IsNeutralWidth returns whether is neutral width or not. 
func IsNeutralWidth(r rune) bool { return inTable(r, neutral) diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go index ad025ad529..cdd003e646 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth_table.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth_table.go @@ -5,47 +5,50 @@ package runewidth var combining = table{ {0x0300, 0x036F}, {0x0483, 0x0489}, {0x07EB, 0x07F3}, {0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0CF3, 0x0CF3}, - {0x0D00, 0x0D01}, {0x135D, 0x135F}, {0x1A7F, 0x1A7F}, - {0x1AB0, 0x1ACE}, {0x1B6B, 0x1B73}, {0x1DC0, 0x1DFF}, + {0x0D00, 0x0D01}, {0x135D, 0x135F}, {0x180B, 0x180D}, + {0x180F, 0x180F}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1ADD}, + {0x1AE0, 0x1AEB}, {0x1B6B, 0x1B73}, {0x1DC0, 0x1DFF}, {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2DE0, 0x2DFF}, {0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D}, {0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA8E0, 0xA8F1}, - {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x10376, 0x1037A}, - {0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x10F82, 0x10F85}, - {0x11300, 0x11301}, {0x1133B, 0x1133C}, {0x11366, 0x1136C}, - {0x11370, 0x11374}, {0x16AF0, 0x16AF4}, {0x1CF00, 0x1CF2D}, - {0x1CF30, 0x1CF46}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172}, - {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD}, - {0x1D242, 0x1D244}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, - {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, - {0x1E08F, 0x1E08F}, {0x1E8D0, 0x1E8D6}, + {0xFE00, 0xFE0F}, {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, + {0x10376, 0x1037A}, {0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, + {0x10F82, 0x10F85}, {0x11300, 0x11301}, {0x1133B, 0x1133C}, + {0x11366, 0x1136C}, {0x11370, 0x11374}, {0x16AF0, 0x16AF4}, + {0x1CF00, 0x1CF2D}, {0x1CF30, 0x1CF46}, {0x1D165, 0x1D169}, + {0x1D16D, 0x1D172}, {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, + {0x1D1AA, 0x1D1AD}, {0x1D242, 0x1D244}, {0x1E000, 0x1E006}, + {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, + {0x1E026, 
0x1E02A}, {0x1E08F, 0x1E08F}, {0x1E8D0, 0x1E8D6}, + {0xE0100, 0xE01EF}, } var doublewidth = table{ {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A}, {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3}, - {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653}, - {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1}, - {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, - {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA}, - {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA}, - {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B}, - {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E}, - {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797}, - {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C}, - {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, - {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x303E}, - {0x3041, 0x3096}, {0x3099, 0x30FF}, {0x3105, 0x312F}, - {0x3131, 0x318E}, {0x3190, 0x31E3}, {0x31EF, 0x321E}, - {0x3220, 0x3247}, {0x3250, 0x4DBF}, {0x4E00, 0xA48C}, - {0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3}, - {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52}, - {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60}, - {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4}, {0x16FF0, 0x16FF1}, - {0x17000, 0x187F7}, {0x18800, 0x18CD5}, {0x18D00, 0x18D08}, - {0x1AFF0, 0x1AFF3}, {0x1AFF5, 0x1AFFB}, {0x1AFFD, 0x1AFFE}, - {0x1B000, 0x1B122}, {0x1B132, 0x1B132}, {0x1B150, 0x1B152}, - {0x1B155, 0x1B155}, {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, + {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2630, 0x2637}, + {0x2648, 0x2653}, {0x267F, 0x267F}, {0x268A, 0x268F}, + {0x2693, 0x2693}, {0x26A1, 0x26A1}, {0x26AA, 0x26AB}, + {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, {0x26CE, 0x26CE}, + {0x26D4, 0x26D4}, {0x26EA, 0x26EA}, {0x26F2, 0x26F3}, + {0x26F5, 0x26F5}, {0x26FA, 0x26FA}, {0x26FD, 0x26FD}, + {0x2705, 0x2705}, {0x270A, 0x270B}, {0x2728, 0x2728}, + {0x274C, 0x274C}, {0x274E, 0x274E}, {0x2753, 0x2755}, + {0x2757, 0x2757}, {0x2795, 0x2797}, {0x27B0, 0x27B0}, + {0x27BF, 0x27BF}, {0x2B1B, 
0x2B1C}, {0x2B50, 0x2B50}, + {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, {0x2E9B, 0x2EF3}, + {0x2F00, 0x2FD5}, {0x2FF0, 0x303E}, {0x3041, 0x3096}, + {0x3099, 0x30FF}, {0x3105, 0x312F}, {0x3131, 0x318E}, + {0x3190, 0x31E5}, {0x31EF, 0x321E}, {0x3220, 0x3247}, + {0x3250, 0xA48C}, {0xA490, 0xA4C6}, {0xA960, 0xA97C}, + {0xAC00, 0xD7A3}, {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, + {0xFE30, 0xFE52}, {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, + {0xFF01, 0xFF60}, {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4}, + {0x16FF0, 0x16FF6}, {0x17000, 0x18CD5}, {0x18CFF, 0x18D1E}, + {0x18D80, 0x18DF2}, {0x1AFF0, 0x1AFF3}, {0x1AFF5, 0x1AFFB}, + {0x1AFFD, 0x1AFFE}, {0x1B000, 0x1B122}, {0x1B132, 0x1B132}, + {0x1B150, 0x1B152}, {0x1B155, 0x1B155}, {0x1B164, 0x1B167}, + {0x1B170, 0x1B2FB}, {0x1D300, 0x1D356}, {0x1D360, 0x1D376}, {0x1F004, 0x1F004}, {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, {0x1F250, 0x1F251}, {0x1F260, 0x1F265}, @@ -56,12 +59,12 @@ var doublewidth = table{ {0x1F54B, 0x1F54E}, {0x1F550, 0x1F567}, {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2}, - {0x1F6D5, 0x1F6D7}, {0x1F6DC, 0x1F6DF}, {0x1F6EB, 0x1F6EC}, + {0x1F6D5, 0x1F6D8}, {0x1F6DC, 0x1F6DF}, {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB}, {0x1F7F0, 0x1F7F0}, {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F9FF}, - {0x1FA70, 0x1FA7C}, {0x1FA80, 0x1FA88}, {0x1FA90, 0x1FABD}, - {0x1FABF, 0x1FAC5}, {0x1FACE, 0x1FADB}, {0x1FAE0, 0x1FAE8}, - {0x1FAF0, 0x1FAF8}, {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD}, + {0x1FA70, 0x1FA7C}, {0x1FA80, 0x1FA8A}, {0x1FA8E, 0x1FAC6}, + {0x1FAC8, 0x1FAC8}, {0x1FACD, 0x1FADC}, {0x1FADF, 0x1FAEA}, + {0x1FAEF, 0x1FAF8}, {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD}, } var ambiguous = table{ @@ -121,10 +124,9 @@ var ambiguous = table{ {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC}, {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F}, {0x2B56, 
0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF}, - {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A}, - {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D}, - {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, - {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, + {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A}, {0x1F110, 0x1F12D}, + {0x1F130, 0x1F169}, {0x1F170, 0x1F18D}, {0x1F18F, 0x1F190}, + {0x1F19B, 0x1F1AC}, {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, } var narrow = table{ {0x0020, 0x007E}, {0x00A2, 0x00A3}, {0x00A5, 0x00A6}, @@ -159,115 +161,116 @@ var neutral = table{ {0x0600, 0x070D}, {0x070F, 0x074A}, {0x074D, 0x07B1}, {0x07C0, 0x07FA}, {0x07FD, 0x082D}, {0x0830, 0x083E}, {0x0840, 0x085B}, {0x085E, 0x085E}, {0x0860, 0x086A}, - {0x0870, 0x088E}, {0x0890, 0x0891}, {0x0898, 0x0983}, - {0x0985, 0x098C}, {0x098F, 0x0990}, {0x0993, 0x09A8}, - {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, {0x09B6, 0x09B9}, - {0x09BC, 0x09C4}, {0x09C7, 0x09C8}, {0x09CB, 0x09CE}, - {0x09D7, 0x09D7}, {0x09DC, 0x09DD}, {0x09DF, 0x09E3}, - {0x09E6, 0x09FE}, {0x0A01, 0x0A03}, {0x0A05, 0x0A0A}, - {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, {0x0A2A, 0x0A30}, - {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, {0x0A38, 0x0A39}, - {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, {0x0A47, 0x0A48}, - {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, {0x0A59, 0x0A5C}, - {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76}, {0x0A81, 0x0A83}, - {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8}, - {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9}, - {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9}, {0x0ACB, 0x0ACD}, - {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3}, {0x0AE6, 0x0AF1}, - {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03}, {0x0B05, 0x0B0C}, - {0x0B0F, 0x0B10}, {0x0B13, 0x0B28}, {0x0B2A, 0x0B30}, - {0x0B32, 0x0B33}, {0x0B35, 0x0B39}, {0x0B3C, 0x0B44}, - {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D}, {0x0B55, 0x0B57}, - {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63}, {0x0B66, 0x0B77}, - {0x0B82, 0x0B83}, {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90}, - {0x0B92, 0x0B95}, {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C}, - {0x0B9E, 0x0B9F}, 
{0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA}, - {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8}, - {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7}, - {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C}, {0x0C0E, 0x0C10}, - {0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, {0x0C3C, 0x0C44}, - {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56}, - {0x0C58, 0x0C5A}, {0x0C5D, 0x0C5D}, {0x0C60, 0x0C63}, - {0x0C66, 0x0C6F}, {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90}, - {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, - {0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD}, - {0x0CD5, 0x0CD6}, {0x0CDD, 0x0CDE}, {0x0CE0, 0x0CE3}, - {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF3}, {0x0D00, 0x0D0C}, - {0x0D0E, 0x0D10}, {0x0D12, 0x0D44}, {0x0D46, 0x0D48}, - {0x0D4A, 0x0D4F}, {0x0D54, 0x0D63}, {0x0D66, 0x0D7F}, - {0x0D81, 0x0D83}, {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1}, - {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, {0x0DC0, 0x0DC6}, - {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, {0x0DD6, 0x0DD6}, - {0x0DD8, 0x0DDF}, {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF4}, - {0x0E01, 0x0E3A}, {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82}, - {0x0E84, 0x0E84}, {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3}, - {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4}, - {0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECE}, {0x0ED0, 0x0ED9}, - {0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C}, - {0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC}, - {0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7}, - {0x10CD, 0x10CD}, {0x10D0, 0x10FF}, {0x1160, 0x1248}, - {0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258}, - {0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D}, - {0x1290, 0x12B0}, {0x12B2, 0x12B5}, {0x12B8, 0x12BE}, - {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6}, - {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A}, - {0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5}, - {0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8}, - {0x1700, 0x1715}, {0x171F, 0x1736}, {0x1740, 0x1753}, - {0x1760, 0x176C}, {0x176E, 0x1770}, {0x1772, 0x1773}, - {0x1780, 0x17DD}, {0x17E0, 0x17E9}, {0x17F0, 0x17F9}, - 
{0x1800, 0x1819}, {0x1820, 0x1878}, {0x1880, 0x18AA}, - {0x18B0, 0x18F5}, {0x1900, 0x191E}, {0x1920, 0x192B}, - {0x1930, 0x193B}, {0x1940, 0x1940}, {0x1944, 0x196D}, - {0x1970, 0x1974}, {0x1980, 0x19AB}, {0x19B0, 0x19C9}, - {0x19D0, 0x19DA}, {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, - {0x1A60, 0x1A7C}, {0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, - {0x1AA0, 0x1AAD}, {0x1AB0, 0x1ACE}, {0x1B00, 0x1B4C}, - {0x1B50, 0x1B7E}, {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, - {0x1C3B, 0x1C49}, {0x1C4D, 0x1C88}, {0x1C90, 0x1CBA}, - {0x1CBD, 0x1CC7}, {0x1CD0, 0x1CFA}, {0x1D00, 0x1F15}, - {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, - {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, - {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, - {0x1FB6, 0x1FC4}, {0x1FC6, 0x1FD3}, {0x1FD6, 0x1FDB}, - {0x1FDD, 0x1FEF}, {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFE}, - {0x2000, 0x200F}, {0x2011, 0x2012}, {0x2017, 0x2017}, - {0x201A, 0x201B}, {0x201E, 0x201F}, {0x2023, 0x2023}, - {0x2028, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034}, - {0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064}, - {0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080}, - {0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8}, - {0x20AA, 0x20AB}, {0x20AD, 0x20C0}, {0x20D0, 0x20F0}, - {0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108}, - {0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120}, - {0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152}, - {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F}, - {0x217A, 0x2188}, {0x218A, 0x218B}, {0x219A, 0x21B7}, - {0x21BA, 0x21D1}, {0x21D3, 0x21D3}, {0x21D5, 0x21E6}, - {0x21E8, 0x21FF}, {0x2201, 0x2201}, {0x2204, 0x2206}, - {0x2209, 0x220A}, {0x220C, 0x220E}, {0x2210, 0x2210}, - {0x2212, 0x2214}, {0x2216, 0x2219}, {0x221B, 0x221C}, - {0x2221, 0x2222}, {0x2224, 0x2224}, {0x2226, 0x2226}, - {0x222D, 0x222D}, {0x222F, 0x2233}, {0x2238, 0x223B}, - {0x223E, 0x2247}, {0x2249, 0x224B}, {0x224D, 0x2251}, - {0x2253, 0x225F}, {0x2262, 0x2263}, {0x2268, 0x2269}, - {0x226C, 0x226D}, {0x2270, 0x2281}, 
{0x2284, 0x2285}, - {0x2288, 0x2294}, {0x2296, 0x2298}, {0x229A, 0x22A4}, - {0x22A6, 0x22BE}, {0x22C0, 0x2311}, {0x2313, 0x2319}, - {0x231C, 0x2328}, {0x232B, 0x23E8}, {0x23ED, 0x23EF}, - {0x23F1, 0x23F2}, {0x23F4, 0x2426}, {0x2440, 0x244A}, - {0x24EA, 0x24EA}, {0x254C, 0x254F}, {0x2574, 0x257F}, - {0x2590, 0x2591}, {0x2596, 0x259F}, {0x25A2, 0x25A2}, - {0x25AA, 0x25B1}, {0x25B4, 0x25B5}, {0x25B8, 0x25BB}, - {0x25BE, 0x25BF}, {0x25C2, 0x25C5}, {0x25C9, 0x25CA}, - {0x25CC, 0x25CD}, {0x25D2, 0x25E1}, {0x25E6, 0x25EE}, - {0x25F0, 0x25FC}, {0x25FF, 0x2604}, {0x2607, 0x2608}, - {0x260A, 0x260D}, {0x2610, 0x2613}, {0x2616, 0x261B}, - {0x261D, 0x261D}, {0x261F, 0x263F}, {0x2641, 0x2641}, - {0x2643, 0x2647}, {0x2654, 0x265F}, {0x2662, 0x2662}, - {0x2666, 0x2666}, {0x266B, 0x266B}, {0x266E, 0x266E}, - {0x2670, 0x267E}, {0x2680, 0x2692}, {0x2694, 0x269D}, + {0x0870, 0x0891}, {0x0897, 0x0983}, {0x0985, 0x098C}, + {0x098F, 0x0990}, {0x0993, 0x09A8}, {0x09AA, 0x09B0}, + {0x09B2, 0x09B2}, {0x09B6, 0x09B9}, {0x09BC, 0x09C4}, + {0x09C7, 0x09C8}, {0x09CB, 0x09CE}, {0x09D7, 0x09D7}, + {0x09DC, 0x09DD}, {0x09DF, 0x09E3}, {0x09E6, 0x09FE}, + {0x0A01, 0x0A03}, {0x0A05, 0x0A0A}, {0x0A0F, 0x0A10}, + {0x0A13, 0x0A28}, {0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, + {0x0A35, 0x0A36}, {0x0A38, 0x0A39}, {0x0A3C, 0x0A3C}, + {0x0A3E, 0x0A42}, {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, + {0x0A51, 0x0A51}, {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, + {0x0A66, 0x0A76}, {0x0A81, 0x0A83}, {0x0A85, 0x0A8D}, + {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, + {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5}, + {0x0AC7, 0x0AC9}, {0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0}, + {0x0AE0, 0x0AE3}, {0x0AE6, 0x0AF1}, {0x0AF9, 0x0AFF}, + {0x0B01, 0x0B03}, {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, + {0x0B13, 0x0B28}, {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, + {0x0B35, 0x0B39}, {0x0B3C, 0x0B44}, {0x0B47, 0x0B48}, + {0x0B4B, 0x0B4D}, {0x0B55, 0x0B57}, {0x0B5C, 0x0B5D}, + {0x0B5F, 0x0B63}, {0x0B66, 0x0B77}, {0x0B82, 0x0B83}, + {0x0B85, 0x0B8A}, 
{0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, + {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, + {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, + {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD}, + {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA}, + {0x0C00, 0x0C0C}, {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, + {0x0C2A, 0x0C39}, {0x0C3C, 0x0C44}, {0x0C46, 0x0C48}, + {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, + {0x0C5C, 0x0C5D}, {0x0C60, 0x0C63}, {0x0C66, 0x0C6F}, + {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90}, {0x0C92, 0x0CA8}, + {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, {0x0CBC, 0x0CC4}, + {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD}, {0x0CD5, 0x0CD6}, + {0x0CDC, 0x0CDE}, {0x0CE0, 0x0CE3}, {0x0CE6, 0x0CEF}, + {0x0CF1, 0x0CF3}, {0x0D00, 0x0D0C}, {0x0D0E, 0x0D10}, + {0x0D12, 0x0D44}, {0x0D46, 0x0D48}, {0x0D4A, 0x0D4F}, + {0x0D54, 0x0D63}, {0x0D66, 0x0D7F}, {0x0D81, 0x0D83}, + {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1}, {0x0DB3, 0x0DBB}, + {0x0DBD, 0x0DBD}, {0x0DC0, 0x0DC6}, {0x0DCA, 0x0DCA}, + {0x0DCF, 0x0DD4}, {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, + {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF4}, {0x0E01, 0x0E3A}, + {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82}, {0x0E84, 0x0E84}, + {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3}, {0x0EA5, 0x0EA5}, + {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4}, {0x0EC6, 0x0EC6}, + {0x0EC8, 0x0ECE}, {0x0ED0, 0x0ED9}, {0x0EDC, 0x0EDF}, + {0x0F00, 0x0F47}, {0x0F49, 0x0F6C}, {0x0F71, 0x0F97}, + {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC}, {0x0FCE, 0x0FDA}, + {0x1000, 0x10C5}, {0x10C7, 0x10C7}, {0x10CD, 0x10CD}, + {0x10D0, 0x10FF}, {0x1160, 0x1248}, {0x124A, 0x124D}, + {0x1250, 0x1256}, {0x1258, 0x1258}, {0x125A, 0x125D}, + {0x1260, 0x1288}, {0x128A, 0x128D}, {0x1290, 0x12B0}, + {0x12B2, 0x12B5}, {0x12B8, 0x12BE}, {0x12C0, 0x12C0}, + {0x12C2, 0x12C5}, {0x12C8, 0x12D6}, {0x12D8, 0x1310}, + {0x1312, 0x1315}, {0x1318, 0x135A}, {0x135D, 0x137C}, + {0x1380, 0x1399}, {0x13A0, 0x13F5}, {0x13F8, 0x13FD}, + {0x1400, 0x169C}, {0x16A0, 0x16F8}, {0x1700, 0x1715}, + {0x171F, 0x1736}, {0x1740, 0x1753}, {0x1760, 0x176C}, + 
{0x176E, 0x1770}, {0x1772, 0x1773}, {0x1780, 0x17DD}, + {0x17E0, 0x17E9}, {0x17F0, 0x17F9}, {0x1800, 0x180A}, + {0x180E, 0x180E}, {0x1810, 0x1819}, {0x1820, 0x1878}, + {0x1880, 0x18AA}, {0x18B0, 0x18F5}, {0x1900, 0x191E}, + {0x1920, 0x192B}, {0x1930, 0x193B}, {0x1940, 0x1940}, + {0x1944, 0x196D}, {0x1970, 0x1974}, {0x1980, 0x19AB}, + {0x19B0, 0x19C9}, {0x19D0, 0x19DA}, {0x19DE, 0x1A1B}, + {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C}, {0x1A7F, 0x1A89}, + {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD}, {0x1AB0, 0x1ADD}, + {0x1AE0, 0x1AEB}, {0x1B00, 0x1B4C}, {0x1B4E, 0x1BF3}, + {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49}, {0x1C4D, 0x1C8A}, + {0x1C90, 0x1CBA}, {0x1CBD, 0x1CC7}, {0x1CD0, 0x1CFA}, + {0x1D00, 0x1F15}, {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, + {0x1F48, 0x1F4D}, {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, + {0x1F5B, 0x1F5B}, {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, + {0x1F80, 0x1FB4}, {0x1FB6, 0x1FC4}, {0x1FC6, 0x1FD3}, + {0x1FD6, 0x1FDB}, {0x1FDD, 0x1FEF}, {0x1FF2, 0x1FF4}, + {0x1FF6, 0x1FFE}, {0x2000, 0x200F}, {0x2011, 0x2012}, + {0x2017, 0x2017}, {0x201A, 0x201B}, {0x201E, 0x201F}, + {0x2023, 0x2023}, {0x2028, 0x202F}, {0x2031, 0x2031}, + {0x2034, 0x2034}, {0x2036, 0x203A}, {0x203C, 0x203D}, + {0x203F, 0x2064}, {0x2066, 0x2071}, {0x2075, 0x207E}, + {0x2080, 0x2080}, {0x2085, 0x208E}, {0x2090, 0x209C}, + {0x20A0, 0x20A8}, {0x20AA, 0x20AB}, {0x20AD, 0x20C1}, + {0x20D0, 0x20F0}, {0x2100, 0x2102}, {0x2104, 0x2104}, + {0x2106, 0x2108}, {0x210A, 0x2112}, {0x2114, 0x2115}, + {0x2117, 0x2120}, {0x2123, 0x2125}, {0x2127, 0x212A}, + {0x212C, 0x2152}, {0x2155, 0x215A}, {0x215F, 0x215F}, + {0x216C, 0x216F}, {0x217A, 0x2188}, {0x218A, 0x218B}, + {0x219A, 0x21B7}, {0x21BA, 0x21D1}, {0x21D3, 0x21D3}, + {0x21D5, 0x21E6}, {0x21E8, 0x21FF}, {0x2201, 0x2201}, + {0x2204, 0x2206}, {0x2209, 0x220A}, {0x220C, 0x220E}, + {0x2210, 0x2210}, {0x2212, 0x2214}, {0x2216, 0x2219}, + {0x221B, 0x221C}, {0x2221, 0x2222}, {0x2224, 0x2224}, + {0x2226, 0x2226}, {0x222D, 0x222D}, {0x222F, 0x2233}, + {0x2238, 0x223B}, {0x223E, 0x2247}, 
{0x2249, 0x224B}, + {0x224D, 0x2251}, {0x2253, 0x225F}, {0x2262, 0x2263}, + {0x2268, 0x2269}, {0x226C, 0x226D}, {0x2270, 0x2281}, + {0x2284, 0x2285}, {0x2288, 0x2294}, {0x2296, 0x2298}, + {0x229A, 0x22A4}, {0x22A6, 0x22BE}, {0x22C0, 0x2311}, + {0x2313, 0x2319}, {0x231C, 0x2328}, {0x232B, 0x23E8}, + {0x23ED, 0x23EF}, {0x23F1, 0x23F2}, {0x23F4, 0x2429}, + {0x2440, 0x244A}, {0x24EA, 0x24EA}, {0x254C, 0x254F}, + {0x2574, 0x257F}, {0x2590, 0x2591}, {0x2596, 0x259F}, + {0x25A2, 0x25A2}, {0x25AA, 0x25B1}, {0x25B4, 0x25B5}, + {0x25B8, 0x25BB}, {0x25BE, 0x25BF}, {0x25C2, 0x25C5}, + {0x25C9, 0x25CA}, {0x25CC, 0x25CD}, {0x25D2, 0x25E1}, + {0x25E6, 0x25EE}, {0x25F0, 0x25FC}, {0x25FF, 0x2604}, + {0x2607, 0x2608}, {0x260A, 0x260D}, {0x2610, 0x2613}, + {0x2616, 0x261B}, {0x261D, 0x261D}, {0x261F, 0x262F}, + {0x2638, 0x263F}, {0x2641, 0x2641}, {0x2643, 0x2647}, + {0x2654, 0x265F}, {0x2662, 0x2662}, {0x2666, 0x2666}, + {0x266B, 0x266B}, {0x266E, 0x266E}, {0x2670, 0x267E}, + {0x2680, 0x2689}, {0x2690, 0x2692}, {0x2694, 0x269D}, {0x26A0, 0x26A0}, {0x26A2, 0x26A9}, {0x26AC, 0x26BC}, {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, {0x26E4, 0x26E7}, {0x2700, 0x2704}, {0x2706, 0x2709}, {0x270C, 0x2727}, @@ -276,175 +279,210 @@ var neutral = table{ {0x2780, 0x2794}, {0x2798, 0x27AF}, {0x27B1, 0x27BE}, {0x27C0, 0x27E5}, {0x27EE, 0x2984}, {0x2987, 0x2B1A}, {0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, {0x2B5A, 0x2B73}, - {0x2B76, 0x2B95}, {0x2B97, 0x2CF3}, {0x2CF9, 0x2D25}, - {0x2D27, 0x2D27}, {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, - {0x2D6F, 0x2D70}, {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, - {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, - {0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, - {0x2DD8, 0x2DDE}, {0x2DE0, 0x2E5D}, {0x303F, 0x303F}, - {0x4DC0, 0x4DFF}, {0xA4D0, 0xA62B}, {0xA640, 0xA6F7}, - {0xA700, 0xA7CA}, {0xA7D0, 0xA7D1}, {0xA7D3, 0xA7D3}, - {0xA7D5, 0xA7D9}, {0xA7F2, 0xA82C}, {0xA830, 0xA839}, - {0xA840, 0xA877}, {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9}, - {0xA8E0, 0xA953}, {0xA95F, 0xA95F}, 
{0xA980, 0xA9CD}, - {0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36}, - {0xAA40, 0xAA4D}, {0xAA50, 0xAA59}, {0xAA5C, 0xAAC2}, - {0xAADB, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, - {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, - {0xAB30, 0xAB6B}, {0xAB70, 0xABED}, {0xABF0, 0xABF9}, - {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF}, - {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36}, - {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, - {0xFB43, 0xFB44}, {0xFB46, 0xFBC2}, {0xFBD3, 0xFD8F}, - {0xFD92, 0xFDC7}, {0xFDCF, 0xFDCF}, {0xFDF0, 0xFDFF}, - {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, - {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B}, - {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D}, - {0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 0x100FA}, - {0x10100, 0x10102}, {0x10107, 0x10133}, {0x10137, 0x1018E}, - {0x10190, 0x1019C}, {0x101A0, 0x101A0}, {0x101D0, 0x101FD}, - {0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x102E0, 0x102FB}, - {0x10300, 0x10323}, {0x1032D, 0x1034A}, {0x10350, 0x1037A}, - {0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5}, - {0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3}, - {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, - {0x1056F, 0x1057A}, {0x1057C, 0x1058A}, {0x1058C, 0x10592}, - {0x10594, 0x10595}, {0x10597, 0x105A1}, {0x105A3, 0x105B1}, - {0x105B3, 0x105B9}, {0x105BB, 0x105BC}, {0x10600, 0x10736}, - {0x10740, 0x10755}, {0x10760, 0x10767}, {0x10780, 0x10785}, - {0x10787, 0x107B0}, {0x107B2, 0x107BA}, {0x10800, 0x10805}, - {0x10808, 0x10808}, {0x1080A, 0x10835}, {0x10837, 0x10838}, - {0x1083C, 0x1083C}, {0x1083F, 0x10855}, {0x10857, 0x1089E}, - {0x108A7, 0x108AF}, {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, - {0x108FB, 0x1091B}, {0x1091F, 0x10939}, {0x1093F, 0x1093F}, - {0x10980, 0x109B7}, {0x109BC, 0x109CF}, {0x109D2, 0x10A03}, - {0x10A05, 0x10A06}, {0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, - {0x10A19, 0x10A35}, {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, - 
{0x10A50, 0x10A58}, {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, - {0x10AEB, 0x10AF6}, {0x10B00, 0x10B35}, {0x10B39, 0x10B55}, - {0x10B58, 0x10B72}, {0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, - {0x10BA9, 0x10BAF}, {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, - {0x10CC0, 0x10CF2}, {0x10CFA, 0x10D27}, {0x10D30, 0x10D39}, - {0x10E60, 0x10E7E}, {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, - {0x10EB0, 0x10EB1}, {0x10EFD, 0x10F27}, {0x10F30, 0x10F59}, - {0x10F70, 0x10F89}, {0x10FB0, 0x10FCB}, {0x10FE0, 0x10FF6}, - {0x11000, 0x1104D}, {0x11052, 0x11075}, {0x1107F, 0x110C2}, - {0x110CD, 0x110CD}, {0x110D0, 0x110E8}, {0x110F0, 0x110F9}, - {0x11100, 0x11134}, {0x11136, 0x11147}, {0x11150, 0x11176}, - {0x11180, 0x111DF}, {0x111E1, 0x111F4}, {0x11200, 0x11211}, - {0x11213, 0x11241}, {0x11280, 0x11286}, {0x11288, 0x11288}, - {0x1128A, 0x1128D}, {0x1128F, 0x1129D}, {0x1129F, 0x112A9}, - {0x112B0, 0x112EA}, {0x112F0, 0x112F9}, {0x11300, 0x11303}, - {0x11305, 0x1130C}, {0x1130F, 0x11310}, {0x11313, 0x11328}, - {0x1132A, 0x11330}, {0x11332, 0x11333}, {0x11335, 0x11339}, - {0x1133B, 0x11344}, {0x11347, 0x11348}, {0x1134B, 0x1134D}, - {0x11350, 0x11350}, {0x11357, 0x11357}, {0x1135D, 0x11363}, - {0x11366, 0x1136C}, {0x11370, 0x11374}, {0x11400, 0x1145B}, + {0x2B76, 0x2CF3}, {0x2CF9, 0x2D25}, {0x2D27, 0x2D27}, + {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D70}, + {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, + {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, + {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, + {0x2DE0, 0x2E5D}, {0x303F, 0x303F}, {0xA4D0, 0xA62B}, + {0xA640, 0xA6F7}, {0xA700, 0xA7DC}, {0xA7F1, 0xA82C}, + {0xA830, 0xA839}, {0xA840, 0xA877}, {0xA880, 0xA8C5}, + {0xA8CE, 0xA8D9}, {0xA8E0, 0xA953}, {0xA95F, 0xA95F}, + {0xA980, 0xA9CD}, {0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, + {0xAA00, 0xAA36}, {0xAA40, 0xAA4D}, {0xAA50, 0xAA59}, + {0xAA5C, 0xAAC2}, {0xAADB, 0xAAF6}, {0xAB01, 0xAB06}, + {0xAB09, 0xAB0E}, {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, + {0xAB28, 0xAB2E}, {0xAB30, 
0xAB6B}, {0xAB70, 0xABED}, + {0xABF0, 0xABF9}, {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, + {0xD800, 0xDFFF}, {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, + {0xFB1D, 0xFB36}, {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, + {0xFB40, 0xFB41}, {0xFB43, 0xFB44}, {0xFB46, 0xFDCF}, + {0xFDF0, 0xFDFF}, {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, + {0xFE76, 0xFEFC}, {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, + {0x10000, 0x1000B}, {0x1000D, 0x10026}, {0x10028, 0x1003A}, + {0x1003C, 0x1003D}, {0x1003F, 0x1004D}, {0x10050, 0x1005D}, + {0x10080, 0x100FA}, {0x10100, 0x10102}, {0x10107, 0x10133}, + {0x10137, 0x1018E}, {0x10190, 0x1019C}, {0x101A0, 0x101A0}, + {0x101D0, 0x101FD}, {0x10280, 0x1029C}, {0x102A0, 0x102D0}, + {0x102E0, 0x102FB}, {0x10300, 0x10323}, {0x1032D, 0x1034A}, + {0x10350, 0x1037A}, {0x10380, 0x1039D}, {0x1039F, 0x103C3}, + {0x103C8, 0x103D5}, {0x10400, 0x1049D}, {0x104A0, 0x104A9}, + {0x104B0, 0x104D3}, {0x104D8, 0x104FB}, {0x10500, 0x10527}, + {0x10530, 0x10563}, {0x1056F, 0x1057A}, {0x1057C, 0x1058A}, + {0x1058C, 0x10592}, {0x10594, 0x10595}, {0x10597, 0x105A1}, + {0x105A3, 0x105B1}, {0x105B3, 0x105B9}, {0x105BB, 0x105BC}, + {0x105C0, 0x105F3}, {0x10600, 0x10736}, {0x10740, 0x10755}, + {0x10760, 0x10767}, {0x10780, 0x10785}, {0x10787, 0x107B0}, + {0x107B2, 0x107BA}, {0x10800, 0x10805}, {0x10808, 0x10808}, + {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C}, + {0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF}, + {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B}, + {0x1091F, 0x10939}, {0x1093F, 0x10959}, {0x10980, 0x109B7}, + {0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06}, + {0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35}, + {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, {0x10A50, 0x10A58}, + {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6}, + {0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72}, + {0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF}, + {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, + {0x10CFA, 0x10D27}, 
{0x10D30, 0x10D39}, {0x10D40, 0x10D65}, + {0x10D69, 0x10D85}, {0x10D8E, 0x10D8F}, {0x10E60, 0x10E7E}, + {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, {0x10EB0, 0x10EB1}, + {0x10EC2, 0x10EC7}, {0x10ED0, 0x10ED8}, {0x10EFA, 0x10F27}, + {0x10F30, 0x10F59}, {0x10F70, 0x10F89}, {0x10FB0, 0x10FCB}, + {0x10FE0, 0x10FF6}, {0x11000, 0x1104D}, {0x11052, 0x11075}, + {0x1107F, 0x110C2}, {0x110CD, 0x110CD}, {0x110D0, 0x110E8}, + {0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11147}, + {0x11150, 0x11176}, {0x11180, 0x111DF}, {0x111E1, 0x111F4}, + {0x11200, 0x11211}, {0x11213, 0x11241}, {0x11280, 0x11286}, + {0x11288, 0x11288}, {0x1128A, 0x1128D}, {0x1128F, 0x1129D}, + {0x1129F, 0x112A9}, {0x112B0, 0x112EA}, {0x112F0, 0x112F9}, + {0x11300, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, + {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, + {0x11335, 0x11339}, {0x1133B, 0x11344}, {0x11347, 0x11348}, + {0x1134B, 0x1134D}, {0x11350, 0x11350}, {0x11357, 0x11357}, + {0x1135D, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, + {0x11380, 0x11389}, {0x1138B, 0x1138B}, {0x1138E, 0x1138E}, + {0x11390, 0x113B5}, {0x113B7, 0x113C0}, {0x113C2, 0x113C2}, + {0x113C5, 0x113C5}, {0x113C7, 0x113CA}, {0x113CC, 0x113D5}, + {0x113D7, 0x113D8}, {0x113E1, 0x113E2}, {0x11400, 0x1145B}, {0x1145D, 0x11461}, {0x11480, 0x114C7}, {0x114D0, 0x114D9}, {0x11580, 0x115B5}, {0x115B8, 0x115DD}, {0x11600, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C}, {0x11680, 0x116B9}, - {0x116C0, 0x116C9}, {0x11700, 0x1171A}, {0x1171D, 0x1172B}, - {0x11730, 0x11746}, {0x11800, 0x1183B}, {0x118A0, 0x118F2}, - {0x118FF, 0x11906}, {0x11909, 0x11909}, {0x1190C, 0x11913}, - {0x11915, 0x11916}, {0x11918, 0x11935}, {0x11937, 0x11938}, - {0x1193B, 0x11946}, {0x11950, 0x11959}, {0x119A0, 0x119A7}, - {0x119AA, 0x119D7}, {0x119DA, 0x119E4}, {0x11A00, 0x11A47}, - {0x11A50, 0x11AA2}, {0x11AB0, 0x11AF8}, {0x11B00, 0x11B09}, - {0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, {0x11C38, 0x11C45}, - {0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, 
{0x11C92, 0x11CA7}, - {0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09}, - {0x11D0B, 0x11D36}, {0x11D3A, 0x11D3A}, {0x11D3C, 0x11D3D}, - {0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, {0x11D60, 0x11D65}, - {0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, {0x11D90, 0x11D91}, - {0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, {0x11EE0, 0x11EF8}, - {0x11F00, 0x11F10}, {0x11F12, 0x11F3A}, {0x11F3E, 0x11F59}, + {0x116C0, 0x116C9}, {0x116D0, 0x116E3}, {0x11700, 0x1171A}, + {0x1171D, 0x1172B}, {0x11730, 0x11746}, {0x11800, 0x1183B}, + {0x118A0, 0x118F2}, {0x118FF, 0x11906}, {0x11909, 0x11909}, + {0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x11935}, + {0x11937, 0x11938}, {0x1193B, 0x11946}, {0x11950, 0x11959}, + {0x119A0, 0x119A7}, {0x119AA, 0x119D7}, {0x119DA, 0x119E4}, + {0x11A00, 0x11A47}, {0x11A50, 0x11AA2}, {0x11AB0, 0x11AF8}, + {0x11B00, 0x11B09}, {0x11B60, 0x11B67}, {0x11BC0, 0x11BE1}, + {0x11BF0, 0x11BF9}, {0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, + {0x11C38, 0x11C45}, {0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, + {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, + {0x11D08, 0x11D09}, {0x11D0B, 0x11D36}, {0x11D3A, 0x11D3A}, + {0x11D3C, 0x11D3D}, {0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, + {0x11D60, 0x11D65}, {0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, + {0x11D90, 0x11D91}, {0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, + {0x11DB0, 0x11DDB}, {0x11DE0, 0x11DE9}, {0x11EE0, 0x11EF8}, + {0x11F00, 0x11F10}, {0x11F12, 0x11F3A}, {0x11F3E, 0x11F5A}, {0x11FB0, 0x11FB0}, {0x11FC0, 0x11FF1}, {0x11FFF, 0x12399}, {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543}, - {0x12F90, 0x12FF2}, {0x13000, 0x13455}, {0x14400, 0x14646}, - {0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, - {0x16A6E, 0x16ABE}, {0x16AC0, 0x16AC9}, {0x16AD0, 0x16AED}, - {0x16AF0, 0x16AF5}, {0x16B00, 0x16B45}, {0x16B50, 0x16B59}, - {0x16B5B, 0x16B61}, {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, - {0x16E40, 0x16E9A}, {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, - {0x16F8F, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, 
- {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, + {0x12F90, 0x12FF2}, {0x13000, 0x13455}, {0x13460, 0x143FA}, + {0x14400, 0x14646}, {0x16100, 0x16139}, {0x16800, 0x16A38}, + {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, {0x16A6E, 0x16ABE}, + {0x16AC0, 0x16AC9}, {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5}, + {0x16B00, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, + {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16D40, 0x16D79}, + {0x16E40, 0x16E9A}, {0x16EA0, 0x16EB8}, {0x16EBB, 0x16ED3}, + {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, {0x16F8F, 0x16F9F}, + {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88}, + {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, {0x1CC00, 0x1CCFC}, + {0x1CD00, 0x1CEB3}, {0x1CEBA, 0x1CED0}, {0x1CEE0, 0x1CEF0}, {0x1CF00, 0x1CF2D}, {0x1CF30, 0x1CF46}, {0x1CF50, 0x1CFC3}, {0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D1EA}, {0x1D200, 0x1D245}, {0x1D2C0, 0x1D2D3}, {0x1D2E0, 0x1D2F3}, - {0x1D300, 0x1D356}, {0x1D360, 0x1D378}, {0x1D400, 0x1D454}, - {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2}, - {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9}, - {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505}, - {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, {0x1D516, 0x1D51C}, - {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544}, - {0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5}, - {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B}, {0x1DA9B, 0x1DA9F}, - {0x1DAA1, 0x1DAAF}, {0x1DF00, 0x1DF1E}, {0x1DF25, 0x1DF2A}, - {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, - {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, {0x1E030, 0x1E06D}, - {0x1E08F, 0x1E08F}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D}, - {0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E290, 0x1E2AE}, - {0x1E2C0, 0x1E2F9}, {0x1E2FF, 0x1E2FF}, {0x1E4D0, 0x1E4F9}, - {0x1E7E0, 0x1E7E6}, {0x1E7E8, 0x1E7EB}, {0x1E7ED, 0x1E7EE}, - {0x1E7F0, 0x1E7FE}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6}, - {0x1E900, 0x1E94B}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F}, - {0x1EC71, 0x1ECB4}, 
{0x1ED01, 0x1ED3D}, {0x1EE00, 0x1EE03}, - {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24}, - {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, {0x1EE34, 0x1EE37}, - {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, {0x1EE42, 0x1EE42}, - {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, {0x1EE4B, 0x1EE4B}, - {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, {0x1EE54, 0x1EE54}, - {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, {0x1EE5B, 0x1EE5B}, - {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, {0x1EE61, 0x1EE62}, - {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, {0x1EE6C, 0x1EE72}, - {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, {0x1EE7E, 0x1EE7E}, - {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, {0x1EEA1, 0x1EEA3}, - {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, {0x1EEF0, 0x1EEF1}, - {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, {0x1F030, 0x1F093}, - {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, {0x1F0C1, 0x1F0CE}, - {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10F}, {0x1F12E, 0x1F12F}, - {0x1F16A, 0x1F16F}, {0x1F1AD, 0x1F1AD}, {0x1F1E6, 0x1F1FF}, - {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, - {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF}, - {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F}, - {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A}, - {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594}, - {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F}, - {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6D3, 0x1F6D4}, - {0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F776}, - {0x1F77B, 0x1F7D9}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, + {0x1D377, 0x1D378}, {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, + {0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, + {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, + {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, + {0x1D50D, 0x1D514}, {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, + {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, + {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, + {0x1D7CE, 0x1DA8B}, {0x1DA9B, 
0x1DA9F}, {0x1DAA1, 0x1DAAF}, + {0x1DF00, 0x1DF1E}, {0x1DF25, 0x1DF2A}, {0x1E000, 0x1E006}, + {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, + {0x1E026, 0x1E02A}, {0x1E030, 0x1E06D}, {0x1E08F, 0x1E08F}, + {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D}, {0x1E140, 0x1E149}, + {0x1E14E, 0x1E14F}, {0x1E290, 0x1E2AE}, {0x1E2C0, 0x1E2F9}, + {0x1E2FF, 0x1E2FF}, {0x1E4D0, 0x1E4F9}, {0x1E5D0, 0x1E5FA}, + {0x1E5FF, 0x1E5FF}, {0x1E6C0, 0x1E6DE}, {0x1E6E0, 0x1E6F5}, + {0x1E6FE, 0x1E6FF}, {0x1E7E0, 0x1E7E6}, {0x1E7E8, 0x1E7EB}, + {0x1E7ED, 0x1E7EE}, {0x1E7F0, 0x1E7FE}, {0x1E800, 0x1E8C4}, + {0x1E8C7, 0x1E8D6}, {0x1E900, 0x1E94B}, {0x1E950, 0x1E959}, + {0x1E95E, 0x1E95F}, {0x1EC71, 0x1ECB4}, {0x1ED01, 0x1ED3D}, + {0x1EE00, 0x1EE03}, {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, + {0x1EE24, 0x1EE24}, {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, + {0x1EE34, 0x1EE37}, {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, + {0x1EE42, 0x1EE42}, {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, + {0x1EE4B, 0x1EE4B}, {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, + {0x1EE54, 0x1EE54}, {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, + {0x1EE5B, 0x1EE5B}, {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, + {0x1EE61, 0x1EE62}, {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, + {0x1EE6C, 0x1EE72}, {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, + {0x1EE7E, 0x1EE7E}, {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, + {0x1EEA1, 0x1EEA3}, {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, + {0x1EEF0, 0x1EEF1}, {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, + {0x1F030, 0x1F093}, {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, + {0x1F0C1, 0x1F0CE}, {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10F}, + {0x1F12E, 0x1F12F}, {0x1F16A, 0x1F16F}, {0x1F1AD, 0x1F1AD}, + {0x1F1E6, 0x1F1FF}, {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, + {0x1F37D, 0x1F37D}, {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, + {0x1F3D4, 0x1F3DF}, {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, + {0x1F43F, 0x1F43F}, {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, + {0x1F53E, 0x1F54A}, {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, + {0x1F57B, 0x1F594}, {0x1F597, 0x1F5A3}, 
{0x1F5A5, 0x1F5FA}, + {0x1F650, 0x1F67F}, {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, + {0x1F6D3, 0x1F6D4}, {0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, + {0x1F700, 0x1F7D9}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, {0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, - {0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B}, - {0x1F946, 0x1F946}, {0x1FA00, 0x1FA53}, {0x1FA60, 0x1FA6D}, - {0x1FB00, 0x1FB92}, {0x1FB94, 0x1FBCA}, {0x1FBF0, 0x1FBF9}, - {0xE0001, 0xE0001}, {0xE0020, 0xE007F}, + {0x1F8B0, 0x1F8BB}, {0x1F8C0, 0x1F8C1}, {0x1F8D0, 0x1F8D8}, + {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B}, {0x1F946, 0x1F946}, + {0x1FA00, 0x1FA57}, {0x1FA60, 0x1FA6D}, {0x1FB00, 0x1FB92}, + {0x1FB94, 0x1FBFA}, {0xE0001, 0xE0001}, {0xE0020, 0xE007F}, } var emoji = table{ {0x203C, 0x203C}, {0x2049, 0x2049}, {0x2122, 0x2122}, {0x2139, 0x2139}, {0x2194, 0x2199}, {0x21A9, 0x21AA}, - {0x231A, 0x231B}, {0x2328, 0x2328}, {0x2388, 0x2388}, - {0x23CF, 0x23CF}, {0x23E9, 0x23F3}, {0x23F8, 0x23FA}, - {0x24C2, 0x24C2}, {0x25AA, 0x25AB}, {0x25B6, 0x25B6}, - {0x25C0, 0x25C0}, {0x25FB, 0x25FE}, {0x2600, 0x2605}, - {0x2607, 0x2612}, {0x2614, 0x2685}, {0x2690, 0x2705}, - {0x2708, 0x2712}, {0x2714, 0x2714}, {0x2716, 0x2716}, - {0x271D, 0x271D}, {0x2721, 0x2721}, {0x2728, 0x2728}, - {0x2733, 0x2734}, {0x2744, 0x2744}, {0x2747, 0x2747}, - {0x274C, 0x274C}, {0x274E, 0x274E}, {0x2753, 0x2755}, - {0x2757, 0x2757}, {0x2763, 0x2767}, {0x2795, 0x2797}, - {0x27A1, 0x27A1}, {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, - {0x2934, 0x2935}, {0x2B05, 0x2B07}, {0x2B1B, 0x2B1C}, - {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x3030, 0x3030}, - {0x303D, 0x303D}, {0x3297, 0x3297}, {0x3299, 0x3299}, - {0x1F000, 0x1F0FF}, {0x1F10D, 0x1F10F}, {0x1F12F, 0x1F12F}, - {0x1F16C, 0x1F171}, {0x1F17E, 0x1F17F}, {0x1F18E, 0x1F18E}, - {0x1F191, 0x1F19A}, {0x1F1AD, 0x1F1E5}, {0x1F201, 0x1F20F}, - {0x1F21A, 0x1F21A}, {0x1F22F, 0x1F22F}, {0x1F232, 0x1F23A}, - {0x1F23C, 0x1F23F}, {0x1F249, 0x1F3FA}, {0x1F400, 0x1F53D}, - {0x1F546, 0x1F64F}, {0x1F680, 
0x1F6FF}, {0x1F774, 0x1F77F}, - {0x1F7D5, 0x1F7FF}, {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, - {0x1F85A, 0x1F85F}, {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F8FF}, - {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1FAFF}, + {0x231A, 0x231B}, {0x2328, 0x2328}, {0x23CF, 0x23CF}, + {0x23E9, 0x23F3}, {0x23F8, 0x23FA}, {0x24C2, 0x24C2}, + {0x25AA, 0x25AB}, {0x25B6, 0x25B6}, {0x25C0, 0x25C0}, + {0x25FB, 0x25FE}, {0x2600, 0x2604}, {0x260E, 0x260E}, + {0x2611, 0x2611}, {0x2614, 0x2615}, {0x2618, 0x2618}, + {0x261D, 0x261D}, {0x2620, 0x2620}, {0x2622, 0x2623}, + {0x2626, 0x2626}, {0x262A, 0x262A}, {0x262E, 0x262F}, + {0x2638, 0x263A}, {0x2640, 0x2640}, {0x2642, 0x2642}, + {0x2648, 0x2653}, {0x265F, 0x2660}, {0x2663, 0x2663}, + {0x2665, 0x2666}, {0x2668, 0x2668}, {0x267B, 0x267B}, + {0x267E, 0x267F}, {0x2692, 0x2697}, {0x2699, 0x2699}, + {0x269B, 0x269C}, {0x26A0, 0x26A1}, {0x26A7, 0x26A7}, + {0x26AA, 0x26AB}, {0x26B0, 0x26B1}, {0x26BD, 0x26BE}, + {0x26C4, 0x26C5}, {0x26C8, 0x26C8}, {0x26CE, 0x26CF}, + {0x26D1, 0x26D1}, {0x26D3, 0x26D4}, {0x26E9, 0x26EA}, + {0x26F0, 0x26F5}, {0x26F7, 0x26FA}, {0x26FD, 0x26FD}, + {0x2702, 0x2702}, {0x2705, 0x2705}, {0x2708, 0x270D}, + {0x270F, 0x270F}, {0x2712, 0x2712}, {0x2714, 0x2714}, + {0x2716, 0x2716}, {0x271D, 0x271D}, {0x2721, 0x2721}, + {0x2728, 0x2728}, {0x2733, 0x2734}, {0x2744, 0x2744}, + {0x2747, 0x2747}, {0x274C, 0x274C}, {0x274E, 0x274E}, + {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2763, 0x2764}, + {0x2795, 0x2797}, {0x27A1, 0x27A1}, {0x27B0, 0x27B0}, + {0x27BF, 0x27BF}, {0x2934, 0x2935}, {0x2B05, 0x2B07}, + {0x2B1B, 0x2B1C}, {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, + {0x3030, 0x3030}, {0x303D, 0x303D}, {0x3297, 0x3297}, + {0x3299, 0x3299}, {0x1F004, 0x1F004}, {0x1F02C, 0x1F02F}, + {0x1F094, 0x1F09F}, {0x1F0AF, 0x1F0B0}, {0x1F0C0, 0x1F0C0}, + {0x1F0CF, 0x1F0D0}, {0x1F0F6, 0x1F0FF}, {0x1F170, 0x1F171}, + {0x1F17E, 0x1F17F}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, + {0x1F1AE, 0x1F1E5}, {0x1F201, 0x1F20F}, {0x1F21A, 0x1F21A}, + {0x1F22F, 
0x1F22F}, {0x1F232, 0x1F23A}, {0x1F23C, 0x1F23F}, + {0x1F249, 0x1F25F}, {0x1F266, 0x1F321}, {0x1F324, 0x1F393}, + {0x1F396, 0x1F397}, {0x1F399, 0x1F39B}, {0x1F39E, 0x1F3F0}, + {0x1F3F3, 0x1F3F5}, {0x1F3F7, 0x1F3FA}, {0x1F400, 0x1F4FD}, + {0x1F4FF, 0x1F53D}, {0x1F549, 0x1F54E}, {0x1F550, 0x1F567}, + {0x1F56F, 0x1F570}, {0x1F573, 0x1F57A}, {0x1F587, 0x1F587}, + {0x1F58A, 0x1F58D}, {0x1F590, 0x1F590}, {0x1F595, 0x1F596}, + {0x1F5A4, 0x1F5A5}, {0x1F5A8, 0x1F5A8}, {0x1F5B1, 0x1F5B2}, + {0x1F5BC, 0x1F5BC}, {0x1F5C2, 0x1F5C4}, {0x1F5D1, 0x1F5D3}, + {0x1F5DC, 0x1F5DE}, {0x1F5E1, 0x1F5E1}, {0x1F5E3, 0x1F5E3}, + {0x1F5E8, 0x1F5E8}, {0x1F5EF, 0x1F5EF}, {0x1F5F3, 0x1F5F3}, + {0x1F5FA, 0x1F64F}, {0x1F680, 0x1F6C5}, {0x1F6CB, 0x1F6D2}, + {0x1F6D5, 0x1F6E5}, {0x1F6E9, 0x1F6E9}, {0x1F6EB, 0x1F6F0}, + {0x1F6F3, 0x1F6FF}, {0x1F7DA, 0x1F7FF}, {0x1F80C, 0x1F80F}, + {0x1F848, 0x1F84F}, {0x1F85A, 0x1F85F}, {0x1F888, 0x1F88F}, + {0x1F8AE, 0x1F8AF}, {0x1F8BC, 0x1F8BF}, {0x1F8C2, 0x1F8CF}, + {0x1F8D9, 0x1F8FF}, {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, + {0x1F947, 0x1F9FF}, {0x1FA58, 0x1FA5F}, {0x1FA6E, 0x1FAFF}, {0x1FC00, 0x1FFFD}, } diff --git a/vendor/github.com/miekg/pkcs11/params.go b/vendor/github.com/miekg/pkcs11/params.go index 6d9ce96ae8..f111086c37 100644 --- a/vendor/github.com/miekg/pkcs11/params.go +++ b/vendor/github.com/miekg/pkcs11/params.go @@ -26,6 +26,11 @@ static inline void putECDH1PublicParams(CK_ECDH1_DERIVE_PARAMS_PTR params, CK_VO params->pPublicData = pPublicData; params->ulPublicDataLen = ulPublicDataLen; } + +static inline void putRSAAESKeyWrapParams(CK_RSA_AES_KEY_WRAP_PARAMS_PTR params, CK_VOID_PTR pOAEPParams) +{ + params->pOAEPParams = pOAEPParams; +} */ import "C" import "unsafe" @@ -84,7 +89,7 @@ func cGCMParams(p *GCMParams) []byte { p.Free() p.arena = arena p.params = ¶ms - return C.GoBytes(unsafe.Pointer(¶ms), C.int(unsafe.Sizeof(params))) + return memBytes(unsafe.Pointer(¶ms), unsafe.Sizeof(params)) } // IV returns a copy of the actual IV used for the 
operation. @@ -121,7 +126,7 @@ func NewPSSParams(hashAlg, mgf, saltLength uint) []byte { mgf: C.CK_RSA_PKCS_MGF_TYPE(mgf), sLen: C.CK_ULONG(saltLength), } - return C.GoBytes(unsafe.Pointer(&p), C.int(unsafe.Sizeof(p))) + return memBytes(unsafe.Pointer(&p), unsafe.Sizeof(p)) } // OAEPParams can be passed to NewMechanism to implement CKM_RSA_PKCS_OAEP. @@ -153,7 +158,7 @@ func cOAEPParams(p *OAEPParams, arena arena) ([]byte, arena) { // field is unaligned on windows so this has to call into C C.putOAEPParams(¶ms, buf, len) } - return C.GoBytes(unsafe.Pointer(¶ms), C.int(unsafe.Sizeof(params))), arena + return memBytes(unsafe.Pointer(¶ms), unsafe.Sizeof(params)), arena } // ECDH1DeriveParams can be passed to NewMechanism to implement CK_ECDH1_DERIVE_PARAMS. @@ -186,5 +191,25 @@ func cECDH1DeriveParams(p *ECDH1DeriveParams, arena arena) ([]byte, arena) { publicKeyData, publicKeyDataLen := arena.Allocate(p.PublicKeyData) C.putECDH1PublicParams(¶ms, publicKeyData, publicKeyDataLen) - return C.GoBytes(unsafe.Pointer(¶ms), C.int(unsafe.Sizeof(params))), arena + return memBytes(unsafe.Pointer(¶ms), unsafe.Sizeof(params)), arena } + +type RSAAESKeyWrapParams struct { + AESKeyBits uint + OAEPParams OAEPParams +} + +func cRSAAESKeyWrapParams(p *RSAAESKeyWrapParams, arena arena) ([]byte, arena) { + var param []byte + params := C.CK_RSA_AES_KEY_WRAP_PARAMS { + ulAESKeyBits: C.CK_MECHANISM_TYPE(p.AESKeyBits), + } + + param, arena = cOAEPParams(&p.OAEPParams, arena) + if len(param) != 0 { + buf, _ := arena.Allocate(param) + C.putRSAAESKeyWrapParams(¶ms, buf) + } + return memBytes(unsafe.Pointer(¶ms), unsafe.Sizeof(params)), arena +} + diff --git a/vendor/github.com/miekg/pkcs11/pkcs11.go b/vendor/github.com/miekg/pkcs11/pkcs11.go index e1b5824ec8..8d8d4c39cd 100644 --- a/vendor/github.com/miekg/pkcs11/pkcs11.go +++ b/vendor/github.com/miekg/pkcs11/pkcs11.go @@ -5,6 +5,8 @@ //go:generate go run const_generate.go // Package pkcs11 is a wrapper around the PKCS#11 cryptographic 
library. +// Latest version of the specification: +// http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html package pkcs11 // It is *assumed*, that: @@ -104,11 +106,12 @@ void Destroy(struct ctx *c) } #endif -CK_RV Initialize(struct ctx * c) +CK_RV Initialize(struct ctx * c, CK_FLAGS flags, CK_VOID_PTR reserved) { CK_C_INITIALIZE_ARGS args; memset(&args, 0, sizeof(args)); - args.flags = CKF_OS_LOCKING_OK; + args.flags = flags; + args.pReserved = reserved; return c->sym->C_Initialize(&args); } @@ -803,9 +806,36 @@ func (c *Ctx) Destroy() { c.ctx = nil } +type initializeArgs struct { + flags uint + reserved unsafe.Pointer +} + +// An InitializeOption modifies the default behavior of Initialize. +type InitializeOption func(*initializeArgs) + +// InitializeWithFlags sets the flags field in CK_C_INITIALIZE_ARGS. +// Note that flags defaults to CKF_OS_LOCKING_OK if this option is not provided. +func InitializeWithFlags(flags uint) InitializeOption { + return func(args *initializeArgs) { + args.flags = flags + } +} + +// InitializeWithReserved sets the pReserved field in CK_C_INITIALIZE_ARGS. +func InitializeWithReserved(reserved unsafe.Pointer) InitializeOption { + return func(args *initializeArgs) { + args.reserved = reserved + } +} + // Initialize initializes the Cryptoki library. -func (c *Ctx) Initialize() error { - e := C.Initialize(c.ctx) +func (c *Ctx) Initialize(opts ...InitializeOption) error { + args := initializeArgs{flags: CKF_OS_LOCKING_OK} + for _, o := range opts { + o(&args) + } + e := C.Initialize(c.ctx, C.CK_FLAGS(args.flags), C.CK_VOID_PTR(args.reserved)) return toError(e) } diff --git a/vendor/github.com/miekg/pkcs11/release.go b/vendor/github.com/miekg/pkcs11/release.go index d8b99f147e..c9fcb0e735 100644 --- a/vendor/github.com/miekg/pkcs11/release.go +++ b/vendor/github.com/miekg/pkcs11/release.go @@ -6,7 +6,7 @@ package pkcs11 import "fmt" // Release is current version of the pkcs11 library. 
-var Release = R{1, 1, 1} +var Release = R{1, 1, 2} // R holds the version of this library. type R struct { diff --git a/vendor/github.com/miekg/pkcs11/types.go b/vendor/github.com/miekg/pkcs11/types.go index 60eadcb71b..d3bfce80da 100644 --- a/vendor/github.com/miekg/pkcs11/types.go +++ b/vendor/github.com/miekg/pkcs11/types.go @@ -53,7 +53,7 @@ func toList(clist C.CK_ULONG_PTR, size C.CK_ULONG) []uint { for i := 0; i < len(l); i++ { l[i] = uint(C.Index(clist, C.CK_ULONG(i))) } - defer C.free(unsafe.Pointer(clist)) + C.free(unsafe.Pointer(clist)) return l } @@ -65,9 +65,15 @@ func cBBool(x bool) C.CK_BBOOL { return C.CK_BBOOL(C.CK_FALSE) } +// memBytes returns a byte slice that references an arbitrary memory area +func memBytes(p unsafe.Pointer, len uintptr) []byte { + const maxIndex int32 = (1 << 31) - 1 + return (*([maxIndex]byte))(p)[:len:len] +} + func uintToBytes(x uint64) []byte { ul := C.CK_ULONG(x) - return C.GoBytes(unsafe.Pointer(&ul), C.int(unsafe.Sizeof(ul))) + return memBytes(unsafe.Pointer(&ul), unsafe.Sizeof(ul)) } // Error represents an PKCS#11 error. 
@@ -255,13 +261,14 @@ func NewMechanism(mech uint, x interface{}) *Mechanism { } switch p := x.(type) { - case *GCMParams, *OAEPParams, *ECDH1DeriveParams: + case *GCMParams, *OAEPParams, *ECDH1DeriveParams, *RSAAESKeyWrapParams: // contains pointers; defer serialization until cMechanism m.generator = p case []byte: m.Parameter = p default: - panic("parameter must be one of type: []byte, *GCMParams, *OAEPParams, *ECDH1DeriveParams") + panic("parameter must be one of type: []byte, *GCMParams, *OAEPParams, *ECDH1DeriveParams," + + " *RSAAESKeyWrapParams") } return m @@ -284,6 +291,8 @@ func cMechanism(mechList []*Mechanism) (arena, *C.CK_MECHANISM) { param, arena = cOAEPParams(p, arena) case *ECDH1DeriveParams: param, arena = cECDH1DeriveParams(p, arena) + case *RSAAESKeyWrapParams: + param, arena = cRSAAESKeyWrapParams(p, arena) } if len(param) != 0 { buf, len := arena.Allocate(param) diff --git a/vendor/github.com/miekg/pkcs11/vendor.go b/vendor/github.com/miekg/pkcs11/vendor.go index 83188e5001..5132dc4f07 100644 --- a/vendor/github.com/miekg/pkcs11/vendor.go +++ b/vendor/github.com/miekg/pkcs11/vendor.go @@ -10,12 +10,12 @@ const ( // Vendor specific mechanisms for HMAC on Ncipher HSMs where Ncipher does not allow use of generic_secret keys. 
const ( - CKM_NC_SHA_1_HMAC_KEY_GEN = CKM_NCIPHER + 0x3 /* no params */ - CKM_NC_MD5_HMAC_KEY_GEN = CKM_NCIPHER + 0x6 /* no params */ - CKM_NC_SHA224_HMAC_KEY_GEN = CKM_NCIPHER + 0x24 /* no params */ - CKM_NC_SHA256_HMAC_KEY_GEN = CKM_NCIPHER + 0x25 /* no params */ - CKM_NC_SHA384_HMAC_KEY_GEN = CKM_NCIPHER + 0x26 /* no params */ - CKM_NC_SHA512_HMAC_KEY_GEN = CKM_NCIPHER + 0x27 /* no params */ + CKM_NC_SHA_1_HMAC_KEY_GEN = CKM_NCIPHER + 0x3 // no params + CKM_NC_MD5_HMAC_KEY_GEN = CKM_NCIPHER + 0x6 // no params + CKM_NC_SHA224_HMAC_KEY_GEN = CKM_NCIPHER + 0x24 // no params + CKM_NC_SHA256_HMAC_KEY_GEN = CKM_NCIPHER + 0x25 // no params + CKM_NC_SHA384_HMAC_KEY_GEN = CKM_NCIPHER + 0x26 // no params + CKM_NC_SHA512_HMAC_KEY_GEN = CKM_NCIPHER + 0x27 // no params ) // Vendor specific range for Mozilla NSS. @@ -67,6 +67,8 @@ const ( CKA_NSS_JPAKE_X2 = CKA_NSS + 32 CKA_NSS_JPAKE_X2S = CKA_NSS + 33 CKA_NSS_MOZILLA_CA_POLICY = CKA_NSS + 34 + CKA_NSS_SERVER_DISTRUST_AFTER = CKA_NSS + 35 + CKA_NSS_EMAIL_DISTRUST_AFTER = CKA_NSS + 36 CKA_TRUST_DIGITAL_SIGNATURE = CKA_TRUST + 1 CKA_TRUST_NON_REPUDIATION = CKA_TRUST + 2 CKA_TRUST_KEY_ENCIPHERMENT = CKA_TRUST + 3 diff --git a/vendor/github.com/miekg/pkcs11/zconst.go b/vendor/github.com/miekg/pkcs11/zconst.go index 41df5cfcf0..164054decc 100644 --- a/vendor/github.com/miekg/pkcs11/zconst.go +++ b/vendor/github.com/miekg/pkcs11/zconst.go @@ -7,107 +7,199 @@ package pkcs11 const ( - CK_TRUE = 1 - CK_FALSE = 0 - CK_UNAVAILABLE_INFORMATION = ^uint(0) - CK_EFFECTIVELY_INFINITE = 0 - CK_INVALID_HANDLE = 0 - CKN_SURRENDER = 0 - CKN_OTP_CHANGED = 1 - CKF_TOKEN_PRESENT = 0x00000001 - CKF_REMOVABLE_DEVICE = 0x00000002 - CKF_HW_SLOT = 0x00000004 - CKF_RNG = 0x00000001 - CKF_WRITE_PROTECTED = 0x00000002 - CKF_LOGIN_REQUIRED = 0x00000004 - CKF_USER_PIN_INITIALIZED = 0x00000008 - CKF_RESTORE_KEY_NOT_NEEDED = 0x00000020 - CKF_CLOCK_ON_TOKEN = 0x00000040 - CKF_PROTECTED_AUTHENTICATION_PATH = 0x00000100 - CKF_DUAL_CRYPTO_OPERATIONS = 0x00000200 - 
CKF_TOKEN_INITIALIZED = 0x00000400 - CKF_SECONDARY_AUTHENTICATION = 0x00000800 - CKF_USER_PIN_COUNT_LOW = 0x00010000 - CKF_USER_PIN_FINAL_TRY = 0x00020000 - CKF_USER_PIN_LOCKED = 0x00040000 - CKF_USER_PIN_TO_BE_CHANGED = 0x00080000 - CKF_SO_PIN_COUNT_LOW = 0x00100000 - CKF_SO_PIN_FINAL_TRY = 0x00200000 - CKF_SO_PIN_LOCKED = 0x00400000 - CKF_SO_PIN_TO_BE_CHANGED = 0x00800000 - CKF_ERROR_STATE = 0x01000000 - CKU_SO = 0 - CKU_USER = 1 - CKU_CONTEXT_SPECIFIC = 2 - CKS_RO_PUBLIC_SESSION = 0 - CKS_RO_USER_FUNCTIONS = 1 - CKS_RW_PUBLIC_SESSION = 2 - CKS_RW_USER_FUNCTIONS = 3 - CKS_RW_SO_FUNCTIONS = 4 - CKF_RW_SESSION = 0x00000002 - CKF_SERIAL_SESSION = 0x00000004 - CKO_DATA = 0x00000000 - CKO_CERTIFICATE = 0x00000001 - CKO_PUBLIC_KEY = 0x00000002 - CKO_PRIVATE_KEY = 0x00000003 - CKO_SECRET_KEY = 0x00000004 - CKO_HW_FEATURE = 0x00000005 - CKO_DOMAIN_PARAMETERS = 0x00000006 - CKO_MECHANISM = 0x00000007 - CKO_OTP_KEY = 0x00000008 - CKO_VENDOR_DEFINED = 0x80000000 - CKH_MONOTONIC_COUNTER = 0x00000001 - CKH_CLOCK = 0x00000002 - CKH_USER_INTERFACE = 0x00000003 - CKH_VENDOR_DEFINED = 0x80000000 - CKK_RSA = 0x00000000 - CKK_DSA = 0x00000001 - CKK_DH = 0x00000002 - CKK_ECDSA = 0x00000003 // Deprecated - CKK_EC = 0x00000003 - CKK_X9_42_DH = 0x00000004 - CKK_KEA = 0x00000005 - CKK_GENERIC_SECRET = 0x00000010 - CKK_RC2 = 0x00000011 - CKK_RC4 = 0x00000012 - CKK_DES = 0x00000013 - CKK_DES2 = 0x00000014 - CKK_DES3 = 0x00000015 - CKK_CAST = 0x00000016 - CKK_CAST3 = 0x00000017 - CKK_CAST5 = 0x00000018 // Deprecated - CKK_CAST128 = 0x00000018 - CKK_RC5 = 0x00000019 - CKK_IDEA = 0x0000001A - CKK_SKIPJACK = 0x0000001B - CKK_BATON = 0x0000001C - CKK_JUNIPER = 0x0000001D - CKK_CDMF = 0x0000001E - CKK_AES = 0x0000001F - CKK_BLOWFISH = 0x00000020 - CKK_TWOFISH = 0x00000021 - CKK_SECURID = 0x00000022 - CKK_HOTP = 0x00000023 - CKK_ACTI = 0x00000024 - CKK_CAMELLIA = 0x00000025 - CKK_ARIA = 0x00000026 - CKK_MD5_HMAC = 0x00000027 - CKK_SHA_1_HMAC = 0x00000028 - CKK_RIPEMD128_HMAC = 0x00000029 - 
CKK_RIPEMD160_HMAC = 0x0000002A - CKK_SHA256_HMAC = 0x0000002B - CKK_SHA384_HMAC = 0x0000002C - CKK_SHA512_HMAC = 0x0000002D - CKK_SHA224_HMAC = 0x0000002E - CKK_SEED = 0x0000002F - CKK_GOSTR3410 = 0x00000030 - CKK_GOSTR3411 = 0x00000031 - CKK_GOST28147 = 0x00000032 - CKK_SHA3_224_HMAC = 0x00000033 - CKK_SHA3_256_HMAC = 0x00000034 - CKK_SHA3_384_HMAC = 0x00000035 - CKK_SHA3_512_HMAC = 0x00000036 - CKK_VENDOR_DEFINED = 0x80000000 + CK_TRUE = true + CK_FALSE = false + + // some special values for certain CK_ULONG variables + CK_UNAVAILABLE_INFORMATION = ^uint(0) + CK_EFFECTIVELY_INFINITE = 0 + + // The following value is always invalid if used as a session + // handle or object handle + CK_INVALID_HANDLE = 0 + + CKN_SURRENDER = 0 + CKN_OTP_CHANGED = 1 + + // flags: bit flags that provide capabilities of the slot + // + // Bit Flag Mask Meaning + CKF_TOKEN_PRESENT = 0x00000001 // a token is there + CKF_REMOVABLE_DEVICE = 0x00000002 // removable devices + CKF_HW_SLOT = 0x00000004 // hardware slot + + // The flags parameter is defined as follows: + // + // Bit Flag Mask Meaning + CKF_RNG = 0x00000001 // has random # generator + CKF_WRITE_PROTECTED = 0x00000002 // token is write-protected + CKF_LOGIN_REQUIRED = 0x00000004 // user must login + CKF_USER_PIN_INITIALIZED = 0x00000008 // normal user's PIN is set + + // CKF_RESTORE_KEY_NOT_NEEDED. If it is set, + // that means that *every* time the state of cryptographic + // operations of a session is successfully saved, all keys + // needed to continue those operations are stored in the state + CKF_RESTORE_KEY_NOT_NEEDED = 0x00000020 + + // CKF_CLOCK_ON_TOKEN. If it is set, that means + // that the token has some sort of clock. The time on that + // clock is returned in the token info structure + CKF_CLOCK_ON_TOKEN = 0x00000040 + + // CKF_PROTECTED_AUTHENTICATION_PATH. 
If it is + // set, that means that there is some way for the user to login + // without sending a PIN through the Cryptoki library itself + CKF_PROTECTED_AUTHENTICATION_PATH = 0x00000100 + + // CKF_DUAL_CRYPTO_OPERATIONS. If it is true, + // that means that a single session with the token can perform + // dual simultaneous cryptographic operations (digest and + // encrypt; decrypt and digest; sign and encrypt; and decrypt + // and sign) + CKF_DUAL_CRYPTO_OPERATIONS = 0x00000200 + + // CKF_TOKEN_INITIALIZED. If it is true, the + // token has been initialized using C_InitializeToken or an + // equivalent mechanism outside the scope of PKCS #11. + // Calling C_InitializeToken when this flag is set will cause + // the token to be reinitialized. + CKF_TOKEN_INITIALIZED = 0x00000400 + + // CKF_SECONDARY_AUTHENTICATION. If it is + // true, the token supports secondary authentication for + // private key objects. + CKF_SECONDARY_AUTHENTICATION = 0x00000800 + + // CKF_USER_PIN_COUNT_LOW. If it is true, an + // incorrect user login PIN has been entered at least once + // since the last successful authentication. + CKF_USER_PIN_COUNT_LOW = 0x00010000 + + // CKF_USER_PIN_FINAL_TRY. If it is true, + // supplying an incorrect user PIN will it to become locked. + CKF_USER_PIN_FINAL_TRY = 0x00020000 + + // CKF_USER_PIN_LOCKED. If it is true, the + // user PIN has been locked. User login to the token is not + // possible. + CKF_USER_PIN_LOCKED = 0x00040000 + + // CKF_USER_PIN_TO_BE_CHANGED. If it is true, + // the user PIN value is the default value set by token + // initialization or manufacturing, or the PIN has been + // expired by the card. + CKF_USER_PIN_TO_BE_CHANGED = 0x00080000 + + // CKF_SO_PIN_COUNT_LOW. If it is true, an + // incorrect SO login PIN has been entered at least once since + // the last successful authentication. + CKF_SO_PIN_COUNT_LOW = 0x00100000 + + // CKF_SO_PIN_FINAL_TRY. If it is true, + // supplying an incorrect SO PIN will it to become locked. 
+ CKF_SO_PIN_FINAL_TRY = 0x00200000 + + // CKF_SO_PIN_LOCKED. If it is true, the SO + // PIN has been locked. SO login to the token is not possible. + CKF_SO_PIN_LOCKED = 0x00400000 + + // CKF_SO_PIN_TO_BE_CHANGED. If it is true, + // the SO PIN value is the default value set by token + // initialization or manufacturing, or the PIN has been + // expired by the card. + CKF_SO_PIN_TO_BE_CHANGED = 0x00800000 + CKF_ERROR_STATE = 0x01000000 + + // Security Officer + CKU_SO = 0 + + // Normal user + CKU_USER = 1 + + // Context specific + CKU_CONTEXT_SPECIFIC = 2 + + CKS_RO_PUBLIC_SESSION = 0 + CKS_RO_USER_FUNCTIONS = 1 + CKS_RW_PUBLIC_SESSION = 2 + CKS_RW_USER_FUNCTIONS = 3 + CKS_RW_SO_FUNCTIONS = 4 + + // The flags are defined in the following table: + // + // Bit Flag Mask Meaning + CKF_RW_SESSION = 0x00000002 // session is r/w + CKF_SERIAL_SESSION = 0x00000004 // no parallel + + // The following classes of objects are defined: + CKO_DATA = 0x00000000 + CKO_CERTIFICATE = 0x00000001 + CKO_PUBLIC_KEY = 0x00000002 + CKO_PRIVATE_KEY = 0x00000003 + CKO_SECRET_KEY = 0x00000004 + CKO_HW_FEATURE = 0x00000005 + CKO_DOMAIN_PARAMETERS = 0x00000006 + CKO_MECHANISM = 0x00000007 + CKO_OTP_KEY = 0x00000008 + CKO_VENDOR_DEFINED = 0x80000000 + + // The following hardware feature types are defined + CKH_MONOTONIC_COUNTER = 0x00000001 + CKH_CLOCK = 0x00000002 + CKH_USER_INTERFACE = 0x00000003 + CKH_VENDOR_DEFINED = 0x80000000 + + // the following key types are defined: + CKK_RSA = 0x00000000 + CKK_DSA = 0x00000001 + CKK_DH = 0x00000002 + CKK_ECDSA = 0x00000003 // Deprecated + CKK_EC = 0x00000003 + CKK_X9_42_DH = 0x00000004 + CKK_KEA = 0x00000005 + CKK_GENERIC_SECRET = 0x00000010 + CKK_RC2 = 0x00000011 + CKK_RC4 = 0x00000012 + CKK_DES = 0x00000013 + CKK_DES2 = 0x00000014 + CKK_DES3 = 0x00000015 + CKK_CAST = 0x00000016 + CKK_CAST3 = 0x00000017 + CKK_CAST5 = 0x00000018 // Deprecated + CKK_CAST128 = 0x00000018 + CKK_RC5 = 0x00000019 + CKK_IDEA = 0x0000001A + CKK_SKIPJACK = 0x0000001B + 
CKK_BATON = 0x0000001C + CKK_JUNIPER = 0x0000001D + CKK_CDMF = 0x0000001E + CKK_AES = 0x0000001F + CKK_BLOWFISH = 0x00000020 + CKK_TWOFISH = 0x00000021 + CKK_SECURID = 0x00000022 + CKK_HOTP = 0x00000023 + CKK_ACTI = 0x00000024 + CKK_CAMELLIA = 0x00000025 + CKK_ARIA = 0x00000026 + CKK_MD5_HMAC = 0x00000027 + CKK_SHA_1_HMAC = 0x00000028 + CKK_RIPEMD128_HMAC = 0x00000029 + CKK_RIPEMD160_HMAC = 0x0000002A + CKK_SHA256_HMAC = 0x0000002B + CKK_SHA384_HMAC = 0x0000002C + CKK_SHA512_HMAC = 0x0000002D + CKK_SHA224_HMAC = 0x0000002E + CKK_SEED = 0x0000002F + CKK_GOSTR3410 = 0x00000030 + CKK_GOSTR3411 = 0x00000031 + CKK_GOST28147 = 0x00000032 + CKK_SHA3_224_HMAC = 0x00000033 + CKK_SHA3_256_HMAC = 0x00000034 + CKK_SHA3_384_HMAC = 0x00000035 + CKK_SHA3_512_HMAC = 0x00000036 + CKK_VENDOR_DEFINED = 0x80000000 + CK_CERTIFICATE_CATEGORY_UNSPECIFIED = 0 CK_CERTIFICATE_CATEGORY_TOKEN_USER = 1 CK_CERTIFICATE_CATEGORY_AUTHORITY = 2 @@ -116,513 +208,539 @@ const ( CK_SECURITY_DOMAIN_MANUFACTURER = 1 CK_SECURITY_DOMAIN_OPERATOR = 2 CK_SECURITY_DOMAIN_THIRD_PARTY = 3 - CKC_X_509 = 0x00000000 - CKC_X_509_ATTR_CERT = 0x00000001 - CKC_WTLS = 0x00000002 - CKC_VENDOR_DEFINED = 0x80000000 - CKF_ARRAY_ATTRIBUTE = 0x40000000 - CK_OTP_FORMAT_DECIMAL = 0 - CK_OTP_FORMAT_HEXADECIMAL = 1 - CK_OTP_FORMAT_ALPHANUMERIC = 2 - CK_OTP_FORMAT_BINARY = 3 - CK_OTP_PARAM_IGNORED = 0 - CK_OTP_PARAM_OPTIONAL = 1 - CK_OTP_PARAM_MANDATORY = 2 - CKA_CLASS = 0x00000000 - CKA_TOKEN = 0x00000001 - CKA_PRIVATE = 0x00000002 - CKA_LABEL = 0x00000003 - CKA_APPLICATION = 0x00000010 - CKA_VALUE = 0x00000011 - CKA_OBJECT_ID = 0x00000012 - CKA_CERTIFICATE_TYPE = 0x00000080 - CKA_ISSUER = 0x00000081 - CKA_SERIAL_NUMBER = 0x00000082 - CKA_AC_ISSUER = 0x00000083 - CKA_OWNER = 0x00000084 - CKA_ATTR_TYPES = 0x00000085 - CKA_TRUSTED = 0x00000086 - CKA_CERTIFICATE_CATEGORY = 0x00000087 - CKA_JAVA_MIDP_SECURITY_DOMAIN = 0x00000088 - CKA_URL = 0x00000089 - CKA_HASH_OF_SUBJECT_PUBLIC_KEY = 0x0000008A - CKA_HASH_OF_ISSUER_PUBLIC_KEY = 
0x0000008B - CKA_NAME_HASH_ALGORITHM = 0x0000008C - CKA_CHECK_VALUE = 0x00000090 - CKA_KEY_TYPE = 0x00000100 - CKA_SUBJECT = 0x00000101 - CKA_ID = 0x00000102 - CKA_SENSITIVE = 0x00000103 - CKA_ENCRYPT = 0x00000104 - CKA_DECRYPT = 0x00000105 - CKA_WRAP = 0x00000106 - CKA_UNWRAP = 0x00000107 - CKA_SIGN = 0x00000108 - CKA_SIGN_RECOVER = 0x00000109 - CKA_VERIFY = 0x0000010A - CKA_VERIFY_RECOVER = 0x0000010B - CKA_DERIVE = 0x0000010C - CKA_START_DATE = 0x00000110 - CKA_END_DATE = 0x00000111 - CKA_MODULUS = 0x00000120 - CKA_MODULUS_BITS = 0x00000121 - CKA_PUBLIC_EXPONENT = 0x00000122 - CKA_PRIVATE_EXPONENT = 0x00000123 - CKA_PRIME_1 = 0x00000124 - CKA_PRIME_2 = 0x00000125 - CKA_EXPONENT_1 = 0x00000126 - CKA_EXPONENT_2 = 0x00000127 - CKA_COEFFICIENT = 0x00000128 - CKA_PUBLIC_KEY_INFO = 0x00000129 - CKA_PRIME = 0x00000130 - CKA_SUBPRIME = 0x00000131 - CKA_BASE = 0x00000132 - CKA_PRIME_BITS = 0x00000133 - CKA_SUBPRIME_BITS = 0x00000134 - CKA_SUB_PRIME_BITS = CKA_SUBPRIME_BITS - CKA_VALUE_BITS = 0x00000160 - CKA_VALUE_LEN = 0x00000161 - CKA_EXTRACTABLE = 0x00000162 - CKA_LOCAL = 0x00000163 - CKA_NEVER_EXTRACTABLE = 0x00000164 - CKA_ALWAYS_SENSITIVE = 0x00000165 - CKA_KEY_GEN_MECHANISM = 0x00000166 - CKA_MODIFIABLE = 0x00000170 - CKA_COPYABLE = 0x00000171 - CKA_DESTROYABLE = 0x00000172 - CKA_ECDSA_PARAMS = 0x00000180 // Deprecated - CKA_EC_PARAMS = 0x00000180 - CKA_EC_POINT = 0x00000181 - CKA_SECONDARY_AUTH = 0x00000200 // Deprecated - CKA_AUTH_PIN_FLAGS = 0x00000201 // Deprecated - CKA_ALWAYS_AUTHENTICATE = 0x00000202 - CKA_WRAP_WITH_TRUSTED = 0x00000210 - CKA_WRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000211) - CKA_UNWRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000212) - CKA_DERIVE_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000213) - CKA_OTP_FORMAT = 0x00000220 - CKA_OTP_LENGTH = 0x00000221 - CKA_OTP_TIME_INTERVAL = 0x00000222 - CKA_OTP_USER_FRIENDLY_MODE = 0x00000223 - CKA_OTP_CHALLENGE_REQUIREMENT = 0x00000224 - CKA_OTP_TIME_REQUIREMENT = 0x00000225 - CKA_OTP_COUNTER_REQUIREMENT 
= 0x00000226 - CKA_OTP_PIN_REQUIREMENT = 0x00000227 - CKA_OTP_COUNTER = 0x0000022E - CKA_OTP_TIME = 0x0000022F - CKA_OTP_USER_IDENTIFIER = 0x0000022A - CKA_OTP_SERVICE_IDENTIFIER = 0x0000022B - CKA_OTP_SERVICE_LOGO = 0x0000022C - CKA_OTP_SERVICE_LOGO_TYPE = 0x0000022D - CKA_GOSTR3410_PARAMS = 0x00000250 - CKA_GOSTR3411_PARAMS = 0x00000251 - CKA_GOST28147_PARAMS = 0x00000252 - CKA_HW_FEATURE_TYPE = 0x00000300 - CKA_RESET_ON_INIT = 0x00000301 - CKA_HAS_RESET = 0x00000302 - CKA_PIXEL_X = 0x00000400 - CKA_PIXEL_Y = 0x00000401 - CKA_RESOLUTION = 0x00000402 - CKA_CHAR_ROWS = 0x00000403 - CKA_CHAR_COLUMNS = 0x00000404 - CKA_COLOR = 0x00000405 - CKA_BITS_PER_PIXEL = 0x00000406 - CKA_CHAR_SETS = 0x00000480 - CKA_ENCODING_METHODS = 0x00000481 - CKA_MIME_TYPES = 0x00000482 - CKA_MECHANISM_TYPE = 0x00000500 - CKA_REQUIRED_CMS_ATTRIBUTES = 0x00000501 - CKA_DEFAULT_CMS_ATTRIBUTES = 0x00000502 - CKA_SUPPORTED_CMS_ATTRIBUTES = 0x00000503 - CKA_ALLOWED_MECHANISMS = (CKF_ARRAY_ATTRIBUTE | 0x00000600) - CKA_VENDOR_DEFINED = 0x80000000 - CKM_RSA_PKCS_KEY_PAIR_GEN = 0x00000000 - CKM_RSA_PKCS = 0x00000001 - CKM_RSA_9796 = 0x00000002 - CKM_RSA_X_509 = 0x00000003 - CKM_MD2_RSA_PKCS = 0x00000004 - CKM_MD5_RSA_PKCS = 0x00000005 - CKM_SHA1_RSA_PKCS = 0x00000006 - CKM_RIPEMD128_RSA_PKCS = 0x00000007 - CKM_RIPEMD160_RSA_PKCS = 0x00000008 - CKM_RSA_PKCS_OAEP = 0x00000009 - CKM_RSA_X9_31_KEY_PAIR_GEN = 0x0000000A - CKM_RSA_X9_31 = 0x0000000B - CKM_SHA1_RSA_X9_31 = 0x0000000C - CKM_RSA_PKCS_PSS = 0x0000000D - CKM_SHA1_RSA_PKCS_PSS = 0x0000000E - CKM_DSA_KEY_PAIR_GEN = 0x00000010 - CKM_DSA = 0x00000011 - CKM_DSA_SHA1 = 0x00000012 - CKM_DSA_SHA224 = 0x00000013 - CKM_DSA_SHA256 = 0x00000014 - CKM_DSA_SHA384 = 0x00000015 - CKM_DSA_SHA512 = 0x00000016 - CKM_DSA_SHA3_224 = 0x00000018 - CKM_DSA_SHA3_256 = 0x00000019 - CKM_DSA_SHA3_384 = 0x0000001A - CKM_DSA_SHA3_512 = 0x0000001B - CKM_DH_PKCS_KEY_PAIR_GEN = 0x00000020 - CKM_DH_PKCS_DERIVE = 0x00000021 - CKM_X9_42_DH_KEY_PAIR_GEN = 0x00000030 - 
CKM_X9_42_DH_DERIVE = 0x00000031 - CKM_X9_42_DH_HYBRID_DERIVE = 0x00000032 - CKM_X9_42_MQV_DERIVE = 0x00000033 - CKM_SHA256_RSA_PKCS = 0x00000040 - CKM_SHA384_RSA_PKCS = 0x00000041 - CKM_SHA512_RSA_PKCS = 0x00000042 - CKM_SHA256_RSA_PKCS_PSS = 0x00000043 - CKM_SHA384_RSA_PKCS_PSS = 0x00000044 - CKM_SHA512_RSA_PKCS_PSS = 0x00000045 - CKM_SHA224_RSA_PKCS = 0x00000046 - CKM_SHA224_RSA_PKCS_PSS = 0x00000047 - CKM_SHA512_224 = 0x00000048 - CKM_SHA512_224_HMAC = 0x00000049 - CKM_SHA512_224_HMAC_GENERAL = 0x0000004A - CKM_SHA512_224_KEY_DERIVATION = 0x0000004B - CKM_SHA512_256 = 0x0000004C - CKM_SHA512_256_HMAC = 0x0000004D - CKM_SHA512_256_HMAC_GENERAL = 0x0000004E - CKM_SHA512_256_KEY_DERIVATION = 0x0000004F - CKM_SHA512_T = 0x00000050 - CKM_SHA512_T_HMAC = 0x00000051 - CKM_SHA512_T_HMAC_GENERAL = 0x00000052 - CKM_SHA512_T_KEY_DERIVATION = 0x00000053 - CKM_SHA3_256_RSA_PKCS = 0x00000060 - CKM_SHA3_384_RSA_PKCS = 0x00000061 - CKM_SHA3_512_RSA_PKCS = 0x00000062 - CKM_SHA3_256_RSA_PKCS_PSS = 0x00000063 - CKM_SHA3_384_RSA_PKCS_PSS = 0x00000064 - CKM_SHA3_512_RSA_PKCS_PSS = 0x00000065 - CKM_SHA3_224_RSA_PKCS = 0x00000066 - CKM_SHA3_224_RSA_PKCS_PSS = 0x00000067 - CKM_RC2_KEY_GEN = 0x00000100 - CKM_RC2_ECB = 0x00000101 - CKM_RC2_CBC = 0x00000102 - CKM_RC2_MAC = 0x00000103 - CKM_RC2_MAC_GENERAL = 0x00000104 - CKM_RC2_CBC_PAD = 0x00000105 - CKM_RC4_KEY_GEN = 0x00000110 - CKM_RC4 = 0x00000111 - CKM_DES_KEY_GEN = 0x00000120 - CKM_DES_ECB = 0x00000121 - CKM_DES_CBC = 0x00000122 - CKM_DES_MAC = 0x00000123 - CKM_DES_MAC_GENERAL = 0x00000124 - CKM_DES_CBC_PAD = 0x00000125 - CKM_DES2_KEY_GEN = 0x00000130 - CKM_DES3_KEY_GEN = 0x00000131 - CKM_DES3_ECB = 0x00000132 - CKM_DES3_CBC = 0x00000133 - CKM_DES3_MAC = 0x00000134 - CKM_DES3_MAC_GENERAL = 0x00000135 - CKM_DES3_CBC_PAD = 0x00000136 - CKM_DES3_CMAC_GENERAL = 0x00000137 - CKM_DES3_CMAC = 0x00000138 - CKM_CDMF_KEY_GEN = 0x00000140 - CKM_CDMF_ECB = 0x00000141 - CKM_CDMF_CBC = 0x00000142 - CKM_CDMF_MAC = 0x00000143 - 
CKM_CDMF_MAC_GENERAL = 0x00000144 - CKM_CDMF_CBC_PAD = 0x00000145 - CKM_DES_OFB64 = 0x00000150 - CKM_DES_OFB8 = 0x00000151 - CKM_DES_CFB64 = 0x00000152 - CKM_DES_CFB8 = 0x00000153 - CKM_MD2 = 0x00000200 - CKM_MD2_HMAC = 0x00000201 - CKM_MD2_HMAC_GENERAL = 0x00000202 - CKM_MD5 = 0x00000210 - CKM_MD5_HMAC = 0x00000211 - CKM_MD5_HMAC_GENERAL = 0x00000212 - CKM_SHA_1 = 0x00000220 - CKM_SHA_1_HMAC = 0x00000221 - CKM_SHA_1_HMAC_GENERAL = 0x00000222 - CKM_RIPEMD128 = 0x00000230 - CKM_RIPEMD128_HMAC = 0x00000231 - CKM_RIPEMD128_HMAC_GENERAL = 0x00000232 - CKM_RIPEMD160 = 0x00000240 - CKM_RIPEMD160_HMAC = 0x00000241 - CKM_RIPEMD160_HMAC_GENERAL = 0x00000242 - CKM_SHA256 = 0x00000250 - CKM_SHA256_HMAC = 0x00000251 - CKM_SHA256_HMAC_GENERAL = 0x00000252 - CKM_SHA224 = 0x00000255 - CKM_SHA224_HMAC = 0x00000256 - CKM_SHA224_HMAC_GENERAL = 0x00000257 - CKM_SHA384 = 0x00000260 - CKM_SHA384_HMAC = 0x00000261 - CKM_SHA384_HMAC_GENERAL = 0x00000262 - CKM_SHA512 = 0x00000270 - CKM_SHA512_HMAC = 0x00000271 - CKM_SHA512_HMAC_GENERAL = 0x00000272 - CKM_SECURID_KEY_GEN = 0x00000280 - CKM_SECURID = 0x00000282 - CKM_HOTP_KEY_GEN = 0x00000290 - CKM_HOTP = 0x00000291 - CKM_ACTI = 0x000002A0 - CKM_ACTI_KEY_GEN = 0x000002A1 - CKM_SHA3_256 = 0x000002B0 - CKM_SHA3_256_HMAC = 0x000002B1 - CKM_SHA3_256_HMAC_GENERAL = 0x000002B2 - CKM_SHA3_256_KEY_GEN = 0x000002B3 - CKM_SHA3_224 = 0x000002B5 - CKM_SHA3_224_HMAC = 0x000002B6 - CKM_SHA3_224_HMAC_GENERAL = 0x000002B7 - CKM_SHA3_224_KEY_GEN = 0x000002B8 - CKM_SHA3_384 = 0x000002C0 - CKM_SHA3_384_HMAC = 0x000002C1 - CKM_SHA3_384_HMAC_GENERAL = 0x000002C2 - CKM_SHA3_384_KEY_GEN = 0x000002C3 - CKM_SHA3_512 = 0x000002D0 - CKM_SHA3_512_HMAC = 0x000002D1 - CKM_SHA3_512_HMAC_GENERAL = 0x000002D2 - CKM_SHA3_512_KEY_GEN = 0x000002D3 - CKM_CAST_KEY_GEN = 0x00000300 - CKM_CAST_ECB = 0x00000301 - CKM_CAST_CBC = 0x00000302 - CKM_CAST_MAC = 0x00000303 - CKM_CAST_MAC_GENERAL = 0x00000304 - CKM_CAST_CBC_PAD = 0x00000305 - CKM_CAST3_KEY_GEN = 0x00000310 - CKM_CAST3_ECB 
= 0x00000311 - CKM_CAST3_CBC = 0x00000312 - CKM_CAST3_MAC = 0x00000313 - CKM_CAST3_MAC_GENERAL = 0x00000314 - CKM_CAST3_CBC_PAD = 0x00000315 - CKM_CAST5_KEY_GEN = 0x00000320 - CKM_CAST128_KEY_GEN = 0x00000320 - CKM_CAST5_ECB = 0x00000321 - CKM_CAST128_ECB = 0x00000321 - CKM_CAST5_CBC = 0x00000322 // Deprecated - CKM_CAST128_CBC = 0x00000322 - CKM_CAST5_MAC = 0x00000323 // Deprecated - CKM_CAST128_MAC = 0x00000323 - CKM_CAST5_MAC_GENERAL = 0x00000324 // Deprecated - CKM_CAST128_MAC_GENERAL = 0x00000324 - CKM_CAST5_CBC_PAD = 0x00000325 // Deprecated - CKM_CAST128_CBC_PAD = 0x00000325 - CKM_RC5_KEY_GEN = 0x00000330 - CKM_RC5_ECB = 0x00000331 - CKM_RC5_CBC = 0x00000332 - CKM_RC5_MAC = 0x00000333 - CKM_RC5_MAC_GENERAL = 0x00000334 - CKM_RC5_CBC_PAD = 0x00000335 - CKM_IDEA_KEY_GEN = 0x00000340 - CKM_IDEA_ECB = 0x00000341 - CKM_IDEA_CBC = 0x00000342 - CKM_IDEA_MAC = 0x00000343 - CKM_IDEA_MAC_GENERAL = 0x00000344 - CKM_IDEA_CBC_PAD = 0x00000345 - CKM_GENERIC_SECRET_KEY_GEN = 0x00000350 - CKM_CONCATENATE_BASE_AND_KEY = 0x00000360 - CKM_CONCATENATE_BASE_AND_DATA = 0x00000362 - CKM_CONCATENATE_DATA_AND_BASE = 0x00000363 - CKM_XOR_BASE_AND_DATA = 0x00000364 - CKM_EXTRACT_KEY_FROM_KEY = 0x00000365 - CKM_SSL3_PRE_MASTER_KEY_GEN = 0x00000370 - CKM_SSL3_MASTER_KEY_DERIVE = 0x00000371 - CKM_SSL3_KEY_AND_MAC_DERIVE = 0x00000372 - CKM_SSL3_MASTER_KEY_DERIVE_DH = 0x00000373 - CKM_TLS_PRE_MASTER_KEY_GEN = 0x00000374 - CKM_TLS_MASTER_KEY_DERIVE = 0x00000375 - CKM_TLS_KEY_AND_MAC_DERIVE = 0x00000376 - CKM_TLS_MASTER_KEY_DERIVE_DH = 0x00000377 - CKM_TLS_PRF = 0x00000378 - CKM_SSL3_MD5_MAC = 0x00000380 - CKM_SSL3_SHA1_MAC = 0x00000381 - CKM_MD5_KEY_DERIVATION = 0x00000390 - CKM_MD2_KEY_DERIVATION = 0x00000391 - CKM_SHA1_KEY_DERIVATION = 0x00000392 - CKM_SHA256_KEY_DERIVATION = 0x00000393 - CKM_SHA384_KEY_DERIVATION = 0x00000394 - CKM_SHA512_KEY_DERIVATION = 0x00000395 - CKM_SHA224_KEY_DERIVATION = 0x00000396 - CKM_SHA3_256_KEY_DERIVE = 0x00000397 - CKM_SHA3_224_KEY_DERIVE = 0x00000398 - 
CKM_SHA3_384_KEY_DERIVE = 0x00000399 - CKM_SHA3_512_KEY_DERIVE = 0x0000039A - CKM_SHAKE_128_KEY_DERIVE = 0x0000039B - CKM_SHAKE_256_KEY_DERIVE = 0x0000039C - CKM_PBE_MD2_DES_CBC = 0x000003A0 - CKM_PBE_MD5_DES_CBC = 0x000003A1 - CKM_PBE_MD5_CAST_CBC = 0x000003A2 - CKM_PBE_MD5_CAST3_CBC = 0x000003A3 - CKM_PBE_MD5_CAST5_CBC = 0x000003A4 // Deprecated - CKM_PBE_MD5_CAST128_CBC = 0x000003A4 - CKM_PBE_SHA1_CAST5_CBC = 0x000003A5 // Deprecated - CKM_PBE_SHA1_CAST128_CBC = 0x000003A5 - CKM_PBE_SHA1_RC4_128 = 0x000003A6 - CKM_PBE_SHA1_RC4_40 = 0x000003A7 - CKM_PBE_SHA1_DES3_EDE_CBC = 0x000003A8 - CKM_PBE_SHA1_DES2_EDE_CBC = 0x000003A9 - CKM_PBE_SHA1_RC2_128_CBC = 0x000003AA - CKM_PBE_SHA1_RC2_40_CBC = 0x000003AB - CKM_PKCS5_PBKD2 = 0x000003B0 - CKM_PBA_SHA1_WITH_SHA1_HMAC = 0x000003C0 - CKM_WTLS_PRE_MASTER_KEY_GEN = 0x000003D0 - CKM_WTLS_MASTER_KEY_DERIVE = 0x000003D1 - CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC = 0x000003D2 - CKM_WTLS_PRF = 0x000003D3 - CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE = 0x000003D4 - CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE = 0x000003D5 - CKM_TLS10_MAC_SERVER = 0x000003D6 - CKM_TLS10_MAC_CLIENT = 0x000003D7 - CKM_TLS12_MAC = 0x000003D8 - CKM_TLS12_KDF = 0x000003D9 - CKM_TLS12_MASTER_KEY_DERIVE = 0x000003E0 - CKM_TLS12_KEY_AND_MAC_DERIVE = 0x000003E1 - CKM_TLS12_MASTER_KEY_DERIVE_DH = 0x000003E2 - CKM_TLS12_KEY_SAFE_DERIVE = 0x000003E3 - CKM_TLS_MAC = 0x000003E4 - CKM_TLS_KDF = 0x000003E5 - CKM_KEY_WRAP_LYNKS = 0x00000400 - CKM_KEY_WRAP_SET_OAEP = 0x00000401 - CKM_CMS_SIG = 0x00000500 - CKM_KIP_DERIVE = 0x00000510 - CKM_KIP_WRAP = 0x00000511 - CKM_KIP_MAC = 0x00000512 - CKM_CAMELLIA_KEY_GEN = 0x00000550 - CKM_CAMELLIA_ECB = 0x00000551 - CKM_CAMELLIA_CBC = 0x00000552 - CKM_CAMELLIA_MAC = 0x00000553 - CKM_CAMELLIA_MAC_GENERAL = 0x00000554 - CKM_CAMELLIA_CBC_PAD = 0x00000555 - CKM_CAMELLIA_ECB_ENCRYPT_DATA = 0x00000556 - CKM_CAMELLIA_CBC_ENCRYPT_DATA = 0x00000557 - CKM_CAMELLIA_CTR = 0x00000558 - CKM_ARIA_KEY_GEN = 0x00000560 - CKM_ARIA_ECB = 0x00000561 - CKM_ARIA_CBC = 
0x00000562 - CKM_ARIA_MAC = 0x00000563 - CKM_ARIA_MAC_GENERAL = 0x00000564 - CKM_ARIA_CBC_PAD = 0x00000565 - CKM_ARIA_ECB_ENCRYPT_DATA = 0x00000566 - CKM_ARIA_CBC_ENCRYPT_DATA = 0x00000567 - CKM_SEED_KEY_GEN = 0x00000650 - CKM_SEED_ECB = 0x00000651 - CKM_SEED_CBC = 0x00000652 - CKM_SEED_MAC = 0x00000653 - CKM_SEED_MAC_GENERAL = 0x00000654 - CKM_SEED_CBC_PAD = 0x00000655 - CKM_SEED_ECB_ENCRYPT_DATA = 0x00000656 - CKM_SEED_CBC_ENCRYPT_DATA = 0x00000657 - CKM_SKIPJACK_KEY_GEN = 0x00001000 - CKM_SKIPJACK_ECB64 = 0x00001001 - CKM_SKIPJACK_CBC64 = 0x00001002 - CKM_SKIPJACK_OFB64 = 0x00001003 - CKM_SKIPJACK_CFB64 = 0x00001004 - CKM_SKIPJACK_CFB32 = 0x00001005 - CKM_SKIPJACK_CFB16 = 0x00001006 - CKM_SKIPJACK_CFB8 = 0x00001007 - CKM_SKIPJACK_WRAP = 0x00001008 - CKM_SKIPJACK_PRIVATE_WRAP = 0x00001009 - CKM_SKIPJACK_RELAYX = 0x0000100a - CKM_KEA_KEY_PAIR_GEN = 0x00001010 - CKM_KEA_KEY_DERIVE = 0x00001011 - CKM_KEA_DERIVE = 0x00001012 - CKM_FORTEZZA_TIMESTAMP = 0x00001020 - CKM_BATON_KEY_GEN = 0x00001030 - CKM_BATON_ECB128 = 0x00001031 - CKM_BATON_ECB96 = 0x00001032 - CKM_BATON_CBC128 = 0x00001033 - CKM_BATON_COUNTER = 0x00001034 - CKM_BATON_SHUFFLE = 0x00001035 - CKM_BATON_WRAP = 0x00001036 - CKM_ECDSA_KEY_PAIR_GEN = 0x00001040 // Deprecated - CKM_EC_KEY_PAIR_GEN = 0x00001040 - CKM_ECDSA = 0x00001041 - CKM_ECDSA_SHA1 = 0x00001042 - CKM_ECDSA_SHA224 = 0x00001043 - CKM_ECDSA_SHA256 = 0x00001044 - CKM_ECDSA_SHA384 = 0x00001045 - CKM_ECDSA_SHA512 = 0x00001046 - CKM_ECDH1_DERIVE = 0x00001050 - CKM_ECDH1_COFACTOR_DERIVE = 0x00001051 - CKM_ECMQV_DERIVE = 0x00001052 - CKM_ECDH_AES_KEY_WRAP = 0x00001053 - CKM_RSA_AES_KEY_WRAP = 0x00001054 - CKM_JUNIPER_KEY_GEN = 0x00001060 - CKM_JUNIPER_ECB128 = 0x00001061 - CKM_JUNIPER_CBC128 = 0x00001062 - CKM_JUNIPER_COUNTER = 0x00001063 - CKM_JUNIPER_SHUFFLE = 0x00001064 - CKM_JUNIPER_WRAP = 0x00001065 - CKM_FASTHASH = 0x00001070 - CKM_AES_KEY_GEN = 0x00001080 - CKM_AES_ECB = 0x00001081 - CKM_AES_CBC = 0x00001082 - CKM_AES_MAC = 0x00001083 - 
CKM_AES_MAC_GENERAL = 0x00001084 - CKM_AES_CBC_PAD = 0x00001085 - CKM_AES_CTR = 0x00001086 - CKM_AES_GCM = 0x00001087 - CKM_AES_CCM = 0x00001088 - CKM_AES_CTS = 0x00001089 - CKM_AES_CMAC = 0x0000108A - CKM_AES_CMAC_GENERAL = 0x0000108B - CKM_AES_XCBC_MAC = 0x0000108C - CKM_AES_XCBC_MAC_96 = 0x0000108D - CKM_AES_GMAC = 0x0000108E - CKM_BLOWFISH_KEY_GEN = 0x00001090 - CKM_BLOWFISH_CBC = 0x00001091 - CKM_TWOFISH_KEY_GEN = 0x00001092 - CKM_TWOFISH_CBC = 0x00001093 - CKM_BLOWFISH_CBC_PAD = 0x00001094 - CKM_TWOFISH_CBC_PAD = 0x00001095 - CKM_DES_ECB_ENCRYPT_DATA = 0x00001100 - CKM_DES_CBC_ENCRYPT_DATA = 0x00001101 - CKM_DES3_ECB_ENCRYPT_DATA = 0x00001102 - CKM_DES3_CBC_ENCRYPT_DATA = 0x00001103 - CKM_AES_ECB_ENCRYPT_DATA = 0x00001104 - CKM_AES_CBC_ENCRYPT_DATA = 0x00001105 - CKM_GOSTR3410_KEY_PAIR_GEN = 0x00001200 - CKM_GOSTR3410 = 0x00001201 - CKM_GOSTR3410_WITH_GOSTR3411 = 0x00001202 - CKM_GOSTR3410_KEY_WRAP = 0x00001203 - CKM_GOSTR3410_DERIVE = 0x00001204 - CKM_GOSTR3411 = 0x00001210 - CKM_GOSTR3411_HMAC = 0x00001211 - CKM_GOST28147_KEY_GEN = 0x00001220 - CKM_GOST28147_ECB = 0x00001221 - CKM_GOST28147 = 0x00001222 - CKM_GOST28147_MAC = 0x00001223 - CKM_GOST28147_KEY_WRAP = 0x00001224 - CKM_DSA_PARAMETER_GEN = 0x00002000 - CKM_DH_PKCS_PARAMETER_GEN = 0x00002001 - CKM_X9_42_DH_PARAMETER_GEN = 0x00002002 - CKM_DSA_PROBABLISTIC_PARAMETER_GEN = 0x00002003 - CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN = 0x00002004 - CKM_AES_OFB = 0x00002104 - CKM_AES_CFB64 = 0x00002105 - CKM_AES_CFB8 = 0x00002106 - CKM_AES_CFB128 = 0x00002107 - CKM_AES_CFB1 = 0x00002108 - CKM_AES_KEY_WRAP = 0x00002109 - CKM_AES_KEY_WRAP_PAD = 0x0000210A - CKM_RSA_PKCS_TPM_1_1 = 0x00004001 - CKM_RSA_PKCS_OAEP_TPM_1_1 = 0x00004002 - CKM_VENDOR_DEFINED = 0x80000000 - CKF_HW = 0x00000001 - CKF_ENCRYPT = 0x00000100 - CKF_DECRYPT = 0x00000200 - CKF_DIGEST = 0x00000400 - CKF_SIGN = 0x00000800 - CKF_SIGN_RECOVER = 0x00001000 - CKF_VERIFY = 0x00002000 - CKF_VERIFY_RECOVER = 0x00004000 - CKF_GENERATE = 0x00008000 - 
CKF_GENERATE_KEY_PAIR = 0x00010000 - CKF_WRAP = 0x00020000 - CKF_UNWRAP = 0x00040000 - CKF_DERIVE = 0x00080000 - CKF_EC_F_P = 0x00100000 - CKF_EC_F_2M = 0x00200000 - CKF_EC_ECPARAMETERS = 0x00400000 - CKF_EC_NAMEDCURVE = 0x00800000 - CKF_EC_UNCOMPRESS = 0x01000000 - CKF_EC_COMPRESS = 0x02000000 - CKF_EXTENSION = 0x80000000 + + // The following certificate types are defined: + CKC_X_509 = 0x00000000 + CKC_X_509_ATTR_CERT = 0x00000001 + CKC_WTLS = 0x00000002 + CKC_VENDOR_DEFINED = 0x80000000 + + // The CKF_ARRAY_ATTRIBUTE flag identifies an attribute which + // consists of an array of values. + CKF_ARRAY_ATTRIBUTE = 0x40000000 + + // The following OTP-related defines relate to the CKA_OTP_FORMAT attribute + CK_OTP_FORMAT_DECIMAL = 0 + CK_OTP_FORMAT_HEXADECIMAL = 1 + CK_OTP_FORMAT_ALPHANUMERIC = 2 + CK_OTP_FORMAT_BINARY = 3 + + // The following OTP-related defines relate to the CKA_OTP_..._REQUIREMENT + // attributes + CK_OTP_PARAM_IGNORED = 0 + CK_OTP_PARAM_OPTIONAL = 1 + CK_OTP_PARAM_MANDATORY = 2 + + // The following attribute types are defined: + CKA_CLASS = 0x00000000 + CKA_TOKEN = 0x00000001 + CKA_PRIVATE = 0x00000002 + CKA_LABEL = 0x00000003 + CKA_APPLICATION = 0x00000010 + CKA_VALUE = 0x00000011 + CKA_OBJECT_ID = 0x00000012 + CKA_CERTIFICATE_TYPE = 0x00000080 + CKA_ISSUER = 0x00000081 + CKA_SERIAL_NUMBER = 0x00000082 + CKA_AC_ISSUER = 0x00000083 + CKA_OWNER = 0x00000084 + CKA_ATTR_TYPES = 0x00000085 + CKA_TRUSTED = 0x00000086 + CKA_CERTIFICATE_CATEGORY = 0x00000087 + CKA_JAVA_MIDP_SECURITY_DOMAIN = 0x00000088 + CKA_URL = 0x00000089 + CKA_HASH_OF_SUBJECT_PUBLIC_KEY = 0x0000008A + CKA_HASH_OF_ISSUER_PUBLIC_KEY = 0x0000008B + CKA_NAME_HASH_ALGORITHM = 0x0000008C + CKA_CHECK_VALUE = 0x00000090 + CKA_KEY_TYPE = 0x00000100 + CKA_SUBJECT = 0x00000101 + CKA_ID = 0x00000102 + CKA_SENSITIVE = 0x00000103 + CKA_ENCRYPT = 0x00000104 + CKA_DECRYPT = 0x00000105 + CKA_WRAP = 0x00000106 + CKA_UNWRAP = 0x00000107 + CKA_SIGN = 0x00000108 + CKA_SIGN_RECOVER = 0x00000109 + 
CKA_VERIFY = 0x0000010A + CKA_VERIFY_RECOVER = 0x0000010B + CKA_DERIVE = 0x0000010C + CKA_START_DATE = 0x00000110 + CKA_END_DATE = 0x00000111 + CKA_MODULUS = 0x00000120 + CKA_MODULUS_BITS = 0x00000121 + CKA_PUBLIC_EXPONENT = 0x00000122 + CKA_PRIVATE_EXPONENT = 0x00000123 + CKA_PRIME_1 = 0x00000124 + CKA_PRIME_2 = 0x00000125 + CKA_EXPONENT_1 = 0x00000126 + CKA_EXPONENT_2 = 0x00000127 + CKA_COEFFICIENT = 0x00000128 + CKA_PUBLIC_KEY_INFO = 0x00000129 + CKA_PRIME = 0x00000130 + CKA_SUBPRIME = 0x00000131 + CKA_BASE = 0x00000132 + CKA_PRIME_BITS = 0x00000133 + CKA_SUBPRIME_BITS = 0x00000134 + CKA_SUB_PRIME_BITS = CKA_SUBPRIME_BITS + CKA_VALUE_BITS = 0x00000160 + CKA_VALUE_LEN = 0x00000161 + CKA_EXTRACTABLE = 0x00000162 + CKA_LOCAL = 0x00000163 + CKA_NEVER_EXTRACTABLE = 0x00000164 + CKA_ALWAYS_SENSITIVE = 0x00000165 + CKA_KEY_GEN_MECHANISM = 0x00000166 + CKA_MODIFIABLE = 0x00000170 + CKA_COPYABLE = 0x00000171 + CKA_DESTROYABLE = 0x00000172 + CKA_ECDSA_PARAMS = 0x00000180 // Deprecated + CKA_EC_PARAMS = 0x00000180 + CKA_EC_POINT = 0x00000181 + CKA_SECONDARY_AUTH = 0x00000200 // Deprecated + CKA_AUTH_PIN_FLAGS = 0x00000201 // Deprecated + CKA_ALWAYS_AUTHENTICATE = 0x00000202 + CKA_WRAP_WITH_TRUSTED = 0x00000210 + CKA_WRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000211) + CKA_UNWRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000212) + CKA_DERIVE_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000213) + CKA_OTP_FORMAT = 0x00000220 + CKA_OTP_LENGTH = 0x00000221 + CKA_OTP_TIME_INTERVAL = 0x00000222 + CKA_OTP_USER_FRIENDLY_MODE = 0x00000223 + CKA_OTP_CHALLENGE_REQUIREMENT = 0x00000224 + CKA_OTP_TIME_REQUIREMENT = 0x00000225 + CKA_OTP_COUNTER_REQUIREMENT = 0x00000226 + CKA_OTP_PIN_REQUIREMENT = 0x00000227 + CKA_OTP_COUNTER = 0x0000022E + CKA_OTP_TIME = 0x0000022F + CKA_OTP_USER_IDENTIFIER = 0x0000022A + CKA_OTP_SERVICE_IDENTIFIER = 0x0000022B + CKA_OTP_SERVICE_LOGO = 0x0000022C + CKA_OTP_SERVICE_LOGO_TYPE = 0x0000022D + CKA_GOSTR3410_PARAMS = 0x00000250 + CKA_GOSTR3411_PARAMS = 0x00000251 + 
CKA_GOST28147_PARAMS = 0x00000252 + CKA_HW_FEATURE_TYPE = 0x00000300 + CKA_RESET_ON_INIT = 0x00000301 + CKA_HAS_RESET = 0x00000302 + CKA_PIXEL_X = 0x00000400 + CKA_PIXEL_Y = 0x00000401 + CKA_RESOLUTION = 0x00000402 + CKA_CHAR_ROWS = 0x00000403 + CKA_CHAR_COLUMNS = 0x00000404 + CKA_COLOR = 0x00000405 + CKA_BITS_PER_PIXEL = 0x00000406 + CKA_CHAR_SETS = 0x00000480 + CKA_ENCODING_METHODS = 0x00000481 + CKA_MIME_TYPES = 0x00000482 + CKA_MECHANISM_TYPE = 0x00000500 + CKA_REQUIRED_CMS_ATTRIBUTES = 0x00000501 + CKA_DEFAULT_CMS_ATTRIBUTES = 0x00000502 + CKA_SUPPORTED_CMS_ATTRIBUTES = 0x00000503 + CKA_ALLOWED_MECHANISMS = (CKF_ARRAY_ATTRIBUTE | 0x00000600) + CKA_VENDOR_DEFINED = 0x80000000 + + // the following mechanism types are defined: + CKM_RSA_PKCS_KEY_PAIR_GEN = 0x00000000 + CKM_RSA_PKCS = 0x00000001 + CKM_RSA_9796 = 0x00000002 + CKM_RSA_X_509 = 0x00000003 + CKM_MD2_RSA_PKCS = 0x00000004 + CKM_MD5_RSA_PKCS = 0x00000005 + CKM_SHA1_RSA_PKCS = 0x00000006 + CKM_RIPEMD128_RSA_PKCS = 0x00000007 + CKM_RIPEMD160_RSA_PKCS = 0x00000008 + CKM_RSA_PKCS_OAEP = 0x00000009 + CKM_RSA_X9_31_KEY_PAIR_GEN = 0x0000000A + CKM_RSA_X9_31 = 0x0000000B + CKM_SHA1_RSA_X9_31 = 0x0000000C + CKM_RSA_PKCS_PSS = 0x0000000D + CKM_SHA1_RSA_PKCS_PSS = 0x0000000E + CKM_DSA_KEY_PAIR_GEN = 0x00000010 + CKM_DSA = 0x00000011 + CKM_DSA_SHA1 = 0x00000012 + CKM_DSA_SHA224 = 0x00000013 + CKM_DSA_SHA256 = 0x00000014 + CKM_DSA_SHA384 = 0x00000015 + CKM_DSA_SHA512 = 0x00000016 + CKM_DSA_SHA3_224 = 0x00000018 + CKM_DSA_SHA3_256 = 0x00000019 + CKM_DSA_SHA3_384 = 0x0000001A + CKM_DSA_SHA3_512 = 0x0000001B + CKM_DH_PKCS_KEY_PAIR_GEN = 0x00000020 + CKM_DH_PKCS_DERIVE = 0x00000021 + CKM_X9_42_DH_KEY_PAIR_GEN = 0x00000030 + CKM_X9_42_DH_DERIVE = 0x00000031 + CKM_X9_42_DH_HYBRID_DERIVE = 0x00000032 + CKM_X9_42_MQV_DERIVE = 0x00000033 + CKM_SHA256_RSA_PKCS = 0x00000040 + CKM_SHA384_RSA_PKCS = 0x00000041 + CKM_SHA512_RSA_PKCS = 0x00000042 + CKM_SHA256_RSA_PKCS_PSS = 0x00000043 + CKM_SHA384_RSA_PKCS_PSS = 0x00000044 + 
CKM_SHA512_RSA_PKCS_PSS = 0x00000045 + CKM_SHA224_RSA_PKCS = 0x00000046 + CKM_SHA224_RSA_PKCS_PSS = 0x00000047 + CKM_SHA512_224 = 0x00000048 + CKM_SHA512_224_HMAC = 0x00000049 + CKM_SHA512_224_HMAC_GENERAL = 0x0000004A + CKM_SHA512_224_KEY_DERIVATION = 0x0000004B + CKM_SHA512_256 = 0x0000004C + CKM_SHA512_256_HMAC = 0x0000004D + CKM_SHA512_256_HMAC_GENERAL = 0x0000004E + CKM_SHA512_256_KEY_DERIVATION = 0x0000004F + CKM_SHA512_T = 0x00000050 + CKM_SHA512_T_HMAC = 0x00000051 + CKM_SHA512_T_HMAC_GENERAL = 0x00000052 + CKM_SHA512_T_KEY_DERIVATION = 0x00000053 + CKM_SHA3_256_RSA_PKCS = 0x00000060 + CKM_SHA3_384_RSA_PKCS = 0x00000061 + CKM_SHA3_512_RSA_PKCS = 0x00000062 + CKM_SHA3_256_RSA_PKCS_PSS = 0x00000063 + CKM_SHA3_384_RSA_PKCS_PSS = 0x00000064 + CKM_SHA3_512_RSA_PKCS_PSS = 0x00000065 + CKM_SHA3_224_RSA_PKCS = 0x00000066 + CKM_SHA3_224_RSA_PKCS_PSS = 0x00000067 + CKM_RC2_KEY_GEN = 0x00000100 + CKM_RC2_ECB = 0x00000101 + CKM_RC2_CBC = 0x00000102 + CKM_RC2_MAC = 0x00000103 + CKM_RC2_MAC_GENERAL = 0x00000104 + CKM_RC2_CBC_PAD = 0x00000105 + CKM_RC4_KEY_GEN = 0x00000110 + CKM_RC4 = 0x00000111 + CKM_DES_KEY_GEN = 0x00000120 + CKM_DES_ECB = 0x00000121 + CKM_DES_CBC = 0x00000122 + CKM_DES_MAC = 0x00000123 + CKM_DES_MAC_GENERAL = 0x00000124 + CKM_DES_CBC_PAD = 0x00000125 + CKM_DES2_KEY_GEN = 0x00000130 + CKM_DES3_KEY_GEN = 0x00000131 + CKM_DES3_ECB = 0x00000132 + CKM_DES3_CBC = 0x00000133 + CKM_DES3_MAC = 0x00000134 + CKM_DES3_MAC_GENERAL = 0x00000135 + CKM_DES3_CBC_PAD = 0x00000136 + CKM_DES3_CMAC_GENERAL = 0x00000137 + CKM_DES3_CMAC = 0x00000138 + CKM_CDMF_KEY_GEN = 0x00000140 + CKM_CDMF_ECB = 0x00000141 + CKM_CDMF_CBC = 0x00000142 + CKM_CDMF_MAC = 0x00000143 + CKM_CDMF_MAC_GENERAL = 0x00000144 + CKM_CDMF_CBC_PAD = 0x00000145 + CKM_DES_OFB64 = 0x00000150 + CKM_DES_OFB8 = 0x00000151 + CKM_DES_CFB64 = 0x00000152 + CKM_DES_CFB8 = 0x00000153 + CKM_MD2 = 0x00000200 + CKM_MD2_HMAC = 0x00000201 + CKM_MD2_HMAC_GENERAL = 0x00000202 + CKM_MD5 = 0x00000210 + CKM_MD5_HMAC = 
0x00000211 + CKM_MD5_HMAC_GENERAL = 0x00000212 + CKM_SHA_1 = 0x00000220 + CKM_SHA_1_HMAC = 0x00000221 + CKM_SHA_1_HMAC_GENERAL = 0x00000222 + CKM_RIPEMD128 = 0x00000230 + CKM_RIPEMD128_HMAC = 0x00000231 + CKM_RIPEMD128_HMAC_GENERAL = 0x00000232 + CKM_RIPEMD160 = 0x00000240 + CKM_RIPEMD160_HMAC = 0x00000241 + CKM_RIPEMD160_HMAC_GENERAL = 0x00000242 + CKM_SHA256 = 0x00000250 + CKM_SHA256_HMAC = 0x00000251 + CKM_SHA256_HMAC_GENERAL = 0x00000252 + CKM_SHA224 = 0x00000255 + CKM_SHA224_HMAC = 0x00000256 + CKM_SHA224_HMAC_GENERAL = 0x00000257 + CKM_SHA384 = 0x00000260 + CKM_SHA384_HMAC = 0x00000261 + CKM_SHA384_HMAC_GENERAL = 0x00000262 + CKM_SHA512 = 0x00000270 + CKM_SHA512_HMAC = 0x00000271 + CKM_SHA512_HMAC_GENERAL = 0x00000272 + CKM_SECURID_KEY_GEN = 0x00000280 + CKM_SECURID = 0x00000282 + CKM_HOTP_KEY_GEN = 0x00000290 + CKM_HOTP = 0x00000291 + CKM_ACTI = 0x000002A0 + CKM_ACTI_KEY_GEN = 0x000002A1 + CKM_SHA3_256 = 0x000002B0 + CKM_SHA3_256_HMAC = 0x000002B1 + CKM_SHA3_256_HMAC_GENERAL = 0x000002B2 + CKM_SHA3_256_KEY_GEN = 0x000002B3 + CKM_SHA3_224 = 0x000002B5 + CKM_SHA3_224_HMAC = 0x000002B6 + CKM_SHA3_224_HMAC_GENERAL = 0x000002B7 + CKM_SHA3_224_KEY_GEN = 0x000002B8 + CKM_SHA3_384 = 0x000002C0 + CKM_SHA3_384_HMAC = 0x000002C1 + CKM_SHA3_384_HMAC_GENERAL = 0x000002C2 + CKM_SHA3_384_KEY_GEN = 0x000002C3 + CKM_SHA3_512 = 0x000002D0 + CKM_SHA3_512_HMAC = 0x000002D1 + CKM_SHA3_512_HMAC_GENERAL = 0x000002D2 + CKM_SHA3_512_KEY_GEN = 0x000002D3 + CKM_CAST_KEY_GEN = 0x00000300 + CKM_CAST_ECB = 0x00000301 + CKM_CAST_CBC = 0x00000302 + CKM_CAST_MAC = 0x00000303 + CKM_CAST_MAC_GENERAL = 0x00000304 + CKM_CAST_CBC_PAD = 0x00000305 + CKM_CAST3_KEY_GEN = 0x00000310 + CKM_CAST3_ECB = 0x00000311 + CKM_CAST3_CBC = 0x00000312 + CKM_CAST3_MAC = 0x00000313 + CKM_CAST3_MAC_GENERAL = 0x00000314 + CKM_CAST3_CBC_PAD = 0x00000315 + + // Note that CAST128 and CAST5 are the same algorithm + CKM_CAST5_KEY_GEN = 0x00000320 + CKM_CAST128_KEY_GEN = 0x00000320 + CKM_CAST5_ECB = 0x00000321 + 
CKM_CAST128_ECB = 0x00000321 + CKM_CAST5_CBC = 0x00000322 // Deprecated + CKM_CAST128_CBC = 0x00000322 + CKM_CAST5_MAC = 0x00000323 // Deprecated + CKM_CAST128_MAC = 0x00000323 + CKM_CAST5_MAC_GENERAL = 0x00000324 // Deprecated + CKM_CAST128_MAC_GENERAL = 0x00000324 + CKM_CAST5_CBC_PAD = 0x00000325 // Deprecated + CKM_CAST128_CBC_PAD = 0x00000325 + CKM_RC5_KEY_GEN = 0x00000330 + CKM_RC5_ECB = 0x00000331 + CKM_RC5_CBC = 0x00000332 + CKM_RC5_MAC = 0x00000333 + CKM_RC5_MAC_GENERAL = 0x00000334 + CKM_RC5_CBC_PAD = 0x00000335 + CKM_IDEA_KEY_GEN = 0x00000340 + CKM_IDEA_ECB = 0x00000341 + CKM_IDEA_CBC = 0x00000342 + CKM_IDEA_MAC = 0x00000343 + CKM_IDEA_MAC_GENERAL = 0x00000344 + CKM_IDEA_CBC_PAD = 0x00000345 + CKM_GENERIC_SECRET_KEY_GEN = 0x00000350 + CKM_CONCATENATE_BASE_AND_KEY = 0x00000360 + CKM_CONCATENATE_BASE_AND_DATA = 0x00000362 + CKM_CONCATENATE_DATA_AND_BASE = 0x00000363 + CKM_XOR_BASE_AND_DATA = 0x00000364 + CKM_EXTRACT_KEY_FROM_KEY = 0x00000365 + CKM_SSL3_PRE_MASTER_KEY_GEN = 0x00000370 + CKM_SSL3_MASTER_KEY_DERIVE = 0x00000371 + CKM_SSL3_KEY_AND_MAC_DERIVE = 0x00000372 + CKM_SSL3_MASTER_KEY_DERIVE_DH = 0x00000373 + CKM_TLS_PRE_MASTER_KEY_GEN = 0x00000374 + CKM_TLS_MASTER_KEY_DERIVE = 0x00000375 + CKM_TLS_KEY_AND_MAC_DERIVE = 0x00000376 + CKM_TLS_MASTER_KEY_DERIVE_DH = 0x00000377 + CKM_TLS_PRF = 0x00000378 + CKM_SSL3_MD5_MAC = 0x00000380 + CKM_SSL3_SHA1_MAC = 0x00000381 + CKM_MD5_KEY_DERIVATION = 0x00000390 + CKM_MD2_KEY_DERIVATION = 0x00000391 + CKM_SHA1_KEY_DERIVATION = 0x00000392 + CKM_SHA256_KEY_DERIVATION = 0x00000393 + CKM_SHA384_KEY_DERIVATION = 0x00000394 + CKM_SHA512_KEY_DERIVATION = 0x00000395 + CKM_SHA224_KEY_DERIVATION = 0x00000396 + CKM_SHA3_256_KEY_DERIVE = 0x00000397 + CKM_SHA3_224_KEY_DERIVE = 0x00000398 + CKM_SHA3_384_KEY_DERIVE = 0x00000399 + CKM_SHA3_512_KEY_DERIVE = 0x0000039A + CKM_SHAKE_128_KEY_DERIVE = 0x0000039B + CKM_SHAKE_256_KEY_DERIVE = 0x0000039C + CKM_PBE_MD2_DES_CBC = 0x000003A0 + CKM_PBE_MD5_DES_CBC = 0x000003A1 + 
CKM_PBE_MD5_CAST_CBC = 0x000003A2 + CKM_PBE_MD5_CAST3_CBC = 0x000003A3 + CKM_PBE_MD5_CAST5_CBC = 0x000003A4 // Deprecated + CKM_PBE_MD5_CAST128_CBC = 0x000003A4 + CKM_PBE_SHA1_CAST5_CBC = 0x000003A5 // Deprecated + CKM_PBE_SHA1_CAST128_CBC = 0x000003A5 + CKM_PBE_SHA1_RC4_128 = 0x000003A6 + CKM_PBE_SHA1_RC4_40 = 0x000003A7 + CKM_PBE_SHA1_DES3_EDE_CBC = 0x000003A8 + CKM_PBE_SHA1_DES2_EDE_CBC = 0x000003A9 + CKM_PBE_SHA1_RC2_128_CBC = 0x000003AA + CKM_PBE_SHA1_RC2_40_CBC = 0x000003AB + CKM_PKCS5_PBKD2 = 0x000003B0 + CKM_PBA_SHA1_WITH_SHA1_HMAC = 0x000003C0 + CKM_WTLS_PRE_MASTER_KEY_GEN = 0x000003D0 + CKM_WTLS_MASTER_KEY_DERIVE = 0x000003D1 + CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC = 0x000003D2 + CKM_WTLS_PRF = 0x000003D3 + CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE = 0x000003D4 + CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE = 0x000003D5 + CKM_TLS10_MAC_SERVER = 0x000003D6 + CKM_TLS10_MAC_CLIENT = 0x000003D7 + CKM_TLS12_MAC = 0x000003D8 + CKM_TLS12_KDF = 0x000003D9 + CKM_TLS12_MASTER_KEY_DERIVE = 0x000003E0 + CKM_TLS12_KEY_AND_MAC_DERIVE = 0x000003E1 + CKM_TLS12_MASTER_KEY_DERIVE_DH = 0x000003E2 + CKM_TLS12_KEY_SAFE_DERIVE = 0x000003E3 + CKM_TLS_MAC = 0x000003E4 + CKM_TLS_KDF = 0x000003E5 + CKM_KEY_WRAP_LYNKS = 0x00000400 + CKM_KEY_WRAP_SET_OAEP = 0x00000401 + CKM_CMS_SIG = 0x00000500 + CKM_KIP_DERIVE = 0x00000510 + CKM_KIP_WRAP = 0x00000511 + CKM_KIP_MAC = 0x00000512 + CKM_CAMELLIA_KEY_GEN = 0x00000550 + CKM_CAMELLIA_ECB = 0x00000551 + CKM_CAMELLIA_CBC = 0x00000552 + CKM_CAMELLIA_MAC = 0x00000553 + CKM_CAMELLIA_MAC_GENERAL = 0x00000554 + CKM_CAMELLIA_CBC_PAD = 0x00000555 + CKM_CAMELLIA_ECB_ENCRYPT_DATA = 0x00000556 + CKM_CAMELLIA_CBC_ENCRYPT_DATA = 0x00000557 + CKM_CAMELLIA_CTR = 0x00000558 + CKM_ARIA_KEY_GEN = 0x00000560 + CKM_ARIA_ECB = 0x00000561 + CKM_ARIA_CBC = 0x00000562 + CKM_ARIA_MAC = 0x00000563 + CKM_ARIA_MAC_GENERAL = 0x00000564 + CKM_ARIA_CBC_PAD = 0x00000565 + CKM_ARIA_ECB_ENCRYPT_DATA = 0x00000566 + CKM_ARIA_CBC_ENCRYPT_DATA = 0x00000567 + CKM_SEED_KEY_GEN = 0x00000650 + 
CKM_SEED_ECB = 0x00000651 + CKM_SEED_CBC = 0x00000652 + CKM_SEED_MAC = 0x00000653 + CKM_SEED_MAC_GENERAL = 0x00000654 + CKM_SEED_CBC_PAD = 0x00000655 + CKM_SEED_ECB_ENCRYPT_DATA = 0x00000656 + CKM_SEED_CBC_ENCRYPT_DATA = 0x00000657 + CKM_SKIPJACK_KEY_GEN = 0x00001000 + CKM_SKIPJACK_ECB64 = 0x00001001 + CKM_SKIPJACK_CBC64 = 0x00001002 + CKM_SKIPJACK_OFB64 = 0x00001003 + CKM_SKIPJACK_CFB64 = 0x00001004 + CKM_SKIPJACK_CFB32 = 0x00001005 + CKM_SKIPJACK_CFB16 = 0x00001006 + CKM_SKIPJACK_CFB8 = 0x00001007 + CKM_SKIPJACK_WRAP = 0x00001008 + CKM_SKIPJACK_PRIVATE_WRAP = 0x00001009 + CKM_SKIPJACK_RELAYX = 0x0000100a + CKM_KEA_KEY_PAIR_GEN = 0x00001010 + CKM_KEA_KEY_DERIVE = 0x00001011 + CKM_KEA_DERIVE = 0x00001012 + CKM_FORTEZZA_TIMESTAMP = 0x00001020 + CKM_BATON_KEY_GEN = 0x00001030 + CKM_BATON_ECB128 = 0x00001031 + CKM_BATON_ECB96 = 0x00001032 + CKM_BATON_CBC128 = 0x00001033 + CKM_BATON_COUNTER = 0x00001034 + CKM_BATON_SHUFFLE = 0x00001035 + CKM_BATON_WRAP = 0x00001036 + CKM_ECDSA_KEY_PAIR_GEN = 0x00001040 // Deprecated + CKM_EC_KEY_PAIR_GEN = 0x00001040 + CKM_ECDSA = 0x00001041 + CKM_ECDSA_SHA1 = 0x00001042 + CKM_ECDSA_SHA224 = 0x00001043 + CKM_ECDSA_SHA256 = 0x00001044 + CKM_ECDSA_SHA384 = 0x00001045 + CKM_ECDSA_SHA512 = 0x00001046 + CKM_ECDH1_DERIVE = 0x00001050 + CKM_ECDH1_COFACTOR_DERIVE = 0x00001051 + CKM_ECMQV_DERIVE = 0x00001052 + CKM_ECDH_AES_KEY_WRAP = 0x00001053 + CKM_RSA_AES_KEY_WRAP = 0x00001054 + CKM_JUNIPER_KEY_GEN = 0x00001060 + CKM_JUNIPER_ECB128 = 0x00001061 + CKM_JUNIPER_CBC128 = 0x00001062 + CKM_JUNIPER_COUNTER = 0x00001063 + CKM_JUNIPER_SHUFFLE = 0x00001064 + CKM_JUNIPER_WRAP = 0x00001065 + CKM_FASTHASH = 0x00001070 + CKM_AES_KEY_GEN = 0x00001080 + CKM_AES_ECB = 0x00001081 + CKM_AES_CBC = 0x00001082 + CKM_AES_MAC = 0x00001083 + CKM_AES_MAC_GENERAL = 0x00001084 + CKM_AES_CBC_PAD = 0x00001085 + CKM_AES_CTR = 0x00001086 + CKM_AES_GCM = 0x00001087 + CKM_AES_CCM = 0x00001088 + CKM_AES_CTS = 0x00001089 + CKM_AES_CMAC = 0x0000108A + CKM_AES_CMAC_GENERAL = 
0x0000108B + CKM_AES_XCBC_MAC = 0x0000108C + CKM_AES_XCBC_MAC_96 = 0x0000108D + CKM_AES_GMAC = 0x0000108E + CKM_BLOWFISH_KEY_GEN = 0x00001090 + CKM_BLOWFISH_CBC = 0x00001091 + CKM_TWOFISH_KEY_GEN = 0x00001092 + CKM_TWOFISH_CBC = 0x00001093 + CKM_BLOWFISH_CBC_PAD = 0x00001094 + CKM_TWOFISH_CBC_PAD = 0x00001095 + CKM_DES_ECB_ENCRYPT_DATA = 0x00001100 + CKM_DES_CBC_ENCRYPT_DATA = 0x00001101 + CKM_DES3_ECB_ENCRYPT_DATA = 0x00001102 + CKM_DES3_CBC_ENCRYPT_DATA = 0x00001103 + CKM_AES_ECB_ENCRYPT_DATA = 0x00001104 + CKM_AES_CBC_ENCRYPT_DATA = 0x00001105 + CKM_GOSTR3410_KEY_PAIR_GEN = 0x00001200 + CKM_GOSTR3410 = 0x00001201 + CKM_GOSTR3410_WITH_GOSTR3411 = 0x00001202 + CKM_GOSTR3410_KEY_WRAP = 0x00001203 + CKM_GOSTR3410_DERIVE = 0x00001204 + CKM_GOSTR3411 = 0x00001210 + CKM_GOSTR3411_HMAC = 0x00001211 + CKM_GOST28147_KEY_GEN = 0x00001220 + CKM_GOST28147_ECB = 0x00001221 + CKM_GOST28147 = 0x00001222 + CKM_GOST28147_MAC = 0x00001223 + CKM_GOST28147_KEY_WRAP = 0x00001224 + CKM_DSA_PARAMETER_GEN = 0x00002000 + CKM_DH_PKCS_PARAMETER_GEN = 0x00002001 + CKM_X9_42_DH_PARAMETER_GEN = 0x00002002 + CKM_DSA_PROBABLISTIC_PARAMETER_GEN = 0x00002003 + CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN = 0x00002004 + CKM_AES_OFB = 0x00002104 + CKM_AES_CFB64 = 0x00002105 + CKM_AES_CFB8 = 0x00002106 + CKM_AES_CFB128 = 0x00002107 + CKM_AES_CFB1 = 0x00002108 + CKM_AES_KEY_WRAP = 0x00002109 // WAS: 0x00001090 + CKM_AES_KEY_WRAP_PAD = 0x0000210A // WAS: 0x00001091 + CKM_RSA_PKCS_TPM_1_1 = 0x00004001 + CKM_RSA_PKCS_OAEP_TPM_1_1 = 0x00004002 + CKM_VENDOR_DEFINED = 0x80000000 + + // The flags are defined as follows: + // + // Bit Flag Mask Meaning + CKF_HW = 0x00000001 // performed by HW + + // Specify whether or not a mechanism can be used for a particular task + CKF_ENCRYPT = 0x00000100 + CKF_DECRYPT = 0x00000200 + CKF_DIGEST = 0x00000400 + CKF_SIGN = 0x00000800 + CKF_SIGN_RECOVER = 0x00001000 + CKF_VERIFY = 0x00002000 + CKF_VERIFY_RECOVER = 0x00004000 + CKF_GENERATE = 0x00008000 + CKF_GENERATE_KEY_PAIR = 
0x00010000 + CKF_WRAP = 0x00020000 + CKF_UNWRAP = 0x00040000 + CKF_DERIVE = 0x00080000 + + // Describe a token's EC capabilities not available in mechanism + // information. + CKF_EC_F_P = 0x00100000 + CKF_EC_F_2M = 0x00200000 + CKF_EC_ECPARAMETERS = 0x00400000 + CKF_EC_NAMEDCURVE = 0x00800000 + CKF_EC_UNCOMPRESS = 0x01000000 + CKF_EC_COMPRESS = 0x02000000 + CKF_EXTENSION = 0x80000000 + CKR_OK = 0x00000000 CKR_CANCEL = 0x00000001 CKR_HOST_MEMORY = 0x00000002 @@ -718,49 +836,69 @@ const ( CKR_PUBLIC_KEY_INVALID = 0x000001B9 CKR_FUNCTION_REJECTED = 0x00000200 CKR_VENDOR_DEFINED = 0x80000000 - CKF_LIBRARY_CANT_CREATE_OS_THREADS = 0x00000001 - CKF_OS_LOCKING_OK = 0x00000002 - CKF_DONT_BLOCK = 1 - CKG_MGF1_SHA1 = 0x00000001 - CKG_MGF1_SHA256 = 0x00000002 - CKG_MGF1_SHA384 = 0x00000003 - CKG_MGF1_SHA512 = 0x00000004 - CKG_MGF1_SHA224 = 0x00000005 - CKZ_DATA_SPECIFIED = 0x00000001 - CKD_NULL = 0x00000001 - CKD_SHA1_KDF = 0x00000002 - CKD_SHA1_KDF_ASN1 = 0x00000003 - CKD_SHA1_KDF_CONCATENATE = 0x00000004 - CKD_SHA224_KDF = 0x00000005 - CKD_SHA256_KDF = 0x00000006 - CKD_SHA384_KDF = 0x00000007 - CKD_SHA512_KDF = 0x00000008 - CKD_CPDIVERSIFY_KDF = 0x00000009 - CKD_SHA3_224_KDF = 0x0000000A - CKD_SHA3_256_KDF = 0x0000000B - CKD_SHA3_384_KDF = 0x0000000C - CKD_SHA3_512_KDF = 0x0000000D - CKP_PKCS5_PBKD2_HMAC_SHA1 = 0x00000001 - CKP_PKCS5_PBKD2_HMAC_GOSTR3411 = 0x00000002 - CKP_PKCS5_PBKD2_HMAC_SHA224 = 0x00000003 - CKP_PKCS5_PBKD2_HMAC_SHA256 = 0x00000004 - CKP_PKCS5_PBKD2_HMAC_SHA384 = 0x00000005 - CKP_PKCS5_PBKD2_HMAC_SHA512 = 0x00000006 - CKP_PKCS5_PBKD2_HMAC_SHA512_224 = 0x00000007 - CKP_PKCS5_PBKD2_HMAC_SHA512_256 = 0x00000008 - CKZ_SALT_SPECIFIED = 0x00000001 - CK_OTP_VALUE = 0 - CK_OTP_PIN = 1 - CK_OTP_CHALLENGE = 2 - CK_OTP_TIME = 3 - CK_OTP_COUNTER = 4 - CK_OTP_FLAGS = 5 - CK_OTP_OUTPUT_LENGTH = 6 - CK_OTP_OUTPUT_FORMAT = 7 - CKF_NEXT_OTP = 0x00000001 - CKF_EXCLUDE_TIME = 0x00000002 - CKF_EXCLUDE_COUNTER = 0x00000004 - CKF_EXCLUDE_CHALLENGE = 0x00000008 - 
CKF_EXCLUDE_PIN = 0x00000010 - CKF_USER_FRIENDLY_OTP = 0x00000020 + + // flags: bit flags that provide capabilities of the slot + // + // Bit Flag Mask Meaning + CKF_LIBRARY_CANT_CREATE_OS_THREADS = 0x00000001 + CKF_OS_LOCKING_OK = 0x00000002 + + // additional flags for parameters to functions + // CKF_DONT_BLOCK is for the function C_WaitForSlotEvent + CKF_DONT_BLOCK = 1 + + // The following MGFs are defined + CKG_MGF1_SHA1 = 0x00000001 + CKG_MGF1_SHA256 = 0x00000002 + CKG_MGF1_SHA384 = 0x00000003 + CKG_MGF1_SHA512 = 0x00000004 + CKG_MGF1_SHA224 = 0x00000005 + + // The following encoding parameter sources are defined + CKZ_DATA_SPECIFIED = 0x00000001 + + // The following EC Key Derivation Functions are defined + CKD_NULL = 0x00000001 + CKD_SHA1_KDF = 0x00000002 + + // The following X9.42 DH key derivation functions are defined + CKD_SHA1_KDF_ASN1 = 0x00000003 + CKD_SHA1_KDF_CONCATENATE = 0x00000004 + CKD_SHA224_KDF = 0x00000005 + CKD_SHA256_KDF = 0x00000006 + CKD_SHA384_KDF = 0x00000007 + CKD_SHA512_KDF = 0x00000008 + CKD_CPDIVERSIFY_KDF = 0x00000009 + CKD_SHA3_224_KDF = 0x0000000A + CKD_SHA3_256_KDF = 0x0000000B + CKD_SHA3_384_KDF = 0x0000000C + CKD_SHA3_512_KDF = 0x0000000D + + CKP_PKCS5_PBKD2_HMAC_SHA1 = 0x00000001 + CKP_PKCS5_PBKD2_HMAC_GOSTR3411 = 0x00000002 + CKP_PKCS5_PBKD2_HMAC_SHA224 = 0x00000003 + CKP_PKCS5_PBKD2_HMAC_SHA256 = 0x00000004 + CKP_PKCS5_PBKD2_HMAC_SHA384 = 0x00000005 + CKP_PKCS5_PBKD2_HMAC_SHA512 = 0x00000006 + CKP_PKCS5_PBKD2_HMAC_SHA512_224 = 0x00000007 + CKP_PKCS5_PBKD2_HMAC_SHA512_256 = 0x00000008 + + // The following salt value sources are defined in PKCS #5 v2.0. 
+ CKZ_SALT_SPECIFIED = 0x00000001 + + CK_OTP_VALUE = 0 + CK_OTP_PIN = 1 + CK_OTP_CHALLENGE = 2 + CK_OTP_TIME = 3 + CK_OTP_COUNTER = 4 + CK_OTP_FLAGS = 5 + CK_OTP_OUTPUT_LENGTH = 6 + CK_OTP_OUTPUT_FORMAT = 7 + + CKF_NEXT_OTP = 0x00000001 + CKF_EXCLUDE_TIME = 0x00000002 + CKF_EXCLUDE_COUNTER = 0x00000004 + CKF_EXCLUDE_CHALLENGE = 0x00000008 + CKF_EXCLUDE_PIN = 0x00000010 + CKF_USER_FRIENDLY_OTP = 0x00000020 ) diff --git a/vendor/github.com/oklog/ulid/.travis.yml b/vendor/github.com/oklog/ulid/.travis.yml deleted file mode 100644 index 43eb762fa3..0000000000 --- a/vendor/github.com/oklog/ulid/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -sudo: false -go: - - 1.10.x -install: - - go get -v github.com/golang/lint/golint - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - - go get -d -t -v ./... - - go build -v ./... -script: - - go vet ./... - - $HOME/gopath/bin/golint . - - go test -v -race ./... - - go test -v -covermode=count -coverprofile=cov.out - - $HOME/gopath/bin/goveralls -coverprofile=cov.out -service=travis-ci -repotoken "$COVERALLS_TOKEN" || true diff --git a/vendor/github.com/oklog/ulid/Gopkg.lock b/vendor/github.com/oklog/ulid/Gopkg.lock deleted file mode 100644 index 349b449a6e..0000000000 --- a/vendor/github.com/oklog/ulid/Gopkg.lock +++ /dev/null @@ -1,15 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - branch = "master" - name = "github.com/pborman/getopt" - packages = ["v2"] - revision = "7148bc3a4c3008adfcab60cbebfd0576018f330b" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "6779b05abd5cd429c5393641d2453005a3cb74a400d161b2b5c5d0ca2e10e116" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/oklog/ulid/Gopkg.toml b/vendor/github.com/oklog/ulid/Gopkg.toml deleted file mode 100644 index 624a7a019c..0000000000 --- a/vendor/github.com/oklog/ulid/Gopkg.toml +++ /dev/null @@ -1,26 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "github.com/pborman/getopt" diff --git a/vendor/github.com/oklog/ulid/.gitignore b/vendor/github.com/oklog/ulid/v2/.gitignore similarity index 100% rename from vendor/github.com/oklog/ulid/.gitignore rename to vendor/github.com/oklog/ulid/v2/.gitignore diff --git a/vendor/github.com/oklog/ulid/AUTHORS.md b/vendor/github.com/oklog/ulid/v2/AUTHORS.md similarity index 100% rename from vendor/github.com/oklog/ulid/AUTHORS.md rename to vendor/github.com/oklog/ulid/v2/AUTHORS.md diff --git a/vendor/github.com/oklog/ulid/CHANGELOG.md b/vendor/github.com/oklog/ulid/v2/CHANGELOG.md similarity index 100% rename from vendor/github.com/oklog/ulid/CHANGELOG.md rename to vendor/github.com/oklog/ulid/v2/CHANGELOG.md diff --git a/vendor/github.com/oklog/ulid/CONTRIBUTING.md b/vendor/github.com/oklog/ulid/v2/CONTRIBUTING.md similarity 
index 100% rename from vendor/github.com/oklog/ulid/CONTRIBUTING.md rename to vendor/github.com/oklog/ulid/v2/CONTRIBUTING.md diff --git a/vendor/go.mongodb.org/mongo-driver/LICENSE b/vendor/github.com/oklog/ulid/v2/LICENSE similarity index 100% rename from vendor/go.mongodb.org/mongo-driver/LICENSE rename to vendor/github.com/oklog/ulid/v2/LICENSE diff --git a/vendor/github.com/oklog/ulid/README.md b/vendor/github.com/oklog/ulid/v2/README.md similarity index 55% rename from vendor/github.com/oklog/ulid/README.md rename to vendor/github.com/oklog/ulid/v2/README.md index 0a3d2f82b2..f6db0af3ef 100644 --- a/vendor/github.com/oklog/ulid/README.md +++ b/vendor/github.com/oklog/ulid/v2/README.md @@ -1,13 +1,13 @@ # Universally Unique Lexicographically Sortable Identifier -![Project status](https://img.shields.io/badge/version-1.3.0-yellow.svg) -[![Build Status](https://secure.travis-ci.org/oklog/ulid.png)](http://travis-ci.org/oklog/ulid) +[![Project status](https://img.shields.io/github/release/oklog/ulid.svg?style=flat-square)](https://github.com/oklog/ulid/releases/latest) +![Build Status](https://github.com/oklog/ulid/actions/workflows/test.yml/badge.svg) [![Go Report Card](https://goreportcard.com/badge/oklog/ulid?cache=0)](https://goreportcard.com/report/oklog/ulid) [![Coverage Status](https://coveralls.io/repos/github/oklog/ulid/badge.svg?branch=master&cache=0)](https://coveralls.io/github/oklog/ulid?branch=master) -[![GoDoc](https://godoc.org/github.com/oklog/ulid?status.svg)](https://godoc.org/github.com/oklog/ulid) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/oklog/ulid/v2) [![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE) -A Go port of [alizain/ulid](https://github.com/alizain/ulid) with binary format implemented. 
+A Go port of [ulid/javascript](https://github.com/ulid/javascript) with binary format implemented. ## Background @@ -31,27 +31,109 @@ A ULID however: ## Install +This package requires Go modules. + ```shell -go get github.com/oklog/ulid +go get github.com/oklog/ulid/v2 ``` ## Usage -An ULID is constructed with a `time.Time` and an `io.Reader` entropy source. -This design allows for greater flexibility in choosing your trade-offs. +ULIDs are constructed from two things: a timestamp with millisecond precision, +and some random data. + +Timestamps are modeled as uint64 values representing a Unix time in milliseconds. +They can be produced by passing a [time.Time](https://pkg.go.dev/time#Time) to +[ulid.Timestamp](https://pkg.go.dev/github.com/oklog/ulid/v2#Timestamp), +or by calling [time.Time.UnixMilli](https://pkg.go.dev/time#Time.UnixMilli) +and converting the returned value to `uint64`. + +Random data is taken from a provided [io.Reader](https://pkg.go.dev/io#Reader). +This design allows for greater flexibility when choosing trade-offs, but can be +a bit confusing to newcomers. -Please note that `rand.Rand` from the `math` package is *not* safe for concurrent use. -Instantiate one per long living go-routine or use a `sync.Pool` if you want to avoid the potential contention of a locked `rand.Source` as its been frequently observed in the package level functions. +If you just want to generate a ULID and don't (yet) care about details like +performance, cryptographic security, etc., use the +[ulid.Make](https://pkg.go.dev/github.com/oklog/ulid/v2#Make) helper function. +This function calls [time.Now](https://pkg.go.dev/time#Now) to get a timestamp, +and uses a source of entropy which is process-global, +[pseudo-random](https://pkg.go.dev/math/rand), and +[monotonic](https://pkg.go.dev/github.com/oklog/ulid/v2#LockedMonotonicReader). 
+ +```go +fmt.Println(ulid.Make()) +// 01G65Z755AFWAKHE12NY0CQ9FH +``` +More advanced use cases should utilize +[ulid.New](https://pkg.go.dev/github.com/oklog/ulid/v2#New). ```go -func ExampleULID() { - t := time.Unix(1000000, 0) - entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0) - fmt.Println(ulid.MustNew(ulid.Timestamp(t), entropy)) - // Output: 0000XSNJG0MQJHBF4QX1EFD6Y3 -} +entropy := rand.New(rand.NewSource(time.Now().UnixNano())) +ms := ulid.Timestamp(time.Now()) +fmt.Println(ulid.New(ms, entropy)) +// 01G65Z755AFWAKHE12NY0CQ9FH +``` + +Care should be taken when providing a source of entropy. + +The above example utilizes [math/rand.Rand](https://pkg.go.dev/math/rand#Rand), +which is not safe for concurrent use by multiple goroutines. Consider +alternatives such as +[x/exp/rand](https://pkg.go.dev/golang.org/x/exp/rand#LockedSource). +Security-sensitive use cases should always use cryptographically secure entropy +provided by [crypto/rand](https://pkg.go.dev/crypto/rand). + +Performance-sensitive use cases should avoid synchronization when generating +IDs. One option is to use a unique source of entropy for each concurrent +goroutine, which results in no lock contention, but cannot provide strong +guarantees about the random data, and does not provide monotonicity within a +given millisecond. One common performance optimization is to pool sources of +entropy using a [sync.Pool](https://pkg.go.dev/sync#Pool). + +Monotonicity is a property that says each ULID is "bigger than" the previous +one. ULIDs are automatically monotonic, but only to millisecond precision. ULIDs +generated within the same millisecond are ordered by their random component, +which means they are by default un-ordered. 
You can use +[ulid.MonotonicEntropy](https://pkg.go.dev/github.com/oklog/ulid/v2#MonotonicEntropy) or +[ulid.LockedMonotonicEntropy](https://pkg.go.dev/github.com/oklog/ulid/v2#LockedMonotonicEntropy) +to create ULIDs that are monotonic within a given millisecond, with caveats. See +the documentation for details. + +If you don't care about time-based ordering of generated IDs, then there's no +reason to use ULIDs! There are many other kinds of IDs that are easier, faster, +smaller, etc. Consider UUIDs. +## Commandline tool + +This repo also provides a tool to generate and parse ULIDs at the command line. + +```shell +go install github.com/oklog/ulid/v2/cmd/ulid@latest +``` + +Usage: + +```shell +Usage: ulid [-hlqz] [-f ] [parameters ...] + -f, --format= when parsing, show times in this format: default, rfc3339, unix, ms + -h, --help print this help text + -l, --local when parsing, show local time instead of UTC + -q, --quick when generating, use non-crypto-grade entropy + -z, --zero when generating, fix entropy to all-zeroes +``` + +Examples: + +```shell +$ ulid +01D78XYFJ1PRM1WPBCBT3VHMNV +$ ulid -z +01D78XZ44G0000000000000000 +$ ulid 01D78XZ44G0000000000000000 +Sun Mar 31 03:51:23.536 UTC 2019 +$ ulid --format=rfc3339 --local 01D78XZ44G0000000000000000 +2019-03-31T05:51:23.536+02:00 ``` ## Specification @@ -63,7 +145,7 @@ Below is the current specification of ULID as implemented in this repository. 
**Timestamp** - 48 bits - UNIX-time in milliseconds -- Won't run out of space till the year 10895 AD +- Won't run out of space till the year 10889 AD **Entropy** - 80 bits @@ -145,6 +227,6 @@ BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s ## Prior Art -- [alizain/ulid](https://github.com/alizain/ulid) +- [ulid/javascript](https://github.com/ulid/javascript) - [RobThree/NUlid](https://github.com/RobThree/NUlid) - [imdario/go-ulid](https://github.com/imdario/go-ulid) diff --git a/vendor/github.com/oklog/ulid/ulid.go b/vendor/github.com/oklog/ulid/v2/ulid.go similarity index 73% rename from vendor/github.com/oklog/ulid/ulid.go rename to vendor/github.com/oklog/ulid/v2/ulid.go index c5d0d66fd2..77e9ddd634 100644 --- a/vendor/github.com/oklog/ulid/ulid.go +++ b/vendor/github.com/oklog/ulid/v2/ulid.go @@ -23,11 +23,12 @@ import ( "math" "math/bits" "math/rand" + "sync" "time" ) /* -An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier +A ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier The components are encoded as 16 octets. Each component is encoded with the MSB first (network byte order). @@ -59,7 +60,7 @@ var ( // size. ErrBufferSize = errors.New("ulid: bad buffer size when marshaling") - // ErrBigTime is returned when constructing an ULID with a time that is larger + // ErrBigTime is returned when constructing a ULID with a time that is larger // than MaxTime. ErrBigTime = errors.New("ulid: time too big") @@ -74,14 +75,29 @@ var ( // ErrScanValue is returned when the value passed to scan cannot be unmarshaled // into the ULID. ErrScanValue = errors.New("ulid: source value must be a string or byte slice") + + // Zero is a zero-value ULID. + Zero ULID ) -// New returns an ULID with the given Unix milliseconds timestamp and an +// MonotonicReader is an interface that should yield monotonically increasing +// entropy into the provided slice for all calls with the same ms parameter. 
If +// a MonotonicReader is provided to the New constructor, its MonotonicRead +// method will be used instead of Read. +type MonotonicReader interface { + io.Reader + MonotonicRead(ms uint64, p []byte) error +} + +// New returns a ULID with the given Unix milliseconds timestamp and an // optional entropy source. Use the Timestamp function to convert // a time.Time to Unix milliseconds. // // ErrBigTime is returned when passing a timestamp bigger than MaxTime. // Reading from the entropy source may also return an error. +// +// Safety for concurrent use is only dependent on the safety of the +// entropy source. func New(ms uint64, entropy io.Reader) (id ULID, err error) { if err = id.SetTime(ms); err != nil { return id, err @@ -90,7 +106,7 @@ func New(ms uint64, entropy io.Reader) (id ULID, err error) { switch e := entropy.(type) { case nil: return id, err - case *monotonic: + case MonotonicReader: err = e.MonotonicRead(ms, id[6:]) default: _, err = io.ReadFull(e, id[6:]) @@ -109,6 +125,33 @@ func MustNew(ms uint64, entropy io.Reader) ULID { return id } +// MustNewDefault is a convenience function equivalent to MustNew with +// DefaultEntropy as the entropy. It may panic if the given time.Time is too +// large or too small. +func MustNewDefault(t time.Time) ULID { + return MustNew(Timestamp(t), defaultEntropy) +} + +var defaultEntropy = func() io.Reader { + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + return &LockedMonotonicReader{MonotonicReader: Monotonic(rng, 0)} +}() + +// DefaultEntropy returns a thread-safe per process monotonically increasing +// entropy source. +func DefaultEntropy() io.Reader { + return defaultEntropy +} + +// Make returns a ULID with the current time in Unix milliseconds and +// monotonically increasing entropy for the same millisecond. +// It is safe for concurrent use, leveraging a sync.Pool underneath for minimal +// contention. 
+func Make() (id ULID) { + // NOTE: MustNew can't panic since DefaultEntropy never returns an error. + return MustNew(Now(), defaultEntropy) +} + // Parse parses an encoded ULID, returning an error in case of failure. // // ErrDataSize is returned if the len(ulid) is different from an encoded @@ -180,24 +223,24 @@ func parse(v []byte, strict bool, id *ULID) error { // to decode a base32 ULID. // 6 bytes timestamp (48 bits) - (*id)[0] = ((dec[v[0]] << 5) | dec[v[1]]) - (*id)[1] = ((dec[v[2]] << 3) | (dec[v[3]] >> 2)) - (*id)[2] = ((dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4)) - (*id)[3] = ((dec[v[5]] << 4) | (dec[v[6]] >> 1)) - (*id)[4] = ((dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3)) - (*id)[5] = ((dec[v[8]] << 5) | dec[v[9]]) + (*id)[0] = (dec[v[0]] << 5) | dec[v[1]] + (*id)[1] = (dec[v[2]] << 3) | (dec[v[3]] >> 2) + (*id)[2] = (dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4) + (*id)[3] = (dec[v[5]] << 4) | (dec[v[6]] >> 1) + (*id)[4] = (dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3) + (*id)[5] = (dec[v[8]] << 5) | dec[v[9]] // 10 bytes of entropy (80 bits) - (*id)[6] = ((dec[v[10]] << 3) | (dec[v[11]] >> 2)) - (*id)[7] = ((dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4)) - (*id)[8] = ((dec[v[13]] << 4) | (dec[v[14]] >> 1)) - (*id)[9] = ((dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3)) - (*id)[10] = ((dec[v[16]] << 5) | dec[v[17]]) - (*id)[11] = ((dec[v[18]] << 3) | dec[v[19]]>>2) - (*id)[12] = ((dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4)) - (*id)[13] = ((dec[v[21]] << 4) | (dec[v[22]] >> 1)) - (*id)[14] = ((dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3)) - (*id)[15] = ((dec[v[24]] << 5) | dec[v[25]]) + (*id)[6] = (dec[v[10]] << 3) | (dec[v[11]] >> 2) + (*id)[7] = (dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4) + (*id)[8] = (dec[v[13]] << 4) | (dec[v[14]] >> 1) + (*id)[9] = (dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3) + (*id)[10] = (dec[v[16]] << 5) | dec[v[17]] + 
(*id)[11] = (dec[v[18]] << 3) | dec[v[19]]>>2 + (*id)[12] = (dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4) + (*id)[13] = (dec[v[21]] << 4) | (dec[v[22]] >> 1) + (*id)[14] = (dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3) + (*id)[15] = (dec[v[24]] << 5) | dec[v[25]] return nil } @@ -222,9 +265,14 @@ func MustParseStrict(ulid string) ULID { return id } +// Bytes returns bytes slice representation of ULID. +func (id ULID) Bytes() []byte { + return id[:] +} + // String returns a lexicographically sortable string encoded ULID -// (26 characters, non-standard base 32) e.g. 01AN4Z07BY79KA1307SR9X4MV3 -// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy +// (26 characters, non-standard base 32) e.g. 01AN4Z07BY79KA1307SR9X4MV3. +// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy. func (id ULID) String() string { ulid := make([]byte, EncodedSize) _ = id.MarshalTextTo(ulid) @@ -250,7 +298,7 @@ func (id ULID) MarshalBinaryTo(dst []byte) error { } // UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by -// copying the passed data and converting it to an ULID. ErrDataSize is +// copying the passed data and converting it to a ULID. ErrDataSize is // returned if the data length is different from ULID length. func (id *ULID) UnmarshalBinary(data []byte) error { if len(data) != len(*id) { @@ -366,17 +414,28 @@ func (id ULID) Time() uint64 { uint64(id[1])<<32 | uint64(id[0])<<40 } +// Timestamp returns the time encoded in the ULID as a time.Time. +func (id ULID) Timestamp() time.Time { + return Time(id.Time()) +} + +// IsZero returns true if the ULID is a zero-value ULID, i.e. ulid.Zero. +func (id ULID) IsZero() bool { + return id.Compare(Zero) == 0 +} + // maxTime is the maximum Unix time in milliseconds that can be -// represented in an ULID. +// represented in a ULID. 
var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time() // MaxTime returns the maximum Unix time in milliseconds that -// can be encoded in an ULID. +// can be encoded in a ULID. func MaxTime() uint64 { return maxTime } // Now is a convenience function that returns the current // UTC time in Unix milliseconds. Equivalent to: -// Timestamp(time.Now().UTC()) +// +// Timestamp(time.Now().UTC()) func Now() uint64 { return Timestamp(time.Now().UTC()) } // Timestamp converts a time.Time to Unix milliseconds. @@ -452,41 +511,62 @@ func (id *ULID) Scan(src interface{}) error { return ErrScanValue } -// Value implements the sql/driver.Valuer interface. This returns the value -// represented as a byte slice. If instead a string is desirable, a wrapper -// type can be created that calls String(). +// Value implements the sql/driver.Valuer interface, returning the ULID as a +// slice of bytes, by invoking MarshalBinary. If your use case requires a string +// representation instead, you can create a wrapper type that calls String() +// instead. // -// // stringValuer wraps a ULID as a string-based driver.Valuer. -// type stringValuer ULID +// type stringValuer ulid.ULID // -// func (id stringValuer) Value() (driver.Value, error) { -// return ULID(id).String(), nil +// func (v stringValuer) Value() (driver.Value, error) { +// return ulid.ULID(v).String(), nil // } // // // Example usage. // db.Exec("...", stringValuer(id)) +// +// All valid ULIDs, including zero-value ULIDs, return a valid Value with a nil +// error. If your use case requires zero-value ULIDs to return a non-nil error, +// you can create a wrapper type that special-cases this behavior. +// +// var zeroValueULID ulid.ULID +// +// type invalidZeroValuer ulid.ULID +// +// func (v invalidZeroValuer) Value() (driver.Value, error) { +// if ulid.ULID(v).Compare(zeroValueULID) == 0 { +// return nil, fmt.Errorf("zero value") +// } +// return ulid.ULID(v).Value() +// } +// +// // Example usage. 
+// db.Exec("...", invalidZeroValuer(id)) func (id ULID) Value() (driver.Value, error) { return id.MarshalBinary() } -// Monotonic returns an entropy source that is guaranteed to yield -// strictly increasing entropy bytes for the same ULID timestamp. -// On conflicts, the previous ULID entropy is incremented with a -// random number between 1 and `inc` (inclusive). +// Monotonic returns a source of entropy that yields strictly increasing entropy +// bytes, to a limit governeed by the `inc` parameter. +// +// Specifically, calls to MonotonicRead within the same ULID timestamp return +// entropy incremented by a random number between 1 and `inc` inclusive. If an +// increment results in entropy that would overflow available space, +// MonotonicRead returns ErrMonotonicOverflow. // -// The provided entropy source must actually yield random bytes or else -// monotonic reads are not guaranteed to terminate, since there isn't -// enough randomness to compute an increment number. +// Passing `inc == 0` results in the reasonable default `math.MaxUint32`. Lower +// values of `inc` provide more monotonic entropy in a single millisecond, at +// the cost of easier "guessability" of generated ULIDs. If your code depends on +// ULIDs having secure entropy bytes, then it's recommended to use the secure +// default value of `inc == 0`, unless you know what you're doing. // -// When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`. -// The lower the value of `inc`, the easier the next ULID within the -// same millisecond is to guess. If your code depends on ULIDs having -// secure entropy bytes, then don't go under this default unless you know -// what you're doing. +// The provided entropy source must actually yield random bytes. Otherwise, +// monotonic reads are not guaranteed to terminate, since there isn't enough +// randomness to compute an increment number. // -// The returned io.Reader isn't safe for concurrent use. 
-func Monotonic(entropy io.Reader, inc uint64) io.Reader { - m := monotonic{ +// The returned type isn't safe for concurrent use. +func Monotonic(entropy io.Reader, inc uint64) *MonotonicEntropy { + m := MonotonicEntropy{ Reader: bufio.NewReader(entropy), inc: inc, } @@ -495,23 +575,42 @@ func Monotonic(entropy io.Reader, inc uint64) io.Reader { m.inc = math.MaxUint32 } - if rng, ok := entropy.(*rand.Rand); ok { + if rng, ok := entropy.(rng); ok { m.rng = rng } return &m } -type monotonic struct { +type rng interface{ Int63n(n int64) int64 } + +// LockedMonotonicReader wraps a MonotonicReader with a sync.Mutex for safe +// concurrent use. +type LockedMonotonicReader struct { + mu sync.Mutex + MonotonicReader +} + +// MonotonicRead synchronizes calls to the wrapped MonotonicReader. +func (r *LockedMonotonicReader) MonotonicRead(ms uint64, p []byte) (err error) { + r.mu.Lock() + err = r.MonotonicReader.MonotonicRead(ms, p) + r.mu.Unlock() + return err +} + +// MonotonicEntropy is an opaque type that provides monotonic entropy. +type MonotonicEntropy struct { io.Reader ms uint64 inc uint64 entropy uint80 rand [8]byte - rng *rand.Rand + rng rng } -func (m *monotonic) MonotonicRead(ms uint64, entropy []byte) (err error) { +// MonotonicRead implements the MonotonicReader interface. +func (m *MonotonicEntropy) MonotonicRead(ms uint64, entropy []byte) (err error) { if !m.entropy.IsZero() && m.ms == ms { err = m.increment() m.entropy.AppendTo(entropy) @@ -524,7 +623,7 @@ func (m *monotonic) MonotonicRead(ms uint64, entropy []byte) (err error) { // increment the previous entropy number with a random number // of up to m.inc (inclusive). -func (m *monotonic) increment() error { +func (m *MonotonicEntropy) increment() error { if inc, err := m.random(); err != nil { return err } else if m.entropy.Add(inc) { @@ -536,7 +635,7 @@ func (m *monotonic) increment() error { // random returns a uniform random value in [1, m.inc), reading entropy // from m.Reader. 
When m.inc == 0 || m.inc == 1, it returns 1. // Adapted from: https://golang.org/pkg/crypto/rand/#Int -func (m *monotonic) random() (inc uint64, err error) { +func (m *MonotonicEntropy) random() (inc uint64, err error) { if m.inc <= 1 { return 1, nil } diff --git a/vendor/github.com/olekukonko/errors/chain.go b/vendor/github.com/olekukonko/errors/chain.go index 5dc73a5852..2bb390ef00 100644 --- a/vendor/github.com/olekukonko/errors/chain.go +++ b/vendor/github.com/olekukonko/errors/chain.go @@ -527,46 +527,34 @@ func (c *Chain) wrapCallable(fn interface{}, args ...interface{}) (func() error, } // executeStep runs a single step, applying retries if configured. +// This version is synchronous and avoids the bugs caused by the previous goroutine-based implementation. func (c *Chain) executeStep(ctx context.Context, step *chainStep) error { + // First, check if the context has already been canceled before starting the step. + // This allows the chain to fail fast. select { case <-ctx.Done(): return ctx.Err() default: + // Context is still active, proceed. } + + // If the step has retry logic configured... if step.config.retry != nil { - retry := step.config.retry.Transform(WithContext(ctx)) - // Wrap step execution to respect context - wrappedFn := func() error { - type result struct { - err error - } - done := make(chan result, 1) - go func() { - done <- result{err: step.execute()} - }() - select { - case res := <-done: - return res.err - case <-ctx.Done(): - return ctx.Err() - } - } - return retry.Execute(wrappedFn) - } - // Non-retry case also respects context - type result struct { - err error - } - done := make(chan result, 1) - go func() { - done <- result{err: step.execute()} - }() - select { - case res := <-done: - return res.err - case <-ctx.Done(): - return ctx.Err() + // Create a new retry instance that is aware of the chain's context. + // The retry executor will be responsible for checking ctx.Done() between attempts. 
+ retryExecutor := step.config.retry.Transform(WithContext(ctx)) + + // Execute the step's function directly. The retry mechanism will manage the loop, + // delays, and context cancellation checks. We pass step.execute without any + // extra goroutine wrappers. + return retryExecutor.Execute(step.execute) } + + // For a simple, non-retrying step, execute the function directly and synchronously + // in the current goroutine. This is the simplest, fastest, and most correct approach. + // It ensures that database connections are used and returned to the pool sequentially, + // preventing the deadlock issue. + return step.execute() } // enhanceError wraps an error with additional context from the step. diff --git a/vendor/github.com/olekukonko/errors/errors.go b/vendor/github.com/olekukonko/errors/errors.go index 4f6509da98..6bf3bca6f8 100644 --- a/vendor/github.com/olekukonko/errors/errors.go +++ b/vendor/github.com/olekukonko/errors/errors.go @@ -95,6 +95,10 @@ type contextItem struct { // context, cause, and metadata like code and category. It is thread-safe and // supports pooling for performance. type Error struct { + // Fields used in atomic operations. Place them at the beginning of the + // struct to ensure proper alignment across all architectures. + count uint64 // Occurrence count for tracking frequency. + // Primary fields (frequently accessed). msg string // The error message displayed by Error(). name string // The error name or type (e.g., "AuthError"). @@ -103,7 +107,6 @@ type Error struct { // Secondary metadata. template string // Fallback message template if msg is empty. category string // Error category (e.g., "network"). - count uint64 // Occurrence count for tracking frequency. code int32 // HTTP-like status code (e.g., 400, 500). smallCount int32 // Number of items in smallContext. 
@@ -172,7 +175,7 @@ func newError() *Error { // // err := errors.Empty().With("key", "value").WithCode(400) func Empty() *Error { - return emptyError + return newError() } // Named creates an error with the specified name and captures a stack trace. @@ -213,10 +216,18 @@ func New(text string) *Error { // err := errors.Newf("query failed: %w", cause) // // err.Error() will match fmt.Errorf("query failed: %w", cause).Error() // // errors.Unwrap(err) == cause -func Newf(format string, args ...interface{}) *Error { +func Newf(f any, args ...interface{}) *Error { + var format string + switch v := f.(type) { + case string: + format = v + case fmt.Stringer: + format = v.String() + default: + panic("Newf: format must be a string or fmt.Stringer") + } err := newError() - // --- Start: Parsing and Validation (mostly unchanged) --- var wCount int var wArgPos = -1 var wArg error @@ -356,11 +367,10 @@ func Newf(format string, args ...interface{}) *Error { err.formatWrapped = false return err } - // --- End: Parsing and Validation --- - // --- Start: Processing Valid Format String --- + // Start: Processing Valid Format String if wCount == 1 && wArg != nil { - // --- Handle %w: Simulate for Sprintf and pre-format --- + // Handle %w: Simulate for Sprintf and pre-format err.cause = wArg // Set the cause for unwrapping err.formatWrapped = true // Signal that msg is the final formatted string @@ -397,10 +407,10 @@ func Newf(format string, args ...interface{}) *Error { // Store the final, fully formatted string, matching fmt.Errorf output err.msg = result } - // --- End %w Simulation --- + // End %w Simulation } else { - // --- No %w or wArg is nil: Format directly (original logic) --- + // No %w or wArg is nil: Format directly (original logic) result, fmtErr := FmtErrorCheck(format, args...) 
if fmtErr != nil { err.msg = fmt.Sprintf("errors.Newf: formatting error for format %q: %v", format, fmtErr) @@ -411,7 +421,7 @@ func Newf(format string, args ...interface{}) *Error { err.formatWrapped = false // Ensure false if no %w was involved } } - // --- End: Processing Valid Format String --- + // End: Processing Valid Format String return err } @@ -448,38 +458,6 @@ func FmtErrorCheck(format string, args ...interface{}) (result string, err error return result, nil } -// countFmtArgs counts format specifiers that consume arguments in a format string. -// Ignores %% and non-consuming verbs like %n. -// Internal use by Newf for argument validation. -func countFmtArgs(format string) int { - count := 0 - runes := []rune(format) - i := 0 - for i < len(runes) { - if runes[i] == '%' { - if i+1 < len(runes) && runes[i+1] == '%' { - i += 2 // Skip %% - continue - } - i++ // Move past % - for i < len(runes) && (runes[i] == '+' || runes[i] == '-' || runes[i] == '#' || - runes[i] == ' ' || runes[i] == '0' || - (runes[i] >= '1' && runes[i] <= '9') || runes[i] == '.') { - i++ - } - if i < len(runes) { - if strings.ContainsRune("vTtbcdoqxXUeEfFgGsp", runes[i]) { - count++ - } - i++ // Move past verb - } - } else { - i++ - } - } - return count -} - // Std creates a standard error using errors.New for compatibility. // Does not capture stack traces or add context. // Example: @@ -700,8 +678,8 @@ func (e *Error) Error() string { return e.msg // Return the pre-formatted fmt.Errorf-compatible string } - // --- Original logic for errors not created via Newf("%w", ...) --- - // --- or errors created via New/Named and then Wrap() called. --- + // Original logic for errors not created via Newf("%w", ...) + // or errors created via New/Named and then Wrap() called. 
var buf strings.Builder // Append primary message part (msg, template, or name) diff --git a/vendor/github.com/olekukonko/errors/helper.go b/vendor/github.com/olekukonko/errors/helper.go index 06c2adc55c..2f4af34e94 100644 --- a/vendor/github.com/olekukonko/errors/helper.go +++ b/vendor/github.com/olekukonko/errors/helper.go @@ -430,3 +430,8 @@ func Wrapf(err error, format string, args ...interface{}) *Error { e.cause = err return e } + +// Err creates a new Error with the given message and wraps the provided error as its cause. +func Err(msg string, err error) *Error { + return New(msg).Wrap(err) +} diff --git a/vendor/github.com/olekukonko/ll/.goreleaser.yaml b/vendor/github.com/olekukonko/ll/.goreleaser.yaml new file mode 100644 index 0000000000..937f363415 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/.goreleaser.yaml @@ -0,0 +1,37 @@ +# yaml-language-server: $schema=https://goreleaser.com/static/schema.json +version: 2 + +project_name: ll + +# For a library repo, publish source archives instead of binaries. +source: + enabled: true + name_template: "{{ .ProjectName }}_{{ .Version }}" + + # Optional: include/exclude files in the source archive (defaults are usually fine) + # files: + # - README.md + # - LICENSE + # - go.mod + # - go.sum + # - "**/*.go" + +# No binaries to build. 
+builds: [] + +## Other Information + +checksum: + name_template: "checksums.txt" + +snapshot: + version_template: "{{ .Tag }}-next" + +changelog: + sort: asc + filters: + exclude: + - "^docs:" + - "^test:" + - "^chore:" + - "^ci:" diff --git a/vendor/github.com/olekukonko/ll/Makefile b/vendor/github.com/olekukonko/ll/Makefile new file mode 100644 index 0000000000..e9bf75d34f --- /dev/null +++ b/vendor/github.com/olekukonko/ll/Makefile @@ -0,0 +1,99 @@ +# Git remote for pushing tags +REMOTE ?= origin + +# Version for release tagging (required for tag/release targets) +RELEASE_VERSION ?= + +# Convenience +GO ?= go +GOLANGCI ?= golangci-lint +GORELEASER?= goreleaser + +.PHONY: help \ + test race bench fmt tidy lint check \ + ensure-clean ensure-release-version tag tag-delete \ + release release-dry + +help: + @echo "Targets:" + @echo " fmt - gofmt + go fmt" + @echo " tidy - go mod tidy" + @echo " test - go test ./..." + @echo " race - go test -race ./..." + @echo " bench - go test -bench=. ./..." + @echo " lint - golangci-lint run ./... (if installed)" + @echo " check - fmt + tidy + test + race" + @echo "" + @echo "Release targets:" + @echo " tag - Create annotated tag RELEASE_VERSION and push" + @echo " tag-delete - Delete tag RELEASE_VERSION locally + remote" + @echo " release - tag + goreleaser release --clean (if you use goreleaser)" + @echo " release-dry - tag + goreleaser release --clean --skip=publish" + @echo "" + @echo "Usage:" + @echo " make check" + @echo " make tag RELEASE_VERSION=v0.1.2" + @echo " make release RELEASE_VERSION=v0.1.2" + +fmt: + @echo "Formatting..." + gofmt -w -s . + $(GO) fmt ./... + +tidy: + @echo "Tidying..." + $(GO) mod tidy + +test: + @echo "Testing..." + $(GO) test ./... -count=1 + +race: + @echo "Race testing..." + $(GO) test ./... -race -count=1 + +bench: + @echo "Bench..." + $(GO) test ./... -bench=. -run=^$$ + +lint: + @echo "Linting..." 
+ @command -v $(GOLANGCI) >/dev/null 2>&1 || { echo "golangci-lint not found"; exit 1; } + $(GOLANGCI) run ./... + +check: fmt tidy test race + +# -------------------------- +# Release helpers +# -------------------------- + +ensure-clean: + @echo "Checking git working tree..." + @git diff --quiet || (echo "Error: tracked changes exist. Commit/stash them."; exit 1) + @test -z "$$(git status --porcelain)" || (echo "Error: uncommitted/untracked files:"; git status --porcelain; exit 1) + @echo "OK: working tree clean" + +ensure-release-version: + @test -n "$(RELEASE_VERSION)" || (echo "Error: set RELEASE_VERSION, e.g. make tag RELEASE_VERSION=v0.1.2"; exit 1) + +tag: ensure-clean ensure-release-version + @if git rev-parse "$(RELEASE_VERSION)" >/dev/null 2>&1; then \ + echo "Error: tag $(RELEASE_VERSION) already exists. Bump version."; \ + exit 1; \ + fi + @echo "Tagging $(RELEASE_VERSION) at HEAD $$(git rev-parse --short HEAD)" + @git tag -a $(RELEASE_VERSION) -m "$(RELEASE_VERSION)" + @git push $(REMOTE) $(RELEASE_VERSION) + +tag-delete: ensure-release-version + @echo "Deleting tag $(RELEASE_VERSION) locally + remote..." 
+ @git tag -d $(RELEASE_VERSION) 2>/dev/null || true + @git push $(REMOTE) :refs/tags/$(RELEASE_VERSION) || true + +release: tag + @command -v $(GORELEASER) >/dev/null 2>&1 || { echo "goreleaser not found"; exit 1; } + $(GORELEASER) release --clean + +release-dry: tag + @command -v $(GORELEASER) >/dev/null 2>&1 || { echo "goreleaser not found"; exit 1; } + $(GORELEASER) release --clean --skip=publish diff --git a/vendor/github.com/olekukonko/ll/README.md b/vendor/github.com/olekukonko/ll/README.md index facb4736ca..aaa58a4b8f 100644 --- a/vendor/github.com/olekukonko/ll/README.md +++ b/vendor/github.com/olekukonko/ll/README.md @@ -1,17 +1,19 @@ # ll - A Modern Structured Logging Library for Go -`ll` is a high-performance, production-ready logging library for Go, designed to provide **hierarchical namespaces**, **structured logging**, **middleware pipelines**, **conditional logging**, and support for multiple output formats, including text, JSON, colorized logs, and compatibility with Go’s `slog`. It’s ideal for applications requiring fine-grained log control, extensibility, and scalability. +`ll` is a high-performance, production-ready logging library for Go, designed to provide **hierarchical namespaces**, **structured logging**, **middleware pipelines**, **conditional logging**, and support for multiple output formats, including text, JSON, colorized logs, syslog, VictoriaLogs, and compatibility with Go's `slog`. It's ideal for applications requiring fine-grained log control, extensibility, and scalability. ## Key Features -- **Hierarchical Namespaces**: Organize logs with fine-grained control over subsystems (e.g., "app/db"). -- **Structured Logging**: Add key-value metadata for machine-readable logs. -- **Middleware Pipeline**: Customize log processing with error-based rejection. -- **Conditional Logging**: Optimize performance by skipping unnecessary log operations. -- **Multiple Output Formats**: Support for text, JSON, colorized logs, and `slog` integration. 
-- **Debugging Utilities**: Inspect variables (`Dbg`), binary data (`Dump`), and stack traces (`Stack`). -- **Thread-Safe**: Built for concurrent use with mutex-protected state. -- **Performance Optimized**: Minimal allocations and efficient namespace caching. +- **Logging Enabled by Default** - Zero configuration to start logging +- **Hierarchical Namespaces** - Organize logs with fine-grained control over subsystems (e.g., "app/db") +- **Structured Logging** - Add key-value metadata for machine-readable logs +- **Middleware Pipeline** - Customize log processing with rate limiting, sampling, and deduplication +- **Conditional & Error-Based Logging** - Optimize performance with fluent `If`, `IfErr`, `IfAny`, `IfOne` chains +- **Multiple Output Formats** - Text, JSON, colorized ANSI, syslog, VictoriaLogs, and `slog` integration +- **Advanced Debugging Utilities** - Source-aware `Dbg()`, hex/ASCII `Dump()`, private field `Inspect()`, and stack traces +- **Production Ready** - Buffered batching, log rotation, duplicate suppression, and rate limiting +- **Thread-Safe** - Built for high-concurrency with atomic operations, sharded mutexes, and lock-free fast paths +- **Performance Optimized** - Zero allocations for disabled logs, sync.Pool buffers, LRU caching for source files ## Installation @@ -21,235 +23,267 @@ Install `ll` using Go modules: go get github.com/olekukonko/ll ``` -Ensure you have Go 1.21 or later for optimal compatibility. - -## Getting Started - -Here’s a quick example to start logging with `ll`: +Requires Go 1.21 or later. 
+## Quick Start ```go package main -import ( - "github.com/olekukonko/ll" -) +import "github.com/olekukonko/ll" func main() { - // Create a logger with namespace "app" - logger := ll.New("") - - // enable output - logger.Enable() - - // Basic log - logger.Info("Welcome") // Output: [app] INFO: Application started - - logger = logger.Namespace("app") - - // Basic log - logger.Info("start at :8080") // Output: [app] INFO: Application started - - //Output - //INFO: Welcome - //[app] INFO: start at :8080 + // Logger is ENABLED by default - no .Enable() needed! + logger := ll.New("app") + + // Basic logging - works immediately + logger.Info("Server starting") // Output: [app] INFO: Server starting + logger.Warn("Memory high") // Output: [app] WARN: Memory high + logger.Error("Connection failed") // Output: [app] ERROR: Connection failed + + // Structured fields + logger.Fields("user", "alice", "status", 200).Info("Login successful") + // Output: [app] INFO: Login successful [user=alice status=200] } - ``` -```go -package main +**That's it. No `.Enable()`, no handlers to configure—it just works.** -import ( - "github.com/olekukonko/ll" - "github.com/olekukonko/ll/lh" - "os" -) +## Core Concepts -func main() { - // Chaining - logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout)) +### 1. Enabled by Default, Configurable When Needed - // Basic log - logger.Info("Application started") // Output: [app] INFO: Application started +Unlike many logging libraries that require explicit enabling, `ll` **logs immediately**. This eliminates boilerplate and reduces the chance of missing logs in production. 
- // Structured log with fields - logger.Fields("user", "alice", "status", 200).Info("User logged in") - // Output: [app] INFO: User logged in [user=alice status=200] +```go +// This works out of the box: +ll.Info("Service started") // Output: [] INFO: Service started - // Conditional log - debugMode := false - logger.If(debugMode).Debug("Debug info") // No output (debugMode is false) -} +// But you still have full control: +ll.Disable() // Global shutdown +ll.Enable() // Reactivate ``` -## Core Features - -### 1. Hierarchical Namespaces - -Namespaces allow you to organize logs hierarchically, enabling precise control over logging for different parts of your application. This is especially useful for large systems with multiple components. +### 2. Hierarchical Namespaces -**Benefits**: -- **Granular Control**: Enable/disable logs for specific subsystems (e.g., "app/db" vs. "app/api"). -- **Scalability**: Manage log volume in complex applications. -- **Readability**: Clear namespace paths improve traceability. +Organize logs hierarchically with precise control over subsystems: -**Example**: ```go -logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout)) +// Create a logger hierarchy +root := ll.New("app") +db := root.Namespace("database") +cache := root.Namespace("cache").Style(lx.NestedPath) -// Child loggers -dbLogger := logger.Namespace("db") -apiLogger := logger.Namespace("api").Style(lx.NestedPath) +// Control logging per namespace +root.NamespaceEnable("app/database") // Enable database logs +root.NamespaceDisable("app/cache") // Disable cache logs -// Namespace control -logger.NamespaceEnable("app/db") // Enable DB logs -logger.NamespaceDisable("app/api") // Disable API logs - -dbLogger.Info("Query executed") // Output: [app/db] INFO: Query executed -apiLogger.Info("Request received") // No output +db.Info("Connected") // Output: [app/database] INFO: Connected +cache.Info("Hit") // No output (disabled) ``` -### 2. Structured Logging +### 3. 
Structured Logging with Ordered Fields -Add key-value metadata to logs for machine-readable output, making it easier to query and analyze logs in tools like ELK or Grafana. +Fields maintain insertion order and support fluent chaining: -**Example**: ```go -logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout)) - -// Variadic fields -logger.Fields("user", "bob", "status", 200).Info("Request completed") -// Output: [app] INFO: Request completed [user=bob status=200] +// Fluent key-value pairs +logger. + Fields("request_id", "req-123"). + Fields("user", "alice"). + Fields("duration_ms", 42). + Info("Request processed") // Map-based fields -logger.Field(map[string]interface{}{"method": "GET"}).Info("Request") -// Output: [app] INFO: Request [method=GET] +logger.Field(map[string]interface{}{ + "method": "POST", + "path": "/api/users", +}).Debug("API call") + +// Persistent context (included in ALL subsequent logs) +logger.AddContext("environment", "production", "version", "1.2.3") +logger.Info("Deployed") // Output: ... [environment=production version=1.2.3] ``` -### 3. Middleware Pipeline +### 4. Conditional & Error-Based Logging -Customize log processing with a middleware pipeline. Middleware functions can enrich, filter, or transform logs, using an error-based rejection mechanism (non-nil errors stop logging). 
+Optimize performance with fluent conditional chains that **completely skip processing** when conditions are false: -**Example**: ```go -logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout)) +// Boolean conditions +logger.If(debugMode).Debug("Detailed diagnostics") // No overhead when false +logger.If(featureEnabled).Info("Feature used") + +// Error conditions +err := db.Query() +logger.IfErr(err).Error("Query failed") // Logs only if err != nil + +// Multiple conditions - ANY true +logger.IfErrAny(err1, err2, err3).Fatal("System failure") + +// Multiple conditions - ALL true +logger.IfErrOne(validateErr, authErr).Error("Both checks failed") + +// Chain conditions +logger. + If(debugMode). + IfErr(queryErr). + Fields("query", sql). + Debug("Query debug") +``` -// Enrich logs with app metadata -logger.Use(ll.FuncMiddleware(func(e *lx.Entry) error { - if e.Fields == nil { - e.Fields = make(map[string]interface{}) - } - e.Fields["app"] = "myapp" - return nil -})) +**Performance**: When conditions are false, the logger returns immediately with zero allocations. -// Filter low-level logs -logger.Use(ll.FuncMiddleware(func(e *lx.Entry) error { - if e.Level < lx.LevelWarn { - return fmt.Errorf("level too low") - } - return nil -})) +### 5. Powerful Debugging Toolkit + +`ll` includes advanced debugging utilities not found in standard logging libraries: -logger.Info("Ignored") // No output (filtered) -logger.Warn("Warning") // Output: [app] WARN: Warning [app=myapp] +#### Dbg() - Source-Aware Variable Inspection +Captures both variable name AND value from your source code: + +```go +x := 42 +user := &User{Name: "Alice"} +ll.Dbg(x, user) +// Output: [file.go:123] x = 42, *user = &{Name:Alice} ``` -### 4. 
Conditional Logging +#### Dump() - Hex/ASCII Binary Inspection +Perfect for protocol debugging and binary data: + +```go +ll.Handler(lh.NewColorizedHandler(os.Stdout)) +ll.Dump([]byte("hello\nworld")) +// Output: Colorized hex/ASCII dump with offset markers +``` -Optimize performance by skipping expensive log operations when conditions are false, ideal for production environments. +#### Inspect() - Private Field Reflection +Reveals unexported fields, embedded structs, and pointer internals: -**Example**: ```go -logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout)) +type secret struct { + password string // unexported! +} -featureEnabled := true -logger.If(featureEnabled).Fields("action", "update").Info("Feature used") -// Output: [app] INFO: Feature used [action=update] +s := secret{password: "hunter2"} +ll.Inspect(s) +// Output: [file.go:123] INSPECT: { +// "(password)": "hunter2" // Note the parentheses +// } +``` -logger.If(false).Info("Ignored") // No output, no processing +#### Stack() - Configurable Stack Traces +```go +ll.StackSize(8192) // Larger buffer for deep stacks +ll.Stack("Critical failure") +// Output: ERROR: Critical failure [stack=goroutine 1 [running]...] ``` -### 5. Multiple Output Formats +#### Mark() - Execution Flow Tracing +```go +func process() { + ll.Mark() // *MARK*: [file.go:123] + ll.Mark("phase1") // *phase1*: [file.go:124] + // ... work ... +} +``` -`ll` supports various output formats, including human-readable text, colorized logs, JSON, and integration with Go’s `slog` package. +### 6. 
Production-Ready Handlers -**Example**: ```go -logger := ll.New("app").Enable() +import ( + "github.com/olekukonko/ll" + "github.com/olekukonko/ll/lh" + "github.com/olekukonko/ll/l3rd/syslog" + "github.com/olekukonko/ll/l3rd/victoria" +) -// Text output -logger.Handler(lh.NewTextHandler(os.Stdout)) -logger.Info("Text log") // Output: [app] INFO: Text log +// JSON for structured logging +logger.Handler(lh.NewJSONHandler(os.Stdout)) -// JSON output -logger.Handler(lh.NewJSONHandler(os.Stdout, time.RFC3339Nano)) -logger.Info("JSON log") // Output: {"timestamp":"...","level":"INFO","message":"JSON log","namespace":"app"} +// Colorized for development +logger.Handler(lh.NewColorizedHandler(os.Stdout, + lh.WithColorTheme("dark"), + lh.WithColorIntensity(lh.IntensityVibrant), +)) -// Slog integration -slogText := slog.NewTextHandler(os.Stdout, nil) -logger.Handler(lh.NewSlogHandler(slogText)) -logger.Info("Slog log") // Output: level=INFO msg="Slog log" namespace=app class=Text +// Buffered for high throughput (100 entries or 10 seconds) +buffered := lh.NewBuffered( + lh.NewJSONHandler(os.Stdout), + lh.WithBatchSize(100), + lh.WithFlushInterval(10 * time.Second), +) +logger.Handler(buffered) +defer buffered.Close() // Ensures flush on exit + +// Syslog integration +syslogHandler, _ := syslog.New( + syslog.WithTag("myapp"), + syslog.WithFacility(syslog.LOG_LOCAL0), +) +logger.Handler(syslogHandler) + +// VictoriaLogs (cloud-native) +victoriaHandler, _ := victoria.New( + victoria.WithURL("http://victoria-logs:9428"), + victoria.WithAppName("payment-service"), + victoria.WithEnvironment("production"), + victoria.WithBatching(200, 5*time.Second), +) +logger.Handler(victoriaHandler) ``` -### 6. Debugging Utilities - -`ll` provides powerful tools for debugging, including variable inspection, binary data dumps, and stack traces. - -#### Core Debugging Methods - -1. 
**Dbg - Contextual Inspection** - Inspects variables with file and line context, preserving variable names and handling all Go types. - ```go - x := 42 - user := struct{ Name string }{"Alice"} - ll.Dbg(x) // Output: [file.go:123] x = 42 - ll.Dbg(user) // Output: [file.go:124] user = [Name:Alice] - ``` - -2. **Dump - Binary Inspection** - Displays a hex/ASCII view of data, optimized for strings, bytes, and complex types (with JSON fallback). - ```go - ll.Handler(lh.NewColorizedHandler(os.Stdout)) - ll.Dump("hello\nworld") // Output: Hex/ASCII dump (see example/dump.png) - ``` - -3. **Stack - Stack Inspection** - Logs a stack trace for debugging critical errors. - ```go - ll.Handler(lh.NewColorizedHandler(os.Stdout)) - ll.Stack("Critical error") // Output: [app] ERROR: Critical error [stack=...] (see example/stack.png) - ``` - -4**General Output** - Logs a output in structured way for inspection of public & private values. - ```go - ll.Handler(lh.NewColorizedHandler(os.Stdout)) - ll.Output(&SomeStructWithPrivateValues{}) - ``` - -#### Performance Tracking -Measure execution time for performance analysis. +### 7. Middleware Pipeline + +Transform, filter, or reject logs with a middleware pipeline: + ```go -// Automatic measurement -defer ll.Measure(func() { time.Sleep(time.Millisecond) })() -// Output: [app] INFO: function executed [duration=~1ms] - -// Explicit benchmarking -start := time.Now() -time.Sleep(time.Millisecond) -ll.Benchmark(start) // Output: [app] INFO: benchmark [start=... end=... duration=...] 
+// Rate limiting - 10 logs per second maximum +rateLimiter := lm.NewRateLimiter(lx.LevelInfo, 10, time.Second) +logger.Use(rateLimiter) + +// Sampling - 10% of debug logs +sampler := lm.NewSampling(lx.LevelDebug, 0.1) +logger.Use(sampler) + +// Deduplication - suppress identical logs for 2 seconds +deduper := lh.NewDedup(logger.GetHandler(), 2*time.Second) +logger.Handler(deduper) + +// Custom middleware +logger.Use(ll.Middle(func(e *lx.Entry) error { + if strings.Contains(e.Message, "password") { + return fmt.Errorf("sensitive information redacted") + } + return nil +})) ``` -**Performance Notes**: -- `Dbg` calls are disabled at compile-time when not enabled. -- `Dump` optimizes for primitive types, strings, and bytes with zero-copy paths. -- Stack traces are configurable via `StackSize`. +### 8. Global Convenience API + +Use package-level functions for quick logging without creating loggers: + +```go +import "github.com/olekukonko/ll" + +func main() { + ll.Info("Server starting") // Global logger + ll.Fields("port", 8080).Info("Listening") + + // Conditional logging at package level + ll.If(simulation).Debug("Test mode") + ll.IfErr(err).Error("Startup failed") + + // Debug utilities + ll.Dbg(config) + ll.Dump(requestBody) + ll.Inspect(complexStruct) +} +``` -## Real-World Example: Web Server +## Real-World Examples -A practical example of using `ll` in a web server with structured logging, middleware, and `slog` integration: +### Web Server with Structured Logging ```go package main @@ -257,110 +291,127 @@ package main import ( "github.com/olekukonko/ll" "github.com/olekukonko/ll/lh" - "log/slog" "net/http" - "os" "time" ) func main() { - // Initialize logger with slog handler - slogHandler := slog.NewJSONHandler(os.Stdout, nil) - logger := ll.New("server").Enable().Handler(lh.NewSlogHandler(slogHandler)) - - // HTTP child logger - httpLogger := logger.Namespace("http").Style(lx.NestedPath) - - // Middleware for request ID - 
httpLogger.Use(ll.FuncMiddleware(func(e *lx.Entry) error { - if e.Fields == nil { - e.Fields = make(map[string]interface{}) - } - e.Fields["request_id"] = "req-" + time.Now().String() - return nil - })) - - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + // Root logger - enabled by default + log := ll.New("server") + + // JSON output for production + log.Handler(lh.NewJSONHandler(os.Stdout)) + + // Request logger with context + http.HandleFunc("/api/users", func(w http.ResponseWriter, r *http.Request) { + reqLog := log.Namespace("http").Fields( + "method", r.Method, + "path", r.URL.Path, + "request_id", r.Header.Get("X-Request-ID"), + ) + start := time.Now() - httpLogger.Fields("method", r.Method, "path", r.URL.Path).Info("Request received") - w.Write([]byte("Hello, world!")) - httpLogger.Fields("duration_ms", time.Since(start).Milliseconds()).Info("Request completed") + reqLog.Info("request started") + + // ... handle request ... + + reqLog.Fields( + "status", 200, + "duration_ms", time.Since(start).Milliseconds(), + ).Info("request completed") }) - - logger.Info("Starting server on :8080") + + log.Info("Server listening on :8080") http.ListenAndServe(":8080", nil) } ``` -**Sample Output (JSON via slog)**: -```json -{"level":"INFO","msg":"Starting server on :8080","namespace":"server"} -{"level":"INFO","msg":"Request received","namespace":"server/http","class":"Text","method":"GET","path":"/","request_id":"req-..."} -{"level":"INFO","msg":"Request completed","namespace":"server/http","class":"Text","duration_ms":1,"request_id":"req-..."} +### Microservice with VictoriaLogs + +```go +package main + +import ( + "github.com/olekukonko/ll" + "github.com/olekukonko/ll/l3rd/victoria" +) + +func main() { + // Production setup + vlHandler, _ := victoria.New( + victoria.WithURL("http://logs.internal:9428"), + victoria.WithAppName("payment-api"), + victoria.WithEnvironment("production"), + victoria.WithVersion("1.2.3"), + victoria.WithBatching(500, 
2*time.Second), + victoria.WithRetry(3), + ) + defer vlHandler.Close() + + logger := ll.New("payment"). + Handler(vlHandler). + AddContext("region", "us-east-1") + + logger.Info("Payment service initialized") + + // Conditional error handling + if err := processPayment(); err != nil { + logger.IfErr(err). + Fields("payment_id", paymentID). + Error("Payment processing failed") + } +} ``` -## Why Choose `ll`? +## Performance -- **Granular Control**: Hierarchical namespaces for precise log management. -- **Performance**: Conditional logging and optimized concatenation reduce overhead. -- **Extensibility**: Middleware pipeline for custom log processing. -- **Structured Output**: Machine-readable logs with key-value metadata. -- **Flexible Formats**: Text, JSON, colorized, and `slog` support. -- **Debugging Power**: Advanced tools like `Dbg`, `Dump`, and `Stack` for deep inspection. -- **Thread-Safe**: Safe for concurrent use in high-throughput applications. - -## Comparison with Other Libraries - -| Feature | `ll` | `log` (stdlib) | `slog` (stdlib) | `zap` | -|--------------------------|--------------------------|----------------|-----------------|-------------------| -| Hierarchical Namespaces | ✅ | ❌ | ❌ | ❌ | -| Structured Logging | ✅ (Fields, Context) | ❌ | ✅ | ✅ | -| Middleware Pipeline | ✅ | ❌ | ❌ | ✅ (limited) | -| Conditional Logging | ✅ (If, IfOne, IfAny) | ❌ | ❌ | ❌ | -| Slog Compatibility | ✅ | ❌ | ✅ (native) | ❌ | -| Debugging (Dbg, Dump) | ✅ | ❌ | ❌ | ❌ | -| Performance (disabled logs) | High (conditional) | Low | Medium | High | -| Output Formats | Text, JSON, Color, Slog | Text | Text, JSON | JSON, Text | - -## Benchmarks - -`ll` is optimized for performance, particularly for disabled logs and structured logging: -- **Disabled Logs**: 30% faster than `slog` due to efficient conditional checks. -- **Structured Logging**: 2x faster than `log` with minimal allocations. -- **Namespace Caching**: Reduces overhead for hierarchical lookups. 
- -See `ll_bench_test.go` for detailed benchmarks on namespace creation, cloning, and field building. - -## Testing and Stability - -The `ll` library includes a comprehensive test suite (`ll_test.go`) covering: -- Logger configuration, namespaces, and conditional logging. -- Middleware, rate limiting, and sampling. -- Handler output formats (text, JSON, slog). -- Debugging utilities (`Dbg`, `Dump`, `Stack`). - -Recent improvements: -- Fixed sampling middleware for reliable behavior at edge cases (0.0 and 1.0 rates). -- Enhanced documentation across `conditional.go`, `field.go`, `global.go`, `ll.go`, `lx.go`, and `ns.go`. -- Added `slog` compatibility via `lh.SlogHandler`. +`ll` is engineered for high-performance environments: -## Contributing +| Operation | Time/op | Allocations | +|-----------|---------|-------------| +| **Disabled log** | **15.9 ns** | **0 allocs** | +| Simple text log | 176 ns | 2 allocs | +| With 2 fields | 383 ns | 4 allocs | +| JSON output | 1006 ns | 13 allocs | +| Namespace lookup (cached) | 550 ns | 6 allocs | +| Deduplication | 214 ns | 2 allocs | -Contributions are welcome! To contribute: -1. Fork the repository: `github.com/olekukonko/ll`. -2. Create a feature branch: `git checkout -b feature/your-feature`. -3. Commit changes: `git commit -m "Add your feature"`. -4. Push to the branch: `git push origin feature/your-feature`. -5. Open a pull request with a clear description. +**Key optimizations**: +- Zero allocations when logs are skipped (conditional, disabled) +- Atomic operations for hot paths +- Sync.Pool for buffer reuse +- LRU cache for source file lines (Dbg) +- Sharded mutexes for deduplication -Please include tests in `ll_test.go` and update documentation as needed. Follow the Go coding style and run `go test ./...` before submitting. +## Why Choose `ll`? 
-## License +| Feature | `ll` | `slog` | `zap` | `logrus` | +|---------|------|--------|-------|----------| +| **Enabled by default** | ✅ | ❌ | ❌ | ❌ | +| Hierarchical namespaces | ✅ | ❌ | ❌ | ❌ | +| Conditional logging | ✅ | ❌ | ❌ | ❌ | +| Error-based conditions | ✅ | ❌ | ❌ | ❌ | +| Source-aware Dbg() | ✅ | ❌ | ❌ | ❌ | +| Private field inspection | ✅ | ❌ | ❌ | ❌ | +| Hex/ASCII Dump() | ✅ | ❌ | ❌ | ❌ | +| Middleware pipeline | ✅ | ❌ | ✅ (limited) | ❌ | +| Deduplication | ✅ | ❌ | ❌ | ❌ | +| Rate limiting | ✅ | ❌ | ❌ | ❌ | +| VictoriaLogs support | ✅ | ❌ | ❌ | ❌ | +| Syslog support | ✅ | ❌ | ❌ | ✅ | +| Zero-allocs disabled logs | ✅ | ❌ | ❌ | ❌ | +| Thread-safe | ✅ | ✅ | ✅ | ✅ | + +## Documentation + +- [GoDoc](https://pkg.go.dev/github.com/olekukonko/ll) - Full API documentation +- [Examples](_example/) - Runable example code +- [Benchmarks](tests/ll_bench_test.go) - Performance benchmarks -`ll` is licensed under the MIT License. See [LICENSE](LICENSE) for details. +## Contributing -## Resources +Contributions are welcome! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines. + +## License -- **Source Code**: [github.com/olekukonko/ll](https://github.com/olekukonko/ll) -- **Issue Tracker**: [github.com/olekukonko/ll/issues](https://github.com/olekukonko/ll/issues) -- **GoDoc**: [pkg.go.dev/github.com/olekukonko/ll](https://pkg.go.dev/github.com/olekukonko/ll) \ No newline at end of file +MIT License - see [LICENSE](LICENSE) for details. 
\ No newline at end of file diff --git a/vendor/github.com/olekukonko/ll/comb.hcl b/vendor/github.com/olekukonko/ll/comb.hcl new file mode 100644 index 0000000000..20763e802a --- /dev/null +++ b/vendor/github.com/olekukonko/ll/comb.hcl @@ -0,0 +1,12 @@ +recursive = true +output_file = "all.txt" +extensions = [".go"] +exclude_dirs { +items = ["_examples", "_lab", "_tmp", "pkg", "lab","bin"] +} +exclude_files { + items = ["before.txt","after.txt"] +} +use_gitignore = true +detailed = true +go_mode = "all" \ No newline at end of file diff --git a/vendor/github.com/olekukonko/ll/conditional.go b/vendor/github.com/olekukonko/ll/conditional.go index 0ec9e4b842..581079bdda 100644 --- a/vendor/github.com/olekukonko/ll/conditional.go +++ b/vendor/github.com/olekukonko/ll/conditional.go @@ -1,5 +1,16 @@ package ll +import ( + "sync" +) + +// conditionalPool pools Conditional instances to reduce allocations. +var conditionalPool = sync.Pool{ + New: func() any { + return &Conditional{} + }, +} + // Conditional enables conditional logging based on a boolean condition. // It wraps a logger with a condition that determines whether logging operations are executed, // optimizing performance by skipping expensive operations (e.g., field computation, message formatting) @@ -9,38 +20,32 @@ type Conditional struct { condition bool // Whether logging is allowed (true to log, false to skip) } +// getConditional retrieves a Conditional from the pool or creates a new one. +func getConditional(logger *Logger, condition bool) *Conditional { + c := conditionalPool.Get().(*Conditional) + c.logger = logger + c.condition = condition + return c +} + +// putConditional returns a Conditional to the pool for reuse. +func putConditional(c *Conditional) { + c.logger = nil + c.condition = false + conditionalPool.Put(c) +} + // If creates a conditional logger that logs only if the condition is true. // It returns a Conditional struct that wraps the logger, enabling conditional logging methods. 
// This method is typically called on a Logger instance to start a conditional chain. -// Thread-safe via the underlying logger’s mutex. +// Thread-safe via the underlying logger's mutex. // Example: // // logger := New("app").Enable() // logger.If(true).Info("Logged") // Output: [app] INFO: Logged // logger.If(false).Info("Ignored") // No output func (l *Logger) If(condition bool) *Conditional { - return &Conditional{logger: l, condition: condition} -} - -// IfOne creates a conditional logger that logs only if all conditions are true. -// It evaluates a variadic list of boolean conditions, setting the condition to true only if -// all are true (logical AND). Returns a new Conditional with the result. Thread-safe via the -// underlying logger. -// Example: -// -// logger := New("app").Enable() -// logger.IfOne(true, true).Info("Logged") // Output: [app] INFO: Logged -// logger.IfOne(true, false).Info("Ignored") // No output -func (cl *Conditional) IfOne(conditions ...bool) *Conditional { - result := true - // Check each condition; set result to false if any is false - for _, cond := range conditions { - if !cond { - result = false - break - } - } - return &Conditional{logger: cl.logger, condition: result} + return getConditional(l, condition) } // IfAny creates a conditional logger that logs only if at least one condition is true. @@ -61,82 +66,140 @@ func (cl *Conditional) IfAny(conditions ...bool) *Conditional { break } } - return &Conditional{logger: cl.logger, condition: result} + // Reuse current instance if condition unchanged + if cl.condition == result { + return cl + } + cl.condition = result + return cl } -// Fields starts a fluent chain for adding fields using variadic key-value pairs, if the condition is true. -// It returns a FieldBuilder to attach fields, skipping field processing if the condition is false -// to optimize performance. Thread-safe via the FieldBuilder’s logger. 
+// IfErr creates a conditional logger that logs only if the error is non-nil. +// It's designed for the common pattern of checking errors before logging. // Example: // -// logger := New("app").Enable() -// logger.If(true).Fields("user", "alice").Info("Logged") // Output: [app] INFO: Logged [user=alice] -// logger.If(false).Fields("user", "alice").Info("Ignored") // No output, no field processing -func (cl *Conditional) Fields(pairs ...any) *FieldBuilder { - // Skip field processing if condition is false - if !cl.condition { - return &FieldBuilder{logger: cl.logger, fields: nil} +// err := doSomething() +// logger.IfErr(err).Error("Operation failed") // Only logs if err != nil +func (l *Logger) IfErr(err error) *Conditional { + return l.If(err != nil) +} + +// IfErrAny creates a conditional logger that logs only if AT LEAST ONE error is non-nil. +// It evaluates a variadic list of errors, setting the condition to true if any +// is non-nil (logical OR). Useful when any error should trigger logging. +// Example: +// +// err1 := validate(input) +// err2 := authorize(user) +// logger.IfErrAny(err1, err2).Error("Either check failed") // Logs if EITHER error exists +func (l *Logger) IfErrAny(errs ...error) *Conditional { + for _, err := range errs { + if err != nil { + return l.If(true) // Any non-nil error makes it true + } } - // Delegate to logger’s Fields method - return cl.logger.Fields(pairs...) + return l.If(false) // False only if all errors are nil } -// Field starts a fluent chain for adding fields from a map, if the condition is true. -// It returns a FieldBuilder to attach fields from a map, skipping processing if the condition -// is false. Thread-safe via the FieldBuilder’s logger. +// IfErrOne creates a conditional logger that logs only if ALL errors are non-nil. +// It evaluates a variadic list of errors, setting the condition to true only if +// all are non-nil (logical AND). Useful when you need all errors to be present. 
// Example: // -// logger := New("app").Enable() -// logger.If(true).Field(map[string]interface{}{"user": "alice"}).Info("Logged") // Output: [app] INFO: Logged [user=alice] -// logger.If(false).Field(map[string]interface{}{"user": "alice"}).Info("Ignored") // No output -func (cl *Conditional) Field(fields map[string]interface{}) *FieldBuilder { - // Skip field processing if condition is false - if !cl.condition { - return &FieldBuilder{logger: cl.logger, fields: nil} +// err1 := validate(input) +// err2 := authorize(user) +// logger.IfErrOne(err1, err2).Error("Both checks failed") // Logs only if BOTH errors exist +func (l *Logger) IfErrOne(errs ...error) *Conditional { + for _, err := range errs { + if err == nil { + return l.If(false) // Any nil error makes it false + } } - // Delegate to logger’s Field method - return cl.logger.Field(fields) + return l.If(len(errs) > 0) // True only if we have at least one error and all are non-nil } -// Info logs a message at Info level with variadic arguments if the condition is true. -// It concatenates the arguments with spaces and delegates to the logger’s Info method if the -// condition is true. Skips processing if false, optimizing performance. Thread-safe via the -// logger’s log method. +// IfErr creates a conditional logger that logs only if the error is non-nil. +// Returns a new Conditional with the error check result. // Example: // -// logger := New("app").Enable() -// logger.If(true).Info("Action", "started") // Output: [app] INFO: Action started -// logger.If(false).Info("Action", "ignored") // No output -func (cl *Conditional) Info(args ...any) { - // Skip logging if condition is false - if !cl.condition { - return +// err := doSomething() +// logger.If(true).IfErr(err).Error("Failed") // Only logs if condition true AND err != nil +func (cl *Conditional) IfErr(err error) *Conditional { + return cl.IfOne(err != nil) +} + +// IfErrAny creates a conditional logger that logs only if AT LEAST ONE error is non-nil. 
+// Returns a new Conditional with the logical OR result of error checks. +// Example: +// +// err1 := validate(input) +// err2 := authorize(user) +// logger.If(true).IfErrAny(err1, err2).Error("Either failed") // Logs if condition true AND either error exists +func (cl *Conditional) IfErrAny(errs ...error) *Conditional { + for _, err := range errs { + if err != nil { + // Reuse if condition already true + if cl.condition { + return cl + } + cl.condition = true + return cl + } } - // Delegate to logger’s Info method - cl.logger.Info(args...) + cl.condition = false + return cl } -// Infof logs a message at Info level with a format string if the condition is true. -// It formats the message using the provided format string and arguments, delegating to the -// logger’s Infof method if the condition is true. Skips processing if false, optimizing performance. -// Thread-safe via the logger’s log method. +// IfErrOne creates a conditional logger that logs only if ALL errors are non-nil. +// Returns a new Conditional with the logical AND result of error checks. +// Example: +// +// err1 := validate(input) +// err2 := authorize(user) +// logger.If(true).IfErrOne(err1, err2).Error("Both failed") // Logs if condition true AND both errors exist +func (cl *Conditional) IfErrOne(errs ...error) *Conditional { + for _, err := range errs { + if err == nil { + cl.condition = false + return cl + } + } + if len(errs) > 0 { + cl.condition = cl.condition && true + } + return cl +} + +// IfOne creates a conditional logger that logs only if all conditions are true. +// It evaluates a variadic list of boolean conditions, setting the condition to true only if +// all are true (logical AND). Returns a new Conditional with the result. Thread-safe via the +// underlying logger. 
// Example: // // logger := New("app").Enable() -// logger.If(true).Infof("Action %s", "started") // Output: [app] INFO: Action started -// logger.If(false).Infof("Action %s", "ignored") // No output -func (cl *Conditional) Infof(format string, args ...any) { - // Skip logging if condition is false - if !cl.condition { - return +// logger.IfOne(true, true).Info("Logged") // Output: [app] INFO: Logged +// logger.IfOne(true, false).Info("Ignored") // No output +func (cl *Conditional) IfOne(conditions ...bool) *Conditional { + result := true + // Check each condition; set result to false if any is false + for _, cond := range conditions { + if !cond { + result = false + break + } } - // Delegate to logger’s Infof method - cl.logger.Infof(format, args...) + // Reuse current instance if condition unchanged + if cl.condition == result { + return cl + } + cl.condition = result + return cl } // Debug logs a message at Debug level with variadic arguments if the condition is true. -// It concatenates the arguments with spaces and delegates to the logger’s Debug method if the -// condition is true. Skips processing if false. Thread-safe via the logger’s log method. +// It concatenates the arguments with spaces and delegates to the logger's Debug method if the +// condition is true. Skips processing if false, optimizing performance. Thread-safe via the +// logger's log method. // Example: // // logger := New("app").Enable().Level(lx.LevelDebug) @@ -147,13 +210,13 @@ func (cl *Conditional) Debug(args ...any) { if !cl.condition { return } - // Delegate to logger’s Debug method + // Delegate to logger's Debug method cl.logger.Debug(args...) } // Debugf logs a message at Debug level with a format string if the condition is true. -// It formats the message and delegates to the logger’s Debugf method if the condition is true. -// Skips processing if false. Thread-safe via the logger’s log method. 
+// It formats the message and delegates to the logger's Debugf method if the condition is true. +// Skips processing if false. Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable().Level(lx.LevelDebug) @@ -164,150 +227,154 @@ func (cl *Conditional) Debugf(format string, args ...any) { if !cl.condition { return } - // Delegate to logger’s Debugf method + // Delegate to logger's Debugf method cl.logger.Debugf(format, args...) } -// Warn logs a message at Warn level with variadic arguments if the condition is true. -// It concatenates the arguments with spaces and delegates to the logger’s Warn method if the -// condition is true. Skips processing if false. Thread-safe via the logger’s log method. +// Error logs a message at Error level with variadic arguments if the condition is true. +// It concatenates the arguments with spaces and delegates to the logger's Error method if the +// condition is true. Skips processing if false. Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable() -// logger.If(true).Warn("Warning", "issued") // Output: [app] WARN: Warning issued -// logger.If(false).Warn("Warning", "ignored") // No output -func (cl *Conditional) Warn(args ...any) { +// logger.If(true).Error("Error", "occurred") // Output: [app] ERROR: Error occurred +// logger.If(false).Error("Error", "ignored") // No output +func (cl *Conditional) Error(args ...any) { // Skip logging if condition is false if !cl.condition { return } - // Delegate to logger’s Warn method - cl.logger.Warn(args...) + // Delegate to logger's Error method + cl.logger.Error(args...) } -// Warnf logs a message at Warn level with a format string if the condition is true. -// It formats the message and delegates to the logger’s Warnf method if the condition is true. -// Skips processing if false. Thread-safe via the logger’s log method. +// Errorf logs a message at Error level with a format string if the condition is true. 
+// It formats the message and delegates to the logger's Errorf method if the condition is true. +// Skips processing if false. Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable() -// logger.If(true).Warnf("Warning %s", "issued") // Output: [app] WARN: Warning issued -// logger.If(false).Warnf("Warning %s", "ignored") // No output -func (cl *Conditional) Warnf(format string, args ...any) { +// logger.If(true).Errorf("Error %s", "occurred") // Output: [app] ERROR: Error occurred +// logger.If(false).Errorf("Error %s", "ignored") // No output +func (cl *Conditional) Errorf(format string, args ...any) { // Skip logging if condition is false if !cl.condition { return } - // Delegate to logger’s Warnf method - cl.logger.Warnf(format, args...) + // Delegate to logger's Errorf method + cl.logger.Errorf(format, args...) } -// Error logs a message at Error level with variadic arguments if the condition is true. -// It concatenates the arguments with spaces and delegates to the logger’s Error method if the -// condition is true. Skips processing if false. Thread-safe via the logger’s log method. +// Fatal logs a message at Error level with a stack trace and variadic arguments if the condition is true, +// then exits. It concatenates the arguments with spaces and delegates to the logger's Fatal method +// if the condition is true, terminating the program with exit code 1. Skips processing if false. +// Thread-safe via the logger's log method. 
// Example: // // logger := New("app").Enable() -// logger.If(true).Error("Error", "occurred") // Output: [app] ERROR: Error occurred -// logger.If(false).Error("Error", "ignored") // No output -func (cl *Conditional) Error(args ...any) { +// logger.If(true).Fatal("Fatal", "error") // Output: [app] ERROR: Fatal error [stack=...], then exits +// logger.If(false).Fatal("Fatal", "ignored") // No output, no exit +func (cl *Conditional) Fatal(args ...any) { // Skip logging if condition is false if !cl.condition { return } - // Delegate to logger’s Error method - cl.logger.Error(args...) + // Delegate to logger's Fatal method + cl.logger.Fatal(args...) } -// Errorf logs a message at Error level with a format string if the condition is true. -// It formats the message and delegates to the logger’s Errorf method if the condition is true. -// Skips processing if false. Thread-safe via the logger’s log method. +// Fatalf logs a formatted message at Error level with a stack trace if the condition is true, then exits. +// It formats the message and delegates to the logger's Fatalf method if the condition is true, +// terminating the program with exit code 1. Skips processing if false. Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable() -// logger.If(true).Errorf("Error %s", "occurred") // Output: [app] ERROR: Error occurred -// logger.If(false).Errorf("Error %s", "ignored") // No output -func (cl *Conditional) Errorf(format string, args ...any) { +// logger.If(true).Fatalf("Fatal %s", "error") // Output: [app] ERROR: Fatal error [stack=...], then exits +// logger.If(false).Fatalf("Fatal %s", "ignored") // No output, no exit +func (cl *Conditional) Fatalf(format string, args ...any) { // Skip logging if condition is false if !cl.condition { return } - // Delegate to logger’s Errorf method - cl.logger.Errorf(format, args...) + // Delegate to logger's Fatalf method + cl.logger.Fatalf(format, args...) 
} -// Stack logs a message at Error level with a stack trace and variadic arguments if the condition is true. -// It concatenates the arguments with spaces and delegates to the logger’s Stack method if the -// condition is true. Skips processing if false. Thread-safe via the logger’s log method. +// Field starts a fluent chain for adding fields from a map, if the condition is true. +// It returns a FieldBuilder to attach fields from a map, skipping processing if the condition +// is false. Thread-safe via the FieldBuilder's logger. // Example: // // logger := New("app").Enable() -// logger.If(true).Stack("Critical", "error") // Output: [app] ERROR: Critical error [stack=...] -// logger.If(false).Stack("Critical", "ignored") // No output -func (cl *Conditional) Stack(args ...any) { - // Skip logging if condition is false +// logger.If(true).Field(map[string]interface{}{"user": "alice"}).Info("Logged") // Output: [app] INFO: Logged [user=alice] +// logger.If(false).Field(map[string]interface{}{"user": "alice"}).Info("Ignored") // No output +func (cl *Conditional) Field(fields map[string]interface{}) *FieldBuilder { + // Skip field processing if condition is false - return FieldBuilder with nil logger + // so that all subsequent logging methods become no-ops if !cl.condition { - return + return &FieldBuilder{logger: nil, fields: nil} } - // Delegate to logger’s Stack method - cl.logger.Stack(args...) + // Delegate to logger's Field method + return cl.logger.Field(fields) } -// Stackf logs a message at Error level with a stack trace and a format string if the condition is true. -// It formats the message and delegates to the logger’s Stackf method if the condition is true. -// Skips processing if false. Thread-safe via the logger’s log method. +// Fields starts a fluent chain for adding fields using variadic key-value pairs, if the condition is true. 
+// It returns a FieldBuilder to attach fields, skipping field processing if the condition is false +// to optimize performance. Thread-safe via the FieldBuilder's logger. // Example: // // logger := New("app").Enable() -// logger.If(true).Stackf("Critical %s", "error") // Output: [app] ERROR: Critical error [stack=...] -// logger.If(false).Stackf("Critical %s", "ignored") // No output -func (cl *Conditional) Stackf(format string, args ...any) { - // Skip logging if condition is false +// logger.If(true).Fields("user", "alice").Info("Logged") // Output: [app] INFO: Logged [user=alice] +// logger.If(false).Fields("user", "alice").Info("Ignored") // No output, no field processing +func (cl *Conditional) Fields(pairs ...any) *FieldBuilder { + // Skip field processing if condition is false - return FieldBuilder with nil logger + // so that all subsequent logging methods become no-ops if !cl.condition { - return + return &FieldBuilder{logger: nil, fields: nil} } - // Delegate to logger’s Stackf method - cl.logger.Stackf(format, args...) + // Delegate to logger's Fields method + return cl.logger.Fields(pairs...) } -// Fatal logs a message at Error level with a stack trace and variadic arguments if the condition is true, -// then exits. It concatenates the arguments with spaces and delegates to the logger’s Fatal method -// if the condition is true, terminating the program with exit code 1. Skips processing if false. -// Thread-safe via the logger’s log method. +// Info logs a message at Info level with variadic arguments if the condition is true. +// It concatenates the arguments with spaces and delegates to the logger's Info method if the +// condition is true. Skips processing if false, optimizing performance. Thread-safe via the +// logger's log method. 
// Example: // // logger := New("app").Enable() -// logger.If(true).Fatal("Fatal", "error") // Output: [app] ERROR: Fatal error [stack=...], then exits -// logger.If(false).Fatal("Fatal", "ignored") // No output, no exit -func (cl *Conditional) Fatal(args ...any) { +// logger.If(true).Info("Action", "started") // Output: [app] INFO: Action started +// logger.If(false).Info("Action", "ignored") // No output +func (cl *Conditional) Info(args ...any) { // Skip logging if condition is false if !cl.condition { return } - // Delegate to logger’s Fatal method - cl.logger.Fatal(args...) + // Delegate to logger's Info method + cl.logger.Info(args...) } -// Fatalf logs a formatted message at Error level with a stack trace if the condition is true, then exits. -// It formats the message and delegates to the logger’s Fatalf method if the condition is true, -// terminating the program with exit code 1. Skips processing if false. Thread-safe via the logger’s log method. +// Infof logs a message at Info level with a format string if the condition is true. +// It formats the message using the provided format string and arguments, delegating to the +// logger's Infof method if the condition is true. Skips processing if false, optimizing performance. +// Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable() -// logger.If(true).Fatalf("Fatal %s", "error") // Output: [app] ERROR: Fatal error [stack=...], then exits -// logger.If(false).Fatalf("Fatal %s", "ignored") // No output, no exit -func (cl *Conditional) Fatalf(format string, args ...any) { +// logger.If(true).Infof("Action %s", "started") // Output: [app] INFO: Action started +// logger.If(false).Infof("Action %s", "ignored") // No output +func (cl *Conditional) Infof(format string, args ...any) { // Skip logging if condition is false if !cl.condition { return } - // Delegate to logger’s Fatalf method - cl.logger.Fatalf(format, args...) 
+ // Delegate to logger's Infof method + cl.logger.Infof(format, args...) } // Panic logs a message at Error level with a stack trace and variadic arguments if the condition is true, -// then panics. It concatenates the arguments with spaces and delegates to the logger’s Panic method -// if the condition is true, triggering a panic. Skips processing if false. Thread-safe via the logger’s log method. +// then panics. It concatenates the arguments with spaces and delegates to the logger's Panic method +// if the condition is true, triggering a panic. Skips processing if false. Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable() @@ -318,13 +385,13 @@ func (cl *Conditional) Panic(args ...any) { if !cl.condition { return } - // Delegate to logger’s Panic method + // Delegate to logger's Panic method cl.logger.Panic(args...) } // Panicf logs a formatted message at Error level with a stack trace if the condition is true, then panics. -// It formats the message and delegates to the logger’s Panicf method if the condition is true, -// triggering a panic. Skips processing if false. Thread-safe via the logger’s log method. +// It formats the message and delegates to the logger's Panicf method if the condition is true, +// triggering a panic. Skips processing if false. Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable() @@ -335,6 +402,74 @@ func (cl *Conditional) Panicf(format string, args ...any) { if !cl.condition { return } - // Delegate to logger’s Panicf method + // Delegate to logger's Panicf method cl.logger.Panicf(format, args...) } + +// Stack logs a message at Error level with a stack trace and variadic arguments if the condition is true. +// It concatenates the arguments with spaces and delegates to the logger's Stack method if the +// condition is true. Skips processing if false. Thread-safe via the logger's log method. 
+// Example: +// +// logger := New("app").Enable() +// logger.If(true).Stack("Critical", "error") // Output: [app] ERROR: Critical error [stack=...] +// logger.If(false).Stack("Critical", "ignored") // No output +func (cl *Conditional) Stack(args ...any) { + // Skip logging if condition is false + if !cl.condition { + return + } + // Delegate to logger's Stack method + cl.logger.Stack(args...) +} + +// Stackf logs a message at Error level with a stack trace and a format string if the condition is true. +// It formats the message and delegates to the logger's Stackf method if the condition is true. +// Skips processing if false. Thread-safe via the logger's log method. +// Example: +// +// logger := New("app").Enable() +// logger.If(true).Stackf("Critical %s", "error") // Output: [app] ERROR: Critical error [stack=...] +// logger.If(false).Stackf("Critical %s", "ignored") // No output +func (cl *Conditional) Stackf(format string, args ...any) { + // Skip logging if condition is false + if !cl.condition { + return + } + // Delegate to logger's Stackf method + cl.logger.Stackf(format, args...) +} + +// Warn logs a message at Warn level with variadic arguments if the condition is true. +// It concatenates the arguments with spaces and delegates to the logger's Warn method if the +// condition is true. Skips processing if false. Thread-safe via the logger's log method. +// Example: +// +// logger := New("app").Enable() +// logger.If(true).Warn("Warning", "issued") // Output: [app] WARN: Warning issued +// logger.If(false).Warn("Warning", "ignored") // No output +func (cl *Conditional) Warn(args ...any) { + // Skip logging if condition is false + if !cl.condition { + return + } + // Delegate to logger's Warn method + cl.logger.Warn(args...) +} + +// Warnf logs a message at Warn level with a format string if the condition is true. +// It formats the message and delegates to the logger's Warnf method if the condition is true. +// Skips processing if false. 
Thread-safe via the logger's log method. +// Example: +// +// logger := New("app").Enable() +// logger.If(true).Warnf("Warning %s", "issued") // Output: [app] WARN: Warning issued +// logger.If(false).Warnf("Warning %s", "ignored") // No output +func (cl *Conditional) Warnf(format string, args ...any) { + // Skip logging if condition is false + if !cl.condition { + return + } + // Delegate to logger's Warnf method + cl.logger.Warnf(format, args...) +} diff --git a/vendor/github.com/olekukonko/ll/dbg.go b/vendor/github.com/olekukonko/ll/dbg.go new file mode 100644 index 0000000000..d3d8c42b43 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/dbg.go @@ -0,0 +1,282 @@ +package ll + +import ( + "container/list" + "fmt" + "os" + "runtime" + "strings" + "sync" + + "github.com/olekukonko/ll/lx" +) + +// ----------------------------------------------------------------------------- +// Global Cache Instance +// ----------------------------------------------------------------------------- + +// sourceCache caches up to 128 source files using LRU eviction. +var sourceCache = newFileLRU(128) + +// ----------------------------------------------------------------------------- +// File-Level LRU Cache +// ----------------------------------------------------------------------------- + +type fileLRU struct { + capacity int + mu sync.Mutex + list *list.List + items map[string]*list.Element +} + +type fileItem struct { + key string + lines []string +} + +func newFileLRU(capacity int) *fileLRU { + if capacity <= 0 { + capacity = 1 + } + return &fileLRU{ + capacity: capacity, + list: list.New(), + items: make(map[string]*list.Element, capacity), + } +} + +// getLine retrieves a specific 1-indexed line from a file. +func (c *fileLRU) getLine(file string, line int) (string, bool) { + c.mu.Lock() + defer c.mu.Unlock() + + // 1. 
Cache Hit + if elem, ok := c.items[file]; ok { + c.list.MoveToFront(elem) + item := elem.Value.(*fileItem) + if item.lines == nil { + return "", false + } + return nthLine(item.lines, line) + } + + // 2. Cache Miss - Read File + // Release lock during I/O to avoid blocking other loggers + c.mu.Unlock() + data, err := os.ReadFile(file) + c.mu.Lock() + + // 3. Double-check (another goroutine might have loaded it while unlocked) + if elem, ok := c.items[file]; ok { + c.list.MoveToFront(elem) + item := elem.Value.(*fileItem) + if item.lines == nil { + return "", false + } + return nthLine(item.lines, line) + } + + var lines []string + if err == nil { + lines = strings.Split(string(data), "\n") + } + + // 4. Store (Positive or Negative Cache) + item := &fileItem{ + key: file, + lines: lines, + } + elem := c.list.PushFront(item) + c.items[file] = elem + + // 5. Evict if needed + if c.list.Len() > c.capacity { + old := c.list.Back() + if old != nil { + c.list.Remove(old) + delete(c.items, old.Value.(*fileItem).key) + } + } + + if lines == nil { + return "", false + } + return nthLine(lines, line) +} + +// nthLine returns the 1-indexed line from slice. +func nthLine(lines []string, n int) (string, bool) { + if n <= 0 || n > len(lines) { + return "", false + } + return strings.TrimSuffix(lines[n-1], "\r"), true +} + +// ----------------------------------------------------------------------------- +// Logger Debug Implementation +// ----------------------------------------------------------------------------- + +// Dbg logs debug information including source file, line number, +// and the best-effort extracted expression. +// +// Example: +// +// x := 42 +// logger.Dbg("val", x) +// Output: [file.go:123] "val" = "val", x = 42 +func (l *Logger) Dbg(values ...interface{}) { + if !l.shouldLog(lx.LevelInfo) { + return + } + l.dbg(2, values...) 
+} + +func (l *Logger) dbg(skip int, values ...interface{}) { + file, line, ok := callerFrame(skip) + if !ok { + // Fallback if we can't get frame + var sb strings.Builder + sb.WriteString("[?:?] ") + for i, v := range values { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%+v", v)) + } + l.log(lx.LevelInfo, lx.ClassText, sb.String(), nil, false) + return + } + + shortFile := file + if idx := strings.LastIndex(file, "/"); idx >= 0 { + shortFile = file[idx+1:] + } + + srcLine, hit := sourceCache.getLine(file, line) + + var expr string + if hit && srcLine != "" { + // Attempt to extract the text inside Dbg(...) + if a := strings.Index(srcLine, "Dbg("); a >= 0 { + rest := srcLine[a+len("Dbg("):] + if b := strings.LastIndex(rest, ")"); b >= 0 { + expr = strings.TrimSpace(rest[:b]) + } + } else { + // Fallback: extract first (...) group if Dbg isn't explicit prefix + a := strings.Index(srcLine, "(") + b := strings.LastIndex(srcLine, ")") + if a >= 0 && b > a { + expr = strings.TrimSpace(srcLine[a+1 : b]) + } + } + } + + // Format output + var outBuilder strings.Builder + outBuilder.WriteString(fmt.Sprintf("[%s:%d] ", shortFile, line)) + + // Attempt to split expressions to map 1:1 with values + var parts []string + if expr != "" { + parts = splitExpressions(expr) + } + + // If the number of extracted expressions matches the number of values, + // print them as "expr = value". Otherwise, fall back to "expr = val1, val2". 
+ if len(parts) == len(values) { + for i, v := range values { + if i > 0 { + outBuilder.WriteString(", ") + } + outBuilder.WriteString(fmt.Sprintf("%s = %+v", parts[i], v)) + } + } else { + if expr != "" { + outBuilder.WriteString(expr) + outBuilder.WriteString(" = ") + } + for i, v := range values { + if i > 0 { + outBuilder.WriteString(", ") + } + outBuilder.WriteString(fmt.Sprintf("%+v", v)) + } + } + + l.log(lx.LevelInfo, lx.ClassDbg, outBuilder.String(), nil, false) +} + +// splitExpressions splits a comma-separated string of expressions, +// respecting nested parentheses, brackets, braces, and quotes. +// Example: "a, fn(b, c), d" -> ["a", "fn(b, c)", "d"] +func splitExpressions(s string) []string { + var parts []string + var current strings.Builder + depth := 0 // Tracks nested (), [], {} + inQuote := false // Tracks string literals + var quoteChar rune + + for _, r := range s { + switch { + case inQuote: + current.WriteRune(r) + if r == quoteChar { + // We rely on the fact that valid Go source won't have unescaped quotes easily + // accessible here without complex parsing, but for simple Dbg calls this suffices. + // A robust parser handles `\"`, but simple state toggling covers 99% of debug cases. 
+ inQuote = false + } + case r == '"' || r == '\'': + inQuote = true + quoteChar = r + current.WriteRune(r) + case r == '(' || r == '{' || r == '[': + depth++ + current.WriteRune(r) + case r == ')' || r == '}' || r == ']': + depth-- + current.WriteRune(r) + case r == ',' && depth == 0: + // Split point + parts = append(parts, strings.TrimSpace(current.String())) + current.Reset() + default: + current.WriteRune(r) + } + } + if current.Len() > 0 { + parts = append(parts, strings.TrimSpace(current.String())) + } + return parts +} + +// ----------------------------------------------------------------------------- +// Caller Resolution +// ----------------------------------------------------------------------------- + +// callerFrame walks stack frames until it finds the first frame +// outside the ll package. +func callerFrame(skip int) (file string, line int, ok bool) { + // +2 to skip callerFrame + dbg itself. + pcs := make([]uintptr, 32) + n := runtime.Callers(skip+2, pcs) + if n == 0 { + return "", 0, false + } + + frames := runtime.CallersFrames(pcs[:n]) + for { + fr, more := frames.Next() + // fr.Function looks like: "github.com/you/mod/ll.(*Logger).Dbg" + // We want the first frame that is NOT inside package ll. + if fr.Function == "" || !strings.Contains(fr.Function, "/ll.") && !strings.Contains(fr.Function, ".ll.") { + return fr.File, fr.Line, true + } + + if !more { + // Fallback: return the last frame we saw + return fr.File, fr.Line, fr.File != "" + } + } +} diff --git a/vendor/github.com/olekukonko/ll/field.go b/vendor/github.com/olekukonko/ll/field.go index 4162162ff7..2de5b7ff39 100644 --- a/vendor/github.com/olekukonko/ll/field.go +++ b/vendor/github.com/olekukonko/ll/field.go @@ -2,23 +2,58 @@ package ll import ( "fmt" - "github.com/olekukonko/cat" - "github.com/olekukonko/ll/lx" "os" "strings" + "sync" + + "github.com/olekukonko/cat" + "github.com/olekukonko/ll/lx" ) +// fieldBuilderPool pools FieldBuilder instances to reduce allocations. 
+var fieldBuilderPool = sync.Pool{ + New: func() any { + return &FieldBuilder{ + fields: make(lx.Fields, 0, 8), // Pre-allocate common size + } + }, +} + // FieldBuilder enables fluent addition of fields before logging. // It acts as a builder pattern to attach key-value pairs (fields) to log entries, // supporting structured logging with metadata. The builder allows chaining to add fields // and log messages at various levels (Info, Debug, Warn, Error, etc.) in a single expression. type FieldBuilder struct { - logger *Logger // Associated logger instance for logging operations - fields map[string]interface{} // Fields to include in the log entry as key-value pairs + logger *Logger // Associated logger instance for logging operations + fields lx.Fields // Fields to include in the log entry as ordered key-value pairs +} + +// getFieldBuilder retrieves a FieldBuilder from the pool or creates a new one. +func getFieldBuilder(logger *Logger, capacity int) *FieldBuilder { + fb := fieldBuilderPool.Get().(*FieldBuilder) + fb.logger = logger + // Ensure minimum capacity to reduce small allocations + const minFieldCapacity = 4 + if capacity < minFieldCapacity { + capacity = minFieldCapacity + } + if cap(fb.fields) < capacity { + fb.fields = make(lx.Fields, 0, capacity) + } else { + fb.fields = fb.fields[:0] // Reset but keep capacity + } + return fb +} + +// putFieldBuilder returns a FieldBuilder to the pool for reuse. +func putFieldBuilder(fb *FieldBuilder) { + fb.logger = nil + fb.fields = fb.fields[:0] + fieldBuilderPool.Put(fb) } -// Logger creates a new logger with the builder’s fields embedded in its context. -// It clones the parent logger and copies the builder’s fields into the new logger’s context, +// Logger creates a new logger with the builder's fields embedded in its context. +// It clones the parent logger and copies the builder's fields into the new logger's context, // enabling persistent field inclusion in subsequent logs. 
This method supports fluent chaining // after Fields or Field calls. // Example: @@ -27,203 +62,185 @@ type FieldBuilder struct { // newLogger := logger.Fields("user", "alice").Logger() // newLogger.Info("Action") // Output: [app] INFO: Action [user=alice] func (fb *FieldBuilder) Logger() *Logger { + // If logger is nil (e.g., from a false Conditional), return nil + if fb.logger == nil { + return nil + } // Clone the parent logger to preserve its configuration newLogger := fb.logger.Clone() - // Initialize a new context map to avoid modifying the parent’s context - newLogger.context = make(map[string]interface{}) - // Copy builder’s fields into the new logger’s context - for k, v := range fb.fields { - newLogger.context[k] = v + // Copy builder's fields into the new logger's context (optimized) + if len(fb.fields) > 0 { + newLogger.context = append(lx.Fields(nil), fb.fields...) } return newLogger } -// Info logs a message at Info level with the builder’s fields. -// It concatenates the arguments with spaces and delegates to the logger’s log method, -// returning early if fields are nil. This method is used for informational messages. +// Info logs a message at Info level with the builder's fields. +// It concatenates the arguments with spaces and delegates to the logger's log method. +// This method is used for informational messages. // Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Info("Action", "started") // Output: [app] INFO: Action started [user=alice] func (fb *FieldBuilder) Info(args ...any) { - // Skip logging if fields are nil - if fb.fields == nil { + if fb.logger == nil { return } - // Log at Info level with the builder’s fields, no stack trace fb.logger.log(lx.LevelInfo, lx.ClassText, cat.Space(args...), fb.fields, false) + putFieldBuilder(fb) } -// Infof logs a message at Info level with the builder’s fields. +// Infof logs a message at Info level with the builder's fields. 
// It formats the message using the provided format string and arguments, then delegates -// to the logger’s internal log method. If fields are nil, it returns early to avoid logging. -// This method is part of the fluent API, typically called after adding fields. +// to the logger's internal log method. This method is part of the fluent API. // Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Infof("Action %s", "started") // Output: [app] INFO: Action started [user=alice] func (fb *FieldBuilder) Infof(format string, args ...any) { - // Skip logging if fields are nil to prevent invalid log entries - if fb.fields == nil { + if fb.logger == nil { return } - // Format the message using the provided arguments msg := fmt.Sprintf(format, args...) - // Log at Info level with the builder’s fields, no stack trace fb.logger.log(lx.LevelInfo, lx.ClassText, msg, fb.fields, false) + putFieldBuilder(fb) } -// Debug logs a message at Debug level with the builder’s fields. -// It concatenates the arguments with spaces and delegates to the logger’s log method, -// returning early if fields are nil. This method is used for debugging information. +// Debug logs a message at Debug level with the builder's fields. +// It concatenates the arguments with spaces and delegates to the logger's log method. +// This method is used for debugging information. // Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Debug("Debugging", "mode") // Output: [app] DEBUG: Debugging mode [user=alice] func (fb *FieldBuilder) Debug(args ...any) { - // Skip logging if fields are nil - if fb.fields == nil { + if fb.logger == nil { return } - // Log at Debug level with the builder’s fields, no stack trace fb.logger.log(lx.LevelDebug, lx.ClassText, cat.Space(args...), fb.fields, false) + putFieldBuilder(fb) } -// Debugf logs a message at Debug level with the builder’s fields. 
-// It formats the message and delegates to the logger’s log method, returning early if -// fields are nil. This method is used for debugging information that may be disabled in -// production environments. +// Debugf logs a message at Debug level with the builder's fields. +// It formats the message and delegates to the logger's log method. +// This method is used for debugging information that may be disabled in production. // Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Debugf("Debug %s", "mode") // Output: [app] DEBUG: Debug mode [user=alice] func (fb *FieldBuilder) Debugf(format string, args ...any) { - // Skip logging if fields are nil - if fb.fields == nil { + if fb.logger == nil { return } - // Format the message msg := fmt.Sprintf(format, args...) - // Log at Debug level with the builder’s fields, no stack trace fb.logger.log(lx.LevelDebug, lx.ClassText, msg, fb.fields, false) + putFieldBuilder(fb) } -// Warn logs a message at Warn level with the builder’s fields. -// It concatenates the arguments with spaces and delegates to the logger’s log method, -// returning early if fields are nil. This method is used for warning conditions. +// Warn logs a message at Warn level with the builder's fields. +// It concatenates the arguments with spaces and delegates to the logger's log method. +// This method is used for warning conditions. // Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Warn("Warning", "issued") // Output: [app] WARN: Warning issued [user=alice] func (fb *FieldBuilder) Warn(args ...any) { - // Skip logging if fields are nil - if fb.fields == nil { + if fb.logger == nil { return } - // Log at Warn level with the builder’s fields, no stack trace fb.logger.log(lx.LevelWarn, lx.ClassText, cat.Space(args...), fb.fields, false) + putFieldBuilder(fb) } -// Warnf logs a message at Warn level with the builder’s fields. 
-// It formats the message and delegates to the logger’s log method, returning early if -// fields are nil. This method is used for warning conditions that do not halt execution. +// Warnf logs a message at Warn level with the builder's fields. +// It formats the message and delegates to the logger's log method. +// This method is used for warning conditions that do not halt execution. // Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Warnf("Warning %s", "issued") // Output: [app] WARN: Warning issued [user=alice] func (fb *FieldBuilder) Warnf(format string, args ...any) { - // Skip logging if fields are nil - if fb.fields == nil { + if fb.logger == nil { return } - // Format the message msg := fmt.Sprintf(format, args...) - // Log at Warn level with the builder’s fields, no stack trace fb.logger.log(lx.LevelWarn, lx.ClassText, msg, fb.fields, false) + putFieldBuilder(fb) } -// Error logs a message at Error level with the builder’s fields. -// It concatenates the arguments with spaces and delegates to the logger’s log method, -// returning early if fields are nil. This method is used for error conditions. +// Error logs a message at Error level with the builder's fields. +// It concatenates the arguments with spaces and delegates to the logger's log method. +// This method is used for error conditions. // Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Error("Error", "occurred") // Output: [app] ERROR: Error occurred [user=alice] func (fb *FieldBuilder) Error(args ...any) { - // Skip logging if fields are nil - if fb.fields == nil { + if fb.logger == nil { return } - // Log at Error level with the builder’s fields, no stack trace fb.logger.log(lx.LevelError, lx.ClassText, cat.Space(args...), fb.fields, false) + putFieldBuilder(fb) } -// Errorf logs a message at Error level with the builder’s fields. 
-// It formats the message and delegates to the logger’s log method, returning early if -// fields are nil. This method is used for error conditions that may require attention. +// Errorf logs a message at Error level with the builder's fields. +// It formats the message and delegates to the logger's log method. +// This method is used for error conditions that may require attention. // Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Errorf("Error %s", "occurred") // Output: [app] ERROR: Error occurred [user=alice] func (fb *FieldBuilder) Errorf(format string, args ...any) { - // Skip logging if fields are nil - if fb.fields == nil { + if fb.logger == nil { return } - // Format the message msg := fmt.Sprintf(format, args...) - // Log at Error level with the builder’s fields, no stack trace fb.logger.log(lx.LevelError, lx.ClassText, msg, fb.fields, false) + putFieldBuilder(fb) } -// Stack logs a message at Error level with a stack trace and the builder’s fields. -// It concatenates the arguments with spaces and delegates to the logger’s log method, -// returning early if fields are nil. This method is useful for debugging critical errors. +// Stack logs a message at Error level with a stack trace and the builder's fields. +// It concatenates the arguments with spaces and delegates to the logger's log method. +// This method is useful for debugging critical errors. // Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Stack("Critical", "error") // Output: [app] ERROR: Critical error [user=alice stack=...] func (fb *FieldBuilder) Stack(args ...any) { - // Skip logging if fields are nil - if fb.fields == nil { + if fb.logger == nil { return } - // Log at Error level with the builder’s fields and a stack trace fb.logger.log(lx.LevelError, lx.ClassText, cat.Space(args...), fb.fields, true) + putFieldBuilder(fb) } -// Stackf logs a message at Error level with a stack trace and the builder’s fields. 
-// It formats the message and delegates to the logger’s log method, returning early if -// fields are nil. This method is useful for debugging critical errors. +// Stackf logs a message at Error level with a stack trace and the builder's fields. +// It formats the message and delegates to the logger's log method. +// This method is useful for debugging critical errors. // Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Stackf("Critical %s", "error") // Output: [app] ERROR: Critical error [user=alice stack=...] func (fb *FieldBuilder) Stackf(format string, args ...any) { - // Skip logging if fields are nil - if fb.fields == nil { + if fb.logger == nil { return } - // Format the message msg := fmt.Sprintf(format, args...) - // Log at Error level with the builder’s fields and a stack trace fb.logger.log(lx.LevelError, lx.ClassText, msg, fb.fields, true) + putFieldBuilder(fb) } -// Fatal logs a message at Error level with a stack trace and the builder’s fields, then exits. +// Fatal logs a message at Error level with a stack trace and the builder's fields, then exits. // It constructs the message from variadic arguments, logs it with a stack trace, and terminates -// the program with exit code 1. Returns early if fields are nil. This method is used for -// unrecoverable errors. +// the program with exit code 1. This method is used for unrecoverable errors. 
// Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Fatal("Fatal", "error") // Output: [app] ERROR: Fatal error [user=alice stack=...], then exits func (fb *FieldBuilder) Fatal(args ...any) { - // Skip logging if fields are nil - if fb.fields == nil { + if fb.logger == nil { return } - // Build the message by concatenating arguments with spaces var builder strings.Builder for i, arg := range args { if i > 0 { @@ -231,42 +248,37 @@ func (fb *FieldBuilder) Fatal(args ...any) { } builder.WriteString(fmt.Sprint(arg)) } - // Log at Error level with the builder’s fields and a stack trace - fb.logger.log(lx.LevelError, lx.ClassText, builder.String(), fb.fields, true) - // Exit the program with status code 1 - os.Exit(1) + fb.logger.log(lx.LevelFatal, lx.ClassText, builder.String(), fb.fields, fb.logger.fatalStack) + if fb.logger.fatalExits { + os.Exit(1) + } + putFieldBuilder(fb) } -// Fatalf logs a formatted message at Error level with a stack trace and the builder’s fields, -// then exits. It delegates to Fatal and returns early if fields are nil. This method is used -// for unrecoverable errors. +// Fatalf logs a formatted message at Error level with a stack trace and the builder's fields, +// then exits. It delegates to Fatal. This method is used for unrecoverable errors. // Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Fatalf("Fatal %s", "error") // Output: [app] ERROR: Fatal error [user=alice stack=...], then exits func (fb *FieldBuilder) Fatalf(format string, args ...any) { - // Skip logging if fields are nil - if fb.fields == nil { + if fb.logger == nil { return } - // Format the message and pass to Fatal fb.Fatal(fmt.Sprintf(format, args...)) } -// Panic logs a message at Error level with a stack trace and the builder’s fields, then panics. +// Panic logs a message at Error level with a stack trace and the builder's fields, then panics. 
// It constructs the message from variadic arguments, logs it with a stack trace, and triggers -// a panic with the message. Returns early if fields are nil. This method is used for critical -// errors that require immediate program termination with a panic. +// a panic with the message. This method is used for critical errors. // Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Panic("Panic", "error") // Output: [app] ERROR: Panic error [user=alice stack=...], then panics func (fb *FieldBuilder) Panic(args ...any) { - // Skip logging if fields are nil - if fb.fields == nil { + if fb.logger == nil { return } - // Build the message by concatenating arguments with spaces var builder strings.Builder for i, arg := range args { if i > 0 { @@ -275,48 +287,36 @@ func (fb *FieldBuilder) Panic(args ...any) { builder.WriteString(fmt.Sprint(arg)) } msg := builder.String() - // Log at Error level with the builder’s fields and a stack trace fb.logger.log(lx.LevelError, lx.ClassText, msg, fb.fields, true) - // Trigger a panic with the formatted message panic(msg) } -// Panicf logs a formatted message at Error level with a stack trace and the builder’s fields, -// then panics. It delegates to Panic and returns early if fields are nil. This method is used -// for critical errors that require immediate program termination with a panic. +// Panicf logs a formatted message at Error level with a stack trace and the builder's fields, +// then panics. It delegates to Panic. This method is used for critical errors. 
// Example: // // logger := New("app").Enable() // logger.Fields("user", "alice").Panicf("Panic %s", "error") // Output: [app] ERROR: Panic error [user=alice stack=...], then panics func (fb *FieldBuilder) Panicf(format string, args ...any) { - // Skip logging if fields are nil - if fb.fields == nil { + if fb.logger == nil { return } - // Format the message and pass to Panic fb.Panic(fmt.Sprintf(format, args...)) } // Err adds one or more errors to the FieldBuilder as a field and logs them. // It stores non-nil errors in the "error" field: a single error if only one is non-nil, -// or a slice of errors if multiple are non-nil. It logs the concatenated string representations -// of non-nil errors (e.g., "failed 1; failed 2") at the Error level. Returns the FieldBuilder -// for chaining, allowing further field additions or logging. Thread-safe via the logger’s mutex. +// or a slice of errors if multiple are non-nil. Returns the FieldBuilder for chaining. // Example: // // logger := New("app").Enable() // err1 := errors.New("failed 1") // err2 := errors.New("failed 2") // logger.Fields("k", "v").Err(err1, err2).Info("Error occurred") -// // Output: [app] ERROR: failed 1; failed 2 -// // [app] INFO: Error occurred [error=[failed 1 failed 2] k=v] func (fb *FieldBuilder) Err(errs ...error) *FieldBuilder { - // Initialize fields map if nil - if fb.fields == nil { - fb.fields = make(map[string]interface{}) + if fb.logger == nil { + return fb } - - // Collect non-nil errors and build log message var nonNilErrors []error var builder strings.Builder count := 0 @@ -330,46 +330,41 @@ func (fb *FieldBuilder) Err(errs ...error) *FieldBuilder { count++ } } - - // Set error field and log if there are non-nil errors if count > 0 { if count == 1 { - // Store single error directly - fb.fields["error"] = nonNilErrors[0] + fb.fields = append(fb.fields, lx.Field{Key: "error", Value: nonNilErrors[0]}) } else { - // Store slice of errors - fb.fields["error"] = nonNilErrors + fb.fields = 
append(fb.fields, lx.Field{Key: "error", Value: nonNilErrors}) } - // Log concatenated error messages at Error level fb.logger.log(lx.LevelError, lx.ClassText, builder.String(), nil, false) } - - // Return FieldBuilder for chaining return fb } // Merge adds additional key-value pairs to the FieldBuilder. -// It processes variadic arguments as key-value pairs, expecting string keys. Non-string keys -// or uneven pairs generate an "error" field with a descriptive message. Returns the FieldBuilder -// for chaining to allow further field additions or logging. +// It processes variadic arguments as key-value pairs, expecting string keys. +// Returns the FieldBuilder for chaining. // Example: // // logger := New("app").Enable() // logger.Fields("k1", "v1").Merge("k2", "v2").Info("Action") // Output: [app] INFO: Action [k1=v1 k2=v2] func (fb *FieldBuilder) Merge(pairs ...any) *FieldBuilder { - // Process pairs as key-value, advancing by 2 + // Merge can work even with nil logger since it just manipulates fields for i := 0; i < len(pairs)-1; i += 2 { - // Ensure the key is a string if key, ok := pairs[i].(string); ok { - fb.fields[key] = pairs[i+1] + fb.fields = append(fb.fields, lx.Field{Key: key, Value: pairs[i+1]}) } else { - // Log an error field for non-string keys - fb.fields["error"] = fmt.Errorf("non-string key in Merge: %v", pairs[i]) + fb.fields = append(fb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("non-string key in Merge: %v", pairs[i]), + }) } } - // Check for uneven pairs (missing value) if len(pairs)%2 != 0 { - fb.fields["error"] = fmt.Errorf("uneven key-value pairs in Merge: [%v]", pairs[len(pairs)-1]) + fb.fields = append(fb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("uneven key-value pairs in Merge: [%v]", pairs[len(pairs)-1]), + }) } return fb } diff --git a/vendor/github.com/olekukonko/ll/global.go b/vendor/github.com/olekukonko/ll/global.go index 85146f5436..dc7c2f4337 100644 --- a/vendor/github.com/olekukonko/ll/global.go +++ 
b/vendor/github.com/olekukonko/ll/global.go @@ -1,11 +1,9 @@ package ll import ( - "os" "sync/atomic" "time" - "github.com/olekukonko/ll/lh" "github.com/olekukonko/ll/lx" ) @@ -14,16 +12,7 @@ import ( // a logger instance. The logger is initialized with default settings: enabled, Debug level, // flat namespace style, and a text handler to os.Stdout. It is thread-safe due to the Logger // struct’s mutex. -var defaultLogger = &Logger{ - enabled: true, // Initially enabled - level: lx.LevelDebug, // Minimum log level set to Debug - namespaces: defaultStore, // Shared namespace store for enable/disable states - context: make(map[string]interface{}), // Empty context for global fields - style: lx.FlatPath, // Flat namespace style (e.g., [parent/child]) - handler: lh.NewTextHandler(os.Stdout), // Default text handler to os.Stdout - middleware: make([]Middleware, 0), // Empty middleware chain - stackBufferSize: 4096, // Buffer size for stack traces -} +var defaultLogger = New("") // Handler sets the handler for the default logger. // It configures the output destination and format (e.g., text, JSON) for logs emitted by @@ -41,9 +30,9 @@ func Handler(handler lx.Handler) *Logger { // the specified level are ignored. Returns the default logger for method chaining. 
// Example: // -// ll.Level(lx.LevelWarn) -// ll.Info("Ignored") // No output -// ll.Warn("Logged") // Output: [] WARN: Logged +// ll.Level(lx.LevelInfo) +// ll.Warn("Ignored") // Inactive output +// ll.Info("Logged") // Output: [] INFO: Logged func Level(level lx.LevelType) *Logger { return defaultLogger.Level(level) } @@ -76,7 +65,7 @@ func NamespaceEnable(path string) *Logger { // Example: // // ll.NamespaceDisable("app/db") -// ll.Clone().Namespace("db").Info("Query") // No output +// ll.Clone().Namespace("db").Info("Query") // Inactive output func NamespaceDisable(path string) *Logger { return defaultLogger.NamespaceDisable(path) } @@ -233,16 +222,25 @@ func Panicf(format string, args ...any) { } // If creates a conditional logger that logs only if the condition is true using the default logger. -// It returns a Conditional struct that wraps the default logger, enabling conditional logging methods. -// Thread-safe via the Logger’s mutex. -// Example: -// -// ll.If(true).Info("Logged") // Output: [] INFO: Logged -// ll.If(false).Info("Ignored") // No output func If(condition bool) *Conditional { return defaultLogger.If(condition) } +// IfErr creates a conditional logger that logs only if the error is non-nil using the default logger. +func IfErr(err error) *Conditional { + return defaultLogger.IfErr(err) +} + +// IfErrAny creates a conditional logger that logs only if AT LEAST ONE error is non-nil using the default logger. +func IfErrAny(errs ...error) *Conditional { + return defaultLogger.IfErrAny(errs...) +} + +// IfErrOne creates a conditional logger that logs only if ALL errors are non-nil using the default logger. +func IfErrOne(errs ...error) *Conditional { + return defaultLogger.IfErrOne(errs...) +} + // Context creates a new logger with additional contextual fields using the default logger. // It preserves existing context fields and adds new ones, returning a new logger instance // to avoid mutating the default logger. Thread-safe with write lock. 
@@ -260,8 +258,8 @@ func Context(fields map[string]interface{}) *Logger { // // ll.AddContext("user", "alice") // ll.Info("Action") // Output: [] INFO: Action [user=alice] -func AddContext(key string, value interface{}) *Logger { - return defaultLogger.AddContext(key, value) +func AddContext(pairs ...any) *Logger { + return defaultLogger.AddContext(pairs...) } // GetContext returns the default logger’s context map of persistent key-value fields. @@ -269,7 +267,7 @@ func AddContext(key string, value interface{}) *Logger { // Example: // // ll.AddContext("user", "alice") -// ctx := ll.GetContext() // Returns map[string]interface{}{"user": "alice"} +// ctx := ll.GetContext() // Returns map[string]interface{}{"user": "alice"}k func GetContext() map[string]interface{} { return defaultLogger.GetContext() } @@ -371,7 +369,7 @@ func StackSize(size int) *Logger { // } // return nil // })) -// ll.Info("Ignored") // No output +// ll.Info("Ignored") // Inactive output // mw.Remove() // ll.Info("Logged") // Output: [] INFO: Logged func Use(fn lx.Handler) *Middleware { @@ -395,7 +393,7 @@ func Remove(m *Middleware) { // // ll.Use(someMiddleware) // ll.Clear() -// ll.Info("No middleware") // Output: [] INFO: No middleware +// ll.Info("Inactive middleware") // Output: [] INFO: Inactive middleware func Clear() *Logger { return defaultLogger.Clear() } @@ -472,6 +470,37 @@ func Measure(fns ...func()) time.Duration { return defaultLogger.Measure(fns...) } +// Labels temporarily attaches one or more label names to the logger for the next log entry. +// Labels are typically used for metrics, benchmarking, tracing, or categorizing logs in a structured way. +// +// The labels are stored atomically and intended to be short-lived, applying only to the next +// log operation (or until overwritten by a subsequent call to Labels). Multiple labels can +// be provided as separate string arguments. 
+// +// Example usage: +// +// logger := New("app").Enable() +// +// // Add labels for a specific operation +// logger.Labels("load_users", "process_orders").Measure(func() { +// // ... perform work ... +// }, func() { +// // ... optional callback ... +// }) +func Labels(names ...string) *Logger { + return defaultLogger.Labels(names...) +} + +// Since creates a timer that will log the duration when completed +// If startTime is provided, uses that as the start time; otherwise uses time.Now() +// +// defer logger.Since().Info("request") // Auto-start +// logger.Since(start).Info("request") // Manual timing +// logger.Since().If(debug).Debug("timing") // Conditional +func Since(start ...time.Time) *SinceBuilder { + return defaultLogger.Since(start...) +} + // Benchmark logs the duration since a start time at Info level using the default logger. // It calculates the time elapsed since the provided start time and logs it with "start", // "end", and "duration" fields. Thread-safe via the Logger’s mutex. 
@@ -516,7 +545,7 @@ func Err(errs ...error) { // Example: // // ll.Shutdown() -// ll.Info("Ignored") // No output +// ll.Info("Ignored") // Inactive output // ll.Start() // ll.Info("Logged") // Output: [] INFO: Logged func Start() { @@ -529,7 +558,7 @@ func Start() { // Example: // // ll.Shutdown() -// ll.Info("Ignored") // No output +// ll.Info("Ignored") // Inactive output func Shutdown() { atomic.StoreInt32(&systemActive, 0) } @@ -551,7 +580,7 @@ func Active() bool { // Example: // // ll.Disable() -// ll.Info("Ignored") // No output +// ll.Info("Ignored") // Inactive output // ll.Enable() // ll.Info("Logged") // Output: [] INFO: Logged func Enable() *Logger { @@ -564,7 +593,7 @@ func Enable() *Logger { // Example: // // ll.Disable() -// ll.Info("Ignored") // No output +// ll.Info("Ignored") // Inactive output func Disable() *Logger { return defaultLogger.Disable() } @@ -586,8 +615,8 @@ func Dbg(any ...interface{}) { // Example: // // ll.Dump([]byte{0x41, 0x42}) // Outputs hex/ASCII dump -func Dump(any interface{}) { - defaultLogger.Dump(any) +func Dump(values ...interface{}) { + defaultLogger.Dump(values...) } // Enabled returns whether the default logger is enabled for logging. @@ -667,3 +696,12 @@ func Inspect(values ...interface{}) { o := NewInspector(defaultLogger) o.Log(2, values...) } + +func Apply(opts ...Option) *Logger { + return defaultLogger.Apply(opts...) 
+ +} + +func Toggle(v bool) *Logger { + return defaultLogger.Toggle(v) +} diff --git a/vendor/github.com/olekukonko/ll/inspector.go b/vendor/github.com/olekukonko/ll/inspector.go index fb6d690190..b816ac5c26 100644 --- a/vendor/github.com/olekukonko/ll/inspector.go +++ b/vendor/github.com/olekukonko/ll/inspector.go @@ -31,7 +31,7 @@ func NewInspector(logger *Logger) *Inspector { // Example usage within a Logger method: // // o := NewInspector(l) -// o.Log(2, someStruct) // Logs JSON representation with caller info +// o.Log(2, someStruct) func (o *Inspector) Log(skip int, values ...interface{}) { // Skip if logger is suspended or Info level is disabled if o.logger.suspend.Load() || !o.logger.shouldLog(lx.LevelInfo) { @@ -74,13 +74,13 @@ func (o *Inspector) Log(skip int, values ...interface{}) { } if err != nil { - o.logger.log(lx.LevelError, lx.ClassText, fmt.Sprintf("Inspector: JSON encoding error: %v", err), nil, false) + o.logger.log(lx.LevelError, lx.ClassInspect, fmt.Sprintf("Inspector: JSON encoding error: %v", err), nil, false) continue } // Construct log message with file, line, and JSON data - msg := fmt.Sprintf("[%s:%d] INSPECT: %s", shortFile, line, string(jsonData)) - o.logger.log(lx.LevelInfo, lx.ClassText, msg, nil, false) + msg := fmt.Sprintf("[%s:%d] %s", shortFile, line, string(jsonData)) + o.logger.log(lx.LevelInfo, lx.ClassInspect, msg, nil, false) } } diff --git a/vendor/github.com/olekukonko/ll/lh/buffered.go b/vendor/github.com/olekukonko/ll/lh/buffered.go index 0fc8c14d7f..1799191da5 100644 --- a/vendor/github.com/olekukonko/ll/lh/buffered.go +++ b/vendor/github.com/olekukonko/ll/lh/buffered.go @@ -15,18 +15,16 @@ import ( type Buffering struct { BatchSize int // Flush when this many entries are buffered (default: 100) FlushInterval time.Duration // Maximum time between flushes (default: 10s) + FlushTimeout time.Duration // FlushTimeout specifies the duration to wait for a flush attempt to complete before timing out. 
MaxBuffer int // Maximum buffer size before applying backpressure (default: 1000) OnOverflow func(int) // Called when buffer reaches MaxBuffer (default: logs warning) + ErrorOutput io.Writer // Destination for internal errors like flush failures (default: os.Stderr) } // BufferingOpt configures Buffered handler. type BufferingOpt func(*Buffering) // WithBatchSize sets the batch size for flushing. -// It specifies the number of log entries to buffer before flushing to the underlying handler. -// Example: -// -// handler := NewBuffered(textHandler, WithBatchSize(50)) // Flush every 50 entries func WithBatchSize(size int) BufferingOpt { return func(c *Buffering) { c.BatchSize = size @@ -34,21 +32,20 @@ func WithBatchSize(size int) BufferingOpt { } // WithFlushInterval sets the maximum time between flushes. -// It defines the interval at which buffered entries are flushed, even if the batch size is not reached. -// Example: -// -// handler := NewBuffered(textHandler, WithFlushInterval(5*time.Second)) // Flush every 5 seconds func WithFlushInterval(d time.Duration) BufferingOpt { return func(c *Buffering) { c.FlushInterval = d } } +// WithFlushTimeout sets the maximum time to wait for a flush to complete. +func WithFlushTimeout(d time.Duration) BufferingOpt { + return func(c *Buffering) { + c.FlushTimeout = d + } +} + // WithMaxBuffer sets the maximum buffer size before backpressure. -// It limits the number of entries that can be queued in the channel, triggering overflow handling if exceeded. -// Example: -// -// handler := NewBuffered(textHandler, WithMaxBuffer(500)) // Allow up to 500 buffered entries func WithMaxBuffer(size int) BufferingOpt { return func(c *Buffering) { c.MaxBuffer = size @@ -56,28 +53,40 @@ func WithMaxBuffer(size int) BufferingOpt { } // WithOverflowHandler sets the overflow callback. -// It specifies a function to call when the buffer reaches MaxBuffer, typically for logging or metrics. 
-// Example: -// -// handler := NewBuffered(textHandler, WithOverflowHandler(func(n int) { fmt.Printf("Overflow: %d entries\n", n) })) func WithOverflowHandler(fn func(int)) BufferingOpt { return func(c *Buffering) { c.OnOverflow = fn } } +// WithErrorOutput sets the destination for internal errors (e.g., downstream handler failures). +func WithErrorOutput(w io.Writer) BufferingOpt { + return func(c *Buffering) { + c.ErrorOutput = w + } +} + +// batchHandler is an optional interface that handlers may implement to receive +// an entire flush batch in a single call instead of one entry at a time. +// When implemented, flushBatch calls HandleBatch once per batch, allowing +// the handler (and test mocks) to track flush operations rather than +// individual per-entry Handle calls. +type batchHandler interface { + HandleBatch(entries []*lx.Entry) error +} + // Buffered wraps any Handler to provide buffering capabilities. // It buffers log entries in a channel and flushes them based on batch size, time interval, or explicit flush. // The generic type H ensures compatibility with any lx.Handler implementation. // Thread-safe via channels and sync primitives. type Buffered[H lx.Handler] struct { - handler H // Underlying handler to process log entries - config *Buffering // Configuration for batching and flushing - entries chan *lx.Entry // Channel for buffering log entries - flushSignal chan struct{} // Channel to trigger explicit flushes - shutdown chan struct{} // Channel to signal worker shutdown - shutdownOnce sync.Once // Ensures Close is called only once - wg sync.WaitGroup // Waits for worker goroutine to finish + handler H + config *Buffering + entries chan *lx.Entry + flushSignal chan struct{} + shutdown chan struct{} + shutdownOnce sync.Once + wg sync.WaitGroup } // NewBuffered creates a new buffered handler that wraps another handler. 
@@ -88,76 +97,96 @@ type Buffered[H lx.Handler] struct { // textHandler := lh.NewTextHandler(os.Stdout) // buffered := NewBuffered(textHandler, WithBatchSize(50)) func NewBuffered[H lx.Handler](handler H, opts ...BufferingOpt) *Buffered[H] { - // Initialize default configuration config := &Buffering{ - BatchSize: 100, // Default: flush every 100 entries - FlushInterval: 10 * time.Second, // Default: flush every 10 seconds - MaxBuffer: 1000, // Default: max 1000 entries in buffer - OnOverflow: func(count int) { // Default: log overflow to io.Discard + BatchSize: 100, + FlushInterval: 10 * time.Second, + MaxBuffer: 1000, + ErrorOutput: os.Stderr, + OnOverflow: func(count int) { fmt.Fprintf(io.Discard, "log buffer overflow: %d entries\n", count) }, } - // Apply provided options for _, opt := range opts { opt(config) } - // Ensure sane configuration values if config.BatchSize < 1 { - config.BatchSize = 1 // Minimum batch size is 1 + config.BatchSize = 1 } + // Ensure the channel always holds at least BatchSize entries so a single + // batch can always be enqueued without blocking. 
if config.MaxBuffer < config.BatchSize { - config.MaxBuffer = config.BatchSize * 10 // Ensure buffer is at least 10x batch size + config.MaxBuffer = config.BatchSize * 10 } if config.FlushInterval <= 0 { - config.FlushInterval = 10 * time.Second // Minimum flush interval is 10s + config.FlushInterval = 10 * time.Second + } + if config.FlushTimeout <= 0 { + config.FlushTimeout = 100 * time.Millisecond + } + if config.ErrorOutput == nil { + config.ErrorOutput = os.Stderr } - // Initialize Buffered handler b := &Buffered[H]{ - handler: handler, // Set underlying handler - config: config, // Set configuration - entries: make(chan *lx.Entry, config.MaxBuffer), // Create buffered channel - flushSignal: make(chan struct{}, 1), // Create single-slot flush signal channel - shutdown: make(chan struct{}), // Create shutdown signal channel + handler: handler, + config: config, + entries: make(chan *lx.Entry, config.MaxBuffer), + flushSignal: make(chan struct{}, 1), + shutdown: make(chan struct{}), } - // Start worker goroutine b.wg.Add(1) go b.worker() - // Set finalizer for cleanup during garbage collection runtime.SetFinalizer(b, (*Buffered[H]).Final) return b } +// cloneEntry creates a deep copy of an entry for safe asynchronous processing. +// The original entry belongs to the logger's pool and is reused immediately after Handle() returns. +func (b *Buffered[H]) cloneEntry(e *lx.Entry) *lx.Entry { + entryCopy := &lx.Entry{ + Timestamp: e.Timestamp, + Level: e.Level, + Message: e.Message, + Namespace: e.Namespace, + Style: e.Style, + Class: e.Class, + Error: e.Error, + Id: e.Id, + } + + if len(e.Fields) > 0 { + entryCopy.Fields = make(lx.Fields, len(e.Fields)) + copy(entryCopy.Fields, e.Fields) + } + + if len(e.Stack) > 0 { + entryCopy.Stack = make([]byte, len(e.Stack)) + copy(entryCopy.Stack, e.Stack) + } + + return entryCopy +} + // Handle implements the lx.Handler interface. -// It buffers log entries in the entries channel or triggers a flush on overflow. 
-// Returns an error if the buffer is full and flush cannot be triggered. -// Thread-safe via non-blocking channel operations. -// Example: -// -// buffered.Handle(&lx.Entry{Message: "test"}) // Buffers entry or triggers flush func (b *Buffered[H]) Handle(e *lx.Entry) error { + entryCopy := b.cloneEntry(e) + select { - case b.entries <- e: // Buffer entry if channel has space + case b.entries <- entryCopy: return nil - default: // Handle buffer overflow + default: if b.config.OnOverflow != nil { - b.config.OnOverflow(len(b.entries)) // Call overflow handler - } - select { - case b.flushSignal <- struct{}{}: // Trigger flush if possible - return fmt.Errorf("log buffer overflow, triggering flush") - default: // Flush already in progress - return fmt.Errorf("log buffer overflow and flush already in progress") + b.config.OnOverflow(len(b.entries)) } + return fmt.Errorf("log buffer overflow") } } // Flush triggers an immediate flush of buffered entries. -// It sends a signal to the worker to process all buffered entries. // If a flush is already pending, it waits briefly and may exit without flushing. // Thread-safe via non-blocking channel operations. // Example: @@ -165,114 +194,137 @@ func (b *Buffered[H]) Handle(e *lx.Entry) error { // buffered.Flush() // Flushes all buffered entries func (b *Buffered[H]) Flush() { select { - case b.flushSignal <- struct{}{}: // Signal worker to flush - case <-time.After(100 * time.Millisecond): // Timeout if flush is pending - // Flush already pending + case b.flushSignal <- struct{}{}: + case <-time.After(b.config.FlushTimeout): } } // Close flushes any remaining entries and stops the worker. // It ensures shutdown is performed only once and waits for the worker to finish. +// If the underlying handler implements a Close() error method, it will be called to release resources. // Thread-safe via sync.Once and WaitGroup. -// Returns nil as it does not produce errors. +// Returns any error from the underlying handler's Close, or nil. 
// Example: // // buffered.Close() // Flushes entries and stops worker func (b *Buffered[H]) Close() error { + var closeErr error b.shutdownOnce.Do(func() { - close(b.shutdown) // Signal worker to shut down - b.wg.Wait() // Wait for worker to finish - runtime.SetFinalizer(b, nil) // Remove finalizer + close(b.shutdown) + b.wg.Wait() + runtime.SetFinalizer(b, nil) + + if closer, ok := any(b.handler).(interface{ Close() error }); ok { + closeErr = closer.Close() + } }) - return nil + return closeErr } // Final ensures remaining entries are flushed during garbage collection. -// It calls Close to flush entries and stop the worker. -// Used as a runtime finalizer to prevent log loss. -// Example (internal usage): -// -// runtime.SetFinalizer(buffered, (*Buffered[H]).Final) func (b *Buffered[H]) Final() { b.Close() } // Config returns the current configuration of the Buffered handler. -// It provides access to BatchSize, FlushInterval, MaxBuffer, and OnOverflow settings. -// Example: -// -// config := buffered.Config() // Access configuration func (b *Buffered[H]) Config() *Buffering { return b.config } // worker processes entries and handles flushing. -// It runs in a goroutine, buffering entries, flushing on batch size, timer, or explicit signal, -// and shutting down cleanly when signaled. -// Thread-safe via channel operations and WaitGroup. 
func (b *Buffered[H]) worker() { - defer b.wg.Done() // Signal completion when worker exits - batch := make([]*lx.Entry, 0, b.config.BatchSize) // Buffer for batching entries - ticker := time.NewTicker(b.config.FlushInterval) // Timer for periodic flushes - defer ticker.Stop() // Clean up ticker + defer b.wg.Done() + batch := make([]*lx.Entry, 0, b.config.BatchSize) + ticker := time.NewTicker(b.config.FlushInterval) + defer ticker.Stop() + for { select { - case entry := <-b.entries: // Receive new entry + case entry := <-b.entries: batch = append(batch, entry) - // Flush if batch size is reached if len(batch) >= b.config.BatchSize { b.flushBatch(batch) batch = batch[:0] + ticker.Reset(b.config.FlushInterval) } - case <-ticker.C: // Periodic flush + case <-ticker.C: if len(batch) > 0 { b.flushBatch(batch) batch = batch[:0] } - case <-b.flushSignal: // Explicit flush + case <-b.flushSignal: if len(batch) > 0 { b.flushBatch(batch) batch = batch[:0] } - b.drainRemaining() // Drain all entries from the channel - case <-b.shutdown: // Shutdown signal + b.drainRemaining() + ticker.Reset(b.config.FlushInterval) + case <-b.shutdown: + // Merge whatever is already in batch with anything remaining in + // the channel, then flush everything in a single call so that + // callCount increments exactly once regardless of how many + // entries the worker happened to have pre-loaded into batch. + batch = b.collectRemaining(batch) if len(batch) > 0 { b.flushBatch(batch) } - b.drainRemaining() // Flush remaining entries return } } } // flushBatch processes a batch of entries through the wrapped handler. -// It writes each entry to the underlying handler, logging any errors to stderr. -// Example (internal usage): -// -// b.flushBatch([]*lx.Entry{entry1, entry2}) +// If the handler implements the batchHandler interface, the entire batch is +// delivered in a single HandleBatch call (one "flush event"). Otherwise each +// entry is forwarded individually via Handle. 
func (b *Buffered[H]) flushBatch(batch []*lx.Entry) { + if bh, ok := any(b.handler).(batchHandler); ok { + if err := bh.HandleBatch(batch); err != nil { + if b.config.ErrorOutput != nil { + fmt.Fprintf(b.config.ErrorOutput, "log flush error: %v\n", err) + } + } + return + } for _, entry := range batch { - // Process each entry through the handler if err := b.handler.Handle(entry); err != nil { - fmt.Fprintf(os.Stderr, "log flush error: %v\n", err) // Log errors to stderr + if b.config.ErrorOutput != nil { + fmt.Fprintf(b.config.ErrorOutput, "log flush error: %v\n", err) + } + } + } +} + +// collectRemaining drains the entries channel into the provided slice and +// returns the extended slice without flushing, so the caller can flush +// everything atomically in a single batch. +func (b *Buffered[H]) collectRemaining(batch []*lx.Entry) []*lx.Entry { + for { + select { + case entry := <-b.entries: + batch = append(batch, entry) + default: + return batch } } } // drainRemaining processes any remaining entries in the channel. -// It flushes all entries from the entries channel to the underlying handler, -// logging any errors to stderr. Used during flush or shutdown. -// Example (internal usage): -// -// b.drainRemaining() // Flushes all pending entries +// Collects entries into a batch and flushes them together for efficiency. 
func (b *Buffered[H]) drainRemaining() { + batch := make([]*lx.Entry, 0, b.config.BatchSize) for { select { - case entry := <-b.entries: // Process next entry - if err := b.handler.Handle(entry); err != nil { - fmt.Fprintf(os.Stderr, "log drain error: %v\n", err) // Log errors to stderr + case entry := <-b.entries: + batch = append(batch, entry) + if len(batch) >= b.config.BatchSize { + b.flushBatch(batch) + batch = batch[:0] + } + default: + if len(batch) > 0 { + b.flushBatch(batch) } - default: // Exit when channel is empty return } } diff --git a/vendor/github.com/olekukonko/ll/lh/colorized.go b/vendor/github.com/olekukonko/ll/lh/colorized.go index e343ff3816..81736817f0 100644 --- a/vendor/github.com/olekukonko/ll/lh/colorized.go +++ b/vendor/github.com/olekukonko/ll/lh/colorized.go @@ -1,10 +1,12 @@ package lh import ( + "bytes" "fmt" + "hash/fnv" "io" "os" - "sort" + "strconv" "strings" "sync" "time" @@ -12,15 +14,23 @@ import ( "github.com/olekukonko/ll/lx" ) +// ColorIntensity defines the intensity level for ANSI colors +type ColorIntensity int + +const ( + IntensityNormal ColorIntensity = iota + IntensityBright + IntensityPastel + IntensityVibrant +) + // Palette defines ANSI color codes for various log components. -// It specifies colors for headers, goroutines, functions, paths, stack traces, and log levels, -// used by ColorizedHandler to format log output with color. 
type Palette struct { Header string // Color for stack trace header and dump separators Goroutine string // Color for goroutine lines in stack traces Func string // Color for function names in stack traces Path string // Color for file paths in stack traces - FileLine string // Color for file line numbers (not used in provided code) + FileLine string // Color for file line numbers Reset string // Reset code to clear color formatting Pos string // Color for position in hex dumps Hex string // Color for hex values in dumps @@ -29,134 +39,337 @@ type Palette struct { Info string // Color for Info level messages Warn string // Color for Warn level messages Error string // Color for Error level messages + Fatal string // Color for Fatal level messages Title string // Color for dump titles (BEGIN/END separators) + // Field type colors + Key string // Color for field keys + Number string // Color for numbers + String string // Color for strings + Bool string // Color for booleans + Time string // Color for timestamps/durations + Nil string // Color for nil values + Default string // Default color for unknown types + // JSON and Inspect specific colors + JSONKey string // Color for JSON keys + JSONString string // Color for JSON string values + JSONNumber string // Color for JSON number values + JSONBool string // Color for JSON boolean values + JSONNull string // Color for JSON null values + JSONBrace string // Color for JSON braces and brackets + InspectKey string // Color for inspect keys + InspectValue string // Color for inspect values + InspectMeta string // Color for inspect metadata (annotations) } // darkPalette defines colors optimized for dark terminal backgrounds. -// It uses bright, contrasting colors for readability on dark backgrounds. 
var darkPalette = Palette{ - Header: "\033[1;31m", // Bold red for headers - Goroutine: "\033[1;36m", // Bold cyan for goroutines - Func: "\033[97m", // Bright white for functions - Path: "\033[38;5;245m", // Light gray for paths - FileLine: "\033[38;5;111m", // Muted light blue (unused) - Reset: "\033[0m", // Reset color formatting - - Title: "\033[38;5;245m", // Light gray for dump titles - Pos: "\033[38;5;117m", // Light blue for dump positions - Hex: "\033[38;5;156m", // Light green for hex values - Ascii: "\033[38;5;224m", // Light pink for ASCII values - - Debug: "\033[36m", // Cyan for Debug level - Info: "\033[32m", // Green for Info level - Warn: "\033[33m", // Yellow for Warn level - Error: "\033[31m", // Red for Error level + Header: "\033[1;38;5;203m", // Brighter red + Goroutine: "\033[1;38;5;51m", // Bright cyan + Func: "\033[1;97m", // Bright white + Path: "\033[38;5;110m", // Brighter gray-blue + FileLine: "\033[38;5;117m", // Bright blue + Reset: "\033[0m", + Title: "\033[38;5;245m", + Pos: "\033[38;5;117m", + Hex: "\033[38;5;156m", + Ascii: "\033[38;5;224m", + Debug: "\033[36m", + Info: "\033[32m", + Warn: "\033[33m", + Error: "\033[31m", + Fatal: "\033[1;31m", + // Field type colors - made brighter for dark backgrounds + Key: "\033[38;5;117m", // Brighter blue + Number: "\033[38;5;141m", // Brighter purple + String: "\033[38;5;223m", // Brighter yellow/orange + Bool: "\033[38;5;85m", // Brighter green + Time: "\033[38;5;110m", // Brighter cyan-blue + Nil: "\033[38;5;243m", // Slightly brighter gray + Default: "\033[38;5;250m", // Brighter gray + // JSON and Inspect colors + JSONKey: "\033[38;5;117m", + JSONString: "\033[38;5;223m", + JSONNumber: "\033[38;5;141m", + JSONBool: "\033[38;5;85m", + JSONNull: "\033[38;5;243m", + JSONBrace: "\033[38;5;245m", + InspectKey: "\033[38;5;117m", + InspectValue: "\033[38;5;223m", + InspectMeta: "\033[38;5;243m", } // lightPalette defines colors optimized for light terminal backgrounds. 
-// It uses darker colors for better contrast on light backgrounds. var lightPalette = Palette{ - Header: "\033[1;31m", // Same red for headers - Goroutine: "\033[34m", // Blue (darker for light bg) - Func: "\033[30m", // Black text for functions - Path: "\033[90m", // Dark gray for paths - FileLine: "\033[94m", // Blue for file lines (unused) - Reset: "\033[0m", // Reset color formatting + Header: "\033[1;31m", + Goroutine: "\033[34m", + Func: "\033[30m", + Path: "\033[90m", + FileLine: "\033[94m", + Reset: "\033[0m", + Title: "\033[38;5;245m", + Pos: "\033[38;5;117m", + Hex: "\033[38;5;156m", + Ascii: "\033[38;5;224m", + Debug: "\033[36m", + Info: "\033[32m", + Warn: "\033[33m", + Error: "\033[31m", + Fatal: "\033[1;31m", + Key: "\033[34m", + Number: "\033[35m", + String: "\033[38;5;94m", + Bool: "\033[32m", + Time: "\033[38;5;24m", + Nil: "\033[38;5;240m", + Default: "\033[30m", + JSONKey: "\033[1;34m", + JSONString: "\033[1;33m", + JSONNumber: "\033[1;35m", + JSONBool: "\033[1;32m", + JSONNull: "\033[1;37m", + JSONBrace: "\033[1;37m", + InspectKey: "\033[1;34m", + InspectValue: "\033[1;33m", + InspectMeta: "\033[1;37m", +} + +// brightPalette defines vibrant, high-contrast colors +var brightPalette = Palette{ + Header: "\033[1;91m", + Goroutine: "\033[1;96m", + Func: "\033[1;97m", + Path: "\033[38;5;250m", + FileLine: "\033[38;5;117m", + Reset: "\033[0m", + Title: "\033[1;37m", + Pos: "\033[1;33m", + Hex: "\033[1;32m", + Ascii: "\033[1;35m", + Debug: "\033[1;36m", + Info: "\033[1;32m", + Warn: "\033[1;33m", + Error: "\033[1;31m", + Fatal: "\033[1;91m", + Key: "\033[1;34m", + Number: "\033[1;35m", + String: "\033[1;33m", + Bool: "\033[1;32m", + Time: "\033[1;36m", + Nil: "\033[1;37m", + Default: "\033[1;37m", + JSONKey: "\033[1;34m", + JSONString: "\033[1;33m", + JSONNumber: "\033[1;35m", + JSONBool: "\033[1;32m", + JSONNull: "\033[1;37m", + JSONBrace: "\033[1;37m", + InspectKey: "\033[1;34m", + InspectValue: "\033[1;33m", + InspectMeta: "\033[1;37m", +} + +// 
pastelPalette defines soft, pastel colors +var pastelPalette = Palette{ + Header: "\033[38;5;211m", + Goroutine: "\033[38;5;153m", + Func: "\033[38;5;255m", + Path: "\033[38;5;248m", + FileLine: "\033[38;5;111m", + Reset: "\033[0m", + Title: "\033[38;5;248m", + Pos: "\033[38;5;153m", + Hex: "\033[38;5;158m", + Ascii: "\033[38;5;218m", + Debug: "\033[38;5;122m", + Info: "\033[38;5;120m", + Warn: "\033[38;5;221m", + Error: "\033[38;5;211m", + Fatal: "\033[38;5;204m", + Key: "\033[38;5;153m", + Number: "\033[38;5;183m", + String: "\033[38;5;223m", + Bool: "\033[38;5;120m", + Time: "\033[38;5;117m", + Nil: "\033[38;5;247m", + Default: "\033[38;5;250m", + JSONKey: "\033[38;5;153m", + JSONString: "\033[38;5;223m", + JSONNumber: "\033[38;5;183m", + JSONBool: "\033[38;5;120m", + JSONNull: "\033[38;5;247m", + JSONBrace: "\033[38;5;247m", + InspectKey: "\033[38;5;153m", + InspectValue: "\033[38;5;223m", + InspectMeta: "\033[38;5;247m", +} + +// vibrantPalette defines highly saturated, eye-catching colors +var vibrantPalette = Palette{ + Header: "\033[38;5;196m", + Goroutine: "\033[38;5;51m", + Func: "\033[38;5;15m", + Path: "\033[38;5;244m", + FileLine: "\033[38;5;75m", + Reset: "\033[0m", + Title: "\033[38;5;244m", + Pos: "\033[38;5;51m", + Hex: "\033[38;5;46m", + Ascii: "\033[38;5;201m", + Debug: "\033[38;5;51m", + Info: "\033[38;5;46m", + Warn: "\033[38;5;226m", + Error: "\033[38;5;196m", + Fatal: "\033[1;38;5;196m", + Key: "\033[38;5;33m", + Number: "\033[38;5;129m", + String: "\033[38;5;214m", + Bool: "\033[38;5;46m", + Time: "\033[38;5;75m", + Nil: "\033[38;5;242m", + Default: "\033[38;5;15m", + JSONKey: "\033[38;5;33m", + JSONString: "\033[38;5;214m", + JSONNumber: "\033[38;5;129m", + JSONBool: "\033[38;5;46m", + JSONNull: "\033[38;5;242m", + JSONBrace: "\033[38;5;242m", + InspectKey: "\033[38;5;33m", + InspectValue: "\033[38;5;214m", + InspectMeta: "\033[38;5;242m", +} - Title: "\033[38;5;245m", // Light gray for dump titles - Pos: "\033[38;5;117m", // Light blue for 
dump positions - Hex: "\033[38;5;156m", // Light green for hex values - Ascii: "\033[38;5;224m", // Light pink for ASCII values +// noColorPalette defines a palette with empty strings for environments without color support +var noColorPalette = Palette{ + Header: "", Goroutine: "", Func: "", Path: "", FileLine: "", Reset: "", + Title: "", Pos: "", Hex: "", Ascii: "", Debug: "", Info: "", Warn: "", Error: "", Fatal: "", + Key: "", Number: "", String: "", Bool: "", Time: "", Nil: "", Default: "", + JSONKey: "", JSONString: "", JSONNumber: "", JSONBool: "", JSONNull: "", JSONBrace: "", + InspectKey: "", InspectValue: "", InspectMeta: "", +} - Debug: "\033[36m", // Cyan for Debug level - Info: "\033[32m", // Green for Info level - Warn: "\033[33m", // Yellow for Warn level - Error: "\033[31m", // Red for Error level +// colorBufPool is a pool of bytes.Buffer instances to reduce allocations +var colorBufPool = sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, } // ColorizedHandler is a handler that outputs log entries with ANSI color codes. -// It formats log entries with colored namespace, level, message, fields, and stack traces, -// writing the result to the provided writer. -// Thread-safe if the underlying writer is thread-safe. type ColorizedHandler struct { - w io.Writer // Destination for colored log output - palette Palette // Color scheme for formatting - showTime bool // Whether to display timestamps - timeFormat string // Format for timestamps (defaults to time.RFC3339) - mu sync.Mutex + writer io.Writer + palette Palette + showTime bool + timeFormat string + mu sync.Mutex + noColor bool // Whether to disable colors entirely + intensity ColorIntensity // Color intensity level + colorFields bool // Whether to colorize fields (default: true) } // ColorOption defines a configuration function for ColorizedHandler. -// It allows customization of the handler, such as setting the color palette. 
type ColorOption func(*ColorizedHandler) // WithColorPallet sets the color palette for the ColorizedHandler. -// It allows specifying a custom Palette for dark or light terminal backgrounds. -// Example: -// -// handler := NewColorizedHandler(os.Stdout, WithColorPallet(lightPalette)) func WithColorPallet(pallet Palette) ColorOption { return func(c *ColorizedHandler) { c.palette = pallet } } -// NewColorizedHandler creates a new ColorizedHandler writing to the specified writer. -// It initializes the handler with a detected or specified color palette and applies -// optional configuration functions. +// WithColorNone disables all color output. +func WithColorNone() ColorOption { + return func(c *ColorizedHandler) { + c.noColor = true + c.colorFields = false // Also disable field coloring + } +} + +// WithColorField enables or disables field coloring specifically. +// This is useful for performance optimization or when field colors are too much. // Example: // -// handler := NewColorizedHandler(os.Stdout) -// logger := ll.New("app").Enable().Handler(handler) -// logger.Info("Test") // Output: [app] : Test -func NewColorizedHandler(w io.Writer, opts ...ColorOption) *ColorizedHandler { - // Initialize with writer - c := &ColorizedHandler{w: w, - showTime: false, - timeFormat: time.RFC3339, +// handler := NewColorizedHandler(os.Stdout, WithColorField(false)) // Disable field coloring only +func WithColorField(enable bool) ColorOption { + return func(c *ColorizedHandler) { + c.colorFields = enable } +} + +// WithColorShowTime enables or disables the display of timestamps. +func WithColorShowTime(show bool) ColorOption { + return func(c *ColorizedHandler) { + c.showTime = show + } +} + +// WithColorIntensity sets the color intensity for the ColorizedHandler. 
+func WithColorIntensity(intensity ColorIntensity) ColorOption { + return func(c *ColorizedHandler) { + c.intensity = intensity + } +} + +// WithColorTheme configures the ColorizedHandler to use a specific color theme based on the provided theme name. +func WithColorTheme(theme string) ColorOption { + return func(c *ColorizedHandler) { + switch strings.ToLower(theme) { + case "light": + c.palette = lightPalette + case "dark": + c.palette = darkPalette + case "bright": + c.palette = brightPalette + case "pastel": + c.palette = pastelPalette + case "vibrant": + c.palette = vibrantPalette + } + } +} - // Apply configuration options +// NewColorizedHandler creates a new ColorizedHandler writing to the specified writer. +func NewColorizedHandler(w io.Writer, opts ...ColorOption) *ColorizedHandler { + c := &ColorizedHandler{ + writer: w, + showTime: false, + timeFormat: time.RFC3339, + noColor: false, + intensity: IntensityNormal, + colorFields: true, // Default: enable field coloring + } for _, opt := range opts { opt(c) } - // Detect palette if not set c.palette = c.detectPalette() return c } +func (h *ColorizedHandler) Output(w io.Writer) { + h.mu.Lock() + defer h.mu.Unlock() + h.writer = w +} + // Handle processes a log entry and writes it with ANSI color codes. -// It delegates to specialized methods based on the entry's class (Dump, Raw, or regular). -// Returns an error if writing to the underlying writer fails. -// Thread-safe if the writer is thread-safe. 
-// Example: -// -// handler.Handle(&lx.Entry{Message: "test", Level: lx.LevelInfo}) // Writes colored output func (h *ColorizedHandler) Handle(e *lx.Entry) error { - h.mu.Lock() defer h.mu.Unlock() - switch e.Class { case lx.ClassDump: - // Handle hex dump entries return h.handleDumpOutput(e) + case lx.ClassJSON, lx.ClassOutput: + return h.handleJSONOutput(e) + case lx.ClassInspect: + return h.handleInspectOutput(e) case lx.ClassRaw: - // Write raw entries directly - _, err := h.w.Write([]byte(e.Message)) + _, err := h.writer.Write([]byte(e.Message)) return err default: - // Handle standard log entries return h.handleRegularOutput(e) } } -// Timestamped enables or disables timestamp display and optionally sets a custom time format. -// If format is empty, defaults to RFC3339. -// Example: -// -// handler := NewColorizedHandler(os.Stdout).Timestamped(true, time.StampMilli) -// // Output: Jan 02 15:04:05.000 [app] INFO: Test +// Timestamped enables or disables timestamp display. func (h *ColorizedHandler) Timestamped(enable bool, format ...string) { h.showTime = enable if len(format) > 0 && format[0] != "" { @@ -165,61 +378,268 @@ func (h *ColorizedHandler) Timestamped(enable bool, format ...string) { } // handleRegularOutput handles normal log entries. -// It formats the entry with colored namespace, level, message, fields, and stack trace (if present), -// writing the result to the handler's writer. -// Returns an error if writing fails. 
-// Example (internal usage): -// -// h.handleRegularOutput(&lx.Entry{Message: "test", Level: lx.LevelInfo}) // Writes colored output func (h *ColorizedHandler) handleRegularOutput(e *lx.Entry) error { - var builder strings.Builder // Buffer for building formatted output - - // Add timestamp if enabled + buf := colorBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer colorBufPool.Put(buf) if h.showTime { - builder.WriteString(e.Timestamp.Format(h.timeFormat)) - builder.WriteString(lx.Space) + buf.WriteString(e.Timestamp.Format(h.timeFormat)) + buf.WriteString(lx.Space) } - - // Format namespace with colors - h.formatNamespace(&builder, e) - - // Format level with color based on severity - h.formatLevel(&builder, e) - - // Add message and fields - builder.WriteString(e.Message) - h.formatFields(&builder, e) - - // fmt.Println("------------>", len(e.Stack)) - // Format stack trace if present + h.formatNamespace(buf, e) + h.formatLevel(buf, e) + buf.WriteString(e.Message) + h.formatFields(buf, e) if len(e.Stack) > 0 { - h.formatStack(&builder, e.Stack) + h.formatStack(buf, e.Stack) } - - // Append newline for non-None levels if e.Level != lx.LevelNone { - builder.WriteString(lx.Newline) + buf.WriteString(lx.Newline) } + _, err := h.writer.Write(buf.Bytes()) + return err +} - // Write formatted output to writer - _, err := h.w.Write([]byte(builder.String())) +// handleJSONOutput handles JSON log entries. +func (h *ColorizedHandler) handleJSONOutput(e *lx.Entry) error { + buf := colorBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer colorBufPool.Put(buf) + if h.showTime { + buf.WriteString(e.Timestamp.Format(h.timeFormat)) + buf.WriteString(lx.Newline) + } + if e.Namespace != "" { + h.formatNamespace(buf, e) + h.formatLevel(buf, e) + } + h.colorizeJSON(buf, e.Message) + buf.WriteString(lx.Newline) + _, err := h.writer.Write(buf.Bytes()) + return err +} + +// handleInspectOutput handles inspect log entries. 
+func (h *ColorizedHandler) handleInspectOutput(e *lx.Entry) error { + buf := colorBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer colorBufPool.Put(buf) + if h.showTime { + buf.WriteString(e.Timestamp.Format(h.timeFormat)) + buf.WriteString(lx.Space) + } + h.formatNamespace(buf, e) + h.formatLevel(buf, e) + h.colorizeInspect(buf, e.Message) + buf.WriteString(lx.Newline) + _, err := h.writer.Write(buf.Bytes()) return err } +// colorizeJSON applies syntax highlighting to JSON strings without changing formatting +func (h *ColorizedHandler) colorizeJSON(b *bytes.Buffer, jsonStr string) { + inString := false + escapeNext := false + for i := 0; i < len(jsonStr); i++ { + ch := jsonStr[i] + if escapeNext { + b.WriteByte(ch) + escapeNext = false + continue + } + switch ch { + case '\\': + escapeNext = true + if inString { + b.WriteString(h.palette.JSONString) + } + b.WriteByte(ch) + case '"': + if inString { + // End of string + b.WriteString(h.palette.JSONString) + b.WriteByte(ch) + b.WriteString(h.palette.Reset) + inString = false + } else { + // Start of string + inString = true + b.WriteString(h.palette.JSONString) + b.WriteByte(ch) + } + case ':': + if !inString { + b.WriteString(h.palette.JSONBrace) + b.WriteByte(ch) + b.WriteString(h.palette.Reset) + } else { + b.WriteByte(ch) + } + case '{', '}', '[', ']', ',': + if !inString { + b.WriteString(h.palette.JSONBrace) + b.WriteByte(ch) + b.WriteString(h.palette.Reset) + } else { + b.WriteByte(ch) + } + default: + if !inString { + // Check for numbers, booleans, null + remaining := jsonStr[i:] + // Check for null + if len(remaining) >= 4 && strings.HasPrefix(remaining, "null") { + b.WriteString(h.palette.JSONNull) + b.WriteString("null") + b.WriteString(h.palette.Reset) + i += 3 // Skip "null" + } else if len(remaining) >= 4 && strings.HasPrefix(remaining, "true") { + b.WriteString(h.palette.JSONBool) + b.WriteString("true") + b.WriteString(h.palette.Reset) + i += 3 // Skip "true" + } else if len(remaining) >= 5 && 
strings.HasPrefix(remaining, "false") { + b.WriteString(h.palette.JSONBool) + b.WriteString("false") + b.WriteString(h.palette.Reset) + i += 4 // Skip "false" + } else if (ch >= '0' && ch <= '9') || ch == '-' || ch == '.' { + b.WriteString(h.palette.JSONNumber) + b.WriteByte(ch) + // Continue writing digits + for j := i + 1; j < len(jsonStr); j++ { + nextCh := jsonStr[j] + if (nextCh >= '0' && nextCh <= '9') || nextCh == '.' || nextCh == 'e' || nextCh == 'E' || nextCh == '+' || nextCh == '-' { + b.WriteByte(nextCh) + i = j + } else { + break + } + } + b.WriteString(h.palette.Reset) + } else if ch == ' ' || ch == '\n' || ch == '\t' || ch == '\r' { + // Preserve whitespace exactly as is + b.WriteByte(ch) + } else { + // Unexpected character outside string - preserve it + b.WriteByte(ch) + } + } else { + // Inside string + b.WriteByte(ch) + } + } + } +} + +// colorizeInspect applies syntax highlighting to inspect output +func (h *ColorizedHandler) colorizeInspect(b *bytes.Buffer, inspectStr string) { + lines := strings.Split(inspectStr, "\n") + for lineIdx, line := range lines { + if lineIdx > 0 { + b.WriteString("\n") + } + trimmed := strings.TrimSpace(line) + if trimmed == "" { + b.WriteString(line) + continue + } + // For inspect output, we'll do simple line-based coloring + // This preserves the original formatting + inString := false + escapeNext := false + for i := 0; i < len(line); i++ { + ch := line[i] + if escapeNext { + b.WriteByte(ch) + escapeNext = false + continue + } + if ch == '\\' { + escapeNext = true + b.WriteByte(ch) + continue + } + if ch == '"' { + inString = !inString + if inString { + // Check if this is a metadata key + if i+1 < len(line) && line[i+1] == '(' { + b.WriteString(h.palette.InspectMeta) + } else if i+2 < len(line) && line[i+1] == '*' && line[i+2] == '(' { + b.WriteString(h.palette.InspectMeta) + } else { + b.WriteString(h.palette.InspectKey) + } + } + b.WriteByte(ch) + if !inString { + b.WriteString(h.palette.Reset) + } + continue + 
} + if inString { + // Inside a string key or value + b.WriteByte(ch) + } else { + // Outside strings + if ch == ':' { + b.WriteString(h.palette.JSONBrace) + b.WriteByte(ch) + b.WriteString(h.palette.Reset) + } else if ch == '{' || ch == '}' || ch == '[' || ch == ']' || ch == ',' { + b.WriteString(h.palette.JSONBrace) + b.WriteByte(ch) + b.WriteString(h.palette.Reset) + } else { + // Check for numbers, booleans, null outside strings + remaining := line[i:] + if len(remaining) >= 4 && strings.HasPrefix(remaining, "null") { + b.WriteString(h.palette.JSONNull) + b.WriteString("null") + b.WriteString(h.palette.Reset) + i += 3 + } else if len(remaining) >= 4 && strings.HasPrefix(remaining, "true") { + b.WriteString(h.palette.JSONBool) + b.WriteString("true") + b.WriteString(h.palette.Reset) + i += 3 + } else if len(remaining) >= 5 && strings.HasPrefix(remaining, "false") { + b.WriteString(h.palette.JSONBool) + b.WriteString("false") + b.WriteString(h.palette.Reset) + i += 4 + } else if (ch >= '0' && ch <= '9') || ch == '-' { + b.WriteString(h.palette.InspectValue) + b.WriteByte(ch) + // Continue writing digits + for j := i + 1; j < len(line); j++ { + nextCh := line[j] + if (nextCh >= '0' && nextCh <= '9') || nextCh == '.' { + b.WriteByte(nextCh) + i = j + } else { + break + } + } + b.WriteString(h.palette.Reset) + } else { + b.WriteByte(ch) + } + } + } + } + } +} + // formatNamespace formats the namespace with ANSI color codes. -// It supports FlatPath ([parent/child]) and NestedPath ([parent]→[child]) styles. 
-// Example (internal usage): -// -// h.formatNamespace(&builder, &lx.Entry{Namespace: "parent/child", Style: lx.FlatPath}) // Writes "[parent/child]: " -func (h *ColorizedHandler) formatNamespace(b *strings.Builder, e *lx.Entry) { +func (h *ColorizedHandler) formatNamespace(b *bytes.Buffer, e *lx.Entry) { if e.Namespace == "" { return } - b.WriteString(lx.LeftBracket) switch e.Style { case lx.NestedPath: - // Split namespace and format as [parent]→[child] parts := strings.Split(e.Namespace, lx.Slash) for i, part := range parts { b.WriteString(part) @@ -229,8 +649,7 @@ func (h *ColorizedHandler) formatNamespace(b *strings.Builder, e *lx.Entry) { b.WriteString(lx.LeftBracket) } } - default: // FlatPath - // Format as [parent/child] + default: b.WriteString(e.Namespace) b.WriteString(lx.RightBracket) } @@ -239,87 +658,147 @@ func (h *ColorizedHandler) formatNamespace(b *strings.Builder, e *lx.Entry) { } // formatLevel formats the log level with ANSI color codes. -// It applies a color based on the level (Debug, Info, Warn, Error) and resets afterward. -// Example (internal usage): -// -// h.formatLevel(&builder, &lx.Entry{Level: lx.LevelInfo}) // Writes "INFO: " -func (h *ColorizedHandler) formatLevel(b *strings.Builder, e *lx.Entry) { - // Map levels to colors +func (h *ColorizedHandler) formatLevel(b *bytes.Buffer, e *lx.Entry) { color := map[lx.LevelType]string{ - lx.LevelDebug: h.palette.Debug, // Cyan - lx.LevelInfo: h.palette.Info, // Green - lx.LevelWarn: h.palette.Warn, // Yellow - lx.LevelError: h.palette.Error, // Red + lx.LevelDebug: h.palette.Debug, + lx.LevelInfo: h.palette.Info, + lx.LevelWarn: h.palette.Warn, + lx.LevelError: h.palette.Error, + lx.LevelFatal: h.palette.Fatal, }[e.Level] - b.WriteString(color) - b.WriteString(e.Level.String()) + b.WriteString(e.Level.Name(e.Class)) b.WriteString(h.palette.Reset) + // b.WriteString(lx.Space) b.WriteString(lx.Colon) b.WriteString(lx.Space) } // formatFields formats the log entry's fields in sorted order. 
-// It writes fields as [key=value key=value], with no additional coloring. -// Example (internal usage): -// -// h.formatFields(&builder, &lx.Entry{Fields: map[string]interface{}{"key": "value"}}) // Writes " [key=value]" -func (h *ColorizedHandler) formatFields(b *strings.Builder, e *lx.Entry) { +func (h *ColorizedHandler) formatFields(b *bytes.Buffer, e *lx.Entry) { if len(e.Fields) == 0 { return } - - // Collect and sort field keys - var keys []string - for k := range e.Fields { - keys = append(keys, k) - } - sort.Strings(keys) - b.WriteString(lx.Space) b.WriteString(lx.LeftBracket) - // Format fields as key=value - for i, k := range keys { + for i, pair := range e.Fields { if i > 0 { b.WriteString(lx.Space) } - b.WriteString(k) - b.WriteString("=") - b.WriteString(fmt.Sprint(e.Fields[k])) + if h.colorFields { + // Color the key + b.WriteString(h.palette.Key) + b.WriteString(pair.Key) + b.WriteString(h.palette.Reset) + b.WriteString("=") + // Format value with type-based coloring + h.formatFieldValue(b, pair.Value) + } else { + // No field coloring - just write plain text + b.WriteString(pair.Key) + b.WriteString("=") + writeFieldValue(b, pair.Value) + } } b.WriteString(lx.RightBracket) } +// formatFieldValue formats a field value with type-based ANSI color codes. 
+func (h *ColorizedHandler) formatFieldValue(b *bytes.Buffer, value interface{}) { + // If field coloring is disabled, just write the value + if !h.colorFields { + writeFieldValue(b, value) + return + } + switch v := value.(type) { + case time.Time: + b.WriteString(h.palette.Time) + b.WriteString(v.Format("2006-01-02 15:04:05")) + b.WriteString(h.palette.Reset) + case time.Duration: + b.WriteString(h.palette.Time) + h.formatDuration(b, v) + b.WriteString(h.palette.Reset) + case error: + b.WriteString(h.palette.Error) + b.WriteString(`"`) + b.WriteString(v.Error()) + b.WriteString(`"`) + b.WriteString(h.palette.Reset) + case int, int8, int16, int32, int64: + b.WriteString(h.palette.Number) + writeFieldValue(b, v) + b.WriteString(h.palette.Reset) + case uint, uint8, uint16, uint32, uint64: + b.WriteString(h.palette.Number) + writeFieldValue(b, v) + b.WriteString(h.palette.Reset) + case float32, float64: + b.WriteString(h.palette.Number) + writeFieldValue(b, v) + b.WriteString(h.palette.Reset) + case string: + b.WriteString(h.palette.String) + b.WriteString(`"`) + b.WriteString(v) + b.WriteString(`"`) + b.WriteString(h.palette.Reset) + case bool: + b.WriteString(h.palette.Bool) + writeFieldValue(b, v) + b.WriteString(h.palette.Reset) + case nil: + b.WriteString(h.palette.Nil) + b.WriteString("nil") + b.WriteString(h.palette.Reset) + default: + b.WriteString(h.palette.Default) + writeFieldValue(b, v) + b.WriteString(h.palette.Reset) + } +} + +// formatDuration formats a duration in a human-readable way +func (h *ColorizedHandler) formatDuration(b *bytes.Buffer, d time.Duration) { + if d < time.Microsecond { + b.WriteString(d.String()) + } else if d < time.Millisecond { + fmt.Fprintf(b, "%.3fµs", float64(d)/float64(time.Microsecond)) + } else if d < time.Second { + fmt.Fprintf(b, "%.3fms", float64(d)/float64(time.Millisecond)) + } else if d < time.Minute { + fmt.Fprintf(b, "%.3fs", float64(d)/float64(time.Second)) + } else if d < time.Hour { + minutes := d / time.Minute 
+ seconds := (d % time.Minute) / time.Second + fmt.Fprintf(b, "%dm%.3fs", minutes, float64(seconds)/float64(time.Second)) + } else { + hours := d / time.Hour + minutes := (d % time.Hour) / time.Minute + fmt.Fprintf(b, "%dh%dm", hours, minutes) + } +} + // formatStack formats a stack trace with ANSI color codes. -// It structures the stack trace with colored goroutine, function, and path segments, -// using indentation and separators for readability. -// Example (internal usage): -// -// h.formatStack(&builder, []byte("goroutine 1 [running]:\nmain.main()\n\tmain.go:10")) // Appends colored stack trace -func (h *ColorizedHandler) formatStack(b *strings.Builder, stack []byte) { +func (h *ColorizedHandler) formatStack(b *bytes.Buffer, stack []byte) { b.WriteString("\n") b.WriteString(h.palette.Header) b.WriteString("[stack]") b.WriteString(h.palette.Reset) b.WriteString("\n") - lines := strings.Split(string(stack), "\n") if len(lines) == 0 { return } - // Format goroutine line b.WriteString(" ┌─ ") b.WriteString(h.palette.Goroutine) b.WriteString(lines[0]) b.WriteString(h.palette.Reset) b.WriteString("\n") - - // Pair function name and file path lines for i := 1; i < len(lines)-1; i += 2 { funcLine := strings.TrimSpace(lines[i]) pathLine := strings.TrimSpace(lines[i+1]) - if funcLine != "" { b.WriteString(" │ ") b.WriteString(h.palette.Func) @@ -330,35 +809,25 @@ func (h *ColorizedHandler) formatStack(b *strings.Builder, stack []byte) { if pathLine != "" { b.WriteString(" │ ") - // Look for last "/" before ".go:" lastSlash := strings.LastIndex(pathLine, "/") goIndex := strings.Index(pathLine, ".go:") - if lastSlash >= 0 && goIndex > lastSlash { - // Prefix path prefix := pathLine[:lastSlash+1] - // File and line (e.g., ll.go:698 +0x5c) suffix := pathLine[lastSlash+1:] - b.WriteString(h.palette.Path) b.WriteString(prefix) b.WriteString(h.palette.Reset) - - b.WriteString(h.palette.Path) // Use mainPath color for suffix + b.WriteString(h.palette.Path) 
b.WriteString(suffix) b.WriteString(h.palette.Reset) } else { - // Fallback: whole line is gray b.WriteString(h.palette.Path) b.WriteString(pathLine) b.WriteString(h.palette.Reset) } - b.WriteString("\n") } } - - // Handle any remaining unpaired line if len(lines)%2 == 0 && strings.TrimSpace(lines[len(lines)-1]) != "" { b.WriteString(" │ ") b.WriteString(h.palette.Func) @@ -371,110 +840,160 @@ func (h *ColorizedHandler) formatStack(b *strings.Builder, stack []byte) { } // handleDumpOutput formats hex dump output with ANSI color codes. -// It applies colors to position, hex, ASCII, and title components of the dump, -// wrapping the output with colored BEGIN/END separators. -// Returns an error if writing fails. -// Example (internal usage): -// -// h.handleDumpOutput(&lx.Entry{Class: lx.ClassDump, Message: "pos 00 hex: 61 62 'ab'"}) // Writes colored dump func (h *ColorizedHandler) handleDumpOutput(e *lx.Entry) error { - var builder strings.Builder - - // Add timestamp if enabled + buf := colorBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer colorBufPool.Put(buf) if h.showTime { - builder.WriteString(e.Timestamp.Format(h.timeFormat)) - builder.WriteString(lx.Newline) + buf.WriteString(e.Timestamp.Format(h.timeFormat)) + buf.WriteString(lx.Newline) } - - // Write colored BEGIN separator - builder.WriteString(h.palette.Title) - builder.WriteString("---- BEGIN DUMP ----") - builder.WriteString(h.palette.Reset) - builder.WriteString("\n") - - // Process each line of the dump + buf.WriteString(h.palette.Title) + buf.WriteString("---- BEGIN DUMP ----") + buf.WriteString(h.palette.Reset) + buf.WriteString("\n") lines := strings.Split(e.Message, "\n") length := len(lines) for i, line := range lines { if strings.HasPrefix(line, "pos ") { - // Parse and color position and hex/ASCII parts parts := strings.SplitN(line, "hex:", 2) if len(parts) == 2 { - builder.WriteString(h.palette.Pos) - builder.WriteString(parts[0]) - builder.WriteString(h.palette.Reset) - + 
buf.WriteString(h.palette.Pos) + buf.WriteString(parts[0]) + buf.WriteString(h.palette.Reset) hexAscii := strings.SplitN(parts[1], "'", 2) - builder.WriteString(h.palette.Hex) - builder.WriteString("hex:") - builder.WriteString(hexAscii[0]) - builder.WriteString(h.palette.Reset) - + buf.WriteString(h.palette.Hex) + buf.WriteString("hex:") + buf.WriteString(hexAscii[0]) + buf.WriteString(h.palette.Reset) if len(hexAscii) > 1 { - builder.WriteString(h.palette.Ascii) - builder.WriteString("'") - builder.WriteString(hexAscii[1]) - builder.WriteString(h.palette.Reset) + buf.WriteString(h.palette.Ascii) + buf.WriteString("'") + buf.WriteString(hexAscii[1]) + buf.WriteString(h.palette.Reset) } } } else if strings.HasPrefix(line, "Dumping value of type:") { - // Color type dump lines - builder.WriteString(h.palette.Header) - builder.WriteString(line) - builder.WriteString(h.palette.Reset) + buf.WriteString(h.palette.Header) + buf.WriteString(line) + buf.WriteString(h.palette.Reset) } else { - // Write non-dump lines as-is - builder.WriteString(line) + buf.WriteString(line) } - - // Don't add newline for the last line if i < length-1 { - builder.WriteString("\n") + buf.WriteString("\n") } } + buf.WriteString(h.palette.Title) + buf.WriteString("---- END DUMP ----") + buf.WriteString(h.palette.Reset) + buf.WriteString("\n") + _, err := h.writer.Write(buf.Bytes()) + return err +} - // Write colored END separator - builder.WriteString(h.palette.Title) - builder.WriteString("---- END DUMP ----") - builder.WriteString(h.palette.Reset) - builder.WriteString("\n") +// paletteCache stores the detected palette and environment hash to avoid repeated checks. +var paletteCache struct { + once sync.Once + mu sync.RWMutex + hash uint64 + palette Palette +} - // Write formatted output to writer - _, err := h.w.Write([]byte(builder.String())) - return err +// computeEnvHash generates a hash of relevant environment variables to detect changes. 
+func computeEnvHash() uint64 { + h := fnv.New64a() + h.Write([]byte(os.Getenv("NO_COLOR"))) + h.Write([]byte(os.Getenv("TERM"))) + h.Write([]byte(os.Getenv("COLORFGBG"))) + h.Write([]byte(os.Getenv("AppleInterfaceStyle"))) + h.Write([]byte(os.Getenv("APPEARANCE"))) + h.Write([]byte(os.Getenv("TERM_BACKGROUND"))) + return h.Sum64() } -// detectPalette selects a color palette based on terminal environment variables. -// It checks TERM_BACKGROUND, COLORFGBG, and AppleInterfaceStyle to determine -// whether a light or dark palette is appropriate, defaulting to darkPalette. -// Example (internal usage): -// -// palette := h.detectPalette() // Returns darkPalette or lightPalette -func (h *ColorizedHandler) detectPalette() Palette { - // Check TERM_BACKGROUND (e.g., iTerm2) - if bg, ok := os.LookupEnv("TERM_BACKGROUND"); ok { - if bg == "light" { - return lightPalette // Use light palette for light background - } - return darkPalette // Use dark palette otherwise +// computePaletteFromEnv computes the palette based on current environment variables. +// This contains the original logic from detectPalette, extracted for caching. 
+func (h *ColorizedHandler) computePaletteFromEnv() Palette { + if os.Getenv("NO_COLOR") != "" { + return noColorPalette + } + + term := os.Getenv("TERM") + if term == "dumb" { + return noColorPalette } - // Check COLORFGBG (traditional xterm) - if fgBg, ok := os.LookupEnv("COLORFGBG"); ok { + // Windows is handled via build tag, just check terminal capability + if h.isWindowsTerminal() { + return h.applyIntensity(darkPalette) + } + + // Unix/macOS background detection + isDarkBackground := true + if style, ok := os.LookupEnv("APPEARANCE"); ok && strings.EqualFold(style, "light") { + isDarkBackground = false + } else if fgBg := os.Getenv("COLORFGBG"); fgBg != "" { parts := strings.Split(fgBg, ";") if len(parts) >= 2 { - bg := parts[len(parts)-1] // Last part (some terminals add more fields) - if bg == "7" || bg == "15" || bg == "0;15" { // Handle variations - return lightPalette // Use light palette for light background - } + bgInt, _ := strconv.Atoi(parts[len(parts)-1]) + isDarkBackground = (bgInt >= 0 && bgInt <= 6) || (bgInt >= 8 && bgInt <= 14) } } - // Check macOS dark mode - if style, ok := os.LookupEnv("AppleInterfaceStyle"); ok && strings.EqualFold(style, "dark") { - return darkPalette // Use dark palette for macOS dark mode + if isDarkBackground { + return h.applyIntensity(darkPalette) + } + return h.applyIntensity(lightPalette) +} + +// detectPalette selects a color palette based on terminal environment variables. +// It uses caching with environment hash to avoid repeated checks. 
+func (h *ColorizedHandler) detectPalette() Palette { + // If colors are explicitly disabled, return noColorPalette + if h.noColor { + return noColorPalette } - // Default: dark (conservative choice for terminals) - return darkPalette + // Fast path: Check cache + currentHash := computeEnvHash() + + paletteCache.mu.RLock() + if paletteCache.hash == currentHash && paletteCache.palette != (Palette{}) { + p := paletteCache.palette + paletteCache.mu.RUnlock() + return p + } + paletteCache.mu.RUnlock() + + // Slow path: Compute + paletteCache.mu.Lock() + defer paletteCache.mu.Unlock() + + // Double-check after acquiring write lock + if paletteCache.hash == currentHash && paletteCache.palette != (Palette{}) { + return paletteCache.palette + } + + // Compute new palette + p := h.computePaletteFromEnv() + paletteCache.hash = currentHash + paletteCache.palette = p + return p +} + +// applyIntensity applies the intensity setting to a base palette +func (h *ColorizedHandler) applyIntensity(basePalette Palette) Palette { + switch h.intensity { + case IntensityNormal: + return basePalette + case IntensityBright: + return brightPalette + case IntensityPastel: + return pastelPalette + case IntensityVibrant: + return vibrantPalette + default: + return basePalette + } } diff --git a/vendor/github.com/olekukonko/ll/lh/colorized_unix.go b/vendor/github.com/olekukonko/ll/lh/colorized_unix.go new file mode 100644 index 0000000000..b01e1f542c --- /dev/null +++ b/vendor/github.com/olekukonko/ll/lh/colorized_unix.go @@ -0,0 +1,10 @@ +//go:build !windows + +package lh + +// No-op for Unix systems - ANSI is native. 
+func enableWindowsANSI() {} + +func (h *ColorizedHandler) isWindowsTerminal() bool { + return false +} diff --git a/vendor/github.com/olekukonko/ll/lh/colorized_windows.go b/vendor/github.com/olekukonko/ll/lh/colorized_windows.go new file mode 100644 index 0000000000..e4edafd53c --- /dev/null +++ b/vendor/github.com/olekukonko/ll/lh/colorized_windows.go @@ -0,0 +1,47 @@ +//go:build windows + +package lh + +import ( + "os" + "syscall" + "unsafe" +) + +func init() { + enableWindowsANSI() +} + +// enableWindowsANSI enables virtual terminal processing on Windows 10+. +func enableWindowsANSI() { + kernel32 := syscall.NewLazyDLL("kernel32.dll") + setConsoleMode := kernel32.NewProc("SetConsoleMode") + getConsoleMode := kernel32.NewProc("GetConsoleMode") + + const enableVirtualTerminalProcessing = 0x0004 + handles := []syscall.Handle{syscall.Stdout, syscall.Stderr} + + for _, handle := range handles { + var mode uint32 + if r, _, _ := getConsoleMode.Call(uintptr(handle), uintptr(unsafe.Pointer(&mode))); r != 0 { + if mode&enableVirtualTerminalProcessing == 0 { + newMode := mode | enableVirtualTerminalProcessing + setConsoleMode.Call(uintptr(handle), uintptr(newMode)) + } + } + } +} + +// isWindowsTerminal checks Windows-specific terminal indicators. +func (h *ColorizedHandler) isWindowsTerminal() bool { + if os.Getenv("WT_SESSION") != "" { + return true + } + if os.Getenv("ConEmuANSI") == "ON" { + return true + } + if os.Getenv("ANSICON") != "" { + return true + } + return false +} diff --git a/vendor/github.com/olekukonko/ll/lh/dedup.go b/vendor/github.com/olekukonko/ll/lh/dedup.go new file mode 100644 index 0000000000..523e15fca1 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/lh/dedup.go @@ -0,0 +1,248 @@ +package lh + +import ( + "bytes" + "fmt" + "sort" + "sync" + "time" + + "github.com/cespare/xxhash/v2" + "github.com/olekukonko/ll/lx" +) + +// shardCount determines the number of shards for the dedup handler. 
+// Must be a power of 2 for efficient modulo via bitwise AND. +const shardCount = 32 + +// Dedup is a log handler that suppresses duplicate entries within a TTL window. +// It wraps another handler and filters out repeated log entries that match +// within the deduplication period. +type Dedup struct { + next lx.Handler + ttl time.Duration + cleanupEvery time.Duration + keyFn lx.Deduper + maxKeys int + shards [shardCount]dedupShard // value array; take &shards[i] when locking + done chan struct{} + wg sync.WaitGroup + once sync.Once +} + +type dedupShard struct { + mu sync.Mutex + seen map[uint64]int64 // key -> expiry unix-nano timestamp +} + +// DedupOpt configures a Dedup handler. +type DedupOpt func(*Dedup) + +// WithDedupKeyFunc customizes how deduplication keys are generated. +func WithDedupKeyFunc(fn func(*lx.Entry) uint64) DedupOpt { + return func(d *Dedup) { + d.keyFn = dedupKeyFunc(fn) + } +} + +type dedupKeyFunc func(*lx.Entry) uint64 + +func (f dedupKeyFunc) Calculate(e *lx.Entry) uint64 { + return f(e) +} + +// WithDedupCleanupInterval sets how often expired deduplication keys are purged. +func WithDedupCleanupInterval(every time.Duration) DedupOpt { + return func(d *Dedup) { + if every > 0 { + d.cleanupEvery = every + } + } +} + +// WithDedupMaxKeys sets a soft limit on tracked deduplication keys. +func WithDedupMaxKeys(max int) DedupOpt { + return func(d *Dedup) { + if max > 0 { + d.maxKeys = max + } + } +} + +// WithDedupIgnore specifies fields to ignore in the default key function. +func WithDedupIgnore(fields ...string) DedupOpt { + return func(d *Dedup) { + if dd, ok := d.keyFn.(*defaultDedup); ok { + if dd.ignoreFields == nil { + dd.ignoreFields = make(map[string]struct{}, len(fields)) + } + for _, f := range fields { + dd.ignoreFields[f] = struct{}{} + } + } + } +} + +// NewDedup creates a deduplicating handler wrapper. 
+func NewDedup(next lx.Handler, ttl time.Duration, opts ...DedupOpt) *Dedup { + if ttl <= 0 { + ttl = 2 * time.Second + } + d := &Dedup{ + next: next, + ttl: ttl, + cleanupEvery: time.Minute, + keyFn: NewDefaultDedup(), + done: make(chan struct{}), + } + // Pre-allocate each shard's map to avoid growth allocations at startup. + for i := 0; i < len(d.shards); i++ { + d.shards[i].seen = make(map[uint64]int64, 64) + } + for _, opt := range opts { + opt(d) + } + d.wg.Add(1) + go d.cleanupLoop() + return d +} + +// Handle processes a log entry, suppressing duplicates within the TTL window. +func (d *Dedup) Handle(e *lx.Entry) error { + // Guard against nil keyFn — pass through if not configured. + if d.keyFn == nil { + return d.next.Handle(e) + } + + now := time.Now().UnixNano() + key := d.keyFn.Calculate(e) + + // Bitwise AND is safe because shardCount is a power of 2. + shard := &d.shards[key&(shardCount-1)] + + shard.mu.Lock() + exp, ok := shard.seen[key] + if ok && now < exp { + shard.mu.Unlock() + return nil // duplicate within TTL — suppress + } + + // Opportunistic per-shard cleanup when the shard is getting full. + if d.maxKeys > 0 { + limitPerShard := d.maxKeys / shardCount + if limitPerShard > 0 && len(shard.seen) >= limitPerShard { + d.cleanupShardLocked(shard, now) + } + } + + shard.seen[key] = now + d.ttl.Nanoseconds() + shard.mu.Unlock() + + return d.next.Handle(e) +} + +// getShardIndex returns the shard index for a given key. +// Uses bitwise AND since shardCount is a power of 2. +func (d *Dedup) getShardIndex(key uint64) int { + return int(key & (shardCount - 1)) +} + +// Close stops the cleanup goroutine and closes the underlying handler. +func (d *Dedup) Close() error { + var err error + d.once.Do(func() { + close(d.done) + d.wg.Wait() + if c, ok := d.next.(interface{ Close() error }); ok { + err = c.Close() + } + }) + return err +} + +// cleanupLoop runs periodically to purge expired deduplication keys. 
+func (d *Dedup) cleanupLoop() { + defer d.wg.Done() + ticker := time.NewTicker(d.cleanupEvery) + defer ticker.Stop() + for { + select { + case <-ticker.C: + now := time.Now().UnixNano() + for i := 0; i < len(d.shards); i++ { + shard := &d.shards[i] + shard.mu.Lock() + d.cleanupShardLocked(shard, now) + shard.mu.Unlock() + } + case <-d.done: + return + } + } +} + +// cleanupShardLocked removes expired keys from a shard (caller must hold lock). +func (d *Dedup) cleanupShardLocked(shard *dedupShard, now int64) { + for k, exp := range shard.seen { + if now > exp { + delete(shard.seen, k) + } + } +} + +// defaultDedup implements the default deduplication key calculation. +type defaultDedup struct { + ignoreFields map[string]struct{} +} + +// NewDefaultDedup creates a new default deduplication key generator. +func NewDefaultDedup() lx.Deduper { + return &defaultDedup{ignoreFields: make(map[string]struct{})} +} + +// Calculate generates a deduplication key from level, message, namespace, and +// fields. Fields are sorted before hashing so that identical entries always +// produce the same key regardless of Go map iteration order. +func (d *defaultDedup) Calculate(e *lx.Entry) uint64 { + h := xxhash.New() + zero := []byte{0} + + h.Write([]byte(e.Level.String())) + h.Write(zero) + h.Write([]byte(e.Message)) + h.Write(zero) + h.Write([]byte(e.Namespace)) + h.Write(zero) + + if len(e.Fields) > 0 { + m := e.Fields.Map() + keys := make([]string, 0, len(m)) + for k := range m { + if _, excluded := d.ignoreFields[k]; !excluded { + keys = append(keys, k) + } + } + // Sort keys to guarantee a deterministic hash across calls. + // Without this, Go's random map iteration order means two identical + // entries can hash to different values and bypass deduplication. 
+ sort.Strings(keys) + + buf := dedupBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer dedupBufPool.Put(buf) + + for _, k := range keys { + buf.WriteString(k) + buf.WriteByte('=') + fmt.Fprint(buf, m[k]) + buf.WriteByte(0) + } + h.Write(buf.Bytes()) + } + + return h.Sum64() +} + +var dedupBufPool = sync.Pool{ + New: func() any { return new(bytes.Buffer) }, +} diff --git a/vendor/github.com/olekukonko/ll/lh/json.go b/vendor/github.com/olekukonko/ll/lh/json.go index c40576d674..4fab0203f6 100644 --- a/vendor/github.com/olekukonko/ll/lh/json.go +++ b/vendor/github.com/olekukonko/ll/lh/json.go @@ -1,26 +1,49 @@ package lh import ( - "encoding/json" + "bytes" "fmt" - "github.com/olekukonko/ll/lx" "io" "os" "strings" "sync" "time" + + "github.com/goccy/go-json" + "github.com/olekukonko/ll/lx" ) +var jsonBufPool = sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, +} + +// fieldsMapPool pools map[string]interface{} to reduce allocations +var fieldsMapPool = sync.Pool{ + New: func() any { + return make(map[string]interface{}, 8) + }, +} + +// jsonOutputPool pools JsonOutput structs to reduce allocations +var jsonOutputPool = sync.Pool{ + New: func() any { + return &JsonOutput{ + Fields: make(map[string]interface{}, 8), + } + }, +} + // JSONHandler is a handler that outputs log entries as JSON objects. // It formats log entries with timestamp, level, message, namespace, fields, and optional // stack traces or dump segments, writing the result to the provided writer. // Thread-safe with a mutex to protect concurrent writes. 
type JSONHandler struct { - writer io.Writer // Destination for JSON output - timeFmt string // Format for timestamp (default: RFC3339Nano) - pretty bool // Enable pretty printing with indentation if true - fieldMap map[string]string // Optional mapping for field names (not used in provided code) - mu sync.Mutex // Protects concurrent access to writer + writer io.Writer // Destination for JSON output + timeFmt string // Format for timestamp (default: RFC3339Nano) + pretty bool // Enable pretty printing with indentation if true + mu sync.Mutex // Protects concurrent access to writer } // JsonOutput represents the JSON structure for a log entry. @@ -75,7 +98,6 @@ func NewJSONHandler(w io.Writer, opts ...func(*JSONHandler)) *JSONHandler { func (h *JSONHandler) Handle(e *lx.Entry) error { h.mu.Lock() defer h.mu.Unlock() - // Handle dump entries separately if e.Class == lx.ClassDump { return h.handleDump(e) @@ -84,6 +106,13 @@ func (h *JSONHandler) Handle(e *lx.Entry) error { return h.handleRegular(e) } +// Output sets the Writer destination for JSONHandler's output, ensuring thread safety with a mutex lock. +func (h *JSONHandler) Output(w io.Writer) { + h.mu.Lock() + defer h.mu.Unlock() + h.writer = w +} + // handleRegular handles standard log entries (non-dump). // It converts the entry to a JsonOutput struct and encodes it as JSON, // applying pretty printing if enabled. Logs encoding errors to stderr for debugging. 
@@ -92,31 +121,61 @@ func (h *JSONHandler) Handle(e *lx.Entry) error { // // h.handleRegular(&lx.Entry{Message: "test", Level: lx.LevelInfo}) // Writes JSON object func (h *JSONHandler) handleRegular(e *lx.Entry) error { - // Create JSON output structure - entry := JsonOutput{ - Time: e.Timestamp.Format(h.timeFmt), // Format timestamp - Level: e.Level.String(), // Convert level to string - Class: e.Class.String(), // Convert class to string - Msg: e.Message, // Set message - Namespace: e.Namespace, // Set namespace - Dump: nil, // No dump for regular entries - Fields: e.Fields, // Copy fields - Stack: e.Stack, // Include stack trace if present + // Get fieldsMap from pool to avoid allocation + fieldsMap := fieldsMapPool.Get().(map[string]interface{}) + // Clear any existing keys from previous use + for k := range fieldsMap { + delete(fieldsMap, k) + } + // Convert ordered fields to map for JSON output + for _, pair := range e.Fields { + fieldsMap[pair.Key] = pair.Value } - // Create JSON encoder - enc := json.NewEncoder(h.writer) + + // Get JsonOutput from pool + entry := jsonOutputPool.Get().(*JsonOutput) + entry.Time = e.Timestamp.Format(h.timeFmt) + entry.Level = e.Level.String() + entry.Class = e.Class.String() + entry.Msg = e.Message + entry.Namespace = e.Namespace + entry.Dump = nil + entry.Fields = fieldsMap + entry.Stack = e.Stack + + // Acquire buffer from pool to avoid allocation and reduce syscalls + buf := jsonBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer func() { + // Return all pooled objects + jsonBufPool.Put(buf) + // Reset and return fieldsMap to pool + for k := range entry.Fields { + delete(entry.Fields, k) + } + fieldsMapPool.Put(entry.Fields) + // Reset and return JsonOutput to pool + entry.Fields = nil + entry.Stack = nil + entry.Dump = nil + jsonOutputPool.Put(entry) + }() + + // Create JSON encoder writing to buffer (uses go-json for 2-5x speedup) + enc := json.NewEncoder(buf) if h.pretty { // Enable indentation for pretty printing 
enc.SetIndent("", " ") } - // Log encoding attempt for debugging - fmt.Fprintf(os.Stderr, "Encoding JSON entry: %v\n", e.Message) - // Encode and write JSON + // Encode JSON to buffer err := enc.Encode(entry) if err != nil { // Log encoding error for debugging fmt.Fprintf(os.Stderr, "JSON encode error: %v\n", err) + return err } + // Write buffer to underlying writer in one go + _, err = h.writer.Write(buf.Bytes()) return err } @@ -130,7 +189,6 @@ func (h *JSONHandler) handleRegular(e *lx.Entry) error { func (h *JSONHandler) handleDump(e *lx.Entry) error { var segments []dumpSegment lines := strings.Split(e.Message, "\n") - // Parse each line of the dump message for _, line := range lines { if !strings.HasPrefix(line, "pos") { @@ -143,11 +201,9 @@ func (h *JSONHandler) handleDump(e *lx.Entry) error { // Parse position var offset int fmt.Sscanf(parts[0], "pos %d", &offset) - // Parse hex and ASCII hexAscii := strings.SplitN(parts[1], "'", 2) hexStr := strings.Fields(strings.TrimSpace(hexAscii[0])) - // Create dump segment segments = append(segments, dumpSegment{ Offset: offset, // Set byte offset @@ -156,15 +212,52 @@ func (h *JSONHandler) handleDump(e *lx.Entry) error { }) } - // Encode JSON output with dump segments - return json.NewEncoder(h.writer).Encode(JsonOutput{ - Time: e.Timestamp.Format(h.timeFmt), // Format timestamp - Level: e.Level.String(), // Convert level to string - Class: e.Class.String(), // Convert class to string - Msg: "dumping segments", // Fixed message for dumps - Namespace: e.Namespace, // Set namespace - Dump: segments, // Include parsed segments - Fields: e.Fields, // Copy fields - Stack: e.Stack, // Include stack trace if present - }) + // Get fieldsMap from pool + fieldsMap := fieldsMapPool.Get().(map[string]interface{}) + for k := range fieldsMap { + delete(fieldsMap, k) + } + for _, pair := range e.Fields { + fieldsMap[pair.Key] = pair.Value + } + + // Get JsonOutput from pool + entry := jsonOutputPool.Get().(*JsonOutput) + 
entry.Time = e.Timestamp.Format(h.timeFmt) + entry.Level = e.Level.String() + entry.Class = e.Class.String() + entry.Msg = "dumping segments" + entry.Namespace = e.Namespace + entry.Dump = segments + entry.Fields = fieldsMap + entry.Stack = e.Stack + + // Acquire buffer from pool + buf := jsonBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer func() { + jsonBufPool.Put(buf) + for k := range entry.Fields { + delete(entry.Fields, k) + } + fieldsMapPool.Put(entry.Fields) + entry.Fields = nil + entry.Stack = nil + entry.Dump = nil + jsonOutputPool.Put(entry) + }() + + // Encode JSON output with dump segments to buffer + enc := json.NewEncoder(buf) + if h.pretty { + enc.SetIndent("", " ") + } + err := enc.Encode(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "JSON dump encode error: %v\n", err) + return err + } + // Write buffer to underlying writer + _, err = h.writer.Write(buf.Bytes()) + return err } diff --git a/vendor/github.com/olekukonko/ll/lh/lh.go b/vendor/github.com/olekukonko/ll/lh/lh.go new file mode 100644 index 0000000000..a118b6a8ae --- /dev/null +++ b/vendor/github.com/olekukonko/ll/lh/lh.go @@ -0,0 +1,75 @@ +package lh + +import ( + "fmt" + "strconv" + "strings" +) + +// rightPad pads a string with spaces on the right to reach the specified length. +// Returns the original string if it's already at or exceeds the target length. +// Uses strings.Builder for efficient memory allocation. +func rightPad(str string, length int) string { + if len(str) >= length { + return str + } + var sb strings.Builder + sb.Grow(length) + sb.WriteString(str) + sb.WriteString(strings.Repeat(" ", length-len(str))) + return sb.String() +} + +// stringWriter is the interface for types that can write strings and bytes. +// Both *strings.Builder and *bytes.Buffer implement this. 
+type stringWriter interface { + WriteString(s string) (int, error) + Write(p []byte) (n int, err error) +} + +// writeFieldValue writes a field value to the builder using type switches +// to avoid reflection and allocations associated with fmt.Fprint. +func writeFieldValue(b stringWriter, v interface{}) { + switch val := v.(type) { + case string: + b.WriteString(val) + case int: + b.WriteString(strconv.Itoa(val)) + case int8: + b.WriteString(strconv.FormatInt(int64(val), 10)) + case int16: + b.WriteString(strconv.FormatInt(int64(val), 10)) + case int32: + b.WriteString(strconv.FormatInt(int64(val), 10)) + case int64: + b.WriteString(strconv.FormatInt(val, 10)) + case uint: + b.WriteString(strconv.FormatUint(uint64(val), 10)) + case uint8: + b.WriteString(strconv.FormatUint(uint64(val), 10)) + case uint16: + b.WriteString(strconv.FormatUint(uint64(val), 10)) + case uint32: + b.WriteString(strconv.FormatUint(uint64(val), 10)) + case uint64: + b.WriteString(strconv.FormatUint(val, 10)) + case float32: + b.WriteString(strconv.FormatFloat(float64(val), 'g', -1, 32)) + case float64: + b.WriteString(strconv.FormatFloat(val, 'g', -1, 64)) + case bool: + if val { + b.WriteString("true") + } else { + b.WriteString("false") + } + case nil: + b.WriteString("nil") + case error: + b.WriteString(val.Error()) + case fmt.Stringer: + b.WriteString(val.String()) + default: + fmt.Fprint(b, val) + } +} diff --git a/vendor/github.com/olekukonko/ll/lh/memory.go b/vendor/github.com/olekukonko/ll/lh/memory.go index e3bc939873..42fb8928c9 100644 --- a/vendor/github.com/olekukonko/ll/lh/memory.go +++ b/vendor/github.com/olekukonko/ll/lh/memory.go @@ -2,9 +2,10 @@ package lh import ( "fmt" - "github.com/olekukonko/ll/lx" "io" "sync" + + "github.com/olekukonko/ll/lx" ) // MemoryHandler is an lx.Handler that stores log entries in memory. 
@@ -106,7 +107,7 @@ func (h *MemoryHandler) Dump(w io.Writer) error {
 	// Process each entry through the TextHandler
 	for _, entry := range h.entries {
 		if err := tempHandler.Handle(entry); err != nil {
-			return fmt.Errorf("failed to dump entry: %w", err) // Wrap and return write errors
+			return fmt.Errorf("failed to dump entry: %w", err) // Wrap and return write errors
 		}
 	}
 	return nil
diff --git a/vendor/github.com/olekukonko/ll/lh/multi.go b/vendor/github.com/olekukonko/ll/lh/multi.go
index 8a9d8846d3..e5eba6cf2e 100644
--- a/vendor/github.com/olekukonko/ll/lh/multi.go
+++ b/vendor/github.com/olekukonko/ll/lh/multi.go
@@ -3,6 +3,7 @@ package lh
 import (
 	"errors"
 	"fmt"
+
 	"github.com/olekukonko/ll/lx"
 )
 
@@ -30,6 +31,34 @@ func NewMultiHandler(h ...lx.Handler) *MultiHandler {
 	}
 }
 
+// Len returns the number of handlers in the MultiHandler.
+// Useful for monitoring or debugging handler composition.
+//
+// Example:
+//
+//	multi := &MultiHandler{}
+//	multi.Append(h1, h2, h3)
+//	count := multi.Len() // Returns 3
+func (h *MultiHandler) Len() int {
+	return len(h.Handlers)
+}
+
+// Append adds one or more handlers to the MultiHandler.
+// Handlers will receive log entries in the order they were appended.
+// This method modifies the MultiHandler in place.
+//
+// Example:
+//
+//	multi := &MultiHandler{}
+//	multi.Append(
+//		lx.NewJSONHandler(os.Stdout),
+//		lx.NewTextHandler(logFile),
+//	)
+//	// Now multi broadcasts to both stdout and file
+func (h *MultiHandler) Append(handlers ...lx.Handler) {
+	h.Handlers = append(h.Handlers, handlers...)
+}
+
 // Handle implements the Handler interface, calling Handle on each handler in sequence.
 // It collects any errors from handlers and combines them into a single error using errors.Join.
 // If no errors occur, it returns nil. Thread-safe if the underlying handlers are thread-safe.
@@ -43,7 +72,7 @@ func (h *MultiHandler) Handle(e *lx.Entry) error {
 		if err := handler.Handle(e); err != nil {
 			// fmt.Fprintf(os.Stderr, "MultiHandler error for handler %d: %v\n", i, err)
 			// Wrap error with handler index for context
-			errs = append(errs, fmt.Errorf("handler %d: %w", i, err))
+			errs = append(errs, fmt.Errorf("handler %d: %w", i, err))
 		}
 	}
 	// Combine errors into a single error, or return nil if no errors
diff --git a/vendor/github.com/olekukonko/ll/lh/pipe.go b/vendor/github.com/olekukonko/ll/lh/pipe.go
new file mode 100644
index 0000000000..78057134e1
--- /dev/null
+++ b/vendor/github.com/olekukonko/ll/lh/pipe.go
@@ -0,0 +1,76 @@
+package lh
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/olekukonko/ll/lx"
+)
+
+// Pipe chains multiple handler wrappers together, applying them from left to right.
+// The wrappers are composed such that the first wrapper in the list becomes
+// the innermost layer, and the last wrapper becomes the outermost layer.
+//
+// Usage pattern: Pipe(baseHandler, wrapper1, wrapper2, wrapper3)
+// Result: wrapper3(wrapper2(wrapper1(baseHandler)))
+//
+// This enables clean, declarative construction of handler middleware chains.
+//
+// Example - building a processing pipeline:
+//
+//	base := lx.NewJSONHandler(os.Stdout)
+//	handler := lh.Pipe(base,
+//		lh.NewDedup(2*time.Second),       // 1. Deduplicate first
+//		lh.NewRateLimit(10, time.Second), // 2. Then rate limit
+//	)
+//	logger := lx.NewLogger(handler)
+//
+// In this example, logs flow: Dedup → RateLimit → AddTimestamp → JSONHandler
+func Pipe(h lx.Handler, wraps ...lx.Wrap) lx.Handler {
+	for _, w := range wraps {
+		if w != nil {
+			h = w(h)
+		}
+	}
+	return h
+}
+
+// PipeDedup returns a wrapper that applies deduplication to the handler.
+func PipeDedup(ttl time.Duration, opts ...DedupOpt) lx.Wrap {
+	return func(next lx.Handler) lx.Handler {
+		return NewDedup(next, ttl, opts...)
+	}
+}
+
+// PipeBuffer returns a wrapper that applies buffering to the handler.
+func PipeBuffer(opts ...BufferingOpt) lx.Wrap { + return func(next lx.Handler) lx.Handler { + return NewBuffered(next, opts...) + } +} + +// PipeRotate returns a wrapper that applies log rotation. +// Ideally, the 'next' handler should be one that writes to a file (like TextHandler or JSONHandler). +// +// If the underlying handler does not implement lx.HandlerOutputter (cannot change output destination), +// or if rotation initialization fails, this will log a warning to stderr and return the +// original handler unmodified to prevent application crashes. +func PipeRotate(maxSizeBytes int64, src RotateSource) lx.Wrap { + return func(next lx.Handler) lx.Handler { + // Attempt to cast to HandlerOutputter (Handler + Outputter interface) + h, ok := next.(lx.HandlerOutputter) + if !ok { + fmt.Fprintf(os.Stderr, "ll/lh: PipeRotate skipped - handler does not implement SetOutput(io.Writer)\n") + return next + } + + // Initialize the rotating handler + r, err := NewRotating(h, maxSizeBytes, src) + if err != nil { + fmt.Fprintf(os.Stderr, "ll/lh: PipeRotate initialization failed: %v\n", err) + return next + } + return r + } +} diff --git a/vendor/github.com/olekukonko/ll/lh/rotate.go b/vendor/github.com/olekukonko/ll/lh/rotate.go new file mode 100644 index 0000000000..37fcf27dd7 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/lh/rotate.go @@ -0,0 +1,235 @@ +package lh + +import ( + "io" + "sync" + "sync/atomic" + + "github.com/olekukonko/ll/lx" +) + +// trackingWriter wraps an io.WriteCloser to keep an in-memory count of bytes written. +// This prevents the rotator from having to query the filesystem (via os.Stat) +// on every single log entry, which would cause severe performance bottlenecks. +type trackingWriter struct { + io.WriteCloser + written int64 // Atomic: use atomic.LoadInt64/AddInt64 +} + +// Write intercepts the write operation, counts the bytes, and passes them to the underlying writer. 
+func (t *trackingWriter) Write(p []byte) (n int, err error) { + n, err = t.WriteCloser.Write(p) + if n > 0 { + atomic.AddInt64(&t.written, int64(n)) + } + return +} + +// writtenBytes returns the current byte count atomically. +func (t *trackingWriter) writtenBytes() int64 { + if t == nil { + return 0 + } + return atomic.LoadInt64(&t.written) +} + +// RotateSource defines the callbacks needed to implement log rotation. +// It abstracts the destination lifecycle: opening, sizing, and rotating. +// +// Example for file rotation: +// +// src := lh.RotateSource{ +// Open: func() (io.WriteCloser, error) { +// return os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) +// }, +// Size: func() (int64, error) { +// if fi, err := os.Stat("app.log"); err == nil { +// return fi.Size(), nil +// } +// return 0, nil // File doesn't exist yet +// }, +// Rotate: func() error { +// // Close and rename the current log before creating a new one. +// return os.Rename("app.log", "app.log."+time.Now().Format("20060102-150405")) +// }, +// } +type RotateSource struct { + // Open returns a fresh destination for log output. + // Called on initialization and after each rotation. + Open func() (io.WriteCloser, error) + + // Size returns the current size in bytes of the active destination. + // Return an error if size cannot be determined (rotation will be skipped). + Size func() (int64, error) + + // Rotate performs all cleanup/rotation actions before a new destination is + // opened, including closing or renaming the previous writer when required. + // Rotating will NOT close the old writer itself; that is the responsibility + // of this callback. May be nil if no pre-open actions are needed. + Rotate func() error +} + +// Rotating wraps a handler to rotate its output when maxSize is exceeded. +// The wrapped handler must implement both Handler and Outputter interfaces. +// Rotation is triggered on each Handle call if the current size >= maxSize. 
+// +// Example: +// +// handler := lx.NewJSONHandler(os.Stdout) +// src := lh.RotateSource{...} // see RotateSource example +// rotator, err := lh.NewRotating(handler, 10*1024*1024, src) // 10 MB +// logger := lx.NewLogger(rotator) +// logger.Info("This log may trigger rotation when file reaches 10MB") +type Rotating[H interface { + lx.Handler + lx.Outputter +}] struct { + mu sync.Mutex + maxSize int64 + src RotateSource + + out *trackingWriter // Uses the tracking wrapper to count bytes in memory + handler H +} + +// NewRotating creates a rotating wrapper around handler. +// Handler's output will be replaced with destinations from src.Open. +// If maxSizeBytes <= 0, rotation is disabled. +// src.Rotate may be nil if no pre-open actions are needed. +// +// Example: +// +// // Create a JSON handler that rotates at 5MB +// handler := lx.NewJSONHandler(os.Stdout) +// rotator, err := lh.NewRotating(handler, 5*1024*1024, src) +// if err != nil { +// log.Fatal(err) +// } +// // Use rotator as your logger's handler +// logger := lx.NewLogger(rotator) +func NewRotating[H interface { + lx.Handler + lx.Outputter +}](handler H, maxSizeBytes int64, src RotateSource) (*Rotating[H], error) { + // Validate that Open callback is provided + if src.Open == nil { + return nil, io.ErrClosedPipe + } + + r := &Rotating[H]{ + maxSize: maxSizeBytes, + src: src, + handler: handler, + } + if err := r.reopenLocked(); err != nil { + return nil, err + } + return r, nil +} + +// Handle processes a log entry, rotating output if necessary. +// Thread-safe: can be called concurrently. +// +// Example: +// +// rotator.Handle(&lx.Entry{ +// Level: lx.InfoLevel, +// Message: "Processing request", +// Namespace: "api", +// }) +func (r *Rotating[H]) Handle(e *lx.Entry) error { + r.mu.Lock() + defer r.mu.Unlock() + + if err := r.rotateIfNeededLocked(); err != nil { + return err + } + return r.handler.Handle(e) +} + +// Close releases resources (closes the current output). 
+// Safe to call multiple times. +// +// Example: +// +// defer rotator.Close() +func (r *Rotating[H]) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + + if r.out != nil { + return r.out.Close() + } + return nil +} + +// Written returns the total bytes written to the current output destination. +// Useful for metrics and monitoring. +func (r *Rotating[H]) Written() int64 { + r.mu.Lock() + out := r.out + r.mu.Unlock() + return out.writtenBytes() +} + +// rotateIfNeededLocked checks current size and rotates if maxSize exceeded. +// Called with mu already held. +// +// The old trackingWriter is simply dereferenced (not closed) because ownership +// of the underlying io.WriteCloser belongs to the src.Rotate callback. That +// callback is responsible for closing, renaming, or otherwise finishing with +// the old destination before src.Open is called to provide a fresh one. This +// design avoids double-closes on shared writers (e.g. test mocks, pipes) and +// correctly models real file-rotation where the OS rename is done before the +// old fd is released. +func (r *Rotating[H]) rotateIfNeededLocked() error { + if r.maxSize <= 0 || r.src.Open == nil { + return nil + } + + // PERFORMANCE OPTIMIZATION: + // Instead of calling r.src.Size() (which executes a slow os.Stat filesystem call), + // we simply check our fast, in-memory integer counter. + if r.out != nil && r.out.writtenBytes() < r.maxSize { + return nil + } + + // Drop the reference to the old trackingWriter without closing the underlying + // WriteCloser. Closing/renaming is the responsibility of src.Rotate (see doc above). + r.out = nil + + // Run rotation hook (rename/move/compress/close old file, etc.) + if r.src.Rotate != nil { + if err := r.src.Rotate(); err != nil { + return err + } + } + + // Open fresh output + return r.reopenLocked() +} + +// reopenLocked opens a new destination and sets it on the handler. +// Called with mu already held. 
+func (r *Rotating[H]) reopenLocked() error { + out, err := r.src.Open() + if err != nil { + return err + } + + // We only ask the filesystem for the true file size ONCE when we first open the file. + // This is necessary to know the starting size if we are appending to an existing log file. + var initialSize int64 + if r.src.Size != nil { + initialSize, _ = r.src.Size() + } + + // Wrap the returned io.WriteCloser so we can track all future bytes written in memory. + r.out = &trackingWriter{ + WriteCloser: out, + written: initialSize, + } + + r.handler.Output(r.out) + return nil +} diff --git a/vendor/github.com/olekukonko/ll/lh/slog.go b/vendor/github.com/olekukonko/ll/lh/slog.go index 77584202ea..e457f2ecba 100644 --- a/vendor/github.com/olekukonko/ll/lh/slog.go +++ b/vendor/github.com/olekukonko/ll/lh/slog.go @@ -2,8 +2,9 @@ package lh import ( "context" - "github.com/olekukonko/ll/lx" "log/slog" + + "github.com/olekukonko/ll/lx" ) // SlogHandler adapts a slog.Handler to implement lx.Handler. @@ -27,6 +28,15 @@ func NewSlogHandler(h slog.Handler) *SlogHandler { return &SlogHandler{slogHandler: h} } +// Handle converts an lx.Entry to slog.Record and delegates to the slog.Handler. +// It maps the entry's fields, level, namespace, class, and stack trace to slog attributes, +// passing the resulting record to the underlying slog.Handler. +// Returns an error if the slog.Handler fails to process the record. +// Thread-safe if the underlying slog.Handler is thread-safe. +// Example: +// +// handler.Handle(&lx.Entry{Message: "test", Level: lx.LevelInfo}) // Processes as slog record +// // Handle converts an lx.Entry to slog.Record and delegates to the slog.Handler. // It maps the entry's fields, level, namespace, class, and stack trace to slog attributes, // passing the resulting record to the underlying slog.Handler. 
@@ -58,9 +68,9 @@ func (h *SlogHandler) Handle(e *lx.Entry) error { record.AddAttrs(slog.String("stack", string(e.Stack))) // Add stack trace as string } - // Add custom fields - for k, v := range e.Fields { - record.AddAttrs(slog.Any(k, v)) // Add each field as a key-value attribute + // Add custom fields in order (preserving insertion order) + for _, pair := range e.Fields { + record.AddAttrs(slog.Any(pair.Key, pair.Value)) // Add each field as a key-value attribute } // Handle the record with the underlying slog.Handler @@ -81,7 +91,7 @@ func toSlogLevel(level lx.LevelType) slog.Level { return slog.LevelInfo case lx.LevelWarn: return slog.LevelWarn - case lx.LevelError: + case lx.LevelError, lx.LevelFatal: return slog.LevelError default: return slog.LevelInfo // Default for unknown levels diff --git a/vendor/github.com/olekukonko/ll/lh/text.go b/vendor/github.com/olekukonko/ll/lh/text.go index 0b88cf4b87..2aaff3af17 100644 --- a/vendor/github.com/olekukonko/ll/lh/text.go +++ b/vendor/github.com/olekukonko/ll/lh/text.go @@ -1,9 +1,8 @@ package lh import ( - "fmt" + "bytes" "io" - "sort" "strings" "sync" "time" @@ -11,12 +10,40 @@ import ( "github.com/olekukonko/ll/lx" ) +type TextOption func(*TextHandler) + +var textBufPool = sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, +} + +// WithTextTimeFormat enables timestamp display and optionally sets a custom time format. +// It configures the TextHandler to include temporal information in each log entry, +// allowing for precise tracking of when log events occur. +// If the format string is empty, it defaults to time.RFC3339. +func WithTextTimeFormat(format string) TextOption { + return func(t *TextHandler) { + t.Timestamped(true, format) + } +} + +// WithTextShowTime enables or disables timestamp display in log entries. +// This option provides direct control over the visibility of the time prefix +// without altering the underlying time format configured in the handler. 
+// Setting show to true will prepend timestamps to all subsequent regular log outputs. +func WithTextShowTime(show bool) TextOption { + return func(t *TextHandler) { + t.showTime = show + } +} + // TextHandler is a handler that outputs log entries as plain text. // It formats log entries with namespace, level, message, fields, and optional stack traces, // writing the result to the provided writer. // Thread-safe if the underlying writer is thread-safe. type TextHandler struct { - w io.Writer // Destination for formatted log output + writer io.Writer // Destination for formatted log output showTime bool // Whether to display timestamps timeFormat string // Format for timestamps (defaults to time.RFC3339) mu sync.Mutex @@ -29,12 +56,18 @@ type TextHandler struct { // handler := NewTextHandler(os.Stdout) // logger := ll.New("app").Enable().Handler(handler) // logger.Info("Test") // Output: [app] INFO: Test -func NewTextHandler(w io.Writer) *TextHandler { - return &TextHandler{ - w: w, +func NewTextHandler(w io.Writer, opts ...TextOption) *TextHandler { + t := &TextHandler{ + writer: w, showTime: false, timeFormat: time.RFC3339, } + + for _, opt := range opts { + opt(t) + } + + return t } // Timestamped enables or disables timestamp display and optionally sets a custom time format. @@ -50,6 +83,14 @@ func (h *TextHandler) Timestamped(enable bool, format ...string) { } } +// Output sets a new writer for the TextHandler. +// Thread-safe - safe for concurrent use. +func (h *TextHandler) Output(w io.Writer) { + h.mu.Lock() + defer h.mu.Unlock() + h.writer = w +} + // Handle processes a log entry and writes it as plain text. // It delegates to specialized methods based on the entry's class (Dump, Raw, or regular). // Returns an error if writing to the underlying writer fails. 
@@ -61,18 +102,15 @@ func (h *TextHandler) Handle(e *lx.Entry) error { h.mu.Lock() defer h.mu.Unlock() - // Special handling for dump output if e.Class == lx.ClassDump { return h.handleDumpOutput(e) } - // Raw entries are written directly without formatting if e.Class == lx.ClassRaw { - _, err := h.w.Write([]byte(e.Message)) + _, err := h.writer.Write([]byte(e.Message)) return err } - // Handle standard log entries return h.handleRegularOutput(e) } @@ -84,81 +122,68 @@ func (h *TextHandler) Handle(e *lx.Entry) error { // // h.handleRegularOutput(&lx.Entry{Message: "test", Level: lx.LevelInfo}) // Writes "INFO: test" func (h *TextHandler) handleRegularOutput(e *lx.Entry) error { - var builder strings.Builder // Buffer for building formatted output + buf := textBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer textBufPool.Put(buf) - // Add timestamp if enabled if h.showTime { - builder.WriteString(e.Timestamp.Format(h.timeFormat)) - builder.WriteString(lx.Space) + buf.WriteString(e.Timestamp.Format(h.timeFormat)) + buf.WriteString(lx.Space) } - // Format namespace based on style switch e.Style { case lx.NestedPath: if e.Namespace != "" { - // Split namespace into parts and format as [parent]→[child] parts := strings.Split(e.Namespace, lx.Slash) for i, part := range parts { - builder.WriteString(lx.LeftBracket) - builder.WriteString(part) - builder.WriteString(lx.RightBracket) + buf.WriteString(lx.LeftBracket) + buf.WriteString(part) + buf.WriteString(lx.RightBracket) if i < len(parts)-1 { - builder.WriteString(lx.Arrow) + buf.WriteString(lx.Arrow) } } - builder.WriteString(lx.Colon) - builder.WriteString(lx.Space) + buf.WriteString(lx.Colon) + buf.WriteString(lx.Space) } default: // FlatPath if e.Namespace != "" { - // Format namespace as [parent/child] - builder.WriteString(lx.LeftBracket) - builder.WriteString(e.Namespace) - builder.WriteString(lx.RightBracket) - builder.WriteString(lx.Space) + buf.WriteString(lx.LeftBracket) + buf.WriteString(e.Namespace) + 
buf.WriteString(lx.RightBracket) + buf.WriteString(lx.Space) } } - // Add level and message - builder.WriteString(e.Level.String()) - builder.WriteString(lx.Colon) - builder.WriteString(lx.Space) - builder.WriteString(e.Message) + buf.WriteString(e.Level.Name(e.Class)) + // buf.WriteString(lx.Space) + buf.WriteString(lx.Colon) + buf.WriteString(lx.Space) + buf.WriteString(e.Message) - // Add fields in sorted order if len(e.Fields) > 0 { - var keys []string - for k := range e.Fields { - keys = append(keys, k) - } - // Sort keys for consistent output - sort.Strings(keys) - builder.WriteString(lx.Space) - builder.WriteString(lx.LeftBracket) - for i, k := range keys { + buf.WriteString(lx.Space) + buf.WriteString(lx.LeftBracket) + for i, pair := range e.Fields { if i > 0 { - builder.WriteString(lx.Space) + buf.WriteString(lx.Space) } - // Format field as key=value - builder.WriteString(k) - builder.WriteString("=") - builder.WriteString(fmt.Sprint(e.Fields[k])) + buf.WriteString(pair.Key) + buf.WriteString("=") + writeFieldValue(buf, pair.Value) } - builder.WriteString(lx.RightBracket) + buf.WriteString(lx.RightBracket) } - // Add stack trace if present if len(e.Stack) > 0 { - h.formatStack(&builder, e.Stack) + h.formatStack(buf, e.Stack) } - // Append newline for non-None levels if e.Level != lx.LevelNone { - builder.WriteString(lx.Newline) + buf.WriteString(lx.Newline) } - // Write formatted output to writer - _, err := h.w.Write([]byte(builder.String())) + _, err := h.writer.Write(buf.Bytes()) return err } @@ -169,22 +194,20 @@ func (h *TextHandler) handleRegularOutput(e *lx.Entry) error { // // h.handleDumpOutput(&lx.Entry{Class: lx.ClassDump, Message: "pos 00 hex: 61"}) // Writes "---- BEGIN DUMP ----\npos 00 hex: 61\n---- END DUMP ----\n" func (h *TextHandler) handleDumpOutput(e *lx.Entry) error { - // For text handler, we just add a newline before dump output - var builder strings.Builder // Buffer for building formatted output + buf := 
textBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer textBufPool.Put(buf) - // Add timestamp if enabled if h.showTime { - builder.WriteString(e.Timestamp.Format(h.timeFormat)) - builder.WriteString(lx.Newline) + buf.WriteString(e.Timestamp.Format(h.timeFormat)) + buf.WriteString(lx.Newline) } - // Add separator lines and dump content - builder.WriteString("---- BEGIN DUMP ----\n") - builder.WriteString(e.Message) - builder.WriteString("---- END DUMP ----\n") + buf.WriteString("---- BEGIN DUMP ----\n") + buf.WriteString(e.Message) + buf.WriteString("---- END DUMP ----\n\n") - // Write formatted output to writer - _, err := h.w.Write([]byte(builder.String())) + _, err := h.writer.Write(buf.Bytes()) return err } @@ -194,21 +217,18 @@ func (h *TextHandler) handleDumpOutput(e *lx.Entry) error { // Example (internal usage): // // h.formatStack(&builder, []byte("goroutine 1 [running]:\nmain.main()\n\tmain.go:10")) // Appends formatted stack trace -func (h *TextHandler) formatStack(b *strings.Builder, stack []byte) { +func (h *TextHandler) formatStack(b *bytes.Buffer, stack []byte) { lines := strings.Split(string(stack), "\n") if len(lines) == 0 { return } - // Start stack trace section b.WriteString("\n[stack]\n") - // First line: goroutine b.WriteString(" ┌─ ") b.WriteString(lines[0]) b.WriteString("\n") - // Iterate through remaining lines for i := 1; i < len(lines); i++ { line := strings.TrimSpace(lines[i]) if line == "" { @@ -216,16 +236,13 @@ func (h *TextHandler) formatStack(b *strings.Builder, stack []byte) { } if strings.Contains(line, ".go") { - // File path lines get extra indent b.WriteString(" ├ ") } else { - // Function names b.WriteString(" │ ") } b.WriteString(line) b.WriteString("\n") } - // End stack trace section b.WriteString(" └\n") } diff --git a/vendor/github.com/olekukonko/ll/ll.go b/vendor/github.com/olekukonko/ll/ll.go index 7510b89aec..99ba5e1ad8 100644 --- a/vendor/github.com/olekukonko/ll/ll.go +++ b/vendor/github.com/olekukonko/ll/ll.go @@ 
-1,7 +1,6 @@ package ll import ( - "bufio" "encoding/binary" "encoding/json" "fmt" @@ -20,25 +19,52 @@ import ( "github.com/olekukonko/ll/lx" ) +// stackBufPool pools buffers for stack trace capture to reduce allocations. +var ( + stackBufPool = sync.Pool{ + New: func() any { return make([]byte, 4096) }, + } + + entryPool = sync.Pool{ + New: func() any { + return &lx.Entry{ + Fields: make(lx.Fields, 0, 4), + Stack: nil, + } + }, + } + + fieldsSlicePool = sync.Pool{ + New: func() any { + s := make(lx.Fields, 0, 8) + return &s + }, + } +) + // Logger manages logging configuration and behavior, encapsulating state such as enablement, // log level, namespaces, context fields, output style, handler, middleware, and formatting. // It is thread-safe, using a read-write mutex to protect concurrent access to its fields. type Logger struct { - mu sync.RWMutex // Guards concurrent access to fields - enabled bool // Determines if logging is enabled - suspend atomic.Bool // uses suspend path for most actions eg. 
skipping namespace checks - level lx.LevelType // Minimum log level (e.g., Debug, Info, Warn, Error) - namespaces *lx.Namespace // Manages namespace enable/disable states - currentPath string // Current namespace path (e.g., "parent/child") - context map[string]interface{} // Contextual fields included in all logs - style lx.StyleType // Namespace formatting style (FlatPath or NestedPath) - handler lx.Handler // Output handler for logs (e.g., text, JSON) - middleware []Middleware // Middleware functions to process log entries - prefix string // Prefix prepended to log messages - indent int // Number of double spaces for message indentation - stackBufferSize int // Buffer size for capturing stack traces - separator string // Separator for namespace paths (e.g., "/") - entries atomic.Int64 // Tracks total log entries sent to handler + mu sync.RWMutex // Guards concurrent access to fields + enabled atomic.Int32 // Determines if logging is enabled + suspend atomic.Bool // uses suspend path for most actions eg. 
skipping namespace checks + level lx.LevelType // Minimum log level (e.g., Debug, Info, Warn, Error) + atomicLevel int32 // Shadow copy of level for lock-free checks + namespaces *lx.Namespace // Manages namespace enable/disable states + currentPath string // Current namespace path (e.g., "parent/child") + context lx.Fields // Contextual fields included in all logs + style lx.StyleType // Namespace formatting style (FlatPath or NestedPath) + handler lx.Handler // Output handler for logs (e.g., text, JSON) + middleware []Middleware // Middleware functions to process log entries + prefix string // Prefix prepended to log messages + indent int // Number of double spaces for message indentation + stackBufferSize int // Buffer size for capturing stack traces + separator string // Separator for namespace paths (e.g., "/") + entries atomic.Int64 // Tracks total log entries sent to handler + fatalExits bool + fatalStack bool + labels atomic.Pointer[[]string] } // New creates a new Logger with the given namespace and optional configurations. 
@@ -51,42 +77,76 @@ type Logger struct { // logger.Info("Starting application") // Output: [app] INFO: Starting application func New(namespace string, opts ...Option) *Logger { logger := &Logger{ - enabled: lx.DefaultEnabled, // Defaults to disabled (false) + //enabled: 0, // Defaults to disabled (false) level: lx.LevelDebug, // Default minimum log level + atomicLevel: int32(lx.LevelDebug), // Initialize atomic level namespaces: defaultStore, // Shared namespace store currentPath: namespace, // Initial namespace path - context: make(map[string]interface{}), // Empty context for fields + context: make(lx.Fields, 0, 10), // Empty context for fields style: lx.FlatPath, // Default namespace style ([parent/child]) handler: lh.NewTextHandler(os.Stdout), // Default text output to stdout middleware: make([]Middleware, 0), // Empty middleware chain stackBufferSize: 4096, // Default stack trace buffer size separator: lx.Slash, // Default namespace separator ("/") } - + logger.enabled.Store(lx.Active) // Apply provided configuration options for _, opt := range opts { opt(logger) } - return logger } -// AddContext adds a key-value pair to the logger's context, modifying it directly. -// Unlike Context, it mutates the existing context. It is thread-safe using a write lock. +// Apply applies one or more functional options to the default/global logger. +// Useful for late configuration (e.g., after migration, attach VictoriaLogs handler, +// set level, add middleware, etc.) without changing existing New() calls. +// // Example: // -// logger := New("app").Enable() -// logger.AddContext("user", "alice") -// logger.Info("Action") // Output: [app] INFO: Action [user=alice] -func (l *Logger) AddContext(key string, value interface{}) *Logger { +// // In main() or init(), after setting up handler +// ll.Apply( +// ll.Handler(vlBatched), +// ll.Level(ll.LevelInfo), +// ll.Use(rateLimiterMiddleware), +// ) +// +// Returns the default logger for chaining (if needed). 
+func (l *Logger) Apply(opts ...Option) *Logger { l.mu.Lock() defer l.mu.Unlock() + for _, opt := range opts { + if opt != nil { + opt(l) + } + } + return l +} - // Initialize context map if nil +// AddContext adds one or more key-value pairs to the logger's persistent context. +// These fields will be included in **every** subsequent log message from this logger +// (and its child namespace loggers). +// +// It supports variadic key-value pairs (string key, any value). +// Non-string keys or uneven number of arguments will be safely ignored/logged. +// +// Returns the logger for chaining. +// +// Examples: +// +// logger.AddContext("user", "alice", "env", "prod") +// logger.AddContext("request_id", reqID, "trace_id", traceID) +// logger.AddContext("service", "payment") // single pair +func (l *Logger) AddContext(pairs ...any) *Logger { + l.mu.Lock() + defer l.mu.Unlock() if l.context == nil { - l.context = make(map[string]interface{}) + l.context = make(lx.Fields, 0, len(pairs)/2) + } + for i := 0; i < len(pairs)-1; i += 2 { + if key, ok := pairs[i].(string); ok { + l.context = append(l.context, lx.Field{Key: key, Value: pairs[i+1]}) + } } - l.context[key] = value return l } @@ -103,7 +163,6 @@ func (l *Logger) Benchmark(start time.Time) time.Duration { "duration_ms", duration.Milliseconds(), "duration", duration.String(), ).Infof("benchmark completed") - return duration } @@ -123,7 +182,7 @@ func (l *Logger) CanLog(level lx.LevelType) bool { // // logger := New("app").Enable().Use(someMiddleware) // logger.Clear() -// logger.Info("No middleware") // Output: [app] INFO: No middleware +// logger.Info("Inactive middleware") // Output: [app] INFO: Inactive middleware func (l *Logger) Clear() *Logger { l.mu.Lock() defer l.mu.Unlock() @@ -142,20 +201,20 @@ func (l *Logger) Clear() *Logger { func (l *Logger) Clone() *Logger { l.mu.RLock() defer l.mu.RUnlock() - return &Logger{ - enabled: l.enabled, // Copy enablement state - level: l.level, // Copy log level - 
namespaces: l.namespaces, // Share namespace store - currentPath: l.currentPath, // Copy namespace path - context: make(map[string]interface{}), // Fresh context map - style: l.style, // Copy namespace style - handler: l.handler, // Copy output handler - middleware: l.middleware, // Copy middleware chain - prefix: l.prefix, // Copy message prefix - indent: l.indent, // Copy indentation level - stackBufferSize: l.stackBufferSize, // Copy stack trace buffer size - separator: l.separator, // Default separator ("/") + enabled: l.enabled, // Copy enablement state + level: l.level, // Copy log level + atomicLevel: l.atomicLevel, // Copy atomic level + namespaces: l.namespaces, // Share namespace store + currentPath: l.currentPath, // Copy namespace path + context: nil, // Fresh context map (nil saves allocation, handled by AddContext) + style: l.style, // Copy namespace style + handler: l.handler, // Copy output handler + middleware: l.middleware, // Copy middleware chain + prefix: l.prefix, // Copy message prefix + indent: l.indent, // Copy indentation level + stackBufferSize: l.stackBufferSize, // Copy stack trace buffer size + separator: l.separator, // Default separator ("/") suspend: l.suspend, } } @@ -171,14 +230,14 @@ func (l *Logger) Clone() *Logger { func (l *Logger) Context(fields map[string]interface{}) *Logger { l.mu.Lock() defer l.mu.Unlock() - // Create a new logger with inherited configuration newLogger := &Logger{ enabled: l.enabled, level: l.level, + atomicLevel: l.atomicLevel, namespaces: l.namespaces, currentPath: l.currentPath, - context: make(map[string]interface{}), + context: make(lx.Fields, 0, len(l.context)+len(fields)), style: l.style, handler: l.handler, middleware: l.middleware, @@ -187,37 +246,18 @@ func (l *Logger) Context(fields map[string]interface{}) *Logger { stackBufferSize: l.stackBufferSize, separator: l.separator, suspend: l.suspend, + fatalExits: l.fatalExits, + fatalStack: l.fatalStack, } - - // Copy parent's context fields - for 
k, v := range l.context { - newLogger.context[k] = v - } - - // Add new fields + // Copy parent's context fields (in order) + newLogger.context = append(newLogger.context, l.context...) + // Add new fields from map for k, v := range fields { - newLogger.context[k] = v + newLogger.context = append(newLogger.context, lx.Field{Key: k, Value: v}) } - return newLogger } -// Dbg logs debug information, including the source file, line number, and expression -// value, capturing the calling line of code. It is useful for debugging without temporary -// print statements. -// Example: -// -// x := 42 -// logger.Dbg(x) // Output: [file.go:123] x = 42 -func (l *Logger) Dbg(values ...interface{}) { - // Skip logging if Info level is not enabled - if !l.shouldLog(lx.LevelInfo) { - return - } - - l.dbg(2, values...) -} - // Debug logs a message at Debug level, formatting it and delegating to the internal // log method. It is thread-safe. // Example: @@ -225,16 +265,13 @@ func (l *Logger) Dbg(values ...interface{}) { // logger := New("app").Enable().Level(lx.LevelDebug) // logger.Debug("Debugging") // Output: [app] DEBUG: Debugging func (l *Logger) Debug(args ...any) { - // check if suspended if l.suspend.Load() { return } - // Skip logging if Debug level is not enabled if !l.shouldLog(lx.LevelDebug) { return } - l.log(lx.LevelDebug, lx.ClassText, cat.Space(args...), nil, false) } @@ -248,7 +285,6 @@ func (l *Logger) Debugf(format string, args ...any) { if l.suspend.Load() { return } - l.Debug(fmt.Sprintf(format, args...)) } @@ -257,11 +293,9 @@ func (l *Logger) Debugf(format string, args ...any) { // Example: // // logger := New("app").Enable().Disable() -// logger.Info("Ignored") // No output +// logger.Info("Ignored") // Inactive output func (l *Logger) Disable() *Logger { - l.mu.Lock() - defer l.mu.Unlock() - l.enabled = false + l.enabled.Store(lx.Inactive) return l } @@ -272,13 +306,16 @@ func (l *Logger) Disable() *Logger { // type Data struct { X int; Y string } // 
logger.Dump(Data{42, "test"}) // Outputs hex/ASCII dump func (l *Logger) Dump(values ...interface{}) { + if l.suspend.Load() { + return + } + // Iterate over each value to dump for _, value := range values { // Log value description and type l.Infof("Dumping %v (%T)", value, value) var by []byte var err error - // Convert value to byte slice based on type switch v := value.(type) { case []byte: @@ -310,13 +347,11 @@ func (l *Logger) Dump(values ...interface{}) { // Fallback to JSON marshaling for complex types by, err = json.Marshal(v) } - // Log error if conversion fails if err != nil { l.Errorf("Dump error: %v", err) continue } - // Generate hex/ASCII dump n := len(by) rowcount := 0 @@ -333,7 +368,6 @@ func (l *Logger) Dump(values ...interface{}) { } // Write position and hex prefix s.WriteString(fmt.Sprintf("pos %02d hex: ", i)) - // Write hex values for j := 0; j < rowcount; j++ { s.WriteString(fmt.Sprintf("%02x ", by[i+j])) @@ -354,14 +388,22 @@ func (l *Logger) Dump(values ...interface{}) { // Each value is logged on its own line with [file:line] and a blank line after the header. // Ideal for inspecting outgoing/incoming REST payloads. func (l *Logger) Output(values ...interface{}) { + if l.suspend.Load() { + return + } + l.output(2, values...) } +// mark logs the caller's file and line number along with an optional custom name label for tracing execution flow. 
func (l *Logger) output(skip int, values ...interface{}) { - if !l.shouldLog(lx.LevelInfo) { + if l.suspend.Load() { return } + if !l.shouldLog(lx.LevelInfo) { + return + } _, file, line, ok := runtime.Caller(skip) if !ok { return @@ -370,9 +412,7 @@ func (l *Logger) output(skip int, values ...interface{}) { if idx := strings.LastIndex(file, "/"); idx >= 0 { shortFile = file[idx+1:] } - header := fmt.Sprintf("[%s:%d] JSON:\n", shortFile, line) - for _, v := range values { // Always pretty-print with indent b, err := json.MarshalIndent(v, " ", " ") @@ -382,26 +422,29 @@ func (l *Logger) output(skip int, values ...interface{}) { "error": err.Error(), }, " ", " ") } - l.log(lx.LevelInfo, lx.ClassText, header+string(b), nil, false) + l.log(lx.LevelInfo, lx.ClassOutput, header+string(b), nil, false) } } // Inspect logs one or more values in a **developer-friendly, deeply introspective format** at Info level. -// It includes the caller file and line number, and reveals **all fields** — including: +// It includes the caller file and line number, and reveals **all fields** â including: // -// - Private (unexported) fields → prefixed with `(field)` +// - Private (unexported) fields â prefixed with `(field)` // - Embedded structs (inlined) -// - Pointers and nil values → shown as `*(field)` or `nil` +// - Pointers and nil values â shown as `*(field)` or `nil` // - Full struct nesting and type information // // This method uses `NewInspector` under the hood, which performs **full reflection-based traversal**. -// It is **not** meant for production logging or REST APIs — use `Output` for that. +// It is **not** meant for production logging or REST APIs â use `Output` for that. // // Ideal for: // - Debugging complex internal state // - Inspecting structs with private fields // - Understanding struct embedding and pointer behavior func (l *Logger) Inspect(values ...interface{}) { + if l.suspend.Load() { + return + } o := NewInspector(l) o.Log(2, values...) 
} @@ -413,9 +456,7 @@ func (l *Logger) Inspect(values ...interface{}) { // logger := New("app").Enable() // logger.Info("Started") // Output: [app] INFO: Started func (l *Logger) Enable() *Logger { - l.mu.Lock() - defer l.mu.Unlock() - l.enabled = true + l.enabled.Store(lx.Active) return l } @@ -427,12 +468,10 @@ func (l *Logger) Enable() *Logger { // logger.Info("Logging is enabled") // Output: [app] INFO: Logging is enabled // } func (l *Logger) Enabled() bool { - l.mu.RLock() - defer l.mu.RUnlock() - return l.enabled + return l.enabled.Load() == lx.Active } -// Err adds one or more errors to the logger’s context and logs them at Error level. +// Err adds one or more errors to the loggerâ s context and logs them at Error level. // Non-nil errors are stored in the "error" context field (single error or slice) and // logged as a concatenated string (e.g., "failed 1; failed 2"). It is thread-safe and // returns the logger for chaining. @@ -445,18 +484,20 @@ func (l *Logger) Enabled() bool { // // Output: [app] ERROR: failed 1; failed 2 // // [app] INFO: Error occurred [error=[failed 1 failed 2]] func (l *Logger) Err(errs ...error) { + if l.suspend.Load() { + return + } + // Skip logging if Error level is not enabled if !l.shouldLog(lx.LevelError) { return } - l.mu.Lock() - - // Initialize context map if nil + defer l.mu.Unlock() + // Initialize context slice if nil if l.context == nil { - l.context = make(map[string]interface{}) + l.context = make(lx.Fields, 0, 4) } - // Collect non-nil errors and build log message var nonNilErrors []error var builder strings.Builder @@ -471,19 +512,17 @@ func (l *Logger) Err(errs ...error) { count++ } } - if count > 0 { if count == 1 { // Store single error directly - l.context["error"] = nonNilErrors[0] + l.context = append(l.context, lx.Field{Key: "error", Value: nonNilErrors[0]}) } else { // Store slice of errors - l.context["error"] = nonNilErrors + l.context = append(l.context, lx.Field{Key: "error", Value: nonNilErrors}) } // 
Log concatenated error messages l.log(lx.LevelError, lx.ClassText, builder.String(), nil, false) } - l.mu.Unlock() } // Error logs a message at Error level, formatting it and delegating to the internal @@ -493,11 +532,9 @@ func (l *Logger) Err(errs ...error) { // logger := New("app").Enable() // logger.Error("Error occurred") // Output: [app] ERROR: Error occurred func (l *Logger) Error(args ...any) { - // check if suspended if l.suspend.Load() { return } - // Skip logging if Error level is not enabled if !l.shouldLog(lx.LevelError) { return @@ -515,7 +552,6 @@ func (l *Logger) Errorf(format string, args ...any) { if l.suspend.Load() { return } - l.Error(fmt.Errorf(format, args...)) } @@ -526,18 +562,16 @@ func (l *Logger) Errorf(format string, args ...any) { // logger := New("app").Enable() // logger.Fatal("Fatal error") // Output: [app] ERROR: Fatal error [stack=...], then exits func (l *Logger) Fatal(args ...any) { - // check if suspended if l.suspend.Load() { return } - - // Exit immediately if Error level is not enabled if !l.shouldLog(lx.LevelError) { os.Exit(1) } - - l.log(lx.LevelError, lx.ClassText, cat.Space(args...), nil, false) - os.Exit(1) + l.log(lx.LevelFatal, lx.ClassText, cat.Space(args...), nil, l.fatalStack) + if l.fatalExits { + os.Exit(1) + } } // Fatalf logs a formatted message at Error level with a stack trace and exits the program. @@ -547,61 +581,81 @@ func (l *Logger) Fatal(args ...any) { // logger := New("app").Enable() // logger.Fatalf("Fatal %s", "error") // Output: [app] ERROR: Fatal error [stack=...], then exits func (l *Logger) Fatalf(format string, args ...any) { - // check if suspended if l.suspend.Load() { return } - l.Fatal(fmt.Sprintf(format, args...)) } +// FieldOne logs a message at Error level with a stack trace and exits the program with +// exit code 1. It is thread-safe. 
+func (l *Logger) FieldOne(key string, value any) *FieldBuilder { + fb := fieldBuilderPool.Get().(*FieldBuilder) + fb.logger = l + fb.fields = fb.fields[:1] + fb.fields[0] = lx.Field{Key: key, Value: value} + return fb +} + +// FieldSet avoids variadic allocation overhead by accepting a slice of strongly typed fields. +// Ideally, lx.Field is struct{Key string, Value any} +func (l *Logger) FieldSet(fields []lx.Field) *FieldBuilder { + fb := getFieldBuilder(l, len(fields)) + if l.suspend.Load() { + return fb + } + fb.fields = append(fb.fields, fields...) + return fb +} + // Field starts a fluent chain for adding fields from a map, creating a FieldBuilder -// for type-safe field addition. It is thread-safe via the FieldBuilder’s logger. +// for type-safe field addition. It is thread-safe via the FieldBuilderâ s logger. // Example: // // logger := New("app").Enable() // logger.Field(map[string]interface{}{"user": "alice"}).Info("Action") // Output: [app] INFO: Action [user=alice] +// +// Field starts a fluent chain for adding fields from a map func (l *Logger) Field(fields map[string]interface{}) *FieldBuilder { - fb := &FieldBuilder{logger: l, fields: make(map[string]interface{})} - - // check if suspended + fb := getFieldBuilder(l, len(fields)) if l.suspend.Load() { return fb } - - // Copy fields from input map to FieldBuilder for k, v := range fields { - fb.fields[k] = v + fb.fields = append(fb.fields, lx.Field{Key: k, Value: v}) } return fb } -// Fields starts a fluent chain for adding fields using variadic key-value pairs, -// creating a FieldBuilder. Non-string keys or uneven pairs add an error field. It is -// thread-safe via the FieldBuilder’s logger. +// Fields starts a fluent chain for adding fields using variadic key-value pairs. +// It creates a FieldBuilder to attach fields, handling non-string keys or uneven pairs by +// adding an error field. Thread-safe via the FieldBuilder's logger. 
// Example: // -// logger := New("app").Enable() // logger.Fields("user", "alice").Info("Action") // Output: [app] INFO: Action [user=alice] func (l *Logger) Fields(pairs ...any) *FieldBuilder { - fb := &FieldBuilder{logger: l, fields: make(map[string]interface{})} - + fb := getFieldBuilder(l, len(pairs)/2) if l.suspend.Load() { return fb } - // Process key-value pairs for i := 0; i < len(pairs)-1; i += 2 { if key, ok := pairs[i].(string); ok { - fb.fields[key] = pairs[i+1] + fb.fields = append(fb.fields, lx.Field{Key: key, Value: pairs[i+1]}) } else { // Log error for non-string keys - fb.fields["error"] = fmt.Errorf("non-string key in Fields: %v", pairs[i]) + fb.fields = append(fb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("non-string key in Fields: %v", pairs[i]), + }) } } // Log error for uneven pairs if len(pairs)%2 != 0 { - fb.fields["error"] = fmt.Errorf("uneven key-value pairs in Fields: [%v]", pairs[len(pairs)-1]) + fb.fields = append(fb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("uneven key-value pairs in Fields: [%v]", pairs[len(pairs)-1]), + }) } return fb } @@ -615,7 +669,12 @@ func (l *Logger) Fields(pairs ...any) *FieldBuilder { func (l *Logger) GetContext() map[string]interface{} { l.mu.RLock() defer l.mu.RUnlock() - return l.context + // Convert slice to map for backward compatibility + contextMap := make(map[string]interface{}, len(l.context)) + for _, pair := range l.context { + contextMap[pair.Key] = pair.Value + } + return contextMap } // GetHandler returns the logger's current handler for customization or inspection. 
@@ -715,11 +774,9 @@ func (l *Logger) Info(args ...any) { if l.suspend.Load() { return } - if !l.shouldLog(lx.LevelInfo) { return } - l.log(lx.LevelInfo, lx.ClassText, cat.Space(args...), nil, false) } @@ -732,7 +789,6 @@ func (l *Logger) Infof(format string, args ...any) { if l.suspend.Load() { return } - l.Info(fmt.Sprintf(format, args...)) } @@ -747,17 +803,40 @@ func (l *Logger) Len() int64 { return l.entries.Load() } +// Labels temporarily attaches one or more label names to the logger for the next log entry. +// Labels are typically used for metrics, benchmarking, tracing, or categorizing logs in a structured way. +// +// The labels are stored atomically and intended to be short-lived, applying only to the next +// log operation (or until overwritten by a subsequent call to Labels). Multiple labels can +// be provided as separate string arguments. +// +// Example usage: +// +// logger := New("app").Enable() +// +// // Add labels for a specific operation +// logger.Labels("load_users", "process_orders").Measure(func() { +// // ... perform work ... +// }, func() { +// // ... optional callback ... +// }) +func (l *Logger) Labels(names ...string) *Logger { + l.labels.Store(&names) // store temporarily + return l +} + // Level sets the minimum log level, ignoring messages below it. It is thread-safe using // a write lock and returns the logger for chaining. // Example: // // logger := New("app").Enable().Level(lx.LevelWarn) -// logger.Info("Ignored") // No output +// logger.Info("Ignored") // Inactive output // logger.Warn("Logged") // Output: [app] WARN: Logged func (l *Logger) Level(level lx.LevelType) *Logger { l.mu.Lock() defer l.mu.Unlock() l.level = level + atomic.StoreInt32(&l.atomicLevel, int32(level)) return l } @@ -792,66 +871,43 @@ func (l *Logger) Line(lines ...int) *Logger { // // logger.Mark() // *MARK*: [file.go:123] func (l *Logger) Mark(name ...string) { + if l.suspend.Load() { + return + } l.mark(2, name...) 
} +// mark logs the caller's file and line number along with an optional custom name label for tracing execution flow. func (l *Logger) mark(skip int, names ...string) { + if l.suspend.Load() { + return + } // Skip logging if Info level is not enabled if !l.shouldLog(lx.LevelInfo) { return } - // Get caller information (file, line) _, file, line, ok := runtime.Caller(skip) if !ok { l.log(lx.LevelError, lx.ClassText, "Mark: Unable to parse runtime caller", nil, false) return } - // Extract just the filename (without full path) shortFile := file if idx := strings.LastIndex(file, "/"); idx >= 0 { shortFile = file[idx+1:] } - name := strings.Join(names, l.separator) if name == "" { name = "MARK" } - // Format as [filename:line] out := fmt.Sprintf("[*%s*]: [%s:%d]\n", name, shortFile, line) l.log(lx.LevelInfo, lx.ClassRaw, out, nil, false) } -// Measure benchmarks function execution, logging the duration at Info level with a -// "duration" field. It is thread-safe via Fields and log methods. -// Example: -// -// logger := New("app").Enable() -// duration := logger.Measure(func() { time.Sleep(time.Millisecond) }) -// // Output: [app] INFO: function executed [duration=~1ms] -func (l *Logger) Measure(fns ...func()) time.Duration { - start := time.Now() - - for _, fn := range fns { - if fn != nil { - fn() - } - } - - duration := time.Since(start) - l.Fields( - "duration_ns", duration.Nanoseconds(), - "duration", duration.String(), - "duration_ms", fmt.Sprintf("%.3fms", float64(duration.Nanoseconds())/1e6), - ).Infof("execution completed") - - return duration -} - // Namespace creates a child logger with a sub-namespace appended to the current path, -// inheriting the parent’s configuration but with an independent context. It is thread-safe +// inheriting the parentâ s configuration but with an independent context. It is thread-safe // using a read lock. 
// Example: // @@ -862,23 +918,21 @@ func (l *Logger) Namespace(name string) *Logger { if l.suspend.Load() { return l } - l.mu.RLock() defer l.mu.RUnlock() - // Construct full namespace path fullPath := name if l.currentPath != "" { fullPath = l.currentPath + l.separator + name } - // Create child logger with inherited configuration return &Logger{ enabled: l.enabled, level: l.level, + atomicLevel: l.atomicLevel, namespaces: l.namespaces, currentPath: fullPath, - context: make(map[string]interface{}), + context: nil, // Fresh context map (nil saves allocation) style: l.style, handler: l.handler, middleware: l.middleware, @@ -891,24 +945,23 @@ func (l *Logger) Namespace(name string) *Logger { } // NamespaceDisable disables logging for a namespace and its children, invalidating the -// namespace cache. It is thread-safe via lx.Namespace’s sync.Map and returns the logger +// namespace cache. It is thread-safe via lx.Namespaceâ s sync.Map and returns the logger // for chaining. // Example: // // logger := New("parent").Enable().NamespaceDisable("parent/child") -// logger.Namespace("child").Info("Ignored") // No output +// logger.Namespace("child").Info("Ignored") // Inactive output func (l *Logger) NamespaceDisable(relativePath string) *Logger { l.mu.RLock() fullPath := l.joinPath(l.currentPath, relativePath) l.mu.RUnlock() - // Disable namespace in shared store l.namespaces.Set(fullPath, false) return l } // NamespaceEnable enables logging for a namespace and its children, invalidating the -// namespace cache. It is thread-safe via lx.Namespace’s sync.Map and returns the logger +// namespace cache. It is thread-safe via lx.Namespaceâ s sync.Map and returns the logger // for chaining. 
// Example: // @@ -918,7 +971,6 @@ func (l *Logger) NamespaceEnable(relativePath string) *Logger { l.mu.RLock() fullPath := l.joinPath(l.currentPath, relativePath) l.mu.RUnlock() - // Enable namespace in shared store l.namespaces.Set(fullPath, true) return l @@ -931,20 +983,15 @@ func (l *Logger) NamespaceEnable(relativePath string) *Logger { // logger := New("parent").Enable().NamespaceDisable("parent/child") // enabled := logger.NamespaceEnabled("parent/child") // false func (l *Logger) NamespaceEnabled(relativePath string) bool { - l.mu.RLock() fullPath := l.joinPath(l.currentPath, relativePath) separator := l.separator if separator == "" { separator = lx.Slash } - instanceEnabled := l.enabled - l.mu.RUnlock() - // Handle root path case if fullPath == "" && relativePath == "" { - return instanceEnabled + return l.enabled.Load() == lx.Active } - if fullPath != "" { // Check namespace rules isEnabledByNSRule, isDisabledByNSRule := l.namespaces.Enabled(fullPath, separator) @@ -956,7 +1003,7 @@ func (l *Logger) NamespaceEnabled(relativePath string) bool { } } // Fall back to logger's enabled state - return instanceEnabled + return l.enabled.Load() == lx.Active } // Panic logs a message at Error level with a stack trace and triggers a panic. It is @@ -968,17 +1015,14 @@ func (l *Logger) NamespaceEnabled(relativePath string) bool { func (l *Logger) Panic(args ...any) { // Build message by concatenating arguments with spaces msg := cat.Space(args...) 
- if l.suspend.Load() { panic(msg) } - // Panic immediately if Error level is not enabled if !l.shouldLog(lx.LevelError) { panic(msg) } - - l.log(lx.LevelError, lx.ClassText, msg, nil, true) + l.log(lx.LevelFatal, lx.ClassText, msg, nil, true) panic(msg) } @@ -1015,7 +1059,6 @@ func (l *Logger) Print(args ...any) { if l.suspend.Load() { return } - // Skip logging if Info level is not enabled if !l.shouldLog(lx.LevelInfo) { return @@ -1033,7 +1076,6 @@ func (l *Logger) Println(args ...any) { if l.suspend.Load() { return } - // Skip logging if Info level is not enabled if !l.shouldLog(lx.LevelInfo) { return @@ -1050,12 +1092,11 @@ func (l *Logger) Printf(format string, args ...any) { if l.suspend.Load() { return } - l.Print(fmt.Sprintf(format, args...)) } // Remove removes middleware by the reference returned from Use, delegating to the -// Middleware’s Remove method for thread-safe removal. +// Middlewareâ s Remove method for thread-safe removal. // Example: // // logger := New("app").Enable() @@ -1098,7 +1139,7 @@ func (l *Logger) Separator(separator string) *Logger { // // logger := New("app").Enable() // logger.Suspend() -// logger.Info("Ignored") // No output +// logger.Info("Ignored") // Inactive output func (l *Logger) Suspend() *Logger { l.suspend.Store(true) return l @@ -1123,17 +1164,8 @@ func (l *Logger) Suspended() bool { // logger := New("app").Enable() // logger.Stack("Critical error") // Output: [app] ERROR: Critical error [stack=...] func (l *Logger) Stack(args ...any) { - if l.suspend.Load() { - return - } - - // Skip logging if Debug level is not enabled - if !l.shouldLog(lx.LevelDebug) { - return - } - for _, arg := range args { - l.log(lx.LevelError, lx.ClassText, cat.Concat(arg), nil, true) + l.log(lx.LevelError, lx.ClassStack, cat.Concat(arg), nil, true) } } @@ -1144,10 +1176,6 @@ func (l *Logger) Stack(args ...any) { // logger := New("app").Enable() // logger.Stackf("Critical %s", "error") // Output: [app] ERROR: Critical error [stack=...] 
func (l *Logger) Stackf(format string, args ...any) { - if l.suspend.Load() { - return - } - l.Stack(fmt.Sprintf(format, args...)) } @@ -1167,12 +1195,12 @@ func (l *Logger) StackSize(size int) *Logger { } // Style sets the namespace formatting style (FlatPath or NestedPath). FlatPath uses -// [parent/child], while NestedPath uses [parent]→[child]. It is thread-safe using a write +// [parent/child], while NestedPath uses [parent]â [child]. It is thread-safe using a write // lock and returns the logger for chaining. // Example: // // logger := New("parent/child").Enable().Style(lx.NestedPath) -// logger.Info("Log") // Output: [parent]→[child]: INFO: Log +// logger.Info("Log") // Output: [parent]â [child]: INFO: Log func (l *Logger) Style(style lx.StyleType) *Logger { l.mu.Lock() defer l.mu.Unlock() @@ -1191,13 +1219,22 @@ func (l *Logger) Style(style lx.StyleType) *Logger { func (l *Logger) Timestamped(enable bool, format ...string) *Logger { l.mu.Lock() defer l.mu.Unlock() - if h, ok := l.handler.(lx.Timestamper); ok { h.Timestamped(enable, format...) } return l } +// Toggle enables or disables the logger based on the provided boolean value and returns the updated logger instance. +func (l *Logger) Toggle(v bool) *Logger { + if v { + l.Resume() + return l.Enable() + } + l.Suspend() + return l.Disable() +} + // Use adds a middleware function to process log entries before they are handled, returning // a Middleware handle for removal. Middleware returning a non-nil error stops the log. // It is thread-safe using a write lock. 
@@ -1210,18 +1247,16 @@ func (l *Logger) Timestamped(enable bool, format ...string) *Logger { // } // return nil // })) -// logger.Info("Ignored") // No output +// logger.Info("Ignored") // Inactive output // mw.Remove() // logger.Info("Now logged") // Output: [app] INFO: Now logged func (l *Logger) Use(fn lx.Handler) *Middleware { l.mu.Lock() defer l.mu.Unlock() - // Assign a unique ID to the middleware id := len(l.middleware) + 1 // Append middleware to the chain l.middleware = append(l.middleware, Middleware{id: id, fn: fn}) - return &Middleware{ logger: l, id: id, @@ -1238,12 +1273,10 @@ func (l *Logger) Warn(args ...any) { if l.suspend.Load() { return } - // Skip logging if Warn level is not enabled if !l.shouldLog(lx.LevelWarn) { return } - l.log(lx.LevelWarn, lx.ClassText, cat.Space(args...), nil, false) } @@ -1256,62 +1289,9 @@ func (l *Logger) Warnf(format string, args ...any) { if l.suspend.Load() { return } - l.Warn(fmt.Sprintf(format, args...)) } -// dbg is an internal helper for Dbg, logging debug information with source file and line -// number, extracting the calling line of code. It is thread-safe via the log method. 
-// Example (internal usage): -// -// logger.Dbg(x) // Calls dbg(2, x) -func (l *Logger) dbg(skip int, values ...interface{}) { - for _, exp := range values { - // Get caller information (file, line) - _, file, line, ok := runtime.Caller(skip) - if !ok { - l.log(lx.LevelError, lx.ClassText, "Dbg: Unable to parse runtime caller", nil, false) - return - } - - // Open source file - f, err := os.Open(file) - if err != nil { - l.log(lx.LevelError, lx.ClassText, "Dbg: Unable to open expected file", nil, false) - return - } - - // Scan file to find the line - scanner := bufio.NewScanner(f) - scanner.Split(bufio.ScanLines) - var out string - i := 1 - for scanner.Scan() { - if i == line { - // Extract expression between parentheses - v := scanner.Text()[strings.Index(scanner.Text(), "(")+1 : len(scanner.Text())-strings.Index(reverseString(scanner.Text()), ")")-1] - // Format output with file, line, expression, and value - out = fmt.Sprintf("[%s:%d] %s = %+v", file[len(file)-strings.Index(reverseString(file), "/"):], line, v, exp) - break - } - i++ - } - if err := scanner.Err(); err != nil { - l.log(lx.LevelError, lx.ClassText, err.Error(), nil, false) - return - } - // Log based on value type - switch exp.(type) { - case error: - l.log(lx.LevelError, lx.ClassText, out, nil, false) - default: - l.log(lx.LevelInfo, lx.ClassText, out, nil, false) - } - - f.Close() - } -} - // joinPath joins a base path and a relative path using the logger's separator, handling // empty base or relative paths. It is used internally for namespace path construction. 
// Example (internal usage): @@ -1328,7 +1308,7 @@ func (l *Logger) joinPath(base, relative string) string { if separator == "" { separator = lx.Slash // Default separator } - return base + separator + relative + return cat.Concat(base, separator, relative) } // log is the internal method for processing a log entry, applying rate limiting, sampling, @@ -1339,76 +1319,114 @@ func (l *Logger) joinPath(base, relative string) string { // // logger := New("app").Enable() // logger.Info("Test") // Calls log(lx.LevelInfo, "Test", nil, false) -func (l *Logger) log(level lx.LevelType, class lx.ClassType, msg string, fields map[string]interface{}, withStack bool) { - // Skip logging if level is not enabled +// +// log is the internal method for processing a log entry, applying rate limiting, sampling, +// middleware, and context before passing to the handler. Middleware returning a non-nil +// error stops the log. It is thread-safe with read/write locks for configuration and stack +// trace buffer. +func (l *Logger) log(level lx.LevelType, class lx.ClassType, msg string, fields lx.Fields, withStack bool) { + // Skip logging if level is not enabled (fast path) if !l.shouldLog(level) { return } var stack []byte - - // Capture stack trace if requested + // Capture stack trace if requested (outside lock) if withStack { l.mu.RLock() - buf := make([]byte, l.stackBufferSize) + size := l.stackBufferSize l.mu.RUnlock() - n := runtime.Stack(buf, false) - if fields == nil { - fields = make(map[string]interface{}) + + buf := stackBufPool.Get().([]byte) + if cap(buf) < size { + buf = make([]byte, size) + } else { + buf = buf[:size] } - stack = buf[:n] + n := runtime.Stack(buf, false) + stack = append([]byte(nil), buf[:n]...) 
+ stackBufPool.Put(buf) } + // Read-only config snapshot (minimal lock scope) l.mu.RLock() - defer l.mu.RUnlock() + handler := l.handler + prefix := l.prefix + indent := l.indent + context := l.context + style := l.style + currentPath := l.currentPath + middleware := l.middleware + l.mu.RUnlock() - // Apply prefix and indentation to the message + // Apply prefix and indentation to the message (outside lock) var builder strings.Builder - if l.indent > 0 { - builder.WriteString(strings.Repeat(lx.DoubleSpace, l.indent)) + // Optimization: Pre-grow buffer if indent/prefix known + if indent > 0 { + builder.Grow(indent*2 + len(prefix) + len(msg)) + builder.WriteString(strings.Repeat(lx.DoubleSpace, indent)) + } else { + builder.Grow(len(prefix) + len(msg)) } - if l.prefix != "" { - builder.WriteString(l.prefix) + + if prefix != "" { + builder.WriteString(prefix) } builder.WriteString(msg) finalMsg := builder.String() - // Create log entry - entry := &lx.Entry{ - Timestamp: time.Now(), - Level: level, - Message: finalMsg, - Namespace: l.currentPath, - Fields: fields, - Style: l.style, - Class: class, - Stack: stack, - } - - // Merge context fields, avoiding overwrites - if len(l.context) > 0 { - if entry.Fields == nil { - entry.Fields = make(map[string]interface{}) - } - for k, v := range l.context { - if _, exists := entry.Fields[k]; !exists { - entry.Fields[k] = v - } - } - } + // Optimized field merging - avoid allocation when possible + var combinedFields lx.Fields + var pooledFields *lx.Fields // Track if we allocated from pool + + if len(context) == 0 { + combinedFields = fields + } else if len(fields) == 0 { + combinedFields = context + } else { + // Get pooled slice + pooledFields = fieldsSlicePool.Get().(*lx.Fields) + combinedFields = (*pooledFields)[:0] // Reset length, keep capacity + combinedFields = append(combinedFields, context...) + combinedFields = append(combinedFields, fields...) 
+ } + + // Get entry from pool + entry := entryPool.Get().(*lx.Entry) + + // Ensure pool return on ALL paths (including middleware errors) + defer func() { + // Reset slices to zero length but keep capacity for pool reuse + entry.Fields = entry.Fields[:0] + entry.Stack = entry.Stack[:0] + entryPool.Put(entry) + }() + + entry.Timestamp = time.Now() + entry.Level = level + entry.Message = finalMsg + entry.Namespace = currentPath + entry.Fields = combinedFields + entry.Style = style + entry.Class = class + entry.Stack = stack + entry.Error = nil + entry.Id = 0 // Apply middleware, stopping if any returns an error - for _, mw := range l.middleware { + for _, mw := range middleware { if err := mw.fn.Handle(entry); err != nil { + // Defer handles pool return return } } // Pass to handler if set - if l.handler != nil { - _ = l.handler.Handle(entry) + if handler != nil { + _ = handler.Handle(entry) l.entries.Add(1) } + // Defer handles pool return } // shouldLog determines if a log should be emitted based on enabled state, level, namespaces, @@ -1426,23 +1444,16 @@ func (l *Logger) shouldLog(level lx.LevelType) bool { return false } - // check for suspend mode - if l.suspend.Load() { - return false - } - - // Skip if log level is below minimum - if level > l.level { + // Atomic fast path: read level without lock + if level > lx.LevelType(atomic.LoadInt32(&l.atomicLevel)) { return false } - - separator := l.separator - if separator == "" { - separator = lx.Slash - } - - // Check namespace rules if path is set + // Check namespace rules if path is set (minimal lock scope) if l.currentPath != "" { + separator := l.separator + if separator == "" { + separator = lx.Slash + } isEnabledByNSRule, isDisabledByNSRule := l.namespaces.Enabled(l.currentPath, separator) if isDisabledByNSRule { return false @@ -1451,62 +1462,5 @@ func (l *Logger) shouldLog(level lx.LevelType) bool { return true } } - - // Fall back to logger's enabled state - if !l.enabled { - return false - } - - return 
true -} - -// WithHandler sets the handler for the logger as a functional option for configuring -// a new logger instance. -// Example: -// -// logger := New("app", WithHandler(lh.NewJSONHandler(os.Stdout))) -func WithHandler(handler lx.Handler) Option { - return func(l *Logger) { - l.handler = handler - } -} - -// WithTimestamped returns an Option that configures timestamp settings for the logger's existing handler. -// It enables or disables timestamp logging and optionally sets the timestamp format if the handler -// supports the lx.Timestamper interface. If no handler is set, the function has no effect. -// Parameters: -// -// enable: Boolean to enable or disable timestamp logging -// format: Optional string(s) to specify the timestamp format -func WithTimestamped(enable bool, format ...string) Option { - return func(l *Logger) { - if l.handler != nil { // Check if a handler is set - // Verify if the handler supports the lx.Timestamper interface - if h, ok := l.handler.(lx.Timestamper); ok { - h.Timestamped(enable, format...) // Apply timestamp settings to the handler - } - } - } -} - -// WithLevel sets the minimum log level for the logger as a functional option for -// configuring a new logger instance. -// Example: -// -// logger := New("app", WithLevel(lx.LevelWarn)) -func WithLevel(level lx.LevelType) Option { - return func(l *Logger) { - l.level = level - } -} - -// WithStyle sets the namespace formatting style for the logger as a functional option -// for configuring a new logger instance. 
-// Example: -// -// logger := New("app", WithStyle(lx.NestedPath)) -func WithStyle(style lx.StyleType) Option { - return func(l *Logger) { - l.style = style - } + return l.enabled.Load() == lx.Active } diff --git a/vendor/github.com/olekukonko/ll/lx/field.go b/vendor/github.com/olekukonko/ll/lx/field.go new file mode 100644 index 0000000000..f662201c01 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/lx/field.go @@ -0,0 +1,140 @@ +package lx + +import ( + "fmt" + "strings" +) + +// Field represents a key-value pair where the key is a string and the value is of any type. +type Field struct { + Key string + Value interface{} +} + +// Fields represents a slice of key-value pairs. +type Fields []Field + +// Map converts the Fields slice to a map[string]interface{}. +// This is useful for backward compatibility or when map operations are needed. +// Example: +// +// fields := lx.Fields{{"user", "alice"}, {"age", 30}} +// m := fields.Map() // Returns map[string]interface{}{"user": "alice", "age": 30} +func (f Fields) Map() map[string]interface{} { + m := make(map[string]interface{}, len(f)) + for _, pair := range f { + m[pair.Key] = pair.Value + } + return m +} + +// Get returns the value for a given key and a boolean indicating if the key was found. +// This provides O(n) lookup, which is fine for small numbers of fields. +// Example: +// +// fields := lx.Fields{{"user", "alice"}, {"age", 30}} +// value, found := fields.Get("user") // Returns "alice", true +func (f Fields) Get(key string) (interface{}, bool) { + for _, pair := range f { + if pair.Key == key { + return pair.Value, true + } + } + return nil, false +} + +// Filter returns a new Fields slice containing only pairs where the predicate returns true. 
+// Example: +// +// fields := lx.Fields{{"user", "alice"}, {"password", "secret"}, {"age", 30}} +// filtered := fields.Filter(func(key string, value interface{}) bool { +// return key != "password" // Remove sensitive fields +// }) +func (f Fields) Filter(predicate func(key string, value interface{}) bool) Fields { + result := make(Fields, 0, len(f)) + for _, pair := range f { + if predicate(pair.Key, pair.Value) { + result = append(result, pair) + } + } + return result +} + +// Translate returns a new Fields slice with keys translated according to the provided mapping. +// Keys not in the mapping are passed through unchanged. This is useful for adapters like Victoria. +// Example: +// +// fields := lx.Fields{{"user", "alice"}, {"timestamp", time.Now()}} +// translated := fields.Translate(map[string]string{ +// "user": "username", +// "timestamp": "ts", +// }) +// // Returns: {{"username", "alice"}, {"ts", time.Now()}} +func (f Fields) Translate(mapping map[string]string) Fields { + result := make(Fields, len(f)) + for i, pair := range f { + if newKey, ok := mapping[pair.Key]; ok { + result[i] = Field{Key: newKey, Value: pair.Value} + } else { + result[i] = pair + } + } + return result +} + +// Merge merges another Fields slice into this one, with the other slice's fields taking precedence +// for duplicate keys (overwrites existing keys). +// Example: +// +// base := lx.Fields{{"user", "alice"}, {"age", 30}} +// additional := lx.Fields{{"age", 31}, {"city", "NYC"}} +// merged := base.Merge(additional) +// // Returns: {{"user", "alice"}, {"age", 31}, {"city", "NYC"}} +func (f Fields) Merge(other Fields) Fields { + result := make(Fields, 0, len(f)+len(other)) + + // Create a map to track which keys from 'other' we've seen + seen := make(map[string]bool, len(other)) + + // First add all fields from 'f' + result = append(result, f...) 
+ + // Then add fields from 'other', overwriting duplicates + for _, pair := range other { + // Check if this key already exists in result + found := false + for i, existing := range result { + if existing.Key == pair.Key { + result[i] = pair // Overwrite + found = true + break + } + } + if !found { + result = append(result, pair) + } + seen[pair.Key] = true + } + + return result +} + +// String returns a human-readable string representation of the fields. +// Example: +// +// fields := lx.Fields{{"user", "alice"}, {"age", 30}} +// str := fields.String() // Returns: "[user=alice age=30]" +func (f Fields) String() string { + var builder strings.Builder + builder.WriteString(LeftBracket) + for i, pair := range f { + if i > 0 { + builder.WriteString(Space) + } + builder.WriteString(pair.Key) + builder.WriteString("=") + builder.WriteString(fmt.Sprint(pair.Value)) + } + builder.WriteString(RightBracket) + return builder.String() +} diff --git a/vendor/github.com/olekukonko/ll/lx/interface.go b/vendor/github.com/olekukonko/ll/lx/interface.go new file mode 100644 index 0000000000..6486b97226 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/lx/interface.go @@ -0,0 +1,72 @@ +package lx + +import "io" + +// Handler defines the interface for processing log entries. +// Implementations (e.g., TextHandler, JSONHandler) format and output log entries to various +// destinations (e.g., stdout, files). The Handle method returns an error if processing fails, +// allowing the logger to handle output failures gracefully. +// Example (simplified handler implementation): +// +// type MyHandler struct{} +// func (h *MyHandler) Handle(e *Entry) error { +// fmt.Printf("[%s] %s: %s\n", e.Namespace, e.Level.String(), e.Message) +// return nil +// } +type Handler interface { + Handle(e *Entry) error // Processes a log entry, returning any error +} + +// Outputter defines the interface for handlers that support dynamic output +// destination changes. 
Implementations can switch their output writer at runtime. +// +// Example usage: +// +// h := &JSONHandler{} +// h.Output(os.Stderr) // Switch to stderr +// h.Output(file) // Switch to file +type Outputter interface { + Output(w io.Writer) +} + +// HandlerOutputter combines the Handler and Outputter interfaces. +// Types implementing this interface can both process log entries and +// dynamically change their output destination at runtime. +// +// This is useful for creating flexible logging handlers that support +// features like log rotation, output redirection, or runtime configuration. +// +// Example usage: +// +// var ho HandlerOutputter = &TextHandler{} +// // Handle log entries +// ho.Handle(&Entry{...}) +// // Switch output destination +// ho.Output(os.Stderr) +// +// Common implementations include TextHandler and JSONHandler when they +// support output destination changes. +type HandlerOutputter interface { + Handler // Can process log entries + Outputter // Can change output destination (has Output(w io.Writer) method) +} + +// Timestamper defines an interface for handlers that support timestamp configuration. +// It includes a method to enable or disable timestamp logging and optionally set the timestamp format. +type Timestamper interface { + // Timestamped enables or disables timestamp logging and allows specifying an optional format. + // Parameters: + // enable: Boolean to enable or disable timestamp logging + // format: Optional string(s) to specify the timestamp format + Timestamped(enable bool, format ...string) +} + +// Wrap is a handler decorator function that transforms a log handler. +// It takes an existing handler as input and returns a new, wrapped handler +// that adds functionality (like filtering, transformation, or routing). +type Wrap func(next Handler) Handler + +// Deduper defines how to calculate a deduplication key for an entry. 
+type Deduper interface { + Calculate(*Entry) uint64 +} diff --git a/vendor/github.com/olekukonko/ll/lx/lx.go b/vendor/github.com/olekukonko/ll/lx/lx.go index d370cb2d01..4e7f42ca28 100644 --- a/vendor/github.com/olekukonko/ll/lx/lx.go +++ b/vendor/github.com/olekukonko/ll/lx/lx.go @@ -1,10 +1,5 @@ package lx -import ( - "strings" - "time" -) - // Formatting constants for log output. // These constants define the characters used to format log messages, ensuring consistency // across handlers (e.g., text, JSON, colorized). They are used to construct namespace paths, @@ -16,7 +11,7 @@ const ( Arrow = "→" // Arrow for NestedPath style namespaces (e.g., [parent]→[child]) LeftBracket = "[" // Opening bracket for namespaces and fields (e.g., [app]) RightBracket = "]" // Closing bracket for namespaces and fields (e.g., [app]) - Colon = ":" // Separator after namespace or level (e.g., [app]: INFO:) + Colon = ":" // Separator after namespace or level (e.g., [app]: INFO:) can also be "|" Dot = "." // Separator for namespace paths (e.g., "parent.child") Newline = "\n" // Newline for separating log entries or stack trace lines ) @@ -25,7 +20,12 @@ const ( // It specifies whether logging is enabled by default for new Logger instances in the ll package. // Set to false to prevent logging until explicitly enabled. const ( - DefaultEnabled = false // Default state for new loggers (disabled) + DefaultEnabled = true // Default state for new loggers (disabled) + True = true + False = false + Active = 1 + Inactive = -1 + Unknown = 0 ) // Log level constants, ordered by increasing severity. 
@@ -36,6 +36,7 @@ const ( LevelInfo // Info level for general operational messages LevelWarn // Warn level for warning conditions LevelError // Error level for error conditions requiring attention + LevelFatal // Fatal level for critical error conditions LevelDebug // None level for logs without a specific severity (e.g., raw output) LevelUnknown // None level for logs without a specific severity (e.g., raw output) ) @@ -45,7 +46,9 @@ const ( DebugString = "DEBUG" InfoString = "INFO" WarnString = "WARN" + WarningString = "WARNING" ErrorString = "ERROR" + FatalString = "FATAL" NoneString = "NONE" UnknownString = "UNKNOWN" @@ -54,6 +57,11 @@ const ( DumpString = "DUMP" SpecialString = "SPECIAL" RawString = "RAW" + InspectString = "INSPECT" + DbgString = "DBG" + TimedString = "TIMED" + StackString = "STACK" + OutputString = "OUTPUT" ) // Log class constants, defining the type of log entry. @@ -65,7 +73,12 @@ const ( ClassDump // Dump entries for hex/ASCII dumps ClassSpecial // Special entries for custom or non-standard logs ClassRaw // Raw entries for unformatted output - ClassUnknown // Raw entries for unformatted output + ClassInspect // Inspect entries for debugging + ClassDbg // Inspect entries for debugging + ClassTimed // Inspect entries for debugging + ClassStack // Inspect entries for debugging + ClassOutput // Inspect entries for debugging + ClassUnknown // Unknown output ) // Namespace style constants. @@ -75,149 +88,3 @@ const ( FlatPath StyleType = iota // Formats namespaces as [parent/child] NestedPath // Formats namespaces as [parent]→[child] ) - -// LevelType represents the severity of a log message. -// It is an integer type used to define log levels (Debug, Info, Warn, Error, None), with associated -// string representations for display in log output. -type LevelType int - -// String converts a LevelType to its string representation. -// It maps each level constant to a human-readable string, returning "UNKNOWN" for invalid levels. 
-// Used by handlers to display the log level in output. -// Example: -// -// var level lx.LevelType = lx.LevelInfo -// fmt.Println(level.String()) // Output: INFO -func (l LevelType) String() string { - switch l { - case LevelDebug: - return DebugString - case LevelInfo: - return InfoString - case LevelWarn: - return WarnString - case LevelError: - return ErrorString - case LevelNone: - return NoneString - default: - return UnknownString - } -} - -// LevelParse converts a string to its corresponding LevelType. -// It parses a string (case-insensitive) and returns the corresponding LevelType, defaulting to -// LevelUnknown for unrecognized strings. Supports "WARNING" as an alias for "WARN". -func LevelParse(s string) LevelType { - switch strings.ToUpper(s) { - case DebugString: - return LevelDebug - case InfoString: - return LevelInfo - case WarnString, "WARNING": // Allow both "WARN" and "WARNING" - return LevelWarn - case ErrorString: - return LevelError - case NoneString: - return LevelNone - default: - return LevelUnknown - } -} - -// StyleType defines how namespace paths are formatted in log output. -// It is an integer type used to select between FlatPath ([parent/child]) and NestedPath -// ([parent]→[child]) styles, affecting how handlers render namespace hierarchies. -type StyleType int - -// Entry represents a single log entry passed to handlers. -// It encapsulates all information about a log message, including its timestamp, severity, -// content, namespace, metadata, and formatting style. Handlers process Entry instances -// to produce formatted output (e.g., text, JSON). The struct is immutable once created, -// ensuring thread-safety in handler processing. 
-type Entry struct { - Timestamp time.Time // Time the log was created - Level LevelType // Severity level of the log (Debug, Info, Warn, Error, None) - Message string // Log message content - Namespace string // Namespace path (e.g., "parent/child") - Fields map[string]interface{} // Additional key-value metadata (e.g., {"user": "alice"}) - Style StyleType // Namespace formatting style (FlatPath or NestedPath) - Error error // Associated error, if any (e.g., for error logs) - Class ClassType // Type of log entry (Text, JSON, Dump, Special, Raw) - Stack []byte // Stack trace data (if present) - Id int `json:"-"` // Unique ID for the entry, ignored in JSON output -} - -// Handler defines the interface for processing log entries. -// Implementations (e.g., TextHandler, JSONHandler) format and output log entries to various -// destinations (e.g., stdout, files). The Handle method returns an error if processing fails, -// allowing the logger to handle output failures gracefully. -// Example (simplified handler implementation): -// -// type MyHandler struct{} -// func (h *MyHandler) Handle(e *Entry) error { -// fmt.Printf("[%s] %s: %s\n", e.Namespace, e.Level.String(), e.Message) -// return nil -// } -type Handler interface { - Handle(e *Entry) error // Processes a log entry, returning any error -} - -// Timestamper defines an interface for handlers that support timestamp configuration. -// It includes a method to enable or disable timestamp logging and optionally set the timestamp format. -type Timestamper interface { - // Timestamped enables or disables timestamp logging and allows specifying an optional format. - // Parameters: - // enable: Boolean to enable or disable timestamp logging - // format: Optional string(s) to specify the timestamp format - Timestamped(enable bool, format ...string) -} - -// ClassType represents the type of a log entry. 
-// It is an integer type used to categorize log entries (Text, JSON, Dump, Special, Raw), -// influencing how handlers process and format them. -type ClassType int - -// String converts a ClassType to its string representation. -// It maps each class constant to a human-readable string, returning "UNKNOWN" for invalid classes. -// Used by handlers to indicate the entry type in output (e.g., JSON fields). -// Example: -// -// var class lx.ClassType = lx.ClassText -// fmt.Println(class.String()) // Output: TEST -func (t ClassType) String() string { - switch t { - case ClassText: - return TextString - case ClassJSON: - return JSONString - case ClassDump: - return DumpString - case ClassSpecial: - return SpecialString - case ClassRaw: - return RawString - default: - return UnknownString - } -} - -// ParseClass converts a string to its corresponding ClassType. -// It parses a string (case-insensitive) and returns the corresponding ClassType, defaulting to -// ClassUnknown for unrecognized strings. -func ParseClass(s string) ClassType { - switch strings.ToUpper(s) { - case TextString: - return ClassText - case JSONString: - return ClassJSON - case DumpString: - return ClassDump - case SpecialString: - return ClassSpecial - case RawString: - return ClassRaw - default: - return ClassUnknown - } -} diff --git a/vendor/github.com/olekukonko/ll/lx/ns.go b/vendor/github.com/olekukonko/ll/lx/namespace.go similarity index 72% rename from vendor/github.com/olekukonko/ll/lx/ns.go rename to vendor/github.com/olekukonko/ll/lx/namespace.go index 5d30c93940..33b7694443 100644 --- a/vendor/github.com/olekukonko/ll/lx/ns.go +++ b/vendor/github.com/olekukonko/ll/lx/namespace.go @@ -3,12 +3,14 @@ package lx import ( "strings" "sync" + "sync/atomic" ) // namespaceRule stores the cached result of Enabled. 
type namespaceRule struct { isEnabledByRule bool isDisabledByRule bool + generation uint64 // NEW: track cache validity } // Namespace manages thread-safe namespace enable/disable states with caching. @@ -16,22 +18,22 @@ type namespaceRule struct { // The cache holds computed effective states for paths (path -> namespaceRule) // based on hierarchical rules to optimize lookups. type Namespace struct { - store sync.Map // path (string) -> rule (bool: true=enable, false=disable) - cache sync.Map // path (string) -> namespaceRule + store sync.Map // path (string) -> rule (bool) + cache sync.Map // path (string) -> namespaceRule + genCounter uint64 // NEW: atomic generation counter } // Set defines an explicit enable/disable rule for a namespace path. // It clears the cache to ensure subsequent lookups reflect the change. func (ns *Namespace) Set(path string, enabled bool) { ns.store.Store(path, enabled) - ns.clearCache() + ns.invalidatePathCache(path) } -// Load retrieves an explicit rule from the store for a path. -// Returns the rule (true=enable, false=disable) and whether it exists. -// Does not consider hierarchy or caching. -func (ns *Namespace) Load(path string) (rule interface{}, found bool) { - return ns.store.Load(path) +// invalidatePathCache increments generation counter instead of scanning cache. +func (ns *Namespace) invalidatePathCache(path string) { + // Atomic increment - O(1), no lock contention on cache + atomic.AddUint64(&ns.genCounter, 1) } // Store directly sets a rule in the store, bypassing cache invalidation. @@ -61,23 +63,26 @@ func (ns *Namespace) clearCache() { // // If both are false, no explicit rule applies to the path or its prefixes. 
func (ns *Namespace) Enabled(path string, separator string) (isEnabledByRule bool, isDisabledByRule bool) { - if path == "" { // Root path has no explicit rule + if path == "" { return false, false } - // Check cache + // Check cache with generation validation if cachedValue, found := ns.cache.Load(path); found { if state, ok := cachedValue.(namespaceRule); ok { - return state.isEnabledByRule, state.isDisabledByRule + // If cache generation matches current, result is valid + if state.generation == atomic.LoadUint64(&ns.genCounter) { + return state.isEnabledByRule, state.isDisabledByRule + } + // Stale cache - fall through to recompute + ns.cache.Delete(path) } - ns.cache.Delete(path) // Remove invalid cache entry } - // Compute: Most specific rule wins + // Compute: Most specific rule wins (original logic) parts := strings.Split(path, separator) computedIsEnabled := false computedIsDisabled := false - for i := len(parts); i >= 1; i-- { currentPrefix := strings.Join(parts[:i], separator) if val, ok := ns.store.Load(currentPrefix); ok { @@ -92,11 +97,11 @@ func (ns *Namespace) Enabled(path string, separator string) (isEnabledByRule boo } } - // Cache result, including (false, false) for no rule + // Cache result with current generation ns.cache.Store(path, namespaceRule{ isEnabledByRule: computedIsEnabled, isDisabledByRule: computedIsDisabled, + generation: atomic.LoadUint64(&ns.genCounter), }) - return computedIsEnabled, computedIsDisabled } diff --git a/vendor/github.com/olekukonko/ll/lx/types.go b/vendor/github.com/olekukonko/ll/lx/types.go new file mode 100644 index 0000000000..6c2905cddb --- /dev/null +++ b/vendor/github.com/olekukonko/ll/lx/types.go @@ -0,0 +1,145 @@ +package lx + +import ( + "strings" + "time" +) + +// levelStrings provides O(1) lookup for level string conversion +var levelStrings = [7]string{ + LevelNone: NoneString, + LevelInfo: InfoString, + LevelWarn: WarnString, + LevelError: ErrorString, + LevelFatal: FatalString, + LevelDebug: 
DebugString, + LevelUnknown: UnknownString, +} + +// LevelType represents the severity of a log message. +// It is an integer type used to define log levels (Debug, Info, Warn, Error, None), with associated +// string representations for display in log output. +type LevelType int + +// String converts a LevelType to its string representation. +// It maps each level constant to a human-readable string, returning "UNKNOWN" for invalid levels. +// Used by handlers to display the log level in output. +// Example: +// +// var level lx.LevelType = lx.LevelInfo +// fmt.Println(level.String()) // Output: INFO +func (l LevelType) String() string { + if l >= 0 && int(l) < len(levelStrings) { + return levelStrings[l] + } + return UnknownString +} + +func (l LevelType) Name(class ClassType) string { + if class == ClassRaw || class == ClassDump || class == ClassInspect || class == ClassDbg || class == ClassTimed { + return class.String() + } + return l.String() +} + +// LevelParse converts a string to its corresponding LevelType. +// It parses a string (case-insensitive) and returns the corresponding LevelType, defaulting to +// LevelUnknown for unrecognized strings. Supports "WARNING" as an alias for "WARN". +func LevelParse(s string) LevelType { + switch strings.ToUpper(s) { + case DebugString: + return LevelDebug + case InfoString: + return LevelInfo + case WarnString, WarningString: // Allow both "WARN" and "WARNING" + return LevelWarn + case ErrorString: + return LevelError + case NoneString: + return LevelNone + default: + return LevelUnknown + } +} + +// Entry represents a single log entry passed to handlers. +// It encapsulates all information about a log message, including its timestamp, severity, +// content, namespace, metadata, and formatting style. Handlers process Entry instances +// to produce formatted output (e.g., text, JSON). The struct is immutable once created, +// ensuring thread-safety in handler processing. 
+type Entry struct { + Timestamp time.Time // Time the log was created + Level LevelType // Severity level of the log (Debug, Info, Warn, Error, None) + Message string // Log message content + Namespace string // Namespace path (e.g., "parent/child") + Fields Fields // Additional key-value metadata (e.g., {"user": "alice"}) + Style StyleType // Namespace formatting style (FlatPath or NestedPath) + Error error // Associated error, if any (e.g., for error logs) + Class ClassType // Type of log entry (Text, JSON, Dump, Special, Raw) + Stack []byte // Stack trace data (if present) + Id int `json:"-"` // Unique ID for the entry, ignored in JSON output +} + +// StyleType defines how namespace paths are formatted in log output. +// It is an integer type used to select between FlatPath ([parent/child]) and NestedPath +// ([parent]→[child]) styles, affecting how handlers render namespace hierarchies. +type StyleType int + +// ClassType represents the type of a log entry. +// It is an integer type used to categorize log entries (Text, JSON, Dump, Special, Raw), +// influencing how handlers process and format them. +type ClassType int + +// String converts a ClassType to its string representation. +// It maps each class constant to a human-readable string, returning "UNKNOWN" for invalid classes. +// Used by handlers to indicate the entry type in output (e.g., JSON fields). 
+// Example: +// +// var class lx.ClassType = lx.ClassText +// fmt.Println(class.String()) // Output: TEST +func (t ClassType) String() string { + switch t { + case ClassText: + return TextString + case ClassJSON: + return JSONString + case ClassDump: + return DumpString + case ClassSpecial: + return SpecialString + case ClassInspect: + return InspectString + case ClassDbg: + return DbgString + case ClassRaw: + return RawString + case ClassTimed: + return TimedString + case ClassStack: + return StackString + case ClassOutput: + return OutputString + default: + return UnknownString + } +} + +// ParseClass converts a string to its corresponding ClassType. +// It parses a string (case-insensitive) and returns the corresponding ClassType, defaulting to +// ClassUnknown for unrecognized strings. +func ParseClass(s string) ClassType { + switch strings.ToUpper(s) { + case TextString: + return ClassText + case JSONString: + return ClassJSON + case DumpString: + return ClassDump + case SpecialString: + return ClassSpecial + case RawString: + return ClassRaw + default: + return ClassUnknown + } +} diff --git a/vendor/github.com/olekukonko/ll/middleware.go b/vendor/github.com/olekukonko/ll/middleware.go index 694f75fac2..17365e8fb5 100644 --- a/vendor/github.com/olekukonko/ll/middleware.go +++ b/vendor/github.com/olekukonko/ll/middleware.go @@ -39,11 +39,13 @@ func (m *Middleware) Remove() { // Acquire write lock to modify middleware slice m.logger.mu.Lock() defer m.logger.mu.Unlock() + // Iterate through middleware slice to find and remove matching ID for i, entry := range m.logger.middleware { if entry.id == m.id { - // Remove middleware by slicing out the matching entry - m.logger.middleware = append(m.logger.middleware[:i], m.logger.middleware[i+1:]...) 
+ last := len(m.logger.middleware) - 1 + m.logger.middleware[i] = m.logger.middleware[last] + m.logger.middleware = m.logger.middleware[:last] return } } diff --git a/vendor/github.com/olekukonko/ll/options.go b/vendor/github.com/olekukonko/ll/options.go new file mode 100644 index 0000000000..ae8ebbdb2b --- /dev/null +++ b/vendor/github.com/olekukonko/ll/options.go @@ -0,0 +1,69 @@ +package ll + +import ( + "github.com/olekukonko/ll/lx" +) + +// WithHandler sets the handler for the logger as a functional option for configuring +// a new logger instance. +// Example: +// +// logger := New("app", WithHandler(lh.NewJSONHandler(os.Stdout))) +func WithHandler(handler lx.Handler) Option { + return func(l *Logger) { + l.handler = handler + } +} + +// WithTimestamped returns an Option that configures timestamp settings for the logger's existing handler. +// It enables or disables timestamp logging and optionally sets the timestamp format if the handler +// supports the lx.Timestamper interface. If no handler is set, the function has no effect. +// Parameters: +// +// enable: Boolean to enable or disable timestamp logging +// format: Optional string(s) to specify the timestamp format +func WithTimestamped(enable bool, format ...string) Option { + return func(l *Logger) { + if l.handler != nil { // Check if a handler is set + // Verify if the handler supports the lx.Timestamper interface + if h, ok := l.handler.(lx.Timestamper); ok { + h.Timestamped(enable, format...) // Apply timestamp settings to the handler + } + } + } +} + +// WithLevel sets the minimum log level for the logger as a functional option for +// configuring a new logger instance. +// Example: +// +// logger := New("app", WithLevel(lx.LevelWarn)) +func WithLevel(level lx.LevelType) Option { + return func(l *Logger) { + l.Level(level) + } +} + +// WithStyle sets the namespace formatting style for the logger as a functional option +// for configuring a new logger instance. 
+// Example: +// +// logger := New("app", WithStyle(lx.NestedPath)) +func WithStyle(style lx.StyleType) Option { + return func(l *Logger) { + l.style = style + } +} + +// Functional options (can be passed to New() or applied later) +func WithFatalExits(enabled bool) Option { + return func(l *Logger) { + l.fatalExits = enabled + } +} + +func WithFatalStack(enabled bool) Option { + return func(l *Logger) { + l.fatalStack = enabled + } +} diff --git a/vendor/github.com/olekukonko/ll/since.go b/vendor/github.com/olekukonko/ll/since.go new file mode 100644 index 0000000000..22be568a27 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/since.go @@ -0,0 +1,388 @@ +package ll + +import ( + "fmt" + "strings" + "time" + + "github.com/olekukonko/ll/lx" +) + +// Measure executes one or more functions and logs the duration of each. +// It returns the total cumulative duration across all functions. +// +// Each function in `fns` is run sequentially. If a function is `nil`, it is skipped. +// +// Optional labels previously set via `Labels(...)` are applied to the corresponding function +// by position. If there are fewer labels than functions, missing labels are replaced with +// default names like "fn_0", "fn_1", etc. Labels are cleared after the call to prevent reuse. +// +// Example usage: +// +// logger := New("app").Enable() +// +// // Optional: add labels for functions +// logger.Labels("load_users", "process_orders") +// +// total := logger.Measure( +// func() { +// // simulate work 1 +// time.Sleep(100 * time.Millisecond) +// }, +// func() { +// // simulate work 2 +// time.Sleep(200 * time.Millisecond) +// }, +// func() { +// // simulate work 3 +// time.Sleep(50 * time.Millisecond) +// }, +// ) +// +// // Logs something like: +// // [load_users] completed duration=100ms +// // [process_orders] completed duration=200ms +// // [fn_2] completed duration=50ms +// +// Returns the sum of durations of all executed functions. 
+func (l *Logger) Measure(fns ...func()) time.Duration { + if len(fns) == 0 { + return 0 + } + + var total time.Duration + lblPtr := l.labels.Swap(nil) + var lbls []string + if lblPtr != nil { + lbls = *lblPtr + } + + for i, fn := range fns { + if fn == nil { + continue + } + // Use SinceBuilder instead of manual timing + sb := l.Since() // starts timer internally + fn() + duration := sb.Fields( + "index", i, + ).Info(fmt.Sprintf("[%s] completed", func() string { + if i < len(lbls) && lbls[i] != "" { + return lbls[i] + } + return fmt.Sprintf("fn_%d", i) + }())) + + total += duration + } + + return total +} + +// Since creates a timer that will log the duration when completed +// If startTime is provided, uses that as the start time; otherwise uses time.Now() +// +// defer logger.Since().Info("request") // Auto-start +// logger.Since(start).Info("request") // Manual timing +// logger.Since().If(debug).Debug("timing") // Conditional +func (l *Logger) Since(startTime ...time.Time) *SinceBuilder { + start := time.Now() + if len(startTime) > 0 && !startTime[0].IsZero() { + start = startTime[0] + } + + return &SinceBuilder{ + logger: l, + start: start, + condition: true, + fields: nil, // Lazily initialized + } +} + +// SinceBuilder provides a fluent API for logging timed operations +// It mirrors FieldBuilder exactly for field operations +type SinceBuilder struct { + logger *Logger + start time.Time + condition bool + fields lx.Fields +} + +// --------------------------------------------------------------------- +// Conditional Methods (match conditional.go pattern) +// --------------------------------------------------------------------- + +// If adds a condition to this timer - only logs if condition is true +func (sb *SinceBuilder) If(condition bool) *SinceBuilder { + sb.condition = sb.condition && condition + return sb +} + +// IfErr adds an error condition - only logs if err != nil +func (sb *SinceBuilder) IfErr(err error) *SinceBuilder { + sb.condition = 
sb.condition && (err != nil) + return sb +} + +// IfAny logs if ANY condition is true +func (sb *SinceBuilder) IfAny(conditions ...bool) *SinceBuilder { + if !sb.condition { + return sb + } + + for _, cond := range conditions { + if cond { + return sb + } + } + sb.condition = false + return sb +} + +// IfOne logs if ALL conditions are true +func (sb *SinceBuilder) IfOne(conditions ...bool) *SinceBuilder { + if !sb.condition { + return sb + } + + for _, cond := range conditions { + if !cond { + sb.condition = false + return sb + } + } + return sb +} + +// --------------------------------------------------------------------- +// Field Methods - EXACT MATCH with FieldBuilder API +// --------------------------------------------------------------------- + +// Fields adds key-value pairs as fields (variadic) +// EXACT match to FieldBuilder.Fields() +func (sb *SinceBuilder) Fields(pairs ...any) *SinceBuilder { + if sb.logger.suspend.Load() || !sb.condition { + return sb + } + + // Lazy initialization + if sb.fields == nil { + sb.fields = make(lx.Fields, 0, len(pairs)/2) + } + + // Process key-value pairs + for i := 0; i < len(pairs)-1; i += 2 { + if key, ok := pairs[i].(string); ok { + sb.fields = append(sb.fields, lx.Field{Key: key, Value: pairs[i+1]}) + } else { + // Log error for non-string keys (matches Fields behavior) + sb.fields = append(sb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("missing key '%v'", pairs[i]), + }) + } + } + + // Handle uneven pairs (matches Fields behavior) + if len(pairs)%2 != 0 { + sb.fields = append(sb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("missing key '%v'", pairs[len(pairs)-1]), + }) + } + + return sb +} + +// Field adds fields from a map +// EXACT match to FieldBuilder.Field() +func (sb *SinceBuilder) Field(fields map[string]interface{}) *SinceBuilder { + if sb.logger.suspend.Load() || !sb.condition || len(fields) == 0 { + return sb + } + + // Lazy initialization + if sb.fields == nil { + sb.fields = 
make(lx.Fields, 0, len(fields)) + } + + // Copy fields from input map (preserves iteration order) + for k, v := range fields { + sb.fields = append(sb.fields, lx.Field{Key: k, Value: v}) + } + + return sb +} + +// Err adds one or more errors as a field +// EXACT match to FieldBuilder.Err() +func (sb *SinceBuilder) Err(errs ...error) *SinceBuilder { + if sb.logger.suspend.Load() || !sb.condition { + return sb + } + + // Lazy initialization + if sb.fields == nil { + sb.fields = make(lx.Fields, 0, 2) + } + + // Collect non-nil errors + var nonNilErrors []error + var builder strings.Builder + count := 0 + + for i, err := range errs { + if err != nil { + if i > 0 && count > 0 { + builder.WriteString("; ") + } + builder.WriteString(err.Error()) + nonNilErrors = append(nonNilErrors, err) + count++ + } + } + + if count > 0 { + if count == 1 { + sb.fields = append(sb.fields, lx.Field{Key: "error", Value: nonNilErrors[0]}) + } else { + sb.fields = append(sb.fields, lx.Field{Key: "error", Value: nonNilErrors}) + } + // Note: Unlike FieldBuilder.Err(), we DON'T log immediately + // The error will be included in the timing log + } + + return sb +} + +// Merge adds additional key-value pairs to the fields +// EXACT match to FieldBuilder.Merge() +func (sb *SinceBuilder) Merge(pairs ...any) *SinceBuilder { + if sb.logger.suspend.Load() || !sb.condition { + return sb + } + + // Lazy initialization + if sb.fields == nil { + sb.fields = make(lx.Fields, 0, len(pairs)/2) + } + + // Process pairs as key-value + for i := 0; i < len(pairs)-1; i += 2 { + if key, ok := pairs[i].(string); ok { + sb.fields = append(sb.fields, lx.Field{Key: key, Value: pairs[i+1]}) + } else { + sb.fields = append(sb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("non-string key in Merge: %v", pairs[i]), + }) + } + } + + if len(pairs)%2 != 0 { + sb.fields = append(sb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("uneven key-value pairs in Merge: [%v]", pairs[len(pairs)-1]), + }) + } + + return sb 
+} + +// --------------------------------------------------------------------- +// Logging Methods (match logger pattern) +// --------------------------------------------------------------------- + +// Debug logs the duration at Debug level with message +func (sb *SinceBuilder) Debug(msg string) time.Duration { + return sb.logAtLevel(lx.LevelDebug, msg) +} + +// Info logs the duration at Info level with message +func (sb *SinceBuilder) Info(msg string) time.Duration { + return sb.logAtLevel(lx.LevelInfo, msg) +} + +// Warn logs the duration at Warn level with message +func (sb *SinceBuilder) Warn(msg string) time.Duration { + return sb.logAtLevel(lx.LevelWarn, msg) +} + +// Error logs the duration at Error level with message +func (sb *SinceBuilder) Error(msg string) time.Duration { + return sb.logAtLevel(lx.LevelError, msg) +} + +// Log is an alias for Info (for backward compatibility) +func (sb *SinceBuilder) Log(msg string) time.Duration { + return sb.Info(msg) +} + +// logAtLevel internal method that handles the actual logging +func (sb *SinceBuilder) logAtLevel(level lx.LevelType, msg string) time.Duration { + // Fast path - don't even compute duration if we're not logging + if !sb.condition || sb.logger.suspend.Load() || !sb.logger.shouldLog(level) { + return time.Since(sb.start) + } + + duration := time.Since(sb.start) + + // Build final fields in this order: + // 1. Logger context fields (from logger.context) + // 2. Builder fields (from sb.fields) + // 3. Duration fields (always last) + + // Pre-allocate with exact capacity + totalFields := 0 + if sb.logger.context != nil { + totalFields += len(sb.logger.context) + } + if sb.fields != nil { + totalFields += len(sb.fields) + } + totalFields += 2 // duration_ms, duration + + fields := make(lx.Fields, 0, totalFields) + + // Add logger context fields first (preserves order) + if sb.logger.context != nil { + fields = append(fields, sb.logger.context...) 
+ } + + // Add builder fields + if sb.fields != nil { + fields = append(fields, sb.fields...) + } + + // Add duration fields last (so they're visible at the end) + fields = append(fields, + lx.Field{Key: "duration_ms", Value: duration.Milliseconds()}, + lx.Field{Key: "duration", Value: duration.String()}, + ) + + sb.logger.log(level, lx.ClassTimed, msg, fields, false) + return duration +} + +// --------------------------------------------------------------------- +// Utility Methods +// --------------------------------------------------------------------- + +// Reset allows reusing the builder with a new start time +// Zero-allocation - keeps fields slice capacity +func (sb *SinceBuilder) Reset(startTime ...time.Time) *SinceBuilder { + sb.start = time.Now() + if len(startTime) > 0 && !startTime[0].IsZero() { + sb.start = startTime[0] + } + sb.condition = true + if sb.fields != nil { + sb.fields = sb.fields[:0] // Keep capacity, zero length + } + return sb +} + +// Elapsed returns the current duration without logging +func (sb *SinceBuilder) Elapsed() time.Duration { + return time.Since(sb.start) +} diff --git a/vendor/github.com/olekukonko/ll/writer.go b/vendor/github.com/olekukonko/ll/writer.go new file mode 100644 index 0000000000..195fd12c79 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/writer.go @@ -0,0 +1,53 @@ +package ll + +import ( + "bytes" + "io" + "strings" + + "github.com/olekukonko/ll/lx" +) + +// Writer returns an io.Writer that logs every write operation at the given level. +// Useful for capturing Stdout/Stderr from external processes. 
+func (l *Logger) Writer(level lx.LevelType) io.Writer { + return &logWriter{ + logger: l, + level: level, + } +} + +// logWriter implements io.Writer to bridge external streams to ll.Logger +type logWriter struct { + logger *Logger + level lx.LevelType + buf bytes.Buffer // Buffer for incomplete lines +} + +func (w *logWriter) Write(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + // Buffer handling for partial lines (streams often write byte-by-byte) + w.buf.Write(p) + + // Process complete lines + for { + line, err := w.buf.ReadString('\n') + if err != nil { // No newline found, buffer remains + w.buf.WriteString(line) + break + } + + // Clean and log the complete line + msg := strings.TrimSuffix(line, "\n") + msg = strings.TrimSuffix(msg, "\r") + + if msg != "" { + w.logger.log(w.level, lx.ClassText, msg, nil, false) + } + } + + return len(p), nil +} diff --git a/vendor/github.com/olekukonko/tablewriter/MIGRATION.md b/vendor/github.com/olekukonko/tablewriter/MIGRATION.md index 08ab3055c3..057901dd82 100644 --- a/vendor/github.com/olekukonko/tablewriter/MIGRATION.md +++ b/vendor/github.com/olekukonko/tablewriter/MIGRATION.md @@ -448,7 +448,7 @@ func NewInvoiceRenderer() *InvoiceRenderer { Settings: tw.Settings{Separators: tw.SeparatorsNone, Lines: tw.LinesNone}, Streaming: false, } - defaultLogger := ll.New("simple-invoice-renderer") + defaultLogger := ll.New("simple-invoice-renderer").Disable() return &InvoiceRenderer{logger: defaultLogger, rendition: rendition} } diff --git a/vendor/github.com/olekukonko/tablewriter/README.md b/vendor/github.com/olekukonko/tablewriter/README.md index 3af9409f9a..d8334424c4 100644 --- a/vendor/github.com/olekukonko/tablewriter/README.md +++ b/vendor/github.com/olekukonko/tablewriter/README.md @@ -28,7 +28,7 @@ go get github.com/olekukonko/tablewriter@v0.0.5 #### Latest Version The latest stable version ```bash -go get github.com/olekukonko/tablewriter@v1.1.2 +go get 
github.com/olekukonko/tablewriter@v1.1.3 ``` **Warning:** Version `v1.0.0` contains missing functionality and should not be used. @@ -62,7 +62,7 @@ func main() { data := [][]string{ {"Package", "Version", "Status"}, {"tablewriter", "v0.0.5", "legacy"}, - {"tablewriter", "v1.1.2", "latest"}, + {"tablewriter", "v1.1.3", "latest"}, } table := tablewriter.NewWriter(os.Stdout) @@ -77,7 +77,7 @@ func main() { │ PACKAGE │ VERSION │ STATUS │ ├─────────────┼─────────┼────────┤ │ tablewriter │ v0.0.5 │ legacy │ -│ tablewriter │ v1.1.2 │ latest │ +│ tablewriter │ v1.1.3 │ latest │ └─────────────┴─────────┴────────┘ ``` diff --git a/vendor/github.com/olekukonko/tablewriter/benchstat.txt b/vendor/github.com/olekukonko/tablewriter/benchstat.txt deleted file mode 100644 index 912c38deb3..0000000000 --- a/vendor/github.com/olekukonko/tablewriter/benchstat.txt +++ /dev/null @@ -1,194 +0,0 @@ -goos: darwin -goarch: arm64 -pkg: github.com/olekukonko/tablewriter/pkg/twwarp -cpu: Apple M2 - │ old.txt │ new.txt │ - │ sec/op │ sec/op vs base │ -WrapString-8 112.8µ ± 1% 112.9µ ± 2% ~ (p=0.589 n=6) -WrapStringWithSpaces-8 113.4µ ± 1% 113.7µ ± 1% ~ (p=0.310 n=6) -geomean 113.1µ 113.3µ +0.15% - - │ old.txt │ new.txt │ - │ B/s │ B/s vs base │ -WrapString-8 84.92Mi ± 1% 84.82Mi ± 2% ~ (p=0.589 n=6) -WrapStringWithSpaces-8 84.43Mi ± 1% 84.27Mi ± 1% ~ (p=0.310 n=6) -geomean 84.68Mi 84.55Mi -0.15% - - │ old.txt │ new.txt │ - │ B/op │ B/op vs base │ -WrapString-8 47.35Ki ± 0% 47.35Ki ± 0% ~ (p=1.000 n=6) ¹ -WrapStringWithSpaces-8 52.76Ki ± 0% 52.76Ki ± 0% ~ (p=1.000 n=6) ¹ -geomean 49.98Ki 49.98Ki +0.00% -¹ all samples are equal - - │ old.txt │ new.txt │ - │ allocs/op │ allocs/op vs base │ -WrapString-8 33.00 ± 0% 33.00 ± 0% ~ (p=1.000 n=6) ¹ -WrapStringWithSpaces-8 51.00 ± 0% 51.00 ± 0% ~ (p=1.000 n=6) ¹ -geomean 41.02 41.02 +0.00% -¹ all samples are equal - -pkg: github.com/olekukonko/tablewriter/pkg/twwidth - │ old.txt │ new.txt │ - │ sec/op │ sec/op vs base │ 
-WidthFunction/SimpleASCII_EAfalse_NoCache-8 387.6n ± 1% 368.4n ± 2% -4.97% (p=0.002 n=6) -WidthFunction/SimpleASCII_EAfalse_CacheMiss-8 219.0n ± 127% 217.5n ± 119% ~ (p=0.372 n=6) -WidthFunction/SimpleASCII_EAfalse_CacheHit-8 14.78n ± 1% 14.54n ± 3% ~ (p=0.061 n=6) -WidthFunction/SimpleASCII_EAtrue_NoCache-8 676.4n ± 1% 366.8n ± 2% -45.77% (p=0.002 n=6) -WidthFunction/SimpleASCII_EAtrue_CacheMiss-8 216.1n ± 375% 216.0n ± 128% ~ (p=0.937 n=6) -WidthFunction/SimpleASCII_EAtrue_CacheHit-8 14.71n ± 0% 14.49n ± 0% -1.53% (p=0.002 n=6) -WidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1.027µ ± 3% 1.007µ ± 1% -2.00% (p=0.002 n=6) -WidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 219.5n ± 516% 221.4n ± 502% ~ (p=0.515 n=6) -WidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 14.81n ± 1% 14.61n ± 1% -1.35% (p=0.009 n=6) -WidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1.313µ ± 2% 1.009µ ± 2% -23.15% (p=0.002 n=6) -WidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 653.2n ± 150% 218.2n ± 524% ~ (p=0.331 n=6) -WidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 14.73n ± 2% 14.50n ± 0% -1.60% (p=0.002 n=6) -WidthFunction/EastAsian_EAfalse_NoCache-8 747.3n ± 1% 336.2n ± 1% -55.02% (p=0.002 n=6) -WidthFunction/EastAsian_EAfalse_CacheMiss-8 226.3n ± 384% 227.4n ± 113% ~ (p=0.937 n=6) -WidthFunction/EastAsian_EAfalse_CacheHit-8 14.74n ± 1% 14.58n ± 1% -1.09% (p=0.011 n=6) -WidthFunction/EastAsian_EAtrue_NoCache-8 965.4n ± 2% 348.7n ± 0% -63.88% (p=0.002 n=6) -WidthFunction/EastAsian_EAtrue_CacheMiss-8 225.4n ± 511% 225.8n ± 111% ~ (p=1.000 n=6) -WidthFunction/EastAsian_EAtrue_CacheHit-8 14.72n ± 1% 14.54n ± 3% ~ (p=0.056 n=6) -WidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1376.0n ± 2% 983.8n ± 2% -28.50% (p=0.002 n=6) -WidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 633.6n ± 170% 222.4n ± 513% ~ (p=0.974 n=6) -WidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 15.73n ± 1% 15.64n ± 1% ~ (p=0.227 n=6) -WidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1589.5n ± 1% 996.9n ± 2% -37.29% (p=0.002 
n=6) -WidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 484.8n ± 309% 221.3n ± 516% ~ (p=0.240 n=6) -WidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 15.74n ± 1% 15.73n ± 1% ~ (p=0.485 n=6) -WidthFunction/LongSimpleASCII_EAfalse_NoCache-8 4.916µ ± 3% 4.512µ ± 4% -8.22% (p=0.002 n=6) -WidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 2.430µ ± 114% 2.182µ ± 123% ~ (p=0.699 n=6) -WidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 23.75n ± 3% 23.24n ± 3% ~ (p=0.065 n=6) -WidthFunction/LongSimpleASCII_EAtrue_NoCache-8 9.273µ ± 1% 4.519µ ± 1% -51.27% (p=0.002 n=6) -WidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 4.021µ ± 131% 2.127µ ± 128% ~ (p=0.240 n=6) -WidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 23.50n ± 2% 23.48n ± 1% ~ (p=0.589 n=6) -WidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 57.36µ ± 1% 57.33µ ± 2% ~ (p=0.818 n=6) -WidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 22.18µ ± 135% 14.55µ ± 299% ~ (p=0.589 n=6) -WidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 44.21n ± 1% 44.20n ± 2% ~ (p=0.818 n=6) -WidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 60.25µ ± 2% 57.90µ ± 2% -3.90% (p=0.002 n=6) -WidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 16.11µ ± 263% 20.02µ ± 183% ~ (p=0.699 n=6) -WidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 44.57n ± 1% 44.18n ± 2% ~ (p=0.461 n=6) -geomean 358.5n 283.9n -20.82% - - │ old.txt │ new.txt │ - │ B/s │ B/s vs base │ -WidthFunction/SimpleASCII_EAfalse_NoCache-8 86.11Mi ± 1% 90.63Mi ± 2% +5.24% (p=0.002 n=6) -WidthFunction/SimpleASCII_EAfalse_CacheMiss-8 152.4Mi ± 56% 153.5Mi ± 54% ~ (p=0.394 n=6) -WidthFunction/SimpleASCII_EAfalse_CacheHit-8 2.205Gi ± 1% 2.242Gi ± 3% ~ (p=0.065 n=6) -WidthFunction/SimpleASCII_EAtrue_NoCache-8 49.35Mi ± 1% 91.00Mi ± 2% +84.40% (p=0.002 n=6) -WidthFunction/SimpleASCII_EAtrue_CacheMiss-8 154.5Mi ± 79% 154.5Mi ± 56% ~ (p=0.937 n=6) -WidthFunction/SimpleASCII_EAtrue_CacheHit-8 2.215Gi ± 0% 2.250Gi ± 0% +1.58% (p=0.002 n=6) -WidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 56.66Mi ± 2% 
57.78Mi ± 1% +1.99% (p=0.002 n=6) -WidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 265.1Mi ± 84% 262.7Mi ± 83% ~ (p=0.485 n=6) -WidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 3.836Gi ± 1% 3.888Gi ± 1% +1.34% (p=0.009 n=6) -WidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 44.30Mi ± 2% 57.65Mi ± 2% +30.14% (p=0.002 n=6) -WidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 147.3Mi ± 81% 266.7Mi ± 84% ~ (p=0.310 n=6) -WidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 3.856Gi ± 2% 3.919Gi ± 0% +1.63% (p=0.002 n=6) -WidthFunction/EastAsian_EAfalse_NoCache-8 76.58Mi ± 1% 170.21Mi ± 1% +122.28% (p=0.002 n=6) -WidthFunction/EastAsian_EAfalse_CacheMiss-8 252.8Mi ± 79% 251.6Mi ± 53% ~ (p=0.937 n=6) -WidthFunction/EastAsian_EAfalse_CacheHit-8 3.791Gi ± 1% 3.832Gi ± 1% +1.08% (p=0.009 n=6) -WidthFunction/EastAsian_EAtrue_NoCache-8 59.27Mi ± 2% 164.10Mi ± 0% +176.87% (p=0.002 n=6) -WidthFunction/EastAsian_EAtrue_CacheMiss-8 253.9Mi ± 84% 253.4Mi ± 53% ~ (p=1.000 n=6) -WidthFunction/EastAsian_EAtrue_CacheHit-8 3.796Gi ± 1% 3.841Gi ± 3% ~ (p=0.065 n=6) -WidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 60.29Mi ± 1% 84.33Mi ± 2% +39.88% (p=0.002 n=6) -WidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 227.1Mi ± 79% 373.2Mi ± 84% ~ (p=1.000 n=6) -WidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 5.154Gi ± 1% 5.181Gi ± 1% ~ (p=0.240 n=6) -WidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 52.19Mi ± 1% 83.23Mi ± 2% +59.47% (p=0.002 n=6) -WidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 230.9Mi ± 82% 374.9Mi ± 84% ~ (p=0.240 n=6) -WidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 5.147Gi ± 1% 5.152Gi ± 1% ~ (p=0.485 n=6) -WidthFunction/LongSimpleASCII_EAfalse_NoCache-8 104.8Mi ± 3% 114.1Mi ± 4% +8.95% (p=0.002 n=6) -WidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 368.0Mi ± 293% 474.3Mi ± 211% ~ (p=0.699 n=6) -WidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 21.17Gi ± 3% 21.64Gi ± 2% ~ (p=0.065 n=6) -WidthFunction/LongSimpleASCII_EAtrue_NoCache-8 55.54Mi ± 1% 113.97Mi ± 1% +105.21% (p=0.002 n=6) 
-WidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 399.8Mi ± 232% 577.5Mi ± 149% ~ (p=0.240 n=6) -WidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 21.40Gi ± 2% 21.41Gi ± 1% ~ (p=0.589 n=6) -WidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 34.08Mi ± 1% 34.10Mi ± 2% ~ (p=0.784 n=6) -WidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 101.5Mi ± 1396% 643.9Mi ± 320% ~ (p=0.589 n=6) -WidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 43.18Gi ± 1% 43.20Gi ± 2% ~ (p=0.818 n=6) -WidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 32.45Mi ± 2% 33.76Mi ± 2% +4.06% (p=0.002 n=6) -WidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 393.0Mi ± 296% 122.4Mi ± 1610% ~ (p=0.699 n=6) -WidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 42.83Gi ± 1% 43.21Gi ± 2% ~ (p=0.485 n=6) -geomean 456.4Mi 560.6Mi +22.83% - - │ old.txt │ new.txt │ - │ B/op │ B/op vs base │ -WidthFunction/SimpleASCII_EAfalse_NoCache-8 112.0 ± 1% 113.0 ± 0% ~ (p=0.061 n=6) -WidthFunction/SimpleASCII_EAfalse_CacheMiss-8 55.00 ± 200% 55.00 ± 202% ~ (p=1.000 n=6) -WidthFunction/SimpleASCII_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/SimpleASCII_EAtrue_NoCache-8 113.0 ± 1% 113.0 ± 0% ~ (p=1.000 n=6) -WidthFunction/SimpleASCII_EAtrue_CacheMiss-8 55.00 ± 505% 55.00 ± 205% ~ (p=0.697 n=6) -WidthFunction/SimpleASCII_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 185.0 ± 0% 185.0 ± 1% ~ (p=0.455 n=6) -WidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 87.00 ± 402% 87.00 ± 401% ~ (p=1.000 n=6) -WidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 185.0 ± 0% 185.0 ± 1% ~ (p=1.000 n=6) -WidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 174.00 ± 115% 87.00 ± 401% ~ (p=0.621 n=6) -WidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/EastAsian_EAfalse_NoCache-8 145.0 ± 0% 146.0 ± 0% +0.69% (p=0.002 n=6) 
-WidthFunction/EastAsian_EAfalse_CacheMiss-8 87.00 ± 392% 87.00 ± 167% ~ (p=0.697 n=6) -WidthFunction/EastAsian_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/EastAsian_EAtrue_NoCache-8 145.0 ± 1% 146.0 ± 1% +0.69% (p=0.013 n=6) -WidthFunction/EastAsian_EAtrue_CacheMiss-8 87.00 ± 392% 87.00 ± 164% ~ (p=0.697 n=6) -WidthFunction/EastAsian_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 193.0 ± 1% 193.0 ± 0% ~ (p=1.000 n=6) -WidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 232.0 ± 134% 103.0 ± 485% ~ (p=0.924 n=6) -WidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 193.0 ± 0% 193.0 ± 1% ~ (p=1.000 n=6) -WidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 185.0 ± 203% 103.0 ± 485% ~ (p=0.621 n=6) -WidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/LongSimpleASCII_EAfalse_NoCache-8 1.153Ki ± 0% 1.150Ki ± 0% ~ (p=0.126 n=6) -WidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 1.050Ki ± 72% 1.047Ki ± 74% ~ (p=0.939 n=6) -WidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/LongSimpleASCII_EAtrue_NoCache-8 1.152Ki ± 0% 1.155Ki ± 0% +0.30% (p=0.015 n=6) -WidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 1.036Ki ± 71% 1.039Ki ± 76% ~ (p=0.981 n=6) -WidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 1.355Ki ± 0% 1.358Ki ± 0% ~ (p=0.065 n=6) -WidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 2.787Ki ± 31% 2.613Ki ± 43% ~ (p=0.805 n=6) -WidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 1.358Ki ± 0% 1.361Ki ± 0% ~ (p=0.158 n=6) -WidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 2.625Ki ± 43% 2.741Ki ± 37% ~ (p=0.987 n=6) 
-WidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -geomean ² -5.62% ² -¹ all samples are equal -² summaries must be >0 to compute geomean - - │ old.txt │ new.txt │ - │ allocs/op │ allocs/op vs base │ -WidthFunction/SimpleASCII_EAfalse_NoCache-8 3.000 ± 0% 3.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/SimpleASCII_EAfalse_CacheMiss-8 1.000 ± 200% 1.000 ± 200% ~ (p=1.000 n=6) -WidthFunction/SimpleASCII_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/SimpleASCII_EAtrue_NoCache-8 3.000 ± 0% 3.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/SimpleASCII_EAtrue_CacheMiss-8 1.000 ± 300% 1.000 ± 200% ~ (p=0.697 n=6) -WidthFunction/SimpleASCII_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 6.000 ± 0% 6.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 1.000 ± 600% 1.000 ± 600% ~ (p=1.000 n=6) -WidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 6.000 ± 0% 6.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 3.500 ± 100% 1.000 ± 600% ~ (p=0.610 n=6) -WidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/EastAsian_EAfalse_NoCache-8 3.000 ± 0% 3.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/EastAsian_EAfalse_CacheMiss-8 1.000 ± 300% 1.000 ± 200% ~ (p=0.697 n=6) -WidthFunction/EastAsian_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/EastAsian_EAtrue_NoCache-8 3.000 ± 0% 3.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/EastAsian_EAtrue_CacheMiss-8 1.000 ± 300% 1.000 ± 200% ~ (p=0.697 n=6) -WidthFunction/EastAsian_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 5.000 ± 0% 5.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 3.000 ± 133% 1.000 ± 600% ~ (p=1.000 n=6) 
-WidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 5.000 ± 0% 5.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 2.500 ± 180% 1.000 ± 600% ~ (p=0.610 n=6) -WidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/LongSimpleASCII_EAfalse_NoCache-8 3.000 ± 0% 3.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3.000 ± 67% 3.000 ± 67% ~ (p=1.000 n=6) -WidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/LongSimpleASCII_EAtrue_NoCache-8 3.000 ± 0% 3.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3.000 ± 67% 3.000 ± 67% ~ (p=1.000 n=6) -WidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 9.000 ± 0% 9.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 5.000 ± 100% 3.500 ± 186% ~ (p=0.978 n=6) -WidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 9.000 ± 0% 9.000 ± 0% ~ (p=1.000 n=6) ¹ -WidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 4.000 ± 150% 4.500 ± 122% ~ (p=0.952 n=6) -WidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹ -geomean ² -9.28% ² -¹ all samples are equal -² summaries must be >0 to compute geomean diff --git a/vendor/github.com/olekukonko/tablewriter/comb.hcl b/vendor/github.com/olekukonko/tablewriter/comb.hcl new file mode 100644 index 0000000000..6d5025af2d --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/comb.hcl @@ -0,0 +1,10 @@ +recursive = true +output_file = "all.txt" +extensions = [".go"] +exclude_dirs = [ + "_examples", "_readme", "_lab","_tmp","pkg","lab","cmd","test.txt","tmp", + "_readme","pkg","renderer" +] 
+exclude_files = ["README.md","README_LEGACY.md","MIGRATION.md","test.hcl","csv.go"] +use_gitignore = true +detailed = true \ No newline at end of file diff --git a/vendor/github.com/olekukonko/tablewriter/config.go b/vendor/github.com/olekukonko/tablewriter/config.go index 415c9576bd..6e4038c27d 100644 --- a/vendor/github.com/olekukonko/tablewriter/config.go +++ b/vendor/github.com/olekukonko/tablewriter/config.go @@ -81,6 +81,13 @@ func (b *ConfigBuilder) WithTrimSpace(state tw.State) *ConfigBuilder { return b } +// WithTrimTab enables or disables automatic trimming of leading/trailing tabs. +// Useful for preserving indentation in code blocks while trimming other whitespace. +func (b *ConfigBuilder) WithTrimTab(state tw.State) *ConfigBuilder { + b.config.Behavior.TrimTab = state + return b +} + // WithDebug enables/disables debug logging func (b *ConfigBuilder) WithDebug(debug bool) *ConfigBuilder { b.config.Debug = debug @@ -796,6 +803,12 @@ func (bb *BehaviorConfigBuilder) WithTrimSpace(state tw.State) *BehaviorConfigBu return bb } +// WithTrimTab enables/disables trim tab +func (bb *BehaviorConfigBuilder) WithTrimTab(state tw.State) *BehaviorConfigBuilder { + bb.config.TrimTab = state + return bb +} + // WithHeaderHide enables/disables header visibility func (bb *BehaviorConfigBuilder) WithHeaderHide(state tw.State) *BehaviorConfigBuilder { bb.config.Header.Hide = state diff --git a/vendor/github.com/olekukonko/tablewriter/new.txt b/vendor/github.com/olekukonko/tablewriter/new.txt deleted file mode 100644 index 46791ad1a5..0000000000 --- a/vendor/github.com/olekukonko/tablewriter/new.txt +++ /dev/null @@ -1,248 +0,0 @@ -PASS -ok github.com/olekukonko/tablewriter 0.284s -? 
github.com/olekukonko/tablewriter/cmd/csv2table [no test files] -goos: darwin -goarch: arm64 -pkg: github.com/olekukonko/tablewriter/pkg/twwarp -cpu: Apple M2 -BenchmarkWrapString-8 10030 114909 ns/op 87.40 MB/s 48488 B/op 33 allocs/op -BenchmarkWrapString-8 10000 112188 ns/op 89.52 MB/s 48488 B/op 33 allocs/op -BenchmarkWrapString-8 10000 113708 ns/op 88.32 MB/s 48488 B/op 33 allocs/op -BenchmarkWrapString-8 10000 113233 ns/op 88.69 MB/s 48488 B/op 33 allocs/op -BenchmarkWrapString-8 10000 112575 ns/op 89.21 MB/s 48488 B/op 33 allocs/op -BenchmarkWrapString-8 10000 112604 ns/op 89.19 MB/s 48488 B/op 33 allocs/op -BenchmarkWrapStringWithSpaces-8 10000 113731 ns/op 88.30 MB/s 54024 B/op 51 allocs/op -BenchmarkWrapStringWithSpaces-8 10000 113511 ns/op 88.48 MB/s 54024 B/op 51 allocs/op -BenchmarkWrapStringWithSpaces-8 10000 113575 ns/op 88.43 MB/s 54024 B/op 51 allocs/op -BenchmarkWrapStringWithSpaces-8 10000 113746 ns/op 88.29 MB/s 54024 B/op 51 allocs/op -BenchmarkWrapStringWithSpaces-8 10000 113473 ns/op 88.51 MB/s 54024 B/op 51 allocs/op -BenchmarkWrapStringWithSpaces-8 10000 114487 ns/op 87.72 MB/s 54024 B/op 51 allocs/op -PASS -ok github.com/olekukonko/tablewriter/pkg/twwarp 14.612s -goos: darwin -goarch: arm64 -pkg: github.com/olekukonko/tablewriter/pkg/twwidth -cpu: Apple M2 -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 264374 4533 ns/op 119.12 MB/s 1178 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 265746 4514 ns/op 119.62 MB/s 1177 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 263538 4509 ns/op 119.75 MB/s 1178 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 266173 4510 ns/op 119.72 MB/s 1180 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 265224 4676 ns/op 115.48 MB/s 1180 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 265696 4508 ns/op 119.80 MB/s 1177 B/op 3 allocs/op 
-BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 251047 4859 ns/op 111.13 MB/s 1867 B/op 4 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 1000000 3945 ns/op 136.89 MB/s 1584 B/op 4 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3504475 3729 ns/op 144.81 MB/s 1474 B/op 4 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3664098 635.4 ns/op 849.84 MB/s 670 B/op 2 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3818680 588.6 ns/op 917.47 MB/s 667 B/op 2 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3761966 348.7 ns/op 1548.66 MB/s 583 B/op 1 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 49524442 23.54 ns/op 22938.55 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51765230 23.25 ns/op 23221.81 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51881983 23.83 ns/op 22664.79 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51665586 23.20 ns/op 23272.39 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51782077 23.23 ns/op 23250.20 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51498277 23.21 ns/op 23267.21 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 263586 4520 ns/op 119.47 MB/s 1183 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 265484 4519 ns/op 119.49 MB/s 1182 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 265218 4514 ns/op 119.64 MB/s 1181 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 265957 4515 ns/op 119.60 MB/s 1184 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 265981 4518 ns/op 119.52 MB/s 1183 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 265028 4574 ns/op 118.06 MB/s 1184 B/op 3 
allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 251682 4853 ns/op 111.27 MB/s 1869 B/op 4 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 1000000 3893 ns/op 138.70 MB/s 1583 B/op 4 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3596130 3747 ns/op 144.13 MB/s 1499 B/op 4 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3671358 506.1 ns/op 1066.92 MB/s 628 B/op 2 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3687993 370.6 ns/op 1456.96 MB/s 594 B/op 2 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3672946 358.4 ns/op 1506.88 MB/s 583 B/op 1 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 49266897 23.64 ns/op 22844.78 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 50158659 23.54 ns/op 22938.83 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 50689321 23.45 ns/op 23025.77 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51113672 23.52 ns/op 22954.95 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51489162 23.21 ns/op 23269.51 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51705564 23.16 ns/op 23311.21 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20930 57159 ns/op 35.86 MB/s 1389 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20882 57502 ns/op 35.65 MB/s 1395 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 21103 57730 ns/op 35.51 MB/s 1391 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20889 56615 ns/op 36.21 MB/s 1393 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20808 58303 ns/op 35.16 MB/s 1391 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 21104 56727 ns/op 36.14 MB/s 
1387 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 38569 27485 ns/op 74.59 MB/s 3041 B/op 6 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1000000 58061 ns/op 35.31 MB/s 3835 B/op 10 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 2124566 31025 ns/op 66.08 MB/s 3140 B/op 6 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1000000 1607 ns/op 1275.74 MB/s 2311 B/op 1 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1615826 1224 ns/op 1674.27 MB/s 2311 B/op 1 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1478348 722.9 ns/op 2835.84 MB/s 2311 B/op 1 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 23989044 44.26 ns/op 46313.25 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27268802 44.13 ns/op 46454.64 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27292006 44.51 ns/op 46054.40 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 24128786 44.99 ns/op 45569.06 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 26858004 44.09 ns/op 46497.43 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27259458 44.05 ns/op 46538.64 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 20671 57887 ns/op 35.41 MB/s 1395 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 20966 56795 ns/op 36.09 MB/s 1396 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 20708 57092 ns/op 35.91 MB/s 1388 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 20882 57917 ns/op 35.40 MB/s 1389 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 21244 58013 ns/op 35.34 MB/s 1393 B/op 9 allocs/op 
-BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 20854 58122 ns/op 35.27 MB/s 1396 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 38907 30289 ns/op 67.68 MB/s 3066 B/op 6 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 1000000 56603 ns/op 36.22 MB/s 3835 B/op 10 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 1949059 29030 ns/op 70.62 MB/s 3084 B/op 6 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 1479127 933.7 ns/op 2195.47 MB/s 2311 B/op 1 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 2335996 11012 ns/op 186.17 MB/s 2548 B/op 3 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 983864 1169 ns/op 1753.75 MB/s 2311 B/op 1 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27291516 44.18 ns/op 46398.32 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27220657 44.18 ns/op 46402.04 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27059124 44.91 ns/op 45645.46 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 26679783 44.04 ns/op 46551.62 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27244114 44.14 ns/op 46448.19 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27221737 44.61 ns/op 45948.75 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3247359 366.1 ns/op 95.62 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3292773 370.6 ns/op 94.44 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3275070 365.3 ns/op 95.82 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3291489 365.6 ns/op 95.73 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3282121 374.9 ns/op 93.37 MB/s 113 B/op 3 
allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3198205 375.6 ns/op 93.18 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 3092488 419.4 ns/op 83.45 MB/s 152 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6276060 476.4 ns/op 73.46 MB/s 166 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6135336 218.8 ns/op 159.98 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6175833 216.1 ns/op 161.95 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6156606 215.2 ns/op 162.63 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6160923 216.2 ns/op 161.88 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 78655855 15.02 ns/op 2330.76 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 70905223 14.59 ns/op 2398.68 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 82255629 14.49 ns/op 2415.75 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 82383864 14.48 ns/op 2417.21 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 82325931 14.49 ns/op 2415.73 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 82426311 14.66 ns/op 2386.73 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 3265182 365.8 ns/op 95.68 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 3275419 366.3 ns/op 95.56 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 3057087 375.3 ns/op 93.26 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 3239217 372.6 ns/op 93.94 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 3246429 367.3 ns/op 95.29 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 3252763 
365.3 ns/op 95.80 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 2986195 396.4 ns/op 88.30 MB/s 142 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6487422 493.6 ns/op 70.90 MB/s 168 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6261225 216.1 ns/op 161.99 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6154988 210.7 ns/op 166.13 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6308702 213.8 ns/op 163.69 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6120438 216.0 ns/op 162.05 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 82184980 14.47 ns/op 2419.17 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 78985473 14.51 ns/op 2412.95 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 82368319 14.47 ns/op 2419.30 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 82366668 14.47 ns/op 2418.96 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 82104614 14.53 ns/op 2409.59 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 82399426 14.53 ns/op 2409.13 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1020 ns/op 59.80 MB/s 186 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1010 ns/op 60.40 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1007 ns/op 60.55 MB/s 186 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1006 ns/op 60.63 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1006 ns/op 60.65 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1006 ns/op 60.63 MB/s 185 B/op 6 allocs/op 
-BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 1000000 1334 ns/op 45.74 MB/s 436 B/op 7 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6892693 1204 ns/op 50.65 MB/s 321 B/op 7 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6433399 221.7 ns/op 275.14 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6323521 221.2 ns/op 275.73 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6000822 218.5 ns/op 279.15 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6329578 220.3 ns/op 276.90 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 80806719 14.65 ns/op 4163.13 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 82397774 14.63 ns/op 4169.11 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 82794307 14.76 ns/op 4134.15 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 82610730 14.59 ns/op 4180.13 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 82639170 14.58 ns/op 4183.56 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 82560049 14.45 ns/op 4222.53 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1000000 1006 ns/op 60.61 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1000000 1012 ns/op 60.29 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1000000 1030 ns/op 59.25 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1000000 1005 ns/op 60.68 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1000000 1006 ns/op 60.64 MB/s 186 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1000000 1012 ns/op 60.26 MB/s 185 B/op 6 allocs/op 
-BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 1000000 1361 ns/op 44.84 MB/s 436 B/op 7 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6967185 1216 ns/op 50.17 MB/s 323 B/op 7 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6413974 219.1 ns/op 278.46 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6381684 216.9 ns/op 281.27 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6383749 216.2 ns/op 282.14 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6360810 217.3 ns/op 280.75 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 81573231 14.53 ns/op 4197.28 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82780268 14.47 ns/op 4215.84 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82845276 14.48 ns/op 4212.74 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82545850 14.51 ns/op 4203.96 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82419704 14.49 ns/op 4209.69 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82121707 14.50 ns/op 4206.82 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 3552715 336.1 ns/op 178.50 MB/s 146 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 3551234 335.0 ns/op 179.09 MB/s 146 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 3588946 338.9 ns/op 177.05 MB/s 146 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 3577424 338.5 ns/op 177.25 MB/s 146 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 3554505 335.4 ns/op 178.89 MB/s 146 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 3575703 336.2 ns/op 178.46 MB/s 146 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 2990224 
412.6 ns/op 145.42 MB/s 207 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 6066997 484.0 ns/op 123.95 MB/s 232 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5743347 224.3 ns/op 267.49 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5870154 220.6 ns/op 271.92 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5880489 228.0 ns/op 263.14 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5660132 226.8 ns/op 264.52 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 81708613 14.54 ns/op 4126.40 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 79903231 14.65 ns/op 4094.56 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 80580853 14.62 ns/op 4103.14 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 82036092 14.73 ns/op 4073.52 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 83622964 14.49 ns/op 4139.65 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 82724623 14.53 ns/op 4129.78 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 3463408 349.4 ns/op 171.71 MB/s 145 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 3245782 350.0 ns/op 171.41 MB/s 146 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 3461160 348.3 ns/op 172.28 MB/s 146 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 3453544 349.1 ns/op 171.87 MB/s 146 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 3443858 347.0 ns/op 172.92 MB/s 146 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 3469286 347.4 ns/op 172.72 MB/s 146 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 3050086 428.5 ns/op 140.04 MB/s 213 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 
5927800 476.0 ns/op 126.05 MB/s 230 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5852149 223.0 ns/op 269.05 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5721747 224.9 ns/op 266.80 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5751147 225.7 ns/op 265.84 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5893626 225.9 ns/op 265.55 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 81984477 14.52 ns/op 4132.81 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 79537578 14.59 ns/op 4112.59 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 82339353 14.56 ns/op 4119.49 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 82286889 14.92 ns/op 4020.68 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 82166224 14.53 ns/op 4129.14 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 83084276 14.52 ns/op 4131.45 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1221180 982.5 ns/op 88.55 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1210902 983.5 ns/op 88.46 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1223528 989.3 ns/op 87.94 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1212517 984.1 ns/op 88.40 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1224182 983.5 ns/op 88.46 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1000000 1007 ns/op 86.36 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 999058 1364 ns/op 63.76 MB/s 603 B/op 7 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6682279 1218 ns/op 71.40 MB/s 465 B/op 7 allocs/op 
-BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6339568 220.6 ns/op 394.46 MB/s 103 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6226921 222.3 ns/op 391.34 MB/s 103 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6264051 221.1 ns/op 393.47 MB/s 103 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6234439 222.4 ns/op 391.23 MB/s 103 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 75337251 15.64 ns/op 5562.01 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76826634 15.76 ns/op 5521.54 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76836674 15.79 ns/op 5508.81 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76840162 15.64 ns/op 5564.05 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76694060 15.60 ns/op 5577.81 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76737175 15.62 ns/op 5571.56 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1202406 1012 ns/op 85.93 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1000000 1000 ns/op 86.99 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1208559 993.7 ns/op 87.55 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1209415 990.9 ns/op 87.80 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1206118 1020 ns/op 85.33 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1211994 990.6 ns/op 87.82 MB/s 194 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 1000000 1363 ns/op 63.84 MB/s 603 B/op 7 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6504960 1214 ns/op 
71.65 MB/s 465 B/op 7 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6349030 220.2 ns/op 395.18 MB/s 103 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6183368 220.3 ns/op 394.99 MB/s 103 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6240484 220.6 ns/op 394.32 MB/s 103 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6280713 222.0 ns/op 391.95 MB/s 103 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 69630140 15.77 ns/op 5517.31 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 76043014 15.65 ns/op 5559.61 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 76239080 15.63 ns/op 5567.94 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 75864739 15.88 ns/op 5479.13 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 71286422 15.74 ns/op 5527.29 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 75704404 15.71 ns/op 5536.58 MB/s 0 B/op 0 allocs/op -PASS -ok github.com/olekukonko/tablewriter/pkg/twwidth 659.150s -? github.com/olekukonko/tablewriter/renderer [no test files] -PASS -ok github.com/olekukonko/tablewriter/tests 3.025s -PASS -ok github.com/olekukonko/tablewriter/tw 0.283s diff --git a/vendor/github.com/olekukonko/tablewriter/old.txt b/vendor/github.com/olekukonko/tablewriter/old.txt deleted file mode 100644 index f9916ea0f1..0000000000 --- a/vendor/github.com/olekukonko/tablewriter/old.txt +++ /dev/null @@ -1,248 +0,0 @@ -PASS -ok github.com/olekukonko/tablewriter 0.819s -? 
github.com/olekukonko/tablewriter/cmd/csv2table [no test files] -goos: darwin -goarch: arm64 -pkg: github.com/olekukonko/tablewriter/pkg/twwarp -cpu: Apple M2 -BenchmarkWrapString-8 10630 111320 ns/op 90.22 MB/s 48488 B/op 33 allocs/op -BenchmarkWrapString-8 10000 112981 ns/op 88.89 MB/s 48488 B/op 33 allocs/op -BenchmarkWrapString-8 10000 113419 ns/op 88.55 MB/s 48488 B/op 33 allocs/op -BenchmarkWrapString-8 10000 112794 ns/op 89.04 MB/s 48488 B/op 33 allocs/op -BenchmarkWrapString-8 10000 112400 ns/op 89.35 MB/s 48488 B/op 33 allocs/op -BenchmarkWrapString-8 10000 112767 ns/op 89.06 MB/s 48488 B/op 33 allocs/op -BenchmarkWrapStringWithSpaces-8 10000 115098 ns/op 87.26 MB/s 54024 B/op 51 allocs/op -BenchmarkWrapStringWithSpaces-8 10000 113343 ns/op 88.61 MB/s 54024 B/op 51 allocs/op -BenchmarkWrapStringWithSpaces-8 10000 113702 ns/op 88.33 MB/s 54024 B/op 51 allocs/op -BenchmarkWrapStringWithSpaces-8 10000 113547 ns/op 88.45 MB/s 54024 B/op 51 allocs/op -BenchmarkWrapStringWithSpaces-8 10000 113016 ns/op 88.86 MB/s 54024 B/op 51 allocs/op -BenchmarkWrapStringWithSpaces-8 10000 113206 ns/op 88.71 MB/s 54024 B/op 51 allocs/op -PASS -ok github.com/olekukonko/tablewriter/pkg/twwarp 15.179s -goos: darwin -goarch: arm64 -pkg: github.com/olekukonko/tablewriter/pkg/twwidth -cpu: Apple M2 -BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 2953855 387.1 ns/op 90.40 MB/s 112 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3095179 387.8 ns/op 90.24 MB/s 112 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3096141 391.0 ns/op 89.51 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3090711 387.2 ns/op 90.40 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3066110 387.4 ns/op 90.35 MB/s 112 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3098689 389.2 ns/op 89.92 MB/s 112 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 
3125685 440.9 ns/op 79.39 MB/s 159 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6477175 496.2 ns/op 70.53 MB/s 165 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6019939 217.7 ns/op 160.79 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6231590 219.2 ns/op 159.67 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6245622 216.2 ns/op 161.90 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6109658 218.8 ns/op 159.95 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 80977806 14.73 ns/op 2375.87 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 80972566 14.76 ns/op 2371.06 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 81432532 14.90 ns/op 2348.78 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 80644483 14.85 ns/op 2357.10 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 81361905 14.79 ns/op 2365.80 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 81612987 14.78 ns/op 2368.60 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 1777732 682.2 ns/op 51.30 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 1778122 672.9 ns/op 52.01 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 1779956 674.0 ns/op 51.93 MB/s 112 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 1773282 678.7 ns/op 51.57 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 1783092 680.2 ns/op 51.46 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 1780448 674.0 ns/op 51.93 MB/s 113 B/op 3 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 1000000 1027 ns/op 34.08 MB/s 333 B/op 4 allocs/op 
-BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6891168 958.3 ns/op 36.52 MB/s 227 B/op 4 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6165972 211.7 ns/op 165.30 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6370098 217.4 ns/op 161.02 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6193920 214.8 ns/op 162.92 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6190384 209.4 ns/op 167.16 MB/s 55 B/op 1 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 79747688 14.75 ns/op 2372.71 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 79607492 14.75 ns/op 2372.90 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 81634501 14.73 ns/op 2376.30 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 81644916 14.70 ns/op 2381.26 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 82505884 14.70 ns/op 2380.77 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 81840265 14.70 ns/op 2380.34 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1053 ns/op 57.95 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1028 ns/op 59.34 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1029 ns/op 59.27 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1025 ns/op 59.49 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1026 ns/op 59.48 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1025 ns/op 59.54 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 1000000 1352 ns/op 45.13 MB/s 437 B/op 7 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6619118 1219 
ns/op 50.06 MB/s 320 B/op 7 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6486976 221.2 ns/op 275.81 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6508150 217.8 ns/op 280.07 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6487533 217.4 ns/op 280.56 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6243558 216.4 ns/op 281.93 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 80787679 14.90 ns/op 4093.19 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 81640521 14.89 ns/op 4097.92 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 81596338 14.71 ns/op 4145.47 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 81950889 14.84 ns/op 4111.86 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 79321578 14.78 ns/op 4126.88 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 81880058 14.75 ns/op 4134.44 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 906406 1313 ns/op 46.44 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 917503 1313 ns/op 46.46 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 915308 1312 ns/op 46.49 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 918404 1312 ns/op 46.51 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 892551 1338 ns/op 45.58 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 915020 1333 ns/op 45.76 MB/s 185 B/op 6 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 791368 1633 ns/op 37.36 MB/s 374 B/op 7 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 2314653 1064 ns/op 57.34 MB/s 265 B/op 5 allocs/op 
-BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6531552 1198 ns/op 50.94 MB/s 258 B/op 5 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6629763 242.5 ns/op 251.57 MB/s 90 B/op 2 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6388215 219.1 ns/op 278.36 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6472197 218.6 ns/op 279.09 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 80704821 14.76 ns/op 4132.33 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82628028 14.70 ns/op 4149.56 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 81870517 14.70 ns/op 4148.97 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 81944124 14.99 ns/op 4068.84 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 81918950 14.70 ns/op 4150.75 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82547270 14.91 ns/op 4092.20 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 1604370 749.9 ns/op 80.02 MB/s 145 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 1610148 749.7 ns/op 80.03 MB/s 145 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 1585026 744.8 ns/op 80.56 MB/s 145 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 1615032 749.9 ns/op 80.01 MB/s 145 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 1614980 743.3 ns/op 80.72 MB/s 145 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 1609586 741.8 ns/op 80.88 MB/s 145 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 1000000 1095 ns/op 54.77 MB/s 428 B/op 4 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 6214893 995.6 ns/op 60.26 MB/s 316 B/op 4 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5702408 224.5 ns/op 
267.21 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5712139 220.2 ns/op 272.50 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5783916 228.2 ns/op 262.91 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5713358 224.0 ns/op 267.91 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 78757815 14.92 ns/op 4020.51 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 81419875 14.79 ns/op 4057.15 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 81656493 14.75 ns/op 4068.12 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 81522430 14.73 ns/op 4073.37 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 81887037 14.70 ns/op 4080.93 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 82019505 14.72 ns/op 4074.99 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 1241600 965.5 ns/op 62.14 MB/s 145 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 1243646 964.8 ns/op 62.19 MB/s 145 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 1243516 968.1 ns/op 61.98 MB/s 144 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 1241917 965.3 ns/op 62.16 MB/s 145 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 1242903 985.0 ns/op 60.92 MB/s 145 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 1223456 964.3 ns/op 62.22 MB/s 145 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 1000000 1378 ns/op 43.55 MB/s 428 B/op 4 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 6265657 1229 ns/op 48.84 MB/s 316 B/op 4 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5960497 224.3 ns/op 267.52 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5961004 222.6 ns/op 269.52 
MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5772004 226.5 ns/op 264.87 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5766748 223.5 ns/op 268.51 MB/s 87 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 78664455 14.76 ns/op 4063.92 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 81305858 14.71 ns/op 4079.19 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 81626406 14.71 ns/op 4078.32 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 81168830 14.71 ns/op 4077.52 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 81860040 14.72 ns/op 4075.37 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 81093633 14.88 ns/op 4031.15 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 837949 1397 ns/op 62.29 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 869082 1380 ns/op 63.04 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 864015 1377 ns/op 63.18 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 873742 1374 ns/op 63.33 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 875703 1375 ns/op 63.27 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 866865 1375 ns/op 63.26 MB/s 194 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 772100 1709 ns/op 50.91 MB/s 543 B/op 7 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 2127564 1046 ns/op 83.14 MB/s 361 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6476034 1274 ns/op 68.30 MB/s 381 B/op 6 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6401709 221.3 ns/op 393.18 MB/s 103 B/op 1 allocs/op 
-BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6368766 220.2 ns/op 395.14 MB/s 103 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6404850 220.6 ns/op 394.34 MB/s 103 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 74606566 15.83 ns/op 5494.39 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76326774 15.72 ns/op 5536.01 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76140116 15.74 ns/op 5525.94 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76340330 15.69 ns/op 5544.89 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76240900 15.69 ns/op 5544.81 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76301294 15.73 ns/op 5531.49 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 753624 1592 ns/op 54.64 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 757292 1599 ns/op 54.42 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 758196 1588 ns/op 54.79 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 753902 1586 ns/op 54.85 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 758770 1589 ns/op 54.74 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 757748 1590 ns/op 54.71 MB/s 193 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 653979 1985 ns/op 43.82 MB/s 561 B/op 7 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 2344717 731.5 ns/op 118.93 MB/s 263 B/op 3 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6440574 1420 ns/op 61.26 MB/s 369 B/op 5 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6506366 238.2 ns/op 365.22 MB/s 
107 B/op 2 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6504939 220.8 ns/op 394.05 MB/s 103 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6399746 221.0 ns/op 393.66 MB/s 103 B/op 1 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 75646941 15.95 ns/op 5453.57 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 75406885 15.73 ns/op 5532.42 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 76186243 15.69 ns/op 5545.76 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 76350855 15.76 ns/op 5521.29 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 76240896 15.70 ns/op 5542.36 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 76404126 15.90 ns/op 5471.17 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 241440 4945 ns/op 109.19 MB/s 1181 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 245013 5050 ns/op 106.94 MB/s 1180 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 245098 4887 ns/op 110.49 MB/s 1177 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 244785 4971 ns/op 108.62 MB/s 1179 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 245007 4880 ns/op 110.66 MB/s 1182 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 245986 4878 ns/op 110.71 MB/s 1181 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 232534 5203 ns/op 103.78 MB/s 1845 B/op 4 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 1000000 4309 ns/op 125.31 MB/s 1613 B/op 4 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3491629 4013 ns/op 134.57 MB/s 1471 B/op 4 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3670467 847.5 
ns/op 637.15 MB/s 680 B/op 2 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3669694 385.0 ns/op 1402.66 MB/s 583 B/op 1 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3242532 356.5 ns/op 1514.63 MB/s 583 B/op 1 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 50391319 23.77 ns/op 22714.54 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51225590 23.32 ns/op 23159.25 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51732408 23.74 ns/op 22751.21 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 46074986 24.16 ns/op 22352.67 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 43649127 24.43 ns/op 22104.61 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 49954903 23.53 ns/op 22952.45 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 127574 9378 ns/op 57.58 MB/s 1180 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 128386 9386 ns/op 57.53 MB/s 1183 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 128604 9280 ns/op 58.19 MB/s 1178 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 129218 9264 ns/op 58.29 MB/s 1179 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 129030 9261 ns/op 58.31 MB/s 1179 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 129080 9266 ns/op 58.28 MB/s 1180 B/op 3 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 123823 9282 ns/op 58.18 MB/s 1817 B/op 4 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 1000000 8943 ns/op 60.38 MB/s 1754 B/op 4 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3532728 7337 ns/op 73.60 MB/s 1481 B/op 4 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3610767 705.9 
ns/op 764.94 MB/s 626 B/op 2 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3502867 387.5 ns/op 1393.73 MB/s 583 B/op 1 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3706471 680.7 ns/op 793.25 MB/s 640 B/op 2 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51185895 24.01 ns/op 22492.97 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51442992 23.44 ns/op 23041.30 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 47312392 23.56 ns/op 22917.72 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51727110 23.33 ns/op 23144.01 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51212746 23.62 ns/op 22862.18 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51598200 23.23 ns/op 23247.62 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 21105 57258 ns/op 35.80 MB/s 1389 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20656 57558 ns/op 35.62 MB/s 1386 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 21045 57257 ns/op 35.80 MB/s 1386 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20884 57463 ns/op 35.68 MB/s 1391 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20984 56898 ns/op 36.03 MB/s 1388 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 21164 57796 ns/op 35.47 MB/s 1388 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 103934 31906 ns/op 64.25 MB/s 3143 B/op 6 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1000000 52097 ns/op 39.35 MB/s 3737 B/op 10 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1298925 14140 ns/op 144.98 MB/s 2637 B/op 4 allocs/op 
-BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1000000 1288 ns/op 1592.17 MB/s 2311 B/op 1 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 2546826 30224 ns/op 67.83 MB/s 3071 B/op 6 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1000000 8376 ns/op 244.74 MB/s 2311 B/op 1 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 25786026 44.71 ns/op 45849.62 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27173578 44.15 ns/op 46427.72 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27221428 44.54 ns/op 46030.74 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27213686 44.07 ns/op 46519.79 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27233990 44.27 ns/op 46310.26 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27164018 44.12 ns/op 46460.92 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 19785 60051 ns/op 34.14 MB/s 1386 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 20198 60161 ns/op 34.08 MB/s 1391 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 19585 60345 ns/op 33.97 MB/s 1390 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 19956 61714 ns/op 33.22 MB/s 1391 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 19554 61682 ns/op 33.24 MB/s 1388 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 19830 60050 ns/op 34.14 MB/s 1393 B/op 9 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 38818 29507 ns/op 69.48 MB/s 3059 B/op 6 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 1000000 58539 ns/op 35.02 MB/s 3835 B/op 10 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 2186653 33757 
ns/op 60.73 MB/s 3157 B/op 6 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 1000000 1283 ns/op 1597.72 MB/s 2311 B/op 1 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 1653430 1256 ns/op 1632.67 MB/s 2311 B/op 1 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 2195628 2716 ns/op 754.79 MB/s 2317 B/op 2 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 26531894 44.76 ns/op 45801.05 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 26634384 44.68 ns/op 45878.57 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27184633 44.97 ns/op 45583.96 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27011893 44.46 ns/op 46104.62 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27183812 44.09 ns/op 46498.94 MB/s 0 B/op 0 allocs/op -BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27269318 44.17 ns/op 46406.38 MB/s 0 B/op 0 allocs/op -PASS -ok github.com/olekukonko/tablewriter/pkg/twwidth 724.296s -? github.com/olekukonko/tablewriter/renderer [no test files] -PASS -ok github.com/olekukonko/tablewriter/tests 2.959s -PASS -ok github.com/olekukonko/tablewriter/tw 0.270s diff --git a/vendor/github.com/olekukonko/tablewriter/option.go b/vendor/github.com/olekukonko/tablewriter/option.go index 0ec9844b0f..a895b193ea 100644 --- a/vendor/github.com/olekukonko/tablewriter/option.go +++ b/vendor/github.com/olekukonko/tablewriter/option.go @@ -528,6 +528,17 @@ func WithTrimSpace(state tw.State) Option { } } +// WithTrimTab sets whether leading and trailing tab characters are automatically trimmed. +// Logs the change if debugging is enabled. 
+func WithTrimTab(state tw.State) Option { + return func(target *Table) { + target.config.Behavior.TrimTab = state + if target.logger != nil { + target.logger.Debugf("Option: WithTrimTab applied to Table: %v", state) + } + } +} + // WithTrimLine sets whether empty visual lines within a cell are trimmed. // Logs the change if debugging is enabled. func WithTrimLine(state tw.State) Option { @@ -781,6 +792,7 @@ func defaultConfig() Config { Behavior: tw.Behavior{ AutoHide: tw.Off, TrimSpace: tw.On, + TrimTab: tw.On, TrimLine: tw.On, Structs: tw.Struct{ AutoHeader: tw.Off, @@ -920,6 +932,7 @@ func mergeConfig(dst, src Config) Config { dst.Debug = src.Debug || dst.Debug dst.Behavior.AutoHide = src.Behavior.AutoHide dst.Behavior.TrimSpace = src.Behavior.TrimSpace + dst.Behavior.TrimTab = src.Behavior.TrimTab dst.Behavior.Compact = src.Behavior.Compact dst.Behavior.Header = src.Behavior.Header dst.Behavior.Footer = src.Behavior.Footer diff --git a/vendor/github.com/olekukonko/tablewriter/pkg/twwarp/wrap.go b/vendor/github.com/olekukonko/tablewriter/pkg/twwarp/wrap.go index 5977aac26e..f6fa17e42d 100644 --- a/vendor/github.com/olekukonko/tablewriter/pkg/twwarp/wrap.go +++ b/vendor/github.com/olekukonko/tablewriter/pkg/twwarp/wrap.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a MIT // license that can be found in the LICENSE file. -// This module is a Table Writer API for the Go Programming Language. +// This module is a Table Writer API for the Go Programming Language. 
// The protocols were written in pure Go and works on windows and unix systems package twwarp @@ -13,8 +13,7 @@ import ( "unicode" "github.com/clipperhouse/uax29/v2/graphemes" - "github.com/olekukonko/tablewriter/pkg/twwidth" // IMPORT YOUR NEW PACKAGE - // "github.com/mattn/go-runewidth" // This can be removed if all direct uses are gone + "github.com/olekukonko/tablewriter/pkg/twwidth" ) const ( @@ -60,8 +59,7 @@ func WrapString(s string, lim int) ([]string, int) { var lines []string max := 0 for _, v := range words { - // max = runewidth.StringWidth(v) // OLD - max = twwidth.Width(v) // NEW: Use twdw.Width + max = twwidth.Width(v) if max > lim { lim = max } @@ -84,10 +82,8 @@ func WrapStringWithSpaces(s string, lim int) ([]string, int) { return []string{""}, lim } if strings.TrimSpace(s) == "" { // All spaces - // if runewidth.StringWidth(s) <= lim { // OLD - if twwidth.Width(s) <= lim { // NEW: Use twdw.Width - // return []string{s}, runewidth.StringWidth(s) // OLD - return []string{s}, twwidth.Width(s) // NEW: Use twdw.Width + if twwidth.Width(s) <= lim { + return []string{s}, twwidth.Width(s) } // For very long all-space strings, "wrap" by truncating to the limit. 
if lim > 0 { @@ -118,8 +114,7 @@ func WrapStringWithSpaces(s string, lim int) ([]string, int) { maxCoreWordWidth := 0 for _, v := range words { - // w := runewidth.StringWidth(v) // OLD - w := twwidth.Width(v) // NEW: Use twdw.Width + w := twwidth.Width(v) if w > maxCoreWordWidth { maxCoreWordWidth = w } @@ -156,8 +151,7 @@ func stringToDisplayWidth(s string, targetWidth int) (substring string, actualWi g := graphemes.FromString(s) for g.Next() { grapheme := g.Value() - // graphemeWidth := runewidth.StringWidth(grapheme) // OLD - graphemeWidth := twwidth.Width(grapheme) // NEW: Use twdw.Width + graphemeWidth := twwidth.Width(grapheme) if currentWidth+graphemeWidth > targetWidth { break @@ -187,8 +181,7 @@ func WrapWords(words []string, spc, lim, pen int) [][]string { } lengths := make([]int, n) for i := 0; i < n; i++ { - // lengths[i] = runewidth.StringWidth(words[i]) // OLD - lengths[i] = twwidth.Width(words[i]) // NEW: Use twdw.Width + lengths[i] = twwidth.Width(words[i]) } nbrk := make([]int, n) cost := make([]int, n) diff --git a/vendor/github.com/olekukonko/tablewriter/pkg/twwidth/cache.go b/vendor/github.com/olekukonko/tablewriter/pkg/twwidth/cache.go new file mode 100644 index 0000000000..94e9746411 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/pkg/twwidth/cache.go @@ -0,0 +1,26 @@ +package twwidth + +import "github.com/olekukonko/tablewriter/pkg/twcache" + +// widthCache stores memoized results of Width calculations to improve performance. 
+var widthCache *twcache.LRU[cacheKey, int] + +type cacheKey struct { + eastAsian bool + str string +} + +// SetCacheCapacity changes the cache size dynamically +// If capacity <= 0, disables caching entirely +func SetCacheCapacity(capacity int) { + mu.Lock() + defer mu.Unlock() + + if capacity <= 0 { + widthCache = nil // nil = fully disabled + return + } + + newCache := twcache.NewLRU[cacheKey, int](capacity) + widthCache = newCache +} diff --git a/vendor/github.com/olekukonko/tablewriter/pkg/twwidth/ea.go b/vendor/github.com/olekukonko/tablewriter/pkg/twwidth/ea.go new file mode 100644 index 0000000000..378e129faf --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/pkg/twwidth/ea.go @@ -0,0 +1,424 @@ +/* +Package twwidth provides intelligent East Asian width detection. + +In 2025/2026, most modern terminal emulators (VSCode, Windows Terminal, iTerm2, +Alacritty) and modern monospace fonts (Hack, Fira Code, Cascadia Code) treat +box-drawing characters as Single Width, regardless of the underlying OS Locale. + +Detection Logic (in order of priority): +- RUNEWIDTH_EASTASIAN environment variable (explicit user override) +- Force Legacy Mode (programmatic override for backward compatibility) +- Modern environment detection (VSCode, Windows Terminal, etc. -> Narrow) +- Locale-based detection (CJK locales in traditional terminals -> Wide) + +This prioritization ensures that: +- Users can always override behavior using RUNEWIDTH_EASTASIAN +- Modern development environments work correctly by default +- Traditional CJK terminals maintain compatibility via locale checks + +Examples: + + // Force narrow borders (for Hack font in zh_CN) + RUNEWIDTH_EASTASIAN=0 go run . + + // Force wide borders (for legacy CJK terminals) + RUNEWIDTH_EASTASIAN=1 go run . 
+*/ +package twwidth + +import ( + "os" + "runtime" + "strings" + "sync" +) + +// Environment Variable Constants +const ( + EnvLCAll = "LC_ALL" + EnvLCCtype = "LC_CTYPE" + EnvLang = "LANG" + EnvRuneWidthEastAsian = "RUNEWIDTH_EASTASIAN" + EnvTerm = "TERM" + EnvTermProgram = "TERM_PROGRAM" + EnvTermProgramWsl = "TERM_PROGRAM_WSL" + EnvWTProfile = "WT_PROFILE_ID" // Windows Terminal + EnvConEmuANSI = "ConEmuANSI" // ConEmu + EnvAlacritty = "ALACRITTY_LOG" // Alacritty + EnvVTEVersion = "VTE_VERSION" // GNOME/VTE +) + +const ( + overwriteOn = "override_on" + overwriteOff = "override_off" + + envModern = "modern_env" + envCjk = "locale_cjk" + envAscii = "default_ascii" +) + +// CJK Language Codes (Prefixes) +// Covers ISO 639-1 (2-letter) and common full names used in some systems. +var cjkPrefixes = []string{ + "zh", "ja", "ko", // Standard: Chinese, Japanese, Korean + "chi", "zho", // ISO 639-2/B and T for Chinese + "jpn", "kor", // ISO 639-2 for Japanese, Korean + "chinese", "japanese", "korean", // Full names (rare but possible in some legacy systems) +} + +// CJK Region Codes +// Checks for specific regions that imply CJK font usage (e.g., en_HK). 
+var cjkRegions = map[string]bool{ + "cn": true, // China + "tw": true, // Taiwan + "hk": true, // Hong Kong + "mo": true, // Macau + "jp": true, // Japan + "kr": true, // South Korea + "kp": true, // North Korea + "sg": true, // Singapore (Often uses CJK fonts) +} + +// Modern environments that should use narrow borders (1-width box chars) +var modernEnvironments = map[string]bool{ + // Terminal programs + "vscode": true, "visual studio code": true, + "iterm.app": true, "iterm2": true, + "windows terminal": true, "windowsterminal": true, + "alacritty": true, "kitty": true, + "hyper": true, "tabby": true, "terminus": true, "fluentterminal": true, + "warp": true, "ghostty": true, "rio": true, + "jetbrains-jediterm": true, + + // Terminal types (TERM signatures) + "xterm-kitty": true, "xterm-ghostty": true, "wezterm": true, +} + +var ( + eastAsianOnce sync.Once + eastAsianVal bool + + // Legacy override control + // Renamed to cfgMu to avoid conflict with width.go's mu + cfgMu sync.RWMutex + forceLegacyEastAsian = false +) + +type Enviroment struct { + GOOS string `json:"goos"` + LC_ALL string `json:"lc_all"` + LC_CTYPE string `json:"lc_ctype"` + LANG string `json:"lang"` + RUNEWIDTH_EASTASIAN string `json:"runewidth_eastasian"` + TERM string `json:"term"` + TERM_PROGRAM string `json:"term_program"` +} + +// State captures the calculated internal state. +type State struct { + NormalizedLocale string `json:"normalized_locale"` + IsCJKLocale bool `json:"is_cjk_locale"` + IsModernEnv bool `json:"is_modern_env"` + LegacyOverrideMode bool `json:"legacy_override_mode"` +} + +// Detection aggregates all debug information regarding East Asian width detection. +type Detection struct { + AutoUseEastAsian bool `json:"auto_use_east_asian"` + DetectionMode string `json:"detection_mode"` + Raw Enviroment `json:"raw"` + Derived State `json:"derived"` +} + +// EastAsianForceLegacy forces the detection logic to ignore modern environment checks. 
+// It relies solely on Locale detection. This is useful for applications that need +// strict backward compatibility. +// +// Note: This does NOT override RUNEWIDTH_EASTASIAN. User environment variables take precedence. +// This should be called before the first table render. +func EastAsianForceLegacy(force bool) { + cfgMu.Lock() + defer cfgMu.Unlock() + forceLegacyEastAsian = force +} + +// EastAsianDetect checks the environment variables to determine if +// East Asian width calculations should be enabled. +func EastAsianDetect() bool { + eastAsianOnce.Do(func() { + eastAsianVal = detectEastAsian() + }) + return eastAsianVal +} + +// EastAsianConservative is a stricter version that only defaults to Narrow +// if the terminal is definitely known to be modern (e.g. VSCode, iTerm2). +// It avoids heuristics like checking "xterm" in the TERM variable. +func EastAsianConservative() bool { + // Check overrides first + if val, found := checkOverrides(); found { + return val + } + + // Stricter modern environment detection + if isConservativeModernEnvironment() { + return false + } + + // Fall back to locale + return checkLocale() +} + +// EastAsianMode returns the decision path used for the current environment. +// Useful for debugging why a specific width was chosen. +func EastAsianMode() string { + // Check override + if val, found := checkOverrides(); found { + if val { + return overwriteOn + } + return overwriteOff + } + + cfgMu.RLock() + legacy := forceLegacyEastAsian + cfgMu.RUnlock() + + if legacy { + if checkLocale() { + return envCjk + } + return envAscii + } + + if isModernEnvironment() { + return envModern + } + + if checkLocale() { + return envCjk + } + + return envAscii +} + +// Debugging returns detailed information about the detection decision. +// Useful for users to include in Github issues. 
+func Debugging() Detection { + locale := getNormalizedLocale() + + cfgMu.RLock() + legacy := forceLegacyEastAsian + cfgMu.RUnlock() + + return Detection{ + AutoUseEastAsian: EastAsianDetect(), + DetectionMode: EastAsianMode(), + Raw: Enviroment{ + GOOS: runtime.GOOS, + LC_ALL: os.Getenv(EnvLCAll), + LC_CTYPE: os.Getenv(EnvLCCtype), + LANG: os.Getenv(EnvLang), + RUNEWIDTH_EASTASIAN: os.Getenv(EnvRuneWidthEastAsian), + TERM: os.Getenv(EnvTerm), + TERM_PROGRAM: os.Getenv(EnvTermProgram), + }, + Derived: State{ + NormalizedLocale: locale, + IsCJKLocale: isCJKLocale(locale), + IsModernEnv: isModernEnvironment(), + LegacyOverrideMode: legacy, + }, + } +} + +// detectEastAsian evaluates the environment and locale settings to determine if East Asian width rules should apply. +func detectEastAsian() bool { + // User Override check (Highest Priority) + if val, found := checkOverrides(); found { + return val + } + + // Force Legacy Mode check + cfgMu.RLock() + isLegacy := forceLegacyEastAsian + cfgMu.RUnlock() + + if isLegacy { + // Legacy mode ignores modern environment checks, + // relying solely on locale. + return checkLocale() + } + + // Modern Environment Detection + // If modern, we assume Single Width (return false) + if isModernEnvironment() { + return false + } + + // 4. Locale Fallback + return checkLocale() +} + +// checkOverrides looks for RUNEWIDTH_EASTASIAN +func checkOverrides() (bool, bool) { + if rw := os.Getenv(EnvRuneWidthEastAsian); rw != "" { + rw = strings.ToLower(rw) + if rw == "0" || rw == "off" || rw == "false" || rw == "no" { + return false, true + } + if rw == "1" || rw == "on" || rw == "true" || rw == "yes" { + return true, true + } + } + return false, false +} + +// checkLocale performs the string analysis on LANG/LC_ALL +func checkLocale() bool { + locale := getNormalizedLocale() + if locale == "" { + return false + } + return isCJKLocale(locale) +} + +// isModernEnvironment performs comprehensive checks for modern terminal capabilities. 
+func isModernEnvironment() bool { + // Check TERM_PROGRAM (Most reliable) + if termProg := os.Getenv(EnvTermProgram); termProg != "" { + termProgLower := strings.ToLower(termProg) + if modernEnvironments[termProgLower] { + return true + } + } + + // Check WSL specific variable + if os.Getenv(EnvTermProgramWsl) != "" { + return true + } + + // Windows Specifics + if runtime.GOOS == "windows" { + // Windows Terminal + if os.Getenv(EnvWTProfile) != "" { + return true + } + // ConEmu/Cmder + if os.Getenv(EnvConEmuANSI) == "ON" { + return true + } + // Modern Windows console (Windows 10+) check via TERM + if term := os.Getenv(EnvTerm); term != "" { + termLower := strings.ToLower(term) + if strings.Contains(termLower, "xterm") || + strings.Contains(termLower, "vt") { + return true + } + } + } + + // VTE-based terminals (GNOME Terminal, Tilix, etc.) + if os.Getenv(EnvVTEVersion) != "" { + return true + } + + // Check for Alacritty specifically + if os.Getenv(EnvAlacritty) != "" { + return true + } + + // Check TERM for modern terminal signatures + if term := os.Getenv(EnvTerm); term != "" { + termLower := strings.ToLower(term) + // Specific modern terminals often put their name in TERM + if modernEnvironments[termLower] { + return true + } + // Heuristics for standard modern-capable descriptors + if strings.Contains(termLower, "xterm") && !strings.Contains(termLower, "xterm-mono") { + return true + } + if strings.Contains(termLower, "screen") || + strings.Contains(termLower, "tmux") { + return true + } + } + + return false +} + +// isConservativeModernEnvironment performs strict checks only for known modern terminals. 
+func isConservativeModernEnvironment() bool { + termProg := strings.ToLower(os.Getenv(EnvTermProgram)) + + // Allow-list of definitely modern terminals + switch termProg { + case "vscode", "visual studio code": + return true + case "iterm.app", "iterm2": + return true + case "windows terminal", "windowsterminal": + return true + case "alacritty", "wezterm", "kitty", "ghostty": + return true + case "warp", "tabby", "hyper": + return true + } + + // Windows Terminal via specific Env + if os.Getenv(EnvWTProfile) != "" { + return true + } + + return false +} + +// isCJKLocale determines if a given locale string corresponds to a CJK (Chinese, Japanese, Korean) language or region. +func isCJKLocale(locale string) bool { + // Check Language Prefix + for _, prefix := range cjkPrefixes { + if strings.HasPrefix(locale, prefix) { + return true + } + } + + // Check Regions + parts := strings.Split(locale, "_") + if len(parts) > 1 { + for _, part := range parts[1:] { + if cjkRegions[part] { + return true + } + } + } + + return false +} + +// getNormalizedLocale returns the normalized locale by inspecting environment variables LC_ALL, LC_CTYPE, and LANG. 
+func getNormalizedLocale() string { + var locale string + if loc := os.Getenv(EnvLCAll); loc != "" { + locale = loc + } else if loc := os.Getenv(EnvLCCtype); loc != "" { + locale = loc + } else if loc := os.Getenv(EnvLang); loc != "" { + locale = loc + } + + // Fast fail for empty or standard C/POSIX locales + if locale == "" || locale == "C" || locale == "POSIX" { + return "" + } + + // Strip encoding and modifiers + if idx := strings.IndexByte(locale, '.'); idx != -1 { + locale = locale[:idx] + } + if idx := strings.IndexByte(locale, '@'); idx != -1 { + locale = locale[:idx] + } + + return strings.ToLower(locale) +} diff --git a/vendor/github.com/olekukonko/tablewriter/pkg/twwidth/tab.go b/vendor/github.com/olekukonko/tablewriter/pkg/twwidth/tab.go new file mode 100644 index 0000000000..c1f6c614f1 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/pkg/twwidth/tab.go @@ -0,0 +1,288 @@ +package twwidth + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" + "sync" +) + +type Tab rune + +const ( + TabWidthDefault = 8 + TabString Tab = '\t' +) + +// IsTab returns true if t equals the default tab. +func (t Tab) IsTab() bool { + return t == TabString +} + +func (t Tab) Byte() byte { + return byte(t) +} + +func (t Tab) Rune() rune { + return rune(t) +} + +func (t Tab) String() string { + return string(t) +} + +// IsTab returns true if r is a tab rune. +func IsTab(r rune) bool { + return r == TabString.Rune() +} + +type Tabinal struct { + once sync.Once + width int + mu sync.RWMutex +} + +func (t *Tabinal) String() string { + return TabString.String() +} + +// Size returns the current tab width, default if unset. +func (t *Tabinal) Size() int { + t.once.Do(t.init) + + t.mu.RLock() + w := t.width + t.mu.RUnlock() + + if w <= 0 { + return TabWidthDefault + } + return w +} + +// SetWidth sets the tab width if valid (1–32). 
+func (t *Tabinal) SetWidth(w int) { + if w <= 0 || w > 32 { + return + } + t.mu.Lock() + t.width = w + t.mu.Unlock() +} + +func (t *Tabinal) init() { + w := t.detect() + t.mu.Lock() + t.width = w + t.mu.Unlock() +} + +// detect determines tab width using env, editorconfig, project, or term. +func (t *Tabinal) detect() int { + if w := envInt("TABWIDTH"); w > 0 { + return clamp(w) + } + if w := envInt("TS"); w > 0 { + return clamp(w) + } + if w := envInt("VIM_TABSTOP"); w > 0 { + return clamp(w) + } + if w := editorConfigTabWidth(); w > 0 { + return w + } + if w := projectHeuristic(); w > 0 { + return w + } + if w := termHeuristic(); w > 0 { + return w + } + return 0 +} + +func editorConfigTabWidth() int { + dir, err := os.Getwd() + if err != nil { + return 0 + } + + for dir != "" && dir != "/" && dir != "." { + path := filepath.Join(dir, ".editorconfig") + if w := parseEditorConfig(path); w > 0 { + return clamp(w) + } + parent := filepath.Dir(dir) + if parent == dir { + break + } + dir = parent + } + return 0 +} + +// parseEditorConfig reads tab_width or indent_size from a file. 
+func parseEditorConfig(path string) int { + f, err := os.Open(path) + if err != nil { + return 0 + } + defer f.Close() + + scanner := bufio.NewScanner(f) + inMatch := false + globalWidth := 0 + + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if line == "" || strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") { + continue + } + + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + pattern := line[1 : len(line)-1] + inMatch = pattern == "*" + + knownExts := []string{".go", ".py", ".js", ".ts", ".java", ".rs"} + for _, ext := range knownExts { + if strings.Contains(pattern, ext) { + inMatch = true + break + } + } + continue + } + + if !inMatch && globalWidth == 0 { + continue + } + + parts := strings.SplitN(line, "=", 2) + if len(parts) != 2 { + continue + } + key := strings.TrimSpace(parts[0]) + val := strings.TrimSpace(parts[1]) + + switch key { + case "tab_width": + if w, err := strconv.Atoi(val); err == nil && w > 0 { + if inMatch { + return clamp(w) + } + if globalWidth == 0 { + globalWidth = w + } + } + case "indent_size": + if val == "tab" { + continue + } + if w, err := strconv.Atoi(val); err == nil && w > 0 { + if inMatch { + return clamp(w) + } + if globalWidth == 0 { + globalWidth = w + } + } + } + } + + return globalWidth +} + +// projectHeuristic returns 4 for known project types. 
+func projectHeuristic() int { + dir, err := os.Getwd() + if err != nil { + return 0 + } + + indicators := []string{ + "go.mod", "go.sum", + "package.json", "package-lock.json", "yarn.lock", "pnpm-lock.yaml", + "setup.py", "pyproject.toml", "requirements.txt", "Pipfile", + "pom.xml", "build.gradle", "build.gradle.kts", + "Cargo.toml", + "composer.json", + } + + for _, indicator := range indicators { + if _, err := os.Stat(filepath.Join(dir, indicator)); err == nil { + return 4 + } + } + + patterns := []string{"*.go", "*.py", "*.js", "*.ts", "*.java", "*.rs"} + for _, pattern := range patterns { + if matches, _ := filepath.Glob(filepath.Join(dir, pattern)); len(matches) > 0 { + return 4 + } + } + + return 0 +} + +// termHeuristic returns a default width based on the TERM variable. +func termHeuristic() int { + termEnv := strings.ToLower(os.Getenv("TERM")) + if termEnv == "" { + return 0 + } + + if strings.Contains(termEnv, "vt52") { + return 2 + } + + if strings.Contains(termEnv, "xterm") || + strings.Contains(termEnv, "screen") || + strings.Contains(termEnv, "tmux") || + strings.Contains(termEnv, "linux") || + strings.Contains(termEnv, "ansi") || + strings.Contains(termEnv, "rxvt") { + return TabWidthDefault + } + + return 0 +} + +func clamp(w int) int { + if w <= 0 { + return 0 + } + if w > 32 { + return 32 + } + return w +} + +var ( + globalTab *Tabinal + globalTabOnce sync.Once +) + +// TabInstance returns the singleton Tabinal. +func TabInstance() *Tabinal { + globalTabOnce.Do(func() { + globalTab = &Tabinal{} + }) + return globalTab +} + +// TabWidth returns the detected global tab width. +func TabWidth() int { + return TabInstance().Size() +} + +// SetTabWidth sets the global tab width. 
+func SetTabWidth(w int) { + TabInstance().SetWidth(w) +} + +func envInt(k string) int { + v := os.Getenv(k) + w, _ := strconv.Atoi(v) + return w +} diff --git a/vendor/github.com/olekukonko/tablewriter/pkg/twwidth/width.go b/vendor/github.com/olekukonko/tablewriter/pkg/twwidth/width.go index 4afff74f82..14b334b095 100644 --- a/vendor/github.com/olekukonko/tablewriter/pkg/twwidth/width.go +++ b/vendor/github.com/olekukonko/tablewriter/pkg/twwidth/width.go @@ -21,6 +21,10 @@ const ( // Options allows for configuring width calculation on a per-call basis. type Options struct { EastAsianWidth bool + + // Explicitly force box drawing chars to be narrow + // regardless of EastAsianWidth setting. + ForceNarrowBorders bool } // globalOptions holds the global displaywidth configuration, including East Asian width settings. @@ -29,30 +33,34 @@ var globalOptions Options // mu protects access to globalOptions for thread safety. var mu sync.Mutex -// widthCache stores memoized results of Width calculations to improve performance. -var widthCache *twcache.LRU[string, int] - // ansi is a compiled regular expression for stripping ANSI escape codes from strings. var ansi = Filter() func init() { - // Initialize global options by detecting from the environment, - // which is the one key feature we get from go-runewidth. + isEastAsian := EastAsianDetect() + cond := runewidth.NewCondition() + cond.EastAsianWidth = isEastAsian + globalOptions = Options{ - EastAsianWidth: cond.EastAsianWidth, + EastAsianWidth: isEastAsian, + + // Auto-enable ForceNarrowBorders for edge cases. + // If EastAsianWidth is ON (e.g. forced via Env Var), but we detect + // a modern environment, we might technically want to narrow borders + // while keeping text wide. 
+ ForceNarrowBorders: isEastAsian && isModernEnvironment(), } - widthCache = twcache.NewLRU[string, int](cacheCapacity) + + widthCache = twcache.NewLRU[cacheKey, int](cacheCapacity) } -// makeCacheKey generates a string key for the LRU cache from the input string -// and the current East Asian width setting. -// Prefix "0:" for false, "1:" for true. -func makeCacheKey(str string, eastAsianWidth bool) string { - if eastAsianWidth { - return cacheEastAsianPrefix + str - } - return cachePrefix + str +// Display calculates the visual width of a string using a specific runewidth.Condition. +// Deprecated: use WidthWithOptions with the new twwidth.Options struct instead. +// This function is kept for backward compatibility. +func Display(cond *runewidth.Condition, str string) int { + opts := Options{EastAsianWidth: cond.EastAsianWidth} + return WidthWithOptions(str, opts) } // Filter compiles and returns a regular expression for matching ANSI escape sequences, @@ -73,25 +81,15 @@ func Filter() *regexp.Regexp { return regexp.MustCompile("(" + regCSI + "|" + regOSC + ")") } -// SetOptions sets the global options for width calculation. -// This function is thread-safe. -func SetOptions(opts Options) { +// GetCacheStats returns current cache statistics +func GetCacheStats() (size, capacity int, hitRate float64) { mu.Lock() defer mu.Unlock() - if globalOptions.EastAsianWidth != opts.EastAsianWidth { - globalOptions = opts - widthCache.Purge() - } -} -// SetEastAsian enables or disables East Asian width handling globally. -// This function is thread-safe. -// -// Example: -// -// twdw.SetEastAsian(true) // Enable East Asian width handling -func SetEastAsian(enable bool) { - SetOptions(Options{EastAsianWidth: enable}) + if widthCache == nil { + return 0, 0, 0 + } + return widthCache.Len(), widthCache.Cap(), widthCache.HitRate() } // IsEastAsian returns the current East Asian width setting. 
@@ -108,6 +106,7 @@ func IsEastAsian() bool { return globalOptions.EastAsianWidth } +// SetCondition sets the global East Asian width setting based on a runewidth.Condition. // Deprecated: use SetOptions with the new twwidth.Options struct instead. // This function is kept for backward compatibility. func SetCondition(cond *runewidth.Condition) { @@ -120,55 +119,33 @@ func SetCondition(cond *runewidth.Condition) { } } -// Width calculates the visual width of a string using the global cache for performance. -// It excludes ANSI escape sequences and accounts for the global East Asian width setting. +// SetEastAsian enables or disables East Asian width handling globally. // This function is thread-safe. // // Example: // -// width := twdw.Width("Hello\x1b[31mWorld") // Returns 10 -func Width(str string) int { - currentEA := IsEastAsian() - key := makeCacheKey(str, currentEA) - - if w, found := widthCache.Get(key); found { - return w - } - - opts := displaywidth.Options{EastAsianWidth: currentEA} - stripped := ansi.ReplaceAllLiteralString(str, "") - calculatedWidth := opts.String(stripped) - - widthCache.Add(key, calculatedWidth) - return calculatedWidth -} - -// WidthWithOptions calculates the visual width of a string with specific options, -// bypassing the global settings and cache. This is useful for one-shot calculations -// where global state is not desired. -func WidthWithOptions(str string, opts Options) int { - dwOpts := displaywidth.Options{EastAsianWidth: opts.EastAsianWidth} - stripped := ansi.ReplaceAllLiteralString(str, "") - return dwOpts.String(stripped) +// twdw.SetEastAsian(true) // Enable East Asian width handling +func SetEastAsian(enable bool) { + SetOptions(Options{EastAsianWidth: enable}) } -// WidthNoCache calculates the visual width of a string without using the global cache. 
-// -// Example: -// -// width := twdw.WidthNoCache("Hello\x1b[31mWorld") // Returns 10 -func WidthNoCache(str string) int { - // This function's behavior is equivalent to a one-shot calculation - // using the current global options. The WidthWithOptions function - // does not interact with the cache, thus fulfilling the requirement. - return WidthWithOptions(str, Options{EastAsianWidth: IsEastAsian()}) +// SetForceNarrow to preserve the new flag, or create a new setter +func SetForceNarrow(enable bool) { + mu.Lock() + defer mu.Unlock() + globalOptions.ForceNarrowBorders = enable + widthCache.Purge() // Clear cache because widths might change } -// Deprecated: use WidthWithOptions with the new twwidth.Options struct instead. -// This function is kept for backward compatibility. -func Display(cond *runewidth.Condition, str string) int { - opts := Options{EastAsianWidth: cond.EastAsianWidth} - return WidthWithOptions(str, opts) +// SetOptions sets the global options for width calculation. +// This function is thread-safe. +func SetOptions(opts Options) { + mu.Lock() + defer mu.Unlock() + if globalOptions.EastAsianWidth != opts.EastAsianWidth || globalOptions.ForceNarrowBorders != opts.ForceNarrowBorders { + globalOptions = opts + widthCache.Purge() + } } // Truncate shortens a string to fit within a specified visual width, optionally @@ -235,11 +212,13 @@ func Truncate(s string, maxWidth int, suffix ...string) string { // Case 4: String needs truncation (sDisplayWidth > maxWidth). // maxWidth is the total budget for the final string (content + suffix). - currentGlobalEastAsianWidth := IsEastAsian() + mu.Lock() + currentOpts := globalOptions + mu.Unlock() - // Special case for EastAsian true: if only suffix fits, return suffix. + // Special case for EastAsianDetect true: if only suffix fits, return suffix. // This was derived from previous test behavior. 
- if len(suffixStr) > 0 && currentGlobalEastAsianWidth { + if len(suffixStr) > 0 && currentOpts.EastAsianWidth { provisionalContentWidth := maxWidth - suffixDisplayWidth if provisionalContentWidth == 0 { // Exactly enough space for suffix only return suffixStr @@ -271,8 +250,6 @@ func Truncate(s string, maxWidth int, suffix ...string) string { inAnsiSequence := false ansiWrittenToContent := false - dwOpts := displaywidth.Options{EastAsianWidth: currentGlobalEastAsianWidth} - for _, r := range s { if r == '\x1b' { inAnsiSequence = true @@ -305,7 +282,7 @@ func Truncate(s string, maxWidth int, suffix ...string) string { ansiSeqBuf.Reset() } } else { // Normal character - runeDisplayWidth := dwOpts.Rune(r) + runeDisplayWidth := calculateRunewidth(r, currentOpts) if targetContentForIteration == 0 { // No budget for content at all break } @@ -342,28 +319,101 @@ func Truncate(s string, maxWidth int, suffix ...string) string { return result } -// SetCacheCapacity changes the cache size dynamically -// If capacity <= 0, disables caching entirely -func SetCacheCapacity(capacity int) { +// Width calculates the visual width of a string using the global cache for performance. +// It excludes ANSI escape sequences and accounts for the global East Asian width setting. +// This function is thread-safe. 
+// +// Example: +// +// width := twdw.Width("Hello\x1b[31mWorld") // Returns 10 +func Width(str string) int { + // Fast path ASCII (Optimization) + if len(str) == 1 && str[0] < 0x80 { + // Treat tab as special case even in fast path + if IsTab(rune(str[0])) { + return TabWidth() + } + return 1 + } + mu.Lock() - defer mu.Unlock() + currentOpts := globalOptions + mu.Unlock() - if capacity <= 0 { - widthCache = nil // nil = fully disabled - return + key := cacheKey{ + eastAsian: currentOpts.EastAsianWidth, + str: str, } - newCache := twcache.NewLRU[string, int](capacity) - widthCache = newCache + // Check Cache (Optimization) + if w, found := widthCache.Get(key); found { + return w + } + + //stripped := ansi.ReplaceAllLiteralString(str, "") + calculatedWidth := 0 + + for _, r := range strip(str) { + calculatedWidth += calculateRunewidth(r, currentOpts) + } + + // Store in Cache + widthCache.Add(key, calculatedWidth) + return calculatedWidth } -// GetCacheStats returns current cache statistics -func GetCacheStats() (size, capacity int, hitRate float64) { +// WidthNoCache calculates the visual width of a string without using the global cache. +// +// Example: +// +// width := twdw.WidthNoCache("Hello\x1b[31mWorld") // Returns 10 +func WidthNoCache(str string) int { + // This function's behavior is equivalent to a one-shot calculation + // using the current global options. The WidthWithOptions function + // does not interact with the cache, thus fulfilling the requirement. mu.Lock() - defer mu.Unlock() + opts := globalOptions + mu.Unlock() + return WidthWithOptions(str, opts) +} - if widthCache == nil { - return 0, 0, 0 +// WidthWithOptions calculates the visual width of a string with specific options, +// bypassing the global settings and cache. This is useful for one-shot calculations +// where global state is not desired. 
+func WidthWithOptions(str string, opts Options) int { + // stripped := ansi.ReplaceAllLiteralString(str, "") + calculatedWidth := 0 + for _, r := range strip(str) { + calculatedWidth += calculateRunewidth(r, opts) } - return widthCache.Len(), widthCache.Cap(), widthCache.HitRate() + return calculatedWidth +} + +// calculateRunewidth calculates the width of a single rune based on the provided options. +// It applies narrow overrides for box drawing characters if configured and handles Tabs. +func calculateRunewidth(r rune, opts Options) int { + if opts.ForceNarrowBorders && isBoxDrawingChar(r) { + return 1 + } + + // Explicitly handle Tabinal to ensure tables have enough space + // when TrimTab is Off. + if IsTab(r) { + return TabWidth() + } + + dwOpts := displaywidth.Options{EastAsianWidth: opts.EastAsianWidth} + return dwOpts.Rune(r) +} + +// isBoxDrawingChar checks if a rune is within the Unicode Box Drawing range. +func isBoxDrawingChar(r rune) bool { + return r >= 0x2500 && r <= 0x257F +} + +func strip(s string) string { + if strings.IndexByte(s, '\x1b') == -1 { + return s + } + return ansi.ReplaceAllLiteralString(s, "") } diff --git a/vendor/github.com/olekukonko/tablewriter/renderer/blueprint.go b/vendor/github.com/olekukonko/tablewriter/renderer/blueprint.go index 48638fb23e..18bece5399 100644 --- a/vendor/github.com/olekukonko/tablewriter/renderer/blueprint.go +++ b/vendor/github.com/olekukonko/tablewriter/renderer/blueprint.go @@ -44,7 +44,7 @@ func NewBlueprint(configs ...tw.Rendition) *Blueprint { // Merge user settings with default settings cfg.Settings = mergeSettings(cfg.Settings, userCfg.Settings) } - return &Blueprint{config: cfg, logger: ll.New("blueprint")} + return &Blueprint{config: cfg, logger: ll.New("blueprint").Disable()} } // Close performs cleanup (no-op in this implementation). 
@@ -322,14 +322,22 @@ func (f *Blueprint) formatCell(content string, width int, padding tw.Padding, al result.WriteString(content) rightPaddingWidth = totalPaddingWidth - padLeftWidth if rightPaddingWidth > 0 { - result.WriteString(tw.PadRight(tw.Empty, rightPadChar, rightPaddingWidth)) - f.logger.Debugf("Applied right padding: '%s' for %d width", rightPadChar, rightPaddingWidth) + padChar := rightPadChar + if padChar == tw.Empty { + padChar = tw.Space + } + result.WriteString(tw.PadRight(tw.Empty, padChar, rightPaddingWidth)) + f.logger.Debugf("Applied right padding: '%s' for %d width", padChar, rightPaddingWidth) } case tw.AlignRight: leftPaddingWidth = totalPaddingWidth - padRightWidth if leftPaddingWidth > 0 { - result.WriteString(tw.PadLeft(tw.Empty, leftPadChar, leftPaddingWidth)) - f.logger.Debugf("Applied left padding: '%s' for %d width", leftPadChar, leftPaddingWidth) + padChar := leftPadChar + if padChar == tw.Empty { + padChar = tw.Space + } + result.WriteString(tw.PadLeft(tw.Empty, padChar, leftPaddingWidth)) + f.logger.Debugf("Applied left padding: '%s' for %d width", padChar, leftPaddingWidth) } result.WriteString(content) result.WriteString(rightPadChar) @@ -337,15 +345,23 @@ func (f *Blueprint) formatCell(content string, width int, padding tw.Padding, al leftPaddingWidth = (totalPaddingWidth-padLeftWidth-padRightWidth)/2 + padLeftWidth rightPaddingWidth = totalPaddingWidth - leftPaddingWidth if leftPaddingWidth > padLeftWidth { - result.WriteString(tw.PadLeft(tw.Empty, leftPadChar, leftPaddingWidth-padLeftWidth)) - f.logger.Debugf("Applied left centering padding: '%s' for %d width", leftPadChar, leftPaddingWidth-padLeftWidth) + padChar := leftPadChar + if padChar == tw.Empty { + padChar = tw.Space + } + result.WriteString(tw.PadLeft(tw.Empty, padChar, leftPaddingWidth-padLeftWidth)) + f.logger.Debugf("Applied left centering padding: '%s' for %d width", padChar, leftPaddingWidth-padLeftWidth) } result.WriteString(leftPadChar) 
result.WriteString(content) result.WriteString(rightPadChar) if rightPaddingWidth > padRightWidth { - result.WriteString(tw.PadRight(tw.Empty, rightPadChar, rightPaddingWidth-padRightWidth)) - f.logger.Debugf("Applied right centering padding: '%s' for %d width", rightPadChar, rightPaddingWidth-padRightWidth) + padChar := rightPadChar + if padChar == tw.Empty { + padChar = tw.Space + } + result.WriteString(tw.PadRight(tw.Empty, padChar, rightPaddingWidth-padRightWidth)) + f.logger.Debugf("Applied right centering padding: '%s' for %d width", padChar, rightPaddingWidth-padRightWidth) } default: // Default to left alignment @@ -353,8 +369,12 @@ func (f *Blueprint) formatCell(content string, width int, padding tw.Padding, al result.WriteString(content) rightPaddingWidth = totalPaddingWidth - padLeftWidth if rightPaddingWidth > 0 { - result.WriteString(tw.PadRight(tw.Empty, rightPadChar, rightPaddingWidth)) - f.logger.Debugf("Applied right padding: '%s' for %d width", rightPadChar, rightPaddingWidth) + padChar := rightPadChar + if padChar == tw.Empty { + padChar = tw.Space + } + result.WriteString(tw.PadRight(tw.Empty, padChar, rightPaddingWidth)) + f.logger.Debugf("Applied right padding: '%s' for %d width", padChar, rightPaddingWidth) } } diff --git a/vendor/github.com/olekukonko/tablewriter/renderer/colorized.go b/vendor/github.com/olekukonko/tablewriter/renderer/colorized.go index 9bee749312..21ef89463d 100644 --- a/vendor/github.com/olekukonko/tablewriter/renderer/colorized.go +++ b/vendor/github.com/olekukonko/tablewriter/renderer/colorized.go @@ -4,7 +4,6 @@ import ( "io" "strings" - "github.com/fatih/color" "github.com/olekukonko/ll" "github.com/olekukonko/ll/lh" "github.com/olekukonko/tablewriter/pkg/twwidth" @@ -24,28 +23,6 @@ type ColorizedConfig struct { Symbols tw.Symbols // Symbols for table drawing (e.g., corners, lines) } -// Colors is a slice of color attributes for use with fatih/color, such as color.FgWhite or color.Bold. 
-type Colors []color.Attribute - -// Tint defines foreground and background color settings for table elements, with optional per-column overrides. -type Tint struct { - FG Colors // Foreground color attributes - BG Colors // Background color attributes - Columns []Tint // Per-column color settings -} - -// Apply applies the Tint's foreground and background colors to the given text, returning the text unchanged if no colors are set. -func (t Tint) Apply(text string) string { - if len(t.FG) == 0 && len(t.BG) == 0 { - return text - } - // Combine foreground and background colors - combinedColors := append(t.FG, t.BG...) - // Create a color function and apply it to the text - c := color.New(combinedColors...).SprintFunc() - return c(text) -} - // Colorized renders colored ASCII tables with customizable borders, colors, and alignments. type Colorized struct { config ColorizedConfig // Renderer configuration @@ -126,7 +103,7 @@ func NewColorized(configs ...ColorizedConfig) *Colorized { tw.Row: tw.AlignLeft, tw.Footer: tw.AlignRight, }, - logger: ll.New("colorized", ll.WithHandler(lh.NewMemoryHandler())), + logger: ll.New("colorized", ll.WithHandler(lh.NewMemoryHandler())).Disable(), } // Log initialization details f.logger.Debugf("Initialized Colorized renderer with symbols: Center=%q, Row=%q, Column=%q", f.config.Symbols.Center(), f.config.Symbols.Row(), f.config.Symbols.Column()) @@ -377,19 +354,27 @@ func (c *Colorized) formatCell(content string, width int, padding tw.Padding, al // Calculate visual width of content contentVisualWidth := twwidth.Width(content) - // Set default padding characters + // Set padding characters padLeftCharStr := padding.Left - // if padLeftCharStr == tw.Empty { - // padLeftCharStr = tw.Space - //} padRightCharStr := padding.Right - // if padRightCharStr == tw.Empty { - // padRightCharStr = tw.Space - //} + + // Determine the character to use for alignment filling. + // We default to the padding character defined for that side. 
+ // If the padding character is empty (e.g. Overwrite: true), we MUST fallback to Space + // for the alignment calculation to prevent the content from shifting incorrectly. + alignFillLeft := padLeftCharStr + if alignFillLeft == tw.Empty { + alignFillLeft = tw.Space + } + alignFillRight := padRightCharStr + if alignFillRight == tw.Empty { + alignFillRight = tw.Space + } // Calculate padding widths definedPadLeftWidth := twwidth.Width(padLeftCharStr) definedPadRightWidth := twwidth.Width(padRightCharStr) + // Calculate available width for content and alignment availableForContentAndAlign := max(width-definedPadLeftWidth-definedPadRightWidth, 0) @@ -404,21 +389,27 @@ func (c *Colorized) formatCell(content string, width int, padding tw.Padding, al remainingSpaceForAlignment := max(availableForContentAndAlign-contentVisualWidth, 0) // Apply alignment padding + // Note: We use tw.Pad* helpers here instead of strings.Repeat to handle multi-byte fill chars correctly. leftAlignmentPadSpaces := tw.Empty rightAlignmentPadSpaces := tw.Empty + switch align { case tw.AlignLeft: - rightAlignmentPadSpaces = strings.Repeat(tw.Space, remainingSpaceForAlignment) + rightAlignmentPadSpaces = tw.PadRight(tw.Empty, alignFillRight, remainingSpaceForAlignment) case tw.AlignRight: - leftAlignmentPadSpaces = strings.Repeat(tw.Space, remainingSpaceForAlignment) + leftAlignmentPadSpaces = tw.PadLeft(tw.Empty, alignFillLeft, remainingSpaceForAlignment) case tw.AlignCenter: leftSpacesCount := remainingSpaceForAlignment / 2 rightSpacesCount := remainingSpaceForAlignment - leftSpacesCount - leftAlignmentPadSpaces = strings.Repeat(tw.Space, leftSpacesCount) - rightAlignmentPadSpaces = strings.Repeat(tw.Space, rightSpacesCount) + if leftSpacesCount > 0 { + leftAlignmentPadSpaces = tw.PadLeft(tw.Empty, alignFillLeft, leftSpacesCount) + } + if rightSpacesCount > 0 { + rightAlignmentPadSpaces = tw.PadRight(tw.Empty, alignFillRight, rightSpacesCount) + } default: // Default to left alignment - 
rightAlignmentPadSpaces = strings.Repeat(tw.Space, remainingSpaceForAlignment) + rightAlignmentPadSpaces = tw.PadRight(tw.Empty, alignFillRight, remainingSpaceForAlignment) } // Apply colors to content and padding @@ -467,7 +458,7 @@ func (c *Colorized) formatCell(content string, width int, padding tw.Padding, al sb.WriteString(coloredPadRight) output := sb.String() - // Adjust output width if necessary + // Adjust output width if necessary (safety check) currentVisualWidth := twwidth.Width(output) if currentVisualWidth != width { c.logger.Debugf("formatCell MISMATCH: content='%s', target_w=%d. Calculated parts width = %d. String: '%s'", diff --git a/vendor/github.com/olekukonko/tablewriter/renderer/html.go b/vendor/github.com/olekukonko/tablewriter/renderer/html.go index d02594b9ce..c6fa2c919f 100644 --- a/vendor/github.com/olekukonko/tablewriter/renderer/html.go +++ b/vendor/github.com/olekukonko/tablewriter/renderer/html.go @@ -64,7 +64,7 @@ func NewHTML(configs ...HTMLConfig) *HTML { tableStarted: false, tbodyStarted: false, tfootStarted: false, - logger: ll.New("html"), + logger: ll.New("html").Disable(), } } diff --git a/vendor/github.com/olekukonko/tablewriter/renderer/markdown.go b/vendor/github.com/olekukonko/tablewriter/renderer/markdown.go index 936889de37..c8ff55a9d0 100644 --- a/vendor/github.com/olekukonko/tablewriter/renderer/markdown.go +++ b/vendor/github.com/olekukonko/tablewriter/renderer/markdown.go @@ -37,7 +37,7 @@ func NewMarkdown(configs ...tw.Rendition) *Markdown { if len(configs) > 0 { cfg = mergeMarkdownConfig(cfg, configs[0]) } - return &Markdown{config: cfg, logger: ll.New("markdown")} + return &Markdown{config: cfg, logger: ll.New("markdown").Disable()} } // mergeMarkdownConfig combines user-provided config with Markdown defaults, enforcing Markdown-specific settings. 
diff --git a/vendor/github.com/olekukonko/tablewriter/renderer/ocean.go b/vendor/github.com/olekukonko/tablewriter/renderer/ocean.go index 230220d26b..c6ff2b9bc1 100644 --- a/vendor/github.com/olekukonko/tablewriter/renderer/ocean.go +++ b/vendor/github.com/olekukonko/tablewriter/renderer/ocean.go @@ -36,7 +36,7 @@ func NewOcean(oceanConfig ...OceanConfig) *Ocean { config: cfg, oceanConfig: oCfg, fixedWidths: tw.NewMapper[int, int](), - logger: ll.New("ocean"), + logger: ll.New("ocean").Disable(), } r.resetState() return r diff --git a/vendor/github.com/olekukonko/tablewriter/renderer/svg.go b/vendor/github.com/olekukonko/tablewriter/renderer/svg.go index b725754cf1..c7d7f1c180 100644 --- a/vendor/github.com/olekukonko/tablewriter/renderer/svg.go +++ b/vendor/github.com/olekukonko/tablewriter/renderer/svg.go @@ -139,7 +139,7 @@ func NewSVG(configs ...SVGConfig) *SVG { allVisualLineData: make([][][]string, 3), allVisualLineCtx: make([][]tw.Formatting, 3), vMergeTrack: make(map[int]int), - logger: ll.New("svg"), + logger: ll.New("svg").Disable(), } for i := 0; i < 3; i++ { r.allVisualLineData[i] = make([][]string, 0) diff --git a/vendor/github.com/olekukonko/tablewriter/renderer/tint.go b/vendor/github.com/olekukonko/tablewriter/renderer/tint.go new file mode 100644 index 0000000000..4090a0f639 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/renderer/tint.go @@ -0,0 +1,25 @@ +package renderer + +import "github.com/fatih/color" + +// Colors is a slice of color attributes for use with fatih/color, such as color.FgWhite or color.Bold. +type Colors []color.Attribute + +// Tint defines foreground and background color settings for table elements, with optional per-column overrides. 
+type Tint struct { + FG Colors // Foreground color attributes + BG Colors // Background color attributes + Columns []Tint // Per-column color settings +} + +// Apply applies the Tint's foreground and background colors to the given text, returning the text unchanged if no colors are set. +func (t Tint) Apply(text string) string { + if len(t.FG) == 0 && len(t.BG) == 0 { + return text + } + // Combine foreground and background colors + combinedColors := append(t.FG, t.BG...) + // Create a color function and apply it to the text + c := color.New(combinedColors...).SprintFunc() + return c(text) +} diff --git a/vendor/github.com/olekukonko/tablewriter/tablewriter.go b/vendor/github.com/olekukonko/tablewriter/tablewriter.go index 464324a1d9..9fd7404565 100644 --- a/vendor/github.com/olekukonko/tablewriter/tablewriter.go +++ b/vendor/github.com/olekukonko/tablewriter/tablewriter.go @@ -4,10 +4,10 @@ import ( "bytes" "io" "math" - "os" "reflect" "runtime" "strings" + "unicode" "github.com/olekukonko/errors" "github.com/olekukonko/ll" @@ -413,13 +413,21 @@ func (t *Table) Options(opts ...Option) *Table { t.logger = ll.New("table").Handler(lh.NewTextHandler(t.trace)) } + // Disable and suspend the logger before applying options to prevent premature + // debug output from renderer methods (e.g., Blueprint.Rendition) triggered by + // options like WithRendition. Without this, a previously-enabled logger would + // still be active on the renderer during option application, causing debug + // messages even when WithDebug(false) is being applied. 
+ t.logger.Disable() + t.logger.Suspend() + t.renderer.Logger(t.logger) + // loop through options for _, opt := range opts { opt(t) } // force debugging mode if set - // This should be move away form WithDebug if t.config.Debug { t.logger.Enable() t.logger.Resume() @@ -434,11 +442,28 @@ func (t *Table) Options(opts ...Option) *Table { goArch := runtime.GOARCH numCPU := runtime.NumCPU() - t.logger.Infof("Environment: LC_CTYPE=%s, LANG=%s, TERM=%s", os.Getenv("LC_CTYPE"), os.Getenv("LANG"), os.Getenv("TERM")) - t.logger.Infof("Go Runtime: Version=%s, OS=%s, Arch=%s, CPUs=%d", goVersion, goOS, goArch, numCPU) + // Use the new struct-based info. + // No type assertions or magic strings needed. + info := twwidth.Debugging() + + t.logger.Infof("Go Runtime: Version=%s, OS=%s, Arch=%s, CPUs=%d", + goVersion, goOS, goArch, numCPU) + + t.logger.Infof("Environment: LC_CTYPE=%s, LANG=%s, TERM=%s, TERM_PROGRAM=%s", + info.Raw.LC_CTYPE, + info.Raw.LANG, + info.Raw.TERM, + info.Raw.TERM_PROGRAM, + ) + + t.logger.Infof("East Asian Detection: Auto=%v, Mode=%s, ModernEnv=%v, CJKLocale=%v", + info.AutoUseEastAsian, + info.DetectionMode, + info.Derived.IsModernEnv, + info.Derived.IsCJKLocale, + ) // send logger to renderer - // this will overwrite the default logger t.renderer.Logger(t.logger) return t } @@ -531,16 +556,37 @@ func (t *Table) Counters() []tw.Counter { } // Trimmer trims whitespace from a string based on the Table’s configuration. -// It conditionally applies strings.TrimSpace to the input string if the TrimSpace behavior -// is enabled in t.config.Behavior, otherwise returning the string unchanged. This method -// is used in the logging library to format strings for tabular output, ensuring consistent -// display in log messages. Thread-safe as it only reads configuration and operates on the -// input string. +// It conditionally applies trimming based on TrimSpace and TrimTab settings. 
+// +// Behavior Matrix: +// - TrimSpace=On, TrimTab=On: Uses strings.TrimSpace (removes all Unicode space including \t). +// - TrimSpace=On, TrimTab=Off: Removes spaces/newlines but PRESERVES tabs. +// - TrimSpace=Off, TrimTab=On: Removes only tabs. +// - TrimSpace=Off, TrimTab=Off: Returns string unchanged. func (t *Table) Trimmer(str string) string { - if t.config.Behavior.TrimSpace.Enabled() { + trimSpace := t.config.Behavior.TrimSpace.Enabled() + trimTab := t.config.Behavior.TrimTab.Enabled() + + // Fast Path 1: If both are enabled (Default), use the stdlib optimized TrimSpace + if trimSpace && trimTab { return strings.TrimSpace(str) } - return str + + // Fast Path 2: If both are disabled, return raw string + if !trimSpace && !trimTab { + return str + } + + // Granular Trimming via TrimFunc + return strings.TrimFunc(str, func(r rune) bool { + if twwidth.IsTab(r) { + return trimTab // Return true to trim if TrimTab is On + } + if trimSpace { + return unicode.IsSpace(r) // Trim other whitespace if TrimSpace is On + } + return false + }) } // appendSingle adds a single row to the table's row data. 
@@ -920,6 +966,13 @@ func (t *Table) prepareContent(cells []string, config tw.CellConfig) [][]string cellContent = t.Trimmer(cellContent) + if strings.Contains(cellContent, twwidth.TabString.String()) { + // Get the detected width from the singleton + width := twwidth.TabWidth() + spaces := strings.Repeat(tw.Space, width) + cellContent = strings.ReplaceAll(cellContent, twwidth.TabString.String(), spaces) + } + colPad := config.Padding.Global if i < len(config.Padding.PerColumn) && config.Padding.PerColumn[i].Paddable() { colPad = config.Padding.PerColumn[i] @@ -941,7 +994,7 @@ func (t *Table) prepareContent(cells []string, config tw.CellConfig) [][]string switch config.Formatting.AutoWrap { case tw.WrapNormal: var wrapped []string - if t.config.Behavior.TrimSpace.Enabled() { + if t.config.Behavior.TrimSpace.Enabled() && t.config.Behavior.TrimTab.Enabled() { wrapped, _ = twwarp.WrapString(line, effectiveContentMaxWidth) } else { wrapped, _ = twwarp.WrapStringWithSpaces(line, effectiveContentMaxWidth) diff --git a/vendor/github.com/olekukonko/tablewriter/tw/types.go b/vendor/github.com/olekukonko/tablewriter/tw/types.go index 54a9b86ef8..bc84756a35 100644 --- a/vendor/github.com/olekukonko/tablewriter/tw/types.go +++ b/vendor/github.com/olekukonko/tablewriter/tw/types.go @@ -166,8 +166,9 @@ type Struct struct { // Behavior defines settings that control table rendering behaviors, such as column visibility and content formatting. type Behavior struct { AutoHide State // AutoHide determines whether empty columns are hidden. Ignored in streaming mode. - TrimSpace State // TrimSpace enables trimming of leading and trailing spaces from cell content. + TrimSpace State // TrimSpace determines trimming of leading and trailing spaces from cell content. TrimLine State // TrimLine determines whether empty visual lines within a cell are collapsed. + TrimTab State // TrimTab determines trimming of leading and trailing tabs from cell content. 
Header Control // Header specifies control settings for the table header. Footer Control // Footer specifies control settings for the table footer. diff --git a/vendor/github.com/olekukonko/tablewriter/zoo.go b/vendor/github.com/olekukonko/tablewriter/zoo.go index b11c4b79b7..c24a53d1d5 100644 --- a/vendor/github.com/olekukonko/tablewriter/zoo.go +++ b/vendor/github.com/olekukonko/tablewriter/zoo.go @@ -991,7 +991,7 @@ func (t *Table) calculateContentMaxWidth(colIdx int, config tw.CellConfig, padLe constraintTotalCellWidth := 0 hasConstraint := false - // 1. Check new Widths.PerColumn (highest priority) + // Check new Widths.PerColumn (highest priority) if t.config.Widths.Constrained() { if colWidth, ok := t.config.Widths.PerColumn.OK(colIdx); ok && colWidth > 0 { @@ -1001,7 +1001,7 @@ func (t *Table) calculateContentMaxWidth(colIdx int, config tw.CellConfig, padLe colIdx, constraintTotalCellWidth) } - // 2. Check new Widths.Global + // Check new Widths.Global if !hasConstraint && t.config.Widths.Global > 0 { constraintTotalCellWidth = t.config.Widths.Global hasConstraint = true @@ -1009,7 +1009,7 @@ func (t *Table) calculateContentMaxWidth(colIdx int, config tw.CellConfig, padLe } } - // 3. Fall back to legacy ColMaxWidths.PerColumn (backward compatibility) + // Fall back to legacy ColMaxWidths.PerColumn (backward compatibility) if !hasConstraint && config.ColMaxWidths.PerColumn != nil { if colMax, ok := config.ColMaxWidths.PerColumn.OK(colIdx); ok && colMax > 0 { constraintTotalCellWidth = colMax @@ -1019,7 +1019,7 @@ func (t *Table) calculateContentMaxWidth(colIdx int, config tw.CellConfig, padLe } } - // 4. Fall back to legacy ColMaxWidths.Global + // Fall back to legacy ColMaxWidths.Global if !hasConstraint && config.ColMaxWidths.Global > 0 { constraintTotalCellWidth = config.ColMaxWidths.Global hasConstraint = true @@ -1027,7 +1027,7 @@ func (t *Table) calculateContentMaxWidth(colIdx int, config tw.CellConfig, padLe constraintTotalCellWidth) } - // 5. 
Fall back to table MaxWidth if auto-wrapping + // Fall back to table MaxWidth if auto-wrapping if !hasConstraint && t.config.MaxWidth > 0 && config.Formatting.AutoWrap != tw.WrapNone { constraintTotalCellWidth = t.config.MaxWidth hasConstraint = true @@ -1217,14 +1217,10 @@ func (t *Table) convertToString(value interface{}) string { // convertItemToCells is responsible for converting a single input item (which could be // a struct, a basic type, or an item implementing Stringer/Formatter) into a slice // of strings, where each string represents a cell for the table row. -// zoo.go - -// convertItemToCells is responsible for converting a single input item into a slice of strings. -// It now uses the unified struct parser for structs. func (t *Table) convertItemToCells(item interface{}) ([]string, error) { t.logger.Debugf("convertItemToCells: Converting item of type %T", item) - // 1. User-defined table-wide stringer (t.stringer) takes highest precedence. + // User-defined table-wide stringer (t.stringer) takes highest precedence. if t.stringer != nil { res, err := t.convertToStringer(item) if err == nil { @@ -1234,13 +1230,13 @@ func (t *Table) convertItemToCells(item interface{}) ([]string, error) { t.logger.Warnf("convertItemToCells: Custom table stringer was set but incompatible for type %T: %v. Will attempt other methods.", item, err) } - // 2. Handle untyped nil directly. + // Handle untyped nil directly. if item == nil { t.logger.Debugf("convertItemToCells: Item is untyped nil. Returning single empty cell.") return []string{""}, nil } - // 3. Use the new unified struct parser. It handles pointers and embedding. + // Use the new unified struct parser. It handles pointers and embedding. // We only care about the values it returns. _, values := t.extractFieldsAndValuesFromStruct(item) if values != nil { @@ -1248,7 +1244,7 @@ func (t *Table) convertItemToCells(item interface{}) ([]string, error) { return values, nil } - // 4. 
Fallback for any other single item (e.g., basic types, or types that implement Stringer/Formatter). + // Fallback for any other single item (e.g., basic types, or types that implement Stringer/Formatter). // This code path is now for non-struct types. if formatter, ok := item.(tw.Formatter); ok { t.logger.Debugf("convertItemToCells: Item (non-struct, type %T) is tw.Formatter. Using Format().", item) diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/capabilities.go b/vendor/github.com/open-policy-agent/opa/capabilities/capabilities.go index ba32cf977e..5482720f7c 100644 --- a/vendor/github.com/open-policy-agent/opa/capabilities/capabilities.go +++ b/vendor/github.com/open-policy-agent/opa/capabilities/capabilities.go @@ -2,9 +2,6 @@ // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. -//go:build go1.16 -// +build go1.16 - package capabilities import ( diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.10.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.10.0.json new file mode 100644 index 0000000000..0a37621d0c --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.10.0.json @@ -0,0 +1,4867 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { 
+ "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + 
"name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + 
"result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + 
"type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], 
+ "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { 
+ "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ 
+ { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + 
"type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + 
"args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } 
+ ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + 
"type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + 
"of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + 
"type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + 
"type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" 
+ } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + 
"dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + 
"type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": 
{ + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": 
"number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": 
"function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + 
], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.11.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.11.0.json new file mode 100644 index 0000000000..d58fc6760f --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.11.0.json @@ -0,0 +1,4878 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + 
}, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + 
"args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + 
"args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": 
"array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + 
"type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": 
{ + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + 
"type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" 
+ }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } 
+ ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + 
"decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" 
+ } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + 
"type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + 
}, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + 
{ + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + 
"type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" 
+ }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + 
"of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.11.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.11.1.json new file mode 100644 index 0000000000..d58fc6760f --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.11.1.json @@ -0,0 +1,4878 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + 
"type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": 
"any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + 
"name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": 
{ + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + 
"decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, 
+ { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + 
"type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": 
[ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + 
{ + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + 
"decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": 
"array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, 
+ { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { 
+ "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + 
}, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": 
"array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + 
"type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.0.json new file mode 100644 index 0000000000..e4bf21abf3 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.0.json @@ -0,0 +1,4896 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { 
+ "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": 
"cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { 
+ "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + 
"type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + 
"type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + 
"args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + 
"type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": 
{ + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": 
"set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + 
"of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + 
"dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + 
"result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { 
+ "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" 
+ }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } 
+ }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + 
"args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": 
"array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + 
"name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": 
"time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" 
+ } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } 
+ ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.1.json new file mode 100644 index 0000000000..e4bf21abf3 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.1.json @@ -0,0 +1,4896 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + 
"of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { 
+ "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, 
+ { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + 
"type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + 
"type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, 
+ "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": 
{ + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + 
"type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + 
"dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } 
+ }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + 
"of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" 
+ }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": 
"numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + 
"type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": 
"+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + 
"type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + 
"type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + 
"type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + 
{ + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + 
}, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.2.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.2.json new file mode 
100644 index 0000000000..e4bf21abf3 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.2.json @@ -0,0 +1,4896 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + 
"decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + 
"decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": 
"string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, 
+ "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": 
"array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": 
"function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": 
"hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + 
}, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true 
+ }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + 
}, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": 
"op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + 
"args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + 
"type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + 
"type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + 
"decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + 
"args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + 
"type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + 
{ + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": 
"number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } 
+ } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.3.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.3.json new file mode 100644 index 0000000000..e4bf21abf3 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.3.json @@ -0,0 +1,4896 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": 
"any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + 
"name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + 
}, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + 
"type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, 
+ { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" 
+ } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + 
{ + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + 
"name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" 
+ } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + 
} + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": 
{ + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { 
+ "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { 
+ "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { 
+ "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + 
"args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + 
"type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + 
"name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": 
"function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + 
}, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" 
+ } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + 
"type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" 
+ } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + 
"type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.0.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.0.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + 
"dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, 
+ "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + 
}, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + 
"type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], 
+ "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { 
+ "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ 
+ { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + 
"type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + 
"decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + 
} + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { 
+ "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + 
"type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + 
], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + 
"type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + 
"args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + 
"dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + 
}, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": 
true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } 
+ }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": 
"uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.1.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.1.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, 
+ "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { 
+ "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + 
"deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + 
"decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": 
"object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + 
"type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { 
+ "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { 
+ "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + 
"name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + 
"args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + 
"type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": 
"function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": 
"string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + 
"dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + 
"type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + 
"name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + 
} + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + 
], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" 
+ } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { 
+ "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.2.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.2.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ 
b/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.2.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": 
"number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + 
"decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + 
"args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + 
"type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + 
"name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { 
+ "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + 
{ + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + 
}, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" 
+ }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, 
+ "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ 
+ { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + 
"type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + 
"key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + 
"type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" 
+ }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + 
}, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + 
"dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": 
"string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + 
} + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + 
"type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": 
"string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" 
+ } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.14.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.14.0.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.14.0.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + 
"name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": 
"number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + 
}, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + 
"name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true 
+ }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + 
"type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + 
"result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": 
"any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": 
"net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + 
"type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" 
+ }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": 
"function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": 
[ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + 
"type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + 
"type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + 
"name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + 
"type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.14.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.14.1.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.14.1.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + 
"infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + 
"name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + 
"type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + 
{ + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + 
}, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, 
+ { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": 
"any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { 
+ "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": 
"indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": 
"function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": 
"indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + 
"dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + 
"type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + 
}, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + 
"type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, 
+ { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + 
}, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + 
"of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" 
+ } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": 
"number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": 
"string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.0.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.0.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": 
"number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": 
"any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + 
"type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + 
"type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": 
"string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + 
"type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + 
}, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + 
{ + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + 
"decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": 
"io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + 
} + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + 
} + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + 
"type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { 
+ "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + 
"type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } 
+ }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { 
+ "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + 
{ + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + 
} + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + 
"args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + 
}, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + 
} + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + 
"version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.1.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.1.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + 
"type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + 
"type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" 
+ }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + 
}, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + 
"type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + 
"type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + 
}, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + 
"infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": 
{ + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + 
} + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + 
"key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + 
"name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, 
+ { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { 
+ "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": 
{ + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { 
+ "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + 
"decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", 
+ "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + 
"type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": 
"array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + 
"type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": 
{ + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } 
+ ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json new file mode 100644 index 0000000000..0a37621d0c --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json @@ -0,0 +1,4867 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" 
+ }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": 
{ + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + 
"type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": 
"crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + 
"type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + 
"type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + 
"type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": 
"internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": 
{ + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + 
], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": 
"desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + 
}, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + 
"dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": 
"re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": 
{ + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" 
+ }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + 
"name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + 
{ + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + 
"relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go b/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go index 836aa586b9..98093b774e 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go +++ b/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go @@ -72,7 +72,7 @@ func LoadWasmResolversFromStore(ctx context.Context, store storage.Store, txn st var resolvers []*wasm.Resolver if len(resolversToLoad) > 0 { // Get a full snapshot of the current data (including any from "outside" the bundles) - data, err := store.Read(ctx, txn, storage.Path{}) + data, err := store.Read(ctx, txn, storage.RootPath) if err != nil { return nil, fmt.Errorf("failed to initialize wasm runtime: %s", err) } diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv index 473497abbd..10dc4d482e 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv +++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv @@ -111,6 +111,9 @@ opa_arith_rem,opa_bf_to_number opa_array_concat,opa_value_type opa_array_concat,opa_array_with_cap opa_array_concat,opa_array_append +opa_array_flatten,opa_value_type 
+opa_array_flatten,opa_array_with_cap +opa_array_flatten,opa_array_append opa_array_slice,opa_value_type opa_array_slice,opa_number_try_int opa_array_slice,opa_array_with_cap @@ -194,7 +197,9 @@ opa_cidr_contains,parse_ip opa_cidr_contains,opa_boolean parse_cidr,parse_ip parse_cidr,opa_atoi64 +parse_ip,inet_pton4 parse_ip,memchr +inet_pton4,memchr opa_cidr_intersects,opa_value_type opa_cidr_intersects,parse_cidr opa_cidr_intersects,opa_boolean @@ -347,15 +352,15 @@ opa_value_dump,opa_json_writer_write move_freelists,opa_abort opa_heap_blocks_stash,move_freelists opa_heap_blocks_restore,move_freelists -opa_malloc,opa_free_bulk_commit +opa_malloc,merge_sort_blocks +opa_malloc,merge_blocks opa_malloc,opa_abort -opa_free_bulk_commit,merge_sort_blocks +merge_sort_blocks,merge_sort_blocks +merge_sort_blocks,merge_blocks opa_realloc,opa_malloc opa_realloc,memcpy -opa_realloc,opa_free opa_builtin_cache_get,opa_abort opa_builtin_cache_set,opa_abort -merge_sort_blocks,merge_sort_blocks opa_memoize_init,opa_malloc opa_memoize_init,opa_object opa_memoize_push,opa_malloc @@ -583,6 +588,8 @@ opa_sets_union,opa_value_type opa_sets_union,opa_set opa_sets_union,opa_set_add opa_sets_union,opa_value_free_shallow +opa_strlen,strlen +opa_itoa,strlen opa_strings_any_prefix_match,opa_value_type opa_strings_any_prefix_match,opa_value_iter opa_strings_any_prefix_match,opa_value_get @@ -719,6 +726,18 @@ opa_strings_upper,opa_abort opa_strings_upper,opa_unicode_to_upper opa_strings_upper,opa_realloc opa_strings_upper,opa_unicode_encode_utf8 +to_string,opa_value_type +to_string,opa_string_terminated +to_string,opa_value_dump +to_string,opa_strlen +to_string,opa_string_allocated +opa_template_string,opa_value_type +opa_template_string,opa_array_with_cap +opa_template_string,to_string +opa_template_string,opa_array_append +opa_template_string,opa_malloc +opa_template_string,memcpy +opa_template_string,opa_string_allocated opa_types_is_number,opa_value_type opa_types_is_number,opa_boolean 
opa_types_is_string,opa_value_type @@ -789,8 +808,8 @@ opa_object_keys,opa_value_compare opa_object_keys,opa_value_compare_number opa_object_keys,opa_strncmp opa_object_keys,opa_value_compare_object -opa_object_keys,opa_abort opa_object_keys,opa_value_compare_set +opa_object_keys,opa_abort opa_array_free,__opa_value_free opa_array_free,opa_free opa_array_free,opa_free_bulk @@ -836,17 +855,16 @@ opa_number_ref,opa_malloc opa_number_int,opa_malloc opa_string,opa_malloc opa_value_shallow_copy_object,opa_malloc +opa_value_shallow_copy_object,memset opa_value_shallow_copy_object,opa_value_iter opa_value_shallow_copy_object,opa_value_get opa_value_shallow_copy_object,__opa_object_insert -opa_value_shallow_copy_set,opa_malloc -opa_value_shallow_copy_set,opa_value_iter -opa_value_shallow_copy_set,opa_set_add opa_set_add,opa_value_hash opa_set_add,opa_value_compare opa_set_add,__opa_set_grow opa_set_add,opa_malloc __opa_set_grow,opa_malloc +__opa_set_grow,memset __opa_set_grow,opa_value_hash __opa_set_grow,opa_value_compare_number __opa_set_grow,opa_strncmp @@ -857,7 +875,9 @@ __opa_set_grow,opa_abort __opa_set_grow,opa_free opa_value_shallow_copy,opa_malloc opa_value_shallow_copy,opa_value_shallow_copy_object -opa_value_shallow_copy,opa_value_shallow_copy_set +opa_value_shallow_copy,memset +opa_value_shallow_copy,opa_set_add +opa_value_shallow_copy,opa_value_iter opa_value_shallow_copy,opa_abort opa_value_transitive_closure,opa_malloc opa_value_transitive_closure,__opa_value_transitive_closure @@ -881,7 +901,9 @@ opa_array_with_cap,opa_free opa_object,opa_malloc opa_set,opa_malloc opa_set_with_cap,opa_malloc +opa_set_with_cap,memset __opa_object_grow,opa_malloc +__opa_object_grow,memset __opa_object_grow,opa_value_hash __opa_object_grow,opa_value_compare_number __opa_object_grow,opa_strncmp @@ -908,170 +930,191 @@ opa_lookup,opa_abort opa_lookup,opa_atoi64 opa_mapping_init,opa_json_parse opa_mapping_lookup,opa_lookup 
-node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\29 -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29 -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,operator\20delete\28void*\29 -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\29 -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,escape\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,operator\20new\28unsigned\20long\29 -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,memcpy -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,abort -escape\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::push_back\28char\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,operator\20new\28unsigned\20long\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,lexer::lexer\28char\20const*\2c\20unsigned\20long\29 
-glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,glob_parse\28lexer*\2c\20node**\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::compare\28unsigned\20long\2c\20unsigned\20long\2c\20char\20const*\2c\20unsigned\20long\29\20const -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,lexer::~lexer\28\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,operator\20delete\28void*\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,opa_unicode_decode_utf8 
-glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,escape\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,node::~node\28\29 -lexer::~lexer\28\29,operator\20delete\28void*\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\29 
+node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::push_back\28char\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,operator\20new\28unsigned\20long\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,memmove +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 
+glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,operator\20new\28unsigned\20long\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,lexer::lexer\28char\20const*\2c\20unsigned\20long\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,glob_parse\28lexer*\2c\20node**\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,lexer::~lexer\28\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,opa_unicode_decode_utf8 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::push_back\28char\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29 
+glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,node::~node\28\29 +std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::__libcpp_verbose_abort\28char\20const*\2c\20...\29 +lexer::~lexer\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 lexer::next\28token*\29,strlen lexer::next\28token*\29,operator\20new\28unsigned\20long\29 -lexer::next\28token*\29,memcpy -lexer::next\28token*\29,operator\20delete\28void*\29 -lexer::next\28token*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -lexer::next\28token*\29,abort +lexer::next\28token*\29,memmove +lexer::next\28token*\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+lexer::next\28token*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::operator=\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 +lexer::next\28token*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 lexer::next\28token*\29,lexer::fetch_item\28\29 lexer::fetch_item\28\29,opa_unicode_decode_utf8 lexer::fetch_item\28\29,operator\20new\28unsigned\20long\29 -lexer::fetch_item\28\29,memcpy -lexer::fetch_item\28\29,operator\20delete\28void*\29 +lexer::fetch_item\28\29,memmove +lexer::fetch_item\28\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29 lexer::fetch_item\28\29,lexer::fetch_range\28\29 lexer::fetch_item\28\29,lexer::fetch_text\28int\20const*\29 -lexer::fetch_item\28\29,abort +lexer::fetch_item\28\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 lexer::fetch_range\28\29,opa_unicode_decode_utf8 lexer::fetch_range\28\29,operator\20new\28unsigned\20long\29 -lexer::fetch_range\28\29,memcpy -lexer::fetch_range\28\29,operator\20delete\28void*\29 +lexer::fetch_range\28\29,memmove 
+lexer::fetch_range\28\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29 lexer::fetch_range\28\29,lexer::fetch_text\28int\20const*\29 -lexer::fetch_range\28\29,abort +lexer::fetch_range\28\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 lexer::fetch_text\28int\20const*\29,opa_unicode_decode_utf8 lexer::fetch_text\28int\20const*\29,operator\20new\28unsigned\20long\29 lexer::fetch_text\28int\20const*\29,memcpy -lexer::fetch_text\28int\20const*\29,operator\20delete\28void*\29 +lexer::fetch_text\28int\20const*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +lexer::fetch_text\28int\20const*\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29 +lexer::fetch_text\28int\20const*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 lexer::fetch_text\28int\20const*\29,opa_malloc +lexer::fetch_text\28int\20const*\29,memmove +lexer::fetch_text\28int\20const*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29 lexer::fetch_text\28int\20const*\29,opa_free -lexer::fetch_text\28int\20const*\29,abort +lexer::fetch_text\28int\20const*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 +std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__libcpp_verbose_abort\28char\20const*\2c\20...\29 +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 node::~node\28\29,node::~node\28\29 
-node::~node\28\29,operator\20delete\28void*\29 -node::insert\28node*\29,operator\20new\28unsigned\20long\29 -node::insert\28node*\29,memcpy -node::insert\28node*\29,operator\20delete\28void*\29 -node::insert\28node*\29,abort +node::~node\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 glob_parse\28lexer*\2c\20node**\29,operator\20new\28unsigned\20long\29 -glob_parse\28lexer*\2c\20node**\29,std::__1::basic_string\2c\20std::__1::allocator\20>::compare\28unsigned\20long\2c\20unsigned\20long\2c\20char\20const*\2c\20unsigned\20long\29\20const +glob_parse\28lexer*\2c\20node**\29,operator\20delete\28void*\2c\20unsigned\20long\29 glob_parse\28lexer*\2c\20node**\29,node::~node\28\29 -glob_parse\28lexer*\2c\20node**\29,operator\20delete\28void*\29 -glob_parse\28lexer*\2c\20node**\29,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 +glob_parse\28lexer*\2c\20node**\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29 
parser_main\28state*\2c\20lexer*\29,lexer::next\28token*\29 -parser_main\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 +parser_main\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::operator=\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 parser_main\28state*\2c\20lexer*\29,operator\20new\28unsigned\20long\29 -parser_main\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -parser_main\28state*\2c\20lexer*\29,node::insert\28node*\29 -parser_main\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\29 -parser_main\28state*\2c\20lexer*\29,operator\20delete\28void*\29 +parser_main\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29 +parser_main\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29 +parser_main\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\29 +parser_main\28state*\2c\20lexer*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29,memmove 
+std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 parser_range\28state*\2c\20lexer*\29,lexer::next\28token*\29 -parser_range\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\29 -parser_range\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 +parser_range\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\29 +parser_range\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::operator=\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 parser_range\28state*\2c\20lexer*\29,opa_unicode_decode_utf8 parser_range\28state*\2c\20lexer*\29,memcmp -parser_range\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::compare\28unsigned\20long\2c\20unsigned\20long\2c\20char\20const*\2c\20unsigned\20long\29\20const parser_range\28state*\2c\20lexer*\29,operator\20new\28unsigned\20long\29 -parser_range\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -parser_range\28state*\2c\20lexer*\29,node::insert\28node*\29 -parser_range\28state*\2c\20lexer*\29,operator\20delete\28void*\29 +parser_range\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29 +parser_range\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29 
+parser_range\28state*\2c\20lexer*\29,operator\20delete\28void*\2c\20unsigned\20long\29 opa_glob_match,opa_value_type opa_glob_match,opa_value_iter opa_glob_match,opa_value_get opa_glob_match,operator\20new\28unsigned\20long\29 -opa_glob_match,memcpy -opa_glob_match,operator\20delete\28void*\29 -opa_glob_match,void\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>::__push_back_slow_path\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>&&\29 +opa_glob_match,memmove +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::push_back\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&&\29 +opa_glob_match,operator\20delete\28void*\2c\20unsigned\20long\29 opa_glob_match,opa_builtin_cache_get opa_glob_match,opa_builtin_cache_set -opa_glob_match,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -opa_glob_match,std::__1::__hash_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::find\28cache_key\20const&\29 -opa_glob_match,std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 
-opa_glob_match,glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29 -opa_glob_match,std::__1::unordered_map\2c\20std::__1::allocator\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::erase\28std::__1::__hash_map_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20>\29 -opa_glob_match,std::__1::pair\2c\20std::__1::allocator\20>\20>::pair\2c\20std::__1::allocator\20>&\2c\20false>\28cache_key&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>&\29 -opa_glob_match,std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29 +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29 
+opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::find\28cache_key\20const&\29 +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::operator=\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 +opa_glob_match,glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29 +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::unordered_map\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::erase\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::__hash_map_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>>\29 +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::pair\5babi:nn210108\5d\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\2c\200>\28cache_key&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\29 
+opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\28cache_key\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>&&\29 opa_glob_match,opa_string opa_glob_match,opa_regex_match -opa_glob_match,abort -void\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>::__push_back_slow_path\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>&&\29,operator\20new\28unsigned\20long\29 -void\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>::__push_back_slow_path\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>&&\29,operator\20delete\28void*\29 -void\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>::__push_back_slow_path\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>&&\29,abort 
-std::__1::__hash_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::find\28cache_key\20const&\29,std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>::operator\28\29\28cache_key\20const&\29\20const -std::__1::__hash_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::find\28cache_key\20const&\29,std::__1::equal_to::operator\28\29\28cache_key\20const&\2c\20cache_key\20const&\29\20const -std::__1::unordered_map\2c\20std::__1::allocator\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::erase\28std::__1::__hash_map_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20>\29,std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::remove\28std::__1::__hash_const_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\29 
-std::__1::unordered_map\2c\20std::__1::allocator\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::erase\28std::__1::__hash_map_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20>\29,std::__1::unique_ptr\2c\20std::__1::allocator\20>\20>\2c\20void*>\2c\20std::__1::__hash_node_destructor\2c\20std::__1::allocator\20>\20>\2c\20void*>\20>\20>\20>::~unique_ptr\28\29 -std::__1::pair\2c\20std::__1::allocator\20>\20>::pair\2c\20std::__1::allocator\20>&\2c\20false>\28cache_key&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -std::__1::pair\2c\20std::__1::allocator\20>\20>::pair\2c\20std::__1::allocator\20>&\2c\20false>\28cache_key&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>&\29,operator\20new\28unsigned\20long\29 -std::__1::pair\2c\20std::__1::allocator\20>\20>::pair\2c\20std::__1::allocator\20>&\2c\20false>\28cache_key&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>&\29,abort -std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>::operator\28\29\28cache_key\20const&\29\20const 
-std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,std::__1::equal_to::operator\28\29\28cache_key\20const&\2c\20cache_key\20const&\29\20const -std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,operator\20new\28unsigned\20long\29 -std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,std::__1::__next_prime\28unsigned\20long\29 
-std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__rehash\28unsigned\20long\29 -std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,std::__1::unique_ptr\2c\20std::__1::allocator\20>\20>\2c\20void*>\2c\20std::__1::__hash_node_destructor\2c\20std::__1::allocator\20>\20>\2c\20void*>\20>\20>\20>::~unique_ptr\28\29 -std::__1::unique_ptr\2c\20std::__1::allocator\20>\20>\2c\20void*>\2c\20std::__1::__hash_node_destructor\2c\20std::__1::allocator\20>\20>\2c\20void*>\20>\20>\20>::~unique_ptr\28\29,operator\20delete\28void*\29 
-std::__1::equal_to::operator\28\29\28cache_key\20const&\2c\20cache_key\20const&\29\20const,memcmp -std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 -std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__rehash\28unsigned\20long\29,operator\20delete\28void*\29 -std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__rehash\28unsigned\20long\29,memcmp -std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__rehash\28unsigned\20long\29,abort +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 
+opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::push_back\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::push_back\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&&\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::push_back\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::push_back\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&&\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::__throw_length_error\5babi:nn210108\5d\28\29 
+std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::push_back\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::find\28cache_key\20const&\29,std::_LIBCPP_ABI_NAMESPACE::__hash_memory\28void\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::find\28cache_key\20const&\29,cache_key::operator==\28cache_key\20const&\29\20const 
+std::_LIBCPP_ABI_NAMESPACE::unordered_map\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::erase\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::__hash_map_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>>\29,std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::remove\28std::_LIBCPP_ABI_NAMESPACE::__hash_const_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\29 +std::_LIBCPP_ABI_NAMESPACE::unordered_map\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::erase\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::__hash_map_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>>\29,void\20std::_LIBCPP_ABI_NAMESPACE::__destroy_at\5babi:nn210108\5d\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\200>\28std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>*\29 
+std::_LIBCPP_ABI_NAMESPACE::unordered_map\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::erase\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::__hash_map_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>>\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::pair\5babi:nn210108\5d\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\2c\200>\28cache_key&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::pair\5babi:nn210108\5d\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\2c\200>\28cache_key&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::pair\5babi:nn210108\5d\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\2c\200>\28cache_key&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::__throw_length_error\5babi:nn210108\5d\28\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\28cache_key\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>&&\29,std::_LIBCPP_ABI_NAMESPACE::__hash_memory\28void\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\28cache_key\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>&&\29,cache_key::operator==\28cache_key\20const&\29\20const 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\28cache_key\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>&&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\28cache_key\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>&&\29,std::_LIBCPP_ABI_NAMESPACE::__next_prime\28unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\28cache_key\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>&&\29,void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__do_rehash\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__destroy_at\5babi:nn210108\5d\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\200>\28std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +cache_key::operator==\28cache_key\20const&\29\20const,memcmp +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__do_rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__do_rehash\28unsigned\20long\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__do_rehash\28unsigned\20long\29,memset +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__do_rehash\28unsigned\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 opa_regex_is_valid,opa_value_type opa_regex_is_valid,opa_boolean opa_regex_is_valid,operator\20new\28unsigned\20long\29 -opa_regex_is_valid,memcpy +opa_regex_is_valid,memmove opa_regex_is_valid,re2::RE2::RE2\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29 opa_regex_is_valid,re2::RE2::~RE2\28\29 -opa_regex_is_valid,operator\20delete\28void*\29 -opa_regex_is_valid,abort +opa_regex_is_valid,operator\20delete\28void*\2c\20unsigned\20long\29 +opa_regex_is_valid,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 opa_regex_match,opa_value_type opa_regex_match,operator\20new\28unsigned\20long\29 -opa_regex_match,memcpy 
+opa_regex_match,memmove opa_regex_match,compile\28char\20const*\29 opa_regex_match,re2::RE2::PartialMatchN\28re2::StringPiece\20const&\2c\20re2::RE2\20const&\2c\20re2::RE2::Arg\20const*\20const*\2c\20int\29 opa_regex_match,reuse\28re2::RE2*\29 opa_regex_match,opa_boolean -opa_regex_match,operator\20delete\28void*\29 -opa_regex_match,abort +opa_regex_match,operator\20delete\28void*\2c\20unsigned\20long\29 +opa_regex_match,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 compile\28char\20const*\29,opa_builtin_cache_get compile\28char\20const*\29,operator\20new\28unsigned\20long\29 compile\28char\20const*\29,opa_builtin_cache_set compile\28char\20const*\29,strlen compile\28char\20const*\29,memcpy -compile\28char\20const*\29,std::__1::__hash_iterator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::find\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -compile\28char\20const*\29,operator\20delete\28void*\29 
+compile\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::find\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 +compile\28char\20const*\29,operator\20delete\28void*\2c\20unsigned\20long\29 compile\28char\20const*\29,re2::RE2::RE2\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29 compile\28char\20const*\29,re2::RE2::~RE2\28\29 -compile\28char\20const*\29,abort +compile\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 reuse\28re2::RE2*\29,opa_builtin_cache_get reuse\28re2::RE2*\29,operator\20new\28unsigned\20long\29 reuse\28re2::RE2*\29,opa_builtin_cache_set reuse\28re2::RE2*\29,re2::RE2::~RE2\28\29 -reuse\28re2::RE2*\29,operator\20delete\28void*\29 
-reuse\28re2::RE2*\29,std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::remove\28std::__1::__hash_const_iterator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\29 -reuse\28re2::RE2*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -reuse\28re2::RE2*\29,std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>&&\29
 -std::__1::__hash_iterator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::find\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,memcmp -std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>&&\29,memcmp 
-std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>&&\29,operator\20new\28unsigned\20long\29 
-std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>&&\29,std::__1::__next_prime\28unsigned\20long\29 
-std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>&&\29,std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__rehash\28unsigned\20long\29 +reuse\28re2::RE2*\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+reuse\28re2::RE2*\29,std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::remove\28std::_LIBCPP_ABI_NAMESPACE::__hash_const_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\29 +reuse\28re2::RE2*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29 
+reuse\28re2::RE2*\29,std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>&&\29 
+std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::find\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::__hash_memory\28void\20const*\2c\20unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::find\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,memcmp 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>&&\29,std::_LIBCPP_ABI_NAMESPACE::__hash_memory\28void\20const*\2c\20unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>&&\29,memcmp 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>&&\29,operator\20new\28unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>&&\29,std::_LIBCPP_ABI_NAMESPACE::__next_prime\28unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>&&\29,void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESP
ACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__do_rehash\28unsigned\20long\29 opa_regex_find_all_string_submatch,opa_value_type opa_regex_find_all_string_submatch,opa_number_try_int opa_regex_find_all_string_submatch,operator\20new\28unsigned\20long\29 -opa_regex_find_all_string_submatch,memcpy +opa_regex_find_all_string_submatch,memmove opa_regex_find_all_string_submatch,compile\28char\20const*\29 opa_regex_find_all_string_submatch,opa_array opa_regex_find_all_string_submatch,memset @@ -1079,16 +1122,19 @@ opa_regex_find_all_string_submatch,re2::RE2::Match\28re2::StringPiece\20const&\2 opa_regex_find_all_string_submatch,fullrune opa_regex_find_all_string_submatch,chartorune opa_regex_find_all_string_submatch,opa_array_with_cap +opa_regex_find_all_string_submatch,reuse\28re2::RE2*\29 opa_regex_find_all_string_submatch,opa_malloc +opa_regex_find_all_string_submatch,memcpy opa_regex_find_all_string_submatch,opa_string_allocated opa_regex_find_all_string_submatch,opa_array_append -opa_regex_find_all_string_submatch,reuse\28re2::RE2*\29 -opa_regex_find_all_string_submatch,operator\20delete\28void*\29 -opa_regex_find_all_string_submatch,abort 
-std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 -std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__rehash\28unsigned\20long\29,operator\20delete\28void*\29 
-std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__rehash\28unsigned\20long\29,memcmp -std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__rehash\28unsigned\20long\29,abort +opa_regex_find_all_string_submatch,operator\20delete\28void*\2c\20unsigned\20long\29 +opa_regex_find_all_string_submatch,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +opa_regex_find_all_string_submatch,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__do_rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__do_rehash\28unsigned\20long\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__do_rehash\28unsigned\20long\29,memset 
+void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__do_rehash\28unsigned\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 snprintf_,_vsnprintf _vsnprintf,_ntoa_format _vsnprintf,_ftoa @@ -1311,14 +1357,14 @@ mpd_qplus,opa_abort mpd_qplus,mpd_qfinalize mpd_qadd,mpd_qcopy mpd_qadd,_mpd_fix_nan -mpd_qadd,opa_abort -mpd_qadd,mpd_realloc +mpd_qadd,_mpd_qaddsub_inf mpd_qadd,_mpd_qaddsub mpd_qadd,mpd_qfinalize +_mpd_qaddsub_inf,opa_abort +_mpd_qaddsub_inf,mpd_realloc mpd_qsub,mpd_qcopy mpd_qsub,_mpd_fix_nan -mpd_qsub,opa_abort -mpd_qsub,mpd_realloc +mpd_qsub,_mpd_qaddsub_inf mpd_qsub,_mpd_qaddsub mpd_qsub,mpd_qfinalize mpd_qdiv,_mpd_qdiv @@ -1349,15 +1395,15 @@ _mpd_base_ndivmod,_mpd_qmul_exact _mpd_base_ndivmod,mpd_qshiftr _mpd_base_ndivmod,_mpd_qmul _mpd_base_ndivmod,mpd_qfinalize -_mpd_base_ndivmod,mpd_qsub -_mpd_base_ndivmod,mpd_realloc +_mpd_base_ndivmod,_mpd_qsub_exact _mpd_base_ndivmod,_mpd_qround_to_integral +_mpd_base_ndivmod,_mpd_cmp +_mpd_base_ndivmod,_mpd_qadd_exact _mpd_base_ndivmod,fprintf 
_mpd_base_ndivmod,fwrite _mpd_base_ndivmod,fputc -_mpd_base_ndivmod,_mpd_cmp -_mpd_base_ndivmod,mpd_qadd _mpd_base_ndivmod,mpd_qcopy +_mpd_base_ndivmod,mpd_realloc _mpd_qdivmod,opa_abort _mpd_qdivmod,mpd_qcopy _mpd_qdivmod,_settriple @@ -1373,43 +1419,37 @@ _mpd_qmul,mpd_qcopy _mpd_qmul,_mpd_fix_nan _mpd_qmul,opa_abort _mpd_qmul,mpd_realloc -_mpd_qmul,_mpd_mul_2_le2 _mpd_qmul,memset _mpd_qmul,_mpd_shortmul _mpd_qmul,_mpd_basemul -_mpd_qmul,mpd_switch_to_dyn -_mpd_qmul,mpd_realloc_dyn _mpd_qmul,mpd_calloc _mpd_qmul,_mpd_kmul _mpd_qmul,_mpd_fntmul _mpd_qmul,_mpd_kmul_fnt _mpd_qmul,mpd_seterror +_mpd_qmul,_mpd_mul_2_le2 +_mpd_qmul,mpd_switch_to_dyn +_mpd_qmul,mpd_realloc_dyn _mpd_kmul,opa_abort +_mpd_kmul,_kmul_resultsize _mpd_kmul,mpd_calloc _mpd_kmul,_kmul_worksize _mpd_kmul,_karatsuba_rec -_mpd_kmul,fprintf -_mpd_kmul,fwrite -_mpd_kmul,fputc -_mpd_kmul,abort _mpd_fntmul,opa_abort -_mpd_fntmul,fprintf -_mpd_fntmul,fwrite -_mpd_fntmul,fputc -_mpd_fntmul,abort _mpd_fntmul,mpd_calloc _mpd_fntmul,memcpy _mpd_fntmul,fnt_autoconvolute _mpd_fntmul,fnt_convolute _mpd_fntmul,memset _mpd_fntmul,crt3 +_mpd_fntmul,fprintf +_mpd_fntmul,fwrite +_mpd_fntmul,fputc +_mpd_fntmul,abort _mpd_kmul_fnt,opa_abort +_mpd_kmul_fnt,_kmul_resultsize _mpd_kmul_fnt,mpd_calloc _mpd_kmul_fnt,_kmul_worksize -_mpd_kmul_fnt,fprintf -_mpd_kmul_fnt,fwrite -_mpd_kmul_fnt,fputc -_mpd_kmul_fnt,abort _mpd_kmul_fnt,_karatsuba_rec_fnt mpd_qmul,_mpd_qmul mpd_qmul,mpd_qfinalize @@ -1435,6 +1475,9 @@ mpd_qround_to_intx,_mpd_qround_to_integral mpd_qtrunc,_mpd_qround_to_integral mpd_qfloor,_mpd_qround_to_integral mpd_qceil,_mpd_qround_to_integral +_mpd_qadd_exact,mpd_qadd +_mpd_qadd_exact,opa_abort +_mpd_qadd_exact,mpd_realloc mpd_sizeinbase,opa_abort mpd_sizeinbase,log10 mpd_qexport_u16,opa_abort @@ -1454,6 +1497,13 @@ mpd_qimport_u16,_mpd_shortmul_c mpd_qimport_u16,_mpd_shortadd mpd_qimport_u16,mpd_qfinalize _mpd_basecmp,opa_abort +_mpd_qsub_exact,mpd_qsub +_mpd_qsub_exact,opa_abort +_mpd_qsub_exact,mpd_realloc 
+_kmul_resultsize,fprintf +_kmul_resultsize,fwrite +_kmul_resultsize,fputc +_kmul_resultsize,abort _kmul_worksize,_kmul_worksize _kmul_worksize,fprintf _kmul_worksize,fwrite @@ -1502,78 +1552,79 @@ swap_halfrows_pow2,fprintf swap_halfrows_pow2,fwrite swap_halfrows_pow2,fputc swap_halfrows_pow2,abort -std::__1::__next_prime\28unsigned\20long\29,abort +std::_LIBCPP_ABI_NAMESPACE::__next_prime\28unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::__throw_overflow_error\5babi:nn210108\5d\28char\20const*\29 +std::_LIBCPP_ABI_NAMESPACE::__throw_overflow_error\5babi:nn210108\5d\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::__libcpp_verbose_abort\28char\20const*\2c\20...\29 operator\20new\28unsigned\20long\29,opa_malloc -operator\20delete\28void*\29,opa_free +operator\20delete\28void*\2c\20unsigned\20long\29,opa_free operator\20new\5b\5d\28unsigned\20long\29,opa_malloc operator\20delete\5b\5d\28void*\29,opa_free __cxa_pure_virtual,opa_abort -std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,opa_malloc -std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,memcpy -std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,abort -std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,memcpy -std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,opa_malloc -std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,opa_free -std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,abort 
-std::__1::basic_string\2c\20std::__1::allocator\20>::resize\28unsigned\20long\2c\20char\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28unsigned\20long\2c\20char\29 -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28unsigned\20long\2c\20char\29,opa_malloc -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28unsigned\20long\2c\20char\29,memcpy -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28unsigned\20long\2c\20char\29,opa_free -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28unsigned\20long\2c\20char\29,memset -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28unsigned\20long\2c\20char\29,abort -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29,memcpy -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29,opa_malloc -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29,opa_free -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29,abort -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\29,strlen -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29 -std::__1::basic_string\2c\20std::__1::allocator\20>::push_back\28char\29,opa_malloc -std::__1::basic_string\2c\20std::__1::allocator\20>::push_back\28char\29,memcpy -std::__1::basic_string\2c\20std::__1::allocator\20>::push_back\28char\29,opa_free -std::__1::basic_string\2c\20std::__1::allocator\20>::push_back\28char\29,abort -std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29,memmove -std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29,opa_malloc 
-std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29,memcpy -std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29,opa_free -std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29,abort -std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\2c\20unsigned\20long\29,std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29 -std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\29,strlen -std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29 -std::__1::basic_string\2c\20std::__1::allocator\20>::compare\28unsigned\20long\2c\20unsigned\20long\2c\20char\20const*\2c\20unsigned\20long\29\20const,memcmp -std::__1::basic_string\2c\20std::__1::allocator\20>::compare\28unsigned\20long\2c\20unsigned\20long\2c\20char\20const*\2c\20unsigned\20long\29\20const,abort -void\20std::__1::__sort&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29,unsigned\20int\20std::__1::__sort5&\2c\20int*>\28int*\2c\20int*\2c\20int*\2c\20int*\2c\20int*\2c\20std::__1::__less&\29 -void\20std::__1::__sort&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29,bool\20std::__1::__insertion_sort_incomplete&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29 -void\20std::__1::__sort&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29,void\20std::__1::__sort&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29 -bool\20std::__1::__insertion_sort_incomplete&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29,unsigned\20int\20std::__1::__sort5&\2c\20int*>\28int*\2c\20int*\2c\20int*\2c\20int*\2c\20int*\2c\20std::__1::__less&\29 
+std::_LIBCPP_ABI_NAMESPACE::__libcpp_verbose_abort\28char\20const*\2c\20...\29,opa_abort +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::operator=\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::operator=\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,memmove +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,opa_malloc +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,opa_free 
+std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,memmove +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,opa_malloc +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::resize\28unsigned\20long\2c\20char\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28unsigned\20long\2c\20char\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28unsigned\20long\2c\20char\29,opa_malloc +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28unsigned\20long\2c\20char\29,memmove 
+std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28unsigned\20long\2c\20char\29,opa_free +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28unsigned\20long\2c\20char\29,memset +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28unsigned\20long\2c\20char\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29,memmove +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29,opa_malloc +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29,opa_free +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\29,strlen +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::push_back\28char\29,opa_malloc +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::push_back\28char\29,memmove 
+std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::push_back\28char\29,opa_free +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29,memmove +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29,opa_malloc +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29,opa_free +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\2c\20unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\29,strlen +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29 
re2::BitState::Push\28int\2c\20char\20const*\29,operator\20new\28unsigned\20long\29 re2::BitState::Push\28int\2c\20char\20const*\29,memmove -re2::BitState::Push\28int\2c\20char\20const*\29,operator\20delete\28void*\29 -re2::BitState::Push\28int\2c\20char\20const*\29,abort +re2::BitState::Push\28int\2c\20char\20const*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::BitState::Push\28int\2c\20char\20const*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::BitState::TrySearch\28int\2c\20char\20const*\29,re2::BitState::Push\28int\2c\20char\20const*\29 re2::BitState::TrySearch\28int\2c\20char\20const*\29,opa_abort re2::BitState::TrySearch\28int\2c\20char\20const*\29,re2::Prog::EmptyFlags\28re2::StringPiece\20const&\2c\20char\20const*\29 re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,operator\20new\28unsigned\20long\29 -re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,operator\20delete\28void*\29 +re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,memset re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,re2::BitState::TrySearch\28int\2c\20char\20const*\29 re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,memchr re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29 
-re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,abort +re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::Prog::SearchBitState\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29 -re2::Prog::SearchBitState\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,operator\20delete\28void*\29 +re2::Prog::SearchBitState\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Compiler::AllocInst\28int\29,operator\20new\28unsigned\20long\29 re2::Compiler::AllocInst\28int\29,memset re2::Compiler::AllocInst\28int\29,memmove -re2::Compiler::AllocInst\28int\29,operator\20delete\28void*\29 -re2::Compiler::AllocInst\28int\29,abort +re2::Compiler::AllocInst\28int\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Compiler::AllocInst\28int\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::Compiler::~Compiler\28\29,re2::Prog::~Prog\28\29 -re2::Compiler::~Compiler\28\29,operator\20delete\28void*\29 +re2::Compiler::~Compiler\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Compiler::~Compiler\28\29,re2::Regexp::Walker::~Walker\28\29 re2::Regexp::Walker::~Walker\28\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::Walker::~Walker\28\29,operator\20delete\28void*\29 
-re2::Regexp::Walker::~Walker\28\29,std::__1::__deque_base\2c\20std::__1::allocator\20>\20>::~__deque_base\28\29 -re2::Compiler::~Compiler\28\29.1,re2::Compiler::~Compiler\28\29 -re2::Compiler::~Compiler\28\29.1,operator\20delete\28void*\29 +re2::Regexp::Walker::~Walker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Regexp::Walker::~Walker\28\29,std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::~deque\5babi:nn210108\5d\28\29 +re2::Compiler::~Compiler\28\29_479,re2::Compiler::~Compiler\28\29 +re2::Compiler::~Compiler\28\29_479,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Compiler::Cat\28re2::Frag\2c\20re2::Frag\29,opa_abort re2::Compiler::Star\28re2::Frag\2c\20bool\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::Star\28re2::Frag\2c\20bool\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 @@ -1583,45 +1634,40 @@ re2::Compiler::Quest\28re2::Frag\2c\20bool\29,re2::Prog::Inst::InitNop\28unsigne re2::Compiler::Quest\28re2::Frag\2c\20bool\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 re2::Compiler::Nop\28\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::Nop\28\29,re2::Prog::Inst::InitNop\28unsigned\20int\29 -re2::Compiler::ByteRange\28int\2c\20int\2c\20bool\29,re2::Compiler::AllocInst\28int\29 -re2::Compiler::ByteRange\28int\2c\20int\2c\20bool\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 -re2::Compiler::Match\28int\29,re2::Compiler::AllocInst\28int\29 -re2::Compiler::Match\28int\29,re2::Prog::Inst::InitMatch\28int\29 re2::Compiler::EmptyWidth\28re2::EmptyOp\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::EmptyWidth\28re2::EmptyOp\29,re2::Prog::Inst::InitEmptyWidth\28re2::EmptyOp\2c\20unsigned\20int\29 re2::Compiler::Capture\28re2::Frag\2c\20int\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::Capture\28re2::Frag\2c\20int\29,re2::Prog::Inst::InitCapture\28int\2c\20unsigned\20int\29 
re2::Compiler::Capture\28re2::Frag\2c\20int\29,opa_abort -re2::Compiler::BeginRange\28\29,operator\20delete\28void*\29 +re2::Compiler::BeginRange\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Compiler::BeginRange\28\29,memset re2::Compiler::UncachedRuneByteSuffix\28unsigned\20char\2c\20unsigned\20char\2c\20bool\2c\20int\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::UncachedRuneByteSuffix\28unsigned\20char\2c\20unsigned\20char\2c\20bool\2c\20int\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 re2::Compiler::UncachedRuneByteSuffix\28unsigned\20char\2c\20unsigned\20char\2c\20bool\2c\20int\29,opa_abort -std::__1::pair\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__emplace_unique_key_args\2c\20std::__1::tuple<>\20>\28unsigned\20long\20long\20const&\2c\20std::__1::piecewise_construct_t\20const&\2c\20std::__1::tuple&&\2c\20std::__1::tuple<>&&\29,operator\20new\28unsigned\20long\29 -std::__1::pair\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__emplace_unique_key_args\2c\20std::__1::tuple<>\20>\28unsigned\20long\20long\20const&\2c\20std::__1::piecewise_construct_t\20const&\2c\20std::__1::tuple&&\2c\20std::__1::tuple<>&&\29,std::__1::__next_prime\28unsigned\20long\29 
-std::__1::pair\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__emplace_unique_key_args\2c\20std::__1::tuple<>\20>\28unsigned\20long\20long\20const&\2c\20std::__1::piecewise_construct_t\20const&\2c\20std::__1::tuple&&\2c\20std::__1::tuple<>&&\29,std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__rehash\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::find\28unsigned\20long\20long\20const&\29,std::_LIBCPP_ABI_NAMESPACE::__hash_memory\28void\20const*\2c\20unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>>\28unsigned\20long\20long\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::piecewise_construct_t\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple&&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>&&\29,std::_LIBCPP_ABI_NAMESPACE::__hash_memory\28void\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>>\28unsigned\20long\20long\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::piecewise_construct_t\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple&&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>&&\29,operator\20new\28unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>>\28unsigned\20long\20long\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::piecewise_construct_t\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple&&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>&&\29,std::_LIBCPP_ABI_NAMESPACE::__next_prime\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>>\28unsigned\20long\20long\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::piecewise_construct_t\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple&&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>&&\29,void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__do_rehash\28unsigned\20long\29 re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,opa_abort 
-re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,re2::Compiler::FindByteRange\28int\2c\20int\29 +re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,re2::Compiler::ByteRangeEqual\28int\2c\20int\29 re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 -re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,std::__1::__hash_iterator\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::find\28unsigned\20long\20long\20const&\29 +re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::find\28unsigned\20long\20long\20const&\29 re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,re2::Compiler::AddSuffixRecursive\28int\2c\20int\29 -re2::Compiler::FindByteRange\28int\2c\20int\29,opa_abort -re2::Compiler::FindByteRange\28int\2c\20int\29,re2::Compiler::ByteRangeEqual\28int\2c\20int\29 re2::Compiler::ByteRangeEqual\28int\2c\20int\29,opa_abort -re2::Compiler::AddRuneRange\28int\2c\20int\2c\20bool\29,re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29 -re2::Compiler::AddRuneRange\28int\2c\20int\2c\20bool\29,re2::Compiler::AddRuneRangeLatin1\28int\2c\20int\2c\20bool\29 
-re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::Add_80_10ffff\28\29 -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,runetochar re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29 -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,opa_abort -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,std::__1::__hash_iterator\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::find\28unsigned\20long\20long\20const&\29 -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::UncachedRuneByteSuffix\28unsigned\20char\2c\20unsigned\20char\2c\20bool\2c\20int\29 -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,std::__1::pair\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__emplace_unique_key_args\2c\20std::__1::tuple<>\20>\28unsigned\20long\20long\20const&\2c\20std::__1::piecewise_construct_t\20const&\2c\20std::__1::tuple&&\2c\20std::__1::tuple<>&&\29 -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::AddSuffixRecursive\28int\2c\20int\29 re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::AllocInst\28int\29 -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 
+re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::AddSuffixRecursive\28int\2c\20int\29 +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,runetochar +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,opa_abort +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::find\28unsigned\20long\20long\20const&\29 +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::UncachedRuneByteSuffix\28unsigned\20char\2c\20unsigned\20char\2c\20bool\2c\20int\29 +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::pair\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>>\28unsigned\20long\20long\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::piecewise_construct_t\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple&&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>&&\29 +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::Add_80_10ffff\28\29 
re2::Compiler::AddRuneRangeLatin1\28int\2c\20int\2c\20bool\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::AddRuneRangeLatin1\28int\2c\20int\2c\20bool\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 re2::Compiler::AddRuneRangeLatin1\28int\2c\20int\2c\20bool\29,re2::Compiler::AddSuffixRecursive\28int\2c\20int\29 @@ -1635,22 +1681,25 @@ re2::Compiler::Literal\28int\2c\20bool\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::Literal\28int\2c\20bool\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 re2::Compiler::Literal\28int\2c\20bool\29,runetochar re2::Compiler::Literal\28int\2c\20bool\29,re2::Compiler::Cat\28re2::Frag\2c\20re2::Frag\29 -re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Nop\28\29 -re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Match\28int\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::AllocInst\28int\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Prog::Inst::InitNop\28unsigned\20int\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Prog::Inst::InitMatch\28int\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::EmptyWidth\28re2::EmptyOp\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Cat\28re2::Frag\2c\20re2::Frag\29 -re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 
re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Star\28re2::Frag\2c\20bool\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Quest\28re2::Frag\2c\20bool\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Literal\28int\2c\20bool\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Nop\28\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,opa_abort -re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::BeginRange\28\29 -re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::AddRuneRange\28int\2c\20int\2c\20bool\29 -re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::ByteRange\28int\2c\20int\2c\20bool\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,memset re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::AddRuneRangeLatin1\28int\2c\20int\2c\20bool\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::BeginRange\28\29 
re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Capture\28re2::Frag\2c\20int\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Prog::Inst::InitEmptyWidth\28re2::EmptyOp\2c\20unsigned\20int\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,operator\20new\28unsigned\20long\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Prog::Prog\28\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Prog::Inst::InitFail\28\29 @@ -1661,19 +1710,19 @@ re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Regexp: re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Regexp::Decref\28\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,memset re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,memmove -re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,operator\20delete\28void*\29 +re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Prog::Inst::InitMatch\28int\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Compiler::Cat\28re2::Frag\2c\20re2::Frag\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Compiler::DotStar\28\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Compiler::Finish\28re2::Regexp*\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Compiler::~Compiler\28\29 -re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,abort +re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,re2::Regexp::Incref\28\29 
re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,re2::IsAnchorStart\28re2::Regexp**\2c\20int\29 re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,operator\20new\28unsigned\20long\29 re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,re2::Regexp::Concat\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29 re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,re2::Regexp::Decref\28\29 -re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,operator\20delete\28void*\29 +re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,opa_abort re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,re2::Regexp::Capture\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\2c\20int\29 re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,re2::Regexp::LiteralString\28int*\2c\20int\2c\20re2::Regexp::ParseFlags\29 @@ -1682,73 +1731,74 @@ re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,re2::IsAnchorEnd\28re2::Regexp**\2c re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,operator\20new\28unsigned\20long\29 re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,re2::Regexp::Concat\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29 re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,re2::Regexp::Decref\28\29 -re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,operator\20delete\28void*\29 +re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,opa_abort re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,re2::Regexp::Capture\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\2c\20int\29 re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,re2::Regexp::LiteralString\28int*\2c\20int\2c\20re2::Regexp::ParseFlags\29 re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Frag\2c\20bool\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Frag\2c\20bool\29,operator\20delete\28void*\29 
-re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Frag\2c\20bool\29,std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29 +re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Frag\2c\20bool\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Frag\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29 re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Frag\2c\20bool\29,operator\20new\5b\5d\28unsigned\20long\29 re2::Compiler::DotStar\28\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::DotStar\28\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 re2::Compiler::DotStar\28\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 re2::Compiler::DotStar\28\29,opa_abort -re2::Compiler::Finish\28re2::Regexp*\29,operator\20delete\28void*\29 +re2::Compiler::Finish\28re2::Regexp*\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Compiler::Finish\28re2::Regexp*\29,re2::Prog::Optimize\28\29 re2::Compiler::Finish\28re2::Regexp*\29,re2::Prog::Flatten\28\29 re2::Compiler::Finish\28re2::Regexp*\29,re2::Prog::ComputeByteMap\28\29 -re2::Compiler::Finish\28re2::Regexp*\29,re2::Regexp::RequiredPrefixForAccel\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\29 +re2::Compiler::Finish\28re2::Regexp*\29,re2::Regexp::RequiredPrefixForAccel\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\29 re2::Regexp::CompileToProg\28long\20long\29,re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29 re2::Regexp::CompileToReverseProg\28long\20long\29,re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29 -std::__1::__deque_base\2c\20std::__1::allocator\20>\20>::~__deque_base\28\29,operator\20delete\28void*\29 
-std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 -std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__rehash\28unsigned\20long\29,operator\20delete\28void*\29 -std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__rehash\28unsigned\20long\29,abort -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,memmove -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,operator\20delete\28void*\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,abort 
-std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,abort +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::~deque\5babi:nn210108\5d\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__do_rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__do_rehash\28unsigned\20long\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__do_rehash\28unsigned\20long\29,memset +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__do_rehash\28unsigned\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 
+std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29,operator\20new\28unsigned\20long\29 -re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29,abort +re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::DFA::~DFA\28\29,opa_abort -re2::DFA::~DFA\28\29,operator\20delete\28void*\29 +re2::DFA::~DFA\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::DFA::~DFA\28\29,memset re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,operator\20new\28unsigned\20long\29 +re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,opa_abort -re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,void\20std::__1::__sort&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29 -re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,abort +re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,void\20std::_LIBCPP_ABI_NAMESPACE::__sort&\2c\20int*>\28int*\2c\20int*\2c\20std::_LIBCPP_ABI_NAMESPACE::__less&\29 re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29 
-re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,operator\20delete\28void*\29 -re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,std::__1::__hash_iterator*>\20std::__1::__hash_table\20>::find\28re2::DFA::State*\20const&\29 +re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,std::_LIBCPP_ABI_NAMESPACE::__hash_iterator*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::find\28re2::DFA::State*\20const&\29 re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,operator\20new\28unsigned\20long\29 re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,memset -re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,memmove -re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,std::__1::pair*>\2c\20bool>\20std::__1::__hash_table\20>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29 -std::__1::__hash_iterator*>\20std::__1::__hash_table\20>::find\28re2::DFA::State*\20const&\29,opa_abort -std::__1::pair*>\2c\20bool>\20std::__1::__hash_table\20>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,opa_abort -std::__1::pair*>\2c\20bool>\20std::__1::__hash_table\20>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,operator\20new\28unsigned\20long\29 -std::__1::pair*>\2c\20bool>\20std::__1::__hash_table\20>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,std::__1::__next_prime\28unsigned\20long\29 -std::__1::pair*>\2c\20bool>\20std::__1::__hash_table\20>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,std::__1::__hash_table\20>::__rehash\28unsigned\20long\29 +re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,memcpy 
+re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,std::_LIBCPP_ABI_NAMESPACE::pair*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29 +std::_LIBCPP_ABI_NAMESPACE::__hash_iterator*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::find\28re2::DFA::State*\20const&\29,opa_abort +std::_LIBCPP_ABI_NAMESPACE::pair*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,opa_abort +std::_LIBCPP_ABI_NAMESPACE::pair*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,std::_LIBCPP_ABI_NAMESPACE::__next_prime\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__do_rehash\28unsigned\20long\29 re2::SparseSetT::InsertInternal\28bool\2c\20int\29,opa_abort re2::SparseSetT::InsertInternal\28bool\2c\20int\29,re2::SparseSetT::create_index\28int\29 re2::DFA::AddToQueue\28re2::DFA::Workq*\2c\20int\2c\20unsigned\20int\29,opa_abort @@ -1761,7 +1811,8 @@ re2::DFA::RunStateOnByte\28re2::DFA::State*\2c\20int\29,re2::DFA::AddToQueue\28r re2::DFA::RunStateOnByte\28re2::DFA::State*\2c\20int\29,re2::DFA::RunWorkqOnByte\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20int\2c\20unsigned\20int\2c\20bool*\29 re2::DFA::RunStateOnByte\28re2::DFA::State*\2c\20int\29,re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29 
re2::DFA::ResetCache\28re2::DFA::RWLocker*\29,re2::hooks::GetDFAStateCacheResetHook\28\29 -re2::DFA::ResetCache\28re2::DFA::RWLocker*\29,operator\20delete\28void*\29 +re2::DFA::ResetCache\28re2::DFA::RWLocker*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::DFA::ResetCache\28re2::DFA::RWLocker*\29,memset re2::DFA::SearchFFF\28re2::DFA::SearchParams*\29,bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::RunStateOnByte\28re2::DFA::State*\2c\20int\29 @@ -1821,9 +1872,9 @@ bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::Search bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,memchr bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::RunStateOnByte\28re2::DFA::State*\2c\20int\29 +bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::ResetCache\28re2::DFA::RWLocker*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,operator\20new\5b\5d\28unsigned\20long\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,memmove -bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::ResetCache\28re2::DFA::RWLocker*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,operator\20delete\5b\5d\28void*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 @@ -1832,61 +1883,60 @@ bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchP bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,memchr 
bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::RunStateOnByte\28re2::DFA::State*\2c\20int\29 +bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::ResetCache\28re2::DFA::RWLocker*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,operator\20new\5b\5d\28unsigned\20long\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,memmove -bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::ResetCache\28re2::DFA::RWLocker*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,operator\20delete\5b\5d\28void*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 re2::DFA::AnalyzeSearch\28re2::DFA::SearchParams*\29,re2::DFA::AddToQueue\28re2::DFA::Workq*\2c\20int\2c\20unsigned\20int\29 re2::DFA::AnalyzeSearch\28re2::DFA::SearchParams*\29,re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29 re2::DFA::AnalyzeSearch\28re2::DFA::SearchParams*\29,re2::DFA::ResetCache\28re2::DFA::RWLocker*\29 -re2::DFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20bool\2c\20bool*\2c\20char\20const**\2c\20re2::SparseSetT*\29,re2::DFA::AnalyzeSearch\28re2::DFA::SearchParams*\29 -re2::Prog::GetDFA\28re2::Prog::MatchKind\29,std::__1::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,operator\20new\28unsigned\20long\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29 
-void\20std::__1::__call_once_proxy\20>\28void*\29,operator\20new\28unsigned\20long\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,operator\20new\28unsigned\20long\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29 +re2::Prog::GetDFA\28re2::Prog::MatchKind\29,std::_LIBCPP_ABI_NAMESPACE::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29 re2::Prog::DeleteDFA\28re2::DFA*\29,re2::DFA::~DFA\28\29 -re2::Prog::DeleteDFA\28re2::DFA*\29,operator\20delete\28void*\29 +re2::Prog::DeleteDFA\28re2::DFA*\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Prog::SearchDFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20bool*\2c\20re2::SparseSetT*\29,re2::Prog::GetDFA\28re2::Prog::MatchKind\29 
-re2::Prog::SearchDFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20bool*\2c\20re2::SparseSetT*\29,re2::DFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20bool\2c\20bool*\2c\20char\20const**\2c\20re2::SparseSetT*\29 +re2::Prog::SearchDFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20bool*\2c\20re2::SparseSetT*\29,re2::DFA::AnalyzeSearch\28re2::DFA::SearchParams*\29 re2::Prog::SearchDFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20bool*\2c\20re2::SparseSetT*\29,re2::hooks::GetDFASearchFailureHook\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 re2::SparseSetT::create_index\28int\29,opa_abort -std::__1::__hash_table\20>::__rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 -std::__1::__hash_table\20>::__rehash\28unsigned\20long\29,operator\20delete\28void*\29 -std::__1::__hash_table\20>::__rehash\28unsigned\20long\29,opa_abort -std::__1::__hash_table\20>::__rehash\28unsigned\20long\29,abort +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__do_rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__do_rehash\28unsigned\20long\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__do_rehash\28unsigned\20long\29,memset +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__do_rehash\28unsigned\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::NFA::NFA\28re2::Prog*\29,memset re2::NFA::NFA\28re2::Prog*\29,re2::SparseArray::resize\28int\29 
re2::NFA::NFA\28re2::Prog*\29,operator\20new\28unsigned\20long\29 -re2::NFA::NFA\28re2::Prog*\29,operator\20delete\28void*\29 -re2::NFA::NFA\28re2::Prog*\29,abort +re2::NFA::NFA\28re2::Prog*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::NFA::NFA\28re2::Prog*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::SparseArray::resize\28int\29,opa_abort re2::SparseArray::resize\28int\29,operator\20new\28unsigned\20long\29 re2::SparseArray::resize\28int\29,memmove -re2::SparseArray::resize\28int\29,operator\20delete\28void*\29 -re2::SparseArray::resize\28int\29,abort +re2::SparseArray::resize\28int\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::SparseArray::resize\28int\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::NFA::~NFA\28\29,operator\20delete\5b\5d\28void*\29 -re2::NFA::~NFA\28\29,std::__1::__deque_base\20>::~__deque_base\28\29 -re2::NFA::~NFA\28\29,operator\20delete\28void*\29 +re2::NFA::~NFA\28\29,std::_LIBCPP_ABI_NAMESPACE::deque>::~deque\5babi:nn210108\5d\28\29 +re2::NFA::~NFA\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::NFA::~NFA\28\29,opa_abort -std::__1::__deque_base\20>::~__deque_base\28\29,operator\20delete\28void*\29 +std::_LIBCPP_ABI_NAMESPACE::deque>::~deque\5babi:nn210108\5d\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29,opa_abort -re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29,std::__1::deque\20>::__add_back_capacity\28\29 +re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29,std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29 
re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29,operator\20new\5b\5d\28unsigned\20long\29 re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29,memmove re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29,re2::Prog::EmptyFlags\28re2::StringPiece\20const&\2c\20char\20const*\29 -std::__1::deque\20>::__add_back_capacity\28\29,memmove -std::__1::deque\20>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 -std::__1::deque\20>::__add_back_capacity\28\29,operator\20delete\28void*\29 -std::__1::deque\20>::__add_back_capacity\28\29,std::__1::__split_buffer\20>::push_back\28re2::NFA::Thread*&&\29 -std::__1::deque\20>::__add_back_capacity\28\29,std::__1::__split_buffer\20>::push_front\28re2::NFA::Thread*&&\29 -std::__1::deque\20>::__add_back_capacity\28\29,std::__1::__split_buffer&>::push_back\28re2::NFA::Thread*&&\29 -std::__1::deque\20>::__add_back_capacity\28\29,std::__1::__split_buffer&>::push_front\28re2::NFA::Thread*\20const&\29 -std::__1::deque\20>::__add_back_capacity\28\29,abort +std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer&>::emplace_front\28re2::NFA::Thread*&\29 
+std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_front\28re2::NFA::Thread*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::NFA::Step\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\29,memmove re2::NFA::Step\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\29,opa_abort re2::NFA::Step\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\29,re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29 @@ -1894,13 +1944,11 @@ re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\2 re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,memset re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,re2::NFA::Step\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\29 re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,opa_abort -re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,re2::Prog::PrefixAccel\28void\20const*\2c\20unsigned\20long\29 -re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,std::__1::deque\20>::__add_back_capacity\28\29 +re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,memchr 
+re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29 +re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29 re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,memmove re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29 -re2::Prog::PrefixAccel\28void\20const*\2c\20unsigned\20long\29,opa_abort -re2::Prog::PrefixAccel\28void\20const*\2c\20unsigned\20long\29,memchr -re2::Prog::PrefixAccel\28void\20const*\2c\20unsigned\20long\29,re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29 re2::Prog::SearchNFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,re2::NFA::NFA\28re2::Prog*\29 re2::Prog::SearchNFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29 re2::Prog::SearchNFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,re2::NFA::~NFA\28\29 @@ -1909,22 +1957,22 @@ re2::SparseArray::SetInternal\28bool\2c\20int\2c\20int\20const&\29,re2::Spa re2::SparseArray::SetInternal\28bool\2c\20int\2c\20int\20const&\29,re2::SparseArray::SetExistingInternal\28int\2c\20int\20const&\29 
re2::SparseArray::create_index\28int\29,opa_abort re2::SparseArray::SetExistingInternal\28int\2c\20int\20const&\29,opa_abort -std::__1::__split_buffer\20>::push_back\28re2::NFA::Thread*&&\29,memmove -std::__1::__split_buffer\20>::push_back\28re2::NFA::Thread*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer\20>::push_back\28re2::NFA::Thread*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer\20>::push_back\28re2::NFA::Thread*&&\29,abort -std::__1::__split_buffer\20>::push_front\28re2::NFA::Thread*&&\29,memmove -std::__1::__split_buffer\20>::push_front\28re2::NFA::Thread*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer\20>::push_front\28re2::NFA::Thread*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer\20>::push_front\28re2::NFA::Thread*&&\29,abort -std::__1::__split_buffer&>::push_back\28re2::NFA::Thread*&&\29,memmove -std::__1::__split_buffer&>::push_back\28re2::NFA::Thread*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer&>::push_back\28re2::NFA::Thread*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer&>::push_back\28re2::NFA::Thread*&&\29,abort -std::__1::__split_buffer&>::push_front\28re2::NFA::Thread*\20const&\29,memmove -std::__1::__split_buffer&>::push_front\28re2::NFA::Thread*\20const&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer&>::push_front\28re2::NFA::Thread*\20const&\29,operator\20delete\28void*\29 -std::__1::__split_buffer&>::push_front\28re2::NFA::Thread*\20const&\29,abort +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer&>::emplace_front\28re2::NFA::Thread*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer&>::emplace_front\28re2::NFA::Thread*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer&>::emplace_front\28re2::NFA::Thread*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer&>::emplace_front\28re2::NFA::Thread*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_front\28re2::NFA::Thread*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_front\28re2::NFA::Thread*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_front\28re2::NFA::Thread*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_front\28re2::NFA::Thread*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::Prog::SearchOnePass\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,memset 
re2::Prog::SearchOnePass\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,re2::Prog::EmptyFlags\28re2::StringPiece\20const&\2c\20char\20const*\29 re2::Prog::SearchOnePass\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,memcpy @@ -1932,17 +1980,19 @@ re2::Prog::IsOnePass\28\29,operator\20new\28unsigned\20long\29 re2::Prog::IsOnePass\28\29,memset re2::Prog::IsOnePass\28\29,opa_abort re2::Prog::IsOnePass\28\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 -re2::Prog::IsOnePass\28\29,std::__1::vector\20>::insert\28std::__1::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29 -re2::Prog::IsOnePass\28\29,operator\20delete\28void*\29 +re2::Prog::IsOnePass\28\29,std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29 +re2::Prog::IsOnePass\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Prog::IsOnePass\28\29,memmove -re2::Prog::IsOnePass\28\29,abort -std::__1::vector\20>::insert\28std::__1::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,memmove -std::__1::vector\20>::insert\28std::__1::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,operator\20new\28unsigned\20long\29 -std::__1::vector\20>::insert\28std::__1::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,memcpy -std::__1::vector\20>::insert\28std::__1::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,operator\20delete\28void*\29 -std::__1::vector\20>::insert\28std::__1::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,abort -std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29,operator\20delete\28void*\29 
+re2::Prog::IsOnePass\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,memset +std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,memmove +std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 +std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29,re2::Regexp::ParseState::MaybeConcatString\28int\2c\20re2::Regexp::ParseFlags\29 re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29,re2::CharClassBuilder::RemoveAbove\28int\29 re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29,re2::Regexp::Decref\28\29 @@ -1965,36 +2015,36 @@ 
re2::Regexp::ParseState::PushSimpleOp\28re2::RegexpOp\29,re2::Regexp::Regexp\28r re2::Regexp::ParseState::PushSimpleOp\28re2::RegexpOp\29,re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29 re2::Regexp::ParseState::PushDot\28\29,operator\20new\28unsigned\20long\29 re2::Regexp::ParseState::PushDot\28\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 -re2::Regexp::ParseState::PushDot\28\29,re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29 re2::Regexp::ParseState::PushDot\28\29,re2::CharClassBuilder::CharClassBuilder\28\29 re2::Regexp::ParseState::PushDot\28\29,re2::CharClassBuilder::AddRange\28int\2c\20int\29 +re2::Regexp::ParseState::PushDot\28\29,re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29 re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,operator\20new\28unsigned\20long\29 re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::CharClassBuilder::GetCharClass\28\29 -re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,operator\20delete\28void*\29 +re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::Regexp::ComputeSimple\28\29 
re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,operator\20new\28unsigned\20long\29 re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::CharClassBuilder::GetCharClass\28\29 -re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,operator\20delete\28void*\29 +re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::Regexp::ComputeSimple\28\29 re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29 re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::Regexp::Walker::~Walker\28\29 re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29,operator\20delete\28void*\29 -re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29,std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29 +re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29 re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29,operator\20new\5b\5d\28unsigned\20long\29 re2::Regexp::Walker::~Walker\28\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::Walker::~Walker\28\29,operator\20delete\28void*\29 -re2::Regexp::Walker::~Walker\28\29,std::__1::__deque_base\2c\20std::__1::allocator\20>\20>::~__deque_base\28\29 -std::__1::__deque_base\2c\20std::__1::allocator\20>\20>::~__deque_base\28\29,operator\20delete\28void*\29 +re2::Regexp::Walker::~Walker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Regexp::Walker::~Walker\28\29,std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::~deque\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::~deque\5babi:nn210108\5d\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,operator\20new\28unsigned\20long\29 re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 -re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,memcpy +re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,memmove re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29 -re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,abort +re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 re2::Regexp::ParseState::DoVerticalBar\28\29,re2::Regexp::ParseState::MaybeConcatString\28int\2c\20re2::Regexp::ParseFlags\29 
re2::Regexp::ParseState::DoVerticalBar\28\29,operator\20new\28unsigned\20long\29 re2::Regexp::ParseState::DoVerticalBar\28\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 @@ -2005,65 +2055,70 @@ re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,operator\20new\28unsigned re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,re2::Regexp::Incref\28\29 re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,re2::Regexp::Decref\28\29 re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,re2::CharClassBuilder::GetCharClass\28\29 -re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,operator\20delete\28void*\29 +re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29 re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,re2::Regexp::ComputeSimple\28\29 -re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,abort +re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::Regexp::ParseState::DoRightParen\28\29,re2::Regexp::ParseState::DoVerticalBar\28\29 re2::Regexp::ParseState::DoRightParen\28\29,re2::Regexp::Decref\28\29 re2::Regexp::ParseState::DoRightParen\28\29,re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29 re2::Regexp::ParseState::DoRightParen\28\29,re2::CharClassBuilder::GetCharClass\28\29 -re2::Regexp::ParseState::DoRightParen\28\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::Regexp::ParseState::DoRightParen\28\29,operator\20delete\28void*\29 
+re2::Regexp::ParseState::DoRightParen\28\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::Regexp::ParseState::DoRightParen\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::ParseState::DoRightParen\28\29,re2::Regexp::ComputeSimple\28\29 re2::Regexp::ParseState::DoRightParen\28\29,re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29 re2::Regexp::ParseState::DoFinish\28\29,re2::Regexp::ParseState::DoVerticalBar\28\29 re2::Regexp::ParseState::DoFinish\28\29,re2::Regexp::Decref\28\29 re2::Regexp::ParseState::DoFinish\28\29,re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29 re2::Regexp::ParseState::DoFinish\28\29,re2::CharClassBuilder::GetCharClass\28\29 -re2::Regexp::ParseState::DoFinish\28\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::Regexp::ParseState::DoFinish\28\29,operator\20delete\28void*\29 +re2::Regexp::ParseState::DoFinish\28\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::Regexp::ParseState::DoFinish\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::RemoveLeadingString\28re2::Regexp*\2c\20int\29,operator\20delete\5b\5d\28void*\29 re2::Regexp::RemoveLeadingString\28re2::Regexp*\2c\20int\29,memmove re2::Regexp::RemoveLeadingString\28re2::Regexp*\2c\20int\29,re2::Regexp::Decref\28\29 re2::Regexp::RemoveLeadingString\28re2::Regexp*\2c\20int\29,re2::Regexp::Swap\28re2::Regexp*\29 -re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,void\20std::__1::vector\20>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29 +re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Frame*\20std::_LIBCPP_ABI_NAMESPACE::vector>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29 
re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::AlternateNoFactor\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29 re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Concat\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29 -re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29 -re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29 -re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,operator\20delete\28void*\29 -re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29 -void\20std::__1::vector\20>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,operator\20new\28unsigned\20long\29 -void\20std::__1::vector\20>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,operator\20delete\28void*\29 -void\20std::__1::vector\20>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,abort -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29 -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::Incref\28\29 -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::Decref\28\29 
-re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,memmove -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,operator\20new\28unsigned\20long\29 -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,memcpy -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,operator\20delete\28void*\29 -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,abort -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::CharClassBuilder::CharClassBuilder\28\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::CharClassBuilder::AddRange\28int\2c\20int\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::AddFoldedRange\28re2::CharClassBuilder*\2c\20int\2c\20int\2c\20int\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::Decref\28\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::CharClassBuilder::GetCharClass\28\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::NewCharClass\28re2::CharClass*\2c\20re2::Regexp::ParseFlags\29 
-re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,operator\20new\28unsigned\20long\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,memcpy -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,operator\20delete\28void*\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,abort -re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::LiteralString\28int*\2c\20int\2c\20re2::Regexp::ParseFlags\29 -re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::RemoveLeadingString\28re2::Regexp*\2c\20int\29 -re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,operator\20new\28unsigned\20long\29 -re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,memcpy -re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,operator\20delete\28void*\29 -re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,abort +re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29 
+re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29 +re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29 +re2::Frame*\20std::_LIBCPP_ABI_NAMESPACE::vector>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,operator\20new\28unsigned\20long\29 +re2::Frame*\20std::_LIBCPP_ABI_NAMESPACE::vector>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Frame*\20std::_LIBCPP_ABI_NAMESPACE::vector>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Frame*\20std::_LIBCPP_ABI_NAMESPACE::vector>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::Incref\28\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::Decref\28\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memmove 
+re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20new\28unsigned\20long\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memcpy +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::CharClassBuilder::CharClassBuilder\28\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::CharClassBuilder::AddRange\28int\2c\20int\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::AddFoldedRange\28re2::CharClassBuilder*\2c\20int\2c\20int\2c\20int\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::Decref\28\29 
+re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::CharClassBuilder::GetCharClass\28\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::NewCharClass\28re2::CharClass*\2c\20re2::Regexp::ParseFlags\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20new\28unsigned\20long\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memcpy +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::LiteralString\28int*\2c\20int\2c\20re2::Regexp::ParseFlags\29 +re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::RemoveLeadingString\28re2::Regexp*\2c\20int\29 
+re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20new\28unsigned\20long\29 +re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memcpy +re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 re2::AddFoldedRange\28re2::CharClassBuilder*\2c\20int\2c\20int\2c\20int\29,re2::CharClassBuilder::AddRange\28int\2c\20int\29 re2::AddFoldedRange\28re2::CharClassBuilder*\2c\20int\2c\20int\2c\20int\29,re2::AddFoldedRange\28re2::CharClassBuilder*\2c\20int\2c\20int\2c\20int\29 re2::CharClassBuilder::AddRangeFlags\28int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::AddFoldedRange\28re2::CharClassBuilder*\2c\20int\2c\20int\2c\20int\29 @@ -2082,7 +2137,7 @@ re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20 re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::CharClassBuilder::AddRange\28int\2c\20int\29 re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::CharClassBuilder::Negate\28\29 
re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::CharClassBuilder::AddCharClass\28re2::CharClassBuilder*\29 -re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20re2::Regexp::ParseFlags\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 +re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20re2::Regexp::ParseFlags\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 re2::StringPieceToRune\28int*\2c\20re2::StringPiece*\2c\20re2::RegexpStatus*\29,fullrune re2::StringPieceToRune\28int*\2c\20re2::StringPiece*\2c\20re2::RegexpStatus*\29,chartorune re2::Regexp::ParseState::ParseCCCharacter\28re2::StringPiece*\2c\20int*\2c\20re2::StringPiece\20const&\2c\20re2::RegexpStatus*\29,re2::ParseEscape\28re2::StringPiece*\2c\20int*\2c\20re2::RegexpStatus*\2c\20int\29 @@ -2117,8 +2172,8 @@ re2::Regexp::ParseState::ParsePerlFlags\28re2::StringPiece*\29,re2::Regexp::Rege re2::Regexp::ParseState::ParsePerlFlags\28re2::StringPiece*\29,re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29 re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,operator\20new\28unsigned\20long\29 re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,runetochar -re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29 -re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,operator\20delete\28void*\29 +re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29 
+re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,fullrune re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,chartorune re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,re2::Regexp::ParseState::PushLiteral\28int\29 @@ -2142,31 +2197,31 @@ re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\2 re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20re2::Regexp::ParseFlags\29 re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29 re2::RepetitionWalker::~RepetitionWalker\28\29,re2::Regexp::Walker::~Walker\28\29 -re2::RepetitionWalker::~RepetitionWalker\28\29,operator\20delete\28void*\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,memmove -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,operator\20delete\28void*\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29 
-std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,operator\20new\28unsigned\20long\29 
-std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,abort +re2::RepetitionWalker::~RepetitionWalker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,memmove 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29,opa_abort re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29,opa_abort re2::Prog::Inst::InitCapture\28int\2c\20unsigned\20int\29,opa_abort @@ -2175,26 +2230,30 @@ re2::Prog::Inst::InitMatch\28int\29,opa_abort re2::Prog::Inst::InitNop\28unsigned\20int\29,opa_abort re2::Prog::Inst::InitFail\28\29,opa_abort re2::Prog::~Prog\28\29,re2::Prog::DeleteDFA\28re2::DFA*\29 -re2::Prog::~Prog\28\29,operator\20delete\28void*\29 +re2::Prog::~Prog\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Prog::Optimize\28\29,operator\20new\28unsigned\20long\29 re2::Prog::Optimize\28\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 +re2::Prog::Optimize\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::Prog::Optimize\28\29,opa_abort -re2::Prog::Optimize\28\29,abort -re2::Prog::Optimize\28\29,operator\20delete\28void*\29 +re2::Prog::Optimize\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::ByteMapBuilder::Mark\28int\2c\20int\29,opa_abort 
re2::ByteMapBuilder::Mark\28int\2c\20int\29,operator\20new\28unsigned\20long\29 re2::ByteMapBuilder::Mark\28int\2c\20int\29,memcpy -re2::ByteMapBuilder::Mark\28int\2c\20int\29,operator\20delete\28void*\29 -re2::ByteMapBuilder::Mark\28int\2c\20int\29,abort +re2::ByteMapBuilder::Mark\28int\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::ByteMapBuilder::Mark\28int\2c\20int\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::ByteMapBuilder::Mark\28int\2c\20int\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 re2::ByteMapBuilder::Merge\28\29,opa_abort re2::ByteMapBuilder::Merge\28\29,operator\20new\28unsigned\20long\29 re2::ByteMapBuilder::Merge\28\29,memcpy -re2::ByteMapBuilder::Merge\28\29,operator\20delete\28void*\29 -re2::ByteMapBuilder::Merge\28\29,abort +re2::ByteMapBuilder::Merge\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::ByteMapBuilder::Merge\28\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::ByteMapBuilder::Merge\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::ByteMapBuilder::Recolor\28int\29,operator\20new\28unsigned\20long\29 re2::ByteMapBuilder::Recolor\28int\29,memcpy -re2::ByteMapBuilder::Recolor\28int\29,operator\20delete\28void*\29 -re2::ByteMapBuilder::Recolor\28int\29,abort +re2::ByteMapBuilder::Recolor\28int\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::ByteMapBuilder::Recolor\28int\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29 
+re2::ByteMapBuilder::Recolor\28int\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::ByteMapBuilder::Build\28unsigned\20char*\2c\20int*\29,opa_abort re2::ByteMapBuilder::Build\28unsigned\20char*\2c\20int*\29,re2::ByteMapBuilder::Recolor\28int\29 re2::ByteMapBuilder::Build\28unsigned\20char*\2c\20int*\29,memset @@ -2202,78 +2261,99 @@ re2::Prog::ComputeByteMap\28\29,re2::ByteMapBuilder::Mark\28int\2c\20int\29 re2::Prog::ComputeByteMap\28\29,opa_abort re2::Prog::ComputeByteMap\28\29,operator\20new\28unsigned\20long\29 re2::Prog::ComputeByteMap\28\29,memcpy -re2::Prog::ComputeByteMap\28\29,operator\20delete\28void*\29 +re2::Prog::ComputeByteMap\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Prog::ComputeByteMap\28\29,re2::ByteMapBuilder::Merge\28\29 -re2::Prog::ComputeByteMap\28\29,abort +re2::Prog::ComputeByteMap\28\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Prog::ComputeByteMap\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::Prog::ComputeByteMap\28\29,re2::ByteMapBuilder::Build\28unsigned\20char*\2c\20int*\29 re2::Prog::Flatten\28\29,operator\20new\28unsigned\20long\29 -re2::Prog::Flatten\28\29,re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29 +re2::Prog::Flatten\28\29,re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29 re2::Prog::Flatten\28\29,memmove 
-re2::Prog::Flatten\28\29,void\20std::__1::__sort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 -re2::Prog::Flatten\28\29,re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29 +re2::Prog::Flatten\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__introsort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20false>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20std::_LIBCPP_ABI_NAMESPACE::iterator_traits::IndexValue*>::difference_type\2c\20bool\29 +re2::Prog::Flatten\28\29,re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29 re2::Prog::Flatten\28\29,memset -re2::Prog::Flatten\28\29,re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29 -re2::Prog::Flatten\28\29,re2::Prog::ComputeHints\28std::__1::vector\20>*\2c\20int\2c\20int\29 +re2::Prog::Flatten\28\29,re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29 +re2::Prog::Flatten\28\29,re2::Prog::ComputeHints\28std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20int\2c\20int\29 +re2::Prog::Flatten\28\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 
+re2::Prog::Flatten\28\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 re2::Prog::Flatten\28\29,opa_abort -re2::Prog::Flatten\28\29,operator\20delete\28void*\29 -re2::Prog::Flatten\28\29,abort -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,re2::SparseArray::SetInternal\28bool\2c\20int\2c\20int\20const&\29 -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,opa_abort -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,operator\20new\28unsigned\20long\29 -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,operator\20delete\28void*\29 -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,memcpy -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,abort 
-void\20std::__1::__sort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29,unsigned\20int\20std::__1::__sort4::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 -void\20std::__1::__sort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29,void\20std::__1::__insertion_sort_3::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 -void\20std::__1::__sort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29,bool\20std::__1::__insertion_sort_incomplete::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 
-void\20std::__1::__sort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29,void\20std::__1::__sort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,operator\20new\28unsigned\20long\29 -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,operator\20delete\28void*\29 -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,opa_abort -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,memcpy -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,re2::SparseArray::create_index\28int\29 
-re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,re2::SparseArray::SetExistingInternal\28int\2c\20int\20const&\29 -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,abort -re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,operator\20new\28unsigned\20long\29 -re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,operator\20delete\28void*\29 -re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,opa_abort -re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 -re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,memcpy -re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,abort -re2::Prog::ComputeHints\28std::__1::vector\20>*\2c\20int\2c\20int\29,opa_abort -re2::Prog::ComputeHints\28std::__1::vector\20>*\2c\20int\2c\20int\29,re2::Prog::ComputeHints\28std::__1::vector\20>*\2c\20int\2c\20int\29::$_1::operator\28\29\28int\2c\20int\29\20const -re2::Prog::ComputeHints\28std::__1::vector\20>*\2c\20int\2c\20int\29::$_1::operator\28\29\28int\2c\20int\29\20const,opa_abort +re2::Prog::Flatten\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Prog::Flatten\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 
+re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::SparseArray::SetInternal\28bool\2c\20int\2c\20int\20const&\29 +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,opa_abort +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int&&\29 +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20new\28unsigned\20long\29 +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memcpy +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__introsort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20false>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20std::_LIBCPP_ABI_NAMESPACE::iterator_traits::IndexValue*>::difference_type\2c\20bool\29,re2::SparseArray::IndexValue*\20std::_LIBCPP_ABI_NAMESPACE::__partial_sort_impl\5babi:nn210108\5d::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__introsort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20false>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20std::_LIBCPP_ABI_NAMESPACE::iterator_traits::IndexValue*>::difference_type\2c\20bool\29,void\20std::_LIBCPP_ABI_NAMESPACE::__sort5\5babi:nn210108\5d::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\200>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__introsort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20false>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20std::_LIBCPP_ABI_NAMESPACE::iterator_traits::IndexValue*>::difference_type\2c\20bool\29,bool\20std::_LIBCPP_ABI_NAMESPACE::__insertion_sort_incomplete\5babi:nn210108\5d::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__introsort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20false>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20std::_LIBCPP_ABI_NAMESPACE::iterator_traits::IndexValue*>::difference_type\2c\20bool\29,void\20std::_LIBCPP_ABI_NAMESPACE::__introsort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20false>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20std::_LIBCPP_ABI_NAMESPACE::iterator_traits::IndexValue*>::difference_type\2c\20bool\29 +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29 +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,opa_abort +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20new\28unsigned\20long\29 
+re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memcpy +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29 +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,opa_abort +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20new\28unsigned\20long\29 
+re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memcpy +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Prog::ComputeHints\28std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20int\2c\20int\29,opa_abort +re2::Prog::ComputeHints\28std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20int\2c\20int\29,re2::Prog::ComputeHints\28std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20int\2c\20int\29::$_0::operator\28\29\28int\2c\20int\29\20const +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int&&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int&&\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int&&\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::Prog::ComputeHints\28std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20int\2c\20int\29::$_0::operator\28\29\28int\2c\20int\29\20const,opa_abort re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29,opa_abort re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29,memchr 
-bool\20std::__1::__insertion_sort_incomplete::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29,unsigned\20int\20std::__1::__sort4::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 -re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,std::__1::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 -re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\2c\20unsigned\20long\29 +bool\20std::_LIBCPP_ABI_NAMESPACE::__insertion_sort_incomplete\5babi:nn210108\5d::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29,void\20std::_LIBCPP_ABI_NAMESPACE::__sort5\5babi:nn210108\5d::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\200>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 
+re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,std::_LIBCPP_ABI_NAMESPACE::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 +re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\2c\20unsigned\20long\29 re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29 re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,operator\20new\28unsigned\20long\29 re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::RegexpStatus::Text\28\29\20const -re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,memcpy -re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,operator\20delete\28void*\29 -re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Regexp::RequiredPrefix\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\2c\20re2::Regexp**\29 +re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,memmove +re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Regexp::RequiredPrefix\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\2c\20re2::Regexp**\29 re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Regexp::Incref\28\29 re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Regexp::CompileToProg\28long\20long\29 re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Regexp::NumCaptures\28\29 
re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Prog::IsOnePass\28\29 -re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,abort -void\20std::__1::__call_once_proxy\20>\28void*\29,operator\20new\28unsigned\20long\29 +re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,operator\20new\28unsigned\20long\29 re2::RE2::RE2\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29 -re2::RE2::ReverseProg\28\29\20const,std::__1::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,re2::Regexp::CompileToReverseProg\28long\20long\29 +re2::RE2::ReverseProg\28\29\20const,std::_LIBCPP_ABI_NAMESPACE::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,re2::Regexp::CompileToReverseProg\28long\20long\29 re2::RE2::~RE2\28\29,re2::Regexp::Decref\28\29 re2::RE2::~RE2\28\29,re2::Prog::~Prog\28\29 -re2::RE2::~RE2\28\29,operator\20delete\28void*\29 -re2::RE2::~RE2\28\29,std::__1::__tree\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\2c\20std::__1::__value_type\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::less\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20int>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\2c\20int>\2c\20void*>*\29 
-re2::RE2::~RE2\28\29,std::__1::__tree\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\20>\2c\20std::__1::less\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\20>\2c\20void*>*\29 -std::__1::__tree\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\2c\20std::__1::__value_type\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::less\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20int>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\2c\20int>\2c\20void*>*\29,std::__1::__tree\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\2c\20std::__1::__value_type\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::less\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20int>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\2c\20int>\2c\20void*>*\29 -std::__1::__tree\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\2c\20std::__1::__value_type\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::less\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20int>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\2c\20int>\2c\20void*>*\29,operator\20delete\28void*\29 
-std::__1::__tree\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\20>\2c\20std::__1::less\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\20>\2c\20void*>*\29,std::__1::__tree\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\20>\2c\20std::__1::less\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\20>\2c\20void*>*\29 -std::__1::__tree\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\20>\2c\20std::__1::less\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\20>\2c\20void*>*\29,operator\20delete\28void*\29 +re2::RE2::~RE2\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::RE2::~RE2\28\29,std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20void*>*\29 
+re2::RE2::~RE2\28\29,std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*\29 +std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20void*>*\29,std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20void*>*\29 
+std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20void*>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*\29,std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*\29 +std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 
re2::RE2::DoMatch\28re2::StringPiece\20const&\2c\20re2::RE2::Anchor\2c\20unsigned\20long*\2c\20re2::RE2::Arg\20const*\20const*\2c\20int\29\20const,memset re2::RE2::DoMatch\28re2::StringPiece\20const&\2c\20re2::RE2::Anchor\2c\20unsigned\20long*\2c\20re2::RE2::Arg\20const*\20const*\2c\20int\29\20const,operator\20new\5b\5d\28unsigned\20long\29 re2::RE2::DoMatch\28re2::StringPiece\20const&\2c\20re2::RE2::Anchor\2c\20unsigned\20long*\2c\20re2::RE2::Arg\20const*\20const*\2c\20int\29\20const,re2::RE2::Match\28re2::StringPiece\20const&\2c\20unsigned\20long\2c\20unsigned\20long\2c\20re2::RE2::Anchor\2c\20re2::StringPiece*\2c\20int\29\20const @@ -2286,20 +2366,20 @@ re2::RE2::Match\28re2::StringPiece\20const&\2c\20unsigned\20long\2c\20unsigned\2 re2::RE2::Match\28re2::StringPiece\20const&\2c\20unsigned\20long\2c\20unsigned\20long\2c\20re2::RE2::Anchor\2c\20re2::StringPiece*\2c\20int\29\20const,re2::Prog::SearchNFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29 re2::RE2::Match\28re2::StringPiece\20const&\2c\20unsigned\20long\2c\20unsigned\20long\2c\20re2::RE2::Anchor\2c\20re2::StringPiece*\2c\20int\29\20const,memset re2::RE2::PartialMatchN\28re2::StringPiece\20const&\2c\20re2::RE2\20const&\2c\20re2::RE2::Arg\20const*\20const*\2c\20int\29,re2::RE2::DoMatch\28re2::StringPiece\20const&\2c\20re2::RE2::Anchor\2c\20unsigned\20long*\2c\20re2::RE2::Arg\20const*\20const*\2c\20int\29\20const -re2::Regexp::~Regexp\28\29,operator\20delete\28void*\29 +re2::Regexp::~Regexp\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::~Regexp\28\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::~Regexp\28\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::Regexp::Incref\28\29,std::__1::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 
+re2::Regexp::~Regexp\28\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::Regexp::Incref\28\29,std::_LIBCPP_ABI_NAMESPACE::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 re2::Regexp::Incref\28\29,operator\20new\28unsigned\20long\29 -re2::Regexp::Incref\28\29,void\20std::__1::__tree_balance_after_insert*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,operator\20new\28unsigned\20long\29 +re2::Regexp::Incref\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_balance_after_insert\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,operator\20new\28unsigned\20long\29 re2::Regexp::Decref\28\29,operator\20new\28unsigned\20long\29 -re2::Regexp::Decref\28\29,void\20std::__1::__tree_balance_after_insert*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 -re2::Regexp::Decref\28\29,void\20std::__1::__tree_remove*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 -re2::Regexp::Decref\28\29,operator\20delete\28void*\29 +re2::Regexp::Decref\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_balance_after_insert\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 re2::Regexp::Decref\28\29,re2::Regexp::Destroy\28\29 +re2::Regexp::Decref\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_remove\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 +re2::Regexp::Decref\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::Destroy\28\29,re2::Regexp::~Regexp\28\29 -re2::Regexp::Destroy\28\29,operator\20delete\28void*\29 +re2::Regexp::Destroy\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 
re2::Regexp::Destroy\28\29,re2::Regexp::Decref\28\29 re2::Regexp::Destroy\28\29,operator\20delete\5b\5d\28void*\29 re2::Regexp::AddRuneToString\28int\29,opa_abort @@ -2317,8 +2397,8 @@ re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20 re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,operator\20new\5b\5d\28unsigned\20long\29 re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29 re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,opa_abort -re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,operator\20delete\28void*\29 -re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,abort +re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::Concat\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29 re2::Regexp::AlternateNoFactor\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29 re2::Regexp::Capture\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\2c\20int\29,operator\20new\28unsigned\20long\29 @@ -2329,74 +2409,80 @@ 
re2::Regexp::NewCharClass\28re2::CharClass*\2c\20re2::Regexp::ParseFlags\29,oper re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,re2::TopEqual\28re2::Regexp*\2c\20re2::Regexp*\29 re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,operator\20new\28unsigned\20long\29 re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,memcpy -re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,operator\20delete\28void*\29 +re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,opa_abort -re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,std::__1::vector\20>::__append\28unsigned\20long\29 -re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,abort +re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29 +re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::TopEqual\28re2::Regexp*\2c\20re2::Regexp*\29,memcmp -std::__1::vector\20>::__append\28unsigned\20long\29,memset -std::__1::vector\20>::__append\28unsigned\20long\29,operator\20new\28unsigned\20long\29 -std::__1::vector\20>::__append\28unsigned\20long\29,memcpy -std::__1::vector\20>::__append\28unsigned\20long\29,operator\20delete\28void*\29 -std::__1::vector\20>::__append\28unsigned\20long\29,abort +std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29,memset +std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 re2::RegexpStatus::Text\28\29\20const,strlen re2::RegexpStatus::Text\28\29\20const,operator\20new\28unsigned\20long\29 -re2::RegexpStatus::Text\28\29\20const,memcpy -re2::RegexpStatus::Text\28\29\20const,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29 -re2::RegexpStatus::Text\28\29\20const,operator\20delete\28void*\29 -re2::RegexpStatus::Text\28\29\20const,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\29 -re2::RegexpStatus::Text\28\29\20const,abort +re2::RegexpStatus::Text\28\29\20const,memmove +re2::RegexpStatus::Text\28\29\20const,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29 +re2::RegexpStatus::Text\28\29\20const,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::RegexpStatus::Text\28\29\20const,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\29 +re2::RegexpStatus::Text\28\29\20const,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 re2::Regexp::NumCaptures\28\29,re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29 re2::Regexp::NumCaptures\28\29,re2::Regexp::Walker::~Walker\28\29 -re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::resize\28unsigned\20long\2c\20char\29 
-re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,runetochar -re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,operator\20new\28unsigned\20long\29 -re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,memcpy -re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,operator\20delete\28void*\29 -re2::Regexp::RequiredPrefix\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\2c\20re2::Regexp**\29,re2::Regexp::Incref\28\29 -re2::Regexp::RequiredPrefix\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\2c\20re2::Regexp**\29,re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29 -re2::Regexp::RequiredPrefix\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\2c\20re2::Regexp**\29,operator\20new\28unsigned\20long\29 -re2::Regexp::RequiredPrefix\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\2c\20re2::Regexp**\29,re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29 -re2::Regexp::RequiredPrefixForAccel\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\29,re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29 -re2::CharClassBuilder::AddRange\28int\2c\20int\29,void\20std::__1::__tree_remove*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 -re2::CharClassBuilder::AddRange\28int\2c\20int\29,operator\20delete\28void*\29 
+re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::resize\28unsigned\20long\2c\20char\29 +re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,runetochar +re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,memmove +re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,operator\20new\28unsigned\20long\29 +re2::Regexp::RequiredPrefix\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\2c\20re2::Regexp**\29,re2::Regexp::Incref\28\29 +re2::Regexp::RequiredPrefix\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\2c\20re2::Regexp**\29,re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29 +re2::Regexp::RequiredPrefix\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\2c\20re2::Regexp**\29,operator\20new\28unsigned\20long\29 +re2::Regexp::RequiredPrefix\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\2c\20re2::Regexp**\29,re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29 
+re2::Regexp::RequiredPrefixForAccel\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\29,re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29 +re2::CharClassBuilder::AddRange\28int\2c\20int\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_remove\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 +re2::CharClassBuilder::AddRange\28int\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::CharClassBuilder::AddRange\28int\2c\20int\29,operator\20new\28unsigned\20long\29 -re2::CharClassBuilder::AddRange\28int\2c\20int\29,void\20std::__1::__tree_balance_after_insert*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 +re2::CharClassBuilder::AddRange\28int\2c\20int\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_balance_after_insert\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 re2::CharClassBuilder::AddCharClass\28re2::CharClassBuilder*\29,re2::CharClassBuilder::AddRange\28int\2c\20int\29 -re2::CharClassBuilder::RemoveAbove\28int\29,void\20std::__1::__tree_remove*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 -re2::CharClassBuilder::RemoveAbove\28int\29,operator\20delete\28void*\29 +re2::CharClassBuilder::RemoveAbove\28int\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_remove\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 +re2::CharClassBuilder::RemoveAbove\28int\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::CharClassBuilder::RemoveAbove\28int\29,operator\20new\28unsigned\20long\29 -re2::CharClassBuilder::RemoveAbove\28int\29,void\20std::__1::__tree_balance_after_insert*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 
+re2::CharClassBuilder::RemoveAbove\28int\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_balance_after_insert\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 re2::CharClassBuilder::Negate\28\29,operator\20new\28unsigned\20long\29 re2::CharClassBuilder::Negate\28\29,memcpy -re2::CharClassBuilder::Negate\28\29,operator\20delete\28void*\29 -re2::CharClassBuilder::Negate\28\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::CharClassBuilder::Negate\28\29,void\20std::__1::__tree_balance_after_insert*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 -re2::CharClassBuilder::Negate\28\29,abort +re2::CharClassBuilder::Negate\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::CharClassBuilder::Negate\28\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::CharClassBuilder::Negate\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::CharClassBuilder::Negate\28\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::CharClassBuilder::Negate\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_balance_after_insert\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 re2::CharClassBuilder::GetCharClass\28\29,operator\20new\5b\5d\28unsigned\20long\29 re2::CharClassBuilder::GetCharClass\28\29,opa_abort re2::NumCapturesWalker::~NumCapturesWalker\28\29,re2::Regexp::Walker::~Walker\28\29 -re2::NumCapturesWalker::~NumCapturesWalker\28\29,operator\20delete\28void*\29 +re2::NumCapturesWalker::~NumCapturesWalker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 
re2::Regexp::Simplify\28\29,re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29 re2::Regexp::Simplify\28\29,re2::Regexp::Decref\28\29 re2::Regexp::Simplify\28\29,re2::Regexp::Walker::~Walker\28\29 re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29,operator\20delete\28void*\29 -re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29,std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29 +re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29 re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29,operator\20new\5b\5d\28unsigned\20long\29 re2::Regexp::Walker::~Walker\28\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::Walker::~Walker\28\29,operator\20delete\28void*\29 -re2::Regexp::Walker::~Walker\28\29,std::__1::__deque_base\2c\20std::__1::allocator\20>\20>::~__deque_base\28\29 +re2::Regexp::Walker::~Walker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Regexp::Walker::~Walker\28\29,std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::~deque\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::~deque\5babi:nn210108\5d\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::CoalesceWalker::Copy\28re2::Regexp*\29,re2::Regexp::Incref\28\29 re2::CoalesceWalker::ShortVisit\28re2::Regexp*\2c\20re2::Regexp*\29,re2::Regexp::Incref\28\29 re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::Regexp::Incref\28\29 
re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::CoalesceWalker::CanCoalesce\28re2::Regexp*\2c\20re2::Regexp*\29 re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::Regexp::Decref\28\29 -re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::CoalesceWalker::DoCoalesce\28re2::Regexp**\2c\20re2::Regexp**\29 re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,operator\20new\28unsigned\20long\29 re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 -re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,opa_abort re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,operator\20new\5b\5d\28unsigned\20long\29 +re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::CoalesceWalker::DoCoalesce\28re2::Regexp**\2c\20re2::Regexp**\29 +re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,opa_abort re2::CoalesceWalker::CanCoalesce\28re2::Regexp*\2c\20re2::Regexp*\29,re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29 re2::CoalesceWalker::DoCoalesce\28re2::Regexp**\2c\20re2::Regexp**\29,re2::Regexp::Incref\28\29 re2::CoalesceWalker::DoCoalesce\28re2::Regexp**\2c\20re2::Regexp**\29,re2::Regexp::Repeat\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\2c\20int\2c\20int\29 @@ -2410,52 +2496,51 @@ re2::SimplifyWalker::ShortVisit\28re2::Regexp*\2c\20re2::Regexp*\29,re2::Regexp: 
re2::SimplifyWalker::PreVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool*\29,re2::Regexp::Incref\28\29 re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::Regexp::Decref\28\29 re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::Regexp::Incref\28\29 -re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29 -re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29 re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,operator\20new\28unsigned\20long\29 re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 -re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,opa_abort re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,operator\20new\5b\5d\28unsigned\20long\29 -re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,opa_abort -re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,operator\20new\28unsigned\20long\29 -re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 -re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,re2::Regexp::Incref\28\29 +re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,opa_abort 
+re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29 +re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Incref\28\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Star\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Plus\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,operator\20new\28unsigned\20long\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Concat\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29 -re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,operator\20delete\28void*\29 +re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Quest\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,operator\20new\5b\5d\28unsigned\20long\29 
-re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,abort +re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,opa_abort +re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,operator\20new\28unsigned\20long\29 +re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 +re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,re2::Regexp::Incref\28\29 re2::CoalesceWalker::~CoalesceWalker\28\29,re2::Regexp::Walker::~Walker\28\29 -re2::CoalesceWalker::~CoalesceWalker\28\29,operator\20delete\28void*\29 +re2::CoalesceWalker::~CoalesceWalker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::SimplifyWalker::~SimplifyWalker\28\29,re2::Regexp::Walker::~Walker\28\29 -re2::SimplifyWalker::~SimplifyWalker\28\29,operator\20delete\28void*\29 -std::__1::__deque_base\2c\20std::__1::allocator\20>\20>::~__deque_base\28\29,operator\20delete\28void*\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,memmove -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,operator\20delete\28void*\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29 
-std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,operator\20delete\28void*\29 
-std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,abort +re2::SimplifyWalker::~SimplifyWalker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,operator\20new\28unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,memmove 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::StringPiece::find\28char\2c\20unsigned\20long\29\20const,memchr diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm index 667b9cdd4902ae7d186d5007302013358862ffb6..a294543e8823f45b8ce4ecfd1387e7a9c8b41663 100644 GIT binary patch literal 431796 zcmeFa3%F%hRp+~&`*GGer#28sOmVMkZ}Q!*(k7B_vEU+JYSm`~Noj)*l+U(1M2uHY z1xQsYWxQRfI*-+J8JMbqk> z^}~aaevkC7bx(Qd#{f|S>fahV%G2!9L4NTw4=+FK(u*$q;md#gl0!fK%yW`-vFKkX z$zsvJNydvs-;z{|MUP6BR5Vue7ya{#a)! 
z5fqcQQ(9BI`lly)Bm7I!u|sTcS`BM)$iJdU^I@4jh|aUJ9A?X9`rtBME|-_f2WQz3 zkkhOjt`3R>==h&t%~prmFiSGJ%U6Lrqq8C%<_WNuLG64D-QwX)qoa#W=}_D$mpLI8DdNGF9TgtVjy|%ZedI!(k4jX`Uy;6#4;DG9F}u zQwM`Q&j}lO>)eke=WzTx%g?9e~ z4~-2PB|G5asO@UlY}UG`(i zzduC)Sh*Ek`1DIIzUW}`9}KR3b-+6Gtb-RP7d}`oIBG8Xkskp)hkh*ir_`GF(u0uZ zu<*u%e@@3g2TwI=b^bfjCx7o#uFHNV`!CsRvNz@*$o@EcQ+{XmC)pdaH)g+?{YLh| z{Lk}$$Ul*PEWaoJv;4F9=kj~=_2QlRjm57PuPlD1cv125#r4HYikB6?RNPSfUh&c5 zj^drgUl;Ex{-*fb;_r&*mOocsU;aw@^75wgo#ngAcbC6k-c^3E{KN9S<@?G%EAK4- zxcsB?1LdEWcb9)rF1`MR5BYHUjx>Y2jy__Ye@QdhKTFT>%Ve5md%FMC$>eR%R+S@b zs|7rrmsPf>PIhI*O0qYZ=F3IZ@QN<;J$1e}Ju=Pfv{_zLol%{wO+I=xuX8cW?6vb( z65grmZ`b9j4AM+yOK12e%0~yWOAF{2}8HyWLh-EG|R!2 zjf)ZPVpck`^7B>_AihU02|bLs&imE=LH$iWt-k=0(eE^?izZ#06{?~NvTVUjg!nc?Of5E%Z{I!fI;Q}^od_G6sYTy zLBX$Ju=Kb<_znV4!dAAL(`Ryz$$6vfNJJ7_pf4hU@+gD~7ea7_=<-p$rORT5wh6)p zO61}Kn4x8X$%nmt$rrAWte;W=x7GFYjB2!>0}^A`TSlOzO^Qa#qL@ck`hDO?_}KW_ z059saC-5@_zQ%+m3xh(CTZOX|XLK>JwclsrC#~d&c4<)k^0S#_*8BM$)saq$z-Lcb z@q6pQE{j>(Lz7@%VA_20XDy_SXn$@+*J2j>zc|xqRpk*GHrs7eo8stw} zXOs7-zHy;ipy1sqa47)(DhL!Cs~+Po=NX!@W$h;2XQB&1)PMKfDeN9lTwRl<*+%RJ z_A_ldON?cbPPb=_2a9C6R>RREK|#D&770aWP5=p9U^@P9|I=~ccFP3t+*T0RclJ^8 zfCBQWEdaUY`P%^GKW@fun;@ib?1*j=DcDbar2LmYEQS(j(i%*2NXV)OxbIs|rti1R zb3zs@*JNT`FogxfRVrja-bCswl_ZAbl4jJfU$D{R7HLD{77#-X=a&r2d-9Tb@SJa8 zDlEC>=FJx#nCZ39QS-ZfCW`ojiLyQu-7zmK7RP|}31cX11k$s$WO*>P(+FycJz4ZY zeVKt8EM*=Ui}yjz9aOd0yt)r+8u(p;Q#Pg{iU#ag2zjB2I;LcABzg74r^YK!h8oZ7 z4{ZKXJGd;s6b6^Ls3FS=FVGQyMX% zeg8_LhsW|>gd}exBuKeAe6?iGMe}GV=mz9s2SPY#{w@h5J*V4ny*HVbE-}x{li6Tz z^1VDCSZrigKGlrzx_oq=)a7(gQF$0E%xGFKC^i%)!%&?kKD($)yN$$5f?s8p=1F8VdPe?y4=le=cQmJ&(qZ=Kp<3~ zGM^OwX6ON=#pF|lOe2AGGDSaUcFv#+&<*$11U&; z1kK_!5%%HxIqZkMI-g+3GQwj7!}bOytV~9b+TJA z+t9j=mA5t>;8TKy5rVt2EUW_)9t^>5dh#NmlVFlyA-v#0)zRcCq2P41f%EGIQdv;I zT62QL7G&@`J{MYWG%cFEH)NgX^`4}qc#9ZU<3-%95|OOPyv^ruBo_6D|~gx#RC+R63+7cv`rLBKuoJb*%|$$${rR7X`AE|o9_ z0%p1v(7ROT4aUW<*#<9>ZO|OEWdo_{))W9&$!vJb-u$`WZ=5wWjH!cq*rW%r`wS%A z=;dMaISNx1=3pEk275;`Dp*|*Qk#3z$0kzdR~(rR)JQB}KUxoNx}0ZN83U)*VDAuh 
z?=>F0*38vtU*$o+NyI4w%K|D436j#8=$RTc)+UvD4XQCKv6eKj{J^wi40>bO6j)^Z ze1?GyRS5dg+H@h5C&n7|`5G;14b+$HjK!-TF>e1rlflBAYJZ>s~*GA7ZEc|B}N;_ zND&PRwB~!(R^f#1A1hkm_t@^_LHra#fc^(#W%NIJrRF6w43jY1d+gCxX`)J5^9^e% zV8ZN8WH#ClugW4dq&AObxmjpKdQFFS? zZz4VfQOR_LmdmqX=me%YFk>9`3X_EaXhuNt_17I=PegY8&+q8=L6hH-td%v`+~76* zxBm=3K>kL4!OBqVvA-TS#i4@$6v&{CY*wx|kJU_BZNB*lRIQq2-Vns~GDh=~BkbIj zF;9xMX~oJNKofqpDoc=+LDehtwmb%G2y*Vqz@Amj>IEyKdWnulhxoJpx%@ygI(+ck zl7zN0+OlfE5%9wMd0nkdm%!*CGMSm1))ZDdPq%IjRXAM{Z~&toCg-i>5~lvz%Kq7s zc&MoPIWS$N9^Hx0S3orgXcC@$?%sRv9Uj;(7ppnCHXRE>C`FJ4mTq>?yRIZyP1uQV zn3wudKm3EkCmfnmf=y#j8E{X7S0dJ7)mM@yp)y!b%UBMMT( zmt+I{g$E*59b#^nAH$8tIya??>OHkxdoY4w7hOR=j^UfOYp#kk%~NhGt_Lm6k*AsH#th z(9O6x?ND>t<^9gYY9%OBPX_?$09-X_mM&>wSPMYl>IsOX9zkL_QvKB)X}1~AqJj9q zbm#b7vZD5E(WyfEZFeh6-rFdQIQF&_dyB(G?=F-1qp^l@iDE!*&R%`l39Q`pz) zN5Q2e2Bo;U0yYILOb@{+il`E(Yk?ML-UrH-1pOxHT3Q&uBD%ao@%tAMLtA5K3(Vww z%q&G}Y|RDiAX^xyRJ&*a@x?J5>w#83qDpdrW5NT@h%oJgjtuc%k<( zlXph%MaxW!oytCJJ423IUtv};`+fe5BQuRP-K`(;-i(n^At|3yqRGg{1v8K5>8jiA}Kl9QW0l=U6$yV5mPl4FAtIQkY z)kM??Vj~Sxs}mbEC9$D793%)hAGA34R=FInUGxNO+<~v4( zQJt|u{2U5nH4V>&gcwe=B=brJ^FaKUsHH)00MhWAv@H$P#6UKbqF312=$y_GkOi}q zBk+)x{>(@PYS@VsDH)~|`()5X;?l&q3GMU^h%WZ}| z{^*QWQGDU58e1p3v!r@)I_Js{-JyYInuE>9u0oGRPr97^soIljpS;^_!V=a?wc5y> zQW_Ly({8*u!ih9p4=1lxt<&oveqIPtN;`tPJMDGz9?HoF{6Y}|`CMQ+C<*52DNK2T3rsz?Z+~5$kH{4Z^B-ew+PVV0RQLNr+09%yWNt+`zm@Z>g)Rq-xRIQb2k$1*PBy7h*h zKUbuqC^W9i_Rk7O3X@}kBSw~KHhGI0IlalJCDTpilAMy)SIX^J!|=Ou&CNC*4KVaV z>Ct}au~>SnSLzk@ZYhVVYIveF3=mYB;@3o*kSfki8>PerMZaB(F5jW zSP4K5988)e9Yme*AA@;8T zKqa#w;L9p4lt7uU)kCxH!1lMFhks=wn&pj!YOtlxPF9-Yuzr_GpgE|zR=Ns=;i*c~ zSUNbQ(j63K^=O6SCYSdyQr?2QV8Tp_V8^X6B8-roq)#Ji>alZ~#-lm1B#&lEWWm4S z3a+P{@qWpUv1bHcm#qaZE30R0HYI7xCRXHqr~;Bs#O{V$B?y2+B2CMazmIdSH_x)q zySLx;$+gYiJ}ZUHSPFno&85osLBkZ9H2E_dm=n66Pe~bg#^fj!u!94u=VFTiMmr

n!#ag zETDT_{~6vcHn>5a+&?#?j~p$QmW-*fD2&Z zGHE~pMsP}U70tIlHbGI<3|c0XAXBN~fCA>C*bGEb`gsuhy8qgfq}wF+{bywF?c`YN zESUnyG}W}|&5~}8Ox`6{Rl;Igonf=bHL@oc*&0+b`5j3rpDK83+iEdtmJg}f>>ZyY?c|RmLzmDra$?yo`ya7xC=-567ui?ZD1{91q!eM>#$m8 zpA9{WP0?~2T@M=@#>cbF+hX2(bP~zm4Rkj0d_0hdq>0PxArBy|vd3}D@kgE9C%P71 zYtf}a_2B8ShRp0^O?4b^mMUnVb!bF}2nl#KPKD_x>cTowM^J9r`Uz<=-P34DV(hDwzDdUrKWjVy+ylTfJY-YvT2>DnK>d|3P?gG z^64fpomy*&P=Hl`S4{1!!s{63ciG6xW_BJy+p?({q;VDP2sAuHAtA+(g`hN3*vqB* z{ts)b-?Ma_*ZgydYmOC@pECiX zF$&q>O~cGAX>9JDs`!%3s(99`Xv+2P& zZeDZie1GHcE{3f^neCBiD<&^?Zb_J%4iFVmHLu61KQ{R+U}usS&6!W2GmYQsHLI3T z^j6tQp7s;UM{{@Mxx1ygyK3%kdG2n7yTZlwERBNLCSKZ{Eo()>7-UX<#{tl|BGlr- zMq&4W>YzO8bR)YVV~XnbVHaE^IV5)n&N@pPSxPKu->q)U+hG=jgf3sNP3RiyM>0NZ zK4WW3=DsHqdv!uUPaUt#M&MBXjMf4a5RVSZkZI1nc{lzXe`kbC66z0XTnB=$zyY+{ z%x3LPWg;f{Ek;2RNH2>xJdsB<#rNt~VG$C+Z}b&G8(lDJh=r+{3>4C$NPU-j^LyTV zlXzu@TWpfnoAwnAb4syWr>Z=fywRqGhKQd$)I%1sxq?o-H3LBs5;6z&M}&ei(8*Y) zEffI`TYRHe6VFiufS6jC9!czzn(+J2JC2oaHm|yiVfln6wS+bjP69Fl#F?~SS_4b1 zWvj`XdxL8}d1Kog0cd<}!>}5{g1KEaeOwWo?iXCU6FPFGpq=jVUlK32^Xrw_C(e#{ z|EI0!>`EViMjw&ZCRh|?Y%E1_wX7>N~-kO1E=_$FgB?(=9U)6 zx+%vD)guLIi~W&@=avVO>ZfHw!o|2ai~nOv998lFyLbz>)t95uXgI1Se=QItCfvc3 zNR3yL63eqxKO@YCSrK?u{u9QECO=E=-Q<`2ZI0}-;)VH-4WcxOTt7o57NoE#3R&9u zPsD+kS2zDtrat#4n*VB#S+3T%JOA+oCV%T=uEWrVHc-vQx4R5&(kWXijCPsC0prC1 z*E3>tmlG2!W$YnhG@} zx=p1M`|Zw76RBnH*&Tb|wXX8;I@7uhHn>t3^v6f#17N;qO}*w?woXp2b^1EF)_=1! z?^5<9Aj)-|GJO;}D6&1E1KO+7iQ?`?Hk>ELh1J&~8J7q=FZr zuUIwol+ip6qbCGfU1Epk9nqpZNrji-ec+@Ic@t*)qdT(ZBPOR*-24TXSdP=I`2^Mv z>m+IOcM9-lrrvqrSOcQ3d8~a>SOY^@^C_V!163avs!~&I!tszmv^0Pr97gb!b;JHz z^I=E2Ka`2i6C>Sj*sF=a#J5@@!9g~?;OlGNJC?*(OPG1H7%8kBPju{27T*?q(urkL$A zn~1+*EB?#_VZV}NEu}4jnX<`CVbU^K85>TIwyV=#!c`a@lOd2lUF$H3H0erPOhT)! 
zw8bQgO2TBw7*Q-V^tI4a@Po9JH-pg;m3{HYIK?>3MBp^?rpy%tqKCvZ7fim3Ei}MI z`JP7NF&t#9FU0m{wuY&(_L+haGfa(jp_E~2tf6!<|27QKf%M)&%}s_VRx$gCh`F;0 zcI~e`_xO5nk(y)Gi@zaVifiJF8zFiu0q`zFT5f2o%=B&a2KSCzE9D?C*Tcp7{~;F1S3x5q$A<$#3){5#ig%k6yHx)ydQWPe=Xpt z`Fm$YKvuJ$wF+nl520*<}6^IBJ&h*Ss{qfym|fjIyWI(gt@Ak(G9@r>Z2E135&0l|Cas! z)o+Bbq^!`nA;!|bTig<;cH0{B&m2KKVeCCy_q=bh7sE)=q%%qR6>HLUk)#uvbPJZV zJFi-QBJiMfAX1vzDBvmJyiVawKbY#ha#*Y96k72=x-qU#6!&e4n#$4Tkl#Ej2Bir| zV`!TVSl4n4L`jZcbC(t`fMYg-yeM0jRxw|o!KKYH=2PZHyP$_lJ5U*31->USGPT!p zX)_dGEaEbF8(HhYnA_KuMh}M4)`L;bK7a<7)=%rRzGAcC?cnd_a%l-hnoCP|gvB}l z5bRkFJ3o4n^a?x}qNT(N(!QRS+%WtUMDk2rNO0&0W0dms*vapP&;f&<{6X*f z_q*%nPm|_@Nka-6p62_!93;{h$C%M-8Y)=&daR)G$@_ba{7ce&HffG`jaXWF^@%Y7 zTyL!i4y5@57Ka+mhE7=`GlxjSx?6AC-ZPKYTCTHgtrZnZnqfs!!HS`R6-rhn$BB(T zgnca%0QMpcZJpMLUG{uaTdj?yt#7L38YdTZTgtM>QIz$>YYVZ3zGBrtS$V-E*$hP4 zL7*&=tUy`FS)&w$N0#43*%Hb^YbeW#+?+4qGi*s-09}#nJS)S=Z|oOK8NDP9KWgRy z#%1<}#U=TDUuax5XU*Z#P%KQ5WFef{v~rjI0bsqa0(8}L=rVp}O4nrLKZselP7Flc zMvO?>aT^>Lk}SR&W-nP(+nT+;9AL;n2Kel?RJUbK_iW-?@Lru*1rF26A4=zSGdR_o zu%r@)b;)wIOGC8MW_qp*f{Xnm5BfpyIX&mT$paa>o!4_=nN>Q~2GR8t^c=hU8P!SZ zxlf?Rz?_tr)>8~Wh4~bQpQ71iYEIA3d~AX*;dR4uiz~dw^-xb&Rj(WnUVnr!b;AN^ zeuiCov;`#6-$VvPm#ogJX-ht3F9lIse!BQAt30js)5UN5Yd>B5meN*7&aeGw84AUvb*6=$!otX$j@;JT4{pq3 z=LD51V7Q%hbJl8r@-mT;pRS;CodNjn$uuiyd<)MsBq$3j# z*Hjag+))WW?o(&*PQ1CTZN{F7qtPryywPP}xVc@Ug>cNwV8LgQ7o%EsUMf3JG|J#B z%%!l%Meothle}?eW1YR_Vav(^K2_}2Y%Dh?mbN@hma}Y=uqNMH@Np5N?J1*qmfp9p z;2G_L1>awjcfn>DqZLCKz)529<8pxY;_sYG)JR=UfrBV`v|-EzPilgPM~I!o#m-dN z>g8p8_ZE%Ogyi2uk}Ni}7lEXb(@xmpPYsd=%J0r@xak^vw`5N;lpWGHIzTR(d?J8OYDqibPt`8MohiYk^7iiXjxdDR zzuX$VRX_A`v)|qpZr|^>?+Lee`0btH_LF{lPq_W4-+nsWzQ~byT^PuhVp80*$Atq;Gn2L00B4%iIX5jNxHW0RZ1XeqS;d)+q%NGY{}dg)zV zsSPl++m+hDy!2%Oe6`zMzEPJ|1wMEAT&R947jo}#ihY&Lgm#{PHB18t_?gHq8#$-? 
z03Uz~>3&#)NP$y)QCjl(G;T*6KZI47g`)|ipm>0}CJ8(%wK~ksK4kftw!25Rkh<$$ z;UylNLu-#BP{4|}ZXd(#uFV>Mj5WTCz{W{5{$yHvKgp`2(E3xkza!bK`KMa*Je!e5 zxozMtrM*w`&QfUq3hqC;Y5P}L`#hUfC(-_OnEcI)h|?{H4z5?hwL1K11NN^^tqUH` zR<_Xv?Z5-eqT5977IZT;tkQ?0yM5=3la*^FMv^Ij8>k9r)nv=6Fooc!@FM#xAf>gl zt83?R-VP+z&SPlD!>wxPJ81y3_c7Maqj>QgReZ9w11(yBe#jewoO=FLHS|;( zayv{7T|x^)m7Z!1LGj($lU4IlYYD8kEgkWeAe^3Gp_Z`26@!Lv77UAd zvqKewRnVBBfTF*HFX)8@RxWlzWVG`|7v z6+Y9w*0UXY7KMT2b$ix_^Ib=lmYzuxqXR%xfLbf6dfr1xx( zRvr^O3r0uYPS8_)T#XEC%g*$z+c2A)W|WGuOYP2nM%pQ=bA`3q(IjWPWs7^<+^xPQ ze<8_fep^Or-mc8tEK`K8&&X+)KBF`3X{3S~qgFwBi+4_Br6Ra_{S<%wv@l-l82A2v z1fh*DYv#7W83qSFSbD+s3ETB$3b7A6@WVm8?&pcLHnl-F0pic|%Fx>VbB{QHyA6vKAKPwAZxtc|)n6*AbSV5hyp8QP?w2TUo+dL9^$kVrwN;v|x)oy#CW zhsT;tOrEHS+zap6?BEbM%~*Z)wbeENs_kv^;;x{sdXl%&n|zW;WD)31mD*MfwZNDh zuI_c9p*^dvrF%=xsNOCjA*Jp%y{tJ3sU$=+bJhF#J14i3Sz#+PkkTHR8d;#C&L-r! zJzAh*|Iq>!^=OWYuAy$kRN!z2wigdXMNWVdQPP%*1E=D^M2IIFAW@7!;73$cT!Z&; zSDDMEEe9eN!q>*eVwXe&Uui+Lc+jwfa920lY;eF@#t9r<9Ku2-nUHTTa|yiDsJ93ncG9 zS|GU|&5_*Rj}tc&PCLWKuwa;wy2>hYl>Jc>k>Ua!huL4@1Icghqs~_x+^dD_} zX_ob-B-=ChzMfC+(8HOho5@5c39e_gCDngRySCY#lX%-|>FnhF8aT{#r+z2zl}OMS zc4vQSq3h3B@%utCvgbuNQ_pK$>zDq85>2NYR8ZU71hks8v9~GceVG^R*asJ%cl>zE z%rpnh3=+{grtejw0G2MSDGo!;_SlK6b@;kSRsCa@>4Ou&(dt>GTAfqvuzoJvK}7(@ z=pLte;B(>Z#5cC3Nglu6mdPkvM$Yg*tY|-LP-Un{7n?JtxGkPhH4HOt9=A66Q(=l; znOu*PcQp51$#A+T6D54r#o*I7DvG@#_vpx84*Ixv2cM^qHn_=&0>CtU^;I;?n>D5@ zOJwQ-L^XADs|LZ4$Z290zO)A4AWenNmR)IBRx*YQ9BF$#fD{r8pgZ^3rJ>RO;Km{4;>J0YgR zn7|a~1C|3DtQ}LxyhThU%@Sm`nc|q*lE?x7E#)+9`I-%91xaFiBlibW2_FvYfBGri zFt2;89Fc{zIw3|rw+VwvN44`etB&5!UJ=TscKYLf$pVMG?)u{!=Ep+!>*-wonU9*8 zDrU8#p_K~~XE2C1vF3BqG2VSHmNzgkP0Rg47j;3b1yRN)XeHe6`8ovT4=4icQ?P&` zDJw1XOJ6huyfUCZBH*`mxLHs|_kn=7Y)S%NvM+W`;puRdYHqe<30Z2AbpgQywN#rF z9$tyk?>5jU;?$Y{PbgUssgm5oWseJ_y&2fyo`lz|6KWSng!V zwDUa5z#KLP<@j;JP8>sIJ0<(HVnpIGh?KNu*!y-8)#{5D8Pxc1B+&9cg55;R>AjKl z)d$5Pac}6~zH(5V4BD=;LR$B1-K2n)KQ5wJI=X-!; z{>4J~*;;*=R4>ZAFAa7lD!4*&=@-AkXtrEx1|FlgPdJAnbGpL^tY)Kd_4`_6^Y) 
z5^6g=(fTn?Oef}2ZNCTm>f=fDAo#W6$v?_zYNNf=MAn!9wxvp5{m+zdKg7s^298jY zg2isk_lP((7O|5DN1S)VvCu^GIGd4pHgz}xhtIrGPXnV3!uzT_w6m#4;r!?vLCd$k z;(=c6VP4_1-@MzWHYig5=|rzw#<)J~Me_(&g_*2h?d(EuGt$si9>DVB>O%YhV0i^Y z4p6;zTJags{8%75X0bds{~tJen?)p!=$i&O&H)puMDo9-#il9)2O3vkf=s|kd(jiN zllLSVTUArx&HHXTzMgn;`s7vCN5ABi7V7BGzFB9@`+v1{0Hm zRK9<|SrtRWM!&RrC3_NycJLPSxzRd-imnSH&xkV3|g#hvA(tqD8vr;62pyW%{xaU>-PH83!_r+ z_Ni<-Vi&Cjw3l7U`knlW{oS;4ql*?HEDY@*^0se`f1e90kpntce~@Qopi`J-?@*&) zjKM_$`EmK8T05Uanu33sjwFm^(JM97#+o-Ja{eseIb#($@J1;z_%4$=->GA9c!4D` zN1x5Actbm(cyhJiv9I)5)OrwU2vNFr84BECXB@gG#FUkk*WrE)b3w=!0`i8#+a>H&T*4O4MSucm|VSsIgj|7rB0yG6iB3>YPWL`k(E_1O_Ab& zo^K7hK?~}fWsuEG>zzA7bkHe|n6S>VVpx$1b5j#gEYd7b6Nnza50;8eF}2%eVHn0Z zBDOE&ie>Za|8caY3$PipPd*_sPE#Yg#kVzzj_2~obK)qvM`TFX7+dMm01sRxbh_HH z(}KZj_0It>p9e|1nUS{B(Sg~Ls3=D#YCxgn1^5n7OX@lGm}SVY$TOW_j?J6geWwrv z2|AuI$4WdQ3PhgJ<6f)q#J8{u17t1^eNHsdYC3tok-%iybqb+Hh(xb;$U%BTW#`PP zCVjlN6dMkk;|+H$0n9RFieN+7ydek1P1tf{sW?0Uf|&1V93Y%ycDi8LEYQLajLs}9 zXGKmx=LHTj?rpIlBY@sh zF)9$zc4YfFG=lU3m2UHBk-trN_G7`Lac8;M!lTgu1D*vonzePv4Tiz@Z%$Bk6MEe! 
zk$7~Ky0x#B0B1URwr*X$yB>QWn>Eqv&Aw<8X{r_77FgZP8xJBJ!*`mR@zB!LRgQ-0 zLMF}%R%2UvfO!CEdum3xS`rAD0g;?<-40YHnXr@LiDeg*ETy}kqEJ|rnwGA1H68g; zPo$m^6hVebj3Sf*W6Cx#(MB7B+6F-xoN4z#S!(3^B9Y|<7*pmh)>@`~Z`yv(0+d8u zW!M!|^7{B?E!+{B1COz)N_+5_mgd`(=GO$t>P$*gs1n*PBaK|{{)~wV`@Wpb`DAjP zl|Y8cpXdtvmrN#(d@aVFfVO_LxZnzNiz15G#c!LzXhF%o2fSQ~~TRxo>H>nWgyd`ums>|S|U zK@31W?e%Rd6rm$U_!MK)7il%daW04`Atx&GXpr-KM7o_-q!K#V~vqKDjOqh zSmwoCtCVcJRlU8TRYv0+rO%qyL-CKmfFXmrw?cFU_?VyNd?Khof4TjR8=6UPu8DC)qa?TES`B=}r&x1eb>qBoo<_!+cKUn>C zn&n3e`@YfM$6qtcudeeei9Teno}zlq{mzspu>PyptfORPD;$$+bo9!b;s#YZmoKg~ zsd8`f6fh_F_8#Mkv7D{V&0fU}eXh0KO_kK~WurA*)i_Xo4!yok%^ka8$6!q@zym^->G}e3N5lX_Oc+!ZF0$KLy%o1d29kbDpE+ zAN4`V=BSye`QFzk2(N!+LEWp$SVfKek_zIW+pJvg%W$w!0+jY%v7 zhwKGwR|;)AS!{=(pDFU}SaAe9I(7J3fWITLfA_pOUOc`7edn|cr3ucGrnn7FL+X_~ zyD>ryC!ZTmT@W$A_!@-jXY6@-ox@nS-N1~Ftc|Kv;@9Bi{&5Vy0(Qll=#Phw zuHO;Qa?pSh6Y{^~CBLA5Z+QGPb_UF&y#IcS=<`>gITH&&US5xGfbQ{~Gxof~AoGR| zGNSe`uZK4P90O@@9KJL{hik)uOxm{JemDxB31lw?tT76~YDltgM0rF7^+`4$2S3^O zGyP5s>k5jwAht4op(n?1)nQoUU@7I2x>{o_us{!`7M7v`4&|?aqi}2yG2{)cjTkM7 z1BmH(ESo-WKJ6cB0aWP-Mzj8w4?OY`=44K4li8Ap4WBhE+vq{J(xL3zdg+p1hm10E zhySM-Z#qVvR_|6EjvLJNh>jq@iN6(Tk`sZ`C}Hau6hM8arn)l?e8B)ElT$JwoxDtH zS(;LFjvPw!9&~Vj9u9-hyPfBtKFms|b~Gy&Aei%mA_SmyGYCNCeM6vkH-W&d71Szb zgnxrekeTK;bDg*Bb4usxc(puy1=!peBJ0tUe_)aMi(hhN-X{bnLZ;-v0y-hHf|7_l zaNEEOd=k!4%%@FBHI-^k;Bz#Ki5PgzMDlsCBF3nXsNj0b#kt&FJfmLH9w-W<~k1)JV zB-2NLOQb&`J!GdI+xJ`ZARE<(HjE&JnYqq)IxyrCtA9iHcbG+KhL@_R4TXaQ;)39ba;Cmso z24MCv7(ta&=%Na3#0=_Q*#AV$6-^mt4h)EzlLTXB%zpbq?Nyt2NP=PxB%2>=Ct0$ljEO=5w2ClXq$2iXG0(PuqL+$lk|4j~vP&iIdGQ zAmeVrD&%{e?0`C;Da)xVqk^Df&NX`85+5?;r18vC_=&-M~~t>&RYIxsSg%~mzx) zC)jjpRVzXagth!yz^|mgi|!cHqWj)s9*Gb71LSA6-n`eWVf%VUZfMbLNWCe`%dpR< z`Np-E_497D;38jEAr^!ij0cGDS6IQ3G|KwxwHzH}?scE9HjKJ;jqQoX!w_astRs6w zIL(@Yp+jGw11tIh0YZxzGv+xKu;XlLKg-to4t%y}5p7HDG}lW3bu7}FbXWp;5Y~}b zwTnqqW4yVc!hkRVZpVNl8ie{mJ=#B8V)>Xofr7ij;zc1>lfM)nLy*eW#`KvOBO+R4 zDGEdrw|t!qSAa2$HCOWJL?CA6&|4?5zIBK^z(8g1ilIb`QQij|&CKKDs$)u9Fz<}5 zmhn8Y*=b*A3n<)fA 
zWxFv>T@orJc^CYi#% zCnKTH>4(NFk+Qc|Lp0VJ9l^F7n|7=|`ju^(J zGGf+h zhIK?iwI63)8jLvL_^MoQ$vTp$pFr?A| zg>4qqX)lVrz}o(-j{{W}1{}IsV1PNn`9nmk#D5|}pKYT)mZ*%+HD*JrEI=oQ5iFWD z8;q5!@|!rf$V z@;$)*$=4o-C+rMFciu%h6SbOc42@}Lj4C5km*_0`4yiDhrk&C<@o}Q*DEQcY{M`ip zO~^Vt^-y6pK{Fc(Hmh@GJ^JnyG?-SQ=?iO>{pDJq5NP_`H{TJacSXklllxjJa`GZ3uSp*`VdZ?o8og7S4;b1PtZ|6%mL@k; z%Wn(Yz?c*!SWU$FmM9Q>LaFKW(q|)$CY{X04naRt*gAv&+1yeIc$Q{xc{gS+uHS12dC@+Tt$Wfp%+W5t5d}Y4pV0p1ejZW-<26 zK1G%BRiH+y8~E6KM#uPQK zu%!5ubvZG*HJZ)vQNPnfo~B6}=B*rObWMNWigMxs`eUD`e^h$2>0Nq%HkD|59t{$T z5eL8?n1!pT8ECz=@QUiKy#qeFy=;+TdV1O9uIZ*e2SE`Vr1fM3mQ`I##2X8vK^cLF zhp=sqfd<&4W$h_=ws4Qybxn{V>hQILAou7)$ua?=x@_0PPCtLA%$-b}hk_DYiz!`t zx3-YV9!GV|U-z7FrY}$ELC!@YXhdZ!)=4N^xrC z+}RGUxrDCq zn(Ta!(Cx-Re5UZb$M>lUc4B~>=3FM>aqy=I2!})D{{h z58MQA5Ff~7#=s{SCub*#eXCGhW2bZTd1aeW+`Ed&SP`BjA%nwaUe?N~QvOBO{O4m( zSLRLD>LH$_+@8+^bUlsv+PgJYY~Gk1MmE<1D@ba=)|9;$OZ?^R@ipMaz6Ub1zM3WY ziEcUxYK?Vj5pbk#Y0Q#NO9MG`$n29e-k3$pPogoi>AW#U??@UQf~g6*H^m_^t*bRI z!GNx85w`kPk!R^pg9Nx3OSf6)Y|(?wXYXZxwp80`_Z<~?R=;FlYvP;I!l#Tejo7GI z3LAswv_s8lm(PeBX?-Yf{3I1KQ3uW40Cc-Pfc&*4?2svRsJ9$uyJ=0O@68vh%JAm!lEctkRpX0oON}xm zR3(tgHAl_$;X()E&|8KL zsbv~dD5e>i0``h<4mZ~Yi-n*RQH2qWMO>5dWhzl>@dCDsaN`@j#I%kI`BqIThQ_-? 
z)SQY`Yv}Ay^ULe&1#4a6df&d>Dg+}zc9mDjoR%~~72zF+AzU8TFd(tEAaFZC+{5>upEzd^6x=uecx z!w4Rk;(2Q)o=}4x&z^Ozd#FisweqDH@eRIDPo%A`O_i1Krpz5?*&Rd#n)EOh$1L}V zeVsR_T+-aCUa%xAt>FCGP@IT)!9>F4fUm$SSIhLhzW!DdR0AFoQa*~9OxLuC066>< zf|54zTZcO#v51R^t*(AxAtC*@g(Sd11pU6jI8!zNFpA#dm9vqYI<{#w%Mu(at%fA7 zi~p=(4Kh30aO;)hUr&!ay0$u!rI4gW#(7|7x3ch2Qa{%_xit^+z(q(0kUt0sY>&s= zpN&z0 zdj+A(7C^M^w{QE+k6(L*9s;EzW6C3gGr%JUrD#%E1?R_9yX^B=BF<~rq&*_(1e9ME zujzm8)=Zb~sTm*Lt(gzqtr-B_t$kp;yB%7~8n(>YxePWu5$H1ykyW(h9;6k9ldoXf zR;pR~WX6_E;|@GBIXILMCn3iyaZD0(g_QhX`Ohjc4rL;-h=71x;Jgp zxN10ox!a_2m}RizFqyjqr=mzl zQdO6TH-9hwH7UG0944t5!Ojph#%8)-WGB+|yUrDM{d>LZ-|t=jLGSuKe%-vhJD5P- z%Cq_|rXnpG@4JI295QgY>(V%9qr2cIeqVOHqP=r!m&;!z%Z%j~YKVoccD786( zXMlWAjG|6B@5*v&0|bJX#r(-jG*Z+I5z9I~Gfy}}BK$jcWN-F;>_%%|clGi0csfIK ztorASoLns!T!iFyX6hy>#bOeK~9q%nsW)nl@9 zA?I(BX$S(Vr9NBy2_p`ad-eJAydr7iH1sI2;Y>dNi3R|zqh=9){^-6zg zKXbH$znEye+R~1xr~8r3Fr8(pc4w^(kyVjV7`wG^)` z@lD0FePNyCl_0>?ZxG;?9Wr!LgC(S_Ptzdt5}dC0fzs^!{YobpR?n}p2|$9F_e&DE zXazlX=jZe?<;3}{*|9!P>{Lh%Yv}5I$lM}HQkRzLXO{ttv0NoRvBKFnOqC_x*L+=O zl)^2anH0`j@yYj16GPhAOphfdq7pp$9nF@IoO%`BwRxq7vz40fmN0AU6#-XFSNU&; zT2Rs1D3?iJ{p^r+JekT$@Lo35SoIpKRlQp8zz6e+@o&9DB5SyW6@|hfRh;f!#Y|T) zNa__$sd4yI%yy`tn9)+b8u^FJS1aUAJ=<}<3hEvEXuyIFdV98PEzKTOFV_zO@jJvV zJ8qdicyBU&hz4j1eHCO(80JIjmB15fxZVLlc)$>OG7eAFS9rqECiQAiOan#)uk5HF zTt7tG{vodI>3CnwA9!U&a#O=JEy*pCL${_Vxdox%IjzT1y3-ZzNN%q-P^_;|?P)Px z#-0;c3mgL6#@kCaakk4D5~fJ*jwg`4Vu3F{SFj=L3&a(p$Q7e66<3VJ6{F9aD|B#k zwFyU{50^=#Gn;892{M$u&(aMO(UPgzuTpDUEk=tB)Z%>KGcQ3ibJ1ZEOGHnmk~J&v zgo&Xg&5H1$%_Xt?QoW+aSL_AEHwuAPrb{rVSEIl$U?cHq?4S!$#}15(x9&hMSO=;% z*Fhct8dooVNVa6c7}YgJ@%!qx)P;0h-joW5Sn3VaJn7%ya&AZKeG16jFN7$lNrQpEQ||M zA>AO2+kC=k_#HEPj?ZjP3uw>k@h$omLe^ZB>5w#QLmMbLT?i~$yt4E|?-?PV zJP`W`B~t%k@j;`E0N4fu0$WpgGmb7TOyaijHPu6WGk``Q&LG&TXiJ*{cHR?17zjK^Q>k+`l)p=YE=C7{lZFBg#1gE@fFf@< z{tf=zcwqX~HWx(wIwG22bG4^m%{~GB)^N1XMkrJh2#mszcK!~i4?`v3pGG~vCR@l>ubnJ@j zSi0U<^9N}Y1@ysXh| zLl)?jI7d`(ZjxGtbC5N_*~*$EmsU6XfJn0MT2aSzsLxht&!;uaRH4lvYaHs$WevcG 
zS6Yc0WX-k*5%M{`nfYNDEQ*?wrLECOXKj!*#hk1$RA{|YvvlmTW~@)gVJHbp$V8kj zP$F4_JR$UhO&?^9a5~Gh-AU3$Gc7TMv4kpVBLNl+c9n#)(u#R$gTN5jnpVVt^oNE=|2w1LhpZ6xa@34^q;CSYP4$56~DQR&h~?;9{d+OX>>jnZZ$iR1FX z&aY>^sTV&oE+mmSN}EFRXs*Uc(q^Cy>;Uf4Mohs@X^`UW#=n>svgZ5JrU=&jpp`a? zaGIb5A0}3`=Xw;adA2CD(ngU{eGW*{X1H%UaA{)^(gkTF?N|?QnMP@&Z4JJ`L7*;3 z8#||tPdM0eQ0v_ZAt8e(%Wp3z~R#Lxd@Bkk^~E(;cO>DT#@J6ZdebbwDJ+x!@^8F$SBxKX3hZI66n6J~k81Az zw8QaPzPG;irvAfx@2%DvY%WzPmAFIT{#)W_-hr5>>FCZF*&}sIhs{o%&LU1W@Z=0epDHpqIux z*+I;<9)`gp{ToKZFGsP?`B#Wwvw70sUkPRSx+Aj{K%Vpn>P_+g-PYqz!TKOX1lW9@ZIbFtVAbuJB?=Xw+r_Se=pTRDdue2{E%(VCU3Ia|H`uRu4_`Uo@n;ie^AnodLB;H{ek0 zP1pq#J6!Rv$-LEZu$a<;R&KC8Y>^EZFb-sy%22gG?Y?J#qP6Iu<=9Lz;e`kN05G;@ zo$sRw^U(A@ezw}0VT{&^J{%{TDZgd9vNy3HFK#qUoAaOee|0`CF8c96DtNUX1)E8I z8D&kTu-_-x3OP2D$>jmIz^YWls(Wa3y|3mE9-3<@hU!sJUrgfGJv7PPMGsA@BsHYM zW`Zif=^k2Obz>5-h<(;-EUhqxV-XT=DjIXM=J?u-MMtB$=07Nilwf3<5JHNvkx)UJ z07A$F1-qM%2nS6{*yE3+m}2>SVGvKbdNxJj^V%v;c^3{f^AL_o+MJBg11*L4RcHm@ zP)Ra^OYnzAq#8e@98?mqBrQpz%(Nr<0OuJ-8JJgdu zCeRe^2%Vq?MRJ&?oTfaEFGj4X2rAE%_lE$}A;yM1<~c=%Fm#e3%>3<9AhaA^p6jSE z8R0GYCA&E=$3`o*@>?6O$vAszBjuUDW$2xUhn)srLU<4xMmyL#tW?iRF}tiogWqSH z_*@PB zej(>-+lBGZW~?Bq^(%Rhk<;P!)6JRlsc0UX9|F zJyMUR4(J{FZniq-iJd0mEQK?i%BX@ zAd%E-qukVe5iCGEGB>O}_zpph-)cC31V6n5k1tv#{g8gKAe0TAA+HS6st_s(9ffMT z@8y*h$}0;Ch{WFMR)3c33}bUXuWY=Sl4N;hWj*%1vhjRg+4v@Td8h~SlcWMS4gSQ@ zU}qt3g+g1XP_RcyEi#RSNRX)IIgbI=iiKNc%eG;?7_&YYLG^&K&X)V}@Je>m@>CXt z_pqUcozu8%%g%MhI=WYke`}(iT|x^^JBw9q2psIn{ch1#%&dTw@LKCu!JWgI^V7Xl zk19%N8Pt^m;I>q3+_2(~{Hi=GoVg0p*VQf49eX|6%lKg@t*0w7h~N{e)KOM2L{A>}2U5rgH8_t+(ZrqxR`;-9#mZPWBfqfnjkG}Y&Tm@)06ny_;|QWJ0vi0vHHYZ803)O5_EOG%XT7Uoy z(XWDP_?GrD?+7VD&T0bObqd0wE_%4OPhoHp6iNWes{gHDmxh!c62q5i^$0zUI3faK@7kBJfhDx?|EA>6n`)l+0??-K1=68- z0~_yp}bX#nZ9ndL8yS>orr0aaY$0o{&EdhO2$ zhgk>}3R|$ld0-=#z3oS_ZQ=q!-VDVnT3u`pr~^h%J@yL!>ReZvAcIN2|n7898klMSZDlG%d9^W3ypl4$|pAxdo61MSEsGcA;A*d*G}8VJg=61H%u}dWirwtpbHh-8kUIXHJBn~{KuxK!HCHHw$GU=CMvuvEOx2PW}lz7 
z3iLJ+HEq`nMH^P^0v7dRJg|*xLc*aWXk^f+r*#10Ji9ZtvsPV)6j>&Ck6UdWP?LU+TLohdjCLlNB*F$5b6>#Gi;-KoCOq4s1X2qZt~ zi0`H$sxiD`ShsW+2G#g91aDy)<`Mb1WGyzH24*~sp0-snZ^mo^I*g)CgB8ac74(~# zXzX%&A&0Fw=V8qm=n>8G;%77?gVH8)Q9V?)TpFcIgKXnb^}jFJe{<{C>7psgOIw?g z53;U;;FDOZ&Q?l86lfSQCdvGC7ZR9Kwb#Cv$+}ra4B6E28JO*{AeKM~IS|O%m;-^yXn`D`N199GQ5eDwRYT@PQtAuU zQ)3P-GKw!FA;rsfE_Az)MOPZ8j zgAo-Mnv@v+e4G4bFwPHx$9ZujNCyB0y6z&C;;4gjFnoZwbOS0k{uI5m;6a zLYS_D5`Cw)&TTM0O|%JPNZbi&ubC1uK10&^kTg&pAf4I(wPv(4Ao<1rjLNbISuTKfQe_FTwXB$S>hN%RWIc$_6=Ro`f1} zkilVPky|Eeyaf`umB=AmRl>53i-y&+P$edCTyeTHVFh#g9^ja%rt86$PCU>|Mm~k~ z6U2o{gH{`wU)=?q8rN@m^@&0Uh2*{UaN26@Tr(*c+t2~HVA)Q|KC%QdQPmK~waXq{k!N;hE+I}}b234gHBE&xpv z%dXZ(>4ZLEg$#TVhEplg?KJG!eq4YQm@%MaDufgwWkiT=8;FCTp`8CxDhW7vk-KdS z!jlCz4ni>-Q0ar-x}!tEZW0tLSI$EiKnnv;QIq<>Gsm}j~;A)V=Rrj&990n}V4 z2bEV9lhuoTrVGA?ppm!XOqT~utcK{*NJyDjMUG3{rld^+o-NmMoNq*uQ^;AtX3WdX z!bg$v#qJGubs&yx{(0hR)s5V>mMH6fuDkJns%lwS9*P;31D5M3 z!(=yscOFTUFvmgnXdwg=PF|^}JR-tBXRb2RR%pQw1gSG7?Sa{7r^*Pk>7b?2&dGZW z2!KXTX5d_?r*PeqSN789pGe3p=c|hdPf!ScE%>v9~Pmmp)Yx%hkWu~Zzu^Uf6T)P9Q zzY{cYu$L{EXoHSg3&k4bH^W@?eHXEG8=H6f@QoGE+HS?iZnxrWyA{8EyA>a|-HQL$ zb}RmAvfY{=+;+`B+jh-IZnyKhw%z2vY`f-Xw_Woy+phVqwmYD2+HS?iFI3dTcUJa! 
zgDJy2*Axl5o!NStX09f@EH0A(_^g`e)Y8n?UN1~1rbT+t=iG276JlUzIr8Fg`;sDQ zKK=4e2qnH`E`kOAHwkK66wCw#n&ZL)Tm!niPTvxh%cEtbC!$kf0e;wE1V`kOr(rB z2f(cF<|m%Snu*pIbu_-2AcKa)CQJlm#_%9}mpMiLZL$<*>{60@I_CUVi*C7lCYkAW z$)Z7HO{9l{+k0>}#Dc<+!CYl0tNai4gzQ9k|F*C2Q_81h{?)f7GCgf5xL&f{sCi&k z1j~(^!+bh9SZ>|uEgrErdR$zLU}432i%786Z1vMH(za#`NzLnS?WAgEW^1rJEwK@_ zU>Yvkm_mXM6%n|G;0W$~2Q+sdhGuJ8?zb;r;P}vj?YK*t=d5>tfm^D|k=+EE0ElbC zfVZFkPuAi!E-}x}kr3=%vvzOkA)fwJM)9ukTew0k>H`JF3aOvelDn zN78&;r*u!AEpAAvn^yR|-XqMJXwKo2T<%vi=QuCcDHM2)YQj^j8ST!_;-}_wG5nmh zHhG~Q88_pBo}n!oHEg7lnWaKy4J)`am|UfLG-_r(aPkE^HN!7x6_evDotf37p=y97 zkqGl<^jY&q=bT7oU1-LMVeJ@u);`yo_ZuGE!|@j>e8&yPTenAyplPXBJ&6Iw^B|!E zbB^`>zj5x3>s*25OJ+-KH=5^T-1P&qYA24GaWc1u>uM(r#4k$tq1B9kd1f|GQhOso z7}yKe$kI*{F{@^mqgK)Uz5f}+-^%*A5MW6knU=910qT-7ljlE=fjm0liFq(*CO_L1 zFivW`v?~zC&P-m>6>vCpvMYI&3U((60*b+=gh&kChS$WfTA9_q;Yigjg9PdVrG7&Df+2H!-$T)198})O4q3J2ll$lkuOD4p)htpowXJK zjoK244doQvWnMXr=Mncw^|qY!i4HfJV=Uhz`L6%z4H7B@6W=*9ZBAqUl?9H6f09nS z^m=ZR@`;awX14}SrC=`jv-M*$N-f;W`oDB8k64?2Y(iPXhq+6IxuwIgoRZe$BxTi7 zFId6%uO6jRDyE7oyD|Yt9Px;`Xr1*j2ke0}&3up%>@Ao{B+}3!$mHzK+L9W{0_}BS z2%fxK{BgQtgcn3N1(xr-cjagW<3)7Su^!eNY?13jruO+XY-7gBq&c$IjvSWbc6W9P zm|`9}X*zQZ99=Df=)8*CR_8*zsn>PF*fdc@22Or#QrBEa^6+OD-=I-$jNP0?poOwC zENk4~q?%)fY!_>QLBp~Z)6K-i0e=)i%kI0h*l}W#gzpOw5tWIlpm*}bozAol0N*z7 zcs`jxXVu1`%quPEFsEpaFH{ILu(kRB;P*e#%09o}zqb0Ya+nS`{44CBqN2H))W19# zX69@#$cHmkcJp@<+@_*g)#T=Dc28PS2H*sa^Ij=4SBn&hgK`^!qcv41%B(KMu;gr6 zzIotl@w4-FA-<+z<7)+BqY{E|lr52SR&N=`rBXbHLr-j(grZdDMg+%`^HN=x^j1TydQU|1EtQe3?)h`1k z?T#Bj5zk2&kwbGLv#){A7 z6@(B~(kz%+`3uq7ZdS|QN(&k%5zqd!q)b+&J3qb1&) zRgTZ_J7n8oX5O=(>J;DB7oD<0!FEDhA>{O-rU(haBp&Z)Vua7QPt)3zFQov8>cUIs zYmZ6GAUkvosxd*@!~8Rp7(FXseICou#G$ssfl zQMfpu@}ZIhBmrp&qM{>6{ALstb#U+*)S$qiG7|j4uQB)c|G#VR+Eu6eBOx#{Lpt46 zwLjK+*Sp^D_g!njS0&5c7~yZ;b|UyG4_-^wr0!S0-O&n`PWAS0P0tVyspSR-nptkh zaL6<;-ble&hh}Ayr`gox0Ea%P0}?s$4fukGc`Gai%7(NZpbl$hD^;yNZ7c?b5^ED! 
z%r6pcbR3I4=(`C2p@DBujZX_7&)4u#0~7EOp-u4N;wAV*cmdx)@L}{AK8lH2@Nu<) zPXk+kuVRA|{&orA0w?mj$1W#znZ$L_^7aZSHGI=2oJVz9@y7 zD}%&-QgAnWR3-CQn1y&$UZLbeCem9X81&lpuB6!~YF27L&(H<60(`$zUuZm0ohql) z7^jZcDx<9g9vo*IEyC|(?5 zZ&~7%w!FGsQCe1OO*aH?{7G^h&80Aeo~=it$d-tI9a5J%nyq;!r?uc`R@CbzADrir zEAZ>}c%+ep9*T2BTd`nGJ|k=7cV|UaeDh3{J{ws5h( z8jT>X{rp*xo6fv2)}RdbV-|r>Pu7qdL(?XiUeVWo=ffFjVE`zfNkL=iTp8j8@0orWT8PHmi;A~u#O zBHre32IfUa_&^ctsh}_rbRAnjkjD_+o1R&_AY-sN?i0)@N5dEd^+Lb5R3BI8YlG^3 zb+e9kOE*MCHgaM;18g757uKr(*J;DS4*!X&NO|#c0R>{;Z7{#2W$2dWbZGv`w=G(7 z0Bn{Ap*NIcPGaTVx+f-5&G8|A(eew;81TSosj}4+LmR|#HM%}QNT+mqZ0;f9p_Mfo zTA`Le2O?!$1Apg93TQ}Ju6iVbZG4UwpGwcr?V4PPHIOK~W2VYVQxLs77V4g2`4(oI zkp-!z&gP~VfrCbYVWF!{ixbAt)?)=6oJ53_v-Oxt6~iJ!i6lKHO&%7{TEuWdk4eTg zO00x$k^*B{nk1e0s#`}LHexv#vt$CUNoQ3okJRdCULuW^QmY3#)kj~;Z`S9fxb*T9 ze>x8^k7rr1y_Q;QdidLwB$w;a1ekFd79#I{B?5A6&VGg{GGgiRDKw3Md4k@3{5&*G zPa(HFU))0zEd=A^0hKt4jktHd{@Gq^!Je1zOPMj@dS-%ntP$7wAKF=a#UhI@)5FJ9 z4W0t|9LH9K2~%AXw2tsZn>-1Q#yx&~e3X1Rr@>)$TC}V~I3khJC8ePUu_KUK;Yt^} zwOzd!FIS?Xh;L8S1jSm;f|nv{hIA-st0a|m0HF6ew4=O3p_XplPi{>t zgCLS*cj(&r`lm%9aI_(cofKq5bQkHF*VO^7ip0u(JL0dk2nY`3gI+W^ZvXJ)JA~Ex zMG@bst_y^3;B)i`gL%GWe(D`u4{$896t1Yk2<>{pZA;Ng0B2!g?f+K>-gtA;$?_p5 zq7DcBA^)wdD+27))42J@1rlp)rr}h?ID)U@_hy#snRKLPaY92HD(kMK8Y87hzqPa& zbGVHo1esGs%t7D?e6J!QPY1XZU^&Dxd3g-l6KK=8CX}uMuW+ebQ6HgBWtg8;r*?QBp-#0pqI!w= zP4*-~iZfst)raF16tfD;F#vxrF)B=XxwLG;{ZP!>A=DpBLDds|~` zlg_C}g9GUC2E}P?7!mlOXBysO_i7_b&{{C1#%ND>C1A|Ad|(~?hnJr~tv4$vYW|a` zsX45j)XwLk=33U>g|q-K!>cXTa@!=xH~Aqcbg0l1Pb~WAC{IWE_=sqeWFl%69xN=M zu!6!BpC=T*ZY;(03?QCvzGPVU74D!LC zUvwF5zxu7$ooHr&hBwpnj7hD`e-X?(Swa)7Kz4F|BGPj?6 zN?j}fQ&*TJV5l%_0mC4lN?MzjRs+LP={!zZUOoKkFJ9X8a{(4Iz?Ym`$)FF=@CEsk z6sx$fb5S7Tpszh4&~Wewr%Uq4(URwHEDs!+akgVQVQ;?oN-N9`0E>=$ypmfRkTT=K zDHF-ed)Gx4b&a1~j-Gto;ykPAZJMO_$!x2xf!PvhTP_ydB-9jw_dp|+HbW#3P7sxD zHrixU=E}ixi)CmBwFqmXO1gv4smd-y(V)CkgNsyB{h2Z^iJvu9mXcfhyMx!3y9)zm z6J+b8Opz661bS$NK|Qv(NFYoE0!D~IK$dVvIOm@y)QWhM^2vD%WvhjE!| z3p_7P3kqZ$m+H++GT)c@+|8hxMt^frdW5+EKe7cT&D)`_Iq~x=vQqmPF1B?*BQOQF 
z+B!g65{$Lj6zc)pA_s0Q%1LyG{RMg_G>CYe`wJzE4r`he=1)=Df-yONDjpDmlDIV% zT17(hIYx^#YTNI-T16HQARlWR8;d5yQQ4XN+zTP}N@-5o&eBgE650G9 zQOcy`ttYXbn5$=&HnG3nlz}Ofb~$nKrcmW+uMo{NOB84Qx{2`m6bFdCFV_^qCAN*| znbxa=*CLd=CN(jy7e^)&DU@bisu_3oMae3;z?h0W$PO{UO?a0?QeDHq)#U$bwHkCg zd9@fFFfql^QN}yOyj1i}0!|BpgevdMs+nwXrO*|xe7*j8hJQ$9lKfpStA~u|3)3Z58XtoNWrh9Sngg(Sw$u72~aAAD;j%7GXXqA-e|9%Fc# zJ~)x&iTT%x4rysErWn{5OY$V-Xn~y?Lczt&_|2ym0@slBk;S4bKF9>Ben3c4kD>={ zy4#GVz0MQxG}6g;iqaGktDe+yQHzTNTfpZQq)8P#tChQEqB^?c`P4sZ&ZNtOlRY~x zpy&e8)lp?|!%>~B;i>`^Vs%`7w4=orDuJB&>@=lTze{gwscb`UYb|AThP4EKEU8hsNG7j_OO-U5hHkQ|f6hT?@rT`_o!c(MoVqZ5}A_%B(IkxbGC%CW;38f%SfLF6V z_x6>E3bAv^@OaJf(N>rIe3A!_$kQZTq)E^MTRr#$0A)t{MbK{#>uG=sPg_JBSDyaJAWm%yahTs?DrWhD{A=fk(^w&%T4Fv` zw=Y}7%h&G%?HP|4XkXue_KX1SnQhRj2Wll)s%{_^T})mp+p72rl4@@S409CYDWYOCtq>gMNOO)3Fs-yYTY?VFLFwc;uH;^h~ z8-Y@F79W%tgM|3_;QXwf`eiAS@ij?cW0EKa0&OQ?44~3mQ=%gQn3BYdX-x{lgK%`h z3IX4gM`xJ|L2h_9%{u0l(g?jWhEx}0_%&-0-I3Vgc^onZ5*rjKAXBrE-Zd_29M?4( zKTv3=k&ZTC2yDAFmu8)y0CHueS+$iRoN>+IoSv7aruFo_Ba@$1@JPWRrST97k)LLx zZ^`kl5E^?}RQ(&s59Bv;FmICIG{+A$Y?jo*u#o7%m8{iw+dI{d|LJdk`gs`A*Mmg` zp>VT(zHmPx`*dAAOJy_iX%$Mmb?3Mfjo2CFBy?nyuU{*g6ozR(kJDA7ltoC_H;Z{H zVtBqlpwTV@r^}FJaPbR$tfu(5)<;E_Y@gz!t0PC*Vf9un!R;fAO58pMq@THq-;AP^uQY<>87;Wxr{eY!)2txmU)a6a_|_5bv4AtwK`@G@1V^fdGHv#ip=WC zr4Bw}kIpz|QM|U^=Cx>;cGk^nj&YgSmYQmDE!oS**8<%ZF@-nwrJz*ieKwGY2edH4 zFqtRQaXdis0@HA&t|$MnKEaL@sy^Ump(IZ3A|;4t^!`Ra1(Uxq)0?m{>gS0Y_BNyn?s z71y!K(2sAGo8S5J+2&VuI(uL9FH3!PI?u3$u=y4j$br-WRY5*zS~Vp6z50lxbIgsl z$)id7WL>Xp@wLfMTl%NMUCVwv9LZq}am5kW07c3rsA>GpGGicT0fgVk)zhvTu5g)U##qs>{Zc13hwZNQzWPx&6bIhj5!%ASSV#mVnSx_nItu7{nS&#ID~&7 zSM?KdX#jy-l8A{O6iP(a)r5dhL{gP(YKiUOm$a&OA@%!;f>pJaO0fnZxVrl+Yz(uq zb`L8lGB9V5dlNK#Tsws$#KuUUq8YkRE*`}0+=w~}Oyh_x!Gw*WB$UpjpE^#!SlopO zTsk@CE4tuXV(>ysQJDc`9C#K*>lA^9+yWCMc2fvQHjrD3lQ`5`FhGJwo8~k&0zWil zJ}kLLs%9vG$t3K#h+2N}G&DwynPwRI^A(+SKB$$ZNTwW$sXM`|dGLLw8sSJrHJbU> zd(e%3HcKZ^&Al#cl2V4rqSwU*^rP^R**wei2rGpIS|P71mz=BBlb*|&1HkmyTc$oK zi7RX%=$*2VU#zxcM_GGawmwwsFG-001DvrR7JmLkigyZGn$ 
zj16+(4zM0pRSz>r3IL%_hRqdzIU zZcj|Ypkp@WwX3unE{48lgzhXv3yly@q@+JcjK@=QuEAFan1Fz# zmtrOaEe&ujL94IEk>ob!q91^rQN#DB{7q8U&$hlYOlaT4R`}Y&`+xZlJ-lpHtzBv9(*$lSP z9!9F^^%H=|CGBM7at2vDwxF}>c1H7EF9rqL>)@g%*XRJY*Xrtd(O?opsV{b{%Ua?s ztM1Kq0>!qtQ#9d3cU5Ht+QC>|2(354;#f|kY3Qz)2CA&3DY}YzqelKDQ5B_n_Yr5Db&pk%rQloHedC6wRk1%r!&A;$c>-mQE;Zf1iU->KFup{cPDphG;|Ow0DyCqqaOCD zvA}{Aov|ZZc)nj>j^&-GpqR&$-Yof>d}iuvvYHhED23Pa%RY-47bOm4gVk-cXr@SB;GAaUBi1xi)S-B!Zb!Ic66)3O7I1UK%v;x~ z>kUnwW&0MV66tR=Q&}P~G18SVQgjZbkVlu|hSO@A0V1b_TQnnXZNl&MkRKMKFydrK z(y1w&u!$^_TBCAwO0LtU%nMw%0b`9{hiFW5+q{eb(SUeH|4iPbank=$B$ zNGUBwX<~$KXdXgXPccFs!_X2(Al}&gQyf8BLA#u*Z3LY-f-A&_f+LW$O83E6+&2^v zNEGnXVd9Hc?*wRu$4mNixa48?hyND+$zed+)*4p%_bDlZTB+30#xY~D?!|hu5m)~r zXo!55K~3Bcd807lhEN+}Lf5s0AIVu_gb6nzOo*(w0f9l7P&13NrfO&rS*xZoH*FZgyQ^LTP_3DdjI=La-eIYXGn`3C`JURg+(b=wkB?x-`)YBsf_m zDo1#rFk!@K1bY)EJQEOxcTGK|FyXqxgzNOLFd@`+<&hx3^%N$A@U&2nzknRQ%bDv= z?v_G3S<|f1&VZxy5Amy47NMr>;I|4lGiR!GaT46#*bD?AzQYL}r zFk!oAme4d|!a45{Ce%_JdAn*&7=|n$U$u!d%!k5+pt&Z56ejFOn9vWrYEX=nWUY-Z zHry+Y%J(xvXhe**@m0k@bw!Pxq`KZnITLXZ5EKNfC%-6nmWL7j$(PNo)0>i}wlCXq zPwbCAF)wk7!abn|T-N4MjSjoUR@K>>#{2MSjebyDh_E(rZ_@pAX6M<#!mUeyp%%ac#&nqV4Gy7}OPKhwEtHo-nq2oAEM zCXPzSt?Iu%SUlyraJNC~lyPL`_^Zjd!b9tm2aJCzV0mdB&kGsLfrTnj>?h|Gt)wiI zP!vJKDTo&sJF&D)RKxE`(a$!LkG934uFm9-)p|O4OeQOx%-6>ufVR>-V?&4_043Qi z0MQCH3}SAoX6w+brxxssJUk=((<)J{$voYL{IxsL+)FMf0SY#E<;9yN^w_Z>#Y~oS z2Vaa{dLv4hK1o+sLCUO^sQvFVdbuh3{#VDV_ujfwopdv&zvBU5YjOBpUUcHU3DO7N zd&kTmq~G^nde>dE-kt9`CINr#i{3jMaHBlu`M>m^Xmg)91$M|{&|IgHK z0sD|z&eJG22?Z}d;HthOpBQ|Z>? 
zCZb@SGLx+^IVs6Ag|pC=>4{?W?f(FyqzA>!BSGo)>ZRbRQ{AqA%Z@iPAgoVblZY$j zhhEqck;up(R$qb&fZDKVi_1i#wSLPlE0x5atzX#b%!bu7b~X>>{eIDp|FV9k-)EOM zjN2Uln7x>D)Imk?2Mu4bNg}3~6p51KIm=B{bYSf@te(D8J(wM@*2NPjbW{ebY_4Ia zTCVPJmn;m%Y146fe28n0Dst9YuL0h$`p4MO>6+v>aB}f6-#p8&M-M24hb{tj!|EU2 zSmnpdf!4jOF`nsA+0PmvLGz_*X)P>Z1CFs??db*K;lfS^3KjvEt{W`^4t%Qt2Q@n7 zR~^Nn4~DfO8(L}gzi`Futk>|;UoH@{>A2dY5dcG@jKed)N|sy=_Q7G^diWDd4`Vg@plCgOA0Zk|=pLT9a_(!w z;5sYYO%{T2z&iU$8^i!~@!uHxY+U_cJDXoS8lV0@jgc78{Ti^Ts&CX}#z=48Gw`dFa^iYmX>jE(4gFn?dytuf~q!@&8lQ&VUZmGQm-(Zc_#ozCx*N zBi1#BDgEDmH!@BJ)5hdp%NHul)mA>X0ujkHgZ$L>8Kvs`-)$91n=vkN&epmQN6&^)|yJ|Ow<9uRW z{nYc0mtT-0-V|@2mP`+v1(|;S=@M~x3Yx4(w1OsKwy2d1W)m$WX50S!5@7@V_A&(u z`UMKK08uw#3RVmhWx$&28Rr-C7_lvv?R^i;|lH1qUydRjDH?sJbA~W zxUt~go1j(#OXKP%yk)GUt>08B(HVP~{Lo^n)}6ryIcr7{S2;=+6G-`iTviR`cE`QH zOkUlxLrhKDU;D$jDhflWEF~|ADk?+KJ3vNmuha2WSqJ;C0`X`M#;T`eQ>ZXhjcp|@(_^=hUdEhHQIuagLM90Nno2{AQzHu z<_BbPE_3c}r4S&}go?6mv-B9k<5$Aln0v;XtYtLW90P2sHb(+1E4qkdLYZA{HdeAa zX9sOU3molk$}6{bHKX%J<}D=j?5C}Vfku#M!U9#2HRAvQ>r)DlmC%L&utavO_GOVB z=!GCy{Tt?G8OB0Q_f&$K4sAM*JyMVlTWPjJ9iPj7NJ1MSqZalSsSd5r^sOMFrhT9g z8PKe$E}HtBql*}qI-GI}4`chLWbLdSnKJoc34mn6fSb@P{E*WEa9ZHDMdCzu%%HlM zgh_4382!pOgdJ0DU5#0UbHoeX8%4Nn+|5lH;%e1|$)K}kEyAMtFIk!vQEKuBmhKP} z^NfWnoymc#}qmLUjF%xMqbvQGY%ec}Xz)l`riI*&~Uz=$Q#>Or<}u{l38H9hr)>XBv#@PGyVi8BQ`AHZ8p`qA~jP&8+x-U!3+fq_*MADND`G6 zaV^I`iCSO$KSg4D)nhQ^tKZ|QE1IDwnfVI;J7VD5j6mbxcLUJrUg7%cLbvLn*jcF_ z7u^iKvsYG?tJ}}5CWfd9pymgjUXKu~K6asUL_}}|%!w$~$RK5&6CrZwhP-`#nZYv< zQa5+PC?Lts3>LNjj z?6Z9O!XU8MUf$uqfAh<~T#3R;F%u?=Cx*^;K% z_3BB-9q>~idCX0m&5>uo#yHjuN#WU+4U4eUQL(#hLxj<{BQ-JRy%rCT=^8?DPn$9L zPUrs0LUr@4wu;ES?Csr@Ociq0y?5OS=*10>EAIdJiM`Hja@XuY8vO&+HWqniWe@W<|KUb+8LBYFZ*^2&bf--wXHbkQr~(z?a_FZ~9lWccKAsGZ1V zIYf&fX^R`ZfGvT-!|WkVX$hK0`@T1 zW$<-qPD2GPy=wSuc+g|p(ZlZMARiuB4>F{OgY+;&hmNW^X3QjECziG~A|3#dzT7uK zdW|BB(C4HNz1P;b)-0lTOZ~-~UM~+yY(^UZc2IcXVq;AMx!wra-ASum3HuAp06Zdz zuM3ZxU$Ei5&To=Qw1cgok&l|OawWIyl=;awc$r9tq&UnX`JUDgqJ+Xk9a$@wQ^h#( 
zD@pqW5ouH|XvHw9Xc`4k8&&OBK#8>7!_$h=iJGUDi3&!bRarTsU>9wgErU7fg?8WW zSH*#0FU6~2q+)4G7}ZAV!ds)pNL_-P_N)r#Myd^FCN{Sg5Qfou>&Zmb1V0=bW64Bq z!}eKWQ#RM!#t_-eJ=*or``6bpS*y3mV$E*ux3Z>#qm9MNkdXfWH|X!awzf%Son)hO zS$RYdQ_?8t`*lltW48XD+0dD74BNFA9aA6t_m`J8Y3{##?;V%_>#zEzZ@BxVzkcam zJ@^HSdY@^`-P8Q*rn%YOdSJMNf`mSpBBR}zu-Tbn4BqU;M3Q`d~JR_aOoWc zq8?n^IeF{}FL^I-TTRvPAHD32h_SJ_nQ=+`ZlQ`X5^>tA-)C{nFA%@bMr?w!_S)Ak zzwoQh|D)e~;(3>Tqirg8yLaMClLjouS0NH<^ZMx)Qkp&Hk>eG*hL$~C)_!wci%n>k zUC#I+oX|ZQAs`@#g8r?E0WqG2_Q0iVMEJ(p=yl+rCF_nbL=k+yB)gCBF8fqc_R0wd z(VA^%2Pg$2Hd#IRO~+GQoVx;kO}UGYb*@Lgu*_v{$Br2QdA7Cdq8$lj zp1QDl9H6jOy#zBG0kWf?-DLDJ*gKf)T9kw2*TE#e4z-8Ule{@3jDmKw2Z%i=9g9JG zusoza*Z9p@3K~TiuM3#iOxG5rFOx;9Jv8^4$7OTsfG~PUa%`uzJsw(LQz1A9Yy0+hWK+&|HV+MMXW^Cdvk7-ew z^+s>k3Ltb}9b9E}iq)LbM;hw3DOvWCf2A?52zDSx8KXTGwb=MbSe~_y)PBoD`YfX~ za~}x=o_?w>tr27YOIjslvdS21uxdm<;3e@fWoyk z7s2l$9v&p;NWc|?s2XtW%SE(6Pp&QB$fL!nKPh8E}u-|Ipio6x0#nrmGf6HBHes z>!t1y0tgLPS5|Mim32sO(Qh_iL!P0J0hl*ty{F+TAT*!KFm9EZ&2U);iH@x zY!A17gOzd}#>b_6s;n8D*=q?jh|wT4;D7sN%{Ezxh7#kH zYG!6-z&sNn7zSnx)RED}!oOFjWP*2^6@swCgm|ApEjuppo_D6mED4%i;th(tGSUP* zCI3VXG%s?HTgLmbsSlL0s~v>YM7FY}AIoCDg;*v7V}dD2O<+}C8IUW@+talI$S5&q zk1^_ESd&jLQfI;kzK#z^kKagaW#O9pNNeJd^>39$^17=1!KPkKgG%2O7H*G0rx{62!V*&9s2gImsT(J$B@vt7 zj6ree<~L&?j9>p|;hQy%$3Q<_(Ue+*Rn)#a!fI(=XB47IJ~Vtr(e>zaA$%3E1Q*=~ z2A6;}%nuh>;F<#)pc+P1ZJs@-MH~`Rc1CZ{VxtkGTOuYF?Bqmld_pu>zll+m+%mdZ z;V-sj>~yQK_tMx0@N@eKrHJF%e)A2bp02SAS^CCYq;Jep`UVZAZ?3B7a!gIaacA^D z0WbK>=!;nrJ&7S^1uP*t0~%A-=tEk_nZ&8~t^vI24X->=3;vQh)3oqctB>@x%*b0~ zk|A@+;~;iax{b>Z)T+K$0C4YBO6&WRz+?~>nM{v}fgmWZjCw%4zyi1g?t$`+pgX`W z*RG_g1-dh3IY`O$(GJ~ch!su^uZxo8AO$6{rK~13!kkDieiQlV#w#b9 zH6M7DjXv0AJ1h|ulwaJxf!W7}b%rMF6`S^B*S0FeA+4xWbj}JMpQszGV z)Nx-WhD%8<$d6bfAw7{C%u5}*zNrV|UYLUtM1W8dZAw5XQW80wBxB|>ZJjn1*k+78 zuND=P3z7*NN~n@J8!vh&a`u=PTe6F;_Vp+-Uu@X%#L+6*rKA(G zD~uv`W@4i<+Mj7w>Sk(`$8Thp@Y>u*c2U7m|6Y_`UCA!f+vtZLLCNJa$*?D=PDlbD z7@(-$l}Q^}Vyy@EvC0$0obHW>C#jN{l5K-u$SRqj6m}pe2MLx_NT?9P3M?ebSQJi> 
zCerE7tg8j6#4kgeV>K@@D~5?geR0xazkth>L+wL(iehA61{rj3nXTOobjNVT_9n*= ze~=ilr<;A@+833BlP8WD?7h2Z$OH*n?UIX@7G;LXC67R=Jd|IOwBFun^tV~D-T-(y znW4lD;skF@@=cOj_J?3qNf9hajTl?1h?rr7!g9iyuFyLi0;l1u^Up1_O-4kLhICB1 z$!aFTJ{wn_ycwCX#fO#`ja!4Fd}}_aQ*Z_N!e;Zyy-8%s4`5|~8255s0_X~i0&HD= z^kyc=qxWXa`=&=83sN_~!E9iiF3*6rPR>1%W@fZfQeB>m`G|?bP?=BH1FDzkyog6m zls)FlCnzB(%07Ol)`C(Aks|ES2K<9Q>5 zh7E<>=}}oQs$Mp?7n9HJr?@R^@>ZgErW!i8DU6TdZ3;jejxsa)N*Ps>W7T~fx67TX zJNaFCw<>VKx$Vo&p>bM>XXIBj&mgVT4c_yJ1*XyYKKgfE*!oE)$WaZ1bi30nXo|%H zWiEo*xMenhTGkpYQ@;p8Cqe%;Lf1}3FUj;H22He(fF~#=Q7Nq)xp;ZOT4pZN2`rug z#1RU(XApBhELtFLHXs80eh@cL1!5uCuv7Grw?UNTfv*n8KnVDdgtyAi7M@9ElA2_5 zmLtSnV;JRuT|%?Edz>_Bf#)$j2ZE=->G^4-c=_xp(X5XX~Q2F>IWDX_CbM$MB^oRf> zbX!~o=@3M>AHt}L@*JUa?c_|8-Uu~QHTa2hcS07FbXsl#&sR$zITpM|V@!yVC3;Qv z5#=$=HVb$*WVnopP#2}<@Sx|=OS=hCG{YQYx`^(A>J}yvS-WZu%S~e4yR-9a-N7kq zAY-;^Ta^Zsxje-bmz@&yqrOdiabl{!$)KA+L)K9bse~HFV zHDS~1y?0++FzZnB9 zWTF);3Yy37E2eTc)~IgFQajHQcm^=32ZVn>=N^*oQal|}xsr!#*xZ96_hGQqyqUotI3Xp3q6(GZ zRmPCz1^MD#bV_$86jr%5`rt4;VTIC4@p$!)?*L~J+D_`CT7M1^*XLg2D4%%RMu-y) zg+c*d2`giSeNHsDJM0x9pgjZ<60jl)i3_rhah#wUjZ{`G>Xv|_9&Lgg%)KIwT9B~V z;uRoc>YYiLzCLw^o4q2>Iz2Y% zA%gxKsQ<=maMl5e<$7X(LG{s&E`QIht?PdJ6RZFnSFgFLbsgzDs9yOie&K<4wBN`) zht{dGJvY1VRgd5C^1j-+|4*LRYRDbjcj7B`=f1x=*1F>^_D=p@SGV?l<9naqy2mI9 zF#Pm?{GWaAC+~Us{2q5acxTWMMJ5mLY^;?-WC;u!sUYGmWtUQW0SwMkY##w4x6H3e zd-1vm0L|rCqNZWgMtD6TG2qQ!J={?YcM!wH@ymtdQRShsxKTs(RSR~b@57XBf zKG!`&u6u~S?g_S%ehwwxNiFJ*k`8PfRSv@qNPya(+kX@@QK0Nb~ zmxtNOa*kj%h_V-!K#8HQ0RFUt-G>?v)l}0$<3k!I^T+7q1+1D-*y39lJ2Jw;Ux7A) zH1(fyeY2}kBHt3J_p6%JaeZK#hB^8*bHsI1u9&G`GgDkQ)xvuH#CpDZ#__a50J7;M z?TB<*;yg0MF**aHvO$F70^kfa>NcZlu1o`^=dfjq#8+%W zWWE9+4wx0r!O@qv9oZl!H~i-+BM}!=B7qG{VNJ@{$3$TLCo1MCIvtWoHNH3*6_L^y zNRap`-ke~lRg`?_LJ!de`b{n?%8N3vq^cAjhyNQ*;^V>;4{PqDiAHlxqz#gQNd+@9 z<(w9Zg)4FvP-~9i-ms&GFjolOYiu5o6jd@*Q&hgzj3B5T)bj~;gz1TI_+tUyxA+bd( zUuw%!%TOhTWmOoNsEU3k-L4T!!1<`_@Nm zXT?tRbTNBd$JhIGem&oR^1HK(IfY~S<@^5fH-7Pjr+oST#g{);bjj1)(o+x%(OljB 
z{TQ~e1?WSPp^vEA!N)j88dIig_ay#o2K7-XEJqEpP*ej_$LsME>n9OqgOj(EYq#B< zs)w$|Q054dxTK{S*^LA~wTz4hDG-5e#EbHDit1bCvQ#hAznI*6<<+zGBh|OX&u39> z&5Gk$?*PB|v`D#AW_pVS<;|7q+jXycx&AGA_Z9xa0ujl3$L>rc9tkoB=2P-cb3=H6 z#n7Z|747IL*uWtEslH2Qv+YPZT)2pF&ckxBm2Sut;#a~Nm%&3R$1R}9&(Qe`XhuM% z&eburTy|Q`cF-9#MAgZ{MbdctF9Oy6i|CmIF*puzMx&ovIn8-J&2n*+PGFjy^Fw<+Ucv=sh8i!7DU~xw?J+1+P(vVT9$i*K z_F~^JMC&LX|jOH&x#cnVcNsK|QVxAvZ<_2r99 z;r@%B!l;3%R|pPAzo8w?$WiNbLRmu<_#dle3^ z-r?L=cIbMLo(r(a&+=GWj!_!ei%A+Gc=;Ig$-?f zCUP8J>wU;ZL@weotIa zjfB-=rJqvIiMeTcptGj6Jp0DA4Zvd?~yo$9W;Pc+e#(GL#TK};Lf zMLKHil)31i2Veh|`GcO*tSS^Yc>VV`*XfT44RV=sl^d~aDVgXd5$@!8lHxts8JG|( zYvzs|LP31vHighrW)?z}ZA)B9REr#R%yAQ1;Let)hvUxmL0I(w60xJ*kwiWR_}b$~ zj~~K>!tV}p61+a>2M=oAcV`FvAeYjETg(_|s_*(4AFjS*c_L0_erXcAiH=M+H7BK_ z2gP=#+S$y4*JUa~KinIWafrHsZ3Zc<%+BEh44td*2X>~9;%9LK@`|fBOb?g%m7qjX z+Pr*J3>QS?i0sjZ@6myXM|*r+O5p@yNEDwMomXrA)2b$>RXNbFNJ6264 zUnEt^VLh2_##IjD9$yyksaybXS>~m1=ay$?K5}_V=ZMeFA*@Wu`EYrzyejr9VoJBm zb4lS!6@woAB^$%q0a}B?`9$*~M#&hdw9}&p(m z{pbp6c9(8$+%5C#HF%9sYb$)L)9A%nrj&JQ9l1hA=vJkDqC-kV8au0AxVLwaDCM1R zM061PXBR|QDkuF(jpV;$LmqMqyy%KjhMZ4+Q-KRmvJ^0;l;v{*hPr%cVEkx*J)~zC zu#quN8#uB;q&RA$5R4Shsbj7DxJc;vmP0Q{ zBn5k0CBSKL1(yT^H{lVk@SK0QZVILdh}4!d)*Q(FaUnPP(nkdh3J(7LLQToaKrp;O06i(D3PMB z3l&zBpqhF;J^lIXUNI~!sPPeQ9S9g=1Qf*qn<0Ihk9O+Bf%q(d@xh|6qGQ5f$IT7K#PkLmMJ<(puVR8 z)j?WSCQ?rr>noOdF8oc2@tQ7(6RBM>o!L=$%#L&R6N0Eux^~u26GrlSc-wRYsYv{1 zCKXa>CWhhM$=fJ5xK#4i<}GD?-9$1Lwb2BhrSIL1O$8J>zE?&^n@aG3?R5+jmGIhT z>RDkuf`I#brZ{X)r1C)b_3vJ-3jz{F+vuKhR4WhH(=prHLB(4Nxjq6Z z1T477YQkQE=8L7+b``o{K#tQ;GKGDnDqu%aXo_%iGGX;mqsHYgp^ z1FfgU?Zm%i1e`^~n1AW1(>5sonIt26+1Q?dwstO>vN#Ph*RAaMrQ!vSPhf^l| zIAt*UwJny|%=x1mECdCCtEw~kj5Q?RbTWoN2r`6HxUd6SV$BUT15n50&-5_Ugnr4i zZbsxo$T33+FaKd?wVdT$gzg{L?eSkn|77}S-+%4<*5|aX#X2A9D1k;#xqkhze<2Pi z1NyIqnmdKo=jphuf)X*9YOT_OpF)lShR#4k;1a%wmXw%jcv!3@z-Rp_lc{HV^+vu9 zH_!#JZ%`hrwjDhsy#{8rvDV96M|~6vA)tEG$j8j z=JzzD!F))Z0sQ0*VHUILkscD{q_$TK336jd#H!b-KLAGMEO`vHYZi57X-M;V`MBl< z)O$8mLC^O-uPBkn9v-;PX?)_E$YoJ5ng`+%hMCe?w 
z@>kO^kouVap+_08z3OK>EL!3vHgRT-IlGCm0a&m4IRn;XDXjrpW11sO1WjnlyvD4X z0W;F~x(!Dpc!Kx?wib|>;(0deHVDLZ)w|aNYGp4+N@~+}&?x#sY_YT0aPl6}tBRpz ziTZ(ACiFmi+W8BgY|K}_0kRlzZJLE9@UN_zUBRqz5%^JHdpGUi4s1Yw6q$O_uXi8f z7x`BL@>!(-s-A$OWkB8}>DWcSWcVqJz}K#ODup>JM^%di@XRg&G zOFi@9*;!yzVMEmUrHz!B?+Y$!6_>{c@5Qz;aWZ!G+5M;?Pf135g6g@hPgskX(=7&Igy&26^p z@#xfDMK#DSpjS_i$sSdIr=0<)EkS&V|A$l@@Mrxg%qR=-5NF^~4H2EzO^dKhv(6I( zJyAXIe4f=KvGSGCX(_iA^id>6}JFYs*WZ43sial00dXOdFn^*{ol^~$nV~K=4XEVU!D1x_x!sTocW=T zz3&^({Lr8O#~0pm`lqUUzw=YC47i0F(Yf)N2t=9*&H5)+XtFtq$(~G0vwx+ZCOFxn zwfPGjwa*C1L!+09jzN@6(iK(C9y*iXR-5NZXN{pqopY6*%tkLe2ctlg2$g4(PwGK& z;mC_3v&kQ%3yQf?!Uj?6+dzjj!_I`oQ>`v?HE&w^!@89V?X%NzUd0C6RT*BqR`ur9 z9+97{dYpWC*h_2S%$3g2&bv;a{ZS1`QT@tK|9Jky4*SK~Nv+78hq2hXu=7K2JhA6> zng#N;^Mm@ulf(#|hHgf|T9?UxR)fHyby*xRsb5$@zY|x54MvRBlanhg)e}%kTOxnB zc!SdRXdFGI-GAgzD*`72G)^nmSwoQ&9CBDpy(kb78|hmj$=r8-m;2s|h~VB%+~>0I zI|Vs^y@=af^^L_P5dZN9$lxKO2B z>%0ky-}S&pQwzvjva|EsRHy0od~2nFs2i&Z9IM<^(g!5ktebp7XqN0Llu7_)7k0jV zZ$5R8?VQlBi#w99t=dIXq4at6GjHa8ifm{Sx~Nz_aBt|q6vu|X_O**xXEah_QZ*s( z!(BR*QJ`z1>RYbDr12{pyrQ|5q7ea1{!0y|I$(CH;+tj#2Bs)R`&*bC#GhWg!%Z#x zg`jBusa~9oDiu&Fzop zn7KqO!B>9Qesk(~pnfv}$Pm?Us#s!sL#sAz0&AW-n_kV7?M|(-&Zf^)mRE!8N=b^z zheD7hsh*b!fUOvN6{c_YD&AXNwJgc6A_S)k`MV@iAG9t8fYpq|k4-OVa z%sdHY^eN#eguA5$yRDaytBd0MIJWq-=mw5yl-<#+o~7zu1r)HqQKZIik%ZP zmQU~q%4xqDBrQTarPogPt1pSxFkO9vZ6V0CUWqdP6;R=jEkJHx9Opex=Kf1U2d;4RV0e2Pc^ zORf|Gf*wDn%ube_lef$Y)o?&%#oc%l`G>s><;4U$Iy^;Ad2|u(*TNg=c{R-oAN30g z)6_3qnnh_E>t^WV7S@DC2ZVFo#r>@}hzp^{Q8ePZ4g)F)f7C!Kc(T++_im#1_}E&< z56t;KkTL0s(Vbar4+7I&kbg+sDYjD`UVeI}{Z49Y)L|l5BhV@bx60(zwj#JeOL&>- zcG(f+2B5t-$Jtn3(|NCMXI$nwp;;?cVs9O@uy$O@*$PV06b+L>aWC93L~Qy$N`rFlduMm7!a9CVw8E8W6FY1c-yXgm%^Av9Ree`W1!C(2O9d4iz~8Hm@G2`0Is!vD z46A@^+I0-lC2{#tF+>nDzdoj4$`JKNe=ZKui>IG&Gd0gF%sCQ%k&JT{>@AQn}(5?vF3Pm6|3d1s(@qJ^C+(ynO-Eci^|MoQ^+r>dt;i~De|1YVX-7Mg z6}!rL@Kf~_9sJ2otaMgeY3m2X2>z&}&a@ztA)>GDU8_c(nch66HPV0j-XHqquW$u_ zb$Dm_%4FY_!M-ccpf!fUMrH1E^c+hxhR?XA0~#Dmp`dAAXacweSwxs{cNxjdzKa~oKC$?o(372?b 
zz{Jbw>tuomaOq@2>^eiM_b$L0;zAHZMuJ!uYlxe=S|FC4z--hIW2rH8h>!BD>p8x1 z^aC~YXOf?$qW{Lgept$CE}V)*;l%oumBL9p2a@(PatNn%dxdaf*GmyrTR2e|z?GJ8 zqUmC_c!Hai$zTRsW*~HgpamKvZi+kBDU;F1|2r6+2NwE>k-DBClf?_tnsNSew`NdC zlNk#XS%Pju$H*~NrX**ZJ!X(Sgb`95dFJYA%(Tj>S#Q3b1G zg&&D|5l)R2-bx5@wqE-M)CjQ@4}{>goC+c6|IZ#9$^2QuMzS!M&7UP~q^Hh+4IEKf z^o!UK6Hy-O73?;DnCK>nIr>1aCMQikqI=UprGkSbwBJIgMPXg?b@D8j8RHlWF!*MzK9UvBF>R6`ilH*8sP;r5CZCo zunzRCX>WM#tovjHP46k!(Q>jC1h1daKjX6_rjAZ8^$ZHs-m>OP=1)y%wY{h0?-Vk^IEJof)4!0~Rc3-=& zvmw@OsD0VxhKb=rTS9@n7~SE`^-hM)>vXb?4QuPtPswlf@PJW@FO4x| zMr%ZKLw%DGiSngO7}VxGiom-JYIDV)P^*pzE*a{w`edb3=!Ef9-FFcJ8|^KO<&(dX z=G1pNQ~c9OGZ~NdJfWekW4h1g9bvmJK#g`93^IBFeimKZ<3LnK9cwgGEET$#1hJ_b zz)j&aFF(JJN@0L6lf6SptFCH{UF>eIM->IXIB3w&pw|CtAnn9q9gVUwT*#6#vVMYU=U ztG&jC!%iR6N*snDOIQbUHX-a2P-$C7h&YZotka$*P}5}mh7HP8ew&TlWQb@Lxm!_P zoitK)-yI9nNw+KUErBY=vKmgo3JjLXUy2i?R&Bu8<1?5Ga`#WVS+6B`^SR?LCb7X@ z-GBFqI_fO;NfY(z)>XwqxN`=o)4R14#DZ8Lzm9II0jzu(58UQ=@s!E`7v{ZMm-Y1d zqmsO~se(%KCbke%!oK=vOC=af8qMTaH6gX9f|0nZme|RMZ0l>=bI4GDG_9F_~+h-HuIdaJ&FU8Nm@3maYNgLaD)ztB#H^8Zdd3-UH5t^nfy9ZrBQ@h<^zA0cJfK1D1M-n zHE&>(S_iXq1V(Uw1W-l9-mAsVX*T&=0jw~Jd=UOebyL*&di^0^i{%$gWnCSD3J zFq!wynROH;(pGO@774qOT~=m;yR|@){hqUOp!x?KO-4&6cz)$tgiE9k7T!sP5`(+L z2d$NDE-lFp(2BlPvmeItZ2lF(HpAlmwwV+mhAsr&RGogRDpyNHgmIqg-g)PB&X61&__UDun>6A^-~qec5sVx*9v_81yE+ zh-if#)C$r$Pnz*OaQNAU6=)z=V8L_SGTog3EEdG(DFO?;+31gDZ$twRhs4MLaI9EI zW)em)gRB=7wILiqaS!0IK89brR}k1jd7J`2?5)XUq32{-lYlKXF$zII7EMU|d8iuN zVj3ndu_b}vQjJ>FTkrzWvc24+7wYk4;5Z;q%8mE(U8F;YItea{Rb%lKIvfqae>b`M zP6onWP-SWx5Jqp#Q}!v2u(Bwykt8MekTW6x(BK$K0Ig@G%R@Ipqzj47Q0ZLkC%@Dh zoq>w5PMy`{1=!UkG9p-2g@V_Hh&4jZ?U<|VWGPiQr{>@Qu?)y+>T0k~M?ax#Fik%s zF4gm=aB+<26zVijHl4O35G8_h@~xr>YG4=z3h2Z?N%ok$p@affvGbkpD z7&{+JG4@P{r~Ohj!Z%{KMC{ z!I6*G=wY1{U!P|F5!HV%cRkq;*~gH5i^H3>XW26UENX5dVyIY=F;jqC;<;798LT_M0ZFI31+IaOw1 zb;j$S_127u;iv9+|Lbt}*sFcS4;Y8F4%jd1AK&cUCOE-5<`e%JymE!B#i_v(84@yKd?jl9i99L{wJ@{jOcAa!tX$}z3va2_SrHDcI%l?`lp_XU911yR7i8l*)v$YM(F0 
z0)s1RO?4fb1!X<*;xeF)nwxYK08jGnHmU}X-`(+zvw8fub1Djq*en4`mY6XrE?jwJ z^wzG+={(mtSnx`8@JU}(0H^MfJF?Ivt1pSc(LQMQURd4Fx?vBNY4*AmF}}RF0qxLe(e01A^>|iVS&^s#k(0~HJ4TF9d9<1g%?n7DS z=1T(5izEvlvQca7K4crz6X|>-LgiHMJgmG_&)Zvyg7!`w1(gH8l!~YnSxkZlNkPkdu8P8@rwK|^d*1XGb z*@x~~U1#+#Am#+b)v&J#t0((9?S-ccQPqpWVSgLO#VEF?=y3+fn5Z}3_9etBVoII0b<313TyS@2H! z)r0H|IN6E+q;?=wHjaq~Rnpj)UQV}LPZ24`tdjGX_NwQYEbk~N0>5;WEqjh0;7r-YECXN%#^5XXGxp$tH>aS0H(*kx39iJx=V%gixc1ZGkVOA9 zHUVlrJw9GEriSPwxe_1yT589%E#|u37{gA(HdYXOHL?I;2(CfZMm5l~p$+P5WD+t9 zmJ%Fla?Nf`YhPe#GH?UD5?C`MsWhI+Y^1qcS>n3`&8uPdl2(jVP05hNwq!W8W{JVv zHqAD)gB0O*^p#08w0?=9TR+8>t)F78)=#rX`?qMm`3(gC7W9t6J2W*RmNbq^%9^c#ipjr~FA7Vbg!k}- z>z#pmjTJKb_1;1PB(H4{Y0B}eXxAs25+bEU)>$jVsQ`qe6i&lS$u9#{8OY|%?S%@! z+Q&bA1t3q^!x8La61K)~U^?Q1%T#}bd)$b?;$;)i+|I)7*i6vX}`ai}Kc2rtjdFp}h8?>k^-epk$ z-!OnDCmdRXnQO~>^tLbB)nBx$A+pbjU5)?sh}hN9&S!5|M>~&%T^%hw5_Wa8^hns% z(b6MhS4Ufqj9ndVJ%V<1H2C+xt{$(ntH)<$S2s^B>xl)sT2Jk_t99Dde!F`7@a*dO z^qj3-EpZW6v{GREtZ0Q-@vhS1ghjW+7y{ql`-|ap}~z=+;ZoV`Hj`DLuXhSUxd7%Ex2}mq>5vgSb)kN_K`B$NlqZD+Q&=Ct*EJQ|Xt2 zUH$4Pt(CGi;Rfc{0cbzwYH`&#$++Xg87a0U)6Ma0{M6|>IuS>nUHHSZhHBY|_Nv)N z1n_3uUp8(?P!=&B3HFj;D}`bvcf$e&T?1*ZFfr0S3qyl zmcDJ9!&8L_PEg<<^bld`R~P8SLw$e=&b-*=fDM=7Z*=I!?h4zvshfROs{>icY?QXo zue9mQrfj}_s(7t8&iYln&aj^0S)lmUd|~ZG|H96%Dj^8lrkH2cV1W(o`DSh&*PGpd zVO4RYkZoGU6q=&c!IL4rW3}w zwE_*kT~3Zn+3~I!zzlfhzHO}x?rt^Dajc@y$CRiKVzK4cir_9+Be=`9%qh4lwFOdj z!M>Z$XJq40F8VkypS5Ss$wX5G>|^6Gk%^8lTIZN}2!`+R4db+f)F1k1XY#Xh2Z{TL zFr4J^fopnbngQ=a-l_w>LP{@Uh~+fG(0C<=;G{bI4Sg%WRtI$2!9&cvm$-Mq8;Xbi;ss+6K!@$VJQj7{rmeMu4`g&SPH2N z6-2F((7-@MA&Cq?cZ8)_^c!T5!czKeDsY@b)69!tj<9E4)G;NKjvP{BN|qH`oh3Bw zm%u>8q3|_|TPh+;4KSUtHI7ul;R^OcwXk)7#Brz0X*dMuKG~jHHuJ^=&ReI^#V||0 zFU*33orhUeU{)#2f(&e54Yk+=^cAAn7nYVg!PxVvTne)+n9p`P_JyTA#=*j_LNtY( zzktm{y{Qz|^(C6tg-xaOK&?=;=dl&m@>9}gAINvYE*bq?v9PC5uZ|efog^P|VwX*; zcQ4vkfv9vdKL^LF-VI)_#QYPEbS8k-bY-}nZRYyUgaOEX8xAg&L;;)79n@_ zEo9nfmu4DZ>gm7=R~^)$P)Q?9hQ799^-~E5RWUVBl^q_TPA67|$E29!9&&s!q+T67 
z6MmV1R=so~^JFo>D%$xRC0JSFGY^ve_2i4mXOZO>lg|$|`K%Ke>TEMyhA84EQeFNr zIk6n0t(>4FBt{}Xrqk$CqM4`Xd1v$|KW7IbZxqQSnpmTW1Dpmy6SZVzbqP%<$9y2t zpGsZpCub?DiRd0!5)@lBF^MM7xB&%bWggk)L*;=Q3}>4Uln2$w!6i6ke;$lRYFQpB zbSIV%mWS#_{5u+*aTKG{uni;!Xatu=gQ*oshFf4uDK(jSJhH5=k@XEH!h8X3mLbn#{5@=aEQ0r#x59C~*)MqTLkx0C@?y zs>VRRYDo3&jdJAF!h-rMiR@Q$7jFRTF?4g))U*PgUHWQ zKKZ~(#jW@(5S}5Q+&oneLF#?qc7Trc?o-^fy-evO0+}T7UF4G! zBJpUMlyZnz6B38Ymg<~ab`H~NL`$h;#&sYHMP1<@I4Na^RKsxgen|qp!PUC8iZOE3HHV4;X4~E72A3Dwe0>wG1n;n7l?ALe*Q_ zDl8_ib;j`1pHm?0OsJCI&XNiSDd^qjdFZ;zZ~2ThPAQlK@Qe)~M(ixG6BfMP5Jq-& z%ki`&Nc&zEc64@qBwe*3v^^i*_6b3wKl~gInaO*^2h5;iSjo2oGxd1Vl-3loIM)A^ z#aavdB-GE^HMx$8vf4XY3%c_}v3r#%X1N|QmlLz%R+$I-iPm^bUcW%7zDSkBXvlHS`o`zlm)X$h zMbY8Q)O*pyl6i_nSM5c^^q6YTx>(e4iO=;&h?uoUvbX^aBY6lL&`4nu;Yb_MB(p-S z>>{vx7AxyNH{^B*md-I)EO6M(Nd~!PX=u=6ZA0zuBM&S4-edt^mZm6R2-BEzK zRt~Nsmr{$c^_B?3+_VU*h_}$t zt<4t|mLX1g+$Gv;jqFkMI=`&U6y{$Mr<72}Lgk5kMSS1=*0-JJKtC==O`PT^f!MlJ zlBUnX{$`tLnkKeYBKjy#Yv(vp*Sf5*bEa*@v~fwkBF7OpjP*C4)kp}Qu3hgHemOBy z0ktroHHCo|2gH;-5(6rWs_@PL%o8XfhFrBoO-*J&beet$O#4(4|6;1yDUtc6f^;W} ztja`N-eCyRqVa7z7|Qd=DP&0`2V(M$@^Z@=G+kHDL=pO_PFH4OuHx)`5Zu;H`Tg=; z$i(OM@+HX~rjIgg83g zY_6P7=u+cQP?k)i+=T9KP^5^kU){q-`q0!?`U){}=66aJ4|Yw&J5VGE8_w4zK}^s{ z4NVWZsVN)z=nuEFfdgOMA`<-o>5H^;n%I+W5Sfya)gD^2IoH*%-inpgyjl=8+V^Tf z9(q_#rXvBp)C38nt#aMLB|HLwHJ+{OAaP(8SC9?+XC*S~3jQgca-`DQFfN-oIKg*0 zi)K!eZ8`_DQ zepwHrEWr*+VsXltP4=Rn7gy-OEboI8dibyvZ;9SLpW)yEC=gXJsy9f#8(NRstHMz; z19Y5pvL}|HWo*|6jOkvTe>q?HT+Y7?6sjT1bR68NtL0}x4iF<^6RG`jP(2W_)9op# zG(OK~bwDu}he_+`*kgkM_pOEja1Dh>eLoZu_MIyfMv}|w<+{jRWlzpwnIu_`a z>j0UMqy3mW;udf26rxBCPyX6uTp*12`&15BsW&V}&brbB$SA&$CWIF5$sxs>+}@q9 zn(KS=hTxJ^TYgH0fCOClnvlbN-JLLDM=v|VF~I*Qik0r*ptxQ#ZE<2%g~O(0k4NJ3 zef-4yB7J*o&Y2!h;G1O{%OnrS_sNWO@cHq5dq%jhjPHZlP&=0LrUc7BJdSw?K97c_ zBJTpL?E-F(ntK#v=pw=T_*0@L2N!}HzKjv5uRyf@^Yuqh9m$;*AZcl9jqw+<;V%ZL&ee-}W0xQ+=rQ9gcW4 z>)R_n9}vfU_$6=qkz1aa-_Q4BZ~Cj>_{E?4qx(MijTh&8JQKfP{oc1e`r`aUQ7XYM z?3aLfgSb{<(03+ZtIQ5Badj$7O1T0wSP{h3- 
zc3Ww=pw93{49UOsN=gyB?LUCu08c@T8oivAp>|pp{}C;`hwWrSIWvrRlMYxi#QaDd z_i!~%i29}HQfs$2s-D)qhk4rH+U-*H^quUw&;m=(!S;;#5}_>6BYk>wXMWX)geS-R zNe|u~xIOkW=(S(_jA@MlVki_y`%C=lcxV22Ooh5>w`-&V+*pFbmb67iEDDn}uI~NM z5NEj&n85g~rX>sc~taeU0mZR^xV<7aFgN-{8@wRX@3Q+>sqJe)Vs?95yMnkN-b=?*c8^ zS>1QmIj8DY-Kx6xR<|0|QVVmc+_P3IBQ0xY6r@F1ai0zdgs`p2B3aA~UeTIpW$;yp zrEc0CW(K-1&^-KB0#5u&jE%8)wUHl*u|sUx2AlCPv6l_D(l+C4qr}=v(LUo$s+<-`@M%-~ND}dP7t({q%UBlmV6-sxZc=!P4g%Vvlc7 zRN@s1j+fr8a43?GKv*@iLg9#bxb%8O0DBt`_pno(N+*a#BsAHsZx>PdaGyc-gc0i{ zZ2-H%aA_SmoI=(qBxRvzS1&!wq%5uIdv@i%cVDk?;x6?)3m3)}`)>Mg`<|-DRpH(? zj_J-wG}pa(xP9FA*W@Rz$u??R?4hNNBUkoqScAe6mm)P{>ju+9>&AsofP$^B#q?zu zLzHI6L+cO$ek-;|*ttvGI4Xc*uZiEbUsqx?{tL&hvlfH~5ArhG*bV|!{H3jFp`GC0 zs+Symo;&G7+V`=^Jf(i4!t-ofE;5mz+E8Tn3sS9I-iN?xjy;H~!x@FJj};CU(?u!E ztzo<=b__Xyrb5k-AHcz%C?EGm-HwEXy(GLl6j!`fl$Kh?5cDa9$#JAu_|y~m^a(Gw z7twDEbehejuDSrGK5ZUWMERHoCeD;1r{GxQmTyq1S7eU?Sy)vxJSLW1jAx@)57z4k zqWArh7>xDFcm=QWykRcdh0kW*PP1ej=&3gU)P5Es{pg7PVGFRg0YDjM4)6_YfTyu( zU<_P-TVj4bw&R2E>P-VwGd6mVPYx-hk5G%oIfh`zv?4y-3cV z6Q++{s`Q@@gd22Hk5A0oxIwAs0B%r@&#=kGt`9)X0F&<%SRS!ar7?GVKo&7B{V z@^9*j%wZdvZ|Nmc5|Uj%t>g6Mp^oA_Ol6bjlwPSUhP_-(ZZdqRYBS!c8GmIEm&uJ< zUBn?G56G5k56*4;p%&_)0T+`h4a`V%WS`b7(9(0lo-U|3r>Pt!S)QRB?h!cmMNufDx8vkrU@ye zJnHa19=Ce||S32x5#C^>^>tH~8)x9&Ults$CWg-BolG#&nn3b=4R%bZRqph1w`#S%g zrTq$*rlq}>x)f4@M+?<#uV*tTUZupiQECO@XCfZ_O3&=gYPu(B4o_ygt&Q0}R8JF2 zbWSxo3=|ToBfac>^gI>BR6k@f^ z)H?fF74lT8mwFl{M)*PhZ@&}HQs+l(5|b^6w46Pnmy3x+mIsZ1c`n*621}}gz5ct} zNd4Y%l0Fx6dj+<#*2HGtEstcxh3ExRP=)3i0f$8G?A)U9%C>I4cuV`m((~_$=OkIQ zZHvZ^Kg%g6bc{m4JvvJEQjiowRYQ`hwNpq)lJ?V`w;F*;PqaV_RE1;~d?k8tk)2tL zSc7RG+75Fxd#ifoM)p~yN+O+Ztdiw?+2~@Vougwc9`DxK^mL9XU+Zj^x=3P4W0W-@ zSDR{9O2ZW8X_%sl-3(Jy+08W|9{i3lZReg%HL6ZHLHv>bF(5LML4@P`Or6h_8xo-x zcbZ&2**i1eqA-@le6t|NrGzVlb!d*OA(SW!Zp7jWvuA(1xhLX;xx2l`oz5~#eXp}> zFp-pDhE&5P7h-A<~NsX<@iG1*?QsI5FTnsc?KS|rO_1%F_+aKW$} zUsJLtWQ7D0I`1_$#%Il4tfoAXY?D`exFve9MB#gtdHx(xAhn&q=TO@<*OWTeEEi{9 zNs&$AC3~m!RivOfe93AzDYRN17^K0L6zEUr?HxkkS?pk6s|g|5a=JGiTyOd{$?w%` 
zXi;;T`CMe8@z(*2Fx9dpSL|7X1Tmc@UI?nvERM&i`_%S&v1bE$v0_cRsLIZovi6~| zlX|(XYQ4ghv}Fi&M`4^mX-=i3NC6WNed0Vk3N1s-+*}sZH@QjEL-1*ZV*3+`@Qtg)ml@gGt*d@pHyt9YzS)LvF(Kl1l4uoaO{s3K=L_RvdIjvtvC{%4`q9!UdM;v18DA|UJd|cx>4qn@TJT?tZpiB zU{}GB5x8Qp%Q(d}+g08xZ(W8eK@2mYUAx`q5(;&q&2j|O4XW?@=1cX$-!D235S7Qm z--nkM{$A~czZ(y-@HhKm4XihLBzsb&4zeSpPNJdQypY6?MN%ZLZw_z1R$NwU)dKx=1DibC1*ljg#-o^Xx)?kvel}#2J4sEc%T)jNp_= z9MtM?-|8?<6|DiyPPJe4`RYL(roma~<=UOc%Kj@{=Kj}|gUgqi`jBbz%r%FPsyI#L zQoXze+SZI9{p&QIdi3BnO4MMf-l9`4l}x++R3v~pURR;+aW-Q~ABk2a>0O!MdHEH< zkq%%raDA$LU72@kRPtpiKswc@rqK1t4hI)_ zSn|?HEQtHC6^I7fIk(0x#SG)Z?;TU|fJ=dQ>@W!Sb zL-+H5N>C#Zg4-sLkfm?dR0i51#Ky~wjlb8+tpYgYA;A=*wraOPS0>X^>+=3S3Ft4@ z?1c{9l`dGqnwb!Ah<5d@Bi(�iTFn-Um*77s1ozqWTFN^jtoM$IylM+Lg6;!>?C3 zTJy$Fyj8+fGV!i*=j;kUvAx8r4#Mf`Sd+uZbJ{dmW)8g5qu^|5Ak8CbkKI!LBy0DG z-@*a9NsN3ae$xQ@Zp#{5^Je{C8?-#068rEW9Et-un8%1#vnjyYL z3o#r#!k_zjIql=INq%4k2zHyihYYBQa3R*`c z19GpzcR^oY^{z`Da2Pv<6wd?_M;onJ$Kg440ZWwq(k-2lPkz;eLSQ5^;d zi^T$njQEyD+>xsHH!`S&{z-udFf$O`DB5!xQ!(4X(A69%hOeTYprJ=a!6#imye^VG zS|-r+oal)2j^#X{Ur_$HbqJ9DH^~0=4o5)9xNgNb>+st;ek8CVK(Pic=e1WwYn>Gb zF>a8wIRHvClz7z9&1iH$6s?U?)2e96 zVwW8>>8w`ce-%MArwIn5nc2|R5@&0fgdy7O#g|PE0$4yHpA-?@v&-YmoMaPh$lhRc zOE#ZEsB9DXlFUmroPx*B2DZsG_h>o{qrKP8hia|8XQ_tIp`-bfl!RLpU2MgQ&8Bda z@dz%V%m!ED3^h0{CrtzFa!~~c#VP(qq#;>n4CTj%CJ-!&c0}XCliC4NLIZhax#k~> zY4qj}T&Hj23o7% z^8=DnY!mh5prQSaCI@}1^-9_j* zMS%3Ca$}nfjF2eL3R)9DHREd23KDMi^Pa36o1z(kW$3xnlzV5E*4aQALbt{PVRS25 z`d~4@7#Lc5JwV3jQh}dS3_1QeQf^fou*#5{Fcx6(2`0KxdgBr{Y@m&-chTo;goei! 
zKpfsMN;#9945~Wvmb?!H3C`P`FvkYHUYK|*JD0tr0`x=Yo~8b}}&1P7TrI#rSRd=nWWveHb60M`-?Rj^_| z4aq5&togPHHv}{&0lU>1#s>fcxT3g#cxdsY(pX;6M7VX<8&RSpt=jd*=0urkcSSRu z6*JwlqB-rZXf~zFP77MmoOV|2jJ@G#RVq5lKK+@_~;An68-G z07NS@0j03NxpLi^*`y9ZaBUDTxYt1gVCaru#SZ36YRQD58tZA@xe+$boQ^}k-i*Iu zhY_mEcCxQOHO|&9n!I%6w>&AD()v(A^|7~J+O6ngMajD26hG#qH44hMfxGO9W~oaA zrFCc&LsUBssL=yiXK@;(*3VFParar%nm4Qe{rE5|wWB`Hqv!h0WBAQwC?rgx+sDdV zvZm_gO?98muxt0pD3FQrTi?_4TpYoIs_ZxnK9$se)cSvJZw`t)>kYibhe0u}IO2V% z{*PJoL~;x(9jZR|q3X?ZmnMxc&%fp6o$|<>VH`L=jsxeLFKY85+T9%V)hdS2FwTe{ z#~JZWaW|~YzO>+T=$kig+Sz(}6(`6y@7#Qx4bMBD4d1*20<12_iSoRRwUPR*oN|hk z1|~zpNb9|jm%^-NfU_lI2u`YtRmNOCG)mYB%ooAx1=qZ;-^-j=tE1Z9Jbmc0eaFGB>jC|EFiSZBd% z#k^-tL((EZ+t2!i|<@eswypzt-uN;d7u+XD$Ye(Fzea8d)b(P2cJh4?$5Y-5U$gVUJ z=!;5nt2nnHDCKMf84*^dNNYRNh>gPIjZ=bjK@s()2K5m>^m814gY5nkF6ZJ*8>F;Q z!przt-5F84s~|Eps*uhQ?;UcyJ$t`K=~|`1(UHX{b;Y?eRfz)C(&v0l{|W7g@4$YM z8sxKwZK6>xjx`JyzZ&6ziXpGpP0>EQ9EKioT1{|P{h7>5J|nAyAhnr3!{?;{+XAhd zSTKzX)tY5*3)^L&TsX5BEP-T)6oSmKsY~b&aH@hqR51D0wKlO94#c(K9FfAl^u|k88$k1`9LCqH-uOM2YJv=+ z!|iv}Bt$ zqca;dpx~(*P$GbJvdyCg2`3a%z*gP3cs3ER8xj}Ppd0K0-9Vrlb?X)P1S6=P#7Tmo z%*5KHEr*joN4LC~U#Q;j*4OVqzjS0#*qG@{TOX7?Qhrr$@=HIjS43>cU%JHiglfS8ZEdVHR*UuiDGi#x(=4h+27C^fZposj{3z$vy zK(Lc<%dbfPRdHbYpL<#Haj<}TyPE~HV4qk3_2erIw9NC)Rk9dJT3Nr5)YAdPcrBG? 
z5bVI5a422rUw-x9BpmeT2K{w!pf#Ih`>+XU%&M8X_`qA?5xX{c!n`?C`1`+=vL+`1 z@x0;*N$@`3(5M6b6(CP=esK(8!-C-1GE;TuS&N5{6zD%VM&K=*!_Fdu3kCY~fNdYm z3j+Q1fpZ%Cr7(fmsU1Xj0M|Ha( zNrr(UdYMZYc^Oy}8_o=d1W8JP8_#~pS_&bvisGF>;SyaMA}Y-&>IOOt8w7Tw!8Xyz zb0Xy3pt z@cJ%Rd}rk>9+3=E0)S0hbdbJrTGTlYjgIS67n>hCm3e_~;L(wqMIqBvDK-PhxH8K% zg;Mj$;r2P~M>>r1ke-lRg(JVBO=d8$AxSp{72M}c!fdQES29$bIsAo%lE#Tk(wo4s7sLYex|N15MNVWuQO!N8d$0fVub48TNd(meGRs-B; z=O_e&n-0;M*r^kucEFcee909~!gGWMFgdrC;cREoj(UJt)I-74Sof3mksFK#Hqe&m zYtum!l#TFew$c>#_Uv(y6i>uO(p0Z(R`S5?aaU66kznOZn50_^&D3YBsVnEr5^cuMJ4JF>qUr4)+dw`^Lc#8c+iQTcTXa^h|; zj_J=0`tvmY%&%jXzrLI%OY@6Z?<|kix4OBZd-Jr;4O8N^sH}@?HT8WF`*LXLdM%qd z%-s%vk=wnN7E=9GNGbLaYeKzFCfIeB#=ZH|J+P;v`Y?w+z3iiKQLcZweERnM`jQ9Q zTROkitj*+qUnmDppI;}fF+fpXZ!laZPy4a*y87w0e##zJdyH-`tNDYh`OjbVYTr=itfcOPZ?<+bu)%)h9v(9O{Q8Ua!z)aLbc_}cPmKsdE^th_$j zLa(?!dUZV|z6kIy_vTs*W5GpwzTk2p!A0Xnz=-Z<}ZdBj2jkYlUWr zMEZW2Gt9%~Qxj<~N|UTcvY2Zu6c@c7`Fy9>J)~-y+d~(Rk4mF*Knbuv{j}K-AS7uV zhFIz%^3zU_WlfLtAqgIhoSCE%JW!->yA)}96f1RUFJ$GXy~|51gpzY07NWysdfR3; zc#1fhGk0Z_!A;iZINewoC9#sWv3L9|MB`{XrnSkhtqtT2Q`)E|`{x5Yc)0qt`os){ zTG9h1q25T<`_PiY#4BboEvPm7TnE;Hl>I_4V{Vcs1k!-@@qVxzfSOYryUhHUOQE-g zLkJg&50hg)C(>%ZQD|!0AR7qMc7a5F=&i5cL3fgG@qG13mLak;#4jQ_F$=4B$&^5- z<>hj8VKIA2urjI#*j?&B+Dt(mFN2;BY`C&Z29$GsJHYCA!b~1?ZIxND)v15y) zF|8ufbR04jL6JviKOuwqdc3Y0j1QmDUL@rHPN_-hdMZI_U^-E4B3QMgj$VsYeF~Kw z3u#KTN0|NPUR^wO+fAqA3G`_x;ZHZ7c5HE7?SAi-Pu*s2JWS<>sF%8^2Tsm6saXx+x>5SXLXW&Q5r%6*#M3iD#N$Ril8?7($5Et5>RRty|p{&k9;dR7*t0ebVE6M#QpkUt1 z-q&0@TQ%q!0PkDo_?SL&X{R^OX~=9VAFVV(bU`Bg+)mlMbJ0IGHy^UtDnjfZTVz6m z!dtupU7fF9ABZW8^IoN99{#;2(oB5Lve{S94HCL|B6pQiCi3c6 zF3hw1WE#VJ)s)@I#YxTVQn!ztR?1-G^Uo3#@M9-?s%;*YMkIfz^HZ{l_Zshi!q?efU^gV09CJ))rXZ z$xpNeRyXmfw!rEpUd`l}C~9>RZ)gjwuHk{U0Mxt!-EV3OtZw3;wgpy?;@jE+t84g< zw!rEd-rN>gUBlbj0;@;yeQkl&aQK0?!0IO6(-v4=!;iKFR*&Mx+5)Sacy@!#)h*o7 zykFhG3!3+gW!&tiQ7SSfx9zzm6GHjq6V|MU_ralIcP#Q_ zb7`N{=2;MTo67wD4)fCNr{v|+)H(TYmv*FQp+}clDQ83Va}QnGK^>U=Y!v@s78(Mx zZuS0u{joQ^oHgZ4UL)Lq`AIJ2^@ul2wn-f&(Dcl_-+jttM*Uh2U`JZ-Wz`#Bvoks^ 
zALI7YZN7Y4bZ!|mGpY3okDSK6r%9F02d=+UHKiTps9oQ139I3#T-js{xMq*Y>l&B8 z)n4v-y4kCC{t2*1Yh?a3mdjq95kyA&fJv^Gj3F#?oI7?5^tj_XEmW~b78qryF2@uw zj6Q&h&&}+E0`HcL4@a(3i;GfZ_W#5yRj*CvD6Bmc1FEU>UdYvf7ios#~XnDELfx z78!sMAseV_Ewzu{uNnY0ZTy^&e7y)DZ%M#F5ZHTR)X7=j49dm_*b+DVao9fXXX}9&9w57#;Wfne5BnaVa_i_B%07}4w6cou$p}wp` zD_W@ivT28b<^(T^Q_4Fpt$vrF;sHpSFsmlmXgi8W)M0gnWmpu%-zD7zt==T|vNm2?A)kED!`m0#ac?fIq`SBlxj_l{#S}lHnYY z6~tXZ$Jq9*6iD-d#4V7Zj{!1L!|$CBHiMeTBWOCB`gZn9zS%`-QR2T5If*8MoFv)l z)uBsL2KczTDD*7Tql(|oDEYZ{(bv9y&wzvKm)`N=cOXQeF`gVfJ$t{JRk2%amQPOy z8F4x+Z8~)`)^)|>pRNnut~CiW&aEQ5FK6=_Me%8}WzQ86q!NI&N1qN^e6N7?#>UdUr7Bf((-lvPgoD}4U$gh};l#e$|is)ibGZ)%3G z41R?zi?tPzG0Ub=O<5P>gmW+sjm3{^Q8Bj)3)%_^nnL`!jJK+ zR$qGN^6QXdJSpkN!@Yl~FwDi;jYV%Udd;cc-)7V#MJPv?l&Hs;nf*!wE4E1Zs39Ct z-ke4B7<;hj3JM@)#o=`GcGZ3hw*rt9D{+BxYQ_qDtE%lo1LNZcEK1?n+`a1f1n@Q4meT%_5@1Ns}tqYy|s|}>PY)v-H8Rd z4yZIF)X9vlY>vh~LP3Z9N$ggmN$KAG^9+H4DmGP*FR&LdOGhi?Y}73O31R=5lSK7I#kYOcAg14aEg2ZQpjqr zzoOwPdWup!tcygBgO!L;*MSrnMRMA3E`$~3^rEle)=a>@>B|pR7|KQ$O1Kx|fo1gh z3Qbwuop6Ju75GO>h|vQAsuB4yZ7@7~VE#X$LGXE5+Qc!`nj;3a|mg1#@ks4ED9 z0DlaP=nDEqq@F;{po6ltUlGh1wAH_PcZ2!##RS$3_Vi#0zaxQ(xL5Mc+zSUS@ho1< z#Isq;vobGgp3NbpTnqvsJ+(g0X}wOhK?)a05H>!8C27`@tPhUR{ZVt`!fK%?G1AS; zjS7eMx4#VeMJW=Qkom>4zi!|`$Dua2?&YV28M-73H=l zC-><_=9Lfv03c#Dfs|L!mBd`?hfF56_M^h8QpOA$Y zIJI8&?XG7`M>P+>Vp|6c(uN?X*hnD9Y@Hyd3v%rMAJE1trCdLLy86;>kq=>>Otw4u zS8QZ_$6g;I>?Px=hR;fGyM-)DnvWbxdVHLM?X+2jQpJ(L$LRqBNd^G@b=AzH>N@+o zD>+A$mq8m=7Hqg_TRI^lN7LL1T8dRzwR95&+m=o=>f23jX+5=6?X0D`JnCE2c^zS{ zncf)f};!T74Y$WN@zs2j60CeTh`NNKc+d}%O#j)knrq**EYbWHn&jXI|_nrdcxOD zH;8(t0ogXW@J#Emj73W1T7~%291Hl11^ga|n>R^;pg_pvd01aZ6Ty&=2rW)0!VjyP znrF8*SI=s$-pBS-AVxve!AhxoR3xm{9kEt(7=k(!LC&z?t~=o0{EkJB35v)&UVWfP zT)9aY-IS}|G2Q6j^SR=7_NxLiIG`rJ1#GI;t6j^V3j;=6AVXXP{{uTReLag=bEu9! 
zOTZUGxrd}WsOR?9y40;prEYJC&2`;Wm%3@G)GZD1Yise(_qLB4(B)|I+Pqm^ySaCQ zwxJ%*yAEN;^-1FYM0%JLC#s9J$3dlTU-Xrth~$On-u9slv@cI#Vyx;(WC18MllYXh z@aGb}4@_m$aO+}71&Br7fhmTa-=|`6BuQ!Ua;qjWISN#W&1ANMZ4~eK0cEA(v>X`2 z2|XFPb-Uyjeu8I}c}soo$e=!k`G!BZ81lovMdkWQCv-v_5x??~CvY(rEZ zeR_=*f6E-;R4X(l>=b~2Eqcs!oJh7k`=okuec78_Lrb$?*Io4qa@H6IU)xXSexxe! zd>?DfCRcGdGo!g*G%FO{ib>U|zMR^ArdpGj8&+GgdXeE$L>=BrdEEx`IZA;zUa+jN zNA|hBoRAFI(b7W|hXKshXqA3UNwh~ul9|%(8D;9*I#W7R5|{n+)-7YyWzqH67LCU? zMsRcQ77J#YVx*~e4#<@G^2BPSW!Y2TN~z(f!vP(d2AKM_*{O%KAGG(;r`Py>}+SpZd8 z4vcbUGxQNlE_5{+LVUTt3gwjSBLk6&38+fi2+D`{O_Pu1yqw$vp`0p`|K3AV^U*(3 zHJ>v|(d6Y*_^x+m7Yuei3jpX9zKzb?&JbZt5+3%{G6RI8&e;j_8)(apB+oCUYn<=~-)3j!!4)|s1ASOSVzGWo9*nKmEXK(tknX*+ zj>^|UOP1MM4=&oycuCFnX(7WC1~YXpKv~0)p(cJn?WH4OjwMgJwJybs(uX`!l1f zOGl7rKlzQKcBLhrd{Z8abv04!<{ytHPb{_%Hd{1Gp^|5>Kl@X+pWdMTNfls83;fYJ zzocd}q4i!B#1vwK7<2&>Vina)$SW)(j~(huOjxX9LP}z8l;|A|3X7HrG2L+SLOV(B z)|l)dK&U*I7V1vqFAjcZ!aLBUuX*Q142tqEj9G-MyP;*xAW6N^EJv{y^9x55FNxcG zC%=X-hHb16P;g|gk!mEC4J2#dxaz*uT2-yS_GTY9prwP=i~wV;nJ)#?AV^P@Scyt6 z9Fi^r$jt9GkZqj;WS(3R#FXpLVR*~AvKI%&D3}O|6YFpS^8z<1rYxdPHQrdfWHG9F zb3J%-So3CNqs3fxb}d#9HB2l+nV^XD5XAN&L(Aq+M~|7E*f5wlRCH5&$cHs+uHz1T z2|{=B!+BA3H`0(iAU`ht1sSdyu=bPt0PO~}(*t`d2IT985=p2qn!Qi6%p6(4nEGEO ztye09zBlIu9zvoB3a#iSNGRnMx0*xa;rv5OwYpEDe zGmSiV!%Pk=*){t=>R4=5Y&esLY42zBkq_~m=?6!1$#`uo)X)7&9VwQV128<6VHj(;x}qYoYYAkkMVUZdxkbs?o3?P8P0y5* zG+H{z`A4{j!S%YfH(AnAQ?5XVyT=BNa$SO!dnJK_8Ss9qA<(Yo z@Z4%>{E+*`!HIxNz{>s_=kO8K38|qi|6n?!-cAb+DF4AXm78A)o8nl7r}+O8SSC;zi_3kl$Y`ndb5<8zu-(Y`^{;}8s0w5|s zBGcuze5d0;19==w;Ng9#4_v1gLKr`?=hT^4PIB*m&_mh38#=QdsHenfXdIA05CGk% zO7M7b`+vLdobbu=m`}K`djZ%D_;O6AqjfsQ7P(hR+-XcfT?WizqH#b)K~cPfknHJ5 z6c%Jjt?NRAyKl76A71WKZ1w(hYdN&=uu z$JqJMquOz8Nv(A`flRu1Av*YBA>jmrgFk{P#MR2MIhCE=I ziE+p^CSwl?#ezVQ^|Ici`r2>3RIg4*a;RivD_Apcx(1c6l#j3^J%J-shv*D?x={dA zWE7e)wXab-1U~x%(TfC{v^gneKIjquluZ`fkZOlnUER^NFq9Hdca1MKT4lseHEG=m zy%4~mm?LOQPy_woB@~oU+d}N!>TaM?P*b8tiX_FLze+LitYxhaE&n{y(6T`C58$Wf 
z(LQW}YpU!t%1N%)7kLCyy3;cLV}r7tk3(lI*UW$Iy9I%^{r|2vK+f^gpHCT?W1?-3 z3gFe(w8?MdyRpEe`Gb_e71zAnMpzx^A<}q^DR}^?Q1-(DQeTW zt-oOg^x99rhjpF#=I+l^YlpV|wyx$-r<#?(ae+ChUP%W{PFFTeq<*vbz_fgZ-G%-0 zp%?*b3)Ufd9pF!qz89zx!6cF<`%uytgT&>|cV={1aII>=$E7_|>$?yd=)`G2E+4O` z5Zn^1v;-}+Uao6&L4|UeT^X}T5P1Y@j53;BV|W&VQ;_*j?`pU@#^&yA%h*EAs7vDY zlEC!+t;RT>q38q~p?_b*BIQdIUc*RxYh2#;Ppw_S3e2BHBa+yo{DC}Us|}S)rVw8v z245I-`vE{oMqk9#=QCme{DqCy6<>%^C42ftGQoPIXg#=(^{4KCG!#+|1`NlpX#eA*b*f=><8z+au zIGNSP$zk|`=4KCSiv?SWBHgsO_>k?B+D1*u@xwl0EJ>JbdDD|kqgIHNv)Vp6Ec-<3 zIVU!lZbhy4x*Z_;jQrl|$V{u1C1U_~1TO2rukjw{$Q}b4s+5oH^)KJ{q!8m2?ta>iBMGB4g%;G|X!X*wjnCJE25$l=FXnh6>bfl%xj{0}0xIkOhs zJBp9`IKL-26y3Mg!}7axd0f5!VtQ}lM34~Y!H}I5MO${1TO9wn299zMTzp8B0v3X= zG?B6pTPieSG1AOhMD`@&DahZVF=g*!w!Z|O)B{R=h*=Xe zzhW@ERYk5B29awlS}xP#LT%fNHEY~JSb`hac5-#cXm`;htjEL89%MJ?fgG4%at7(1SNHetn zf${eMt0KcM!ui#1bBr{gb3oh*jzB)Y@ zKVl7EO{z6~QH~OPNsX4^%V{=^)3B)FI}m&s1z`i|Db9Pkf-CTAfN9K z`MtF0i8^mA$Y!FRBxtsDyce#IWYr&E5w6206|PwauJBueE6jTkxQhO2xVBfrn7q;y zwhGQeaGl$p6%slWF8_gTnA+GC=IS-U$Aq&yv@-tgJqVmXnpGdaBAh>4=yxTITWBX$ zVHxH@*}||LR}>bXb|R)9Nf>lUAfbDa*BYXb<+z`A0#VQK`t22CENEOcd(}Ds z{-=h$=ucvwPR~-0YPR3gD@|)xFI*2!ev`?ykZE^qy38E(wD0q$A^Np@??f17tVwxy zkn8A8{<2RR(k6BP!vwS?BObjDRwC3Z{*$P@30-q1u^n`?+hrR)D_M35skpg!yYs$= z65S3j-#fg}Ka@OlAN>G1FGmT|x?Q~nC>)rmI5RX{mbdAF43z?nzK|wQa5KYS)h@8` zDOw;Rg*dRD1=JhRF6kzaF|JbL2Lq2KEzMZ(+J=-_HITlhb%Gv&Jm?L{!Yx%Gz0t>o z%;Nryc^QWZ-=q~>RfB@|h;2!k9XEA8Lbtgwze%3F!h-@dSYLy%zJh9Tp!chX>%hLr zk3Ds$<+}P`6IuCi)hjg}h?LLW+#0UCX6h#pXPMxrsD9HqWh$n1f z(LO5CJ=8uV-Jg0vz_dxR?!Ya)F(D!YnmXVl_pa>p?iLM@yn{iC>+z=jr0pOTx#@-& z4{PPvJzjv9=ySKnqX!j1MOS^a=>Ry?9bXfa`tvdU*ZD$pU7k%bQCebKOu*j=_vIiR zT3bT><)pr8`5OO6h|4ilpooINI?I(f1aazy;$hk!*Ji~u%!)cN!G{9U&(VWoF&0Hz zk{sKtAQg{;!se2w8_8^=?xg;*UdaGtwkdWnVi88QQ8B%wb>sv&Ax-{vfo-ga6Mk{{ z^`8r+&H)H~#tLKp4zm+wot8=CY^tf*#f2EPmv@^%e23VJ?Fh`7VwoJG zV~VKy@<9GTckdQ7NKd|eL(6pg6zO&&RyUUiL_c?kgY7V+=LX^ss*eWbL@eqWk)gL0 zJBk$r&UCT3zk?R)h%XMnmaNgRZA@@NA}F$f-Xr=_3=>x`a`c3FEiqaXxQsXyV=3;U 
zj$xww3te7yD26GA`kNRg7bAuVOJB|i-`>m5Z};GBa~WT;Ap+3qC0^p6>LJqY&o^_n z&;FzEEV{5rs>Meutfu3un>X;_>^F(8oIchCCsQ66QG~+MtVQn?0}T$w7R8YJr@y}Qnqn`C26NUuR(;W-&Hq+^YiK( zB1ZhG8-kp!z9DVoxLVK^SH+T?6MbN$th9yldU9`PWE%@rc@;Iwf~R`<_3UYoZ2T1^ zNJ+a(-k@t_{zvtWHa=P0R(vGW_LBFD@@N0~5`T#?LD_!>x}XX_v-c3c3blNfc2K4O zGd$aBUdB#+bxU(~qPZ%Yt9lQWtd07MQMxKly@9^WZYv&6ExlJBgz=a-^6h3lt}E(syCKNaMd^@=L) z|IGKK4j`j8v%G#kkZEgHtxpxz4!aO*2^C1fR%F#HJNFFK`FOG;8~YO|Fdz@vL_Pk# zH~Vwdk2?7_bxbDaL-J4*)kk7d_eV|b_3+Yg*tqa=W=q43e!VYSc+1!aycfLCJoAu4 zROkxZ)B&YmKb$ReIyE7398pnkxWen;yxiqTt0DkbBn!;7?6_;g6)nA&ot>J27$gF| zf^y1HcgK|>|6aGvR%@4H$78+aiu5=8eL-Oz9F=zWq%pboq7ta~VUr8G?faON!8J== z8jJf>_a>QSBpp&@Nmwy8Ku?5@WR|*zx2098qWU$1yMC*dUQB}pt1`D0xA-c5@oh!P z7hs#eLZa$+6-k$>aTSFcq{Y`ekoe2h&v#{uz6jD<4LA|HG3J$Q0|Bl4AyDv%{N)RX z6hTVVTC7s@N+l?`TwLWMleGr!QPm4oc+0Q8iK-HPZZU8J-?9 zM8&hEkz#6+Ji|4M!Inv0U+?4#k1!|2e+*OLcXmz=g)_W*v%fGi87=Cm_hKno>jO=$ zQMLos8#zBxWW!o?t_rVa!->=&;)Cu|08jm&Ttq#cOuX3}=zp324ss^VzLEvN@qIjA za;az|moybL+&h$ceD=L!LP91%i!3YYYTF?Z)RI3}zI7)D&`4RUTN0R2vf&q6Vy8_sK|R)(i0jppeD?j(FS?NA1Zscq zNQyq5#&h3!Xwj^yQ=^?06``{6tt zE%o=q`B0zk#XL)th$OjA9zW)>;}K_{5#-0j_E(Smk8ZYX{s`{vL6C(ws@_yyz}&hU zk+8+C4j4n5QHhxxdoyYs*H2W-WN9<1Pih*c&8ShI{9FE>jUlea*#Q)mO`H#8OR4E% z%n>=A&8Xb68I{w~yPHwRK|t+h)Xd-VRGU%Nn8J<;-c-!7jAsU+oG zHXxF|;J$E1APm{*3-{z7Vtx%@Ce!RQ@|)N~L%+-Tp(!^e!6o29_g07959)JwIM$rE z?f`-)HVn%;s5cBdJYJFE@t^VtuuCP*LU;A0?f_^B@bJgVI<%U7gQ{l*@d!heA?W_| zM6(%c$&EQa5P@255KPuXOguY19WYL5l;FRYJ{NX4q%(9afnZk^CwZ%x)zzpR_rJm4 zwL`CSg*1DEo>8VjBSXg;z56zhL6qp<+sG&s(9=)fbTHIRw6>Y&Jtl7fzhsOzAwC>Y z&S{_=l0Eww&6bO?qkoHvAXp^#OBd@37u9j{glM6$5~ayWS3V?sEi`eRCCae0y+d1N zM0tqY5B0PgX3X?6Uk6o9*$?%$>(9tiP+!)$57tm|>SxEBt6KY4OZD4Fh!v;NF!oU_ zO)lEopxpqa$LpwR%HUUp2EiFo6YOr#7Fa->K0y>(PIdx=_7P|L|SH-=+H4$R??>&AChU+ zOOj((w;+6z9l(ugUzA6m6y3*F2tmo19`tquw(LqF9h4Z|z~t8$jv_I{yGvp*Nu%{; zX|xXhLX>vJb1W8$lE%oCW8Ikz!|>yrYt_MxX?30y2&0izVa% zlNy#RjfkapVdU-u3gn}L9H2e8SZ1G6NJWs_k&2+QK!QjzK&AB(Oe2$tE)fz#s)z&k z%4G0nG(wxwCkA-*wqs^hGQW`$r{(;Uo`*nf_Pg7fVi0NnxV)-a$NOnB5#^87uB8tj 
zYZosYd|XRJYp8zldhE^C;>LW0Ow!@RX_vgB>1DC13|=@iF|O8E?ZcrYDt8zf-fKY6 ztCmbg?Jjx6!XpO-4w@DHV_Lx_Oy>?Y(u0n3PwOWiDDr;0>R#^I+?hfhO2p4`9k{^^ zFQaG5PfyO4XVHpU>vId1NU5j%9oC!4-%%Fcso+!rHkMx4+PtdKhN~*L{TcZS85&p5 zat!qBlb+~RW_C%MwL36-jV|+(zQMghCvw7#)i76(2Xs&Td?m4rw=ZyhwB+o0Gr|Wk2X{mTu=S+CS9nopxY?iT4-H1mU~(#;i^B4cIZk&&pedfTed| z1Mo@F>rHOZm-{H6@sjX>Rx_1}9D*=j?z5gU`B;&1sCcZAbH@H7vOoiBc$zq&_uKR@ z)G^T>{eU|2lQG#Fg4&s#96v7P$P9(BA_5q2EET~+yd*yVIn4hO>vK~F0ttXPQB7Za zKB$-n(s=l}Uu*L$!AaCAb5jp!LtgeDfFYT({RJeV;{4n{_hwK&1^9?M1%pzGhE;*L z2=W{cq1a2Z2T7`F)i$wt4nGdn2k3k$Z1*gOV`M>b8opV?P-%ys2)d6|UHd}ReKu{Q z0oG%P5qPhp^Xse?Ck=iYpm`E5NB5ENb@$bP^HBODx9Q$8kMNe3u#{by(u3 zCPmwme=%tN+K9*OIC{wr1FHKlqpKgb$_X5+cS?olq`shJ(YDZfsXRoBiT8*1mqJ)L>^Bb1p5bLV?IRXpvGlQM@nOcZC?x2LEoo*EL{I)M65*=;XB%bE8vsTG6ouOS%JIPc?<`l7x3H2`__5* zb2;=WVx&hcB~8^coLi=`0Fdd5BZZU}9TwBUxWrV^!!8!t{p>lC0;)mk##&0u>{^mY zXZEwWLa}fg&>Pr;R7G@LL3EMH%z{xS4_02#gj_}#53-FB=oozk9Z$`T-7!=s_;BpK zCA)=w-?eXc_S{_B<84&y(VjT!=v(mbL}esCbxG)c*y5 ze%b)~p*$ORC=`W5i0CaWM{@6*g#=8y7JAdgT1Y&xPa@7-A_l^MsaRX&ROddQSSG^8 zP|dJIq@xIQgZ+eJH~{J-s^>nPb^g`5T0xz)pPS$ zlIa(qD%O9pq55dyOo6gA!ImtFTf*7*Y^K&D+j`#P%B#n6yHy|TH4hsHQ%{R*by0k9 zLU?G;+Nh+jn2tY*xH`!TOCT8ll#zo-x-bS6o;a)`|2?U=Tqbcbz-3W8M1>_O1L0dP zzGA^rcu7sgsf&LHI>IP(gWf>Ks}p*qx;TKtwH?<%wU{)QPaV1NKQDby9Zw<}JD6_r zA(2?2IABAn`B4;*???MSv}ywd<2!AitND`W5ICMo4O)wm#Zs;QOy`6vpqaT=;5qY(cla^n`&PJU*Nm;>%}#4(qp^>OPbl-?Dw*)uo*ciuUSrwzt~b) zXU{K+qDLfD4iLOedPN5PoK5jQZiGNVENgZN_8TLy?jS85Zqls_hokb41gv^Cr*+a{ zX*}+lJNgQo6TJ*ei4p`)>!mdF4KjFF*~_%+R!(|V#MI02uj{~3SDctDy^~Ps`4I>} zZc0;+Tu5qn^EoB@{+FNiW`BXZ94J@=%|kmZuHgcFWb!do7iGpF#&93eRr+{D#ZV|h zRTqSzW5L&eKb+v(3+QIOUIsHbc#^2eJIW)tuo84AG1~JcJz@$(r5UXq-c_Ims%-D3kHfutp>F$LpiN{(2W@U zF{LPz3&GS!Sk7YWED&^mM1kbcb;br)B0zZvG8pEEfczn~P^k0JCcwldB@n@M!j4~z zerF$Zj2&cJGV@fM+q0jLwB!aRITQtg+GY7yG8}0qwZsQF4*fv@DHk3iEw1Vrb&i##dU)u1w1^f_u6#jVSMw1c5JUn1>7Y$6a0-o# ztJ%4;v#-4bbk(yaRs$e5K@ZvFp}en+U7gKc_)(H9L!Chn;&^3g3Mu!+lsjcZV1f>^ zV0B3mBT7f{b!scEh-m&uM}y#7Em<;`(`bRV#n6zj2t@QI(;lnmw{E{{F~RqWI(HF` 
z2~O6xrM%;lSMq>H$8ybtm5N}N*kmDAd@Qd@6;c(zHKE+R2-4>D!V&*08==DZxE=B^ z*A}UDKXo6pr43G`CHi^|#nCHsIJ)1M_GNGKkJpG4pm|1twIqCVC7$P*kgYTl)gS%i zOSL8fKLL#D7YQNnQ$#bU+}-NBz23O+_$na`2%g;GoZRi}uRZp?cwaCv*tWYEL)I={%(XO@25Cf~LbQD$7LR02! z+K85am7L`Gn`OZvT2j_Xwn^%NJevHh$Ql_slz|oi#A`)q$&eaU_&AXH+(Ct^h@bo2 z8)ewhyknr(pdBf3ESr%^-O9%}P*r5E# zs$~}+`f?Xb0AAE9u^P}3@!&LZ4j*nrJlvYNARV@M*rftKtZJ=8HoEXbkAFR(OePCFxGAf!3%#I$2@M_$xH=u&oL!I_fcIL*>27k4JU zgh;1NR@BGpg!_y5E7!g%W(Cxns)R5iD=M=-Sy8>4aXs}xDimGjA6<^t5rRZ>zVi|j zu~gdNFN0;V=24Uc9f_xS2`kAf33EwpNQ_mpC7FDKy%r)MJ2FH-))t|k4zk;#iLbEz;mhN!XNIOrbqBRNUB!G2x zQ}rL--c%PjescRG8lXQhQdNk`X9OSzH8KhW^gj4eG9{`P{DF?(zZyG2 z3+*gHL@=0quy04F)MR=o`hgT)M#2GtmZdphq4CjCnRx%Kg}Uf1@|Yz+YRh6$G;(R% zMEdnG|FxN_)N-Rj5X;i!c*8;^>=vZU`Rp4s(ChKg(AZwU`>CjSwir@ej;PjAK)Ms;W@;RnObNBUqw2; z(7TIv@;el$X|W(E5t2-?BhuZ7D(w;dK&(y)`%=A#$<^P0MHj@x({fxBlO#2*8e!PI zcO#T#Ur$}F29Nit-^+8kfG~>6>CHXqhwN1-M@p5=J!dIqI4HQ{EXAyZQmLeWo)znF z6v0xvYExZW{q3fx6$MNc?n>8~;g~DO1y9PcR*S|mpf}LWCX6Xis99tKhYhPz1cU}7 z>{qud);@U6GoR89i_CHTk>f<{t<>h_IzXcVd?@xcFnZ*iv`W14fC)rW14cBpf;&|h3ghdqUQ{@1g%R4xaSG1zV_~i142q18@6Tu|%aHc3VHjS}y zn_|?u)fCGZWWWr(BH5%&YM&Ki%rV_Mb`~X!#zx3tr7uKooaMWm^M)6A zuCi);wiI1L!U!i6Qv4q0=avK@!l@1kqP~NxySG(AHNKPU-{3;Wck03QcaoG|fn5@0 z_FPQEhX$HBr?M(5oFdf@uA_48Wd!D*MLn=B5#Vx+XA3T~nh+a@G9u}$3{w@o=J^p~ z%hqGO9xkF^r47%945BZhd^&~Dm0t#|TazdR+b!KvH;e%%<1PCpF~+XX&_(LcTwIg5 z$h0c%V5hOR|7~$pI>fvKNXMLq28eh1KIrt=>}ynE=oLMIBR^>%L!};tJ!k@aLs+75 z5oS`1u!ma*RDbd>;P;LjJwKl4TIk1+96k|PU)~yyNUpSyA#k*=Diu?# zW*S-vKx-G>B%R>=MnQe@L*wi)fr9XsmRH1LUZd%g_S)%)uoR30&4QdNnKN04APL>k z)?=$ME@o8xoF?gnjAT170B{?(k5(zV5`BuP8}u*Udji3URSkp5A)~{|CE8* z=IMNcD$z0ei$kjN8a(;SRUWo&T3*vuiM2%U(Ew+7wRS|1Qw#vw`Lx{h5J%)j zr3i$?)Wi=)Xc1b_`prFVttMrnjD|7;F;FFa!i6n}Kuc0J_=#ycs8TaGuX*lSW^IWk z*J_&7#wbncA}9mSE5vAT=AqPdQD;DM55e_1WX3ZQUXY?gFZlv;~?x|(->W@nG%9P z?c7BWlvY3TAP6Kuw>ED00`{=ye*umaV>1`Uz@SZ!x=6-_mxck2(m69}KjFEYBz)TH zZ1Kt2>89WYn;u>*W%k1*(j43#0G}3bk}+4pXF{k24JFy1WyMFX{Zb7{VdjwGBtpO0 
zBJ^XsM5x#F0jISX`6n3ys}r^Sv~y4g;MF73bu&6>$?-+F$bwR(pdbhp5g_t|yrEUO z8A=B+L(pCMS*ymBS7B3@Bry~s2b0W0j0KTC8^G6U)zE~vyMD7ukoF&2kPf?olqju6kjj56S>hF40oQ_5{+0a% zDb5&yts_X|Ko?*|wYvo=yJ4U;*Mz& zmTr2~mAKL=Vi+NDV;!tU{@42yo+Px2w2mLgW_3V z#3Ga=@RjgvDW$5R3Po)t0ZbsJ1Tar^`eZGD|Ct5wkB_r!E*kkshjoxK1Hsu96rHHR zS|al_)2*{6;hKOdda~qXyw?b3w1k*KHhiXKmJ}e zY@o0-85)?rrAE=|wZ?(dt<-uIvxe%VKg9Wz{btHzLJ^-+4k!1D0VOQid~kK)SXM8G z42ai&%X8a415$iAeOMk)CT~2FO^f<8|5ws>EloN14v)aQtj>x(>dqWlp!LbZ60P&x zVziHpq!26Af@mJ_Ris9esD(U~`VZJ>e~c~}V)D4*SgJRKX2juoEV9M6p7Q&`Ttt%RPLc2|?L|k1k|nT)KRzi2%?;tzp45-oQm++%&St$enjW z`|4*u_G)IFSU8dctv`F!gjbW2kOHkYRgg$UuzPLr2Tp+tFRX-w-ET7ZpV#8hFY<*?QRU{2nT z!kxe{HCvdq?Z6#3bA1iLWz)bow33BN!18PTG*mE!15Bd)=xl76`0^KfYpuqM> zqP61 zmI8a0=>)btGG0z&Pgyu`gxikO*lSv2S(34)RY*X10*f`Rv^cPj(>N_{Er$kBmU-kr zPUG*i(Em^4Y-Xo%kLi^D6Yn77hO5s3*0Rt>R+8qUrQWpIA*EW1NVCl0v#_k1 z$c7Tp4y~Gai9kM>mS|(81FvRM1iC97t38;=`*4xkgURA`crd-vp`9wrstKhGmel|X5 zMiQ*UicRqhdq_AqvFpKvh7-lggGmKE*cPJmC7(nc>q=>?ShOrzv3O$W%`Ok7)?kx6 z+UpD|xbFgMZErEz?pk1tH@iGz=4Doy4&oULdUBYGJ_ADXj0w;in%^BlpqLUa`im(r zSZtQlU1-J;Er4vc%K#}}gjJ+F7kG|T?H2#MSYgU(eBb*lRuBuPDm#l6dsi0LMEMA~ zX%mK$noVdWc7*qwVqSa6m=3v#Hmz+7Hg5CFWgpbESPbCyU9+VR4TT5eVr3WNj8 zt#(LU$st`6Vb||Wg?ovQk$dgEaiL%iC*qR}#efMH%09%GTqw)LmnlzVJU&(h_{(h>FzXx(Nr)V^M0?QIu*@x_fBIcS4q~n1l zuyVUBOJIVZS4d#2tz`*3m}{voAZTIMe~d-vjx^pSitSEq!hAv$Gm=K|_3Ft+*q1o# z3QtIskjxOpb|^{|+uvnTEHS@Z6x*TLPoh``Ae7fGCh^7N6UE7)cu-NSW!;WLu@S}M ztCfUFi7Rn~Me#u#ipzwlnUsk^)X5y|#UxWb>p%3uYZXG5)V&^TfT*7;`U$pgEU1{C zMbOJ{RtaNPdLN%KmLQfqdJ=@O5Vc1bn|36OO*{JvW7AGY7z4a6{Q$D77sfIdH8o^J zry${)nQeKQLWB1*#leIzX_^{gY_>tyTD3Y@li|TuC-RVs{A|yWGf6Jy2x9{#w6jisR2|R`{HZt}X#%?r(@ji|2BaCATnmp7eLuez#LI)=Oq|AV%4-+o#Sl}tk zCG4}ZcZkjVV%$b55mHU!4XK~NSgQqE0Tk2Z1kK~x*-;=kG4PKW6~D;YWKi`hmJ*M# z+XT8mBT`^+zp1VFxNFPh6=GVZVr(K@*U%H*36GRqv1i5Hi=uj8oXIA9K3gL~`QQgF znjAk`WDtS!P67BPR_WK(s3M@AN{$KtooyrWA*1g{e7m-5^HoRQy8bMh$sSiT)TiF< zYNU4Jv7`)Av`G*lr^XiCfIiQ+>N}!e(oDVQC&_RIsA)rIkEm$41P@WwL^!7V2qFE%JX<+yZTP$yqW#7 
z6CQC`(A=lQJfRxjyr)KJxm)AM57@_#*M0o1ZjFCPA1w(*0CQ1$lcl#e`R|P(l9uRY zyzeop*xk#pd3|GX&#C<4?Dd*No{_&~@+FV)2?jl%wB=9bI>c76Zkt*3ZtmrCh^pHb zw|oev=d@R5$LcChPmc9KR%9iby+QrZXOVMi@+X~LLbs4q?>J+p0OkSF6l@QHewdt5 zM4&K|=LMlzf)FbuX5J8m=y;Dy@W$c9J z%f;rgIqT|52ju-r!{;C!3QxDSm@Bme`xejfY!kDNCg`g2%^C(okv+3VqJ7Mw8=&YD zK1zc0)ObB%1%5WPYnxKiHC*@x9Y!E0>j*cLd0f;1l`ZBt?cUb$v`PBx2 zv%0wEtoFDS=T785{|A?LdWT*mEK`>%xS1sZ%lM`V#SPdCMCIk2(@xjGtG4bbzzT;J zVa9$EbYKdRkSaG`#!!xJ^9A*8Y2p_haa5#;Hz>)paST*rrwulkZK;mgAezPlGJCU} zA!&1t#5o20eTX$M6MZ#R%Db>B!Huq8a=gXnee9-}Ny~ja-oyyt85NUu=D2CQT%T<9 zsy}}nV3@s13zWUdqnRg5grT#-b#^doAeop?iGj_zN(hGmwRiV2JwRAX=d>i~^JPR` zm)VzrKOSlmx%Eu)wwB~l)r8}K1Hj{y+1)+NZd6KzD@}E(J4EKtljy{2ckRe#Q9T-W zCsKRfC{9y)qxK_%lzYS+0KxUXkP17<9h(3ciE?5ie!6j7xwhkfUZukRwTyjqOd-m+ zUwxpL@{~hxi9WP1j!l16?{8#N{Yb*`4*fwXE6n<{+2uOAg&!%ym zIq0d1sK7e&`0FFNd-MVY;Q6p@H`}AxBzCK-%xqEA?A>3T7r~ulD<&*R$1p1?&pyZe z1s;Qj)1-}0c`3DrF%2{84s%TnvPxHvPsc0dFlKVCDfI*Q4A(KC*Gu`plxgINJ0cH2 zN{&cn>K3I4 zv~#S5JbN)7+HNt1gL+UG(%$&O*aF^*rI2~r%u*6GzPYd3DP`m+uxT#cLAxWRRx5=K zPThR=ZqYeOz#w1>m>XQN6>L53&}WS2CUOd`qPm+RMr)vto)t954%6KL)LSR3bW^vh zyG>Ki>Y|g~p>>D$%}^N~I=WAXj&?e9B06+@b%%~y6P6@6zSN-;%N@F*?vPUkG~F&6 zl7&_$zo#iT0C^<(cf}JdcFLOC1sQGbzS0R6JLI7bB?<~{j zEfkPiP)>5$%R;>yTYzw+-a>0ZrNmK&Qc)G#2R*{#w+}gEb_g?SI=LWHPcoFf=bCGR z4HQF1Rs~l0>U0Wdi}Q);*Zp`lOuC%CgV1n!946)n>*r|Xkn=qER|@cCm}`9YOKkg0 zd2W>|K@%EkC*MkYq6{i10+I_m^8XD|y;GVl-{XEtz}$n5$#%#_I`fUB>tc&mcIizq zMbHjH1sSoJ?$MigJR?`B-pFA?CngVY)xsX@#@;XkhfyqLVly}4@&bY$Zzb1{I1Rf80t!Ohfh=8 z*}wo3yakP}#;U$youz{M4NNaM@dty7&ikDVq@YyrZcvzSRB?4-1{nAS|Z6{5WTD{;*K>jXZ| z2eT>rRtbE1MZVV0Vz1mT$ya4G5YjK~9{do9G)Uv-9-cnD2%~Nyo+&Hxo65jL7&ekt zY6nCi!HsB26=0td*A_c2U6%0Lx@=P@eHCS!gfrS_S^dI0T3*(h{=%u7M_*3FeO(a? 
z3)Z;RcF)u%iR|#~68Q%#5X)-JLR$X4{2bW#grM!S`AMi>@REZ@Iu^6n@EpKG5R&{w z&&YpqB7YG~21{due}8E8Jn~BndVeUVY7R&mt&FC*P3l%kk?$`smStGF+86Lm)SfSY zNk)~V&85CGk*MTo;WY|2L+o1L+5!v&tr_G(o#aKmAnoBI5M#1PAIltMjvKHT`rnDYLdXW7Sl z4-V1+SBrPWge-Dn!$t(8TqW)8%^D@zL!lqc{!+MCwi-lK3`6;$aO6xC@rPIJ`H?@0 zDGuG;&ry*NcCyk=|ISdMsvVs+tCTX}o20m~xkx-q)I8;dPgZ)5I%5*fPX=k2mQdy& z^JFFTx%bPN4}We z&Ape*k)$DhZ|hbL3syqyfK3@`ELNyJOeyD{Yugm%i#UXSb&~iB z`etCS3obGTt2ebXvR^rZl49WUZgvVl73WEj8pg-Mnd5v0!gxIHu^EQJuYmV+Hynn! zzUm-Qn(u2V0&jRw(UmYkr|(tw>)q%#_t1?sXAd==BajDOphE4crs!OLAgQMO)P%Yv zNP~U-OeKzy@;a25ovVEKWuk`>lPjOw$Fxids?;fKH5VM9*yk7NNP3X3q(r%Z^T;Bo z7?J@&4S3<%S-WSj!6mtP1a<$)Ok$E{gSeFbi2AfBNF-}D70Z3{!D*4R95|>KYDw%j zJT50AC1nZkTVaV&u0N!->$JV^FMhbf7AHqe@FuCuHscGf~1AO8RBy?wA{S#{WV&pG$LulwHX+cVA3 znx5F_T{=mV8p8ZB1r%Vp&lJq`3{+{HkP4}yD*T~Jl-J-=Hv^V!Jkx~McN`H%i78vM zaf~&9K*BPTaUzRBfNU#5#<4yuWJ>}?wq)xA*@`Sf#N_u|YoBw^z3(+Mpi!h$80dHJ zJ!gNcz4rRrYp=aFVDN@rYUy&{WFCBvI7x&Fs}tL0Ymvnu_&dDLFMM_{o~ZM&(jE!t zC2e32!Am$uNC?KB9nsV729b3G-dp0H58JSM#f8{#AuKv?9mj9{)op{lI~{I)kzekn zDL%X+2uaL_w2~>D+se0ez+mIz#@4}!OB@-3h~qw$6t!{8c@I!I_S`)d0j8l&&nf3? 
z@emGJcKDzO!amvTUVNWK(Z`~yWx9Ve_N&BZVQ~Dpa0MH*m`n>MV7{Yp`ta@Hqe^`Q zN=zE>DkOLpWBOY6=S+aPHLD;>xfjBj8;i{@U>L#~9e{Q9CU!r=k=e5GkHeM5)MyF@ zzQ*Demsw8tgf*I5as(HI7GtUtGCakY3ri(FGe>aULDWXDIMb2I;4Z>6^Uz;xAKKzU z39d_gIa@Rw`@m3&aJZJq!I#~a*D}ntwM=Zwu=%{UmI)v6E0@!LEkmL8wM?X|J3P6T z$%;;x{_C~1Ow+GO*48qnHyGA31l7e#Y2=#*L1U+L!tRMKrxBG*JS|FpV=V(2x|XpF z1lucI5DdSw;SQsk&&V59?5R@MF08py7r+ILM5?^hGO0DJ*6EX2;U(V}8w ziHydP!5}@&iHXg@INVlbbjGqF%crpx<^urmtqUkIcC;>tO8>orZ~R>REgJ)9nxLK% zBTj^Pgd|Ku+VyLR0f96oi|?_@owR$SI=&cCwmb9pV+#}5CMus+LTz?ECKl!%%F{Mk zyh_%$y-zzRf=T?nBs?Fr2&(LV>xAo@K1W?kh)x}T*PvpAfCAhhMq{ieKqgJ4-PwY_ zYU~FN$w-~>436CJ4sFy8XnXqbuqsMFFm0Ry4}%)PqhAlws24xroW)n9+TV^JpZ|l5 zt|$nTbH)fu#DrWWWW!&oaA^}PehJYPL1NNl#8V)cLsQ32`urD(RT?N3(Y)!hj?N?g zH2~3J%cE1>)n70`#aK)$-YP&P1ik?(p?5=o;BwK^Pc6Q!Q`~Y0Q0Y;4!gsq?E%^v6 z1`O(XgIlOud^%H+d+OjfCxB+f!O43U9b7iN*0Y_=zkG3We-S4}pC&cRhpG7T#mTNr zSfFsy`49Le>MvjMt6G38_xZs5jdfoV>6b4~mP*5T#0WD{0_R;Z$F~~XLa|$~VYr1Q ziTko#vi19V9rcHH8!z}f=#9hAOa|jHl}Gx@T*`+rr?a7Z!Z0jNJ`&w^CLyjLx)6(> zekrc`sbC*|r+;Fv_fPCSodo%QT6P6U3kt`-P8UrrO`ie(d zlHo7lU6*aVBzXyF_? 
z$H<(I1>@w_sIG~za{mLjizX`euOHpUSBA{D+snh9>)a3;5Bc^i^W;p9?&y1Bw(oGs zQM+Y+Cj1Jkvq@$Q{^Qq{;jYFcciB*{Hfu1fyL+OgI7P(5in|Ni#p~Fzj4r~o>B3D@ zb@xbZJIvR$<+Iy6Xf^Nb!~(c=2kF=;YoTYg=Qy@mE=DBCjk^y7faz&(KbkZSfB?BS z;UjO;InMc*Sk?;0z8%NvB_FXg#97O7fJFssoXE0WX2;VE+#d zJL>ng(l0BcH*uB2Dg9PYt6!FWE*TH;jN&U6dEi}y(eW3WfE9k02Z7xJT$y)BAn+5o z`8~o2Bz94#sC2M%eUlK&{Pd{QdnUiKZvpoCQSk$2Wo_Gky%q$ND30*^t$xkx;!lRC zAyF12%u;mkH<@)A0=$tXjjHoW#BVgNeJlR(gd~0nvBpKBdwV`Q$VIVhT%A^|dB8n9 z4d(BVLXd%Fal7gdM#VP}O%=PfU)yp=z{P%*`2EkW?(Buv69}&=-^g2_4>5tIuJDlU zS-qyT0TJ9B;V^y9X{n%6BJ8m1f9Z1#c6%JLxX-{fqwYfjENAcKfV_}zz4t%O7Vq^n zx4rW>!(HoD9B2$^yF`eh0*%wj*AYJIvTwPYuenl$swv}t~d-GSz z>)7MO=&od@OMKuZxw!fKo_v0HR@>iv?Pcx;#ce+K%6Fb~9)W4wTw2P?h+fHObdi^2 zmHTfU5}?oCx92#+B^mx8Me&P3GDCVXYsIR&7wN!cSKa)0@k2LRK;lrMd5X0$1>J9Y z7b=3nYan7j=Wg;Y(i7xJp?fj#f>X`v7=$%@-SP~w*{OD-;>2^@`CK!*4Ylg-9!W@F z6M!@_eJsAbs0iv?#B0i)h3Ub&CuifJLCgXybaWey?2s%Mztn9mK+^qkdn|VJ*~#en zKS6l7ZIV&d2&M-AAi9uZ6sq{)UX-(({XVzQ?Cb2{83OnJr{qCU;1KfYgsn zQ2>IS?eExQB-ub)Pa?aSAECn!fj^ShEu)99!T9-{)g=c3C(P76 z*SdSsih4G~)>C`fKAZY6MUf)x3jiVzs1UVzKs6M#MaKd(pOVWyHDRO1ErBsoPAQRd zaVi4q81b^{J_}{?=fMk04>9unwZ}(aK_7h7!P$sSk?_z3C?2H}*tsLHW4sfn5htKV z3@z`J{YB6nBB;F+hK}P)fvG#B9XVTFsm8q`;FLO<+8_l|?=0HeArWNXS*Ie#RwWqMwvfRR~Mx1ai$8QN`a9>X0-exmus_;==obT&<&Z_0?h96oIsWJWt3~L+FZ3 z(%233;_ogH4@y&JGU84xqk=gU{RF(lnO~kkZMV^7FkZhISWhZyP}6h@yU5S3Z5zZ5 z3lNwuUc?V4noU7@U@U*n5Av5$w-`JpGxr7)(OL zYyI>*)!5K%3DYx5X=avEJf~cH3xL`YK=&j-cb^W>-2*^(uXT0Lsw+FX*aLKNt<2X!abGHb}fe*fL zuKFsi?)2NdScFpxVPRq_lgU#)9lq9ksSs-xfenv4#w{F3kD(PW{SjEc81BtL2S37*b1EG z@)3mwX{VP?BX}zx+ZlV}y6~6-zR7Fu&(>JQ61a(G%2YR=8X~F0;zKuY+?Y&G9KQXJ ztrVNR!9Mn|BOVNzRGMXQbK$?myDUm?AxXNBB$LHo_aUhY7)U9Lc=gi9uV9-H3^bhF zSvHH0TFi~S>eObs>U!T&Og)^=Cy=MC$qOc*C579Rtxq%i$7uE)EV+CqA|a4~c6jO-@zasoHwizie| z#LQfFE+CC&@!bN}PK6sWPP_qvD=&F4c!CC7%WJ* zG+Ii_i6wYSL2IONY5FsR@$Q^6F~AbbB)UX+QXjg8UcG0t5q`M(B~@)wla|56i<}(H zxB1_wx#+gO+=0Rm{>87qfnOn^{>=Y%^9Hlh#V76VzYaKaP1n}YKE>X|zA}%~INWL{ z4=uOp8+L$bHOps_s0t|fn&aEPfG5e8LTZ^Th$p~IX~@c88%Rl=9}#9Qlf-KOKL!%D 
z^-IwG@nJKo$bbMSx;)-o5+bw}E}0nW)>Y>;b`>WvyDcBreFfc*N`y@`>E1#kUq=Ka z3}nG5p(#ucllgB}uv`L?!U%kZ2d`RuBzQFfCwrbbhe-SUlap&n>&vERrp1RP9HXzQ zjPzj2>TWN3WZ2z}MTDH2fdzEbiyyW)r(f&rq#Vg(g`%CsWvy1Mtp&mNeRLJYlUpNo z?F59-l0ae$^$?D^8lfk-NF7AsSny5^$oWzYS9FA z-?X^aMMi1TqQJ}n(w!kcI>tZ7&rmn<6G>d)o=J!6ComA=tOGy{i~%VOeFe#nD-xsQ zkt~qiXWbo!@>z9%NsBIQscAln8p@Om)Gam2nA@S*aqZy`rPgssn5PN|m(J0_tozd` zQzBbgzH}l8hl1M};0S4DCg7IN;YURHY-)(8r!Zk_f+HCyK@OSJ4_*i@l(ALQ6t1D$!taHUP|`YJ#lj1# z3Ur2O4l?t52-z?MV?NwKU?9pHknno#^m_jBDy$b7-WTkCUn^P~VTv~bNf@c(W*%yO za{Jm5z9nvqVXvpTOzCnLB;QIj5Z^Ks3Ss9wvDA0De}$EA3!L#uhPe2|dwzvjNUGpW zHRont49(nND@Ra^fr(hp*_q98Jr=tkeKkZeTTSr%ax3aI^C!jSxTCK#(MPEHz?*YDqj4HWMgb!q<$R~VC0zyp3G!{Zc??{L+0%TdnqgqQs4BfLW zyF0q3adKo{KI$1J1-+wDgx_jd0A2a54*1i2#Z^j82T9dv2 z>6f{+W$|Mn5RnDSh0QsMl8cXrC=5MI#qtvdf$nFkR4UW?+xJkRC4`J%5-g6ev9ly?T=N5MBd#XQ)QCn8S=G1>r^}4Iw`Pbn*JXccgrRFc~sgt6< zw*%Tt+T3K?VQq!3(kCGym0rv;+8mvgyK+ONooei0Q!6d`OBVH=b(E3tS zZWS*@cg}t@WzkGPjV9pQaD+yjhIdz#5lsV@nP=&?4r|PFdBvSB2CE?{L3+;@YvKs_ z76(Ee;d^b=i}eSov<-w-lqx$-tyFQ(eGE1i*D)koQ|KlAWhSB&f;@si8Q$pqa+mWJ z42%BGfn8rF0v3Hbr9evpWSe;qF|Mw6)flU!m}Bcfxqd)q23rrvqH*rHwuhi^O6;Hg zmcW^IqjDt3*AmP1TN?tJ^D$}5b$&?FVhK573RDD4kvuf!3olw?WKYExu@hTgb_DWu zPsl-XUyy8tJCkfWFYKEcAO_KfqzP&5l*FsRSWGxb&@$=yJ9?wr@pvE##v?*U^2Me3 znC%ccrN{=qF*|{?f$0XB#PN=&WIFAt@LEkF)QiCkSOdRaRa@AH-3LdJd*?s2XvKXpo6D zg8XwGwIX4Boh;Oz!HgcCtXzKgcE=qey(|yt#u<>Fz`qAv`5?gwz+plXAc_*{_6I+u zE)JG+e~PtV86A6|s;8>kz78o=$E7GC)XH{FEFjZrLDX`@(dKb z%@+Si^2o6T7Dnx;s&(_F_5W;n)ir_(en6xb44!c8V7kP%O~*ksgrKKhZkyH{|4q?7 z!;-Cg_GnIqg%azXX;iKz<-fXoR<5K)%f47~;vg4(h2MY|fOX;>>&0{yrEei>^^Tuv z{@13}ysVj5<94)Yv4($8bBvow*5%`qmYvk*GPcDQ?{LHEP7x^$QeV4(`D7QIcY;&g zB48<7aC0pjA?czrTdP6H3L};t9_K)#t>O+mv$SmNx*79OznY>LWMLXk{7{hB5tqJH`8#ro4>0C#^6v zO!=09N=_@+_sZG*gmK{z<{@lU#~6%%EVWEzPe&&U9E$Mz*G4|nmf zL;WK{*dVE=!$M#A0}(5dDpvHA2)cA(o6A)sk0Jz7@>I+7lD(N#Q8-z?s0pBbh=g8o zvun)^%X2kL&IilL!TYKD?=exkga^QZpXcwdSgC(07!+^yq}gC za`~3T1uj>_W$u8Oh~87(6PI4@ehQGyfZ9)ibmK_o|F$9R)oQs^Ukq!3w@u2(<}zRr 
zp5##iGvBAcs_P~WCp2Abc_STzdVyL#f(x}E#VhlVg_Bm|1=#HcgCoYzaphZ%o`(1*{kH5 z9X6ARyQ5=dc$t8+ZMbbzfvaibZ_CnsTM2y%C^-Z2#GX(*hi9ojEU#`6S|&t$E)1l4 zum|V=Znrw@C!XxeO-?){54z6t!CZ3fR#ICd>XjfV#z;BKxAVt~pHfg5`?f%yR@qzC z;ZYX&uL;H9eKo5}kXUtJTh57cq_|`Ecu)Mzp(tJ^E^#64UhSrDbe*&dR;H8^!ASO4 zrS^T`RG=MnucT{3PO0J1r03X;1HE4pzo6mTwXUc(CL%@n?8Zsg(~}Z{^IDf{ZWqq zOS^PbBpVXzd5>TLw{&x|Mj~Su6ym<%%(Q_$2TqM$nOTI{AYaW@e2PocdGBYlq){e8 z5@q=?Lqy^PJSD~gJ4_F48HsT9m;<4Ue-e*j{+K9uEzH}rtVjoW7rE{w zf4GQ-GU!@1N^zs?u!WZ31dh(B8LAc{kFIp9}iDS1m)k*6zIbl{8?P{R5%0+oc| zX;qq&ewG-&6Zip(kY62}bBI%TRNhsoz<+k;2*O=X1FccwK{9|E@;ctcc#f*zD;Xwk zir^vAEP@*e{e%B`K_cbeTyoS*lTZTzXXI%Pj)p<(JLDFc(+BKp47D1(lNWO#yg&b` zCU)5b%jFJr?3?pAv+l0fgDPAWKl;wU@DCzq3a@rpz;mkmuGh0&RuK#f5%7;~?SEh) ztW`cPva3DR@|fveuUqbZnoI$O|Chd$QONE1#*X|TrbwvbL4U2!uYNsEL|g!Z z>u3j9kp!WCF-~{WXWnjw(;CH~8qzCVTGZzr&4x$K$(VKLd~z=L2lGj|U-y~&q1JiJ zobOu+|JMW~y0B_|fFi{^19XctV&hXCVuR4mGCljzzrEuv`DN^f?()?0$T!I_S3rQJ!phK&80k z*w)^GNV{Fv%{y+`1gGE|g(KpJ)5QN0 zd=|jxt(we16=lsO51NJaYM&nz#@Px>lm0@sk5Rr-d z#e6{r=1hb~6XhopQ+A4KiFf#VOVRzfMH#5~73cd}D5mLxr@g|woOTU3!bDU53b{*>_c57ABD}9p)vh{K@cc{`H9W(V`sUqZVq>}*O#*rqBSFG zbxz0rV{T9dEY>ixg|unYHxi4ADByy2{;R&~AU(Al+rPRz(=~5g+8X~#bPMXg`;2>N z$N`iG^cL5S?f12}8T|B-vT|c{cR{i;+QvzKE2z{*k6dN!2PN=@jWw#-46+;(fI*>~ z!HgyX;{ri)2}y8zQArWr296|ow&QS&nn#!)_lrpeh<#|axe1a#d2@}OzH6&GGqMn; z7ejs)T1x&3?oRe%$Ow$1ofWmwBk+{Vy`4U+Bk;@0zTB0zEt%;%nN%D9$#}c{+iJ4$ zAI=db8QNAJx_nMPaDO)Utd*fhr3~yafCR}FAdMliyEAqk>cS8mgNkzr|9Gda(Z4}o|PXCsn5TGAI=W^a2Wh>n`1ic zYg2Z`54X+R+Xz^v?cj&gp)w*Je&8J1h#zi?W(q$DxK8tdA2^=Hksc0)6;BJh%f98R z1HAINR#v#MHB*>lS2@m)`>~YK^-3GtP|OVW!%05*{H(g&*r(|lYw#e{4xfcD__oWx zJ^rPb;%-9}k;k)Qmaw6F>YPIR$RD>0QoFif1dkE>uxdp!QmW7B0_4^GuCbSWZP-K5 zrNQ-%59PWV>(uHR>Rpw0f4j$wH0EQWTgeSR{B3MA0U>yTt=c_y_j&3{U=~-&G8}bm zhWn$TL)+7apK3cj=|~w(cId~1p71ssXLL>jsoD=?BTYVG7l@Y47nDrRHVVq&`-7Oz zWPuE>3~{3La$OcHNOrG(LgwjvjB8d(j~b|iDC%s=if31h0lml=x2Y)bCn%DqE#Ymd znzItjv?bA}??6=qEk+F{=<4!z86&*w>Dn9=1(bPp`79aZHtO?lV2o!CjB#5q#vL-o 
zZ7asOZN(UOaO|wT-GuUA04n0fWelRR3uBx?AKXEUgQ5^{9UU@8jKrkcRCv~kF?>sK zv#5vY=U8NWX=j2s8DR?kG~7?q6PuTp{9GO8HH*}}*rRPL#L*AO45E12+J~|2fbEx2 zZY4gYv8SyP4R+`qyc317WAbdY*>i_?DZrbDBHxqK4S<&A*yQr5_xN~ovqXZ(YsiI zM8F)njKrqzuHZ%hAWlL6i98ddO==amhgVfcti4}=FSLurkO3?05)TILQoG4hjmvkq z?fddN5&d0?M{>YX!|MgxVYcs_&wqTY+U-qYG{?p<#oh*JiNKFe9|Rs+EoEKpMd3KUhn(q zZV*;Ca5GuTiuj>(Qm)%aPC|=Wpq-Bj;MXVx%*p4Li4+^=-F*i*u2N6zcAp~$T2@D8 z>|G7H3%tO9sk!(U4ggLtR6-Ejt@qv6enY$czME_b{hT5(Hn3uQ4gyOR*}?J}DN&)c zjaviwNlURB0tSW!AWrPjl^|-;pd&S4j@xOUkuB; zoy|Y$7XAruJ@?O$2@!!Zc|641#1hoczcKZDIUX+ZB>iwo>|=gC`Q%3w zh%tLu)zTb;x*DQttLVP%p5ApU`$qAE=wcI#fIA3^iJ27FJS@X75+^YbaYF8mRbw#_cTJ(^$Sb!wkX2)Oo|wi~ z`T~sYb$f#@m@R(f>qRH_-QpwhEh6=x{IYqeECVb;!vdkK`5b&GWfQ~AoSEgW))aGs zL?Pm>pCU(tZIQM0rp|C`%F4T1rKfdz zE<2@W=`_Pwm3GlBv%TIeV9N|lta6)^5d7#zAy4++#QFQG2z3_5pa;*no#3{3(<_>W z5ru}+Vhly299P9m5CObwlJlx;vk74wKWI0EaT9R^p>}9FiZKvnb{0#u6QKfiYLhzH zuzPI&YZV)m^Z~edNWd1mdRC}hW=k8u3G?5YWC?hR zWRV4Qc!HFEto7PhVOFI~8LJ0(`az%-R6O`d9%8snN}ayst%i$17j}CNcQXIyyQc?( zhJ<4@;6Pg-rb89LXx+A!uG;~16^b}#258k%Yj1b*Z40}oG{PUFF#-uA;1Olfmtl@4 zP_=1-uy|6ScpYlUVayo%tKf$+*9oajhvzDOYGi2YgyFdcnFfTsnzh>PwOVMQs1<`@ zf$BC#!g|_YKzR80@WC4D@xRH_{cqQTd!_UipWBn2$$T=yT4t2!8a@~AQjZ4zJ$&4e z>KDDqKZXtZIu#>lnF)!&IB|xqWVrwDR1fk*utIZoHb5cR6=Q{R$cwZVX$6(r{SKC5 zoGZto7h|D(%i3Tr)tMU35LFut2UR=IX4Yyx3WdT4h=+`Midx%Q%k?rxR1t6CuME&R zQwLNbuV6K|=`o{!K@Jk-dWM{G?7N<>P8L$Sx_oC3pt)Qs;5vL*f63m1%RT6K*8P{) zy1&EmuRrr3iyq4GkiWAy5;h}*g|815r_QW_A7IQFdk%btDn^UTCMY&X_i{Vyz}41DZ%b&75Pu<~;=$B&qLBc28q(09EdU8V-S2)b7H}1O z$pQ{E=p?{a63G;bZo^@!qF}}bQ^l@GNzCR#^1a5xf#&{=U%!b!IrQs0e#@_CKKt-t7v6mISVD^Wo@4cg_<+uh#uF3PYj4MQ$Am2y&|X*; zY~_o)_Bxq#9A*_g>1XF;=Hj0zC=cuHJ%+&?ULkDI)mnGsH*PYkd+nK{a{>Vv;Q%6Z z0F*>xfkm#)j&4_o+}>_EFv2MAM@aDj?#n_F2HtbYw}%&vi8_Z=o5kS@2h6x`lyile z3X3ix-GjjQ9ASDVaBG6E?lZqCOn5oV6?uklD*+1t6~#U|RzAJcrA-BuZ;L2hs-krjyiR z##OAh$$9N)uh1jbZ%+OB z2vIbNittwDMhRqN7e_n~F7tp7Ji9Kz3BS@fnH4e4f?hUok2{TbRmHD?>6It7IWSMNDOq>K?mHAMXflZo@R8*x9eJx5$Moe3ubAVfqVZ*4Y; 
zD-Qdm9!tBGRkr5m`mf4bTc$ZP4|R$0z*#fw*)TN1qOHTcG*iKy@?vIP_<0nA-_7aZ ztFJ(D=J{v@xg@Rds(`Gk=09>Fa-(kdF11gPUa(170*YRA3>TwK8V2b{Bt*&B&L^w% zYoLFxEvFY=K-w^ia8Q%Ij;PAD7!LCi4~X4HpS`ed`M&kShMX|W@f0Ap!uhE0x<5y< zj8I+R#kXP%o@5QyytsQprLDpKgwF`Y#G}(jQmn*n<0Z`W9)B0V^7U}?;uG4LcY+Tr ziP)z(2dXe6u$XZ^(k9-iJiEbnj}pYhf$CqlGkk8}!G%o7xvAb{ zKi8HdL%<5h4Dqddf!konQ$6`Vrl3|d+aVtgB?f|^z!AAJ^^7%*q42hdiWtEagk_3~ zCQwPA%JOgp;rA|x3AN4f0UOss0U(XS0c(af=gZbB6-;%2OhDB(1X;C{#Ya;RT67Z&d0;ZG zi3F0%gh{W>K@18zl_@T(Bh_8RR49=Zs za4LvH9=_5y3GEei*pp2^z@#YL)*QXE|Nh*^IvR#jx0h1?hXB6(;LZ(n4dsaDh=D6X z?m8aezO5tKUAMS)0a*WfF!hIR=$6f#&GFk50yC0o&|}@*Kszj9TZ7X_&EF2OZ!`<{ z-WB*5mm`buTDe;yg&?g!%8z_OD8n^brjGJ5`fTxwiJ?290g-W~=$`$>yxTc)S=a3z zp?4G0_%{%1n4YpSlN#?c%51NQY~N(F&unDc=Mt8mFC-hg$o#!d+IrLT)5{bHdc?jYIkIsY=np< zMYea|f-V)JZ-;M_e9^h{g`e>&FVYs9_iSb_7x6S8a6>x`gR zVo3z`$b;C+^?q(tvAQeEhv=m4C3)W&#>L^;$*i7@|7%n)V~!zq}m%glbu=to590Y^Qz46%a4a+7>4)lH30y9euuHtD3$IZvW z&A8eTh$fVuYM>EkqvK{Ub z7!sh}rWtS94XFdR3sQqFD_3mAIWbLdLBfW=H0axl8zE#2P<&A0OG(l*-{)4Q z+pWk4n8QwUp@?55Xi8~iBEXdE2(5 zhjGL^=<7&V!buC3r)!fL4M?2SxjW9upf*@?xtyOcAsYO=jWxm@8r-}gzf&6WmD4bI z{D@xAfwfvW;Ii*m4*g-^i4bLPFI#Fz_ibk@PqF@JaJn2{%;1jc{E3RAn2g;?aY2$N zs`*wTI^FXL4oBPBPm{_sj%BCvoN8CD)|Np`25DrYAc6uO&tA1t32*34V#{)Y=~`Gp zU|>gHaaXHO*{MgJYSf|8(-owR5hr9N5N2n7o80_yC$_s-Ff~Pz&YQ`EJU=7R7A#o-^MP5;x#19{HeK~6FH=;GhRJG6K#oT>Q<*3Fs^v96Fsdfmv zhTxnmL8L`^^z}vpyY!A@1n8l1mdGcT6jnaKh8{I@^Gp-rfhHVh40VUuA~h)tk_R11 zdCWgVe(4pOh{R>Jr{3ztXB;=L0@tGhI&lK|597t$Jw``!_nR?>r$sN!?-0qcItKsc zo4ceJIjcBW_fjCZw;QLNAgGczH^~g?P1)c~Y~7HW6#X(nVX3XqSS&)GQG|Z^<}N7? 
zvXn@%NZbW*Rm%s6Mu4G+A;g`rNW@A~r!gl8PeO4{lIfSR`ul;^&C>y0IK}JBiVmzN zOp~}r`{VDsd1JEpprJtVhNSqBL_+(*d3D7$edIc`JO9s4wBQy_cUlWLLZOBiPf-1> zSLxUg2A*dU5oM4AO?Gv(`mz(Peqp%WO}9Br6dP%tiAJW;$X9MaGGjr1+W0tIFuShd z#n=)Ua5b6VGpT#zV!W_fgd|=^P9m7gz8LU+3Reffxo0;k<6~ z_|2dce|$~W^Jc~TMR^(NLfAz*arLOc1=_pz?^hyqZ z52fb+c>AdhS&jR+xU|NiBPmZv|8XH{>@6V(gmAI`;_AUVMuJWD;xLf;{KE z+?=)1QdCw`1md;=H=-^=!HNCwj*E!*=lqnbky0I@F$~$)1iv$6@(nv=9&pD@G@7$C z+)8w!Xv`Av0@Sc8*95c^0rkTq#QFW4hKL>!PkG`14~v^`e7OE|Ac#`DI)_(t%kj=wwnq7wPa0G!0@oGrGru*F!+}&(y5K zor?TlTS-8$!ULwz>1N5gK4Vz=61y`vD~uIkUHrUQUGLq$UP!MFJ=0L0Fs!+tybV{w z9w%nRIM{>6*w$$MU%n)3*3aY3>o8S)clz6*9L@!+2Hs!=;MbKi%R8j-LvIz6t#D1ww{1p6O6a&7qh^Py1nC&DWaBssIPfm7~ zGoI)JcXSjSF93Re39KIR5BP&oyq~QeeVNrhmlu_E_4B8(c%bl%z6)Cgt+yAFZikQkcKhoaZ--+jrD^rg1V24CJF$Fn@%BD zT_e850HB@w^iTZpYw!Dp_s=|jJUOak$!x#R~JeUI)k1ANZfR|Ynij|`{aR)--5_+ zt5-QB<9=spB+CR&`V*7al_6G9ATqN=vM(1(=ZwZ5JM?F z!V4V*vX|QosEmSGct>GoXsBqbOs?79Zb3zH6#gU?MQZ7jqrjgR;d9J*T-M9d=2HF6 zo#kG{!R-0#U4+cbs6295jIt$lU!KGt{Xkw2)Pm{I?-siE!Vu-?IOYzk6G4q=WTs!h zm!MKMXD-$6EqPTqv7((u2$b^DOc+VE93MA{ifJLJi@5A%T{PhlR#HkJDLI+Z@^6ro zX9h{RAChvELC$!uFV#!MDL;$wo6IIzOY{kR}$XDP(@q|eA<@?#~D$BDF%RG7~ zUbJ`Z%zxW6@)lv{$g)~vnD~pCn>q)Pc-1WotWIZ6=i2B8ougsO%&BawL+Pn|u(rdb z$(}iB=pr65WwWq9(zHhJJ1{AFhjQS8Q)MwbQ#QXxS`ML_T5b|=ITz(=UFHrO52o(( z8jQ8vEaon>T&B-aN5Ut=-1S-x1t<9wSfHbrJIK3eIWUZt3-R^>22h}NQzGV$5xf(k z>}|S{p34%9Xj9RBYh6z{D1j1gauxUvT2-IHDYS$zy9MKqByBQ&O4*QPZ(VMyMOaQf zCF4gyiXlc65xx2t9;-f9j6Xb9eZct47NnSpWAPS)XhO>piUHIuWJ2^w&?*LZb2Tr~ zy0hKVk)n7veL&LLJM`twKXkZC0r>=hrPW2Jnudm@x^$lF^QwDqe3c@*4-8++OFm?y z>b@JSzuwlqu>OwtVRQcX*b5tPnu7{6wJYeaodW&^-leSkpJg6~CYwi=CQXCpUC5BG zs+r6x1UR9*EPP0}!^eyaYoEUfz%<>LWJ_oLo>SiKbf22Ie%tOoJBeklNw;!;Zkty; z{}i`@(&i2Y;t!VEjwK1pAd4Q4UVEH9u`&fDxPvPjtAuq1W5Qa38S5e@;0+xKAzc&* z7~qm3@#Sru$44BdMiZ82EMc)cb2sWflj^_2V>b|dFGr&Y))$krt2B*E-UgmRY|t2R zUv={xtCZR2-ylurgEUQ!EO)&)BXFb&dEiKF6Z978x<+6irh^~7IxVhNC;S$S^!$j` z$=WXr&HVtl&p$Qqr6>kn*rV0rhP2vMxH0;PQe<_%HL}waQ}=+WCNL#&#Ig|~%Dii% 
zV_gL1j&@ONTS&Mkt81|oX-12d=EPEjv%eONKM#556|$T?;9z*-ln|q2Xb4{1>i&c# zjp5#Ct-LI;qNAsT3eI0bI%*7Xt~VEPHdjPNKmE(@1JB}zuYQnT|avpjg@i!z5jZ8S!xIlRoT z$$6ky#{@;8D=XdlEMa=m`Tr;_{JxQb^6bQvvxd=zpM7alUTS;0$ul(Nz{oVKJ`em8Z6*gJ^X2_5nKw)+yAv5tFOdwwfM#W2WO8yBXIL|_&q{xSX4s*BN7L6Sl@n&++`3w{oHbh z-(;KFSVrWd=YnJF%lW?;m#=uS$G?bZQYznqZdjhtWw~CKV@`V!`|E5YKE?S28qxGB zH#GE&>r_HI z)HzOMHE4lW`v!UL6!4+HE>@xYi*JegHHwMAAl^aEkPoyGTL%s6w|C!UVsMFlt0kGC z*unauM(q5t(k!`U-LwN=m&{ho79KVRM}Rs67=>zqJA=*0p;Cy8VFN!a;y1-be9bk9 zi)}?7ow*hxwa7!hB&-9sQbXWf1JeM6}jVE79R3pA__x25J(Zpsg@ zF&9h+V6sV9;)Adu($}~vpr?z6U6CnI9u`x!B?kwCy-F7T5sO-IaYS2|ckyJKuL66d zOS+1_Sp~w}RQg~gVO0iIQyxQf9cBpoQ+_Jb{Kw%`# zQj2oGUt;m{BHbD3N;llH*q4U^CbSK~qp{)By4eH^JB z5J27+qB$OL<@~9@hSa!(@R?ZZ;&QTiv$`Y7fM$rJCs-d+{I1N{9#va-c+J+h`;;E7 zbT<@E_2~Ge+3kx?c5BHOIj5oEU$g>(=PzBsVX=Fd$|!ZuTB&NwQDW6A ztIYD$<-sd&0rV?JK))t|e)Z`~qu4fYDUWq?r3+_?J=6C`E@-63@EwWMh&LsZP=^_~Hvwd18`$&%)HVY{#U%)&K3yz`658i7CC5 zd5Rjbd+ROs`LN~g5~fPyYhEHCdTseIHJQ(_aG0a*YlqvDNmW(#7Angn_4G}Qo?);T z;t|sBm%e?VBMV^2V8n;<6r&YM#JbLmS1hF%s}Lwd%0+C*_*X5z~BA ze6E+DcR%ocaB>nt8rT^AMnwv+`w%^BvgjH_f^zGT2clxey#ux%Jhn~uGuVkGP|YSb zfbmh83uOg8-f}MDVDf4%_Ih}Vq*$McMK389w*n^7&^PKY+tS39_k^6uFq;v7?564{}jB?<6RAf4fIsZ|Y=5 z)Vug@!ycm-pIQ_LCIDv&n5Ml=S}8kfqulGbio+v*YmWInPWqr$PiJ1m^~qW9E3HIh zP?SjT>7A)^pvGc3XMFT?rjwFlSf2f4Mer39628{kbVlv=zKayLFxBLlpp=A=!5wo1TZo1#2>XCk|ER{A@E- zECZYYzDJ6dGCzxAiSjt!a`A-+Y;j~vBj`(ARm>PIo4nzdZ)n+;w9{bpgbfs#r&O};C6)aCi006!cOhKeL}gc zmG6}cDeHTs&$!>T6h2wJ-QCn2>(y7)P8|`2i>dn27;)6`_9@A;CqK8& zVy0tXB!+rIM~sKcnf6%n(oQs?j*wy@`@_@`o`)P6!w(#6k#=YC=hHDo2m8;P5ZQgw zbkuMRj-j%fqb1Z#by2EOTZ{UAf!TMTKXMCl_n8Z6xZ z+mI=w)%HqS<-QTMHF8f#D{h}tT&i=FR-{TR(u$xV+wP^6J4#5Qu=(T0o*E@a^h*+!MjxVU%E7Vi2-lGCxwbCU;-5dXE~`{tpO57V=<+cMFKeor7YJeCLOE|+oA#1 z(-~?MJ$0yId(FFN>k!=|s;DQj>i+BDZk48E1SZ!(gK%SX7uf{c(6S ziG@03%sjeKE!1LFyE=*-#BoF2q;P^0KEj1N=+->qlcqBT7qZYt+%I$l@+1;w9VG^P zP+F8oKWrxih=4n8F+n^mAU666as9Eb(J@$%oe+&{(NREnaKt`A+e^6ZC>Fx&{gly}+?ixdk4v!9klIPu9$}wlc)p{b=0g$Eo 
zC#5hwa2ZjxBAfp9KO6>0HXAqy4mnlCjLAMON#bIkf|hJ7NPdHcH{lY=0|~EJFNkF1 zPTav7lCk*K>FJ2Dd?r!0UO2vig?0)^++dpGWvwBLN`!C_1X_v~3B~yG746cdI%mBx z$G2H)6E5rB0v5*?0}C-lUZataTA)|g06jVz<0Cm~dv*=z?E*AmjgAkaZQPJWrC`Pf z6O^JUA%s3(F+SRyP-4BYD6vs%6RyBZc5tv!T|Qi6QTdQ(YvUYdtq?&>9^aLdA@z3n z2*t437){x)exn0`KXYUTL<)=TWtr<<@a7CqGUky|A?u+$kwyu(ZvZLt2_oF5mguxu4siHyt zMEg;Gu3}juSc@|%XbR^F%?=n|mlR*P!~X55&D%D6deIG28W6ZSHJD41WKkX{)M+XL z@d9#mv1I@RXs#E3K~*6SBw8sRFjE%n?XjLpw2bhfda8;V0kbGZe*!R!Dz*=V=io#& z&`3AV%dHDIqB(J6=LF492r?y9C;F0(R`Uj7< zLgxxP3W2`=QS>GACCaM)p|n3gLJ}zaANMm|d2uS9pZPp?rB>C?JN=yQ+L682KF5T=06)q{*)}TDW zJ-j@s8q0YwYPrLDYGY#qe(Tr8eA!>_o!Hp$JquFbhS!(IGErPxx!~nF9l5}wGcP3O zL$9N0P|J2k#<(yj!mIxLO-&a8(%WTl0N+oAi%HH67rvYZm+#ZbEL>|a@oEhweh)Ae zUr@jl7-Z4*FazLuUK~#@{n%CU^3E!@NvqQq#C!Bn_7D>N&H|U*I`+!tIv=bxkK#NW zvBiO%t7NSsQZnKwIex}}u4n@c+`+DvSr5a0&|Woo?+%Cca24xem!h;?YP*RytcNvV zNjQi1E&wMK~aL^2s#3+}C!QJwT_>3E|PGFNekHqFK!*Bw=C|=$gc)33Xa5^t@WIK+W zZlKedsg%p3jlz?t43-TG!pRIVezd!R&*0s}Cyr^k343Ii%h(v*4XcGL%ctxS4f}#= zHgJa=?Hg|H)NX`?1iZa>eyexgY7vcyj9(jL-Ka)^TanKCc-Pyt750IHZqumBv91UQ zy4~Co+<;_+b+&YE@iZ0+DE8y+fc3o|7;-&1^CMtzoFpU$UkhF=j>nK#TK9S4PVAX==jgiU^T0zT%V+8Q-%p>RUn(=r+_Cx>4fh>Bt<2QX!?;Bohxo z;&Gvf5f^JF3Zl1|D5pXt>H{jioCcNebK5sI(oELmIr~sIwLZUgEGYa)-~;jnN1@d&M8`)-(Tx(>$w_8o56h7RFH9n=pp3 z{lUUW0JJbXqJbl&SQz_QzO?NSa{u;>hI8xylKob*sN5w#SQv4OjJMdZu3&Fi>g*H9 zFp;-LJXeba|C~Y&f%piPNOI^20!RBG?Go=%s8B;3DJj&ju|*Tp6*X|y^CRZ70DI;d z=qz<3?GJ;D^ygxoo<{~|B)SL#bEld#O+$iQwJC*ShO3<6IJK4W)$j^y+hRlZGmMIO z<1+SC(h6jT(W2tY84e563$yq)%<%qj7UzTwNo~!#^%-VmxSccX25-*r-ms_Q42Q*> zGaRR0!%B<<$ioZ|DJ_YxjO7S?4_po-u4OeJVcN;eJT@~irm4bn5j}K#n>uTlSV1%F9Fv9oN zjBrXKNy-+wixCw2k~2yI;9cPw;rJ#C0@)q2ZDo%LWM!_b4iv@S(}2L?&@eJn1Qu*i zwKu|_3EG)`k+c(M{RYZ&?`oi(IO_-P%#(J!TInS1WQ`&nTpZ`npNe*HXY3Xam3R;Z zArbpkm|PjzRzoAQMaezY8$kqPv4F1J(E*XM6R|@>l|ef6i&xsARtN|PiqV9+;I^mJ z1Vmq{ml5`n0u324R3Bu-GsF7R-_!KzF8F_W`tX2&(ZaB4gsiqvF+$2IiQWk5jglXo z)?4hRmc7&`VAN^aE$o?oaOW6`Yf$yw`=KYZF~^U0@nfs}@Z%j`w&95)C2kwpUnWde z(1 
zt(dyl<|H$FIxnOVoI}X&lD$O$&nK^2U`lpl^LhF0Jl_`at>xQ(e0zt7>`MetIVy}1 z!P(%rYOwp}@4h+2<>k|8I!ZFo@q0r~@veDvk~>Ol!*kHzmfogY*Ai*n`Z?&`r+p4O zdR!faj?NVK*lg#q24XG@X5#ljPDJCr=Nm7M^M<5gm zXC#V3sakv?QAtwZM;QN{SYRi(Uq_Y06D7Njg?K_d-A;LT|F;O1N zG0hbzJetUuP<3xHS^YRsP*#{p){w+nwjeX7L}H?C5)=Ii2##}X$dDq7PHW3W7Fx)Y zT4al8x6M^lItbx&+G~_Gho{h3<5WpOqAtePvlnrDOk^*L1Q8saG!%#Ae5Rj>td);B z@~FGj=bdpYBJ))A=_i`laWH$)kY~d56G7&bKEHZw`Uyxp7>|@6+C&&caTu+TJFj4n z9wl|h&Dnh#qevwK+l1Chc=l#>gpOlP0LsC=K-<2nui*THWS=zB@jcgwdu-YrDIwgr#TV{CC$+)A5)0`%tPrRW6NtE+y>rDjPPUknxhw2g7*anTEWdI15e0l**8|TC%VmY-w&@nUiZWsed%&hS zwlIHP{+0NX<>3X~BRH*EzK{dLc(2iG>PFC!(Lh05EYFtxE-Rr8_}UhLjhj6Uiz));kSvMBf;xkO3|DaQ z+>()q#71HF@BSmG>gce!45AJh!Fp#Ie{t3~Ww}Fi2OW16ZbRHjRGL8_$PnY?$CZ~m zU@?O9`4WNLYi$QQqfJxm8q5j5KGP`Obw))gUzpLSiBbcy*ou~$+(iBou#-VurHwhV z2$W003~>u@SdcN;kNxGd@Zr7~;AKz`#v74pf*m3~a?XI~Y{V3!2QtGv!#ehz*nLJQ zIpggbk#h{*4YnLSxGghiei_^W>_3DaqhbVHMsn4im|b-@PfPhKp7CIE70k$xcy)w9 za!-~_P$k$A1bV1hTG&cq875=d2X;g%-!10HR=E8U#_n6$B>JFvZhq)#{y>@s4_z^) z4?QAHme1p|1gq!YHmwbIZDmQsm5qjm zU>3n&;VJIa1a~p9@`g!rD6!s{*xjtP377S3_Y_zpCuIG26qdFRN+Y!kl()6}1V2T>}HwNP&Ur7luD@aOdyeoi!2tTs>NiHW8Ynn6B#06Zwp> zRnmI%#>1`IjOgYDQzKYp#=Pl(DdaQ+w;IVfD?`R4X2?dtM3Bfv6}UcH($O|*>8PU9 zZRfnS=}p2Yt7uc!woR&>)_oQYQ5wswp?fC-%P|9f`^NZO0ca50E{#$}Lf*BL4amGU z)}a9p!Ruv=r3oySh;9a3icFgrj|l+>Pnt$)?J=Srpd2ib8&oyA*}?J!Vp;Glz}aZ6 z6g_pYJ;yoNhz5x^%fM{Vfd$u{HsB5TWDv&*OrW9+I470iu1}k{Y!Ap;iGea%Ys0qa z(;jLRJ$0xEIxq#c1GoH%>@uU^Dsl1ulgV)K5S`{CI+P6gzG*frheXTaDNohg#OrNX^K=2((X++*ArD%jEB3-Qumk@SXf0%n2tEROH zfN24{GXeka(4xzj)7Z(Q$pb!^-}_cYWiH zN%t57XJk$Hd>k#LE8?3jxt%Uv2h`mMY;Ez=Z{@Gxh9i+b)5TBt=uT**J#I~+!6Gi! 
z?qZ|{y8btM#W!$~VPKc?;~qaQ)lUcp$(DklC(oPQTSDu3MIMJ2Poz}dzbIJGTaoWY zZ`9!ad6UblqE|*mUp5rIEFDw%YcfC0YyI`&$!qxFGT2w=!-wh*d6zWC=i znF?Bmo}?&pgNgNOVCxp5A5ZZK^DeGioo8 z5hpBp*;3_CebqtJcheczNfX=GS!Y$pWydI1lHvrx&&nn7#g0$psCBVjJ@VoSI0WHZ zs=$D3Ry2apRRSs!N5#A{c>yg}q6)r>(<3lWV zq+sH&`H}=+@geb|$Yyvuaes(Yduj1qKG!S^ltC!iLFO2~ulk1jn#UHOmHI{bs{n6= zZf+X9RM3Fkbluii}c3)Q)Kh0GyJB33O-P?Zf9ze(^hQ-k7{_B09YY{JE2V zwR+%WJa_VMRu8-vd(A-$@%UnWU<691k!4uHy)T_xGz7e^*+84@4_N zCOr#-=ercX!Y*Wwl`_Jv^i)FV8Zuj@kip&h30AQ{r-A5<=SKz7_-=54fzs@k9V+TmRA&DI&fWSB_ zaQ3nS1moe{I;)+d-74%b;435}E@2D*UHi4w-#?^{AB~1+@~$QpxlygX(KWHG(ik zHU9D`HP*F-sJOg>MpVQ!#vqu4y&{mKeD}ZtM;HZvNWCE-rE}P>L`@^HhgkhNnct$4v&mS z;GBq|Ja$)R(tT5vdAp8w6`%eOiC!UU6z$hJ(c0Z+@yB8-dFjd2%#C?r@%JRk zrLmhkr3tXc2y+C;QVK7=)49?v-(ho4@8!4vK`2>(RBC|f`<=z#x7qlKWZuo#sFME7 zeNA`uNN(i}Tt$*V_dpz5MRCJJB#Xdq0}B2<#FiW8RKrpNcYp`>D>(DbdP2bjPI@=s zu;Vb?2z*AZBPoso{usX9;Rl$W4S6up28(S^@)_5;e@A2xYOMf`_pp0g)${fC{4;xv z>&fkYR*)y#!=&s0Hu_A*e3=_Osr0qA;UyEe~PMBU%r{pmv!a2naCa0dFhgMKoY(=YfN|tMT@gBAp7_@7%2o zPU;6P9FYja&@5tF58ACO7xI`zX`TLFCTCqgREoxB>Wfhd)>$?R5}Tk1u_3o7d~qflz-Y{YCC~3>1BE z%#Bvtygj@4kRIm)9ExVtKMS3JDPJh08E*sbHQ#~_YDfjy`eUJe0P9&$1s|`Wu|og{ zG(IjKnZFmMdWH>C^7g%+->9tzcndII^L^0ayVaQw7@JLgacy~EWeZPnwzYFB@am@r zUbc4asLnYHB3Q5wRvQp+C?)N0U6A|RGZM=-3hW3zUHnBkcgd+|#wjl14cyEBB6{o$ zH5czvqZ~ApINNSNZ*t~fhbyKnPjf?!pfswFYV`dK6<>ZZ=U6|rwwRO(SrO2!fda2( zM+zYh4J9b?b**ir+7VdmA=>7md*>Vz&g!*Zmm0G7Yiw|wwLb%>6+nGmz5)ypg)IR# z6fl9M9|b?WgDZ^A7FOieZfE3o{E8D8A(6=XEHh`xQqv;*bGyU7R#?)cJ~>$i(oHxe^$ADVBL)uABaYQnc*fD1sxBz?%|LXkq&Jd z>}eXs2)2=HS&kxhz_P&U=$2O71H5jJlw^~V@L);p%+E~#2<1UJWb5KPB;om-pO@;l zKaK@jzJEu2S9wh4FJXuX(F*TUSG}Y{2ekD|s@hgwJfX1`yOJW^;jyLsb_ZaX7IHHG zKo!*aMXSihi7x(3qo3&v>Z3O!2ZG>AJPc3&3ivPHh8*3?5qfcps z=ZoHQ3zfMl3d~rhYLms{36an5^Sx<5ng8k(LjgQ7xcisf2dP=h@}>bW44)+;c;B76 zpaA>@KGUVK^o|XSs-*HZB}>BGn%?`$Y8vz#_5&Y~T{Ho0Q9C4khJ{9(1Xo9!M07+c zNv$gDrw9O%jw13&M|%Rn;)?!Z`i3EW<~%U*c;;nh+=UVumw&Ej8sOc_mwx$5wq$< znKGLxNs}rD22P@Vu47r@M7zm3jVX%$;+wLyKq+)E+ulcEfr8to?j8W z)P4R*H<#tlja=bnMQ~U 
zArMbC*ZdPOr!tJ>t<7b|ihPm87kMd6jZ&9tma~{4-p|^&Go!}gho}!!&2;S<>K|6ZLlPzF_CQRA%TPFXh}#3j`|x4EU?W_Z5&#`zoM(>+5d4o@HBQu+k1o{D0I zU%F+AT^ok(4@4U%J6XItY=<+@N?QdIKn>!I*>Eo7?klBu_L6h=m<$WQHjDQp9}imh z+v=cKDHZ#c*57G?{0PXFJ0sSDh&7d>lw{x5 z3RG@tNikQaXDFWLJxbn{#!$C{^P(JM&3b}WuY)D02C1ShJxJ?Svqx@p#HuUdgF^x( zoe_k@vGtaS7Ls=~t+&`#M_^kterykf@H0di5G3oVhPAPS64a$21P$RBAw5H zAz6=rCdXD(6qBm$@4;IK7Pe$qF3t;+6%W;16oOZe!a8=1pISExsl?FX;Wq)8`$df~ zaZ3@cN5dH(pfU+z9||zODbj?u=l}pv2b3Z4eF^|+JcW`74AWQIBv1z3T{uc_p4c#M zn@=ivz*BSx81%uDROX2uqmFrmkrQFwrz!VDaYC?W@Kiy)ryD%Iffdrtv4vJove51k zVoiZ1dm+rN^Mh;@y!>Bz=}<(eD-+iiX?>+pQy#0?!Zypd7#moVRCB-wkWyMtuu2$i zRD~ijq-iorl39;MNc3$zzXHS9lxTR)$4#weUm0Hu7Q_U1wc%Cak!wq(p*#j0+klyVTM=Tc(+<5h{`bVV7r zh&}l~-Mq2N$SgKAaai7=LfUlMqHPg=xlr$RT!h#EO$2~i_FO2N$3SrN2!Gamy``d( z6G>!YFNuL@ge=6I8Uj91Lj*^~y+i23PzwP{*=`GSHvE>f5h2t{n)$z~%JxRAk(Xhu zBRij88E4m4bQ7T5va9eLptAKFgxu6qd9vgAAxgca zsl|K4)Ziz%(gO7t_U)dx}HX)cluQ*<#68s3ane@6U5 zU8NVpyfghbU4UXI$Zn$!*n6rvkOMI_tN%f07?$c+kKF+-s=NGPs$Ko9ZhI=XByn^H z<1&wis5zmSpS+2Iv}6&ajanOL17Ts?ru4C$soN6$bbGp(=_at(A#=*kA+t513kNhh z6}!Wn$CC3{GwDwf-NCt)*vTN^k<|8>Eqv@b^LrROUf#SUjIynCPY>Nqv%5_UM|Tju z-A()MI5=~r^l-padbN+Km#rFHyc6%^O2IoZCLm7`o|aa;($PI!IYPiEzDy1rKqq{o zt=Gi}2*=>$4rDf2ykBBU4K{!q*fGJ#7s5{O;MyG=T)PsK21}qD1F!qux83Ze3nP#? 
zRFu0A)6pbJGYvsmx{R42ML|FgQ)q_CMV;~x6`?xqxFtrjZ5Evo=$YZ>w9iiW!3ZyLpH!mltEn{*plbpaMH8AdM8~$c=dB9F5c8I%c} z_s2mdpQ2y>jwlJ4oCmku?b2oTXAy}gRP*X|dOdkdl7d6!XpH{G`FWcl6I5 zd#i`}r`T4?rE4rP9J!3eU({F@|IW@?6yq};{5`boG?dng(sISa;=f|>p){zHk@X7zzTdi zsUq*h>BEd$sN;U|0npJf&t!st^bIj|OgIPps~hz2m*ZdNyG{=@ak0IcOtjVJJ;mjxMoN zL>R-H!)k_oOv_`)r4wPADixM96X)jv!tYlX|7(g17$V(%JdCR;+bt!rjKXtFua!SW zkc-i{gu9^h6UOAS@Eke!sF`gr!eFvyzWCqlbCtpdf=lW(^--jC!fj*kkw1u(di@n$ z$fLn`+RT5quIexllbw-Vbc8Urm!4Z!pVWQq`0!bKdsW>u)9G}!wU_@*cDB8+32WNP zsNc}tcw%zxLL%iSyJ#QY*{>5f(Lug%ViC}3~OyeyFyhFrt z{Rnr|{?<(v1kK_jahR|&Mzv0QrclJFlBMQ4*6((K z7Z#sTecf3@NfarY(dtu6O-BJ{lkQ*tO%T8MM^PRG$|AuBr3tO~UJR{*MQ>D?SI-H1 zq$4oBO49_KUrdDZ?XF|33g_Wm|IG(1wYuMOt`&LJeNWu{V=pA4Jmf0G&pot1c2md& z;qD`A6X5&t@@R-!Z71``-FFoCVF|&}s?QpO{Kp*$-@I(wk82nxP0#>k`K>3*6{4re z!(gG4qq?~qn{rq#4RV!Q7h1xZKvpdF@Vq$W zi%(W`-|>iV;6ghUifShVIkQGfX`-~Y%uZpJsqJ7Za8A}4h%JX+ z^2GY%DO;p}9l(4bJiSbIKL52QGHKAX8k2Uza0VyA`3x9Jj2(Htu?kSVNH9EgTAhBO zvg$0>KAJG$;SbB68F#uoMXP7ueNfhru60?BmlLfxEVNQ=;x<){hePm2cwiSxkr+KY z4ZTK5p(E2tn9JK#K@0r3G~7MI9R$x6B3g^qro+7mtV=lPeXzKLHq*~_Cjv6cy6L>c z&WSN-hIJfjiV+AmX8P_i1)-9?t6?7W>stE2@M#^+6- zi)&02EI0pTMf6Lc>FG`P`W!Et)hq`pm!bbw<{+9T9Ei)b^Yni;WrEaAvVaJ1aI=6S z7f>;{IDidX=Y|$^6KCEW9QvB&(*vLfGbsnh!02K%F#4T8hsKqmtevG#CNf1VkCD+D z=9Bri>}KK^3`EmXKv#fRb)R+h&saZf>Ik0H-6#F|NiR!t1^eMi{CSA2N7;`5L9rc@ z0Yl^wSRM-c&|}As1WQrxvsV6WU-?s3{uI%;qsky^T=}nNWi}xK3*g2c7J2bJDMs3@ za5}5^@YP`WUExMc9z(bt43YcVZW2-vb*SixupvFrpb9Z`PWOi56cOgb*j-0zpKN>Q zR&B4hY+PChIPAvgV97cAHyj7Mfb3kV(lM70QL@P}WQmoHBart13w~0;wL1q9z1m!E zw=P$#putQ7$$-sMCia#@lcU2# zb~ubq)cP$(w>CEBaXhg|1`pj5^< zLpz;((_E%IY!0bvVOk4<2Ex&~-0EXo3M*-G!x;Qxu}wdz4rZ&Zg(l?)%KLrywcpTI z@4KmxiOq5l6)Lq1O*aXT)_4Ee>758j$Lt^~EfBy0NJ;2YBVfTo zvPm{cAju9p3B{ujMFlHZj$MgIB_UYA-oc6u#bfVSk6r8_LhkojGm{J(JjZi>_x}I) z^SNi9JbC9`vu4ejHLJgCW|K2-QHAVxo}*dIllib9uoKsG zLRdvhu<8X7XH&zNl`|BCT++_2VJxK%$HIye5bQvDsXNh_pRnrHkndv4%h<{#Z&96IGM`Y zKAGq##k-lopWUFS!c#}Ur)Ov8yQAs#MLG2mb1&v@C+J%onut-sPq6P+c~`m}x7HSuh;(?A5n8G?zN?{UBUjaX@tI5=nP-z! 
z&u(IUCWT57XL`e~>G%*OHJ$w^oC=Pt>{M`ZOjDaT)$qV)gR1<_;u!N%1g#!X)yfhc zx-oA?a|xY4Xb&3VQtHv7ZmSUj~=0#~q za^gsyLQ|_{{#a8mNHp~hNlnuXm>W}iIA3~j?u`p`N4M-#Fr4^H@Z(iV1RlsdW*G^L z(U!@@4T(`H`cwf4??V2yOwfICN`o)HluFiTzM`AYv5TTIj7ai7UXgY)5^UR8Ql2(<6e$_`cin5#IL?N|vgwJlAylN#>JIX%g+$jB0 zEZ-tgkDB3$lO)_e5vI60-&NJr8g%A=ePpQ}JWc(aA2H&l%~(9aJzFnkH-CSU$HSZ` zVMoM)KD<67iVd>)gLihk@NPbqY@UHVL&H~Ao!2g@s7x)~Hq&lnOogmW!>z}gicMV! zr;XVw0hyK|0aacpSSJ0A10yNY5I4QGrJ}1R-<)jmw>3X&Z_3dJGbzW{!t@mvw zt(KJI%b|R9$xjjTyWgC})Y_bKJZMtROtmzhZD&PiEJs$R|D%-KCmLAt?8I|S!rYmY zc;pif*%{bLyhJzgSQo-V&GgMGPoCLSXYAN*-+Zm*l9Vo%$}^g#IJk@DsdQ8*7>Lkc zHr;8HXr|6r!jLSPN!*+o`Y1AsB19&jY$Q4vQ}3zStzZg!)OQHFdZx~yH=rkJ>KuCd zn#a)UHmwEMep|n(bJ(MyGvz^b4quCOMpdMRjGnygB^xx^9YeC(Vl2w4a;W9)Do33? zg>0rBna9#l*W4tA%3JoSa;Tp5ocW-Ton0&*_>-pcP2e>QYE+%dy6h^`+R#SfAScY@ z%}z0-lJiVdw4BG1969~B%g4$;QqQOJ+ji4B6H?nY7BjkH*pkACNn>sa25l5ny{g_) zlX`0{1r$wLrmD2FhQ2DJmtIUpHRZ=yILBg2r3c-cy~~eW$MGDr>RWvuGd?g=B{(nv zrvx!kRoP@DMEtl>pFc`3|Ga(s*{%gb2e~6-&@p?|tpx2P&lqzfn1Pw**m5>GG`EM7 zKnD?3d*&gTOZiQ*cIROn8P%Q2I621R-)6>G~vJf;;PoK{UobK*&6a4rWru6hS>E>K%Kg$~@a{*tQaL8+{eFfr#g( zMIbno&CBgj##~F=ZuYOtw5((zJheo|reVIqf+u~#jKL@`8Aj#gnkUIO_A%H{RVpm} z6Kver&eVLdbLa7pb6+* zW+dCN(v4_%uWN2IcJGVF>f*|0$>|>{Ih~XtD$RjtQG{vc&7`1xJAQq`Q^`ZEZ+}l( zt4ZACp=BjMX#}bzhxp{{8x$r}=}lJWiF684(`U-$;~Yhnv&U09KNiqNgpNVT%5?DO0kyX znikY3m0PFDFbD0sN_RhFret^0(Kgu_uO@tZC61DSEQKKBnv7t-g58`}oWNVJx`4?K zcuYCqe3N9<1iC%uoVYLfu)DVL&FjDId}UUf?_C_s^%A-zc(WxvDzc@yMq6*Oi2ZZL zg4Z8RO6q=S$=$+Wa@;@7wTid%)9+^RVlub#uo+&N+)Tr&2Z~A5ET7`Zo58kHMk5Tj zkeh3AaqnF*<;z_WDL&z85?Pgu8QplwUfXVV%6E&8D0Gxa{pfQC^T_wAm}JRrE;WBp z@-&HRe?^nr+TAo3RFB`P>80GfYFEmta!vH?F1oGBcxjf&m^Q3!H+pEXdzV_fDrl(~ z?bYnwiqXuhne!9KC|sf~eR}dLMov$~$T~l7a-ou3^k4qX9H4=BCZl`PbgM>8-)hpw8#`n zGaljIn1uDI-!e{-(9cdsX`j1Szs>ck=;_CL?KGy>qw3qtR#=+WwYyv6<}*tzlLx&+ zb^rEvs5vT%o?>H1fIysc_vb-#(OL&LOr(>Jnc$<#5V5zrhiZMw$0Q904j z48JJV<|k3?4^|LuVv+u;E<`h{aA{%9WI$YMlmby*Z_G9NaJ>CuPPcg3UlgkAV0s{? 
zAI0cE#vvJ$==U4@j(&tjbrq&XS&}toASMWTr0Gp0Ftu4ED!pQ->t)aLX>_IGv5A;2 zN=C3#g}mYnJ4_E%9-`83GPjXrVrD0i-3+K@an^k5!=$`qL`~)A+ZARI=s3_B$09{0 zWg3OcI?Fy2%}vtK8RL{$`>D|EwV(VzmodX85_QgDG)gUjaDU|2q=-J7#;`~OA}1Z0 zNuusE6)u<3w*xp$HGDTMYBeF5)F1~=LX8(4GVk!qU2iXq!#t+Ugah?4Fkr$0_p z4<^;kr|XsU_7hgo<{_rPX7W|@LvXT_{ZKX1FwdPrXHtO_FloTjXdR&YVUL~TSj|Bq zDF&KGrn*Ng$KC^pOF& zA0}P!7jc%!Q_Y#_pJoknvwnW?LUS%M7)@6H)r?bknrrpDTT-N>8ZLN0B1G*rl)eqtwU(q2C758@f)pGg8OPScg! zazQn}EW{)DOy`=s{>e49fI)42rbSmGc|*K;(Gvf5SBxc8N(3fX6q#sG{MRV{a*3j! zV5Q$}s(CX;QDT||RXULtX)!#8XjY8G+SA_5Kk@DUI5S`|E*Y*14=Z-0Pnu)Y#$-h7 z?Vg1REs3T(_f|e5+hgX)jZp@&oFR(YXTFJtkp4K`hHX%W=NQj9^Fk1|D1S4|J zVz%1ZBP_;S?z{7vSN6v$fXIu=7q)hl3BH#ZZTj3AGo{;OFomJiZmWSGVVFonhM~aq zMC?JU6BF|hnf+dpD4M&}1*85b%;tE}=Qe?xI4My*_x0y)ikV>U&b}%8t(<*wK({!h zjSO(Pcdqq35ZZxsS|ncV)KfHl9Fqd(eX~0oTYM4|JDQAk-CNqnlSxP>ewBmO_IT@C zGo*I!jw-@V?)|5qZd)nHhrNg~cgILv$zz^_Ze+h#W;`d~_@~7;dD^}eMMO`Si1CpJ zijU{MS`OGxe&2u@yATy7di1V!RQf%3(NPQ2{6a-;6lKbEowzU!i9G~&Cb;~3S%TYk zX1n&^D|P1yeLNnsC`{8ImfKV$Z;5kOF|`yJQ)`-ele7b4*({6Y`7FM4HrD)_@?|$8 zrZ?JBvC$a1&D-9M8%x&RUZrW=?C+}Cjavd<_w8IdYl>7=s`H~pl$iV6n0xw|rU$y; zMvgVJoXeSebuoE2WA84eabPs-G;-!zo-U@wrJgRPLTl+_vbxJOa>UPW2&zQJZWbXm&^JVJ6D*4+_auR zlsnUJZe2%|OLFbyVfNyZz3xP_k&ZQ)Tk!w!Ei~5+dqonvuDfQ_z7)x#kpQC`Q)4zK z;~nOn=z%CQtfti*D(P6o`^LuT<0M9ARP*i8b#R8K6jo;}Y9?rvgKTR&&4j+ffY3Z2 zj?wAPbrd#**_7oN2GQJ`3S*on`IOT3@K=2WRy#YtI{Z%~peM?LM@4`-fbN|g0dCkt zAi3Or|ITz)+p1V@_w~?%RQzLFo6Q=q&8(Ya!TFLDR z?GqCk-6N93{xol{DMdA$h1G?LOwEK+9yL*wfdEr3}6l)$)Qov3GKkehb^r!Y-4? 
z>;y9TiVw7y7r$7&%5Acgf*5+AqFxY=j>_Sd)Z62}m~wK~r<+im`ATeM%Gaz;#XHv^ zzwz|!pHS2una*ByVj2O>Ztw<+$+q1Xd}(q(GYQOAc1!YJE=kvA@`OPBNoJxM-RVL& zeFqgOatbHga|?Q$_KF4j;~mVbm|0kRZo{6V=WDy>A=tS?Yaf_|Rms$PcyfihDF02+ z|6>XIY81asubPg0Ws0gPu?)!VJSxMIeMD9)6~Jz0j?peYPUehK6+_0LP_cR`ex7+f z-_(h;C*+!VPbPKjx16s>Z0AnKXZCwzlu7I@`{YiyLS#L^UO~|0m0g^>#b}wIF00t8 z4cGPcrC6;hH4Exg(49=`^kJOW=;M`1<4wbAvZbnj%?l%_)+mp5O=pZE(Qep*sQ`rBvrs_mqCS^`XRiMeLI)g2{2_i_s9Ln%4#pE z|Mim?mR7z+Gk}zFE}0cS`rc?C#t`}`N`1`%dygpwbgk9(V@N{Z##coPJz}EymB6Un ze7!0Wm7Bwt>G*oD76Zjq=WgOxsbj-TVb%J0v(7A;%Ce%&sr)Lbdum9h>W3LQDTQCw z*cLrMtTk-rD|c$BV)U9quKA)%?n2H|1PF6!6~6Qeyh3UTAdG)=@untgKDS_Iv{lg? z&%BeTsS#6|szax*s93qX^{qpTCC)GVEYeVwtHQPvGh)EV-I*1{v6bV<&zN#2VGT*} z!jGKh@>8So8KpkCf+XtrXCWW&TxrLi)Wpp_Q;&h3!Ghdv=1eyn_b`j+6g$rebUDyk zFUZ}#-xHd{YY8DHd4j&2z;t3%4yWo>3U(|#UGj%{np(rdJ>(YHV|MI0insj=LoN?T z?W9&yF{8ewKAL1AMUyZ8Tl5m^bnBRkIi(e))r+m%ip5qS91bkDLY0C1@<3TxsBnnC zw5%*x94Pb8t|=|6F0Jqv1`11p{?dw~;3Dgg>;e2+Wc*jul+O-^^Q%Mov*&PD#ezUt zX_4P{^)IsW^DBajs`D$urR70?XhASMr!2H^KPNUH8Xue;4lcA#S!hivt*Ws`mo5la z<>$_;3D;OtYN|?VtQoOntnj0t#Ra6rpD^OJQCw*4c%>I);Yq-_QU}1Hz$RDgI@`vV-9F;ZI){HrF0HIKsW`i|x+*_VQIua1skx)Yf^P{h=nDA zaDY1w`wPngRaMrQ;>DHKr9^amX)qirE?Zn#Qbh&}RENqUT_@wAw7Rr#FR!OmStnIl zc~#b^D(jG{A@-mDkgC0MOn7lVDM-AXEJ_lEy&eKp`Q);|{(O)-Yl8VzCCcOr^9w_j zi}UTQx}VVIR)s3^7luo@yZm5zY4sobj3@(<(K63yQ7CLJAoUOxq2>qRpTqMf{=sUJ zB2?iIRR+T(R@j;yDB7>P`zY~O*5aEGO;)nJSx|Y)B#XH|`z56m1owGWm4s@_iu|*K ze)0-gB~0-MRQSt6Eq=VAl?5w`t4oxp|FsYAbu9NGRaJ*eD~kQWs=`1e#n6q1RTL~M zC2_faX_cQ$><{>>@L1;c(_E|y%R(v=lqc_D_Y&N^3BAY@{KS&-pq&c#qc04VR|diq zGgEDu^hMrFBxJH8~uz#s?~DDncP^ z%Dj+uLZG6sBxH>YRfft#b3)ei(Be?}?2t9NG!U{T!Ra9@53eX8YgEWOBxI7rzNe~u z^DwAkO}ds+Wbjj7j!G3qL{PMM%f}aOigO*HH88B%g-e^+o>cJ z4pf!v=;TmEv8e=Uc&kkPO;QBP^78`W>JoVv7Yv1qwKHjPk%TThJydM=CoL|NQ(j4k zOPO8!Xbl-RHyoJl`kz8m(c&^Y6rv5Ou%-lN2dV?wn@qGQdfFumSd+O`|8ReQ|DlKa zE0lZ7m72C=wj5jZVSeL4`l-t1KC6e;Pzm=xGQWCwkBXlDVLESQpuDEs8bvuQrCuwy za!W$xR$--osGnxft^j(M2XQYd3zQR?vcO_kZcQyN;s5l~q9RE(H#4*|%D9$Wlgce3 
zVn@I{?{ZHTqTK3UiV@ZR%wc{y95rQS)@;vyz_ULiP+nSQRjNcRC=FNF1j=}h6#=Dk ze(vm0xU{&Gr(&H{W=$!xCY6!hg4My&{Kfig7?d4ZBL zGRr;>)^?ptUm%!2v2bO$CNNOLeI=u4XP8J2L|F!ll;q#bu%D()=lbVw5Sg z>BK}?jdpkeQ8c@ADoU-%)Kol2(yt^iS34uafs%3rRWucsHL@g7IHg z$${#^5>mFRDv*DCpv0;Q7xr`eOZ}=BR|ZvJOZt|ORNP~zn2ujbU|~uAcxswrYs%u% z042USKd**fDh<5tJiWBI;snm4IiWqRBH!A1keKBw!_J!)ET(Chq}C@;Hr+H()OO(j z<*vMv@T_S;61ccIx6zli6{kYqUq@q(mEC{9z{8I?@~A}AXXm;SVKsbN;{2E$8tI+fT)auq$&pxaHaG5er_gS%J=dZEc3W}IsjALDIUEXApW1hS8ABQ7 zpZXcOxH>pZ9Rh1=Re=AaN=VOn!Jv9j`DR!{LM_a9`Y#IuRrGVhp@kKM>d%@}5y}cK zDh$$R4OLY6>30V$JCC%q!ZlQxa(A0mrw3CNoL>{HC=6P8dW5FZniTXlLKOSvlvEF$ zCsR^dWX++AUQx|ZNvU31PA^{_@gN6GS93q^3?UdZM0!7_f*U307C8~7oh=UX90}Y{ z`&h;S<&=_OpfZ0pBf5E2_On%FO+no1Pfhod&X*bxM(yS7c2ovJNS- zDi(&qMOA8_h!r(p)^IaWwI=6{&YyhDl-x;^rjGItjXXLk#G@^=j#K5SopEIWwZhsR zL;V-3WM`V5u-nd;vpZ^WnEdRx9aBxovJcOw3D0A9T!=FZEz|B2Oxfb{0O~Q+7<3ED z)E*lL+S(!=m`-Q`M4^>eXiX;c!o6!ZGbSu$T-UeII%GDvxI9p?IABc;m-3&Mjp{Ui zTBw9d44vwauEoNDY6P^>a7j&dfDTO!-Ha0YF4pK!G5;q9Xrd_W*7(ve6(&2>))F+S z^U67)w1Oc}fCep4LAT$U6s)Ass?QS+6zTK|n#2J8jX-e~&6{N>xth^{m911ZgKX>M zS=PxzW|<5<lfj~khrKQedvm{Cww>q0%#DaTEkNlY0-kwhxZH=-f(azA6 zjC6N$t)a9uL0bP{(Qu1~RhJ&D9_BFr;e(o6w3_NUS!m86=(rhU(Ttz|M>cop54UIv zPYaqet-SsUh`O1_Y&xa(9&UL9)vg_93UGtr@b zrDgIWbql)-&5*+GYudvL9IG5h6MQA&on&4$ys&LI`$$)VUe;NXL{{RxduGw5Ymd|HtXEnx5$&U*GOE z?*Hsj_RxDjCwq_V6RGANO;1hF@fb_(nc85F*)1;BbX~(e0?p6Vebl%tG-X05(*L-j zL;D}u)2i9WCA^OdS$iM1=A<2P#1Y&D32YMD)(X7}BS5lkj{*21kIyMxWKWs4xRw^# z$zEYPb6NfO>rt+HnhGl0xuCnx<=kD`ey(AMCvTye>U{g*IvG`tbaukb0=M|0e=((zHiu5vkhj$=rdd zHv{&w(22A#R7&R2pgLZ>PHsCzmT8&G#%Hj)ht=IOBMsG9-P{>BQ_{Lw?&N*GJ)ixr zJK6q)jOP61L56#D47(jt)y<+RWrFd45`rd+nH@867~(h6$EXVfVe(ut4lUy{&#p5> zv-aLvyylJ^HG0h0y^mrjGOpvsA2VU%q{&mJ<{dk2`i$d_KjFlgC)wc@mz2((S5{sT zs+=FLs;*hEaM9w^TEgtz?|*N2S@{a@s#d<{V28%}to3Ov`e>_VU!D@JR9yVlIabiB zvufLII}pRB-)Hsk_vC*+e?Bdcm2G9aXXSFvNlxf~M+T6m!EL-kA}7gvkm5h0IOF-4@na=;s)>mU}xFUDWoXEv)6}D#yiyz|keIKo_;WVhig^bWz*O zwy>^3zuLI9T=yDu@g~QQu8EXPf9F1qMFcOJU|KXe~S&J?n>iCm93tb%L#6@x$y6E+{99`_? 
z_>)|LE_(f)hb}(kTto7F^b6o&j+^9#=;8y8o8+bF;vtTkba9j8PjU~iWQQJ<(?{Db4BK++}Z*u&(ZHg6RS*@V9JX=`l z==~fwzvGsTJ{4|r!k5fL&xhXoEkHjLdeiEB^o(E}4Md3TkuHKOOzR2!9>WyGHo)qaPOGuLpX+2!Gk=M@9G>ggzp| zUk>`12!G?zCr9`@7G3n-$8>bj>vkNv=yf{*UG%!mL>Ikja5B2+bvqSZ^t$Dvi(a<^ z^kV3}MhSW-!e1r&d^o{A-N-O{4Lsg)lU#tl5EeOZl8ew6!&=8p@-+0nz)HtW@^thw z;1b78vKD=+8rKZrk+&x~RbxvHip??Uy=YEPVdfUOz(Z7J&-0;3c|H^S&<%IV& zx~R=<-+n|Fz4x{QeW&BE);ae#^j(hIS&rN9=%P0Feh1n7qxYw@s%t> z7qxYC`~}g)*Iu*zNX|hQ-*nt0i_y!Vw{9;-7qxxj_^m(}wM9GE2%(GGK6Lz5qKn!- za@@k`RmQF5-fGYnM7S+MUuxXwXFBKBqJJFW?<@3gBiw#K|1rW%ubdtN>zpzuc^JCr zb<08*y>2;cNCW6ims<2|Biy>x)6PJzzX9l>2)D)Pr$@M*fi7zMy~ch{OVF1`_`3l8 z!U(sE&_!);o@Af93jMMOf0v_+&3O<1SD>$r@YjegYJ1oTZyma*ZG+=>E4rxd9>?uA zbWz(z$L)4>QJWjydUR3Sw~oIJ=%TjQ9k)BtMQvX>Zg-)J+UlG>^WEqhjay5;dK~?U z2)8HEMQv`GdkS6D_K0(hr_n`i&p6Lx`a0TkSirB2$z8G%eIv{+vE3vyIXcAnV4g@G z*?1quIthCF=vIu+It*&-Vg4i0r@}X#`ap6$`YRFs-bCLO;jcEuXWa?4<(U8UhtYq4 zo1E|@>pJ+XjZmB4{Kp^q2hbb8oC=?n54HKtf9@H5DfIfYzVKO@P+K?0UpMqo@O|gH zk}(`pyOZKjy1R{Frx!dK{>63l$jIYoaLg2O8qdjId_fGH0bNS3F_M zxjT%kJ-ctN#kRF|p^<05aix*LEn9L|=>VJ0!$Gh5tV@jR-tJ|u_^kc;m%De1LQ(eB z&?p`z!&Imotl>y|NcY>yfQP`Ius4+P>Iir=90o_haqt)@+re}=69!-q7Q=b49L|SJ z;WBt0ya4_cYAAF)ya_7X-v;l3vL$VVkHM$lX7~!!o&0|lU)h%;H9BKN^rst;ZP+M4s zInpifdZyk@WkAnZc{>aCg?irZrsJx=`@;cH#}%J!YcTz-B+6E*j+8*|j&_xBv`N{>kQIoyZtFCG6Xx9)bbbAOTUZ5{s|@UQdSEz$8G>E7hT zr%OcmKRNMpS z6Mm%oagP5%5&kDQ{v+M5aQx@s-Z+sB=F>ievqZ%aFf7x$i*karT>#qS_o4+B+{DN=2U=$!YMsGZ$a5fnW?jL#?7YbS za-`N2D4g?<=ff?yUx2*OalaTTs_kEi6rW>%6;gbj{Y#LSLWOe~QdBtZnt+$s_pSxF zg5#pv`YVy5+Rdwwvib98?EJ2+-?j0}zwG+rYwUY%`m1ph^~~y!w?O`k-F^)_Z?j*I zl+9l8T#FRtek<}e$e*#x-|o2Ifm{#ee*^MP_!fTdLf-4R--i_6!To-uZ0`J7uB~1B z8~G=CZR>J>ihrWQmmOW)!~SNZD0kO(j=Go4{5c#%xj)Z8QT|^*iV9~7QZ{k9du`#a z4V**9?)|;v=Le)H_aFHuDje6wE%#seC$7f*SEML=_70?M*$895_OrhP_ijkpuI299 ztYxouZPj>b8pd%^_G#BPExWYWChgjy<$n}@McJQS+q3Em*XFEx!nHN)x?UTz*S0*3 zFvLh(ve$;}wHo*G61Gc%p2>UYl?sZeoAjy*6O4?bmDb_1b#9HeRo7*K5=D z+H$=%+%VS^BW<>4;3h`eXuY=CWgHh}YxUYe;!rQ|)_grmn42c2d_ydMNv@ZS)ZKFC+{xhke%;I)eR6aFY$x&%SJ*Vg~y+ 
zBV`Ab`>javZSLJ`_uPP+xDEF^k)qn6yO4J~?)MP!xn4V~*S_l7RTWR!QpHF+ zs@Hz%wVS&3(whnYXU-Gr*mv!t>^8Y}QOy~<_E25RwS%f&Qb(^9%){Nad-~azy|X`5 zx(z^zQy8=jM2d1h94Su5{RreR$9*_b9F6-3WRByWiySH9J_>n^<30f?4#j;U(zUaC z?W+a2&xVSBAyVv)|01O9t#WtmtlHm%o2X~)+Es~ilh>Z=+EL}-wVx{gcDo!P@7hPT@3o71?V+w6R5no8{;53UwR_fbz8Goe^x8LN*A!*XlzmfFoV~V9uT9fy z%kDnGUu;0~j_ahI3^4|m5&vDO29_6?XLdvG7aB`4i9QW}^*%0M^ zEK*cB(~+X~y|zNx2SvHhM2b51WTdG5Q<0+fT^peG3verjI=2KFa{N>x=ferOdu@DP z+ukCMFNU@F@!IlE$L$Qb1ov9xa;WfEAXh?NcNNmL)m_EDY;Akrk#9kT z@7l~{FWZKjYb%p|OpM3>=SbPcjG`zXQ1w*5dBkyv|}@b}muz zIW*I@aV;ZH_F`Ym{Gh3+H&VQY{Q*dkBDR+es{pqUOvk+v>DsK8v41us_$Jpz)d@d) z*`}Cil1)l=i`N$A+Ms;6yS69Uom`uf>`kt%Np>c$jmc|Ua&1bAgKJBY9m%yJ$$sS8 zj$}7-ZAP*exwaz3!L<=79M?7^yO3)Wk{{O=BtNbVNbX+SkJskowe`rxGXcu~M5H)u zK~vKtq-;NOcWpk}cWph|cWpe{cWpb0gKN`~J;$}>$d7BokssG~BfE`jvymUKt;V&{ z$lbNg$d2OLWMq$VZ85ULxHcHsUtHUZ{JS<6*;`y&i|j0}jYakq*S1neo9)_EbY0h$ zqJ7yy|#=wN&syfp9u5dOgIbX z!{)8M&*}ub!rm|&j)2x^pEY(YGA@Q+VM>lmjq4QG1snwYaUk{rxxHn;Q!Yc_MCVZUGl#sXUv0bn18n!!c_sZRu?%uq6`|jPlEB93I zS+M8#J?`}j(wGl|*0;RCCI1SElcmh(K>F{Kvc&fvEq!@s+VUfdP}4Igp1+naEZfODlB zcY=EpA4q&S@yW!^iO(cHpZH?p8{pl<_Yyxy{5bKm#64;*?5&H>N`Oi5NH_?NfqAeD zei`*uRBZo#{pP(CX3D!|K$z(?OjwI_0#VF7t-7X>NTrwi2u* zaj_;3z8CjiT&wt2@gF68l#tdTtwXZEmA{w2ufM=whOZrQOu zs}P19+O~kz?%{tc-fobZ8YegAHO_22r7^#;uFcPF zcC^{urgGiJb?aOCtmh#ElN~K_h)4%UqP8t#t+md2zGcGpA?QGiJWS)T!y8w)b1L09{ z1Y8BLgInS6@I&(CF8EQ%XMGIa{EY|eXK20bvkrwl;T$*@D&H@IXTuBOT6jDB9B%u& z&-xKsuP}!X3*aJXy-J-4kB8U68{lgAH;1<%*TA*#R(Lyn06ygK5#$r_DH!`2_XU$+ zDr^he!7lJ1I2axchr=8=8jf{10htG{H+Q@>HJN!E{0ufhACs9;FdZHZbKrQG2akmXFbFl#`394k z+u+x5JNzALa`aE~a2(8oGhqQ-4p+h3;Ci?LT040T@HF^5+ybZn zhJRQND`7QU1W$vt@LaeGHo*1pUbqQ94!6SBp|#6rT?#LU&%y0*CycBYKZ^b?y0*`c zeRlR~>O(#If#vsES0Bvz-F;RCtc2CD7LGau_XM6ZOmLWlJOUmC2f?8ZM<7Kj=Iof| zG5um|yR7KayKC>Ri@>(5rmUPbtJn11({GQ~qG;Vo^Ol&GcwAy(;+({#iQ5{Q8mx{! 
zD-I?)OhLAR2f}u+13UsA1qZ>S;aHdlXF{t3Zm@ZyjhG87;pwmzu7IoH)$lsF8a8j) zKCAEH$C37xC_H2#ixEA`7qLX5il9E~_r6;vX>XFndDLZLE(!iu6 zl8#Iolr%VLC>WkJGHG1W_@s$Rlar1GGm}n9ng!-0El8^Edv)Jy`~KM1>i1f|*zDNs zc#xUhGkZ|>(b?AF8z);+eqU|vEdQEu)9N&TaZ%8ZJn$U*9-2qJp^v&*<-M|0E{jC894Ja6}WI(~%)7M(J z-t@-$H=cgu`8RgGQP=cn)8kE>o1SZWsp;=cZ#2E#^se1@DgvpLKbQ>%Lx20jJ$*~M zI@CEoz@OmHa2ITX?siG+=Gd*VU&ekH`%`S!T}SR3v}@?D;UH&M?ygb0M(-NCYwE7t z>?^Xb&t9#m3Cp*hwrd0V7EXi42&Q(y*>VW`ICuig*L9*p(dWdR8#6KPxVTDiY~qZ> z^_@0#+S=vsU0&_7t;e|#b#h>cm>fi3~m7SG+b@nyc z)}B^-{Cl$Y>?WUy+vp33M^iS$;+K6^Df|mO3!VcnfQ|4rX!+83mo?2QU`!E)i{Mh2 z1CN6z!hBe}cFWqWYu{S?{@M@L>XvHLX*;3YzphQWFy-QuRVkOIG^8}9+?8@q$^$76 zr#zYRbjmX+&!uchc`fDjl=s25l#f$BOZhJ4`;;G2+_aIrG5fdd-Pujq+xs{5-#TE= z0Bhh01Lrh_n&vlMZ$DGgSBBK04crs7)@H5k2b#l3qW*!gFb<}`Ht;~$9(IIBz(H^b z90A9}Ja{7fguE)saQ)ha+;98$+HY(BMf>gTcedZwv8kig+GiaA+rZ&)J^T`GhdZIW zKB&=Z#QuMe^&r@UCHYOP_&cn}qMnX=Ici(fC-jlOj`|i<(obICsjv2}tA39DHTuk$ z^Jx?9vEaKRJ**A1njsB%hyraq_Cr52pCpFI6Y z$|Fub{?O#%nADWiwAA#})~Q2Mhk+?zM(XjY`Kf`_(^8kFo|Ae->NTkwQtwKADD@Gr zDfQ`8Us`tBfV6>WIca0l&P+Qi?QC#v+WBc0rrnfw3#d<9pLS>3U1^ViO=(Z2Jq_MX z`yh>>X{(%8<60fls;t$DRu{Ii(tD+k0D0-hrJt03YI*$tw- zvmLEY?K=58&FEAQVmlA)Jcc=sWu5Cf-`@F-&JT9}w6oRa;4YK9Ozm<~movJk-{|&d zTE>gN*yBaD@dy3cHvZv^N5SK!t$!h7tJB_Z%lO)9?|;vj@1O5q3Rd~A^G9VyWyXV~ z%r=<^f{vM8GrI%z33`BDnOT|rG6!c4%^Zl@4XwSV>zw8lfT-F8WJ>hIL4nuk$4s<{}|ug&ds-Gw)A zy?OV|)-5S((${3H>9wZ!nw&KiYnHCLeoft)PuH}sRl==Os8jrEV$KUe>Ay{{p< zA+{mDA*msyp<6>vLsi3B4RkGvENEQNcp6yJct_&~a8KjCjSn=RG7=GP`|W2iezO?A!b4#X$gf3ML*Xzu0#1f`@Kjg}f0Lwbz7EzwGoM7? 
zIRj?F95@ml1M^@FtcBOZI`{;92EGI}9=aa7^UUrXvpeo^=clYV=78b}8y*J>U@`o6 z#%Joc6u-h6@}nL*5A=gHOV@9lnbcz5R;b^doBF zxp0NU^N|<9mGFAF8a6=nKi-G`?s#h$eL~S4Uuj$wfL2@P3*k680p`O1tc7>LC*f0Y zv%}g0nMZ+d!grw6j;}(&(GF{ojqrYlR(sptpmBC@SO@h!M-%*4 z9yY?Q@I!|mBSou|&1B>NIK*KdQalBo2d{Uy3AqDWU0UX~`?8^zQXEGBL{h;jqhXV`S-QJUvXvh8_^iUO!mf+EK1v_5=^$_4uUBU8 z%mF}sx`NCVnFW28_UTr?+|yrbs<-D{a=5_ z-W{_$9^EkqjP5v9OZ-~4KWfLDef($rk*zm3-P~LkHHT$c0p4ZNIQ9cGer>XRY|$VV z#DheT3{pWWZ~$ln4g~E%N6;B`1sNa{91IQthk>4;H^>70Kz}e09088<4e|{EL&0#6 z14e?;U@RE#JH|H=Oa@cIv0yql4x9jHf|J3iARh!kAqawEPzvUOau5RZK^3S03&CmN zbg;x%>stoS0V}|TzKeX9g1>^Reb@SK05^i0z%8Hw&{6f>2G)bS!M)%C--EtKd>egF zgJ;1D;6<<%{2jasUI%Z2x52x<_k16K55dRaQ}7S)1^5bl1HSWZ_x%Wd20Otnup2Z1 zUz9H@8pMKlkO-1NDrf}`0Byj5pgrgaI)knt17w1O!6D!<&=d3qS)d>24+cgJjv5L^ zMCC+{1mnOlU?P|drbb;B_1CC7qUJ}>XJ)pFnU00ge_?KR6}UC}#ptckFGs%?{YLa# z;2rQD_yBweJ_etHe}FH*SKu4)UG$I9KZ6}$SM=}E*)jPs0Z<5npcs^bd7vDGzy5VQv!L1)kvWPnU?FgOGp z26}?tAPe*ZW_}9W!ftE}vMp;&Zeh!@>(6Vt`VR6P6b)iQJV*q|AQiL%2Y@!Fzya=|Dt28;v8fQeu-m7e+G zbnGhrjKearGmgs0!L}m1NA@B&;2p$7ZfTzG_@GN*9YymHUm%%IGHSh*_3%mo~10R47!N<%UeGR?^ zKSshpE5?lf<1Ahfqf~&!`;Nbd$>kk2kfu5i@$O8T9`_~TyM}VWiU~n`T21bBf zFba$T2gG<0=^?$9u5?l?g1=oYs;6`v0xCLli3+@2-fcwA$;34n`*a)5g&w%H^ zFW@(zzGA_;66Uk`VuLexrTHgy%bM2~GUuc3D!B6-o1AfUfivFg^eXmMI0d@vHHz6k z3tkKFg@3dz<0X!L38P=L*I^um)H;iK@LaeWJ_uid-@W_@Q$lK zME8!XIx|l4j;oGC_l~PpqI<_x&!TH=CAw=J*GKJZQ_kqDF-q_pUKfTWxeKaR)&$q@p^Q}i? 
z1H6HG);-w=)*V>4gZb2yH7U%YE?sjS^Qf9btpqE;j{2STS|9WosBElccJsZ)ADP=c zWnDg_1LiTEwLp5_?zsMs#_R6*(j7m_?xbvHO~K+c9M4;JIN3GwUcc61N+E-Y8P?s^8a-G1-6d=KkXxV zj(z0+b$iHv?H_xuH~YWE{_$VChrNFEKec;A+B+=l7+yQa|1o<8^ZAGA)PnQDh2Uba3S0^<2Umcrz%}4Ha093Ve*-szHJ|}Bf?L7u zU_H1K+zsvp_k#z)!{AY{Ddw@5C%{u+Gk6v}54M1<;P2p7@H%)Cybaz3?}KgNBk&3M z415m01Yd)1!FKQi_zC<1c7Wf&?_dw$1B5bV#Kr(#CyGq~EM|^P0cju|v<7WKJD`_` zI)N@glg)n64Ri;Gf*zn3=mYu!t*`X1lhgV(cP$wo=1?{}Cy$OMoCQa~C=2dzO{ z&<=C}yv%k`7og1#x`FQCP|ySP0)0SVkPTSge9+*~ zU<$|s)4&XHJU9`Y1Wp07KmnKyiohID0_K7;Pys4I7*vA=U=cVCoDPzDYzV5 z0j>hqfNrdK><$hEJwPv@3I4ud05}{R2?l{7U?><4a==J18jJ+i0=7u*jX1P_Bp!QF5kQt z@;dL?e-QUc+&|(zkNYz2o45nw58&0C3Gr*<*T;Vq|6}}a&@Q24Lg$2o6Ano@ETLyY zR>I7Llbv_*qZ5ZD4ow`MI5qLu#1j%{^S<1fy!ZVuc(nN)?=9vX?~i!Ddk@fiLi>9c zs4{7OQVr01OAmubJ?|)e#QRCV@othYIXZbL7@j;jd0g`NBZ^g>GRX8)6Yu3D19ZkCjI*KQ`_XXY2ChU z`vJUXG>!Mz!|kiu-`)Nd-XeLgeR7A?4(D_@x5HZCuJLKlJMYJJn$@YG)9Ia-0=@fV zb&lzLc<0fb=XI{?T-*7q&Np_xt@CHRx8I$$LiT$>E$;;V>Uk$f?*o0;=lecOvzBJ* zoylIjH>G!jLTkcns@I&c<_6xO+`i`fHBk*w4RL^%AQ}#CI804ILv6#E4R<#@)v&GM zd)}gKYB-elYR0XdzP5JlvbA@u-L&>o(35w^M}bMaKmM1-GaA=}I~(tAysz>8#zz}B zHa-TvYW%M8=f+b>|ea4f8H zSc^Oh-UTZ-S-f$f0%XaF!-|lK3q964FBl8xRHP7IYR4gt^<#TWw6HK*+}tPxEkL0zuLPF z_^67lzu5wW011i&2tGCjlH7nX1XK(V5-G})5&`?uW!c;$E1TVAcLT&vF=Fr7yJABK z7Hrrd*nLk0dspmTu{;$^^8L@0-Fxru-fTo-^s)0hxtY0B&YU@O=FHrgGl1IwDW99g z8q!{XFR&-{YrrP#34Iwb31h}T0rFfP?I)+b?pO?A+mh*ttV1#*Fgb6=J-Y#>R`K7%MjR zel9vjknBGmZg|#+tdVeI;KpW+gWNwPD)UdwIw|Yath0p7zcOoW_S)=q*^gyEk^N+L z-oX5U3fsRqa^R?eV+IZ!Gzjwjph1Vgx#2v6st45%3JtPD#_tI?7H)h_WzIy%`=w0Q zUz@Wc=cb%yxR2pJ%lRVbE4Xi&{2xAe^x!?2EsP&w1B1@R=B*qG30@%N_aiH3L1v!_ zch{=BSKYtrfmQ1uyZ2}=X&%))8d96&Hc9T|Ag4cX{))-!|3X%Ou==CbAFuwD$?Pw5 z=a)&IN|{P>m1L=ur+480djR(WMu0a7q&y`VD&^?i_GY1Q_M43??`Pa46ss0VH<`$_0?TCAeL{U3d~mxk~54F9cjH;J-qyO8F_}CCSMefRvM` zL04TjXx*Tq??#{CwI`CC?Rjq#A!=l+C{Ys;bY(s`f<0UH6& z1KI#hfNKD9{hnmGl;dC@q2dh-cPZ{7gC`5CyF zzHDXsa;4$yd`w?n%Jk(8OkaKyC;Ff18Pk{3n7%wAV?stHbmr+yUp_dhFSlg0KvRAO 
zdNOIsq$^h>(3MYMy7Ky7>!B+@-s@?cwdQ(q8q%di4o8*X7)pvodE@&T6J#f0Xk{&ZjwF!hN0dP0onHqXzF4)wh$M!{6k5 z)A=skhjF?$KbId^5lE_g4_lefbni#w^zH+pcQ1wBednsRtL|I%;Hrm&{$1QWvbnT* z&!`?gt@&8EM0)ra&0nH7{?Pp4>JL|c0{3MCU7Y&V_SVPoiN!*o)yGeO|4zVKznUUV;A! zjCMuFV0@SW*cUJfK)ShBKfe?HdjaHGww z>l3^Pkox``_@8us0z}6XSxb;ELqoNZN|Cn zxy{En%e3^Q`)cmM4NUjJJ!n~HncL0VZA-VU-CDXo)x9P?09%1yr+=TmHNB>10Jdws z?pc!&$OvUD%J?Yb+l-oCfnJMyEn#**4()vu<}8bQf7JWy-c@}LV`sFSnWHi*vZiJ& z%epMf3%f^O_21H8+Tx(PPQlrzO#@#a*fi+$LCXeRHmEA+u$(nH_vKXJJXO`;!v+VP zWEZZ{+2lOkS(Upq_heXsn>+N1p`Q(H8@jOIf`Z>h{5ArMXu^I?S;_e$FM@41rF8$& znWYE9##>YA>7}=qt}k`2P*!}j;_DS9EB9QvY2|AxEvwR*4U{dbwyq*OdZh1UwGCwJ zqS8)VMH+0V0L}oM33wOle`KG{iM5VAK&1e(nMCj$;3dFvtfj639NL51hiSy~c)(47 z+W^u&O@HW0IRMh59s;xgwE9#u-d$MQLg7dR40xUzh{V~bsRag@^ z3~&U%4QPUXdNJv!&vUzWb6-IF0HnRRig$6|9&}MW_Ubf$I$XIz^S;}A1?d6a9Q=&ek?LQx7e9ig} z+0Uuz9WeA6CE91Guf+PvYf1YF@ppTCE%B55Dfy!YZBBMPleK?G+I=6(A59R{#ApIC)4QcC%>1q^n$ztNck(@TQDZjctLhS_5d^q zIZk##Xx)4{;1K}X1EKN50-0O@n2UJF11!js>|w|seVa*ULus7hdkwdzO(uA>!?2kyfkZZ}a48skXBiOxnV`2Y0AG>euUecqSt$C77*&y6+a0_9#ZVTL^UWpj`(#% zE9`9scMtAvDK0K9DV_<7wX?7R%?4YweM|b441vAcl9KUo+*a)`C9OEm-FrpmiZI*~ zxNl(}I%*0uBer@q)&T#|3P_ zyzUi1C3KjH0I7eEgIp>n*#x;nFbQKl*&BUNzy!>hrT{JnTnV@ua2=o$vTHJc*5?-h zeiZBeZP0ZHq@7i%&y#I$g1w>Zj0H>*a0zrCTAL%gzyxHgl;G6Yp_c;U?7<`5WPnkB zGVZgTi#8*@G-@wJ2nf2za)PtMIR|c3>*&_$tutGRC#3w?9hV%OA^JA;t8SRNWI1fhso|ldumVq8tl;X-3S4?gdI?O{mGp+o zgzF2p3tToF3_g_P!2JV#x%{{C-v+b}Xx$5L3|tx9IJk1S3b=jYCc*6wcR1X?;EsYz zZ%c0*4@Z4(E}RFBWGf6xICq6BgR>ReXscUsPq^N2yTawdRX{edHbVcSd4=qcB%?8@ zbmlk*J8_8Dxepv|sIr>pnctG*M;vBet6b~sbVzPdp;&H_jiYwtJYUDo2`d{Gw2sxkC+8@@*$v$~W z>*Urcty5d4wN^1(<%hJoTHOYj-yKq)fFs?k3|i1QxGSLPz~CZnS%t1L8G6cb&{NKap3)<)C!7LTS5Q}QG@K944;O?B z!A0O6gZl{X6S&XdzJU7*?nLE81;ZBD;6YgxdbK%Z|y8!MYxJ%$(g?kUU!cBzR5AHCyBjApN!}TSw%|DjuGA?K`^WccBn=-haIjMihb(0c&cO_kl z>|cIG_AkY6sAVYb$sP`SfaR8>;O4>|3%3OBI?DRkAg@>(= zS)XTjDtvDJ67Flano#A%L z%+4H?IV5v5++J{F;MQe6mia{H)0xj^Zp@@R8m`7#;4;`CukT;q|BdW7vdK=-+pvx6 
z&N(_Kg1bl_8uHMP^+Prcd1lB)*vEcn$d|B@{Tu9KPjXIj&VoC}d93pc=UL9>?5>8i zTw89p+#b26Q!RDQVw_u~ADyKv^U&TFl1t!eeb zkztk$xWR4zXe)=SfcqERQE>83h+lChguMTu5pwl(z*zvYFD~zcSdRY*A!prb7Y zJPF;IfbRAocnLb>H-K@_^(F|NkMusWXHS3(;_i*8evaQ8;luMpz)1k(y%Cn@p}PXe z#yNonYitJtHoVN?4Lk{c5wOcEuonr~Q@~6-2?7F6$CF@{fJg8ou)GRA_%-Oe&|%8} z^?(RqA>c&7$$(P;H0LI0!gC{{T*6j$Q_wH=z*gX-wzrp+i@DbpD>3#?c z_LtIJy9wVepQSZ%jl8?ga--#3$R(0VQvUoRZOtr;Y5NN!O7AG31I%3pu`^*7lE zHn+Xo_95;-k@>v^9^DL(?I+u13n>%cZ+pM(m+wFi z0R80HBF7XOOKyjgV+f5CG(H$*?*PbIlBWm2Ez7+y_ww8;a+kwh54QsDR=8(#H|9PM z_afZ)xox{S^K$c!g}Wl}&b&AC-p#vb*geBG!2L9=ZCD^bklzxOyL7*fR`#|Pl?PxbqwvC_%y)pNu z+*@*Q%Y7zyQ|@!QFTlN&`yHldG-eLT+l`Hz|Ce`M-sO3B<-L*jo^A|XJM7+JEyErj z_Tw;1zCS;hPh;x(0(M8&@I8i49zJFG^x?7llR7e$Vo`(2;T36YI>GICn9<`^@fiR|2Zv;^I#e)=QrN$nU>RSPLe*=5l@5a^rCJT}{R(d%5&IR*wI4 zu86)1lidV@CanFEJ#RV(KYwF+B-{S1)EOaW)J>Xn$*t}3; zL0i*a_CoeXaT??6;sPmJ^91#xzm@~APZhpeL-=M9?zigW|AcT#KM8!u@|gx3!|>0= zGY`*cu*pv0P1AA42~YX^0d`-tXYV5l+vW#q$!(0krwF zh;-wz=11Y14Eeky!Z#zFH#jfhEfLOr7K?KR$9HFeX9fI37x{bE5Wh;ipV5n#TL-@u zuQx=!d-h^tVD7VK~iD-hlr>6`GRkH=d@e3^fXNVh_?%O?1>?fZtH!)AnQ=~idJ=L@_i z{Pq!a{$BVkRlI&jitsG>k05*w#2yViKkdWQWzEs&f1TjZ!3fvZ`$z*nJMo_IeFbw% zZM+JEcSHC;GI@9q{vPnlU%r9=mx*$2745QIjOAtq8IBA-nNk6y-wms|&*~@WzX^VAJq8%)yczF_udhHX z7Vj|LKP5h&iu{KnoboNflj6@dlv^dzgh5W``wR*e4HZw5Yb*G@Kb-6zs&;AAi}BtFB9=+3;Hh?eiu2)aDN02+^A4E9yL;0H_%9Z|QBHd&|_!@*0{caTbd;&k^ z_l0;Cq>FkTjq`J;=&~^Oz~cR_f^U~cuqFh5Qv>J!y=1-M-whd690UCW@5_aJXll~; z-k}Iz7;$lllbF* z`ki(jkN1p2)QbmOmWl9_Wxe1hy2xL(;I{`6PUXqpPX>H8iT580I>>mN5l;M0ccKxV z(ck&JoJNs<1^o5&@7@K@q>ZpxZL|pGQucFT;0#Ja;nGciC0E+_0eAa`;JZ-is&U z|Er;0S`be4pgosbx@|@{@%u|6evhm5_5H9fkMBhIM8qq(hST#aLwi+R%kjC+kbf1z zsa(==soWs^#Ftsu;a*NW*9f|8f?umwJZzAkoAI9L+(Xc9i|`|e{f5eNujhQ=#FOeZ zO2n^%e^>Z7;aP;|T41B4gY!n7|3(9yOAtkl=rb4Zi4L^K zNt?b2;Z&~|4e^(W{B}0Pzf^<|H^g5q^0~nfzDC4<*bsiN2ro6{-y*`V7vY=W*N&$z z34ISJvtQNSlfo8hMmH?!{H{)vJt4MSVBJA4K@_`>}rs&y@x|H{lx+m6s#vjDOj0Gr|ea!6F|KY}q2>e<|Ys zB;v{U7s+;fNMApnpl24swQ}!%K?f(owe5SKh+l|sqPzUf68t>c5dSJchYEy~9F)Ie 
z!EbX6@z0fbiimT?`=3QVLHJ1zUn9c(Y5MxKAe`#)AgIUM3x4edso%=_uG6QRB5Kelt{QYR;H@qjjaz(kH2)~TK)xqC?VF;(Y z`6%5Mglp;6LJ2FEU)MdO2`O>?9{H2}=)D#EiNos%VcUttV1_?rxVt+%V=N4Pd= z5jOmPB-(P!i>w`%pe~C6KI(%*05tddvaM~=UU(w>2T*b>)t7jm+Jxv%v~>b108Iea zpxZG5l}~E{)qqdHyjca1kEdYo>Asnkri(ANSl<1w!}9OT#}W!O$0z(~Or>#v(h|@; zlJq?51^!$no)$|RzKdbavn|U)b*4Fv<%LgdcE7t`wWRk51l`4p+~NAPOFyxF?y&TN z-!-qn>x+2(C23mUNUPRYYSa2g*|omWj-Hf@dtM+Eadqp>{;#cdc>)doNcRls4L8*H z$dsO7C{W$tiKIv0*1H$?jCs8N3|4>}*@Tg6ujnhc&(}NV@i+GABanC1tM&8LP-dpy zKPrpk6Y@stfMK{VM}c`mK(KnJJ`5)o;8DH4oi!0^e1SlypC&@k*AU)C6QSPgZwU9_ zDV9Th0|*)PHD+hU!m7Osz18Xf4IUx2exQgDayNodevdmcs4q`KAvHdCB%=CrL{StJ z_IW+(U`?Emx=;;;)m<4i=6NGwS0MBc-Yn#C`>Thrr~~{_xPAK{pC_QQ)v1{C{sSzo*x)MqI&2 zz(2Zx<-`(s{Sm^{6A2eGM)A}KUFrgbAx{7M)dfY-7qyY$(HFH5bwu=qFS2{|g)gEO zGXkm$-9DEmvKV1Qb^UbQ>o9+eXptokFN z#$W*WkIvS;tVNoT8b)L9nd5TR1VSFw<*g4A|5H4{g5kYbn@0jJe*^L6-kiAZu)5c1 zmpAMJF5c=fedqCjTGj8WRy~1gbst7=5zK?iv_Yz$VvW@%WELV(u4F+W^lopx>QY0YKxp4=+3jjW?qD5= z4XD ze|qA##PGFhXa?gbmbo@9XNH-HU-EiY?Ut}qcZei`>Qn2%nFkI^7>D6LYe>R(o`BDX zN_zwT+0KL!s4s}@=OmBeBRaT!2jwNqgA-O}b8w9Af(Gny2HYB-tp7VO~Anx-p zQ9~G1Gg$?8rz)rn6WK#N=o%_jp(a5v>QT#RLzzI3_~zHKiehvNc#naU0WrL~kYGc& z?r2Ug5hoZ3&Sza9mME--B0gOtlwZ$=gZf}K%hjJr|5d9sZiqFKjDZXaz`#tNAgd3L z;ak8E;C$?Yc$-%fVj=Tt$j8IOtb1!iT>b!SoCud?EH2*DVACLj*vP|jTxef_;d)Vz z1@7uhQGSHvB!dN!#wgIV_$&5_Wuy-uRa#B{E#bw$Z@s>fzT5B0t|1*+ZSNe zYf58T3!Gdbx4%|BA+4_qy*pADCUiN!E=}u2VKk=En-kNrT+~Eda;^wN@tJAe zT~K?#k!Pj#PZYuBKRZ?q>ufx~a}1#z(Q~=_$Hqf%P0g}2)=|hC9l%lOd1;Jm5ggWq zpP$CqmZCG{E=Xg2n#E%Q7pAeE1Oew${THP%euBpj4d7z#^Mn;W~BkWofK`c_RV$|R*rb; zu8MyhZm5aAzB-Nd9GRM{X4eQ(u+b?TRf3l5gH?ZZcoEdhYtxt_BC~*YfP#ru*QGH9 zK)&$$tJTGUn(OtUAvNejwQmq)7Db9sm;Xj%Y^DUR;9M?q4hJEkZ_=XySr>{h{8s9- zMt|bzR;95{EHgpL_09UI6yzc`akV~g7S2?~o70#a)#f3ne@lWlobb0M$c#1pZD~DO z&djq$UqxSFks7*PAJz~Is-Zj5($T3o0(Wv^MbvuGGy=Je?EJNNiO|NN3Kk9vd3CoT zl*e7mn>ZrligEuQ&Lz=cHZb4I=@$(I>)&U{lnaLYIoi>P{sy1#0p4AB7){k4o{i{-b53wPnJ)On|CtbdR4W3E#iu1y=oJ{&utmQW*O2jeT z#3`sx1kQLajg7_vPpa(m+S-$b^#bQ7o)j{(HUeA7ES0D{4H+--R9rJ8o#AB~xkzI2 
zbQJVT+CUy8(#>n|%qQ_p?_N#o&)<>Y*1mX6iw?znop;k%wl115pn>1uNbwhJIC?W} zXI+7qFhc)$i?^mgf``4$8&7nrcX*kCC=lA4bpdF}ccX&{I`VsIyT&Rikb!UxHi|L* z{TOFJb@(8yPpqc3p}?XKwH4AP|A==aUZDt7hA?z4XqFyd0KG~5IPD)iqht|2;SI;z z#0{b3ul|&eT-4J))3tmUO5Nw2ePY#iF|84uz4{9-K*TFj{)nHy)QV*nSOx146#FaQ z(Kx%tGWnVhp$wlbynk@qXtZ*9+`(@+Re8&No7SDE3V!%b3<%&18S{HBrwgozbAZ%; zwVX(cHb0P>MOld!Cz}7L;{mRX{KN?&u%`Cg%8}&sCH;>Su%ER(iKk@Yzi{NSR5QOJ z_$x=b9#jW|04;CiH=dac;_+H>zlk4mwX7_pmKKI1)n#R_X;b%|SXJe!+;^&LM&Nr|V~#9TnylB_Ykh%vRDFM#2HZlT2Cq`l z2xkNOQLSICcKQPTT63~2#4Hsw={giJM>p1rT?gZG$xG&_cP=9pDJNJDFr!fle5T-IXN0yqhh+?!bn_SsGMRw z)(maIuRIHLg2#A;$1X1~E2AmAYo6OP-_WIajQo7%RBJzhDo2V#M$HyKl*a~T4sI+pX^>b6%mx6Ezy1iVYCKf&^ z3uQCvwO0l;Y$_x=%i5cdL`&#VsGMzGVpgoEJ;jAgHH#VJG#1&6(O3jI_MBrq&Mez) zLLe1_oNFD#h7qu);{hRx~cOo@P&>!OWs`E?rV(+pdGL zS>Ne8SYn{K$of`>8A2`>O*Lsy3t{QZT*DG^spFYyVj`ZH4PhQXj<0ZIdTQR%oy&C) z`U?Zz>Z0suQ8FSAUawn{z~iKM<-`Ln^KH2N{2;7(CfWigufj4}vl-A3?fU zqnlNpIzz0g?WhInf(CqSjs$cGBB2Jgopdg&&S@#k@`+_)POiT=ffLfKLoJd5fn$n} zS!lbtHp9MXR^)}#OAncdzUmL&)SA0Y8Q+oNzaw$~hN?oja*4IcRK3>A6?5k9+>S;o3z!J&c_bzi^1BkQK&C_^ z?z$Fk%B9wGGR^uH;{me>qVxTaccu|e`p~mO{nmV5Fe-;M{OwS`6McwDs7j`w9U3XT zL;W`S+7+jLr9j}nkxu2p?oPJ-hBj0zz#dRcfU#a(i*I>yj%z+>c41{eGrEe y{5 zSm?UUdaBKAz)j&JLFX!C=EP2|4|OiaseZt?-1<~1YmPe>)UfM!$AX$h_t`O`V}D7k z=hKJn^73{T6BE+ROc4H$kLY}(km(K|F*jp7Z#uLz@;6-WP_D2(WTVd{?R+As#^H!i z0J>4cu)1KzihAX!OMWcC8pL0^21Ux1*4?AJYFSwTJEG}B-}v$IOUb+euClI6Pocp{ zjJS3T;X8(K{qD$w!%+%z#vPM#!*@h8sq;TRgu7hT0UR7(gyZRj##Wh17`5UVAgcYljx2JNfsA9OX-$`dv?6n(<24 z9FJ0v@J1$Q71^PwZ_9Dj)OoyXa*Y|EcfDSj0+0WVw3sgT{l<({{>))ro@`NWuwI;P zhGe^{>yE`OIw7>3cPg5-(4WahW_TF~PLhl}*0{FK&_n7YXV)8t%y^}1j`JyuoJJ?^bJuE6_l8Vry#D&~4Uq<|Ct~ zYplIoewgK^yD*^0+-`NaYT&&CPZiHQtvy&&+*xv$wR;WE`)+Hu8g?1LTC1Z5_jGvf zu{zitGWS}U$#3@meb#Q=_Wu1=J6WN9z}n5j8tg%9cVS)sAuB9mgdzdoI>c4$J$0e= z{Ga+@3;!qBxWU>TugGls!z>6kOCMp$V9@eW;7*3^!JD|9qWUpwAFVeMc--2X1%&Hh z)D#h(5Ppj9^?O23YSKfGd&-&>O%HEPC{X`zZ4R(n{~t{Xc%Rn3#%(drSTmzJz*`@v zepVC0?`qj-3<+%#Aq-9A>he5i-AQB8pRR6!`LXA%eS}$m5%7Ywca0wg{{#Mo0bfHz 
zebL&N{O*QGAR6|P)$R#JM!jt9L9eR4HKnguyQ8}ANRi=gkM}Ln08umEZ(I8;f&m@&n(4gnSkoyw zrOTxT)gxAq_}ZUtEMfweDt1GBa;@rZ(|8~q=0tVCK0 z`G|+GvYBQ2@W;Hc`Y?rk!YSzud;L)7akT$aj+QsidS*bOCOufCb@ic{~u5 zLphasGcXqQ(n`9Qo zEmYYi8zbHV5FHoj9HX9KoXvaF489UUn~;izKly`KJ&tt+E{m7 zz^=EtOw^9Je-7G$ltQoKbeSBZZizOJ^VvpOxn|JT_1vj0hRy8K+W zEtmLl@3k^IMA&uW|M~d;dRqod3Yh}m4K^;5=?#gQ8%2wew-k0oOIHX45O9-dgaw@b z=6|#WxFTLDlekZV?`Kb=a+v zF>tTgZL$EeXUz&&V`Gw-Sla6+irjAFauF>~H%T>&-0KbzBJP$#$en^(6dw$8m&_G3 z=egdEc41WE_ft{sYe5_q1gSNj0`8#zHx33Nl^a{@J@?vn(gzSt?-Miy*Ug8~O4WD2 z;7(@K5)tUW6nB020nw-|c+n`fTk1huuQ0l!90eY-Wzi_04O(aG!|X&u`ryiyz@pOi zwyZELtc6q-%z|2MJB1f9n{+-5ql>~UaD(lVv=r7N$O5_PwhkBABpe;_MaW}WEU*Nrr-PkJXuXP(v!{N&B*zz?IzQ8 z^Uh`ou9oY?+z2OCP}$hUP~{htBt&(S?ba?uRe`EWw|dTYO&7;ldERzKmqIam`2g!* z$_uvpx)gd*VUUXH^cTAt!?JO~OmMx_)fhGoW94OA^IwM}lviwrn(1^zD!I%dKC6?*Mg++=`MU_`=!`KJZeZ7&{bnie}Srsnsp!ot9?R-tPNO{dx zWQaE>)HoZLjCvA8e%&_T5Lv`Deyfds0#Evaj4Seui{d+)<_j^O(I{`&&PrqHt8klK zSs5=%)`uT~)+B1bNKIaqeh(H%Maew->2%@Z0r=FN6rVS3yRs}M@{`%P$fc4lOr46k z2+>t}%QndHeu<1)sJv}c*vnaT@;g{oc8J@DiRw5OHYMb)h5gedJgiW8$1J1GHePj^ zby?X&cZ3H~J_39CS9#Yq)PN9JA*78nbC^I-m&LFGeRz9HsBcQAsvlC~(KTvh$!|uS0%jSouj}y{v~%J!`jtCj z$`roDCH~qRpPTu{jBT#Hc|Cp~-BPH1Y$9Y1^Z5xPpB-p-r%GtXq-axo8z&gYqL8%;_2Rwa7#|N&hFaD zH@(JJ%A6O1w0Cfk7b~P}^*7YuiMYh&aqSpjk2K(`*|_-fge+-?mOOR2i7@)J@Xk-E z-sRZN`I&9FL1U_{uI5V8EY;Hx3geqEpBj)l^m&{@M>-W#z9!SPGO?In*!GCemy|FT z`Jg#d#zfw0x-`S=c z;#38~<``*^)BGzYOW)h}G~_4Lc}|9C{kYZ}9Z!}23Y~mvn9qr5hM=d3DBuS%nUE1D z`>St=m?_B3DBgkFh+)7Y$G)3FsI^kyg7kcD!a7a5dBx%#iq*))lkb+KDndX zvKhE$u4gVWV!9PWDNzQbH7#L<=uKJ{D!+ALp4n>KUsq{9|J2}#MS~PszRh-2yIE}$ ze>d5=sF4ks09Qk&Kf(U1gRP2~sVLiEH6wmN*`Zqf&ilm@k ze$k&Uqp%>39VB#HFkRj5_IM+Wh02Ncms8pl5%l2(dA@x!xhTb;Va6#b$kHMbrt&HU zLe-?O{Bl0LB$CNR`N~Ou-I|$JEMGa<{%Y51vwTgXVZV((^7~uu6#FY(t8N{rT06~l zs=bU2%af*5mX+ZT=U!4~q_P#a_#M-UPP4<>DC~@~#7ii(a=M)_y|SP~utRIXoWMl3 z^vYBmo_*}4EJPO)74|gOIKy5XD?^(TULrOAYSwKj!I}1Trh5^7@48M53sw`9-SJh| z_&f|D`(q=MP~90xSW#?F)gdVsOUq;zozLUkuYI9u3tl6+bas~g7Sr2E*x+g=Q~!7p 
zcX$!ZKvx}~vD$7QN+(~G7-6nICxA~n4dOeCt~bW_uygGP$I@al&${_cKdk|1aA%=9y33Hb z9ObllnNZL zfH`H#6irnA{jiE{11(Z6u+JcJ@%cVWGu7|&`qgS4ZCW4@Q;5v) z!bIiINm~9z_URogpYJP*HI2SL7bhrwPV(wsl6vt}Zo9R=G*R)%Yk!%2TB^0@#hX|D z^7zu{n6&*BDO6rl>vk)DWuoGfSN^ILDz7Wvyz*BkDBYyxuSvOd4GziKetDwOlh^%P z`vL7XJTEu)H+kXL3CYO1VlG#;b!ZjrR#U2o@KtGV8@pBh109@(G?DL2?xrlJ-` zw`gFmbYO9#em%x?Q#W5(W*xeHY|6@px=LupHyXbvKOd{oOiNL2>~gq{Op5D@E{AJr zQe1Dc4~fzniZI(Y9%UH(Q>3i4UmL$kB^ssE_NP#%lB2OvPbgAW*&EFiI$@}R4cun& zVv}h^S9{+S615}OQpxr*DZ0q zi)J*WO-_;mpg;BtQONev1I>@F-fSPpFq=Vm;7?4z6e+jpw7JSq$lb`5cNTe09f(q0 z1l>;T$|btoDkfqq#uCa;xy^o3s*5|Cd^HL}N2gRsq})kI zk1=cKc<_yUNfRc0LGpy_?PA2yv*81opNY4IM>K1yTC@{kov%D-KQmqw%=D{OI2-dPh=Ygh{Tcsri83`T zF;UjpUoagjgd!AOq2wDyQ|ocML^NP}+2V}p@i>=jQ3yvyX>&kqe~MoNcV4wzu1Foux8gu*a-FZm{)8z)$OGmvxY`rlE+WQ z#ift!ke|@NNx15GT*T_Hvcb+btjO=qSndg{Ots`Y>UlV4a{hmj^00k>#(#;@Om{cV zQzv3ezZ?4)V78=|?{$-SQ|@J2yOM(Y&lUL zvyYB1KvRj?IJ50jX9|^t=ZO8(k9u0Actx;UJLc+-%*+wb4j{)2-ztAv17{w zUCt)<`N#8%%uch-iKWnJSW(4@(c)?Q1YMS5>jO#ypOa^*)NnM>q;Ab}c*f56%1ESi zIYf^gJZnG0wAUuw)IwNvsFzaNjP>X=JFXB;wvlC{eY~bRXm?G=n-rL))(d_H$Ddg&f(S9Ff&#X?fzM6@b;7!Y(v>crr{s9jWq zbuVy>)KEFeqoK+x_PYhz#@bJXTtif`ntEnJ9 zstY3iS}KT-?1G5DZa>{r#{EG}OMKnFn)HVFI*)Y|8rpGwi5+laJ#Zq9EKT%5f8Mvj zi&r7pbj5$laZ$DC#wSLHD8AmD1d$vArP12&o zWcfhwyN-RdWT{d^KF+l)Gj0CLhhp7SFpW4lI0NHU95Qlg=Oe)y z^JBA&EK{L>)h2IAgsQa8uh68vdne73>RQbuykH{M_qLcQz zif^^^Bl5_Ina@B-F4hN0M~!_7liHl| z^IAJYU<&OgqOcY;YZb%JgH(rJvn+pS(U`W&wM#~&*mlg+sL|1?7AdDV_+-LJ{jxH$ z2`SWBLyM7E!LABSuNu^74`r<$pG#a!QnAPof_R;?-@o>81 zEGugkF6SrLdn41Z6T!T_QXsk}Pf}D;rKurV=ZniT9Ov@IU``J)D((+q6*m1CD0t0) z|F%?{iF*7|&UBoYvYO4Y-X@Btw)l9KBb)*kusWE z<2#}i%u4(#3Wd~~uszAGwDn79Xf4?Z|b6}`#qR(lOg%PI9L2}EU|r?=|V>(4a z2HX3TL-A-y3%3Qxaqc2VN*hD}kV8G&f~Pp{6FuN_v7;hZcVzrKs(y)sn=fT(Y0cV* z;q+3+^{K39|8b;AfP?;&6EGD6x8|tRWsa;EC6*8=l*=7&rki(Nw%yP?%0gNzVY6Y= z5xaAZv$KW1cGfjru6pbt<>-?{a);C|RRc4!w|mySbIHxQEfu1(>sZWc*13p-Mc5(H zRHv(En0Mheq2>6FZtOLys!MbmS_A7+eFl>pG^yN!e1+rLcI*>(Oun@{sqb{=HE*PN 
z`POa?=7FC{(i+&2GWemCg&SQjXc{*V}1TR4bq2gWTSQsm|okDd*7#2?CUMa#} zqGZHgSMb#iPpsf7zF@-2pV7V|n}`gFD|wBBpY5*;1m=g!%J|`aw!e&-4Vgs-ME1wK zLS?yw-;SV<$&H5K13FC7^LW=f-0VXbLtaFl6qpf+)L~OL-&-X^r&JnB64R=r??t_o z>l{aRpbRaJ@g->A%KWc)@KcL&7aV_by7T{M24#b44=wP`p*J}ASt^+tJJ#p|gF@v- z#}}zg7ieUH0XUzVzBRdPu;Ee6qdPKbX2*rtIURQKEi->p1cUgd3;O(!QG6y^zgDo1?LvcTRu{T3S<9Zg$M#(n*jP9XV8m8$@S@G#6p; z3=CZ*DthBP&Ml5Hx=blUZKpFydaEHR;ge$0+Z^2RldNuB1x$)v!y{fQTO)n)MYS3U1Zp=9rY%!l5&q@ zh%T4exYJZu7v)|DH`ymyjN1g)B+}9)!cq*i2&9D|GTm-g{uKU>YToOqJ?Z(uF8eq;hwiO`V)~5ZK#jnT z9zD^7k&Ps((A30-pS3ZC^6{*oh%AkhLwiDo@{Asy*(mn(%IIPz5kD(msBCgvY04T2 z&$MWPRD4BH(HJu|D!wZ8P-AM0c1f=si4u_pa@^!78X_)>BnP#$t*ED(-p92A z<#op#mcp35^e0q%rFN^!QFq|-4aaEHY-18li?8ygLtZ7Q=Wg7%s0}Do-g3;)Bupkq zIzZ;PMai<>k|UPr18|FD$HdKM$I+=E`o}cpyFy?~*~Tkj6ov6j{+`49KH$z1dI^vA zz3+H0mF0|h6I)b&wtS~20a)K`^iNGhr*^-9=*)>u{b|?&i(dK42Y-WFilxhBbjME7 z4;>$L#kOqs(4e8U+N-GYk>mZYSWoB`@_=f>gN6EPwpYu1)D0V zhVKhvYWsI6{|<1Opgx)KGd~ ztEGE!C{XQ=xYHcP^sv;V2NBTVQA6RhUigY&udKUbPzl1AoLnUHvf$AtE}0%G=~RC37`PfU{}N$*JaJt5!N*hn{?ki~zKClr)yBu_}~ zFON#ybEBHc6NYxDE33!zftR#3xk&gME9y3({cE|%ueEMcT~k6QN7JKEs6vza;lW71 zM|;=0r#$pyfT#iWuivqr+n;nJyL^ye`lPESPrm$;i@*2EAG++w4?XFeBwa50+9X*n z`nqJiT=egfYPsm^lNA+>)%+#j`{ct%k|O_u@||V;lP0=K?N7q5U4m%;Gs#xVEGyT_ za%}%oHBb2`Nv+0?l;27>Wm!y$;o90-xmKiQRuo+F-egj!dH#6A{HtqT(?8aiN(95S z?UdHkuKw5)y{1ky;DJPTiv@9F)a+*;* zNCaF~ro${tGP==Q3HN1@4s$Nb@_yrSp6}Yd`}Aa-t`(Gv@%U3#!z3LH^TBv!Fdi4h zn0Hm4r{i&&j*|&h;-9QY3jN87Aw|Ps4v1-0T+7)c~w# zcg!FmR#xWYqy$o$0oP){G+E(MGF~ai#hB8S)H>6PMVVfnKKjvcXOazeWE*L-xmi>j zNwazLup0i4{Pe*SFMZ;Z5C2G(OeG1IUUkW37k}T8t1o`ir9YfJWJN_!yyUXWp8S2u ze?EQT`4yKQx%h{kborG>lCwwgLGsns#t&Y4$>kSce&q1Q)Jgv14n3vSD=)qH@=Fe1 zacOelT37P@S3dFb&Gy-+#%GOOkW;Z7IL>Nkh-fwWE(6c2fS7|0})%i`u z^NQyePc5!5o?bkocvkW3;>U~U6t@+BQoOEseetKo8;bW7e^&f?@v-75<+1V^<+IA? 
zl)qNKs{HlxHRW5%x0bIjf3tjD`L^=6%Qu#{mbaB}FMp@}-E!qQzq;$4<=f!Qjr7_F zZt^E-rU&QggZwg?W!e7jzdD)z_ES{l==xd#N9Sdg?XQ!wvSKwkkj(N)Q8m1x%Y1*G zA4reR@;YrM>#8%Vv-Rm8K9$$Gm}mCdg{ukgRCV$V*=YJ6H9gBFx!q6N`)uhxt?E2J ztZ06^?$Fj*^;#79#tDYe4&cA8ub~*zteW2Wqv};)Cg%cU-Xzlx2@MpC)~BD)Q^uGA z|9PuP^Y-hGZ6?!OR2qhE#mTH_idmL}D;pOhJPWCGWaa0tCP4gty(IK7;yUkF`#beF z{h0m*kc@t3SzR>g`n*uJ5m*98`Sfn9kxkMis3!|hPZpq_Y=zok9L-BT0`d;(qREa3 z@Oj1~D46vIalvW<)K;$k;3u*GaJ8v^rG<*vo|GO{QTs9=(d3cf~VF zy(B{df@GNfkYr3U>5sEAF}n3LFPyKl*o(K4Mc&ShxBH#vV)K5tDzFZPpS)#lq9`Dy%x-8~cm!MoQ1-YmX6SSnC{-w7seZm!*^usFPw)(MbfiaEd3qa!0ddn!V zv`MjGSyb!jM!z>G(eD5a?-~ySWl^U+6`x(O>IMysZRKYtFm`4(WR^y*VYsHtt>ILr zay#8CiOBb>J6W9wE3ieH7KaPC^ zG?To-t)2l0@}}OonY+wwTJE%qcmM-!r8MAuBAe7J1%*GFePg;-CtR(qPv4>uHOU3B z%90g%M4Czy;k6hiWz*kRed9W{P-oM(slcV(kd(EJRS)x*^9;+^vT~DNWEu+k_TN2k z27BiYMRk3eW?OL^*v+ivJTY6FbhcZ}fyXha)o}DUFaXcI4)`cCcL7M?0kh%1|6|91 z+dU7!b6-GQ-uXYteG16W9tV&cpK%g^{MYT6Z5xF2of*+BA_f1bkCd16VKJ0Ilhz?x zKtfjC$9=!}RQi77A}3_QGfk$(1v6MMT%|$==y8jzo^cC^ zp@#EIhG#u}_9A%BH!u~R+N8Pl;zM)27CLHP(PyHFKbR=%Gtupfnqqkj=$u6qpBm3U6>5C&={-q09yQ*1Vrppc&RZwLQNmv!lNq!~D&h><`JnNian+A+KMr_m zo;1ZFD303N!d`#Zr&$qcwnUU$@7;jJWw3#QC|#z0xxL_=c$humdBJgqDV{#g0DfHq za3cdm9^;j=f$74HB3@a#_22+ulk8s1oy=rUvHn7%oS?#-iQx0dSmF&Y8Ot^XMKQE5 zV;#1A8T-3F2ckB5%&J$B_S(pLL4Mu_`3Um%Bgr+aXbW`%Nx9w2#WE?ria7U<6h|ADvidO&GCAYhDdjpl>+*1+Ey>8(Ls`U~Yl z^HE*S25M+<&CLO?V2Td)kKV7JcA&m~ZZ8aOZt^2dad?)3XN#nx)8=?qfInj_db=c7 zF)x8wZd0cWi|YHF*6A{aMA;?Jw!wo28|w5ZaSENLPF#;`ywkk7+GCgvVi@Z#tv)V> zf&kmvh+rX3f&F*j(oX+Mf+Bkwu4GWXOvHv{5R(h(ZJvD8{N{fQ_NR9@{U>|!5BBe; z>|gUd)CX<<^KwaD>|1P%Y7~89`&OzC{tF(tkL`FPcf3SgKxl?3lq?}2jNIx~SGs@c zycFERMY_66;DqW^=EK6@9L0yan0~~NX~Z%YfubIiGsQ|lb0IwGzi1qPa+!aoizUa$ ztJmv|+lO5JQ3{eDNV7xP4A`1a$n7%U=D1c ziGl&uV|**AL!{o+fqJF1SfBn_->}dy&n+$Ssb2n&-kg-dmzS;!em`n<*(>`=vXxs0 zM#Y@wgYFQANSvdUlNz&?GNsmmDG!>ro4$r{bdn_zEX2>A zB^*sZE)8@Bw!sIvg;Z1yO)s_p7A()Wr3FXRvXA#uSr=;Dk+c+V5##o`=-X%A=((9= zSJ7E>*8244L_Z&*45zFoG41rFDh7M~vQSk603q4g$ysx-d3KVxS|u=V(pe#FsO4to 
z`b><$kIX_&y=LQzI^lPYGX(=Rm#=e!>G9v<&;^!{z-nR0c{kS&o_B-UP-bC*p<)PR zFNzQe8(!{6*4?PrdG(t`UXax9GflAg2D8!(YQZ99wAcE)6j)z#KH#9m*38;*24qC$Ip{}4+nK=b6R|G;Oe6X zVD^3Uwn}PMhW;SkHyh$XknCoN(5n)I4G?5;KGH2Nce}IJq2uBs-D*~sGhTuR#Dm12j@M$VbCPXaCCUf zvR6hp#wEy_f4{zlhUoqpCYu{x zd2BNQ!F!K1d#`M&>90wh%vS=}*1UEB40um^h*u7+j_VbF;~=lBTn5T`bP>gHDSZL; zP;aKQp|f26=+&|A#C_{CxtSV|DvfC1t1sYZ)WH9uDbA(nZ>Ra*dgEG7s5WFr5GxkZ2~p95xS! z#8MThu1aD5e15EdoYhQlOllxu7)Hd|Y7M&roPnOtGJaG3B4VT!UnI0j>J7I05tJdIU5Ij8vG&=qmca>I%Z`d z=SjVK5m+`}II~99&!@S(IdgqB23T>0BGeV*j7pq=!s>HIy2KeKz`Sw+y{O9+u2=yy zT*+OvvCc5Yo_4R$p~&@fak5ECZ&^7VTNmfA06SJ z%}?itn$cBaRVDHZ; zA*S8hpX%#XO%>9~>iMg=;fH|fAnbx3FkGSgR0&JaQ0<_B(p+np0cqDBI%rapEUKGbnhuZjW7dW9dJhqu5J zZ~1IAf@E-O)-~ESBasjw3Nw331Sn~o;O?2-;zq-iQmog5K`pBHYBnLbmiLjTv7Jwm zNHX1}fIy50Xeoetz?5$1Qb6U>L=umfWzb@SPfUCA^Wc$+e zQK9Yw6sAq5e=fIg&bo$&X0!@xgsW<7ot&E`)la1h!N$;81!BpIZ2q|K>zC6%w&*}9 z(YNbr2E*5>CIH}jtvo>D6-^+%G+hs;uTZTs>LH%!%z468Tr&M%{z2#~t{#IsKp;d@^r%a2@Z{PXZn3_-TMDjQDxm0? z6;(Ga=D^d46}LIUPKdHLY!yk7<<)^CEa;G)tdp-z()K=+WzF=KTyp*);8IxjypVlr zi>H68l9@b?sNjg97N$pjWPw~gz*>+{@EW{NT`6a^)bV7!xl49&`WqT+jJIAP-vqc@ z_Ehw?(DSh}9Yx)7V|H*}M6CkVnoSmVWz$z%ywv2g5AE9%`+mbYx)FU|GzG*ZiU-`w;VH#z zDVp4zTq^;{fx|+x%*d(}{xX;g6ch-2R7<~99yAbO;gl^klO(6C-^^llQ6A?@vP7+e zINnVB4%VCOswTf|`pf1J;xEdotuO+@BMh+B#ucKB*K;+I?{B_I?oW;P^jI@wr2xb0 zwUGvd%4ry=94}!LA4`~KvceC(L9Y7MiVe z4LHM7m8P+Da73j$D9Q+fC~k6mje4XBMLYs|5#UFP-oOKFg+*(G|1i~fYP_aVNm5q8 zru!-GvB3LuGd?H{BGb(fZp@+uE2}3iq!0rC)0T->5K0Y;K3lPZ?8{7VY@wOE8jfK~ zVEQ*v@_IaK|~dtSw@KJ%)>O}|>^ z)v_ZfY#x8*(&NH|GdMz^Un5^Bdvp*MlhEw|>&dGDE-0FF0&FR~Z^?JP>F^OtFpIMU zT>>+Xul36MN>B&ncEz{e@v|2lqRu3{catrPA|CTjRrmvCaa-qSSJ*gI> zrMXMZH>q_H+?C%6k|30ps5?>JtY#({ec?ox=!<~iLM&7TbM2LMKfe7ddrLO zBrFU<=>jqy1{y+|*RJCV$t>TmzAPKKv{(u0=(R=-U#z5-q<%AL^Q`ujkkh|&HL2u4 zSXDEymdOLC?Ey1Kz*fwMzR*MwuZ>QfA8&j-lN>TkWZN?`LcZFwfzC(1WCkSW^~hIM zNrr9QZfx3dvs}Mxhw5D`aW&SqueIpXpnC9hSVM{Wx>AMPRtysfZ*LJs473i7K(~lT z;^P2lq>i9GZtDQvHFn~*6zbq<-z6x}Um_Mc22n<;O(I=ky)HBe 
z_UiN@N#ga8Mb(+5;sJW@U|E0zo_1d%l+Fp61>anJS*NT>Ha|BEi&V$Lury$zYq2rT z)As#SCsR`fE6qD#gry2sw}M3tU%=@gm@DW~V$kr8U@^2ILiJ(8+xrvK1O{zvlZeUD z4nG})c0iaqs(HV8ycT(y)f=_^95w(3Bg|akdoy>0#%2g&^pXHkYx6u119p)Z^KQOhW1tPajvMhF z8+qBx&nMwgHZ_BE{s1}fUg=0kac^mhW(s>*spsZDudh8W(H5NeXW=Fm9`DyH>)-gA zU$bRMIL+kC8wpZRO!SH{2H8QJXC*|r6n1}7tZkW1c=jQLcuNJ@azWlJFnlU{G1Q5B z(c8k`EN|4i5jmoG+N3q;Xg-}>v`?mzEF6dt#UkEhzXWZHwVF1=hUBHyOsnfk`H?gU z{%L>ejJimsJP^pq)c@B;iqi((zlV(t;mO}c1XJ~yi+xw_{qh0MAqIr`O-V(qezhc z!cX}-W$0%mzEfI1MRS1sI47VcJexJpl>!bbIC55$Gj z-}XKkN(-piPCOL`BdOh11-Md4t6PWIoi>HjpMR;7Mh+6*l;x+C$rl@kl_x$~mRQ-w zx=&7J%~9spAbkQdkr}e!HGr(oM_^51%@A;G9;Ce|NEvyM5Hiz4oOLKc*J z-g~XMV@_CXn$?@uD45+51@Km92``VPuQ5T;5Q&rR;SkVgLsfzXv?Ywo2{;(>>DXi} zHV0u)1UPJhlwM68N1+EXHEr;Nnr9a2SDt06rEPO$qlvW*!)ge><@KuRZ2K0UdxwAJIftjm9>?FbtcCu5f>c&5EuF3y00Bu1Hx7o3XCCQ7=%l`TZMu z1BTbvmD&bD9W6@>XANgeT7tStYBNL;IF(lpBpwy~C@(P%iTrhhFE zr6oM9lM;>9L5XDmuJ z(pd<`&{(;zXQb$^BqdhLw*!6+ow_!iCTUXo{4G|`Cfv~6ICCDufTC1$9KE*EW(@C4 z|JI?g*VDz#j(I%+7_dNU&U|7G!1qnYNAX>$TA&BWy!!2w^q)hpR80}2 zk6+-x>3=NHx{ew~TV!eKu5{=bhlBlPTv3NRf(j)WIWqv-O=7yY)n6O9gb10mt1nX)--y~cg zW!e?7o*_DfIe6-NY#nQzh)oojxW}Sa+))PtPqh}6LM&}ngC?xyl2cL60CyW~U4_@2 zC)8_fDzu)_B#{*wicy*$fK#B{k=j<&SPzs_;zuQN9nDk1f@IBKo42Ck=Dl39{$-Xm zA4yo;kb{B`YNjGHBhP~ZbdaL|GAg%6ag=z^SZ_4=Zl;_6M0*XWZr<7oVzc~a52oP?R0NuYD73)XNu*Q3#o zkO}iy*pay2vI)@Dra?)T9kP`oxCdlzI>sLu<7+6IN5lAQl|f|uvd8$|FV6#OYAu3+ zLQ%)~TCdfXwv4Z}Z!niC8sp2aRlZNNQK8rPQfOTnEGDzYV*GJn{9()Z480yXDI#G9Lt18nz15B9~HVwUv7` zCo)C_S{fgjUjb4HnKb7Y`6%u2ktyqh=VAPL%SY7el}0|Iw8gfTa|ztYM;fvc7RscC zUY(EHc0xR5D#dffa(vY1?SJYBYz?!0%e8YY1&!?-2GTT^E^zI%IhaHhP1%SI<g4bSk8RJdZbsmw6$LM6a5`3*$Ahf;zbS63IF;i~gufG`e3?~OzNk=sFIn|o z;BEY+h_~i%omE}2BIEg~Gu-G)5^*11LYx`qQrUeX9n(SU{aZM855~~Vb;=&9qN6~c6XI7gFu^uj^Y1%Iu z>~`Va30=x1Pu`L=Hg!TK#d3AM_2RprL7l?8emc%O6=zrfokLGInrpYtVUUUTU6sV= z93Prz#h^3=YYcDFg14`bMU>^tr(Pa2%w1>_Fyt|nd`5&q9y3J_ltzqUxMnZ4eI&KG zlz5qZU>ji(dA1Q2qNy0DjioZejabBGh`X{j!m_9}lxph|;3~Ye2rGu+A&;qEV^iUc 
z5RNY7F-h0-!f>*hLaGIR9)^=KCJayeet`6h;j18jY3fDZfi6W_p%b`_ayI*%PJgF2 z$?%Te_3w4p&7UUCpCt{8?ue`J@N$q!W1Ou-t7)iU3;1INl~4b$*T_F7&8L#)Sl5X1 zx>@xxO-y@htlcrBSX^i6+R=RIl!jbKSa{~SJF;DY(ze>0nIxZ%E@@`iqx8@yy#l3` zxEm{hrN@r=e&WtD8(=olRC8Btt(MZ7!D>s}SbF6+i96*KL$C3rKhuNy$;Z)#4`iUPk@@34j!0$wuZ630W zS(wvaL5bDHPS|#nw1gN=Nv4xSYnb4*4ah#l@AO|Jle<)%t&3?Z zf{ZEV0)x#6NqVyxi}O7?KY^}0$4#r43YGNChUe+oh->>cScKkB!Lpsdbr4GHndd#l z9*a5$p-aK?ZfiDJ#U=-VB6kp!S=DZbU^diiY$~*_ z4Hxv~7{1BS0!Ru@MeyG;mGsEt^U(6&G83oI$viX@%_7h7=AkjffjliiGm&R$9=a1^ zM!zJ){ds7YVbNPeHUz@ba?tp3mS;NjThjI%8bmUE5R$MdJVhDPRws%7n)pr!ksPyHlN{)i#5&*?Yp)o;65NuSoiBKY#rum-~|$|27|Z33umaAJ-4J zANAY2{Z=qq1)kjQPwosA-{rS=bQP@v6<_X8UK1+5%5PuURkR9JyuqK`6e>Q~Z*S}> zS_LY8;KdH3ez^Up-`?%F2BTlroc?p<-0aOhKxws4hPJu1k?}T{@99cy7+(7NuGC=m z(ie242B4RIGJv9X!{rS>XF%ShA1Xc9Z*TNlfXu8ylL+2B970{**>9+#gAo0Oiw|;p;SHT+eQhPgKGhhd}3_&V=cyfCfEu}VKult@b4&k`F{A71& z0}Sm3K&fj3^U~LMr8c0@u1c$lp3ibwsD3LKaz7wSf6+)nE9YMfVgOlQaD2|G{%FX- zHHxsL0JIu_-(4gk2FExS54l0V)wf!~nhiqW? zmi2N@X!Gl_^WMwDLdjX%^kUDfm$L|OPretHO88*j#mY?`F~Vr>jAgec+w|m5tt+0- zSz36qBVv1=_w`ZUTk*a=9?L$kU0?px`r`S#I{ChCw9~Q~%yUWXOuVm~V%ZJpHskWA z))&v`97*yS$3^p^p$|FTYn;_>WfTa3#~A}#%fOhj9BC7MM?!#s;% zw9SDyH;W#Lne}tC;IF7+@GzPi^a7j{-g~a!2De8Qt%CSPoP>c79>k*eq>90lY0SAf zH;ZnT`6TCN!3i@MJ168lp<5d6`4~OV!vLpuVW+EWPb}{ZTqsW?ZOS7~GFu;=VK;p7cedX)c`Ag?++mrsJww|7(280c%a18@{mMGY zL4kk#Xi2IlAJTAAg*nbNGyMyxL@k##_s3h4VkLLMb{JCY8XeuzV4@wfA!QsEkj+{#N#nTykRI>^Bo9avu3XDpj$jYAgfr2NEJXL&Y&m9G{O$=A~EArV{I zmdQFza*xYGiS7Z^C?B&2tXOO!}(?G&%T4#pk`wXj}C9Ew}3|BjU+L#)ajQOPMrrbBDrIE-%(IMyVwb|P81(jzd z3o6e_I+f>QXDW{)QI}MnRv*LWOwicD6V@+$Ss>c*j}(uePFa)0txGW%c(v0LaM5{- zqc?3lddGq?P1SNXlQ>$sg85l)m1XDF6lEx3r8>)C)g-gfVnZf}q;+5e>nE_R?WfU; znt0P9UO)aRY-;B;cgkH})LoFvt63qh4#^_++27hVO-M#iW6@f7eK8MZZW99xO^_rU z1aES01F{#Y>g&k!k&Ly8Qy@v8h>05DB4ZcsoBq1aQkkXX3GXsRlUDyG0V{+unlt&S z&10=bi*oXlnlzGIfsMIjBIkIR1jk^MdaL$4tqT*N%d*lgG^KH3K>t$ z382<=!Ztgb1&$G#U&T|9%QZRpgVRb_d`LU0!U0T%T|0PBtl`!5A{2s2n)C|z(c~N= 
zivyvJEBF*?YIQw4I+Q4h4Rl_>pj8Ryi0J3D?jF9=UzD1`$xRY6&3cG>B#dSYoq4J|rY&l$2*MNr@yh zk~s-Yep(W0iNy&?=tR~)=Jrx;htRAGMSD_Id(?e=(Z*(vs|V8bFU2!1dlh^#&bHrH-`$PAxzOL+V&A}d$aHzx5STg zM6AO{B@8}&$)-4fecv1%J-|VR_w4vi8QQwI*@*(cG;E)yHD|jnFrDfGL^XBuZUs14 zDqDxz)Eedg={W|0IvO_BjmqAjL0a`deOvFXI^>hep!=AOsRB2=? zS``|%vmY_Cp!LNotS}3M@k1mXiU+ zzD(GGve-%dzA+xWLO7hy>CXu)LdW!oi;L`~O>*ZAGUA}W)IiUV*J=R0V_6e8OvOqc zu%BPV(IQ}=WaLsN9oX_mR=vD;ocFdA61(;&-5X3Le7C6o5s`F@T=zcW7?kNm zZb!f4WsJOc8wOpX&$aimSCq1uoqqTBk;ESAo_*7?PKyPCEOo!RsLe7THT)oEHG`vt zFOF~QLgs4CWztL&MF>so88d8>`CGm0Q>)52n?%aZvv%H5omclI0_@X^Jp}x-&hUzA z{Bw$c=Lg}82zaSZDO>6pW3?Ru-ncCZz}klu`_51y;VPAEwqyyb&7|w%$RHcFb(~-5 zsTJK5knU-MJT?(uq?b67XFb0+lJJ?MEi3D9?JdpgzDL!L0pSi~K(cKNwk9&HnDh0K z0XK{RCs|_%U(RFNtf?4QII2tfE^n4FQ*$ayg?nw!Q(O z*p}ci?h$8rm(9>WfJ4mdEAfWPA@lJFodHxfSNneYVZNUL{VAKrHgDX#EqQ1PC}cdA z3djkiI?9T7=R+J7rPV~Y0Y&K%9;%Th{YHD6Jt2%m3+iwus^Brr8L(6x35C`unBYZK z7pc`Bk-g$Zp;eelT=(Sm8B1*YyuQH1PnQN}3T-HH8I_)a;0zQrb7Z6*l3&^Lc;N&4 zPL>^4*qN!c`T`5V4WB^>-f{i0%~qU`Cj?*MF&Hf){L>1(g36+(5%*`v9!@M~Jgd zR`cpZDVfulMi36rOsF#&$@2aDi?)}W?oZ^Lg#(|JpVnucRFjV>%{U{&k8-A_Wv_J} zkCHy4a56MUC-Gnu&+PK^y6W7f-FPSO_Gb->ls`JSi>Eq$Psz%-qt5z8pXwuK3_5g` z2eAA|I&WKCp*0;gB(I%Te3CHV2=ph0Rv731fj-|Lc2wUjAc_u{P^DbQHs$cV%D{of z75E_&a3sLg6RH%X(38k*Rn49^fAg|qn~ASDpT68uApMe;THd2W`*xi*A9`uq#6oi~ zw&rg5`R?f^)+=^qPfr9WV46#xRX;-L7ttbG6TG&cp-;rR&H@2PI?66O9Zq^J`D zTib`n7DyxJrj1I1q*cad|5aiYL&HYDw0aqP@$}6<&@1o9PGW7T68i0J-Wv>3{@AH* z;5`yeyZUrQ0#Ly*G!i(P8tq8%OmY`On9YC%u5dN|QHNIqgPMrD=d*w}4~T$&HP`1y z;Hhh9ZQlrL;>q5|9PPQ`a)jU+Wd|+J)%;BnHY+alR>Z@+smk74&|+Ax$a%}u_;XL2RY<^_A zQe;rdlv?>d9aO^$d?U9$$PaW@SDgtm z2)uvj$yu-AoOtIZsTzI>Oe< zk<+gur{(}6ZEb;RyutG&0cht>A`2+VC?$rgC9ySikw{&*x))WQdN8?q2Xk*W;|JsR(1+*HoPw&<+XQ{KDlHD3b$8&k) zIdK%-qcUV`jIC^j8DH@*87AxOk&dBQg95FtuY^z`yVtMJ6XV;&l0E)TtB2w&;U?bD3} zrqgazJ}q*wcwvc;fg*R#oJr{8H5wUEOso4wFWr^AUqoJ%+;BK1XrZ*+S}G0?fS~4k z7RP$*F*~mtzp5Wb$E>lJXsS1;CpR<{1FV?E7+y^53!G$Cq9k$6{zh?-8r3Ynwgb%p z#{HwLdV1iXRPD9c2#lJlck3uv`+4-*qTZm}+?OCkMPX>~S2H{s4dy92ghAO(KHv>( 
zl9`aF)pu@BQ2LK;Z`DXVdWtC$V;YJ7J@V+?yt^H{FPk;h>&-rQ8*QreKntvH7M%wX zuBEA|`wW6C#he$~t2%PEw>nRXwHrABV)5mRtt%G}jj z%anhRw%@1#CDG?I>URJ$TGY+g5#5aLwvIKTTmuXuFIw@~1bN zs<87aP3Dv7e{*I4J zU}IZi7%$q+8Y7TUuo_&QWaO=fp{zs%7%25L+S6U6Aq=M>)LA z9?pN`o)P?CBc6&)Nk$Tz@>+IwqAVmlxv|b(PaUi+-c1ZG%pf!IRj2x~&${?=0)#@W zA8FKw+MyM0mh_ zk%La6sx}Xi@nGoaB_sk$2o98! zRzzO?Vww{WhhVEo`O=?GBRPPUr85z#Is5!M!__AM@smUjnBw4!;Y4RrWK1JT#v^g!>YYo4&0MoHy>PcU}f_^cqd@n$eSNnxwal0J@9Qug(fAiO9L)D09LON zTK6%559vQxl$;TNRDW|Z6kHPE{pQ5}+6cpIe5CPcPIXD%t1U+Z%^1jgawy zPmV9*8}QvHq=>YdR9_1F(!(epvo%ap@vZmjOf8lM0#)%Ok3b!@} zKL9mv*+VF;_}DnOrfeox-dPeHSuTtBIjz3}3~?a8WE}ZnQYE$aD;T61`lbU-F*ka8 zbvOix@CKMS-}pyga~Zs(IhSy{fP6H2wxS1Gzk)|72|ydyE0?_*8p<~w{@-G>*%&ok zy;bQuZrTJ<>kW{$E7BwEneF*64Gee+vw?N>7mGQwIXL5R@dOUWzDP+a)vTZNd>9bBi{M6eN*dt;6uk^LO?ZT}K#;eTTfZvlpS2Bg zAGyqODMfs?bD>f1%71e-4gwmj8rocR!|p_=KwUp2$-{z2RFXdCGmzWh&#(urU>HNN;sY&N zWf5Qx%Qg%g2?(!Ylp?qdgV&hm0r{S~R3qm3MI_Q@_?(dG71q;5{oHD1Tkjj$S2v8y zvNS@SC_2vppRtGu09Vpwoujf`eIZAPR#*Mtqa;L$Qh9nSUXTcvp6g6zTh-UHVjJ@Ck< zvmSHEBFyG_Kyx$BAl?(Z$_^R-h3QlUe;D5E2p=uF)7C1YxHOfz3!|7!KO}f4*tI@= z(REx3CH959r1?ABziHnLgHxK1>NCiS4)i|NeT`)Qg%9^voU1<%1-v%6-T;h0kBA%4)d_uL;ap&6bbk+6-ga`aDpBZyhIjySKb&uqRa zAiJB5UPAU3HgOnRCgdBD;#oO2#2@kAs@q(P4-abfTK^lfWxAv&oJa8V>9Hig3y|*QfONT+a z-MW#Vq!FVivIQb5az3FRlmc9W1~jQOfMUu)r>iIv17ZD{S&O(>LOcK(9K^^23)=I0 z$Z?4YMWeoo>`QKZu7MkFB^MeV3SIw1|HeJQKQSpYH|iPrKn_6~{sxA^G00nnGKtu( zWAhTC)(&0*-J>V_UIMq+%(n)oA;)Q$lRf$lb9=a(9!UN@_`m*@$FMJUR-$i@7imoA zO6(XO@@KNCNq1dhykHE}!(f&|IhgU=XUeVEm`$CWNIV7Ri1`HLZ@hJb3Yc^?osR{d z7_Yph9)J5PDpZr(^oR30fve{ho1r4KmlviK$I{|fY2f^pAnkL)kiZ9u@7-bGwY+l} zit4_R#H+zfNU_L*3uS|Wm&Mi#cvaMMe7n-&6-{^?yiz`JA4hp#XaIVL;1wT3Sq1ex8wjro z<04-1ARMpSTZ(Z_aF9XovU-o)+&5B@o0rjfeR|(1F6SMl|DED>#>Q(jFHv&X4K5{z zZ4KMa5Db?v({Z--3Ot`sgGMr%#U@Z}^(T%A#@jj9(F!nLi#Sc5?xF?Eg6^kMEAa7S z&IZoSS_%L-60g2OR}p^=O0l{;D^q9YgHT((EblyTAmg!3rg=_K| z8GbRkHJZ)g{BJR}XJ`%LtDt&Gd__6&0R4^PH>tOi^3wY=Q8EqHM}rh&!~w7eX5lJY z2U>3(zM^__-W_9%T2CM3G5l7xHXR{Nx4=PA#0F_S8G+?y*HZJw!dOy9;8M>8XpnJ= 
ztUUwIt(WK)IKx&Dg9l~qnTZl>hRQA5S+z4R+!trLqMIA$+R@;xUnF3oz`{1_%KQFh z%M_3(B;I16#-XO!dEWEC=N_3YXjn*;Pj5B-yZuusT1@#+fttQn%{&BOxkD)qAGMEJ z8%F+K3*BU-#arwCMR6`XCYdk*ERG{uQHWJ?SFa-?g16wd|LdUQ6P${1HuMWj#oPYH zQSp8#=9nrw-_w9qudLXnEZe56+@@@}P1#_ZvNV>xICZbl41{Dq{?~%&A0pv{>iQ^s zbEmeLHO2JLgjEvKZ-2J#hwc3@JVW<`_Ws>Jv3UREo4POC=Py3C`23l6pZjwyOi9V3 z)Jd~>`Jra>mHGJYb-9Pa0xx{L^ipexV zo7e>H3R1_Dtev$luKEPda+70R&3vxaw@zM*9Xu7 zd#crfo!;fW*qTxu=>?>?c{FC$w+WDyEo9bMrxvZTQjM9JEDhw$A+s-ccw?4HJ%z?> zFN-$@-W^HB5u7+Yg7t?ggEsTn#urX*h(?eh|Z|vZqf%ztb<}N1IZ<7yV1jgW+6(2O8wsL$= z7=s8TxH#NB8p1gqZcRnRys@({2F>6|bK8zXJbUD!!=drJY2>cZIC+)OxEceZ8K&NO zN|q2<1&jc3hbd`Zl#1f42%#T=xA#c1_e%G%9Dv)XV9P8A%>#VVNaYOItLgMVnI=9I zI^Xj03s2lXa0^*~%j;su_I2H8*@F7UHR(U-@Fzd8;V2Z1`jISpIYMg_D1TRe=x{_O zMIWb~y8|ML5uCh66(Lfdf(c^}r4Zjx>TenA!NWHiO5ywC4apz$MiIyXkF>bn;keF( zYtD`7sNxKMZ`)>=A7zMXzfIi;htCGyw2wpGMC5{v4hr404}@Gh^(ql^CqnLN605tD zfL#!Nm>1w!OBqyeNWq)>OpN7f<%zTc$_sadLN&IT9@ZBz1mNH*u)|%_koEMM_vFh} z<(zVm$W`#Ts>T=FmKv=%P?cDMuyc3XRnUFA7uxn>eY9Tn<7rQmn+mX1UbTp*U_fDn z36L>I*04H>Zsc$g7$`pA`&H=Ee7{CPFbZ=qG!2$XPpf16YAcs`5ua49*rVmR#<>lk zhVkq4QS99S{naljW>9x(63Sp&nskD@U3aZf+LLP-#VQBi(>%yNjSV zjc?wQD~Q@W8^ktuT{}bVM36UcV+A5AMD2U6XWVATU~cZGT063rZ>jc+UhU3XHQO4N zvS-aJw$!`J>-~Y%BiZek0z4$X*?f`SCh?^;GHLV2;97HSh8oTy&YHnx%}amv*k-c+ zHEHtX^E}Q=nx`5{rR3s#a#}qv)k=UjWFB_n&V!hPP4+}wTvqW(ujkFK%Mjx`tUXQV zGDm*Vd?@vZmM)uj#RqjbPf{;5?}?>C;O3jdlrhuB2b*iLL+QGb^>w130?gwq(-8sT zkn_JRdQ7UH{2YOH=Qf}Pz?gyg%y2zez7{}>OT+2}tf&?%^w$PUB`Hr*YoeL2M|T~uCwz7)9eLyAxfJRMTE+IU@yD(XCdwCq5ajC$ zp<((CLFgJv!6>uQ&0Ajg;Tx{jL%m+0Q?*$hJZeRjuj;ZIwobocbhff9_t%6GN}hAw z=IFAz0P$DEYq#p%t=XpT*7QnuYtCACYwlKeYYW!8+o5#Q6&kuPTT#5PZ4GhXrI9SH zvs(^q9A$Y)GVAS><4!U}oO#Q4%XAqjwW7pye@j3bC-3>~*z7Q^sy)-8N|DInr_jFsr)vdDMk3m9d(ZoZ5INljJEO&VnB&|k>@}>UYr1nIkz?*)C z0yByCMQgt004r1d7kJ(Ob3p+ss13kQ>x5&iY?*6-0OK<89eLB63^+tY67k?XIl#UD z**JP2dmLNMnio9v*k+uaZ#Jq=WVui3bjh=4v8>_6n*YzbS=Wk&IM=$k7#m585Qjm{Eu9jU=%lp zebp?2fvX|`OMhxV3*>{cNCdC8Ps{xUHqfYt3k9r!J~1U!c=N 
zwgx0GrcKl3)n8>fE0mc~>!#8I?jynttEMOotC{>DqyIdAXueneNUsNhPuDsT)%n4e z-~^TsqW8}8C5EsVWef4tO(5RZ9xS`9`E21+!--6%9;PhsSt4a>Ju`N6ic)cfrpx$w zyieGKQx;~Tjh&`2;{{99))aBso|{m@V49+UADjRA^kXd(V?N%m+#e0tuaMlH&{ras zwxz%j(EL2JXf!e(7EE%BihrC2YPjSk*kb9O$XW+t~pR0jVII@XM2Ne zW3vY51?R*7XIwghBkAn-~D zg${{1w6A*Iq&Us^F})U~-XUEQ;@{NoY>D0+p@qxHX95EU;)GdNn-dpYe3z7OmI53@ z)IW9^z#A(LZ42isLDs$=g3=dHrGw}0hiQmzDjE|vL;eL}$ zJ)E!BD;&lOW>&>5#cYkgJJf=TFw^sC--hjwg__Rfe|RrjYOHyUwW?mLcUY3itHyup z9cH$wf!nRB1^~BIvz?Oxw-U>h@J}({p@L#gTlHGxzSV&j@}{2cxKIW4j*DoZG{Efr z`DxbH{C@Rm_5CiK@7y=vAwJu2%WPMZ&L;5KZVk1*-|TdOOtr_ZmOx-!wg#T2U|_en zY)^f?hT-8dK zp*OW2N2}v>NIYu<(TQ@THyQ=2i}J0-HnT|9eSMYjCB8O>G^;}v+# zwB)M9G8|C8DhWeQ)Z>b^f&&des_0d?_6vFu@5dgwUhYvv%RQR79KT1s;ytQj>@g3} zjjNk?4XR)nWgVr!Co}v5pLLgE)M=%8Q!2%jb4Ynsm=MY666Q!-QHO~&Rb#sCNLXk` zEsaJapkBsbZ5hA@F@p~v{609NG5I1ONclz(WRte-#z@suyiA=IZacnST1}3DJ*4IB z&e_nXi|KF(C1$oEOZ8dg$?&wOhp`yc4KyJq;lSrT-~;#uz1jX#9K?KVSodZYhhccp z79`v$JBm85wI`J0L7Tk+?HI{h`0gwqUYF_cH$#Q?gmPdKSmK0txYUn`W`rDm4NnQ`#M~SRWp=uqZ5gW`Pjw9cW3z73B?tSX>2xA z?Z6o{;6sa6Uf)kV ztH#YNno=0yN20DSoTJ8Vdq;7LDVH_W_zsP`p3J6_I;zP>7`l(3#hg-8JLDtmtf%H9 zs8g08NCa$7Y-c?VK7wp*crr9;VllC+M`ix-a~?NalN%!+A>>7N$Vb>=8q5IDSy~%7 zt-yhgu-2(@VaR<1Q{%dIA7P6cS70R{0ScuDwTYP}Rmu*!4*)WqIrwy&Skn|JTocq8 z{y<+*nKUqA?~01$hl`3TthL@oQ4PTw`&4?0Y7BQmASTF#rDa7WQALsHl_+4g&1F$h zu{v9#P20fPDyk%hCEb%rVxmGF)8R-|8t_yNYv`?_k{q$qd0RyV@ZptK9S3cD(t`*K zQoWfaMI~62$_k_5SF6Pq6;-jIs0m@p*&wiXRRsYMD+2f3=3JRgBz$Qt2S5<-IojHsn(1q&+iO$VIN0^*Sd5Nt+fGE#k@S;$o z$wj0Dvs-36VV1FwXJKDPHF;5nr&W-v6oRZ~JvNnY+GVxgsbT>UijganRzOjKvnx0- zzdQ6U-O~&jS5DWMvh0@m&hN&k1eD3*;1O$r9=Zd&egs`nJzH6?^k}=YtXrVw%nPJl z#w(R{lOlXAQ(Kt^DtE0-aM>pso~Jwo-8IkaGQ6mUuOj)V>9FPvPdyfMMc&Z~n?zF{#g)T34yCyXO;V3-^1kiqG-`^$j=oALa+nS}r(ex!_?p z+9mM?j!!TH6 z(8Fl>)u!maQ!KBGt2D&hg##q2uF4ThUpT`ONT&OqqOKxLys<% z{GPE+|9}+cwN&z=6d@?DEfdCv<+zzo`Yx5fp^uXoAnZ4?=np6@giuUE6`F>Qf=O}Z z49t4;r&G^GVM%-!{aH(72Q?(#tLAn^*BYD3Q6~br)*U@QZaHp3I8x{VSEgYL0IdQtp>%*Q)-6zkqQ^hR 
zk6J)gnpm)>3GZ;e3S%P$oXbNR9j`GsW$bx!xux1;(U&29)E-EsNzB<`E3`QqCR6q? z^5}X^f>S0qU<2t{b3?RM*_|?Z-lWGL$Bq>8BysM}5GGxSqizVYsEn>0aY>YA~O6x{jdow(HaDJ+>e)QXzD*$7nGc*s2%i zGy4-@PRu}>-O}~4)k;X)KLs;+6clrsu1{}u{q9w{EA{}28R;SzxC|6sWVkW}tEu@{ zxC?Mb{T1~Y7tVL@o9~g*-*d}sA80yL2to0$)o0A^ufb6brEjAOivM)*yq7N5lsNp# z`u;{T^(hhcKb>yZ>V1!!omrC(l;S64d6cgA$i=h4 zjtq#ZJ{`rsTd<(`&osp^0%~+Y@$Z)6*R4n6y9GJ~#dcZvy}PP+Ycg?%LYN)(^l`IY z0Lg{687Ck}VIs(ZHO?PFOZeqF>J5ySmh2XfIDb>5=$h#|fe}R;Gs@@S?f`2H< zGB6MRgBmnVVa{pPby9KWfyrd|?H5W&_KPpEYC?sTGBHo-F12X_DXJ|lwfF)js%O90 zREmlS>jSsgFKKY>i(ZMYAlJAU<`hJi+Pz89*jxxb1cPqB%*eO)OA90DP|?{e39#q@ zfYyFF)Z5!HOMw|I*1mKG-GrU7CpU<20*3dzI1Fo8+9HgGU(JCmy3~UO`(>y=E1LZx zDwK{VJdyj#2*aU8LWz9DW&0(ZviVT?5rk|HSlMF%{}5isNSM*mG7rF^q_tp!z6J9) zP+tOGD_Wuj)2=y6NJdhHreS(P#SXW1mR6YsgXnI-kj8$V(prK|H7MTOIkzhsa|@;> z3^=uqGim#Pg9R(m6h?X1dbUAcGaSv?NC{Ecr<~3+9#Ff}tIgXu;IN{7koCr`2b&l3|{7dCF+P zPOne%(NV>xQx+_kC*7h2i!)Tv2ICeim<$o4-Rl-irNM$}ttdtSCIfy{MhIqXo|*dk zrj#t0@_`$?ZWc_dK+t8b-esTe-?w19qzCUm+ZD-BZmD6K^8T~k5@?oqxam;aTKBP? 
z1>1FE3kJ>uY4*y3$>LZ((S>A}TPT{YcekY3YZgp)08v|wi}S+HHQU^dv10pTWPuVBG~ona=$kSuh$Nzo!d zacnRtdj-0TR}IauA$##e7A@H60NGkFd)qA7&@9+qELg(Qp)=|3bn8y*9})3E=uThs zuv@oaa%s1jX*dM61(wlpa(c_}=78(aC#I;S-LFg2o z!hV8Jrch}C_CcVAO+g>k;F^^5`X4HLZNWNLa%zqI|vIl7V~9-mQe{0H1mFz3>!=qTrq9CT{B*3+h%CQU}Sty zxoglH_EpOuL|xsbuEgS2la-vh4>ckjU14xGK9Yjh<^McysgNc~%#B86(&#X%anZfW zE&xwlO{Ez$=-DESHofI9wZM6}WqfnIx6o*GWivr75gb+xrKXSRABfKF-TySI^NwXc zy~ev@8Q2JjsPsY@^xH?aTgjkq8O@jr;!BiZE*QZGUBq00d@UZy{FlZjIw)chbBc?I zGqC`s!OPOHhulfQ2w|8ULzqn^j{?OIBt6%u;L^RJ6x~7AkV9^5v{EI0Yr~c1@>?4z zH(_L3;lWCO0pY=aH@1X7rA8RuywrRGSI8`*bD5Pf@89}M&Q**&WTLg>0Yu1jAfqQH zQ)*;V%3C@kG9Zf>;8KgD4UD+t%Gh^nN2l0@3Eb8$Iu)RFPJb*?m7h0eGccsJ7nsdU z19zp1%$80Um@S>Q%;qEg?3nGohX-bJz_w?$bdlNG+o&6*!WIb>m9Nd5h>b!AsFKLn z&lr5Ms*-ky5t|_ZTNd5VxvYkMKbNxFc47Rpt={=pE~}AsTSdG`&HF|e>?;wiR`-ua zpr>8#s@JX!hRkRV!p!EDO;S=Pbs}YLa}l~xELAkb#7T|i#p+R5*gpJ`c^|RNqT|n4 zv7JKAn}MyIZR)VJ(}HoPTP&!TE%Mw$xFYi&t|sjj(=nlplXSz?-a0=pdMMFYEI3nR zFq?<+K+|uxCe}BX+Sq+UE*u|^eZs%RGxuZ!SiDnCO~3&?V)OqHAx%iynuGnK#uhbRH>rM6lty$xP%U!wGXsPP3R%~@N;XNf!TErD| zE8x)GnigZA&y<0sUMvMj6=h%sHOFNPCoF9?URd?qv{sU?T{z#dZ(hmqte8J&Qd{g) z9Y|S)v^G1f-A$#fL}R6*nNjFc(Uk?26N%*g78i#36WdKSS=dcAR$7>ausYo_p-EEF zV@6Ft=A)aP1{!@g71NdVDv`BqMIaooM0;2mW-DwNS6CRvgsDPXxXoDZ;B1G2AYSK; zEv}YhZZ`Sw*s>fVc}a&#KfpfJU0m5=3AE41Q83AN9Ayd?f9uN}^(DhmPI4j?Qz14f zp)D{rq3o%cPBB}KrCWK6h8!yS;_KaMA?q(G`e|iH2x8tSg>MtH9-Ep{Z+Bk#Yyi9{ z1xD?#dW!-((~$f{Yl}$&v;Mchk&S{tml# z+|?-vi$?EEofZ*3xIi|G(_K>9$?QCq>Z|my-O{0Rj?>>qFr*uWQ#EJ2|s#jvnGXgejd_Txq#C&0-wk&_Cf%xj5Zl zy>x$c>E0@9Rp$&6;&K(Nal&Xp2iQZa&yAy$8=0lhGd3Guv(<;WU}QCb#p}2i;nOx6 zk=}q};!*onPNbZb_!n?ey$A_@ZF>X`jhJ8J2n^p}6&e?j_D?B6X{#=n(14RnR^d{d zh*n{Uh|mIeRc)BMkUJ#z)hP!-qDpwLsf1B5SjzV>PIwOPhvzP9z~=M$K(y{pc` zq?y17Cl?qVCd@qfdcOhPo)&%W)8uQPCSUtBuz;`a(N~x>W91T4Ts>bqgi)X;uERj$ zgsr#^15t)zsdmn^3ompeaG-5(xlNE*etTOXoJHzVpG&$G$=PY9acd& zX|<6DtWv`!v|9WMpGX*4WDN#cYhoh2ynH*XIo6Bu`v!*GPV=E}h4<9mp43;rpr%C` zgJb&VQu;P=@)v&9{KZp`#T_65LwN=J(rBwr0n8c=t=U?d_F?7^Gk=)bLm`ownfIv! 
z*1!L68T|Hz+-v4K-)~a;R_Bq-6WIptYO*R}n zxr|Bml}kST!V08xIXvK{tVV(`kFPtG(LWUY!Z8mpV2-@AJ~}W|pN#^$p|Kps2AMLg!qV zhYOMgSM>Xk#1=Q1yP{a8NE&&cMsnU07TUnjaE;U%zw0)vJP2eOGJDkFngA8-4Q=Eoix`tM}($Br#P2^7m)hL%B^6Ex0GffZu6krWdk8f zIdhe-e25KUw8nNZijFhvAfWql4s!_^qOIJV>yT0fCW0AHEZvFX-Aq6tX@oilkyK~A zYK8s8Bu=!QgcodFT$)>qYmQE2(~DZ_H4+xu=w&pYZ)vxMHbGm3Sy};Wy6RrV6~^da zjw^)wd7MX3T*0@|ElT)uTp?fL%W(xrAnG`^xWeWPWVE)4D-@{X*eR1KYk9BZieF9p zS=Zc^mx30l1xuoM!qXQ5sY?uOOruB_=k^g>IXIS7X`f27`UxHit>llo+d{h&LJsq| zXsp46hBXWFQ@dLrQBWP{bbHGYswXS+BefQC8EJIa2){Ax(Om;L9DQ@uz?M#1^$AgP zKJ|L=(OPE)msIu#dnB`*H}0O~)z6kX%sBTvq(QB5x=^ z+mWY@X^uLD(x75ws1w}{O{*);55%-qfsYg&ydi2@KG#Ai!JH z8Mx)b3WD^zfUUe)gU|YtY7eBxh8EuZvgAbWz)=HL}Y%ty&a`&1_JPp{(hi__N| z9Kfe4R1od-()3k&prkU)&Q33}m+kZ*m^2U$HrbWv7bfGufAfnk(V*TC8$+xfrk3Jp z{efmbQ++V{lYQtk4hN8&3l#`uBS#M7kkJp{7Qjf@H;m6v=>Q15@;?ftC?e;@ z$h9ZM7aa=B%?oU+#XmOY&~qI4Ar26BR2UYD$X%!uDEqsw=qL*c79`G=oiV^|q)TwJ z`^iMi@Jb&hVvBvy;&DU_d2*qWNLe_HJ>X?C@k#Ten;lr<6-o4o#2iuW#GIQ`5p%SH z4zO5&$eak!^VS}EOzAcs>xqI)6tB68dq=OTm@%Hsy`!4`l8%&4s(Xeu7j9}2^_9_n z;MmZPq%oO;+pBn=0TuhqssNvo3+GvO*Q~M5ViVdStb8EzQt)*Atvr-AUo8W#?^;cN zON?x-^Aetl@abnH*jw?1hn(sQ4?oox=BN6?H=XJWk2uvA{-0BQ;ZKuOedEtg`Heq6 zbp7I<2_EZP{Rj2yGBNtzgnjQENMm@oj*dnnAsVRPv zYSIjn$gApNpXrn8b%X9xn&tHT77RT|I~nSEW=?~&0phOe4UUw5*qSMtH%j!)CZ*GR)btq`B=d7j{rC$ygwXPa zH1OcO#*LG*EBT(b8&sWK`Tw)`F3^^qWu51@zi;37KIiPZ1S+XGd}kZi6iwn}t#QEi zmio#-Qi<41uXd(;P4^mluzTd3h*e35vBl&N3c(9DEvP`#V@QBPv}{BjWYI`kTcNdW zY4I-Fh(KeDHfZbMrOEvM|L6TK`*yF?Hu`fRUG~K64_anEgq6fkE=bC)x`_*AugE{+8wyj%&+K zWG~T$Cud$A$)r^B1cf;lpJ_#=^geVvyPPs8yDb{ww6@jYPml0`41y}wYFBj(29!;d z4f96AHIjF$`PYUOGl{zX7nD^S;w?^Gu-8ln{k8+m;@2HuAUOL&*ipiUS884nOf*u@ zUY!$f)-SDLwT#JK*-OSjp=~{mWAz*2q)p}*;633aA`@|YjQ+zX5?f@Uiq;) zeREx=u)?H>vnv{KLDlrJG@>a|Hp*|YH=2^4A{W>3lj5RKCxqWbgtcqSRd`*lZBNdo+jF^6mg6I}2uluTGfv9l^CoOG zi^a<+!d);GEG%*W0}V^Eb!4}!kui*PFT{8yjT8`tE1 zC~J;c4oV{%Vm285i?Whd=?A`#FpizEHASu8*9tg75N==Nd~(evOfl;s^Uk(pJjHk% zM(5&ja9z1kE<^g>;%%}MSTOG1Ol+!iz-7(gi{ru70Rji@sRRp!6jFCBhk^zBonS{j 
zf75(jz=!Ow`wPi`U2{=<{%<>*j~k47qXU2XPE_`*7gCH;WTV`H4&z~$7La>2NlFK5 z#B`fxUUB~6G_SOgBt1orDp#H;{Iv#q3-|05pRx?sLtjrXQQ|PBM(NN}$o8 z=?HDYI<5xq=O>{;XcP&}F)IyDx|To%W@xT2hR|vR>xad1uuDXmKX>VDLtaVZDXud7 z!d}UJ*zmba5~*x{dYrTWIQjSyCeG13`1B?8n} z8%e&ctTNj~F)%L)`O`@XZRlW!j=d)uQc!%^kYcNmiqT(sc>Zw)@PX`-!f9+4J?|5KrbCRjdAv=AkTERx=e{X(TB zuG{D7$h68b=ibrJB9dGgN1q6oT^aAocfge)r`HZ98M6ew9Ii}w38WlwnOdH~|IuwcF$?o&8RsvTaXM`C!S6Yz$*Z};2UR?l z&{Xbmmt5fhNyO{m=rh$dVOHZ2McnXzDk83w@uyM5F#}1&@!6SL#=7^Epi<`KME=kR zOe|sx(k8`^>DN<=6Y=X!@vG#QvMML?59@;DOpCJg{QKgG^!BgDFD*~w-n-L1D~wjD ztmh8GaWM#t(&KHiZS;0^mECfD@dDetj;QAqX$;M{3zEF`_WP^eK2Hcz1vTOAGj1MH ziebgu@7KBv~p0(QLp6yoT%=Fazf`^x{f6IAx^ zJKjE$VsF3W)%2=|vXqi(BiR=n>CNltwLO9Xg$$;)3n}LOWN~sIHCBSz6Zwhdfa(@0 zSP*p$yV>z^tt(mLJ!xrcY^JxHFm{OArg@c>N6m}gk}hVNga9w8f6`r2|AXVoHhS`- zYu3PTIhyCKfy;B17Rp2BZgHm6~un%zH^~xa>un`-}xZjuYNOAy-!ws z=eMM1I?88UO~n}lDIXJ^HwF=+I!#B2nujYwtT|SQYTM!|J%^~u_cb9Z=eQ7DD_2v` zKq^GVv?>r(TCqvbaJ3ClmFV!`k1aHmYJ(?8=;WXv zL@o-R7*`v3lje=!6hbLD1y^iKq2>H3y~CVnysKngW3a*DXA}{fA=poZiaaF$`&8mY zjL{b381*2$F*1uU z9Hx3yk{i@x?=1W`uC@{A4tcz{=x2X=El- z(z;`-wNS6Jv-Q*qVk#YqJ3X$ht2HzGT1X9(HJo#PFIv4JZ&A%IC4JMR8D!OJ6{!6a zos#AnDo|!E@?EGvr>m_35gFILTLq$JIx3KdY843U0db=Q^H73H`~h^W1ks@_QKwRZ zvlS(n+bnsJk>nC3I4@b1)F?se%_c*aw@R={@uA6Dn{Bz9SqY-MjUeBaLT49o1_jVy zKp8qI-f2hfcw9Cclr&0EOR{QtYhB+#wV{^BUF$Or>oZOX(oti5G}F_n<3PRU+ZyX5 zL}7iXqwHEA3gK*hIA)F48z_0M#(v=5Ov54K!ad2vz`YC_qp;dYg5~EH%N5->uI)$| zB30#fbsr~(UEFVqs-3!l6~Bo@pp;w~qGWeN_sKq`EGd*04Rk_TSNHLF$8W+cEud@#21qzpJO|?`Ro%K@TA70T zHNxfM31){t-f{Ta&ql+vBL^J!?;3-U7y{rz7&J363J}BLawf+UOcOen4>;w2AlPOV zu#P@S?xqTZWczsr07GP{h>_# zJ<>gFcqEHkJQvRh_b9j$Wr8APk0b6O&*2)?9NFXTBD6wA-oE<=b;ZkB2p_3_FUZmM z(&~8Nj)AABd5=l%MzY4`Ob6R|`J{}h&a~&Sl2pn#=J0BmhFy@P88?iB;-nO#PKuxm z8^j>OFgHY@aJNC!Iu_N$OC3}btcX7kGV}qrzF{r7_2%qr_svdO#=CAkWxnh9N>!bj zq)cu-Ga}-val=j=Q4t?9Jy)|c z9pY2N5XASSj+mq+K4r*jh_9oTSnM1LNaftLk`ro(^y8ZJaG2DNjtgwPdIbpE2WdWp z<_uJq%JeS z1PX25kc5d_`o(=>(RM^|+{#nw85$j-d4&v!rfFF8B89=O8FweKZkRbYcZQEt3vJ^J 
z5$7}(*$xHR*k_T8WRcZdZIPo8S`0FhMV76SLQ-&#(x@%6v=TrPu`+TK4?EVe$=s@K zGTqiQmk48b=F*IaUZ>S2myv8Txr)$mtpG4{Nr0e1K;$}?vB6erwGgbGxm>l$#%4Wp zX`AdVh?fl3xDv7n4#k7XCZplPCJ#7Jy{<^W3>~n^`z8Q}^^*5hovF#IT%dQQ}MUO7FP6|ElmCd~q>TTto4ik}7WYyLC?g?NOMwHfsvLVSxWV4s#tHaBA zeWBN7C@BdL;`w40pl(<7BKQj0C?XP+G`x%Qz}(psHa>1zM^FUls)UDB_eZ;fWjf3 zH5QuTx5KF`b>Eq@F+eK5DY5av4b_(kIKOE#hwI340d~Ha`Danzb-7=Tx7j`TJ#6g@o>tY z(S)5L0RV>79+B~l47VC;iq(f^mG&pkF@3MOltPPr)#y&}@)N-r?2uCW!8QQDq0IZjyAj|JMBoVAw z{s!C*)6j{Y#D!kHm|o#6-NjU7snQXquHN#Jb6AvSC6(5plt=03!jLlv- z>220x_-?ApQe23$%3P&B}8nP~n#;iNWI(^J-c|t!aT4 zUzwyC+>VFlZ%DPgFW36=Idw1Kk%~uk?R|uJBeX!(lTW+HlecoC8g>=v1gYswB_Z<;2CtCkW~Jc~?1M zfdsfsVFv-H4UKAzdGlEF9wo_A#4a#y0RhBsRf%w-nWycAs0JWGgHUL|YHt;i@hT*& zWa~hp@JA-^@*(+wE+hm#B3=VXpj^$}s4<-sE@Di}Iossw#uwEY1ISk4_z$n>zyYuh zG$feY8;wSog@z}DlJv0Vct;oXtybnUL2t3BP0Eb?im~2Ya8w`^&2@Wu_3nBOarV87-mF$ld%f1z!Wda1kelbjJeBG_Xt9@m>cFbz2P=!=ti1Ci zqchJ{HRKxtEJ9zdm~bqKicrH(For+QnC+qMYL(ZtJ%>jm_EtW5yj z+$3_R6Dd1p?o#@OkdoqC(f2CV=!raR<3r!GQ79j zmR{6wW;szditjA(6`#!kKgLkk6;#N0q$_?&&j2VNzobOf#2&i$_?}d$fg$(jx252eu$L?fzm>VD;q&~ZaooTs z+2tV)QS!ot$TYA=wx~JRIuNcq`!UhSB-Mtyp$VgP7%pLBEA)^w8!pD6(3{bdp@ZP^ z_HW|@oPP}nAy}kkm}hcbmL+ynYaLsu7DNs|tB9G@-?pnr*0l*@#Xe1vO?(skiV@0K zkQz1xE;-;|Q>NASc!5)BcHeuEO+_Mb>4}`lU?P+TZ_d20;*B9e*F@E#`Y`504=ldb zZZnYpF~5BPT3a;o$MVwm%j-h(65XfZWge)TE7JN4G}pp>nCZdRDq!-ttU&5zB zEWS-NqLre0S9bv7$XbttGLlm;!D7^oinN97v(RsP@G~gSlZ*c*MWw-ifdIIk;mcEj z4e(yPZ$Ae`&cPWeg*ZmjXqkhHZx=vXhTrgqy3ZY6v-FgrEz;% z1p_qJX`yZn%jCn!)lA!0Z5nxjXa9bS5q>@MF+ztL4#ou?&uq}40X?II+M9QrDS9FIu~=k$Ok$x#ljL|fIzT3#b&E;`mC1qOm-g} zlADsu#3Q*Zn{rR8rwYrKH-VfG>Oc0CYkGG)-SNP8k9|)Kk-fJ7RI2yGWfgb*2u1 z(54Q__Zm2~^fs+(PQ!!Flwpk8RUY-SDM=6AftFrt|I`H?)WlY^L~sIy=K5@L!6yDS z1!yojOTD;x)Q1+sWVl_CMV!4NXLT!4AqnZ;;-6Uta_CX;AfxVkSCm{}^q(zXmZKQEL5rL&jdYq+OJ~)H3-IH|Qp~=LZL|(1!OTg-&$QTE{8Iq|E?=t8t6;-A1ZlNhj3}N56aH6x{vNx1WrqGt3pA6JCP^AN0o9D2JbTZ7Kdc|HCMJ2t$D4OGI zzSdJ_M|me2tJ_%cN&1k{)bO^_{N3YXENT9x79ag2^U8+tPH+xV>;V7-)p^jQ2g3o| 
z@Y$NJ67Og?ta5NRO>E%2F}Wd1^KoXI%z)5~<<9R6iy~I_e*RNhVcNy~5_@%{qM3Wq zcb^U$e8Y^XTI1tP9GJ_jHv_~`m%+<}YiN}43igHYf@gb%SbP6(+U%{iNmH}4C9Ux6 zMOq;cyEZx06X>GgL@O$(=8|+5QbsrxHpN_=`=ll5q&)8A=?=`Go6v%92W~^gf=GV@PT^dgAAY%c2bKK8%guiv z8YwPg=J`(kDNdt;g`~3t+_&GZf))_O#@pZZlI6yiXW8E0`Uj;xv+SAnWH-LgQ}&>j zOb)t%hbg(>?}&f|ogQS;B1M{{uUH}gW#0S(M97<)@rp~3*v!owQ!+W<g+4v6B!Wqul5nz9d+9 znWS7AG}zl{S>_%^&C=Y2lN}f|b9dtFKzPI3amv5|sJ!~y+Q6PEU`n3RR#HeZCQI?8 zxqN%#R)QSMLjCgg+2m~n<5RzLMpoKlG?5rJ(~R&V&8cm+mE%L{1MpnaUXial#pNY| zZV-}Mv-;ISV*2y-M$65fU$FjF865WLs)vZlShW;7?n4eC+DNj=#~CRdgIEZ}rS__c z>Eoqp8&v-+{S>4N9-hEB4(a1)aZR3xIW(OJBP9Yx2V6_S&n9D1KmxF}L+NnxN6`X= z7DA*mMs;g)vC63i?)jd&HQW`$<5_z7EKql}Ld*}{&ZM|zv zW(ZGm4}zBUgtDoDqy`n`Y!t;wTE_*gTT?vIwg!!JIVOdlm1AOZA2r!ED2|+Ke3&*0 zmZI-8(yJfIX7_0e4q;6djO@=Omq^;=tQQ9fT8$ZMpeFF@wf}d;q&CdvqATC_?!e4D zM!@asFgzppuJ^q2w+6S-+%b0Xx+DDr_PBlru9>H-spt{>v_K}*ejv-r+t=B4gDJh{ z^=`}iZsm2$^{0CvD=&oIO+i9cz}omBMRSw{qw`4+Mc&JRp?_V5t8pjLYV$mPq2}vf zKnrqeIJ~)Dwb22px=G%7xQmF$*0v!gQr9+~z=rF8?vZ(o8;B75QcB;4j(Qkh28`#YO>X^|9 zJyf;_FAQfttgR(#Hn#u>$B-`JU#7|ukZ6*2qbiu$KrOJ@YD+d&(G_mLR;JQYm>Em; zi1#hp^?S50&k?&BM)VNh9}^pwU(V8K`O3aD@UdN zn~u-w#qn8eIH_BVOyQ~qS09&E|E@PzuYcRj*FWmb)m_9u=F(a(j?c0Ug{EBMz#ieQ ze#tGMYsDb#fjzoCUcwxCnWh`D@U`9TJzG>m!&#qnjJLK<)ioP$ci{CT;Sb40P%KJS z3*#nkP#aD?iy4ylF`)tS$BnJBLV6kwtVq#<70;}ea;EYba(hF(W<}&kFHPaht$`6wFB>p}^Y1=PHO5@jhR>WNHl8A>V8m_`K}vj=(-My*TmxSH4l z_!i4*HO`~;ls4$gl=gLeFM>2E!ts*K7^cUZwG0{ixKVY`5zdEU@oN$nI zK-pYoQY9!`7A@^x-`Q}Bn!jExcXa%IN#HW?Op@f0YRTJ45+T7CD@KqeNrc&j_vTs_ z!g`j_T?D#iABFB(B<53XYGENC^O`AEVpegy)UL{9+2!p3yx3XYFzTDMK>^xYU!2 zR|r>PU`azgO$haCESUj97ooo+G;0L;SR=C5j`W0~3bAEY;d(t-)B>B5$zpBZAn7wR?rod)womn}-DNgz#VO_eN=uNH zfcONeA;;0PG%Pk*bm__!S219X3>^gL(aVQfL0saDoz;fcuxZJ^6pe2$UL{B`%Dp^O zGs*1rb!?A+E2jbjmRx##{XX7}hRX|fzL$e-d1Oi}5|raGS`Jv&KoTj1_Bew?>Uzqx z$%9Ue4v2S^y*f6gg*L=@K{81`aWE~>a)IdK*mhoL`_Q|H&C*-Il{LwOeYe(y_#z@; zbwm8JNC{7mLRr?$6ybx3PK3`d-fkm&;i(3L$P&3zUJ8GPq`MhlHN5O8;_uy&%;B){ zzqO!4{&y7e<5P1%J~$uF#oIw@a`CaLKFs7ZGYxTGbtZ5ilMW?PehXl5m(>&>XPkpj 
z9P0@j9zzq!iiprl9tp>xOr=8bqxPJ@c5LS&f!vgdIJ;zWE*!=%H#m&S68a=`UV!h= zj*v`hR-qryQ!FhutB7Y^#^CgT2JuMiF*;kZ0u3l8ht^=5AF~HApiV9>ut$uCO>(SELn@ z8-7_`#jn6q4TkRbrJnlr74!h=iBu+7YUOdzP96@PjgRhx0kefxSgtP_tHg(u+sGgv z`(`~PKQfG>Mtp+=WG{>;zKVWJ7V3qv(k>VhWi=!C%86zT=vgFCB;xxQbyro?`Y3sazeE-iZ!=R)DSb&+KpL@&~82E8R?BAn*v)m zES#os#Bva!VS6t9)g1|d&(8|>{4!v7ZEOv%x%Oi4iZAJCsGJ_7WFY__sOHNA9^D(j zE04)>n6Bx9vpg}nQ8q3RGosp6c6*c2%N?bCDgl8k{lb$eH#O@?R)jg!U=>YaZk2=X z6y`!xm;(j8#AtW&4^x=hN(M_8Q<#yMSd?4L9&UYk<|#~lW@!p@Yjp~<&)CUs@>lbL zv>7~;np9sd8X_qeQ<$kH9B)QEHb8L1SPJk3N}4W`X=$b~F9n2$3i`{zqng6hoUcHR z-mxRt!&l25ikbnO(X#AoDiL9bsYGrfPfAcCVG3-Mbk(xSETAXtJX1#^7<$lHECo?+ zLOY(y3Y&!DTC5*kgN#(&5SUwzn_jK>_Q@bk0+Tv&Ml=Q3TvW3#1s|Hg1k36@otdT_ zM?YpN{lIUHin+=b>M{#mQPDs>c~?pU8Ic)5_p5bh})6 zCxPGw!Qj61O}&9_0{v=6!2G%s2>m93AXbkALcd8Mpu?QGq31d@HymKSNg#mQ60n?e zDEq4k1Q*`~t=A+FF0zM_pd^4Lt0{r8{xEiY%my(Wvkl4-gB#MNF&8$4;)*xh4KNyUaAn z#H{RV}MK^vZmj2Ynnzv7M>4Cl)7`Tc_^*U+E7e!ujO?yUIL} z8#Tn2le!fn~yJkF<#|Pg}@MD{emv2(`>n5-Q`baw=54)MUgVU%rTUQNFD#Thl{7( z9u&|x(YI7TKON7Lu}l}gZ|vh36BcbM#swR~R&i3jDmdI0D0I0K>$6qL$nPK|TWm#I zVu6pEEq+gp)#ppgK_Dnau`0=;>5g)d?s?>5JCeFemnw1&`KN2rN^ zp9+4s+$4YcZSrT(CVzf#+}rx>>&P?k*f6Q%&&0Sd|impUP%?g zWtq$6Y~grCxTSqTN;s_CP?YCc7kmUwt#BO&V!0)$+)BdQOs-(2%PkUP1W!DGxwm47 zITlF*>R{j|a94?W`KNxv3)Cb@f}lIULQC6jEzL6UOK~CsgI1_R+5mgzsYihb3Z|2$j0gPGH-PS0lgnZ937v8M$ntG=GP z-s2U{U*Xv4{rg1~S;y2Y&qX zP~O$iyLPJQ-@WseGu%@e(AEpi zy(9JT(RbXxGz{tY`)+vPf#u*eZ$BkB>&y$@u^e-wyy6>w_BUvDazA0z1iLwX&HcbV zeoZ-ejo`1LJ$e6wG&ayB!7yUnzWu#FefPU3ulmUw9=P#e-u)e)^HXR4-3|9YaAW_^ zzUP&%eC4n4l?LOv8~^r~{l~)}`w5==t>S0D`>UV!PCfVcUbcMsHK%_;UtjiB`tw1) z-p_wa^_xH|hc`a?Ngq4(N1yn6U-|%zjqktlmoB{OXTI>|Z~Ianz5jlN+Jo8O?DcAI zk}Wg);YnhwaR%nR)I%bR#RGC!P#>B3EC_k7M^iZj}~w}5R@9W9yg)I{|nyQDAH zzj!V#QW$b3E-5yuE{fnyX(}H{P0as!n15J~IH))&MiuenZ>ieG<>XqPomS7>**ugF zhs7}d^oH4R$jJO9C{+)ic?C8Ji$Y4wbq>fzqmY9hgi z++1EwZ{r46cesm2wOcn4kj>*$oH^El)J384Kc>~+kTQ&Gs`b5{hyevboQ`Kgxe++b zt0z1cYf(;BekDHzY>sGnd#dQO21)RIzS~zH|y%v@PIy$nPt%9!A 
zDtM#^LHBgDhgh+FG-g6zPAgPY#a6RuKS~s|)~E$B7l^%W7HZ{Tk`qy;arRX83UtpC zzJ7Ds%d>vJU+6!EqeX6l^dqsy6~HLFK^vk05qGWzhhWj6eR}xA`yOU?X)L1Z;fKf? zqM@rj90S)Cau2a%#$1C0#sF*eUq*H8&gz)97WzMSHvi7>z?mMa7xG@ddSNumnhRNx z>3&?u2BT45-!rO$$AGukhZ`u#=-)sBe7l#`fAac^K8yNC4}Ka~Uq{-yqM}mYOEJ*2 z`qk`dVxAE-8Pn?3G&Rh7{K>%r7}0}w@DgUiTkyCyy_`w>(@f?)j$k;NlbT_Vn~|Y@v$DnE}*x zVLbA8HhTPe!f6Cz%4iPLFg_Hnz()Av(wr1q(J-)_y5Ys=(TwsFwid6mR?-4cIW_Z5 zv7J~y2+?B?2blrG7B1{$|Dhm|g z_7Qn0658z}8f(-P9HDlyT0D3ANa&G7*uQ=w-bfUDWF9?qkVg--Jo=Z;l$C0{)-q&0 zob?PjJ-$5BWk?FJA?=a<9N3YmE0BQc#W~O-g{lY+v`C2q$8g~Eq8!*je`G)V3HlZG zyH0AhW=X7r>&#Z}~LYc(vR{L`r7 z#p_q~jT{LCy}aFS#DAjLQfh5)fnf^Ng_(VukBgE5c9U8Ybjhw4=U2P31=f@(&c#_T zqsA7?l~QKO35$^`DW^P$ z)Bfj*?lo|vt=PN^r@|c~KqA3hS`b$lebsc@wxb-O79mdM&rL%oqSOr9!KaeCWR1Jh zl%uD|M|JNV7>=BQs;s+U`RgP1ReOKxa_lZi)18jCcC|*M3e>dy#|0qWj{!=Z*IZl0 z3bSGwA-D(CO!*8kWcIN(4D<#YdC_qJ6%02ANmJmm`%P9|`3~0o6X@*7+ zVfKUs>->)u)oz&>>E#xZ2QthTSHKK5Lr09BsWSCPLS%E{9K_AzPJ=kOqea1}J33i1 z0i*g-(N}w{QrF0btPJ5ZNQC#wBfFw$+sLN}kID?FAFv=b6dOxQ?qnF*0mcvjmTN`X zBqhfo26?m>8?4y=0v;TbB`44vp<2z72f$3%2a^2r^koUCl{PcMxQd#Yq+Ow z*@QqBi|>MhY_EAb+WJf2yFm(b#?|)iiaPD-$q(Y>aXjzZy@SXyJFr1+PlqJ@_0>~& zd^4Q2QMGl}?v1f8mRQKWVYc=zDx!M7L}N4Z(pRDpEN4K%yqPb zaZ;WYms5j>zcHxF)0%gHwQ_=(`ik@SgqMo*-yc`O2P79l%CVvXW%-0=&(G;>z^Z z@=mzI1VEN-s3;@7WbaD?(#X5kn!5ezuJn6VmaA{Z1>Wwomaf5fZ4c=hcBZ{Xci0&X z9Xn&$%TFc}>IJy~6+e!HFiu5hv+ zl0S%xtDt-+F0O$E4J023Ozdj?u}jKGpY0NOd}Hi3Em8K)APGt)WLv2WO#%=N_sqmS znuYCfPxlq0Pp@^FN!M0k7MiJi=sw5rRd1>7npGvJBwOAg{JmcdE-N;*hTeVr_v7p6 zkKK@FpU5n54$y{qjO2!e1d^1KU<-+K`nrxh&A_dd)T=>lN@h$TZxHtX5G-#0`fyE6m|`DEm-Wat_zi;A{tG zhLCJy&rbK^J+%KWKZE_$Updn`L#kAQx)NQ;8gFPP1>{zaREt}4ZqmX z*^U0oWJ2W+DH*D(1}pI zY0qjy7e?)PYBPz9zs$z#nrRF-a5a~I)G3C}P z(?H%=SwC;R(Y-Q5w6}M+{Wsd(DkpDwje?##tGy4N+sk&y5-b-FEDxo3KUzJjtiNa3 zKQ?0=`hLe=z#YkHlAY5ZdrkAo`)-g{C>Lh=<@Cq?kyy^D%!ytR%IX&1cf+&9XowGy z_3dO(Q;Psb$bw`{0>FA8H$1EA-EkJ)rV-pCQmJYi$Bb2Gw9U9d24uFKPBR+os62zAC3^Hljtl!&ZDxq--lScpgvponu?{SR_)abv@S z+)#{QdQ1xydo!HjjJ0t$9GJmOJm3~&gYO{EbuL`EbL7n9n_ 
z<+mJ8ZL`u-K-}Sf9<2R+;uLO5kj0`fGA|)l)fWkO7f$zm4;xZvsL7Zh}NQOhW=1jdCK0AutbW zcz+Mk2G+|NpiblOf&%$bLKR}G;em2`>I+_czIu4;tY`jj{PS-}^Zqyf@VlP* zZ706h0zU_y0^OFxGCv0}(*$2O2^0!_# zJ^4VRVb`{`$zW1jn{r{WgtCmNppA0#EjRv`zxijMIQ97-dIz)gu-^Tttqcx+4N7D7 zfUcIcnZ5SpgXw{5s31sRkMXtn@zERZFE^`)M>`izeZe=sgSTzI>i4fX`QWy)Uow4C ziNlgqA=IwO1XRDnTF7^jIN4qR9dOpkfXR*5efibD_x>keb;CcFZJM~-y>p)*mXSAa zQPm;byuF1YBt215?wCMvFCPRs+SU10 z485b~SnN`X=~?W&%Y@rjIpkEBt#ZLG&KCUTAsZI-Px3vKTMYkfIe)V;ue#W zAI$DRIu3VbR+T@@_>t=z-9ZLuQGF8`*y;(gV73^WK68cFF^fk#X7PwHd4zHBL2ad! z;HI9E`8Y~L@@SIT7=$6*r15)pWD3URrZPTEpzhKgF60yVwghE9r>)KRY>{B)e-c_e zuMUL9&DxG|?YinofKN?AzC8GAT)_|MIvvRM#llPn)aB zTH*=Rq5sO`n~e_K?@ObGl}RH}L{GK@hr~MM)*QDb9VqsY4%`GtzFP-&0?d`ZQf{sd zFd0T0_}A`r_mxUIa35gl+Cd$NQD4!40*%lOhEV9Xssl%}xA(OhQRI%AV4dUHB6lRk zu3!x_#K06<*<)b*1B9>19rly(OT{EU;)pVNg*6|o zJSrsWRO5BU@ozclF~_Sw zgOf-h)toe2WYztzI!B4|1zEc)(}lP0zwjScE^KAhi{7MHUWm0*?wJ%*{e7zMK6Gm4 zukX_%v!j^SxQfQ?ML0E^^C&-l+fO{d&+g_W62nbekfw}cK!ujEWJz_io{GGltZoo3fKX^QmYF73&m zie^$~nBpMn{uU5C^oO<|(amZqi0THgrsM6Rx?XFTs9n`29OGG3ABLGNhinR7j-TV` z(17rA6h$x?Z=B5@zUBC~T_PGcOLN@df1U;zBMjFpYR$k16=5o=;z_E%Vso&?zYubi z$Ua?t-@TN={XhDBoV87=@`Seq>$3k9SOF6Y&bO&ultP$nsr7OIl4Fra8kj4%poZF({*Gm=(yu#Ee9mqK8KDh=geQy&J{|^R z_LNLe2&SGu-tHBUO$R)2L6EThL`l59vkj1v^;~)ghCS^%Qd*XDyMg9KPTlMYy1wo} zwq2UgvgYmH+LqqdLzUJ*u=`;g+}76>It+!E$Id@6D8zvfz)>;}z+z}RlF`eh^E6-6 z{6^ntVz&8>t|Nz9|6bYkCk|Xvd(2p=b$7!oU-L4+d)tfHy9_i$DN?4zs%+SnhnRUA z1LM-e(9&LoX~tX1`g zk3@DBAwd&bM26J9J7S`#d7YtXDuxf?fT6IoNim_>QrNG-?m|o|$_-(_1hE1CWFZE) z7UF={fQ~`^<-=RV;b>(x`+s{e0hdOAA68gX<>ZYtxDwD6_{9j=#X@LK2oPyZ*#-~P zU|?Y(PV=CqzvC5-j7{IjAEa;n<{NZL*PvMX22-YQu7uCE?VTkExc^7M2CY4Fv>8l7 zf=u_!cu)l`a_`RUSBl;+%3@7h1WFrqR`5L+3&r#xb=3{_iF7~s#wrkTQDSFh{m)Q*5wkQ z2U~IaY^?A>bhPRL>P>@)dBrAS3@oqEWppgI(SDwLFQ<#Ar5?2S6N{6sYL!o#ZAg8p z1M#ND6*I5!>m6kQAY77Skyqu701^PW3Ic$ST%EgXjb&fEb-#>-)D6s!2Pd~mg8*x4 zS(OX4G*p<*90OEi6m?S!%L|I)H+Eg?Uz_{dCYT+Y(r;~70Y$wVH14l_tC9O0%-)~( zSY-{IA$lm^nsKFhp{XTNtV{6@yHM*&sl!s4Bo)Phkdi+739_=_D!VjU?TV1zYhR18 
z9C(vPYnE8fYNkksUHW8CdAndNt=|=uY}x{icsnXDP%mgGq<|@H!$*0F*$%v7fMKZM zy5;ORzecAy9RRGVF?67_HhiQ!0d-Am$*ICOhVOw(UR1=8*lJ|PO>V5QJbRG3SmC$_ zLO=exbB)B5nO9(7su)0Civso7@Equ_MWL@2d0mo6aV>_ea%~l}Hi^f8M64lOM3Xfn zey{#1&Q2^%1E;q!isC@#0L%#e*=?b~WU-MO$qtObFyVo;rU)d1>oiR5JR~gpry4gj zz@f&mT523?<;K}TsQSa#HRG;&vk&&obygQ9SYO)YciU&WN(2#Y4_r-LDLzzs1_i6|cJxp{)sPPC0FytfwC6y49Qc3e91;a*o zQZRfFJb0WunS^O4xQ64nf;?7k6HP5rA~9j_qCUWvr=(4LZ?HSG`R`y%FTNCHM-p3nEfe+5y%SX?|j>-9BquiuMWI-s0!D`8b$ZBEBPA5xZrA9)Z=TKX+RhN3>lT&0#iwPD^bV(A( zG9!3koM3SjSPqoc&49TjU3bkEBI35rcwZ)s5hc^~l;!PD-3=Q@qe2 zOj|^ZW{qL`p&2xgG5_4N++Zx5${P?4u_%mLp84qu&qGu<`OqS*Sv#hU^O9v5ml#vB z15z&*4#uP;3ZnsAI|8HJ_%eD*j*dJ+a&O*@V&;*h^t{m*0U&el%Slfbl z6?vrxXqD+hlwn*zgFczPbz?9D#Df1f>ZJTj`@75GHR6EG!~*|zb-Yud_rU2X}Ni}P9`1~za6Q8nTNygDQ$26F&NCj}Ca z)R>Z07#K)EEQ=11tduG9wLosJ0GW4zECd}(%k{Cb3M9D(DI6@^AyRpU5A}Di0&iuV zxPDmKX+1;!?5KvZeT9CU-F_S&8286BJaR2uKL%3yzPwVeUA413ULM+6UM+?#$|L&7 zpHo}#+sfnRVO!HpID~ylZrg1*TC`vV{@OtnaCCkFmS*x-iCVwO(5B_l@=A-OGQouy zh7`WT5k^v!R|tip3rmqYsW93QAA=TcE?W~7!m)V@z&_oDhRB9v1&AP^%=zd&SrGE` za{S>d>8CBz?AUg}_Z&QMDwjsALSN>st%@|R{lr75wsbpR`;oVireBiC|D4)ZeGZ(o zd78s0rD|zI?pdN{qAg38ceKBYb`L1C66{kG6y$Vtpb8;vgP-^ha{-DdTL)Yv6E;cy z@FD2^lM%4<{0_yV8ydzI0Ko%X5Nh`MyIWmznT;2|;I60lNVc@3xgN(M?PYLTuarIrFz_k|5eF51;q zR*FxBLml95`aVq~nU%++C6AR?niI+^L2u|I;sNo$C?WB|1=54zc85yUdEFQ=+C}KJ zr@lncb{d_V-I8V~%fSI%VBc6XsQI0|6}wpUg6K&@h(;UG2)3koxN2iXdYBYkD?J-X z&m7^|6dSSGN4e?zWrB^&Qj7+B08kE_r=h8(4ycnkk-zwLFWSovos%&DH=&?)bcC{k z`|qOT`#WJc$`RIWSe^T>b9?qTht+%89hT_yME=?bX|ek7zvK=ZKYgE+|5ep@-^b7D zi50GIdSda$xML{%&f@!hrQX{1gHyYUutwxZ74H+4y!@ZhQY6WPZtRNi9Ev5rUKvrj zD0&|J<&U$^$&^Dn-!bB{sdng7JE{7=63 z<8OFIdyhLFyAxzs6yY*XBh&II&wIjwB)L#r<l8A5TS2JppPd#adB zDiaml!FfGkeU{VBfzaFs&!fsi?#GZ>KBKKFxa#5JQ)#%q3KDR}JXD6|q53)~;JSy& zbq~|m8Gdr!o}&oGzK&ZG=mB*0B4y|!fFE+e9c)eel*D>yJnb)Hr zxRQ>(rK7nH-EvD)-_q1vXCS#`uP@oFuW}9DG}D}tHehOqm#C(6q8urRr-}}(0K}=0 zWfNm_(9&%-f?Qa9KW5D+plRDC0Tw7_Aqr`bspH`Vy$c7_Myf*(Ri6F$27#QF+*c#V(gH3^+VJ2 z&`jr(4~^D|(dq}Jo3s+%?RcjejF5jgrMO&xqHR}0_0a} 
z%$N^)f1dqZyb6mN`3H*{*dclWl)Ggvyiu>Liip50RO2~lA`lXBRzr9Ww&WktC!g&e*E|Dzs9Q|PS=m#Hw4<&$Q$82E~^ESV+KuXj$ zzp+p{);Rr6$6shO3rwl&W~mDjNJ_~;U7#jshMn~f898sj)O2QR<;bMwJ!WfHq@O&eOx=B$NfR=_%sgI=Zcn6&k~K?N>~3S2}m()@uqDYPAz z&3@2gta9TVtOa+u39FEaHEhhMV)0=Q@t_YI?z7zWp}pmpXTgEr+@6=nFMUnqBEdN^MPXFnLU#?FoC{*- za?ozkQVuo;Hh4&0(a3rsH+2&BuIR{ma~&LVk}#SxM7+5U7K@PUN$^T@r}Zls=OP(d zciQE(=5a_@1d)Ob5TgW=yIx_u_MW?6;lK7CV@E?Mu58(`xywDgwS0R+IqD5|i9n_# zu_-~7@=vu8p&eM^l9ejtk{0(7c1HMgbC~t`lhK>9-q}rPO}@_V>*cv~hHKMOb-EBL z!ct9|a6mk(!~as6aL}X)hixWeus%&#&o_wwoO&9ZpM41sR}a1D2Y&P(?Tne7`c9>s zUi^+XUa6f-vs3!DcnT*<#Mg&7A(SIMPyP7!^f)T?z{?MP{5?PO{ENQ)zSWn1tr#dV zr@U$Z-s%ksNFYX-r)6K+H>N`23#Kc`%z;|{h8$RTqg8Vp;G0x{m-LZlATxR4o^o*C zd6^7ubN&vL8^^A-ANEx600y>f3d%dWm1*-S6-h<_@G09Fu5`IDKjkP#Hz#ZOKlczsL(JNzzxY z0??J%RJcJpoE42L7qNRvBJ!c)IHa5f&X_zq4uWUaLI|dqE>UfwS#k)9=sZ%HnYuGh z)R)fHRSIt3#qqj}Ky~oRg_4?}T7`r(qHogVV>Exg+jkbm&6=obO`A zl=qyUKE887y*wmcfHYjVuN=l)!k}(u%-g(q+S3YXLiFhw=Z*&?S!NfO8++%qna{4& zEKX3<>zcEmaXk3b#F-?S=u=Y~8h|Dn zrd-zE4d5F9_`$>yp1$sdHNjR3gR-sZN+*g^v5dWa1HL*FLI@a<-;6SF=1HclQU}U3 z34a?9$q*%do+g3nscP}5%AAyGKX;xYJo|Po1=V~wgnZlU&ovRm?5D@0P{E;!9cEZG z7{ZUdp?wgM;&?aZAb#Yv?NzQx@{;-%3)EsR4eocGy;WreVk7--B`AF~fw5&oVPe!3 z@(dDb0N+J?P4-TPR?|rOV2eyvXgNo`Bu&d1VGx+@ro>Q(_}b?OC$aC^K;spDLuFa4^q~>>u2~P2CTKP3i%FcFZ`z z7Z4?$d_1EK zv=~L2Y*XL-eO!YUkCy4SD-G?HftD*hS}vv0YH1{Xd;N?tvi}(p>NP(20zQP#L%16W z5qwD($K^5k-z*6ejdYTo6m#)(9dx1G9%It~9M5?bdh!ZIBimP^=O51=CQ@m6`S|ka zZCu@fHOvAYHJdPVGmTE;UM5;{=Cpii0;ZrPAsMvfQbTm{4{u%xG4vjZ5q*Fab}{8) z79EHOw)`HK(tGewJvhFI1IlSlXNn=n$oXx)tlnEce#cdhm&gA2@~V_Z547Ynwj+b+ zN6Rblp=Gua9sLi6HW@n3m*YvSG-88epv7JDAU57S%m%7-PM-rgwmXJ0%9n6>3hNIX zBxom3RaPBmH|D!{7?+Aq790t~!4Ix$o#_sxW7;ek9=?kmYq+z;{ujD&Pkp1GZqOFx zJ$f|P6^!yOBfD!iHdmV?mdGmedW>^6+w1i-x6B5=E*xLc9icK(nN)~J^{Wq{MHH)6 z&)?g-Ueh{Mdf!g2|hlMDBF_8B>Y+YtP&!i6g$&Ri&wqJ90IKKqlDmH z>|KcgWnYndM*N>P(PYU>krMJ~(C1JD@WR(>>{0sHs~CKi(18=wqGOQIQ7cz5fcSx2 zj8!QSzL4|-LXw>YsTXhuv)=;m1Pb4%yWt@tFEl|-4cZpJWxfr@*;5=EvXuQ!^etMa z&;;!!g)%D&7p78f|9aduBR 
zx(|!0oq)Ay)}qN_U^L4my}Bq5UsztjEO9)z(tr0sHH&C76InmH(x%je)=Ij~A-n#q z?YaAu8q~xYT21LO$*Its5DBgny?dTK5ULqypgXf1TtNT0`R&;$?Xb!Dj)m|!PaD#r z$~Mt-8j*ZOcfw*QES-Y>m(*z|?<1!Us|`T{j@i0pX}r zDj;w=Wevhn?IYiS;EG;0^r`Qn^H=Ty2&=^ELDe`$hY{lnL9sBRia-rrcC+jr2!m63 zQS7Gv;YmORhZn?*c8!Kg^?(9Z-zi;QzsXoPsE*z~PR&+KnxT=!P7eCm5t8GigMsogCXG{PY0c#W0;H*!;gAW30xMj-7 z5qVw^0*n=w(~4J!R?RDu`x;*9K`3lnO^Wb>?kOkc75Pb;78bAY?q+-4o`^|ATYQ$j zcOEYbGO$xLh7Ne$zj%fIo30xW#4EBvBZIJ6g8+Ta4IL1AU0zY-dJ#b6eIk#5q*Zmm zLHJ#h8NDUd8hQ)E6-*Q8;VjyHnh6Riv4w&<1av|qo`4Hf`w?qE6y?DnqHQ{RV@u+> zBrVe8dAvYRrvnAJ=PuES?{yxO&~JeLKDZUTPo$ivBjFL83_UFuLTuq?;f@QqmL4Gw zuJqr1P|Z@pT_aca?iVY)C#hoQT&}|HV(x;+awP z7Ul|`G|-05Z04-(oY(#7+(#1CgVq)OHtULd(hZ}HzS7BD&vpF|GEMD40g90R(4*i^ zAw4XKBM%6ctLvY}O9QzNT}WnmQUB>72rNR=t`@&bgDO$t;kxhWqwevy-J^P0?SI_% z@6+rgXE;KdPXPj;EKbrq4&W$I3Q*E+qwSubsMjR%n+Ju_C4C6@+_AmE)7%ir84=zY z_nx}pKpU1GYNr7OS^DSdUA9ovcKvCv+E;N@{aX;oyB=Q$ud8GGkj54xHxBiEm|h+F zRhk8?em!*L>AGy7U&5Pz4fX54*1tyj^{4f(u}(o2vGPC`M1cnt^C3`m0xxl1KtHKI zXuvtA&>K7ttDmAV1~3qTVPEr~n8eH2iXQrSSbe^#sIk4bqkF~U*1s?+S+88@Dwtv6 z+gM04Neja8yJ9IKZ91=E;>Dy{eT8RuYGQ&$)AZdC2}-DMRF2p)2q$DqMj~a!SB+Fa zN@hyv$m>$X@LxgG>W=IPlK3v|vJbA{1ZHgrhN|h}&0=w7Kjr570UZq}h|CWtCC)40 zt7U>n0JTJoX|8= zZ;M`~e6F5ASI98mErkoym%UwdTAUPr_w6@Ru|w_;I*e}B26(7W!Vu-i4mBY)#{9Z66L2si)%BAV4i_ikp+;Tb2GFDgVGbD`U!9- z1pwQPAO%KmBDPa&ypQ^JHIyx6-MC+E-?9Q#0#dMRpFw6qdw>K+^$^UGfYNqM4NOW( z1C#k!hmi>;J~gKRCg5AY1$=n=e#2jP%E5BxsV6ElGSO zx3o6tFiiJ2hx{4|3$LOj;5`U(6Q>hYy~Qu6ovUOO=G(emzt3{D|9q6S!b#V%epnAm z^-v%Lzp~CaT~NZ7y(;C4@oii&pNV3?J4=J=1V20tL(b$SClJI zO9lEcdyjtcBq4()f6+tP>M++rO5PaJ$`4YAki2H2_iDio2(519-G6wfIe8TFRp!M=Gpc zmwo$QOT8y%=k)6-nbc=pM3~6phN}0zj{7O|Av)OJz>&2JPT@ zIFNSeP`M9Xn^phh7CZykWJ2lQX-P96f zc#7t4KCTI3ITrw>a$ibn^q%UXN4Kapw#WO#94oTZLv}Fx?!1*9$x1Cd)H+(0TG=TY z*%`*r1JwZWruSxFoLN6p?UA3X(D&qFLVAnp`f^qR5>{s_^CvOMAh7{>bbQc6l(vQJH+Og~Et z=IB<{)T5?_0x{Ep4Ic2)3RFGvkTxZjCYM0_fn6vC&L!(IM~(QP`S5hdAwGN`@k%l; zVTjPOxu~qx(39T0P$xWw1j)67wnt+rqjx=_4}}s%Yvk%By7Zf26ua+X4ydfNOEIws~d1-9`D-< 
zb>Fugy5#E?pmnc@99^P!i_x05Lz*t}vW02Q%OO*jc-bPg=H-yAOT4VYy*2Mzv@Y?g zglf&JmaR*?im$omO-a`yy%@qJUTH}@`=4{o6rj~}IJkGA%r4xsEL6xL<$7L;l)rc_ znU+H@R0zdWRP)ZRC%jsDBR#LCdEwma^ny|)^$R!jw2YJv@(c8F&$8dV(03Pmoj0hl zLz-bsNnPr&s)599+uuf?m1rz=A)D-y2}7^~GG1IUdu=cFA3^9&Cqi`?Ja%b>2;5DAAYknH@XW`N(U*UIZB zNY&kXoV_Cm9P3M@r*x@pIef z=bCy^Zm$!@{!JbV)2N_IueQmzZ+poR$Yrp-vd3N^nkl3D>O0RE5 z-1lg^f2v_>dZxRpz-w&21w{;Qy@GspYZ@Z`F+?Bwp^@yD4FeQvaI@OD^|X$od#<4 zzzHj4z%@E~4JF|zuWFO88i(N?a828uAz)0-{hDMV7@1$cu3wr68qEG!LZTNRncfoZ znop2?#I8&HWxkJ%^?Q|#RXCI@qrJk(aaE?FZv0s3cUPGC;8SHhwZhN!Q`HJf`>NE8 zbR$cyIBl20=-Cm784UL^oWZ_a>)BRwt>@p1<~L^}u0HSt|85)kC|iHWOYags+o|nd zWcY+&F$lH8YZ)Y$`$myEG3<>(#ul%wZM=hI6{50Vxt8JHnN%oy9MU%2KYq z>oTnfF!!3((DtC@lnf!o;AkClzuZ433tut6i>v?K|ODT225nk7_uafMYLZ{BZQvW zDXqo+qj$Xd=f2qW{nfFZ{Z|$Tu8a>{d8R<>lM#Z*BgZha=UA-?tDzVgL~O%G+j$`> za0}juk-8E&#*2xilqDndx=mD{ep?kQGMTXNhU>cMRM=&9NL zb|Alq8A@W2bP=&rm?Ej3bT~EOKoQynHd;0_+r+cHHUKUJun-Qwx>y73NnkC{0br%| zphunn*31@ZM&%jqy@LZn!}NT=Mi5AuKZ4;D6r!PciZ$s6AckMM5NmSSizNi%BSYNO z3mf7U6z_+4_Fh@7R!bGDS_fLK)&=Q3msZ*kUZ_?yefHdwaZ7x;@uT)JNQ%?PpwE^{nRSA`Kq{$ zJwm7-5VtJ}q`fo_8aXCm8rr;Y6_1HeyjHP;Av0^uT1rsZ4rbL}89LC*qpOF?+qX04 zJw$u%DOZt~w1#K#zTw48oXAwWGb$m;DJz6cP1asSQS#M8rydOEOgw674g_=Nt?&b} zfjteUIyuBDi0x-FZ~e(!&#hNA01)MtCLa>1c3o z>suh^?0Q3ShHw*mmgF#zMCyzXM4LVRf3psU@N0fU*|y@OE@<(*yohZu#7j7zX{!o3 zP<~zOo`Ar?z@x^NbU$!M=o`p#G;=WYWwf>XZ`=bee+M}KKz1PP;WK*lFW&dFKh4%icao)bCXrs3jq;=323VadlM^ z{qBk+9(<}S{nUyi)=#ZUqOW?Gl0@8N$G#UIuZSI%(oVURj`+oSI=Njw8@q8oetC+= zyT*wW&gcYW^_OmLjH4`8MADi4x2WmvBWu3{D$j<*O=#02I^g@&*1K{_0CP9A>nG2W zwE(Ma0?81pRS`~Z>_(ZH8F)@2ywM4^abh?0X2Y}>4YtwEfhwbm5ggl`WIb$Mmi00_ z)3(L^R6_ZvC9EC}n3Tc0*r;0PTqnPX#bI!bYO@^`?=r5{z{hqF{lu>Pfp3# zEJ!z(kfJ1UOq45r-9Mz6lV-y3xFYTgFt5(+!!)t|op zTxZ$^z#Oq(=v=KxdgPbnGM;E$OZJVAbcCc%t6JlA`|_Aa(1ccC%4u;WQbjrz|3(DRERA>^=MUEp-?L)8yO=(^S=!akxywti zJSw11%-}&BbOzNWDX(7M@p8Sg=%ihV;8iz*-FY`K*cVa^o6Dg9}Ch zcXN0MCz=pp}3Xb3(vGylQT|&^OWWi{4j9eEk9iPsq7K<&_Zx- z8qjl=Da5N-3g<21VqM`q%kg??kfwPvRYHu< 
zM`T&s)m#RV70XJfROL<(rtHriF4ji_HNCJW)d${qu4}c@Y*^Q7)%0AG7XjUKdX?B3 z{+g;?tK|v}@Wi4syEIb5Agfa%0L`pK%H9+6vFE<5yFx99v$mX0#%8!1^8zLDwf*C3 zvVT_rge}CUXHE}}**B-RCQ|^1HCE%|VTRmuj1_4PRAo%|2`@N1wuZ{V4BPwyY&+b& z2_1^b0Fw>AoK3UxY_YWBwtNE?MLdtB{JE_x$9JgU%#H%>Fpp8~<#xr@0wGD3lfIw; zPNEEW=z`3J4HnE;q2#$2*`~X_KQ2a*Hq3#kZ9tg4I(PC?U1KN(^St#=9`$4fgl~|F zHoz)huF3AsN`}d1fW})rE7`cAk2otSe0Z+D<O7E+k-tLW-JZ zi3U_RnG^`wPuc?D{)7k8Qj%}NNy>dnCiaOJ#Cw3<8X?}nzh%CX7$<7FmHG-T^Nhqe zANvyHxt0#%)~6hJB*t@X=g38HmW0wlr5bU$hRoPae3NzDml)5Vitsuyt{L2zN!RER zn3NRXwi2F40UxYU6Z=tTGUU8QwAxbHZC{3D+Sxjk@R;G| zhRQKY{$+qMO5}_cmzcwnzmhwpW~De8XkM*PDMrqx^*iNty2aWz`D>6)wCh=?6KAup zaygZV3kxkZbtEM$c2cZr{A(VmzUUbxC>q(2t~L*yx42K?p^XbSUAesmw;3i(K16V;SWSG03r4hB+7o2oNj zr{5JqN#M8L@xj;QHCe~xq$J{MDlPe&9UzK}H_!;B;NRC60Kn4C@W@UG41n>GGy@Mv^DVbe0kjB9YmFVFeS2i+nCUTIiN>q7)s6vkkB)xW9)P_>> zR98OqkuUXO7=*O4*ag2)(x5vQoUl?B8T8}p)iAJzT-DFa^)y?qFpPL29uT!$upx2~ ztZnCoh@-NZWl_nc4Ng%{d7aJHH%{pB_?XHcx2 zy^E_7srp@_4ZXWEXK?VTGM-wQGpL_hoip%N+BeU8qJuy^cB;=K;7a#~`}UOee%Y+9 zAq&~vQ#N+jZmxUMwcU-oxsI%*J!RAW-OU|sROoY-^5{4L_D{5xG0F12=Ym~kpZ0KX z-gJcG>ix8jdvnK7#IjEON$&&P+%qJxB$Rt;lj$@ntROkq*j|>KQ8`e*$vuZB;va=> z!81}C3g5&^m%%xSYnW;7Y%;5ajwbvVP=EH(;YxO}4euL``)J~$lufi+ z`*`HrsR%C{KTq|Y;>;ZRPz22?PDM>xl)~3KYX}v?<9p0=OrJ_3;?%)XNVz=Pae%Oi zHTrCZAg7g;Vz{knr4b>LyjP7T4(MeKCL>KTRk%DCNu`NUF>|^ul_uFnXUDv#llyDR zB0=y%J5tmcLp(d&WMR5?WKmRM5F+$QZmbkta&WFv*+cw}OreSQNz!H?(7?3@XkA?e z8W{J1odrr|$yNYY_PO@VF6MvV3j-qAbYlaPtV}}@lk+me+BO$e=Cjuy?j7#rN(a}Z zTxqa6pc2d2=OHDXv3}a$Sr{gdN5>b+c3=~w68K0S(It}0qTWUGtl`qG%bdF$7@6Le zIvDKBvkoqvXBF4)o6GY|UMfW+v4{OiF}Oqod#_tFapTCj-mg%ht<+vJ7K8gF}_J+u z$!A%VR(ap*xh3tkq1`O?(|vE1V_1i#Q~{!)x`<<&%59%p!CF*aIYYa>iUnJ28qcc! 
zGs|hT2jA80aBy0*b4cx^l_&k}Luv<0%yMvc=Z|*ubIi$Rv=e}cf7^DpqMgH?cE;O> z)s9xGB?t_Ve@Dah2v=H4BRkS*X}*0#Ey{#1``v3*o6$?|sx=iiqe z-4&gdj&5I}mh35XhZh$vJ-)|v+B&xVIJc#>jEqc3#;ah4(HF)+J$y=YLyL7D39L^$ zZC$PX=1u*ci6^I6@1@tL9Q<_!oBJJM2Qs!p-HAWRT*1=OV6W3)={bBUPMxjIB4`D* z+M|xOtsL*=U3(=1_wL}?P|nT9hd9YpT+7g5u$uKu8y>0VY zl)J1rxCfsqFH#g^*;y%u^t(_3D8c`NA#EQ*$kMq^jCbKvU32&rsg-)Ov_db$(+CFV zFok|@2<=!}Gz9~1(HciIpcxVhg;CLby9b#G9i}RvlAY3#a3GT1r=up|P&H<&X|;aF zO#|Op(cD-f-nlr6i^yx|qS9A9DcS2>Rl0+#lB3R5MSonCOmwa~a%59#Etu_GR*@AC zn^Wt{P{^DioDe|v1hg0E`qe>3Utge{DUYJ8!`Ae6{ z0Wg1M4iR}MNC%GH!>{66T_%@|VN)ZQY@rI|n$3-h#fucCBbm4dd&GFLXBM;E?Zo46 zD0==}J4Y3p73Q^{B%tSt&J`8$-M#`0p*dmCxNEjB3LBaBX$l+VFiA#Yv$qJ!B{H7x zA(x>*IusADuPOnxX^H>A{(b8iP1Vl@ZMGz1zJvKo=wDs5o0nyfD7z9Lvp8-L@#@ZlY5WqxJ9R!e2Ytec+K>$_@55IQ#Q+Sb_nvLYt zls%t7O8!8B7!Ff-;}H-<MoC0`nKUrMpSBfo@~+eT+B)2-t($HHvG+Qd9t_y`)i0%fMG z_QyL)$&k%Z*w@56DdXd;O%s%S8si-*)jQFAihc;XR2UWmiI^TM6O~bNdz&cevjGsE zAP(Z?1VHaxw7a#xTjf>}0hPm33X1k9H~Rvst@g4H>|GVWe#4~ZqNElYQj5@^?XDKzlndzv6GqQ z%sY13pOe-&5EAJ6wl-(yX%4v~xFy;rAmDl(5a1+_Qj=#u)4Us+c(ye|8BO%)=3DVrUYd|*SsTPo*l^x#dIGSm%4LMa)a7Wc)rNDj?S z8A&l|YaN+2Hxh5={6mpg5usyR=DgQtA!6^kOJyOf94{j&@7Bnb3+2SL924i5RzQf< zk%!=FB;`pZq@=t~e+6Z_;DA;K^>R=j7su66o%xXKg9AJ{kC97r9cH8Om)Zj^ad=HR z)fb+VG+^kHXn2B@`Vj3?Cm>*$a!LZg=Sr12Qo_fPE#9i&&qo9LqUFJ88(jo#BSoJa z8GbN%16QYMcQtw|fS+0%p_z+XREVE%)f-HYRacTdr-rCK{Hv zBab2sr&d4@HbrTkUZ9k+3-a2+E8k$n$c*U>tYta`Ok+|u*)9-M4m)JoCuGm2?A8nO z^A*z5(G*H=Y;UG9=o~Pq8sK2U_3a{8f(^)lo@||Z>!dW4`!3KZl@oT_&4W(V0syAN zyL~dW-Ud`>6R?6f3F`$#Ra`u~hT>gPBlseG5zr0$9d*CJg< zrP@m7N59*&kLU?xV3K5XT@bXbNzrIb1%`!IRJk@@7QY#cMiR4Tk!K$EV6>Y63XzE+b`V%(B-?`xHmn!CFvcbb>;-{s z@Ddi3T4QW5;0g}d!Lj!9{hr_b)BXC5MwV=1B9HaD``+K*bI$La^E>DK&M&870cP)( zMNO@mh6SKb=Tcvt;f(C+1Epz#~da4@S<1=gEA2Y2(a24Y1Ro*1K z;(_d^Wzo&(pe5I>B|hx%rlIi0@SWZWNv##Gg0cf!kg9`PLn7knwz=a)JcW8n9zl-r|TE*iz{>W?+K) zgs*>J*AYh(Qoi?D2-Uor-G-V_e89eNUCSM&pZhUj&_r}k>+(a>1bPD(|4EM3O{`9d z;LIaZ;6S;r9)=?Y_Lck9$o_?KOlV~PTqENMe{fL1{_;TGh<`_;GqQBb72b?wAB~`T 
zcSbUa*ayc09CXii?)mo>Zgh5Fp^^PGa$v5JgAw=;kpNe66=(u$Jm6ALP3Ve+W)9HI z6?4rTil~PO0=S~QQq43j%W1S5W+Ai#w+3)gX2g6uQ#vw5%Tt%r%x%=7x|X{)p_m##=G0Cm4iu>kv*ngL~IZdHc8Vgcq$m3zbj*krvc7C`YU zQUr4-0k!ae;2SiHNoW5h%z!5OHWt8(h?w7L21F<2J|KFT7Qh30SpfG-SC^E|k~xnB z(lKRSmIaVDI#n*q0=S>MF0WO{&wKGPu(ug!cGCbV?!escno2vs0RVO}>+$K#gj=Hf{3`?+juuoazWQTR0+ z0nS`ZzE?3BIvA0Df1iGjKKCUZgoly5?ARffCh^g8N2_ZO&X3wK$IzJ>`$jQG4LZRf zwg%pzr0pdIdFku4()^mDs_CNH-P&*Rt0_AAqrb^hcgCvDT!(YvHoG^8wy4fl?!x5j zq#cBlbD#!NVMNdPw(!dwDeZWoXM7S0D~i2sle($zz3qJJCib>ZA$3y$ zu(1=R)JxjqIBm-+Fo3l=Yp|Tc_bFa$6-V-S!h*^n8 ztf`v7ptiwgVUTZau-Ip%=k>dKo@}sMiE({zbGn%ukb7F0a!PR6bDv#FCmixvlfF%v zR^Jwl?`{SqT&{D!oOhl74WP1#ydR;qJiX-t4X)QTxMbCQ8K|qK4BvYclyNC+F2yI* zr+LWTLX?K6qkkgkjt2G9Q6;M<%!{~O8nT?~&hN8#Psgq@-EgJprZ}dFEZjiV!}jkE z^$DAkyFP*{U?J5G9+u`>ZC5%gzj5vu5>}w<6y>t;BwFZ7@3Y zTPI|0#)F#%gbbDbLYS3rnCGY5l9O@^H9mjOUS!Vkd zJ-K?S<3qL6p1mp&VY0vDvfr{1&@dB|OjZY;%4MHXsCLk(~QXp+}`cj~F)_NyYha;qByKNKm&kj+3xA^hhCT8LMe>Y`h=&RUr$Ur+QaA zPYGn&?L1vYa=~+Pol2?`aI{b+mO8Fi?$g-K>eK^NrPmV3xi}<0j?8Gtx&D%nc>?b2 zVK@Y1(SfTmFkFv9B4zg|)HIoLxEo2lm7X>@099_D&>}*Y;Xk4h6fM}Xh@_kVJd;B; zMJ0fo9!sEb`iZg=m0(K!;yBtIEwesE%M$u?Q3+rD;f`2*MV(+CE~AJJ3K1Nk^l>7(hw~2Ay~mSYMES#c zLwCYB3~Q0_)TY>6+Ab2w*WV76boB0P+epB7aLwl+sjB#)6`AG_m|vMI30-kCzD=HA z+{m}W&nEGWXXNLP*tqAMf=vOpQm=%B`T?Rp{*`c8o7Qk$u$2`%oP5+^83Lx-(9=qj zR4a>GncQG=wN~Ur#2vEJ58ZZ9-LgITWi_ZVMXVpVTGi#Ey3FC0Y85{U#|l6Dp7sg1{S_rgi+xQYevfGy7nLtzAcnqvbN>GHV=^N&>CyCq1$}e=!Sv(M&G}>LD~tE$ z{KNb>UdL6w3p`ly=hKQmpL_!$sQP+W7kmJHTm}CbpDK7CpL}~zdxAIW89km;nG}Wj z88RfeGGUmX;=AK8KLHafM<-w_s(Vg#KcD58&G@yt{X&5DksmqNBs!!Jr?E6a<{i@( z6}(KpANrmh*4vR8r1|pT4Gmi=gd8qCU%QiIrfcQKVSRA7EA*|%@Tj|8eUltSuO#CN ztHO)~5)hA}9p{XUa<1Z%@mmc#Pb8Jg;w0HgSQrSrG$0OyElW^ywmMUnK#?GXo?xR< z_3G;0`YAHVPv?DhPEhlm)&E+TI=G}%=^m@^s9uFqP&ZQjFV*R~(zWq4ICwnVCiqp% zaxBnG#xIH1T49~QjVbY@(OT%};)@%Ld{Iwz*1zV9c)|zOqdi5CG1QY{;*h+^UU7P> z4fw$H^8W8?S}R%iga{E85pV3% zjaceRbOK9tMAsbtG@N_t(|k*@$zg#SxPi?=*kmEsPXiI6thVL7(hbd*O4wwdT(?DT 
zR47BG<_}`{Df=0QOI%e^UroccJSz>SsXjVvzccYSc%9vxe_BiGcVG9mpFVSQ{FMi~kpP!$jO#FV}7v6I9^NWv02ovn)7I@Ve?VrE!#n61VyBIa~V9c-*43Jxyc^f@Qsm! z@;BMG2pfP}mG=v`=f=(XjtX4lB0SMA1ABBhBX(6=)akyzJUT%W->oLD;Tdm_ zQk8`AW?>YRaEls96!&fDQR(&3Z)y#Q^31|2mD2z|!Hv@k;}my#IL5D;)T7Z{Xdk@X zpXS3zZr5-gUYTvM>AIbHY*g=cT(1^@mLx}O7Ht(?k{eo*8%xw~lQd^@{Mx%Nt-ogV z&Bg&B80p(JM6a5$ZQgHZWc8JY)JEF89r?x5saLM!s#EWJ>?IWD3LUOt8Fk~TZ;vD= zw~I2I+G|zet+D-bWV2d=maOBFz*qIzHl^FhJhDD|$@;c(|7$u7=wTA0g_a4_-os(% z1#Q5dp|v;Wi>1`zeNtlRJ1MS4qNn>>k zqN`$ouAVZh54uW{UDj|sxc;;ZZjZ590Yuw^GH7@S1B`~+=R{R6hJk048YY{VlMy9GmJ@9JJ zR@3gl?u(lqjmR&|37%alo4uENo^o>32LY%WMYv%AZHGOz-e2ShXcv2ue<>K}yvla5 zFT$8-PpNmRu5=g@g`~jstiaK=En`c@F4Jp0xQn|^XoV-KJ9e4Dn*MDi6K`YRt_;$= ze3G$S@V1w1r`~1uv=J=SO^CCdi3H0#R<^H|iJ~|9-UHcM?c{vN(lexsrdXew#1DiH zDnjI2oD-d`*=|2uf6)}OlvPrvEQevX1z<}~=|m}P?YOJh(5!R5YIL|hTIwo`-XdTE z@2?jz04mqCU+tk!gP`PRunH=3k^KoX?4sJlvQ?JtX!`^?>qK$3cZ_wQO{-U%Yn&3v zr@-A;scCK1=K5r*E%t^LO2oMC<=Scag|wXe0IKFf^QiMqN+g4>pd*;~l9s7&L zt&%A}`RR7kE^}-N)%QAF#-U8C7odUvHf^j}b|jMs@iD{@E|-CYR1#n ze5W=*0#SPX+wT|I3oZS6&;Jm-vcnO<0z*%|+Xco=!I#C~(jLcr$+ftzxq# z6WMKvu+$mMNj#IM;elCi=BKrs?VP8+D4-?A6V%?YMyc zsQYp~JUA8ZHF?aYBekvt#4wnR9WoJ?$KC?BlfV$)j5tlk?uxu(RoO|g&L_)UJ<5Kx zx08Y%VrM)Cs*-$Ayb)k>Ua~_oIbWVJE?55(AKsdCQs}A7GPNLE8HrJiY zc(}V%S-RAmjjb*k80}9XA=N9?VipQFZ|}N2eQ^WKk7gt1?W43j{OExojTvLNqtE$n%TS*Cuxz`-J#Uo%1cnZ)hjgC zC}q>eJmEV@r1fuZUk_PQ(F-PY#qif??1$Bh8xsxaJl#^A*EDQ}4 z>q{LzZiXL5#mgQ($k;Xf&JvDYW|*o|(37GE;g8*Tv<7D&BXX7Vg@w432J{>R2MuVo zCNGj`AT<=UkkQDK(E!mQUlBB5_VqV{22fJKGa+V1BI&_nh+M zLO7Hj@L05}?JN?S>mERg9pv-OY<%3eUhJHoArR%(DC*lU#^*8jCuLQ+jJPT!HJD2+ z)bjeDs~RR+ ze=)hJca0R@n>BgDyLl2dTd~C^O}d7*qq{Xw2MKwXAO00M3jc-cEeHJ`^agfF*YH}Y8N;eL6$$4VJ8WvZDijMatnNKE-J(#zf13sI z0{Botvth$GEI}xJ(&OZkOG%3LjN@S!U;~avhg?*7w$)E88 zz5zjyWt1S5KV<^niI|9UgZARJsREcYJ`&~rOOYHKBWOl1FqEj5fUs^EyixV!1#4=& zp{+6Z{DV^KDu9{Vk@_va?aU?9lG+QTO-ikiA6#75}R-_1-E^4CIS6-qqd8k8fS zhBeN`hT`V*{tlo<(EwXsdfg7vxeGmRltDv;@vAAZ(plKys|@UO>=mt*l#mIyB#V 
zcE+}A3&*qA!pUIVD(ldHy?U*d7hogbsy|)afv8nNlmE+twS$GWz#!UtwCTfjX|7O?8IQP$)eLF^~a2* z8J%3nBKx`(4UX$*OGXFS*+9{US%5 z&uRC!i6rn`pJjRHL+VviY+AhIjgsPZ}+K4iRqH>K97*B99df72xBy?qAEMh z-wVf!GqV+6qsxxETsaS8)*1uKe0ed~l)#$Z62~bMQ^foYC+`POW0x{HFP1Yy63_x@ zt(X&n?qNNx8DCx3g%(*(YAtHBUYgnr1?ul|ClS<*+p)PR`@|p<+xB{9Edes9KBRkd zRVDn+pLjcO*g7Y_^QXRxpVf7j<9D9CO$-x$(=<=4qnkIsoNHcc{xi`-Ne`c50@v=E z0Os(zhhKc^bNH#1HqMij_k(t4I7c7ESCyg=;~T)6&VKG8KsLD`V2JlV@kH_Bs(%lO z-jK_8fvI6NInAG)9d4$>DYKK`;oEhHG3nohu%gsy-A%JnO_~>Zyy&MFeZ6qeJUz)? zOx!b1R!=J>W4aN9J|sF39xLv?9KP@#@~>rPm>$8-r1O5L==vcA`XfotLl!)QOCGvo z1FaBoO2BB(5=cK@ywR5todYYU6+8pwlZydSbq51u0D5E7B{ghL`I3?xNI?loOKsgt zO_ps9m;Z<+)<$%$5K2mK?tvEBD2h@OJFb`vSE1x_$MkeuN`ThC*1%)?3w0;{@yz-T zA$&MVenli84+k0ds;vE~)2VsaGSKjRrHnuU~DH1hjF6 zLwXMJy3)NdoV$9hcryU9Dl&wo0d6%ZQv=f;b(dF@NlHU?HOK)LHqTk)t3-oCse}K> zwmA7!4Y6FCt`Rx6DgyBhNAui>w*YvtEvT~8X*kxwLNBZ$JzvV#>OB7C>gbctpbRjz zJVSDTf!CxNQa~5>GFfC8i>nm`qEg-3+ZG?P@NivokZ=JaD%Nn>XPI~{ye1IM8um_d(<@WE0(0ufqUdc+A%tcR)2sl<8-{Z)@#XQS&#jUM} zg9stuC!3Z|_62^nc|+7zlhZHrPN3?ZCzV>ylAg;O;sMl^e24_g0F#T86K#9-tMLqb zn>>--TSIIZG&%Z0$c||cNHMA#ddLsXH`YGiIIIHg!u{xt(`6CDiggw($%>3syozL9 z2}ISQZH>M^*%1Otqrl99Ttqvi?CYg&R|QvG_~YNB(QmJ|IWXicA0+Li4frRfT0VD$ z#`@rrt(c_5X25VN#qawVlNK_dM=zoi4-H zN!j$6JAK(kwZuk$Y2VtD`4?Ib4fMZHc>hRMS15Xwpyna5FCuZHed@-*UF z@X!d+SQCqYNFES#0C5mvy;H-;6oexIKZYknSrmt9S( z*i(evkUXZaJgTbE4qx}r2(S;kyv#eCZxruh$ekGGCo!A5@NWBLZ9ekrCEkX16>l*_ zq#cR<>@2zLRHsXvCEauUbKVBYx>~Io?br4pfo0<8a6MuW0jc(pbVUO%J^F#HO?As| zykBoWg|0)gMdW;3m-n7uNFwu)$n=~)tW`+_Kf*}NR(YobORb_UB7cfCSKBw`C2sH+^2`UM5C1P-*(=9wWimw87wlg z5L-b~XV3}zOHLGzst?Vl401xsUiJSybgtR^Y7$Z(x)Va5UDQA{QH^ zIz4ZEbo@9tTUo%a?7+0&)A=Q4C`Z67ZJQEy9htGpHVW+Wr@^k=1gJ(F$Wly)?8P!a z8M_#V>8!8Ag~l!`O1rtF3{_{RYsqRLoT^9dv3bf*3?P#?i+7{VITgTMRc%yQk?bKq zEF9^^ZMKw9s?@7p09hi~4G>!spbi0~p$-qE>iu^abi()~83W8rM4E-vU@>k+sEFZ| z!l02nP$HLo^TH>vY~q9Q(Sfwbqxq3UL9(EuOhC2J{5w0O$M_p$|3zmlK*vPM*z~|k zGcE)s;u~a8w8y(v#kAE`fPF_5)7F{Os8+_9_L@cDAQ(pmeylSl29U|aRgD>%V6xnL!~+_&5ob+LdB{jX 
zE+Yc*3GfS+07w$t*5T1r++86W5vDEFSDF)2dkA%-*_qUZx7|X$Bgf0N(U0ycG8kvG zSdr!{wVAGjZB@T9kF9>aG~YJ2G?y3E?qz|QF^zs=HHoXtj=j0e-~D5=)gMX^9u<1>0@e+8c9qz%!XUb|2 zNq=$3qNeED9_ATfl_O0E+U-aqio15C0e=}JS%if4GMd{kDO$2B>{Z4c_Q*9%(+?a z_jQ)L0Df>b2HRF{K(F&K!?;YYBM^~=Lpg`4>a-pkI&u%e@=BbQe#iWS)lGiMLd_9M zk{l$mR)E9=G)#0|)oij95>X~Wf{UvXBy`0%kkEsG zMBQ8i2^fRmAbnp`+!9OKlqlVVn;>6VA0i9gj3q<3+=yzC5CK@J-$^K z9hU0I5p>Zyx`hOHkW`}Ccp*|*zvm;B$3!ZR;SCj&`_g?=DqF zDGX?Jy(3Lq^O4G~>#57p%?0AVWd)F#i*liuCVc<-bG6DGEXddOjH})IWsMZb1{VdC zB8FW@2=ugjgutz_H260no@bPf5OkLW&mtV5;iT#A?1ud~RWE+@IyVfbVdnK9X<^Og z`Bzr)B6@+?zk+2!I$&Si>Yx62Gg)TiGHkoF9pl1mq*R}Rh1L$0glBxu(3Sj+TzW0M z`?of8u}zcTNGdzJ8(OkVp#4-qAJQUqi##+~=>Q#Gx~)+ljdH;R(e&D$+)|87e@QDs zUPd+(+S+{n&CSrYLU3D3S)$9jyp+Z5&3-S-3vQ?4M)+6G`qk-^IQTWJ+Q~tddmg{> zZ09+1@OIs>7UN=S1w zc&US+NZu0}VQ3Zh6(p$LIAe1(^m5WB@BvtCcl> z$Klb#d23E9Pt5Yz>9FdGgmsiz*gayUj#8QNLjgLyJLp7IlqFXq)OfdUQh~uRIMHLd zovd?($+HO#$sJo^e_1zrBYzr!74(*xpYT<&C%b|}Wb&(f_#(njex;wS1k)v)+)g@{G8Axb79sEB|EOaxRHn!;vNDyW~5d@Q; zmAd6d6!q5aR%EF0Q=P#^b!t09N2&*|IhN)I8SPJ7b0bocjW_y?Utw|MtfeNnR(C~l zfME7U`>7f{Ndv-E#UhC6kQGg#Y7bO>kgBscRupT2LPiDVt;r>Dcp7U77NZ0`f+dmH z1SW3-igp^q>I~_v)f?}4fjv3(Kn_;Q{k-7<;lj8BE`z4Qgjd03r306u(?AgojJ`0K zivk+PDoJR+1Z_poOpw>R=g2GS+VKySK|Y+7wx?7YH-;a8;aF$(8Y_yPNP}e>#Jf6r zb;j|`3``OOD`PCt)yCX4*l-q95x3!epa~a)1Ccsi!sVQ75Uzyx9PzFvXbFq3GXt8f zOb~DQ(KEzEQ?DlBwq9LJ`9rBEq_|!LLZ>FXRNSh%s)uZt|Hdt$cHP_^uI4OORViKMv&3Oc}I=f>E zw<}lT5oTuS7ad5a581)UOsVyPbb}C{V$<1+mp5(En~h%I&xdD=GfbfO3P-+H?|S%L zM|$uAcgOZ-k@YfE>Q6t^eaSebF2n?9qraoy zvIDM)A!*XH(fO$k)F4n%_hb|-d4eY-f>B_&N!AiH&%6HUf&7xIp(-x58cd_iqFoY; z7|SnXK}dWrE{J}Ov3eCq1)0a#lQs>;mNspS0rM2nBcKWcie=TZ{5ynRe{#@Y^Fc~4 zm~6>{sUFc%Xn4a!WBcU7Bi&bc%%2%gmof@l4L;Mu`pKOuP5PY9kJ$W1hG@GL4b)Q66T z+H#K)Nho@DZSr>cxBxX&bQX`yTkY~9m4G242{D!S2qDVvB#8+`nKT5c(jNmOAf$qRA?cAkHb&DxShRUtt7=Qa% z&#1iCy^j$sPyy?Fg%Hbj$h5|5A|VhWsp(&FfGYG~yi&lI_> zsxCI29M6;~dpht`_Z^o?r)#7BnvL3jJEhJ{f_ZK z#w^T1sobcf&6a@X*piL+Oa-uMhX$?eh8hBK4n@c|U_qsX9mz{$M$oS@!x*<`*yfCx 
z-EE2id}c6QHK}JDG{_|%o&j>tK@;Wq4U6mo?f(s?iUbxzaex#fzZ)4 zz+NJBk@=9&a7Iv5w1kOn9s}topq(B=6K%_dnV_c)NQbr&LG!e2d;#%taL;tTBi>BB zXteGuh*uVJ6Yo02%YP6rsW^4sjlX?N$OCrgEFnuiXfYA5)2c(fTPEI7hj?-Elv`&? zJ!*&->%52;(?*AQQ?cpRrHOac5-;T55^w!H5wE+{1*#%mkEbTy6%(&=#wZ79O}wmh z_8#UaW)_QzI|%TUDHTKx3t40hz^j9`2}=D*6(`G>9zlj8@T4N_EmfJ~++KTZ-96)KCCug&p z-(@FZ={I(gr`1E5dbn0J);07LScaa)99YEIe~TvmsvL|S+033(6VlEO%4;5^RAwP6 z+4NZAV6@Y=*dkRjR(m0^u6-sFifh9-$O3ZsK$Vm`83VPJxTq0(|w37PD7mJ!sC5Yt{R>mL}*^Y!yY&K?)TnrdL7nuf;_S-l)iiLGREpSvPOmAi)-LWU{W+{ZNBA?nTDpPc z9|?fBDW6K~$KmdeBb^^~en`Ufkk}e|q#2E%6d^#0zEqXFOb9qYn%=n)4xaAOLk0oicV

K^;4W9PPt4~m-+c-mypVO1r&a?N-bNZR(9n&xkE3A}69oXD7`BJb>EiEmr@drm{ov*(J=(}WJv z%5$dIFe&nb*Ns#88YaNl=ZIN2F+>CGb!p*XZ(?3DC1C&8K+L=7nB^kC>>{wsC*f#P z)ca*3+<6{lVtuZ-7Rc?8>Q#{Hp%ToRRIi#{U1LhP7Gd&0oT??IAl^$qnLekBep6k| zq^@4Zq@IjeJEYo9q+WA0pIGgZIe58!adT*vTeET&=Fp|g%a{SF+_sS?2eb|ku-<5m zMQ4?eoz6BIXxTFDwUsmCuA z9BGVyGN*N^RF&)Ga&r)k9i8==tdX!XxbaZ--r_JuIhY=7V-+h9-`FQurxe%E5UP-L zQq$Bz@T0t<+9^f4 z>vzl!9iClXTgDF{nf9YSTsb{V?ZdMxOVz(7I3GeQ$><}nwF2qf)KiZ0&Lpr4FpV_b zL7Fa4e1Zu})8fp)g=cn6NiZpx@!qx5b$O)iirt04CfK>TtC+ZQvmvg{wv`DpbXwcF z3MPuYNuW6#pgB^G%&L#5E<+b&B$9!~m4|0X(ku@%f}%X&$up=Ulb?}c;u`Jx=SFa~ zU`Zd>&<+}vt|*|vZSSk}reJ(W$}14GS6X=LTBIp=C1}%*nuRZtCA{m%W~hW3cb(v% z9LP4@>eBt?x^(|(=%N2~D;tJKuvdM^k)1bUF?{gj=XQG29Ez}-FWambKDs3lJ@(3C z_+$r-%~|%yvv+&ve0Y{g{K{MGjM-3auew(X0spGs%oM@#o7u@3yJxmabo2NhKAmo& zI>i0$(;g#becls~WGa>(lUE--GtKg2X&S$)-fTw3;VsYc1{T(mFDISC06yUH>{%j1 zDe?@fHy5%c8#eDiL)xpkpXFU2-#QS&36lClLvQ+fPg?-WSW@6(TL50Tq`)t>1(vt) zZ`%UPTlipGfFu)3y7;xW!169W+!k2g!bjTz%g69LZGq)o{9apNc?%zF3oIYQA3a%t zPqhV>kKsSG1(tX57j1#%gZyk;V0jl`XbUXw;x&k!L{ZDTxUVg+yoLMQ0#Nf3bU)A* zSl-1Cwgr~Y;)mM;%UgJ3TVQz$54HuCxA1UVVEHV5qAjo-4nNfvSl-3E+XBm5_}R9= z@>%?RTVQz?f3HF2@*Zw$-Y@Uq>zen=>;JFc#tO{6 zd*Ib9!qXBkg&C$416LSGJ8E^&v|fGNG_Ss`9GoceCt!bq5HqR`v^OcLkIVZDLMvGr zvwLu?fk1`J@y=-E@#qdTIAEb3ARzQ;y`Ew;A_&7(cKz~u*O z={-BG5fP(w7%eOd^$?Tf2?p?UvRGf^fHFq~Yh>+|-7!#2xJ!T#d_}LL59D=ln@}DU z2Ds_*%qd_}t;YZ$Rp2kbs0SsZs$8A)?^9?DNujug$q?cI$^}p!o)99O#peWO@>m?J zQA`e?VHe=CvC}@DV1>0378d{?8sIn%aM01E-1!OQLt3yARavt7HIec#+?N4)O+4ix z1AZjJ!(hiYBGERpk_*fVdKSSh*8~h=L3tIFoH10MEn32{WvIcAh)om4&f)FjH82}E z&O^}{v~SH&6bA?zib9}AAAn2OG%RsOT&Ie%>TA7n~ zYpXeVj~2k_NAt8P6Y%SQLA}5uvPXzz!+?XIXvt&T$budfrChDUsVID!*{;7(J|J;B z;>`ny-=_6meVb29Y4z*gqr@=)u|^8S%%9{Kf~)|WYz)fB*f-elUJL?Sl%sk8$}V<+{m~oYEo#S%8nRadgpYka8lp?8hg8xNU4zW=3kciz zeGpJ0j6pr~7zgC%$;D_#3X2~^`Y^@245!54DBO#KJ6iwd@O*HN3Qzi6ft9){oM@N| z#PHS7It`875MV??(5#vuNSO~rRx=f%AEFjgOeMH^O_N;L2e6bj(rP@Qe2#q zMns~xGjSFU7|O;7D!98b?Bk@SPhXfC1N|KU zS0Wj&9mU}rERI@4%J3o)OTB*31Zs-WbP?hjNkAmMVPWt_l>;w^H>m;BLz^6#s({}1 
zr?(vR<1X0lis^v>4}iijFfa`@7Yf00ufB!I%MEBny8u{u2>Z8f3)UN9_B}9 z3Cct|_#5XJsIy{7iw_C#;xV6C*2^I}FhsE)h}YO1 ztVuLe;&0G`X+Te=>l7sgHqD8!I8F-!qNG9ai`R1|%oPpbQ&fX4Yw?zQ4vCW^pV zgQgOnW;V-`rMu!43^xZ%89PreFxzdxeO+3@8q9cnE?AycVb% zP%J+&^cGZtA%g%plG~z;ML-2Ar(PR^%fMgdHX8B2k89@|=4}*qT4E1})CvA9X~L>t znF;q=V@elF2jmSE3P^0p2Nb>*)zV^Oyje+E70Ciwwx1J8)+825#8ej}mNB)IH2Jo! zoA+rgAqH7TJ!^RA+Nw@_Tr3;Srq?{Dn#ZhWnV4&8A$cCq zb={*0N`>8)btG^c!a4QQl(ixP5lgUAMGFxzD@BCeS^SI9WHB1w7>;#E@|C5nH7kkO zM|~Nv($gFBb&UXHL5SA204+pSRtKAE7l5SGniB#FtHj0CY(q5=uwXM22WDPcwq2Y10mHTEDLT# zRKHks0Um2T&7)WmZNnZ}d)PR|an(G9Y;nWrN0;ePC-evhHPBf)S~P|A5YUc>uhIB? z5Iv#vh$vAjus=D5&|picl_ag0RsqFGGf^o<8DW(Fjk?wff*G)6)F=e9@0`q^dU94% z8|~!Da2SQT)1Fs1d1!sDYVW!R)o;D!BX7Z~WF%LIPEOuw0YmFIvg(Rzd@?qEH|ICf zj`#2V$-C+@w`f47-J3K$L6}ll^wa@+R(+@`zGG1up9FdgI|3&81{Sn#*+e0p0ji>; z29d``;>3{F!ZY)sIkv#J)a|J;lPVNj=RXiAMy)aJwWDG{aCLDO)N1kskln&psPH)O zQbY88f)_UR>Nw`M!Fn{mojH)qh1@UnvQmA`sSB?|C#6DJu2dI$->Tz=vz6-#@@2jD zc<&!FRs=}7c1{`SjEu=Iw;*ytSfDLnz;wKELGhjHm{eh^QlQ}sf=rX^8&Yt}RgD#Z znrRZMuha%Kf1+lL4Ctjg>%?$Q0M}~eRHXp8>W?2e*I2qW>Z4i^8Z$GNc5C7fy^^## zvEQ5JF+!C*3!pU-s@{^I&U%F3fn5g7BC8j2`fw(xwYV>U8y4S!Pd(ZdCvpF^%=Bp$t3KqYreZ z3bX4^(U&-QuC&nN&o}h0*jOE}u%WiKfY@+ZQ1csl(19!uqzc4>R{gu)?Sww?_>e(Sw@T7gXZF&K{32U;5fUqK(?9K*V-FIL zMJP3qDT@wBFkGwO`iYhVnuX1>X8r83qWg0zT1bx--EUX4%&PT@&Y>ssYN+W-sq0Wt zooDN4egrB+`5BXoz@U-zjAfzUhs^P2;Z=bcW$0BDh#24Y-xpkHp@N)r=q`RpRfx7` zfEm4;p;&J;rS!w_Z3zKj{hNKO$7I%TEVn{RM zvesmvUtN@;;n1WHMHXH@iil^zw{5VEV;LCeC>s=KsR=YH3@1zDzeSV_qe5sEES}M) z23$JAJ^%#TF=gq^x7i9Amqlpjb*wYJ_Cp?F{cS@ivXm`p<1A4GaTMYEWzFCDi3f{9lSd^$In^Mwh-(Wpn%O!4WNQ2o zj;Rx&Y3r@Z@sYgEt??LE*p+LAUep6fq_`b!esDX8Wu-cd75(NOud1VZhUT!s@rL@_ zEzP%!CPDMe0zQ5l#8+_E_%43*UdXFQdmLhAexhl) z5i7Qscu^<6qB(&B>QfCssj;TKN+bD>f;96gV?}>rNwEfEcX10+cEzfd+7U{VerQg9 zWBqnfd-U6*^;?nNLfz`I-gW|BZ}i$cLz5HEVZld(Y;FYsU;!pYZE`J%&hT{{t>Mbv8VW_%Dz|?@0wMpb0KMYAVSsS(flS~t^*sCu2Rty zB&K3<8C_|%a+9b^hAb80jJ@3@*x*qA_ox80$8Rn3mbTuQL0in} z;ZL6Pg*!2}YeTkVlTV!kooUJGRrS$`-jnEI&RSF~(Jb|uPazj`Y3BD*et`X(^OsW5 
zqAC?*7|gzYd(-Z>lmA;pbWPbC{Yi!sj#eJ+j4smkdw^cBG)6*hB)MGfYWg6D2vHV4 z;sik`rlK}r$b*w7svA%8`}&k`c=9vqNOcY=JFI*kYAyKWLFo$y7za%Bocjr!F8~9C z+M+y2$fek2+j9L^Z?U^Z=101$(cqdqa!xfm@@|XCv&)ll7)@;QXZ3U^jAimc{YoJJ z;eJh`VdBtAuw`ao($v~T^m!$VK7|j+3d&3B?~0T=?#*GGtTCQv_vpt;wMA3{<@GdB zjkS&=d7uRE7bz@bd#O~ISDJdP<#(Dd;VI}BMr7jF&2#(M|=|uHH zV#UPEq8n18!!Gm&H;;MT(7Q3ZW4398K2)Wx&C?qp3!yv0;u0hD&Kl5Nm-m6Q2etsP z{AmwIrUxzZ1TbBsv=_i+TqwbR3}^IzhJ#wDnS4Mr1OL%xuI$Z|g^40aqJmT=AJdYq zac{4oAq!NsDwUx?w%jfWteFIOmPlZ2PZE$sXaT$?5*Xfv3Y17-O(Y=aiAaDUM0`ou zQ`CEm(mPY@q9)i2$G;0cIyqw$F#iMw(K4>=&fCsJS*bR-k*Alt3n471`z~e`;~0H1 zG9?5MfScDs3rF^j?~LN7z7U-_sofPZ}a_5JNc7o(+0en^cQ%J6Fsxup~Ti z5?u75MV)Zxr%YX(M6{WfXi*Piwt8M`9RH-1*`y0&gi zeJOR|N?Q1uf{8Nyo@J`0eXF2pP-ZnK0YO?OD{hE8`&QEt`*UDy^xtQ`Y37*(?&u#a zw)v#+MTyb*jNarA650gV8eZn1o&~4EbK7;GjCRD-la-}Qpp#FyoxWvjT5h&f~iaa2N2c@ z=`(6Zk%>`AgHEy{k(hQM9juLNuyAFN)sheym#d^y>@P!jej~H&RUd9u8%DyJ-P~YB z%oCyw{+0I`eUEK16`4IM!AlYSSPepzu7BUV@Y{NJ{gRYe{;fF9DLZynoVL3HPVS36%{D|YfQgte2ocK! z&4IHq*{^1)}~RY{*nk!^TcAs`F2GssR%!Y9M^v8KDz2V%YamBe6q{Msx_c=e0k2 z%Sw@*VT<-^#2m03HW1=!meXZkLvI0=!!9i6OWuw70$4UR`3WrZyF-F9QW2!`RWU

ZF$B3P&H z;ZJb~>Yqm|o7Y__+=YX8um9fJ`0w_5J1_air9OMTmx(Ks<42r>o9SZg%MD+e!M6?R{NMkFPVDDkbSI#ix z8mGsR=JkqLRScsl!Rp!V|M|o{#xC(2S^)JZ0)qnGa4ifCZaN6X6u>< zX;V&lD!c@}T6B%kDH<*uGzY1X@z1iEB2?hc}YnlZxh^X)s7X@n{F~QE^(k>C4VE@JH^g{^`){fw`8+8UagSOMP^) z>`jzp|5}p$$x?#F#Bp5KJJ_Ql`)k!B8i7FqF0x60f5in31J%PC)lnTyh^%bx(EiEg zIN#EY6B(fc@51?@i*xvC7uQUWIo%2~D&6@1{hV(W_7t(k+HKgn<-96k`V&?9 z_e~~>S)N8ix~G39dXi02q*Q}XA^t9paF$jUnvqtpZ8|A|PLrQ`FnV{M&Oiyj8UpYZ zy=d?4RgfQ(&_X@YlXO^Sw?&CzN;x)}D!n9n^5p;30KJeMWWzk9r_S2|8tV~Mu&_rG zpVN>C+DMZIPMy)wvbiOdk;kqEZA8VetQb@ZPh!)FcHi*F#1@6oL5E>%+jXv z8gg@M(zK28)z^w0&{xsGdRci%w?3r^gwRb6QI@LgJ7Oskoasht&-t=OVnLZ*xma4v z(r}U)+o9mgs>*NYwHtNlmP&Iol@wZ5Gdk72AiU>O)qIl4*6*W6*}Gd@-tOPg^{M0Q zO^l_b4fy_j(Uv1ZGUNYo{T_W_Gan<=o);pmy(Q3siGteL=UDmgJOxwW%gfNd`8QGb-q*VgvB|gIix3bLnB0!mqBq5hrbt=7Y@O1d7vfgs7a1y^cT7OO zrnz9*i+_Q}j@?kwzi8iA6_KS+PSSF1V~uB9^|LKK!;7VdPZaf2hQQ+yPw8z3$D~3& zjr?T4uoN_YirSJr`S*g*7-fBhvRqc=fD}gcGMI!o`4i7| z8_WqYJ>>gFQv`UG1DDYt=}2Ip)PNsjslxCLcc!?sF?|xIMEJ_~!X{!t-eg6;UVQjm!zYb996rguOd)o}tPZzA!z_wfe&P5N*=rHR z`pMz7_=!Y*eldyq7R2iegA`{L#K~czJ3T`72%lPN`fI!EM+LB{xSDF=e&zG%HX59d zrab;hhR9cij!3%yj!H`lKT2W8=Q+ou&KLRW7MZiKB``z4p^rRBrQFhpAvS%iYIzq{ zr0I5zoS2$Z_BQm4b0v8((}dC((nQGLjcOMY(Z);~dBr-JJlzRcrqCKb*S59DoXI3| zPlD0PjZ<&+g~J*>2y}rCS`Ywc*|uib!4#$#J=qIcqTQ#BtZ1aVJY>nU&sfR&R^!H_ z&ru{ibfy#EoUhB5^Fa6)Iw)c;4UXo<9|@>BII{Joh3X=aUG}&GM+>b9N9;CpK2i{d z9j0j4xtaLOXIspaq^{JCC)bCMTq#+zP%QkCWyY05jSinnNe%e}`lXm55nfJ>kDU|o z=TiG<0_&FJV9Fr6Z$+%MlZ2_+`jLLav6NN^jT-$5 z!zbFO6!Q9z-XvMm%Y7ZoQ+TZVbRdX%1guj5YOsg_Nw~GZLA(bq|2YD+s@`k&R*NcJG=)-*)V`3D|>Ookf z)uWd}TnBA=2W_3zQ@f7x7Hy-^_itjQBqK1sbR=K5nPk$He_ri-P_!K2x97>=cL&uk zX4UUw{cgYw&u-TWIvHKggS#(q7ld;e+(krX;WcT%%?8`x)=s_OSu1HjDLuezCH-c{ z*zxOmtu#)(5^-6ByXb(MX$=6|PFDQ_V{dq*jkS*Zm`4PYScZsvZ$#J>(KGpY!?Nu~ zi4o&tW@Ez+A`*0JBgUS9emtxG?2>@iG6!TBP`?Em^OFD>GFl2`S=vR@HIUQqS!RX1FBH+s#0dpEM$O7J)Vn*C6DLK*rdacXmGOdQvv zNN?sDClSp%(k6kz- zwtqwK8%b_~Z#*X3OfIk0i6#Jr0ZQ)ZR*XHA3G64tRxklF4v5Jpp$-b2`%SxDpi8v( 
zt$Yw7af>&+(%R-Goq{ij;J7(IdOW$kWd{V58iB4WXh-0dj`FIb)!-k|eq5f3K7pTn zqN~d>;@+?4A@?u&<-FXRGl)gMSM;*gwW2>5t{mK0A8l^!KQP%h&JQxwu0?cwg~pyue$$2`y=C$!%ah56 zJ#Wx(r@Rgahvw!WLmPCW5%I9-g7?%iF7w+r_Va|(RfKno5EbsL zxN40;L6Y48+{l~KZ}PSc$l0SSV;COYjhZ0&>w^ju|Hru7D(wJSE$&)tU6c^ zHQs$ZA;2V!msonmiLFWIqYX29T@$JAm@65WT(3E6Z6?p>I_1KH#v@3u9M$rn%dgT7 z0n1Q03%sa83&2els;?VelXgm*A!63gJL)-ka#d-HE7jU5?kT=$uNXTnTt2}i(iR~7 zo{@8bk>^8njP1A()($M172M7a!ww0w#F@otf47C^=uVr?gyIrS7*7lejF$; z>N@Br7}BUib{cJzpb75$t%j#3kD0c)ce!oAI5a+dUM5H*nz$&U#DgcYk;AF`?xlf$ zcR`VbF6evZy8OeCs~7>k*^6w)dt>Y)O~;S#Ir5jKkyD~=_BTB8O#vrXrCY*IKKTQ` z*`R6G>ep^q@#dhuKGS6dN&t}KCfDY>BKtzG`oJUSItEr4yXsmwEXVRhkser+)r$4O zlJTa+6lSp9^&3FY#=-E_%#ST-6{-RbAI&hZ^Z8+=yx{Y(%+5#JcrXxQXZZ>n6GJ`y z`gFi*Va2UztPg=B4fa8O28$@}jIR&EdUk}ui;2I`D8Fbdtzr6qG>zx|X*?Gh58lz_ zHWa4^HcUI^2FWJBZH}jHhfM5fzrW`PhE!I4${}e6olZKvTYDp7tcod(p(@xV{Q-^C!zDFo{JU+Hp#>b+?F8HCig z7mYKHI>R-ne$ktO^f(ME=?7lZ&!LWBVHvuzr1^hJ`pJMplGqx`4FH0%nGXs*?$Xek zgM?uc$ytMk0MZ(Tvj%=r)(^s4!9*33dR>na&z?b=dEmEbvN> zwW{NvE7kq&0$*6d_h-<#=z}8cf_}@G;fNLwD}=n%#~Tlah~w<2AM;}FvBI<@2&?|w zWA9}#4RY9|+eldBs!3{7p;%Nd(@)v%yYjfE0^mlm5X+j-a#qhxz87p%Y~}Fk)Sj_ zi%%%DIGs6pLsoq@PXS5&$seEF(b*4_u$%Mus}%iS$AxxWv}0QW~4P<~9Cl)Ne(xKvc^nBNZ|v3Whe zcs$b*Df8pmKY|ruRf>4ygCVlBk=frufF&A>e=&r`^(qtT5P(0Vo`MU-E`=I5b0>&lnxPEW)M{1vQfe&b4 zWpM-eVF`oox^*fwt=H2_(H zuN^MC!C@Hq!<^kfIC}aPJ^=2ko?qM&-@fJf#ZhX2Y5odA%{D`j?A`TyAa@GB#$rg#Fp>IFR|%b?(W9Mi{j3)-YP7TzYh z_sJidXcWy7GhhSRnE_w*xK^e;m2cGk$=^gVX^^xTb72#uc(F!(UKsf-#-3xz2dbKZ zR@OLG?@d1CHDZXmO+jFVJ?|UC7;D5Q}pMckfAr z*Ifx+L^mlEjgtWE=QYH1@dvtA3EI(;CvS>@F^07MSAzO|hZ)0&r~c)cMW->~JLaag zII(HwdD!*h^MdNnAT}IFQZ(;tL+B*N;7MwKGWn4-Dk%|(G0A=cpVUIvxPLU20QNCN z#WYeRXP#%nR5A!HDFJFdoK@<}`QJYln5J18` zv0WHf+>dy;!BGD5Lkcx=>Ms$thvTUcDz9l2Fr@*ec@;}1Qi8~+i|#LNwJ;A zhszJn)(F(~sZ)ZPL&3Dhhu99oCI%xPf;$%Gek7o9oj(cyWhoX9%X%7X{v!i$=ftuq0{NPL_TZm}7G`b>j0+vg7J@ZEP9Z?U6fr9{8mIDtg04a6j zx_ni7=|Ee58p5DZ4)+0|ogNraalZ-RO7>Fpy5gnU>WP_2|9wg7zzJ(>8--@S(8;lG zCo0G9U+3>|-|M2g)#}Ku?hrceP8li&H}qa2X47K|2LWT3I>;`S`qQ{D{AjE-UaIRc 
zHkqzg#FvJfd7hz?4M{r0E%rZcVY}(~8x;ZVTh;iL;0e2p%|V;qFT$z0m1do*v#>9e z!_eGmL3?JBE-0zz`b&^cBgzmVBswNi;={gSo!H=%m9ehIX|DP!T5tXJ*5+HiH5iiY zqJCR%2-Io)466yknS&H!gn7A{RtMY6s$_XNW;Et3z&VNIdZg8~krJDLlO&M#ap?J5 zQ&<=Epc-mou?z@FKp5#aW~qjOwC{BTxTF7}vx(`fF+W}pmr>#>^>;MXH?nlvXmw*l z5Cir?(82d4I$%%jvASYS0Yc|)awPnVR)N3ptpyf`Q&=0FNv3}yy^MXf9$ z&yts6*%ci&Lxsu?%Bb5}AhhAK0EFW)`?R;~J;`Dzg#_z8bbY;ApG72F9P$@a)!@B^ z%^#p19*l{VKGb%)BKDBi%yF)0NqhSHW&B#nWqDzG3J}umeCvdRm49Ohid(8=cRUD3TLn{3mt1 zHB#?7w<{wX+pxyPtRv)DeRYa=uHus&pe3!GTlv^I$Wr6t*gOs7VgSygunPY|DX))1 zdX4l=6+BDlO_t^wqhy~EIh(KJUdZVvJ2zL$>qvoP?g=7n&c>$BtqKdcC?rV4u(&Qb z2uD9#J% zipvehH49so3bs|CQ40sL#MW%Sb(wyZltp4Q+RkG@DoR~>v$GSh(~5f^&)zK|h}l4` zF>Ifkq)Y?oQNP#gjgF||9tvx`#D{7gX{}1z>fn*_(jK^+HpD>l_T}#m(v>CoF}gS9 z5XR2`oZ3n|tk`C=E;Tk35QYXdQrIL>G{bacrx481aaL7d%>gz8pKlx=M?0JrdIhx9 z7(4Lp@p1p|93S^@Q+{0-iuCV-1)!}uYym*m*8&F%Qrf-xfO^|PA^jG6rSuC{m3DQa z6BaEK0vc*f+$Xe@>=hi2@v&m0AXU<_VtznK5O%h3q}a~sh|L2i>IMr$UI?PBfj-uw zxXM5kQ%oiXC27_iJkeSyq&@~rH2&EN+9v>_00Kj6pK@?&t*iW)Xf0CU!E-TC>3pjW zx&8ys3$v>#n5JQgA_D%eM~0#wc)SP_UO3vkAMh}C4Pgyj3%&vh}vn9FZ;f6wo#j|;Q86LqHKDv{|P1&aQzXh z(NK2vRvi@M=5ezR1{(xlGm1bGCo)xF%v5<03u87=>T@k@E<$2^h}LxGTZKxJlS({d z>LM~mFhw+Cgo&#^+>cYF@mQp+gCA;U$S5oVIj<$|W49wW++9i(N-vh1z!;OWGxq6EyCc1zRhOX5?t#iea5Vw($$;bnDTt|9d_Jo_{oG)lfmK-$CVwFjru6+w z-~OfEx(A1LJWG7f5}$|EkI5Ggsk2noa%~z~A zA`fnsRZ5G2?N5h55mAsQf5^!8Sy5q4@S(YgtLNUO!yjByF*F2Me$Euz6v8;t_zk>Z zNcCSby6f_>qc>P7QlX`}I~qZ3lzp@x9Q2rCof3>417+9bk@Job!CSnt_`=sk4T?yV z`HY8IqeYIsua9vvKSmo`=O}0DIT$$KNLr!?crwJr@w#xc(i|7(if~i|;90QH#l_KX zQPH|w+zx2s8>~`_i=$H1TpT={VDD|bakbH~#+zc!!CJ~t3Ui?!ggL#y%w=B{7hkt> zphHg{zqaABk@&{&WI|f+n2<;;8zv;_9!XuY^5f&%3{YGgE#W0ZLJU}6Y;h3(WN;Ag z9MxRh+PiD38VYd3#k~dZ*d1jq;(f!#b=Mrf8!kSCf%CiJ;zQB0JYi{cLs{jDmWvnS z;)(%;i!)kqB<(~DTU^|PO-NO6aqp?SI1ie`LKoH|t37@ejJ$Pf{YXs@VATX*is*^D z@VZ#fa=j$#X9HL};uS5dCJf(dBk^9)gvGnX30Nm~fc^&^>abveB4X5NNf3q-l+hk) z0ednnXs!U%E#67>mbawNDl1dF2t%pq(3wRPWU;Ti^>H|knK1AH&rBcKA{zRDaFFj9 z7r|+iVhuBu1pgGCBUPHW5UEU!Ls7?w8bKW-@?`;F)<`nGt*vU>_Se|dzJuTO<^%o% 
zbtrl;^`>Kp{BEeDFX|Yy)ZzHhK}#KdQOBUAj)AB{ia*p5+GdwJU>aU|9qRCEO`9oW zT5c~VZ-xh%znXE2-U{XKovj=cI{`?Yb|IGB33ldxMob+vzd&Aj#XT++N$dKar*>6DJ z_2l29d!Iz+)lt1tX9uaL&ug(@-2*efaQxsI!oNBds%w)bipiN#QnA^(Y9okjTTO-~ zJP?u@7Udk))kM3m7(rQDVU<-9@sBNBIwD%EL`hps+5zwxFZ4Xu*dG0$Dr=^f(NEGs z=tmx#B&-pxX)YOKuw~UlIVi~0Q8e!zG}ncWoh|{8NCKh6>-0Gz>&q{6iwvq^&)2IEk5jXrJWrWv!X+jD*G&NNq9 zF+JfJ_^ru0@uYKgYc$~L-l&NICk*MyX+d8H>$ftcFT5T7;A*zNwx$sX*6&)=gpY8U zahqei!GSqdJGnB#Xrq%>2?26yUBtg8-E_Lne!5hScZDku^Q7Qy_D@TmFFQNi$b7*4&?p^Ma%SI{Ns(hM}`hoEuXzpeazE(X0_^-Hs47SwJ zrUxlYX^-$;{1v^7jSz48<@#NbVQ+jxFZk-Qv`}cu0w^J6!ghLOP z1Z}dab70mRB2hh~5p_8DgfUDMav|EEAs;A9hhP*VYFyBmfp~_)p4KmedVnTKi_U9+ zX>SsB1k{{PN1V)|?Np#T(?FCIGD0>hAk(+;)Fxr(gwM~TmS<$Psjc|Is_(GqNPR=Y zjD+t%Pzy-Xl4eYiU&qX=~*HwtSO&?wb5(S$QEoCB2LmPO^OxmI;m5mZN66FQJdPHX^lT}USIso@ncVKPk$BEc+i#nVt%e*wiyi}oIh2dlRE}TG1G<&5A}J*)P$v z6lN_v*S~8KQU9(*MEyHN1c04lvxYjL_mLN8skl3li&U*?uhT`U0{x(aXn793I@ID+ zu1Ed}Jlqopuwx{EjwpXuXZ z0UV-KIo7J*_~++pH3lOL7_2_j3lj-H@Px%BKzul97`5uaAae>J7+#O+3^GH%%3&PH z3A*;Glt|fm{Gs7VnT`I%4_)}$lccppATE1~qL4#rG9xn=l-+O^mNwPsL~Zb~0`jmW!MRuL=-Vt; zW?0rT))Sm6?p>7)hf1i;)YAxR+F>=6%aV*lE zCSbaQSH}b}YiU9AAzj2hhZe#U));7xm<7?@ntmaqtvI%O%e#tMx`C zTNX|e)K|rg<`@D)9V}2Cu-*!!%LC52P`tUOJdmYXvd!W{;uGyow`A++sLz&$`&vc` zy-ilkFj+N}$64ta*Gr=|1m&qTc2u>2qBmEk+*fjcsBpAaPPnHQUxIpqwul57Cz*jJ z6&x}cd$O=6eO5`L_m`t?lIR0U5PjLsjl2OEoLy|;e#AVtC%Kc<#-v4)d zP7^@PdSS2?5LW4lRr9_$4~7$DL@|^q|Ax(l4MM$vPVeK(#fTB>YIkDjiD{mDIRE z#%vh94wnWu8+t1ZP*d>(RCO~HY zbh0?S>)*_kTHc&Y6W>4IT&G~utJHeNf|l3t7E3vinpBCz?zGLRb{=W^H3OrLEg;YA zc+h>8jz`qhjtAZ#L~*dECPr$p>UY~@^xVNa;GXa7KlQN(YJ?N|p+E}}!C>^hFcM06 zwMLBmBmj)as7z`OXv|gVkTzoP_=yHgqPGZLN`RDLS8*y7(t(LM?-k61^RcA@r4=?` zmL|s|5Ss`R>ls5XCOewwHHcK#T?D+ILc^08%9n|G#_zX41m52iyV<(`;pvQiJ zg7Tj75buo7o*W*1u=V!!a~R?_;8&c3CzjzWyD#tB9I(sc3j1)FBiFrR|;6QliwCM zK1QvBAy(I9CE*lAPcdvaKuVc<5h=NsBi?X{lz2*7#Y9TlpwWyGwl0LN8DY*-q06si z{pk1dT-FHshR+YT=>t_vR|bqoIV-CS-y%CQM+5kZ9=Trj=ocf!X}0>`C9?aaZ5*7 zm%({>%8qj8Z 
zm8*5a!ubg)!=%76fd{9(HwOV##@ur9HJ++|fsMLSbzCDo*!fPY-|yYVx0lBA^*ino zw+0XsR71hTQFG9$u3WClYC_99*m%mp-G{NHai4_Nn9u%*s}a(eU?zo8ulOXW1@hBt z>#HsbDKUg(;g`VzuDy?$@AR0vvT@Z;vyLmR<6x`ZZgf-MVyg8Do}I_-PG%UU>H=1(Yl3E*)425+M= zPw^JKPQ*6)%sS2hLr(5Sc;Kwi3TFJU710m>vA@u(gV5WW*ihK#O+Y-p7NAyKQ&MtY zi|Rpp2;Jt14)%zfBa;1$uffYPgTZ^n-W5$8_J1rdXC$Z9Y=!(t%m%_&QH)^N#yCZ1 z@u}9}I7FstBpwa65VlUbgoaUb(T`g93(lgH%uLn$OaMD$;;0=~8s}Q;98V00?q4ey z#pTKC=}pzUp?Cj}e@|TE;A}Dk&7RQ2Z>`o(N<#`5u%!$7=MHn0mQ!SGcrdzS$Zhgs zlmE8(Pl=cXhgCcxXMGq7t272tE)3zMyUx0on=d>ZXAsG?Y>17M@sH~1`XI#B9kYGH zM{l~H|Hf(+EogPRL92Y}&-SZTHmY^2`}2)|bVC1Or`LWw>gGA-9?yfGT3 zyXV74k0Bub&mn5qed|zTdS|uzTH+0FL2~0Hgo_JA;ra(Q{2K$fKwRBM*02z&wAf|Ij3hvvSd*(J=4ASe&2O{)^lIa zdR7aJQ~B6xkRMdSK*?T%&Q^$#90feT-{OxML>6DdVEA#9AGF2J0O5Vn!)u*|D6?U; zRg{RQ_Zos@R@>e4tUY?54=A#=@&Un1tveV=y_d1}J&$^^wQq|rk80yodmP^BSnxMX zR`WkUZ!CC%MI$N*Ek9J)!h@MD&#$68?aarAa$m4Z%k!Ye=9-0vO*BH(-%!m{q7u@G z3E{po`D4rzc2emQ=K6kt>UkESS#0>YL+_dBExx&f*w}E_xjt+*!Zyr!9xhaKzI(9tXXUC1tfNRB$Ijgb7j(3y@k&=Om zXf(N=5GlhcD9@br<6UX6ly=+>&~p^Joq7Bi2Y` z-QY=BH>&y;@$DkYh(QEVBqGLlo^`Vq)?6q~K){YzpN@4CWVJ*LHr6ekxJo>r$i=J< zSVqC@o05teO31ZF;khptEq3|bi=l-gyx|3|PzJio$$x)1`9C@yJ{V!KuT-6h6%cGajW|IiL@?acq@y^RkAmKbBBo3t6*%c6yFOyYx z=P7wE&hgHL1@KU9luoli0yGx+snr9rs=?WV{LQujHuemaIY#(o^`IR2Dp579)dzbt zKaC;2yt-*MXMH!QP6x$17x2z2O;v3@*Eu^C%q0V3Q|D59PaE#Efx{2-W&_vB4)1iZ zIcJ6r995RNb*ij=l8uyqI^Ovpa+&DDOqtE>z2NAN*J3nbTAa|$({ zuiIsV3WpiKF_>k;@ViGUtjG(P3Et5hA{2JBGgMf=O!U`0i*9<@LHV$UG8J6AmR=6z z-D4&meVI3Af&#PF z87DPV{c!rgFh&I62KMPDjzw1Av4aAX>ZZZ`7=*EQ@`p6bkO*?3_SUQry%ui#+NWx(7zK5L)5Ix3RHjG!1Ou{7D&k^_!qAnY|qpvyoYFc6{~*kC?!8(hRcwg@dKK(~P`N@wkk!#K|?oum)l78VmVg^i?W zNehhZ*+>cnnGlaS#iyRGBJCp?3eme2;+U0?jNu!M~n#TU=D zN;|#iul{m+s}I!0O7z5up-#+`9npi4{ff8x^Ym6H#An-Et?=o+)$Q$;ynzjL?h#f% zTeC_|NfKzMBLTA48U$u28;<7 z+Quq9$;u%Ma@K-zZ>bHA!oeA%7k zTT2n6i7v>}npWk+pD+PKdd3$@Y6aWqXgLaaG`mcD&_c3LJ>hy}2oRE`0 z90Br2=flNL$Vph8cGrb&a)6iE5)G^FKN)9Y=3%gyj>Vnzk@7nlI$QPEpx0du-G?j$ z8amwhINZ7IL?l-9VRl}MJIC9KbAtz^q1U!qnvWx1b8W0`{USS;TORmsN2l#v)|(xd 
zP&Q8RHr;mk_YWkAi?d9GZI0YsX?01dlD#0EO^Ps_2%M!OP8 zmr&#gUuZFyN$ z7thD=*%b9&f!zY{&O=YAjW zdQ`N^cAY9Zp@N3G0J@zR?Kus<&Z)xh9(S8hVqjOBy*!RHm1P)6v&CNiTN@?3cHBXF z+(Wv~KLLuYAK@h14e~?n;;1=Q&`9G{(MP}W^z1s+JTDc8np-an-^IWy)4ShsST^@N zW9q$Un{b=pW;`<^9Wz_99_i$glDY}y4Q#WKld@A?ZU?)0k(Cy-0 z?qbHeKFHruWipdO}jIT{caitV;AAns z%wB>~w;+EhV~+W5A zvMO6rm1)K^kp;ZjqhGYy#eShtC3NM)!c_s*Vak5M>@KIQA?6|!n3n; zarnh(mv3hSXX6QPem2wyz}_)!4KN4YF>QYLOuO;CZ63=cU|=x{1vXK19n@_UrcIQ3 zdBR8iT887noA1xG!STkl@oM)>8%l56N~0YkM5djMesO`}-=AqSCYd&tW$l@|CAsAZ zH^z6g%e2q4J(;TIwmjhqtx@^w!w2_0;U4&{E1Fg?rFcWBvGIiOGsIm_IIR~Wugy`E z8@C;uNgVIzw@|xT?Fyiad3Xo}c*suq@bJA0r`#EJa%a@}@Gpw?GLXtPsAcYAk8&DTh%5>qZ$0p%Wmt)bjP&? zPbMa@p~LqQ*^dq$R0#mqZj&iCWThjHZg{e2|5(P*M>!zgAIdnQc_VsInyoVFt8)-( zzq-)3x}WV#$I;uQfV*M=5j9}V8ose6l|Z2!h!E;qyJ6Dkz|nI0fHeBggt9p;(2{J^ zFf!+a;qM-}T3SVEw4SX=rFq>I$2I@;ib3F5wbnk$0SeP2@DT8JVN)u+JRQEA<{jEj z4;Q!J#YCvF=pP=a#qh}*=@!ax8Gwspi-B1!GJFWGGltYH%0hE+=?XM6|I}zW>dtCB zc6)VnqmFH`5lpy&5Xq7cILu?KM5%J%33CFtdL)O_D(tZO!Idju{KFhYyvYT{xypd& zFq3-yFt1T!Zty|2*knp02}{12iDdkS!Ge#;8wL;XF@3|}K|YSS5im(a0LIbcj|lkf zA!J|({;S|0SMqpFsg5HCp;;NQ>%G@$!6{e3-SX(s!2=%f;`0i4z@Zs%`I#Ib8b3=u zn_v7V_wFW-ng_=i0y%WMCs)2cM^VlIh_xQx0S7%Zk|ljD-9l&RkKHOrE|2G*91VvZ zh&NYLIQ&du*dLz*-zsYfX!mKzDo797o242bA>Wc!CEu>`3|YGV^vERH+&uHA9NywD zXRn**T%vfEOO}3Bjeg*wMgjD8qrdnvNBWE1NWXKpQSJdg1Cb@QGl_gF|Hyv<;PL_x z@H!=!Sv#&jWSYSHwd1GXIC_5Z8E5_ws6q;wch6t* znh4&3ELKO{3Mr<2$PGmzdZd~><-%;u zW=@yD*$>}*MsP|()%{JA9Gu7M3g4L0EEe7JIRO&lj8je9*ZT=4 zXAI?CY#X)+=*Y+5j6jeI%jVFX=NN|M|3TGnpZvai{Z4ijOT*gF{A?ZO3?Lm@+W}PWvJ44I zkps`a=U_-qmnVwh1-@ikx$89~U@3K4?zoHvVZMYBP;CSjT6l9&g-zQlxtbVcP+>29 z-+y@5kl@4vW6V>O{3e7`78v?#NDy1dFjCjVPgl2z@a$`KRR~igu~5YxM*_P-#$cxo zv45zjZ_68iq`+){;XgU*Xo9w?5kM%D4)8k~;v z$kXL1Ct~JH@&dw9(+NN8H=u(NWerYhRpw1535)vK#Ff)D&5{2`p`r38G8kY1L3dGBU6x4) z_cZ;YVYlI`i`sj0;>7BmQX-FA{xpO*>Cyy2r+jE*@@E>4(|v2nh; z;W(uy$(Czq*2UKz!MnqUn56{O&+@==g_>%5-UK$1O|>flaA)(I;`LkUMF<&~x3jc1 z;&6Bi#kf?6HS;=BPVt1q>!9XiZbB%w9+<%8$P+?UEAS>B?29v(EW|D0Rb{|#>e2N+ 
zl(tapTgo7`*t=Rf)J>e^z}q}&j{$SQTFB>K1{Z>xt!pI?pIiU(=RS9tT=jA6a2F50C>xTT}bMyp-LKSH|!luEQ&UpU_L75lf+an$P+8K1A*d?dk2c_-2-js>y z$lS6`+v^t8q=ybZ16DWKFH&Qex7~;b(F_z_E z{;A=Y>YlZ`c-xzZahIfBw1)K_!oV~0Yjx4uzqmpg4xkhLPCQw~!eyf{$_8)(j}o(a zVPX{`HtH0Pdg;y4o6cqN;fNOV@(;3P1H z)tppP``V=?&XT(1gru)w?UuKw9ztFtK-EzACiiQaCV8=Vv$~|5AnmM^=ePT?u{zjs z+-L1O9kq7pk6`fXPr&Y_z}^~IrN26>gGSu|QGkWk?zC-st0D!u;x9GzzMzWr?sNm7 zDAuS2{bXT8>wo>>%`%-Ij1zP`pWhn1$UrUyas~BE_Z!H05HJQ7tF^nY;q#KUPs3-I zwibGI`Bp9mUJmn)IdJyD+k#o~0HM>?PuB0hV70f}1PU2Au6{6UZ>5FN-Wq;kV56OG zsrwbY9OhY2Tb=B~O>ABNo6QP3&&e`%&>98WhBTOT03Nk1U0sCYRG1eU(?GDsISXN? z4HaEle9R5@FX^0nBE1-EFP?N)X=lP&Wd-kPvGU!2^Dn zLr-_ykN1_0$aYffiut1h6eWBZv+cB7C&XHunDL9Vs_w>~e}MF^5qVJA&S?%)Zf#WS%??R34JEo^yWt}q7n)TwdO z_bZoT0(HbBD)-lO+K?oAK5;MgW#TKNFzs(JZo+N^6+chxJD(*^?9+k0_!pvrsjAFb z>O`t5uSw20vA$jF_Df?GD!}0RJg&#QJ8_H9<-hAKNm!Pmel!>C3Ld+oTGmalBfh0q zuE&h+0l4)pymCF}`Rg(7F3V-myc5`S3l!Pzddv&neyrnquOMGh9bOhMStXpn_$Q)3 z{*%0$6~D}v=6cKx3hGhTH3P60-zyfv!o%t!Ig_9I!s{`=FBsbw(%0Adw^Bb2T(@q% z%-3W7{Uff&L|k~lxqJ~!P^DJq$P`5c6jL)@wbW?rUshyYU7zsi;N53PHUD#en`(f zwP@PobTDGa4AIB-vFYOw&21p3rVQV?izRiui)Az%4&4quSEQ7MTxr4}nFS3y`V2iQ zwx!Dq-0OaKHUbx1k|{wRTjgJ$~}K~}AC zH6z_j8#cYzdbnOu{Gm3gfRBmacsQB;7 z)#a;&iqOe&!6geQ{BEfH7Ey#G3syBCr&Dwra@e4W#thv~>n}C(6z*`UCl7`k0(qGW zO>x(FcYVdFqb~_uE+^M5lRyKg`3qAW-{dUzun?8UQKNRXS;7PY~)wZSeZYl|HYxV6Q;svJ$$ zNw0(y9zUgzzz5l3K`C}_wS&Go!9_+};KN{b`se@`i=2^Izqa|#vp9%POde%Q5tL~8 z%W1X9-oHCs9Hf0KW;YuHZy5MNxD6NLpg_^|0W%;6ERoj8EJ8a zo5W7keHYK&Y4~#q=chaj;F0>cMi;QOt9-z#vZEx&D?jKdpO2We(0!;gM&N&ASOJ^* z&xd!ggz|brC~E`Xn|}-&a`S%iXI%Z1a}QBQ`3F$&ddg=|jS*dyrMHWEw3mnSM4kvs zeF|mlXCo5R_s;tFns54GIt1mUP|?B-9?CjZH%evGnnf5%$CX-pIVUvGHW6iwHAIG0 zhy;ER=~s8X^Qmds{R+%YG!+X84;bBEe8^dW47}x*#Oo~_!kQce|GS@r6DB-66dXAhq!@+^;4t zGF)8P_A^`M9yR!9g{scGP}TAQ0C!8h3+sKu6XmJRlg_s@oc&qZ0#=VHZrFe)=mHH> zI!@6sie$;mJ}Y4(C1;cOW5*4i={z0yin~wO>g9H_wjs&nE3^UpVuSj4ISKh!?_gie z^Xm1CVm&B)eFz*84bGt}ITz_HT)xzTnth-_!(wnlb3ad-%U{!3G^)L!VkFrkVAlgg 
z>86)6EIZR13f(q6mWAn!pepdr#?zBxvqZW$jRyzL0cVoyz%^thW|U+RiU#?ifH?35 zoCM*lGsarP5!*u~OJ}5Lh$Pl#)DZ|u+$&-+8>G&L^v_SIP`DWb0=5jHyg>SJ6HqgoKShWa zV5gaDUYEF&bis$Umpblp%JHZ_XG{EKTCMW|^G_I>(*m;Y4tv=FW^e`RV8PX$a58*} zc-Y2Z&YA2u^_ev3$?y=#i1s3q;QgO$|$)n zUMPxnggB{_`%fQ;1*0Dczir>E8xK(08jmh@aAUOih-;RwL@#|$etcFm1F~O^2(SjP z+=jIUuYyZBbPFkC#lGMOuz(1vjoe}LA7IgM0vP81aI}T4hz8+)F5V-)Q+$pU@6nKT zA5}%p`+(5>HK;IXcmD{w@?VBM(3Isbj-jg+UJ;|_TzZ^?k?!pk0W&=ag&18jvA&nC zZrdD~q|b}y&k8v54b?K83~q0mEYoB7G`!QZz@EQyu{}Z#pIPN{BUAP0x1l9Z5kV&r zJ!tHaN9%{N^~O)godLOW#%gf#NbLFzdmh81`}f>-Fs|fU^&aWqy$AjKtNy;a`HkCZ zjPl&Sf7ss-cfZH$Wg-Ikz_phY;PMMpcC>o+D0Kb0kPOjdhua}TuKWz`vuu%!KWIN= z{M6jUS-o>z@oVFyQJMV;dgJ1}HhZL6h;{x{e`tZBEJ z6A**nNxhcEQg@hcuww#Z@Vm#3x`uOu9TO0P_dRw@#SX7}06W-dYpPy{9c+|}9can0 znZk~!exnL>3SS6^P%~4ha7aNA)ii2t5abnx@e0Fug<)`1VMzuR1j2^oUtt*GBl`^F z?;ef43;MLPm_9w|EvEQwG=@;_bd<%^8LF_sZb=usCVB#+my5yH1C$ypnimWW);4^0 zHSSzgnqBHW?YGCe;<208k(l|1H1~KIf1MAP^ADA=hbbJ>-M(W=<(jVYCgFgimbMjY zmM?P+N`{FqVsj1#hp>fKV~>X5aU*lT?z3j`<6kUTo`=G)cwaQ4jEtLh=&UcJ2J zm<_~F4`Wc+ybv5JA9E2P?0G?FxT?c%U3NQSo~ml zAALZ?bvvS@JM-$<*r_as%h_}5r8@-ZNC+(;p4cd!E2%dx>kWl+o}ylQ#!0_IR>;GU zmzSn5y>@EsNplKSuglYC_@CL2z=8+M*N6_q1%uUhTp3;8IZR9hbN{mS?PlQ_M~@vC zXW6Uo*YG59aqJ(US~8k!>x=?ECYa@Dj-dVJ(4b&IloGip8VJ=$8!~_w!vN}wF<3gn z@uHI`I5*?wwNN5oBt}qgjMtG-(%lI+m2c%3B-B+%3rN~LCIpQC1Z*^2p(6*XSXn7o;lbSb=qF|9D*-}Fo1yFwtaL2b~ zL<&&PZqSrR9_ZZePns`}Qs^Ik-#L{?nBKcpUU<#$)g5R1NjTfE-bidr4F(e}T;kW` zmV9l1fSX{MWb9~;Rg1*ldT)o5^*{KmH*D59#Yvp2jUTM0A0?ZMLqGWFf1N6iTZO7Q znQ@CRut%QU645{k{^X!SP9A*1V}+)})#$C4SgJ!ns@|Xwr=zae^K!%H# z_Cbc5wM_qtm~X~*bM!bT7U@FJ+@u7TptycIxU(wL_i(p@;g9wRkCA##XHrE9(U9?yUeoSTg&8 z(BPKYivz}&_||=2;@8p^Y%#ZNkTl?7Xi;iWj+7pvn!F5WbV6pt;0DOstt;Fxk#r0X zueRIHR$D#jQmjB3MbZ(HavT9hDIBgcqiT-d2sF4bU+DPt)W|#@UlDFO-$SgFD4pQ( z?B}p{EF%TmJRVuLZY_B;8yOU7Xfcn+FSIixFV)?km|MoK%6gPjG6ZPel43H3mqJhU z19mR1+7}zyU|h9yVVL95dhzUX@eGUq!r&S3{1WW>I!eO>;^2Jw0NkpH*No<(HbM<3 zK&VIIQYbCm1rkTu@AGP=&*}35qzuf>-I4homfXA!UNcCdH-rX#=1d-H-eY&xkFmHY z99Ai%oD+|KWmNRW9yVXt!(t0N!&9QwRwfAS9qx 
z56T3D6BvRc_(90Ji>5fuwpjocAvo>tiscx_IMSi&0UUtpLhBYrY zBIZjvSAqpDnrjA>5CP_9)HM1pZ0uMdqu;>pg&F`ufTNhbcKkFnh$b-l=qc+d41Lyon}=<*OoC&orJj3-P8WIkUGw40 zZMG5fPU<4zKU-x#hBM)}mz)Z~8|)52(pI4y-X@472c3Cp&E*W2hH!(N^~yf~UIXf; zNjGo^+E=ItzsLHEu4Kkg}Yr+#~>i`k6IINnL*3 zcG|W<1w^hNnb2`RxHfq{j#xh;<)aeuit=!K$lH<>~$9gz%a>KNMK`TBW%b3^QIFTQ7-jGiB`1$=nNMtD6%mKpvytHcMPHXTH$ zB*5;VaYf_{JMk)_qdyNh(;TWPu#nE z7Xj-JJQDdaYc`dQ zLSe^K$PoXd)0@l}i$CTNmlGOJEDvKjsQOqa{2+7*_XUQD1Lf}l=*`Y0V8@trajU*t z>$IDVSE9qHL=^-Co-;bCc1R}cw8mYI&%C?pGNi#pH5FTj3g|dA0Tl`@+8KxWFGpWF z9GSFm$*l6C76&DE{@tVK25VsXxBtjD-ZcwK2L7$Tc#riSt@BB{C+}gXT6Xj@f)`Ni zIqr<*ak(uImV>XBfr=d{7LH;{cp-W)BL;L(GUrsU6ZC9LaX^_HmM4t)t7W|4P`q_w z4~+;}q_Xb{ZR=m=mL?!N$~f9vm&ViKSk4`P;MbW?AwQRcTbVMFnng||%^=H-+BwWC z@PDgBOcOj5|3d}HZ52AB^}+mqtbiTIQTz{>#yvk={I3#WxLi*+`g;ET=LUBgz7zIP z1hpD1J}UM+v>bzArTf|yj2@L&_|m+XRjw*pA&Df*O;83+q91=OwSialYHC-o7D%fu zkJfh=HKw)Py6bd3q@v@{D#F$VXGN|GwE>*o*|L$Asq=~2(6oV2Dp-14~K~Nxf3&Vuy1IIN(kmDiz zzH(^H4!nRpgb`~iQiQ<+rNVw$LrRDm!^QWD8WOl^OZg{}IJ?3_t_Q=>G6q_+;^-44 z8PjJl&U{iV7%FA)!!MR2`LXkqXS|`}OvAOsGRB5T>CCh&kE@RqWD;@$K=3sh(WCia z9S#pQMPLYF5vDCeAIKAg2^?Smzf9V%>H6#L@Y6*`y@`QB_qw}ar-1F^~vjDp!e)xgoSyp&zr97%{#^}1>> z>nY3{Js#DLI^+3-!Op*Z0=~CcS@MNWZ}i73YSmiS%Jn}!#1jB;>XYRnv6lRCxc(=H z$*~pWgWO#LaI8bVDA8#u_)#}UiwavPJ1UgjueB6wxj4CFJ%?*kl#0WMo9jh=Mq0VX z2XZ4w9(%@WtY$+*uth|S;3-55cM2jJ!y}-Q3bjO&40?lzp6P;NZzE!7wI1|{$f5aJ zokTeh5z&&_D9=b5B0?I5^e{-zfxz>lIP-~c0RKmV540grVSNLcUDEsETXp_lIo`hc zLFn81g?s;{Sy`v+PyWC6B$!$7k8PR*R%bn2{IL9vrY~F(s&R1lg|NiP;muD61}1J& zolfT8dJMuh?BEd2q{9x`Xh5gdGL``7Y{p`E_z=Y7*Htd0Tpg`HGW?xH2Y{CV8ehvX z58iWoeeFH`zPhT5GDP9Q9o$Aq8okf9D363Duz7}Z zj;Oq~{^|Gwyz-qcn~$M@k;sRWbfnRI2tgc53p8B1s~Qa^60@sqNu*fK68FJQe}~s8 zQ^=FVdq<0(6QUV#+V=!zCNWjjISd#Wa)H4se>*VHFiS{?fOsrbKVMr(!R0h~jY+M* zpKH*yO;S|2+zO=A@O0Dgbh}|@Q#ZkPrY*|N`gwVI8eZlM7b$(2F=6mxBf{VfgNJ#| znC*&=?CVOH0t!{rh}?@~6kVwan{ln^?8^T{S!5EkK?*!NxD3}gaEh#9n<;=h183=V z3K6YFE!nS=Ul7Zth+#D$jAs`=s5_2FO)8~0!!*HCBRRgUDC~(LFfIvP4e+wLoE_HB#bTy-~%}S9`>7Q6Lr%Z)w 
z$OGgKtq?iSXm}J9oZ-5<0IR5A5mbbik63~#;{%Dd*S{fFf(v`Z!mwjp8Ci!|u4=!S z4%E?JWesY+Cs%%d)>OS$t{3_`WEF+|PA2-+Zm2jY+vdv((G*U&b5uL6$!-y@iO*^H zMF|%ryAvrpLpHq7O>1~z3{WmSx&=_~J2;0H`)|6<3$2gD7jh~y z6e`mkrKabFIQPsxv&{?fyQ7S6^Fnly7X_(m(HZt^^nw9QaPb%JL3bqI`x2&+`xzvj ziwFSO|LxMqeIzmnB@THhUj~X8rEorNs)iFIH6LE6`vf|5es8jhpJqTEUfyQTYqo2j zf3FJ-Yqd4;x%m~EGlD7py`eUFXGOd^g_?3vP?t=iNJgK;%T7x=Ibyu8Px?IreI1x| zUyB{sCN^Bp^-X7|;%o1$rmZM3>gpc{h% zt!3GoBzm2dhlBQK)INuRBMzDqp`z{uN+6NqM&Jd*QaER3-)$ec^zvAm^5k7ER@EH-t`4CjD(%x|21fiGel}(4l zsHT&0pnwNLu(b`I#Ai}s(GdjFb${yh6RbFaQjyH;Y zg3a35j%HEzTWBN(kWtrBV}5%J#QA16+r3`?*R6 zhG4`k4^Z`&5V6SDZE?cP{==rsyNpJnju0VK#SC>QFhRiXv%hPi7!E)wu95~NQTJ2u|jP0~p?Q}^_Y1c48 zmr9>bez z2-Yf_FORTq@$S%Z@2AjApxU;1*vY8ykXjHJJAG^xB(942y)C;N%W6~#1_(AT7BhZg zguD{{CTsFu>wbaLjYi|-1Rk%w`a>L-sFsa{AV2`B(c+ZRxk_8R&LqwTQDp{eTp=AC z0JD9>T3)u3*_~C>JpFYm+;X7l3HGFRRl|_L;EpW;m~!To!&&{IId&8}RVJP|hMz$W z;?#A4nGsUjQGBT-V`h+rbTwO9ggY*34RNIFH^c3*dHYMSRGwjMt;kjed z*mc=OEY7QGLQlisa)|#?A=YM8tRpZu`!3X?En%pB-GCco$~{TiaB7Sp^*Zx%vu28b1E?q z@0d9;t6*kE&6-nP10>5O)|ehJNmymN$5cBdTdCpLlhtK6Tuo4THUtQ?PhwLFhU7A~ z#BiV0-D;_hGMCU9B77>~lh#^W=xRCx{4X9f)zXF;u=UpLnDp{|KS3V-NpT^1+*;T6 zclh%R(MidSsS&18a;6y8n?Fx##`e=JY#dnUBO z!xP5$b*yfeq~FL;6DW%E%54q>I&)5p@}VanUeW<`=65Z7Tl~SL$ry$JPG&7#3MM^U z{1u6 zfQle|ykEgIo{M2a#Dy%H5xd5eKuX}BR?*ZxEI^+0HWZTq`e8JzMcN1yW@ij1%#62P z>G8V15okXD{TsE~J_tFht|ox(YtVMxcw7IK5B}jFGYwgW7_T56))c_qNkSbw6$gYc z{Fud1T=}@sM=-=0-#4Kz>f!fv3NXtHuYTB5GR39{`(&O{bYdVwC=y>WE8TV!Mk_vo zX+bCK@uPV`2SeyU0%NT2(llkt_;36^jUODyBc5m)u6!WpomGn?g;o-)9>|+VB_nl& z#epFPm@<%8uADy)Oxuxgj~b~@I9;Q*-b%DarX7PFIt-|@A`!BqA8*V=(a_kkBLnc6 zLweDY@N5_BkG4K~BA8vio&(oEN@l0kW=i-woji@Q7rOye!@xYso`7 zm7_j0&+wzrY)x_Fl5<2Y2U<;#p`J9Q2kK{GouLLB5<)UJDQ&oBP`=ZTPR^z}1U+(t z7&uX{k$Xt>dwPvLg7!v`LaW@{>%^zF9b0Tr;3QUgmYo?HGzEFIa2!L?s4pI~hB+$mIm$8VjOXm~T#736Vx#kf6`WfD zDX69_K)d{gQIHg>^XH6fYD^~TMDCoa6k5j}+XPUeKAV7$*qN3+JX~-xNmNVsu;yDA zg5-jxwsV7KIk_PeKmbNi)J4Usj7Z|*FTQy#ecw}pvqfruhjp4z&=h^)K+xYyi9<0= 
z*UMnI_eZkRY~FRu4BDSz4grGJl7OCpA_2E>E3g3WdT^#p;w6;O4+(7Ti2S5)ea~eC zf|Ey0d*&1gY$OJskT=|!AU7}rJ{jGk+g|tF9=lbR@}DF{mv?!`PXdZyFooqr%{RPb?FHwZ%6^ zvAn!R6PX&5DVp#ujXj!35D}hR0Ul5WbES|YnL23v7S7vvrW8W)ARz#s^yqgABqYb~omRp#`4>k#Uu8tT9 zBZl4tGz^#zC(K_E3DC&I)hDB{OF#8luZlbGsS zl;HfW@W{~^RKb}P!$+fYlmtY@cLuSwTX<9AE*wTSc6N!uXozwV_W$L&0-*GG(neEc zL&e)3#xB-$c7?V=BG}kU!Q5Vgi$bx%X$47Svb-W5aS4QYWO!yTHd1M@df@izK}m$e zfH*Wd6fc}`#=IxH>HAi%;so@h7aItHo;c%J-veMT71k{`kJ;_JMyNWBBL~7L>$&L zR7>2*VwtNXEPx7FbC!_V8$dT>x>v+biyg^_7ZpHO@Za2If zbqyzkka2eH&i}~a_<(#6+gIE+?O{zu4VocR(d#@=q0gpewCUgn(lHVbVSQW|2AM)@ z{1~!Q*PdNpd!Hx4A5NWF?u7oyRt8V?V0HWzW2u>ZhG=XebJhWaDq1U@8d_q641~A% z=?)>2WXE0?e3GzIz{x5=X!xc80=yM44$Je4%kdje|o} zP#^yQg>iF|ZN!^7_8!3s0{^|o(DlcE`S;n$82d+t!*D6fVEvrpMx}ptNXb)K%HO7Z zW|umLjCD{kBZrGmI;NH;G|8M`Hk_X|-VRuILsl^EHjX&2b%Je7UOQg6L&KViGX@pI zFcv@My3$i*ZSN_W&VO{sKJF$IYjh(DedrHPl?7u7nBn-%$Whqg5E#Rrpc?*eQ;kU% zaK{wM5I~@}iU9D>{lzX?nNepe9oXEqZI3-fp*@gZq?7W!12fnhS^y>xx^UFyVNjp( zW1ei#f8+9APUe4&T)*8+_DqvesFH!>1p|X&j6wpYF=Ao^pAy`oo8|qte&Y~!;XmWW zPY5zN8hV1;$zVDzdm5lR0Ownd_v@&Ee{7m!Q`n~BIf;W~t0@BcwwvF`|FvV#bzf;GKv9lez3oBLvHiLN|?qN#!Y^p`FGu5uF3j zYH4ws#xjvjW0kys#>VSj5KY&|>*J37BgOs~InI(*Uwm)A8Mz8rx3;3FMCDf`Gku!}mqTAfm}bs9jShqP{i({8s6TasY!L$1$Raej2OfEV^w#QfvK zgk?Lf49;V-=V&f(I+`0Zqi}qc%!nx{DFR9=DD}8K+0D63o{-U`+2DuPin>* zR*}Bz!TcAFIVZAVo|9spjhdMdU{Jx$q9_-baQ4(kZyX}XMe=12z*-{4jPzQL-Z;Cc zz!K_Fo-|b^^rz1hNa%7t2d`uswD$%w{SGKHN2M4s53eFe{jh=+XMp4RTf* za9WPx7ziq0ssP&Rb^#=L3S^2nFP_YW3k8_-|3a7VxOM5N5%}Yu!OQ(~WWnQB`hd^F z!E7)eOc5O%y746%`UWr0yL|&aXZ_ z3WmLzPazt&h|1~itYF6{m_Ic4D@+O#^v!`tzpg+;Jly~VJ#-f+MF*Kv&W>))b$Q&) z@$kNXYL17_5m3z_C_sgITkd@T^?5bX!uf;R)gfPIIdz!2vo)|zD9o(E zde*}l%@HMwozxEh@i${HcE9j#4cGd^f>p_dAkpU+%Tz%?`Nb9{r)j!(HoF$x^9~_C z^74sd3}=8{!-11Y8UrX-u-;&y%%h}8aMIV!s@aN+oz8!1J|1E>F3$5=NGZAfrb+lG zTUk!usQSA2?Xb}4hotGhQw(?9F%=mY=k-rd33Wxu}gul;JesQ642lk%^#)U$5=rvazRnG0~% ze=opF%WTdhB5%Ha^sUcS8CLg}ARXO_4S*i4mO!h+R2=n9!?yqm+v0-a2&u&1Z7Gf+ z+2j$v63It9v=iY?9>p%89rwjz9(ZLBYZvSsoT+rjaE5O^c}nIRbR$L*Fm 
zoQ^ERVo%H{M2EQJpIYH{W`=(!Hf5&p2E{Ip)+h5Z@dMTODybL63(_tUX>p6yI>XH- zOlb&{3^B+vp_2M?EPn_{h@#oY2i99&<#ZVw>k1Q^NAV#ZM%XQ%AxDtY9HyuAibD9; zW#5-ZhsR7K(R4r!vvU1`e}0dJ<=NR)2f;An>nbn$tE8zb&5V2%)5WD|PGFqxn30Em zY|Kcg9RhF@>8cBL6_z@U1?_~H*WGrGQ#IzwI-ZzlZi=s_xs`)`{jLAhF&!;0x0o8E zPg?Y5jb)vEL2&9VT#16Qe=N(REAhONS zDY&#efrZULCkJL}7y?MH2_VW~d_;y3(eO;!kUF@jRlTnvNq&i`jEW^nCPFKgg`~-@ zPUlUwL-sUE84zho8AumL(o`Jl$7R{;wb-D>73{&F#Z^5ySAmP4ZMCns+Te@Y4MWCb zp{pjO68plwykY6VMl701oR?!Wu%eS>)yE%NWzBE)YyMSHuc-oeVi3%* zbAkXzkB*!ujeul#UD6@UKvEZV%5zAIn5f&YvQ+*agFE)pi3&X9 zY;XE^n}-{p)SRPbgt>_yPmlB_J3HyDDyIv^HQ<*CF8u(UdFR!^N7ajEaE~hBUSCB% z3*__$80Z4W_uTr$|IzRFpnd&@$?5q&LmrLt*rV_#>;Hz0=Gh5Bg8c3O99AARuuZn3 zKEPSi9P@x=gM;AmGkM1Qf0_3H@)(){kef39wravdd z0E|VliO!Z9+z~5s%2mTQLCGNnCB7qx{=x$$i5<}KJ#F#LaZj@i3I}|z;#>zMnnU5l z=(S_5O(tMxc7kFZif&k~qDS#C*)lyvIYK5nRgM+Ik8`73htv7JVJ`6i9K)cM&TGay zqUrJIiO5dyR7uHx7*?4{CZah{qN)1xA*qr>27LWF1cl$ttKn~QYe-k>7;uZH@FWhG z$`?7%didTPsb-4vfBx`zjN;fE=%#5Mw=s^%oyr_a(v1ChJ?yKZW9g*4GRWpTs|oRJ zk1bQ=CPmopp(=UQB&Zua$Tq{lR~S#bJaD(+Kn{i0dl1)ra+`FAc~*)xK-s$x_%`^; z;Q&Cvpm75w{y|{Nh<}v8$M)~yZ+;^rz4+ffwn&NuWJMSt@Eky`W1q0v0`w&-tHrJ8 ziZ%c-FpJwZ@W904sxo~9c&6cI49Stn8r#khysX;*?Bnj>E_RCJS=KUfQ7Ql2(n>2b zSui9LeE7D4O3ZEnFYp*v{fF%^lCJDaC>&B19<|MHM$WPT!b}_E$t0e{A}_`RX4yiy zFR%;sC}S!IHj%Z9`k}JJkkpiVz`6gBA6Nn4H2=zEklMyV6K3`2ilYj&3fnJ$?+z3F ziQu$US^Q-yoDF%0m&vn&a7EpaW#* zPuwFrPc}aePX?3Cuk+vLO7II-f-W~->A*H@UpGIOwFE&tuOf7~$fB=UM#Vi;UvQ zu1V;#iP{d6Iy93$aa{~ft~PY|i)msH2z{vsTdBWfsW+cO;5$g$mwSM69{dII8CgH> z2iBLs*To&eZ)^N*#P@6FO3gp;vz{~uA6wr=0!f)15ZG>>g!B8&Lekd)kL_}o_qx9? 
zP)MJWBL+Y8qCkmX7Wlh#{3A1*E`FvU^y)MqFrMsIXMz5ZoDl0HPqP9+w!u~~WR?s4 z1D-c8BF}ytjQ%QQ2&#Jg?bj9mX%BK_KQ@kd6v(K8%fq6!*$#|XEkVe+J;ZPjClgZZ zUN2R78gIzboj0?R&w>a@gcTh-Ahhe-S`BZniFDllWbwZ_!P848qQuq|B2!jeBTQHq zjmVL8<`HwCW(Cd=k_6_%3-Llt)<1i+4=NLo%_$=@Q8X8~qD^ z>o@9t!~5_SK86UC1XCTq2mKKh7Lo?1QJYF~pHyGUZH)!fV*}2U^&=&%DJxo%+|08H z%R{IERmiCiZmS6cMrIEcq6wolKUv-4cGg|;JpK5_iT(AMt& z9mW$Bk{$t`4#@Ph%Y#Nf%k$QmzQ8Mk>1ewBXRP$urk)30W}lG7aQztu&zQFBKln21 z6DgE>r;ASt#-7*g&ke`8Jb{IhR`@RNGkuk}%x-vTbTnk;h0KbnmP?>Oy#7lJP>XqJ z15E*F4V*~+)G?3_CpIFKzZ=T}iTx7tWt#4H)|#UK&9`qDj$y->W0sk7zFW!t9tawly)z*N_xnLq>P z+P1g>aQQcCMw~_np?GyQ6oCgp%TW}NF?D48*+H)(-WL&Z2*Un?m2y6VAWIq`s-9F- zVLN;%p_5?9u3Yj#$$*@dnMiI;I+a(64SdxCkPu17oPg@fkWQxSbPWJ&jGwg7M3XW} z-Ph@$^nru_M`Y7jzf=HmRhzDJ#CBhK2Tq>B7?4V^fKWPXR~IbXWUm{?h}#jK7p#2x zXo!C^Cq>41D^_t(D3V+dfD+!c@a=Gvwh2YX(^X`(kgqG@(lP*FW2%J zZzUEU8c1_su=t|5fl#rW)z-NW$G|~gMt({j)lgeDG+jAHXu$?DuP3< z$^1c}*it#khBP`!>a`s5&miAM{v6N0`DEvm>F`GE|B?^Iq-9B_j-eL@H(LL!bR{`k z98X)zmYsf_k|4na23^eTFJX4syN30eEN4iIF-zHnAjN@kpGYfOZ#)C7OCL3!yW2Qm z^oM=j9WKCmOmBo3l?kpv0dn ztcp2o#dr09!OMQ)ILpdI(V&J)yrLPM5C?wXxEb+`z@HIVzdTgK9Iik88)|uKRgDT; z{JbnG-u8>$e#*D$;T7a#WfRu+3i7E?t8YOmV&-V#|=O>9>Wg~8$W>9O|3FNlTUaS%T2OrEM_P?iuWlV z`;MSJYP%)g(U&~PrEWAPlMtevNb#=QsL^A{stzfb|51yplk!HWuW|O@R@rD2B4K|IB2b<&is(qOD8@wpBujR{UGX~9o6+kIq zYi0{2xX((?2tC3^OF;m2z>BDTFm7`O1}_e{lVF1vo)+%%siEc24-wV$g7&9YEp8k~ z!xt?^Pk5qIWnI%EiW{LSm6PffhS5v2SFRs_g<&-EC=DGp z3$bpF(ykL)4!yx12H8|rjI96RNAKMoEIy_v$Q#nb7v*bD!T@k<5G!da#QA@Bu7yPS zUep2`f9OnH)`?c=^*QWhvP;VkuEMB9SBfedUXRlDo6fcU;-kZ-TBAb?r;+g&qmfA( z`OSMHnUY-hA~^l7JL~B)WgEA_0I|XRF_>Bdv2*9q`d`(*d_$@6dL zzwd~{ukDZk=aPmudN+w9*t(Qe;{)+CE-M3N!UUgP8oSsO-7Dgcv{_uTW#YiS9{>o> z=$53IB+8i>6=S|7MYx&F~_yVsFAZ7M^VZA37Ynzt>9k`(cH$fQ7K^Kg0TH5Z|V{MmOyK5>Iu!IvU0WG65#bcnhNr zXIcWRdYi2liNQ4|VY>u)uf*vRLQVD6fT?&DNplB|f1HSoA6kF>WXAC2xg3O7bu$quPu#r zudSK;MK|S9RyPF{&gs}4#~P0{@I#oOOvc{0ZF$@q>7N^MB}0)+$OcBioR`D5 zuE@|#MSf4piRyh*XGl9X)JOC6Wj+9*X``VI&x5>0Lnq}8C1T^K47z1qpn*&TK-q~p 
z=76uzPr$fO*eUxtwobZGu0(n9h@BMas)d5#^cHU_$}Y@cdcGEqQY&tCw_}d96(;Lr zL-S-w>xcXXD;P4BVJNPf!uWi1JQ}M+I~%Njpspam0l)raeZ}vs!W7ME+%hTxkr|5E56le5aK%5K z{PLW!exH7GkGv?&yly<*V5w=L;}?71%KT~Lmd_$Vt;uKcIWNzJ$Qy?SWUT?cY2z*r zlDrls@7wre;tG0rJap> z`Ol8W+`#hw+`>|`9QH60HwTcBogq-5H>^wg<%{u)>D&`>IsZRde?6fH2h8v9ZY>Sn z!@EjA4zVE*UppRZ# z$G1Bx;^y7KbC|XOi^^~G26?;shSkwKn9pSW#GU2Q@A=R(wL$TGWf8D%GE)vmfS*fD zXH}MBYlBk{B`Y2;HjnQPs{tRgEp5vw?PG)=(?byQ>26BfR{;A8Q;4w38G-eWi}8_@ zH4XS8SOwPW!71H7D9h6q1}5(dR&)?lDjYm}djrJQ-f`D%^|uYybif_WjCKP`6qt5I zt@XJBcKXh?*?9eYu2l)V*$cv@R0HLpvqnf}2e3&3nZX`1w$%Zn0|SE247)7>rv!w5 zjF9KWoq*-={_s!wwDa&!fcts)BQT6rE%;|0KvyUzBV_$E|HxHv7f=+qwr0VI4N7hR zZ#WcSuMU||n+1spoW-$>_IVG|Wfo?>y$=>0UyMaIZ~+z(iHtkNSwMiv>S7?$&Cv&E zEAq*;-cEe-VXldhQh5xKs$sZKB!$(!e)};~Q}aj+!_YyPI+P|IZ$@BKAArZP+^^${ zXyc=gw)9w>E0-Cu2PHEazITx}@CLfN)m;s zj_PGmFhJ#6N3{)p&@-Z-TSvZHhtjwV=#r*cL}|7yBG&f$qt$xzHD|*dHKo?ki0nXv z6YwLnT^-IFA=k^OL58ZMT8DHe^bz%)$gm|MW)w@lK!!xWW&n($1nIeOA1Vi4S*7h ze{~OLoBenAzlE~OQdDRO07J3@K(FXJ9`L7tan^kg>54fL= zTmk+jD68=?#`X?iI|jC}kDC}6SUIHR91q4rBr>~p7$fLI2X!0kYPR^7f`Q+t>qo45 zSI2F<^>=;b-nPbggr}y3@gEXEs1ZL~e`=r|e#mG3c^i@WJpGKXKBJyCE6^U2LYPNS z->MGbKnxE0;J$hgT9~3XH|lcC>Lffrc&~Q8e4#iXqbHg4o0p?HP@0D*Q9l5?RhjxE zr>Ipyr*HILpbwZKfjOHLRSiz=oMLF8Bp1MJdmsBSu6D;JQO|&Zi8y&rR;^?w?h|ni znZOot&O)3)ICCLHg*7o(Pn;kfNBa4THr|+?XYs`~=@#JLdKlt67#mo9WIvA`U19s!UZUp8uV4TGQ z{N#*K-M}*Ex27oP+Chou6e9B7+R`$?W+PoAwgSNm$ETqGgO|g-Uf8dDMf_hR6Dc#+BrLFjem}@^)*CV zlHu0`UwRBt+erw~%8Lm-NI=vBKu~FkN3LrNka8jpYTr~GXI6PD_RuOxWA5VTQEn6r zA9^awgmB^!XWwM3d&V9~tbDp>>_OV+&9~(tGc1V4%qDA{5F?+d!k=8Ak<&p&Y2=th zSP%x(F&1(?au;95L!2)(dhhSx}y^4%E)4p3|3RT|=X z+S}J)X9gb7ZD63azK7HOb&-|Z9>2u6+vW!Eo~>q|Mx|oA$L*P@x2Swo#Q?0|0iWRG zoqnJSzHaoi_6dW*hS+NOh5dftq$`a-(alFJ|B8F%g^ThPUG)~~AVG+2Fv#$?SF<LenbIazI;OGko4Y0Z-AQXai6w46L$o!8HW1US75faOmk1OdTNMPvXC5HhP3v zn#I3u7&G!_0Fp1gvM^E1!6|vfR6R$w_jSjy4;J2$kms@qVQi$4nq_zp!9qcv9{cVvxy|yK$1ec{;*qO=7xY# z!ESA^uINoJkAen=6_`^eQ)Tz zCD5*{5wyns+w1#pEKPe2H%-iyyRXh`1VM? 
zaa0ii`7D<~#&e7^bV^c?%ge>B`m^MQd3GH?cCWXhg5P?93%R&~P!wU~bp6?n_NCOp z@SM4q0=IBO2E;)Xx`F3;{Fij|T&j-Yax-p&YY)y}CevYu&>G$frf73ns|NS4E#8CrJdD0sIM zMayrpL2CQCM4G0msTLv4Z2e(&Zf=4C{7wSO%fvc5mO0vPgO@BcX)jGy2!5FV_; z{GDD&pnhAB;&08+Exrs7Mxbpg1UFf!{c$<1n_K&puU`j$A72eF+O&LJAVvm1?r?-= z2L&5?gpTv}k^Q&)=KT}*8Nd@;19(Rb;O!R=;O+eY-oD?39lWUe-7&Mdr2!mS^|-byV-^M)jV3kge|yn?km6=l!c+KfK&|vAFK< z;D)*kY{8Zy*MTsj(M`ShZbisL_NfSJcIglN*$sXbOm+Vnp69W{D6>i~DimPBQWs+kJ4K#ue-&MUqz!c42Q>76`` zZ(gYg4OBF}T8UYmmA_frl4$8zFFNZs`nFywH16Z3qP2o z?8V_sm=hnYqQNoQJ8>-($MZ3whczV*I^#>oPTi%)SwwMVZ)uQ)Y#Fkid;#ySFHiOn z$`YQh3I?#y-GA6)U;!1fwJ$35H({i-mv`xPYxuSF`d=*}c7a;(O8^YK-$6~V%?n&e z*M*Pr(&1n*91cfQ3^TKWPo4uLpxEi7g9`$HEJIZc473EfK9^Sri_77(mu+;x+|8=B zKYN5$&7Lor{bGOimSFb2XVtKZ7pAB+=p;=xJ2yu4L;k%15*4xDQsv7z|2+#kq! z5Lju|K^mKV{U-%y4cpRkL(52meAdiY58#O}O)m9uJ|{O;(E&j4RuWg>X#IX+yQNj^ zwMm%_oO^4bvAvZRMtf`cg@G+22@#7KHID~M#AI@n&f8swZUM#K^}pGy;8q=kP5{yq z3&fv_X5E;it8*#~6?#A(%}bx6oix!+9i}ncpo^5lJ_sKKKx9>A;c!7rCKgr~x<|#b zvDa=vdO#wp?TFd>XFu{YAK_jw>}h(bH>}dLs|S}4#^3rgzGr>-_!&1@jy5%NeaTVd z+7Cj~TzaBZv-@_p--D!~Bpc<7F`N+$#-tCI4`^SEmk(NOyyT>p?pUz|V`R)SqZF== zYxFbuc3RYt=7oT(qa>?~fCy6uFTCGn-8GBKNHVH4hy_i%>Ws|R$`Uy;Jg3y&WJ zkB)dw00HmWO?+R_>ZrN##|npyMzDYZm?jSVUUbUAapXrcD>?X|Fe;FWOp|D7+|!i- z(Sbj?3h9dC?h1WAP==dm{Xp-~Klb(`mZ);#J{abcJL4dzw#UWwV1_D?`ZHc=T2g-$ z2cto%5Ga0E-coa4HM8U*m`q=k@0HAijYP@plmLP&R6ke8#g7*qU-24^xk4O(ERW;C zAY1D`i*#rpAs;dg8^T49@nwS*s9(e_Ex4)H$wi$cRebfp(&$8ixO~ZJ_LG!f(y(6X zra#xz_9wO{@%9GO58}(wvjAj43JUuoj+fg&?x(7VQ;02xSpYBv0?31o+nazn^ExNu zcH5*(KvvjxPRM=XklF%jze*+58umq*KZJ;yR0I3=q*|t%IuIE@5c7Tj5ooe1c2eyE z#cg|@vrei3=uD}hDCoW@spjs3+I%$o)!|#YNxx=x2KYRm_IN)H{rSQyd3I* z4O|gM^+1Ua^GlOX%3#8f5WAz@iZg;4xb-Cf!&ISoSi7;UH9K|NX{$V)%fKfW_%pk z1qY>eyC8PHCde|h32s`*6#0iO6K940^H=Txmxn5<%jwfFZ*iV`5(r>)B?<~8rr7Nb z?{nANVe1WRz2=U;djl{NBs(6!#E@Ijf;1$*j6_`u0ANtMkPy55sIPujAv1-gor8eo{ zKujv}S-V4EORR&zxJ}GlrQx9lR&R}$E+A6qNR}bM7&sm5fP+!v95`2t{p{50fw&8A zYO!YLYK>bhu`NK_#R*0uRja`e_qko0BM4~XJ5Q$JH5FSTPhofBYZ4OeH)!#G 
z6uV=OPd@{67KQ!i8x&-yCkdoaRNk&$#AXNmW_uGZk9fo}e9%bEd(M@e>SY z(s+mlfZPFYpe3-qqV|q&_#ovkXz@NnDaG*6WGd}CTN)v2C|da*&M+({-mTv*t~6{C z)5E+-0gL(qpPVu9iqw^;#pCTU>wR}N#3puA+$ms}Gzp<9ey4cIlyEhKbJXi_>A-ur zJLF~-RF7jzi0n}}OtYGzz|LxJ%?44mq#R`TxqHxTvJlOj3ftG+L)VKKSgjcEIAv`a zMlQmk;@~y~XSeHKDci}0D{x`jCX&s!wAEgakk05R*OFbFz|ITXMFd7-0IS?^o1pI^CRb#0!0RBa)J` z8j``}1Pl`WgCgNJ1$v6|*YGO1SkmXuipq&)Z02ulNIb?>P(ENm+R)ahi>W&!!*(@8 z?WNI=3I{y_8@+qjys4xC+S#J|>k#Jq{yKD-1&RtYMHySoFgEt>5=$8Rud-%da4^a8 z77aG;sO%{h-;fc5rN=um7#iTg<$722(JN2JO$N-D3kJoq)}Z)u@t`chpnR{tXnR0B z+p-~2rXSF@dRi<;U#Vd*NLqk};aR>?uf!fkVjZ179|^L$00j%z!kZ!0855~?7HttEMGjevQea(xgO)(&^*whuFab?0 z3yOg}tp#75wrv-lWXTUejBd#h8PwbB-MSYyyiW!D1uq&yb)R9j?t3@id4njUPL0jP za?BFQw`%225x}d!d9v~dVBneCE?QLO9yF!b9g05KpNl0frs#v7qRkA_py_gIgl|za z$4C;Kxd%>HM#_x0L)yG>F)hKIeV@hL0;P@)p2OADLHNd5kc{F|9+y#e03LU%8qR{8 ztI&fBJRCcnwYD85dF_HiZwDQ>Hk~c8Gq@hp7+P%rG#m4x`QN|x{u11`8y8?}-X*Gq zevcJ!(LEQ4f&NDe;@|I57wXqxyWlt;jMmJ@ND>ql>OifgxS zw%Km|>=gtZLbaD`Rv|o$kH|IlU1fFHGd7#;v4h&aLvA*?R$kApV7CRQNmk|MFD*9D zcDHuo!k|lO89!n9-95NN-;WhXZjr8x;oYNz?8Ldu*@=T)oZH&XP8>>Fhl<^5AG!=Y z-P+7esh+HxWMiig)x{mS!;NIaLM_R5!H`_{9qKqYq+QI|8Bp-T-DR%)b|EfA4%{6u zI-nwhVXgRRo2>_(L*KnT!)_v)DpAjhC~39~-^YP1+!_5@uE1E-Gm1KA3@#Bgv^mQK zquNAKAFu-WQKa0y0rVGWwYf90x$4<2HYS^UKCj(hJXLc;5+bYmK3ZwgO@w-Z5e<_bS*wBfuH+4qgbvNCwMXnhm|5 zkfhskB?@Kjvsu&Derq#_wU_c-rj&>)3pvU8k&P>h0L!Daq_vQpc+?gn=nwk{`kl2u z+^l^cq3M3h?%ES<$FS$!+GiFVSQOaeTK#S$ZGz~>y69c|@Qb$g+perydwpfK z_BAULor0VO)}cKYLI7b-WRQU-X$(q#K2#cTZuNZ94Zz!Fa*#2xM<(Cv9D^v>a=!D9 zK{hI|eSp>mnizq#Ypi3C_i+rK5`fo1^JO>&4ItTd3`VPCQ8JRwF{rZF9D^*U&~ydo zJtc8N_<6@*JY4+_hG-SHtV*8V$uAyGmCNE+fm66=e(iB;dC<&a$?6Qch@9WBakglL znY%jYbggZ6E~-(vwg;BcwZhVSU2DV80g<#qs~*|Ch>diWI!y}vKN9~BGM1cHbLp#< zVkZY3JNY7ViOH{)OGrY$=3Js%(Y<{FG^CC4^HY=YLEPM1rzR&i%G95jL;^N8))Vn@ zIO??f!I|Ebg4Q>Fd;q<{K+=ch%1ciYBO~EZ`g#PN_)xKonn<-?EA4jHYK_C8Efs9b zR5V<6&(SxP<2Rxsb968r?e?d`3Z?7Y?f7`>P$uaVJ_+j4IbLhIf5vrf87F~-CnP^M 
zp{w7n@okZBOTL}t+Xp{!jFkHXJvty0!_6OhJIoM`DLs5XBI`ANKqhCvKPx|2}yg0kY)E`O!09iGu)#C;nmz5g7=Oym8U1 z+a9Lsh<@}Qu3zX6mi*CKuIQ>;!;3qcqs8cMR=myvtiL`;vYp|AhR0^N$SG+LC36&{ z5@{TKLlSB9)q_vN^G(n~DzDs)05uQHt>YXFfJmwOc?s0!an#b2RkGmt%iev$UDxT?;N{&>< z*lExVI7Vx3pKtjda+s*4-GCaIVA5`kH_jGJ8l%Xc0i;xA!c2^7lPoxe2J+C$u=fP~&o&?k|)Lh+BILg>}SYxKN&na-p(d ztd9#tPiDOWIO8a`9SI~(A$d~zg~@z8WV16LkGC+N@#btkCc>9~J}qZ|o6HKekUK_X z116K>%adH9r*g>DPim_2{sl(Khj~A}z4(NQ0*s{} zQiBecX`M4F8WfyzxEwWAioGg!y|t3mK3IRP z6u{oBSfn$7UD~ngNpzI0W+B`(LKE>j$ka0~Gz!hx<&>K4>cJ9n&;~N>U2O9|G~0@! zIJbmgD}=uJZ^_Es#+to!Fr98kQ?RJJGi9B`&I6wBZD_HV7QEQ2B2AXnyt7qyHUt*f z^d<&Po3*3wKB4G~L052HlEQtOt|a+t6i2?-NoX+uNCNie4B}A+3pFQtVb07PSS9?2ON7A;9mlva56d z0qnKb6T4Wox2zqpAJA@giZUsQ9RL|5a*o(}Zu>1(W@mg7`+eZ&JXtIDQ5&g4?#w0x zuQ1+QG6_B#^C9@D1?iJ4GcyT(woUNUj^MK$?4~giDFpA<^fpwB;HO+fSq!)%c>FTB zOi7QzMs)=b0DrcZ!!aB(3`IL39=C9j;aid=vgZcN$^1_rk*u=ufya7QK6+w}g#NpK z_M7fdJs-C9naI(1In zx=Z9c!CivGXG<^efvrApzSRl^@)P5X9Mu)shw5l2`*IeN7vJiwL`aibk!zaI@Cjdq z)KPD0{`|pt7$4gDeEJMTH6r}W*nX1OcU7}Z_j%NR-Q`L%m?9Gf^`Gw@3cGV zfEz0`Hl+MZ_GuNQiNwb0v)Z8<2t3@~^wH~v-J2bcQe_aZp%kD-INW`(6rfGO4|f|& z0TBVy-6jQ`=)5?kqO|5VsVLSh3V{a|14mM?1Nh8+Brs5^~ zqHiI(Y#dBI z*Vth~W1u~lvxqIqIC7@npb6!x@SgplIjD+A`x0GmG>f9F7@Wa&!l~LWH|+aOwx2S; z(dr>z+RU67l(Ql|OLg4kS#w;n$yP?G$|l;G*QB}pA0R^OE?*+G5|f^qUeZXlkZ)hx zk;^udF*+Re#DD&{ZXx1vo+cs4KZzi~T$)V3lYg5?A{a=)MwK%s=-CN+|IPec5 zkI8Nn2I2te{aZ{?)MHy_0W`uY)kZc%>O+CQp9uK)*j`I}OH z$B;&8ert%O8EIakd!}Fes*IDOYCTgH7T9XxfW2Icm2B>$+?R~y$yiYn(x`!?^dPe9 z^ADRuu+6zwxTsF^SGTrdYkzDm7n!CF6`bXXA)&r6<-P$P@};zp3^PeSL=| zR1*oQJuSKk|7!ABvzqTDN+m2CtVAn)d!b%6=~G?Y)IbMnG82BA8ObvIV@ndYFI5^Oi|MvPpM++M50kby z*{&@h7#SypElH8d<}0@C@r?MOUD5Z6vbHKULl&iOXAFc@Q48tB_H?|Som#_N7PKnO zsNo%fI1}wdm`r{FYTS+#_~WnS8LkX^#h}7)x1g6LDjr9?Bl{n5LI_K?gIhP zh{<1+W24D*%3cZ6c#B!zI*2M=4U@Z8RNd4-|8R}1HZx6KR>Wki({!sWMMNo8Q(o)oNPK-c) zhN2uJAxHId`pGUmXR&%4)s8BY4Lr74ovVY}wMI3Z6+zsbgbhb#EO&JZU%HaNL8Y$^u0#GHm{S2Tcvf)nm3}j7b!)kW-)5|O<7eL z#o@g=yRVB<^ge!MDwDfOWR`_LW(i?e&`%>KmAv3U+TmZ3@}P#eNrkX 
zI1-D3T+^^hv51^U*6|Wf_KPW#4?O9s-#C_0HSCjOy$FYvjo4v(pfd_lG>yDzYT)(9 z^~k@WQ@n$*+;di4;BLyo_qoMSLbzse#n^aERX-8K=;^ZeR-h55o+#WI zO?-n$i&hc4GvtbKx(Rkv=eOV|T*AD4;S@yxywWME*t6ewMlr?Hcz?JetfSE#Jk5GQ zc)vhF++n5nlAX`GOt&;JPtf!L-D&J?f(0s-%v^8RV1yIL)MauZABp*!Bz4xoFlj3s zyRFGTj%Z}o=hKoFuOwM}cS|lMQl6r;e~P6`43S}4W1!z`Hl#4Bq*}Y|>xPn^K@DVB zP)|RxF_V&xdi1D5(z#xvkjUIBBo0~)6_S}+C?tKdypVjOM%BraD*iVMNx7|%THmOm zONR#s3Dqdd$mpE$N@9Jwkvu_f*i&w#!UUsleAl#8E5=vtVX<=n#P}ghvliI z(tcV)a^9p*>#AZk2Qnfc8)_)ERV8#1_JHHTil;>NJe7)_t=?^tx9wS`v7Y3m-q}}L zR53*hQZxC&sq_E;uDDJuDjsZA$hinp0}uh)eT$Y$mDs(@9=Y-ALrxZP&R*$-9O_fu#eBU&t_MN;yF>*>yTopCGMpeqwdi=LT-eHdwRPDiY_ zH);qv2QfttL4e|lF-quGyVqk$guYTnR+gF%6F>KzIYC~eA@b5H7ZHjlFJpoDMv<2o zrE-uGBlc_vWr@g3iR!s31MJ_Em$Aqqc_}HoSqmj>SW-R6o=%NZ-K7p_6+s{4ehSxA zMOi8vQw$jtQw2rzKC`I;X&0}3$vEUmOp!%a@ED78wgx|6;0nt!!PQMFfR=?wtp?-O zTS(U7r1YRK+5>zndb^}v4OMoR(#Wa?RSh;kscTbub#cOY-B}$;;zRCzzJK{~P{XKx zr4kwwu%t);BBjKppB8I!bH9+s)x@Qx(Y5Tg&i&6IbIj3WO1FOpF6s~Knm zu4rFt-&QP8v3nVOc{Cy&8PgS8N>5C685@#pooKw4n$eCYF%;WkX2@J)FOF!41+20I z>70}iKumNner(ytM5GwV!KyI3iB+e5{PbIcSOn1xvRR!U2h|T!yzFNaZ0(!vwHfeU zGOH0s)u8;OtY8qW7nbZ35=7Ag3|m^Zw-Ma3rGcJ^VQQ}|!YJY@GndJpdU7nLSbGyA zltN{a$V%r|g`!VyBr0Tp6-0URzi?4whN@3!dYppDTi3{i*Zjh+z4Tym8T?$2@ zXgzNB4JTnobg!F4Q%%ZFQAR;j@!8>$)1&AJmI;a?{fTPGYWKhPL@#f@Ry+Fgd)f^$ zt#3jraY@oN4pwYJ$%iVqZ#!-v=VXttxb>)S-&J^kO&NR>73(){Ad^~3ZCY%;(0JCW zlv};ZUZqf@*aSkErD76|rp6c9;O?dIe2sF)|4CK;r;P{w#&q)E(s=rSi^vNpP*g#$ z6jR^~au(BqQ&Az=?ACNb7^t#w$>lbP8~VY*L=Z`I+PgNLu}-DFjRbp>ic7RE17*><$VZP0#r_f_? zLsBMEqpSBy9v5FC5{AXl$I#{w3w@0rVT@BsN&7#UL+n$&?o= zcm{8$AdRp+|C+pg`7w@zB0E?R85X5vAgXL7nf%*^jlE@WVo>?z%lPU(A^8s_+pZz#AevSU|7)t97O)Gkn|M1I;{HDW%lFq+EZ zigxIQbpB$FwXj}Ad)=yviJ&Sb0-8|GhPvJ5-?QPmnD0D^V1YgD1z#%0hBvaZ`t(YD}DbxZc3eD5%)K6Kg z5|+GCv1|$rU8B1v7Mwr~$8@MEf}l8?-k|%ZVl%T;(U6sxRWQXAtDKt7+Rd`wus$)? 
z;6t(M+5KO=%;H z%Y8s}Pulbx+QH^r>M)t?oMbzsi%F#YgCI$<4`w2&S9BqIP&!VYaEOF$-LqF!rR*h| zH2X!R;>D`b>H~QcYw*&CJ0@c`cr;b$9QuGz8;vILYkh>o6}})!s3uR9K8SlQqUb53 zAT7zm!3HG<)fZ0ok7G(7J!64vx}*n8&+=koO0>$|XZRKH$_&2Tr&THN-#lZr>CE94 z|FkM!CDUJ8QkGY%Wn9d(Gqd6|u`0&Ye_Ew2ceRZ~T2GL8V|rfG7m#apWteVb2C#5d zeO`m7@i|`nIdc*#vLTWt=>LeqyUYl@`bMM*%GN8!5Gfa4 zZMUe-Bte!(sGw|Oz}7ACN1?C+`hsFARzYp#s@Gs&AM?zN5|DKhK@d^Kh)1$9te?q; z{<$U=1k;8U!FF5Gkgjx$T)$1cHBc5;LbqBf!t8sM%`j4t*!VSC2d#3*j@}4Gli!TJ z->j3>x!XO1K%AsLltz@XZ`}Czab-{*EJ(*P6L_6PQO-rH|8kB0>JxeYfdHt$} z{c3Ad=}e}gqN1{@)*r5V3|3*2hbc*VAV0j_t(gO_HSQoCA*ErzV-t1Bki|jmqE-7` zU?%6UTlY?;gPw)f%$q)wYVoebI&T}d1gJXQQgucwDEs7n4w8uxQZMEKe;S>l4Drf` z<$`3&v%SX+dQ|cTUkXAPG*uq{>C2;WI@(#>mr-Z>^dV8H3;pAR3lhjeWoZ<-P2WC+ zrWA*}lxkB(q#J3r#|5vGpb{EWk(8?tO#G1>hz;&eFEM=p`4-_+gfEd&O^|HdH!XLh zh6)XVP9m`XAb05nYwEeQi%xo!Y@LD>iXEGWGXD&@LFA%mB=bZTEw=5pV7*GuWEq65J}C)BH&%?YRi#IbQR+JsS|qFrrrVIYhO;M;tNac= za%D?7nlibssMh@o&9tX*;*DCN8GoaRp(oB4^0a%M3v>}5tmfC7nDeuME5*g1^@vvxSQtn$dwX6YO;WTKkBX2ci;ezMHEAS0KPyoS z@8#AgMJef})H1ii%X|kdW?K*Ay(eY6shAyBe>b}HER})|Om5XYsoeK9)_Fmg5mMfIpeKF`0N>6Y6z>kpH==hUXC!ggXp*7ZPsc9(8R*M^DlUSZfalV!h&qB#$m^$-; zZW6MLRed^NOL4zh9@tL^9 z8B+m`I@Cnq3l>Lt(5;m!o7vZV%ddgNT~i^PL%$RoCLSeL)re+^deR9g0VYu<0C6dp#Zp6Oit-@?xFOqyJ2a$WbOgpHJjEY5Yt|HB*mTazn<%xAJT&{NDNfF6;t3dYcrNCn zHCod~kyL4UEkr;_Wx`JhW8v1M5bBT}#d1&)iLsWhmfB9`Hmf%%CLEXqAdNNjFeJe2 zk5VzG_=l;%I0?5~c0>qBoh4FWZj_2j5+p;@8B!oNdC^dUYoZ-}jZrp}>bRx}mlaO0 zJ)!=PaZkgU7mUb170-vl=(8#u)Rib4`4U|g3zx|r1|Q~Lt^q&MYMs=$pvYMk8qM=lL6EO^rLS$ACk`hP~IOpmL*cFgxkNfP`pca;3r1FHe0-% z3*ijr*AGDceEfG%L!QQpmv#Tk@%_)}{%2S-o46m1TiN}$`TLD6Snf@;Z&t)ADr8Lf zwx0&+Adk*t_eZYyZ|PW1tE4+oBshspw5Y?#zsPQ?>xDAL!5>rzOE+v~u=nIMdry9J z;(m8fG)xGcQZ((`#D)q3jAHN^*~pD{2OWfEVV6qC3yRt9(ZC=K>z z9}D6ScKJ%b*Q1IDy+`S@Y#ieu=oyVUX)0Q0m*DP2~!P?cLGjE!T6bX-d_gLjNbE=j#CFQm6yk!OpA>{@tKKrc_E-WyTIvp(ltU3IdTbTD*rO8& z-E|>r!|YQVbY|-jTKK{qEJQ&CQORa3qHcYoSsSy^RdvcIO~e?hh?bXXOQ|!th_$VZ zQ4$8Utqe5IOO>BJExI8}efB!N%*l8OO1>p)W0EQnrQ!Ay8*Jct>wdacSF>fDT>Cf^mO*7N+rAxazfQtHh_ 
zO%tM&>l7cbgj-9^!k98Q`;kg}#T$tl)hDG$#oWQpUlO0Nvu89;L{?AI{*6o#zfY}R zyCw)!&O|G2|3UGV?Z#y6f{BNAvlgatGlPx4ne3IyXFU-c&K-Jn;||lO8Ba3190mzv(N^-fN5W=$V9aqVmHEigIynvx@BrRCn53>w|c#!+3 zFqNG5IGDKje#lM*z8{j5hJ-{{Lzk_t7QS+X0DGErtYfc@Dy_3|=jhI6T$F66vneSp zlef-R9ahbSnh=}W*I14S5qf3kMpVW;3))mS$r$PXx;C9fAFo2jKO5zIM?!q1Yibse zHU$g20cygB-X|<$#IOu0r6`t~M0y?dW7{el*$bMo?GxCwJ7UJRKuhaR)5P}Kk^8kp z0{jkAP&FbwRRS%^GU+&(6k;%{aUMaobIOWXFHa@N2YQ}BaJ5uaM9_?YxhKpHDl2t) zjr!!CM^+Oqfx+_%sCyKz&>}sh?1cc94|-N9ks>XMlP>bV`=1o4M6?pL}*5IUAazN9_3TUg;{K~FarOLQq z8(i-jYMPS!n&u>MUtV=jF$wxlYc$APrKZq`WNjE~+5CZ#B&&7IXN&ZbtXW1et64}9 z^&Sb)kfCNx?L~F`Lp94dAgWXQ788J?%WBs4&Vw74Wkw`Ys?~I{R;@m&xVlGm+H;WZ z$t^lah|Yh{l9Oa}7jt6ap^cmN)^Yqo@}h;6_ib&5U+aQUcX*qEzAt7G*5_Kip=>V-|> zk?MpHQLN&w6pyq8$2dhEnf42gvHQ>xaC+4%36yvwCL?{Ttr0_#rd$bTStohK3`UN- zG@j_mrb*_hlFyaW>#|5sh-e(pf|!6QdLS3E=>d*5Jp6z&PRhG#Qs?R+j3zeBGB^{U zAf{f$pbBy`s`az~E6uVk(OQwaijC4z-sazw(coSOLzMbN4l`t$LGR(qY^TA>IBcco zu$mJnwahFSyXbY5HX6|@RMr%>J{TLFpY*5>qDpmDn+#QQla}W)@22Bw8d4Oe<`$<$ zqZZ6UAfCxHpan6f8u`J$Az;JLzJB?vCdA|vzM}24pZn}}@tOyTLgb&ZjKW!M>7NS~ zHpdE^9Zx;R`pDR@^9Bl}rGGA;C36u!-uojve77B5o~@vTvmZ_)zXgTq4!$^)I#|3w z^&@JtZ6x%7RPiP6RQ^q?yl87v`J=dmu9S~_bMi&6Kb@+_vc(3~?NFS4`FawJUzsO6 zr%OC^ejOaWIy`D}Q9)Z&%lhRk)W1DRZDmi2IQe(~BrYW%f-oM^B?qD*Wu)0n=6M<4 z|1$ou5P0q@$`*TjE2qK3`3|?r*8p|%Zs{gM-C+5y@E1rEVc*Z~d4xK!H4o}h`2-ETY95Xriq1eS^Z zDSTWxs`8_r(vsejA#)O)yKxt z+v)`rU6jr~dyZ>%lO}lX81dZV^Ttf#n$4i}T`-m5JE}`7x20b!g!b5i9jY442t=Nc zR;!BDK*ID{O#{u^gPbWMXLWaxGfdO~rSjA9$huZ9sCu>08aFFpX*pF`u2d!yj-^SY zBkR`^Ec5G&9U)@u(;PIS#OPn8Zh}}(aFZiV+9XixL3&nrBidvlrY>-ovC2gptAbZo zL)7Yh-l5LJ>M$ZxC zLQ)Z0VX6A{)sZW0T8A8E88DU-qH4Jl`LOmgN;5fMD@&`$3`rOTR_~ptxIt0S6aC|K zrJ4%OpH4P^pevh;7wJm#0Sd)4igcw}eX%Vmtxd!Cr3rj->-M?XniO`Z20-mh3&fK= zuazmTu(fop6C1^b^3p&g#fH@qRh7eHr>)e5eUtrGUs|O~U$@^Pei%I{(PVEB@h7?& zYKT&GFl51z#BX2}5+1BK+TtwH5lRXzG}09rWpt}X3BPz+i`x#iK>?Qd$CTa~VPnO> ze^ti748=rB%uu5TUDU>_eeJVjubn+*PA^%Y^RUIrz8-@nRK=9`kVw+3!^soOkc`Vt 
zovhci-sw*zE4A`JsZ@llOe4a*&6LFja}V3lfOh(;X*P?PS^7o7T7aHl9_GY=~+ta>|s1Q`hWFeAc1?O?%-ydImg>dXp-L?9c?{N|6y4 zd@Q|~!F>q)C`ZmQgMQ42jfEo@RyR4R=e^)HcTHH8peElVTnxjqrOYa4K408!+1krh zSrW9^OXi`t4rDhVZGH4Oe^TVuqJ=Y=G+XN!WNlNR$&l(rHAy$u%6>hf33DRKWQ*D;Lkew%W=9S)D6i>2AyU;;r}3q> zw8q~U2126UKuk@ZVKPBV;Rzuj207$&@=c!j>{r&zUz9-)XA1${!uK7D(e+$5MW{!h zq+}AIuo8#88*NHQy=iW(`|K`z4J&6Aq*|d4a$i?%NrLxT2rZ&Ifo5qLt2PZXS}2QL zpir?c7B1-;!z+t%*ur6D>+!lQ@ExvS=)% zg3;1&Uz3zoREf2xS^D;YGBPyb5JAy51`AhjS zW3Cf}_Lo?|)QC+w#V4~^h!BMa`6vAX?<*wscron|^{K_jO1nvWHgF5nqyojpPOGRI zDv7zGJ;n5{)mC;9Pj!NLye3WCO=wWH!3$jkQ_Dbth)Q^we|gF*(oSSsQIxvY>(xW3 z392CsZL#3dSO-O0X`n_5XBFTB=qwmBDOn>19%j3K&4T7xjI>Y)8R5*xqFyBpB*D;% zvH|vt6MsP5dRq-iF-evBC$v0k{nYYdDK#~z+v26vB8*sVVpOJ4J*H!kvp$%@-~$m$ zc|x{S$BB;3@D_J$n;Dh&XQ-K2H zjl>B4(FT0&CafoN>|>54dqRUA|%m- zv@i`-WE8L6gla&QP(5Vrss^NGBsQjPHf7I6d~85Iv?mf_%$L1ElUlpW7|=RMQj3l? zp^=(Zygf1IX(J3x==UW~9y%M$-?URp1>@f&M0%za8sh4_#IvIlI& zjOzn=OC>EUk*xpa&!u_oMreD_i)OUXn?2r2ZZ_4R3lcGt7^^4uc%qh+*pwO>q$2y| z7<Z8R=39VfZWcfzm0~bNnw-fk2sSo#Bwn1T*3jf1yyLor!h_NUNt9Og87Tet%gU}^ z)iAE2vbtK?sjjG~wxa?KJ+o+T(nm=BndO7oxh5;HOCHS55Inn51uB2GgkXC#9E9&u z6=mU^BnTBKB!nzy#ZYpjWeeTP)6zrGAEELVV-MM~4T|>qXwoM#qB##vzgC*X{AnqS z5+?&W&0#EWrj81Q{KrZtWv`d^wY?`5F=RjMf27xRy@#ZUYOpz1RR3#_oUB4+II z)nM-mKDxOSlT>|PnWx6(+9Q$*!)G)6R8I9}99F|U1D>cS6w!=bj*X{%#qYf)Rd&@S z!x7o0hzyacC~FawX0MOWc?})$E$w1fm79<|TN9H{6s~mPH44we)l6t@qO=5%Px!7y zf#OkFlQH_7NhWMBEAz2~qxng6P*_Fi3#ovhz1q8I9jp6NG?XpjlRCr2gGlzaF~xWG zN?pl%wit}aj`>WA!@0`J%8DwT>R>%MxsgD_u#ZeG?o(oe2^pD{V|tB&aS^%7hT%NR z&TJq>L)>H|ZB5xg8s9)njRoqWAx3z$HAu61zQmgv$N+w7>(#{8f*p_j%9793>l3s) z`-hQp)dI3xn2Ru4q;lU<8%TYx*Y)gePH+9L5NTimgZPLig`@Rx8(oaA=N_=uV~~i; zvDY+EEyby%)uT$)l^8YbF=fg4`I1fSs6)lD(WC;x3Au`ih90F$o@yjiF}$m+CVlRa zDI)J};8L1ZuaSlLL3wNZ_(AkXJ&4s|JczQ{*pwA(xQS0E7Fh$=Rkf5H(x=~9eIjRQ zhcxBV>OC%O{G>gkbTd-wAtFA^J|+?&3Kw5!L{X^nA`b@PCIm1U#>PyEC7+_r{aJ(H7yUu!KIQ+gK#f=kG!kW5jy(LRAZyMiRG z22B5FrW>k6bSkdqch9;E6RlkN7e$eGBpud4JN32tj0RfE2!^0IP0pwE~_%*NyD 
zLv+zvMo$qe-wf+f@^zzvQIPvN`7;%$HasI>EYk=TJgFdJg^0Wn`(Y8iX8itELOv8i zyt$1RpXf%ENYanm$1JN&u&lhzjbVYsx0p2!QUkJSi?jQ=9rBMY35lb{Pk<3hA%gc~ z*6(1YK{ZxseIym7+0w39QY_?g=|wgE4c@eOLIz?rLq5e=M6=sUo>8SJW?#FHzjpSE z4X7qAW>CwS{JRO(UgTX9NY)8T`YBc4N31l~f2aemzoq^LWKvE;H6bPgYut!fWPQ=7 zQ{IBoQgWye)wk%Md|L1KA9gfepfN9#3(rye`W4tv8*`T`+n^-7S3qRcZpK zQqypOKi>QtKU*uNy(!FJXSdKa+&?^BHURwbT!#4e82~d3`uLM|-8Lcixs7~oBPnb& zorCu~twM)E`{5f^lToqP^aPE#bsIGWR%6K|sJ}%F3Lx_u_iQpFKk2H;thZQa;ESvZ z!K|C6+0omj5wRlMv?7&%c8YHv-*Y7&z=Kg1{EgIJ(^ z8I3)e8#hSva!$+*;B|Ba_`#T1Fc8TR<_CT871jsh9YYHHaz+56Sb{($R!Hvx!RNjw zDuj-W;5|E*7|Y5-X6W>Ai^(ny3<_XnDXCx z8Ellf3^ACRTAqTKt#M8nWt_8|bB!FqnM@?(gwyh2AxGyGhm8dUG8(_WmVH-6DjW1w z>c*Dgz`)g^y%h}=mAE+TT6j5g6c?}bE6@;s=;(R|>y#LGl3X0+g@Snx%na>HrZVQ@ zw1bS3)^1g-n2Xy@5*n>m8CnhVx-_eL3d~$OXL0dbzd|nN;#zpnlr3@bDsoe6(|5Rd z)oOYSK3a(!20oghQDG%v{)V)smju%1Fte*9Hd~FsCV4qryfYhf@zUM84UJNfi)-~D z{vsFGCM@{Pyr3qnf`Q{Na&c|Vg1^Yct3=EC=10g~6$~7Ik&9<&G|Meqoa&6ZII4MZ z@ldz5o{O6llx%aOSq-R24I%v`BQHr^x5e}TRT%*oMVP>!wiT5=JRa)pV|5G-EJun7 zBOMR#8KT(!&UbNmK9<(v7;gg$6ybBt+yuffRk2xDk>NGGSMsJzi7>S0qBy)u zw<7Bc(qM+S;vou_uEviwqfLe(nArefMjxz*iAQ-f`-ps^4BFJkf;uQlsGl%dD9)NR zGsCn6b!552QwOUurE;i^siSnaZX;Jk>aY(7X&qNcT^r_by+|E0b>lBmhwWe@#ab~w z)KOWY4t?wue~~&WMIE#P4ocQww!)<2sl#T@#dL(k|fZCPaKS)OarcM zhpA~AJXsbF|BhgqF`A2-OWTVO8mMI%XOKe2&0<-02WEvzc6W+R8KTrtD3|qZU2Uty zoSPh=j<7dJr0_%ck{V!W+3sK7;+oczROElAca7ns+nM~LQ(|by@R8d5_OWZC%KoMd zcM&eT2Wg}&jjGXU`h3wArk zjneJ9(=x%U1#6;|Qo*W!whV5sj%0lCBxl(ats?t)N^+`3dXgS-W`T^*^|`!PITJWG z1oGi$vd}L*AkrqEg(fTbP)18(rPPr&wGrjF9Iw6W#2YjTi|?fN@zt%(@3cGPbmGfs zL8ZY<+#uaSDnD**{@K0WP)x6sLPd5lD$0CHB42tIH`{9wP7RFBPp>i2QuZE}y-d7Q zO+-+p6!dHyWS_*wDyenI*>8V3gJllQLH3IqGuODQ+?hQqI(s??*SUPpI#=lLFRXLD zeTBB}LRVMsvJ;}tuCDf#g|2AnP-oX*XHT@Ou&kp!>g-wGeuX=vWhVcwu=qVg-Amj1 z+Xj2vmagEgp0$Op&gGF0jrJC$cWrzBimu+v-Msdt{q2{#OD=cwI|qi`>78ra2io$h zhx&)y!l8kVA-A}_YiQ*VSG$)Exz@r^+w9Jsws{3YhFtehSKC16mF;bVQx88OiqkwH zB9qHjM}uT_S#Qr^`xS$(uzdMH7;|7~=|32AsHgpkzV>B-stdAf>`!Z&!Kf0wQz{b}kzg 
z^^yU1(SU0maI**8ApsZqW)xr!ay6ORM=Y#=y0gLZJ;TJ1JdTdw1vHYsW03_VR358(q=c?|OQRH6N{@;mLUW zAa&5&6ZQ7B_ft##ZfRlpUc=)yBwkl3HYpknxg_n0sH{Hh!JKz=(gWg#8tCX9>RKKx zZI2)lXr!NhQRs=fdP{MVsdcsYtQ_nRG5_Z=lFxB*>kJI`clNA|+6R^u`sjagKHT#5 zu1;!|=XVZ7P;XR-28h^|j8p7S%es2i7wAaI%kd*9yrsT8k^I>m-R-^>e48)p?d~h| z)4z-}S@jfX+m1GP#$ab(*E;nBq88ayGD>I9kaeZkxZcgnxu z3s?8Lw-$Pqb@aN^di#32dspAyHGynO&7Ph2T1-Q&U@=@xIBUtp+l3)+{ruPAgb zhnn0FR2iLeaK1n{ZI?Tz z3#N-ZuP{h?Tn|{(QCJJFa_4j{Z);V?!}+|So|W8x{jgB)A6iyWytX{a`Jj&8 z{=z`Ft}f{9S!vvlv30;0H&s#SYHKa@4|XWR-1gr7l{%TfZn+#icv0_4JD;G>*TJF`^W*w6~YRqsJ{QEG-NcbhdzO(e-o+3AhCmH994l5gmGH z)FbNcR&Dyg76q34Cr1`Q{fSpo&cTz0;J!1CZ5upgYR}=($-3{fLibR&n@vCLgs*kG zd`EA$ThF37EYu)VWwL1A6{YB#sPo&Tp%nXBER)r?PtE{zJUg^n(W zWsipSp$iZK+S|_VTwPc_)Jbnshe22w>~wR7R^j7habb0Tr(3kHt9P)oZDC<0o`nqN z*RBrRF|C8DqKv&Ok@z|amv^+ygVU^Z3)giP z=*w60DDK2C<9U7eg& zfL?U6h~dJYT?7FwEnHdXZ(Foxi1E)Yv(aDU?NATnKf(sRw)lh-qHc|KQSYcQgk=k9 zEp&CS>u={8l&8VBSi<74?&Ux~QYW6p&>N56+%quPc5#1i@8G4=_C^vSmDi1P+PcB^ zb0rG6a|R0hpWQ)yu5ND+ZCx5ihhTb8VlkIN1{r8yGt}O*tlhP$DU9vRZ%+;*blhT% zNVukkJ35!U6-d`TgUn}Cd!5|~=#rt^xnNS_Uc!<75%o(p}^%K!;F+G+i4FH zj~L;)xS$sAXfO1&EoIuZdY}#VJJ3OoVlW-(92g|`>yULcsdMIP=J$j$4#a*)8wJvuHm%Brj zyPnH?`2Ht80$p5C_L%!T>+^Us+bom75xb}uuDW$rw&Kb_3&Drg+l z=^Qv%ZyzV;s)gf`x|`G4>-wR~5O(GuJ;YyLJk-CM)49Fexy;$fNitpQx(j&Eq~|lR zcWFem07lE@y0D1U3OLJL>oT{1)R&FsPd1(FWJ)$|nLA`D)Yx67lj)H9TD@QMQ2_# zr)@$00(Z$<-6e}$s~cEYScs@awI6+ik$e7v#pmW1E;{3!a~HJD443wHVS(!^=r&zD zJAZC|@uKri%eS4DUo>Yn5R1HAZMyKh`SU}}qmP)nmxQLzJbZNIOHOyQ-F(IihJ@4N zle}Y4)Z>nqTzGQyrsImE(a_+EW_)4la^B)I@L8N0#}>u#H#vr}E8Fc(*E&N9p~9eP zO_cO!gtZ>c?3Q7G2h4QW%y0t?(;AeUXDExYZ-%?3rCjV&I;NM9H0X@};3u43ZW947 zA^J%Hfde5>oi#qCshUrTJxQUG_~v!pg)92{+n2@irj#PFUo}joyOt^+{MVDM4q9BN zF0{C1sLIw|+;XXkWAd^Om{A@u(+9ZeZW@aX5$cX~(~fYB(*8haD34%6Q+<~`JPsBE zA8cC=UnswV9;3f~`RGeik#tP*ni^!Cb5SxB1`l_g1AXe3GpF|VUOsR*V{_Mla+^?@ z%{(gd*Y-#Q-qSx@3Z6tVN+99wIoyTmz|?L|%X1MVF0OR-9xeiz+R;e~JCLVLdpK1- zN;+6z=$D5Syc8JJslean&_=53Je)S1MB##eM+;WeD4(9Y$)+Ivh8Yo4*!hzeC}Dyf$6 
z!1$cU#ixw-Jj17_XuU&&ZN6~>p(+poryr$lDHbxxn&wl)R+6Fe`;?OtQXA5kj`lV8 zwH!`jgT0LWKB;6hGb2fhk$j;0;ldLGjoG z$&^&j#B3j&>JD}`OAyCuiuGjH)0#}NiZ;mh5z!huq*HjI&KVUuET##S-kc5g@5M@Wr{0z7{Bl>1#rlH8-78@ zzj?>K9sl(fRw~;K_yrx`^^Us{|4n7_-huz-5bv=N@16K>DU0_m{DO{iyyI@gFX(v0 zJ8lDhLC2%saqq_e9t$hw@4fg19r1Jj1;3!;DM;z}n_&-|~?{oNtWW3Mg-&PjyPW)d8@qXZ=ySwn;T^8@l z_yrw5@{aoo{(H*e-HZRevatK{3&(}~ z{DO}7-iPrEx*jgMf8|6--unoCA$jkk_=V)X-@z{=?|lsak3o`0`~<&nCpYVg+@IqA zb6LE<;1_hndHySYA^F_r@C(W3{tf^0Ao<+C;}>+?7M}eAenH2>A?!u`Q}n!NKMR~1`*HL5PqTQX zzAziVpyT9_-s$)S9do?n=HM4}%=L~t1OGV|R!XlGzc3{{SMFQz3zI{b+;i~@ZwO&> z7vUGCg?z|ej9-`;!sMQh|E=KY5GMCR{KByzOzy?_g=0dP+)MBa=X}~1Lhcg$LNdJq ze&Os8Pwq1O!isr-n$yV@WSCf z&vLu)3;FO2xxM&>WO{4x3$KQI<@VzjjttM0JA_|2B817k8o%%#;a<7J_=Q75Jh^Yj zFU$^kk$WwEAsO#R{K63-p4^-83(0uzz%P6)JVWly_}>W*3t@6^!7tns!sNaOzi>zh zllw9J!hYeoazBn=7#G6i&dGQz4)iUzkFWud?9*%UUtSjWPx!9}Zx8p%U5{VTG3Xt4 z1Aalrq26&f;lDYA9TLKB!7u2TW&auT@!uN4P7Pu2#{Y>B7KN}+;@?^p_8I)b4@14k z{Ve{^gCB)3xp(6K2G|+GAxqrhi=y)K6KaXG782oZy!2hDfEAh^k@C!O#3UOb? 
zzpE_XtN2Gkym(A;70!(XI$FKsa`>l*uqcGJ;6DfK4C%{l#oq>!U~ak6-v$2$OpQe&JIgOzw^N zg&&15xi{e#zWEMc2DuyXe--GM^*5i#`|#gi7Pjsn=4_ziW*=AX)%b;9H~28Q@4_z} z7vh=w2IdPO*}fmaFFY99Qtrp`3p(;Y_j%cff5gH{wBe@E|3F;ta;x!=1v=vT&*9&% zEZ#)?2bIN(@E=wdZz}%jW${|@zqu^larjRyi#H4ZoU(ZH@GmHf_ZIv@vK))>3(2ta z@C(VXx8fI)VHe^Tl67z~ejyolDSjat)`nk5hAqLr5+t9|fxovbULXE7;4FW;x&8Qu z!1*Ce?ppkpgXJMi?iKjgf#DD)_e%W#1o}dl+^g_k4Xz1ca)MGf_htOMLfG3w*em!29dUUNZA7jDIzAbA$r1Pk9r>U6dOZ@qpyR6{Y$kp|M__|) zA%5YP;hA!m;O`Cb7&7eW!@nLp8{)~m7XNh?uf)UdKA3S3=s3_jZU_F~f?tPw&1H%8 zG@zr!JFW%)WkBPfg5)m2U$A(kXDr1p=%{(p$6bbB&~YGxuiWMM1sw;3cNI;{6`~pUT4iivPK?Ff3&55U?@yLAi(F7m{Jk_=RNHtlOyr zkgS(s{2wU`Yr2DR1|;Ln#NS&MwhsSQWnow27j(Qb0jf9e)b(?!+(Xcrt{20l%Q*_aW>q{DO{+L1z9U{`)PgL|5O$|E;pH2k;9z z;y(8venH25;Thk?FX(tUw8x^YjOSp6|xsCM16RbC)C$bdaQTQ(c zNg3T$q}*XZ$J7vSD*ki8(}5qz-G=}BW$~WIzpE_Xa9zrM0qB@z|B;9B{}pTx>B-$V zG3D+DIwJc|Jp4Zc$^6agNx3$lBeMUL9shcejOTuva*aSoQ;64ue>V7Oc&^+Eu4&w< z3y?oPPUYkXa54C&I56RsU%#4b*2QrzO~f00`mAo_ICz#h?n3X*y4AZkeapM6UNm?3 zx@mdGv18mabFX{q1Ln3r_E>&{E^wq0xaHqcZg1fid_U#hZL#-^AA-$E^IznvsnHma z1@%A_tm()EfcWjEfJ4CH;7B0j)iK~ia59(;=7KYUYzK?Lg`fc1!Ah_ibb~cuJ-8OU z1H2Qw4`?d%G4M$svi~f&3&@sqKllcC5Ih9F4^$8TPvuwkrSd%AO4#1=y_xW2-oJ*w zJpYdq79XnTeZhWUf1o;_2;$=aPBf-Z0+Jup9yW!+O&&4bzcBZ<+$A_+qf6o9jfvzh*E$(>aty=n4y^HwCpT}a@#;<^taDGF| z+N|#w?{bSBzuG6|=$a$t;n${pyg}k;;>frQL;UjaTS#3rF--hqzdk#}FAuLGZk$Ku zPkEKAmagHaGKlim^HaP=W(&F=g;_%*wQ>Sn?qatDesK+V_;M>99dX)m`Mv)P(pX{F zugv-U9(?TR^7Rs~`X~?2QVx}`h4|{X#UqrjJp7)J{&B?Deev|1;sL|MKt?L;6dIpQNvrkbZgiFGBo2;wRhd=@7p>ygkIfqAWibh4|&+FNFBR zW%>DIh+iI_3-LD)KiQuuLj3aZc&-xD&0U$4yBGX`vHVE-=FzzOL3?*jNV%_pDV!I$ z{+4XYU61=QFlij)er1XJ#5;}$CxDoDXbg#ulQ_|Matb&V%mR53AG&@Tm<>(`bHEuO zK6HI9(Eam(#-o^5DjoIA2o%67AQ?jAl5l^-+yl2CD0~1{pqQp-7iUno#v?)N8@A)h zCLDpM!B4~a&v1qNVb(iwe+hK&uW*GkBIaede+LSG5qEbuAHfwg=BVRN2mHC2{Cjga zkJlz}>&RJzT^_=(z!h#JJYIvKIO_=$%GVrdoq?cr25-l`9`I-D3U1)!yPV&Mt2G5m z=N-5=gU1MeC+;mF{9U+$#{OGzg-1BwfGa%8`MYu71C-8taRsFluL<}b=kFs-C~*FM zTtQ>~2XF^aGtd3-$t0AHrt5%X~3V^?QiGghn(Mm 
zE1SLYxfxeb_-Alsqvy};@}CRgpU2$>6#q`#FMuBq=Pum4L-?0*g&z_A6v{~cUG>BKg1g(q#{ zu??JPX7~O}xc9HPg2JEUpP+PN8@Iy$&OhNc!e77@WY2yPSGH^%vtLJ?Uqg5ku58x| zk8RenSI4$$qKur(bwT#&*fuS@bkZgr+oBbJHgN^npJUsz_(g1U7N3Z1&3bOq#+`{fY+62e36zM`i$+QI*;w6I#1d~llIWq4k{aHZ2uI^B<-HV+%J^dIg|EH*);{(GiBct zl;@;vGilRI+A@= zxN}1IJY3lj75)}nLFp{Q6?C4o70Ny+DEvZPLHAyaE9m@ETtVlt4N&Jx2wMqsZwGE~ zh|`C=2AoBB(#Dsx?Onn3bzqn{Nn74kgk24;A$%D3MxgX>!o3yfxf^g}TipjamyJ&0 z8*%RdioXf>^C5g2?wuk0E?l98IA6iN2k73|<|cca>}-PUM6r$S#xmR5!(5k5O}s0% zr78Xn!X5|8!_&Aw07^f$naN(Zld#xUCi|E$hWNk1m2FJ%W1H9p&SP7cc-nJZ7nJ`O zabE(%gvU0ow{tE#m!SL{+URXu*Fuv=a4t0dY-Hp}T;ao<&%_nzVxw$WO9<-)Il}vJ zW1H2roL>h>ek8V09Y~x}wkZ@%vPp@zByCZ#4Jt)=Y_@TfNOq&xW+Z!2Y%5Y8VjGdtiETr&3&l1e#ffb}iWA#_6rQyG zByB!PTaRo!X92}O8&^1a?a0V{T-kmU9@~6$9@~0!9@}_y9@}=5huEegdroZ2QJmO@ zqd2kcMs}OnW}`SsTTN`EQFv^dksT$r$;ciP+hSygiES{lzr?l|#gA<+vbV&x7TH;1 z8;k5Kv2A4|V|Hv)(Q{*4iq2&}5oAAk5Lb}hB(|BzUh)uOj{ueNdECE;^P~+WX*-E+ zCK2h6vXx-tkd0(2;Yr&_Y!gv<(iRfiKxPr1w0*=jk4WJYOSX>K#-Z?0wvFFgF$dPSO=~G*8~jXz5~1yd<1*~d=h*bYzAAw*TA#jFCac`ASYu9~#*_7`q<9VvI*s?xnmV+Z~}f@i_=VEd%!CcQFg zL}mgzhHI{^xxVI>npM{4qJ@szEI{790oWfL722ewY4zx^l+!85?KZFH-=_pEdyJ0_m{V z2e_#tQ{91+4t&FbQx2RpVUf%m;gD|qHf$wWOR6f327glZld7>}#*X=U_0OvtCN@mW zMq{HRqG{2RXnC|E>Wwf;zBsabWcN!WyGHIKec`cymN$_{u<>5%9}GO6a#ww%=+S4SlI=5QIY_FcdKwu#SA+=xzd2Z)cB%9L9MdIP*I;I{bke<|N_o{Kkg^1q+| zkjg&v6>uz=3*44tx6InIU`y+k3%6XdrESZ`@z0Ncar~>}`?lV{b=%mKdlWD+d9jp- zvU*@9>TMaD-Bx#x`x;iOr}(|V@1FF}(!WaoHvNb6NO~|cn7M}E_55z+S4rE|@teeN z3cp$W)Q)Py7&rN=Q@(Yt+=Eny1QG>O4Uo;q96Aef2iZ({~N3QXZ#={y9Z#=4TM&r?q3mVU9 zY-@Z+<2xHSG#+~Jtb@-wc*(&dhq=QCj$C`>`;Pp;kt4j0eD1Vor|+81gsr8DUv*2= z($ezgmXlgeZTU#cM_V@X+tu<)%Z3@Z&DcKU7c-)nZ8O);{NBtFeis})a`g5EBMaOe zjd$F1$IW-NY&w3^w9V5te~;hATQA~7h8?(W6ASM2WCy&CEq z*_|4RMot`=HFEJt`$!+ZkB{6ovUB9uBflB>)5uFBuZ~z7q_7KsG2m$MW^gLl06q$K zfLFjX(Bv-g^WK#E1&HaJ2<~~{9#6SL!Qo&9SOrA)*MjT7EnqYF9QX~``EM!r9B|)H zx%0sia0PJx4$lPVgO7rbgWJIW3ivec?O-$b4EP+l2YfByKHP7C2SMc%lnc~?day6p zA4~>s0LOz9!6{%CI31i3a29SWI2U~S$&}j!9(^k1z6(acnm?r67eVTeyxR!evy5+` 
z1-ut*1fK(Ug8RWY!1uu8;CUcr=4ZjL!3ap9%uItEI1$VO^FS+j3s?f$ffSujq14<7 z{s5i@uK+1G2f#3R8teqW2M%Rt7UaNO&;O*!w=3n|1O5d(0-gmgfpWh1^UR+!!$&=L z)JsQ=90i~KnTt~HLvQ5%gHx^t^npPz3}zofcs1<|ssn0qj{$E6$AOaqPQ?{m#dQ@o zR!pxPo_y2fBM&o$%Ga12=BoxU3vysQmx7pL4k<$)>Rp7Q-EPfq#Kl%Gub^OV0%`P-DgPuV@?(8fa>+qbrFjq@UR z7QFdgU}J#$H{>G_A8)I@w)Xnkf3Cf?_T9Dbt=&|+rFL8G7i#aS{Zj2cwfELOSo=`z zzt%ol`&jJ`emiS_QTyxKztp}|J5n3pzvLkJ4p<2~z$)-AumQXqycc{BYy_J@sy0(w zSzB8>wl-HgzIJNu5w$I~Gi#5oJ*M{9+T&`EuRV$1DYd87&aItSdv@)D+PCn#u=bML z%lNIRU0XXm?L*T(GVQr(Zu%3`D_bgC#_(%wIlSe#mJ?gto9yszyU%>`sclbv`>98t+V#}Nk*|+@bL63sM@GIk z@^2$gjr?%r$A0Wo2I}d5pamQcq6u$G$d-sY@SMMbzk%n$E-(V(u48u{x9g-`r|_G#E5B>@uG4p&vFn^&`Ih&$e5~a*sU}=%8)Mg<&@DI@m=R3l zLO5oT_Ico~piR%o^k%NFxS`_gs`IM)_`RiOam}^^w;#A;^1n^~_sKga|9tXqCjXJ& z-}t>a`K8I*4|?gKT?dUER2S7pJECW!BU+kUKGgEz7Pous?r3-O?pL8RVJEVHa3Xy} zSowI$b%K8aZwJ?dcY-b8v%sYqc$c-oEn!a453T^~!7OkdxB#?);mwb2-m&=yn}53b zXPZ@|;T&Tph~?|yx?AerRkxw;J$0Mvw$$BK_ocdf>h7(3pzhms57#|X_gLK%bx+p) zl;6&}U)24&?$339srzePTsLw*(eiT3t1Tlf&(0W`v18`$neOPf9=&3ucVx}T$9$Vo zzcQpQzmqZnx4C)qbbiG&YT-Yi5>$aYFdpm&CV&ILG2l3G0yq`S1Fhf!@Jnb_km34? 
zTPW{@pG?>};kOf>o$%6xod=8@;KrrgK43gJ1#APq1J8n&K)gO^i`#Jwm?@a#^8Tk+CKk@5BPF|bfS9|iR=QA&4-d1rlW1^o6URb%L@}@wO zf8}ecXI1ssJpB7rPw@Ly)vv4mRQ2bom#bc3Rz6ad8iwz@@db}p)EB&>DugL@NtGzGqzGvWjFV$9N>-gw$E;~LuHG4$1 zB|9^FboQ9+@!6C4U6Q>t+mv+v4o$i650zU+sxAI^T1-^a7JWj~qy zRCZH#OZIcwd$K$DRo2zjHPq$m#??)&JEHFRx|8``P z>r)La4Ko{#ZkW|@M#I}0-rjH>zZ)8EZn&l4lMSEdcSpmvhA%YS)o>rb?F|n!e4F2o z8-CWn)O75uv2(|sIks!;O=E8v>vBirPUY8{J1=)p?$TU=Uwdvvt|!-*8_K;c_bz^W zj{E;-a>(%bTgS(9$JD-E`}gj@e*d@a|Ka^N?*F^}|FZv!`@0Es6B;J$Ga;HVW5TQn zZ<(-ULf3?z34IfWC)_pRDSkiZ7frlk;_$@lCf+!4)5I1G*3BJ7DmD z;RCiFu}NzM$0jb#wWGJfl0ZeM@<$8O(v`>$@FaL1H8 znwZ_b^N#!P_~spt-0}P!sZE(pm7B(Fs@+t#scF-!O#_?WzUi(_f8F%Lrd|AA-L!jC z?dC%^w`@LRv&QP{Hh;n9!#g+sa`Ue@|9SJvn|E(^Tc)CiIBm=6Th89HWXswuSMs}N z%jdV;$?r>B?%s0GmixAB=l4y1Kicw>EjzdTZp&Y{{B6tLWZIUk7j3;{YvaHX9I*#92rmNgl)KxZ)oG_9fxoBj?UgZqtZ;tsZI223; zd{@Dp1WpF0f(4)zTndK4%W@f;KMFPi(JNH>;y^q$<;8{f(RTB zhQXJ?*TMb3F~$lxu(xq}3E@|PVenC~9Xtg775oy!`p;M{S%OR=d1V;98)zZkJP_;G zW8J#sz&C>72IOF{4crObSo96xuz(iaIY7Fw2S7Y;%OK;t8C(lK4|agxfXqI$J2*W+ zIyRvd^nfHei-m$Tp=k}9Elt;3~m571>B7L&)`<@F>o8$1SEg_6#Q4` zTPGtE3i13(^Qr=H`=T!dbHQ1l4HUpI_&j(3JO~~N7~T&(3V0g)2)O;(6$+dlFpRqe zd?mn5@b(7HvyTKDf!^mB0sq;&eLC~=Qv>q2!bWfh*aCKdX99kKE4TwavbZzB2?4FR z!X@Ax;9~*XabE;(a!Ic}jq_On{kXRR$6TrkRD&9j1ufu=fW6Jj-oZ7^#e|)2px=OP zQ#@ilN>Ojp;`IncJ4^>M>nidAE(5V%->s&;!7MNvECZK=tHG^cR*!!V#`V&kpf%vV zxVM3w;2H3X0N0mtFWgA^Zu0W4`)A4sBy%U{HL?8NcPDeEFL3?d*VmK$e_3z1{Yi9T z;D_MH;AQX%*!L-PM&M20Sg;VZf*#NZZu-C4I}i9MiuUp6N`R1r#6}C4L|L>U^8Y?Fvv+%YmjFuC z|NHa4pC_B0ot>GToqgunX^WhSEJm7;`;i9`X@^XC$CXlq?2PP+j5D(8UE2MSl#lU? 
zNdKJ7KaxH=k$?NabU)Z1Z7)b}N5|~vM>{>$X?3SpwH?h%qy2p?{#|XkdQVH~*}G>^ z&%=98>KW)cwdeGnGkdPUhHT#2Zu=GH_sbs$V$+r7pORlzbbQe^i%z!LOU;Y4jZ(R^ z3OgkQd&IISQhMy%V@QuZmHn#TVX=Sny7lW;+-*p=QW({3ybSS+{aUKWqdxwaJ+k_m z=4+z5C>mBuN;z{`)rkPexU1)ISZuoZNHZjb{#As6zY7xad{ z;Ddg!9c&K+U=ZvGJG%zEN?<4qgHqTHM!^`^!?mYtZ`cRM!@jUT8~_KwA#fNR0e&cl z3JAbtsD`Of3qhC$VQ7Hqa10y^vs{g?xo{$!0;jvqaGe9^!^N&kU01+W@L#wV76XQ= z>v~uQx5Djkr|T})y{?t6hv6}J5}tQ$i0huC=7R(x_5)!VNci__JQ&4bKU2=Z*))dOrvEsOiRaf&wpr}oe$S}p7yNv zJnMPU^Rnkvcpct^x8WUl4?cj8;1l=^zJRYh-+I1>pWs)|Z=PbW-&+n95P->04O5{O zf-nuj&;Zk6Cj1AEgX3W~oCasWS#UO-3+Kaya4}p8m&28C75o>jg+;IgmcsRLBisbH zz-@2`+zEHXy|5DQhX>(dcoZIoC*f)DGu~(6d3X_ChF9TrcoW`+ci=tv06v0G;4}CF zzJjmeTlgM+grDJ8_#K)-9$pW~uRWweI%Gl?YzAAvR?r2yK@RkUT*!xB&>Q-K5BkA& zusx{uDf$+Er*A>}mgSv1g}xlEKQHO&+S;|X2U4Luq(M4lLKbWWTfkP(1yG4w=Ri-$ zg?#7*y`eAopdV}p+rt1D1UtgcunP=H?P3}jg-Q~yJj`t4==*Y@G86xZ^GN~4!lR((dY0beCudG8nJN1!jUi;vOrU)UcGfP>%=I1G*eKa@iS z1Yj~$!&In+AWT~{W6?2iEF2HBVJ@5qC&TG*CY%lDE;@hFg>W%k3YWu`a25O)t_Arm zfg9mAxC8ElyWw6~2@k-d@HqSczkt|^WlN{fp2dm{rtM1FpTsPSjxD6kN7hxa+Ba62 z_vkY7zSsQ)`d5*0h&5hgGXKv(E=6ufS{j$}497l2JTK~T7(26-aTZgNlaMQsyO8IR zFOl}VD{X@eK@LQM$f<~o%do#!y~Do!y()+IB>Q{S0qon~tIlHI{$BMM`|@rjVvTiN z=1%NWk+Da|z#)vBlW|7UCt~$^*!w%C(W5bjws&b)|Baq-X_p?^b!1m*XP!Y@auNN) zGXCIF+Kuh@{;@^J6-k?{Zf{LB?X5%T1H6KE)^){OE!b+oPqe3IEX<${_4tLC(T*x@ zsP%9P{IuxjMKV64t@ry(w~Ynzr74k~d60 z$u~*+NmjMcKk`TWMXY}LR>xn^*YS7uk4!NABmdX?L;mgmh#znEPwD^ox8FmLAN`~K z9?t%b6#6mj{Tyx9pFw-RwC}gruKy+WGxC3ZKFc3%#~0gIeh;}Gay<%9z&CJ8?pe7P z;+?C987@yU?Lm{2g9LoI2;LO zFbOK53Z}qOPy=;P4Jzy^w3*(^PQ|}4E(IDf8 zcOQrHbf!5G*B_JXl64ko~U zFcA)fgW*s(9FBxCm;{wj1ykTCsDV1DhY&>IXqW-Vz_Bn38etBc04KpIa4MV*XTp3q z2hM{lU;$hW*T6zp3{7wy+yKkqX1En@hvje=+yg6M72FRG!o%<=JPuF7)36$zgBRc> zcm-aAH{dN;1MkB7@F9E*pTg(xC9H*S;5+yMeu7`%H&_Q9aNu0Jfa&>}`PuoK=eIN-usDBE{xCHj za5Q59=c=)QWkqF0lZq;fCPOt$h1#O}q7XE|tfI!EIdB4;1gF5Ma5|g`^Whvg4=#X< z;1akDu7Cw_HCzJ=VKFqpb#MbLgPY-2xE+?mU2qSqfK_lmJO~fNqwqL92~Wdncn)5G zm*5q64c>saU=6$rpTQUK4U{t4a5oqQV_*;13&z4Ym;n31L^u!*hC|_SI1!GJ 
zOo5}I1{z@woB$`mDR3&B4rjuAI0w#y3*aKS1h!$s;|J?4D11W!B`jv6JS4>2nWK!a3~xOM?x7)f=Z}@DR30jKpo74|G;sJW-V%j zIdB4;1gF6nFdxo=3*aKS1TKRsU;$hW*T6zp3{8t}ShQ@>t&47ld}Jtc08)?4Lu8(zj2C(r zk#RyjXp5HdK?fo-E@(a?*SHtZ06kW-OjJ(g}lT(`?uS@-|nM!AGiCo-52dPYrh#&Z}w`xu>G?3 zpSAzC{qNAZL$?k&9eQ=>-Jx#>Ux)2F9Ma)1GZ){JR+2U}ZCKj)w0+YKN}I%dx!KJ7 zz6VxB=XgJ*=6Ju${O)xi^MsPk1*-2jtz!eoyrp|!g>8<~yUb7eg}F(tbWi$F7?wUN zefRV|()Ui^C;fo*gVHCZSESE@73uebD|4UBgJ4o- zMds|xxtWi{Gnp@Bz67sizMc6^=C_#xIt}QwL#L8XyLTGb=_a_f(>|fF#C|~BeTo0CuLV=PtLB*o|YZS zo|An>_E~UA_T||}Y~kNx^DbL<8OS`N{g}rd>Jsj9YnSJlMe=5s^sbp*PwaY9*Ck+$ z@#(^x_XD~g*}bg$vE7dcnfsHH3yRGN z`q4HgNah25RrGby@!K80oy?gmWZsm_4GJy{EsQKYZs8TopxJ&c?#1nZNf3*B zE$%B-z~aWmvlrjG_@TvX7Jtnw%I3v=m{+s=lKq!7E}6UJmL;o}d;mV?j*o<~%pdmwvwVi>1;Ltmh^%caxuUe$vs5 zVLk@owJc%YP8M$Vc{(yz=L6(JWLw7a%Dk?notS5gjLBkb9+JYCTsJZp8H33B0&|dM z$X!SZV{P3?Uqt4`k4DBIVIz%f=ODKrtB}W#Cy}R-XOJSs+U|_V+#iv*5t;v!!q{1v z_b79%WG>?{B-wmMYaZhyjwhSPxZ{?j2RYKnGPVyOH*V!hxeZy4tU}&E-b9K!qhpb! z$Q_8ajz46Jj$>oD9(jqJvDB=RisGV&U-26^8Iufr)FkwM5V$Pi>0vb&Le z*&c?Rid>9r&)8UN9f;xle-5$;S%Tb-3}Ec*PDqK7p=@_Wh9jeq-Hn{e);W*yLfpP( z%oP)?*E)i=U;^p@+ZXF3ctLMYjvJD|E&5PUZ zIqoBp@*43VLy#Jz!N>`0MJ`3IM6N<^L^gQdENw_TA)hlQ^ebdFV?tj-Mp0+%g;?|Q zWIVZyC6{r-KOxaybl#Dr2gft-s3Nn9IY*`F|05ylmw#57=jSuFKOpto6B*C!yid_{ z$aCj7=a%lD+`9?LA&B+!q`0jsDt z9#QiSpHTA--P9SwGD}T;F;>+VbEqqx$$U0iM~Ln}9{Ob$WeGEioUfue1^QrNh1G)|BHn>~2?%mPv2XsFO%Aulr zW%t_dq3&LE{EjdLcFh@)GZKA2N9p>jau((+%4vd+;M1JXbH0Rcl>YD6V}~9)sWe> zPJO&9dRD})k3}boK7Jl~19=;<^s(q*k%N$fkqeE8-W3^UMD(r5O-62ID`M$YnHPQ@ za)FWWjb63%r=>4NPgWt8o;(Fxbw&3T-QUbxllN}k`+4=)7GmRHf~=w5Uqzk2cInTt zGGxZNKVtOS0&HHH4|EUm4DuY(j5H!wB3Apo=yFSs%RGhXPua^Sx3RtK#$FbSd1%^* zv^~=HftcMqTiMO`VK+YoF?-pq?Bx-Tx$`l5d5*G|?^E{jqs-`ks$3`4 z5!lT8D0}(9sJ*-@eHE7SQ`pI3DT}RKn#5L~t!(9$87r}sAI^A^xz^fFb}2h~1a|U1 zpzY(?%07P1*vA`c7f;KYmDQNUF5WJ?ID26B&e^+Umt^mXjeM}Nk+p3+C;P)K=c6Ti8vDC{(}1RdO*=;I@UczDLNYu2^QJF&8h>c|VCe@-KZY-o*y8d`wY5D?7fXtW z-5#IK{w>Juh&UHlwQ$Gwxc0)!XVw>CT^IOp zFR~J`+wX&@H%gFQjSOdN+4Ex4UvFd?+uIPa@hv+)mpWz}q&LzRk@fqwr=Hmv*#!~% 
zU)Hx7iipiGBDTKB3y5X!zs~<+^NZN+{lU~N$?W|DaX$pH?EVY*|03jKq?kI!vj2DG ze|ZP6zXvq2zYMt=`wOdy`r@pCc8OLf6YrbF^?@HB39TG&WUn9@M`sSG~ z!B0x|mDy{Uw_VnBe3NyqY~_13H}eIiJK>&`6)9!ys@tt?_j9{d?H_Ajl@?^Jz^~H2 zOZz#ks$-C~Yrg7Ol^#qFrB6@)F#VhKs*GU9jEq@o9gu@FkK~o+^E&vUhCG_eO~wZ-7oGwA?J{sWjS}| zlro=cLXSgw)aS{%a5M87^XBDE$e)vcBCEiaZF}jqpKjZ{?a_VD?fZMt??p7Cne}T* ziq9EvK5M)A2JSI%{J?!#N@b3%^=eym-gOs~5kzIAuwiS_5V6 zlAo8zI(lO7WH)zH?V{E?ZKW>OP(e;bPD9?I{ZH0s%cHHM7c#<#teGV8Eb=07eacpNHmm^mq^4|9qVy)+5 zt@E-Zo$!%gXuEAjto3rrXj_r>IeTVd(<1vK6Oo4ySu4<5$8jxs{AWbg861wRMqWkU zLB2z*^-(6E6AwXFBd;P>IbKCseGDl^hYdm2pu4_7taZ7p^(3wL%LP35vYyc@=Fh%> zq-?9#AG6B&1lj}+K@LO8kw)y(3&ci!POocM_B{6ovDS+#eTVt>$XedPe@4V!eFm}C zO_lZJmLk_9HzGG7*7|bWeSzLZ)H-utv*rw9t)JG&J(GFz*8N+6j@9zA649QS;w=b z`@b>weHU}zKXKgmL#QL1_uYA)Te^?d{fpiYv!;mXjkmhJ-7SSOco=d7QjV-*U7X+B zH@CO0S9DI9(jBMd+jWKL1Mwrzc?46`oyJie405xE5U~dQfy=Jvy)uy(juk z)*O0`bjIs3S<5m}kF}(`L}yuX%hu9Q4Wzj>{q%S1pAFGF@+~Y&zmyog@u|@phf-!_ z{aH&ttWOVE&-=zc=Q4L*Bsvcs9U|kxiV>OjZRr&mCpI2w?0i}02|bRNrP0;T>MvWB zfxbg5{c9ahp-zzcLe>S@9%(dsT-F7VcJl?u1Bk2#BK1cKI=L@WhQH&H6vCACFsz^S z&9tdM#_O?D4Eb`)hXD7E`w1!;4$ib*}4t zDfiA=oA!k+FLimP%WGXyx_0b3s@v#pvTpH$?hCsw?tXpuLvza18p}g^4DT_Ed%dd1 zBdlQie2?tBzInxYBlAY*?VGoM-jR97v1aWVtXbQkS9&jBufkqa`Zn}^sPDslKkWNe z-?_eve6RRk_x0RZ$;}^Z?k^8`t4Hs_wQfae>^MJ?#BQ$4{OzKR=j0#F6-46 z7w-yst=eCU*D;?vb79uPFwBB)SRXpFc>3a}7eBN3r^UZ4{tf!GZkiwNV@)?%&rQ|? zeyi#2rnOC9H+{DBGsn8MOXxjaov2{eObyh8VUe@*&*%@1B2r|mZh1fjOHYe)>i^y7~A}7Cw zU5X^E2hVw<5vTp+9A{g`Jrlb$x?YM=5b_<%;d!Nb6JgN09oFr$Zu~ka6PEtij*lFi zYMyO*uG;bDlAX6zUYGt|`gezt{zLkgLXYAes!U6H9!CES)E;uvc~8>b_H<7J@Jh!q5N@!$

$+|y7z`ya6ox@5>;|J?4>%MKha(}aIjwnDkY}$9 zDnN8A3rghmgc9)d_sCGU{v9C`dO|*wq8rpb;`x*J3hQ|k9nDK+UQS+*JSOqx?E*5O z(hF&j0X=}_*X0whDO&&cr3R2Xpos6j%J)xoJ#cu+;ZfZ$dOuO;&-=^teM`Dt^n9X@ zPwm=%BlLXdye@fpO4kq08glp6kZ0o1oSz zKWLqQUAaT&w@2%X-p^?6WYhgan};@+L#60^`>&1l|G#?w&*}XFR;)V=#5OCz3K|NR zV(GBpq6}HZRvC?*avXNb8Q3WudUXUJOzAtN?@>?#bx;o>h`>YeA$$yxwFTa%aFU@b;lx_J=c0bkw9F}q_r*OafA@_6Yn+l(~zksj!u0jK=Tc5x;6@Gv@-Z@_RE=ap}9r!{^db>{T zviKIuTkSr9-`lNg_e1+1+OOkX_M5bC(|$?&HSKHGj;>FyPyaUkm-OQ^j?WmMIX-hH z$hQ|3a8vvz=;U@)x6dMN9WtS7Uc&Uz+GzR_?6ZGrPygS@tLZRgj! zyxv9DDSC^wam#a#%8BqTl6!OS&0U#$U+z=6&#*rB+qqw`M)vQlk3A}HRNj7YOy041 zr{-%8eNBVxycWvJ}zKeY?``+;V>RV^~-opn) zpB7E%zi)s0TGsOY2QS}$*t~cPzW$I`yc6Gl7-qizFn_?=11>P%evmb&3kU8#Z~~O^ z6^K&@E*rRNp#BEL{A(V*CS_6QMR|M+VjkauC|KNY@lJdXVtUieChJ=eB<0te`7X|H zd<$pNx=HIQ*Hx{n23eRT9eOlx-8>9R;cz$-tZzd6#y26X??22$ug*hGM`V3*>w6Fj z_+P~OF2rC{w*v=yIZ6A>ru_3=khGOdtH#VQxeX^dt2mvH~H=^=c{oRNfwsVma z5a)LzQl7(hMP!Y0krdk44n*#INz3bOMSeB1)yu3GiR@@(JX?{Vk$G%ImKb?}tw_o% z*uk%2?_$H2Ahk#YIU1RZoQRx+$UC=4Gi`bz*L|*K8C#M5wA+b9zq_+`fdM*sYr{&TwM8_^r`{109{ zc=2z)|Mt7}46JE>r}+cEfntUCCS`ODV%J+I9)ya3onCkJzN7bj@Kf*R-oZY>KC7a7SH7=f z*S*b!#r;P0+r8h|exm;@y=Un=OV7RA{BH9n&7Vi_y)*I31Tmt8}5SG9Vuncsa zdS+^}y7U)nQC)xjL=75WhyKy}(^+?>zUMOb?EKA zm-k-P`~KcP_D<ZUhH>NJt_2)GIzLlWY zx!*FnzP*h1x7M#0|9R1|{ko5O0qw($YEARERX=)TXU2~pR{!Um4g79_eGY|u!$9Uy ze27kx{thX>Ry*Mu+WbW9^Wd9LW&aGbu5ZG(58~%x=n`G$mxITWR| zovZvX-yxW0=1Htj?+LmMU9VkV&uoO_w^M%k(@1;iBZ$@i+emw`tZQzy52sw)PxY%= z{$;&f>5sMQf0p z`Ja$h+2*k?FN=+Ad$HY~HQB{|!C2-vv9*4$vhJ%r-enw@ujELdy|}M3`G?J>Lq*uz<4&Xajb;=a}q&T`|PvX6WTEcTP}2KI1DIWFn_AyvDVu`l^z{cdvj zuQ$j0WoZAWux}4>ahL0~e%&**zw;daKgJ%E^B3SQ-xprPRz6a>*5pIMxcG1i zOg>xT7IR$AAM9{1HRt7{H{|#PM>yk5I?Hg++oM@!dYw5Z>3|3aTnbp>8WQw&pOC< zy16fFagTnC3->FG|E>q<`106Fdfs(}zX$FT?)8rAnt;1}b+o(5pVjQ=vtM?gzHimW zy^($U^Vq@hT(9D|+?UUd|F!JPb7B2HH0dusNY-Z}e717mZZ`LQ0sHpw$2rouhU1c- z@0sU)E&Fmk*6&wGKBpY4(|;nq#9to!v5&}7K27+eaF=i=82>*x@~z$+pJMbtBl}z8 ze>Gc4zkGATo{sE8ba-Ws=P1v(-)ElRV)pI&>NQ6=rN;k*CY=-5m*?I3ooL+W<1Wwt zGUI 
z-_3sg8GLVr?P8OD{#C!l^L04O%yVS9FEj2_tm|T5^2Pe4nf!kPcZtvXz3xcQT66qS z>pof@&(`1}cHEaWxXX1caD>l!sE>M{@pzD|CK7>$qQK z3v_x;bcA1zyM(u&(TDTdmvUKr72oA#dxpukwd~vV!(?+G@uGiM>+nA_>C86!*5URh z-aOo;9F($^=eV=U=X&;gvcHyXA=?GylH`;1E4@aCf3rjXOu${z6*A?yo_+gy&zg86 z4)^Yk{AuL4e$5G4W1dU&_Dr8*i?w9{0^~yw&*s+JwJ~{XHztZ1*!}`_#^nKdX$p^y%2|<7(XHI^;W9cK01$_mj;1&BooHZ>!|Q5vvxoPjBQ*KEEgXE&SxTJ#9{J za@_t-mh5<`)6uct=-9XK{?pSZX>GJ8*ztb}_wwN9)%}~rwah?jbhc=eQ$u!76bwcD?K0K> zlPCKtf(><%_UYDcxS_T~mbFtK3RX5$MAD*1Ys+VJjO|p{rKy|`29iIIz6H4$P`i--b^P!?S@ET<>sAKnv_FDpt@!Yn}@2J zU@)|$%|m@nLwGBjhuZ48hH&T2V5ba{=xA~J(8rf=6-(i?RN2O|FJy84Dl{|=V>_Fg$!yp^@D&J=%@ z<&p9MISI~zx=3hdeUS7I>=JiyGUtQ>VeaOj9KXLR7^(>Pt843}fQvs<#=<+On;HrF z>l&o+?Wi-fJRI0*2Y+?gPg<%gciL={c9?KIf#6sQwlyVy_E zP9^Hf%!v)58WY|Sr?d7wRNeEV!ofO!LtQPma7ua2t}0iRn{gSYVpc9zl7}m&5RY$l zZNMK0g@U2dF4l83IaFRhh0H0hm{LBeCNQF_WQaPwCNM+XD@|_fmgGn{5~{A7JTexg zbg&I(-82h zbNA{IjbS=UAQ18gYO5puNx@)EpuBGHFm>gyV}P10D) zL-Mc(Y67*C%<aMX;uZi>?mV?VFd>gFFi+`2AY;Q6u?KUbBC%q+#fc zwSt)#%XgCpYIA^kGE~-7Hq_Q1sBVMKcG*8j-G*AUd9Z?qxH@vMDwk6N<@NshNXXCg zcSyVoDL=GRZSd%TzbX_6)KrHf;lota>YPc0Gd1iFN6N!f4p(JHIjent1$Q!ZgnGd3 zo_W3_!BF5xrGQLQ)Tt5yk9f^gG+-@-p-kx)aV=L0iZU+go}{9;9*YW|hk&HJB8fAJ zuiUIQ5SkoN)tO%>H=yz;8e5gRV5+fT^)YB0GG%g{OMOFl$`qY%#!r2)zFIvBv6I4q zP~@mMPb6uostRiBD^;j9S@K_HpsF18CK|FfT^y)^Wv5PEqV`b}R0+`K*^io=R8_BB zCRNFvcAusmbGwVbE~su&Nb5A^m*Y~=ge@mk>!=VT-f7g!4IQSHS62Fib%CSVudJNj zVOqFhlI+j$PNP<^XKO%5Et znV)AoG9@gD)up!4l_73Yo5_*cu57>DGp)ar-#M;Kzuu*mrnt?G?pI+X1wtpdwz6)y zc@VVwiLT8Z?u2`iYYT_t)WFQ}$q7N|s7`Th>G0i9$D=&Y)!vVFL*Y5q)j635f8A-Z zc+?}O!#myKsuOyKw(?Z1SY1_hrc0H1*^O4?M0%D>6=R&kaqjb7s?dsemAJEA>d99A zl*2hL^*o{GY65lVx>RXmrw%*dJiS*PuB@J19Xa3H3)h!d1TJu;iL$8PWEsA{|iVTAhjoG}B)n3RG3kpdd#ord*nMINVSbJ$#u< zJvY|b+5)@WWQD4u!cp_*ig@QhU1fMW*5s8gWsX=uU?UK*)ftGXJh|sX}gr9Ii)EUmKqcG+rp8(zh@^Xr3m0-XfQJ zgsnh`xpr~9S8?)-fw&|-Y~`+u#3q;0ruHyQ?k`PpL}&bUNdi;1{(4tO6|&m7A^s9; zg3|+`8{^#?>gxlcWjcc*GwTBs(6G@(H#uB&0&mtgFzVOT?YHPM5Or3S@2xs_qK*{o z+Z=)F$NqMmsHn%fhMJl?^uwUtcR#ar7x&tfYol%|YCag+uR`=mP 
zeaX6JmkYn&?l6Vw_yL_!+6NUX+Bfi^_GA<&it-_&C{QcxJ*@AHRqupm%47A2);u~K zJU!}C6;E8arWiby?2s-CkLzrTKTF;DCz73{Q~0FL!T6IX8Be)XO>5FA7yGpR>cxtB zMwcgjDjIQeWXfuNmbrMT5T4a%Y3onyf#;-N5`C!86Q}20-L#WAZ&E|WR8h`y>;+e6 zeN5D{{lJU%w5ZKrMOrK;Kn;!^+V|b(J6L zN=wwhM{&g>j5YVME?coY@hb}wO;GuX)-L9dSbb91KecOQKjny)2J!uwehPJ2js^0$ zu02(HzR(YkPMcIz{)+PYFLjpd+xC^Ky<|E0x7O4J6dTLu*Y?N2q)LhviTcJ~Or=@o zTNf6V1Z9eY`%gOYG{#JAsQ+0f zxt3g~^pLFT$S*oD%SrqD)it>eom)~83Jfd=M=DE7{QdzYCA>RVSMZ)7uc1SEgAEns z#SY58pW=X_s*Vk_A2fF?7}AHBqlwiJj`Z>U=Bf;Y$>TuVM)SLCjeBxUaFQgjE?h%t zD3J8(R1_BFHBg8r*99u`YJzo>?Fm1cH$qY!yUb0P2ljYk8Pq0Gk>OUB+hg5?7gOk4 z=PIBlJeWk!R*WEMDIU)h^ZvUk6im5cp2B#~1)HaV&-mtdOAAS1xPIJH8KK4xd{bH#|K@9O;WmgR5 zh?ErDH5yk_7h>WK)omSEE^YK6=An3{R22>@9A+z7CLs}NcKqAZXV1@ux?rd>5DG{m zO1;Y60JVf_5OQSF-;mI~)l$1mb-8c}uZhtU4=pioHPm z-PfA*f?COnglkP!q5T~fB{g43NfiwVv5X#85G`Mp9df$+8gK0K;%-yyJo^pWgjJ_K z*o`e?u{0;k@cQy<+q0yv)gId>ya;7skkjbJu-%w5_gtRsmX&79NB&vrbeQi9_rva3 zp2z2Jo8@dtNqj_o+9)Q%ttzhx2W-zrn?>b2)BQu5BPV0Krquj@QZ9<}TV<;LrKYE@ZT)lus}_1`m7PXsF@~JKJT#exfn;`FymD(J-6knZ zsP^q~{Y9zZjp8i#(}`~s{~G2S)qM1?s)#;Ht!G7oT1;jq{(XkIZmG1ryqlz`=D}7h z#($Ju=$r4p*b_5ejM8H$M&gV1KPcUH7}vVdN~!q=U6S<@GAniq z3Vdg~&$5~9POCub$p5JZhmw*YUA)pbRp3jg@O=6m@ErI4vErKKRln6Rit?hW$9j9# z+jHGlwu=R8iqyaL+Q0Rh>O6{R5b1f5{)o7WD@iG|E2xCGfq!_t=4UXcq`4rjXUTV- zTMeBFF#vK#bdc-$Zq*a69aYc31@0}Sv%k6~uA9ntp*uSiXrOt8Zn=_@{YDQg^j+k> zKO^Cp`xCFtv9LB_+fY$nG{M~SKhrq<7bGthMGI9$%)=(lqFAtr)>!P=CXLd5S7K49 zM=iFy31e*GiEp#G5_E*~j_L;ZHi_S^y#D4J8da@ra-s1zwQUPe!VQg8DzR((8**DW zd#Fx^SUNW8gJXL`+LYm%Q5YAy&rgpfe1k2w*eRPZP4SgQEDGndDECK*llJQUTg`0ZfwV1|OKqRg zv1jK0ay8R(UuP_THmTZ*1?#8|Hfb!e+NiCDR+_eByRG}zTo665m%7yyV%_2qR}MP2zQzLBijI#e_x91ck17lqD?Ue0$V-7hIZN?|GpIc`%)BB`5QOM#%>$v zCaM;?B`$ZjuMU^j)K4kDLjF^e6R(v2)TG7*?$ny#KuIp`_wEcdA+*=X$rA=B)m|!QD=;I&h=gD|7Rfx!YB!*Uy{W z?adMcH@oSL3`K%9x8OHWTQMbctNy39{x8#}E=T2E?oRgINbqiVrg8{RVKx^Y?lJq~zq+m>bg%7v>Xa4k?CAOIR)vDKEA1gL zPk)u|40i9cA7&w(``ua55ZJ8^Rz6^J(JRwD=yVA^WL#7-3DsZmuzPdcA_g=8%1oI@ z+?~uq2FBr0cV<-`OB@92jtpB4XWW~qBX+mdZne0=G?_7{Cz(C#?kI_38g00u 
z`Z;q0WKT^&d*0n?I>YJJVde0GJ59W+a`2))RTqkv+-j8uQ!rk3XNWhZaVVd!xI4%$ z>mW>j)xDWI!hAkv8xc|E%(~Zfs?53K^160WajV(*;Wu<-wPA64Q|DxLxVjG0g17Ux zbh4_$b>($$TL;xTLrj}oqff04Pvhu2?o5eap1VLrb$QLZ*1=-}q2PNuoib7FeeJJ- zN6{~bKA;$LUS*)>Lw%*wSf7KblOJh+WT;N($L7k(V2SP%cec7*cCF-m8gr@->WQ77 zxwo`@aq;D~OaYV?fj*Ch?XRjSpB(-|?@Mwjf`O{4FZKR3mc~%~U+G+v8Gn)PBkQ z->l<0jNi?!9LD=Pecz|aoXuu)U-VPj=;@-alzEsntWPCNX_2CeKsY?xqYEP|KFslS zP@edAu4hZ@Uw@dX@&5A4qqu?-JX%9Ze4)C^6FsVwn+rP0gv6R3CtD$v*Pr51C581r z=9!aa!4UM(srHlWPBSO7LdfY>IFrI>c+}%KO|9H@rbj(&(_~q%vn)TAmGjNN1bMdk z7hQFZCrw?zH2y!=vzhu|^rXK|E3)&f#OU4gE$`^73p`ryis~B3$_fK7GzX{Ra*-!p zg@s08?_!VE$Z|xK%q1oP>|Sbee45T|9&J_VSg7YRt6{)S z*BTeIU=}V5P2Pxq%EBV@LtP-cW~zj=SRBe3@k5rEGwh&ZiD&b82g!>jlZ}*>sew>k zpk}ElQ)<>U9+;gMDz6P+XYP)2o<2wo-?`qC5#}MXboULOY|-6zryD(;)XZ9R28+W4 zrw?4_$qq9oDil!8%IPN0=HcmTidPMl-}JC@yxG$`X5IKvLo-MSOK1cf11E0rup*Qm z)|7{$b{xIc!*U|#C`vHE^j_73>buRu1o_x$haKTK@pcdEAjF-B7k%m-B=p{ao^W=cQzqz-~FB|+A71%ia5?ECvME0etLCl zqjRMPGaU8-Pr2y)xY^ROM2Br~fgj5w93G0%P?X1?Va$tTQ#ec(7$8*rH&%cXy!#&X z92-p*z7yiwabqL1N4);%AMzY6bJXIZ)$1OxUYTB3k9Sta97^T6u}Nv|x*zt8u^*QA zF&3t)aMbS%3S?0Z#ygU)s`}NNyn;fXsg8Y*czUZw>D{U$`^&luB_$JBT|n|b?hgAN z^%Oe%O$^Q4pG6ESl6Zd1Gu7eQ_*Aa@tGCJt)q#pYw6(g$1CqVElhZW2*v{4^#P_)8 zbeB4R0xMaSl<26e>(PTcZ6~ciN?TYX>MCf4Ffr@k_mQRC_NH;W#q>Pk>8XMksmGeK za>oc+n{iK8W|4gLJ?ZK0I6lkrTHt%i<5LIsld-JzB_#)y*I-xQK<&&8UJWIP3 z_?~Vppl3Y#(ygFNN=BAPw3CEmQZN7dR(rN}B#5FAvip_0;Ct2-l~{=THZZ9n!QXS9 zEgiv)3)UC-o=-^7ZZji+{bW@{UEEZeQ!%~Z>E;MQ<+{qf7d?F)$41xIM`j*WT^X68 zlcM&=*ZW@b^l|tcSzaH(?6jv?Zph1?9EZo<0+XxjbkcmUc*Z%79>Chjyzf@-*H9Ob zYuI0{;ulkwQu*5!ztOw8AWL+wdX}f!A71I_`!jQLq6$z}*)dD)5YL|+6WnsB_O!=M z>9>}dNOr3IBz3{I36#nF>kEjTr8+|sJ;sg8iFVE=^|yh20*U9g-b0hjzccZzCu~D5 zG9mG*jV<|FDI1Wpwvm;I=^qv=$`=eSPPL!(Z;4iVNBxO=BwK!L+oVLdJZzx#J6JEq$nY8i6 zKt)3+Osjdc9`?PFPid9+ zV+Kb<)+)ib#xtOmGsa>E*9~x<^Nwfh#B=Aqoe?Y_G{VPu-rWE{iKFLZC(nzh} z0ptqRi<<3~z;2+#>;e^OGVi+bpJflbz_)fI-aEeb>=AcG`hCQfAYLLjusna`IdZ*0 z{i(wHtS<|(=qB=6O!1sgaEL;1ZpM|nndEJZ^X*qrrVJxj1petDHkQRqn 
zTW^a>r4SS2N6-AfHc51N)tMS4nWB?4{qz@QQ=bjbrk_3M|0UTJP15?Z$$Gi_#Um>d zmz0beGeZ5*^_|pDR{fdK1Qx62e)TYm+B$g_K@|9YGc6}7yo2ahoi;HzQnj3@8cYWt zJ7`4`=Mq&Hyi@yr_wv;=LJkjB@NiwA6mpw;paRn5WeKsSmw4BE^`|9^zU8Yw-8*(euU<#r z`o*7-82iLl?){k?aPhXQUhl=9l`Q&}FMj?8Tzp*gtzZ1vNn&s1&Y!cvvD?xRr?u3x zWj#mdCX2o0Yd_Ds_j+%oxmd^r3AShNT^ydb`kfp;z}PY2#-1K!n8TL^#YH-$mZ(2|e1;u2%+F1!v5( zb@J*4K&7G9A9P8{w*F#iyNmkAdX>gQWqkN9Zrhj#v>fv#Z5#8zmSeuun;Xpx>`^rU zEqZJCr_gtq_o~F6->8?3DnhoHHoDVSieJmc>vFFt(Fq1a1*4xM@>xB;#w9J2zAL=v zC7P>xsV|j2E{*pfoOnewL0 zFrB(-_WRS)1#o|rDW(3XV3HLto4zqmvMo)8zH7W?v8%MT6xcj%#N{Pz7P!`XqJAV) znENv?U<0pCUy~}M>&t@;xqRP3nLxt(piYM#DAMAIEgrCIcBdzOOMHvGx&>bASQ;J~ znLeVTB2c4Lr#9R18hw$JS61I*@5zapgQS|w+L#e4Dp@-6jn*9U&@VA>2fAX?d5=4f z3@;aZCcI6ka%jDbG{xx_`?(3aWvO?F&OqBOjz%+j1|KhFMp!gDj>JWDoj0ykIR?Doe|}rzJiT3JKLVDJ;PQ?d^daT10T(&eWFjd z9&8iK#aQ`^^PHHyVnl5!-IeS0CRk-gDxr@dz{N zbUhaBz%;c(bbl7K#P_R3_+q8Nb zU4I3>yS)3i>S@0~J@%S2Y;z}*Sp!me`R+FAUCxg1Nxxv7?t*Huk4&jvWm5X?@s2bp zN;tQLPBBYwE9Gq}$a|xC<_x99u4;nut?*KpQh~R0TVfZaZ@h|HozY@mtn?0#3(|DP zaVE8$?Cfb(cQ$YOD)ft;Zhr`$OGwAF9OM%3~*?tHLEClzH@| zFT`O~-tXPjmR4@6?Wr0S9LpDLaS7D5q(FTSm>S%=p!o9-q(QNoH_Y9nTlzs`=p^?# zlJBgS*GE?|RSzsB{~_~ooBX8xswYn|3G_Ye$lCanm>zK7Bi=!A*SnwkBK25|e9Q-< z2W%cp5q#7e-*rERNnpXy2>Ez5-;m^)h*LY(9k+#JGFGdS$Gr1gj`wg=4X861Vo^&Q zS*!%a>VVjBbuVKr$d0e6*tLPNv?r>F$490i4&0dQ zvnSntUHj9+SJB#qX%Adkq3?+eN!6ggI#o|@NU8?>)v0>QtGm5;C6s!JalL9xvbw1w z8Ol47&l87tYpCW>$SRr?2vsV4PkVPy>XAX;l{z3*W}PZB?s3$m`WbJX3Q0Y82k6f; z@6NnR>jf_MPC_D-&%{8?fh4K6ogf;mAzbKNZB*Bu;RBi3TdBrWDR0U2carm;^&YHT zxzGb_32iBI?I-#^rgG30YTOh4oT>P1DPg<}W5pyNt;@F^w48K~SQq%bH<%cKWh{uE z9ywAH%X1qrqut|!Odgl9m*eh%72^xu-BgU~9*)*uU{ww+1{{}SU8G+09>VaG-gEoI5ciTVyqX7S4=sHi?1c$ zaoK7w6LANu{C?G|XQin8-V@_ta?5wp_nPq>GhDRwK%MS%D)7DTs14%`4K-qj(&2lf zHD6J4fZ8>vE9_@ln`_JMi_!`dnd;#MO*VzyqROK zejGJtyhm&8lk&vDDQmpP#bQeK-X0$k!*la zWNgmTCtf`h$(bUV7ZrEmpL+F7B&WwQtnN|Z`^>wUQUv;_{AH}FcXONDEbaJ(cPpFY z`1(k7t+c)SzKpw0b#=-0e6)_MpsLY3g}$$hvXi^8PkFtuh)PQKDGyDJ-VonfZx0oB 
zg0Db`iC(_1z4=KGGX_0a!^c%5L%%U~j57!oV=TiYZDU!IIvZ=_G+4strj3It zcrm>cQ*<1_YES+M#{6$sD590)QyCkhk}Q?$NmQZl2V+G!FIhfF9&H(PWRpDQbZTK& zHaO>auHUEcNAF6vd06+ zL$pF`DU%Ye-KIvQ0!V(X4o{oy43#eXJU6p6%n7NX1OsowFEmMj zs&a@9B_mq%(qbP&cVcRG?7C*jh51fOoy%&bl_e$WTY9<|)i#39@3+mRkNMk#3zVtX zmpIGc@2{l`TE|<#drW!AzHrPYjXHKkf5(NYm49MM*o0RmuTpB1ZgMj$7Uy4e*}A2y z^=|vWK2@=y0Vk)v8#m=q4OnO)LCWZg9oxjWEf#RH=fDwn-0gE5YqNA&3s(K1GS-%f z{1b(&Et3^1Wv8T4u4F)gRDGj@x_L~w*m^KbU1+uTi&tw@UDPrcTitq7Re_BNO*Z=8 zY;;G8-kFzrbS&ESL>d=`lANh0Rph0qS2Hd&M@F)j9^R!YuH)ytK|r^N14r{k0X`~)T&4|Ltm7-Uz{Hbd}sp8GLH}0);&vFn4-!lv1nbKIyf#+3D90FTRHU-$ElK@ z4LtSIR6PUHDjf+I&`PwIrIy8|SP%IdA0548I)0O91mUX6B`ySXVK|_kG&8E}a_f!5 zj?%LK9knf7r_z_0d*rLF=pdM_rQK4PwOptxyIDMrp=a{Yo7Nm}fyJ+cRwie*q zpb&I0SEUY%3&sqFj1|-MWz5y7BjW-X7nmL$p`r66y4&(4`Tm=l8yCv{EIA#QqrPiW z_1tW$i0RdTY$wHsUg*0vRZl0K7_48f--W4%#VkX66J|6gV~1niD)z{Ey_L7TWNQNZ z7R6nh)N2h?q7zwew!l>7i&OQIjn>7DYp6B7iQxj@l2kK^jbeK=?I@y1!V`ni`xGva z1{8HCt@{*9=q5B#3w=$g^K@&hmBjs6M58(ssMPJCwrXZmMr(#B#MPvQg}$Y!b6QKx z7>53-2^wva7uL9m>r4TQmhWhfrPHOrcYTX~tzlfcLqM#%8;ot?3`Gt3iaI);Eo=Ok z@5WTK*rqjzCzdK2smp1KQnh6UO{VlF`WzRS%>1$f-_6Eo zO7wvx7j-P~-IA&&ZQDw>j1mZh7$KA3R^YoeRgarXIJ6;yT5e0#i%KOqxz$n0x2Ni1 zT+p7$M;Y6Fnu=+;$q_3yogMbkJQ8MfpwFGD zy6@ZaZu&&@pmYY9TlX|r&Rb+fz{=d%0z~@6Lf_q15p(!#F>CB;_uZ3vxY{4-IJ?zl zz)aNfk8=E;R$;z-Q*{P7gCAL5XF0`H4e^2aR;20?)0ST=7e)mwm$ z&Rgdv=He=oi%y>#mW%fp8^C#X3j?<7cfis>_nRA$5JU@uw&KHRdC%O42U2%Xew@Ku z`>{vc9+dCF)QPR0mef*FR$ObQobRDjYq{O{(#_(=c83Dr!>QwJ>1pXf-H2>{#Kde} zw^br$#XA)E9!-s#U)A!w09*PB<637O#)ZVB-h<;+f^Ld&%oKQR<8;a6sYlsT(PA!b znv(Q{(Z-hI(m5D^QL=KKwe+E-r=Co;eMcebh(g;n{PCPjI@9&3)QdKFh+AALczR-n z$A;a|^$a_V51!$FPp4k|ccd-pfV_mnJ@c1lTy(Txvf+veXdO$0Hbe9J@LX!g3d}vGyzpx9d}>G8@Vz&jUP$dw z#RvVwq5bBrlq{||!XO1c;)AXYgwi&1r?l@M3d)zkT&ewKlTw*hk3&O6AQX0G@UM(l z3si15X{IaVfJqH?k%qhqnxZD}I$%)$;{F4+>(_5!@xTECiU$rFFmT5~19s@Q^PtM| ez@$NyI~MO;wcUVOv}+BRW%Dw4$HDyv4EldlENr3x diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/optimizations.go 
b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/optimizations.go index ecdd8d82c1..92ef107ee5 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/optimizations.go +++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/optimizations.go @@ -133,7 +133,7 @@ func (c *Compiler) removeUnusedCode() error { } caller, ok := c.funcs[callerName] if !ok { - return fmt.Errorf("caller not found: %s (%s)", cg[i][0], callerName) + continue // without a caller, it should get removed anyways (right?) } callee, ok := c.funcs[calleeName] if !ok { diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go index 25cbc13b47..b7f1a27812 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go +++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go @@ -32,7 +32,6 @@ const ( opaWasmABIMinorVersionVar = "opa_wasm_abi_minor_version" ) -// nolint: deadcode,varcheck const ( opaTypeNull int32 = iota + 1 opaTypeBoolean @@ -90,6 +89,7 @@ var builtinsFunctions = map[string]string{ ast.Floor.Name: "opa_arith_floor", ast.Rem.Name: "opa_arith_rem", ast.ArrayConcat.Name: "opa_array_concat", + ast.ArrayFlatten.Name: "opa_array_flatten", ast.ArrayReverse.Name: "opa_array_reverse", ast.ArraySlice.Name: "opa_array_slice", ast.SetDiff.Name: "opa_set_diff", @@ -162,6 +162,7 @@ var builtinsFunctions = map[string]string{ ast.TrimRight.Name: "opa_strings_trim_right", ast.TrimSuffix.Name: "opa_strings_trim_suffix", ast.TrimSpace.Name: "opa_strings_trim_space", + ast.InternalTemplateString.Name: "opa_template_string", ast.NumbersRange.Name: "opa_numbers_range", ast.ToNumber.Name: "opa_to_number", ast.WalkBuiltin.Name: "opa_value_transitive_closure", @@ -414,7 +415,7 @@ func (c *Compiler) initModule() error { }, }, }, - Init: bytes.Repeat([]byte{0}, int(heapBase-offset)), + Init: make([]byte, int(heapBase-offset)), 
}) return nil @@ -1058,9 +1059,11 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err }, }) case *ir.AssignIntStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Target)}) - instrs = append(instrs, instruction.I64Const{Value: stmt.Value}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueNumberSetInt)}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Target)}, + instruction.I64Const{Value: stmt.Value}, + instruction.Call{Index: c.function(opaValueNumberSetInt)}, + ) case *ir.ScanStmt: if err := c.compileScan(stmt, &instrs); err != nil { return nil, err @@ -1073,12 +1076,14 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err } case *ir.DotStmt: if loc, ok := stmt.Source.Value.(ir.Local); ok { - instrs = append(instrs, instruction.GetLocal{Index: c.local(loc)}) - instrs = append(instrs, c.instrRead(stmt.Key)) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueGet)}) - instrs = append(instrs, instruction.TeeLocal{Index: c.local(stmt.Target)}) - instrs = append(instrs, instruction.I32Eqz{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(loc)}, + c.instrRead(stmt.Key), + instruction.Call{Index: c.function(opaValueGet)}, + instruction.TeeLocal{Index: c.local(stmt.Target)}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 0}, + ) } else { // Booleans and string sources would lead to the BrIf (since opa_value_get // on them returns 0), so let's skip trying that. 
@@ -1086,97 +1091,131 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err break } case *ir.LenStmt: - instrs = append(instrs, c.instrRead(stmt.Source)) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueLength)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaNumberSize)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + c.instrRead(stmt.Source), + instruction.Call{Index: c.function(opaValueLength)}, + instruction.Call{Index: c.function(opaNumberSize)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.EqualStmt: - instrs = append(instrs, c.instrRead(stmt.A)) - instrs = append(instrs, c.instrRead(stmt.B)) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueCompare)}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + c.instrRead(stmt.A), + c.instrRead(stmt.B), + instruction.Call{Index: c.function(opaValueCompare)}, + instruction.BrIf{Index: 0}, + ) case *ir.NotEqualStmt: - instrs = append(instrs, c.instrRead(stmt.A)) - instrs = append(instrs, c.instrRead(stmt.B)) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueCompare)}) - instrs = append(instrs, instruction.I32Eqz{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + c.instrRead(stmt.A), + c.instrRead(stmt.B), + instruction.Call{Index: c.function(opaValueCompare)}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 0}, + ) case *ir.MakeNullStmt: - instrs = append(instrs, instruction.Call{Index: c.function(opaNull)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.Call{Index: c.function(opaNull)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.MakeNumberIntStmt: - instrs = append(instrs, instruction.I64Const{Value: stmt.Value}) - instrs = append(instrs, instruction.Call{Index: 
c.function(opaNumberInt)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.I64Const{Value: stmt.Value}, + instruction.Call{Index: c.function(opaNumberInt)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.MakeNumberRefStmt: - instrs = append(instrs, instruction.I32Const{Value: c.stringAddr(stmt.Index)}) - instrs = append(instrs, instruction.I32Const{Value: int32(len(c.policy.Static.Strings[stmt.Index].Value))}) - instrs = append(instrs, instruction.Call{Index: c.function(opaNumberRef)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.I32Const{Value: c.stringAddr(stmt.Index)}, + instruction.I32Const{Value: int32(len(c.policy.Static.Strings[stmt.Index].Value))}, + instruction.Call{Index: c.function(opaNumberRef)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.MakeArrayStmt: - instrs = append(instrs, instruction.I32Const{Value: stmt.Capacity}) - instrs = append(instrs, instruction.Call{Index: c.function(opaArrayWithCap)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.I32Const{Value: stmt.Capacity}, + instruction.Call{Index: c.function(opaArrayWithCap)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.MakeObjectStmt: - instrs = append(instrs, instruction.Call{Index: c.function(opaObject)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.Call{Index: c.function(opaObject)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.MakeSetStmt: - instrs = append(instrs, instruction.Call{Index: c.function(opaSet)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.Call{Index: c.function(opaSet)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case 
*ir.IsArrayStmt: if loc, ok := stmt.Source.Value.(ir.Local); ok { - instrs = append(instrs, instruction.GetLocal{Index: c.local(loc)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueType)}) - instrs = append(instrs, instruction.I32Const{Value: opaTypeArray}) - instrs = append(instrs, instruction.I32Ne{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(loc)}, + instruction.Call{Index: c.function(opaValueType)}, + instruction.I32Const{Value: opaTypeArray}, + instruction.I32Ne{}, + instruction.BrIf{Index: 0}, + ) } else { instrs = append(instrs, instruction.Br{Index: 0}) break } case *ir.IsObjectStmt: if loc, ok := stmt.Source.Value.(ir.Local); ok { - instrs = append(instrs, instruction.GetLocal{Index: c.local(loc)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueType)}) - instrs = append(instrs, instruction.I32Const{Value: opaTypeObject}) - instrs = append(instrs, instruction.I32Ne{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(loc)}, + instruction.Call{Index: c.function(opaValueType)}, + instruction.I32Const{Value: opaTypeObject}, + instruction.I32Ne{}, + instruction.BrIf{Index: 0}, + ) } else { instrs = append(instrs, instruction.Br{Index: 0}) break } case *ir.IsSetStmt: if loc, ok := stmt.Source.Value.(ir.Local); ok { - instrs = append(instrs, instruction.GetLocal{Index: c.local(loc)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueType)}) - instrs = append(instrs, instruction.I32Const{Value: opaTypeSet}) - instrs = append(instrs, instruction.I32Ne{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(loc)}, + instruction.Call{Index: c.function(opaValueType)}, + instruction.I32Const{Value: opaTypeSet}, + instruction.I32Ne{}, + instruction.BrIf{Index: 0}, + ) } else { instrs = append(instrs, 
instruction.Br{Index: 0}) break } case *ir.IsUndefinedStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)}) - instrs = append(instrs, instruction.I32Const{Value: 0}) - instrs = append(instrs, instruction.I32Ne{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Source)}, + instruction.I32Const{Value: 0}, + instruction.I32Ne{}, + instruction.BrIf{Index: 0}, + ) case *ir.ResetLocalStmt: - instrs = append(instrs, instruction.I32Const{Value: 0}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.I32Const{Value: 0}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.IsDefinedStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)}) - instrs = append(instrs, instruction.I32Eqz{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Source)}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 0}, + ) case *ir.ArrayAppendStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Array)}) - instrs = append(instrs, c.instrRead(stmt.Value)) - instrs = append(instrs, instruction.Call{Index: c.function(opaArrayAppend)}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Array)}, + c.instrRead(stmt.Value), + instruction.Call{Index: c.function(opaArrayAppend)}, + ) case *ir.ObjectInsertStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Object)}) - instrs = append(instrs, c.instrRead(stmt.Key)) - instrs = append(instrs, c.instrRead(stmt.Value)) - instrs = append(instrs, instruction.Call{Index: c.function(opaObjectInsert)}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Object)}, + c.instrRead(stmt.Key), + c.instrRead(stmt.Value), + instruction.Call{Index: c.function(opaObjectInsert)}, + ) case *ir.ObjectInsertOnceStmt: tmp := 
c.genLocal() instrs = append(instrs, instruction.Block{ @@ -1203,14 +1242,18 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err }, }) case *ir.ObjectMergeStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.A)}) - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.B)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueMerge)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.A)}, + instruction.GetLocal{Index: c.local(stmt.B)}, + instruction.Call{Index: c.function(opaValueMerge)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.SetAddStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Set)}) - instrs = append(instrs, c.instrRead(stmt.Value)) - instrs = append(instrs, instruction.Call{Index: c.function(opaSetAdd)}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Set)}, + c.instrRead(stmt.Value), + instruction.Call{Index: c.function(opaSetAdd)}, + ) default: var buf bytes.Buffer err := ir.Pretty(&buf, stmt) @@ -1226,8 +1269,7 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err func (c *Compiler) compileScan(scan *ir.ScanStmt, result *[]instruction.Instruction) error { var instrs = *result - instrs = append(instrs, instruction.I32Const{Value: 0}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(scan.Key)}) + instrs = append(instrs, instruction.I32Const{Value: 0}, instruction.SetLocal{Index: c.local(scan.Key)}) body, err := c.compileScanBlock(scan) if err != nil { return err @@ -1242,23 +1284,22 @@ func (c *Compiler) compileScan(scan *ir.ScanStmt, result *[]instruction.Instruct } func (c *Compiler) compileScanBlock(scan *ir.ScanStmt) ([]instruction.Instruction, error) { - var instrs []instruction.Instruction - - // Execute iterator. 
- instrs = append(instrs, instruction.GetLocal{Index: c.local(scan.Source)}) - instrs = append(instrs, instruction.GetLocal{Index: c.local(scan.Key)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueIter)}) - - // Check for emptiness. - instrs = append(instrs, instruction.TeeLocal{Index: c.local(scan.Key)}) - instrs = append(instrs, instruction.I32Eqz{}) - instrs = append(instrs, instruction.BrIf{Index: 1}) - - // Load value. - instrs = append(instrs, instruction.GetLocal{Index: c.local(scan.Source)}) - instrs = append(instrs, instruction.GetLocal{Index: c.local(scan.Key)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueGet)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(scan.Value)}) + //nolint:prealloc // instruction list is known and fixed, clearer as slice literal + instrs := []instruction.Instruction{ + // Execute iterator. + instruction.GetLocal{Index: c.local(scan.Source)}, + instruction.GetLocal{Index: c.local(scan.Key)}, + instruction.Call{Index: c.function(opaValueIter)}, + // Check for emptiness. + instruction.TeeLocal{Index: c.local(scan.Key)}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 1}, + // Load value. + instruction.GetLocal{Index: c.local(scan.Source)}, + instruction.GetLocal{Index: c.local(scan.Key)}, + instruction.Call{Index: c.function(opaValueGet)}, + instruction.SetLocal{Index: c.local(scan.Value)}, + } // Loop body. 
nested, err := c.compileBlock(scan.Block) @@ -1278,8 +1319,7 @@ func (c *Compiler) compileNot(not *ir.NotStmt, result *[]instruction.Instruction // generate and initialize condition variable cond := c.genLocal() - instrs = append(instrs, instruction.I32Const{Value: 1}) - instrs = append(instrs, instruction.SetLocal{Index: cond}) + instrs = append(instrs, instruction.I32Const{Value: 1}, instruction.SetLocal{Index: cond}) nested, err := c.compileBlock(not.Block) if err != nil { @@ -1287,14 +1327,15 @@ func (c *Compiler) compileNot(not *ir.NotStmt, result *[]instruction.Instruction } // unset condition variable if end of block is reached - nested = append(nested, instruction.I32Const{Value: 0}) - nested = append(nested, instruction.SetLocal{Index: cond}) - instrs = append(instrs, instruction.Block{Instrs: nested}) - - // break out of block if condition variable was unset - instrs = append(instrs, instruction.GetLocal{Index: cond}) - instrs = append(instrs, instruction.I32Eqz{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, instruction.Block{Instrs: append(nested, + instruction.I32Const{Value: 0}, + instruction.SetLocal{Index: cond}, + )}, + // break out of block if condition variable was unset + instruction.GetLocal{Index: cond}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 0}, + ) *result = instrs return nil @@ -1304,34 +1345,36 @@ func (c *Compiler) compileWithStmt(with *ir.WithStmt, result *[]instruction.Inst var instrs = *result save := c.genLocal() - instrs = append(instrs, instruction.Call{Index: c.function(opaMemoizePush)}) - instrs = append(instrs, instruction.GetLocal{Index: c.local(with.Local)}) - instrs = append(instrs, instruction.SetLocal{Index: save}) + instrs = append(instrs, + instruction.Call{Index: c.function(opaMemoizePush)}, + instruction.GetLocal{Index: c.local(with.Local)}, + instruction.SetLocal{Index: save}, + ) if len(with.Path) == 0 { - instrs = append(instrs, c.instrRead(with.Value)) - instrs = 
append(instrs, instruction.SetLocal{Index: c.local(with.Local)}) + instrs = append(instrs, c.instrRead(with.Value), instruction.SetLocal{Index: c.local(with.Local)}) } else { instrs = c.compileUpsert(with.Local, with.Path, with.Value, with.Location, instrs) } undefined := c.genLocal() - instrs = append(instrs, instruction.I32Const{Value: 1}) - instrs = append(instrs, instruction.SetLocal{Index: undefined}) + instrs = append(instrs, instruction.I32Const{Value: 1}, instruction.SetLocal{Index: undefined}) nested, err := c.compileBlock(with.Block) if err != nil { return err } - nested = append(nested, instruction.I32Const{Value: 0}) - nested = append(nested, instruction.SetLocal{Index: undefined}) - instrs = append(instrs, instruction.Block{Instrs: nested}) - instrs = append(instrs, instruction.GetLocal{Index: save}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(with.Local)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaMemoizePop)}) - instrs = append(instrs, instruction.GetLocal{Index: undefined}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + nested = append(nested, instruction.I32Const{Value: 0}, instruction.SetLocal{Index: undefined}) + + instrs = append(instrs, + instruction.Block{Instrs: nested}, + instruction.GetLocal{Index: save}, + instruction.SetLocal{Index: c.local(with.Local)}, + instruction.Call{Index: c.function(opaMemoizePop)}, + instruction.GetLocal{Index: undefined}, + instruction.BrIf{Index: 0}, + ) *result = instrs @@ -1339,37 +1382,38 @@ func (c *Compiler) compileWithStmt(with *ir.WithStmt, result *[]instruction.Inst } func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _ ir.Location, instrs []instruction.Instruction) []instruction.Instruction { - lcopy := c.genLocal() // holds copy of local - instrs = append(instrs, instruction.GetLocal{Index: c.local(local)}) - instrs = append(instrs, instruction.SetLocal{Index: lcopy}) - - // Shallow copy the local if defined otherwise 
initialize to an empty object. - instrs = append(instrs, instruction.Block{ - Instrs: []instruction.Instruction{ - instruction.Block{Instrs: []instruction.Instruction{ - instruction.GetLocal{Index: lcopy}, - instruction.I32Eqz{}, - instruction.BrIf{Index: 0}, - instruction.GetLocal{Index: lcopy}, - instruction.Call{Index: c.function(opaValueShallowCopy)}, + instrs = append(instrs, + instruction.GetLocal{Index: c.local(local)}, + instruction.SetLocal{Index: lcopy}, + // Shallow copy the local if defined otherwise initialize to an empty object. + instruction.Block{ + Instrs: []instruction.Instruction{ + instruction.Block{Instrs: []instruction.Instruction{ + instruction.GetLocal{Index: lcopy}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 0}, + instruction.GetLocal{Index: lcopy}, + instruction.Call{Index: c.function(opaValueShallowCopy)}, + instruction.TeeLocal{Index: lcopy}, + instruction.SetLocal{Index: c.local(local)}, + instruction.Br{Index: 1}, + }}, + instruction.Call{Index: c.function(opaObject)}, instruction.TeeLocal{Index: lcopy}, instruction.SetLocal{Index: c.local(local)}, - instruction.Br{Index: 1}, - }}, - instruction.Call{Index: c.function(opaObject)}, - instruction.TeeLocal{Index: lcopy}, - instruction.SetLocal{Index: c.local(local)}, - }, - }) + }, + }) // Initialize the locals that specify the path of the upsert operation. lpath := make(map[int]uint32, len(path)) for i := range path { lpath[i] = c.genLocal() - instrs = append(instrs, instruction.I32Const{Value: c.opaStringAddr(path[i])}) - instrs = append(instrs, instruction.SetLocal{Index: lpath[i]}) + instrs = append(instrs, + instruction.I32Const{Value: c.opaStringAddr(path[i])}, + instruction.SetLocal{Index: lpath[i]}, + ) } // Generate a block that traverses the path of the upsert operation, @@ -1379,36 +1423,34 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _ ltemp := c.genLocal() for i := range len(path) - 1 { - - // Lookup the next part of the path. 
- inner = append(inner, instruction.GetLocal{Index: lcopy}) - inner = append(inner, instruction.GetLocal{Index: lpath[i]}) - inner = append(inner, instruction.Call{Index: c.function(opaValueGet)}) - inner = append(inner, instruction.SetLocal{Index: ltemp}) - - // If the next node is missing, break. - inner = append(inner, instruction.GetLocal{Index: ltemp}) - inner = append(inner, instruction.I32Eqz{}) - inner = append(inner, instruction.BrIf{Index: uint32(i)}) - - // If the next node is not an object, break. - inner = append(inner, instruction.GetLocal{Index: ltemp}) - inner = append(inner, instruction.Call{Index: c.function(opaValueType)}) - inner = append(inner, instruction.I32Const{Value: opaTypeObject}) - inner = append(inner, instruction.I32Ne{}) - inner = append(inner, instruction.BrIf{Index: uint32(i)}) - - // Otherwise, shallow copy the next node node and insert into the copy - // before continuing. - inner = append(inner, instruction.GetLocal{Index: ltemp}) - inner = append(inner, instruction.Call{Index: c.function(opaValueShallowCopy)}) - inner = append(inner, instruction.SetLocal{Index: ltemp}) - inner = append(inner, instruction.GetLocal{Index: lcopy}) - inner = append(inner, instruction.GetLocal{Index: lpath[i]}) - inner = append(inner, instruction.GetLocal{Index: ltemp}) - inner = append(inner, instruction.Call{Index: c.function(opaObjectInsert)}) - inner = append(inner, instruction.GetLocal{Index: ltemp}) - inner = append(inner, instruction.SetLocal{Index: lcopy}) + inner = append(inner, + // Lookup the next part of the path. + instruction.GetLocal{Index: lcopy}, + instruction.GetLocal{Index: lpath[i]}, + instruction.Call{Index: c.function(opaValueGet)}, + instruction.SetLocal{Index: ltemp}, + // If the next node is missing, break. + instruction.GetLocal{Index: ltemp}, + instruction.I32Eqz{}, + instruction.BrIf{Index: uint32(i)}, + // If the next node is not an object, break. 
+ instruction.GetLocal{Index: ltemp}, + instruction.Call{Index: c.function(opaValueType)}, + instruction.I32Const{Value: opaTypeObject}, + instruction.I32Ne{}, + instruction.BrIf{Index: uint32(i)}, + // Otherwise, shallow copy the next node node and insert into the copy + // before continuing. + instruction.GetLocal{Index: ltemp}, + instruction.Call{Index: c.function(opaValueShallowCopy)}, + instruction.SetLocal{Index: ltemp}, + instruction.GetLocal{Index: lcopy}, + instruction.GetLocal{Index: lpath[i]}, + instruction.GetLocal{Index: ltemp}, + instruction.Call{Index: c.function(opaObjectInsert)}, + instruction.GetLocal{Index: ltemp}, + instruction.SetLocal{Index: lcopy}, + ) } inner = append(inner, instruction.Br{Index: uint32(len(path) - 1)}) @@ -1418,31 +1460,33 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _ lval := c.genLocal() for i := range len(path) - 1 { - block = append(block, instruction.Block{Instrs: inner}) - block = append(block, instruction.Call{Index: c.function(opaObject)}) - block = append(block, instruction.SetLocal{Index: lval}) - block = append(block, instruction.GetLocal{Index: lcopy}) - block = append(block, instruction.GetLocal{Index: lpath[i]}) - block = append(block, instruction.GetLocal{Index: lval}) - block = append(block, instruction.Call{Index: c.function(opaObjectInsert)}) - block = append(block, instruction.GetLocal{Index: lval}) - block = append(block, instruction.SetLocal{Index: lcopy}) + block = append(block, + instruction.Block{Instrs: inner}, + instruction.Call{Index: c.function(opaObject)}, + instruction.SetLocal{Index: lval}, + instruction.GetLocal{Index: lcopy}, + instruction.GetLocal{Index: lpath[i]}, + instruction.GetLocal{Index: lval}, + instruction.Call{Index: c.function(opaObjectInsert)}, + instruction.GetLocal{Index: lval}, + instruction.SetLocal{Index: lcopy}, + ) inner = block block = nil } // Finish by inserting the statement's value into the shallow copied node. 
- instrs = append(instrs, instruction.Block{Instrs: inner}) - instrs = append(instrs, instruction.GetLocal{Index: lcopy}) - instrs = append(instrs, instruction.GetLocal{Index: lpath[len(path)-1]}) - instrs = append(instrs, c.instrRead(value)) - instrs = append(instrs, instruction.Call{Index: c.function(opaObjectInsert)}) - - return instrs + return append(instrs, + instruction.Block{Instrs: inner}, + instruction.GetLocal{Index: lcopy}, + instruction.GetLocal{Index: lpath[len(path)-1]}, + c.instrRead(value), + instruction.Call{Index: c.function(opaObjectInsert)}, + ) } func (c *Compiler) compileCallDynamicStmt(stmt *ir.CallDynamicStmt, result *[]instruction.Instruction) error { - instrs := []instruction.Instruction{} + instrs := make([]instruction.Instruction, 0, 3+3*len(stmt.Path)+len(stmt.Args)+10) larray := c.genLocal() lidx := c.genLocal() @@ -1515,7 +1559,7 @@ func (c *Compiler) compileCallStmt(stmt *ir.CallStmt, result *[]instruction.Inst func (c *Compiler) compileInternalCall(stmt *ir.CallStmt, index uint32, result *[]instruction.Instruction) error { - instrs := []instruction.Instruction{} + instrs := make([]instruction.Instruction, 0, len(stmt.Args)+4) // Prepare function args and call. for _, arg := range stmt.Args { diff --git a/vendor/github.com/open-policy-agent/opa/internal/config/config.go b/vendor/github.com/open-policy-agent/opa/internal/config/config.go deleted file mode 100644 index 53dfc6d6cb..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/config/config.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package config implements helper functions to parse OPA's configuration. 
-package config - -import ( - "encoding/json" - "fmt" - "os" - "regexp" - "strings" - - "sigs.k8s.io/yaml" - - "github.com/open-policy-agent/opa/internal/strvals" - "github.com/open-policy-agent/opa/v1/keys" - "github.com/open-policy-agent/opa/v1/logging" - "github.com/open-policy-agent/opa/v1/plugins/rest" - "github.com/open-policy-agent/opa/v1/tracing" - "github.com/open-policy-agent/opa/v1/util" -) - -// ServiceOptions stores the options passed to ParseServicesConfig -type ServiceOptions struct { - Raw json.RawMessage - AuthPlugin rest.AuthPluginLookupFunc - Keys map[string]*keys.Config - Logger logging.Logger - DistributedTacingOpts tracing.Options -} - -// ParseServicesConfig returns a set of named service clients. The service -// clients can be specified either as an array or as a map. Some systems (e.g., -// Helm) do not have proper support for configuration values nested under -// arrays, so just support both here. -func ParseServicesConfig(opts ServiceOptions) (map[string]rest.Client, error) { - - services := map[string]rest.Client{} - - var arr []json.RawMessage - var obj map[string]json.RawMessage - - if err := util.Unmarshal(opts.Raw, &arr); err == nil { - for _, s := range arr { - client, err := rest.New(s, opts.Keys, rest.AuthPluginLookup(opts.AuthPlugin), rest.Logger(opts.Logger), rest.DistributedTracingOpts(opts.DistributedTacingOpts)) - if err != nil { - return nil, err - } - services[client.Service()] = client - } - } else if util.Unmarshal(opts.Raw, &obj) == nil { - for k := range obj { - client, err := rest.New(obj[k], opts.Keys, rest.Name(k), rest.AuthPluginLookup(opts.AuthPlugin), rest.Logger(opts.Logger), rest.DistributedTracingOpts(opts.DistributedTacingOpts)) - if err != nil { - return nil, err - } - services[client.Service()] = client - } - } else { - // Return error from array decode as that is the default format. - return nil, err - } - - return services, nil -} - -// Load implements configuration file loading. 
The supplied config file will be -// read from disk (if specified) and overrides will be applied. If no config file is -// specified, the overrides can still be applied to an empty config. -func Load(configFile string, overrides []string, overrideFiles []string) ([]byte, error) { - baseConf := map[string]any{} - - // User specified config file - if configFile != "" { - var bytes []byte - var err error - bytes, err = os.ReadFile(configFile) - if err != nil { - return nil, err - } - - processedConf := subEnvVars(string(bytes)) - - if err := yaml.Unmarshal([]byte(processedConf), &baseConf); err != nil { - return nil, fmt.Errorf("failed to parse %s: %s", configFile, err) - } - } - - overrideConf := map[string]any{} - - // User specified a config override via --set - for _, override := range overrides { - processedOverride := subEnvVars(override) - if err := strvals.ParseInto(processedOverride, overrideConf); err != nil { - return nil, fmt.Errorf("failed parsing --set data: %s", err) - } - } - - // User specified a config override value via --set-file - for _, override := range overrideFiles { - reader := func(rs []rune) (any, error) { - bytes, err := os.ReadFile(string(rs)) - value := strings.TrimSpace(string(bytes)) - return value, err - } - if err := strvals.ParseIntoFile(override, overrideConf, reader); err != nil { - return nil, fmt.Errorf("failed parsing --set-file data: %s", err) - } - } - - // Merge together base config file and overrides, prefer the overrides - conf := mergeValues(baseConf, overrideConf) - - // Take the patched config and marshal back to YAML - return yaml.Marshal(conf) -} - -// regex looking for ${...} notation strings -var envRegex = regexp.MustCompile(`(?U:\${.*})`) - -// SubEnvVars will look for any environment variables in the passed in string -// with the syntax of ${VAR_NAME} and replace that string with ENV[VAR_NAME] -func SubEnvVars(s string) string { - return subEnvVars(s) -} - -func subEnvVars(s string) string { - updatedConfig := 
envRegex.ReplaceAllStringFunc(s, func(s string) string { - // Trim off the '${' and '}' - if len(s) <= 3 { - // This should never happen.. - return "" - } - varName := s[2 : len(s)-1] - - // Lookup the variable in the environment. We do not - // play by bash rules: if its undefined we'll keep it - // as-is, it could be replaced somewhere down the line. - // If it's set to "", we'll return that. - if lu, ok := os.LookupEnv(varName); ok { - return lu - } - return s - }) - - return updatedConfig -} - -// mergeValues will merge source and destination map, preferring values from the source map -func mergeValues(dest map[string]any, src map[string]any) map[string]any { - for k, v := range src { - // If the key doesn't exist already, then just set the key to that value - if _, exists := dest[k]; !exists { - dest[k] = v - continue - } - nextMap, ok := v.(map[string]any) - // If it isn't another map, overwrite the value - if !ok { - dest[k] = v - continue - } - // Edge case: If the key exists in the destination, but isn't a map - destMap, isMap := dest[k].(map[string]any) - // If the source map has a map for this key, prefer it - if !isMap { - dest[k] = v - continue - } - // If we got to this point, it is a map in both, so merge them - dest[k] = mergeValues(destMap, nextMap) - } - return dest -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go b/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go index bfacf3bcea..791753528f 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go +++ b/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go @@ -2,10 +2,12 @@ // which supports lookups, sets, appends, insertions, and deletions. package bitvector +import "slices" + // A BitVector is a variable sized vector of bits. It supports // lookups, sets, appends, insertions, and deletions. // -// This class is not thread safe. 
+// Operations are not thread safe. type BitVector struct { data []byte length int @@ -14,10 +16,25 @@ type BitVector struct { // NewBitVector creates and initializes a new bit vector with length // elements, using data as its initial contents. func NewBitVector(data []byte, length int) *BitVector { - return &BitVector{ - data: data, - length: length, + return &BitVector{data: data, length: length} +} + +func (vector *BitVector) Clear() *BitVector { + if vector == nil { + return nil } + clear(vector.data) + vector.length = 0 + + return vector +} + +func (vector *BitVector) Reset(size, length int) *BitVector { + clear(vector.data) + vector.data = slices.Grow(vector.data, size)[:size] + vector.length = length + + return vector } // Bytes returns a slice of the contents of the bit vector. If the caller changes the returned slice, diff --git a/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go b/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go index 1dafc57b0b..64511e5eb5 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go +++ b/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go @@ -148,14 +148,17 @@ package edittree import ( "errors" "fmt" - "math/big" - "sort" "strings" "github.com/open-policy-agent/opa/internal/edittree/bitvector" "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" ) +var refPool = util.NewSlicePool[*ast.Term](1) + +var editTreePool = util.NewSyncPool[EditTree]() + // Deletions are encoded with a nil value pointer. 
type EditTree struct { value *ast.Term @@ -172,120 +175,79 @@ func NewEditTree(term *ast.Term) *EditTree { return nil } - var tree EditTree + return initForTerm(&EditTree{}, term) +} + +func EditTreeFromPool(term *ast.Term) *EditTree { + return initForTerm(editTreePool.Get(), term) +} + +func Dispose(e *EditTree) { + if e != nil { + editTreePool.Put(e.Reset()) + } +} + +func (e *EditTree) Reset() *EditTree { + e.value = nil + clear(e.childKeys) + clear(e.childScalarValues) + clear(e.childCompositeValues) + + e.eliminated = e.eliminated.Clear() + e.insertions = e.insertions.Clear() + + return e +} + +func initForTerm(tree *EditTree, term *ast.Term) *EditTree { + tree.value = term + switch x := term.Value.(type) { case ast.Object, ast.Set: - tree = EditTree{ - value: term, - childKeys: map[int]*ast.Term{}, - childScalarValues: map[int]*ast.Term{}, - childCompositeValues: map[int]*EditTree{}, + if tree.childKeys == nil { + tree.childKeys = map[int]*ast.Term{} + } + if tree.childScalarValues == nil { + tree.childScalarValues = map[int]*ast.Term{} + } + if tree.childCompositeValues == nil { + tree.childCompositeValues = map[int]*EditTree{} } case *ast.Array: - tree = EditTree{ - value: term, - childScalarValues: map[int]*ast.Term{}, - childCompositeValues: map[int]*EditTree{}, + if tree.childScalarValues == nil { + tree.childScalarValues = map[int]*ast.Term{} + } + if tree.childCompositeValues == nil { + tree.childCompositeValues = map[int]*EditTree{} } bytesLength := ((x.Len() - 1) / 8) + 1 // How many bytes to use for the bit-vectors. 
- tree.eliminated = bitvector.NewBitVector(make([]byte, bytesLength), x.Len()) - tree.insertions = bitvector.NewBitVector(make([]byte, bytesLength), x.Len()) - default: - tree = EditTree{ - value: term, + if tree.eliminated == nil { + tree.eliminated = bitvector.NewBitVector(make([]byte, bytesLength), x.Len()) + } else { + tree.eliminated = tree.eliminated.Reset(bytesLength, x.Len()) + } + if tree.insertions == nil { + tree.insertions = bitvector.NewBitVector(make([]byte, bytesLength), x.Len()) + } else { + tree.insertions = tree.insertions.Reset(bytesLength, x.Len()) } } - return &tree + return tree } // Returns correct (collision-resolved) hash for this term + whether or not // it was found in the table already. func (e *EditTree) getKeyHash(key *ast.Term) (int, bool) { hash := key.Hash() - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v ast.Value) bool - - switch x := key.Value.(type) { - case ast.Null, ast.Boolean, ast.String, ast.Var: - equal = func(y ast.Value) bool { return x == y } - case ast.Number: - if xi, ok := x.Int64(); ok { - equal = func(y ast.Value) bool { - if y, ok := y.(ast.Number); ok { - if yi, ok := y.Int64(); ok { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. 
- var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b ast.Value) bool { - if bNum, ok := b.(ast.Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - return false - } - - default: - equal = func(y ast.Value) bool { return ast.Compare(x, y) == 0 } - } // Look through childKeys, looking up the original hash // value first, and then use linear-probing to iter // through the keys until we either find the Term we're // after, or run out of candidates. for curr, ok := e.childKeys[hash]; ok; { - if equal(curr.Value) { + if ast.KeyHashEqual(curr.Value, key.Value) { return hash, true } @@ -308,17 +270,14 @@ func isComposite(t *ast.Term) bool { } } -//gcassert:inline func (e *EditTree) setChildKey(hash int, key *ast.Term) { e.childKeys[hash] = key } -//gcassert:inline func (e *EditTree) setChildScalarValue(hash int, value *ast.Term) { e.childScalarValues[hash] = value } -//gcassert:inline func (e *EditTree) setChildCompositeValue(hash int, child *EditTree) { e.childCompositeValues[hash] = child } @@ -355,11 +314,10 @@ func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) { // We only collapse this Set-typed node if a composite type is involved. if isComposite(key) { // TODO: Investigate re-rendering *only* the immediate composite children. 
- collapsed := e.Render() - e.value = collapsed - e.childKeys = map[int]*ast.Term{} - e.childScalarValues = map[int]*ast.Term{} - e.childCompositeValues = map[int]*EditTree{} + e.value = e.Render() + clear(e.childKeys) + clear(e.childScalarValues) + clear(e.childCompositeValues) } return e.unsafeInsertSet(key, value), nil case *ast.Array: @@ -378,12 +336,13 @@ func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) { } func (e *EditTree) unsafeInsertObject(key, value *ast.Term) *EditTree { - child := NewEditTree(value) keyHash, found := e.getKeyHash(key) if found { e.deleteChildValue(keyHash) } e.setChildKey(keyHash, key) + + child := NewEditTree(value) if isComposite(value) { e.setChildCompositeValue(keyHash, child) } else { @@ -408,10 +367,9 @@ func (e *EditTree) unsafeInsertSet(key, value *ast.Term) *EditTree { } func (e *EditTree) unsafeInsertArray(idx int, value *ast.Term) *EditTree { - child := NewEditTree(value) // Collect insertion indexes above the insertion site for rewriting. - rewritesScalars := []int{} - rewritesComposites := []int{} + var rewritesScalars, rewritesComposites []int + for i := idx; i < e.insertions.Length(); i++ { if e.insertions.Element(i) == 1 { if _, ok := e.childScalarValues[i]; ok { @@ -446,6 +404,8 @@ func (e *EditTree) unsafeInsertArray(idx int, value *ast.Term) *EditTree { } else { e.insertions.Insert(1, idx) } + + child := NewEditTree(value) if isComposite(value) { e.setChildCompositeValue(idx, child) } else { @@ -504,9 +464,9 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) { // TODO: Investigate re-rendering *only* the immediate composite children. collapsed := e.Render() e.value = collapsed - e.childKeys = map[int]*ast.Term{} - e.childScalarValues = map[int]*ast.Term{} - e.childCompositeValues = map[int]*EditTree{} + clear(e.childKeys) + clear(e.childScalarValues) + clear(e.childCompositeValues) } else { keyHash, found := e.getKeyHash(key) // If child found, replace with delete node. 
If delete node already existed, error. @@ -616,7 +576,14 @@ func findIndexOfNthZero(n int, bv *bitvector.BitVector) (int, bool) { // Helper function for sets/objects when the key isn't present in either // child map. func (e *EditTree) fallbackDelete(key *ast.Term) (*EditTree, error) { - value, err := e.value.Value.Find(ast.Ref{key}) + // get ref from pool + rptr := refPool.Get(1) + defer refPool.Put(rptr) + + ref := *rptr + ref[0] = key + + value, err := e.value.Value.Find(ref) if err != nil { return nil, fmt.Errorf("cannot delete child key %v that does not exist", key) } @@ -672,14 +639,14 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) { } // Fall back to looking up the key in e.value. // Extend the tree if key is present. Error otherwise. - if v, err := x.Find(ast.Ref{path[0]}); err == nil { + if v, err := x.Find(path[:1]); err == nil { child, err := e.Insert(path[0], ast.NewTerm(v)) if err != nil { return nil, err } return child.Unfold(path[1:]) } - return nil, fmt.Errorf("path %v does not exist in object term %v", ast.Ref{path[0]}, e.value.Value) + return nil, fmt.Errorf("path %v does not exist in object term %v", path[0], e.value.Value) case ast.Set: // Sets' keys *are* their values, so in order to allow accurate // traversal, we have to collapse the tree beneath this node, @@ -688,12 +655,11 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) { if isComposite(key) { collapsed := e.Render() e.value = collapsed - e.childKeys = map[int]*ast.Term{} - e.childScalarValues = map[int]*ast.Term{} - e.childCompositeValues = map[int]*EditTree{} + clear(e.childKeys) + clear(e.childScalarValues) + clear(e.childCompositeValues) } else { - keyHash, found := e.getKeyHash(key) - if found { + if keyHash, found := e.getKeyHash(key); found { if term, ok := e.childScalarValues[keyHash]; ok { child := NewEditTree(term) return child.Unfold(path[1:]) @@ -702,14 +668,14 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) { } // Fall back to looking 
up the key in e.value. // Extend the tree if key is present. Error otherwise. - if v, err := e.value.Value.Find(ast.Ref{path[0]}); err == nil { + if v, err := e.value.Value.Find(path[:1]); err == nil { child, err := e.Insert(path[0], ast.NewTerm(v)) if err != nil { return nil, err } return child.Unfold(path[1:]) } - return nil, fmt.Errorf("path %v does not exist in set term %v", ast.Ref{path[0]}, e.value.Value) + return nil, fmt.Errorf("path %v does not exist in set term %v", path[:1], e.value.Value) case *ast.Array: idx, err := toIndex(e.insertions.Length(), path[0]) if err != nil { @@ -724,13 +690,17 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) { } idxt := ast.InternedTerm(idx) + rptr := refPool.Get(1) + defer refPool.Put(rptr) + + ref := *rptr + ref[0] = idxt // Fall back to looking up the key in e.value. // Extend the tree if key is present. Error otherwise. - if v, err := x.Find(ast.Ref{idxt}); err == nil { + if v, err := x.Find(ref); err == nil { // TODO: Consider a more efficient "Replace" function that special-cases this for arrays instead? - _, err := e.Delete(idxt) - if err != nil { + if _, err := e.Delete(idxt); err != nil { return nil, err } child, err := e.Insert(idxt, ast.NewTerm(v)) @@ -739,10 +709,10 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) { } return child.Unfold(path[1:]) } - return nil, fmt.Errorf("path %v does not exist in array term %v", ast.Ref{ast.IntNumberTerm(idx)}, e.value.Value) + return nil, fmt.Errorf("path %v does not exist in array term %v", ast.InternedTerm(idx), e.value.Value) default: // Catch all primitive types. 
- return nil, fmt.Errorf("expected composite type for path %v, found value: %v (type: %T)", ast.Ref{path[0]}, x, x) + return nil, fmt.Errorf("expected composite type for path %v, found value: %v (type: %T)", path[0], x, x) } } @@ -864,8 +834,7 @@ func (e *EditTree) Render() *ast.Term { if t, ok := e.childScalarValues[i]; ok { out = append(out, t) } else if child, ok := e.childCompositeValues[i]; ok { - t := child.Render() - out = append(out, t) + out = append(out, child.Render()) } else { panic(fmt.Errorf("invalid index %d does not exist in array", i)) } @@ -887,9 +856,10 @@ func (e *EditTree) InsertAtPath(path ast.Ref, value *ast.Term) (*EditTree, error if len(path) == 0 { e.value = value - e.childKeys = map[int]*ast.Term{} - e.childScalarValues = map[int]*ast.Term{} - e.childCompositeValues = map[int]*EditTree{} + clear(e.childKeys) + clear(e.childScalarValues) + clear(e.childCompositeValues) + if v, ok := value.Value.(*ast.Array); ok { bytesLength := ((v.Len() - 1) / 8) + 1 // How many bytes to use for the bit-vectors. e.eliminated = bitvector.NewBitVector(make([]byte, bytesLength), v.Len()) @@ -998,24 +968,19 @@ func (e *EditTree) Exists(path ast.Ref) bool { // so that we can accurately unfold it again for an update, // once we know that the key we care about is present. if isComposite(key) { - collapsed := e.Render() - e.value = collapsed - e.childKeys = map[int]*ast.Term{} - e.childScalarValues = map[int]*ast.Term{} - e.childCompositeValues = map[int]*EditTree{} - } else { - keyHash, found := e.getKeyHash(key) - if found { - if _, ok := e.childScalarValues[keyHash]; ok { - return len(path) == 1 - } + e.value = e.Render() + clear(e.childKeys) + clear(e.childScalarValues) + clear(e.childCompositeValues) + } else if keyHash, found := e.getKeyHash(key); found { + if _, ok := e.childScalarValues[keyHash]; ok { + return len(path) == 1 } } // Fallback if child lookup failed. 
_, err := e.value.Value.Find(path) return err == nil case *ast.Array: - var idx int idx, err := toIndex(e.insertions.Length(), path[0]) if err != nil { return false @@ -1028,7 +993,16 @@ func (e *EditTree) Exists(path ast.Ref) bool { } // Fallback if child lookup failed. // We have to ensure that the lookup term is a number here, or Find will fail. - _, err = x.Find(ast.Ref{ast.InternedTerm(idx)}.Concat(path[1:])) + rptr := refPool.Get(len(path)) + + ref := *rptr + ref[0] = ast.InternedTerm(idx) + copy(ref[1:], path[1:]) + + _, err = x.Find(ref) + + refPool.Put(rptr) + return err == nil default: // Catch all primitive types. @@ -1055,8 +1029,7 @@ func toIndex(arrayLength int, term *ast.Term) (int, error) { if v == "-" { return arrayLength, nil } - num := ast.Number(v) - if i, ok = num.Int(); !ok { + if i, ok = ast.Number(v).Int(); !ok { return 0, errors.New("invalid string for indexing") } if v != "0" && strings.HasPrefix(string(v), "0") { @@ -1081,6 +1054,14 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term { return nil } + // term pointer and ref pointer for reuse in lookups and iteration below. + tptr, rptr := ast.TermPtrPool.Get(), refPool.Get(1) + defer func() { + tptr.Value = nil + ast.TermPtrPool.Put(tptr) + refPool.Put(rptr) + }() + // Separate out keys for this level. // In the event of paths like "a", "a/b", "a/b/c", the "a" path will win out. // Nil keys, such as "" or [], are not permitted. (legacy behavior) @@ -1103,7 +1084,8 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term { renderNow := ast.NewSet(renderNowList...) // Clear everything out of the pathMap that has a renderNow candidate. 
for k := range pathMap { - if renderNow.Contains(ast.NewTerm(k)) { + tptr.Value = k + if renderNow.Contains(tptr) { delete(pathMap, k) } } @@ -1112,37 +1094,46 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term { switch e.value.Value.(type) { case ast.Object: out := make([][2]*ast.Term, 0, renderNow.Len()+len(pathMap)) + ref := *rptr + // Render any finished paths. - renderNow.Foreach(func(k *ast.Term) { - if e.Exists(ast.Ref{k}) { - subtreeResult, _ := e.RenderAtPath(ast.Ref{k}) + for _, k := range renderNow.Slice() { + ref[0] = k + if e.Exists(ref) { + subtreeResult, _ := e.RenderAtPath(ref) out = append(out, [2]*ast.Term{k, subtreeResult}) } - }) + } // Recursively descend remaining paths. for k, p := range pathMap { - if e.Exists(ast.Ref{ast.NewTerm(k)}) { - child, _ := e.Unfold(ast.Ref{ast.NewTerm(k)}) + tptr.Value = k + ref[0] = tptr + if e.Exists(ref) { + child, _ := e.Unfold(ref) subtreeResult := child.Filter(p) out = append(out, [2]*ast.Term{ast.NewTerm(k), subtreeResult}) } } + return ast.ObjectTerm(out...) case ast.Set: out := make([]*ast.Term, 0, renderNow.Len()+len(pathMap)) + ref := *rptr // Render any finished paths. - renderNow.Foreach(func(k *ast.Term) { - if e.Exists(ast.Ref{k}) { - subtreeResult, _ := e.RenderAtPath(ast.Ref{k}) + for _, k := range renderNow.Slice() { + ref[0] = k + if e.Exists(ref) { + subtreeResult, _ := e.RenderAtPath(ref) out = append(out, subtreeResult) } - }) + } // Recursively descend remaining paths. for k, p := range pathMap { - if e.Exists(ast.Ref{ast.NewTerm(k)}) { - child, _ := e.Unfold(ast.Ref{ast.NewTerm(k)}) - subtreeResult := child.Filter(p) - out = append(out, subtreeResult) + tptr.Value = k + ref[0] = tptr + if e.Exists(ref) { + child, _ := e.Unfold(ref) + out = append(out, child.Filter(p)) } } return ast.SetTerm(out...) @@ -1150,27 +1141,25 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term { // No early exit here, because we might have just deletes on the // original array. 
We build a new Array with modified/deleted keys. out := make([]*ast.Term, 0, renderNow.Len()+len(pathMap)) - // Sort array indexes before descending. - idxList := make([]*ast.Term, 0, len(pathMap)) - renderNow.Foreach(func(k *ast.Term) { - idxList = append(idxList, k) - }) + idxList := append(make([]*ast.Term, 0, renderNow.Len()+len(pathMap)), renderNow.Slice()...) for k := range pathMap { idxList = append(idxList, ast.NewTerm(k)) } - sort.Sort(termSlice(idxList)) - // Render child or recursively descend as needed. - for i := range idxList { + + ref := *rptr + + // Render child or recursively descend sorted indexes as needed. + for i := range util.SortedFunc(idxList, ast.TermValueCompare) { k := idxList[i] - if renderNow.Contains(k) { - if e.Exists(ast.Ref{k}) { - subtreeResult, _ := e.RenderAtPath(ast.Ref{k}) + ref[0] = k + if e.Exists(ref) { + if renderNow.Contains(k) { + subtreeResult, _ := e.RenderAtPath(ref) out = append(out, subtreeResult) + } else { + child, _ := e.Unfold(ref) + out = append(out, child.Filter(pathMap[k.Value])) } - } else if e.Exists(ast.Ref{k}) { - child, _ := e.Unfold(ast.Ref{k}) - subtreeResult := child.Filter(pathMap[k.Value]) - out = append(out, subtreeResult) } } return ast.ArrayTerm(out...) 
@@ -1178,9 +1167,3 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term { return e.value } } - -type termSlice []*ast.Term - -func (s termSlice) Less(i, j int) bool { return ast.Compare(s[i].Value, s[j].Value) < 0 } -func (s termSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s termSlice) Len() int { return len(s) } diff --git a/vendor/github.com/open-policy-agent/opa/internal/file/archive/tarball.go b/vendor/github.com/open-policy-agent/opa/internal/file/archive/tarball.go index 6b8ba48d15..93396aa96f 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/file/archive/tarball.go +++ b/vendor/github.com/open-policy-agent/opa/internal/file/archive/tarball.go @@ -4,39 +4,72 @@ import ( "archive/tar" "bytes" "compress/gzip" + "encoding/json" + "errors" + "io" "strings" ) -// MustWriteTarGz write the list of file names and content -// into a tarball. -func MustWriteTarGz(files [][2]string) *bytes.Buffer { - var buf bytes.Buffer - gw := gzip.NewWriter(&buf) - defer gw.Close() +type TarGzWriter struct { + *tar.Writer + + gw *gzip.Writer +} + +func NewTarGzWriter(w io.Writer) *TarGzWriter { + gw := gzip.NewWriter(w) tw := tar.NewWriter(gw) - defer tw.Close() - for _, file := range files { - if err := WriteFile(tw, file[0], []byte(file[1])); err != nil { - panic(err) - } + + return &TarGzWriter{ + Writer: tw, + gw: gw, } - return &buf } -// WriteFile adds a file header with content to the given tar writer -func WriteFile(tw *tar.Writer, path string, bs []byte) error { - +func (tgw *TarGzWriter) WriteFile(path string, bs []byte) (err error) { hdr := &tar.Header{ - Name: "/" + strings.TrimLeft(path, "/"), + Name: path, Mode: 0600, Typeflag: tar.TypeReg, Size: int64(len(bs)), } - if err := tw.WriteHeader(hdr); err != nil { - return err + if err = tgw.WriteHeader(hdr); err == nil { + _, err = tgw.Write(bs) } - _, err := tw.Write(bs) return err } + +func (tgw *TarGzWriter) WriteJSONFile(path string, v any) error { + buf := &bytes.Buffer{} + if err := 
json.NewEncoder(buf).Encode(v); err != nil { + return err + } + + return tgw.WriteFile(path, buf.Bytes()) +} + +func (tgw *TarGzWriter) Close() error { + return errors.Join(tgw.Writer.Close(), tgw.gw.Close()) +} + +// MustWriteTarGz writes the list of file names and content into a tarball. +// Paths are prefixed with "/". +func MustWriteTarGz(files [][2]string) *bytes.Buffer { + buf := &bytes.Buffer{} + tgw := NewTarGzWriter(buf) + defer tgw.Close() + + for _, file := range files { + if !strings.HasPrefix(file[0], "/") { + file[0] = "/" + file[0] + } + + if err := tgw.WriteFile(file[0], []byte(file[1])); err != nil { + panic(err) + } + } + + return buf +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/utils.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/utils.go index ca071930f2..a8639d4d9a 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/utils.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/utils.go @@ -23,7 +23,7 @@ // // created 26-02-2013 -// nolint: deadcode,unused,varcheck // Package in development (2021). +// nolint:unused // Package in development (2021). 
package gojsonschema import ( diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go index 8d59158717..445b5d7c2a 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go +++ b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go @@ -1768,7 +1768,7 @@ func (p *Planner) planRef(ref ast.Ref, iter planiter) error { return errors.New("illegal ref: non-var head") } - if head.Compare(ast.DefaultRootDocument.Value) == 0 { + if head.Equal(ast.DefaultRootDocument.Value) { virtual := p.rules.Get(ref[0].Value) base := &baseptr{local: p.vars.GetOrEmpty(ast.DefaultRootDocument.Value.(ast.Var))} return p.planRefData(virtual, base, ref, 1, iter) @@ -2070,7 +2070,7 @@ func (p *Planner) planRefDataExtent(virtual *ruletrie, base *baseptr, iter plani } } if anyKeyNonGround { - var rules []*ast.Rule + rules := make([]*ast.Rule, 0, len(virtual.Children())) for _, key := range virtual.Children() { // TODO(sr): skip functions rules = append(rules, virtual.Get(key).Rules()...) 
diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go index 12679a15be..f93261a809 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go @@ -2,6 +2,7 @@ package crypto import ( "bytes" + "crypto/ecdh" "crypto/ecdsa" "crypto/elliptic" "crypto/hmac" @@ -27,27 +28,77 @@ func ECDSAKey(curve elliptic.Curve, d []byte) *ecdsa.PrivateKey { // ECDSAKeyFromPoint takes the given elliptic curve and point and returns the // private and public keypair func ECDSAKeyFromPoint(curve elliptic.Curve, d *big.Int) *ecdsa.PrivateKey { - pX, pY := curve.ScalarBaseMult(d.Bytes()) + dBytes := make([]byte, (curve.Params().BitSize+7)/8) + d.FillBytes(dBytes) privKey := &ecdsa.PrivateKey{ PublicKey: ecdsa.PublicKey{ Curve: curve, - X: pX, - Y: pY, }, D: d, } + var pubBytes []byte + switch curve { + case elliptic.P256(): + if ecdhPriv, err := ecdh.P256().NewPrivateKey(dBytes); err == nil { + pubBytes = ecdhPriv.PublicKey().Bytes() + } + case elliptic.P384(): + if ecdhPriv, err := ecdh.P384().NewPrivateKey(dBytes); err == nil { + pubBytes = ecdhPriv.PublicKey().Bytes() + } + case elliptic.P521(): + if ecdhPriv, err := ecdh.P521().NewPrivateKey(dBytes); err == nil { + pubBytes = ecdhPriv.PublicKey().Bytes() + } + } + + if len(pubBytes) > 0 { + byteLen := (curve.Params().BitSize + 7) / 8 + privKey.X = new(big.Int).SetBytes(pubBytes[1 : 1+byteLen]) + privKey.Y = new(big.Int).SetBytes(pubBytes[1+byteLen:]) + } else { + panic(fmt.Sprintf("unsupported curve or invalid private key: %v", curve)) + } + return privKey } +// mathIntToBytes writes val as a big-endian, fixed-length byte slice into out, +// zero-padding on the left when val.Bytes() is shorter than out. 
This satisfies +// the uncompressed SEC 1 encoding (0x04 || X || Y) expected by crypto/ecdh's +// NewPublicKey: https://pkg.go.dev/crypto/ecdh#Curve.NewPublicKey +func mathIntToBytes(val *big.Int, out []byte) { + valBytes := val.Bytes() + copy(out[len(out)-len(valBytes):], valBytes) +} + // ECDSAPublicKey takes the provide curve and (x, y) coordinates and returns // *ecdsa.PublicKey. Returns an error if the given points are not on the curve. func ECDSAPublicKey(curve elliptic.Curve, x, y []byte) (*ecdsa.PublicKey, error) { xPoint := (&big.Int{}).SetBytes(x) yPoint := (&big.Int{}).SetBytes(y) - if !curve.IsOnCurve(xPoint, yPoint) { + byteLen := (curve.Params().BitSize + 7) / 8 + buf := make([]byte, 1+2*byteLen) + buf[0] = 4 // uncompressed point + mathIntToBytes(xPoint, buf[1:1+byteLen]) + mathIntToBytes(yPoint, buf[1+byteLen:]) + + var err error + switch curve { + case elliptic.P256(): + _, err = ecdh.P256().NewPublicKey(buf) + case elliptic.P384(): + _, err = ecdh.P384().NewPublicKey(buf) + case elliptic.P521(): + _, err = ecdh.P521().NewPublicKey(buf) + default: + err = fmt.Errorf("unsupported curve for ECDSA: %v", curve) + } + + if err != nil { return nil, fmt.Errorf("point(%v, %v) is not on the given curve", xPoint.String(), yPoint.String()) } diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go index 07aa568fa2..c463ccbff8 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go @@ -158,6 +158,8 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body [] // include the values for the signed headers orderedKeys := util.KeysSorted(headersToSign) for _, k := range orderedKeys { + // TODO: fix later + //nolint:perfsprint canonicalReq += k + ":" + strings.Join(headersToSign[k], ",") + "\n" } canonicalReq += "\n" 
// linefeed to terminate headers diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go index 8f6d760e82..db20eddc9d 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go @@ -4,6 +4,7 @@ package aws import ( "bytes" "crypto" + "crypto/ecdh" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" @@ -115,7 +116,18 @@ func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, priv := new(ecdsa.PrivateKey) priv.PublicKey.Curve = p256 priv.D = d - priv.PublicKey.X, priv.PublicKey.Y = p256.ScalarBaseMult(d.Bytes()) + + dBytes := make([]byte, 32) + d.FillBytes(dBytes) + + ecdhPriv, err := ecdh.P256().NewPrivateKey(dBytes) + if err != nil { + return nil, err + } + pubBytes := ecdhPriv.PublicKey().Bytes() + + priv.PublicKey.X = new(big.Int).SetBytes(pubBytes[1:33]) + priv.PublicKey.Y = new(big.Int).SetBytes(pubBytes[33:]) return priv, nil } diff --git a/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go b/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go index 653794b0a9..9590b8886b 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go +++ b/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go @@ -7,16 +7,16 @@ package ref import ( "errors" - "strings" "github.com/open-policy-agent/opa/v1/ast" "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/util" ) // ParseDataPath returns a ref from the slash separated path s rooted at data. // All path segments are treated as identifier strings. 
func ParseDataPath(s string) (ast.Ref, error) { - path, ok := storage.ParsePath("/" + strings.TrimPrefix(s, "/")) + path, ok := storage.ParsePath(util.WithPrefix(s, "/")) if !ok { return nil, errors.New("invalid path") } diff --git a/vendor/github.com/open-policy-agent/opa/internal/report/report.go b/vendor/github.com/open-policy-agent/opa/internal/report/report.go deleted file mode 100644 index bc71d66a3c..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/report/report.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package report provides functions to report OPA's version information to an external service and process the response. -package report - -import ( - "cmp" - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "os" - "runtime" - "strconv" - "strings" - "time" - - "github.com/open-policy-agent/opa/internal/semver" - "github.com/open-policy-agent/opa/v1/keys" - "github.com/open-policy-agent/opa/v1/logging" - "github.com/open-policy-agent/opa/v1/version" - - "github.com/open-policy-agent/opa/v1/plugins/rest" - "github.com/open-policy-agent/opa/v1/util" -) - -// ExternalServiceURL is the base HTTP URL for a github instance used -// to query for more recent version. -// If not otherwise specified, it will use the hard-coded default, api.github.com. -// GHRepo is the repository to use, and defaults to "open-policy-agent/opa" -// -// Override at build time via: -// -// -ldflags "-X github.com/open-policy-agent/opa/internal/report.ExternalServiceURL=" -// -ldflags "-X github.com/open-policy-agent/opa/internal/report.GHRepo=" -// -// ExternalServiceURL will be overridden if the OPA_TELEMETRY_SERVICE_URL environment variable -// is provided. 
-var ExternalServiceURL = "https://api.github.com" -var GHRepo = "open-policy-agent/opa" - -// Reporter reports information such as the version, heap usage about the running OPA instance to an external service -type Reporter interface { - SendReport(ctx context.Context) (*DataResponse, error) - RegisterGatherer(key string, f Gatherer) -} - -// Gatherer represents a mechanism to inject additional data in the telemetry report -type Gatherer func(ctx context.Context) (any, error) - -// DataResponse represents the data returned by the external service -type DataResponse struct { - Latest ReleaseDetails `json:"latest"` -} - -// ReleaseDetails holds information about the latest OPA release -type ReleaseDetails struct { - Download string `json:"download,omitempty"` // link to download the OPA release - ReleaseNotes string `json:"release_notes,omitempty"` // link to the OPA release notes - LatestRelease string `json:"latest_release,omitempty"` // latest OPA released version - OPAUpToDate bool `json:"opa_up_to_date,omitempty"` // is running OPA version greater than or equal to the latest released -} - -// Options supplies parameters to the reporter. 
-type Options struct { - Logger logging.Logger -} - -type GHVersionCollector struct { - client rest.Client -} - -type GHResponse struct { - TagName string `json:"tag_name,omitempty"` // latest OPA release tag - ReleaseNotes string `json:"html_url,omitempty"` // link to the OPA release notes - Download string `json:"assets_url,omitempty"` // link to download the OPA release -} - -// New returns an instance of the Reporter -func New(opts Options) (Reporter, error) { - r := GHVersionCollector{} - - url := cmp.Or(os.Getenv("OPA_TELEMETRY_SERVICE_URL"), ExternalServiceURL) - - restConfig := fmt.Appendf(nil, `{ - "url": %q, - }`, url) - - client, err := rest.New(restConfig, map[string]*keys.Config{}, rest.Logger(opts.Logger)) - if err != nil { - return nil, err - } - r.client = client - - // heap_usage_bytes is always present, so register it unconditionally - r.RegisterGatherer("heap_usage_bytes", readRuntimeMemStats) - - return &r, nil -} - -// SendReport sends the telemetry report which includes information such as the OPA version, current memory usage to -// the external service -func (r *GHVersionCollector) SendReport(ctx context.Context) (*DataResponse, error) { - rCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - resp, err := r.client.Do(rCtx, "GET", fmt.Sprintf("/repos/%s/releases/latest", GHRepo)) - if err != nil { - return nil, err - } - - defer util.Close(resp) - - switch resp.StatusCode { - case http.StatusOK: - if resp.Body != nil { - var result GHResponse - err := json.NewDecoder(resp.Body).Decode(&result) - if err != nil { - return nil, err - } - return createDataResponse(result) - } - return nil, nil - default: - return nil, fmt.Errorf("server replied with HTTP %v", resp.StatusCode) - } -} - -func createDataResponse(ghResp GHResponse) (*DataResponse, error) { - if ghResp.TagName == "" { - return nil, errors.New("server response does not contain tag_name") - } - - v := strings.TrimPrefix(version.Version, "v") - sv, err := 
semver.NewVersion(v) - if err != nil { - return nil, fmt.Errorf("failed to parse current version %q: %w", v, err) - } - - latestV := strings.TrimPrefix(ghResp.TagName, "v") - latestSV, err := semver.NewVersion(latestV) - if err != nil { - return nil, fmt.Errorf("failed to parse latest version %q: %w", latestV, err) - } - - isLatest := sv.Compare(*latestSV) >= 0 - - // Note: alternatively, we could look through the assets in the GH API response to find a matching asset, - // and use its URL. However, this is not guaranteed to be more robust, and wouldn't use the 'openpolicyagent.org' domain. - downloadLink := fmt.Sprintf("https://openpolicyagent.org/downloads/%v/opa_%v_%v", - ghResp.TagName, runtime.GOOS, runtime.GOARCH) - - if runtime.GOARCH == "arm64" { - downloadLink = fmt.Sprintf("%v_static", downloadLink) - } - - if strings.HasPrefix(runtime.GOOS, "win") { - downloadLink = fmt.Sprintf("%v.exe", downloadLink) - } - - return &DataResponse{ - Latest: ReleaseDetails{ - Download: downloadLink, - ReleaseNotes: ghResp.ReleaseNotes, - LatestRelease: ghResp.TagName, - OPAUpToDate: isLatest, - }, - }, nil -} - -func (*GHVersionCollector) RegisterGatherer(_ string, _ Gatherer) { - // no-op for this implementation -} - -// IsSet returns true if dr is populated. -func (dr *DataResponse) IsSet() bool { - return dr != nil && dr.Latest.LatestRelease != "" && dr.Latest.Download != "" && dr.Latest.ReleaseNotes != "" -} - -// Slice returns the dr as a slice of key-value string pairs. If dr is nil, this function returns an empty slice. -func (dr *DataResponse) Slice() [][2]string { - - if !dr.IsSet() { - return nil - } - - return [][2]string{ - {"Latest Upstream Version", strings.TrimPrefix(dr.Latest.LatestRelease, "v")}, - {"Download", dr.Latest.Download}, - {"Release Notes", dr.Latest.ReleaseNotes}, - } -} - -// Pretty returns OPA release information in a human-readable format. 
-func (dr *DataResponse) Pretty() string { - if !dr.IsSet() { - return "" - } - - pairs := dr.Slice() - lines := make([]string, 0, len(pairs)) - - for _, pair := range pairs { - lines = append(lines, fmt.Sprintf("%v: %v", pair[0], pair[1])) - } - - return strings.Join(lines, "\n") -} - -func readRuntimeMemStats(_ context.Context) (any, error) { - var m runtime.MemStats - runtime.ReadMemStats(&m) - return strconv.FormatUint(m.Alloc, 10), nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go b/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go deleted file mode 100644 index 4f93a4f127..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package init is an internal package with helpers for data and policy loading during initialization. -package init - -import ( - "context" - "fmt" - "io/fs" - "path/filepath" - "strings" - - storedversion "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/v1/ast" - "github.com/open-policy-agent/opa/v1/bundle" - "github.com/open-policy-agent/opa/v1/loader" - "github.com/open-policy-agent/opa/v1/metrics" - "github.com/open-policy-agent/opa/v1/storage" -) - -// InsertAndCompileOptions contains the input for the operation. -type InsertAndCompileOptions struct { - Store storage.Store - Txn storage.Transaction - Files loader.Result - Bundles map[string]*bundle.Bundle - MaxErrors int - EnablePrintStatements bool - ParserOptions ast.ParserOptions - BundleActivatorPlugin string -} - -// InsertAndCompileResult contains the output of the operation. 
-type InsertAndCompileResult struct { - Compiler *ast.Compiler - Metrics metrics.Metrics -} - -// InsertAndCompile writes data and policy into the store and returns a compiler for the -// store contents. -func InsertAndCompile(ctx context.Context, opts InsertAndCompileOptions) (*InsertAndCompileResult, error) { - if len(opts.Files.Documents) > 0 { - if err := opts.Store.Write(ctx, opts.Txn, storage.AddOp, storage.Path{}, opts.Files.Documents); err != nil { - return nil, fmt.Errorf("storage error: %w", err) - } - } - - policies := make(map[string]*ast.Module, len(opts.Files.Modules)) - - for id, parsed := range opts.Files.Modules { - policies[id] = parsed.Parsed - } - - compiler := ast.NewCompiler(). - WithDefaultRegoVersion(opts.ParserOptions.RegoVersion). - SetErrorLimit(opts.MaxErrors). - WithPathConflictsCheck(storage.NonEmpty(ctx, opts.Store, opts.Txn)). - WithEnablePrintStatements(opts.EnablePrintStatements) - m := metrics.New() - - activation := &bundle.ActivateOpts{ - Ctx: ctx, - Store: opts.Store, - Txn: opts.Txn, - Compiler: compiler, - Metrics: m, - Bundles: opts.Bundles, - ExtraModules: policies, - ParserOptions: opts.ParserOptions, - Plugin: opts.BundleActivatorPlugin, - } - - err := bundle.Activate(activation) - if err != nil { - return nil, err - } - - // Policies in bundles will have already been added to the store, but - // modules loaded outside of bundles will need to be added manually. - for id, parsed := range opts.Files.Modules { - if err := opts.Store.UpsertPolicy(ctx, opts.Txn, id, parsed.Raw); err != nil { - return nil, fmt.Errorf("storage error: %w", err) - } - } - - // Set the version in the store last to prevent data files from overwriting. - if err := storedversion.Write(ctx, opts.Store, opts.Txn); err != nil { - return nil, fmt.Errorf("storage error: %w", err) - } - - return &InsertAndCompileResult{Compiler: compiler, Metrics: m}, nil -} - -// LoadPathsResult contains the output loading a set of paths. 
-type LoadPathsResult struct { - Bundles map[string]*bundle.Bundle - Files loader.Result -} - -// WalkPathsResult contains the output loading a set of paths. -type WalkPathsResult struct { - BundlesLoader []BundleLoader - FileDescriptors []*Descriptor -} - -// BundleLoader contains information about files in a bundle -type BundleLoader struct { - DirectoryLoader bundle.DirectoryLoader - IsDir bool -} - -// Descriptor contains information about a file -type Descriptor struct { - Root string - Path string -} - -// LoadPaths reads data and policy from the given paths and returns a set of bundles or -// raw loader file results. -func LoadPaths(paths []string, - filter loader.Filter, - asBundle bool, - bvc *bundle.VerificationConfig, - skipVerify bool, - bundleLazyLoading bool, - processAnnotations bool, - caps *ast.Capabilities, - fsys fs.FS) (*LoadPathsResult, error) { - return LoadPathsForRegoVersion(ast.RegoV0, paths, filter, asBundle, bvc, skipVerify, bundleLazyLoading, processAnnotations, false, caps, fsys) -} - -func LoadPathsForRegoVersion(regoVersion ast.RegoVersion, - paths []string, - filter loader.Filter, - asBundle bool, - bvc *bundle.VerificationConfig, - skipVerify bool, - bundleLazyLoading bool, - processAnnotations bool, - followSymlinks bool, - caps *ast.Capabilities, - fsys fs.FS) (*LoadPathsResult, error) { - - if caps == nil { - caps = ast.CapabilitiesForThisVersion() - } - - // tar.gz files are automatically loaded as bundles - var likelyBundles, nonBundlePaths []string - if !asBundle { - likelyBundles, nonBundlePaths = splitByTarGzExt(paths) - paths = likelyBundles - } - - var result LoadPathsResult - var err error - if asBundle || len(likelyBundles) > 0 { - result.Bundles = make(map[string]*bundle.Bundle, len(paths)) - for _, path := range paths { - result.Bundles[path], err = loader.NewFileLoader(). - WithFS(fsys). - WithBundleVerificationConfig(bvc). - WithSkipBundleVerification(skipVerify). - WithBundleLazyLoadingMode(bundleLazyLoading). 
- WithFilter(filter). - WithProcessAnnotation(processAnnotations). - WithCapabilities(caps). - WithRegoVersion(regoVersion). - WithFollowSymlinks(followSymlinks). - AsBundle(path) - if err != nil { - return nil, err - } - } - } - - if len(nonBundlePaths) == 0 { - return &result, nil - } - - files, err := loader.NewFileLoader(). - WithFS(fsys). - WithBundleLazyLoadingMode(bundleLazyLoading). - WithProcessAnnotation(processAnnotations). - WithCapabilities(caps). - WithRegoVersion(regoVersion). - Filtered(nonBundlePaths, filter) - - if err != nil { - return nil, err - } - - result.Files = *files - - return &result, nil -} - -// splitByTarGzExt splits the paths in 2 groups. Ones with .tar.gz and another with -// non .tar.gz extensions. -func splitByTarGzExt(paths []string) (targzs []string, nonTargzs []string) { - for _, path := range paths { - if strings.HasSuffix(path, ".tar.gz") { - targzs = append(targzs, path) - } else { - nonTargzs = append(nonTargzs, path) - } - } - return -} - -// WalkPaths reads data and policy from the given paths and returns a set of bundle directory loaders -// or descriptors that contain information about files. -func WalkPaths(paths []string, filter loader.Filter, asBundle bool) (*WalkPathsResult, error) { - - var result WalkPathsResult - - if asBundle { - result.BundlesLoader = make([]BundleLoader, len(paths)) - for i, path := range paths { - bundleLoader, isDir, err := loader.GetBundleDirectoryLoader(path) - if err != nil { - return nil, err - } - - result.BundlesLoader[i] = BundleLoader{ - DirectoryLoader: bundleLoader, - IsDir: isDir, - } - } - return &result, nil - } - - result.FileDescriptors = []*Descriptor{} - for _, path := range paths { - filePaths, err := loader.FilteredPaths([]string{path}, filter) - if err != nil { - return nil, err - } - - for _, fp := range filePaths { - // Trim off the root directory and return path as if chrooted - cleanedPath := strings.TrimPrefix(fp, path) - if path == "." 
&& filepath.Base(fp) == bundle.ManifestExt { - cleanedPath = fp - } - - if !strings.HasPrefix(cleanedPath, "/") { - cleanedPath = "/" + cleanedPath - } - - result.FileDescriptors = append(result.FileDescriptors, &Descriptor{ - Root: path, - Path: cleanedPath, - }) - } - } - - return &result, nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go b/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go index 389eeccc18..725f86318a 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go +++ b/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go @@ -14,222 +14,234 @@ // Semantic Versions http://semver.org -// Package semver has been vendored from: +// This file was originally vendored from: // https://github.com/coreos/go-semver/tree/e214231b295a8ea9479f11b70b35d5acf3556d9b/semver -// A number of the original functions of the package have been removed since -// they are not required for our built-ins. +// There isn't a single line left from the original source today, but being generous about +// attribution won't hurt. package semver import ( - "bytes" "fmt" "regexp" "strconv" "strings" + + "github.com/open-policy-agent/opa/v1/util" ) +// reMetaIdentifier matches pre-release and metadata identifiers against the spec requirements +var reMetaIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) + // Version represents a parsed SemVer type Version struct { Major int64 Minor int64 Patch int64 - PreRelease PreRelease - Metadata string + PreRelease string `json:"PreRelease,omitempty"` + Metadata string `json:"Metadata,omitempty"` } -// PreRelease represents a pre-release suffix string -type PreRelease string +// Parse constructs new semver Version from version string. 
+func Parse(version string) (v Version, err error) { + version = strings.TrimPrefix(version, "v") -func splitOff(input *string, delim string) (val string) { - parts := strings.SplitN(*input, delim, 2) + version, v.Metadata = cut(version, '+') + if v.Metadata != "" && !reMetaIdentifier.MatchString(v.Metadata) { + return v, fmt.Errorf("invalid metadata identifier: %s", v.Metadata) + } - if len(parts) == 2 { - *input = parts[0] - val = parts[1] + version, v.PreRelease = cut(version, '-') + if v.PreRelease != "" && !reMetaIdentifier.MatchString(v.PreRelease) { + return v, fmt.Errorf("invalid pre-release identifier: %s", v.PreRelease) } - return val -} + if strings.Count(version, ".") != 2 { + return v, fmt.Errorf("%s should contain major, minor, and patch versions", version) + } -// NewVersion constructs new SemVers from strings -func NewVersion(version string) (*Version, error) { - v := Version{} + major, after := cut(version, '.') + if v.Major, err = strconv.ParseInt(major, 10, 64); err != nil { + return v, err + } - if err := v.Set(version); err != nil { - return nil, err + minor, after := cut(after, '.') + if v.Minor, err = strconv.ParseInt(minor, 10, 64); err != nil { + return v, err } - return &v, nil -} + if v.Patch, err = strconv.ParseInt(after, 10, 64); err != nil { + return v, err + } -// Set parses and updates v from the given version string. Implements flag.Value -func (v *Version) Set(version string) error { - metadata := splitOff(&version, "+") - preRelease := PreRelease(splitOff(&version, "-")) - dotParts := strings.SplitN(version, ".", 3) + return v, nil +} - if len(dotParts) != 3 { - return fmt.Errorf("%s is not in dotted-tri format", version) +// MustParse is like Parse but panics if the version string is invalid instead of returning an error. 
+func MustParse(version string) Version { + v, err := Parse(version) + if err != nil { + panic(err) } - if err := validateIdentifier(string(preRelease)); err != nil { - return fmt.Errorf("failed to validate pre-release: %v", err) - } + return v +} - if err := validateIdentifier(metadata); err != nil { - return fmt.Errorf("failed to validate metadata: %v", err) +// Compare compares two semver strings. +func Compare(a, b string) int { + aV, err := Parse(a) + if err != nil { + return -1 } - parsed := make([]int64, 3) - - for i, v := range dotParts[:3] { - val, err := strconv.ParseInt(v, 10, 64) - parsed[i] = val - if err != nil { - return err - } + bV, err := Parse(b) + if err != nil { + return 1 } - v.Metadata = metadata - v.PreRelease = preRelease - v.Major = parsed[0] - v.Minor = parsed[1] - v.Patch = parsed[2] - return nil + return aV.Compare(bV) } -func (v Version) String() string { - var buffer bytes.Buffer +// AppendText appends the textual representation of the version to b and returns the extended buffer. +// This method conforms to the encoding.TextAppender interface, and is useful for serializing the Version +// without allocating, provided the caller has pre-allocated sufficient space in b. +func (v Version) AppendText(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, 0, length(v)) + } - fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + b = append(strconv.AppendInt(b, v.Major, 10), '.') + b = append(strconv.AppendInt(b, v.Minor, 10), '.') + b = strconv.AppendInt(b, v.Patch, 10) if v.PreRelease != "" { - fmt.Fprintf(&buffer, "-%s", v.PreRelease) + b = append(append(b, '-'), v.PreRelease...) } - if v.Metadata != "" { - fmt.Fprintf(&buffer, "+%s", v.Metadata) + b = append(append(b, '+'), v.Metadata...) } - return buffer.String() -} - -// Compare tests if v is less than, equal to, or greater than versionB, -// returning -1, 0, or +1 respectively. 
-func (v Version) Compare(versionB Version) int { - if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { - return cmp - } - return preReleaseCompare(v, versionB) + return b, nil } -// Slice converts the comparable parts of the semver into a slice of integers. -func (v Version) Slice() []int64 { - return []int64{v.Major, v.Minor, v.Patch} -} +// String returns the string representation of the version. +func (v Version) String() string { + bs := make([]byte, 0, length(v)) + bs, _ = v.AppendText(bs) -// Slice splits the pre-release suffix string -func (p PreRelease) Slice() []string { - preRelease := string(p) - return strings.Split(preRelease, ".") + return string(bs) } -func preReleaseCompare(versionA Version, versionB Version) int { - a := versionA.PreRelease - b := versionB.PreRelease +// Compare tests if v is less than, equal to, or greater than other, returning -1, 0, or +1 respectively. +// Comparison is based on the SemVer specification (https://semver.org/#spec-item-11). +func (v Version) Compare(other Version) int { + if v.Major > other.Major { + return 1 + } else if v.Major < other.Major { + return -1 + } - /* Handle the case where if two versions are otherwise equal it is the - * one without a PreRelease that is greater */ - if len(a) == 0 && (len(b) > 0) { + if v.Minor > other.Minor { return 1 - } else if len(b) == 0 && (len(a) > 0) { + } else if v.Minor < other.Minor { return -1 } - // If there is a prerelease, check and compare each part. 
- return recursivePreReleaseCompare(a.Slice(), b.Slice()) -} + if v.Patch > other.Patch { + return 1 + } else if v.Patch < other.Patch { + return -1 + } -func recursiveCompare(versionA []int64, versionB []int64) int { - if len(versionA) == 0 { + if v.PreRelease == other.PreRelease { return 0 } - a := versionA[0] - b := versionB[0] - - if a > b { + // if two versions are otherwise equal it is the one without a pre-release that is greater + if v.PreRelease == "" && other.PreRelease != "" { return 1 - } else if a < b { + } + if other.PreRelease == "" && v.PreRelease != "" { return -1 } - return recursiveCompare(versionA[1:], versionB[1:]) -} + a, afterA := cut(v.PreRelease, '.') + b, afterB := cut(other.PreRelease, '.') -func recursivePreReleaseCompare(versionA []string, versionB []string) int { - // A larger set of pre-release fields has a higher precedence than a smaller set, - // if all of the preceding identifiers are equal. - if len(versionA) == 0 { - if len(versionB) > 0 { + for { + if a == "" && b != "" { return -1 } - return 0 - } else if len(versionB) == 0 { - // We're longer than versionB so return 1. - return 1 - } - - a := versionA[0] - b := versionB[0] - - aInt := false - bInt := false + if a != "" && b == "" { + return 1 + } - aI, err := strconv.Atoi(versionA[0]) - if err == nil { - aInt = true - } + aIsInt := isAllDecimals(a) + bIsInt := isAllDecimals(b) - bI, err := strconv.Atoi(versionB[0]) - if err == nil { - bInt = true - } + // numeric identifiers have lower precedence than non-numeric + if aIsInt && !bIsInt { + return -1 + } else if !aIsInt && bIsInt { + return 1 + } - // Numeric identifiers always have lower precedence than non-numeric identifiers. 
- if aInt && !bInt { - return -1 - } else if !aInt && bInt { - return 1 - } + if aIsInt && bIsInt { + aInt, _ := strconv.Atoi(a) + bInt, _ := strconv.Atoi(b) + + if aInt > bInt { + return 1 + } else if aInt < bInt { + return -1 + } + } else { + // string comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + } - // Handle Integer Comparison - if aInt && bInt { - if aI > bI { + // a larger set of pre-release fields has a higher precedence than a + // smaller set, if all of the preceding identifiers are equal. + if afterA != "" && afterB == "" { return 1 - } else if aI < bI { + } else if afterA == "" && afterB != "" { return -1 } - } - // Handle String Comparison - if a > b { - return 1 - } else if a < b { - return -1 + a, afterA = cut(afterA, '.') + b, afterB = cut(afterB, '.') } +} - return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +func isAllDecimals(s string) bool { + for _, r := range s { + if r < '0' || r > '9' { + return false + } + } + return s != "" } -// validateIdentifier makes sure the provided identifier satisfies semver spec -func validateIdentifier(id string) error { - if id != "" && !reIdentifier.MatchString(id) { - return fmt.Errorf("%s is not a valid semver identifier", id) +// length allows calculating the length of the version for pre-allocation. +func length(v Version) int { + n := util.NumDigitsInt64(v.Major) + util.NumDigitsInt64(v.Minor) + util.NumDigitsInt64(v.Patch) + 2 + if v.PreRelease != "" { + n += len(v.PreRelease) + 1 } - return nil + if v.Metadata != "" { + n += len(v.Metadata) + 1 + } + return n } -// reIdentifier is a regular expression used to check that pre-release and metadata -// identifiers satisfy the spec requirements -var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) +// cut is a *slightly* faster version of strings.Cut only accepting +// single byte separators, and skipping the boolean return value. 
+func cut(s string, sep byte) (before, after string) { + if i := strings.IndexByte(s, sep); i >= 0 { + return s[:i], s[i+1:] + } + return s, "" +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/strvals/doc.go b/vendor/github.com/open-policy-agent/opa/internal/strvals/doc.go deleted file mode 100644 index 019dc87bb9..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/strvals/doc.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package strvals provides tools for working with strval lines. - -OPA runtime config supports a compressed format for YAML settings which we call strvals. -The format is roughly like this: - - name=value,topname.subname=value - -The above is equivalent to the YAML document - - name: value - topname: - subname: value - -This package provides a parser and utilities for converting the strvals format -to other formats. -*/ -package strvals diff --git a/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go b/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go deleted file mode 100644 index 6d867262f5..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go +++ /dev/null @@ -1,429 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strvals - -import ( - "bytes" - "errors" - "fmt" - "io" - "strconv" - "strings" - - "sigs.k8s.io/yaml" -) - -// ErrNotList indicates that a non-list was treated as a list. -var ErrNotList = errors.New("not a list") - -// MaxIndex is the maximum index that will be allowed by setIndex. -// The default value 65536 = 1024 * 64 -const MaxIndex = 65536 - -// ToYAML takes a string of arguments and converts to a YAML document. -func ToYAML(s string) (string, error) { - m, err := Parse(s) - if err != nil { - return "", err - } - d, err := yaml.Marshal(m) - return string(d), err -} - -// Parse parses a set line. -// -// A set line is of the form name1=value1,name2=value2 -func Parse(s string) (map[string]any, error) { - vals := map[string]any{} - scanner := bytes.NewBufferString(s) - t := newParser(scanner, vals, false) - err := t.parse() - return vals, err -} - -// ParseString parses a set line and forces a string value. -// -// A set line is of the form name1=value1,name2=value2 -func ParseString(s string) (map[string]any, error) { - vals := map[string]any{} - scanner := bytes.NewBufferString(s) - t := newParser(scanner, vals, true) - err := t.parse() - return vals, err -} - -// ParseInto parses a strvals line and merges the result into dest. -// -// If the strval string has a key that exists in dest, it overwrites the -// dest version. -func ParseInto(s string, dest map[string]any) error { - scanner := bytes.NewBufferString(s) - t := newParser(scanner, dest, false) - return t.parse() -} - -// ParseIntoFile parses a filevals line and merges the result into dest. 
-// -// This method always returns a string as the value. -func ParseIntoFile(s string, dest map[string]any, runesToVal runesToVal) error { - scanner := bytes.NewBufferString(s) - t := newFileParser(scanner, dest, runesToVal) - return t.parse() -} - -// ParseIntoString parses a strvals line and merges the result into dest. -// -// This method always returns a string as the value. -func ParseIntoString(s string, dest map[string]any) error { - scanner := bytes.NewBufferString(s) - t := newParser(scanner, dest, true) - return t.parse() -} - -// parser is a simple parser that takes a strvals line and parses it into a -// map representation. -// -// where sc is the source of the original data being parsed -// where data is the final parsed data from the parses with correct types -// where st is a boolean to figure out if we're forcing it to parse values as string -type parser struct { - sc *bytes.Buffer - data map[string]any - runesToVal runesToVal -} - -type runesToVal func([]rune) (any, error) - -func newParser(sc *bytes.Buffer, data map[string]any, stringBool bool) *parser { - rs2v := func(rs []rune) (any, error) { - return typedVal(rs, stringBool), nil - } - return &parser{sc: sc, data: data, runesToVal: rs2v} -} - -func newFileParser(sc *bytes.Buffer, data map[string]any, runesToVal runesToVal) *parser { - return &parser{sc: sc, data: data, runesToVal: runesToVal} -} - -func (t *parser) parse() error { - for { - err := t.key(t.data) - if err == nil { - continue - } - if err == io.EOF { - return nil - } - return err - } -} - -func runeSet(r []rune) map[rune]bool { - s := make(map[rune]bool, len(r)) - for _, rr := range r { - s[rr] = true - } - return s -} - -func (t *parser) key(data map[string]any) error { - stop := runeSet([]rune{'=', '[', ',', '.'}) - for { - switch k, last, err := runesUntil(t.sc, stop); { - case err != nil: - if len(k) == 0 { - return err - } - return fmt.Errorf("key %q has no value", string(k)) - case last == '[': - // We are in a list index 
context, so we need to set an index. - i, err := t.keyIndex() - if err != nil { - return fmt.Errorf("error parsing index: %s", err) - } - kk := string(k) - // Find or create target list - list := []any{} - if _, ok := data[kk]; ok { - list = data[kk].([]any) - } - - // Now we need to get the value after the ]. - list, err = t.listItem(list, i) - set(data, kk, list) - return err - case last == '=': - // End of key. Consume =, Get value. - // FIXME: Get value list first - vl, e := t.valList() - switch e { - case nil: - set(data, string(k), vl) - return nil - case io.EOF: - set(data, string(k), "") - return e - case ErrNotList: - rs, e := t.val() - if e != nil && e != io.EOF { - return e - } - v, e := t.runesToVal(rs) - set(data, string(k), v) - return e - default: - return e - } - - case last == ',': - // No value given. Set the value to empty string. Return error. - set(data, string(k), "") - return fmt.Errorf("key %q has no value (cannot end with ,)", string(k)) - case last == '.': - // First, create or find the target map. - inner := map[string]any{} - if _, ok := data[string(k)]; ok { - inner = data[string(k)].(map[string]any) - } - - // Recurse - e := t.key(inner) - if len(inner) == 0 { - return fmt.Errorf("key map %q has no value", string(k)) - } - set(data, string(k), inner) - return e - } - } -} - -func set(data map[string]any, key string, val any) { - // If key is empty, don't set it. - if len(key) == 0 { - return - } - data[key] = val -} - -func setIndex(list []any, index int, val any) (l2 []any, err error) { - // There are possible index values that are out of range on a target system - // causing a panic. This will catch the panic and return an error instead. - // The value of the index that causes a panic varies from system to system. 
- defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("error processing index %d: %s", index, r) - } - }() - - if index < 0 { - return list, fmt.Errorf("negative %d index not allowed", index) - } - if index > MaxIndex { - return list, fmt.Errorf("index of %d is greater than maximum supported index of %d", index, MaxIndex) - } - if len(list) <= index { - newlist := make([]any, index+1) - copy(newlist, list) - list = newlist - } - list[index] = val - return list, nil -} - -func (t *parser) keyIndex() (int, error) { - // First, get the key. - stop := runeSet([]rune{']'}) - v, _, err := runesUntil(t.sc, stop) - if err != nil { - return 0, err - } - // v should be the index - return strconv.Atoi(string(v)) - -} -func (t *parser) listItem(list []any, i int) ([]any, error) { - if i < 0 { - return list, fmt.Errorf("negative %d index not allowed", i) - } - stop := runeSet([]rune{'[', '.', '='}) - switch k, last, err := runesUntil(t.sc, stop); { - case len(k) > 0: - return list, fmt.Errorf("unexpected data at end of array index: %q", k) - case err != nil: - return list, err - case last == '=': - vl, e := t.valList() - switch e { - case nil: - return setIndex(list, i, vl) - case io.EOF: - return setIndex(list, i, "") - case ErrNotList: - rs, e := t.val() - if e != nil && e != io.EOF { - return list, e - } - v, e := t.runesToVal(rs) - if e != nil { - return nil, e - } - return setIndex(list, i, v) - default: - return list, e - } - case last == '[': - // now we have a nested list. Read the index and handle. - i, err := t.keyIndex() - if err != nil { - return list, fmt.Errorf("error parsing index: %s", err) - } - // Now we need to get the value after the ]. - list2, err := t.listItem(list, i) - if err != nil { - return nil, err - } - return setIndex(list, i, list2) - case last == '.': - // We have a nested object. 
Send to t.key - inner := map[string]any{} - if len(list) > i { - var ok bool - inner, ok = list[i].(map[string]any) - if !ok { - // We have indices out of order. Initialize empty value. - list[i] = map[string]any{} - inner = list[i].(map[string]any) - } - } - - // Recurse - e := t.key(inner) - if e != nil { - return list, e - } - return setIndex(list, i, inner) - default: - return nil, fmt.Errorf("parse error: unexpected token %v", last) - } -} - -func (t *parser) val() ([]rune, error) { - stop := runeSet([]rune{','}) - v, _, err := runesUntil(t.sc, stop) - return v, err -} - -func (t *parser) valList() ([]any, error) { - r, _, e := t.sc.ReadRune() - if e != nil { - return []any{}, e - } - - if r != '{' { - e = t.sc.UnreadRune() - if e != nil { - return []any{}, e - } - return []any{}, ErrNotList - } - - list := []any{} - stop := runeSet([]rune{',', '}'}) - for { - switch rs, last, err := runesUntil(t.sc, stop); { - case err != nil: - if err == io.EOF { - err = errors.New("list must terminate with '}'") - } - return list, err - case last == '}': - // If this is followed by ',', consume it. 
- if r, _, e := t.sc.ReadRune(); e == nil && r != ',' { - e = t.sc.UnreadRune() - if e != nil { - return []any{}, e - } - } - v, e := t.runesToVal(rs) - list = append(list, v) - return list, e - case last == ',': - v, e := t.runesToVal(rs) - if e != nil { - return list, e - } - list = append(list, v) - } - } -} - -func runesUntil(in io.RuneReader, stop map[rune]bool) ([]rune, rune, error) { - var v []rune - for { - switch r, _, e := in.ReadRune(); { - case e != nil: - return v, r, e - case inMap(r, stop): - return v, r, nil - case r == '\\': - next, _, e := in.ReadRune() - if e != nil { - return v, next, e - } - v = append(v, next) - default: - v = append(v, r) - } - } -} - -func inMap(k rune, m map[rune]bool) bool { - _, ok := m[k] - return ok -} - -func typedVal(v []rune, st bool) any { - val := string(v) - - if st { - return val - } - - if strings.EqualFold(val, "true") { - return true - } - - if strings.EqualFold(val, "false") { - return false - } - - if strings.EqualFold(val, "null") { - return struct{}{} - } - - if strings.EqualFold(val, "0") { - return int64(0) - } - - // If this value does not start with zero, try parsing it to an int - if len(val) != 0 && val[0] != '0' { - if iv, err := strconv.ParseInt(val, 10, 64); err == nil { - return iv - } - } - - return val -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go b/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go index a18f024a25..63e1a5b071 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go +++ b/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go @@ -86,7 +86,7 @@ func byteDecimalToHexMAC(bytes []byte, sep string) string { hexs.Grow((l * 3) - 1) // 1 byte -> 2 hexes + 1 separator (if one char) for i, b := range bytes { - hexs.WriteString(fmt.Sprintf("%02x", b)) + fmt.Fprintf(&hexs, "%02x", b) if i < l-1 { hexs.WriteString(sep) } diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go 
b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go index 0695ce94fe..a2f23d9a64 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go +++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go @@ -83,7 +83,7 @@ func readModule(r io.Reader) (*module.Module, error) { var m module.Module - if err := readSections(r, &m); err != nil && err != io.EOF { + if err := readSections(r, &m); err != io.EOF { return nil, err } diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go b/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go index 3465f0808f..603ab5cd77 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go @@ -34,6 +34,7 @@ type ( RelatedResources []*RelatedResourceAnnotation `json:"related_resources,omitempty"` Authors []*AuthorAnnotation `json:"authors,omitempty"` Schemas []*SchemaAnnotation `json:"schemas,omitempty"` + Compile *CompileAnnotation `json:"compile,omitempty"` Custom map[string]any `json:"custom,omitempty"` Location *Location `json:"location,omitempty"` @@ -48,6 +49,11 @@ type ( Definition *any `json:"definition,omitempty"` } + CompileAnnotation struct { + Unknowns []Ref `json:"unknowns,omitempty"` + MaskRule Ref `json:"mask_rule,omitempty"` // NOTE: This doesn't need to start with "data.package", it can be relative + } + AuthorAnnotation struct { Name string `json:"name"` Email string `json:"email,omitempty"` @@ -151,6 +157,10 @@ func (a *Annotations) Compare(other *Annotations) int { return cmp } + if cmp := a.Compile.Compare(other.Compile); cmp != 0 { + return cmp + } + if a.Entrypoint != other.Entrypoint { if a.Entrypoint { return 1 @@ -403,6 +413,8 @@ func (a *Annotations) Copy(node Node) *Annotations { cpy.Schemas[i] = a.Schemas[i].Copy() } + cpy.Compile = a.Compile.Copy() + if a.Custom != nil { cpy.Custom = deepcopy.Map(a.Custom) } @@ -421,18 +433,7 @@ func 
(a *Annotations) toObject() (*Object, *Error) { } if len(a.Scope) > 0 { - switch a.Scope { - case annotationScopeDocument: - obj.Insert(InternedTerm("scope"), InternedTerm("document")) - case annotationScopePackage: - obj.Insert(InternedTerm("scope"), InternedTerm("package")) - case annotationScopeRule: - obj.Insert(InternedTerm("scope"), InternedTerm("rule")) - case annotationScopeSubpackages: - obj.Insert(InternedTerm("scope"), InternedTerm("subpackages")) - default: - obj.Insert(InternedTerm("scope"), StringTerm(a.Scope)) - } + obj.Insert(InternedTerm("scope"), InternedTerm(a.Scope)) } if len(a.Title) > 0 { @@ -716,6 +717,41 @@ func (s *SchemaAnnotation) String() string { return string(bs) } +// Copy returns a deep copy of s. +func (c *CompileAnnotation) Copy() *CompileAnnotation { + if c == nil { + return nil + } + cpy := *c + for i := range c.Unknowns { + cpy.Unknowns[i] = c.Unknowns[i].Copy() + } + return &cpy +} + +// Compare returns an integer indicating if s is less than, equal to, or greater +// than other. 
+func (c *CompileAnnotation) Compare(other *CompileAnnotation) int { + switch { + case c == nil && other == nil: + return 0 + case c != nil && other == nil: + return 1 + case c == nil && other != nil: + return -1 + } + + if cmp := slices.CompareFunc(c.Unknowns, other.Unknowns, RefCompare); cmp != 0 { + return cmp + } + return c.MaskRule.Compare(other.MaskRule) +} + +func (c *CompileAnnotation) String() string { + bs, _ := json.Marshal(c) + return string(bs) +} + func newAnnotationSet() *AnnotationSet { return &AnnotationSet{ byRule: map[*Rule][]*Annotations{}, diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go b/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go index 6a45d0af46..7e30a8051c 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go @@ -26,11 +26,16 @@ func RegisterBuiltin(b *Builtin) { BuiltinMap[b.Infix] = b InternStringTerm(b.Infix) + InternVarValue(b.Infix) } - InternStringTerm(b.Name) if strings.Contains(b.Name, ".") { - InternStringTerm(strings.Split(b.Name, ".")...) + parts := strings.Split(b.Name, ".") + InternStringTerm(parts...) + InternVarValue(parts[0]) + } else { + InternStringTerm(b.Name) + InternVarValue(b.Name) } } @@ -90,6 +95,7 @@ var DefaultBuiltins = [...]*Builtin{ // Arrays ArrayConcat, + ArrayFlatten, ArraySlice, ArrayReverse, @@ -146,6 +152,7 @@ var DefaultBuiltins = [...]*Builtin{ Sprintf, StringReverse, RenderTemplate, + InternalTemplateString, // Numbers NumbersRange, @@ -347,7 +354,7 @@ var Equality = &Builtin{ types.Args(types.A, types.A), types.B, ), - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -362,7 +369,7 @@ var Assign = &Builtin{ types.Args(types.A, types.A), types.B, ), - canSkipBctx: true, + CanSkipBctx: true, } // Member represents the `in` (infix) operator. 
@@ -376,7 +383,7 @@ var Member = &Builtin{ ), types.B, ), - canSkipBctx: true, + CanSkipBctx: true, } // MemberWithKey represents the `in` (infix) operator when used @@ -392,7 +399,7 @@ var MemberWithKey = &Builtin{ ), types.B, ), - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -411,7 +418,7 @@ var GreaterThan = &Builtin{ ), types.Named("result", types.B).Description("true if `x` is greater than `y`; false otherwise"), ), - canSkipBctx: true, + CanSkipBctx: true, } var GreaterThanEq = &Builtin{ @@ -425,7 +432,7 @@ var GreaterThanEq = &Builtin{ ), types.Named("result", types.B).Description("true if `x` is greater or equal to `y`; false otherwise"), ), - canSkipBctx: true, + CanSkipBctx: true, } // LessThan represents the "<" comparison operator. @@ -440,7 +447,7 @@ var LessThan = &Builtin{ ), types.Named("result", types.B).Description("true if `x` is less than `y`; false otherwise"), ), - canSkipBctx: true, + CanSkipBctx: true, } var LessThanEq = &Builtin{ @@ -454,7 +461,7 @@ var LessThanEq = &Builtin{ ), types.Named("result", types.B).Description("true if `x` is less than or equal to `y`; false otherwise"), ), - canSkipBctx: true, + CanSkipBctx: true, } var NotEqual = &Builtin{ @@ -468,7 +475,7 @@ var NotEqual = &Builtin{ ), types.Named("result", types.B).Description("true if `x` is not equal to `y`; false otherwise"), ), - canSkipBctx: true, + CanSkipBctx: true, } // Equal represents the "==" comparison operator. 
@@ -483,7 +490,7 @@ var Equal = &Builtin{ ), types.Named("result", types.B).Description("true if `x` is equal to `y`; false otherwise"), ), - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -503,7 +510,7 @@ var Plus = &Builtin{ types.Named("z", types.N).Description("the sum of `x` and `y`"), ), Categories: number, - canSkipBctx: true, + CanSkipBctx: true, } var Minus = &Builtin{ @@ -518,7 +525,7 @@ var Minus = &Builtin{ types.Named("z", types.NewAny(types.N, types.SetOfAny)).Description("the difference of `x` and `y`"), ), Categories: category("sets", "numbers"), - canSkipBctx: true, + CanSkipBctx: true, } var Multiply = &Builtin{ @@ -533,7 +540,7 @@ var Multiply = &Builtin{ types.Named("z", types.N).Description("the product of `x` and `y`"), ), Categories: number, - canSkipBctx: true, + CanSkipBctx: true, } var Divide = &Builtin{ @@ -548,7 +555,7 @@ var Divide = &Builtin{ types.Named("z", types.N).Description("the result of `x` divided by `y`"), ), Categories: number, - canSkipBctx: true, + CanSkipBctx: true, } var Round = &Builtin{ @@ -561,7 +568,7 @@ var Round = &Builtin{ types.Named("y", types.N).Description("the result of rounding `x`"), ), Categories: number, - canSkipBctx: true, + CanSkipBctx: true, } var Ceil = &Builtin{ @@ -574,7 +581,7 @@ var Ceil = &Builtin{ types.Named("y", types.N).Description("the result of rounding `x` _up_"), ), Categories: number, - canSkipBctx: true, + CanSkipBctx: true, } var Floor = &Builtin{ @@ -587,7 +594,7 @@ var Floor = &Builtin{ types.Named("y", types.N).Description("the result of rounding `x` _down_"), ), Categories: number, - canSkipBctx: true, + CanSkipBctx: true, } var Abs = &Builtin{ @@ -600,7 +607,7 @@ var Abs = &Builtin{ types.Named("y", types.N).Description("the absolute value of `x`"), ), Categories: number, - canSkipBctx: true, + CanSkipBctx: true, } var Rem = &Builtin{ @@ -615,7 +622,7 @@ var Rem = &Builtin{ types.Named("z", types.N).Description("the remainder"), ), Categories: number, - canSkipBctx: true, + 
CanSkipBctx: true, } /** @@ -632,7 +639,7 @@ var BitsOr = &Builtin{ ), types.Named("z", types.N).Description("the bitwise OR of `x` and `y`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var BitsAnd = &Builtin{ @@ -645,7 +652,7 @@ var BitsAnd = &Builtin{ ), types.Named("z", types.N).Description("the bitwise AND of `x` and `y`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var BitsNegate = &Builtin{ @@ -657,7 +664,7 @@ var BitsNegate = &Builtin{ ), types.Named("z", types.N).Description("the bitwise negation of `x`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var BitsXOr = &Builtin{ @@ -670,7 +677,7 @@ var BitsXOr = &Builtin{ ), types.Named("z", types.N).Description("the bitwise XOR of `x` and `y`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var BitsShiftLeft = &Builtin{ @@ -683,7 +690,7 @@ var BitsShiftLeft = &Builtin{ ), types.Named("z", types.N).Description("the result of shifting `x` `s` bits to the left"), ), - canSkipBctx: true, + CanSkipBctx: true, } var BitsShiftRight = &Builtin{ @@ -696,7 +703,7 @@ var BitsShiftRight = &Builtin{ ), types.Named("z", types.N).Description("the result of shifting `x` `s` bits to the right"), ), - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -717,7 +724,7 @@ var And = &Builtin{ types.Named("z", types.SetOfAny).Description("the intersection of `x` and `y`"), ), Categories: sets, - canSkipBctx: true, + CanSkipBctx: true, } // Or performs a union operation on sets. 
@@ -733,7 +740,7 @@ var Or = &Builtin{ types.Named("z", types.SetOfAny).Description("the union of `x` and `y`"), ), Categories: sets, - canSkipBctx: true, + CanSkipBctx: true, } var Intersection = &Builtin{ @@ -746,7 +753,7 @@ var Intersection = &Builtin{ types.Named("y", types.SetOfAny).Description("the intersection of all `xs` sets"), ), Categories: sets, - canSkipBctx: true, + CanSkipBctx: true, } var Union = &Builtin{ @@ -759,7 +766,7 @@ var Union = &Builtin{ types.Named("y", types.SetOfAny).Description("the union of all `xs` sets"), ), Categories: sets, - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -783,7 +790,7 @@ var Count = &Builtin{ types.Named("n", types.N).Description("the count of elements, key/val pairs, or characters, respectively."), ), Categories: aggregates, - canSkipBctx: true, + CanSkipBctx: true, } var Sum = &Builtin{ @@ -799,7 +806,7 @@ var Sum = &Builtin{ types.Named("n", types.N).Description("the sum of all elements"), ), Categories: aggregates, - canSkipBctx: true, + CanSkipBctx: true, } var Product = &Builtin{ @@ -815,7 +822,7 @@ var Product = &Builtin{ types.Named("n", types.N).Description("the product of all elements"), ), Categories: aggregates, - canSkipBctx: true, + CanSkipBctx: true, } var Max = &Builtin{ @@ -831,7 +838,7 @@ var Max = &Builtin{ types.Named("n", types.A).Description("the maximum of all elements"), ), Categories: aggregates, - canSkipBctx: true, + CanSkipBctx: true, } var Min = &Builtin{ @@ -847,7 +854,7 @@ var Min = &Builtin{ types.Named("n", types.A).Description("the minimum of all elements"), ), Categories: aggregates, - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -867,7 +874,7 @@ var Sort = &Builtin{ types.Named("n", types.NewArray(nil, types.A)).Description("the sorted array"), ), Categories: aggregates, - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -884,7 +891,19 @@ var ArrayConcat = &Builtin{ ), types.Named("z", types.NewArray(nil, types.A)).Description("the concatenation of `x` and `y`"), ), - 
canSkipBctx: true, + CanSkipBctx: true, +} + +var ArrayFlatten = &Builtin{ + Name: "array.flatten", + Description: "Non-recursively unpacks array items in arr into the flattened array. Other types are appended as-is.", + Decl: types.NewFunction( + types.Args( + types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be flattened"), + ), + types.Named("flattened", types.NewArray(nil, types.A)).Description("array flattened one level"), + ), + CanSkipBctx: true, } var ArraySlice = &Builtin{ @@ -898,7 +917,7 @@ var ArraySlice = &Builtin{ ), types.Named("slice", types.NewArray(nil, types.A)).Description("the subslice of `array`, from `start` to `end`, including `arr[start]`, but excluding `arr[end]`"), ), - canSkipBctx: true, + CanSkipBctx: true, } // NOTE(sr): this function really needs examples var ArrayReverse = &Builtin{ @@ -910,7 +929,7 @@ var ArrayReverse = &Builtin{ ), types.Named("rev", types.NewArray(nil, types.A)).Description("an array containing the elements of `arr` in reverse order"), ), - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -933,7 +952,7 @@ var ToNumber = &Builtin{ types.Named("num", types.N).Description("the numeric representation of `x`"), ), Categories: conversions, - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -961,7 +980,7 @@ var RegexIsValid = &Builtin{ ), types.Named("result", types.B).Description("true if `pattern` is a valid regular expression"), ), - canSkipBctx: true, + CanSkipBctx: true, } var RegexFindAllStringSubmatch = &Builtin{ @@ -975,7 +994,7 @@ var RegexFindAllStringSubmatch = &Builtin{ ), types.Named("output", types.NewArray(nil, types.NewArray(nil, types.S))).Description("array of all matches"), ), - canSkipBctx: false, + CanSkipBctx: false, } var RegexTemplateMatch = &Builtin{ @@ -990,7 +1009,7 @@ var RegexTemplateMatch = &Builtin{ ), types.Named("result", types.B).Description("true if `value` matches the `template`"), ), - canSkipBctx: true, + CanSkipBctx: true, } // TODO(sr): 
example:`regex.template_match("urn:foo:{.*}", "urn:foo:bar:baz", "{", "}")`` returns ``true``. var RegexSplit = &Builtin{ @@ -1003,7 +1022,7 @@ var RegexSplit = &Builtin{ ), types.Named("output", types.NewArray(nil, types.S)).Description("the parts obtained by splitting `value`"), ), - canSkipBctx: false, + CanSkipBctx: false, } // RegexFind takes two strings and a number, the pattern, the value and number of match values to @@ -1019,7 +1038,7 @@ var RegexFind = &Builtin{ ), types.Named("output", types.NewArray(nil, types.S)).Description("collected matches"), ), - canSkipBctx: false, + CanSkipBctx: false, } // GlobsMatch takes two strings regexp-style strings and evaluates to true if their @@ -1038,7 +1057,7 @@ The set of regex symbols is limited for this builtin: only ` + "`.`, `*`, `+`, ` ), types.Named("result", types.B).Description("true if the intersection of `glob1` and `glob2` matches a non-empty set of non-empty strings"), ), - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -1065,7 +1084,7 @@ var AnyPrefixMatch = &Builtin{ types.Named("result", types.B).Description("result of the prefix check"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var AnySuffixMatch = &Builtin{ @@ -1087,7 +1106,7 @@ var AnySuffixMatch = &Builtin{ types.Named("result", types.B).Description("result of the suffix check"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var Concat = &Builtin{ @@ -1104,7 +1123,7 @@ var Concat = &Builtin{ types.Named("output", types.S).Description("the joined string"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: false, } var FormatInt = &Builtin{ @@ -1118,7 +1137,7 @@ var FormatInt = &Builtin{ types.Named("output", types.S).Description("formatted number"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var IndexOf = &Builtin{ @@ -1132,7 +1151,7 @@ var IndexOf = &Builtin{ types.Named("output", types.N).Description("index of first occurrence, `-1` if not found"), 
), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var IndexOfN = &Builtin{ @@ -1146,7 +1165,7 @@ var IndexOfN = &Builtin{ types.Named("output", types.NewArray(nil, types.N)).Description("all indices at which `needle` occurs in `haystack`, may be empty"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var Substring = &Builtin{ @@ -1161,7 +1180,7 @@ var Substring = &Builtin{ types.Named("output", types.S).Description("substring of `value` from `offset`, of length `length`"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var Contains = &Builtin{ @@ -1175,7 +1194,7 @@ var Contains = &Builtin{ types.Named("result", types.B).Description("result of the containment check"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var StringCount = &Builtin{ @@ -1189,7 +1208,7 @@ var StringCount = &Builtin{ types.Named("output", types.N).Description("count of occurrences, `0` if not found"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var StartsWith = &Builtin{ @@ -1203,7 +1222,7 @@ var StartsWith = &Builtin{ types.Named("result", types.B).Description("result of the prefix check"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var EndsWith = &Builtin{ @@ -1217,7 +1236,7 @@ var EndsWith = &Builtin{ types.Named("result", types.B).Description("result of the suffix check"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var Lower = &Builtin{ @@ -1230,7 +1249,7 @@ var Lower = &Builtin{ types.Named("y", types.S).Description("lower-case of x"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var Upper = &Builtin{ @@ -1243,7 +1262,7 @@ var Upper = &Builtin{ types.Named("y", types.S).Description("upper-case of x"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var Split = &Builtin{ @@ -1257,7 +1276,7 @@ var Split = &Builtin{ types.Named("ys", types.NewArray(nil, 
types.S)).Description("split parts"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var Replace = &Builtin{ @@ -1272,7 +1291,7 @@ var Replace = &Builtin{ types.Named("y", types.S).Description("string with replaced substrings"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: false, } var ReplaceN = &Builtin{ @@ -1292,7 +1311,7 @@ The old string comparisons are done in argument order.`, ), types.Named("output", types.S).Description("string with replaced substrings"), ), - canSkipBctx: true, + CanSkipBctx: false, } var RegexReplace = &Builtin{ @@ -1306,7 +1325,7 @@ var RegexReplace = &Builtin{ ), types.Named("output", types.S).Description("string with replaced substrings"), ), - canSkipBctx: false, + CanSkipBctx: false, } var Trim = &Builtin{ @@ -1320,7 +1339,7 @@ var Trim = &Builtin{ types.Named("output", types.S).Description("string trimmed of `cutset` characters"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var TrimLeft = &Builtin{ @@ -1334,7 +1353,7 @@ var TrimLeft = &Builtin{ types.Named("output", types.S).Description("string left-trimmed of `cutset` characters"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var TrimPrefix = &Builtin{ @@ -1348,7 +1367,7 @@ var TrimPrefix = &Builtin{ types.Named("output", types.S).Description("string with `prefix` cut off"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var TrimRight = &Builtin{ @@ -1362,7 +1381,7 @@ var TrimRight = &Builtin{ types.Named("output", types.S).Description("string right-trimmed of `cutset` characters"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var TrimSuffix = &Builtin{ @@ -1376,7 +1395,7 @@ var TrimSuffix = &Builtin{ types.Named("output", types.S).Description("string with `suffix` cut off"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var TrimSpace = &Builtin{ @@ -1389,7 +1408,7 @@ var TrimSpace = &Builtin{ 
types.Named("output", types.S).Description("string leading and trailing white space cut off"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var Sprintf = &Builtin{ @@ -1403,7 +1422,7 @@ var Sprintf = &Builtin{ types.Named("output", types.S).Description("`format` formatted by the values in `values`"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var StringReverse = &Builtin{ @@ -1416,7 +1435,7 @@ var StringReverse = &Builtin{ types.Named("y", types.S).Description("reversed string"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } var RenderTemplate = &Builtin{ @@ -1431,7 +1450,7 @@ var RenderTemplate = &Builtin{ types.Named("result", types.S).Description("rendered template with template variables injected"), ), Categories: stringsCat, - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -1452,7 +1471,7 @@ var RandIntn = &Builtin{ ), Categories: number, Nondeterministic: true, - canSkipBctx: false, + CanSkipBctx: false, } var NumbersRange = &Builtin{ @@ -1465,7 +1484,7 @@ var NumbersRange = &Builtin{ ), types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b`"), ), - canSkipBctx: false, // needed for context timeout check + CanSkipBctx: false, // needed for context timeout check } var NumbersRangeStep = &Builtin{ @@ -1483,7 +1502,7 @@ var NumbersRangeStep = &Builtin{ ), types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b` in `step` increments"), ), - canSkipBctx: false, // needed for context timeout check + CanSkipBctx: false, // needed for context timeout check } /** @@ -1508,7 +1527,7 @@ respectively. 
Other units are case-insensitive.`, ), types.Named("y", types.N).Description("the parsed number"), ), - canSkipBctx: true, + CanSkipBctx: true, } var UnitsParseBytes = &Builtin{ @@ -1527,7 +1546,7 @@ and "MiB" are equivalent).`, ), types.Named("y", types.N).Description("the parsed number"), ), - canSkipBctx: true, + CanSkipBctx: true, } // @@ -1547,7 +1566,7 @@ var UUIDRFC4122 = &Builtin{ types.Named("output", types.S).Description("a version 4 UUID; for any given `k`, the output will be consistent throughout a query evaluation"), ), Nondeterministic: true, - canSkipBctx: false, + CanSkipBctx: false, } var UUIDParse = &Builtin{ @@ -1561,7 +1580,7 @@ var UUIDParse = &Builtin{ types.Named("result", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("Properties of UUID if valid (version, variant, etc). Undefined otherwise."), ), Relation: false, - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -1606,7 +1625,7 @@ var JSONFilter = &Builtin{ types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `paths`"), ), Categories: objectCat, - canSkipBctx: true, + CanSkipBctx: true, } var JSONRemove = &Builtin{ @@ -1645,7 +1664,7 @@ var JSONRemove = &Builtin{ types.Named("output", types.A).Description("result of removing all keys specified in `paths`"), ), Categories: objectCat, - canSkipBctx: true, + CanSkipBctx: true, } var JSONPatch = &Builtin{ @@ -1656,7 +1675,7 @@ var JSONPatch = &Builtin{ "Additionally works on sets, where a value contained in the set is considered to be its path.", Decl: types.NewFunction( types.Args( - types.Named("object", types.A).Description("the object to patch"), // TODO(sr): types.A? 
+ types.Named("target", types.A).Description("the object, array or set to patch"), types.Named("patches", types.NewArray( nil, types.NewObject( @@ -1671,7 +1690,7 @@ var JSONPatch = &Builtin{ types.Named("output", types.A).Description("result obtained after consecutively applying all patch operations in `patches`"), ), Categories: objectCat, - canSkipBctx: true, + CanSkipBctx: true, } var ObjectSubset = &Builtin{ @@ -1705,7 +1724,7 @@ var ObjectSubset = &Builtin{ ), types.Named("result", types.A).Description("`true` if `sub` is a subset of `super`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var ObjectUnion = &Builtin{ @@ -1725,7 +1744,7 @@ var ObjectUnion = &Builtin{ ), types.Named("output", types.A).Description("a new object which is the result of an asymmetric recursive union of two objects where conflicts are resolved by choosing the key from the right-hand object `b`"), ), // TODO(sr): types.A? ^^^^^^^ (also below) - canSkipBctx: true, + CanSkipBctx: true, } var ObjectUnionN = &Builtin{ @@ -1741,7 +1760,7 @@ var ObjectUnionN = &Builtin{ ), types.Named("output", types.A).Description("asymmetric recursive union of all objects in `objects`, merged from left to right, where conflicts are resolved by choosing the key from the right-hand object"), ), - canSkipBctx: true, + CanSkipBctx: true, } var ObjectRemove = &Builtin{ @@ -1761,7 +1780,7 @@ var ObjectRemove = &Builtin{ ), types.Named("output", types.A).Description("result of removing the specified `keys` from `object`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var ObjectFilter = &Builtin{ @@ -1782,7 +1801,7 @@ var ObjectFilter = &Builtin{ ), types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `keys`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var ObjectGet = &Builtin{ @@ -1798,7 +1817,7 @@ var ObjectGet = &Builtin{ ), types.Named("value", types.A).Description("`object[key]` if present, otherwise `default`"), ), - canSkipBctx: true, + 
CanSkipBctx: true, } var ObjectKeys = &Builtin{ @@ -1811,13 +1830,14 @@ var ObjectKeys = &Builtin{ ), types.Named("value", types.SetOfAny).Description("set of `object`'s keys"), ), - canSkipBctx: true, + CanSkipBctx: true, } /* * Encoding */ -var encoding = category("encoding") +// Not using 'encoding' to avoid having to alias stdlib "encoding" imports +var catEncoding = category("encoding") var JSONMarshal = &Builtin{ Name: "json.marshal", @@ -1828,8 +1848,8 @@ var JSONMarshal = &Builtin{ ), types.Named("y", types.S).Description("the JSON string representation of `x`"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var JSONMarshalWithOptions = &Builtin{ @@ -1850,8 +1870,8 @@ var JSONMarshalWithOptions = &Builtin{ ), types.Named("y", types.S).Description("the JSON string representation of `x`, with configured prefix/indent string(s) as appropriate"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var JSONUnmarshal = &Builtin{ @@ -1863,8 +1883,8 @@ var JSONUnmarshal = &Builtin{ ), types.Named("y", types.A).Description("the term deserialized from `x`"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var JSONIsValid = &Builtin{ @@ -1876,8 +1896,8 @@ var JSONIsValid = &Builtin{ ), types.Named("result", types.B).Description("`true` if `x` is valid JSON, `false` otherwise"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var Base64Encode = &Builtin{ @@ -1889,8 +1909,8 @@ var Base64Encode = &Builtin{ ), types.Named("y", types.S).Description("base64 serialization of `x`"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var Base64Decode = &Builtin{ @@ -1902,8 +1922,8 @@ var Base64Decode = &Builtin{ ), types.Named("y", types.S).Description("base64 deserialization of `x`"), ), - Categories: encoding, - canSkipBctx: true, + 
Categories: catEncoding, + CanSkipBctx: true, } var Base64IsValid = &Builtin{ @@ -1915,8 +1935,8 @@ var Base64IsValid = &Builtin{ ), types.Named("result", types.B).Description("`true` if `x` is valid base64 encoded value, `false` otherwise"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var Base64UrlEncode = &Builtin{ @@ -1928,8 +1948,8 @@ var Base64UrlEncode = &Builtin{ ), types.Named("y", types.S).Description("base64url serialization of `x`"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var Base64UrlEncodeNoPad = &Builtin{ @@ -1941,8 +1961,8 @@ var Base64UrlEncodeNoPad = &Builtin{ ), types.Named("y", types.S).Description("base64url serialization of `x`"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var Base64UrlDecode = &Builtin{ @@ -1954,8 +1974,8 @@ var Base64UrlDecode = &Builtin{ ), types.Named("y", types.S).Description("base64url deserialization of `x`"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var URLQueryDecode = &Builtin{ @@ -1967,8 +1987,8 @@ var URLQueryDecode = &Builtin{ ), types.Named("y", types.S).Description("URL-encoding deserialization of `x`"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var URLQueryEncode = &Builtin{ @@ -1980,8 +2000,8 @@ var URLQueryEncode = &Builtin{ ), types.Named("y", types.S).Description("URL-encoding serialization of `x`"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var URLQueryEncodeObject = &Builtin{ @@ -2004,8 +2024,8 @@ var URLQueryEncodeObject = &Builtin{ ), types.Named("y", types.S).Description("the URL-encoded serialization of `object`"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var URLQueryDecodeObject = &Builtin{ @@ -2019,8 +2039,8 @@ 
var URLQueryDecodeObject = &Builtin{ types.S, types.NewArray(nil, types.S)))).Description("the resulting object"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var YAMLMarshal = &Builtin{ @@ -2032,8 +2052,8 @@ var YAMLMarshal = &Builtin{ ), types.Named("y", types.S).Description("the YAML string representation of `x`"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var YAMLUnmarshal = &Builtin{ @@ -2045,8 +2065,8 @@ var YAMLUnmarshal = &Builtin{ ), types.Named("y", types.A).Description("the term deserialized from `x`"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } // YAMLIsValid verifies the input string is a valid YAML document. @@ -2059,8 +2079,8 @@ var YAMLIsValid = &Builtin{ ), types.Named("result", types.B).Description("`true` if `x` is valid YAML, `false` otherwise"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var HexEncode = &Builtin{ @@ -2072,8 +2092,8 @@ var HexEncode = &Builtin{ ), types.Named("y", types.S).Description("serialization of `x` using hex-encoding"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } var HexDecode = &Builtin{ @@ -2085,8 +2105,8 @@ var HexDecode = &Builtin{ ), types.Named("y", types.S).Description("deserialized from `x`"), ), - Categories: encoding, - canSkipBctx: true, + Categories: catEncoding, + CanSkipBctx: true, } /** @@ -2108,7 +2128,7 @@ var JWTDecode = &Builtin{ }, nil)).Description("`[header, payload, sig]`, where `header` and `payload` are objects; `sig` is the hexadecimal representation of the signature on the token."), ), Categories: tokensCat, - canSkipBctx: true, + CanSkipBctx: true, } var JWTVerifyRS256 = &Builtin{ @@ -2122,7 +2142,7 @@ var JWTVerifyRS256 = &Builtin{ types.Named("result", types.B).Description("`true` if the signature is valid, `false` 
otherwise"), ), Categories: tokensCat, - canSkipBctx: false, + CanSkipBctx: false, } var JWTVerifyRS384 = &Builtin{ @@ -2136,7 +2156,7 @@ var JWTVerifyRS384 = &Builtin{ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), ), Categories: tokensCat, - canSkipBctx: false, + CanSkipBctx: false, } var JWTVerifyRS512 = &Builtin{ @@ -2150,7 +2170,7 @@ var JWTVerifyRS512 = &Builtin{ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), ), Categories: tokensCat, - canSkipBctx: false, + CanSkipBctx: false, } var JWTVerifyPS256 = &Builtin{ @@ -2164,7 +2184,7 @@ var JWTVerifyPS256 = &Builtin{ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), ), Categories: tokensCat, - canSkipBctx: false, + CanSkipBctx: false, } var JWTVerifyPS384 = &Builtin{ @@ -2178,7 +2198,7 @@ var JWTVerifyPS384 = &Builtin{ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), ), Categories: tokensCat, - canSkipBctx: false, + CanSkipBctx: false, } var JWTVerifyPS512 = &Builtin{ @@ -2192,7 +2212,7 @@ var JWTVerifyPS512 = &Builtin{ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), ), Categories: tokensCat, - canSkipBctx: false, + CanSkipBctx: false, } var JWTVerifyES256 = &Builtin{ @@ -2206,7 +2226,7 @@ var JWTVerifyES256 = &Builtin{ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), ), Categories: tokensCat, - canSkipBctx: false, + CanSkipBctx: false, } var JWTVerifyES384 = &Builtin{ @@ -2220,7 +2240,7 @@ var JWTVerifyES384 = &Builtin{ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), ), Categories: tokensCat, - canSkipBctx: false, + CanSkipBctx: false, } var JWTVerifyES512 = &Builtin{ @@ -2234,7 +2254,7 @@ var JWTVerifyES512 = &Builtin{ types.Named("result", 
types.B).Description("`true` if the signature is valid, `false` otherwise"), ), Categories: tokensCat, - canSkipBctx: false, + CanSkipBctx: false, } var JWTVerifyEdDSA = &Builtin{ @@ -2248,7 +2268,7 @@ var JWTVerifyEdDSA = &Builtin{ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), ), Categories: tokensCat, - canSkipBctx: false, + CanSkipBctx: false, } var JWTVerifyHS256 = &Builtin{ @@ -2262,7 +2282,7 @@ var JWTVerifyHS256 = &Builtin{ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), ), Categories: tokensCat, - canSkipBctx: false, + CanSkipBctx: false, } var JWTVerifyHS384 = &Builtin{ @@ -2276,7 +2296,7 @@ var JWTVerifyHS384 = &Builtin{ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), ), Categories: tokensCat, - canSkipBctx: false, + CanSkipBctx: false, } var JWTVerifyHS512 = &Builtin{ @@ -2290,7 +2310,7 @@ var JWTVerifyHS512 = &Builtin{ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), ), Categories: tokensCat, - canSkipBctx: false, + CanSkipBctx: false, } // Marked non-deterministic because it relies on time internally. @@ -2311,7 +2331,7 @@ Supports the following algorithms: HS256, HS384, HS512, RS256, RS384, RS512, ES2 ), Categories: tokensCat, Nondeterministic: true, - canSkipBctx: false, + CanSkipBctx: false, } var tokenSign = category("tokensign") @@ -2330,7 +2350,7 @@ var JWTEncodeSignRaw = &Builtin{ ), Categories: tokenSign, Nondeterministic: true, - canSkipBctx: false, + CanSkipBctx: false, } // Marked non-deterministic because it relies on RNG internally. 
@@ -2347,7 +2367,7 @@ var JWTEncodeSign = &Builtin{ ), Categories: tokenSign, Nondeterministic: true, - canSkipBctx: false, + CanSkipBctx: false, } /** @@ -2363,7 +2383,7 @@ var NowNanos = &Builtin{ types.Named("now", types.N).Description("nanoseconds since epoch"), ), Nondeterministic: true, - canSkipBctx: false, + CanSkipBctx: false, } var ParseNanos = &Builtin{ @@ -2376,7 +2396,7 @@ var ParseNanos = &Builtin{ ), types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), ), - canSkipBctx: true, + CanSkipBctx: true, } var ParseRFC3339Nanos = &Builtin{ @@ -2388,7 +2408,7 @@ var ParseRFC3339Nanos = &Builtin{ ), types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), ), - canSkipBctx: true, + CanSkipBctx: true, } var ParseDurationNanos = &Builtin{ @@ -2400,7 +2420,7 @@ var ParseDurationNanos = &Builtin{ ), types.Named("ns", types.N).Description("the `duration` in nanoseconds"), ), - canSkipBctx: true, + CanSkipBctx: true, } var Format = &Builtin{ @@ -2416,7 +2436,7 @@ var Format = &Builtin{ ), types.Named("formatted timestamp", types.S).Description("the formatted timestamp represented for the nanoseconds since the epoch in the supplied timezone (or UTC)"), ), - canSkipBctx: true, + CanSkipBctx: true, } var Date = &Builtin{ @@ -2431,7 +2451,7 @@ var Date = &Builtin{ ), types.Named("date", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)).Description("an array of `year`, `month` (1-12), and `day` (1-31)"), ), - canSkipBctx: true, + CanSkipBctx: true, } var Clock = &Builtin{ @@ -2447,7 +2467,7 @@ var Clock = &Builtin{ types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)). 
Description("the `hour`, `minute` (0-59), and `second` (0-59) representing the time of day for the nanoseconds since epoch in the supplied timezone (or UTC)"), ), - canSkipBctx: true, + CanSkipBctx: true, } var Weekday = &Builtin{ @@ -2462,7 +2482,7 @@ var Weekday = &Builtin{ ), types.Named("day", types.S).Description("the weekday represented by `ns` nanoseconds since the epoch in the supplied timezone (or UTC)"), ), - canSkipBctx: true, + CanSkipBctx: true, } var AddDate = &Builtin{ @@ -2477,7 +2497,7 @@ var AddDate = &Builtin{ ), types.Named("output", types.N).Description("nanoseconds since the epoch representing the input time, with years, months and days added"), ), - canSkipBctx: true, + CanSkipBctx: true, } var Diff = &Builtin{ @@ -2496,7 +2516,7 @@ var Diff = &Builtin{ ), types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N, types.N, types.N, types.N}, nil)).Description("difference between `ns1` and `ns2` (in their supplied timezones, if supplied, or UTC) as array of numbers: `[years, months, days, hours, minutes, seconds]`"), ), - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -2516,7 +2536,7 @@ concatenated PEM blocks. 
The whole input of concatenated PEM blocks can optional ), types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed X.509 certificates represented as objects"), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoX509ParseAndVerifyCertificates = &Builtin{ @@ -2536,7 +2556,7 @@ with all others being treated as intermediates.`, types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoX509ParseAndVerifyCertificatesWithOptions = &Builtin{ @@ -2561,7 +2581,7 @@ with all others being treated as intermediates.`, types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoX509ParseCertificateRequest = &Builtin{ @@ -2573,7 +2593,7 @@ var CryptoX509ParseCertificateRequest = &Builtin{ ), types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("X.509 CSR represented as an object"), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoX509ParseKeyPair = &Builtin{ @@ -2586,7 +2606,7 @@ var CryptoX509ParseKeyPair = &Builtin{ ), types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("if key pair is valid, returns the tls.certificate(https://pkg.go.dev/crypto/tls#Certificate) 
as an object. If the key pair is invalid, nil and an error are returned."), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoX509ParseRSAPrivateKey = &Builtin{ @@ -2598,7 +2618,7 @@ var CryptoX509ParseRSAPrivateKey = &Builtin{ ), types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWK as an object"), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoParsePrivateKeys = &Builtin{ @@ -2612,7 +2632,7 @@ If the input is empty, the function will return null. The input string should be ), types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed private keys represented as objects"), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoMd5 = &Builtin{ @@ -2624,7 +2644,7 @@ var CryptoMd5 = &Builtin{ ), types.Named("y", types.S).Description("MD5-hash of `x`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoSha1 = &Builtin{ @@ -2636,7 +2656,7 @@ var CryptoSha1 = &Builtin{ ), types.Named("y", types.S).Description("SHA1-hash of `x`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoSha256 = &Builtin{ @@ -2648,7 +2668,7 @@ var CryptoSha256 = &Builtin{ ), types.Named("y", types.S).Description("SHA256-hash of `x`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoHmacMd5 = &Builtin{ @@ -2661,7 +2681,7 @@ var CryptoHmacMd5 = &Builtin{ ), types.Named("y", types.S).Description("MD5-HMAC of `x`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoHmacSha1 = &Builtin{ @@ -2674,7 +2694,7 @@ var CryptoHmacSha1 = &Builtin{ ), types.Named("y", types.S).Description("SHA1-HMAC of `x`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoHmacSha256 = &Builtin{ @@ -2687,7 +2707,7 @@ var CryptoHmacSha256 = &Builtin{ ), types.Named("y", types.S).Description("SHA256-HMAC of `x`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoHmacSha512 = &Builtin{ @@ -2700,7 +2720,7 @@ var CryptoHmacSha512 = &Builtin{ ), 
types.Named("y", types.S).Description("SHA512-HMAC of `x`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var CryptoHmacEqual = &Builtin{ @@ -2713,7 +2733,7 @@ var CryptoHmacEqual = &Builtin{ ), types.Named("result", types.B).Description("`true` if the MACs are equals, `false` otherwise"), ), - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -2738,7 +2758,7 @@ var WalkBuiltin = &Builtin{ )).Description("pairs of `path` and `value`: `path` is an array representing the pointer to `value` in `x`. If `path` is assigned a wildcard (`_`), the `walk` function will skip path creation entirely for faster evaluation."), ), Categories: graphs, - canSkipBctx: true, + CanSkipBctx: true, } var ReachableBuiltin = &Builtin{ @@ -2759,7 +2779,7 @@ var ReachableBuiltin = &Builtin{ ), types.Named("output", types.SetOfAny).Description("set of vertices reachable from the `initial` vertices in the directed `graph`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var ReachablePathsBuiltin = &Builtin{ @@ -2780,7 +2800,7 @@ var ReachablePathsBuiltin = &Builtin{ ), types.Named("output", types.NewSet(types.NewArray(nil, types.A))).Description("paths reachable from the `initial` vertices in the directed `graph`"), ), - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -2798,7 +2818,7 @@ var IsNumber = &Builtin{ types.Named("result", types.B).Description("`true` if `x` is a number, `false` otherwise."), ), Categories: typesCat, - canSkipBctx: true, + CanSkipBctx: true, } var IsString = &Builtin{ @@ -2811,7 +2831,7 @@ var IsString = &Builtin{ types.Named("result", types.B).Description("`true` if `x` is a string, `false` otherwise."), ), Categories: typesCat, - canSkipBctx: true, + CanSkipBctx: true, } var IsBoolean = &Builtin{ @@ -2824,7 +2844,7 @@ var IsBoolean = &Builtin{ types.Named("result", types.B).Description("`true` if `x` is an boolean, `false` otherwise."), ), Categories: typesCat, - canSkipBctx: true, + CanSkipBctx: true, } var IsArray = &Builtin{ @@ -2837,7 +2857,7 @@ var IsArray 
= &Builtin{ types.Named("result", types.B).Description("`true` if `x` is an array, `false` otherwise."), ), Categories: typesCat, - canSkipBctx: true, + CanSkipBctx: true, } var IsSet = &Builtin{ @@ -2850,7 +2870,7 @@ var IsSet = &Builtin{ types.Named("result", types.B).Description("`true` if `x` is a set, `false` otherwise."), ), Categories: typesCat, - canSkipBctx: true, + CanSkipBctx: true, } var IsObject = &Builtin{ @@ -2863,7 +2883,7 @@ var IsObject = &Builtin{ types.Named("result", types.B).Description("`true` if `x` is an object, `false` otherwise."), ), Categories: typesCat, - canSkipBctx: true, + CanSkipBctx: true, } var IsNull = &Builtin{ @@ -2876,7 +2896,7 @@ var IsNull = &Builtin{ types.Named("result", types.B).Description("`true` if `x` is null, `false` otherwise."), ), Categories: typesCat, - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -2894,7 +2914,7 @@ var TypeNameBuiltin = &Builtin{ types.Named("type", types.S).Description(`one of "null", "boolean", "number", "string", "array", "object", "set"`), ), Categories: typesCat, - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -2914,7 +2934,7 @@ var HTTPSend = &Builtin{ Description("the HTTP response object"), ), Nondeterministic: true, - canSkipBctx: false, + CanSkipBctx: false, } /** @@ -2937,7 +2957,7 @@ var GraphQLParse = &Builtin{ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), }, nil)).Description("`output` is of the form `[query_ast, schema_ast]`. If the GraphQL query is valid given the provided schema, then `query_ast` and `schema_ast` are objects describing the ASTs for the query and schema."), ), - canSkipBctx: false, + CanSkipBctx: false, } // GraphQLParseAndVerify returns a boolean and a pair of AST object from parsing/validation. @@ -2957,7 +2977,7 @@ var GraphQLParseAndVerify = &Builtin{ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), }, nil)).Description(" `output` is of the form `[valid, query_ast, schema_ast]`. 
If the query is valid given the provided schema, then `valid` is `true`, and `query_ast` and `schema_ast` are objects describing the ASTs for the GraphQL query and schema. Otherwise, `valid` is `false` and `query_ast` and `schema_ast` are `{}`."), ), - canSkipBctx: false, + CanSkipBctx: false, } // GraphQLParseQuery parses the input GraphQL query and returns a JSON @@ -2971,7 +2991,7 @@ var GraphQLParseQuery = &Builtin{ ), types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL query."), ), - canSkipBctx: true, + CanSkipBctx: true, } // GraphQLParseSchema parses the input GraphQL schema and returns a JSON @@ -2985,7 +3005,7 @@ var GraphQLParseSchema = &Builtin{ ), types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL schema."), ), - canSkipBctx: false, + CanSkipBctx: false, } // GraphQLIsValid returns true if a GraphQL query is valid with a given @@ -3002,7 +3022,7 @@ var GraphQLIsValid = &Builtin{ ), types.Named("output", types.B).Description("`true` if the query is valid under the given schema. `false` otherwise."), ), - canSkipBctx: false, + CanSkipBctx: false, } // GraphQLSchemaIsValid returns true if the input is valid GraphQL schema, @@ -3017,7 +3037,7 @@ var GraphQLSchemaIsValid = &Builtin{ ), types.Named("output", types.B).Description("`true` if the schema is a valid GraphQL schema. `false` otherwise."), ), - canSkipBctx: false, + CanSkipBctx: false, } /** @@ -3041,7 +3061,7 @@ var JSONSchemaVerify = &Builtin{ Description("`output` is of the form `[valid, error]`. If the schema is valid, then `valid` is `true`, and `error` is `null`. 
Otherwise, `valid` is `false` and `error` is a string describing the error."), ), Categories: objectCat, - canSkipBctx: true, + CanSkipBctx: true, } // JSONMatchSchema returns empty array if the document matches the JSON schema, @@ -3073,7 +3093,7 @@ var JSONMatchSchema = &Builtin{ Description("`output` is of the form `[match, errors]`. If the document is valid given the schema, then `match` is `true`, and `errors` is an empty array. Otherwise, `match` is `false` and `errors` is an array of objects describing the error(s)."), ), Categories: objectCat, - canSkipBctx: false, + CanSkipBctx: false, } /** @@ -3096,7 +3116,7 @@ var ProvidersAWSSignReqObj = &Builtin{ Description("HTTP request object with `Authorization` header"), ), Categories: providersAWSCat, - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -3114,7 +3134,7 @@ var RegoParseModule = &Builtin{ types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). Description("AST object for the Rego module"), ), - canSkipBctx: true, + CanSkipBctx: true, } var RegoMetadataChain = &Builtin{ @@ -3127,7 +3147,7 @@ The first entry in the chain always points to the active rule, even if it has no types.Args(), types.Named("chain", types.NewArray(nil, types.A)).Description("each array entry represents a node in the path ancestry (chain) of the active rule that also has declared annotations"), ), - canSkipBctx: true, + CanSkipBctx: true, } // RegoMetadataRule returns the metadata for the active rule @@ -3138,7 +3158,7 @@ var RegoMetadataRule = &Builtin{ types.Args(), types.Named("output", types.A).Description("\"rule\" scope annotations for this rule; empty object if no annotations exist"), ), - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -3155,7 +3175,7 @@ var OPARuntime = &Builtin{ Description("includes a `config` key if OPA was started with a configuration file; an `env` key containing the environment variables that the OPA process was started with; includes `version` and `commit` keys 
containing the version and build commit of OPA."), ), Nondeterministic: true, - canSkipBctx: false, + CanSkipBctx: false, } /** @@ -3173,7 +3193,7 @@ var Trace = &Builtin{ types.Named("result", types.B).Description("always `true`"), ), Categories: tracing, - canSkipBctx: false, + CanSkipBctx: false, } /** @@ -3194,7 +3214,7 @@ var GlobMatch = &Builtin{ ), types.Named("result", types.B).Description("true if `match` can be found in `pattern` which is separated by `delimiters`"), ), - canSkipBctx: false, + CanSkipBctx: false, } var GlobQuoteMeta = &Builtin{ @@ -3206,7 +3226,7 @@ var GlobQuoteMeta = &Builtin{ ), types.Named("output", types.S).Description("the escaped string of `pattern`"), ), - canSkipBctx: true, + CanSkipBctx: true, // TODO(sr): example for this was: Calling ``glob.quote_meta("*.github.com", output)`` returns ``\\*.github.com`` as ``output``. } @@ -3224,7 +3244,7 @@ var NetCIDRIntersects = &Builtin{ ), types.Named("result", types.B).Description("`true` if `cidr1` intersects with `cidr2`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var NetCIDRExpand = &Builtin{ @@ -3236,7 +3256,7 @@ var NetCIDRExpand = &Builtin{ ), types.Named("hosts", types.SetOfStr).Description("set of IP addresses the CIDR `cidr` expands to"), ), - canSkipBctx: false, + CanSkipBctx: false, } var NetCIDRContains = &Builtin{ @@ -3249,7 +3269,7 @@ var NetCIDRContains = &Builtin{ ), types.Named("result", types.B).Description("`true` if `cidr_or_ip` is contained within `cidr`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var NetCIDRContainsMatches = &Builtin{ @@ -3263,7 +3283,7 @@ var NetCIDRContainsMatches = &Builtin{ ), types.Named("output", types.NewSet(types.NewArray([]types.Type{types.A, types.A}, nil))).Description("tuples identifying matches where `cidrs_or_ips` are contained within `cidrs`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var NetCIDRMerge = &Builtin{ @@ -3280,7 +3300,7 @@ Supports both IPv4 and IPv6 notations. IPv6 inputs need a prefix length (e.g. 
"/ ), types.Named("output", types.SetOfStr).Description("smallest possible set of CIDRs obtained after merging the provided list of IP addresses and subnets in `addrs`"), ), - canSkipBctx: true, + CanSkipBctx: true, } var NetCIDRIsValid = &Builtin{ @@ -3292,7 +3312,7 @@ var NetCIDRIsValid = &Builtin{ ), types.Named("result", types.B).Description("`true` if `cidr` is a valid CIDR"), ), - canSkipBctx: true, + CanSkipBctx: true, } var netCidrContainsMatchesOperandType = types.NewAny( @@ -3325,7 +3345,7 @@ var NetLookupIPAddr = &Builtin{ types.Named("addrs", types.SetOfStr).Description("IP addresses (v4 and v6) that `name` resolves to"), ), Nondeterministic: true, - canSkipBctx: false, + CanSkipBctx: false, } /** @@ -3341,7 +3361,7 @@ var SemVerIsValid = &Builtin{ ), types.Named("result", types.B).Description("`true` if `vsn` is a valid SemVer; `false` otherwise"), ), - canSkipBctx: true, + CanSkipBctx: true, } var SemVerCompare = &Builtin{ @@ -3354,7 +3374,7 @@ var SemVerCompare = &Builtin{ ), types.Named("result", types.N).Description("`-1` if `a < b`; `1` if `a > b`; `0` if `a == b`"), ), - canSkipBctx: true, + CanSkipBctx: true, } /** @@ -3383,6 +3403,12 @@ var InternalTestCase = &Builtin{ Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.A)}, nil), } +var InternalTemplateString = &Builtin{ + Name: "internal.template_string", + Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.A)}, types.S), + CanSkipBctx: true, // Uses bctx.Location for error reporting, but that is always provided in eval +} + /** * Deprecated built-ins. */ @@ -3397,8 +3423,8 @@ var SetDiff = &Builtin{ ), types.SetOfAny, ), - deprecated: true, - canSkipBctx: true, + Deprecated: true, + CanSkipBctx: true, } // NetCIDROverlap has been replaced by the `net.cidr_contains` built-in. 
@@ -3411,8 +3437,8 @@ var NetCIDROverlap = &Builtin{ ), types.B, ), - deprecated: true, - canSkipBctx: true, + Deprecated: true, + CanSkipBctx: true, } // CastArray checks the underlying type of the input. If it is array or set, an array @@ -3423,8 +3449,8 @@ var CastArray = &Builtin{ types.Args(types.A), types.NewArray(nil, types.A), ), - deprecated: true, - canSkipBctx: true, + Deprecated: true, + CanSkipBctx: true, } // CastSet checks the underlying type of the input. @@ -3437,8 +3463,8 @@ var CastSet = &Builtin{ types.Args(types.A), types.SetOfAny, ), - deprecated: true, - canSkipBctx: true, + Deprecated: true, + CanSkipBctx: true, } // CastString returns input if it is a string; if not returns error. @@ -3449,8 +3475,8 @@ var CastString = &Builtin{ types.Args(types.A), types.S, ), - deprecated: true, - canSkipBctx: true, + Deprecated: true, + CanSkipBctx: true, } // CastBoolean returns input if it is a boolean; if not returns error. @@ -3460,8 +3486,8 @@ var CastBoolean = &Builtin{ types.Args(types.A), types.B, ), - deprecated: true, - canSkipBctx: true, + Deprecated: true, + CanSkipBctx: true, } // CastNull returns null if input is null; if not returns error. @@ -3471,8 +3497,8 @@ var CastNull = &Builtin{ types.Args(types.A), types.Nl, ), - deprecated: true, - canSkipBctx: true, + Deprecated: true, + CanSkipBctx: true, } // CastObject returns the given object if it is null; throws an error otherwise @@ -3482,11 +3508,11 @@ var CastObject = &Builtin{ types.Args(types.A), types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), ), - deprecated: true, - canSkipBctx: true, + Deprecated: true, + CanSkipBctx: true, } -// RegexMatchDeprecated declares `re_match` which has been deprecated. Use `regex.match` instead. +// RegexMatchDeprecated declares `re_match` which has been Deprecated. Use `regex.match` instead. 
var RegexMatchDeprecated = &Builtin{ Name: "re_match", Decl: types.NewFunction( @@ -3496,8 +3522,8 @@ var RegexMatchDeprecated = &Builtin{ ), types.B, ), - deprecated: true, - canSkipBctx: false, + Deprecated: true, + CanSkipBctx: false, } // All takes a list and returns true if all of the items @@ -3513,8 +3539,8 @@ var All = &Builtin{ ), types.B, ), - deprecated: true, - canSkipBctx: true, + Deprecated: true, + CanSkipBctx: true, } // Any takes a collection and returns true if any of the items @@ -3530,8 +3556,8 @@ var Any = &Builtin{ ), types.B, ), - deprecated: true, - canSkipBctx: true, + Deprecated: true, + CanSkipBctx: true, } // Builtin represents a built-in function supported by OPA. Every built-in @@ -3545,11 +3571,11 @@ type Builtin struct { // "minus" for example, is part of two categories: numbers and sets. (NOTE(sr): aspirational) Categories []string `json:"categories,omitempty"` - Decl *types.Function `json:"decl"` // Built-in function type declaration. - Infix string `json:"infix,omitempty"` // Unique name of infix operator. Default should be unset. - Relation bool `json:"relation,omitempty"` // Indicates if the built-in acts as a relation. - deprecated bool // Indicates if the built-in has been deprecated. - canSkipBctx bool // Built-in needs no data from the built-in context. + Decl *types.Function `json:"decl"` // Built-in function type declaration. + Infix string `json:"infix,omitempty"` // Unique name of infix operator. Default should be unset. + Relation bool `json:"relation,omitempty"` // Indicates if the built-in acts as a relation. + Deprecated bool `json:"deprecated,omitempty"` // Indicates if the built-in has been deprecated. + CanSkipBctx bool `json:"-"` // Built-in needs no data from the built-in context. Nondeterministic bool `json:"nondeterministic,omitempty"` // Indicates if the built-in returns non-deterministic results. 
} @@ -3573,12 +3599,12 @@ func (b *Builtin) Minimal() *Builtin { return &cpy } -// IsDeprecated returns true if the Builtin function is deprecated and will be removed in a future release. +// IsDeprecated returns true if the Builtin function is Deprecated and will be removed in a future release. func (b *Builtin) IsDeprecated() bool { - return b.deprecated + return b.Deprecated } -// IsDeterministic returns true if the Builtin function returns non-deterministic results. +// IsNondeterministic returns true if the Builtin function returns non-deterministic results. func (b *Builtin) IsNondeterministic() bool { return b.Nondeterministic } @@ -3626,7 +3652,7 @@ func (b *Builtin) IsTargetPos(i int) bool { func (b *Builtin) NeedsBuiltInContext() bool { // Negated, so built-ins we don't know about (and who don't know about this option) // will get a built-in context provided to them. - return !b.canSkipBctx + return !b.CanSkipBctx } func init() { diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go b/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go index 8c98c0a9eb..170f0bf176 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go @@ -58,12 +58,14 @@ const FeatureRefHeads = "rule_head_refs" const FeatureRegoV1 = "rego_v1" const FeatureRegoV1Import = "rego_v1_import" const FeatureKeywordsInRefs = "keywords_in_refs" +const FeatureTemplateStrings = "template_strings" // Features carries the default features supported by this version of OPA. // Use RegisterFeatures to add to them. var Features = []string{ FeatureRegoV1, FeatureKeywordsInRefs, + FeatureTemplateStrings, } // RegisterFeatures lets applications wrapping OPA register features, to be @@ -95,7 +97,6 @@ type Capabilities struct { // As of now, this only controls fetching remote refs for using JSON Schemas in // the type checker. 
// TODO(sr): support ports to further restrict connection peers - // TODO(sr): support restricting `http.send` using the same mechanism (see https://github.com/open-policy-agent/opa/issues/3665) AllowNet []string `json:"allow_net,omitempty"` } @@ -220,19 +221,17 @@ func LoadCapabilitiesVersions() ([]string, error) { for _, ent := range ents { capabilitiesVersions = append(capabilitiesVersions, strings.Replace(ent.Name(), ".json", "", 1)) } + + slices.SortStableFunc(capabilitiesVersions, semver.Compare) + return capabilitiesVersions, nil } // MinimumCompatibleVersion returns the minimum compatible OPA version based on // the built-ins, features, and keywords in c. func (c *Capabilities) MinimumCompatibleVersion() (string, bool) { - var maxVersion semver.Version - // this is the oldest OPA release that includes capabilities - if err := maxVersion.Set("0.17.0"); err != nil { - panic("unreachable") - } - + maxVersion := semver.MustParse("0.17.0") minVersionIndex := minVersionIndexOnce() for _, bi := range c.Builtins { @@ -272,6 +271,12 @@ func (c *Capabilities) ContainsFeature(feature string) bool { return slices.Contains(c.Features, feature) } +func (c *Capabilities) ContainsBuiltin(name string) bool { + return slices.ContainsFunc(c.Builtins, func(builtin *Builtin) bool { + return builtin.Name == name + }) +} + // addBuiltinSorted inserts a built-in into c in sorted order. An existing built-in with the same name // will be overwritten. 
func (c *Capabilities) addBuiltinSorted(bi *Builtin) { diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/check.go b/vendor/github.com/open-policy-agent/opa/v1/ast/check.go index 0da7e26514..6e4d8ddd74 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/check.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/check.go @@ -6,8 +6,8 @@ package ast import ( "fmt" + "regexp" "slices" - "sort" "strings" "github.com/open-policy-agent/opa/v1/types" @@ -16,11 +16,6 @@ import ( type varRewriter func(Ref) Ref -// exprChecker defines the interface for executing type checking on a single -// expression. The exprChecker must update the provided TypeEnv with inferred -// types of vars. -type exprChecker func(*TypeEnv, *Expr) *Error - // typeChecker implements type checking on queries and rules. Errors are // accumulated on the typeChecker so that a single run can report multiple // issues. @@ -28,7 +23,6 @@ type typeChecker struct { builtins map[string]*Builtin required *Capabilities errs Errors - exprCheckers map[string]exprChecker varRewriter varRewriter ss *SchemaSet allowNet []string @@ -39,11 +33,7 @@ type typeChecker struct { // newTypeChecker returns a new typeChecker object that has no errors. func newTypeChecker() *typeChecker { - return &typeChecker{ - exprCheckers: map[string]exprChecker{ - "eq": checkExprEq, - }, - } + return &typeChecker{} } func (tc *typeChecker) newEnv(exist *TypeEnv) *TypeEnv { @@ -126,43 +116,39 @@ func (tc *typeChecker) Env(builtins map[string]*Builtin) *TypeEnv { // are found. The resulting TypeEnv wraps the provided one. The resulting // TypeEnv will be able to resolve types of vars contained in the body. 
func (tc *typeChecker) CheckBody(env *TypeEnv, body Body) (*TypeEnv, Errors) { + var errors []*Error - errors := []*Error{} env = tc.newEnv(env) vis := newRefChecker(env, tc.varRewriter) - - WalkExprs(body, func(expr *Expr) bool { - - closureErrs := tc.checkClosures(env, expr) - for _, err := range closureErrs { - errors = append(errors, err) - } - - hasClosureErrors := len(closureErrs) > 0 - - // reset errors from previous iteration - vis.errs = nil - NewGenericVisitor(vis.Visit).Walk(expr) - for _, err := range vis.errs { - errors = append(errors, err) - } - - hasRefErrors := len(vis.errs) > 0 - - if err := tc.checkExpr(env, expr); err != nil { - // Suppress this error if a more actionable one has occurred. In - // this case, if an error occurred in a ref or closure contained in - // this expression, and the error is due to a nil type, then it's - // likely to be the result of the more specific error. - skip := (hasClosureErrors || hasRefErrors) && causedByNilType(err) - if !skip { - errors = append(errors, err) + gv := NewGenericVisitor(vis.Visit) + + for _, bexpr := range body { + WalkExprs(bexpr, func(expr *Expr) bool { + closureErrs := tc.checkClosures(env, expr) + errors = append(errors, closureErrs...) + + // reset errors from previous iteration + vis.errs = nil + gv.Walk(expr) + errors = append(errors, vis.errs...) + + if err := tc.checkExpr(env, expr); err != nil { + hasClosureErrors := len(closureErrs) > 0 + hasRefErrors := len(vis.errs) > 0 + // Suppress this error if a more actionable one has occurred. In + // this case, if an error occurred in a ref or closure contained in + // this expression, and the error is due to a nil type, then it's + // likely to be the result of the more specific error. + skip := (hasClosureErrors || hasRefErrors) && causedByNilType(err) + if !skip { + errors = append(errors, err) + } } - } - return true - }) + return true + }) + } - tc.err(errors) + tc.err(errors...) 
return env, errors } @@ -243,7 +229,7 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) { for _, schemaAnnot := range schemaAnnots { refType, err := tc.getSchemaType(schemaAnnot, rule) if err != nil { - tc.err([]*Error{err}) + tc.err(err) continue } @@ -259,7 +245,7 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) { } else { newType, err := override(ref[len(prefixRef):], t, refType, rule) if err != nil { - tc.err([]*Error{err}) + tc.err(err) continue } env.tree.Put(prefixRef, newType) @@ -281,23 +267,25 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) { var tpe types.Type if len(rule.Head.Args) > 0 { - // If args are not referred to in body, infer as any. - WalkVars(rule.Head.Args, func(v Var) bool { - if cpy.GetByValue(v) == nil { - cpy.tree.PutOne(v, types.A) - } - return false - }) + for _, arg := range rule.Head.Args { + // If args are not referred to in body, infer as any. + WalkTerms(arg, func(t *Term) bool { + if _, ok := t.Value.(Var); ok { + if cpy.GetByValue(t.Value) == nil { + cpy.tree.PutOne(t.Value, types.A) + } + } + return false + }) + } // Construct function type. 
args := make([]types.Type, len(rule.Head.Args)) - for i := range len(rule.Head.Args) { + for i := range rule.Head.Args { args[i] = cpy.GetByValue(rule.Head.Args[i].Value) } - f := types.NewFunction(args, cpy.Get(rule.Head.Value)) - - tpe = f + tpe = types.NewFunction(args, cpy.GetByValue(rule.Head.Value.Value)) } else { switch rule.Head.RuleKind() { case SingleValue: @@ -310,7 +298,7 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) { var err error tpe, err = nestedObject(cpy, objPath, typeV) if err != nil { - tc.err([]*Error{NewError(TypeErr, rule.Head.Location, "%s", err.Error())}) + tc.err(NewError(TypeErr, rule.Head.Location, "%s", err.Error())) tpe = nil } } else if typeV != nil { @@ -374,19 +362,14 @@ func (tc *typeChecker) checkExpr(env *TypeEnv, expr *Expr) *Error { } } - checker := tc.exprCheckers[operator] - if checker != nil { - return checker(env, expr) + if operator == "eq" { + return checkExprEq(env, expr) } return tc.checkExprBuiltin(env, expr) } func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error { - - args := expr.Operands() - pre := getArgTypes(env, args) - // NOTE(tsandall): undefined functions will have been caught earlier in the // compiler. 
We check for undefined functions before the safety check so // that references to non-existent functions result in undefined function @@ -405,10 +388,12 @@ func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error { return NewError(TypeErr, expr.Location, "undefined function %v", name) } - // check if the expression refers to a function that contains an error - _, ok := tpe.(types.Any) - if ok { - return nil + if t, ok := tpe.(types.Any); ok { + // A type.Any with a len(0) is created by using types.A , this represents a potential non-local reference + // This is the exception when checking if the type represents a function + if len(t) == 0 { + return nil + } } ftpe, ok := tpe.(*types.Function) @@ -424,12 +409,14 @@ func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error { namedFargs.Args = append(namedFargs.Args, ftpe.NamedResult()) } + args := expr.Operands() + if len(args) > len(fargs.Args) && fargs.Variadic == nil { - return newArgError(expr.Location, name, "too many arguments", pre, namedFargs) + return newArgError(expr.Location, name, "too many arguments", getArgTypes(env, args), namedFargs) } if len(args) < len(ftpe.FuncArgs().Args) { - return newArgError(expr.Location, name, "too few arguments", pre, namedFargs) + return newArgError(expr.Location, name, "too few arguments", getArgTypes(env, args), namedFargs) } for i := range args { @@ -601,7 +588,7 @@ func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool { return unifies } return false - case Set: + case *set: switch tpe := tpe.(type) { case *types.Set: return unify1Set(env, v, tpe, union) @@ -676,14 +663,14 @@ func unify1Object(env *TypeEnv, val Object, tpe *types.Object, union bool) bool return !stop } -func unify1Set(env *TypeEnv, val Set, tpe *types.Set, union bool) bool { +func unify1Set(env *TypeEnv, val *set, tpe *types.Set, union bool) bool { of := types.Values(tpe) return !val.Until(func(elem *Term) bool { return !unify1(env, elem, of, union) }) } 
-func (tc *typeChecker) err(errors []*Error) { +func (tc *typeChecker) err(errors ...*Error) { tc.errs = append(tc.errs, errors...) } @@ -704,7 +691,6 @@ func newRefChecker(env *TypeEnv, f varRewriter) *refChecker { return &refChecker{ env: env, - errs: nil, varRewriter: f, } } @@ -716,8 +702,9 @@ func (rc *refChecker) Visit(x any) bool { case *Expr: switch terms := x.Terms.(type) { case []*Term: + vis := NewGenericVisitor(rc.Visit) for i := 1; i < len(terms); i++ { - NewGenericVisitor(rc.Visit).Walk(terms[i]) + vis.Walk(terms[i]) } return true case *Term: @@ -807,7 +794,6 @@ func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx i } func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error { - if idx == len(ref) { return nil } @@ -822,16 +808,16 @@ func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error { switch value := head.Value.(type) { case Var: - if exist := rc.env.GetByValue(value); exist != nil { + if exist := rc.env.GetByValue(head.Value); exist != nil { if !unifies(exist, keys) { return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe)) } } else { - rc.env.tree.PutOne(value, types.Keys(tpe)) + rc.env.tree.PutOne(head.Value, types.Keys(tpe)) } case Ref: - if exist := rc.env.Get(value); exist != nil { + if exist := rc.env.GetByRef(value); exist != nil { if !unifies(exist, keys) { return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe)) } @@ -1104,7 +1090,21 @@ func newRefErrInvalid(loc *Location, ref Ref, idx int, have, want types.Type, on } func newRefErrUnsupported(loc *Location, ref Ref, idx int, have types.Type) *Error { - err := newRefError(loc, ref) + var err *Error + switch have.(type) { + case *types.Function: + var function string + // drop any trailing references to unidentified parameters (e.g. 
__local1__) + if match, err := regexp.MatchString(`__local[0-9]+__`, ref[len(ref)-1].Value.String()); err == nil && match { + function = ref[:len(ref)-1].String() + } else { + function = ref.String() + } + + err = NewError(TypeErr, loc, "function %s used as reference, not called", function) + default: + err = newRefError(loc, ref) + } err.Details = &RefErrUnsupportedDetail{ Ref: ref, Pos: idx, @@ -1132,7 +1132,7 @@ func getOneOfForNode(node *typeTreeNode) (result []Value) { return false }) - sortValueSlice(result) + slices.SortFunc(result, Value.Compare) return result } @@ -1155,16 +1155,10 @@ func getOneOfForType(tpe types.Type) (result []Value) { } result = removeDuplicate(result) - sortValueSlice(result) + slices.SortFunc(result, Value.Compare) return result } -func sortValueSlice(sl []Value) { - sort.Slice(sl, func(i, j int) bool { - return sl[i].Compare(sl[j]) < 0 - }) -} - func removeDuplicate(list []Value) []Value { seen := make(map[Value]bool) var newResult []Value @@ -1188,13 +1182,13 @@ func getArgTypes(env *TypeEnv, args []*Term) []types.Type { // getPrefix returns the shortest prefix of ref that exists in env func getPrefix(env *TypeEnv, ref Ref) (Ref, types.Type) { if len(ref) == 1 { - t := env.Get(ref) + t := env.GetByRef(ref) if t != nil { return ref, t } } for i := 1; i < len(ref); i++ { - t := env.Get(ref[:i]) + t := env.GetByRef(ref[:i]) if t != nil { return ref[:i], t } @@ -1202,12 +1196,14 @@ func getPrefix(env *TypeEnv, ref Ref) (Ref, types.Type) { return nil, nil } +var dynamicAnyAny = types.NewDynamicProperty(types.A, types.A) + // override takes a type t and returns a type obtained from t where the path represented by ref within it has type o (overriding the original type of that path) func override(ref Ref, t types.Type, o types.Type, rule *Rule) (types.Type, *Error) { var newStaticProps []*types.StaticProperty obj, ok := t.(*types.Object) if !ok { - newType, err := getObjectType(ref, o, rule, types.NewDynamicProperty(types.A, types.A)) + 
newType, err := getObjectType(ref, o, rule, dynamicAnyAny) if err != nil { return nil, err } diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go index c4754341de..8cd2bf9dc4 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go @@ -5,9 +5,10 @@ package ast import ( - "encoding/json" + "cmp" "fmt" "math/big" + "strings" ) // Compare returns an integer indicating whether two AST values are less than, @@ -77,8 +78,7 @@ func Compare(a, b any) int { case Null: return 0 case Boolean: - b := b.(Boolean) - if a.Equal(b) { + if a == b.(Boolean) { return 0 } if !a { @@ -86,75 +86,23 @@ func Compare(a, b any) int { } return 1 case Number: - if ai, err := json.Number(a).Int64(); err == nil { - if bi, err := json.Number(b.(Number)).Int64(); err == nil { - if ai == bi { - return 0 - } - if ai < bi { - return -1 - } - return 1 - } - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. 
- var bigA, bigB *big.Rat - fa, ok := new(big.Float).SetString(string(a)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - bigA = new(big.Rat).SetInt64(0) - } - } - if bigA == nil { - bigA, ok = new(big.Rat).SetString(string(a)) - if !ok { - panic("illegal value") - } - } - - fb, ok := new(big.Float).SetString(string(b.(Number))) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - bigB = new(big.Rat).SetInt64(0) - } - } - if bigB == nil { - bigB, ok = new(big.Rat).SetString(string(b.(Number))) - if !ok { - panic("illegal value") - } - } - - return bigA.Cmp(bigB) + return NumberCompare(a, b.(Number)) case String: b := b.(String) - if a.Equal(b) { + if a == b { return 0 } if a < b { return -1 } return 1 + case *TemplateString: + b := b.(*TemplateString) + return a.Compare(b) case Var: return VarCompare(a, b.(Var)) case Ref: - b := b.(Ref) - return termSliceCompare(a, b) + return termSliceCompare(a, b.(Ref)) case *Array: b := b.(*Array) return termSliceCompare(a.elems, b.elems) @@ -164,11 +112,9 @@ func Compare(a, b any) int { if x, ok := b.(*lazyObj); ok { b = x.force() } - b := b.(*object) - return a.Compare(b) + return a.Compare(b.(*object)) case Set: - b := b.(Set) - return a.Compare(b) + return a.Compare(b.(Set)) case *ArrayComprehension: b := b.(*ArrayComprehension) if cmp := Compare(a.Term, b.Term); cmp != 0 { @@ -191,44 +137,31 @@ func Compare(a, b any) int { } return a.Body.Compare(b.Body) case Call: - b := b.(Call) - return termSliceCompare(a, b) + return termSliceCompare(a, b.(Call)) case *Expr: - b := b.(*Expr) - return a.Compare(b) + return a.Compare(b.(*Expr)) case *SomeDecl: - b := b.(*SomeDecl) - return a.Compare(b) + return a.Compare(b.(*SomeDecl)) case *Every: - b := b.(*Every) - return a.Compare(b) + return a.Compare(b.(*Every)) case *With: - b := b.(*With) - return a.Compare(b) + return a.Compare(b.(*With)) case Body: - b := b.(Body) - return a.Compare(b) + 
return a.Compare(b.(Body)) case *Head: - b := b.(*Head) - return a.Compare(b) + return a.Compare(b.(*Head)) case *Rule: - b := b.(*Rule) - return a.Compare(b) + return a.Compare(b.(*Rule)) case Args: - b := b.(Args) - return termSliceCompare(a, b) + return termSliceCompare(a, b.(Args)) case *Import: - b := b.(*Import) - return a.Compare(b) + return a.Compare(b.(*Import)) case *Package: - b := b.(*Package) - return a.Compare(b) + return a.Compare(b.(*Package)) case *Annotations: - b := b.(*Annotations) - return a.Compare(b) + return a.Compare(b.(*Annotations)) case *Module: - b := b.(*Module) - return a.Compare(b) + return a.Compare(b.(*Module)) } panic(fmt.Sprintf("illegal value: %T", a)) } @@ -249,26 +182,28 @@ func sortOrder(x any) int { return 2 case String: return 3 - case Var: + case *TemplateString: return 4 - case Ref: + case Var: return 5 - case *Array: + case Ref: return 6 - case Object: + case *Array: return 7 - case Set: + case Object: return 8 - case *ArrayComprehension: + case Set: return 9 - case *ObjectComprehension: + case *ArrayComprehension: return 10 - case *SetComprehension: + case *ObjectComprehension: return 11 - case Call: + case *SetComprehension: return 12 - case Args: + case Call: return 13 + case Args: + return 14 case *Expr: return 100 case *SomeDecl: @@ -392,14 +327,6 @@ func TermValueEqual(a, b *Term) bool { } func ValueEqual(a, b Value) bool { - // TODO(ae): why doesn't this work the same? - // - // case interface{ Equal(Value) bool }: - // return v.Equal(b) - // - // When put on top, golangci-lint even flags the other cases as unreachable.. - // but TestTopdownVirtualCache will have failing test cases when we replace - // the other cases with the above one.. 
🤔 switch v := a.(type) { case Null: return v.Equal(b) @@ -415,6 +342,8 @@ func ValueEqual(a, b Value) bool { return v.Equal(b) case *Array: return v.Equal(b) + case *TemplateString: + return v.Equal(b) } return a.Compare(b) == 0 @@ -427,3 +356,84 @@ func RefCompare(a, b Ref) int { func RefEqual(a, b Ref) bool { return termSliceEqual(a, b) } + +func NumberCompare(x, y Number) int { + xs, ys := string(x), string(y) + + var xIsF, yIsF bool + + // Treat "1" and "1.0", "1.00", etc as "1" + if strings.Contains(xs, ".") { + if tx := strings.TrimRight(xs, ".0"); tx != xs { + // Still a float after trimming? + xIsF = strings.Contains(tx, ".") + xs = tx + } + } + if strings.Contains(ys, ".") { + if ty := strings.TrimRight(ys, ".0"); ty != ys { + yIsF = strings.Contains(ty, ".") + ys = ty + } + } + if xs == ys { + return 0 + } + + var xi, yi int64 + var xf, yf float64 + var xiOK, yiOK, xfOK, yfOK bool + + if xi, xiOK = x.Int64(); xiOK { + if yi, yiOK = y.Int64(); yiOK { + return cmp.Compare(xi, yi) + } + } + + if xIsF && yIsF { + if xf, xfOK = x.Float64(); xfOK { + if yf, yfOK = y.Float64(); yfOK { + if xf == yf { + return 0 + } + // could still be "equal" depending on precision, so we continue? 
+ } + } + } + + var a *big.Rat + fa, ok := new(big.Float).SetString(string(x)) + if !ok { + panic("illegal value") + } + if fa.IsInt() { + if i, _ := fa.Int64(); i == 0 { + a = new(big.Rat).SetInt64(0) + } + } + if a == nil { + a, ok = new(big.Rat).SetString(string(x)) + if !ok { + panic("illegal value") + } + } + + var b *big.Rat + fb, ok := new(big.Float).SetString(string(y)) + if !ok { + panic("illegal value") + } + if fb.IsInt() { + if i, _ := fb.Int64(); i == 0 { + b = new(big.Rat).SetInt64(0) + } + } + if b == nil { + b, ok = new(big.Rat).SetString(string(y)) + if !ok { + panic("illegal value") + } + } + + return a.Cmp(b) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go index 094e659328..a9455145ef 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go @@ -11,8 +11,8 @@ import ( "maps" "slices" "sort" - "strconv" "strings" + "sync" "github.com/open-policy-agent/opa/internal/debug" "github.com/open-policy-agent/opa/internal/gojsonschema" @@ -27,7 +27,7 @@ import ( const CompileErrorLimitDefault = 10 var ( - errLimitReached = NewError(CompileErr, nil, "error limit reached") + errLimitReached = newErrorString(CompileErr, nil, "error limit reached") doubleEq = Equal.Ref() ) @@ -128,10 +128,11 @@ type Compiler struct { localvargen *localVarGenerator moduleLoader ModuleLoader - ruleIndices *util.HasherMap[Ref, RuleIndex] stages []stage maxErrs int - sorted []string // list of sorted module names + errCount uint32 + mu *sync.Mutex // Mutex to protect both 'errCount' and 'Errors' + sorted []string // list of sorted module names pathExists func([]string) (bool, error) pathConflictCheckRoots []string after map[string][]CompilerStageDefinition @@ -157,6 +158,8 @@ type Compiler struct { evalMode CompilerEvalMode // rewriteTestRulesForTracing bool // rewrite test rules to capture dynamic values for tracing. 
defaultRegoVersion RegoVersion + skipStages map[StageID]struct{} // stages to skip during compilation + plan *executionPlan // computed execution plan (cached) } func (c *Compiler) DefaultRegoVersion() RegoVersion { @@ -166,6 +169,93 @@ func (c *Compiler) DefaultRegoVersion() RegoVersion { // CompilerStage defines the interface for stages in the compiler. type CompilerStage func(*Compiler) *Error +// StageID uniquely identifies a compiler stage. +type StageID string + +// Compiler stage identifiers. +// Please use them when you depend on a compiler stage, like via [ast.Compiler.WithStageAfterID]. +// There is no guarantee that they are stable across OPA versions, but using the identifiers +// at least lets you know what your attention is needed when you depend on the stages. +const ( + StageResolveRefs StageID = "ResolveRefs" + StageInitLocalVarGen StageID = "InitLocalVarGen" + StageRewriteRuleHeadRefs StageID = "RewriteRuleHeadRefs" + StageCheckKeywordOverrides StageID = "CheckKeywordOverrides" + StageCheckDuplicateImports StageID = "CheckDuplicateImports" + StageRemoveImports StageID = "RemoveImports" + StageSetModuleTree StageID = "SetModuleTree" + StageSetRuleTree StageID = "SetRuleTree" + StageRewriteLocalVars StageID = "RewriteLocalVars" + StageRewriteTemplateStrings StageID = "RewriteTemplateStrings" + StageCheckVoidCalls StageID = "CheckVoidCalls" + StageRewritePrintCalls StageID = "RewritePrintCalls" + StageRewriteExprTerms StageID = "RewriteExprTerms" + StageParseMetadataBlocks StageID = "ParseMetadataBlocks" + StageSetAnnotationSet StageID = "SetAnnotationSet" + StageRewriteRegoMetadataCalls StageID = "RewriteRegoMetadataCalls" + StageSetGraph StageID = "SetGraph" + StageRewriteComprehensionTerms StageID = "RewriteComprehensionTerms" + StageRewriteRefsInHead StageID = "RewriteRefsInHead" + StageRewriteWithValues StageID = "RewriteWithValues" + StageCheckRuleConflicts StageID = "CheckRuleConflicts" + StageCheckUndefinedFuncs StageID = 
"CheckUndefinedFuncs" + StageCheckSafetyRuleHeads StageID = "CheckSafetyRuleHeads" + StageCheckSafetyRuleBodies StageID = "CheckSafetyRuleBodies" + StageRewriteEquals StageID = "RewriteEquals" + StageRewriteDynamicTerms StageID = "RewriteDynamicTerms" + StageRewriteTestRulesForTracing StageID = "RewriteTestRulesForTracing" + StageCheckRecursion StageID = "CheckRecursion" + StageCheckTypes StageID = "CheckTypes" + StageCheckUnsafeBuiltins StageID = "CheckUnsafeBuiltins" + StageCheckDeprecatedBuiltins StageID = "CheckDeprecatedBuiltins" + StageBuildRuleIndices StageID = "BuildRuleIndices" + StageBuildComprehensionIndices StageID = "BuildComprehensionIndices" + StageBuildRequiredCapabilities StageID = "BuildRequiredCapabilities" + + // These only exist in the [ast.QueryCompiler]: + StageCheckSafety StageID = "CheckSafety" +) + +// AllStages returns the complete list of compiler stages in execution order. +func AllStages() []StageID { + return []StageID{ + StageResolveRefs, + StageInitLocalVarGen, + StageRewriteRuleHeadRefs, + StageCheckKeywordOverrides, + StageCheckDuplicateImports, + StageRemoveImports, + StageSetModuleTree, + StageSetRuleTree, + StageRewriteLocalVars, + StageRewriteTemplateStrings, + StageCheckVoidCalls, + StageRewritePrintCalls, + StageRewriteExprTerms, + StageParseMetadataBlocks, + StageSetAnnotationSet, + StageRewriteRegoMetadataCalls, + StageSetGraph, + StageRewriteComprehensionTerms, + StageRewriteRefsInHead, + StageRewriteWithValues, + StageCheckRuleConflicts, + StageCheckUndefinedFuncs, + StageCheckSafetyRuleHeads, + StageCheckSafetyRuleBodies, + StageRewriteEquals, + StageRewriteDynamicTerms, + StageRewriteTestRulesForTracing, + StageCheckRecursion, + StageCheckTypes, + StageCheckUnsafeBuiltins, + StageCheckDeprecatedBuiltins, + StageBuildRuleIndices, + StageBuildComprehensionIndices, + StageBuildRequiredCapabilities, + } +} + // CompilerEvalMode allows toggling certain stages that are only // needed for certain modes, Concretely, only 
"topdown" mode will // have the compiler build comprehension and rule indices. @@ -188,6 +278,18 @@ type CompilerStageDefinition struct { Stage CompilerStage } +// executionPlan represents the complete ordered list of stages to execute. +type executionPlan struct { + stages []plannedStage +} + +// plannedStage represents a single stage in the execution plan. +type plannedStage struct { + name string + metricName string + f func() +} + // RulesOptions defines the options for retrieving rules by Ref from the // compiler. type RulesOptions struct { @@ -272,8 +374,15 @@ type QueryCompiler interface { // WithStageAfter registers a stage to run during query compilation after // the named stage. + // + // Caution: Use [ast.QueryCompiler.WithStageAfterID] instead. It provides + // more (Golang) compile-time safety WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler + // WithStageAfterID registers a stage to run during query compilation after + // the named stage. + WithStageAfterID(after StageID, stage QueryCompilerStageDefinition) QueryCompiler + // RewrittenVars maps generated vars in the compiled query to vars from the // parsed query. For example, given the query "input := 1" the rewritten // query would be "__local0__ = 1". The mapping would then be {__local0__: input}. @@ -298,7 +407,7 @@ type QueryCompilerStageDefinition struct { } type stage struct { - name string + name StageID metricName string f func() } @@ -310,8 +419,8 @@ func NewCompiler() *Compiler { Modules: map[string]*Module{}, RewrittenVars: map[Var]Var{}, Required: &Capabilities{}, - ruleIndices: util.NewHasherMap[Ref, RuleIndex](RefEqual), maxErrs: CompileErrorLimitDefault, + mu: &sync.Mutex{}, after: map[string][]CompilerStageDefinition{}, unsafeBuiltinsMap: map[string]struct{}{}, deprecatedBuiltinsMap: map[string]struct{}{}, @@ -327,42 +436,43 @@ func NewCompiler() *Compiler { // Reference resolution should run first as it may be used to lazily // load additional modules. 
If any stages run before resolution, they // need to be re-run after resolution. - {"ResolveRefs", "compile_stage_resolve_refs", c.resolveAllRefs}, + {StageResolveRefs, "compile_stage_resolve_refs", c.resolveAllRefs}, // The local variable generator must be initialized after references are // resolved and the dynamic module loader has run but before subsequent // stages that need to generate variables. - {"InitLocalVarGen", "compile_stage_init_local_var_gen", c.initLocalVarGen}, - {"RewriteRuleHeadRefs", "compile_stage_rewrite_rule_head_refs", c.rewriteRuleHeadRefs}, - {"CheckKeywordOverrides", "compile_stage_check_keyword_overrides", c.checkKeywordOverrides}, - {"CheckDuplicateImports", "compile_stage_check_imports", c.checkImports}, - {"RemoveImports", "compile_stage_remove_imports", c.removeImports}, - {"SetModuleTree", "compile_stage_set_module_tree", c.setModuleTree}, - {"SetRuleTree", "compile_stage_set_rule_tree", c.setRuleTree}, // depends on RewriteRuleHeadRefs - {"RewriteLocalVars", "compile_stage_rewrite_local_vars", c.rewriteLocalVars}, - {"CheckVoidCalls", "compile_stage_check_void_calls", c.checkVoidCalls}, - {"RewritePrintCalls", "compile_stage_rewrite_print_calls", c.rewritePrintCalls}, - {"RewriteExprTerms", "compile_stage_rewrite_expr_terms", c.rewriteExprTerms}, - {"ParseMetadataBlocks", "compile_stage_parse_metadata_blocks", c.parseMetadataBlocks}, - {"SetAnnotationSet", "compile_stage_set_annotationset", c.setAnnotationSet}, - {"RewriteRegoMetadataCalls", "compile_stage_rewrite_rego_metadata_calls", c.rewriteRegoMetadataCalls}, - {"SetGraph", "compile_stage_set_graph", c.setGraph}, - {"RewriteComprehensionTerms", "compile_stage_rewrite_comprehension_terms", c.rewriteComprehensionTerms}, - {"RewriteRefsInHead", "compile_stage_rewrite_refs_in_head", c.rewriteRefsInHead}, - {"RewriteWithValues", "compile_stage_rewrite_with_values", c.rewriteWithModifiers}, - {"CheckRuleConflicts", "compile_stage_check_rule_conflicts", c.checkRuleConflicts}, - 
{"CheckUndefinedFuncs", "compile_stage_check_undefined_funcs", c.checkUndefinedFuncs}, - {"CheckSafetyRuleHeads", "compile_stage_check_safety_rule_heads", c.checkSafetyRuleHeads}, - {"CheckSafetyRuleBodies", "compile_stage_check_safety_rule_bodies", c.checkSafetyRuleBodies}, - {"RewriteEquals", "compile_stage_rewrite_equals", c.rewriteEquals}, - {"RewriteDynamicTerms", "compile_stage_rewrite_dynamic_terms", c.rewriteDynamicTerms}, - {"RewriteTestRulesForTracing", "compile_stage_rewrite_test_rules_for_tracing", c.rewriteTestRuleEqualities}, // must run after RewriteDynamicTerms - {"CheckRecursion", "compile_stage_check_recursion", c.checkRecursion}, - {"CheckTypes", "compile_stage_check_types", c.checkTypes}, // must be run after CheckRecursion - {"CheckUnsafeBuiltins", "compile_state_check_unsafe_builtins", c.checkUnsafeBuiltins}, - {"CheckDeprecatedBuiltins", "compile_state_check_deprecated_builtins", c.checkDeprecatedBuiltins}, - {"BuildRuleIndices", "compile_stage_rebuild_indices", c.buildRuleIndices}, - {"BuildComprehensionIndices", "compile_stage_rebuild_comprehension_indices", c.buildComprehensionIndices}, - {"BuildRequiredCapabilities", "compile_stage_build_required_capabilities", c.buildRequiredCapabilities}, + {StageInitLocalVarGen, "compile_stage_init_local_var_gen", c.initLocalVarGen}, + {StageRewriteRuleHeadRefs, "compile_stage_rewrite_rule_head_refs", c.rewriteRuleHeadRefs}, + {StageCheckKeywordOverrides, "compile_stage_check_keyword_overrides", c.checkKeywordOverrides}, + {StageCheckDuplicateImports, "compile_stage_check_imports", c.checkImports}, + {StageRemoveImports, "compile_stage_remove_imports", c.removeImports}, + {StageSetModuleTree, "compile_stage_set_module_tree", c.setModuleTree}, + {StageSetRuleTree, "compile_stage_set_rule_tree", c.setRuleTree}, // depends on RewriteRuleHeadRefs + {StageRewriteLocalVars, "compile_stage_rewrite_local_vars", c.rewriteLocalVars}, + {StageRewriteTemplateStrings, "compile_stage_rewrite_template_strings", 
c.rewriteTemplateStrings}, + {StageCheckVoidCalls, "compile_stage_check_void_calls", c.checkVoidCalls}, + {StageRewritePrintCalls, "compile_stage_rewrite_print_calls", c.rewritePrintCalls}, + {StageRewriteExprTerms, "compile_stage_rewrite_expr_terms", c.rewriteExprTerms}, + {StageParseMetadataBlocks, "compile_stage_parse_metadata_blocks", c.parseMetadataBlocks}, + {StageSetAnnotationSet, "compile_stage_set_annotationset", c.setAnnotationSet}, + {StageRewriteRegoMetadataCalls, "compile_stage_rewrite_rego_metadata_calls", c.rewriteRegoMetadataCalls}, + {StageSetGraph, "compile_stage_set_graph", c.setGraph}, + {StageRewriteComprehensionTerms, "compile_stage_rewrite_comprehension_terms", c.rewriteComprehensionTerms}, + {StageRewriteRefsInHead, "compile_stage_rewrite_refs_in_head", c.rewriteRefsInHead}, + {StageRewriteWithValues, "compile_stage_rewrite_with_values", c.rewriteWithModifiers}, + {StageCheckRuleConflicts, "compile_stage_check_rule_conflicts", c.checkRuleConflicts}, + {StageCheckUndefinedFuncs, "compile_stage_check_undefined_funcs", c.checkUndefinedFuncs}, + {StageCheckSafetyRuleHeads, "compile_stage_check_safety_rule_heads", c.checkSafetyRuleHeads}, + {StageCheckSafetyRuleBodies, "compile_stage_check_safety_rule_bodies", c.checkSafetyRuleBodies}, + {StageRewriteEquals, "compile_stage_rewrite_equals", c.rewriteEquals}, + {StageRewriteDynamicTerms, "compile_stage_rewrite_dynamic_terms", c.rewriteDynamicTerms}, + {StageRewriteTestRulesForTracing, "compile_stage_rewrite_test_rules_for_tracing", c.rewriteTestRuleEqualities}, // must run after RewriteDynamicTerms + {StageCheckRecursion, "compile_stage_check_recursion", c.checkRecursion}, + {StageCheckTypes, "compile_stage_check_types", c.checkTypes}, // must be run after CheckRecursion + {StageCheckUnsafeBuiltins, "compile_state_check_unsafe_builtins", c.checkUnsafeBuiltins}, + {StageCheckDeprecatedBuiltins, "compile_state_check_deprecated_builtins", c.checkDeprecatedBuiltins}, + {StageBuildRuleIndices, 
"compile_stage_rebuild_indices", c.buildRuleIndices}, + {StageBuildComprehensionIndices, "compile_stage_rebuild_comprehension_indices", c.buildComprehensionIndices}, + {StageBuildRequiredCapabilities, "compile_stage_build_required_capabilities", c.buildRequiredCapabilities}, } return c @@ -402,11 +512,45 @@ func (c *Compiler) WithPathConflictsCheckRoots(rootPaths []string) *Compiler { // WithStageAfter registers a stage to run during compilation after // the named stage. +// +// Caution: Consider using [ast.QueryCompiler.WithStageAfterID] instead. It provides +// more (Golang) compile-time safety func (c *Compiler) WithStageAfter(after string, stage CompilerStageDefinition) *Compiler { c.after[after] = append(c.after[after], stage) + c.plan = nil // invalidate cached plan + return c +} + +// WithStageAfterID registers a stage to run during compilation after +// the identified stage. +func (c *Compiler) WithStageAfterID(after StageID, stage CompilerStageDefinition) *Compiler { + return c.WithStageAfter(string(after), stage) +} + +// WithSkipStages configures the compiler to skip the specified stages during +// compilation. This invalidates any cached execution plan. +func (c *Compiler) WithSkipStages(stages ...StageID) *Compiler { + if c.skipStages == nil { + c.skipStages = make(map[StageID]struct{}, len(stages)) + } + for _, s := range stages { + c.skipStages[s] = struct{}{} + } + c.plan = nil // invalidate cached plan return c } +// WithOnlyStagesUpTo configures the compiler to run only stages up to and +// including the specified target stage. All stages after the target will be skipped. +func (c *Compiler) WithOnlyStagesUpTo(target StageID) *Compiler { + allStages := AllStages() + i := slices.Index(allStages, target) + if i == -1 { + return c + } + return c.WithSkipStages(allStages[i+1:]...) +} + // WithMetrics will set a metrics.Metrics and be used for profiling // the Compiler instance. 
func (c *Compiler) WithMetrics(metrics metrics.Metrics) *Compiler { @@ -440,6 +584,7 @@ func (c *Compiler) WithDebug(sink io.Writer) *Compiler { } // WithBuiltins is deprecated. +// // Deprecated: Use WithCapabilities instead. func (c *Compiler) WithBuiltins(builtins map[string]*Builtin) *Compiler { c.customBuiltins = maps.Clone(builtins) @@ -447,6 +592,7 @@ func (c *Compiler) WithBuiltins(builtins map[string]*Builtin) *Compiler { } // WithUnsafeBuiltins is deprecated. +// // Deprecated: Use WithCapabilities instead. func (c *Compiler) WithUnsafeBuiltins(unsafeBuiltins map[string]struct{}) *Compiler { maps.Copy(c.unsafeBuiltinsMap, unsafeBuiltins) @@ -590,7 +736,7 @@ func (c *Compiler) GetRulesExact(ref Ref) (rules []*Rule) { } } - return extractRules(node.Values) + return node.Values } // GetRulesForVirtualDocument returns a slice of rules that produce the virtual @@ -617,11 +763,11 @@ func (c *Compiler) GetRulesForVirtualDocument(ref Ref) (rules []*Rule) { return nil } if len(node.Values) > 0 { - return extractRules(node.Values) + return node.Values } } - return extractRules(node.Values) + return node.Values } // GetRulesWithPrefix returns a slice of rules that share the prefix ref. @@ -652,7 +798,7 @@ func (c *Compiler) GetRulesWithPrefix(ref Ref) (rules []*Rule) { var acc func(node *TreeNode) acc = func(node *TreeNode) { - rules = append(rules, extractRules(node.Values)...) + rules = append(rules, node.Values...) for _, child := range node.Children { if child.Hide { continue @@ -666,14 +812,6 @@ func (c *Compiler) GetRulesWithPrefix(ref Ref) (rules []*Rule) { return rules } -func extractRules(s []any) []*Rule { - rules := make([]*Rule, len(s)) - for i := range s { - rules[i] = s[i].(*Rule) - } - return rules -} - // GetRules returns a slice of rules that are referred to by ref. // // E.g., given the following module: @@ -809,9 +947,9 @@ func (c *Compiler) GetRulesDynamicWithOpts(ref Ref, opts RulesOptions) []*Rule { } // Utility: add all rule values to the set. 
-func insertRules(set map[*Rule]struct{}, rules []any) { +func insertRules(set map[*Rule]struct{}, rules []*Rule) { for _, rule := range rules { - set[rule.(*Rule)] = struct{}{} + set[rule] = struct{}{} } } @@ -820,11 +958,10 @@ func insertRules(set map[*Rule]struct{}, rules []any) { // data.a.b.c.p, refs data.a.b.c.p.x and data.a.b.c would not return a // RuleIndex built for the rule. func (c *Compiler) RuleIndex(path Ref) RuleIndex { - r, ok := c.ruleIndices.Get(path) - if !ok { - return nil + if node := c.RuleTree.Find(path); node != nil { + return node.Index } - return r + return nil } // PassesTypeCheck determines whether the given body passes type checking @@ -837,7 +974,7 @@ func (c *Compiler) PassesTypeCheck(body Body) bool { // PassesTypeCheckRules determines whether the given rules passes type checking func (c *Compiler) PassesTypeCheckRules(rules []*Rule) Errors { - elems := []util.T{} + elems := make([]util.T, 0, len(rules)) for _, rule := range rules { elems = append(elems, rule) @@ -854,7 +991,7 @@ func (c *Compiler) PassesTypeCheckRules(rules []*Rule) Errors { tpe, err := loadSchema(schema, allowNet) if err != nil { - return Errors{NewError(TypeErr, nil, "%s", err.Error())} + return Errors{newErrorString(TypeErr, nil, err.Error())} } c.inputType = tpe } @@ -911,6 +1048,61 @@ func (c *Compiler) WithDefaultRegoVersion(regoVersion RegoVersion) *Compiler { return c } +// buildExecutionPlan creates the unified list of stages to execute, including +// both main stages and "after" stages, with filtering applied. 
+func (c *Compiler) buildExecutionPlan() *executionPlan { + plan := &executionPlan{ + stages: make([]plannedStage, 0, len(c.stages)*2), + } + + for _, s := range c.stages { + if _, skip := c.skipStages[s.name]; skip { + continue + } + + plan.stages = append(plan.stages, plannedStage{name: string(s.name), metricName: s.metricName, f: s.f}) + + for _, a := range c.after[string(s.name)] { + if _, skip := c.skipStages[StageID(a.Name)]; skip { + continue + } + + afterStage := a // Capture variables in closure properly + plan.stages = append(plan.stages, plannedStage{ + name: afterStage.Name, + metricName: afterStage.MetricName, + f: func() { + if err := afterStage.Stage(c); err != nil { + c.err(err) + } + }, + }) + } + } + + return plan +} + +// getOrBuildPlan ensures we have a valid execution plan. +func (c *Compiler) getOrBuildPlan() *executionPlan { + if c.plan == nil { + c.plan = c.buildExecutionPlan() + } + return c.plan +} + +// StagesToRun returns the list of stage IDs that will be executed during +// compilation, in execution order. This includes both main stages and any +// registered "after" stages. +func (c *Compiler) StagesToRun() []StageID { + plan := c.getOrBuildPlan() + result := make([]StageID, len(plan.stages)) + for i, s := range plan.stages { + result[i] = StageID(s.name) + } + return result +} + func (c *Compiler) counterAdd(name string, n uint64) { if c.metrics == nil { return @@ -924,11 +1116,15 @@ func (c *Compiler) buildRuleIndices() { if len(node.Values) == 0 { return false } - rules := extractRules(node.Values) + rules := node.Values hasNonGroundRef := false for _, r := range rules { hasNonGroundRef = !r.Head.Ref().IsGround() + if hasNonGroundRef { + break + } } + if hasNonGroundRef { // Collect children to ensure that all rules within the extent of a rule with a general ref // are found on the same index. E.g. 
the following rules should be indexed under data.a.b.c: @@ -939,7 +1135,7 @@ func (c *Compiler) buildRuleIndices() { // b.c.d2.e[x] := 3 { x := input.x } for _, child := range node.Children { child.DepthFirst(func(c *TreeNode) bool { - rules = append(rules, extractRules(c.Values)...) + rules = append(rules, c.Values...) return false }) } @@ -949,25 +1145,29 @@ func (c *Compiler) buildRuleIndices() { return isVirtual(c.RuleTree, ref.GroundPrefix()) }) if index.Build(rules) { - c.ruleIndices.Put(rules[0].Ref().GroundPrefix(), index) + node.Index = index } return hasNonGroundRef // currently, we don't allow those branches to go deeper }) - } func (c *Compiler) buildComprehensionIndices() { + vis := varVisitorPool.Get() + for _, name := range c.sorted { WalkRules(c.Modules[name], func(r *Rule) bool { - candidates := ReservedVars.Copy() + vis = vis.Clear() + vis.vars.Update(ReservedVars) if len(r.Head.Args) > 0 { - candidates.Update(r.Head.Args.Vars()) + vis.WalkArgs(r.Head.Args) } - n := buildComprehensionIndices(c.debug, c.GetArity, candidates, c.RewrittenVars, r.Body, c.comprehensionIndices) + n := buildComprehensionIndices(c.debug, c.GetArity, vis.vars, c.RewrittenVars, r.Body, c.comprehensionIndices) c.counterAdd(compileStageComprehensionIndexBuild, n) return false }) } + + varVisitorPool.Put(vis) } var futureKeywordsPrefix = Ref{FutureRootDocument, InternedTerm("keywords")} @@ -986,16 +1186,15 @@ func (c *Compiler) buildRequiredCapabilities() { for _, name := range c.sorted { for _, imp := range c.imports[name] { - mod := c.Modules[name] path := imp.Path.Value.(Ref) switch { case path.Equal(RegoV1CompatibleRef): - if !c.moduleIsRegoV1(mod) { + if !c.moduleIsRegoV1(c.Modules[name]) { features[FeatureRegoV1Import] = struct{}{} } case path.HasPrefix(futureKeywordsPrefix): if len(path) == 2 { - if c.moduleIsRegoV1(mod) { + if c.moduleIsRegoV1(c.Modules[name]) { for kw := range futureKeywords { keywords[kw] = struct{}{} } @@ -1006,7 +1205,7 @@ func (c *Compiler) 
buildRequiredCapabilities() { } } else { kw := string(path[2].Value.(String)) - if c.moduleIsRegoV1(mod) { + if c.moduleIsRegoV1(c.Modules[name]) { for allowedKw := range futureKeywords { if kw == allowedKw { keywords[kw] = struct{}{} @@ -1049,11 +1248,15 @@ func (c *Compiler) buildRequiredCapabilities() { } } - c.Required.Features = util.KeysSorted(features) - for i, bi := range c.Required.Builtins { c.Required.Builtins[i] = bi.Minimal() + + if bi.Name == InternalTemplateString.Name { + features[FeatureTemplateStrings] = struct{}{} + } } + + c.Required.Features = util.KeysSorted(features) } // checkRecursion ensures that there are no recursive definitions, i.e., there are @@ -1065,7 +1268,7 @@ func (c *Compiler) checkRecursion() { c.RuleTree.DepthFirst(func(node *TreeNode) bool { for _, rule := range node.Values { - for node := rule.(*Rule); node != nil; node = node.Else { + for node := rule; node != nil; node = node.Else { c.checkSelfPath(node.Loc(), eq, node, node) } } @@ -1080,7 +1283,9 @@ func (c *Compiler) checkSelfPath(loc *Location, eq func(a, b util.T) bool, a, b for _, x := range p { n = append(n, astNodeToString(x)) } - c.err(NewError(RecursionErr, loc, "rule %v is recursive: %v", astNodeToString(a), strings.Join(n, " -> "))) + if !c.err(NewError(RecursionErr, loc, "rule %v is recursive: %v", astNodeToString(a), strings.Join(n, " -> "))) { + return + } } } @@ -1106,7 +1311,7 @@ func (c *Compiler) checkRuleConflicts() { defaultRules := make([]*Rule, 0) for _, rule := range node.Values { - r := rule.(*Rule) + r := rule ref := r.Ref() name = rw(ref.CopyNonGround()).String() // varRewriter operates in-place kinds[r.Head.RuleKind()] = struct{}{} @@ -1163,26 +1368,24 @@ func (c *Compiler) checkRuleConflicts() { switch { case conflicts != nil: - c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "rule %v conflicts with %v", name, conflicts)) + return !c.err(NewError(TypeErr, node.Values[0].Loc(), "rule %v conflicts with %v", name, conflicts)) case len(kinds) 
> 1 || len(arities) > 1 || (completeRules >= 1 && partialRules >= 1): - c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "conflicting rules %v found", name)) + return !c.err(NewError(TypeErr, node.Values[0].Loc(), "conflicting rules %v found", name)) case len(defaultRules) > 1: + buf := append(append(append(make([]byte, 0, 64), "multiple default rules "...), name...), " found at "...) + buf, _ = defaultRules[0].Loc().AppendText(buf) - defaultRuleLocations := strings.Builder{} - defaultRuleLocations.WriteString(defaultRules[0].Loc().String()) - for i := 1; i < len(defaultRules); i++ { - defaultRuleLocations.WriteString(", ") - defaultRuleLocations.WriteString(defaultRules[i].Loc().String()) + for _, next := range defaultRules[1:] { + buf, _ = next.Loc().AppendText(append(buf, ", "...)) } - c.err(NewError( - TypeErr, - defaultRules[0].Module.Package.Loc(), - "multiple default rules %s found at %s", - name, defaultRuleLocations.String()), - ) + return !c.err(&Error{ + Code: TypeErr, + Location: defaultRules[0].Module.Package.Loc(), + Message: util.ByteSliceToString(buf), + }) } return false @@ -1212,8 +1415,9 @@ func (c *Compiler) checkRuleConflicts() { if childMod.Equal(mod) { continue // don't self-conflict } - msg := fmt.Sprintf("%v conflicts with rule %v defined at %v", childMod.Package, rule.Head.Ref(), rule.Loc()) - c.err(NewError(TypeErr, mod.Package.Loc(), "%s", msg)) + if !c.err(NewError(TypeErr, mod.Package.Loc(), "%v conflicts with rule %v defined at %v", childMod.Package, rule.Head.Ref(), rule.Loc())) { + return true + } } } } @@ -1225,9 +1429,7 @@ func (c *Compiler) checkRuleConflicts() { func (c *Compiler) checkUndefinedFuncs() { for _, name := range c.sorted { m := c.Modules[name] - for _, err := range checkUndefinedFuncs(c.TypeEnv, m, c.GetArity, c.RewrittenVars) { - c.err(err) - } + c.err(checkUndefinedFuncs(c.TypeEnv, m, c.GetArity, c.RewrittenVars)...) 
} } @@ -1283,25 +1485,29 @@ func arityMismatchError(env *TypeEnv, f Ref, expr *Expr, exp, act int) *Error { // positions of built-in expressions will be bound when evaluating the rule from left // to right, re-ordering as necessary. func (c *Compiler) checkSafetyRuleBodies() { + vis := varVisitorPool.Get() + for _, name := range c.sorted { m := c.Modules[name] WalkRules(m, func(r *Rule) bool { - safe := ReservedVars.Copy() + vis = vis.Clear() + // vis.vars == safe + vis.vars.Update(ReservedVars) if len(r.Head.Args) > 0 { - safe.Update(r.Head.Args.Vars()) + vis.WalkArgs(r.Head.Args) } - r.Body = c.checkBodySafety(safe, r.Body) + r.Body = c.checkBodySafety(vis.vars, r.Body) return false }) } + + varVisitorPool.Put(vis) } func (c *Compiler) checkBodySafety(safe VarSet, b Body) Body { reordered, unsafe := reorderBodyForSafety(c.builtins, c.GetArity, safe, b) if errs := safetyErrorSlice(unsafe, c.RewrittenVars); len(errs) > 0 { - for _, err := range errs { - c.err(err) - } + c.err(errs...) return b } return reordered @@ -1318,22 +1524,30 @@ var SafetyCheckVisitorParams = VarVisitorParams{ // checkSafetyRuleHeads ensures that variables appearing in the head of a // rule also appear in the body. 
func (c *Compiler) checkSafetyRuleHeads() { + vis := varVisitorPool.Get() + for _, name := range c.sorted { WalkRules(c.Modules[name], func(r *Rule) bool { - safe := r.Body.Vars(SafetyCheckVisitorParams) - if len(r.Head.Args) > 0 { - safe.Update(r.Head.Args.Vars()) - } if headMayHaveVars(r.Head) { + vis = vis.Clear().WithParams(SafetyCheckVisitorParams) + vis.WalkBody(r.Body) + + vis = vis.WithParams(VarVisitorParams{}) + if len(r.Head.Args) > 0 { + vis.WalkArgs(r.Head.Args) + } + vars := r.Head.Vars() - if vars.DiffCount(safe) > 0 { - unsafe := vars.Diff(safe) + if vars.DiffCount(vis.vars) > 0 { + unsafe := vars.Diff(vis.vars) for v := range unsafe { if w, ok := c.RewrittenVars[v]; ok { v = w } if !v.IsGenerated() { - c.err(NewError(UnsafeVarErr, r.Loc(), "var %v is unsafe", v)) + if !c.err(NewError(UnsafeVarErr, vars[v].Location, "var %v is unsafe", v)) { + return true + } } } } @@ -1341,6 +1555,8 @@ func (c *Compiler) checkSafetyRuleHeads() { return false }) } + + varVisitorPool.Put(vis) } func compileSchema(goSchema any, allowNet []string) (*gojsonschema.Schema, error) { @@ -1629,9 +1845,8 @@ func (c *Compiler) checkDeprecatedBuiltins() { } for _, name := range c.sorted { - mod := c.Modules[name] - if c.strict || mod.regoV1Compatible() { - errs := checkDeprecatedBuiltins(c.deprecatedBuiltinsMap, mod) + if c.strict || c.Modules[name].regoV1Compatible() { + errs := checkDeprecatedBuiltins(c.deprecatedBuiltinsMap, c.Modules[name]) for _, err := range errs { c.err(err) } @@ -1639,49 +1854,22 @@ func (c *Compiler) checkDeprecatedBuiltins() { } } -func (c *Compiler) runStage(metricName string, f func()) { - if c.metrics != nil { - c.metrics.Timer(metricName).Start() - defer c.metrics.Timer(metricName).Stop() - } - f() -} - -func (c *Compiler) runStageAfter(metricName string, s CompilerStage) *Error { - if c.metrics != nil { - c.metrics.Timer(metricName).Start() - defer c.metrics.Timer(metricName).Stop() - } - return s(c) -} - func (c *Compiler) compile() { + plan := 
c.getOrBuildPlan() - defer func() { - if r := recover(); r != nil && r != errLimitReached { - panic(r) - } - }() - - for _, s := range c.stages { - if c.evalMode == EvalModeIR { - switch s.name { - case "BuildRuleIndices", "BuildComprehensionIndices": - continue // skip these stages + if c.metrics != nil { + for _, s := range plan.stages { + c.metrics.Timer(s.metricName).Start() + s.f() + c.metrics.Timer(s.metricName).Stop() + if c.Failed() { + return } } - - if c.allowUndefinedFuncCalls && (s.name == "CheckUndefinedFuncs" || s.name == "CheckSafetyRuleBodies") { - continue - } - - c.runStage(s.metricName, s.f) - if c.Failed() { - return - } - for _, a := range c.after[s.name] { - if err := c.runStageAfter(a.MetricName, a.Stage); err != nil { - c.err(err) + } else { + for _, s := range plan.stages { + s.f() + if c.Failed() { return } } @@ -1739,7 +1927,9 @@ func (c *Compiler) init() { if schema := c.schemaSet.Get(SchemaRootRef); schema != nil { tpe, err := loadSchema(schema, c.capabilities.AllowNet) if err != nil { - c.err(NewError(TypeErr, nil, "%s", err.Error())) + if !c.err(newErrorString(TypeErr, nil, err.Error())) { + return + } } else { c.inputType = tpe } @@ -1751,26 +1941,58 @@ func (c *Compiler) init() { WithInputType(c.inputType). Env(c.builtins) + // Configure default stage skips based on existing configuration + if c.evalMode == EvalModeIR { + c.WithSkipStages(StageBuildRuleIndices, StageBuildComprehensionIndices) + } + if c.allowUndefinedFuncCalls { + c.WithSkipStages(StageCheckUndefinedFuncs, StageCheckSafetyRuleBodies) + } + c.initialized = true } -func (c *Compiler) err(err *Error) { - if c.maxErrs > 0 && len(c.Errors) >= c.maxErrs { +func (c *Compiler) err(errs ...*Error) bool { // returns if we should continue + if len(errs) == 0 { + return true + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.maxErrs <= 0 { + c.Errors = append(c.Errors, errs...) 
+ return true + } + + remaining := c.maxErrs - int(c.errCount) + if remaining <= 0 { + // The error limit has already been reached or exceeded. + // No more errors should be added. + return false + } + + numToTake := min(remaining, len(errs)) + + // The limit is reached if, after adding numToTake errors, the total count + // is equal to c.maxErrs. + isLimitReachedInThisCall := (int(c.errCount)+numToTake == c.maxErrs) + + c.errCount += uint32(numToTake) + c.Errors = append(c.Errors, errs[:numToTake]...) + if isLimitReachedInThisCall { c.Errors = append(c.Errors, errLimitReached) - panic(errLimitReached) } - c.Errors = append(c.Errors, err) + + return !isLimitReachedInThisCall // Return false if the limit was reached, true otherwise. } func (c *Compiler) getExports() *util.HasherMap[Ref, []Ref] { - rules := util.NewHasherMap[Ref, []Ref](RefEqual) for _, name := range c.sorted { - mod := c.Modules[name] - - for _, rule := range mod.Rules { - hashMapAdd(rules, mod.Package.Path, rule.Head.Ref().GroundPrefix()) + for _, rule := range c.Modules[name].Rules { + hashMapAdd(rules, c.Modules[name].Package.Path, rule.Head.Ref().GroundPrefix()) } } @@ -1814,32 +2036,27 @@ func (c *Compiler) checkImports() { c.capabilities.ContainsFeature(FeatureRegoV1) for _, name := range c.sorted { - mod := c.Modules[name] - - for _, imp := range mod.Imports { + for _, imp := range c.Modules[name].Imports { if !supportsRegoV1Import && RegoV1CompatibleRef.Equal(imp.Path.Value) { - c.err(NewError(CompileErr, imp.Loc(), "rego.v1 import is not supported")) + if !c.err(NewError(CompileErr, imp.Loc(), "rego.v1 import is not supported")) { + continue + } } } - if c.strict || c.moduleIsRegoV1Compatible(mod) { - modules = append(modules, mod) + if c.strict || c.moduleIsRegoV1Compatible(c.Modules[name]) { + modules = append(modules, c.Modules[name]) } } - errs := checkDuplicateImports(modules) - for _, err := range errs { - c.err(err) - } + c.err(checkDuplicateImports(modules)...) 
} func (c *Compiler) checkKeywordOverrides() { for _, name := range c.sorted { - mod := c.Modules[name] - if c.strict || c.moduleIsRegoV1Compatible(mod) { - errs := checkRootDocumentOverrides(mod) - for _, err := range errs { - c.err(err) + if c.strict || c.moduleIsRegoV1Compatible(c.Modules[name]) { + if !c.err(checkRootDocumentOverrides(c.Modules[name])...) { + continue } } } @@ -1891,12 +2108,10 @@ func (c *Compiler) moduleIsRegoV1Compatible(mod *Module) bool { // // The reference "c.d.e" would be resolved to "data.a.b.c.d.e". func (c *Compiler) resolveAllRefs() { - rules := c.getExports() for _, name := range c.sorted { mod := c.Modules[name] - var ruleExports []Ref if x, ok := rules.Get(mod.Package.Path); ok { ruleExports = x @@ -1907,7 +2122,7 @@ func (c *Compiler) resolveAllRefs() { WalkRules(mod, func(rule *Rule) bool { err := resolveRefsInRule(globals, rule) if err != nil { - c.err(NewError(CompileErr, rule.Location, "%s", err.Error())) + return c.err(newErrorString(CompileErr, rule.Location, err.Error())) } return false }) @@ -1921,7 +2136,9 @@ func (c *Compiler) resolveAllRefs() { for v, u := range globals { if v.Equal(imp.Name()) && !u.used { - c.err(NewError(CompileErr, imp.Location, "%s unused", imp.String())) + if !c.err(NewError(CompileErr, imp.Location, "%s unused", imp.String())) { + return + } } } } @@ -1932,7 +2149,7 @@ func (c *Compiler) resolveAllRefs() { parsed, err := c.moduleLoader(c.Modules) if err != nil { - c.err(NewError(CompileErr, nil, "%s", err.Error())) + c.err(newErrorString(CompileErr, nil, err.Error())) return } @@ -1968,15 +2185,13 @@ func (c *Compiler) initLocalVarGen() { func (c *Compiler) rewriteComprehensionTerms() { f := newEqualityFactory(c.localvargen) for _, name := range c.sorted { - mod := c.Modules[name] - _, _ = rewriteComprehensionTerms(f, mod) // ignore error + _, _ = rewriteComprehensionTerms(f, c.Modules[name]) // ignore error } } func (c *Compiler) rewriteExprTerms() { for _, name := range c.sorted { - mod := 
c.Modules[name] - WalkRules(mod, func(rule *Rule) bool { + WalkRules(c.Modules[name], func(rule *Rule) bool { rewriteExprTermsInHead(c.localvargen, rule) rule.Body = rewriteExprTermsInBody(c.localvargen, rule.Body) return false @@ -2019,7 +2234,9 @@ func (c *Compiler) rewriteRuleHeadRefs() { for i := 1; i < len(ref); i++ { if cannotSpeakGeneralRefs && (rule.Head.RuleKind() == MultiValue || i != len(ref)-1) { // last if _, ok := ref[i].Value.(String); !ok { - c.err(NewError(TypeErr, rule.Loc(), "rule heads with general refs (containing variables) are not supported: %v", rule.Head.Reference)) + if !c.err(NewError(TypeErr, rule.Loc(), "rule heads with general refs (containing variables) are not supported: %v", rule.Head.Reference)) { + return true + } continue } } @@ -2034,7 +2251,7 @@ func (c *Compiler) rewriteRuleHeadRefs() { rule.Head.Key = expr.Operand(0) } rule.Head.Reference[i] = expr.Operand(0) - rule.Body.Append(expr) + rule.Body = appendToBody(rule.Body, expr) } } @@ -2044,12 +2261,296 @@ func (c *Compiler) rewriteRuleHeadRefs() { } func (c *Compiler) checkVoidCalls() { + for _, name := range c.sorted { + c.err(checkVoidCalls(c.TypeEnv, c.Modules[name])...) + } +} + +func (c *Compiler) builtinLoc(ref Ref) *Builtin { + n := ref.String() + if b, ok := c.builtins[n]; ok { + return b + } + if b, ok := c.customBuiltins[n]; ok { + return b + } + return nil +} + +// isRefToKnownDefinedRule answers whether a rule (counting all incremental definitions) reference +// is known to evaluate to a value (not undefined). A rule reference is considered safe if it references +// a rule with no arguments (i.e. 
not a function) and: +// - The rule has a `default` value assigned +// - The rule is a multi-value rule — it generates a set that may be empty but not undefined +// - The rule is a "constant", meaning it has a single definition, a ground value and no body +func (c *Compiler) isRefToKnownDefinedRule(ref Ref) bool { + var matched *TreeNode + if len(ref) < 2 || !ref.HasPrefix(DefaultRootRef) { + return false + } + if matched = c.RuleTree.Find(ref); matched == nil || len(matched.Values) == 0 { + return false + } + first := matched.Values[0] + if len(first.Head.Args) > 0 { + return false + } + if first.Default || first.Head.RuleKind() == MultiValue { + return true + } + if len(matched.Values) == 1 { + return isConstantRule(first) + } + return slices.ContainsFunc(matched.Values[1:], func(r *Rule) bool { + return r.Default + }) +} + +// templateStringRewriter +type templateStringRewriter struct { + rule *Rule + gen *localVarGenerator + vis *VarVisitor + rewritten map[Var]Var + arity func(Ref) int + safeRuleRef func(Ref) bool + builtins builtinLocator + capsSupport bool +} + +func rewriterFromCompiler(c *Compiler) *templateStringRewriter { + return &templateStringRewriter{ + vis: NewVarVisitor(), + gen: c.localvargen, + builtins: c.builtinLoc, + arity: c.GetArity, + safeRuleRef: c.isRefToKnownDefinedRule, + rewritten: c.RewrittenVars, + capsSupport: c.capabilities.ContainsFeature(FeatureTemplateStrings) && + c.capabilities.ContainsBuiltin(InternalTemplateString.Name), + } +} + +func rewriterFromQueryCompiler(qc *queryCompiler, gen *localVarGenerator) *templateStringRewriter { + rw := rewriterFromCompiler(qc.compiler) + rw.gen = gen + return rw +} + +func (tsr *templateStringRewriter) Clear() *templateStringRewriter { + tsr.rule = nil + tsr.vis = tsr.vis.Clear() + return tsr +} + +// rewriteTemplateStrings rewrites template-string calls as they appear in bodies; e.g. rules, comprehensions, etc. 
+func (c *Compiler) rewriteTemplateStrings() { + tsr := rewriterFromCompiler(c) + modified := false for _, name := range c.sorted { mod := c.Modules[name] - for _, err := range checkVoidCalls(c.TypeEnv, mod) { - c.err(err) + WalkRules(mod, func(r *Rule) bool { + tsr = tsr.Clear() + safe := r.Head.Args.Vars() + + if len(r.Head.Args) > 0 { + tsr.vis = tsr.vis.WithParams(VarVisitorParams{SkipTemplateStrings: true}) + tsr.vis.WalkArgs(r.Head.Args) + } + + safe.Update(ReservedVars) + + modrec, safe, errs := rewriteTemplateStrings(tsr, safe, r.Body) + if modrec { + modified = true + } + c.err(errs...) + + if modrec, _, errs = rewriteTemplateStrings(tsr, safe, r.Head); modrec { + modified = true + } + c.err(errs...) + + return false + }) + } + if modified { + c.Required.addBuiltinSorted(InternalTemplateString) + } +} + +func rewriteTemplateStrings(tsr *templateStringRewriter, globals VarSet, x any) (bool, VarSet, Errors) { + var errs Errors + var modified bool + + // All output vars in the current body are safe, recursively + var safe VarSet + if b, ok := x.(Body); ok { + safe = outputVarsForBody(b, tsr.arity, globals, tsr.vis) + safe.Update(globals) + } else { + safe = globals.Copy() + } + + vis := &GenericVisitor{func(x any) bool { + var modrec bool + var errsrec Errors + switch x := x.(type) { + case *Term: + if _, ok := x.Value.(*TemplateString); ok { + modrec, errsrec = rewriteTemplateStringTerm(tsr, safe, x) + } + case *SetComprehension: + var s VarSet + modrec, s, errsrec = rewriteTemplateStrings(tsr, safe, x.Body) + if modrec { + modified = true + } + errs = append(errs, errsrec...) + + modrec, errsrec = rewriteTemplateStringTerm(tsr, s, x.Term) + case *ArrayComprehension: + var s VarSet + modrec, s, errsrec = rewriteTemplateStrings(tsr, safe, x.Body) + if modrec { + modified = true + } + errs = append(errs, errsrec...) 
+ + modrec, errsrec = rewriteTemplateStringTerm(tsr, s, x.Term) + case *ObjectComprehension: + var s VarSet + modrec, s, errsrec = rewriteTemplateStrings(tsr, safe, x.Body) + if modrec { + modified = true + } + errs = append(errs, errsrec...) + + modrec, errsrec = rewriteTemplateStringTerm(tsr, s, x.Key) + if modrec { + modified = true + } + errs = append(errs, errsrec...) + + modrec, errsrec = rewriteTemplateStringTerm(tsr, s, x.Value) + case *Every: + modrec, errsrec = rewriteTemplateStringTerm(tsr, safe, x.Domain) + if modrec { + modified = true + } + errs = append(errs, errsrec...) + + s := safe.Copy() + s.Update(x.KeyValueVars()) + modrec, _, errsrec = rewriteTemplateStrings(tsr, s, x.Body) + } + if modrec { + modified = true + } + errs = append(errs, errsrec...) + return false + }} + vis.Walk(x) + + return modified, safe, errs +} + +func rewriteTemplateStringTerm(tsr *templateStringRewriter, globals VarSet, t *Term) (bool, Errors) { + if ts, ok := t.Value.(*TemplateString); ok { + call, errs := rewriteTemplateString(tsr, globals, t.Loc(), ts) + if len(errs) != 0 { + return false, errs + } + t.Value = call + return true, nil + } + return false, nil +} + +type builtinLocator func(Ref) *Builtin + +func rewriteTemplateString(tsr *templateStringRewriter, safe VarSet, loc *Location, ts *TemplateString) (Call, Errors) { + if !tsr.capsSupport { + return nil, Errors{NewError(CompileErr, loc, "template-strings are not supported")} + } + + var errs Errors + terms := make([]*Term, 0, len(ts.Parts)) + + if len(ts.Parts) == 0 { + terms = append(terms, NewTerm(InternedEmptyStringValue).SetLocation(loc)) + } else { + vis := ClearOrNewVarVisitor(nil).WithParams(SafetyCheckVisitorParams) + for _, p := range ts.Parts { + switch p := p.(type) { + case *Expr: + var t *Term + if p.IsCall() { + // Assert that the call isn't for a known relation built-in + if bi := tsr.builtins(p.Operator()); bi != nil && bi.Relation { + errs = append(errs, NewError( + CompileErr, + t.Loc(), + 
"illegal call to relation built-in '%s' that may cause multiple outputs", bi.Name, + )) + continue + } + t = CallTerm(p.Terms.([]*Term)...) + } else { + var ok bool + t, ok = p.Terms.(*Term) + if !ok { + errs = append(errs, NewError( + CompileErr, + p.Location, + "unexpected template-string expression type: %T", p.Terms)) + continue + } + } + + if ref, ok := t.Value.(Ref); ok && tsr.safeRuleRef(ref) { + terms = append(terms, SetTerm(t)) + continue + } + + if _, ok := t.Value.(Var); ok { + terms = append(terms, SetTerm(t)) + continue + } + + vis = ClearOrNewVarVisitor(vis).WithParams(SafetyCheckVisitorParams) + vis.Walk(t) + vars := vis.Vars() + if vars.DiffCount(safe) > 0 { + unsafe := vars.Diff(safe) + for _, v := range unsafe.Sorted() { + if w, ok := tsr.rewritten[v]; ok { + v = w + } + errs = append(errs, NewError(CompileErr, t.Loc(), "var %v is undeclared", v)) + } + } + + loc := t.Loc() + x := NewTerm(tsr.gen.Generate()).SetLocation(loc) + capture := Equality.Expr(x, t).SetLocation(loc) + capture.With = p.With + terms = append(terms, SetComprehensionTerm(x, NewBody(capture)).SetLocation(loc)) + case *Term: + terms = append(terms, p) + default: + errs = append(errs, NewError( + CompileErr, + loc, + "expected only term or expression parts in template-string, got %T", p, + )) + return nil, errs + } } } + + call := InternalTemplateString.Call(ArrayTerm(terms...)).Value.(Call) + return call, errs } func (c *Compiler) rewritePrintCalls() { @@ -2061,26 +2562,33 @@ func (c *Compiler) rewritePrintCalls() { } } } else { + vis := varVisitorPool.Get() + for _, name := range c.sorted { - mod := c.Modules[name] - WalkRules(mod, func(r *Rule) bool { - safe := r.Head.Args.Vars() - safe.Update(ReservedVars) - vis := func(b Body) bool { - modrec, errs := rewritePrintCalls(c.localvargen, c.GetArity, safe, b) + WalkRules(c.Modules[name], func(r *Rule) bool { + vis = vis.Clear() + vis.vars.Update(ReservedVars) + if len(r.Head.Args) > 0 { + vis.WalkArgs(r.Head.Args) + } + + bodyVis 
:= func(b Body) bool { + modrec, errs := rewritePrintCalls(c.localvargen, c.GetArity, vis.vars, b) if modrec { modified = true } - for _, err := range errs { - c.err(err) + if !c.err(errs...) { + return true } return false } - WalkBodies(r.Head, vis) - WalkBodies(r.Body, vis) + WalkBodies(r.Head, bodyVis) + WalkBodies(r.Body, bodyVis) return false }) } + + varVisitorPool.Put(vis) } if modified { c.Required.addBuiltinSorted(Print) @@ -2124,7 +2632,7 @@ func rewritePrintCalls(gen *localVarGenerator, getArity func(Ref) int, globals V // those bodies only close over variables that are safe. for i := range body { if ContainsClosures(body[i]) { - safe := outputVarsForBody(body[:i], getArity, globals) + safe := outputVarsForBody(body[:i], getArity, globals, nil) safe.Update(globals) WalkClosures(body[i], func(x any) bool { var modrec bool @@ -2161,13 +2669,27 @@ func rewritePrintCalls(gen *localVarGenerator, getArity func(Ref) int, globals V modified = true var errs Errors - safe := outputVarsForBody(body[:i], getArity, globals) + safe := outputVarsForBody(body[:i], getArity, globals, nil) safe.Update(globals) + + // Fixes Issue #7647 by adding generated variables to the safe set + WalkVars(body[:i], func(v Var) bool { + if v.IsGenerated() { + safe.Add(v) + } + return false + }) + args := body[i].Operands() var vis *VarVisitor + if len(args) > 0 { + vis = varVisitorPool.Get() + defer varVisitorPool.Put(vis) + } + for j := range args { - vis = vis.ClearOrNew().WithParams(SafetyCheckVisitorParams) + vis = vis.Clear().WithParams(SafetyCheckVisitorParams) vis.Walk(args[j]) vars := vis.Vars() if vars.DiffCount(safe) > 0 { @@ -2285,23 +2807,22 @@ func isPrintCall(x *Expr) bool { func (c *Compiler) rewriteRefsInHead() { f := newEqualityFactory(c.localvargen) for _, name := range c.sorted { - mod := c.Modules[name] - WalkRules(mod, func(rule *Rule) bool { + WalkRules(c.Modules[name], func(rule *Rule) bool { if requiresEval(rule.Head.Key) { expr := f.Generate(rule.Head.Key) 
rule.Head.Key = expr.Operand(0) - rule.Body.Append(expr) + rule.Body = appendToBody(rule.Body, expr) } if requiresEval(rule.Head.Value) { expr := f.Generate(rule.Head.Value) rule.Head.Value = expr.Operand(0) - rule.Body.Append(expr) + rule.Body = appendToBody(rule.Body, expr) } for i := 0; i < len(rule.Head.Args); i++ { if requiresEval(rule.Head.Args[i]) { expr := f.Generate(rule.Head.Args[i]) rule.Head.Args[i] = expr.Operand(0) - rule.Body.Append(expr) + rule.Body = appendToBody(rule.Body, expr) } } return false @@ -2356,8 +2877,7 @@ func (c *Compiler) rewriteTestRuleEqualities() { f := newEqualityFactory(c.localvargen) for _, name := range c.sorted { - mod := c.Modules[name] - WalkRules(mod, func(rule *Rule) bool { + WalkRules(c.Modules[name], func(rule *Rule) bool { if strings.HasPrefix(string(rule.Head.Name), "test_") { rule.Body = rewriteTestEqualities(f, rule.Body) } @@ -2370,8 +2890,7 @@ func (c *Compiler) parseMetadataBlocks() { // Only parse annotations if rego.metadata built-ins are called regoMetadataCalled := false for _, name := range c.sorted { - mod := c.Modules[name] - WalkExprs(mod, func(expr *Expr) bool { + WalkExprs(c.Modules[name], func(expr *Expr) bool { if isRegoMetadataChainCall(expr) || isRegoMetadataRuleCall(expr) { regoMetadataCalled = true } @@ -2387,13 +2906,11 @@ func (c *Compiler) parseMetadataBlocks() { // NOTE: Possible optimization: only parse annotations for modules on the path of rego.metadata-calling module for _, name := range c.sorted { mod := c.Modules[name] - if len(mod.Annotations) == 0 { var errs Errors mod.Annotations, errs = parseAnnotations(mod.Comments) - errs = append(errs, attachAnnotationsNodes(mod)...) - for _, err := range errs { - c.err(err) + if !c.err(errs...) || !c.err(attachAnnotationsNodes(mod)...) 
{ + return } attachRuleAnnotations(mod) @@ -2409,11 +2926,8 @@ func (c *Compiler) rewriteRegoMetadataCalls() { _, ruleFuncAllowed := c.builtins[RegoMetadataRule.Name] for _, name := range c.sorted { - mod := c.Modules[name] - - WalkRules(mod, func(rule *Rule) bool { - var firstChainCall *Expr - var firstRuleCall *Expr + WalkRules(c.Modules[name], func(rule *Rule) bool { + var firstChainCall, firstRuleCall *Expr WalkExprs(rule, func(expr *Expr) bool { if chainFuncAllowed && firstChainCall == nil && isRegoMetadataChainCall(expr) { @@ -2436,14 +2950,13 @@ func (c *Compiler) rewriteRegoMetadataCalls() { chain, err := createMetadataChain(c.annotationSet.Chain(rule)) if err != nil { - c.err(err) - return false + return !c.err(err) } chain.Location = firstChainCall.Location eq := eqFactory.Generate(chain) metadataChainVar = eq.Operands()[0].Value.(Var) - body.Append(eq) + body = appendToBody(body, eq) } var metadataRuleVar Var @@ -2456,8 +2969,7 @@ func (c *Compiler) rewriteRegoMetadataCalls() { if a != nil { annotObj, err := a.toObject() if err != nil { - c.err(err) - return false + return !c.err(err) } metadataRuleTerm = NewTerm(*annotObj) } else { @@ -2468,19 +2980,14 @@ func (c *Compiler) rewriteRegoMetadataCalls() { metadataRuleTerm.Location = firstRuleCall.Location eq := eqFactory.Generate(metadataRuleTerm) metadataRuleVar = eq.Operands()[0].Value.(Var) - body.Append(eq) + body = appendToBody(body, eq) } - for _, expr := range rule.Body { - body.Append(expr) - } + body = appendToBody(body, rule.Body...) rule.Body = body vis := func(b Body) bool { - for _, err := range rewriteRegoMetadataCalls(&metadataChainVar, &metadataRuleVar, b, &c.RewrittenVars) { - c.err(err) - } - return false + return !c.err(rewriteRegoMetadataCalls(&metadataChainVar, &metadataRuleVar, b, &c.RewrittenVars)...) } WalkBodies(rule.Head, vis) WalkBodies(rule.Body, vis) @@ -2611,6 +3118,9 @@ func (c *Compiler) rewriteLocalVars() { // across else-branches. 
for rule := rule; rule != nil; rule = rule.Else { stack, errs := c.rewriteLocalVarsInRule(rule, unusedArgs, argsStack, gen) + if !c.err(errs...) { + return true + } if stack.assignment { assignment = true } @@ -2620,17 +3130,15 @@ func (c *Compiler) rewriteLocalVars() { delete(unusedArgs, arg) } } - - for _, err := range errs { - c.err(err) - } } if c.strict { // Report an error for each unused function argument for arg := range unusedArgs { if !arg.IsWildcard() { - c.err(NewError(CompileErr, rule.Head.Location, "unused argument %v. (hint: use _ (wildcard variable) instead)", arg)) + if !c.err(NewError(CompileErr, rule.Head.Location, "unused argument %v. (hint: use _ (wildcard variable) instead)", arg)) { + return true + } } } } @@ -2667,31 +3175,32 @@ func (c *Compiler) rewriteLocalVarsInRule(rule *Rule, unusedArgs VarSet, argsSta } NewGenericVisitor(nestedXform.Visit).Walk(rule.Head) - - for _, err := range nestedXform.errs { - c.err(err) - } + c.err(nestedXform.errs...) // NB(sr): This is a bit bogus -- Why not return them? // Rewrite assignments in body. 
- used = NewVarSet() + vis := NewVarVisitor() for _, t := range rule.Head.Ref()[1:] { - used.Update(t.Vars()) + if !IsScalar(t.Value) { + vis.Walk(t) + } } - if rule.Head.Key != nil { - used.Update(rule.Head.Key.Vars()) + if rule.Head.Key != nil && !IsScalar(rule.Head.Key.Value) { + vis.Walk(rule.Head.Key) } - if rule.Head.Value != nil { + if rule.Head.Value != nil && !IsScalar(rule.Head.Value.Value) { valueVars := rule.Head.Value.Vars() - used.Update(valueVars) + vis.vars.Update(valueVars) for arg := range unusedArgs { if valueVars.Contains(arg) { delete(unusedArgs, arg) } } } + + used = vis.Vars() } stack := argsStack.Copy() @@ -2766,19 +3275,21 @@ func (xform *rewriteNestedHeadVarLocalTransform) Visit(x any) bool { switch x := term.Value.(type) { case *object: + vis := NewGenericVisitor(xform.Visit) cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) { kcpy := k.Copy() - NewGenericVisitor(xform.Visit).Walk(kcpy) + vis.Walk(kcpy) vcpy := v.Copy() - NewGenericVisitor(xform.Visit).Walk(vcpy) + vis.Walk(vcpy) return kcpy, vcpy, nil }) term.Value = cpy stop = true case *set: + vis := NewGenericVisitor(xform.Visit) cpy, _ := x.Map(func(v *Term) (*Term, error) { vcpy := v.Copy() - NewGenericVisitor(xform.Visit).Walk(vcpy) + vis.Walk(vcpy) return vcpy, nil }) term.Value = cpy @@ -2792,6 +3303,9 @@ func (xform *rewriteNestedHeadVarLocalTransform) Visit(x any) bool { case *ObjectComprehension: xform.errs = rewriteDeclaredVarsInObjectComprehension(xform.gen, stack, x, xform.errs, xform.strict) stop = true + case *TemplateString: + xform.errs = rewriteDeclaredVarsInTemplateString(xform.gen, stack, x, xform.errs, xform.strict) + stop = true } maps.Copy(xform.RewrittenVars, stack.rewritten) @@ -2816,7 +3330,6 @@ func (xform rewriteHeadVarLocalTransform) Transform(x any) (any, error) { } func (c *Compiler) rewriteLocalArgVars(gen *localVarGenerator, stack *localDeclaredVars, rule *Rule) { - vis := &ruleArgLocalRewriter{ stack: stack, gen: gen, @@ -2826,9 +3339,7 @@ func (c 
*Compiler) rewriteLocalArgVars(gen *localVarGenerator, stack *localDecla Walk(vis, rule.Head.Args[i]) } - for i := range vis.errs { - c.err(vis.errs[i]) - } + c.err(vis.errs...) } type ruleArgLocalRewriter struct { @@ -2861,13 +3372,13 @@ func (vis *ruleArgLocalRewriter) Visit(x any) Visitor { Walk(vis, vcpy) return k, vcpy, nil }); err != nil { - vis.errs = append(vis.errs, NewError(CompileErr, t.Location, "%s", err.Error())) + vis.errs = append(vis.errs, newErrorString(CompileErr, t.Location, err.Error())) } else { t.Value = cpy } return nil - case Null, Boolean, Number, String, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Set: - // Scalars are no-ops. Comprehensions are handled above. Sets must not + case Null, Boolean, Number, String, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Set, *TemplateString: + // Scalars are no-ops. Comprehensions and template-strings are handled above. Sets must not // contain variables. return nil case Call: @@ -2956,6 +3467,10 @@ func (qc *queryCompiler) WithStageAfter(after string, stage QueryCompilerStageDe return qc } +func (qc *queryCompiler) WithStageAfterID(after StageID, stage QueryCompilerStageDefinition) QueryCompiler { + return qc.WithStageAfter(string(after), stage) +} + func (qc *queryCompiler) WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler { qc.unsafeBuiltins = unsafe return qc @@ -2991,7 +3506,7 @@ func (qc *queryCompiler) runStageAfter(metricName string, query Body, s QueryCom } type queryStage = struct { - name string + name StageID metricName string f func(*QueryContext, Body) (Body, error) } @@ -3004,20 +3519,21 @@ func (qc *queryCompiler) Compile(query Body) (Body, error) { query = query.Copy() stages := []queryStage{ - {"CheckKeywordOverrides", "query_compile_stage_check_keyword_overrides", qc.checkKeywordOverrides}, - {"ResolveRefs", "query_compile_stage_resolve_refs", qc.resolveRefs}, - {"RewriteLocalVars", "query_compile_stage_rewrite_local_vars", 
qc.rewriteLocalVars}, - {"CheckVoidCalls", "query_compile_stage_check_void_calls", qc.checkVoidCalls}, - {"RewritePrintCalls", "query_compile_stage_rewrite_print_calls", qc.rewritePrintCalls}, - {"RewriteExprTerms", "query_compile_stage_rewrite_expr_terms", qc.rewriteExprTerms}, - {"RewriteComprehensionTerms", "query_compile_stage_rewrite_comprehension_terms", qc.rewriteComprehensionTerms}, - {"RewriteWithValues", "query_compile_stage_rewrite_with_values", qc.rewriteWithModifiers}, - {"CheckUndefinedFuncs", "query_compile_stage_check_undefined_funcs", qc.checkUndefinedFuncs}, - {"CheckSafety", "query_compile_stage_check_safety", qc.checkSafety}, - {"RewriteDynamicTerms", "query_compile_stage_rewrite_dynamic_terms", qc.rewriteDynamicTerms}, - {"CheckTypes", "query_compile_stage_check_types", qc.checkTypes}, - {"CheckUnsafeBuiltins", "query_compile_stage_check_unsafe_builtins", qc.checkUnsafeBuiltins}, - {"CheckDeprecatedBuiltins", "query_compile_stage_check_deprecated_builtins", qc.checkDeprecatedBuiltins}, + {StageCheckKeywordOverrides, "query_compile_stage_check_keyword_overrides", qc.checkKeywordOverrides}, + {StageResolveRefs, "query_compile_stage_resolve_refs", qc.resolveRefs}, + {StageRewriteLocalVars, "query_compile_stage_rewrite_local_vars", qc.rewriteLocalVars}, + {StageRewriteTemplateStrings, "compile_stage_rewrite_template_strings", qc.rewriteTemplateStrings}, + {StageCheckVoidCalls, "query_compile_stage_check_void_calls", qc.checkVoidCalls}, + {StageRewritePrintCalls, "query_compile_stage_rewrite_print_calls", qc.rewritePrintCalls}, + {StageRewriteExprTerms, "query_compile_stage_rewrite_expr_terms", qc.rewriteExprTerms}, + {StageRewriteComprehensionTerms, "query_compile_stage_rewrite_comprehension_terms", qc.rewriteComprehensionTerms}, + {StageRewriteWithValues, "query_compile_stage_rewrite_with_values", qc.rewriteWithModifiers}, + {StageCheckUndefinedFuncs, "query_compile_stage_check_undefined_funcs", qc.checkUndefinedFuncs}, + {StageCheckSafety, 
"query_compile_stage_check_safety", qc.checkSafety}, + {StageRewriteDynamicTerms, "query_compile_stage_rewrite_dynamic_terms", qc.rewriteDynamicTerms}, + {StageCheckTypes, "query_compile_stage_check_types", qc.checkTypes}, + {StageCheckUnsafeBuiltins, "query_compile_stage_check_unsafe_builtins", qc.checkUnsafeBuiltins}, + {StageCheckDeprecatedBuiltins, "query_compile_stage_check_deprecated_builtins", qc.checkDeprecatedBuiltins}, } if qc.compiler.evalMode == EvalModeTopdown { stages = append(stages, queryStage{"BuildComprehensionIndex", "query_compile_stage_build_comprehension_index", qc.buildComprehensionIndices}) @@ -3031,7 +3547,7 @@ func (qc *queryCompiler) Compile(query Body) (Body, error) { if err != nil { return nil, qc.applyErrorLimit(err) } - for _, s := range qc.after[s.name] { + for _, s := range qc.after[string(s.name)] { query, err = qc.runStageAfter(s.MetricName, query, s.Stage) if err != nil { return nil, qc.applyErrorLimit(err) @@ -3130,6 +3646,15 @@ func (qc *queryCompiler) rewriteLocalVars(_ *QueryContext, body Body) (Body, err return body, nil } +func (qc *queryCompiler) rewriteTemplateStrings(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + tsr := rewriterFromQueryCompiler(qc, gen) + if _, _, errs := rewriteTemplateStrings(tsr, ReservedVars, body); len(errs) > 0 { + return nil, errs + } + return body, nil +} + func (qc *queryCompiler) rewritePrintCalls(_ *QueryContext, body Body) (Body, error) { if !qc.enablePrintStatements { _, cpy := erasePrintCallsInBody(body) @@ -3240,6 +3765,10 @@ func (ci *ComprehensionIndex) String() string { func buildComprehensionIndices(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, node Body, result map[*Term]*ComprehensionIndex) uint64 { var n uint64 cpy := candidates.Copy() + vis := varVisitorPool.Get() + + defer varVisitorPool.Put(vis) + WalkBodies(node, func(b Body) bool { for _, expr := range b { index := getComprehensionIndex(dbg, arity, cpy, 
rwVars, expr) @@ -3249,7 +3778,9 @@ func buildComprehensionIndices(dbg debug.Debug, arity func(Ref) int, candidates } // Any variables appearing in the expressions leading up to the comprehension // are fair-game to be used as index keys. - cpy.Update(expr.Vars(VarVisitorParams{SkipClosures: true, SkipRefCallHead: true})) + vis = vis.Clear().WithParams(VarVisitorParams{SkipClosures: true, SkipRefCallHead: true}) + vis.Walk(expr) + cpy.Update(vis.Vars()) } return false }) @@ -3257,7 +3788,6 @@ func buildComprehensionIndices(dbg debug.Debug, arity func(Ref) int, candidates } func getComprehensionIndex(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, expr *Expr) *ComprehensionIndex { - // Ignore everything except = expressions. Extract // the comprehension term from the expression. if !expr.IsEquality() || expr.Negated || len(expr.With) > 0 { @@ -3310,7 +3840,7 @@ func getComprehensionIndex(dbg debug.Debug, arity func(Ref) int, candidates VarS body = x.Body } - outputs := outputVarsForBody(body, arity, ReservedVars) + outputs := outputVarsForBody(body, arity, ReservedVars, nil) unsafe := body.Vars(SafetyCheckVisitorParams).Diff(outputs).Diff(ReservedVars) if len(unsafe) > 0 { @@ -3349,11 +3879,9 @@ func getComprehensionIndex(dbg debug.Debug, arity func(Ref) int, candidates VarS } result := make([]*Term, 0, len(indexVars)) - for v := range indexVars { result = append(result, NewTerm(v)) } - slices.SortFunc(result, TermValueCompare) debugRes := make([]*Term, len(result)) @@ -3444,9 +3972,11 @@ func (vis *comprehensionIndexNestedCandidateVisitor) visit(x any) bool { } if v, ok := x.(Value); ok && IsComprehension(v) { - varVis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true}) + varVis := varVisitorPool.Get().WithParams(VarVisitorParams{SkipRefHead: true}) varVis.Walk(v) vis.found = len(varVis.Vars().Intersect(vis.candidates)) > 0 + varVisitorPool.Put(varVis) + return true } @@ -3485,7 +4015,7 @@ func NewModuleTree(mods 
map[string]*Module) *ModuleTreeNode { c, ok := node.Children[x.Value] if !ok { var hide bool - if i == 1 && x.Value.Compare(SystemDocumentKey) == 0 { + if i == 1 && SystemDocumentKey.Equal(x.Value) { hide = true } c = &ModuleTreeNode{ @@ -3555,10 +4085,11 @@ func (n *ModuleTreeNode) DepthFirst(f func(*ModuleTreeNode) bool) { // rule path. type TreeNode struct { Key Value - Values []any + Values []*Rule Children map[Value]*TreeNode Sorted []Value Hide bool + Index RuleIndex } func (n *TreeNode) String() string { @@ -3591,7 +4122,7 @@ func NewRuleTree(mtree *ModuleTreeNode) *TreeNode { } root.DepthFirst(func(x *TreeNode) bool { - x.sort() + slices.SortFunc(x.Sorted, Value.Compare) return false }) @@ -3613,12 +4144,11 @@ func (n *TreeNode) add(path Ref, rule *Rule) { } // Size returns the number of rules in the tree. -func (n *TreeNode) Size() int { - s := len(n.Values) +func (n *TreeNode) Size() (s int) { for _, c := range n.Children { s += c.Size() } - return s + return s + len(n.Values) } // Child returns n's child with key k. @@ -3672,10 +4202,6 @@ func (n *TreeNode) DepthFirst(f func(*TreeNode) bool) { } } -func (n *TreeNode) sort() { - slices.SortFunc(n.Sorted, Value.Compare) -} - func treeNodeFromRef(ref Ref, rule *Rule) *TreeNode { depth := len(ref) - 1 key := ref[depth].Value @@ -3684,7 +4210,7 @@ func treeNodeFromRef(ref Ref, rule *Rule) *TreeNode { Children: nil, } if rule != nil { - node.Values = []any{rule} + node.Values = []*Rule{rule} } for i := len(ref) - 2; i >= 0; i-- { @@ -3704,15 +4230,14 @@ func (n *TreeNode) flattenChildren() []Ref { for _, sub := range n.Children { // we only want the children, so don't use n.DepthFirst() right away sub.DepthFirst(func(x *TreeNode) bool { for _, r := range x.Values { - rule := r.(*Rule) + rule := r ret.AddPrefix(rule.Ref()) } return false }) } - slices.SortFunc(ret.s, RefCompare) - return ret.s + return util.SortedFunc(ret.s, RefCompare) } // Graph represents the graph of dependencies between rules. 
@@ -3882,11 +4407,7 @@ func NewGraphTraversal(graph *Graph) *GraphTraversal { // Edges lists all dependency connections for a given node func (g *GraphTraversal) Edges(x util.T) []util.T { - r := []util.T{} - for v := range g.graph.Dependencies(x) { - r = append(r, v) - } - return r + return util.Keys(g.graph.Dependencies(x)) } // Visited returns whether a node has been visited, setting a node to visited if not @@ -3910,9 +4431,9 @@ type unsafeVars map[*Expr]VarSet func (vs unsafeVars) Add(e *Expr, v Var) { if u, ok := vs[e]; ok { - u[v] = struct{}{} + u[v] = struct{ *Location }{} } else { - vs[e] = VarSet{v: struct{}{}} + vs[e] = VarSet{v: struct{ *Location }{}} } } @@ -3930,7 +4451,6 @@ func (vs unsafeVars) Update(o unsafeVars) { } func (vs unsafeVars) Vars() (result []unsafeVarLoc) { - locs := map[Var]*Location{} // If var appears in multiple sets then pick first by location. @@ -3943,17 +4463,12 @@ func (vs unsafeVars) Vars() (result []unsafeVarLoc) { } for v, loc := range locs { - result = append(result, unsafeVarLoc{ - Var: v, - Loc: loc, - }) + result = append(result, unsafeVarLoc{Var: v, Loc: loc}) } - slices.SortFunc(result, func(a, b unsafeVarLoc) int { + return util.SortedFunc(result, func(a, b unsafeVarLoc) int { return a.Loc.Compare(b.Loc) }) - - return result } func (vs unsafeVars) Slice() (result []unsafePair) { @@ -3987,7 +4502,8 @@ func reorderBodyForSafety(builtins map[string]*Builtin, arity func(Ref) int, glo unsafe := make(unsafeVars, len(bodyVars)-len(safe)) for _, e := range body { - vis.Clear().WithParams(SafetyCheckVisitorParams).Walk(e) + vis = vis.Clear().WithParams(SafetyCheckVisitorParams) + vis.Walk(e) for v := range vis.Vars() { if _, ok := safe[v]; !ok { unsafe.Add(e, v) @@ -3996,7 +4512,8 @@ func reorderBodyForSafety(builtins map[string]*Builtin, arity func(Ref) int, glo } reordered := make(Body, 0, len(body)) - output := VarSet{} + output := NewVarSet() + unsVis := varVisitorPool.Get() for { n := len(reordered) @@ -4006,13 +4523,15 
@@ func reorderBodyForSafety(builtins map[string]*Builtin, arity func(Ref) int, glo continue } - ovs := outputVarsForExpr(e, arity, safe, output) + ovs := outputVarsForExpr(e, arity, safe, output, vis) // check closures: is this expression closing over variables that // haven't been made safe by what's already included in `reordered`? - vs := unsafeVarsInClosures(e) - cv := vs.Intersect(bodyVars).Diff(globals) - ob := outputVarsForBody(reordered, arity, safe) + unsafeVarsInClosures(e, unsVis) + cv := unsVis.Vars().Intersect(bodyVars).Diff(globals) + unsVis.Clear() + + ob := outputVarsForBody(reordered, arity, safe, vis) if cv.DiffCount(ob) > 0 { uv := cv.Diff(ob) @@ -4040,26 +4559,25 @@ func reorderBodyForSafety(builtins map[string]*Builtin, arity func(Ref) int, glo } } + varVisitorPool.Put(unsVis) + // Recursively visit closures and perform the safety checks on them. // Update the globals at each expression to include the variables that could // be closed over. g := globals.Copy() - xform := &bodySafetyTransformer{ - builtins: builtins, - arity: arity, - } - gvis := &GenericVisitor{} + xform := newBodySafetyTransformer(builtins, arity) + xform.gv = NewGenericVisitor(xform.Visit) + for i, e := range reordered { if i > 0 { + vis = vis.Clear().WithParams(SafetyCheckVisitorParams) vis.Walk(reordered[i-1]) g.Update(vis.Vars()) - vis.Clear().WithParams(SafetyCheckVisitorParams) } xform.current = e xform.globals = g xform.unsafe = unsafe - gvis.f = xform.Visit - gvis.Walk(e) + xform.gv.Walk(e) } return reordered, unsafe @@ -4071,18 +4589,29 @@ type bodySafetyTransformer struct { current *Expr globals VarSet unsafe unsafeVars + gv *GenericVisitor +} + +func newBodySafetyTransformer(builtins map[string]*Builtin, arity func(Ref) int) *bodySafetyTransformer { + return &bodySafetyTransformer{ + builtins: builtins, + arity: arity, + } } func (xform *bodySafetyTransformer) Visit(x any) bool { + if xform.gv == nil { + xform.gv = NewGenericVisitor(xform.Visit) + } switch term := 
x.(type) { case *Term: switch x := term.Value.(type) { case *object: cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) { kcpy := k.Copy() - NewGenericVisitor(xform.Visit).Walk(kcpy) + xform.gv.Walk(kcpy) vcpy := v.Copy() - NewGenericVisitor(xform.Visit).Walk(vcpy) + xform.gv.Walk(vcpy) return kcpy, vcpy, nil }) term.Value = cpy @@ -4090,7 +4619,7 @@ func (xform *bodySafetyTransformer) Visit(x any) bool { case *set: cpy, _ := x.Map(func(v *Term) (*Term, error) { vcpy := v.Copy() - NewGenericVisitor(xform.Visit).Walk(vcpy) + xform.gv.Walk(vcpy) return vcpy, nil }) term.Value = cpy @@ -4151,10 +4680,8 @@ func (xform *bodySafetyTransformer) reorderSetComprehensionSafety(sc *SetCompreh // unsafeVarsInClosures collects vars that are contained in closures within // this expression. -func unsafeVarsInClosures(e *Expr) VarSet { - vs := VarSet{} +func unsafeVarsInClosures(e *Expr, vis *VarVisitor) { WalkClosures(e, func(x any) bool { - vis := &VarVisitor{vars: vs} if ev, ok := x.(*Every); ok { vis.WalkBody(ev.Body) return true @@ -4162,21 +4689,23 @@ func unsafeVarsInClosures(e *Expr) VarSet { vis.Walk(x) return true }) - return vs } // OutputVarsFromBody returns all variables which are the "output" for // the given body. For safety checks this means that they would be // made safe by the body. 
func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet { - return outputVarsForBody(body, c.GetArity, safe) + return outputVarsForBody(body, c.GetArity, safe, nil) } -func outputVarsForBody(body Body, arity func(Ref) int, safe VarSet) VarSet { +func outputVarsForBody(body Body, arity func(Ref) int, safe VarSet, vis *VarVisitor) VarSet { o := safe.Copy() output := VarSet{} + + vis = ClearOrNewVarVisitor(vis) + for _, e := range body { - o.Update(outputVarsForExpr(e, arity, o, output)) + o.Update(outputVarsForExpr(e, arity, o, output, vis)) } return o.Diff(safe) } @@ -4185,20 +4714,22 @@ func outputVarsForBody(body Body, arity func(Ref) int, safe VarSet) VarSet { // the given expression. For safety checks this means that they would be // made safe by the expr. func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet { - return outputVarsForExpr(expr, c.GetArity, safe, VarSet{}) + return outputVarsForExpr(expr, c.GetArity, safe, VarSet{}, nil) } -func outputVarsForExpr(expr *Expr, arity func(Ref) int, safe VarSet, output VarSet) VarSet { +func outputVarsForExpr(expr *Expr, arity func(Ref) int, safe VarSet, output VarSet, vis *VarVisitor) VarSet { // Negated expressions must be safe. if expr.Negated { return VarSet{} } - var vis *VarVisitor + if len(expr.With) > 0 { + vis = ClearOrNewVarVisitor(vis).WithParams(SafetyCheckVisitorParams) + } // With modifier inputs must be safe. 
for _, with := range expr.With { - vis = vis.ClearOrNew().WithParams(SafetyCheckVisitorParams) + vis = vis.Clear().WithParams(SafetyCheckVisitorParams) vis.Walk(with) if vis.Vars().DiffCount(safe) > 0 { return VarSet{} @@ -4206,8 +4737,11 @@ func outputVarsForExpr(expr *Expr, arity func(Ref) int, safe VarSet, output VarS } switch terms := expr.Terms.(type) { + case *TemplateString: + // Template-expressions have no output vars + return VarSet{} case *Term: - return outputVarsForTerms(expr, safe) + return outputVarsForTerms(expr, safe, nil) case []*Term: if expr.IsEquality() { return outputVarsForExprEq(expr, safe, output) @@ -4225,7 +4759,7 @@ func outputVarsForExpr(expr *Expr, arity func(Ref) int, safe VarSet, output VarS return outputVarsForExprCall(expr, ar, safe, terms, vis, output) case *Every: - return outputVarsForTerms(terms.Domain, safe) + return outputVarsForTerms(terms.Domain, safe, output) default: panic("illegal expression") } @@ -4236,7 +4770,7 @@ func outputVarsForExprEq(expr *Expr, safe VarSet, output VarSet) VarSet { return safe } - output.Update(outputVarsForTerms(expr, safe)) + output = outputVarsForTerms(expr, safe, output) output.Update(safe) output.Update(Unify(output, expr.Operand(0), expr.Operand(1))) @@ -4250,7 +4784,7 @@ func outputVarsForExprEq(expr *Expr, safe VarSet, output VarSet) VarSet { func outputVarsForExprCall(expr *Expr, arity int, safe VarSet, terms []*Term, vis *VarVisitor, output VarSet) VarSet { clear(output) - output.Update(outputVarsForTerms(expr, safe)) + output = outputVarsForTerms(expr, safe, output) numInputTerms := arity + 1 if numInputTerms >= len(terms) { @@ -4263,7 +4797,8 @@ func outputVarsForExprCall(expr *Expr, arity int, safe VarSet, terms []*Term, vi SkipObjectKeys: true, SkipRefHead: true, } - vis = vis.ClearOrNew().WithParams(params) + + vis = ClearOrNewVarVisitor(vis).WithParams(params) vis.WalkArgs(Args(terms[:numInputTerms])) unsafe := vis.Vars().Diff(output).DiffCount(safe) @@ -4277,11 +4812,13 @@ func 
outputVarsForExprCall(expr *Expr, arity int, safe VarSet, terms []*Term, vi return output } -func outputVarsForTerms(expr any, safe VarSet) VarSet { - output := VarSet{} +func outputVarsForTerms(expr any, safe, output VarSet) VarSet { + if output == nil { + output = VarSet{} + } WalkTerms(expr, func(x *Term) bool { switch r := x.Value.(type) { - case *SetComprehension, *ArrayComprehension, *ObjectComprehension: + case *SetComprehension, *ArrayComprehension, *ObjectComprehension, *TemplateString: return true case Ref: if !isRefSafe(r, safe) { @@ -4330,22 +4867,26 @@ func newLocalVarGeneratorForModuleSet(sorted []string, modules map[string]*Modul for _, key := range sorted { vis.Walk(modules[key]) } - return &localVarGenerator{exclude: vis.vars, next: 0} + return &localVarGenerator{exclude: vis.vars, suffix: LocalVarPrefix} } func newLocalVarGenerator(suffix string, node any) *localVarGenerator { vis := NewVarVisitor() vis.Walk(node) - return &localVarGenerator{exclude: vis.vars, suffix: suffix, next: 0} + return &localVarGenerator{exclude: vis.vars, suffix: LocalVarPrefix + suffix} } func (l *localVarGenerator) Generate() Var { + buf := make([]byte, 0, len(l.suffix)+util.NumDigitsInt(l.next)+2) for { - result := Var(LocalVarPrefix + l.suffix + strconv.Itoa(l.next) + "__") + buf = append(util.AppendInt(append(buf, l.suffix...), l.next), "__"...) 
+ result := Var(util.ByteSliceToString(buf)) l.next++ if !l.exclude.Contains(result) { return result } + + buf = buf[:0] } } @@ -4376,8 +4917,7 @@ func requiresEval(x *Term) bool { } func resolveRef(globals map[Var]*usedRef, ignore *declaredVarStack, ref Ref) Ref { - - r := Ref{} + r := make(Ref, 0, len(ref)) for i, x := range ref { switch v := x.Value.(type) { case Var: @@ -4595,6 +5135,21 @@ func resolveRefsInTerm(globals map[Var]*usedRef, ignore *declaredVarStack, term cpy.Value = sc ignore.Pop() return &cpy + case *TemplateString: + ts := &TemplateString{} + if len(v.Parts) > 0 { + ts.Parts = make([]Node, 0, len(v.Parts)) + } + for _, p := range v.Parts { + if expr, ok := p.(*Expr); ok { + ts.Parts = append(ts.Parts, resolveRefsInExpr(globals, ignore, expr)) + } else { + ts.Parts = append(ts.Parts, p) + } + } + cpy := *term + cpy.Value = ts + return &cpy default: return term } @@ -4698,26 +5253,26 @@ func rewriteComprehensionTerms(f *equalityFactory, node any) (any, error) { if requiresEval(x.Term) { expr := f.Generate(x.Term) x.Term = expr.Operand(0) - x.Body.Append(expr) + x.Body = appendToBody(x.Body, expr) } return x, nil case *SetComprehension: if requiresEval(x.Term) { expr := f.Generate(x.Term) x.Term = expr.Operand(0) - x.Body.Append(expr) + x.Body = appendToBody(x.Body, expr) } return x, nil case *ObjectComprehension: if requiresEval(x.Key) { expr := f.Generate(x.Key) x.Key = expr.Operand(0) - x.Body.Append(expr) + x.Body = appendToBody(x.Body, expr) } if requiresEval(x.Value) { expr := f.Generate(x.Value) x.Value = expr.Operand(0) - x.Body.Append(expr) + x.Body = appendToBody(x.Body, expr) } return x, nil } @@ -4772,7 +5327,7 @@ func rewriteTestEqualities(f *equalityFactory, body Body) Body { every.Body = rewriteTestEqualities(f, every.Body) } } - result = appendExpr(result, expr) + result = appendToBody(result, expr) } return result } @@ -4817,19 +5372,15 @@ func rewriteDynamics(f *equalityFactory, body Body) Body { return result } -func 
appendExpr(body Body, expr *Expr) Body { - body.Append(expr) - return body -} - func rewriteDynamicsEqExpr(f *equalityFactory, expr *Expr, result Body) Body { if !validEqAssignArgCount(expr) { - return appendExpr(result, expr) + return appendToBody(result, expr) } terms := expr.Terms.([]*Term) result, terms[1] = rewriteDynamicsInTerm(expr, f, terms[1], result) result, terms[2] = rewriteDynamicsInTerm(expr, f, terms[2], result) - return appendExpr(result, expr) + result.Append(expr) + return result } func rewriteDynamicsCallExpr(f *equalityFactory, expr *Expr, result Body) Body { @@ -4837,20 +5388,23 @@ func rewriteDynamicsCallExpr(f *equalityFactory, expr *Expr, result Body) Body { for i := 1; i < len(terms); i++ { result, terms[i] = rewriteDynamicsOne(expr, f, terms[i], result) } - return appendExpr(result, expr) + result.Append(expr) + return result } func rewriteDynamicsEveryExpr(f *equalityFactory, expr *Expr, result Body) Body { ev := expr.Terms.(*Every) result, ev.Domain = rewriteDynamicsOne(expr, f, ev.Domain, result) ev.Body = rewriteDynamics(f, ev.Body) - return appendExpr(result, expr) + result.Append(expr) + return result } func rewriteDynamicsTermExpr(f *equalityFactory, expr *Expr, result Body) Body { term := expr.Terms.(*Term) result, expr.Terms = rewriteDynamicsInTerm(expr, f, term, result) - return appendExpr(result, expr) + result.Append(expr) + return result } func rewriteDynamicsInTerm(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { @@ -4937,27 +5491,69 @@ func rewriteDynamicsComprehensionBody(original *Expr, f *equalityFactory, body B func rewriteExprTermsInHead(gen *localVarGenerator, rule *Rule) { for i := range rule.Head.Args { support, output := expandExprTerm(gen, rule.Head.Args[i]) - for j := range support { - rule.Body.Append(support[j]) - } + rule.Body = appendToBody(rule.Body, support...) 
rule.Head.Args[i] = output } if rule.Head.Key != nil { support, output := expandExprTerm(gen, rule.Head.Key) - for i := range support { - rule.Body.Append(support[i]) - } + rule.Body = appendToBody(rule.Body, support...) rule.Head.Key = output } if rule.Head.Value != nil { support, output := expandExprTerm(gen, rule.Head.Value) - for i := range support { - rule.Body.Append(support[i]) - } + rule.Body = appendToBody(rule.Body, support...) rule.Head.Value = output } } +// isEmptyBody true for a rule like `pi := 3.14 if { true}` +func isEmptyBody(body Body) bool { + if len(body) == 1 { + if term, ok := body[0].Terms.(*Term); ok { + return Boolean(true).Equal(term.Value) + } + } + + return false +} + +func isConstantRule(rule *Rule) bool { + if isEmptyBody(rule.Body) { + switch v := rule.Head.Value.Value.(type) { + case String, Var, Number, Boolean, Null: + return true + case *Array, *object, Set: + return v.IsGround() + } + } + return false +} + +// appendToBody inlines Body.Append and adds additional logic for +// replacing a single 'true' expression (i.e an empty body) with +// the first expression to be appended, while appending the rest +// of the expressions as normal. Additionally accepts multiple +// expressions to append, which potentially reduces allocations +// in larger appends. +func appendToBody(body Body, exprs ...*Expr) Body { + if len(exprs) == 0 { + return body + } + + blen := len(body) + if blen == 1 && isEmptyBody(body) { + // body will no longer be empty, so instead of appending, + // replace the 'true' expression with the new expression. + exprs[0].Index = 0 + body[0], exprs = exprs[0], exprs[1:] + } + for i, expr := range exprs { + expr.Index = blen + i + } + + return append(body, exprs...) 
+} + func rewriteExprTermsInBody(gen *localVarGenerator, body Body) Body { cpy := make(Body, 0, len(body)) for i := range body { @@ -4982,9 +5578,8 @@ func expandExpr(gen *localVarGenerator, expr *Expr) (result []*Expr) { extras[i].With = expr.With } } - result = append(result, extras...) expr.Terms = term - result = append(result, expr) + result = append(append(result, extras...), expr) case []*Term: for i := 1; i < len(terms); i++ { var extras []*Expr @@ -5024,6 +5619,7 @@ func connectGeneratedExprs(parent *Expr, children ...*Expr) { func expandExprTerm(gen *localVarGenerator, term *Term) (support []*Expr, output *Term) { output = term + switch v := term.Value.(type) { case Call: for i := 1; i < len(v); i++ { @@ -5057,30 +5653,19 @@ func expandExprTerm(gen *localVarGenerator, term *Term) (support []*Expr, output output = NewTerm(cpy).SetLocation(term.Location) case *ArrayComprehension: support, term := expandExprTerm(gen, v.Term) - for i := range support { - v.Body.Append(support[i]) - } v.Term = term - v.Body = rewriteExprTermsInBody(gen, v.Body) + v.Body = rewriteExprTermsInBody(gen, appendToBody(v.Body, support...)) case *SetComprehension: support, term := expandExprTerm(gen, v.Term) - for i := range support { - v.Body.Append(support[i]) - } v.Term = term - v.Body = rewriteExprTermsInBody(gen, v.Body) + v.Body = rewriteExprTermsInBody(gen, appendToBody(v.Body, support...)) case *ObjectComprehension: support, key := expandExprTerm(gen, v.Key) - for i := range support { - v.Body.Append(support[i]) - } + v.Body = appendToBody(v.Body, support...) 
v.Key = key support, value := expandExprTerm(gen, v.Value) - for i := range support { - v.Body.Append(support[i]) - } v.Value = value - v.Body = rewriteExprTermsInBody(gen, v.Body) + v.Body = rewriteExprTermsInBody(gen, appendToBody(v.Body, support...)) } return } @@ -5498,7 +6083,7 @@ func rewriteEveryStatement(g *localVarGenerator, stack *localDeclaredVars, expr if v := every.Key.Value.(Var); !v.IsWildcard() { gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) if err != nil { - return nil, append(errs, NewError(CompileErr, every.Loc(), "%s", err.Error())) + return nil, append(errs, newErrorString(CompileErr, every.Loc(), err.Error())) } every.Key.Value = gv } @@ -5510,7 +6095,7 @@ func rewriteEveryStatement(g *localVarGenerator, stack *localDeclaredVars, expr if v := every.Value.Value.(Var); !v.IsWildcard() { gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) if err != nil { - return nil, append(errs, NewError(CompileErr, every.Loc(), "%s", err.Error())) + return nil, append(errs, newErrorString(CompileErr, every.Loc(), err.Error())) } every.Value.Value = gv } @@ -5528,7 +6113,7 @@ func rewriteSomeDeclStatement(g *localVarGenerator, stack *localDeclaredVars, ex switch v := decl.Symbols[i].Value.(type) { case Var: if _, err := rewriteDeclaredVar(g, stack, v, declaredVar); err != nil { - return nil, append(errs, NewError(CompileErr, decl.Loc(), "%s", err.Error())) + return nil, append(errs, newErrorString(CompileErr, decl.Loc(), err.Error())) } case Call: var key, val, container *Term @@ -5554,11 +6139,11 @@ func rewriteSomeDeclStatement(g *localVarGenerator, stack *localDeclaredVars, ex RefTerm(VarTerm(Equality.Name)), val, rhs, } - output := VarSet{} + output := NewVarSet() for _, v0 := range outputVarsForExprEq(e, container.Vars(), output).Sorted() { if _, err := rewriteDeclaredVar(g, stack, v0, declaredVar); err != nil { - return nil, append(errs, NewError(CompileErr, decl.Loc(), "%s", err.Error())) + return nil, append(errs, 
newErrorString(CompileErr, decl.Loc(), err.Error())) } } return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict) @@ -5612,7 +6197,7 @@ func rewriteDeclaredAssignment(g *localVarGenerator, stack *localDeclaredVars, e switch v := t.Value.(type) { case Var: if gv, err := rewriteDeclaredVar(g, stack, v, assignedVar); err != nil { - errs = append(errs, NewError(CompileErr, t.Location, "%s", err.Error())) + errs = append(errs, newErrorString(CompileErr, t.Location, err.Error())) } else { t.Value = gv } @@ -5627,7 +6212,7 @@ func rewriteDeclaredAssignment(g *localVarGenerator, stack *localDeclaredVars, e case Ref: if RootDocumentRefs.Contains(t) { if gv, err := rewriteDeclaredVar(g, stack, v[0].Value.(Var), assignedVar); err != nil { - errs = append(errs, NewError(CompileErr, t.Location, "%s", err.Error())) + errs = append(errs, newErrorString(CompileErr, t.Location, err.Error())) } else { t.Value = gv } @@ -5739,9 +6324,20 @@ func rewriteDeclaredVarsInWithRecursive(g *localVarGenerator, stack *localDeclar return rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict) } +func rewriteDeclaredVarsInTemplateString(g *localVarGenerator, stack *localDeclaredVars, ts *TemplateString, errs Errors, strict bool) Errors { + for i, p := range ts.Parts { + if expr, ok := p.(*Expr); ok { + stack.Push() + ts.Parts[i], errs = rewriteDeclaredVarsInExpr(g, stack, expr, errs, strict) + stack.Pop() + } + } + + return errs +} + func rewriteDeclaredVarsInArrayComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ArrayComprehension, errs Errors, strict bool) Errors { - used := NewVarSet() - used.Update(v.Term.Vars()) + used := v.Term.Vars() stack.Push() v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) @@ -5751,8 +6347,7 @@ func rewriteDeclaredVarsInArrayComprehension(g *localVarGenerator, stack *localD } func rewriteDeclaredVarsInSetComprehension(g *localVarGenerator, stack *localDeclaredVars, v *SetComprehension, errs Errors, strict 
bool) Errors { - used := NewVarSet() - used.Update(v.Term.Vars()) + used := v.Term.Vars() stack.Push() v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) @@ -5873,7 +6468,7 @@ func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr // and edge case anyways. if child := targetNode.Child(ref[len(ref)-1].Value); child != nil { for _, v := range child.Values { - if len(v.(*Rule).Head.Args) > 0 { + if len(v.Head.Args) > 0 { if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { return false, err // err may be nil } @@ -5887,7 +6482,7 @@ func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr // TODO: check that target ref doesn't exist? if valueNode := c.RuleTree.Find(r); valueNode != nil { for _, v := range valueNode.Values { - if len(v.(*Rule).Head.Args) > 0 { + if len(v.Head.Args) > 0 { return false, nil } } @@ -5895,7 +6490,6 @@ func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr } case isInputRef(target): // ok, valid case isBuiltinRefOrVar: - // NOTE(sr): first we ensure that parsed Var builtins (`count`, `concat`, etc) // are rewritten to their proper Ref convention if v, ok := target.Value.(Var); ok { @@ -5951,30 +6545,23 @@ func validateWithFunctionValue(bs map[string]*Builtin, unsafeMap map[string]stru } func isInputRef(term *Term) bool { - if ref, ok := term.Value.(Ref); ok { - if ref.HasPrefix(InputRootRef) { - return true - } - } - return false + ref, ok := term.Value.(Ref) + return ok && ref.HasPrefix(InputRootRef) } func isDataRef(term *Term) bool { - if ref, ok := term.Value.(Ref); ok { - if ref.HasPrefix(DefaultRootRef) { - return true - } - } - return false + ref, ok := term.Value.(Ref) + return ok && ref.HasPrefix(DefaultRootRef) } func isBuiltinRefOrVar(bs map[string]*Builtin, unsafeBuiltinsMap map[string]struct{}, term *Term) (bool, *Error) { switch v := term.Value.(type) { case Ref, Var: - 
if _, ok := unsafeBuiltinsMap[v.String()]; ok { + vs := v.String() + if _, ok := unsafeBuiltinsMap[vs]; ok { return false, NewError(CompileErr, term.Location, "with keyword replacing built-in function: target must not be unsafe: %q", v) } - _, ok := bs[v.String()] + _, ok := bs[vs] return ok, nil } return false, nil @@ -6020,9 +6607,7 @@ func safetyErrorSlice(unsafe unsafeVars, rewritten map[Var]Var) (result Errors) // If the expression contains unsafe generated variables, report which // expressions are unsafe instead of the variables that are unsafe (since // the latter are not meaningful to the user.) - pairs := unsafe.Slice() - - slices.SortFunc(pairs, func(a, b unsafePair) int { + pairs := util.SortedFunc(unsafe.Slice(), func(a, b unsafePair) int { return a.Expr.Location.Compare(b.Expr.Location) }) @@ -6115,6 +6700,5 @@ func (rs *refSet) Sorted() []*Term { for i := range rs.s { terms[i] = NewTerm(rs.s[i]) } - slices.SortFunc(terms, TermValueCompare) - return terms + return util.SortedFunc(terms, TermValueCompare) } diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go index 7d81d45e6d..4ea122f3cb 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go @@ -33,7 +33,8 @@ func CompileModulesWithOpt(modules map[string]string, opts CompileOpts) (*Compil compiler := NewCompiler(). WithDefaultRegoVersion(opts.ParserOptions.RegoVersion). - WithEnablePrintStatements(opts.EnablePrintStatements) + WithEnablePrintStatements(opts.EnablePrintStatements). 
+ WithCapabilities(opts.ParserOptions.Capabilities) compiler.Compile(parsed) if compiler.Failed() { diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go b/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go index 685cc6b694..b119f80eac 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go @@ -63,9 +63,9 @@ func checkDocumentConflicts(node *TreeNode, exists func([]string) (bool, error), if len(node.Values) > 0 { s := strings.Join(path, "/") if ok, err := exists(path); err != nil { - return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflict check for data path %v: %v", s, err.Error())} + return Errors{NewError(CompileErr, node.Values[0].Loc(), "conflict check for data path %v: %v", s, err.Error())} } else if ok { - return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflicting rule for data path %v found", s)} + return Errors{NewError(CompileErr, node.Values[0].Loc(), "conflicting rule for data path %v found", s)} } } diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/env.go b/vendor/github.com/open-policy-agent/opa/v1/ast/env.go index 12d4be8918..3a3e793778 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/env.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/env.go @@ -29,6 +29,7 @@ func newTypeEnv(f func() *typeChecker) *TypeEnv { } // Get returns the type of x. +// // Deprecated: Use GetByValue or GetByRef instead, as they are more efficient. func (env *TypeEnv) Get(x any) types.Type { if term, ok := x.(*Term); ok { @@ -53,15 +54,14 @@ func (env *TypeEnv) GetByValue(v Value) types.Type { return types.B case Number: return types.N - case String: + case String, *TemplateString: return types.S // Composites. 
case *Array: static := make([]types.Type, x.Len()) for i := range static { - tpe := env.GetByValue(x.Elem(i).Value) - static[i] = tpe + static[i] = env.GetByValue(x.Elem(i).Value) } var dynamic types.Type @@ -79,17 +79,13 @@ func (env *TypeEnv) GetByValue(v Value) types.Type { x.Foreach(func(k, v *Term) { if IsConstant(k.Value) { - kjson, err := JSON(k.Value) - if err == nil { - tpe := env.GetByValue(v.Value) - static = append(static, types.NewStaticProperty(kjson, tpe)) + if kjson, err := JSON(k.Value); err == nil { + static = append(static, types.NewStaticProperty(kjson, env.GetByValue(v.Value))) return } } // Can't handle it as a static property, fallback to dynamic - typeK := env.GetByValue(k.Value) - typeV := env.GetByValue(v.Value) - dynamic = types.NewDynamicProperty(typeK, typeV) + dynamic = types.NewDynamicProperty(env.GetByValue(k.Value), env.GetByValue(v.Value)) }) if len(static) == 0 && dynamic == nil { @@ -98,7 +94,7 @@ func (env *TypeEnv) GetByValue(v Value) types.Type { return types.NewObject(static, dynamic) - case Set: + case *set: var tpe types.Type x.Foreach(func(elem *Term) { tpe = types.Or(tpe, env.GetByValue(elem.Value)) @@ -161,12 +157,13 @@ func (env *TypeEnv) GetByRef(ref Ref) types.Type { } func (env *TypeEnv) getRefFallback(ref Ref) types.Type { - if env.next != nil { return env.next.GetByRef(ref) } if RootDocumentNames.Contains(ref[0]) { + // types.A is an empty types.Any + // this is used to represent a potential non-local reference return types.A } @@ -298,15 +295,11 @@ func (n *typeTreeNode) PutOne(key Value, tpe types.Type) { func (n *typeTreeNode) Put(path Ref, tpe types.Type) { curr := n for _, term := range path { - c, ok := curr.children.Get(term.Value) - - var child *typeTreeNode + child, ok := curr.children.Get(term.Value) if !ok { child = newTypeTree() child.key = term.Value curr.children.Put(child.key, child) - } else { - child = c } curr = child @@ -320,23 +313,18 @@ func (n *typeTreeNode) Put(path Ref, tpe types.Type) { 
func (n *typeTreeNode) Insert(path Ref, tpe types.Type, env *TypeEnv) { curr := n for i, term := range path { - c, ok := curr.children.Get(term.Value) - - var child *typeTreeNode + child, ok := curr.children.Get(term.Value) if !ok { child = newTypeTree() child.key = term.Value curr.children.Put(child.key, child) - } else { - child = c - if child.value != nil && i+1 < len(path) { - // If child has an object value, merge the new value into it. - if o, ok := child.value.(*types.Object); ok { - var err error - child.value, err = insertIntoObject(o, path[i+1:], tpe, env) - if err != nil { - panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) - } + } else if child.value != nil && i+1 < len(path) { + // If child has an object value, merge the new value into it. + if o, ok := child.value.(*types.Object); ok { + var err error + child.value, err = insertIntoObject(o, path[i+1:], tpe, env) + if err != nil { + panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) } } } @@ -348,8 +336,7 @@ func (n *typeTreeNode) Insert(path Ref, tpe types.Type, env *TypeEnv) { if _, ok := tpe.(*types.Object); ok && curr.children.Len() > 0 { // merge all leafs into the inserted object - leafs := curr.Leafs() - for p, t := range leafs { + for p, t := range curr.Leafs() { var err error curr.value, err = insertIntoObject(curr.value.(*types.Object), *p, t, env) if err != nil { @@ -387,7 +374,8 @@ func mergeTypes(a, b types.Type) types.Type { bDynProps := bObj.DynamicProperties() dynProps := types.NewDynamicProperty( types.Or(aDynProps.Key, bDynProps.Key), - mergeTypes(aDynProps.Value, bDynProps.Value)) + mergeTypes(aDynProps.Value, bDynProps.Value), + ) return types.NewObject(nil, dynProps) } else if bAny, ok := b.(types.Any); ok && len(a.StaticProperties()) == 0 { // If a is an object type with no static components ... 
@@ -416,14 +404,14 @@ func mergeTypes(a, b types.Type) types.Type { } func (n *typeTreeNode) String() string { - b := strings.Builder{} + b := &strings.Builder{} + key := "-" if k := n.key; k != nil { - b.WriteString(k.String()) - } else { - b.WriteString("-") + key = k.String() } + b.WriteString(key) if v := n.value; v != nil { b.WriteString(": ") b.WriteString(v.String()) @@ -431,9 +419,7 @@ func (n *typeTreeNode) String() string { n.children.Iter(func(_ Value, child *typeTreeNode) bool { b.WriteString("\n\t+ ") - s := child.String() - s = strings.ReplaceAll(s, "\n", "\n\t") - b.WriteString(s) + b.WriteString(strings.ReplaceAll(child.String(), "\n", "\n\t")) return false }) @@ -484,7 +470,8 @@ func (n *typeTreeNode) Leafs() map[*Ref]types.Type { func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) { nPath := append(path, NewTerm(n.key)) if n.Leaf() { - leafs[&nPath] = n.Value() + npc := nPath // copy of else nPath escapes to heap even if !n.Leaf() + leafs[&npc] = n.Value() return } n.children.Iter(func(_ Value, v *typeTreeNode) bool { @@ -512,7 +499,6 @@ func selectConstant(tpe types.Type, term *Term) types.Type { // contains vars or refs, then the returned type will be a union of the // possible types. 
func selectRef(tpe types.Type, ref Ref) types.Type { - if tpe == nil || len(ref) == 0 { return tpe } diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go b/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go index 75160afc6e..bf8bca7472 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go @@ -99,26 +99,35 @@ func (e *Error) Error() string { } } - msg := fmt.Sprintf("%v: %v", e.Code, e.Message) - + sb := strings.Builder{} if len(prefix) > 0 { - msg = prefix + ": " + msg + sb.WriteString(prefix) + sb.WriteString(": ") } + sb.WriteString(e.Code) + sb.WriteString(": ") + sb.WriteString(e.Message) + if e.Details != nil { for _, line := range e.Details.Lines() { - msg += "\n\t" + line + sb.WriteString("\n\t") + sb.WriteString(line) } } - return msg + return sb.String() } // NewError returns a new Error object. func NewError(code string, loc *Location, f string, a ...any) *Error { + return newErrorString(code, loc, fmt.Sprintf(f, a...)) +} + +func newErrorString(code string, loc *Location, m string) *Error { return &Error{ Code: code, Location: loc, - Message: fmt.Sprintf(f, a...), + Message: m, } } diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/index.go b/vendor/github.com/open-policy-agent/opa/v1/ast/index.go index bcaf4a7068..45c0fa7a81 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/index.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/index.go @@ -5,7 +5,6 @@ package ast import ( - "fmt" "slices" "sort" "strings" @@ -68,6 +67,7 @@ var ( globMatchRef = GlobMatch.Ref() internalPrintRef = InternalPrint.Ref() internalTestCaseRef = InternalTestCase.Ref() + internalMemberRef = Member.Ref() skipIndexing = NewSet(NewTerm(internalPrintRef), NewTerm(internalTestCaseRef)) ) @@ -87,6 +87,7 @@ func (i *baseDocEqIndex) Build(rules []*Rule) bool { i.kind = rules[0].Head.RuleKind() indices := newrefindices(i.isVirtual) + values := make(map[Var]Value) 
// build indices for each rule. for idx := range rules { @@ -106,8 +107,9 @@ func (i *baseDocEqIndex) Build(rules []*Rule) bool { } } if !skip { + clear(values) for i := range rule.Body { - indices.Update(rule, rule.Body[i]) + indices.Update(rule, rule.Body[i], values) } } return false @@ -124,7 +126,46 @@ func (i *baseDocEqIndex) Build(rules []*Rule) bool { node := i.root if indices.Indexed(rule) { for _, ref := range indices.Sorted() { - node = node.Insert(ref, indices.Value(rule, ref), indices.Mapper(rule, ref)) + var values []*refindex + for _, ri := range indices.rules[rule] { + if ri.Ref.Equal(ref) { + values = append(values, ri) + } + } + if len(values) == 0 { + node = node.Insert(ref, nil, nil) + } else if len(values) == 1 { + node = node.Insert(ref, values[0].Value, values[0].Mapper) + } else { + var hasVar bool + for i := range values { + if _, isVar := values[i].Value.(Var); isVar { + hasVar = true + break + } + } + + if hasVar { + child := node.Insert(ref, anyValue, values[0].Mapper) + for i := range values { + if values[i].Mapper != nil { + node.next.addMapper(values[i].Mapper) + } + } + node = child + } else { + // When a rule has multiple scalar values (e.g., internal.member_2 with a set), + // each value should have its own child node, and the rule is appended to each. + // This creates separate paths for each value so different rules with overlapping + // values don't interfere with each other. 
+ for _, val := range values { + child := node.Insert(ref, val.Value, val.Mapper) + child.append([...]int{idx, prio}, rule) + } + prio++ + return false + } + } } } // Insert rule into trie with (insertion order, priority order) @@ -213,7 +254,7 @@ func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) { return result, nil } -func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) { +func (i *baseDocEqIndex) AllRules(ValueResolver) (*IndexResult, error) { tr := newTrieTraversalResult() // Walk over the rule trie and accumulate _all_ rules @@ -285,14 +326,14 @@ func newrefindices(isVirtual func(Ref) bool) *refindices { } } +// anyValue is a fake variable we used to put "naked ref" expressions +// into the rule index +var anyValue = Var("__any__") + // Update attempts to update the refindices for the given expression in the // given rule. If the expression cannot be indexed the update does not affect // the indices. -func (i *refindices) Update(rule *Rule, expr *Expr) { - - if expr.Negated { - return - } +func (i *refindices) Update(rule *Rule, expr *Expr, values map[Var]Value) { if len(expr.With) > 0 { // NOTE(tsandall): In the future, we may need to consider expressions @@ -300,26 +341,58 @@ func (i *refindices) Update(rule *Rule, expr *Expr) { return } - op := expr.Operator() + if expr.Negated { + // NOTE(sr): We could try to cover simple expressions, like + // not input.funky => input.funky == false or undefined (two refindex?) + return + } - switch { - case op.Equal(equalityRef): - i.updateEq(rule, expr) + op := expr.Operator() + if op == nil { + if ts, ok := expr.Terms.(*Term); ok { + // NOTE(sr): If we wanted to cover function args, we'd need to also + // check for type "Var" here. 
But since it's impossible to call a + // function with a undefined argument, there's no point to recording + // "needs to be anything" for function args + if ref, ok := ts.Value.(Ref); ok { // "naked ref" + i.updateEq(rule, ref, anyValue, nil) + } + } + } - case op.Equal(equalRef) && len(expr.Operands()) == 2: + equalish := op.Equal(equalityRef) || // unification, no 3-operands version exists // NOTE(tsandall): if equal() is called with more than two arguments the // output value is being captured in which case the indexer cannot // exclude the rule if the equal() call would return false (because the // false value must still be produced.) - i.updateEq(rule, expr) + (op.Equal(equalRef) && len(expr.Operands()) == 2) + + a, b := expr.Operand(0), expr.Operand(1) + switch { + case equalish: + if !i.updateEqWildcardRef(rule, a.Value, b.Value, values) { + i.updateEq(rule, a.Value, b.Value, values) + } case op.Equal(globMatchRef) && len(expr.Operands()) == 3: // NOTE(sr): Same as with equal() above -- 4 operands means the output // of `glob.match` is captured and the rule can thus not be excluded. i.updateGlobMatch(rule, expr) + + case op.Equal(internalMemberRef) && len(expr.Operands()) == 2: + // NOTE(sr): Again, 3 operands means captured output (like above). + i.updateMember(rule, expr, values) } } +func (i *refindices) isValidIndexRef(ref Ref) bool { + // NB(sr): the ordering is intentional, cheapest-first + return RootDocumentNames.Contains(ref[0]) && + !ref.IsNested() && + ref.IsGround() && + !i.isVirtual(ref) +} + // Sorted returns a sorted list of references that the indices were built from. // References that appear more frequently in the indexed rules are ordered // before less frequently appearing references. 
@@ -366,17 +439,46 @@ func (i *refindices) Mapper(rule *Rule, ref Ref) *valueMapper { return nil } -func (i *refindices) updateEq(rule *Rule, expr *Expr) { - a, b := expr.Operand(0), expr.Operand(1) +func (i *refindices) updateEq(rule *Rule, a, b Value, constants map[Var]Value) { args := rule.Head.Args - if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, a, b); ok { - i.insert(rule, idx) - return + if !i.eqOperandsToRefAndValue(rule, args, a, b, constants) { + i.eqOperandsToRefAndValue(rule, args, b, a, constants) } - if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, b, a); ok { - i.insert(rule, idx) - return +} + +func (i *refindices) updateEqWildcardRef(rule *Rule, a, b Value, constants map[Var]Value) bool { + return i.tryIndexWildcardRef(rule, a, b, constants) || + i.tryIndexWildcardRef(rule, b, a, constants) +} + +func (i *refindices) tryIndexWildcardRef(rule *Rule, a, b Value, constants map[Var]Value) bool { + ref, ok := a.(Ref) + if !ok { + return false + } + + groundPrefix := ref.GroundPrefix() + if len(groundPrefix) != len(ref)-1 || !i.isValidIndexRef(groundPrefix) { + return false + } + + resolvedValue := b + if bvar, ok := b.(Var); ok { + if resolved, ok := constants[bvar]; ok { + resolvedValue = resolved + } + } else if val, ok := indexValue(b); ok { + resolvedValue = val + } else { + return false + } + + if !IsScalar(resolvedValue) { + return false } + + i.insert(rule, &refindex{Ref: groundPrefix, Value: resolvedValue}) + return true } func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) { @@ -392,21 +494,8 @@ func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) { // 3rd operand was a reference that has been rewritten and bound to a // variable earlier in the query OR a function argument variable. 
match := expr.Operand(2) - if _, ok := match.Value.(Var); ok { - var ref Ref - for _, other := range i.rules[rule] { - if _, ok := other.Value.(Var); ok && other.Value.Compare(match.Value) == 0 { - ref = other.Ref - } - } - if ref == nil { - for j, arg := range args { - if arg.Equal(match) { - ref = Ref{FunctionArgRootDocument, InternedTerm(j)} - } - } - } - if ref != nil { + if v, ok := match.Value.(Var); ok { + if ref := resolveVarToRef(i.rules[rule], args, v); ref != nil { i.insert(rule, &refindex{ Ref: ref, Value: arr.Value, @@ -425,19 +514,137 @@ func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) { } } -func (i *refindices) insert(rule *Rule, index *refindex) { +func (i *refindices) updateMember(rule *Rule, expr *Expr, constants map[Var]Value) { + args := rule.Head.Args + lhs, rhs := expr.Operand(0), expr.Operand(1) - count, ok := i.frequency.Get(index.Ref) - if !ok { - count = 0 + lvar, ok := lhs.Value.(Var) + if ok { + lref := resolveVarToRef(i.rules[rule], args, lvar) + if lref != nil { + i.updateMemberRefInValue(rule, lref, rhs, constants) // `ref in value` + return + } + } + + // `var0 in var1` case (var0 may be constant, var1 ref) + i.updateMemberValueInRef(rule, args, lhs.Value, rhs, constants) +} + +func (i *refindices) updateMemberValueInRef(rule *Rule, args []*Term, lval Value, rhs *Term, constants map[Var]Value) { + if lvar, ok := lval.(Var); ok { + val, ok := constants[lvar] + if ok { + lval = val + } + } else if !IsScalar(lval) { + return + } + + rref := i.resolveAndValidateRef(rule, args, rhs) + if rref == nil { + return + } + + i.insert(rule, &refindex{Ref: rref, Value: lval}) +} + +func (i *refindices) updateMemberRefInValue(rule *Rule, ref Ref, rhs *Term, constants map[Var]Value) { + rval := rhs.Value + if rvar, ok := rval.(Var); ok { // rhs is var, try to resolve + if resolved, ok := constants[rvar]; ok { + rval = resolved + } + } + + addRef := func(t *Term) error { + i.insert(rule, &refindex{Ref: ref, Value: t.Value}) + return nil 
+ } + + switch rcol := rval.(type) { + case *Array: + _ = rcol.Iter(addRef) + case Set: + _ = rcol.Iter(addRef) + case Object: + _ = rcol.Iter(func(_, v *Term) error { + return addRef(v) + }) + } +} + +func (i *refindices) resolveAndValidateRef(rule *Rule, args []*Term, term *Term) Ref { + var ref Ref + switch v := term.Value.(type) { + case Ref: + ref = v + case Var: + ref = resolveVarToRef(i.rules[rule], args, v) + default: + return nil + } + + if ref == nil || !i.isValidIndexRef(ref) { + return nil + } + + return ref +} + +// resolveVarToRef checks the previously prepared `*refindex` slice for +// occurrences of the var `v`. Since we store `ref = var` expressions for +// "any" lookups (i.e. "return the rule if ref is anything"), we can +// resolve vars to refs in these simple cases: +// +// __local2__ = input.foo +// __local2__ = +// +// This what builtin calls involving refs are rewritten to, so it is used +// for var -> ref lookup when buiding the RI for glob.match or `v in col`. +// +// For convenience, we also resolve function arg vars here. +// +// NB: This also covers explicit var assignments, like `role := input.rule`, +// but it is no help with chains of assignments, like +// +// x := input.role +// y := x +// +// +// as we're not capturing `var = var` expressions in the index. 
+func resolveVarToRef(ri []*refindex, args []*Term, v Var) Ref { + for _, other := range ri { + if ov, ok := other.Value.(Var); ok && ov.Equal(v) { + return other.Ref + } + } + for j, arg := range args { + if arg.Value.Compare(v) == 0 { + return Ref{FunctionArgRootDocument, InternedTerm(j)} + } } + return nil +} + +func (i *refindices) insert(rule *Rule, index *refindex) { + count, _ := i.frequency.Get(index.Ref) i.frequency.Put(index.Ref, count+1) + _, indexValueIsVar := index.Value.(Var) + for pos, other := range i.rules[rule] { if other.Ref.Equal(index.Ref) { - i.rules[rule][pos] = index - return + + if ValueEqual(other.Value, index.Value) { + return + } + _, otherValueIsVar := other.Value.(Var) + if !indexValueIsVar && otherValueIsVar { + i.rules[rule][pos] = index + return + } } } @@ -454,7 +661,7 @@ func (i *refindices) index(rule *Rule, ref Ref) *refindex { } type trieWalker interface { - Do(x any) trieWalker + Do(any) trieWalker } type trieTraversalResult struct { @@ -483,7 +690,12 @@ func (tr *trieTraversalResult) Add(t *trieNode) { if !ok { tr.ordering = append(tr.ordering, root) } - tr.unordered[root] = append(nodes, node) + // Deduplicate: check if a ruleNode with this priority already exists + if !slices.ContainsFunc(nodes, func(existing *ruleNode) bool { + return existing.prio == node.prio + }) { + tr.unordered[root] = append(nodes, node) + } } if t.multiple { tr.multiple = true @@ -511,45 +723,6 @@ type trieNode struct { multiple bool } -func (node *trieNode) String() string { - var flags []string - flags = append(flags, fmt.Sprintf("self:%p", node)) - if len(node.ref) > 0 { - flags = append(flags, node.ref.String()) - } - if node.next != nil { - flags = append(flags, fmt.Sprintf("next:%p", node.next)) - } - if node.any != nil { - flags = append(flags, fmt.Sprintf("any:%p", node.any)) - } - if node.undefined != nil { - flags = append(flags, fmt.Sprintf("undefined:%p", node.undefined)) - } - if node.array != nil { - flags = append(flags, 
fmt.Sprintf("array:%p", node.array)) - } - if node.scalars.Len() > 0 { - buf := make([]string, 0, node.scalars.Len()) - node.scalars.Iter(func(key Value, val *trieNode) bool { - buf = append(buf, fmt.Sprintf("scalar(%v):%p", key, val)) - return false - }) - sort.Strings(buf) - flags = append(flags, strings.Join(buf, " ")) - } - if len(node.rules) > 0 { - flags = append(flags, fmt.Sprintf("%d rule(s)", len(node.rules))) - } - if len(node.mappers) > 0 { - flags = append(flags, fmt.Sprintf("%d mapper(s)", len(node.mappers))) - } - if node.value != nil { - flags = append(flags, "value exists") - } - return strings.Join(flags, " ") -} - func (node *trieNode) append(prio [2]int, rule *Rule) { node.rules = append(node.rules, &ruleNode{prio, rule}) @@ -574,28 +747,24 @@ func newTrieNodeImpl() *trieNode { } func (node *trieNode) Do(walker trieWalker) { + if node == nil { + return + } next := walker.Do(node) if next == nil { return } - if node.any != nil { - node.any.Do(next) - } - if node.undefined != nil { - node.undefined.Do(next) - } + + node.any.Do(next) + node.undefined.Do(next) node.scalars.Iter(func(_ Value, child *trieNode) bool { child.Do(next) return false }) - if node.array != nil { - node.array.Do(next) - } - if node.next != nil { - node.next.Do(next) - } + node.array.Do(next) + node.next.Do(next) } func (node *trieNode) Insert(ref Ref, value Value, mapper *valueMapper) *trieNode { @@ -687,7 +856,6 @@ func (node *trieNode) insertArray(arr *Array) *trieNode { } func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) error { - if node == nil { return nil } @@ -700,31 +868,31 @@ func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) return err } - if node.undefined != nil { - err = node.undefined.Traverse(resolver, tr) - if err != nil { - return err - } + err = node.undefined.Traverse(resolver, tr) + if err != nil { + return err } if v == nil { return nil } - if node.any != nil { - err = node.any.Traverse(resolver, 
tr) - if err != nil { - return err - } + err = node.any.Traverse(resolver, tr) + if err != nil { + return err } - if err := node.traverseValue(resolver, tr, v); err != nil { + err = node.traverseValue(resolver, tr, v) + if err != nil { return err } for i := range node.mappers { - if err := node.traverseValue(resolver, tr, node.mappers[i].MapValue(v)); err != nil { - return err + mapped := node.mappers[i].MapValue(v) + if !ValueEqual(mapped, v) { + if err := node.traverseValue(resolver, tr, mapped); err != nil { + return err + } } } @@ -734,11 +902,20 @@ func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalResult, value Value) error { switch value := value.(type) { - case *Array: - if node.array == nil { - return nil + case *Array, Set, Object: + if node.array != nil { + if arr, ok := value.(*Array); ok { + if err := node.array.traverseArray(resolver, tr, arr); err != nil { + return err + } + } } - return node.array.traverseArray(resolver, tr, value) + + if node.scalars.Len() > 0 { + return node.traverseCollectionMembership(resolver, tr, value) + } + + return nil case Null, Boolean, Number, String: child, ok := node.scalars.Get(value) @@ -751,17 +928,41 @@ func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalRes return nil } +func (node *trieNode) traverseCollectionMembership(resolver ValueResolver, tr *trieTraversalResult, collection Value) error { + checkMember := func(t *Term) error { + if IsScalar(t.Value) { + child, _ := node.scalars.Get(t.Value) + return child.Traverse(resolver, tr) + } + return nil + } + + switch col := collection.(type) { + case *Array: + return col.Iter(checkMember) + case Set: + return col.Iter(checkMember) + case Object: + return col.Iter(func(_, v *Term) error { + return checkMember(v) + }) + } + + return nil +} + func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalResult, arr *Array) 
error { + if node == nil { + return nil + } if arr.Len() == 0 { return node.Traverse(resolver, tr) } - if node.any != nil { - err := node.any.traverseArray(resolver, tr, arr.Slice(1, -1)) - if err != nil { - return err - } + err := node.any.traverseArray(resolver, tr, arr.Slice(1, -1)) + if err != nil { + return err } head := arr.Elem(0).Value @@ -772,10 +973,7 @@ func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalRes switch head := head.(type) { case Null, Boolean, Number, String: - child, ok := node.scalars.Get(head) - if !ok { - return nil - } + child, _ := node.scalars.Get(head) return child.traverseArray(resolver, tr, arr.Slice(1, -1)) } @@ -783,7 +981,6 @@ func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalRes } func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error { - if node == nil { return nil } @@ -816,35 +1013,46 @@ func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalR // for the argument number. So for `f(x, y) { x = 10; y = 12 }`, we'll // bind `args[0]` and `args[1]` to this rule when called for (x=10) and // (y=12) respectively. 
-func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b *Term) (*refindex, bool) { - switch v := a.Value.(type) { +func (i *refindices) eqOperandsToRefAndValue(rule *Rule, args []*Term, a, b Value, constants map[Var]Value) bool { + switch v := a.(type) { case Var: - for i, arg := range args { - if arg.Value.Compare(a.Value) == 0 { - if bval, ok := indexValue(b); ok { - return &refindex{Ref: Ref{FunctionArgRootDocument, InternedTerm(i)}, Value: bval}, true - } - } + // a is a var, but we have not been able to resolve it to a ref, save for later + if IsConstant(b) { + constants[v] = b } - case Ref: - if !RootDocumentNames.Contains(v[0]) { - return nil, false + + bval, ok := indexValue(b) + if !ok { + return false } - if isVirtual(v) { - return nil, false + if ref := resolveVarToRef(i.rules[rule], args, v); ref != nil { + i.insert(rule, &refindex{Ref: ref, Value: bval}) + return true } - if v.IsNested() || !v.IsGround() { - return nil, false + + case Ref: + if !i.isValidIndexRef(v) { + return false } - if bval, ok := indexValue(b); ok { - return &refindex{Ref: v, Value: bval}, true + + if bvar, ok := b.(Var); ok { // cheaper lookup first: constants + if resolved, ok := constants[bvar]; ok { + b = resolved + } + } else if bval, ok := indexValue(b); ok { + b = bval + } else { + return false } + + i.insert(rule, &refindex{Ref: v, Value: b}) + return true } - return nil, false + return false } -func indexValue(b *Term) (Value, bool) { - switch b := b.Value.(type) { +func indexValue(b Value) (Value, bool) { + switch b := b.(type) { case Null, Boolean, Number, String, Var: return b, true case *Array: @@ -872,7 +1080,6 @@ func indexValue(b *Term) (Value, bool) { } func globDelimiterToString(delim *Term) (string, bool) { - arr, ok := delim.Value.(*Array) if !ok { return "", false @@ -883,14 +1090,16 @@ func globDelimiterToString(delim *Term) (string, bool) { if arr.Len() == 0 { result = "." 
} else { + sb := strings.Builder{} for i := range arr.Len() { term := arr.Elem(i) s, ok := term.Value.(String) if !ok { return "", false } - result += string(s) + sb.WriteString(string(s)) } + result = sb.String() } return result, true diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/index_debug.go b/vendor/github.com/open-policy-agent/opa/v1/ast/index_debug.go new file mode 100644 index 0000000000..88d451b175 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/index_debug.go @@ -0,0 +1,219 @@ +// Copyright 2026 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "sort" + "strings" +) + +func (node *trieNode) mermaid() string { + var sb strings.Builder + sb.WriteString("graph TD\n") + nodeCounter := 0 + nodeIDs := make(map[*trieNode]string) + node.mermaidFormat(&sb, &nodeCounter, nodeIDs, "") + return sb.String() +} + +func (node *trieNode) mermaidFormat(sb *strings.Builder, counter *int, nodeIDs map[*trieNode]string, parentID string) { + currentID, exists := nodeIDs[node] + if !exists { + currentID = fmt.Sprintf("n%d", *counter) + *counter++ + nodeIDs[node] = currentID + label := node.mermaidLabel() + fmt.Fprintf(sb, " %s[\"%s\"]\n", currentID, label) + } + + if parentID != "" { + fmt.Fprintf(sb, " %s --> %s\n", parentID, currentID) + } + + if exists { + return + } + + if node.undefined != nil { + if childID, childExists := nodeIDs[node.undefined]; childExists { + fmt.Fprintf(sb, " %s -->|undefined| %s\n", currentID, childID) + } else { + node.undefined.mermaidFormat(sb, counter, nodeIDs, "") + fmt.Fprintf(sb, " %s -->|undefined| %s\n", currentID, nodeIDs[node.undefined]) + } + } + + if node.any != nil { + if childID, childExists := nodeIDs[node.any]; childExists { + fmt.Fprintf(sb, " %s -->|any| %s\n", currentID, childID) + } else { + node.any.mermaidFormat(sb, counter, nodeIDs, "") + fmt.Fprintf(sb, " %s 
-->|any| %s\n", currentID, nodeIDs[node.any]) + } + } + + if node.scalars.Len() > 0 { + type scalarPair struct { + key Value + node *trieNode + } + pairs := make([]scalarPair, 0, node.scalars.Len()) + node.scalars.Iter(func(key Value, val *trieNode) bool { + pairs = append(pairs, scalarPair{key, val}) + return false + }) + sort.Slice(pairs, func(a, b int) bool { + return pairs[a].key.Compare(pairs[b].key) < 0 + }) + for _, pair := range pairs { + var scalarLabel string + if s, ok := pair.key.(String); ok { + scalarLabel = string(s) + } else { + scalarLabel = pair.key.String() + } + if len(scalarLabel) > 20 { + scalarLabel = scalarLabel[:20] + "..." + } + scalarLabel = mermaidEscape(scalarLabel) + if childID, childExists := nodeIDs[pair.node]; childExists { + fmt.Fprintf(sb, " %s -->|\"%s\"| %s\n", currentID, scalarLabel, childID) + } else { + pair.node.mermaidFormat(sb, counter, nodeIDs, "") + fmt.Fprintf(sb, " %s -->|\"%s\"| %s\n", currentID, scalarLabel, nodeIDs[pair.node]) + } + } + } + + if node.array != nil { + if childID, childExists := nodeIDs[node.array]; childExists { + fmt.Fprintf(sb, " %s -->|array| %s\n", currentID, childID) + } else { + node.array.mermaidFormat(sb, counter, nodeIDs, "") + fmt.Fprintf(sb, " %s -->|array| %s\n", currentID, nodeIDs[node.array]) + } + } + + if node.next != nil { + node.next.mermaidFormat(sb, counter, nodeIDs, currentID) + } +} + +func (node *trieNode) mermaidLabel() string { + var parts []string + + if len(node.ref) > 0 { + parts = append(parts, node.ref.String()) + } + + if len(node.rules) > 0 { + for _, rn := range node.rules { + bodyStr := "" + if rn.rule.Body != nil { + bodyStr = rn.rule.Body.String() + if len(bodyStr) > 50 { + bodyStr = bodyStr[:50] + "..." 
+ } + } + bodyStr = mermaidEscape(bodyStr) + parts = append(parts, bodyStr) + } + } + + if len(node.mappers) > 0 { + parts = append(parts, fmt.Sprintf("%d mapper(s)", len(node.mappers))) + } + if node.multiple { + parts = append(parts, "multiple") + } + + if len(parts) == 0 { + return "·" + } + + return strings.Join(parts, "
") +} + +func mermaidEscape(s string) string { + s = strings.ReplaceAll(s, `"`, `"`) + return s +} + +func (node *trieNode) String() string { + var sb strings.Builder + node.format(&sb, 0) + return sb.String() +} + +func (node *trieNode) format(sb *strings.Builder, depth int) { + indent := strings.Repeat(" ", depth) + + if len(node.ref) > 0 { + sb.WriteString(indent) + sb.WriteString(node.ref.String()) + } else if depth == 0 { + sb.WriteString("root") + } + + if len(node.rules) > 0 { + fmt.Fprintf(sb, " [%d rule(s)]", len(node.rules)) + } + if len(node.mappers) > 0 { + fmt.Fprintf(sb, " [%d mapper(s)]", len(node.mappers)) + } + if node.value != nil { + fmt.Fprintf(sb, " value=%v", node.value) + } + if node.multiple { + sb.WriteString(" [multiple]") + } + sb.WriteString("\n") + + if node.undefined != nil { + sb.WriteString(indent) + sb.WriteString(" undefined:\n") + node.undefined.format(sb, depth+2) + } + + if node.any != nil { + sb.WriteString(indent) + sb.WriteString(" any:\n") + node.any.format(sb, depth+2) + } + + if node.scalars.Len() > 0 { + scalars := make([]Value, 0, node.scalars.Len()) + nodes := make([]*trieNode, 0, node.scalars.Len()) + node.scalars.Iter(func(key Value, val *trieNode) bool { + scalars = append(scalars, key) + nodes = append(nodes, val) + return false + }) + sort.Slice(scalars, func(a, b int) bool { + return scalars[a].Compare(scalars[b]) < 0 + }) + for i := range scalars { + sb.WriteString(indent) + fmt.Fprintf(sb, " %v:\n", scalars[i]) + for j := range nodes { + if ValueEqual(scalars[i], scalars[j]) { + nodes[j].format(sb, depth+2) + break + } + } + } + } + + if node.array != nil { + sb.WriteString(indent) + sb.WriteString(" array:\n") + node.array.format(sb, depth+2) + } + + if node.next != nil { + node.next.format(sb, depth) + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go index 3741d37188..6b2b03b27a 100644 --- 
a/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go @@ -158,18 +158,42 @@ func (s *Scanner) WithoutKeywords(kws map[string]tokens.Token) (*Scanner, map[st return &cpy, kw } +type ScanOptions struct { + continueTemplateString bool + rawTemplateString bool +} + +type ScanOption func(*ScanOptions) + +// ContinueTemplateString will continue scanning a template string +func ContinueTemplateString(raw bool) ScanOption { + return func(opts *ScanOptions) { + opts.continueTemplateString = true + opts.rawTemplateString = raw + } +} + // Scan will increment the scanners position in the source // code until the next token is found. The token, starting position // of the token, string literal, and any errors encountered are // returned. A token will always be returned, the caller must check // for any errors before using the other values. -func (s *Scanner) Scan() (tokens.Token, Position, string, []Error) { +func (s *Scanner) Scan(opts ...ScanOption) (tokens.Token, Position, string, []Error) { + scanOpts := &ScanOptions{} + for _, opt := range opts { + opt(scanOpts) + } pos := Position{Offset: s.offset - s.width, Row: s.row, Col: s.col, Tabs: s.tabs} var tok tokens.Token var lit string - - if s.isWhitespace() { + if scanOpts.continueTemplateString { + if scanOpts.rawTemplateString { + lit, tok = s.scanRawTemplateString() + } else { + lit, tok = s.scanTemplateString() + } + } else if s.isWhitespace() { // string(rune) is an unnecessary heap allocation in this case as we know all // the possible whitespace values, and can simply translate to string ourselves switch s.curr { @@ -275,6 +299,17 @@ func (s *Scanner) Scan() (tokens.Token, Position, string, []Error) { tok = tokens.Semicolon case '.': tok = tokens.Dot + case '$': + switch s.curr { + case '`': + s.next() + lit, tok = s.scanRawTemplateString() + case '"': + s.next() + lit, tok = s.scanTemplateString() + default: + 
s.error("illegal $ character") + } } } @@ -395,6 +430,116 @@ func (s *Scanner) scanRawString() string { return util.ByteSliceToString(s.bs[start : s.offset-1]) } +func (s *Scanner) scanTemplateString() (string, tokens.Token) { + tok := tokens.TemplateStringPart + start := s.literalStart() + var escapes []int + for { + ch := s.curr + + if ch == '\n' || ch < 0 { + s.error("non-terminated string") + break + } + + s.next() + + if ch == '"' { + tok = tokens.TemplateStringEnd + break + } + + if ch == '{' { + break + } + + if ch == '\\' { + switch s.curr { + case '\\', '"', '/', 'b', 'f', 'n', 'r', 't': + s.next() + case '{': + escapes = append(escapes, s.offset-1) + s.next() + case 'u': + s.next() + s.next() + s.next() + s.next() + default: + s.error("illegal escape sequence") + } + } + } + + // Lazily remove escapes to not unnecessarily allocate a new byte slice + if len(escapes) > 0 { + return util.ByteSliceToString(removeEscapes(s, escapes, start)), tok + } + + return util.ByteSliceToString(s.bs[start : s.offset-1]), tok +} + +func (s *Scanner) scanRawTemplateString() (string, tokens.Token) { + tok := tokens.RawTemplateStringPart + start := s.literalStart() + var escapes []int + for { + ch := s.curr + + if ch < 0 { + s.error("non-terminated string") + break + } + + s.next() + + if ch == '`' { + tok = tokens.RawTemplateStringEnd + break + } + + if ch == '{' { + break + } + + if ch == '\\' { + switch s.curr { + case '{': + escapes = append(escapes, s.offset-1) + s.next() + } + } + } + + // Lazily remove escapes to not unnecessarily allocate a new byte slice + if len(escapes) > 0 { + return util.ByteSliceToString(removeEscapes(s, escapes, start)), tok + } + + return util.ByteSliceToString(s.bs[start : s.offset-1]), tok +} + +func removeEscapes(s *Scanner, escapes []int, start int) []byte { + from := start + bs := make([]byte, 0, s.offset-start-len(escapes)) + + for _, escape := range escapes { + // Append the bytes before the escape sequence. 
+ if escape > from { + bs = append(bs, s.bs[from:escape-1]...) + } + // Skip the escape character. + from = escape + } + + // Append the remaining bytes after the last escape sequence. + if from < s.offset-1 { + bs = append(bs, s.bs[from:s.offset-1]...) + } + + return bs +} + func (s *Scanner) scanComment() string { start := s.literalStart() for s.curr != '\n' && s.curr != -1 { diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go index 4033ba81ae..2721c3618b 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go @@ -39,6 +39,10 @@ const ( Number String + TemplateStringPart + TemplateStringEnd + RawTemplateStringPart + RawTemplateStringEnd LBrack RBrack @@ -67,6 +71,7 @@ const ( Lte Dot Semicolon + Dollar Every Contains @@ -74,53 +79,58 @@ const ( ) var strings = [...]string{ - Illegal: "illegal", - EOF: "eof", - Whitespace: "whitespace", - Comment: "comment", - Ident: "identifier", - Package: "package", - Import: "import", - As: "as", - Default: "default", - Else: "else", - Not: "not", - Some: "some", - With: "with", - Null: "null", - True: "true", - False: "false", - Number: "number", - String: "string", - LBrack: "[", - RBrack: "]", - LBrace: "{", - RBrace: "}", - LParen: "(", - RParen: ")", - Comma: ",", - Colon: ":", - Add: "plus", - Sub: "minus", - Mul: "mul", - Quo: "div", - Rem: "rem", - And: "and", - Or: "or", - Unify: "eq", - Equal: "equal", - Assign: "assign", - In: "in", - Neq: "neq", - Gt: "gt", - Lt: "lt", - Gte: "gte", - Lte: "lte", - Dot: ".", - Semicolon: ";", - Every: "every", - Contains: "contains", - If: "if", + Illegal: "illegal", + EOF: "eof", + Whitespace: "whitespace", + Comment: "comment", + Ident: "identifier", + Package: "package", + Import: "import", + As: "as", + Default: "default", + Else: "else", + Not: "not", + Some: 
"some", + With: "with", + Null: "null", + True: "true", + False: "false", + Number: "number", + String: "string", + TemplateStringPart: "template-string-part", + TemplateStringEnd: "template-string-end", + RawTemplateStringPart: "raw-template-string-part", + RawTemplateStringEnd: "raw-template-string-end", + LBrack: "[", + RBrack: "]", + LBrace: "{", + RBrace: "}", + LParen: "(", + RParen: ")", + Comma: ",", + Colon: ":", + Add: "plus", + Sub: "minus", + Mul: "mul", + Quo: "div", + Rem: "rem", + And: "and", + Or: "or", + Unify: "eq", + Equal: "equal", + Assign: "assign", + In: "in", + Neq: "neq", + Gt: "gt", + Lt: "lt", + Gte: "gte", + Lte: "lte", + Dot: ".", + Semicolon: ";", + Dollar: "dollar", + Every: "every", + Contains: "contains", + If: "if", } var keywords = map[string]Token{ @@ -147,3 +157,7 @@ func IsKeyword(tok Token) bool { _, ok := keywords[strings[tok]] return ok } + +func KeywordFor(tok Token) string { + return strings[tok] +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go b/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go index 564a3cc41f..4f454beb96 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go @@ -5,6 +5,7 @@ package ast import ( + "iter" "strconv" ) @@ -18,24 +19,47 @@ type internable interface { // at any time without notice. 
var ( - InternedNullTerm = &Term{Value: Null{}} + InternedNullValue Value = Null{} + InternedNullTerm = NewTerm(InternedNullValue) - InternedEmptyString = StringTerm("") - InternedEmptyObject = ObjectTerm() - InternedEmptyArray = ArrayTerm() - InternedEmptySet = SetTerm() + InternedBooleanTrueValue Value = Boolean(true) + InternedBooleanFalseValue Value = Boolean(false) + InternedEmptyStringValue Value = String("") + InternedEmptyArrayValue Value = NewArray() + InternedEmptyRefValue Value = Ref{} + InternedEmptyObjectValue Value = NewObject() + InternedEmptySetValue Value = NewSet() - InternedEmptyArrayValue = NewArray() - - booleanTrueTerm = &Term{Value: Boolean(true)} - booleanFalseTerm = &Term{Value: Boolean(false)} + InternedBooleanTrue = NewTerm(InternedBooleanTrueValue) + InternedBooleanFalse = NewTerm(InternedBooleanFalseValue) + InternedEmptyString = NewTerm(InternedEmptyStringValue) + InternedEmptyObject = NewTerm(InternedEmptyObjectValue) + InternedEmptyArray = NewTerm(InternedEmptyArrayValue) + InternedEmptySet = NewTerm(InternedEmptySetValue) // since this is by far the most common negative number - minusOneTerm = &Term{Value: Number("-1")} + minusOneValue Value = Number("-1") + minusOneTerm = NewTerm(minusOneValue) internedStringTerms = map[string]*Term{ "": InternedEmptyString, } + + internedVarValues = map[string]Value{ + "input": Var("input"), + "data": Var("data"), + "args": Var("args"), + "schema": Var("schema"), + "key": Var("key"), + "value": Var("value"), + "future": Var("future"), + "rego": Var("rego"), + "set": Var("set"), + "internal": Var("internal"), + "else": Var("else"), + + "i": Var("i"), "j": Var("j"), "k": Var("k"), "v": Var("v"), "x": Var("x"), "y": Var("y"), "z": Var("z"), + } ) // InternStringTerm interns the given strings as terms. 
Note that Interning is @@ -48,8 +72,100 @@ func InternStringTerm(str ...string) { continue } - internedStringTerms[s] = StringTerm(s) + internedStringTerms[s] = &Term{Value: String(s)} + } +} + +// InternVarValue interns the given variable names as Var Values. Note that Interning is +// considered experimental and should not be relied upon by external code. +// WARNING: This must **only** be called at initialization time, as the +// interned terms are shared globally, and the underlying map is not thread-safe. +func InternVarValue(names ...string) { + for _, name := range names { + if _, ok := internedVarValues[name]; ok { + continue + } + + internedVarValues[name] = Var(name) + } +} + +// HasInternedValue returns true if the given value is interned, otherwise false. +func HasInternedValue[T internable](v T) bool { + switch value := any(v).(type) { + case bool: + return true + case int: + return HasInternedIntNumberTerm(value) + case int8: + return HasInternedIntNumberTerm(int(value)) + case int16: + return HasInternedIntNumberTerm(int(value)) + case int32: + return HasInternedIntNumberTerm(int(value)) + case int64: + return HasInternedIntNumberTerm(int(value)) + case uint: + return HasInternedIntNumberTerm(int(value)) + case uint8: + return HasInternedIntNumberTerm(int(value)) + case uint16: + return HasInternedIntNumberTerm(int(value)) + case uint32: + return HasInternedIntNumberTerm(int(value)) + case uint64: + return HasInternedIntNumberTerm(int(value)) + case string: + _, ok := internedStringTerms[value] + return ok + } + return false +} + +// InternedValue returns an interned Value for scalar v, if the value is +// interned. If the value is not interned, a new Value is returned. +func InternedValue[T internable](v T) Value { + return InternedValueOr(v, internedTermValue) +} + +// InternedVarValue returns an interned Var Value for the given name. If the +// name is not interned, a new Var Value is returned. 
+func InternedVarValue(name string) Value { + if v, ok := internedVarValues[name]; ok { + return v + } + + return Var(name) +} + +// InternedValueOr returns an interned Value for scalar v. Calls supplier +// to produce a Value if the value is not interned. +func InternedValueOr[T internable](v T, supplier func(T) Value) Value { + switch value := any(v).(type) { + case bool: + return internedBooleanValue(value) + case int: + return internedIntNumberValue(value) + case int8: + return internedIntNumberValue(int(value)) + case int16: + return internedIntNumberValue(int(value)) + case int32: + return internedIntNumberValue(int(value)) + case int64: + return internedIntNumberValue(int(value)) + case uint: + return internedIntNumberValue(int(value)) + case uint8: + return internedIntNumberValue(int(value)) + case uint16: + return internedIntNumberValue(int(value)) + case uint32: + return internedIntNumberValue(int(value)) + case uint64: + return internedIntNumberValue(int(value)) } + return supplier(v) } // Interned returns a possibly interned term for the given scalar value. @@ -85,6 +201,13 @@ func InternedTerm[T internable](v T) *Term { } } +// InternedItem works just like [Item] but returns interned terms for both +// key and value where possible. This is mostly useful for making tests less +// verbose. +func InternedItem[K, V internable](key K, value V) [2]*Term { + return [2]*Term{InternedTerm(key), InternedTerm(value)} +} + // InternedIntFromString returns a term with the given integer value if the string // maps to an interned term. If the string does not map to an interned term, nil is // returned. @@ -96,6 +219,19 @@ func InternedIntNumberTermFromString(s string) *Term { return nil } +// InternedIntRange returns a sequence of interned integer number terms +// from start (inclusive) to end (exclusive). For values outside of the +// interned range, non-interned IntNumberTerms are returned. 
+func InternedIntRange(start, end int) iter.Seq[*Term] { + return func(yield func(*Term) bool) { + for i := start; i < end; i++ { + if !yield(internedIntNumberTerm(i)) { + return + } + } + } +} + // HasInternedIntNumberTerm returns true if the given integer value maps to an interned // term, otherwise false. func HasInternedIntNumberTerm(i int) bool { @@ -123,13 +259,33 @@ func InternedIntegerString(i int) *Term { return StringTerm(s) } +func internedBooleanValue(b bool) Value { + if b { + return InternedBooleanTrueValue + } + + return InternedBooleanFalseValue +} + // InternedBooleanTerm returns an interned term with the given boolean value. func internedBooleanTerm(b bool) *Term { if b { - return booleanTrueTerm + return InternedBooleanTrue + } + + return InternedBooleanFalse +} + +func internedIntNumberValue(i int) Value { + if i >= 0 && i < len(intNumberTerms) { + return intNumberValues[i] } - return booleanFalseTerm + if i == -1 { + return minusOneValue + } + + return Number(strconv.Itoa(i)) } // InternedIntNumberTerm returns a term with the given integer value. 
The term is @@ -158,10 +314,15 @@ func internedStringTerm(s string) *Term { return StringTerm(s) } +func internedTermValue[T internable](v T) Value { + return InternedTerm(v).Value +} + func init() { InternStringTerm( // Numbers - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", + "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72", "73", "74", @@ -179,12 +340,15 @@ func init() { // Various "data", "input", "result", "keywords", "path", "v1", "error", "partial", // HTTP - "code", "message", "status_code", "method", "url", "uri", + "code", "message", "status_code", "method", "url", "uri", "body", "raw_body", "headers", "query_params", // JWT "enc", "cty", "iss", "exp", "nbf", "aud", "secret", "cert", // Decisions "revision", "labels", "decision_id", "bundles", "query", "mapped_result", "nd_builtin_cache", "erased", "masked", "requested_by", "timestamp", "metrics", "req_id", + + // Whitespace + " ", "\n", "\t", ) } @@ -705,518 +869,1034 @@ var stringToIntNumberTermMap = map[string]*Term{ "512": intNumberTerms[512], } +var intNumberValues = [...]Value{ + Number("0"), + Number("1"), + Number("2"), + Number("3"), + Number("4"), + Number("5"), + Number("6"), + Number("7"), + Number("8"), + Number("9"), + Number("10"), + Number("11"), + Number("12"), + Number("13"), + Number("14"), + Number("15"), + Number("16"), + Number("17"), + Number("18"), + Number("19"), + Number("20"), + Number("21"), + Number("22"), + Number("23"), + Number("24"), + Number("25"), + Number("26"), + Number("27"), + Number("28"), + Number("29"), + Number("30"), + 
Number("31"), + Number("32"), + Number("33"), + Number("34"), + Number("35"), + Number("36"), + Number("37"), + Number("38"), + Number("39"), + Number("40"), + Number("41"), + Number("42"), + Number("43"), + Number("44"), + Number("45"), + Number("46"), + Number("47"), + Number("48"), + Number("49"), + Number("50"), + Number("51"), + Number("52"), + Number("53"), + Number("54"), + Number("55"), + Number("56"), + Number("57"), + Number("58"), + Number("59"), + Number("60"), + Number("61"), + Number("62"), + Number("63"), + Number("64"), + Number("65"), + Number("66"), + Number("67"), + Number("68"), + Number("69"), + Number("70"), + Number("71"), + Number("72"), + Number("73"), + Number("74"), + Number("75"), + Number("76"), + Number("77"), + Number("78"), + Number("79"), + Number("80"), + Number("81"), + Number("82"), + Number("83"), + Number("84"), + Number("85"), + Number("86"), + Number("87"), + Number("88"), + Number("89"), + Number("90"), + Number("91"), + Number("92"), + Number("93"), + Number("94"), + Number("95"), + Number("96"), + Number("97"), + Number("98"), + Number("99"), + Number("100"), + Number("101"), + Number("102"), + Number("103"), + Number("104"), + Number("105"), + Number("106"), + Number("107"), + Number("108"), + Number("109"), + Number("110"), + Number("111"), + Number("112"), + Number("113"), + Number("114"), + Number("115"), + Number("116"), + Number("117"), + Number("118"), + Number("119"), + Number("120"), + Number("121"), + Number("122"), + Number("123"), + Number("124"), + Number("125"), + Number("126"), + Number("127"), + Number("128"), + Number("129"), + Number("130"), + Number("131"), + Number("132"), + Number("133"), + Number("134"), + Number("135"), + Number("136"), + Number("137"), + Number("138"), + Number("139"), + Number("140"), + Number("141"), + Number("142"), + Number("143"), + Number("144"), + Number("145"), + Number("146"), + Number("147"), + Number("148"), + Number("149"), + Number("150"), + Number("151"), + 
Number("152"), + Number("153"), + Number("154"), + Number("155"), + Number("156"), + Number("157"), + Number("158"), + Number("159"), + Number("160"), + Number("161"), + Number("162"), + Number("163"), + Number("164"), + Number("165"), + Number("166"), + Number("167"), + Number("168"), + Number("169"), + Number("170"), + Number("171"), + Number("172"), + Number("173"), + Number("174"), + Number("175"), + Number("176"), + Number("177"), + Number("178"), + Number("179"), + Number("180"), + Number("181"), + Number("182"), + Number("183"), + Number("184"), + Number("185"), + Number("186"), + Number("187"), + Number("188"), + Number("189"), + Number("190"), + Number("191"), + Number("192"), + Number("193"), + Number("194"), + Number("195"), + Number("196"), + Number("197"), + Number("198"), + Number("199"), + Number("200"), + Number("201"), + Number("202"), + Number("203"), + Number("204"), + Number("205"), + Number("206"), + Number("207"), + Number("208"), + Number("209"), + Number("210"), + Number("211"), + Number("212"), + Number("213"), + Number("214"), + Number("215"), + Number("216"), + Number("217"), + Number("218"), + Number("219"), + Number("220"), + Number("221"), + Number("222"), + Number("223"), + Number("224"), + Number("225"), + Number("226"), + Number("227"), + Number("228"), + Number("229"), + Number("230"), + Number("231"), + Number("232"), + Number("233"), + Number("234"), + Number("235"), + Number("236"), + Number("237"), + Number("238"), + Number("239"), + Number("240"), + Number("241"), + Number("242"), + Number("243"), + Number("244"), + Number("245"), + Number("246"), + Number("247"), + Number("248"), + Number("249"), + Number("250"), + Number("251"), + Number("252"), + Number("253"), + Number("254"), + Number("255"), + Number("256"), + Number("257"), + Number("258"), + Number("259"), + Number("260"), + Number("261"), + Number("262"), + Number("263"), + Number("264"), + Number("265"), + Number("266"), + Number("267"), + Number("268"), + 
Number("269"), + Number("270"), + Number("271"), + Number("272"), + Number("273"), + Number("274"), + Number("275"), + Number("276"), + Number("277"), + Number("278"), + Number("279"), + Number("280"), + Number("281"), + Number("282"), + Number("283"), + Number("284"), + Number("285"), + Number("286"), + Number("287"), + Number("288"), + Number("289"), + Number("290"), + Number("291"), + Number("292"), + Number("293"), + Number("294"), + Number("295"), + Number("296"), + Number("297"), + Number("298"), + Number("299"), + Number("300"), + Number("301"), + Number("302"), + Number("303"), + Number("304"), + Number("305"), + Number("306"), + Number("307"), + Number("308"), + Number("309"), + Number("310"), + Number("311"), + Number("312"), + Number("313"), + Number("314"), + Number("315"), + Number("316"), + Number("317"), + Number("318"), + Number("319"), + Number("320"), + Number("321"), + Number("322"), + Number("323"), + Number("324"), + Number("325"), + Number("326"), + Number("327"), + Number("328"), + Number("329"), + Number("330"), + Number("331"), + Number("332"), + Number("333"), + Number("334"), + Number("335"), + Number("336"), + Number("337"), + Number("338"), + Number("339"), + Number("340"), + Number("341"), + Number("342"), + Number("343"), + Number("344"), + Number("345"), + Number("346"), + Number("347"), + Number("348"), + Number("349"), + Number("350"), + Number("351"), + Number("352"), + Number("353"), + Number("354"), + Number("355"), + Number("356"), + Number("357"), + Number("358"), + Number("359"), + Number("360"), + Number("361"), + Number("362"), + Number("363"), + Number("364"), + Number("365"), + Number("366"), + Number("367"), + Number("368"), + Number("369"), + Number("370"), + Number("371"), + Number("372"), + Number("373"), + Number("374"), + Number("375"), + Number("376"), + Number("377"), + Number("378"), + Number("379"), + Number("380"), + Number("381"), + Number("382"), + Number("383"), + Number("384"), + Number("385"), + 
Number("386"), + Number("387"), + Number("388"), + Number("389"), + Number("390"), + Number("391"), + Number("392"), + Number("393"), + Number("394"), + Number("395"), + Number("396"), + Number("397"), + Number("398"), + Number("399"), + Number("400"), + Number("401"), + Number("402"), + Number("403"), + Number("404"), + Number("405"), + Number("406"), + Number("407"), + Number("408"), + Number("409"), + Number("410"), + Number("411"), + Number("412"), + Number("413"), + Number("414"), + Number("415"), + Number("416"), + Number("417"), + Number("418"), + Number("419"), + Number("420"), + Number("421"), + Number("422"), + Number("423"), + Number("424"), + Number("425"), + Number("426"), + Number("427"), + Number("428"), + Number("429"), + Number("430"), + Number("431"), + Number("432"), + Number("433"), + Number("434"), + Number("435"), + Number("436"), + Number("437"), + Number("438"), + Number("439"), + Number("440"), + Number("441"), + Number("442"), + Number("443"), + Number("444"), + Number("445"), + Number("446"), + Number("447"), + Number("448"), + Number("449"), + Number("450"), + Number("451"), + Number("452"), + Number("453"), + Number("454"), + Number("455"), + Number("456"), + Number("457"), + Number("458"), + Number("459"), + Number("460"), + Number("461"), + Number("462"), + Number("463"), + Number("464"), + Number("465"), + Number("466"), + Number("467"), + Number("468"), + Number("469"), + Number("470"), + Number("471"), + Number("472"), + Number("473"), + Number("474"), + Number("475"), + Number("476"), + Number("477"), + Number("478"), + Number("479"), + Number("480"), + Number("481"), + Number("482"), + Number("483"), + Number("484"), + Number("485"), + Number("486"), + Number("487"), + Number("488"), + Number("489"), + Number("490"), + Number("491"), + Number("492"), + Number("493"), + Number("494"), + Number("495"), + Number("496"), + Number("497"), + Number("498"), + Number("499"), + Number("500"), + Number("501"), + Number("502"), + 
Number("503"), + Number("504"), + Number("505"), + Number("506"), + Number("507"), + Number("508"), + Number("509"), + Number("510"), + Number("511"), + Number("512"), +} + var intNumberTerms = [...]*Term{ - {Value: Number("0")}, - {Value: Number("1")}, - {Value: Number("2")}, - {Value: Number("3")}, - {Value: Number("4")}, - {Value: Number("5")}, - {Value: Number("6")}, - {Value: Number("7")}, - {Value: Number("8")}, - {Value: Number("9")}, - {Value: Number("10")}, - {Value: Number("11")}, - {Value: Number("12")}, - {Value: Number("13")}, - {Value: Number("14")}, - {Value: Number("15")}, - {Value: Number("16")}, - {Value: Number("17")}, - {Value: Number("18")}, - {Value: Number("19")}, - {Value: Number("20")}, - {Value: Number("21")}, - {Value: Number("22")}, - {Value: Number("23")}, - {Value: Number("24")}, - {Value: Number("25")}, - {Value: Number("26")}, - {Value: Number("27")}, - {Value: Number("28")}, - {Value: Number("29")}, - {Value: Number("30")}, - {Value: Number("31")}, - {Value: Number("32")}, - {Value: Number("33")}, - {Value: Number("34")}, - {Value: Number("35")}, - {Value: Number("36")}, - {Value: Number("37")}, - {Value: Number("38")}, - {Value: Number("39")}, - {Value: Number("40")}, - {Value: Number("41")}, - {Value: Number("42")}, - {Value: Number("43")}, - {Value: Number("44")}, - {Value: Number("45")}, - {Value: Number("46")}, - {Value: Number("47")}, - {Value: Number("48")}, - {Value: Number("49")}, - {Value: Number("50")}, - {Value: Number("51")}, - {Value: Number("52")}, - {Value: Number("53")}, - {Value: Number("54")}, - {Value: Number("55")}, - {Value: Number("56")}, - {Value: Number("57")}, - {Value: Number("58")}, - {Value: Number("59")}, - {Value: Number("60")}, - {Value: Number("61")}, - {Value: Number("62")}, - {Value: Number("63")}, - {Value: Number("64")}, - {Value: Number("65")}, - {Value: Number("66")}, - {Value: Number("67")}, - {Value: Number("68")}, - {Value: Number("69")}, - {Value: Number("70")}, - {Value: Number("71")}, - 
{Value: Number("72")}, - {Value: Number("73")}, - {Value: Number("74")}, - {Value: Number("75")}, - {Value: Number("76")}, - {Value: Number("77")}, - {Value: Number("78")}, - {Value: Number("79")}, - {Value: Number("80")}, - {Value: Number("81")}, - {Value: Number("82")}, - {Value: Number("83")}, - {Value: Number("84")}, - {Value: Number("85")}, - {Value: Number("86")}, - {Value: Number("87")}, - {Value: Number("88")}, - {Value: Number("89")}, - {Value: Number("90")}, - {Value: Number("91")}, - {Value: Number("92")}, - {Value: Number("93")}, - {Value: Number("94")}, - {Value: Number("95")}, - {Value: Number("96")}, - {Value: Number("97")}, - {Value: Number("98")}, - {Value: Number("99")}, - {Value: Number("100")}, - {Value: Number("101")}, - {Value: Number("102")}, - {Value: Number("103")}, - {Value: Number("104")}, - {Value: Number("105")}, - {Value: Number("106")}, - {Value: Number("107")}, - {Value: Number("108")}, - {Value: Number("109")}, - {Value: Number("110")}, - {Value: Number("111")}, - {Value: Number("112")}, - {Value: Number("113")}, - {Value: Number("114")}, - {Value: Number("115")}, - {Value: Number("116")}, - {Value: Number("117")}, - {Value: Number("118")}, - {Value: Number("119")}, - {Value: Number("120")}, - {Value: Number("121")}, - {Value: Number("122")}, - {Value: Number("123")}, - {Value: Number("124")}, - {Value: Number("125")}, - {Value: Number("126")}, - {Value: Number("127")}, - {Value: Number("128")}, - {Value: Number("129")}, - {Value: Number("130")}, - {Value: Number("131")}, - {Value: Number("132")}, - {Value: Number("133")}, - {Value: Number("134")}, - {Value: Number("135")}, - {Value: Number("136")}, - {Value: Number("137")}, - {Value: Number("138")}, - {Value: Number("139")}, - {Value: Number("140")}, - {Value: Number("141")}, - {Value: Number("142")}, - {Value: Number("143")}, - {Value: Number("144")}, - {Value: Number("145")}, - {Value: Number("146")}, - {Value: Number("147")}, - {Value: Number("148")}, - {Value: Number("149")}, - 
{Value: Number("150")}, - {Value: Number("151")}, - {Value: Number("152")}, - {Value: Number("153")}, - {Value: Number("154")}, - {Value: Number("155")}, - {Value: Number("156")}, - {Value: Number("157")}, - {Value: Number("158")}, - {Value: Number("159")}, - {Value: Number("160")}, - {Value: Number("161")}, - {Value: Number("162")}, - {Value: Number("163")}, - {Value: Number("164")}, - {Value: Number("165")}, - {Value: Number("166")}, - {Value: Number("167")}, - {Value: Number("168")}, - {Value: Number("169")}, - {Value: Number("170")}, - {Value: Number("171")}, - {Value: Number("172")}, - {Value: Number("173")}, - {Value: Number("174")}, - {Value: Number("175")}, - {Value: Number("176")}, - {Value: Number("177")}, - {Value: Number("178")}, - {Value: Number("179")}, - {Value: Number("180")}, - {Value: Number("181")}, - {Value: Number("182")}, - {Value: Number("183")}, - {Value: Number("184")}, - {Value: Number("185")}, - {Value: Number("186")}, - {Value: Number("187")}, - {Value: Number("188")}, - {Value: Number("189")}, - {Value: Number("190")}, - {Value: Number("191")}, - {Value: Number("192")}, - {Value: Number("193")}, - {Value: Number("194")}, - {Value: Number("195")}, - {Value: Number("196")}, - {Value: Number("197")}, - {Value: Number("198")}, - {Value: Number("199")}, - {Value: Number("200")}, - {Value: Number("201")}, - {Value: Number("202")}, - {Value: Number("203")}, - {Value: Number("204")}, - {Value: Number("205")}, - {Value: Number("206")}, - {Value: Number("207")}, - {Value: Number("208")}, - {Value: Number("209")}, - {Value: Number("210")}, - {Value: Number("211")}, - {Value: Number("212")}, - {Value: Number("213")}, - {Value: Number("214")}, - {Value: Number("215")}, - {Value: Number("216")}, - {Value: Number("217")}, - {Value: Number("218")}, - {Value: Number("219")}, - {Value: Number("220")}, - {Value: Number("221")}, - {Value: Number("222")}, - {Value: Number("223")}, - {Value: Number("224")}, - {Value: Number("225")}, - {Value: Number("226")}, 
- {Value: Number("227")}, - {Value: Number("228")}, - {Value: Number("229")}, - {Value: Number("230")}, - {Value: Number("231")}, - {Value: Number("232")}, - {Value: Number("233")}, - {Value: Number("234")}, - {Value: Number("235")}, - {Value: Number("236")}, - {Value: Number("237")}, - {Value: Number("238")}, - {Value: Number("239")}, - {Value: Number("240")}, - {Value: Number("241")}, - {Value: Number("242")}, - {Value: Number("243")}, - {Value: Number("244")}, - {Value: Number("245")}, - {Value: Number("246")}, - {Value: Number("247")}, - {Value: Number("248")}, - {Value: Number("249")}, - {Value: Number("250")}, - {Value: Number("251")}, - {Value: Number("252")}, - {Value: Number("253")}, - {Value: Number("254")}, - {Value: Number("255")}, - {Value: Number("256")}, - {Value: Number("257")}, - {Value: Number("258")}, - {Value: Number("259")}, - {Value: Number("260")}, - {Value: Number("261")}, - {Value: Number("262")}, - {Value: Number("263")}, - {Value: Number("264")}, - {Value: Number("265")}, - {Value: Number("266")}, - {Value: Number("267")}, - {Value: Number("268")}, - {Value: Number("269")}, - {Value: Number("270")}, - {Value: Number("271")}, - {Value: Number("272")}, - {Value: Number("273")}, - {Value: Number("274")}, - {Value: Number("275")}, - {Value: Number("276")}, - {Value: Number("277")}, - {Value: Number("278")}, - {Value: Number("279")}, - {Value: Number("280")}, - {Value: Number("281")}, - {Value: Number("282")}, - {Value: Number("283")}, - {Value: Number("284")}, - {Value: Number("285")}, - {Value: Number("286")}, - {Value: Number("287")}, - {Value: Number("288")}, - {Value: Number("289")}, - {Value: Number("290")}, - {Value: Number("291")}, - {Value: Number("292")}, - {Value: Number("293")}, - {Value: Number("294")}, - {Value: Number("295")}, - {Value: Number("296")}, - {Value: Number("297")}, - {Value: Number("298")}, - {Value: Number("299")}, - {Value: Number("300")}, - {Value: Number("301")}, - {Value: Number("302")}, - {Value: 
Number("303")}, - {Value: Number("304")}, - {Value: Number("305")}, - {Value: Number("306")}, - {Value: Number("307")}, - {Value: Number("308")}, - {Value: Number("309")}, - {Value: Number("310")}, - {Value: Number("311")}, - {Value: Number("312")}, - {Value: Number("313")}, - {Value: Number("314")}, - {Value: Number("315")}, - {Value: Number("316")}, - {Value: Number("317")}, - {Value: Number("318")}, - {Value: Number("319")}, - {Value: Number("320")}, - {Value: Number("321")}, - {Value: Number("322")}, - {Value: Number("323")}, - {Value: Number("324")}, - {Value: Number("325")}, - {Value: Number("326")}, - {Value: Number("327")}, - {Value: Number("328")}, - {Value: Number("329")}, - {Value: Number("330")}, - {Value: Number("331")}, - {Value: Number("332")}, - {Value: Number("333")}, - {Value: Number("334")}, - {Value: Number("335")}, - {Value: Number("336")}, - {Value: Number("337")}, - {Value: Number("338")}, - {Value: Number("339")}, - {Value: Number("340")}, - {Value: Number("341")}, - {Value: Number("342")}, - {Value: Number("343")}, - {Value: Number("344")}, - {Value: Number("345")}, - {Value: Number("346")}, - {Value: Number("347")}, - {Value: Number("348")}, - {Value: Number("349")}, - {Value: Number("350")}, - {Value: Number("351")}, - {Value: Number("352")}, - {Value: Number("353")}, - {Value: Number("354")}, - {Value: Number("355")}, - {Value: Number("356")}, - {Value: Number("357")}, - {Value: Number("358")}, - {Value: Number("359")}, - {Value: Number("360")}, - {Value: Number("361")}, - {Value: Number("362")}, - {Value: Number("363")}, - {Value: Number("364")}, - {Value: Number("365")}, - {Value: Number("366")}, - {Value: Number("367")}, - {Value: Number("368")}, - {Value: Number("369")}, - {Value: Number("370")}, - {Value: Number("371")}, - {Value: Number("372")}, - {Value: Number("373")}, - {Value: Number("374")}, - {Value: Number("375")}, - {Value: Number("376")}, - {Value: Number("377")}, - {Value: Number("378")}, - {Value: Number("379")}, - 
{Value: Number("380")}, - {Value: Number("381")}, - {Value: Number("382")}, - {Value: Number("383")}, - {Value: Number("384")}, - {Value: Number("385")}, - {Value: Number("386")}, - {Value: Number("387")}, - {Value: Number("388")}, - {Value: Number("389")}, - {Value: Number("390")}, - {Value: Number("391")}, - {Value: Number("392")}, - {Value: Number("393")}, - {Value: Number("394")}, - {Value: Number("395")}, - {Value: Number("396")}, - {Value: Number("397")}, - {Value: Number("398")}, - {Value: Number("399")}, - {Value: Number("400")}, - {Value: Number("401")}, - {Value: Number("402")}, - {Value: Number("403")}, - {Value: Number("404")}, - {Value: Number("405")}, - {Value: Number("406")}, - {Value: Number("407")}, - {Value: Number("408")}, - {Value: Number("409")}, - {Value: Number("410")}, - {Value: Number("411")}, - {Value: Number("412")}, - {Value: Number("413")}, - {Value: Number("414")}, - {Value: Number("415")}, - {Value: Number("416")}, - {Value: Number("417")}, - {Value: Number("418")}, - {Value: Number("419")}, - {Value: Number("420")}, - {Value: Number("421")}, - {Value: Number("422")}, - {Value: Number("423")}, - {Value: Number("424")}, - {Value: Number("425")}, - {Value: Number("426")}, - {Value: Number("427")}, - {Value: Number("428")}, - {Value: Number("429")}, - {Value: Number("430")}, - {Value: Number("431")}, - {Value: Number("432")}, - {Value: Number("433")}, - {Value: Number("434")}, - {Value: Number("435")}, - {Value: Number("436")}, - {Value: Number("437")}, - {Value: Number("438")}, - {Value: Number("439")}, - {Value: Number("440")}, - {Value: Number("441")}, - {Value: Number("442")}, - {Value: Number("443")}, - {Value: Number("444")}, - {Value: Number("445")}, - {Value: Number("446")}, - {Value: Number("447")}, - {Value: Number("448")}, - {Value: Number("449")}, - {Value: Number("450")}, - {Value: Number("451")}, - {Value: Number("452")}, - {Value: Number("453")}, - {Value: Number("454")}, - {Value: Number("455")}, - {Value: Number("456")}, 
- {Value: Number("457")}, - {Value: Number("458")}, - {Value: Number("459")}, - {Value: Number("460")}, - {Value: Number("461")}, - {Value: Number("462")}, - {Value: Number("463")}, - {Value: Number("464")}, - {Value: Number("465")}, - {Value: Number("466")}, - {Value: Number("467")}, - {Value: Number("468")}, - {Value: Number("469")}, - {Value: Number("470")}, - {Value: Number("471")}, - {Value: Number("472")}, - {Value: Number("473")}, - {Value: Number("474")}, - {Value: Number("475")}, - {Value: Number("476")}, - {Value: Number("477")}, - {Value: Number("478")}, - {Value: Number("479")}, - {Value: Number("480")}, - {Value: Number("481")}, - {Value: Number("482")}, - {Value: Number("483")}, - {Value: Number("484")}, - {Value: Number("485")}, - {Value: Number("486")}, - {Value: Number("487")}, - {Value: Number("488")}, - {Value: Number("489")}, - {Value: Number("490")}, - {Value: Number("491")}, - {Value: Number("492")}, - {Value: Number("493")}, - {Value: Number("494")}, - {Value: Number("495")}, - {Value: Number("496")}, - {Value: Number("497")}, - {Value: Number("498")}, - {Value: Number("499")}, - {Value: Number("500")}, - {Value: Number("501")}, - {Value: Number("502")}, - {Value: Number("503")}, - {Value: Number("504")}, - {Value: Number("505")}, - {Value: Number("506")}, - {Value: Number("507")}, - {Value: Number("508")}, - {Value: Number("509")}, - {Value: Number("510")}, - {Value: Number("511")}, - {Value: Number("512")}, + {Value: intNumberValues[0]}, + {Value: intNumberValues[1]}, + {Value: intNumberValues[2]}, + {Value: intNumberValues[3]}, + {Value: intNumberValues[4]}, + {Value: intNumberValues[5]}, + {Value: intNumberValues[6]}, + {Value: intNumberValues[7]}, + {Value: intNumberValues[8]}, + {Value: intNumberValues[9]}, + {Value: intNumberValues[10]}, + {Value: intNumberValues[11]}, + {Value: intNumberValues[12]}, + {Value: intNumberValues[13]}, + {Value: intNumberValues[14]}, + {Value: intNumberValues[15]}, + {Value: intNumberValues[16]}, + {Value: 
intNumberValues[17]}, + {Value: intNumberValues[18]}, + {Value: intNumberValues[19]}, + {Value: intNumberValues[20]}, + {Value: intNumberValues[21]}, + {Value: intNumberValues[22]}, + {Value: intNumberValues[23]}, + {Value: intNumberValues[24]}, + {Value: intNumberValues[25]}, + {Value: intNumberValues[26]}, + {Value: intNumberValues[27]}, + {Value: intNumberValues[28]}, + {Value: intNumberValues[29]}, + {Value: intNumberValues[30]}, + {Value: intNumberValues[31]}, + {Value: intNumberValues[32]}, + {Value: intNumberValues[33]}, + {Value: intNumberValues[34]}, + {Value: intNumberValues[35]}, + {Value: intNumberValues[36]}, + {Value: intNumberValues[37]}, + {Value: intNumberValues[38]}, + {Value: intNumberValues[39]}, + {Value: intNumberValues[40]}, + {Value: intNumberValues[41]}, + {Value: intNumberValues[42]}, + {Value: intNumberValues[43]}, + {Value: intNumberValues[44]}, + {Value: intNumberValues[45]}, + {Value: intNumberValues[46]}, + {Value: intNumberValues[47]}, + {Value: intNumberValues[48]}, + {Value: intNumberValues[49]}, + {Value: intNumberValues[50]}, + {Value: intNumberValues[51]}, + {Value: intNumberValues[52]}, + {Value: intNumberValues[53]}, + {Value: intNumberValues[54]}, + {Value: intNumberValues[55]}, + {Value: intNumberValues[56]}, + {Value: intNumberValues[57]}, + {Value: intNumberValues[58]}, + {Value: intNumberValues[59]}, + {Value: intNumberValues[60]}, + {Value: intNumberValues[61]}, + {Value: intNumberValues[62]}, + {Value: intNumberValues[63]}, + {Value: intNumberValues[64]}, + {Value: intNumberValues[65]}, + {Value: intNumberValues[66]}, + {Value: intNumberValues[67]}, + {Value: intNumberValues[68]}, + {Value: intNumberValues[69]}, + {Value: intNumberValues[70]}, + {Value: intNumberValues[71]}, + {Value: intNumberValues[72]}, + {Value: intNumberValues[73]}, + {Value: intNumberValues[74]}, + {Value: intNumberValues[75]}, + {Value: intNumberValues[76]}, + {Value: intNumberValues[77]}, + {Value: intNumberValues[78]}, + {Value: 
intNumberValues[79]}, + {Value: intNumberValues[80]}, + {Value: intNumberValues[81]}, + {Value: intNumberValues[82]}, + {Value: intNumberValues[83]}, + {Value: intNumberValues[84]}, + {Value: intNumberValues[85]}, + {Value: intNumberValues[86]}, + {Value: intNumberValues[87]}, + {Value: intNumberValues[88]}, + {Value: intNumberValues[89]}, + {Value: intNumberValues[90]}, + {Value: intNumberValues[91]}, + {Value: intNumberValues[92]}, + {Value: intNumberValues[93]}, + {Value: intNumberValues[94]}, + {Value: intNumberValues[95]}, + {Value: intNumberValues[96]}, + {Value: intNumberValues[97]}, + {Value: intNumberValues[98]}, + {Value: intNumberValues[99]}, + {Value: intNumberValues[100]}, + {Value: intNumberValues[101]}, + {Value: intNumberValues[102]}, + {Value: intNumberValues[103]}, + {Value: intNumberValues[104]}, + {Value: intNumberValues[105]}, + {Value: intNumberValues[106]}, + {Value: intNumberValues[107]}, + {Value: intNumberValues[108]}, + {Value: intNumberValues[109]}, + {Value: intNumberValues[110]}, + {Value: intNumberValues[111]}, + {Value: intNumberValues[112]}, + {Value: intNumberValues[113]}, + {Value: intNumberValues[114]}, + {Value: intNumberValues[115]}, + {Value: intNumberValues[116]}, + {Value: intNumberValues[117]}, + {Value: intNumberValues[118]}, + {Value: intNumberValues[119]}, + {Value: intNumberValues[120]}, + {Value: intNumberValues[121]}, + {Value: intNumberValues[122]}, + {Value: intNumberValues[123]}, + {Value: intNumberValues[124]}, + {Value: intNumberValues[125]}, + {Value: intNumberValues[126]}, + {Value: intNumberValues[127]}, + {Value: intNumberValues[128]}, + {Value: intNumberValues[129]}, + {Value: intNumberValues[130]}, + {Value: intNumberValues[131]}, + {Value: intNumberValues[132]}, + {Value: intNumberValues[133]}, + {Value: intNumberValues[134]}, + {Value: intNumberValues[135]}, + {Value: intNumberValues[136]}, + {Value: intNumberValues[137]}, + {Value: intNumberValues[138]}, + {Value: intNumberValues[139]}, + {Value: 
intNumberValues[140]}, + {Value: intNumberValues[141]}, + {Value: intNumberValues[142]}, + {Value: intNumberValues[143]}, + {Value: intNumberValues[144]}, + {Value: intNumberValues[145]}, + {Value: intNumberValues[146]}, + {Value: intNumberValues[147]}, + {Value: intNumberValues[148]}, + {Value: intNumberValues[149]}, + {Value: intNumberValues[150]}, + {Value: intNumberValues[151]}, + {Value: intNumberValues[152]}, + {Value: intNumberValues[153]}, + {Value: intNumberValues[154]}, + {Value: intNumberValues[155]}, + {Value: intNumberValues[156]}, + {Value: intNumberValues[157]}, + {Value: intNumberValues[158]}, + {Value: intNumberValues[159]}, + {Value: intNumberValues[160]}, + {Value: intNumberValues[161]}, + {Value: intNumberValues[162]}, + {Value: intNumberValues[163]}, + {Value: intNumberValues[164]}, + {Value: intNumberValues[165]}, + {Value: intNumberValues[166]}, + {Value: intNumberValues[167]}, + {Value: intNumberValues[168]}, + {Value: intNumberValues[169]}, + {Value: intNumberValues[170]}, + {Value: intNumberValues[171]}, + {Value: intNumberValues[172]}, + {Value: intNumberValues[173]}, + {Value: intNumberValues[174]}, + {Value: intNumberValues[175]}, + {Value: intNumberValues[176]}, + {Value: intNumberValues[177]}, + {Value: intNumberValues[178]}, + {Value: intNumberValues[179]}, + {Value: intNumberValues[180]}, + {Value: intNumberValues[181]}, + {Value: intNumberValues[182]}, + {Value: intNumberValues[183]}, + {Value: intNumberValues[184]}, + {Value: intNumberValues[185]}, + {Value: intNumberValues[186]}, + {Value: intNumberValues[187]}, + {Value: intNumberValues[188]}, + {Value: intNumberValues[189]}, + {Value: intNumberValues[190]}, + {Value: intNumberValues[191]}, + {Value: intNumberValues[192]}, + {Value: intNumberValues[193]}, + {Value: intNumberValues[194]}, + {Value: intNumberValues[195]}, + {Value: intNumberValues[196]}, + {Value: intNumberValues[197]}, + {Value: intNumberValues[198]}, + {Value: intNumberValues[199]}, + {Value: 
intNumberValues[200]}, + {Value: intNumberValues[201]}, + {Value: intNumberValues[202]}, + {Value: intNumberValues[203]}, + {Value: intNumberValues[204]}, + {Value: intNumberValues[205]}, + {Value: intNumberValues[206]}, + {Value: intNumberValues[207]}, + {Value: intNumberValues[208]}, + {Value: intNumberValues[209]}, + {Value: intNumberValues[210]}, + {Value: intNumberValues[211]}, + {Value: intNumberValues[212]}, + {Value: intNumberValues[213]}, + {Value: intNumberValues[214]}, + {Value: intNumberValues[215]}, + {Value: intNumberValues[216]}, + {Value: intNumberValues[217]}, + {Value: intNumberValues[218]}, + {Value: intNumberValues[219]}, + {Value: intNumberValues[220]}, + {Value: intNumberValues[221]}, + {Value: intNumberValues[222]}, + {Value: intNumberValues[223]}, + {Value: intNumberValues[224]}, + {Value: intNumberValues[225]}, + {Value: intNumberValues[226]}, + {Value: intNumberValues[227]}, + {Value: intNumberValues[228]}, + {Value: intNumberValues[229]}, + {Value: intNumberValues[230]}, + {Value: intNumberValues[231]}, + {Value: intNumberValues[232]}, + {Value: intNumberValues[233]}, + {Value: intNumberValues[234]}, + {Value: intNumberValues[235]}, + {Value: intNumberValues[236]}, + {Value: intNumberValues[237]}, + {Value: intNumberValues[238]}, + {Value: intNumberValues[239]}, + {Value: intNumberValues[240]}, + {Value: intNumberValues[241]}, + {Value: intNumberValues[242]}, + {Value: intNumberValues[243]}, + {Value: intNumberValues[244]}, + {Value: intNumberValues[245]}, + {Value: intNumberValues[246]}, + {Value: intNumberValues[247]}, + {Value: intNumberValues[248]}, + {Value: intNumberValues[249]}, + {Value: intNumberValues[250]}, + {Value: intNumberValues[251]}, + {Value: intNumberValues[252]}, + {Value: intNumberValues[253]}, + {Value: intNumberValues[254]}, + {Value: intNumberValues[255]}, + {Value: intNumberValues[256]}, + {Value: intNumberValues[257]}, + {Value: intNumberValues[258]}, + {Value: intNumberValues[259]}, + {Value: 
intNumberValues[260]}, + {Value: intNumberValues[261]}, + {Value: intNumberValues[262]}, + {Value: intNumberValues[263]}, + {Value: intNumberValues[264]}, + {Value: intNumberValues[265]}, + {Value: intNumberValues[266]}, + {Value: intNumberValues[267]}, + {Value: intNumberValues[268]}, + {Value: intNumberValues[269]}, + {Value: intNumberValues[270]}, + {Value: intNumberValues[271]}, + {Value: intNumberValues[272]}, + {Value: intNumberValues[273]}, + {Value: intNumberValues[274]}, + {Value: intNumberValues[275]}, + {Value: intNumberValues[276]}, + {Value: intNumberValues[277]}, + {Value: intNumberValues[278]}, + {Value: intNumberValues[279]}, + {Value: intNumberValues[280]}, + {Value: intNumberValues[281]}, + {Value: intNumberValues[282]}, + {Value: intNumberValues[283]}, + {Value: intNumberValues[284]}, + {Value: intNumberValues[285]}, + {Value: intNumberValues[286]}, + {Value: intNumberValues[287]}, + {Value: intNumberValues[288]}, + {Value: intNumberValues[289]}, + {Value: intNumberValues[290]}, + {Value: intNumberValues[291]}, + {Value: intNumberValues[292]}, + {Value: intNumberValues[293]}, + {Value: intNumberValues[294]}, + {Value: intNumberValues[295]}, + {Value: intNumberValues[296]}, + {Value: intNumberValues[297]}, + {Value: intNumberValues[298]}, + {Value: intNumberValues[299]}, + {Value: intNumberValues[300]}, + {Value: intNumberValues[301]}, + {Value: intNumberValues[302]}, + {Value: intNumberValues[303]}, + {Value: intNumberValues[304]}, + {Value: intNumberValues[305]}, + {Value: intNumberValues[306]}, + {Value: intNumberValues[307]}, + {Value: intNumberValues[308]}, + {Value: intNumberValues[309]}, + {Value: intNumberValues[310]}, + {Value: intNumberValues[311]}, + {Value: intNumberValues[312]}, + {Value: intNumberValues[313]}, + {Value: intNumberValues[314]}, + {Value: intNumberValues[315]}, + {Value: intNumberValues[316]}, + {Value: intNumberValues[317]}, + {Value: intNumberValues[318]}, + {Value: intNumberValues[319]}, + {Value: 
intNumberValues[320]}, + {Value: intNumberValues[321]}, + {Value: intNumberValues[322]}, + {Value: intNumberValues[323]}, + {Value: intNumberValues[324]}, + {Value: intNumberValues[325]}, + {Value: intNumberValues[326]}, + {Value: intNumberValues[327]}, + {Value: intNumberValues[328]}, + {Value: intNumberValues[329]}, + {Value: intNumberValues[330]}, + {Value: intNumberValues[331]}, + {Value: intNumberValues[332]}, + {Value: intNumberValues[333]}, + {Value: intNumberValues[334]}, + {Value: intNumberValues[335]}, + {Value: intNumberValues[336]}, + {Value: intNumberValues[337]}, + {Value: intNumberValues[338]}, + {Value: intNumberValues[339]}, + {Value: intNumberValues[340]}, + {Value: intNumberValues[341]}, + {Value: intNumberValues[342]}, + {Value: intNumberValues[343]}, + {Value: intNumberValues[344]}, + {Value: intNumberValues[345]}, + {Value: intNumberValues[346]}, + {Value: intNumberValues[347]}, + {Value: intNumberValues[348]}, + {Value: intNumberValues[349]}, + {Value: intNumberValues[350]}, + {Value: intNumberValues[351]}, + {Value: intNumberValues[352]}, + {Value: intNumberValues[353]}, + {Value: intNumberValues[354]}, + {Value: intNumberValues[355]}, + {Value: intNumberValues[356]}, + {Value: intNumberValues[357]}, + {Value: intNumberValues[358]}, + {Value: intNumberValues[359]}, + {Value: intNumberValues[360]}, + {Value: intNumberValues[361]}, + {Value: intNumberValues[362]}, + {Value: intNumberValues[363]}, + {Value: intNumberValues[364]}, + {Value: intNumberValues[365]}, + {Value: intNumberValues[366]}, + {Value: intNumberValues[367]}, + {Value: intNumberValues[368]}, + {Value: intNumberValues[369]}, + {Value: intNumberValues[370]}, + {Value: intNumberValues[371]}, + {Value: intNumberValues[372]}, + {Value: intNumberValues[373]}, + {Value: intNumberValues[374]}, + {Value: intNumberValues[375]}, + {Value: intNumberValues[376]}, + {Value: intNumberValues[377]}, + {Value: intNumberValues[378]}, + {Value: intNumberValues[379]}, + {Value: 
intNumberValues[380]}, + {Value: intNumberValues[381]}, + {Value: intNumberValues[382]}, + {Value: intNumberValues[383]}, + {Value: intNumberValues[384]}, + {Value: intNumberValues[385]}, + {Value: intNumberValues[386]}, + {Value: intNumberValues[387]}, + {Value: intNumberValues[388]}, + {Value: intNumberValues[389]}, + {Value: intNumberValues[390]}, + {Value: intNumberValues[391]}, + {Value: intNumberValues[392]}, + {Value: intNumberValues[393]}, + {Value: intNumberValues[394]}, + {Value: intNumberValues[395]}, + {Value: intNumberValues[396]}, + {Value: intNumberValues[397]}, + {Value: intNumberValues[398]}, + {Value: intNumberValues[399]}, + {Value: intNumberValues[400]}, + {Value: intNumberValues[401]}, + {Value: intNumberValues[402]}, + {Value: intNumberValues[403]}, + {Value: intNumberValues[404]}, + {Value: intNumberValues[405]}, + {Value: intNumberValues[406]}, + {Value: intNumberValues[407]}, + {Value: intNumberValues[408]}, + {Value: intNumberValues[409]}, + {Value: intNumberValues[410]}, + {Value: intNumberValues[411]}, + {Value: intNumberValues[412]}, + {Value: intNumberValues[413]}, + {Value: intNumberValues[414]}, + {Value: intNumberValues[415]}, + {Value: intNumberValues[416]}, + {Value: intNumberValues[417]}, + {Value: intNumberValues[418]}, + {Value: intNumberValues[419]}, + {Value: intNumberValues[420]}, + {Value: intNumberValues[421]}, + {Value: intNumberValues[422]}, + {Value: intNumberValues[423]}, + {Value: intNumberValues[424]}, + {Value: intNumberValues[425]}, + {Value: intNumberValues[426]}, + {Value: intNumberValues[427]}, + {Value: intNumberValues[428]}, + {Value: intNumberValues[429]}, + {Value: intNumberValues[430]}, + {Value: intNumberValues[431]}, + {Value: intNumberValues[432]}, + {Value: intNumberValues[433]}, + {Value: intNumberValues[434]}, + {Value: intNumberValues[435]}, + {Value: intNumberValues[436]}, + {Value: intNumberValues[437]}, + {Value: intNumberValues[438]}, + {Value: intNumberValues[439]}, + {Value: 
intNumberValues[440]}, + {Value: intNumberValues[441]}, + {Value: intNumberValues[442]}, + {Value: intNumberValues[443]}, + {Value: intNumberValues[444]}, + {Value: intNumberValues[445]}, + {Value: intNumberValues[446]}, + {Value: intNumberValues[447]}, + {Value: intNumberValues[448]}, + {Value: intNumberValues[449]}, + {Value: intNumberValues[450]}, + {Value: intNumberValues[451]}, + {Value: intNumberValues[452]}, + {Value: intNumberValues[453]}, + {Value: intNumberValues[454]}, + {Value: intNumberValues[455]}, + {Value: intNumberValues[456]}, + {Value: intNumberValues[457]}, + {Value: intNumberValues[458]}, + {Value: intNumberValues[459]}, + {Value: intNumberValues[460]}, + {Value: intNumberValues[461]}, + {Value: intNumberValues[462]}, + {Value: intNumberValues[463]}, + {Value: intNumberValues[464]}, + {Value: intNumberValues[465]}, + {Value: intNumberValues[466]}, + {Value: intNumberValues[467]}, + {Value: intNumberValues[468]}, + {Value: intNumberValues[469]}, + {Value: intNumberValues[470]}, + {Value: intNumberValues[471]}, + {Value: intNumberValues[472]}, + {Value: intNumberValues[473]}, + {Value: intNumberValues[474]}, + {Value: intNumberValues[475]}, + {Value: intNumberValues[476]}, + {Value: intNumberValues[477]}, + {Value: intNumberValues[478]}, + {Value: intNumberValues[479]}, + {Value: intNumberValues[480]}, + {Value: intNumberValues[481]}, + {Value: intNumberValues[482]}, + {Value: intNumberValues[483]}, + {Value: intNumberValues[484]}, + {Value: intNumberValues[485]}, + {Value: intNumberValues[486]}, + {Value: intNumberValues[487]}, + {Value: intNumberValues[488]}, + {Value: intNumberValues[489]}, + {Value: intNumberValues[490]}, + {Value: intNumberValues[491]}, + {Value: intNumberValues[492]}, + {Value: intNumberValues[493]}, + {Value: intNumberValues[494]}, + {Value: intNumberValues[495]}, + {Value: intNumberValues[496]}, + {Value: intNumberValues[497]}, + {Value: intNumberValues[498]}, + {Value: intNumberValues[499]}, + {Value: 
intNumberValues[500]}, + {Value: intNumberValues[501]}, + {Value: intNumberValues[502]}, + {Value: intNumberValues[503]}, + {Value: intNumberValues[504]}, + {Value: intNumberValues[505]}, + {Value: intNumberValues[506]}, + {Value: intNumberValues[507]}, + {Value: intNumberValues[508]}, + {Value: intNumberValues[509]}, + {Value: intNumberValues[510]}, + {Value: intNumberValues[511]}, + {Value: intNumberValues[512]}, } diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go b/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go index 6d1b16cdfc..6431b02ce9 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go @@ -8,6 +8,7 @@ import ( "fmt" astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/util" ) // Location records a position in source code @@ -28,10 +29,10 @@ func NewLocation(text []byte, file string, row int, col int) *Location { // Equal checks if two locations are equal to each other. func (loc *Location) Equal(other *Location) bool { - return bytes.Equal(loc.Text, other.Text) && - loc.File == other.File && + return loc.File == other.File && loc.Row == other.Row && - loc.Col == other.Col + loc.Col == other.Col && + bytes.Equal(loc.Text, other.Text) } // Errorf returns a new error value with a message formatted to include the location @@ -57,13 +58,35 @@ func (loc *Location) Format(f string, a ...any) string { } func (loc *Location) String() string { - if len(loc.File) > 0 { - return fmt.Sprintf("%v:%v", loc.File, loc.Row) + buf, _ := loc.AppendText(make([]byte, 0, loc.StringLength())) + return util.ByteSliceToString(buf) +} + +func (loc *Location) AppendText(buf []byte) ([]byte, error) { + if loc != nil { + switch { + case len(loc.File) > 0: + buf = util.AppendInt(append(append(buf, loc.File...), ':'), loc.Row) + case len(loc.Text) > 0: + buf = append(buf, loc.Text...) 
+ default: + buf = util.AppendInt(append(util.AppendInt(buf, loc.Row), ':'), loc.Col) + } } - if len(loc.Text) > 0 { - return string(loc.Text) + return buf, nil +} + +func (loc *Location) StringLength() (n int) { + if loc != nil { + if l := len(loc.File); l > 0 { + n = l + 1 + util.NumDigitsInt(loc.Row) + } else if l := len(loc.Text); l > 0 { + n = l + } else { + n = util.NumDigitsInt(loc.Row) + 1 + util.NumDigitsInt(loc.Col) + } } - return fmt.Sprintf("%v:%v", loc.Row, loc.Col) + return n } // Compare returns -1, 0, or 1 to indicate if this loc is less than, equal to, @@ -71,7 +94,7 @@ func (loc *Location) String() string { // column of the Location (but not on the text.) Nil locations are greater than // non-nil locations. func (loc *Location) Compare(other *Location) int { - if loc == nil && other == nil { + if loc == other { return 0 } else if loc == nil { return 1 diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go b/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go index d897952eae..9e52b89a67 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go @@ -20,12 +20,13 @@ import ( "strings" "unicode/utf8" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" "github.com/open-policy-agent/opa/v1/ast/internal/scanner" "github.com/open-policy-agent/opa/v1/ast/internal/tokens" astJSON "github.com/open-policy-agent/opa/v1/ast/json" "github.com/open-policy-agent/opa/v1/ast/location" + "github.com/open-policy-agent/opa/v1/util" ) // DefaultMaxParsingRecursionDepth is the default maximum recursion @@ -57,6 +58,25 @@ const ( RegoV1 ) +var ( + // this is the name to use for instantiating an empty set, e.g., `set()`. 
+ setConstructor = RefTerm(VarTerm("set")) + + preAllocWildcards = [...]Value{ + Var("$0"), Var("$1"), Var("$2"), Var("$3"), Var("$4"), Var("$5"), + Var("$6"), Var("$7"), Var("$8"), Var("$9"), Var("$10"), + } + + // use static references to avoid allocations, and + // copy them to the call term only when needed + memberWithKeyRef = MemberWithKey.Ref() + memberRef = Member.Ref() + + newlineBytes = []byte{'\n'} + metadataBytes = []byte("METADATA") + metadataParserPool = util.NewSyncPool[metadataParser]() +) + func (v RegoVersion) Int() int { if v == RegoV1 { return 1 @@ -88,17 +108,17 @@ func RegoVersionFromInt(i int) RegoVersion { // can do efficient shallow copies of these values when doing a // save() and restore(). type state struct { + errors Errors + comments []*Comment + hints []string s *scanner.Scanner + loc Location + lit string lastEnd int - skippedNL bool - tok tokens.Token tokEnd int - lit string - loc Location - errors Errors - hints []string - comments []*Comment wildcard int + tok tokens.Token + skippedNL bool } func (s *state) String() string { @@ -451,7 +471,6 @@ func (p *Parser) Parse() ([]Statement, []*Comment, Errors) { // next type of statement. If a statement can be parsed, continue from that // point trying to parse packages, imports, etc. in the same order. 
for p.s.tok != tokens.EOF { - s := p.save() if pkg := p.parsePackage(); pkg != nil { @@ -512,12 +531,12 @@ func (p *Parser) Parse() ([]Statement, []*Comment, Errors) { } func (p *Parser) parseAnnotations(stmts []Statement) []Statement { - annotStmts, errs := parseAnnotations(p.s.comments) for _, err := range errs { p.error(err.Location, err.Message) } + stmts = slices.Grow(stmts, len(annotStmts)) for _, annotStmt := range annotStmts { stmts = append(stmts, annotStmt) } @@ -525,53 +544,54 @@ func (p *Parser) parseAnnotations(stmts []Statement) []Statement { return stmts } -func parseAnnotations(comments []*Comment) ([]*Annotations, Errors) { +func parseAnnotations(comments []*Comment) (stmts []*Annotations, errs Errors) { + numBlocks := CountFunc(comments, isMetadataComment) + if numBlocks == 0 { + return nil, nil + } - var hint = []byte("METADATA") - var curr *metadataParser - var blocks []*metadataParser + stmts = make([]*Annotations, 0, numBlocks) + mdp := metadataParserPool.Get() + if mdp.buf == nil { + mdp.buf = &bytes.Buffer{} + } for i := range comments { - if curr != nil { - if comments[i].Location.Row == comments[i-1].Location.Row+1 && comments[i].Location.Col == 1 { - curr.Append(comments[i]) - continue + if isMetadataComment(comments[i]) { // scan until end of block + mdp.Reset(comments[i].Location) + for i++; i < len(comments) && !blockBuster(comments[i], comments[i-1]); i++ { + mdp.Append(comments[i]) } - curr = nil - } - if bytes.HasPrefix(bytes.TrimSpace(comments[i].Text), hint) { - curr = newMetadataParser(comments[i].Location) - blocks = append(blocks, curr) - } - } - var stmts []*Annotations - var errs Errors - for _, b := range blocks { - a, err := b.Parse() - if err != nil { - errs = append(errs, &Error{ - Code: ParseErr, - Message: err.Error(), - Location: b.loc, - }) - } else { - stmts = append(stmts, a) + if a, err := mdp.Parse(); err != nil { + errs = append(errs, &Error{Code: ParseErr, Message: err.Error(), Location: mdp.loc}) + } else { + 
stmts = append(stmts, a) + } } } + metadataParserPool.Put(mdp) + return stmts, errs } -func (p *Parser) parsePackage() *Package { +func isMetadataComment(c *Comment) bool { + return c.Location.Col == 1 && bytes.HasPrefix(bytes.TrimSpace(c.Text), metadataBytes) +} - var pkg Package - pkg.SetLoc(p.s.Loc()) +func blockBuster(curr, prev *Comment) bool { // or endOfBlock, but the name was too good to pass up + return curr.Location.Col != 1 || curr.Location.Row-1 != prev.Location.Row +} +func (p *Parser) parsePackage() *Package { if p.s.tok != tokens.Package { return nil } + var pkg Package + pkg.SetLoc(p.s.Loc()) + p.scanWS() // Make sure we allow the first term of refs to be the 'package' keyword. @@ -633,14 +653,13 @@ func (p *Parser) parsePackage() *Package { } func (p *Parser) parseImport() *Import { - - var imp Import - imp.SetLoc(p.s.Loc()) - if p.s.tok != tokens.Import { return nil } + var imp Import + imp.SetLoc(p.s.Loc()) + p.scanWS() // Make sure we allow the first term of refs to be the 'import' keyword. 
@@ -952,7 +971,7 @@ func (p *Parser) parseRules() []*Rule { next.Head.keywords = rule.Head.keywords for i := range next.Head.Args { if v, ok := next.Head.Args[i].Value.(Var); ok && v.IsWildcard() { - next.Head.Args[i].Value = Var(p.genwildcard()) + next.Head.Args[i].Value = p.genwildcard() } } setLocRecursive(next.Head, loc) @@ -972,7 +991,7 @@ func (p *Parser) parseElse(head *Head) *Rule { rule.Head.generatedValue = false for i := range rule.Head.Args { if v, ok := rule.Head.Args[i].Value.(Var); ok && v.IsWildcard() { - rule.Head.Args[i].Value = Var(p.genwildcard()) + rule.Head.Args[i].Value = p.genwildcard() } } rule.Head.SetLoc(p.s.Loc()) @@ -1056,7 +1075,7 @@ func (p *Parser) parseHead(defaultRule bool) (*Head, bool) { return nil, false } - ref := p.parseTermFinish(term, true) + ref := p.parseHeadFinish(term, true) if ref == nil { p.illegal("expected rule head name") return nil, false @@ -1259,34 +1278,33 @@ func (p *Parser) parseLiteralExpr(negated bool) *Expr { return nil } } - // If we find a plain `every` identifier, attempt to parse an every expression, - // add hint if it succeeds. - if term, ok := expr.Terms.(*Term); ok && Var("every").Equal(term.Value) { - var hint bool - t := p.save() - p.restore(s) - if expr := p.futureParser().parseEvery(); expr != nil { - _, hint = expr.Terms.(*Every) - } - p.restore(t) - if hint { - p.hint("`import future.keywords.every` for `every x in xs { ... }` expressions") + + if p.isFutureKeyword("every") { + // If we find a plain `every` identifier, attempt to parse an every expression, + // add hint if it succeeds. + if term, ok := expr.Terms.(*Term); ok && Var("every").Equal(term.Value) { + var hint bool + t := p.save() + p.restore(s) + if expr := p.futureParser().parseEvery(); expr != nil { + _, hint = expr.Terms.(*Every) + } + p.restore(t) + if hint { + p.hint("`import future.keywords.every` for `every x in xs { ... 
}` expressions") + } } } - return expr } - return nil + return expr } func (p *Parser) parseWith() []*With { - withs := []*With{} for { + with := With{Location: p.s.Loc()} - with := With{ - Location: p.s.Loc(), - } p.scan() if p.s.tok != tokens.Ident { @@ -1368,26 +1386,28 @@ func (p *Parser) parseSome() *Expr { } p.restore(s) - s = p.save() // new copy for later - var hint bool - p.scan() - if term := p.futureParser().parseTermInfixCall(); term != nil { - if call, ok := term.Value.(Call); ok { - switch call[0].String() { - case Member.Name, MemberWithKey.Name: - hint = true + + if p.isFutureKeyword("in") { + s = p.save() // new copy for later + var hint bool + p.scan() + if term := p.futureParser().parseTermInfixCall(); term != nil { + if call, ok := term.Value.(Call); ok { + switch call[0].String() { + case Member.Name, MemberWithKey.Name: + hint = true + } } } - } - // go on as before, it's `some x[...]` or illegal - p.restore(s) - if hint { - p.hint("`import future.keywords.in` for `some x in xs` expressions") + // go on as before, it's `some x[...]` or illegal + p.restore(s) + if hint { + p.hint("`import future.keywords.in` for `some x in xs` expressions") + } } for { // collecting var args - p.scan() if p.s.tok != tokens.Ident { @@ -1521,11 +1541,6 @@ func (p *Parser) parseTermInfixCallInList() *Term { return p.parseTermIn(nil, false, p.s.loc.Offset) } -// use static references to avoid allocations, and -// copy them to the call term only when needed -var memberWithKeyRef = MemberWithKey.Ref() -var memberRef = Member.Ref() - func (p *Parser) parseTermIn(lhs *Term, keyVal bool, offset int) *Term { if !p.enter() { return nil @@ -1727,6 +1742,10 @@ func (p *Parser) parseTerm() *Term { term = p.parseNumber() case tokens.String: term = p.parseString() + case tokens.TemplateStringPart, tokens.TemplateStringEnd: + term = p.parseTemplateString(false) + case tokens.RawTemplateStringPart, tokens.RawTemplateStringEnd: + term = p.parseTemplateString(true) case 
tokens.Ident, tokens.Contains: // NOTE(sr): contains anywhere BUT in rule heads gets no special treatment term = p.parseVar() case tokens.LBrack: @@ -1758,7 +1777,7 @@ func (p *Parser) parseTermFinish(head *Term, skipws bool) *Term { return nil } offset := p.s.loc.Offset - p.doScan(skipws) + p.doScan(skipws, noScanOptions...) switch p.s.tok { case tokens.LParen, tokens.Dot, tokens.LBrack: @@ -1774,6 +1793,35 @@ func (p *Parser) parseTermFinish(head *Term, skipws bool) *Term { } } +func (p *Parser) parseHeadFinish(head *Term, skipws bool) *Term { + if head == nil { + return nil + } + offset := p.s.loc.Offset + p.scanWS() + + switch p.s.tok { + case tokens.Add, tokens.Sub, tokens.Mul, tokens.Quo, tokens.Rem, + tokens.And, tokens.Or, + tokens.Equal, tokens.Neq, tokens.Gt, tokens.Gte, tokens.Lt, tokens.Lte: + p.illegalToken() + case tokens.Whitespace: + p.doScan(skipws, noScanOptions...) + } + + switch p.s.tok { + case tokens.LParen, tokens.Dot, tokens.LBrack: + return p.parseRef(head, offset) + case tokens.Whitespace: + p.scan() + } + + if _, ok := head.Value.(Var); ok && RootDocumentNames.Contains(head) { + return RefTerm(head).SetLocation(head.Location) + } + return head +} + func (p *Parser) parseNumber() *Term { var prefix string loc := p.s.Loc() @@ -1845,17 +1893,20 @@ func (p *Parser) parseNumber() *Term { func (p *Parser) parseString() *Term { if p.s.lit[0] == '"' { if p.s.lit == "\"\"" { - return NewTerm(InternedEmptyString.Value).SetLocation(p.s.Loc()) + return NewTerm(InternedEmptyStringValue).SetLocation(p.s.Loc()) + } + + inner := p.s.lit[1 : len(p.s.lit)-1] + if !strings.ContainsRune(inner, '\\') { // nothing to un-escape + return StringTerm(inner).SetLocation(p.s.Loc()) } var s string - err := json.Unmarshal([]byte(p.s.lit), &s) - if err != nil { + if err := json.Unmarshal([]byte(p.s.lit), &s); err != nil { p.errorf(p.s.Loc(), "illegal string literal: %s", p.s.lit) return nil } - term := StringTerm(s).SetLocation(p.s.Loc()) - return term + return 
StringTerm(s).SetLocation(p.s.Loc()) } return p.parseRawString() } @@ -1864,12 +1915,122 @@ func (p *Parser) parseRawString() *Term { if len(p.s.lit) < 2 { return nil } - term := StringTerm(p.s.lit[1 : len(p.s.lit)-1]).SetLocation(p.s.Loc()) - return term + return StringTerm(p.s.lit[1 : len(p.s.lit)-1]).SetLocation(p.s.Loc()) } -// this is the name to use for instantiating an empty set, e.g., `set()`. -var setConstructor = RefTerm(VarTerm("set")) +func templateStringPartToStringLiteral(tok tokens.Token, lit string) (string, error) { + switch tok { + case tokens.TemplateStringPart, tokens.TemplateStringEnd: + inner := lit[1 : len(lit)-1] + if !strings.ContainsRune(inner, '\\') { // nothing to un-escape + return inner, nil + } + + buf := make([]byte, 0, len(inner)+2) + buf = append(buf, '"') + buf = append(buf, inner...) + buf = append(buf, '"') + var s string + if err := json.Unmarshal(buf, &s); err != nil { + return "", fmt.Errorf("illegal template-string part: %s", lit) + } + return s, nil + case tokens.RawTemplateStringPart, tokens.RawTemplateStringEnd: + return lit[1 : len(lit)-1], nil + default: + return "", errors.New("expected template-string part") + } +} + +func (p *Parser) parseTemplateString(multiLine bool) *Term { + loc := p.s.Loc() + + if !p.po.Capabilities.ContainsFeature(FeatureTemplateStrings) { + p.errorf(loc, "template strings are not supported by current capabilities") + return nil + } + + var parts []Node + + for { + s, err := templateStringPartToStringLiteral(p.s.tok, p.s.lit) + if err != nil { + p.error(p.s.Loc(), err.Error()) + return nil + } + + // Don't add empty strings + if len(s) > 0 { + parts = append(parts, StringTerm(s).SetLocation(p.s.Loc())) + } + + if p.s.tok == tokens.TemplateStringEnd || p.s.tok == tokens.RawTemplateStringEnd { + break + } + + numCommentsBefore := len(p.s.comments) + p.scan() + numCommentsAfter := len(p.s.comments) + + expr := p.parseLiteral() + if expr == nil { + p.error(p.s.Loc(), "invalid template-string 
expression") + return nil + } + + if expr.Negated { + p.errorf(expr.Loc(), "unexpected negation ('%s') in template-string expression", tokens.KeywordFor(tokens.Not)) + return nil + } + + // Note: Actually unification + if expr.IsEquality() { + p.errorf(expr.Loc(), "unexpected unification ('=') in template-string expression") + return nil + } + + if expr.IsAssignment() { + p.errorf(expr.Loc(), "unexpected assignment (':=') in template-string expression") + return nil + } + + if expr.IsEvery() { + p.errorf(expr.Loc(), "unexpected '%s' in template-string expression", tokens.KeywordFor(tokens.Every)) + return nil + } + + if expr.IsSome() { + p.errorf(expr.Loc(), "unexpected '%s' in template-string expression", tokens.KeywordFor(tokens.Some)) + return nil + } + + // FIXME: Can we optimize for collections and comprehensions too? To qualify, they must not contain refs or calls. + var nonOptional bool + if term, ok := expr.Terms.(*Term); ok && numCommentsAfter == numCommentsBefore { + switch term.Value.(type) { + case String, Number, Boolean, Null: + nonOptional = true + parts = append(parts, term) + } + } + + if !nonOptional { + parts = append(parts, expr) + } + + if p.s.tok != tokens.RBrace { + p.errorf(p.s.Loc(), "expected %s to end template string expression", tokens.RBrace) + return nil + } + + p.doScan(false, scanner.ContinueTemplateString(multiLine)) + } + + // When there are template-expressions, the initial location will only contain the text up to the first expression + loc.Text = p.s.Text(loc.Offset, p.s.tokEnd) + + return TemplateStringTerm(multiLine, parts...).SetLocation(loc) +} func (p *Parser) parseCall(operator *Term, offset int) (term *Term) { if !p.enter() { @@ -1948,7 +2109,7 @@ func (p *Parser) parseRef(head *Term, offset int) (term *Term) { term = p.parseRef(term, offset) } } - end = p.s.tokEnd + end = p.s.lastEnd return term case tokens.LBrack: p.scan() @@ -2012,7 +2173,6 @@ func (p *Parser) parseArray() (term *Term) { // Does this represent a set 
comprehension or a set containing binary OR // call? We resolve the ambiguity by prioritizing comprehensions. head := p.parseTerm() - if head == nil { return nil } @@ -2256,7 +2416,7 @@ func (p *Parser) parseTermList(end tokens.Token, r []*Term) []*Term { } continue default: - p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end)) + p.illegal("expected %q or %q", tokens.Comma, end) return nil } } @@ -2286,12 +2446,12 @@ func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term { } continue default: - p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end)) + p.illegal("expected %q or %q", tokens.Comma, end) return nil } } default: - p.illegal(fmt.Sprintf("expected %q", tokens.Colon)) + p.illegal("expected %q", tokens.Colon) return nil } } @@ -2301,7 +2461,8 @@ func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term { func (p *Parser) parseTermOp(values ...tokens.Token) *Term { if slices.Contains(values, p.s.tok) { - r := RefTerm(VarTerm(p.s.tok.String()).SetLocation(p.s.Loc())).SetLocation(p.s.Loc()) + loc := p.s.Loc() + r := RefTerm(VarTerm(p.s.tok.String()).SetLocation(loc)).SetLocation(loc) p.scan() return r } @@ -2311,11 +2472,12 @@ func (p *Parser) parseTermOp(values ...tokens.Token) *Term { func (p *Parser) parseTermOpName(ref Ref, values ...tokens.Token) *Term { if slices.Contains(values, p.s.tok) { cp := ref.Copy() + loc := p.s.Loc() for _, r := range cp { - r.SetLocation(p.s.Loc()) + r.SetLocation(loc) } t := RefTerm(cp...) 
- t.SetLocation(p.s.Loc()) + t.SetLocation(loc) p.scan() return t } @@ -2323,48 +2485,69 @@ func (p *Parser) parseTermOpName(ref Ref, values ...tokens.Token) *Term { } func (p *Parser) parseVar() *Term { - - s := p.s.lit - - term := VarTerm(s).SetLocation(p.s.Loc()) - - // Update wildcard values with unique identifiers - if term.Equal(Wildcard) { - term.Value = Var(p.genwildcard()) + if p.s.lit == WildcardString { + // Update wildcard values with unique identifiers + return NewTerm(p.genwildcard()).SetLocation(p.s.Loc()) } - return term + return VarTerm(p.s.lit).SetLocation(p.s.Loc()) } -func (p *Parser) genwildcard() string { - c := p.s.wildcard +func (p *Parser) genwildcard() Value { + var v Value + if p.s.wildcard < len(preAllocWildcards) { + v = preAllocWildcards[p.s.wildcard] + } else { + v = Var(WildcardPrefix + strconv.Itoa(p.s.wildcard)) + } p.s.wildcard++ - return fmt.Sprintf("%v%d", WildcardPrefix, c) -} -func (p *Parser) error(loc *location.Location, reason string) { - p.errorf(loc, "%s", reason) + return v } -func (p *Parser) errorf(loc *location.Location, f string, a ...any) { - msg := strings.Builder{} - msg.WriteString(fmt.Sprintf(f, a...)) - - switch len(p.s.hints) { +func writeHints(msg *strings.Builder, hints []string) { + switch len(hints) { case 0: // nothing to do case 1: msg.WriteString(" (hint: ") - msg.WriteString(p.s.hints[0]) - msg.WriteRune(')') + msg.WriteString(hints[0]) + msg.WriteByte(')') default: msg.WriteString(" (hints: ") - for i, h := range p.s.hints { + for i, h := range hints { if i > 0 { msg.WriteString(", ") } msg.WriteString(h) } - msg.WriteRune(')') + msg.WriteByte(')') + } +} + +func (p *Parser) error(loc *location.Location, reason string) { + msg := reason + if len(p.s.hints) > 0 { + sb := &strings.Builder{} + sb.WriteString(reason) + writeHints(sb, p.s.hints) + msg = sb.String() + } + + p.s.errors = append(p.s.errors, &Error{ + Code: ParseErr, + Message: msg, + Location: loc, + Details: 
newParserErrorDetail(p.s.s.Bytes(), loc.Offset), + }) + p.s.hints = nil +} + +func (p *Parser) errorf(loc *location.Location, f string, a ...any) { + msg := &strings.Builder{} + fmt.Fprintf(msg, f, a...) + + if len(p.s.hints) > 0 { + writeHints(msg, p.s.hints) } p.s.errors = append(p.s.errors, &Error{ @@ -2376,28 +2559,25 @@ func (p *Parser) errorf(loc *location.Location, f string, a ...any) { p.s.hints = nil } -func (p *Parser) hint(f string, a ...any) { - p.s.hints = append(p.s.hints, fmt.Sprintf(f, a...)) +func (p *Parser) hint(s string) { + p.s.hints = append(p.s.hints, s) } func (p *Parser) illegal(note string, a ...any) { - tok := p.s.tok.String() - if p.s.tok == tokens.Illegal { p.errorf(p.s.Loc(), "illegal token") return } + tok := p.s.tok.String() + tokType := "token" - if tokens.IsKeyword(p.s.tok) { - tokType = "keyword" - } else if _, ok := allFutureKeywords[p.s.tok.String()]; ok { + if _, ok := allFutureKeywords[tok]; ok || tokens.IsKeyword(p.s.tok) { tokType = "keyword" } - note = fmt.Sprintf(note, a...) if len(note) > 0 { - p.errorf(p.s.Loc(), "unexpected %s %s: %s", tok, tokType, note) + p.errorf(p.s.Loc(), "unexpected %s %s: %s", tok, tokType, fmt.Sprintf(note, a...)) } else { p.errorf(p.s.Loc(), "unexpected %s %s", tok, tokType) } @@ -2407,15 +2587,17 @@ func (p *Parser) illegalToken() { p.illegal("") } +var noScanOptions []scanner.ScanOption + func (p *Parser) scan() { - p.doScan(true) + p.doScan(true, noScanOptions...) } func (p *Parser) scanWS() { - p.doScan(false) + p.doScan(false, noScanOptions...) } -func (p *Parser) doScan(skipws bool) { +func (p *Parser) doScan(skipws bool, scanOpts ...scanner.ScanOption) { // NOTE(tsandall): the last position is used to compute the "text" field for // complex AST nodes. 
Whitespace never affects the last position of an AST @@ -2428,7 +2610,7 @@ func (p *Parser) doScan(skipws bool) { var errs []scanner.Error for { var pos scanner.Position - p.s.tok, pos, p.s.lit, errs = p.s.s.Scan() + p.s.tok, pos, p.s.lit, errs = p.s.s.Scan(scanOpts...) p.s.tokEnd = pos.End p.s.loc.Row = pos.Row @@ -2483,12 +2665,10 @@ func (p *Parser) restore(s *state) { } func setLocRecursive(x any, loc *location.Location) { - NewGenericVisitor(func(x any) bool { - if node, ok := x.(Node); ok { - node.SetLoc(loc) - } + WalkNodes(x, func(n Node) bool { + n.SetLoc(loc) return false - }).Walk(x) + }) } func (p *Parser) setLoc(term *Term, loc *location.Location, offset, end int) *Term { @@ -2566,17 +2746,22 @@ type rawAnnotation struct { RelatedResources []any `yaml:"related_resources"` Authors []any `yaml:"authors"` Schemas []map[string]any `yaml:"schemas"` + Compile map[string]any `yaml:"compile"` Custom map[string]any `yaml:"custom"` } type metadataParser struct { - buf *bytes.Buffer comments []*Comment + buf *bytes.Buffer loc *location.Location } -func newMetadataParser(loc *Location) *metadataParser { - return &metadataParser{loc: loc, buf: bytes.NewBuffer(nil)} +func (b *metadataParser) Reset(loc *location.Location) { + b.comments = b.comments[:0] + b.loc = loc + if b.buf != nil { + b.buf.Reset() + } } func (b *metadataParser) Append(c *Comment) { @@ -2587,14 +2772,12 @@ func (b *metadataParser) Append(c *Comment) { var yamlLineErrRegex = regexp.MustCompile(`^yaml:(?: unmarshal errors:[\n\s]*)? 
line ([[:digit:]]+):`) -func (b *metadataParser) Parse() (*Annotations, error) { - - var raw rawAnnotation - +func (b *metadataParser) Parse() (result *Annotations, err error) { if len(bytes.TrimSpace(b.buf.Bytes())) == 0 { return nil, errors.New("expected METADATA block, found whitespace") } + var raw rawAnnotation if err := yaml.Unmarshal(b.buf.Bytes(), &raw); err != nil { var comment *Comment match := yamlLineErrRegex.FindStringSubmatch(err.Error()) @@ -2617,13 +2800,14 @@ func (b *metadataParser) Parse() (*Annotations, error) { return nil, augmentYamlError(err, b.comments) } - var result Annotations - result.comments = b.comments - result.Scope = raw.Scope - result.Entrypoint = raw.Entrypoint - result.Title = raw.Title - result.Description = raw.Description - result.Organizations = raw.Organizations + result = &Annotations{ + comments: b.comments, + Scope: raw.Scope, + Entrypoint: raw.Entrypoint, + Title: raw.Title, + Description: raw.Description, + Organizations: raw.Organizations, + } for _, v := range raw.RelatedResources { rr, err := parseRelatedResource(v) @@ -2633,6 +2817,40 @@ func (b *metadataParser) Parse() (*Annotations, error) { result.RelatedResources = append(result.RelatedResources, rr) } + if raw.Compile != nil { + result.Compile = &CompileAnnotation{} + if unknowns, ok := raw.Compile["unknowns"]; ok { + if unknowns, ok := unknowns.([]any); ok { + result.Compile.Unknowns = make([]Ref, len(unknowns)) + for i := range unknowns { + if unknown, ok := unknowns[i].(string); ok { + ref, err := ParseRef(unknown) + if err != nil { + return nil, fmt.Errorf("invalid unknowns element %q: %w", unknown, err) + } + result.Compile.Unknowns[i] = ref + } + } + } + } + if mask, ok := raw.Compile["mask_rule"]; ok { + if mask, ok := mask.(string); ok { + maskTerm, err := ParseTerm(mask) + if err != nil { + return nil, fmt.Errorf("invalid mask_rule annotation %q: %w", mask, err) + } + switch v := maskTerm.Value.(type) { + case Var, String: + result.Compile.MaskRule = 
Ref{maskTerm} + case Ref: + result.Compile.MaskRule = v + default: + return nil, fmt.Errorf("invalid mask_rule annotation type %q: %[1]T", mask) + } + } + } + } + for _, pair := range raw.Schemas { k, v := unwrapPair(pair) @@ -2671,32 +2889,30 @@ func (b *metadataParser) Parse() (*Annotations, error) { result.Authors = append(result.Authors, author) } - result.Custom = make(map[string]any) - for k, v := range raw.Custom { - val, err := convertYAMLMapKeyTypes(v, nil) - if err != nil { - return nil, err + if raw.Custom != nil { + result.Custom = make(map[string]any, len(raw.Custom)) + for k, v := range raw.Custom { + if result.Custom[k], err = convertYAMLMapKeyTypes(v, nil); err != nil { + return nil, err + } } - result.Custom[k] = val } result.Location = b.loc // recreate original text of entire metadata block for location text attribute - sb := strings.Builder{} - sb.WriteString("# METADATA\n") + original := bytes.TrimSuffix(b.buf.Bytes(), newlineBytes) + numLines := bytes.Count(original, newlineBytes) + 1 + preAlloc := len("# METADATA\n") + len(original) + numLines*2 // '# ' prefix added per line - lines := bytes.Split(b.buf.Bytes(), []byte{'\n'}) + result.Location.Text = append(make([]byte, 0, preAlloc), "# METADATA\n"...) - for _, line := range lines[:len(lines)-1] { - sb.WriteString("# ") - sb.Write(line) - sb.WriteByte('\n') + for line := range bytes.SplitAfterSeq(original, newlineBytes) { + result.Location.Text = append(result.Location.Text, "# "...) + result.Location.Text = append(result.Location.Text, line...) 
} - result.Location.Text = []byte(strings.TrimSuffix(sb.String(), "\n")) - - return &result, nil + return result, err } // augmentYamlError augments a YAML error with hints intended to help the user figure out the cause of an otherwise @@ -2705,30 +2921,29 @@ func (b *metadataParser) Parse() (*Annotations, error) { func augmentYamlError(err error, comments []*Comment) error { // Adding hints for when key/value ':' separator isn't suffixed with a legal YAML space symbol for _, comment := range comments { - txt := string(comment.Text) - parts := strings.Split(txt, ":") - if len(parts) > 1 { - parts = parts[1:] - var invalidSpaces []string - for partIndex, part := range parts { - if len(part) == 0 && partIndex == len(parts)-1 { - invalidSpaces = []string{} - break - } - - r, _ := utf8.DecodeRuneInString(part) - if r == ' ' || r == '\t' { - invalidSpaces = []string{} - break - } + if bytes.IndexByte(comment.Text, ':') == -1 { + continue + } + parts := bytes.Split(comment.Text, []byte{':'})[1:] - invalidSpaces = append(invalidSpaces, fmt.Sprintf("%+q", r)) + var invalidSpaces []string + for partIndex, part := range parts { + if len(part) == 0 && partIndex == len(parts)-1 { + break } - if len(invalidSpaces) > 0 { - err = fmt.Errorf( - "%s\n Hint: on line %d, symbol(s) %v immediately following a key/value separator ':' is not a legal yaml space character", - err.Error(), comment.Location.Row, invalidSpaces) + + r, _ := utf8.DecodeRune(part) + if r == ' ' || r == '\t' { + break } + + invalidSpaces = append(invalidSpaces, fmt.Sprintf("%+q", r)) + } + if len(invalidSpaces) > 0 { + err = fmt.Errorf( + "%s\n Hint: on line %d, symbol(s) %v immediately following a"+ + " key/value separator ':' is not a legal yaml space character", + err.Error(), comment.Location.Row, invalidSpaces) } } return err @@ -2846,7 +3061,7 @@ func parseAuthorString(s string) (*AuthorAnnotation, error) { if len(trailing) >= len(emailPrefix)+len(emailSuffix) && strings.HasPrefix(trailing, emailPrefix) && 
strings.HasSuffix(trailing, emailSuffix) { email = trailing[len(emailPrefix):] - email = email[0 : len(email)-len(emailSuffix)] + email = email[:len(email)-len(emailSuffix)] namePartCount -= 1 } @@ -2916,6 +3131,11 @@ func IsFutureKeywordForRegoVersion(s string, v RegoVersion) bool { return yes } +// isFutureKeyword answers if keyword is from the "future" with the parser options set. +func (p *Parser) isFutureKeyword(s string) bool { + return IsFutureKeywordForRegoVersion(s, p.po.RegoVersion) +} + func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) { path := imp.Path.Value.(Ref) @@ -2929,10 +3149,7 @@ func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]toke return } - kwds := make([]string, 0, len(allowedFutureKeywords)) - for k := range allowedFutureKeywords { - kwds = append(kwds, k) - } + kwds := util.Keys(allowedFutureKeywords) switch len(path) { case 2: // all keywords imported, nothing to do @@ -2982,10 +3199,7 @@ func (p *Parser) regoV1Import(imp *Import) { } // import all future keywords with the rego.v1 import - kwds := make([]string, 0, len(futureKeywordsV0)) - for k := range futureKeywordsV0 { - kwds = append(kwds, k) - } + kwds := util.Keys(futureKeywordsV0) p.s.s.SetRegoV1Compatible() for _, kw := range kwds { diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go b/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go index f3d4e0d188..ab3de33a1f 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go @@ -11,7 +11,6 @@ package ast import ( - "bytes" "errors" "fmt" "slices" @@ -625,10 +624,9 @@ func ParseStatements(filename, input string) ([]Statement, []*Comment, error) { // ParseStatementsWithOpts returns a slice of parsed statements. This is the // default return value from the parser. 
func ParseStatementsWithOpts(filename, input string, popts ParserOptions) ([]Statement, []*Comment, error) { - parser := NewParser(). WithFilename(filename). - WithReader(bytes.NewBufferString(input)). + WithReader(strings.NewReader(input)). WithProcessAnnotation(popts.ProcessAnnotation). WithFutureKeywords(popts.FutureKeywords...). WithAllFutureKeywords(popts.AllFutureKeywords). @@ -638,7 +636,6 @@ func ParseStatementsWithOpts(filename, input string, popts ParserOptions) ([]Sta withUnreleasedKeywords(popts.unreleasedKeywords) stmts, comments, errs := parser.Parse() - if len(errs) > 0 { return nil, nil, errs } @@ -647,7 +644,6 @@ func ParseStatementsWithOpts(filename, input string, popts ParserOptions) ([]Sta } func parseModule(filename string, stmts []Statement, comments []*Comment, regoCompatibilityMode RegoVersion) (*Module, error) { - if len(stmts) == 0 { return nil, NewError(ParseErr, &Location{File: filename}, "empty module") } @@ -662,23 +658,21 @@ func parseModule(filename string, stmts []Statement, comments []*Comment, regoCo mod := &Module{ Package: pkg, - stmts: stmts, + // The comments slice only holds comments that were not their own statements. + Comments: comments, + stmts: stmts, } - // The comments slice only holds comments that were not their own statements. - mod.Comments = append(mod.Comments, comments...) 
- + mod.regoVersion = regoCompatibilityMode if regoCompatibilityMode == RegoUndefined { mod.regoVersion = DefaultRegoVersion - } else { - mod.regoVersion = regoCompatibilityMode } for i, stmt := range stmts[1:] { switch stmt := stmt.(type) { case *Import: mod.Imports = append(mod.Imports, stmt) - if mod.regoVersion == RegoV0 && Compare(stmt.Path.Value, RegoV1CompatibleRef) == 0 { + if mod.regoVersion == RegoV0 && RegoV1CompatibleRef.Equal(stmt.Path.Value) { mod.regoVersion = RegoV0CompatV1 } case *Rule: diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/performance.go b/vendor/github.com/open-policy-agent/opa/v1/ast/performance.go new file mode 100644 index 0000000000..564ee255d1 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/performance.go @@ -0,0 +1,99 @@ +// Copyright 2025 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. +package ast + +import ( + "encoding" + "strings" + "sync" +) + +var builtinNamesByNumParts = sync.OnceValue(func() map[int][]string { + m := map[int][]string{} + for name := range BuiltinMap { + parts := strings.Count(name, ".") + 1 + if parts > 1 { + m[parts] = append(m[parts], name) + } + } + return m +}) + +// BuiltinNameFromRef attempts to extract a known built-in function name from a ref, +// in the most efficient way possible. I.e. without allocating memory for a new string. +// If no built-in function name can be extracted, the second return value is false. 
+func BuiltinNameFromRef(ref Ref) (string, bool) { + reflen := len(ref) + if reflen == 0 { + return "", false + } + + _var, ok := ref[0].Value.(Var) + if !ok { + return "", false + } + + varName := string(_var) + if reflen == 1 { + if _, ok := BuiltinMap[varName]; ok { + return varName, true + } + return "", false + } + + totalLen := len(varName) + for _, term := range ref[1:] { + if _, ok = term.Value.(String); !ok { + return "", false + } + totalLen += 1 + len(term.Value.(String)) // account for dot + } + + matched, ok := builtinNamesByNumParts()[reflen] + if !ok { + return "", false + } + + for _, name := range matched { + // This check saves us a huge amount of work, as only very few built-in + // names will have the exact same length as the ref we are checking. + if len(name) != totalLen { + continue + } + // Example: `name` is "io.jwt.decode" (and so is ref) + // The first part is varName, which have already been established to be 'io': + // io, jwt.decode io == io + if curr, remaining, _ := strings.Cut(name, "."); curr == varName { + // Loop over the remaining (now known to be string) terms in the ref, e.g. "jwt" and "decode" + for _, term := range ref[1:] { + ts := string(term.Value.(String)) + // First iteration: jwt.decode != jwt, so we continue cutting + // Second iteration: remaining is "decode", and so is term + if remaining == ts { + return name, true + } + // Cutting remaining (e.g. jwt.decode), and we now get: + // jwt, decode, false || jwt != jwt + if curr, remaining, _ = strings.Cut(remaining, "."); remaining == "" || curr != ts { + break + } + } + } + } + + return "", false +} + +func AppendDelimeted[T encoding.TextAppender](buf []byte, appenders []T, delim string) ([]byte, error) { + for i, item := range appenders { + if i > 0 { + buf = append(buf, delim...) 
+ } + var err error + if buf, err = item.AppendText(buf); err != nil { + return nil, err + } + } + return buf, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go b/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go index 62c82f51ec..632b5aa6d5 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go @@ -86,7 +86,11 @@ var ReservedVars = NewVarSet( ) // Wildcard represents the wildcard variable as defined in the language. -var Wildcard = &Term{Value: Var("_")} +var ( + WildcardString = "_" + WildcardValue Value = Var(WildcardString) + Wildcard = &Term{Value: WildcardValue} +) // WildcardPrefix is the special character that all wildcard variables are // prefixed with when the statement they are contained in is parsed. @@ -367,40 +371,8 @@ func (mod *Module) Equal(other *Module) bool { } func (mod *Module) String() string { - byNode := map[Node][]*Annotations{} - for _, a := range mod.Annotations { - byNode[a.node] = append(byNode[a.node], a) - } - - appendAnnotationStrings := func(buf []string, node Node) []string { - if as, ok := byNode[node]; ok { - for i := range as { - buf = append(buf, "# METADATA") - buf = append(buf, "# "+as[i].String()) - } - } - return buf - } - - buf := []string{} - buf = appendAnnotationStrings(buf, mod.Package) - buf = append(buf, mod.Package.String()) - - if len(mod.Imports) > 0 { - buf = append(buf, "") - for _, imp := range mod.Imports { - buf = appendAnnotationStrings(buf, imp) - buf = append(buf, imp.String()) - } - } - if len(mod.Rules) > 0 { - buf = append(buf, "") - for _, rule := range mod.Rules { - buf = appendAnnotationStrings(buf, rule) - buf = append(buf, rule.stringWithOpts(toStringOpts{regoVersion: mod.regoVersion})) - } - } - return strings.Join(buf, "\n") + buf, _ := mod.AppendText(make([]byte, 0, mod.StringLength())) + return util.ByteSliceToString(buf) } // RuleSet returns a RuleSet containing named rules in the 
mod. @@ -469,7 +441,8 @@ func (c *Comment) SetLoc(loc *Location) { } func (c *Comment) String() string { - return "#" + string(c.Text) + buf, _ := c.AppendText(make([]byte, 0, c.StringLength())) + return util.ByteSliceToString(buf) } // Copy returns a deep copy of c. @@ -519,16 +492,8 @@ func (pkg *Package) SetLoc(loc *Location) { } func (pkg *Package) String() string { - if pkg == nil { - return "" - } else if len(pkg.Path) <= 1 { - return fmt.Sprintf("package ", pkg.Path) - } - // Omit head as all packages have the DefaultRootDocument prepended at parse time. - path := make(Ref, len(pkg.Path)-1) - path[0] = VarTerm(string(pkg.Path[1].Value.(String))) - copy(path[1:], pkg.Path[2:]) - return fmt.Sprintf("package %v", path) + buf, _ := pkg.AppendText(make([]byte, 0, pkg.StringLength())) + return util.ByteSliceToString(buf) } func (pkg *Package) MarshalJSON() ([]byte, error) { @@ -615,7 +580,7 @@ func (imp *Import) SetLoc(loc *Location) { // document. This is the alias if defined otherwise the last element in the // path. func (imp *Import) Name() Var { - if len(imp.Alias) != 0 { + if imp.Alias != "" { return imp.Alias } switch v := imp.Path.Value.(type) { @@ -631,11 +596,8 @@ func (imp *Import) Name() Var { } func (imp *Import) String() string { - buf := []string{"import", imp.Path.String()} - if len(imp.Alias) > 0 { - buf = append(buf, "as", imp.Alias.String()) - } - return strings.Join(buf, " ") + buf, _ := imp.AppendText(make([]byte, 0, imp.StringLength())) + return util.ByteSliceToString(buf) } func (imp *Import) MarshalJSON() ([]byte, error) { @@ -726,6 +688,7 @@ func (rule *Rule) SetLoc(loc *Location) { // Path returns a ref referring to the document produced by this rule. If rule // is not contained in a module, this function panics. +// // Deprecated: Poor handling of ref rules. Use `(*Rule).Ref()` instead. 
func (rule *Rule) Path() Ref { if rule.Module == nil { @@ -745,11 +708,12 @@ func (rule *Rule) Ref() Ref { } func (rule *Rule) String() string { - regoVersion := DefaultRegoVersion + opts := toStringOpts{} if rule.Module != nil { - regoVersion = rule.Module.RegoVersion() + opts.regoVersion = rule.Module.RegoVersion() } - return rule.stringWithOpts(toStringOpts{regoVersion: regoVersion}) + buf, _ := rule.appendWithOpts(opts, make([]byte, 0, rule.stringLengthWithOpts(opts))) + return util.ByteSliceToString(buf) } type toStringOpts struct { @@ -763,80 +727,46 @@ func (o toStringOpts) RegoVersion() RegoVersion { return o.regoVersion } -func (rule *Rule) stringWithOpts(opts toStringOpts) string { - buf := []string{} - if rule.Default { - buf = append(buf, "default") - } - buf = append(buf, rule.Head.stringWithOpts(opts)) - if !rule.Default { - switch opts.RegoVersion() { - case RegoV1, RegoV0CompatV1: - buf = append(buf, "if") - } - buf = append(buf, "{", rule.Body.String(), "}") - } - if rule.Else != nil { - buf = append(buf, rule.Else.elseString(opts)) - } - return strings.Join(buf, " ") -} - func (rule *Rule) isFunction() bool { return len(rule.Head.Args) > 0 } +// ruleJSON is used for JSON serialization of Rule to avoid map allocation overhead. +// Field order is alphabetical to match previous map-based output. 
+type ruleJSON struct { + Annotations []*Annotations `json:"annotations,omitempty"` + Body Body `json:"body"` + Default bool `json:"default,omitempty"` + Else *Rule `json:"else,omitempty"` + Head *Head `json:"head"` + Location *Location `json:"location,omitempty"` +} + func (rule *Rule) MarshalJSON() ([]byte, error) { - data := map[string]any{ - "head": rule.Head, - "body": rule.Body, + data := ruleJSON{ + Head: rule.Head, + Body: rule.Body, } if rule.Default { - data["default"] = true + data.Default = true } if rule.Else != nil { - data["else"] = rule.Else + data.Else = rule.Else } if astJSON.GetOptions().MarshalOptions.IncludeLocation.Rule { - if rule.Location != nil { - data["location"] = rule.Location - } + data.Location = rule.Location } if len(rule.Annotations) != 0 { - data["annotations"] = rule.Annotations + data.Annotations = rule.Annotations } return json.Marshal(data) } -func (rule *Rule) elseString(opts toStringOpts) string { - var buf []string - - buf = append(buf, "else") - - value := rule.Head.Value - if value != nil { - buf = append(buf, "=", value.String()) - } - - switch opts.RegoVersion() { - case RegoV1, RegoV0CompatV1: - buf = append(buf, "if") - } - - buf = append(buf, "{", rule.Body.String(), "}") - - if rule.Else != nil { - buf = append(buf, rule.Else.elseString(opts)) - } - - return strings.Join(buf, " ") -} - // NewHead returns a new Head object. If args are provided, the first will be // used for the key and the second will be used for the value. 
func NewHead(name Var, args ...*Term) *Head { @@ -981,6 +911,7 @@ func (head *Head) Copy() *Head { cpy.Key = head.Key.Copy() cpy.Value = head.Value.Copy() cpy.keywords = nil + cpy.Assign = head.Assign return &cpy } @@ -994,37 +925,8 @@ func (head *Head) String() string { } func (head *Head) stringWithOpts(opts toStringOpts) string { - buf := strings.Builder{} - buf.WriteString(head.Ref().String()) - containsAdded := false - - switch { - case len(head.Args) != 0: - buf.WriteString(head.Args.String()) - case len(head.Reference) == 1 && head.Key != nil: - switch opts.RegoVersion() { - case RegoV0: - buf.WriteRune('[') - buf.WriteString(head.Key.String()) - buf.WriteRune(']') - default: - containsAdded = true - buf.WriteString(" contains ") - buf.WriteString(head.Key.String()) - } - } - if head.Value != nil { - if head.Assign { - buf.WriteString(" := ") - } else { - buf.WriteString(" = ") - } - buf.WriteString(head.Value.String()) - } else if !containsAdded && head.Name == "" && head.Key != nil { - buf.WriteString(" contains ") - buf.WriteString(head.Key.String()) - } - return buf.String() + buf, _ := head.appendWithOpts(opts, make([]byte, 0, head.stringLengthWithOpts(opts))) + return util.ByteSliceToString(buf) } func (head *Head) MarshalJSON() ([]byte, error) { @@ -1087,7 +989,7 @@ func (head *Head) HasDynamicRef() bool { // Copy returns a deep copy of a. func (a Args) Copy() Args { - cpy := Args{} + cpy := make(Args, 0, len(a)) for _, t := range a { cpy = append(cpy, t.Copy()) } @@ -1095,11 +997,8 @@ func (a Args) Copy() Args { } func (a Args) String() string { - buf := make([]string, 0, len(a)) - for _, t := range a { - buf = append(buf, t.String()) - } - return "(" + strings.Join(buf, ", ") + ")" + buf, _ := a.AppendText(make([]byte, 0, a.StringLength())) + return util.ByteSliceToString(buf) } // Loc returns the Location of a. 
@@ -1232,11 +1131,12 @@ func (body Body) SetLoc(loc *Location) { } func (body Body) String() string { - buf := make([]string, 0, len(body)) - for _, v := range body { - buf = append(buf, v.String()) - } - return strings.Join(buf, "; ") + buf, _ := body.AppendText(make([]byte, 0, body.StringLength())) + return util.ByteSliceToString(buf) +} + +func (body Body) AppendText(buf []byte) ([]byte, error) { + return AppendDelimeted(buf, body, "; ") } // Vars returns a VarSet containing variables in body. The params can be set to @@ -1547,50 +1447,41 @@ func (expr *Expr) SetLoc(loc *Location) { } func (expr *Expr) String() string { - buf := make([]string, 0, 2+len(expr.With)) - if expr.Negated { - buf = append(buf, "not") - } - switch t := expr.Terms.(type) { - case []*Term: - if expr.IsEquality() && validEqAssignArgCount(expr) { - buf = append(buf, fmt.Sprintf("%v %v %v", t[1], Equality.Infix, t[2])) - } else { - buf = append(buf, Call(t).String()) - } - case fmt.Stringer: - buf = append(buf, t.String()) - } - - for i := range expr.With { - buf = append(buf, expr.With[i].String()) - } + buf, _ := expr.AppendText(make([]byte, 0, expr.StringLength())) + return util.ByteSliceToString(buf) +} - return strings.Join(buf, " ") +// exprJSON is used for JSON serialization of Expr to avoid map allocation overhead. +// Field order is alphabetical to match previous map-based output. 
+type exprJSON struct { + Generated bool `json:"generated,omitempty"` + Index int `json:"index"` + Location *Location `json:"location,omitempty"` + Negated bool `json:"negated,omitempty"` + Terms any `json:"terms"` + With []*With `json:"with,omitempty"` } func (expr *Expr) MarshalJSON() ([]byte, error) { - data := map[string]any{ - "terms": expr.Terms, - "index": expr.Index, + data := exprJSON{ + Index: expr.Index, + Terms: expr.Terms, } if len(expr.With) > 0 { - data["with"] = expr.With + data.With = expr.With } if expr.Generated { - data["generated"] = true + data.Generated = true } if expr.Negated { - data["negated"] = true + data.Negated = true } if astJSON.GetOptions().MarshalOptions.IncludeLocation.Expr { - if expr.Location != nil { - data["location"] = expr.Location - } + data.Location = expr.Location } return json.Marshal(data) @@ -1660,17 +1551,8 @@ func visitCogeneratedExprs(expr *Expr, f func(*Expr) bool) { } func (d *SomeDecl) String() string { - if call, ok := d.Symbols[0].Value.(Call); ok { - if len(call) == 4 { - return "some " + call[1].String() + ", " + call[2].String() + " in " + call[3].String() - } - return "some " + call[1].String() + " in " + call[2].String() - } - buf := make([]string, len(d.Symbols)) - for i := range buf { - buf[i] = d.Symbols[i].String() - } - return "some " + strings.Join(buf, ", ") + buf, _ := d.AppendText(make([]byte, 0, d.StringLength())) + return util.ByteSliceToString(buf) } // SetLoc sets the Location on d. @@ -1789,7 +1671,8 @@ func (q *Every) MarshalJSON() ([]byte, error) { } func (w *With) String() string { - return "with " + w.Target.String() + " as " + w.Value.String() + buf, _ := w.AppendText(make([]byte, 0, w.StringLength())) + return util.ByteSliceToString(buf) } // Equal returns true if this With is equals the other With. @@ -1846,16 +1729,22 @@ func (w *With) SetLoc(loc *Location) { w.Location = loc } +// withJSON is used for JSON serialization of With to avoid map allocation overhead. 
+// Field order is alphabetical to match previous map-based output. +type withJSON struct { + Location *Location `json:"location,omitempty"` + Target *Term `json:"target"` + Value *Term `json:"value"` +} + func (w *With) MarshalJSON() ([]byte, error) { - data := map[string]any{ - "target": w.Target, - "value": w.Value, + data := withJSON{ + Target: w.Target, + Value: w.Value, } if astJSON.GetOptions().MarshalOptions.IncludeLocation.With { - if w.Location != nil { - data["location"] = w.Location - } + data.Location = w.Location } return json.Marshal(data) diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/policy_appenders.go b/vendor/github.com/open-policy-agent/opa/v1/ast/policy_appenders.go new file mode 100644 index 0000000000..1c9813aa97 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/policy_appenders.go @@ -0,0 +1,342 @@ +package ast + +import ( + "encoding" + "fmt" + + "github.com/open-policy-agent/opa/v1/util" +) + +func (m *Module) AppendText(buf []byte) ([]byte, error) { + if m == nil { + return append(buf, ""...), nil + } + + var err error + + // NOTE(anderseknert): this DOES allocate still, and while that's unfortunate, + // we'll be better off dealing with that when we have v2 JSON in the stdlib than + // doing manual JSON marshalling (and string length calculations) here. + for _, annotations := range m.Annotations { + // rule annotations are attached to rules, so only check for package scoped ones here + if annotations.Scope == "package" || annotations.Scope == "subpackages" { + buf = append(buf, "# METADATA\n# "...) + buf = append(buf, annotations.String()...) 
+ buf = append(buf, '\n')
+ }
+ }
+
+ if buf, err = m.Package.AppendText(buf); err != nil {
+ return nil, err
+ }
+ buf = append(buf, '\n')
+
+ if len(m.Imports) > 0 {
+ for _, imp := range m.Imports {
+ buf = append(buf, '\n')
+ if buf, err = imp.AppendText(buf); err != nil {
+ return nil, err
+ }
+ }
+ buf = append(buf, '\n')
+ }
+
+ if len(m.Rules) > 0 {
+ for _, rule := range m.Rules {
+ buf = append(buf, '\n')
+ if buf, err = rule.appendWithOpts(toStringOpts{regoVersion: m.regoVersion}, buf); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (pkg *Package) AppendText(buf []byte) ([]byte, error) {
+ if pkg == nil {
+ return append(buf, ""...), nil
+ }
+ if len(pkg.Path) <= 1 {
+ // Path too short to render as a valid package clause; fall back to fmt.
+ return fmt.Appendf(buf, "package %v", pkg.Path), nil
+ }
+
+ buf = append(buf, "package "...)
+
+ path := pkg.Path[1:] // omit "data"
+
+ return path.AppendText(buf)
+}
+
+func (imp *Import) AppendText(buf []byte) ([]byte, error) {
+ buf = append(buf, "import "...)
+ var err error
+ if buf, err = imp.Path.AppendText(buf); err != nil {
+ return nil, err
+ }
+ if imp.Alias != "" {
+ buf = append(buf, ' ', 'a', 's', ' ')
+ buf = append(buf, imp.Alias...)
+ }
+ return buf, nil
+}
+
+func (r *Rule) AppendText(buf []byte) ([]byte, error) {
+ regoVersion := DefaultRegoVersion
+ if r.Module != nil {
+ regoVersion = r.Module.RegoVersion()
+ }
+ return r.appendWithOpts(toStringOpts{regoVersion: regoVersion}, buf)
+}
+
+func (r *Rule) appendWithOpts(opts toStringOpts, buf []byte) ([]byte, error) {
+ // See note in [Module.AppendText] regarding annotations.
+ for _, annotations := range r.Annotations {
+ buf = append(buf, "# METADATA\n# "...)
+ buf = append(buf, annotations.String()...)
+ buf = append(buf, '\n')
+ }
+
+ if r.Default {
+ buf = append(buf, "default "...)
+ } + + var err error + if buf, err = r.Head.appendWithOpts(opts, buf); err != nil { + return nil, err + } + + if !r.Default { + switch opts.RegoVersion() { + case RegoV1, RegoV0CompatV1: + buf = append(buf, " if { "...) + default: + buf = append(buf, " { "...) + } + if buf, err = r.Body.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, " }"...) + } + if r.Else != nil { + if buf, err = r.Else.appendElse(opts, buf); err != nil { + return nil, err + } + } + + return buf, nil +} + +func (r *Rule) appendElse(opts toStringOpts, buf []byte) ([]byte, error) { + buf = append(buf, " else "...) + + var err error + if r.Head.Value != nil { + buf = append(buf, "= "...) + if buf, err = r.Head.Value.AppendText(buf); err != nil { + return nil, err + } + } + + if v := opts.RegoVersion(); v == RegoV1 || v == RegoV0CompatV1 { + buf = append(buf, " if { "...) + } else { + buf = append(buf, " { "...) + } + if buf, err = r.Body.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, " }"...) + + if r.Else != nil { + if buf, err = r.Else.appendElse(opts, buf); err != nil { + return nil, err + } + } + + return buf, nil +} + +func (h *Head) AppendText(buf []byte) ([]byte, error) { + return h.appendWithOpts(toStringOpts{}, buf) +} + +func (h *Head) appendWithOpts(opts toStringOpts, buf []byte) ([]byte, error) { + var err error + if h.Reference == nil { + buf = append(buf, h.Name...) 
+ } else { + if buf, err = h.Reference.AppendText(buf); err != nil { + return nil, err + } + } + + containsAdded := false + switch { + case len(h.Args) != 0: + if buf, err = h.Args.AppendText(buf); err != nil { + return nil, err + } + case len(h.Reference) == 1 && h.Key != nil: + switch opts.RegoVersion() { + case RegoV0: + buf = append(buf, '[') + if buf, err = h.Key.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, ']') + default: + if buf, err = h.Key.AppendText(append(buf, " contains "...)); err != nil { + return nil, err + } + containsAdded = true + } + } + if h.Value != nil { + if h.Assign { + buf = append(buf, " := "...) + } else { + buf = append(buf, " = "...) + } + if buf, err = h.Value.AppendText(buf); err != nil { + return nil, err + } + } else if !containsAdded && h.Name == "" && h.Key != nil { + if buf, err = h.Key.AppendText(append(buf, " contains "...)); err != nil { + return nil, err + } + } + return buf, nil +} + +func (a Args) AppendText(buf []byte) ([]byte, error) { + var err error + buf = append(buf, '(') + if buf, err = AppendDelimeted(buf, a, ", "); err != nil { + return nil, err + } + return append(buf, ')'), nil +} + +func (expr *Expr) AppendText(buf []byte) ([]byte, error) { + if expr.Negated { + buf = append(buf, "not "...) 
+ } + + var err error + + switch t := expr.Terms.(type) { + case []*Term: + if expr.IsEquality() && validEqAssignArgCount(expr) { + if buf, err = t[1].AppendText(buf); err != nil { + return nil, err + } + buf = append(append(append(buf, ' '), Equality.Infix...), ' ') + if buf, err = t[2].AppendText(buf); err != nil { + return nil, err + } + } else if buf, err = Call(t).AppendText(buf); err != nil { + return nil, err + } + case encoding.TextAppender: + if buf, err = t.AppendText(buf); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported expr terms type: %T", expr.Terms) + } + + if len(expr.With) > 0 { + buf = append(buf, ' ') + } + + return AppendDelimeted(buf, expr.With, " ") +} + +func (w *With) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, "with "...) + var err error + if buf, err = w.Target.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, " as "...) + if buf, err = w.Value.AppendText(buf); err != nil { + return nil, err + } + return buf, nil +} + +func (w *Every) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, "every "...) + var err error + if w.Key != nil { + if buf, err = w.Key.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, ", "...) + } + if buf, err = w.Value.AppendText(buf); err == nil { + buf = append(buf, " in "...) + if buf, err = w.Domain.AppendText(buf); err == nil { + buf = append(buf, " { "...) + if buf, err = w.Body.AppendText(buf); err == nil { + buf = append(buf, " }"...) + } + } + } + return buf, err +} + +func (d *SomeDecl) AppendText(buf []byte) ([]byte, error) { + var err error + buf = append(buf, "some "...) + if call, ok := d.Symbols[0].Value.(Call); ok { + if buf, err = call[1].AppendText(buf); err != nil { + return nil, err + } + if len(call) == 3 { + buf = append(buf, " in "...) + } else { + buf = append(buf, ", "...) 
+ } + if buf, err = call[2].AppendText(buf); err != nil { + return nil, err + } + if len(call) == 4 { + buf = append(buf, " in "...) + if buf, err = call[3].AppendText(buf); err != nil { + return nil, err + } + } + return buf, nil + } + + buf, err = AppendDelimeted(buf, d.Symbols, ", ") + + return buf, err +} + +func (c *Comment) AppendText(buf []byte) ([]byte, error) { + return append(append(buf, '#'), c.Text...), nil +} + +// RulePath returns the string representation of the rule's path, i.e. its package path followed by the rule head ref. +func RulePath(r *Rule) string { + if r == nil { + return "" + } + if r.Module == nil { + return "" + } + buf := make([]byte, 0, r.Module.Package.Path.StringLength()+r.Head.Ref().StringLength()+1) + buf, _ = r.Module.Package.Path.AppendText(buf) + buf = append(buf, '.') + buf, _ = r.Head.Ref().AppendText(buf) + + return util.ByteSliceToString(buf) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go b/vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go index a702d9294c..db9e0f722c 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go @@ -27,13 +27,12 @@ func checkRootDocumentOverrides(node any) Errors { errors := Errors{} WalkRules(node, func(rule *Rule) bool { - var name string + name := rule.Head.Name if len(rule.Head.Reference) > 0 { - name = rule.Head.Reference[0].Value.(Var).String() - } else { - name = rule.Head.Name.String() + name = rule.Head.Reference[0].Value.(Var) } - if RootDocumentRefs.Contains(RefTerm(VarTerm(name))) { + + if ReservedVars.Contains(name) { errors = append(errors, NewError(CompileErr, rule.Location, "rules must not shadow %v (use a different rule name)", name)) } @@ -52,8 +51,8 @@ func checkRootDocumentOverrides(node any) Errors { if expr.IsAssignment() { // assign() can be called directly, so we need to assert its given first operand exists before checking its name. 
if nameOp := expr.Operand(0); nameOp != nil { - name := nameOp.String() - if RootDocumentRefs.Contains(RefTerm(VarTerm(name))) { + name := Var(nameOp.String()) + if ReservedVars.Contains(name) { errors = append(errors, NewError(CompileErr, expr.Location, "variables must not shadow %v (use a different variable name)", name)) } } @@ -65,26 +64,24 @@ func checkRootDocumentOverrides(node any) Errors { } func walkCalls(node any, f func(any) bool) { - vis := &GenericVisitor{func(x any) bool { - switch x := x.(type) { + vis := NewGenericVisitor(func(x any) bool { + switch y := x.(type) { case Call: return f(x) case *Expr: - if x.IsCall() { + if y.IsCall() { return f(x) } case *Head: // GenericVisitor doesn't walk the rule head ref - walkCalls(x.Reference, f) + walkCalls(y.Reference, f) } return false - }} + }) vis.Walk(node) } -func checkDeprecatedBuiltins(deprecatedBuiltinsMap map[string]struct{}, node any) Errors { - errs := make(Errors, 0) - +func checkDeprecatedBuiltins(deprecatedBuiltinsMap map[string]struct{}, node any) (errs Errors) { walkCalls(node, func(x any) bool { var operator string var loc *Location diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/slices.go b/vendor/github.com/open-policy-agent/opa/v1/ast/slices.go new file mode 100644 index 0000000000..5921ec0ca1 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/slices.go @@ -0,0 +1,15 @@ +// Copyright 2026 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +// CountFunc counts the number of items in a slice S that satisfy predicate function f. 
+func CountFunc[T any, S ~[]T](items S, f func(T) bool) (n int) { + for i := range items { + if f(items[i]) { + n++ + } + } + return n +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/string_length.go b/vendor/github.com/open-policy-agent/opa/v1/ast/string_length.go new file mode 100644 index 0000000000..fe53227d24 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/string_length.go @@ -0,0 +1,351 @@ +package ast + +import ( + "fmt" + "unicode/utf8" + + "github.com/open-policy-agent/opa/v1/util" +) + +// StringLengther is an interface for types that can report their string length without +// actually constructing the string. This is useful for pre-allocating buffers, like those +// used in AppendText, strings.Builder, bytes.Buffer, etc. +type StringLengther interface { + StringLength() int +} + +// TermSliceStringLength returns the total string length of the given terms, as reported +// by the [StringLengther.StringLength] method implementation of each term's [Value]. The +// delimLen value will be added between each term's length to account for a delimiter, or +// no delimiter if delimLen is 0. +// Implementation note: this function is optimized for inlining, and just meets the threshold +// for that. Don't change without making sure that's still the case. 
+func TermSliceStringLength(terms []*Term, delimLen int) (n int) { + for i := range terms { + n += terms[i].StringLength() + delimLen + } + return max(n-delimLen, 0) +} + +func (t *Term) StringLength() int { + if sl, ok := t.Value.(StringLengther); ok { + return sl.StringLength() + } + + panic("expected all ast.Value types to implement StringLenghter interface, got: " + ValueName(t.Value)) +} + +func (s String) StringLength() int { + n := 2 // surrounding quotes + bs := util.StringToByteSlice(s) + for i := 0; i < len(bs); { + r, size := utf8.DecodeRune(bs[i:]) + switch r { + case '\\', '"': + n += 2 // escaped backslash or quote + case '\b', '\f', '\n', '\r', '\t': + n += 2 // escaped control characters + default: + if r < 0x20 { + n += 6 // unicode escape for other control characters + } else { + n += size // normal rune + } + } + i += size + } + return n +} + +func (n Number) StringLength() int { + return len(n) +} + +func (b Boolean) StringLength() int { + if b { + return 4 + } + return 5 +} + +func (Null) StringLength() int { + return 4 +} + +func (s *set) StringLength() int { + if s.Len() == 0 { + return 5 // set() + } + // surrounding {} + ", " for every element - 1 + return TermSliceStringLength(s.Slice(), 2) + 2 +} + +func (a *Array) StringLength() int { + if a.Len() == 0 { + return 2 // [] + } + // surrounding brackets + ", " for every element - 1 + return TermSliceStringLength(a.elems, 2) + 2 +} + +func (o *object) StringLength() (n int) { + if o.Len() == 0 { + return 2 // {} + } + // ": " for every item + ", " for every item - 1 + o.Foreach(func(key, value *Term) { + n += key.StringLength() + 4 + value.StringLength() // ": " and ", " + }) + return n // surrounding {} but also minus last ", " +} + +func (l *lazyObj) StringLength() int { + return l.force().(*object).StringLength() +} + +func (ts *TemplateString) StringLength() (n int) { + for _, p := range ts.Parts { + switch x := p.(type) { + case *Expr: + n += 2 + x.StringLength() // for {} + case *Term: 
+ if s, ok := x.Value.(String); ok { + n += len(s) + countUnescapedLeftCurly(string(s)) + } else { + n += x.StringLength() + } + default: + n += 9 // + } + } + return n + 3 // $"" or $`` +} + +func (c Call) StringLength() int { + return c[0].StringLength() + 2 + TermSliceStringLength(c[1:], 2) +} + +func (r Ref) StringLength() (n int) { + rlen := len(r) + if rlen == 0 { + return 0 + } + + if s, ok := r[0].Value.(String); ok { + n = len(s) // first term should never be quoted + } else { + n = r[0].StringLength() + } + + if rlen == 1 { + return n + } + + for _, p := range r[1:] { + switch v := p.Value.(type) { + case String: + str := string(v) + if IsVarCompatibleString(str) && !IsKeyword(str) { + n += 1 + len(str) // dot + name + } else { + n += 2 + p.StringLength() // brackets + } + default: + n += 2 + p.StringLength() // brackets + } + } + return n +} + +func (v Var) StringLength() int { + if v.IsWildcard() { + return 1 + } + return len(v) +} + +func (s *SetComprehension) StringLength() int { + return s.Term.StringLength() + s.Body.StringLength() + 5 // {} and " | " +} + +func (a *ArrayComprehension) StringLength() int { + return a.Term.StringLength() + a.Body.StringLength() + 5 // [] and " | " +} + +func (o *ObjectComprehension) StringLength() (n int) { + n += o.Key.StringLength() + n += o.Value.StringLength() + n += o.Body.StringLength() + return n + 7 // "{}"", " | ", and ": " +} + +func (m *Module) StringLength() (n int) { + if m.Package != nil { + n += m.Package.StringLength() + 2 // newlines + } + + if len(m.Imports) > 0 { + for _, imp := range m.Imports { + n += imp.StringLength() + 1 // newline + } + } + + if len(m.Rules) > 0 { + for _, rule := range m.Rules { + n += rule.stringLengthWithOpts(toStringOpts{regoVersion: m.regoVersion}) + 1 // newline + } + } + + return n +} + +func (p *Package) StringLength() int { + if p == nil { + return 21 // + } + if len(p.Path) <= 1 { + return 25 + p.Path.StringLength() // // package + } + + return 8 + 
p.Path[1:].StringLength() // "package ..." +} + +func (i *Import) StringLength() (n int) { + n = 7 + i.Path.StringLength() // "import " and path + if i.Alias != "" { + n += 4 + i.Alias.StringLength() // " as " and alias + } + return n +} + +func (r *Rule) StringLength() int { + return r.stringLengthWithOpts(toStringOpts{}) +} + +func (r *Rule) stringLengthWithOpts(opts toStringOpts) int { + n := 0 + if r.Default { + n += 8 // "default " + } + n += r.Head.stringLengthWithOpts(opts) + if !r.Default { + switch opts.RegoVersion() { + case RegoV1, RegoV0CompatV1: + n += 6 // " if { " + default: + n += 3 // " { " + } + n += r.Body.StringLength() + 2 // body and closing " }" + } + if r.Else != nil { + n += r.Else.stringLengthWithOpts(opts) + } + return n +} + +func (h *Head) StringLength() int { + return h.stringLengthWithOpts(toStringOpts{}) +} + +func (h *Head) stringLengthWithOpts(opts toStringOpts) int { + n := h.Reference.StringLength() + containsAdded := false + switch { + case len(h.Args) != 0: + n += h.Args.StringLength() + case len(h.Reference) == 1 && h.Key != nil: + switch opts.RegoVersion() { + case RegoV0: + n += 2 + h.Key.StringLength() // for [] + default: + n += 10 + h.Key.StringLength() // " contains " + containsAdded = true + } + } + if h.Value != nil { + if h.Assign { + n += 4 // " := " + } else { + n += 3 // " = " + } + n += h.Value.StringLength() + } else if !containsAdded && h.Name == "" && h.Key != nil { + n += 10 + h.Key.StringLength() // " contains " + } + return n +} + +func (a Args) StringLength() (n int) { + n = 2 // () + for _, t := range a { + n += t.StringLength() + 2 // ", " + } + return n - 2 // minus last ", " +} + +func (b Body) StringLength() (n int) { + for _, expr := range b { + n += expr.StringLength() + 2 // "; " + } + return max(n-2, 0) // minus last "; " (if `n` isn't 0) +} + +func (e *Expr) StringLength() (n int) { + if e.Negated { + n += 4 // "not " + } + switch terms := e.Terms.(type) { + case []*Term: + if e.IsEquality() && 
validEqAssignArgCount(e) { + n += terms[1].StringLength() + len(Equality.Infix) + terms[2].StringLength() + 2 // spaces around = + } else { + n += Call(terms).StringLength() + } + case StringLengther: + n += terms.StringLength() + default: + panic(fmt.Sprintf("string length estimation not implemented for type: %T", e.Terms)) + } + + for _, w := range e.With { + n += w.StringLength() + 1 // space before with + } + + return n +} + +func (w *With) StringLength() int { + return w.Target.StringLength() + w.Value.StringLength() + 9 // "with " and " as " +} + +func (e *Every) StringLength() int { + n := 6 // "every " + if e.Key != nil { + n += e.Key.StringLength() + 2 // ", " + } + n += e.Value.StringLength() + 4 // " in " + n += e.Domain.StringLength() + 3 // " { " + n += e.Body.StringLength() + 2 // " }" + return n +} + +func (s *SomeDecl) StringLength() int { + n := 5 // "some " + if call, ok := s.Symbols[0].Value.(Call); ok { + n += 4 // " in " + n += call[1].StringLength() + if len(call) == 4 { + n += 2 // ", " + } + n += call[2].StringLength() + if len(call) == 4 { + n += call[3].StringLength() + } + return n + } + return n + TermSliceStringLength(s.Symbols, 2) +} + +func (c *Comment) StringLength() int { + return 1 + len(c.Text) // '#' + text +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go b/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go index 8447522412..72ec03f8cc 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go @@ -48,6 +48,8 @@ func ValueName(x Value) string { return "objectcomprehension" case *SetComprehension: return "setcomprehension" + case *TemplateString: + return "templatestring" } return TypeName(x) diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go b/vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go index 82977c836b..500bb073cf 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go 
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go @@ -1,53 +1,31 @@ package ast import ( - "strings" + "bytes" "sync" -) -type termPtrPool struct { - pool sync.Pool -} - -type stringBuilderPool struct { - pool sync.Pool -} + "github.com/open-policy-agent/opa/v1/util" +) -type indexResultPool struct { - pool sync.Pool -} +var ( + TermPtrPool = util.NewSyncPool[Term]() + BytesReaderPool = util.NewSyncPool[bytes.Reader]() + IndexResultPool = util.NewSyncPool[IndexResult]() + + // Needs custom pool because of custom Put logic. + varVisitorPool = &vvPool{ + pool: sync.Pool{ + New: func() any { + return NewVarVisitor() + }, + }, + } +) type vvPool struct { pool sync.Pool } -func (p *termPtrPool) Get() *Term { - return p.pool.Get().(*Term) -} - -func (p *termPtrPool) Put(t *Term) { - p.pool.Put(t) -} - -func (p *stringBuilderPool) Get() *strings.Builder { - return p.pool.Get().(*strings.Builder) -} - -func (p *stringBuilderPool) Put(sb *strings.Builder) { - sb.Reset() - p.pool.Put(sb) -} - -func (p *indexResultPool) Get() *IndexResult { - return p.pool.Get().(*IndexResult) -} - -func (p *indexResultPool) Put(x *IndexResult) { - if x != nil { - p.pool.Put(x) - } -} - func (p *vvPool) Get() *VarVisitor { return p.pool.Get().(*VarVisitor) } @@ -58,35 +36,3 @@ func (p *vvPool) Put(vv *VarVisitor) { p.pool.Put(vv) } } - -var TermPtrPool = &termPtrPool{ - pool: sync.Pool{ - New: func() any { - return &Term{} - }, - }, -} - -var sbPool = &stringBuilderPool{ - pool: sync.Pool{ - New: func() any { - return &strings.Builder{} - }, - }, -} - -var varVisitorPool = &vvPool{ - pool: sync.Pool{ - New: func() any { - return NewVarVisitor() - }, - }, -} - -var IndexResultPool = &indexResultPool{ - pool: sync.Pool{ - New: func() any { - return &IndexResult{} - }, - }, -} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/term.go b/vendor/github.com/open-policy-agent/opa/v1/ast/term.go index 32d294f3ce..436b22eb2a 100644 --- 
a/vendor/github.com/open-policy-agent/opa/v1/ast/term.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/term.go @@ -2,7 +2,6 @@ // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. -// nolint: deadcode // Public API. package ast import ( @@ -12,9 +11,7 @@ import ( "fmt" "io" "math" - "math/big" "net/url" - "regexp" "slices" "strconv" "strings" @@ -27,7 +24,22 @@ import ( "github.com/open-policy-agent/opa/v1/util" ) -var errFindNotFound = errors.New("find: not found") +// maxBindingsEstimate is the cap for binding count estimates in comprehensions. +// This value aligns with maxLinearScan in topdown/bindings.go. +const maxBindingsEstimate = 16 + +// EstimateBodyBindingCount returns an estimate of the number of bindings needed +// for evaluating a comprehension body. It uses the body length as a heuristic, +// capped at maxBindingsEstimate. +func EstimateBodyBindingCount(body Body) (estimate int) { + return min(len(body), maxBindingsEstimate) +} + +var ( + NullValue Value = Null{} + + errFindNotFound = errors.New("find: not found") +) // Location records a position in source code. type Location = location.Location @@ -45,12 +57,15 @@ func NewLocation(text []byte, file string, row int, col int) *Location { // - Variables, References // - Array, Set, and Object Comprehensions // - Calls +// - Template Strings type Value interface { Compare(other Value) int // Compare returns <0, 0, or >0 if this Value is less than, equal to, or greater than other, respectively. Find(path Ref) (Value, error) // Find returns value referred to by path or an error if path is not found. Hash() int // Returns hash code of the value. IsGround() bool // IsGround returns true if this value is not a variable or contains no variables. String() string // String returns a human readable string representation of the value. + + StringLengther // All Values must be able to report their string length during optimization. 
} // InterfaceToValue converts a native Go value x to a Value. @@ -61,20 +76,20 @@ func InterfaceToValue(x any) (Value, error) { case nil: return NullValue, nil case bool: - return InternedTerm(x).Value, nil + return InternedValue(x), nil case json.Number: if interned := InternedIntNumberTermFromString(string(x)); interned != nil { return interned.Value, nil } return Number(x), nil + case int: + return InternedValueOr(x, newIntNumberValue), nil case int64: - return int64Number(x), nil + return InternedValueOr(x, newInt64NumberValue), nil case uint64: - return uint64Number(x), nil + return InternedValueOr(x, newUint64NumberValue), nil case float64: return floatNumber(x), nil - case int: - return intNumber(x), nil case string: return String(x), nil case []any: @@ -353,6 +368,8 @@ func (term *Term) Copy() *Term { cpy.Value = v.Copy() case *SetComprehension: cpy.Value = v.Copy() + case *TemplateString: + cpy.Value = v.Copy() case Call: cpy.Value = v.Copy() } @@ -406,19 +423,24 @@ func (term *Term) IsGround() bool { return term.Value.IsGround() } +// termJSON is used to serialize Term to JSON without map allocation. +type termJSON struct { + Location *Location `json:"location,omitempty"` + Type string `json:"type"` + Value Value `json:"value"` +} + // MarshalJSON returns the JSON encoding of the term. // // Specialized marshalling logic is required to include a type hint for Value. func (term *Term) MarshalJSON() ([]byte, error) { - d := map[string]any{ - "type": ValueName(term.Value), - "value": term.Value, + d := termJSON{ + Type: ValueName(term.Value), + Value: term.Value, } jsonOptions := astJSON.GetOptions().MarshalOptions if jsonOptions.IncludeLocation.Term { - if term.Location != nil { - d["location"] = term.Location - } + d.Location = term.Location } return json.Marshal(d) } @@ -458,7 +480,17 @@ func (term *Term) Vars() VarSet { } // IsConstant returns true if the AST value is constant. 
+// Note that this is only a shallow check as we currently don't have a real +// notion of constant "vars" in the AST implementation. Meaning that while we could +// derive that a reference to a constant value is also constant, we currently don't. func IsConstant(v Value) bool { + switch v.(type) { + case Null, Boolean, Number, String: + return true + case Var, Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call: + return false + } + found := false vis := GenericVisitor{ func(x any) bool { @@ -533,8 +565,6 @@ func IsScalar(v Value) bool { // Null represents the null value defined by JSON. type Null struct{} -var NullValue Value = Null{} - // NullTerm creates a new Term with a Null value. func NullTerm() *Term { return &Term{Value: NullValue} @@ -586,10 +616,7 @@ type Boolean bool // BooleanTerm creates a new Term with a Boolean value. func BooleanTerm(b bool) *Term { - if b { - return &Term{Value: InternedTerm(true).Value} - } - return &Term{Value: InternedTerm(false).Value} + return &Term{Value: internedBooleanValue(b)} } // Equal returns true if the other Value is a Boolean and is equal. @@ -655,13 +682,14 @@ func NumberTerm(n json.Number) *Term { } // IntNumberTerm creates a new Term with an integer Number value. +// For values between -1 and 512, returns a cached Term to reduce allocations. func IntNumberTerm(i int) *Term { - return &Term{Value: Number(strconv.Itoa(i))} + return internedIntNumberTerm(i) } // UIntNumberTerm creates a new Term with an unsigned integer Number value. func UIntNumberTerm(u uint64) *Term { - return &Term{Value: uint64Number(u)} + return &Term{Value: newUint64NumberValue(u)} } // FloatNumberTerm creates a new Term with a floating point Number value. @@ -672,22 +700,10 @@ func FloatNumberTerm(f float64) *Term { // Equal returns true if the other Value is a Number and is equal. 
func (num Number) Equal(other Value) bool { - switch other := other.(type) { - case Number: - if num == other { - return true - } - if n1, ok1 := num.Int64(); ok1 { - n2, ok2 := other.Int64() - if ok1 && ok2 { - return n1 == n2 - } - } - - return num.Compare(other) == 0 - default: - return false + if other, ok := other.(Number); ok { + return NumberCompare(num, other) == 0 } + return false } // Compare compares num to other, return <0, 0, or >0 if it is less than, equal to, @@ -695,17 +711,7 @@ func (num Number) Equal(other Value) bool { func (num Number) Compare(other Value) int { // Optimize for the common case, as calling Compare allocates on heap. if otherNum, yes := other.(Number); yes { - if ai, ok := num.Int64(); ok { - if bi, ok := otherNum.Int64(); ok { - if ai == bi { - return 0 - } - if ai < bi { - return -1 - } - return 1 - } - } + return NumberCompare(num, otherNum) } return Compare(num, other) @@ -726,13 +732,10 @@ func (num Number) Hash() int { return i } } - f, err := json.Number(num).Float64() - if err != nil { - bs := []byte(num) - h := xxhash.Sum64(bs) - return int(h) + if f, ok := num.Float64(); ok { + return int(f) } - return int(f) + return int(xxhash.Sum64String(string(num))) } // Int returns the int representation of num if possible. 
@@ -773,15 +776,15 @@ func (num Number) String() string { return string(num) } -func intNumber(i int) Number { +func newIntNumberValue(i int) Value { return Number(strconv.Itoa(i)) } -func int64Number(i int64) Number { +func newInt64NumberValue(i int64) Value { return Number(strconv.FormatInt(i, 10)) } -func uint64Number(u uint64) Number { +func newUint64NumberValue(u uint64) Value { return Number(strconv.FormatUint(u, 10)) } @@ -848,12 +851,159 @@ func (str String) Hash() int { return int(xxhash.Sum64String(string(str))) } +type TemplateString struct { + Parts []Node `json:"parts"` + MultiLine bool `json:"multi_line"` +} + +func (ts *TemplateString) Copy() *TemplateString { + cpy := &TemplateString{MultiLine: ts.MultiLine, Parts: make([]Node, len(ts.Parts))} + for i, p := range ts.Parts { + switch v := p.(type) { + case *Expr: + cpy.Parts[i] = v.Copy() + case *Term: + cpy.Parts[i] = v.Copy() + } + } + return cpy +} + +func (ts *TemplateString) Equal(other Value) bool { + if o, ok := other.(*TemplateString); ok && ts.MultiLine == o.MultiLine && len(ts.Parts) == len(o.Parts) { + for i, p := range ts.Parts { + switch v := p.(type) { + case *Expr: + if ope, ok := o.Parts[i].(*Expr); !ok || !v.Equal(ope) { + return false + } + case *Term: + if opt, ok := o.Parts[i].(*Term); !ok || !v.Equal(opt) { + return false + } + default: + return false + } + } + return true + } + return false +} + +func (ts *TemplateString) Compare(other Value) int { + if ots, ok := other.(*TemplateString); ok { + if ts.MultiLine != ots.MultiLine { + if !ts.MultiLine { + return -1 + } + return 1 + } + + if len(ts.Parts) != len(ots.Parts) { + return len(ts.Parts) - len(ots.Parts) + } + + for i := range ts.Parts { + if cmp := Compare(ts.Parts[i], ots.Parts[i]); cmp != 0 { + return cmp + } + } + + return 0 + } + return Compare(ts, other) +} + +func (ts *TemplateString) Find(path Ref) (Value, error) { + if len(path) == 0 { + return ts, nil + } + return nil, errFindNotFound +} + +func (ts 
*TemplateString) Hash() int { + hash := 0 + for _, p := range ts.Parts { + switch x := p.(type) { + case *Expr: + hash += x.Hash() + case *Term: + hash += x.Value.Hash() + default: + panic(fmt.Sprintf("invalid template part type %T", p)) + } + } + return hash +} + +func (*TemplateString) IsGround() bool { + return false +} + +func (ts *TemplateString) String() string { + buf, _ := ts.AppendText(make([]byte, 0, ts.StringLength())) + return util.ByteSliceToString(buf) +} + +func TemplateStringTerm(multiLine bool, parts ...Node) *Term { + return &Term{Value: &TemplateString{MultiLine: multiLine, Parts: parts}} +} + +// EscapeTemplateStringStringPart escapes unescaped left curly braces in s - i.e "{" becomes "\{". +// The internal representation of string terms within a template string does **NOT** +// treat '{' as special, but expects code dealing with template strings to escape them when +// required, such as when serializing the complete template string. Code that programmatically +// constructs template strings should not pre-escape left curly braces in string term parts. +// +// // TODO(anders): a future optimization would be to combine this with the other escaping done +// // for strings (e.g. escaping quotes, backslashes, and JSON control characters) in a single operation +// // to avoid multiple passes and allocations over the same string. That's currently done by +// // strconv.Quote, so we would need to re-implement that logic in code of our own. +// // NOTE(anders): I would love to come up with a better name for this component than +// // "TemplateStringStringPart".. 
+func EscapeTemplateStringStringPart(s string) string { + numUnescaped := countUnescapedLeftCurly(s) + if numUnescaped == 0 { + return s + } + + return util.ByteSliceToString(AppendEscapedTemplateStringStringPart(make([]byte, 0, len(s)+numUnescaped), s)) +} + +func AppendEscapedTemplateStringStringPart(buf []byte, s string) []byte { + if s[0] == '{' { + buf = append(buf, '\\', s[0]) + } else { + buf = append(buf, s[0]) + } + + for i := 1; i < len(s); i++ { + if s[i] == '{' && s[i-1] != '\\' { + buf = append(buf, '\\', s[i]) + } else { + buf = append(buf, s[i]) + } + } + + return buf +} + +func countUnescapedLeftCurly(s string) (n int) { + // Note(anders): while not the functions I'd intuitively reach for to solve this, + // they are hands down the fastest option here, as they're done in assembly, which + // performs about an order of magnitude better than a manual loop in Go. + if n = strings.Count(s, "{"); n > 0 { + n -= strings.Count(s, `\{`) + } + return n +} + // Var represents a variable as defined by the language. type Var string // VarTerm creates a new Term with a Variable value. func VarTerm(v string) *Term { - return &Term{Value: Var(v)} + return &Term{Value: InternedVarValue(v)} } // Equal returns true if the other Value is a Variable and has the same value @@ -910,7 +1060,7 @@ func (v Var) String() string { // illegal variable name character (WildcardPrefix) to avoid conflicts. When // we serialize the variable here, we need to make sure it's parseable. if v.IsWildcard() { - return Wildcard.String() + return WildcardString } return string(v) } @@ -981,14 +1131,14 @@ func (ref Ref) Insert(x *Term, pos int) Ref { // Extend returns a copy of ref with the terms from other appended. The head of // other will be converted to a string. 
func (ref Ref) Extend(other Ref) Ref { - dst := make(Ref, len(ref)+len(other)) + offset := len(ref) + dst := make(Ref, offset+len(other)) copy(dst, ref) head := other[0].Copy() head.Value = String(head.Value.(Var)) - offset := len(ref) - dst[offset] = head + dst[offset] = head copy(dst[offset+1:], other[1:]) return dst } @@ -1100,42 +1250,38 @@ func (ref Ref) HasPrefix(other Ref) bool { func (ref Ref) ConstantPrefix() Ref { i := ref.Dynamic() if i < 0 { - return ref.Copy() + return ref } - return ref[:i].Copy() + return ref[:i] } +// StringPrefix returns the string portion of the ref starting from the head. func (ref Ref) StringPrefix() Ref { for i := 1; i < len(ref); i++ { switch ref[i].Value.(type) { case String: // pass default: // cut off - return ref[:i].Copy() + return ref[:i] } } - return ref.Copy() + return ref } // GroundPrefix returns the ground portion of the ref starting from the head. By // definition, the head of the reference is always ground. func (ref Ref) GroundPrefix() Ref { - if ref.IsGround() { - return ref - } - - prefix := make(Ref, 0, len(ref)) - - for i, x := range ref { - if i > 0 && !x.IsGround() { - break + for i := range ref { + if i > 0 && !ref[i].IsGround() { + return ref[:i] } - prefix = append(prefix, x) } - return prefix + return ref } +// DynamicSuffix returns the dynamic portion of the ref. +// If the ref is not dynamic, nil is returned. func (ref Ref) DynamicSuffix() Ref { i := ref.Dynamic() if i < 0 { @@ -1146,7 +1292,7 @@ func (ref Ref) DynamicSuffix() Ref { // IsGround returns true if all of the parts of the Ref are ground. func (ref Ref) IsGround() bool { - if len(ref) == 0 { + if len(ref) < 2 { return true } return termSliceIsGround(ref[1:]) @@ -1166,69 +1312,84 @@ func (ref Ref) IsNested() bool { // contains non-string terms this function returns an error. Path // components are escaped. 
func (ref Ref) Ptr() (string, error) { - parts := make([]string, 0, len(ref)-1) - for _, term := range ref[1:] { - if str, ok := term.Value.(String); ok { - parts = append(parts, url.PathEscape(string(str))) - } else { + buf := &strings.Builder{} + tail := ref[1:] + + l := max(len(tail)-1, 0) // number of '/' to add + for i := range tail { + str, ok := tail[i].Value.(String) + if !ok { return "", errors.New("invalid path value type") } + l += len(str) } - return strings.Join(parts, "/"), nil -} + buf.Grow(l) -var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") + for i := range tail { + if i > 0 { + buf.WriteByte('/') + } + str := string(tail[i].Value.(String)) + // Sadly, the url package does not expose an appender for this. + buf.WriteString(url.PathEscape(str)) + } + return buf.String(), nil +} +// IsVarCompatibleString returns true if s is a valid variable name. String s is a valid variable +// name if it starts with a letter (a-z or A-Z) or underscore (_) and is followed by +// letters (a-z or A-Z), digits (0-9), and underscores. func IsVarCompatibleString(s string) bool { - return varRegexp.MatchString(s) + l := len(s) + if l == 0 { + return false + } + // not exactly easy on the eyes, but often orders of magnitude faster + // than using a compiled regex (see benchmarks in term_bench_test.go) + is_letter := func(c byte) bool { + return (c > 96 && c < 123) || (c > 64 && c < 91) + } + is_digit := func(c byte) bool { + return c > 47 && c < 58 + } + + // first character must be a letter or underscore + c := s[0] + if !(is_letter(c) || c == 95) { + return false + } + + // remaining characters must be letters, digits, or underscores + for i := 1; i < l; i++ { + if c = s[i]; !(is_letter(c) || is_digit(c) || c == 95) { + return false + } + } + + return true } func (ref Ref) String() string { - if len(ref) == 0 { + l := len(ref) + // First check for zero-alloc options, as making the buffer for AppendText + // always costs an allocation. 
+ if l == 0 { return "" } - - if len(ref) == 1 { - switch p := ref[0].Value.(type) { - case Var: - return p.String() - } - } - - sb := sbPool.Get() - defer sbPool.Put(sb) - - sb.Grow(10 * len(ref)) - sb.WriteString(ref[0].Value.String()) - - for _, p := range ref[1:] { - switch p := p.Value.(type) { - case String: - str := string(p) - if varRegexp.MatchString(str) && !IsKeyword(str) { - sb.WriteByte('.') - sb.WriteString(str) - } else { - sb.WriteByte('[') - // Determine whether we need the full JSON-escaped form - if strings.ContainsFunc(str, isControlOrBackslash) { - // only now pay the cost of expensive JSON-escaped form - sb.WriteString(p.String()) - } else { - sb.WriteByte('"') - sb.WriteString(str) - sb.WriteByte('"') - } - sb.WriteByte(']') - } - default: - sb.WriteByte('[') - sb.WriteString(p.String()) - sb.WriteByte(']') + if l == 1 { + if s, ok := ref[0].Value.(String); ok { + // Ref head should normally be a Var, but if for some reason + // it's a string, don't quote it. + return string(s) } + return ref[0].Value.String() + } + if name, ok := BuiltinNameFromRef(ref); ok { + return name } - return sb.String() + buf, _ := ref.AppendText(make([]byte, 0, ref.StringLength())) + return util.ByteSliceToString(buf) } // OutputVars returns a VarSet containing variables that would be bound by evaluating @@ -1271,6 +1432,15 @@ func NewArray(a ...*Term) *Array { return arr } +// NewArrayWithCapacity returns a new empty Array with the given capacity pre-allocated. +func NewArrayWithCapacity(capacity int) *Array { + return &Array{ + elems: make([]*Term, 0, capacity), + hashs: make([]int, 0, capacity), + ground: true, + } +} + // Array represents an array as defined by the language. Arrays are similar to the // same types as defined by JSON with the exception that they can contain Vars // and References. @@ -1283,13 +1453,12 @@ type Array struct { // Copy returns a deep copy of arr. 
func (arr *Array) Copy() *Array { - cpy := make([]int, len(arr.elems)) - copy(cpy, arr.hashs) return &Array{ elems: termSliceCopy(arr.elems), - hashs: cpy, + hashs: slices.Clone(arr.hashs), hash: arr.hash, - ground: arr.IsGround()} + ground: arr.ground, + } } // Equal returns true if arr is equal to other. @@ -1400,21 +1569,8 @@ func (arr *Array) MarshalJSON() ([]byte, error) { } func (arr *Array) String() string { - sb := sbPool.Get() - sb.Grow(len(arr.elems) * 16) - - defer sbPool.Put(sb) - - sb.WriteByte('[') - for i, e := range arr.elems { - if i > 0 { - sb.WriteString(", ") - } - sb.WriteString(e.String()) - } - sb.WriteByte(']') - - return sb.String() + buf, _ := arr.AppendText(make([]byte, 0, arr.StringLength())) + return util.ByteSliceToString(buf) } // Len returns the number of elements in the array. @@ -1532,6 +1688,11 @@ func NewSet(t ...*Term) Set { return s } +// NewSetWithCapacity returns a new empty Set with the given capacity pre-allocated. +func NewSetWithCapacity(capacity int) Set { + return newset(capacity) +} + func newset(n int) *set { var keys []*Term if n > 0 { @@ -1568,13 +1729,19 @@ type set struct { // Copy returns a deep copy of s. 
func (s *set) Copy() Set { - terms := make([]*Term, len(s.keys)) - for i := range s.keys { - terms[i] = s.keys[i].Copy() + cpy := &set{ + hash: s.hash, + ground: s.ground, + sortGuard: sync.Once{}, + elems: make(map[int]*Term, len(s.elems)), + keys: make([]*Term, 0, len(s.keys)), } - cpy := NewSet(terms...).(*set) - cpy.hash = s.hash - cpy.ground = s.ground + + for hash := range s.elems { + cpy.elems[hash] = s.elems[hash].Copy() + cpy.keys = append(cpy.keys, cpy.elems[hash]) + } + return cpy } @@ -1589,25 +1756,8 @@ func (s *set) Hash() int { } func (s *set) String() string { - if s.Len() == 0 { - return "set()" - } - - sb := sbPool.Get() - sb.Grow(s.Len() * 16) - - defer sbPool.Put(sb) - - sb.WriteByte('{') - for i := range s.sortedKeys() { - if i > 0 { - sb.WriteString(", ") - } - sb.WriteString(s.keys[i].Value.String()) - } - sb.WriteByte('}') - - return sb.String() + buf, _ := s.AppendText(make([]byte, 0, s.StringLength())) + return util.ByteSliceToString(buf) } func (s *set) sortedKeys() []*Term { @@ -1648,14 +1798,14 @@ func (s *set) Diff(other Set) Set { return NewSet() } - terms := make([]*Term, 0, len(s.keys)) - for _, term := range s.sortedKeys() { + result := newset(len(s.keys)) + for _, term := range s.keys { if !other.Contains(term) { - terms = append(terms, term) + result.insert(term, false) } } - return NewSet(terms...) + return result } // Intersect returns the set containing elements in both s and other. @@ -1670,21 +1820,28 @@ func (s *set) Intersect(other Set) Set { n = m } - terms := make([]*Term, 0, n) - for _, term := range ss.sortedKeys() { + result := newset(n) + for _, term := range ss.keys { if so.Contains(term) { - terms = append(terms, term) + result.insert(term, false) } } - return NewSet(terms...) + return result } // Union returns the set containing all elements of s and other. 
func (s *set) Union(other Set) Set { - r := NewSet() - s.Foreach(r.Add) - other.Foreach(r.Add) + o := other.(*set) + // Pre-allocate with max size - avoids over-allocation for overlapping sets + // while only requiring one potential grow for disjoint sets. + r := newset(max(len(s.keys), len(o.keys))) + for _, term := range s.keys { + r.insert(term, false) + } + for _, term := range o.keys { + r.insert(term, false) + } return r } @@ -1779,85 +1936,9 @@ func (s *set) Slice() []*Term { func (s *set) insert(x *Term, resetSortGuard bool) { hash := x.Hash() insertHash := hash - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v Value) bool - - switch x := x.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. 
- var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - - return false - } - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } for curr, ok := s.elems[insertHash]; ok; { - if equal(curr.Value) { + if KeyHashEqual(curr.Value, x.Value) { return } @@ -1883,87 +1964,18 @@ func (s *set) insert(x *Term, resetSortGuard bool) { } func (s *set) get(x *Term) *Term { - hash := x.Hash() - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v Value) bool - - switch x := x.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). 
- // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. - var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - return false - - } - - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } + if len(s.elems) == 0 { + return nil } + hash := x.Hash() + for curr, ok := s.elems[hash]; ok; { - if equal(curr.Value) { + // Pointer equality check first + if curr == x { + return curr + } + if KeyHashEqual(curr.Value, x.Value) { return curr } @@ -2003,6 +2015,11 @@ func NewObject(t ...[2]*Term) Object { return obj } +// NewObjectWithCapacity returns a new empty Object with the given capacity pre-allocated. +func NewObjectWithCapacity(capacity int) Object { + return newobject(capacity) +} + // ObjectTerm creates a new Term with an Object value. func ObjectTerm(o ...[2]*Term) *Term { return &Term{Value: NewObject(o...)} @@ -2304,12 +2321,37 @@ func (obj *object) Insert(k, v *Term) { // Get returns the value of k in obj if k exists, otherwise nil. 
func (obj *object) Get(k *Term) *Term { - if elem := obj.get(k); elem != nil { - return elem.value + if len(obj.elems) == 0 { + return nil + } + + hash := k.Hash() + for curr := obj.elems[hash]; curr != nil; curr = curr.next { + // Pointer equality check always fastest, and not too unlikely with interning. + if curr.key == k { + return curr.value + } + + if KeyHashEqual(curr.key.Value, k.Value) { + return curr.value + } } return nil } +func KeyHashEqual(x, y Value) bool { + switch x := x.(type) { + case Null, Boolean, String, Var: + return x == y + case Number: + if y, ok := y.(Number); ok { + return x.Equal(y) + } + } + + return Compare(x, y) == 0 +} + // Hash returns the hash code for the Value. func (obj *object) Hash() int { return obj.hash @@ -2449,19 +2491,21 @@ func (obj *object) Merge(other Object) (Object, bool) { // is called. The conflictResolver can return a merged value and a boolean // indicating if the merge has failed and should stop. func (obj *object) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) { - result := NewObject() + // Might overallocate assuming no conflicts is the common case, + // but that's typically faster than iterating over each object twice. 
+ result := newobject(obj.Len() + other.Len()) stop := obj.Until(func(k, v *Term) bool { v2 := other.Get(k) // The key didn't exist in other, keep the original value if v2 == nil { - result.Insert(k, v) + result.insert(k, v, false) return false } // The key exists in both, resolve the conflict if possible merged, stop := conflictResolver(v, v2) if !stop { - result.Insert(k, merged) + result.insert(k, merged, false) } return stop }) @@ -2473,7 +2517,7 @@ func (obj *object) MergeWith(other Object, conflictResolver func(v1, v2 *Term) ( // Copy in any values from other for keys that don't exist in obj other.Foreach(func(k, v *Term) { if v2 := obj.Get(k); v2 == nil { - result.Insert(k, v) + result.insert(k, v, false) } }) return result, true @@ -2496,114 +2540,11 @@ func (obj *object) Len() int { } func (obj *object) String() string { - sb := sbPool.Get() - sb.Grow(obj.Len() * 32) - - defer sbPool.Put(sb) - - sb.WriteByte('{') - - for i, elem := range obj.sortedKeys() { - if i > 0 { - sb.WriteString(", ") - } - sb.WriteString(elem.key.String()) - sb.WriteString(": ") - sb.WriteString(elem.value.String()) - } - sb.WriteByte('}') - - return sb.String() + buf, _ := obj.AppendText(make([]byte, 0, obj.StringLength())) + return util.ByteSliceToString(buf) } -func (obj *object) get(k *Term) *objectElem { - hash := k.Hash() - - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. 
- var equal func(v Value) bool - - switch x := k.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, ok := x.Int64(); ok { - equal = func(y Value) bool { - if x == y { - return true - } - if y, ok := y.(Number); ok { - if yi, ok := y.Int64(); ok { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. - var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - - return false - } - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } - - for curr := obj.elems[hash]; curr != nil; curr = curr.next { - if equal(curr.key.Value) { - return curr - } - } +func (*object) get(*Term) *objectElem { return nil } @@ -2612,88 +2553,9 @@ func (obj *object) get(k *Term) *objectElem { func (obj *object) insert(k, v *Term, resetSortGuard bool) { hash := k.Hash() head := obj.elems[hash] - // This 
`equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v Value) bool - - switch x := k.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if x == y { - return true - } - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. 
- var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - - return false - } - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } for curr := head; curr != nil; curr = curr.next { - if equal(curr.key.Value) { + if KeyHashEqual(curr.key.Value, k.Value) { if curr.value.IsGround() { obj.ground-- } @@ -2750,7 +2612,7 @@ func filterObject(o Value, filter Value) (Value, error) { case String, Number, Boolean, Null: return o, nil case *Array: - values := NewArray() + values := make([]*Term, 0, v.Len()) for i := range v.Len() { subFilter := filteredObj.Get(InternedIntegerString(i)) if subFilter != nil { @@ -2758,10 +2620,10 @@ func filterObject(o Value, filter Value) (Value, error) { if err != nil { return nil, err } - values = values.Append(NewTerm(filteredValue)) + values = append(values, NewTerm(filteredValue)) } } - return values, nil + return NewArray(values...), nil case Set: terms := make([]*Term, 0, v.Len()) for _, t := range v.Slice() { @@ -2884,7 +2746,8 @@ func (ac *ArrayComprehension) IsGround() bool { } func (ac *ArrayComprehension) String() string { - return "[" + ac.Term.String() + " | " + ac.Body.String() + "]" + buf, _ := ac.AppendText(make([]byte, 0, ac.StringLength())) + return util.ByteSliceToString(buf) } // ObjectComprehension represents an object comprehension as defined in the language. 
@@ -2944,7 +2807,8 @@ func (oc *ObjectComprehension) IsGround() bool { } func (oc *ObjectComprehension) String() string { - return "{" + oc.Key.String() + ": " + oc.Value.String() + " | " + oc.Body.String() + "}" + buf, _ := oc.AppendText(make([]byte, 0, oc.StringLength())) + return util.ByteSliceToString(buf) } // SetComprehension represents a set comprehension as defined in the language. @@ -3001,7 +2865,8 @@ func (sc *SetComprehension) IsGround() bool { } func (sc *SetComprehension) String() string { - return "{" + sc.Term.String() + " | " + sc.Body.String() + "}" + buf, _ := sc.AppendText(make([]byte, 0, sc.StringLength())) + return util.ByteSliceToString(buf) } // Call represents as function call in the language. @@ -3039,18 +2904,31 @@ func (c Call) IsGround() bool { return termSliceIsGround(c) } -// MakeExpr returns an ew Expr from this call. +// MakeExpr returns a new Expr from this call. func (c Call) MakeExpr(output *Term) *Expr { terms := []*Term(c) return NewExpr(append(terms, output)) } -func (c Call) String() string { - args := make([]string, len(c)-1) - for i := 1; i < len(c); i++ { - args[i-1] = c[i].String() +func (c Call) Operator() Ref { + if len(c) == 0 { + return nil + } + + return c[0].Value.(Ref) +} + +func (c Call) Operands() []*Term { + if len(c) < 1 { + return nil } - return fmt.Sprintf("%v(%v)", c[0], strings.Join(args, ", ")) + + return c[1:] +} + +func (c Call) String() string { + buf, _ := c.AppendText(make([]byte, 0, c.StringLength())) + return util.ByteSliceToString(buf) } func termSliceCopy(a []*Term) []*Term { diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/term_appenders.go b/vendor/github.com/open-policy-agent/opa/v1/ast/term_appenders.go new file mode 100644 index 0000000000..93c0803910 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/term_appenders.go @@ -0,0 +1,266 @@ +package ast + +import ( + "encoding" + "strconv" + "strings" + + "github.com/open-policy-agent/opa/v1/util" +) + +// AppendText 
appends the text representation of term (i.e. as printed in policy) to +// buf and returns the extended buffer. +func (term *Term) AppendText(buf []byte) ([]byte, error) { + if app, ok := term.Value.(encoding.TextAppender); ok { + return app.AppendText(buf) + } + + return append(buf, term.Value.String()...), nil +} + +func (v Var) AppendText(buf []byte) ([]byte, error) { + if v.IsWildcard() { + return append(buf, WildcardString...), nil + } + return append(buf, v...), nil +} + +func (b Boolean) AppendText(buf []byte) ([]byte, error) { + if b { + return append(buf, "true"...), nil + } + return append(buf, "false"...), nil +} + +func (Null) AppendText(buf []byte) ([]byte, error) { + return append(buf, "null"...), nil +} + +func (str String) AppendText(buf []byte) ([]byte, error) { + return strconv.AppendQuote(buf, string(str)), nil +} + +func (str String) appendNoQuote(buf []byte) []byte { + // Append using strconv.AppendQuote for proper escaping, but trim off + // the leading and trailing quotes afterwards. + oldLen := len(buf) + buf = strconv.AppendQuote(buf, string(str)) + newLen := len(buf) + quoted := buf[oldLen:newLen] + + return append(buf[:oldLen], quoted[1:len(quoted)-1]...) +} + +func (num Number) AppendText(buf []byte) ([]byte, error) { + return append(buf, num...), nil +} + +func (arr *Array) AppendText(buf []byte) ([]byte, error) { + buf, err := AppendDelimeted(append(buf, '['), arr.elems, ", ") + if err != nil { + return nil, err + } + return append(buf, ']'), nil +} + +func (obj *object) AppendText(buf []byte) ([]byte, error) { + olen := obj.Len() + if olen == 0 { + return append(buf, "{}"...), nil + } + + buf = append(buf, '{') + + var err error + + // first key-value pair + keys := obj.sortedKeys() + for i := range keys { + if buf, err = keys[i].key.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, ": "...) 
+ if buf, err = keys[i].value.AppendText(buf); err != nil { + return nil, err + } + if i < olen-1 { + buf = append(buf, ", "...) + } + } + + return append(buf, '}'), nil +} + +func (obj *lazyObj) AppendText(buf []byte) ([]byte, error) { + return append(buf, obj.force().String()...), nil +} + +func (s *set) AppendText(buf []byte) ([]byte, error) { + slen := s.Len() + if slen == 0 { + return append(buf, "set()"...), nil + } + + var err error + + buf = append(buf, '{') + if buf, err = AppendDelimeted(buf, s.sortedKeys(), ", "); err != nil { + return nil, err + } + + return append(buf, '}'), nil +} + +func (c Call) AppendText(buf []byte) ([]byte, error) { + if len(c) == 0 { + return buf, nil + } + + var err error + + if buf, err = c[0].AppendText(buf); err != nil { + return nil, err + } + + if buf, err = AppendDelimeted(append(buf, '('), c[1:], ", "); err != nil { + return nil, err + } + return append(buf, ')'), nil +} + +func (ts *TemplateString) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, "$\""...) + for _, p := range ts.Parts { + switch x := p.(type) { + case *Expr: + buf = append(buf, '{') + var err error + if buf, err = x.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, '}') + case *Term: + if str, ok := x.Value.(String); ok { + // TODO(anders): this is a bit of a mess, but as explained by the comment on + // [EscapeTemplateStringStringPart], required as long as we rely on strconv for escaping, which adds + // quotes around the string that we don't want here, and trying to "unappend" them is not nice at all.. + s := string(str) + ulc := countUnescapedLeftCurly(s) + sl := str.StringLength() + ulc - 2 // no surrounding quotes + + if sl == len(s) { // no escaping needed + buf = append(buf, s...) + } else { // some escaping needed + if sl == len(s)+ulc { // only unescaped { + buf = AppendEscapedTemplateStringStringPart(buf, string(str)) + } else { // full escaping needed. 
this is expensive but luckily rare + tmp := str.appendNoQuote(make([]byte, 0, sl)) + ets := EscapeTemplateStringStringPart(util.ByteSliceToString(tmp)) + buf = append(buf, ets...) + } + } + } else { + var err error + if buf, err = x.AppendText(buf); err != nil { + return nil, err + } + } + default: + buf = append(buf, ""...) + } + } + return append(buf, '"'), nil +} + +func (r Ref) AppendText(buf []byte) ([]byte, error) { + reflen := len(r) + if reflen == 0 { + return buf, nil + } + if reflen == 1 { + if s, ok := r[0].Value.(String); ok { + // While a ref head is typically a Var, a lone String term should not be quoted + return append(buf, s...), nil + } + return r[0].AppendText(buf) + } + if name, ok := BuiltinNameFromRef(r); ok { + return append(buf, name...), nil + } + + var err error + if s, ok := r[0].Value.(String); ok { + buf = append(buf, s...) + } else if buf, err = r[0].AppendText(buf); err != nil { + return nil, err + } + + for _, p := range r[1:] { + switch v := p.Value.(type) { + case String: + str := string(v) + if IsVarCompatibleString(str) && !IsKeyword(str) { + buf = append(append(buf, '.'), str...) 
+ } else { + buf = append(buf, '[') + // Determine whether we need the full JSON-escaped form + if strings.ContainsFunc(str, isControlOrBackslash) { + if buf, err = v.AppendText(buf); err != nil { + return nil, err + } + } else { + buf = append(append(append(buf, '"'), str...), '"') + } + buf = append(buf, ']') + } + default: + buf = append(buf, '[') + if buf, err = p.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, ']') + } + } + + return buf, nil +} + +func (sc *SetComprehension) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, '{') + var err error + if buf, err = sc.Term.AppendText(buf); err != nil { + return nil, err + } + if buf, err = sc.Body.AppendText(append(buf, " | "...)); err != nil { + return nil, err + } + return append(buf, '}'), nil +} + +func (ac *ArrayComprehension) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, '[') + var err error + if buf, err = ac.Term.AppendText(buf); err != nil { + return nil, err + } + if buf, err = ac.Body.AppendText(append(buf, " | "...)); err != nil { + return nil, err + } + return append(buf, ']'), nil +} + +func (oc *ObjectComprehension) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, '{') + var err error + if buf, err = oc.Key.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, ": "...) 
+ if buf, err = oc.Value.AppendText(buf); err != nil { + return nil, err + } + if buf, err = oc.Body.AppendText(append(buf, " | "...)); err != nil { + return nil, err + } + return append(buf, '}'), nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go b/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go index 197ab6457d..a71bc0a77c 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go @@ -19,7 +19,6 @@ type Transformer interface { // Transform iterates the AST and calls the Transform function on the // Transformer t for x before recursing. func Transform(t Transformer, x any) (any, error) { - if term, ok := x.(*Term); ok { return Transform(t, term.Value) } @@ -284,6 +283,19 @@ func Transform(t Transformer, x any) (any, error) { } } return y, nil + case *TemplateString: + for i := range y.Parts { + if expr, ok := y.Parts[i].(*Expr); ok { + transformed, err := Transform(t, expr) + if err != nil { + return nil, err + } + if y.Parts[i], ok = transformed.(*Expr); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", expr, transformed) + } + } + } + return y, nil default: return y, nil } @@ -291,29 +303,29 @@ func Transform(t Transformer, x any) (any, error) { // TransformRefs calls the function f on all references under x. func TransformRefs(x any, f func(Ref) (Value, error)) (any, error) { - t := &GenericTransformer{func(x any) (any, error) { + t := NewGenericTransformer(func(x any) (any, error) { if r, ok := x.(Ref); ok { return f(r) } return x, nil - }} + }) return Transform(t, x) } // TransformVars calls the function f on all vars under x. 
func TransformVars(x any, f func(Var) (Value, error)) (any, error) { - t := &GenericTransformer{func(x any) (any, error) { + t := NewGenericTransformer(func(x any) (any, error) { if v, ok := x.(Var); ok { return f(v) } return x, nil - }} + }) return Transform(t, x) } -// TransformComprehensions calls the functio nf on all comprehensions under x. +// TransformComprehensions calls the function f on all comprehensions under x. func TransformComprehensions(x any, f func(any) (Value, error)) (any, error) { - t := &GenericTransformer{func(x any) (any, error) { + t := NewGenericTransformer(func(x any) (any, error) { switch x := x.(type) { case *ArrayComprehension: return f(x) @@ -323,7 +335,7 @@ func TransformComprehensions(x any, f func(any) (Value, error)) (any, error) { return f(x) } return x, nil - }} + }) return Transform(t, x) } @@ -387,11 +399,7 @@ func transformTerm(t Transformer, term *Term) (*Term, error) { if err != nil { return nil, err } - r := &Term{ - Value: v, - Location: term.Location, - } - return r, nil + return &Term{Value: v, Location: term.Location}, nil } func transformValue(t Transformer, v Value) (Value, error) { @@ -407,13 +415,18 @@ func transformValue(t Transformer, v Value) (Value, error) { } func transformVar(t Transformer, v Var) (Var, error) { - v1, err := Transform(t, v) + tv, err := t.Transform(v) if err != nil { return "", err } - r, ok := v1.(Var) + + if tv == nil { + return "", nil + } + + r, ok := tv.(Var) if !ok { - return "", fmt.Errorf("illegal transform: %T != %T", v, v1) + return "", fmt.Errorf("illegal transform: %T != %T", v, tv) } return r, nil } diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go b/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go index acbe275c0f..67b89a2a93 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go @@ -11,12 +11,11 @@ func isRefSafe(ref Ref, safe VarSet) bool { case Call: return isCallSafe(head, 
safe) default: - for v := range ref[0].Vars() { - if !safe.Contains(v) { - return false - } - } - return true + vis := varVisitorPool.Get().WithParams(SafetyCheckVisitorParams) + vis.Walk(ref[0]) + isSafe := vis.Vars().DiffCount(safe) == 0 + varVisitorPool.Put(vis) + return isSafe } } diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go b/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go index e5bd52ae8c..55bbea80d0 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go @@ -12,7 +12,7 @@ import ( ) // VarSet represents a set of variables. -type VarSet map[Var]struct{} +type VarSet map[Var]struct{ *Location } // NewVarSet returns a new VarSet containing the specified variables. func NewVarSet(vs ...Var) VarSet { @@ -30,7 +30,16 @@ func NewVarSetOfSize(size int) VarSet { // Add updates the set to include the variable "v". func (s VarSet) Add(v Var) { - s[v] = struct{}{} + if _, ok := s[v]; !ok { + s[v] = struct{ *Location }{} + } +} + +func (s VarSet) AddLocation(v Var, l *Location) { + if entry, ok := s[v]; ok { + entry.Location = l + s[v] = entry + } } // Contains returns true if the set contains the variable "v". 
@@ -54,6 +63,7 @@ func (s VarSet) Diff(vs VarSet) VarSet { for v := range s { if !vs.Contains(v) { r.Add(v) + r.AddLocation(v, s[v].Location) } } return r diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json b/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json index b02f785299..bd0df82ad6 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json @@ -3,1476 +3,1077 @@ "abs": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "all": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "and": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "any": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "array.concat": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 + }, + "array.flatten": { + "Major": 1, + "Minor": 13, + "Patch": 0 }, "array.reverse": { "Major": 0, "Minor": 36, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "array.slice": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "assign": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "base64.decode": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "base64.encode": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "base64.is_valid": { "Major": 0, "Minor": 24, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "base64url.decode": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "base64url.encode": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "base64url.encode_no_pad": 
{ "Major": 0, "Minor": 25, "Patch": 0, - "PreRelease": "rc2", - "Metadata": "" + "PreRelease": "rc2" }, "bits.and": { "Major": 0, "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "bits.lsh": { "Major": 0, "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "bits.negate": { "Major": 0, "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "bits.or": { "Major": 0, "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "bits.rsh": { "Major": 0, "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "bits.xor": { "Major": 0, "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "cast_array": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "cast_boolean": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "cast_null": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "cast_object": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "cast_set": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "cast_string": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "ceil": { "Major": 0, "Minor": 26, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "concat": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "contains": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "count": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.hmac.equal": { "Major": 0, "Minor": 52, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.hmac.md5": { "Major": 0, "Minor": 36, - "Patch": 0, - "PreRelease": 
"", - "Metadata": "" + "Patch": 0 }, "crypto.hmac.sha1": { "Major": 0, "Minor": 36, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.hmac.sha256": { "Major": 0, "Minor": 36, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.hmac.sha512": { "Major": 0, "Minor": 36, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.md5": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.parse_private_keys": { "Major": 0, "Minor": 55, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.sha1": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.sha256": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.x509.parse_and_verify_certificates": { "Major": 0, "Minor": 31, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.x509.parse_and_verify_certificates_with_options": { "Major": 0, "Minor": 63, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.x509.parse_certificate_request": { "Major": 0, "Minor": 21, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.x509.parse_certificates": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.x509.parse_keypair": { "Major": 0, "Minor": 53, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "crypto.x509.parse_rsa_private_key": { "Major": 0, "Minor": 33, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "div": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "endswith": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "eq": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "equal": { "Major": 0, "Minor": 17, - "Patch": 0, 
- "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "floor": { "Major": 0, "Minor": 26, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "format_int": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "glob.match": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "glob.quote_meta": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "graph.reachable": { "Major": 0, "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "graph.reachable_paths": { "Major": 0, "Minor": 37, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "graphql.is_valid": { "Major": 0, "Minor": 41, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "graphql.parse": { "Major": 0, "Minor": 41, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "graphql.parse_and_verify": { "Major": 0, "Minor": 41, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "graphql.parse_query": { "Major": 0, "Minor": 41, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "graphql.parse_schema": { "Major": 0, "Minor": 41, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "graphql.schema_is_valid": { "Major": 0, "Minor": 46, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "gt": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "gte": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "hex.decode": { "Major": 0, "Minor": 25, "Patch": 0, - "PreRelease": "rc2", - "Metadata": "" + "PreRelease": "rc2" }, "hex.encode": { "Major": 0, "Minor": 25, "Patch": 0, - "PreRelease": "rc2", - "Metadata": "" + "PreRelease": "rc2" }, "http.send": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "indexof": { "Major": 0, "Minor": 
17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "indexof_n": { "Major": 0, "Minor": 37, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "internal.member_2": { "Major": 0, "Minor": 34, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "internal.member_3": { "Major": 0, "Minor": 34, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "internal.print": { "Major": 0, "Minor": 34, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 + }, + "internal.template_string": { + "Major": 1, + "Minor": 12, + "Patch": 0 }, "internal.test_case": { "Major": 1, "Minor": 2, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "intersection": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.decode": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.decode_verify": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.encode_sign": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.encode_sign_raw": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.verify_eddsa": { "Major": 1, "Minor": 8, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.verify_es256": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.verify_es384": { "Major": 0, "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.verify_es512": { "Major": 0, "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.verify_hs256": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.verify_hs384": { "Major": 0, "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, 
"io.jwt.verify_hs512": { "Major": 0, "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.verify_ps256": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.verify_ps384": { "Major": 0, "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.verify_ps512": { "Major": 0, "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.verify_rs256": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.verify_rs384": { "Major": 0, "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "io.jwt.verify_rs512": { "Major": 0, "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "is_array": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "is_boolean": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "is_null": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "is_number": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "is_object": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "is_set": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "is_string": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "json.filter": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "json.is_valid": { "Major": 0, "Minor": 25, "Patch": 0, - "PreRelease": "rc1", - "Metadata": "" + "PreRelease": "rc1" }, "json.marshal": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "json.marshal_with_options": { "Major": 0, "Minor": 64, - "Patch": 0, - "PreRelease": "", - 
"Metadata": "" + "Patch": 0 }, "json.match_schema": { "Major": 0, "Minor": 50, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "json.patch": { "Major": 0, "Minor": 25, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "json.remove": { "Major": 0, "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "json.unmarshal": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "json.verify_schema": { "Major": 0, "Minor": 50, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "lower": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "lt": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "lte": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "max": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "min": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "minus": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "mul": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "neq": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "net.cidr_contains": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "net.cidr_contains_matches": { "Major": 0, "Minor": 19, "Patch": 0, - "PreRelease": "rc1", - "Metadata": "" + "PreRelease": "rc1" }, "net.cidr_expand": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "net.cidr_intersects": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "net.cidr_is_valid": { "Major": 0, "Minor": 46, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, 
"net.cidr_merge": { "Major": 0, "Minor": 24, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "net.cidr_overlap": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "net.lookup_ip_addr": { "Major": 0, "Minor": 35, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "numbers.range": { "Major": 0, "Minor": 22, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "numbers.range_step": { "Major": 0, "Minor": 56, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "object.filter": { "Major": 0, "Minor": 17, - "Patch": 2, - "PreRelease": "", - "Metadata": "" + "Patch": 2 }, "object.get": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "object.keys": { "Major": 0, "Minor": 47, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "object.remove": { "Major": 0, "Minor": 17, - "Patch": 2, - "PreRelease": "", - "Metadata": "" + "Patch": 2 }, "object.subset": { "Major": 0, "Minor": 42, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "object.union": { "Major": 0, "Minor": 17, - "Patch": 2, - "PreRelease": "", - "Metadata": "" + "Patch": 2 }, "object.union_n": { "Major": 0, "Minor": 37, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "opa.runtime": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "or": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "plus": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "print": { "Major": 0, "Minor": 34, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "product": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "providers.aws.sign_req": { "Major": 0, "Minor": 47, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "rand.intn": { 
"Major": 0, "Minor": 31, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "re_match": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "regex.find_all_string_submatch_n": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "regex.find_n": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "regex.globs_match": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "regex.is_valid": { "Major": 0, "Minor": 23, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "regex.match": { "Major": 0, "Minor": 23, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "regex.replace": { "Major": 0, "Minor": 45, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "regex.split": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "regex.template_match": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "rego.metadata.chain": { "Major": 0, "Minor": 40, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "rego.metadata.rule": { "Major": 0, "Minor": 40, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "rego.parse_module": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "rem": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "replace": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "round": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "semver.compare": { "Major": 0, "Minor": 22, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "semver.is_valid": { "Major": 0, "Minor": 22, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, 
"set_diff": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "sort": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "split": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "sprintf": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "startswith": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "strings.any_prefix_match": { "Major": 0, "Minor": 44, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "strings.any_suffix_match": { "Major": 0, "Minor": 44, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "strings.count": { "Major": 0, "Minor": 67, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "strings.render_template": { "Major": 0, "Minor": 59, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "strings.replace_n": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "strings.reverse": { "Major": 0, "Minor": 36, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "substring": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "sum": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "time.add_date": { "Major": 0, "Minor": 19, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "time.clock": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "time.date": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "time.diff": { "Major": 0, "Minor": 28, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "time.format": { "Major": 0, "Minor": 48, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "time.now_ns": { 
"Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "time.parse_duration_ns": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "time.parse_ns": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "time.parse_rfc3339_ns": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "time.weekday": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "to_number": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "trace": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "trim": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "trim_left": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "trim_prefix": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "trim_right": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "trim_space": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "trim_suffix": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "type_name": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "union": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "units.parse": { "Major": 0, "Minor": 41, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "units.parse_bytes": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "upper": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "urlquery.decode": { "Major": 0, "Minor": 17, - "Patch": 
0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "urlquery.decode_object": { "Major": 0, "Minor": 24, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "urlquery.encode": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "urlquery.encode_object": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "uuid.parse": { "Major": 0, "Minor": 57, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "uuid.rfc4122": { "Major": 0, "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "walk": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "yaml.is_valid": { "Major": 0, "Minor": 25, "Patch": 0, - "PreRelease": "rc1", - "Metadata": "" + "PreRelease": "rc1" }, "yaml.marshal": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "yaml.unmarshal": { "Major": 0, "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 } }, "features": { "keywords_in_refs": { "Major": 1, "Minor": 6, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "rego_v1": { "Major": 1, "Minor": 0, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "rego_v1_import": { "Major": 0, "Minor": 59, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "rule_head_ref_string_prefixes": { "Major": 0, "Minor": 46, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "rule_head_refs": { "Major": 0, "Minor": 59, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 + }, + "template_strings": { + "Major": 1, + "Minor": 12, + "Patch": 0 } }, "keywords": { "contains": { "Major": 0, "Minor": 42, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "every": { "Major": 0, "Minor": 38, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "if": { "Major": 0, "Minor": 42, - "Patch": 0, 
- "PreRelease": "", - "Metadata": "" + "Patch": 0 }, "in": { "Major": 0, "Minor": 34, - "Patch": 0, - "PreRelease": "", - "Metadata": "" + "Patch": 0 } } } diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go b/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go index 4ae6569ad7..d7725f5a51 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go @@ -4,40 +4,109 @@ package ast -// Visitor defines the interface for iterating AST elements. The Visit function -// can return a Visitor w which will be used to visit the children of the AST -// element v. If the Visit function returns nil, the children will not be -// visited. -// Deprecated: use GenericVisitor or another visitor implementation -type Visitor interface { - Visit(v any) (w Visitor) -} +var ( + termTypeVisitor = newTypeVisitor[*Term]() + varTypeVisitor = newTypeVisitor[Var]() + exprTypeVisitor = newTypeVisitor[*Expr]() + ruleTypeVisitor = newTypeVisitor[*Rule]() + refTypeVisitor = newTypeVisitor[Ref]() + bodyTypeVisitor = newTypeVisitor[Body]() + withTypeVisitor = newTypeVisitor[*With]() +) -// BeforeAndAfterVisitor wraps Visitor to provide hooks for being called before -// and after the AST has been visited. -// Deprecated: use GenericVisitor or another visitor implementation -type BeforeAndAfterVisitor interface { - Visitor - Before(x any) - After(x any) -} +type ( + // GenericVisitor provides a utility to walk over AST nodes using a + // closure. If the closure returns true, the visitor will not walk + // over AST nodes under x. + GenericVisitor struct { + f func(x any) bool + } + + // BeforeAfterVisitor provides a utility to walk over AST nodes using + // closures. If the before closure returns true, the visitor will not + // walk over AST nodes under x. The after closure is invoked always + // after visiting a node. 
+ BeforeAfterVisitor struct { + before func(x any) bool + after func(x any) + } + + // VarVisitor walks AST nodes under a given node and collects all encountered + // variables. The collected variables can be controlled by specifying + // VarVisitorParams when creating the visitor. + VarVisitor struct { + params VarVisitorParams + vars VarSet + } + + // VarVisitorParams contains settings for a VarVisitor. + VarVisitorParams struct { + SkipRefHead bool + SkipRefCallHead bool + SkipObjectKeys bool + SkipClosures bool + SkipWithTarget bool + SkipSets bool + SkipTemplateStrings bool + } -// Walk iterates the AST by calling the Visit function on the Visitor + // Visitor defines the interface for iterating AST elements. The Visit function + // can return a Visitor w which will be used to visit the children of the AST + // element v. If the Visit function returns nil, the children will not be + // visited. + // + // Deprecated: use [GenericVisitor] or another visitor implementation + Visitor interface { + Visit(v any) (w Visitor) + } + + // BeforeAndAfterVisitor wraps Visitor to provide hooks for being called before + // and after the AST has been visited. + // + // Deprecated: use [GenericVisitor] or another visitor implementation + BeforeAndAfterVisitor interface { + Visitor + Before(x any) + After(x any) + } + + // typeVisitor is a generic visitor for a specific type T (the "generic" name was + // however taken). Contrary to the [GenericVisitor], the typeVisitor only invokes + // the visit function for nodes of type T, saving both CPU cycles and type assertions. + // typeVisitor implementations carry no state, and can be shared freely across + // goroutines. Access is private for the time being, as there is already inflation + // in visitor types exposed in the AST package. The various WalkXXX functions however + // now leverage typeVisitor under the hood. 
+ // + // While a typeVisitor is generally a more performant option over a GenericVisitor, + // it is not as flexible: a type visitor can only visit nodes of a single type T, + // whereas a GenericVisitor visits all nodes. Adding to that, a typeVisitor can only + // be instantiated for **concrete types** — not interfaces (e.g., [*Expr], not [Node]), + // as reflection would be required to determine the concrete type at runtime, thus + // nullifying the performance benefits of the typeVisitor in the first place. + typeVisitor[T any] struct { + typ any + } +) + +// Walk iterates the AST by calling the Visit function on the [Visitor] // v for x before recursing. -// Deprecated: use GenericVisitor.Walk +// +// Deprecated: use [GenericVisitor.Walk] func Walk(v Visitor, x any) { if bav, ok := v.(BeforeAndAfterVisitor); !ok { walk(v, x) } else { bav.Before(x) - defer bav.After(x) walk(bav, x) + bav.After(x) } } // WalkBeforeAndAfter iterates the AST by calling the Visit function on the // Visitor v for x before recursing. -// Deprecated: use GenericVisitor.Walk +// +// Deprecated: use [GenericVisitor.Walk] func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x any) { Walk(v, x) } @@ -149,132 +218,258 @@ func walk(v Visitor, x any) { for i := range x.Symbols { Walk(w, x.Symbols[i]) } + case *TemplateString: + for i := range x.Parts { + Walk(w, x.Parts[i]) + } } } // WalkVars calls the function f on all vars under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkVars(x any, f func(Var) bool) { - vis := &GenericVisitor{func(x any) bool { - if v, ok := x.(Var); ok { - return f(v) - } - return false - }} - vis.Walk(x) + varTypeVisitor.walk(x, f) } // WalkClosures calls the function f on all closures under x. If the function f // returns true, AST nodes under the last node will not be visited. 
func WalkClosures(x any, f func(any) bool) { - vis := &GenericVisitor{func(x any) bool { + vis := NewGenericVisitor(func(x any) bool { switch x := x.(type) { case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every: return f(x) } return false - }} + }) vis.Walk(x) } // WalkRefs calls the function f on all references under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkRefs(x any, f func(Ref) bool) { - vis := &GenericVisitor{func(x any) bool { - if r, ok := x.(Ref); ok { - return f(r) - } - return false - }} - vis.Walk(x) + refTypeVisitor.walk(x, f) } // WalkTerms calls the function f on all terms under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkTerms(x any, f func(*Term) bool) { - vis := &GenericVisitor{func(x any) bool { - if term, ok := x.(*Term); ok { - return f(term) - } - return false - }} - vis.Walk(x) + termTypeVisitor.walk(x, f) } // WalkWiths calls the function f on all with modifiers under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkWiths(x any, f func(*With) bool) { - vis := &GenericVisitor{func(x any) bool { - if w, ok := x.(*With); ok { - return f(w) - } - return false - }} - vis.Walk(x) + withTypeVisitor.walk(x, f) } // WalkExprs calls the function f on all expressions under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkExprs(x any, f func(*Expr) bool) { - vis := &GenericVisitor{func(x any) bool { - if r, ok := x.(*Expr); ok { - return f(r) - } - return false - }} - vis.Walk(x) + exprTypeVisitor.walk(x, f) } // WalkBodies calls the function f on all bodies under x. If the function f // returns true, AST nodes under the last node will not be visited. 
func WalkBodies(x any, f func(Body) bool) { - vis := &GenericVisitor{func(x any) bool { - if b, ok := x.(Body); ok { - return f(b) - } - return false - }} - vis.Walk(x) + bodyTypeVisitor.walk(x, f) } // WalkRules calls the function f on all rules under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkRules(x any, f func(*Rule) bool) { - vis := &GenericVisitor{func(x any) bool { - if r, ok := x.(*Rule); ok { - stop := f(r) - // NOTE(tsandall): since rules cannot be embedded inside of queries - // we can stop early if there is no else block. - if stop || r.Else == nil { - return true + switch x := x.(type) { + case *Module: + for i := range x.Rules { + if !f(x.Rules[i]) && x.Rules[i].Else != nil { + WalkRules(x.Rules[i].Else, f) } } - return false - }} - vis.Walk(x) + case *Rule: + if !f(x) && x.Else != nil { + WalkRules(x.Else, f) + } + default: + ruleTypeVisitor.walk(x, f) + } } // WalkNodes calls the function f on all nodes under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkNodes(x any, f func(Node) bool) { - vis := &GenericVisitor{func(x any) bool { + vis := NewGenericVisitor(func(x any) bool { if n, ok := x.(Node); ok { return f(n) } return false - }} + }) vis.Walk(x) } -// GenericVisitor provides a utility to walk over AST nodes using a -// closure. If the closure returns true, the visitor will not walk -// over AST nodes under x. -type GenericVisitor struct { - f func(x any) bool +func newTypeVisitor[T any]() *typeVisitor[T] { + var t T + + return &typeVisitor[T]{typ: any(t)} +} + +func (tv *typeVisitor[T]) walkArgs(args Args, visit func(x T) bool) { + // If T is not Args, avoid allocation by inlining the walk. 
+ if _, ok := tv.typ.(Args); !ok { + for i := range args { + tv.walk(args[i], visit) + } + } else { + tv.walk(args, visit) // allocates + } +} + +func (tv *typeVisitor[T]) walkBody(body Body, visit func(x T) bool) { + if _, ok := tv.typ.(Body); !ok { + for i := range body { + tv.walk(body[i], visit) + } + } else { + tv.walk(body, visit) // allocates + } +} + +func (tv *typeVisitor[T]) walkRef(ref Ref, visit func(x T) bool) { + if _, ok := tv.typ.(Ref); !ok { + for i := range ref { + tv.walk(ref[i], visit) + } + } else { + tv.walk(ref, visit) // allocates + } +} + +func (tv *typeVisitor[T]) walk(x any, visit func(x T) bool) { + if v, ok := x.(T); ok && visit(v) { + return + } + + switch x := x.(type) { + case *Module: + tv.walk(x.Package, visit) + for i := range x.Imports { + tv.walk(x.Imports[i], visit) + } + for i := range x.Rules { + tv.walk(x.Rules[i], visit) + } + for i := range x.Annotations { + tv.walk(x.Annotations[i], visit) + } + for i := range x.Comments { + tv.walk(x.Comments[i], visit) + } + case *Package: + tv.walkRef(x.Path, visit) + case *Import: + tv.walk(x.Path, visit) + if _, ok := tv.typ.(Var); ok { + tv.walk(x.Alias, visit) + } + case *Rule: + tv.walk(x.Head, visit) + tv.walkBody(x.Body, visit) + if x.Else != nil { + tv.walk(x.Else, visit) + } + case *Head: + if _, ok := tv.typ.(Var); ok { + tv.walk(x.Name, visit) + } + tv.walkArgs(x.Args, visit) + if x.Key != nil { + tv.walk(x.Key, visit) + } + if x.Value != nil { + tv.walk(x.Value, visit) + } + case Body: + for i := range x { + tv.walk(x[i], visit) + } + case Args: + for i := range x { + tv.walk(x[i], visit) + } + case *Expr: + switch ts := x.Terms.(type) { + case *Term, *SomeDecl, *Every: + tv.walk(ts, visit) + case []*Term: + for i := range ts { + tv.walk(ts[i], visit) + } + } + for i := range x.With { + tv.walk(x.With[i], visit) + } + case *With: + tv.walk(x.Target, visit) + tv.walk(x.Value, visit) + case *Term: + tv.walk(x.Value, visit) + case Ref: + for i := range x { + tv.walk(x[i], 
visit) + } + case *object: + x.Foreach(func(k, v *Term) { + tv.walk(k, visit) + tv.walk(v, visit) + }) + case Object: + for _, k := range x.Keys() { + tv.walk(k, visit) + tv.walk(x.Get(k), visit) + } + case *Array: + for i := range x.Len() { + tv.walk(x.Elem(i), visit) + } + case Set: + xSlice := x.Slice() + for i := range xSlice { + tv.walk(xSlice[i], visit) + } + case *ArrayComprehension: + tv.walk(x.Term, visit) + tv.walkBody(x.Body, visit) + case *ObjectComprehension: + tv.walk(x.Key, visit) + tv.walk(x.Value, visit) + tv.walkBody(x.Body, visit) + case *SetComprehension: + tv.walk(x.Term, visit) + tv.walkBody(x.Body, visit) + case Call: + for i := range x { + tv.walk(x[i], visit) + } + case *Every: + if x.Key != nil { + tv.walk(x.Key, visit) + } + tv.walk(x.Value, visit) + tv.walk(x.Domain, visit) + tv.walkBody(x.Body, visit) + case *SomeDecl: + for i := range x.Symbols { + tv.walk(x.Symbols[i], visit) + } + case *TemplateString: + for i := range x.Parts { + tv.walk(x.Parts[i], visit) + } + } } // NewGenericVisitor returns a new GenericVisitor that will invoke the function -// f on AST nodes. +// f on AST nodes. Note that while it returns a pointer, the creating a GenericVisitor +// doesn't commonly allocate it on the heap, as long as it doesn't escape the function +// in which it is created and used (as it's trivially inlined). 
func NewGenericVisitor(f func(x any) bool) *GenericVisitor { return &GenericVisitor{f} } @@ -306,7 +501,9 @@ func (vis *GenericVisitor) Walk(x any) { vis.Walk(x.Path) case *Import: vis.Walk(x.Path) - vis.Walk(x.Alias) + if x.Alias != "" { + vis.f(x.Alias) + } case *Rule: vis.Walk(x.Head) vis.Walk(x.Body) @@ -314,8 +511,12 @@ func (vis *GenericVisitor) Walk(x any) { vis.Walk(x.Else) } case *Head: - vis.Walk(x.Name) - vis.Walk(x.Args) + if x.Name != "" { + vis.f(x.Name) + } + if x.Args != nil { + vis.Walk(x.Args) + } if x.Key != nil { vis.Walk(x.Key) } @@ -395,18 +596,13 @@ func (vis *GenericVisitor) Walk(x any) { for i := range x.Symbols { vis.Walk(x.Symbols[i]) } + case *TemplateString: + for i := range x.Parts { + vis.Walk(x.Parts[i]) + } } } -// BeforeAfterVisitor provides a utility to walk over AST nodes using -// closures. If the before closure returns true, the visitor will not -// walk over AST nodes under x. The after closure is invoked always -// after visiting a node. -type BeforeAfterVisitor struct { - before func(x any) bool - after func(x any) -} - // NewBeforeAfterVisitor returns a new BeforeAndAfterVisitor that // will invoke the functions before and after AST nodes. func NewBeforeAfterVisitor(before func(x any) bool, after func(x any)) *BeforeAfterVisitor { @@ -538,31 +734,29 @@ func (vis *BeforeAfterVisitor) Walk(x any) { } } -// VarVisitor walks AST nodes under a given node and collects all encountered -// variables. The collected variables can be controlled by specifying -// VarVisitorParams when creating the visitor. -type VarVisitor struct { - params VarVisitorParams - vars VarSet -} - -// VarVisitorParams contains settings for a VarVisitor. -type VarVisitorParams struct { - SkipRefHead bool - SkipRefCallHead bool - SkipObjectKeys bool - SkipClosures bool - SkipWithTarget bool - SkipSets bool -} - -// NewVarVisitor returns a new VarVisitor object. +// NewVarVisitor returns a new [VarVisitor] object. 
func NewVarVisitor() *VarVisitor { return &VarVisitor{ vars: NewVarSet(), } } +// ClearOrNewVarVisitor clears a non-nil [VarVisitor] or returns a new one. +func ClearOrNewVarVisitor(vis *VarVisitor) *VarVisitor { + if vis == nil { + return NewVarVisitor() + } + + return vis.Clear() +} + +// ClearOrNew resets the visitor to its initial state, or returns a new one if nil. +// +// Deprecated: use [ClearOrNewVarVisitor] instead. +func (vis *VarVisitor) ClearOrNew() *VarVisitor { + return ClearOrNewVarVisitor(vis) +} + // Clear resets the visitor to its initial state, and returns it for chaining. func (vis *VarVisitor) Clear() *VarVisitor { vis.params = VarVisitorParams{} @@ -571,14 +765,6 @@ func (vis *VarVisitor) Clear() *VarVisitor { return vis } -// ClearOrNew returns a new VarVisitor if vis is nil, or else a cleared VarVisitor. -func (vis *VarVisitor) ClearOrNew() *VarVisitor { - if vis == nil { - return NewVarVisitor() - } - return vis.Clear() -} - // WithParams sets the parameters in params on vis. func (vis *VarVisitor) WithParams(params VarVisitorParams) *VarVisitor { vis.params = params @@ -594,7 +780,7 @@ func (vis *VarVisitor) Add(v Var) { } } -// Vars returns a VarSet that contains collected vars. +// Vars returns a [VarSet] that contains collected vars. 
func (vis *VarVisitor) Vars() VarSet { return vis.vars } @@ -621,7 +807,7 @@ func (vis *VarVisitor) visit(v any) bool { } if vis.params.SkipClosures { switch v := v.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: + case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *TemplateString: return true case *Expr: if ev, ok := v.Terms.(*Every); ok { @@ -685,15 +871,20 @@ func (vis *VarVisitor) visit(v any) bool { return true } } + if vis.params.SkipTemplateStrings { + if _, ok := v.(*TemplateString); ok { + return true + } + } if v, ok := v.(Var); ok { vis.Add(v) + return true } return false } -// Walk iterates the AST by calling the function f on the -// GenericVisitor before recursing. Contrary to the generic Walk, this -// does not require allocating the visitor from heap. +// Walk iterates the AST by calling the function f on the [VarVisitor] before recursing. +// Contrary to the deprecated [Walk] function, this does not require allocating the visitor from heap. 
func (vis *VarVisitor) Walk(x any) { if vis.visit(x) { return @@ -701,16 +892,9 @@ func (vis *VarVisitor) Walk(x any) { switch x := x.(type) { case *Module: - vis.Walk(x.Package) - for i := range x.Imports { - vis.Walk(x.Imports[i]) - } for i := range x.Rules { vis.Walk(x.Rules[i]) } - for i := range x.Comments { - vis.Walk(x.Comments[i]) - } case *Package: vis.WalkRef(x.Path) case *Import: @@ -758,14 +942,17 @@ func (vis *VarVisitor) Walk(x any) { vis.Walk(x.Value.Value) case *Term: vis.Walk(x.Value) + if vVar, ok := x.Value.(Var); ok { + vis.vars.AddLocation(vVar, x.Location) + } case Ref: for i := range x { vis.Walk(x[i].Value) } case *object: - x.Foreach(func(k, _ *Term) { + x.Foreach(func(k, v *Term) { vis.Walk(k) - vis.Walk(x.Get(k)) + vis.Walk(v) }) case *Array: x.Foreach(func(t *Term) { @@ -801,6 +988,10 @@ func (vis *VarVisitor) Walk(x any) { for i := range x.Symbols { vis.Walk(x.Symbols[i]) } + case *TemplateString: + for i := range x.Parts { + vis.Walk(x.Parts[i]) + } } } @@ -820,6 +1011,9 @@ func (vis *VarVisitor) WalkRef(ref Ref) { } for _, term := range ref { vis.Walk(term.Value) + if vVar, ok := term.Value.(Var); ok { + vis.vars.AddLocation(vVar, term.Location) + } } } diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go index 5b418c360b..bf00e96ca2 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go @@ -6,9 +6,7 @@ package bundle import ( - "archive/tar" "bytes" - "compress/gzip" "encoding/hex" "encoding/json" "errors" @@ -24,6 +22,8 @@ import ( "sync" "github.com/gobwas/glob" + "golang.org/x/sync/errgroup" + "github.com/open-policy-agent/opa/internal/file/archive" "github.com/open-policy-agent/opa/internal/merge" "github.com/open-policy-agent/opa/v1/ast" @@ -51,6 +51,10 @@ const ( SnapshotBundleType = "snapshot" ) +var ( + empty Bundle +) + // Bundle represents a loaded bundle. 
The bundle can contain data and policies. type Bundle struct { Signatures SignaturesConfig @@ -96,7 +100,7 @@ type SignaturesConfig struct { // isEmpty returns if the SignaturesConfig is empty. func (s SignaturesConfig) isEmpty() bool { - return reflect.DeepEqual(s, SignaturesConfig{}) + return s.Signatures == nil && s.Plugin == "" } // DecodedSignature represents the decoded JWT payload. @@ -186,7 +190,6 @@ func (m *Manifest) SetRegoVersion(v ast.RegoVersion) { // Equal returns true if m is semantically equivalent to other. func (m Manifest) Equal(other Manifest) bool { - // This is safe since both are passed by value. m.Init() other.Init() @@ -323,7 +326,6 @@ func (ss stringSet) Equal(other stringSet) bool { } func (m *Manifest) validateAndInjectDefaults(b Bundle) error { - m.Init() // Validate roots in bundle. @@ -337,7 +339,7 @@ func (m *Manifest) validateAndInjectDefaults(b Bundle) error { for i := range len(roots) - 1 { for j := i + 1; j < len(roots); j++ { if RootPathsOverlap(roots[i], roots[j]) { - return fmt.Errorf("manifest has overlapped roots: '%v' and '%v'", roots[i], roots[j]) + return fmt.Errorf("manifest has overlapped roots: '%s' and '%s'", roots[i], roots[j]) } } } @@ -349,7 +351,7 @@ func (m *Manifest) validateAndInjectDefaults(b Bundle) error { found = RootPathsContain(roots, path) } if !found { - return fmt.Errorf("manifest roots %v do not permit '%v' in module '%v'", roots, module.Parsed.Package, module.Path) + return fmt.Errorf("manifest roots %v do not permit '%v' in module '%s'", roots, module.Parsed.Package, module.Path) } } @@ -368,7 +370,7 @@ func (m *Manifest) validateAndInjectDefaults(b Bundle) error { // Ensure wasm module entrypoint in within bundle roots if !RootPathsContain(roots, wmConfig.Entrypoint) { - return fmt.Errorf("manifest roots %v do not permit '%v' entrypoint for wasm module '%v'", roots, wmConfig.Entrypoint, wmConfig.Module) + return fmt.Errorf("manifest roots %v do not permit '%s' entrypoint for wasm module '%s'", 
roots, wmConfig.Entrypoint, wmConfig.Module) } if _, ok := seenEps[wmConfig.Entrypoint]; ok { @@ -504,14 +506,13 @@ func NewReader(r io.Reader) *Reader { // NewCustomReader returns a new Reader configured to use the // specified DirectoryLoader. func NewCustomReader(loader DirectoryLoader) *Reader { - nr := Reader{ + return &Reader{ loader: loader, - metrics: metrics.New(), + metrics: metrics.NoOp(), files: make(map[string]FileInfo), sizeLimitBytes: DefaultSizeLimitBytes + 1, lazyLoadingMode: HasExtension(), } - return &nr } // IncludeManifestInData sets whether the manifest metadata should be @@ -620,24 +621,17 @@ func (r *Reader) ParserOptions() ast.ParserOptions { // Read returns a new Bundle loaded from the reader. func (r *Reader) Read() (Bundle, error) { - - var bundle Bundle - var descriptors []*Descriptor - var err error - var raw []Raw - - bundle.Signatures, bundle.Patch, descriptors, err = preProcessBundle(r.loader, r.skipVerify, r.sizeLimitBytes) + bundle, descriptors, err := preProcessBundle(r.loader, r.skipVerify, r.sizeLimitBytes) if err != nil { - return bundle, err + return empty, err } bundle.lazyLoadingMode = r.lazyLoadingMode bundle.sizeLimitBytes = r.sizeLimitBytes if bundle.Type() == SnapshotBundleType { - err = r.checkSignaturesAndDescriptors(bundle.Signatures) - if err != nil { - return bundle, err + if err := r.checkSignaturesAndDescriptors(bundle.Signatures); err != nil { + return empty, err } bundle.Data = map[string]any{} @@ -647,7 +641,7 @@ func (r *Reader) Read() (Bundle, error) { for _, f := range descriptors { buf, err := readFile(f, r.sizeLimitBytes) if err != nil { - return bundle, err + return empty, err } // verify the file content @@ -663,7 +657,7 @@ func (r *Reader) Read() (Bundle, error) { delete(r.files, path) } else { if err = r.verifyBundleFile(path, buf); err != nil { - return bundle, err + return empty, err } } } @@ -690,7 +684,7 @@ func (r *Reader) Read() (Bundle, error) { p = modulePathWithPrefix(r.name, fullPath) } - 
raw = append(raw, Raw{Path: p, Value: bs, module: &mf}) + bundle.Raw = append(bundle.Raw, Raw{Path: p, Value: bs, module: &mf}) } } else if filepath.Base(path) == WasmFile { bundle.WasmModules = append(bundle.WasmModules, WasmModuleFile{ @@ -706,7 +700,7 @@ func (r *Reader) Read() (Bundle, error) { }) } else if filepath.Base(path) == dataFile { if r.lazyLoadingMode { - raw = append(raw, Raw{Path: path, Value: buf.Bytes()}) + bundle.Raw = append(bundle.Raw, Raw{Path: path, Value: buf.Bytes()}) continue } @@ -717,16 +711,16 @@ func (r *Reader) Read() (Bundle, error) { r.metrics.Timer(metrics.RegoDataParse).Stop() if err != nil { - return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) + return empty, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) } - if err := insertValue(&bundle, path, value); err != nil { - return bundle, err + if err := insertValue(bundle, path, value); err != nil { + return empty, err } } else if filepath.Base(path) == yamlDataFile || filepath.Base(path) == ymlDataFile { if r.lazyLoadingMode { - raw = append(raw, Raw{Path: path, Value: buf.Bytes()}) + bundle.Raw = append(bundle.Raw, Raw{Path: path, Value: buf.Bytes()}) continue } @@ -737,16 +731,16 @@ func (r *Reader) Read() (Bundle, error) { r.metrics.Timer(metrics.RegoDataParse).Stop() if err != nil { - return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) + return empty, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) } - if err := insertValue(&bundle, path, value); err != nil { - return bundle, err + if err := insertValue(bundle, path, value); err != nil { + return empty, err } } else if strings.HasSuffix(path, ManifestExt) { if err := util.NewJSONDecoder(&buf).Decode(&bundle.Manifest); err != nil { - return bundle, fmt.Errorf("bundle load failed on manifest decode: %w", err) + return empty, fmt.Errorf("bundle load failed on manifest decode: %w", err) } } } @@ -754,52 +748,63 @@ func (r *Reader) Read() 
(Bundle, error) { // Parse modules popts := r.ParserOptions() popts.RegoVersion = bundle.RegoVersion(popts.EffectiveRegoVersion()) - for _, mf := range modules { - modulePopts := popts + + g := &errgroup.Group{} + r.metrics.Timer(metrics.RegoModuleParse).Start() + + for i, mf := range modules { + mpopts := popts if regoVersion, err := bundle.RegoVersionForFile(mf.RelativePath, popts.EffectiveRegoVersion()); err != nil { - return bundle, err + return *bundle, err } else if regoVersion != ast.RegoUndefined { - // We don't expect ast.RegoUndefined here, but don't override configured rego-version if we do just to be extra protective - modulePopts.RegoVersion = regoVersion + // We don't expect ast.RegoUndefined here, but don't override + // configured rego-version if we do just to be extra protective + mpopts.RegoVersion = regoVersion } - r.metrics.Timer(metrics.RegoModuleParse).Start() - mf.Parsed, err = ast.ParseModuleWithOpts(mf.Path, util.ByteSliceToString(mf.Raw), modulePopts) - r.metrics.Timer(metrics.RegoModuleParse).Stop() - if err != nil { - return bundle, err - } - bundle.Modules = append(bundle.Modules, mf) + + g.Go(func() (err error) { + if mf.Parsed, err = ast.ParseModuleWithOpts(mf.Path, util.ByteSliceToString(mf.Raw), mpopts); err == nil { + modules[i] = mf + } + return err + }) + } + + err = g.Wait() + r.metrics.Timer(metrics.RegoModuleParse).Stop() + if err != nil { + return empty, err } + bundle.Modules = modules + if bundle.Type() == DeltaBundleType { if len(bundle.Data) != 0 { - return bundle, errors.New("delta bundle expected to contain only patch file but data files found") + return empty, errors.New("delta bundle expected to contain only patch file but data files found") } if len(bundle.Modules) != 0 { - return bundle, errors.New("delta bundle expected to contain only patch file but policy files found") + return empty, errors.New("delta bundle expected to contain only patch file but policy files found") } if len(bundle.WasmModules) != 0 { - return 
bundle, errors.New("delta bundle expected to contain only patch file but wasm files found") + return empty, errors.New("delta bundle expected to contain only patch file but wasm files found") } if r.persist { - return bundle, errors.New("'persist' property is true in config. persisting delta bundle to disk is not supported") + return empty, errors.New( + "'persist' property is true in config. persisting delta bundle to disk is not supported") } } // check if the bundle signatures specify any files that weren't found in the bundle if bundle.Type() == SnapshotBundleType && len(r.files) != 0 { - extra := []string{} - for k := range r.files { - extra = append(extra, k) - } - return bundle, fmt.Errorf("file(s) %v specified in bundle signatures but not found in the target bundle", extra) + return empty, fmt.Errorf( + "file(s) %v specified in bundle signatures but not found in the target bundle", util.Keys(r.files)) } - if err := bundle.Manifest.validateAndInjectDefaults(bundle); err != nil { - return bundle, err + if err := bundle.Manifest.validateAndInjectDefaults(*bundle); err != nil { + return empty, err } // Inject the wasm module entrypoint refs into the WasmModuleFile structs @@ -812,36 +817,33 @@ func (r *Reader) Read() (Bundle, error) { for _, entrypoint := range entrypoints { ref, err := ast.PtrRef(ast.DefaultRootDocument, entrypoint) if err != nil { - return bundle, fmt.Errorf("failed to parse wasm module entrypoint '%s': %s", entrypoint, err) + return empty, fmt.Errorf("failed to parse wasm module entrypoint '%s': %s", entrypoint, err) } bundle.WasmModules[i].Entrypoints = append(bundle.WasmModules[i].Entrypoints, ref) } } if r.includeManifestInData { - var metadata map[string]any - b, err := json.Marshal(&bundle.Manifest) if err != nil { - return bundle, fmt.Errorf("bundle load failed on manifest marshal: %w", err) + return empty, fmt.Errorf("bundle load failed on manifest marshal: %w", err) } - err = util.UnmarshalJSON(b, &metadata) - if err != nil { - 
return bundle, fmt.Errorf("bundle load failed on manifest unmarshal: %w", err) + var metadata map[string]any + if err := util.UnmarshalJSON(b, &metadata); err != nil { + return empty, fmt.Errorf("bundle load failed on manifest unmarshal: %w", err) } // For backwards compatibility always write to the old unnamed manifest path // This will *not* be correct if >1 bundle is in use... if err := bundle.insertData(legacyManifestStoragePath, metadata); err != nil { - return bundle, fmt.Errorf("bundle load failed on %v: %w", legacyRevisionStoragePath, err) + return empty, fmt.Errorf("bundle load failed on %v: %w", legacyRevisionStoragePath, err) } } bundle.Etag = r.etag - bundle.Raw = raw - return bundle, nil + return *bundle, nil } func (r *Reader) isFileExcluded(path string) bool { @@ -869,10 +871,9 @@ func (r *Reader) checkSignaturesAndDescriptors(signatures SignaturesConfig) erro } // verify the JWT signatures included in the `.signatures.json` file - if err := r.verifyBundleSignature(signatures); err != nil { - return err - } + return r.verifyBundleSignature(signatures) } + return nil } @@ -931,19 +932,10 @@ func (w *Writer) DisableFormat(yes bool) *Writer { // Write writes the bundle to the writer's output stream. 
func (w *Writer) Write(bundle Bundle) error { - gw := gzip.NewWriter(w.w) - tw := tar.NewWriter(gw) - - bundleType := bundle.Type() + tw := archive.NewTarGzWriter(w.w) - if bundleType == SnapshotBundleType { - var buf bytes.Buffer - - if err := json.NewEncoder(&buf).Encode(bundle.Data); err != nil { - return err - } - - if err := archive.WriteFile(tw, "data.json", buf.Bytes()); err != nil { + if bundle.Type() == SnapshotBundleType { + if err := tw.WriteJSONFile("/data.json", bundle.Data); err != nil { return err } @@ -953,7 +945,7 @@ func (w *Writer) Write(bundle Bundle) error { path = module.Path } - if err := archive.WriteFile(tw, path, module.Raw); err != nil { + if err := tw.WriteFile(util.WithPrefix(path, "/"), module.Raw); err != nil { return err } } @@ -969,55 +961,48 @@ func (w *Writer) Write(bundle Bundle) error { if err := w.writePlan(tw, bundle); err != nil { return err } - } else if bundleType == DeltaBundleType { - if err := writePatch(tw, bundle); err != nil { + } else if bundle.Type() == DeltaBundleType { + if err := tw.WriteJSONFile("/patch.json", bundle.Patch); err != nil { return err } } - if err := writeManifest(tw, bundle); err != nil { - return err - } - - if err := tw.Close(); err != nil { - return err + if !bundle.Manifest.Empty() { + if err := tw.WriteJSONFile("/.manifest", bundle.Manifest); err != nil { + return err + } } - return gw.Close() + return tw.Close() } -func (w *Writer) writeWasm(tw *tar.Writer, bundle Bundle) error { +func (w *Writer) writeWasm(tw *archive.TarGzWriter, bundle Bundle) error { for _, wm := range bundle.WasmModules { path := wm.URL if w.usePath { path = wm.Path } - err := archive.WriteFile(tw, path, wm.Raw) - if err != nil { + if err := tw.WriteFile(util.WithPrefix(path, "/"), wm.Raw); err != nil { return err } } - if len(bundle.Wasm) > 0 { - err := archive.WriteFile(tw, "/"+WasmFile, bundle.Wasm) - if err != nil { - return err - } + if len(bundle.Wasm) == 0 { + return nil } - return nil + return 
tw.WriteFile(util.WithPrefix(WasmFile, "/"), bundle.Wasm) } -func (w *Writer) writePlan(tw *tar.Writer, bundle Bundle) error { +func (w *Writer) writePlan(tw *archive.TarGzWriter, bundle Bundle) error { for _, wm := range bundle.PlanModules { path := wm.URL if w.usePath { path = wm.Path } - err := archive.WriteFile(tw, path, wm.Raw) - if err != nil { + if err := tw.WriteFile(util.WithPrefix(path, "/"), wm.Raw); err != nil { return err } } @@ -1025,34 +1010,7 @@ func (w *Writer) writePlan(tw *tar.Writer, bundle Bundle) error { return nil } -func writeManifest(tw *tar.Writer, bundle Bundle) error { - - if bundle.Manifest.Empty() { - return nil - } - - var buf bytes.Buffer - - if err := json.NewEncoder(&buf).Encode(bundle.Manifest); err != nil { - return err - } - - return archive.WriteFile(tw, ManifestExt, buf.Bytes()) -} - -func writePatch(tw *tar.Writer, bundle Bundle) error { - - var buf bytes.Buffer - - if err := json.NewEncoder(&buf).Encode(bundle.Patch); err != nil { - return err - } - - return archive.WriteFile(tw, patchFile, buf.Bytes()) -} - -func writeSignatures(tw *tar.Writer, bundle Bundle) error { - +func writeSignatures(tw *archive.TarGzWriter, bundle Bundle) error { if bundle.Signatures.isEmpty() { return nil } @@ -1062,7 +1020,7 @@ func writeSignatures(tw *tar.Writer, bundle Bundle) error { return err } - return archive.WriteFile(tw, fmt.Sprintf(".%v", SignaturesFile), bs) + return tw.WriteFile(util.WithPrefix(SignaturesFile, "/."), bs) } func hashBundleFiles(hash SignatureHasher, b *Bundle) ([]FileInfo, error) { @@ -1115,8 +1073,7 @@ func hashBundleFiles(hash SignatureHasher, b *Bundle) ([]FileInfo, error) { return files, err } - bs, err = hash.HashFile(result) - if err != nil { + if bs, err = hash.HashFile(result); err != nil { return files, err } @@ -1227,10 +1184,6 @@ func (b *Bundle) GenerateSignature(signingConfig *SigningConfig, keyID string, u return err } - if b.Signatures.isEmpty() { - b.Signatures = SignaturesConfig{} - } - if 
signingConfig.Plugin != "" { b.Signatures.Plugin = signingConfig.Plugin } @@ -1243,7 +1196,6 @@ func (b *Bundle) GenerateSignature(signingConfig *SigningConfig, keyID string, u // ParsedModules returns a map of parsed modules with names that are // unique and human readable for the given a bundle name. func (b *Bundle) ParsedModules(bundleName string) map[string]*ast.Module { - mods := make(map[string]*ast.Module, len(b.Modules)) for _, mf := range b.Modules { @@ -1255,9 +1207,10 @@ func (b *Bundle) ParsedModules(bundleName string) map[string]*ast.Module { func (b *Bundle) RegoVersion(def ast.RegoVersion) ast.RegoVersion { if v := b.Manifest.RegoVersion; v != nil { - if *v == 0 { + switch *v { + case 0: return ast.RegoV0 - } else if *v == 1 { + case 1: return ast.RegoV1 } } @@ -1328,10 +1281,6 @@ func (m *Manifest) numericRegoVersionForFile(path string) (*int, error) { // Equal returns true if this bundle's contents equal the other bundle's // contents. func (b Bundle) Equal(other Bundle) bool { - if !reflect.DeepEqual(b.Data, other.Data) { - return false - } - if len(b.Modules) != len(other.Modules) { return false } @@ -1357,6 +1306,10 @@ func (b Bundle) Equal(other Bundle) bool { return false } + if !reflect.DeepEqual(b.Data, other.Data) { + return false + } + return bytes.Equal(b.Wasm, other.Wasm) } @@ -1487,7 +1440,6 @@ func Merge(bundles []*Bundle) (*Bundle, error) { // If usePath is true, per-file rego-versions will be calculated using the file's ModuleFile.Path; otherwise, the file's // ModuleFile.URL will be used. 
func MergeWithRegoVersion(bundles []*Bundle, regoVersion ast.RegoVersion, usePath bool) (*Bundle, error) { - if len(bundles) == 0 { return nil, errors.New("expected at least one bundle") } @@ -1512,7 +1464,6 @@ func MergeWithRegoVersion(bundles []*Bundle, regoVersion ast.RegoVersion, usePat var result Bundle for _, b := range bundles { - if b.Manifest.Roots == nil { return nil, errors.New("bundle manifest not initialized") } @@ -1607,16 +1558,11 @@ func bundleRelativePath(m ModuleFile, usePath bool) string { } func bundleAbsolutePath(m ModuleFile, usePath bool) string { - var p string + p := m.URL if usePath { p = m.Path - } else { - p = m.URL - } - if !path.IsAbs(p) { - p = "/" + p } - return path.Clean(p) + return path.Clean(util.WithPrefix(p, "/")) } // RootPathsOverlap takes in two bundle root paths and returns true if they overlap. @@ -1642,7 +1588,6 @@ func rootPathSegments(path string) []string { } func rootContains(root []string, other []string) bool { - // A single segment, empty string root always contains the other. if len(root) == 1 && root[0] == "" { return true @@ -1674,7 +1619,7 @@ func getNormalizedPath(path string) []string { // other hand, if the path is empty, filepath.Dir will return '.'. // Note: filepath.Dir can return paths with '\' separators, always use // filepath.ToSlash to keep them normalized. - dirpath := strings.TrimLeft(normalizePath(filepath.Dir(path)), "/.") + dirpath := strings.TrimLeft(filepath.ToSlash(filepath.Dir(path)), "/.") var key []string if dirpath != "" { key = strings.Split(dirpath, "/") @@ -1701,56 +1646,52 @@ func dfs(value any, path string, fn func(string, any) (bool, error)) error { } func modulePathWithPrefix(bundleName string, modulePath string) string { - // Default prefix is just the bundle name - prefix := bundleName - // Bundle names are sometimes just file paths, some of which // are full urls (file:///foo/). Parse these and only use the path. 
parsed, err := url.Parse(bundleName) if err == nil { - prefix = filepath.Join(parsed.Host, parsed.Path) + return path.Join(parsed.Host, parsed.Path, modulePath) } - // Note: filepath.Join can return paths with '\' separators, always use - // filepath.ToSlash to keep them normalized. - return normalizePath(filepath.Join(prefix, modulePath)) + return path.Join(bundleName, modulePath) } // IsStructuredDoc checks if the file name equals a structured file extension ex. ".json" func IsStructuredDoc(name string) bool { - return filepath.Base(name) == dataFile || filepath.Base(name) == yamlDataFile || - filepath.Base(name) == SignaturesFile || filepath.Base(name) == ManifestExt + base := filepath.Base(name) + return base == dataFile || base == yamlDataFile || base == SignaturesFile || base == ManifestExt } -func preProcessBundle(loader DirectoryLoader, skipVerify bool, sizeLimitBytes int64) (SignaturesConfig, Patch, []*Descriptor, error) { +func preProcessBundle(loader DirectoryLoader, skipVerify bool, sizeLimitBytes int64) (*Bundle, []*Descriptor, error) { + bundle := &Bundle{} descriptors := []*Descriptor{} - var signatures SignaturesConfig - var patch Patch for { f, err := loader.NextFile() - if err == io.EOF { - break - } - if err != nil { - return signatures, patch, nil, fmt.Errorf("bundle read failed: %w", err) + if err == io.EOF { + break + } + return bundle, nil, fmt.Errorf("bundle read failed: %w", err) } - // check for the signatures file - if !skipVerify && strings.HasSuffix(f.Path(), SignaturesFile) { + isSignaturesFile := strings.HasSuffix(f.Path(), SignaturesFile) + + if !skipVerify && isSignaturesFile { buf, err := readFile(f, sizeLimitBytes) if err != nil { - return signatures, patch, nil, err + return bundle, nil, err } - if err := util.NewJSONDecoder(&buf).Decode(&signatures); err != nil { - return signatures, patch, nil, fmt.Errorf("bundle load failed on signatures decode: %w", err) + if err := util.NewJSONDecoder(&buf).Decode(&bundle.Signatures); err != 
nil { + return bundle, nil, fmt.Errorf("bundle load failed on signatures decode: %w", err) } - } else if !strings.HasSuffix(f.Path(), SignaturesFile) { + } else if !isSignaturesFile { descriptors = append(descriptors, f) - if filepath.Base(f.Path()) == patchFile { + base := filepath.Base(f.Path()) + + if base == patchFile { var b bytes.Buffer tee := io.TeeReader(f.reader, &b) @@ -1758,18 +1699,19 @@ func preProcessBundle(loader DirectoryLoader, skipVerify bool, sizeLimitBytes in buf, err := readFile(f, sizeLimitBytes) if err != nil { - return signatures, patch, nil, err + return bundle, nil, err } - if err := util.NewJSONDecoder(&buf).Decode(&patch); err != nil { - return signatures, patch, nil, fmt.Errorf("bundle load failed on patch decode: %w", err) + if err := util.NewJSONDecoder(&buf).Decode(&bundle.Patch); err != nil { + return bundle, nil, fmt.Errorf("bundle load failed on patch decode: %w", err) } f.reader = &b } } } - return signatures, patch, descriptors, nil + + return bundle, descriptors, nil } func readFile(f *Descriptor, sizeLimitBytes int64) (bytes.Buffer, error) { @@ -1839,7 +1781,3 @@ func fstatFileSize(f *os.File) (int64, error) { } return fileInfo.Size(), nil } - -func normalizePath(p string) string { - return filepath.ToSlash(p) -} diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go index 12e159254c..4897ee7b91 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go @@ -352,12 +352,10 @@ func (t *tarballLoader) NextFile() (*Descriptor, error) { for { header, err := t.tr.Next() - - if err == io.EOF { - break - } - if err != nil { + if err == io.EOF { + break + } return nil, err } @@ -365,7 +363,6 @@ func (t *tarballLoader) NextFile() (*Descriptor, error) { if header.Typeflag == tar.TypeReg { if t.filter != nil { - if t.filter(filepath.ToSlash(header.Name), header.FileInfo(), 
getdepth(header.Name, false)) { continue } @@ -462,7 +459,7 @@ func (it *iterator) Next() (*storage.Update, error) { f := it.files[it.idx] it.idx++ - isPolicy := false + var isPolicy bool if strings.HasSuffix(f.name, RegoExt) { isPolicy = true } @@ -504,9 +501,9 @@ func getdepth(path string, isDir bool) int { } func getFileStoragePath(path string) (storage.Path, error) { - fpath := strings.TrimLeft(normalizePath(filepath.Dir(path)), "/.") + fpath := strings.TrimLeft(filepath.ToSlash(filepath.Dir(path)), "/.") if strings.HasSuffix(path, RegoExt) { - fpath = strings.Trim(normalizePath(path), "/") + fpath = strings.Trim(filepath.ToSlash(path), "/") } p, ok := storage.ParsePathEscaped("/" + fpath) diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go index 7ab3de989c..fc6fc1896f 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go @@ -1,6 +1,3 @@ -//go:build go1.16 -// +build go1.16 - package bundle import ( diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go index 5a62d2dc00..dd9dfe5149 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go @@ -14,7 +14,6 @@ import ( "fmt" "hash" "io" - "strings" "github.com/open-policy-agent/opa/v1/util" ) @@ -132,5 +131,5 @@ func encodePrimitive(v any) []byte { encoder := json.NewEncoder(&buf) encoder.SetEscapeHTML(false) _ = encoder.Encode(v) - return []byte(strings.Trim(buf.String(), "\n")) + return bytes.Trim(buf.Bytes(), "\n") } diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go index f203f7086b..f6c13c75fc 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go +++ 
b/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go @@ -86,6 +86,12 @@ func moduleInfoPath(id string) storage.Path { func read(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (any, error) { value, err := store.Read(ctx, txn, path) if err != nil { + if storage.IsNotFound(err) { + return nil, &storage.Error{ + Code: storage.NotFoundErr, + Message: strings.TrimPrefix(path.String(), "/system") + ": document does not exist", + } + } return nil, err } @@ -565,12 +571,11 @@ func doDFS(obj map[string]json.RawMessage, path string, roots []string) error { } for key := range obj { - newPath := filepath.Join(strings.Trim(path, "/"), key) // Note: filepath.Join can return paths with '\' separators, always use // filepath.ToSlash to keep them normalized. - newPath = strings.TrimLeft(normalizePath(newPath), "/.") + newPath = strings.TrimLeft(filepath.ToSlash(newPath), "/.") contains := false prefix := false @@ -965,7 +970,7 @@ func compileModules(compiler *ast.Compiler, m metrics.Metrics, bundles map[strin m.Timer(metrics.RegoModuleCompile).Start() defer m.Timer(metrics.RegoModuleCompile).Stop() - modules := map[string]*ast.Module{} + modules := make(map[string]*ast.Module, len(compiler.Modules)+len(extraModules)+len(bundles)) // preserve any modules already on the compiler maps.Copy(modules, compiler.Modules) @@ -1185,17 +1190,20 @@ func applyPatches(ctx context.Context, store storage.Store, txn storage.Transact // Helpers for the older single (unnamed) bundle style manifest storage. // LegacyManifestStoragePath is the older unnamed bundle path for manifests to be stored. +// // Deprecated: Use ManifestStoragePath and named bundles instead. var legacyManifestStoragePath = storage.MustParsePath("/system/bundle/manifest") var legacyRevisionStoragePath = append(legacyManifestStoragePath, "revision") // LegacyWriteManifestToStore will write the bundle manifest to the older single (unnamed) bundle manifest location. 
+// // Deprecated: Use WriteManifestToStore and named bundles instead. func LegacyWriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, manifest Manifest) error { return write(ctx, store, txn, legacyManifestStoragePath, manifest) } // LegacyEraseManifestFromStore will erase the bundle manifest from the older single (unnamed) bundle manifest location. +// // Deprecated: Use WriteManifestToStore and named bundles instead. func LegacyEraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) error { err := store.Write(ctx, txn, storage.RemoveOp, legacyManifestStoragePath, nil) @@ -1206,12 +1214,14 @@ func LegacyEraseManifestFromStore(ctx context.Context, store storage.Store, txn } // LegacyReadRevisionFromStore will read the bundle manifest revision from the older single (unnamed) bundle manifest location. +// // Deprecated: Use ReadBundleRevisionFromStore and named bundles instead. func LegacyReadRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (string, error) { return readRevisionFromStore(ctx, store, txn, legacyRevisionStoragePath) } // ActivateLegacy calls Activate for the bundles but will also write their manifest to the older unnamed store location. +// // Deprecated: Use Activate with named bundles instead. func ActivateLegacy(opts *ActivateOpts) error { opts.legacy = true diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go index 82e308b49e..42c8908f73 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go @@ -144,10 +144,6 @@ func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignatur // Because we want to fallback to ds.KeyID when we can't find the // keyID, we need to parse the payload here already. 
- // - // (lestrrat) Whoa, you're going to trust the payload before you - // verify the signature? Even if it's for backwrds compatibility, - // Is this OK? decoder := base64.RawURLEncoding payload := make([]byte, decoder.DecodedLen(len(payloadb64))) if _, err := decoder.Decode(payload, payloadb64); err != nil { diff --git a/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go b/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go index 5b0bb1ea52..69ac718ebd 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go +++ b/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go @@ -2,13 +2,10 @@ // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. -//go:build go1.16 -// +build go1.16 - package capabilities import ( - v0 "github.com/open-policy-agent/opa/capabilities" + v0 "github.com/open-policy-agent/opa/capabilities" //nolint:staticcheck ) // FS contains the embedded capabilities/ directory of the built version, diff --git a/vendor/github.com/open-policy-agent/opa/v1/config/config.go b/vendor/github.com/open-policy-agent/opa/v1/config/config.go deleted file mode 100644 index 1912d1f38c..0000000000 --- a/vendor/github.com/open-policy-agent/opa/v1/config/config.go +++ /dev/null @@ -1,393 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package config implements OPA configuration file parsing and validation. -package config - -import ( - "encoding/json" - "errors" - "fmt" - "maps" - "os" - "path/filepath" - "reflect" - "sort" - "strings" - - "github.com/open-policy-agent/opa/internal/ref" - "github.com/open-policy-agent/opa/v1/ast" - "github.com/open-policy-agent/opa/v1/util" - "github.com/open-policy-agent/opa/v1/version" -) - -// ServerConfig represents the different server configuration options. 
-type ServerConfig struct { - Metrics json.RawMessage `json:"metrics,omitempty"` - - Encoding json.RawMessage `json:"encoding,omitempty"` - Decoding json.RawMessage `json:"decoding,omitempty"` -} - -// Clone creates a deep copy of ServerConfig. -func (s *ServerConfig) Clone() *ServerConfig { - if s == nil { - return nil - } - - clone := &ServerConfig{} - - if s.Encoding != nil { - clone.Encoding = make(json.RawMessage, len(s.Encoding)) - copy(clone.Encoding, s.Encoding) - } - if s.Decoding != nil { - clone.Decoding = make(json.RawMessage, len(s.Decoding)) - copy(clone.Decoding, s.Decoding) - } - if s.Metrics != nil { - clone.Metrics = make(json.RawMessage, len(s.Metrics)) - copy(clone.Metrics, s.Metrics) - } - - return clone -} - -// StorageConfig represents Config's storage options. -type StorageConfig struct { - Disk json.RawMessage `json:"disk,omitempty"` -} - -// Clone creates a deep copy of StorageConfig. -func (s *StorageConfig) Clone() *StorageConfig { - if s == nil { - return nil - } - - clone := &StorageConfig{} - - if s.Disk != nil { - clone.Disk = make(json.RawMessage, len(s.Disk)) - copy(clone.Disk, s.Disk) - } - - return clone -} - -// Config represents the configuration file that OPA can be started with. 
-type Config struct { - Services json.RawMessage `json:"services,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Discovery json.RawMessage `json:"discovery,omitempty"` - Bundle json.RawMessage `json:"bundle,omitempty"` // Deprecated: Use `bundles` instead - Bundles json.RawMessage `json:"bundles,omitempty"` - DecisionLogs json.RawMessage `json:"decision_logs,omitempty"` - Status json.RawMessage `json:"status,omitempty"` - Plugins map[string]json.RawMessage `json:"plugins,omitempty"` - Keys json.RawMessage `json:"keys,omitempty"` - DefaultDecision *string `json:"default_decision,omitempty"` - DefaultAuthorizationDecision *string `json:"default_authorization_decision,omitempty"` - Caching json.RawMessage `json:"caching,omitempty"` - NDBuiltinCache bool `json:"nd_builtin_cache,omitempty"` - PersistenceDirectory *string `json:"persistence_directory,omitempty"` - DistributedTracing json.RawMessage `json:"distributed_tracing,omitempty"` - Server *ServerConfig `json:"server,omitempty"` - Storage *StorageConfig `json:"storage,omitempty"` - Extra map[string]json.RawMessage `json:"-"` -} - -// ParseConfig returns a valid Config object with defaults injected. The id -// and version parameters will be set in the labels map. 
-func ParseConfig(raw []byte, id string) (*Config, error) { - // NOTE(sr): based on https://stackoverflow.com/a/33499066/993018 - var result Config - objValue := reflect.ValueOf(&result).Elem() - knownFields := map[string]reflect.Value{} - for i := 0; i != objValue.NumField(); i++ { - jsonName := strings.Split(objValue.Type().Field(i).Tag.Get("json"), ",")[0] - knownFields[jsonName] = objValue.Field(i) - } - - if err := util.Unmarshal(raw, &result.Extra); err != nil { - return nil, err - } - - for key, chunk := range result.Extra { - if field, found := knownFields[key]; found { - if err := util.Unmarshal(chunk, field.Addr().Interface()); err != nil { - return nil, err - } - delete(result.Extra, key) - } - } - if len(result.Extra) == 0 { - result.Extra = nil - } - return &result, result.validateAndInjectDefaults(id) -} - -// PluginNames returns a sorted list of names of enabled plugins. -func (c Config) PluginNames() (result []string) { - if c.Bundle != nil || c.Bundles != nil { - result = append(result, "bundles") - } - if c.Status != nil { - result = append(result, "status") - } - if c.DecisionLogs != nil { - result = append(result, "decision_logs") - } - for name := range c.Plugins { - result = append(result, name) - } - sort.Strings(result) - return result -} - -// PluginsEnabled returns true if one or more plugin features are enabled. -// -// Deprecated: Use PluginNames instead. -func (c Config) PluginsEnabled() bool { - return c.Bundle != nil || c.Bundles != nil || c.DecisionLogs != nil || c.Status != nil || len(c.Plugins) > 0 -} - -// DefaultDecisionRef returns the default decision as a reference. -func (c Config) DefaultDecisionRef() ast.Ref { - r, _ := ref.ParseDataPath(*c.DefaultDecision) - return r -} - -// DefaultAuthorizationDecisionRef returns the default authorization decision -// as a reference. 
-func (c Config) DefaultAuthorizationDecisionRef() ast.Ref { - r, _ := ref.ParseDataPath(*c.DefaultAuthorizationDecision) - return r -} - -// NDBuiltinCacheEnabled returns if the ND builtins cache should be used. -func (c Config) NDBuiltinCacheEnabled() bool { - return c.NDBuiltinCache -} - -// GetPersistenceDirectory returns the configured persistence directory, or $PWD/.opa if none is configured -func (c Config) GetPersistenceDirectory() (string, error) { - if c.PersistenceDirectory == nil { - pwd, err := os.Getwd() - if err != nil { - return "", err - } - return filepath.Join(pwd, ".opa"), nil - } - return *c.PersistenceDirectory, nil -} - -// ActiveConfig returns OPA's active configuration -// with the credentials and crypto keys removed -func (c *Config) ActiveConfig() (any, error) { - bs, err := json.Marshal(c) - if err != nil { - return nil, err - } - - var result map[string]any - if err := util.UnmarshalJSON(bs, &result); err != nil { - return nil, err - } - for k, e := range c.Extra { - var v any - if err := util.UnmarshalJSON(e, &v); err != nil { - return nil, err - } - result[k] = v - } - - if err := removeServiceCredentials(result["services"]); err != nil { - return nil, err - } - - if err := removeCryptoKeys(result["keys"]); err != nil { - return nil, err - } - - return result, nil -} - -// Clone creates a deep copy of the Config struct -func (c *Config) Clone() *Config { - if c == nil { - return nil - } - - clone := &Config{ - NDBuiltinCache: c.NDBuiltinCache, - Server: c.Server.Clone(), - Storage: c.Storage.Clone(), - Labels: maps.Clone(c.Labels), - } - - if c.Services != nil { - clone.Services = make(json.RawMessage, len(c.Services)) - copy(clone.Services, c.Services) - } - if c.Discovery != nil { - clone.Discovery = make(json.RawMessage, len(c.Discovery)) - copy(clone.Discovery, c.Discovery) - } - if c.Bundle != nil { - clone.Bundle = make(json.RawMessage, len(c.Bundle)) - copy(clone.Bundle, c.Bundle) - } - if c.Bundles != nil { - clone.Bundles = 
make(json.RawMessage, len(c.Bundles)) - copy(clone.Bundles, c.Bundles) - } - if c.DecisionLogs != nil { - clone.DecisionLogs = make(json.RawMessage, len(c.DecisionLogs)) - copy(clone.DecisionLogs, c.DecisionLogs) - } - if c.Status != nil { - clone.Status = make(json.RawMessage, len(c.Status)) - copy(clone.Status, c.Status) - } - if c.Keys != nil { - clone.Keys = make(json.RawMessage, len(c.Keys)) - copy(clone.Keys, c.Keys) - } - if c.Caching != nil { - clone.Caching = make(json.RawMessage, len(c.Caching)) - copy(clone.Caching, c.Caching) - } - if c.DistributedTracing != nil { - clone.DistributedTracing = make(json.RawMessage, len(c.DistributedTracing)) - copy(clone.DistributedTracing, c.DistributedTracing) - } - - if c.DefaultDecision != nil { - s := *c.DefaultDecision - clone.DefaultDecision = &s - } - if c.DefaultAuthorizationDecision != nil { - s := *c.DefaultAuthorizationDecision - clone.DefaultAuthorizationDecision = &s - } - if c.PersistenceDirectory != nil { - s := *c.PersistenceDirectory - clone.PersistenceDirectory = &s - } - - if c.Plugins != nil { - clone.Plugins = make(map[string]json.RawMessage, len(c.Plugins)) - for k, v := range c.Plugins { - if v != nil { - clone.Plugins[k] = make(json.RawMessage, len(v)) - copy(clone.Plugins[k], v) - } - } - } - - if c.Extra != nil { - clone.Extra = make(map[string]json.RawMessage, len(c.Extra)) - for k, v := range c.Extra { - if v != nil { - clone.Extra[k] = make(json.RawMessage, len(v)) - copy(clone.Extra[k], v) - } - } - } - - return clone -} - -func (c *Config) validateAndInjectDefaults(id string) error { - if c.DefaultDecision == nil { - s := defaultDecisionPath - c.DefaultDecision = &s - } - - _, err := ref.ParseDataPath(*c.DefaultDecision) - if err != nil { - return err - } - - if c.DefaultAuthorizationDecision == nil { - s := defaultAuthorizationDecisionPath - c.DefaultAuthorizationDecision = &s - } - - _, err = ref.ParseDataPath(*c.DefaultAuthorizationDecision) - if err != nil { - return err - } - - if 
c.Labels == nil { - c.Labels = map[string]string{} - } - - c.Labels["id"] = id - c.Labels["version"] = version.Version - - return nil -} - -func removeServiceCredentials(x any) error { - switch x := x.(type) { - case nil: - return nil - case []any: - for _, v := range x { - err := removeKey(v, "credentials") - if err != nil { - return err - } - } - - case map[string]any: - for _, v := range x { - err := removeKey(v, "credentials") - if err != nil { - return err - } - } - default: - return fmt.Errorf("illegal service config type: %T", x) - } - - return nil -} - -func removeCryptoKeys(x any) error { - switch x := x.(type) { - case nil: - return nil - case map[string]any: - for _, v := range x { - err := removeKey(v, "key", "private_key") - if err != nil { - return err - } - } - default: - return fmt.Errorf("illegal keys config type: %T", x) - } - - return nil -} - -func removeKey(x any, keys ...string) error { - val, ok := x.(map[string]any) - if !ok { - return errors.New("type assertion error") - } - - for _, key := range keys { - delete(val, key) - } - - return nil -} - -const ( - defaultDecisionPath = "/system/main" - defaultAuthorizationDecisionPath = "/system/authz/allow" -) diff --git a/vendor/github.com/open-policy-agent/opa/v1/format/format.go b/vendor/github.com/open-policy-agent/opa/v1/format/format.go index 4867594905..c14a6ce798 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/format/format.go +++ b/vendor/github.com/open-policy-agent/opa/v1/format/format.go @@ -9,7 +9,6 @@ import ( "bytes" "errors" "fmt" - "regexp" "slices" "sort" "strings" @@ -18,6 +17,17 @@ import ( "github.com/open-policy-agent/opa/internal/future" "github.com/open-policy-agent/opa/v1/ast" "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +// defaultLocationFile is the file name used in `Ast()` for terms +// without a location, as could happen when pretty-printing the +// results of partial eval. 
+const defaultLocationFile = "__format_default__" + +var ( + expandedConst = ast.NewBody(ast.NewExpr(ast.InternedTerm(true))) + commentsSlicePool = util.NewSlicePool[*ast.Comment](50) ) // Opts lets you control the code formatting via `AstWithOpts()`. @@ -38,6 +48,11 @@ type Opts struct { // Imports are only removed if [Opts.RegoVersion] makes them redundant. DropV0Imports bool + // SkipDefensiveCopying, if true, will avoid deep-copying the AST before formatting it. + // This is true by default for all Source* functions, but false by default for Ast* functions, + // as some formatting operations may otherwise mutate the AST. + SkipDefensiveCopying bool + Capabilities *ast.Capabilities } @@ -48,16 +63,11 @@ func (o Opts) effectiveRegoVersion() ast.RegoVersion { return o.RegoVersion } -// defaultLocationFile is the file name used in `Ast()` for terms -// without a location, as could happen when pretty-printing the -// results of partial eval. -const defaultLocationFile = "__format_default__" - // Source formats a Rego source file. The bytes provided must describe a complete // Rego module. If they don't, Source will return an error resulting from the attempt // to parse the bytes. func Source(filename string, src []byte) ([]byte, error) { - return SourceWithOpts(filename, src, Opts{}) + return SourceWithOpts(filename, src, Opts{SkipDefensiveCopying: true}) } func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) { @@ -72,6 +82,9 @@ func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) { parserOpts.RegoVersion = ast.RegoV1 } + // Copying the node does not make sense when both input and output are bytes. + opts.SkipDefensiveCopying = true + if parserOpts.RegoVersion == ast.RegoUndefined { parserOpts.RegoVersion = ast.DefaultRegoVersion } @@ -166,7 +179,9 @@ func AstWithOpts(x any, opts Opts) ([]byte, error) { // The node has to be deep copied because it may be mutated below. 
Alternatively, // we could avoid the copy by checking if mutation will occur first. For now, // since format is not latency sensitive, just deep copy in all cases. - x = ast.Copy(x) + if !opts.SkipDefensiveCopying { + x = ast.Copy(x) + } wildcards := map[ast.Var]*ast.Term{} @@ -233,10 +248,11 @@ func AstWithOpts(x any, opts Opts) ([]byte, error) { } case *ast.Rule: - if len(n.Head.Ref()) > 2 { + headLen := len(n.Head.Ref()) + if headLen > 2 { o.refHeads = true } - if len(n.Head.Ref()) == 2 && n.Head.Key != nil && n.Head.Value == nil { // p.q contains "x" + if headLen == 2 && n.Head.Key != nil && n.Head.Value == nil { // p.q contains "x" o.refHeads = true } } @@ -339,6 +355,7 @@ func AstWithOpts(x any, opts Opts) ([]byte, error) { if len(w.errs) > 0 { return nil, w.errs } + return squashTrailingNewlines(w.buf.Bytes()), nil } @@ -545,8 +562,6 @@ func (w *writer) writeRules(rules []*ast.Rule, comments []*ast.Comment) ([]*ast. return comments, nil } -var expandedConst = ast.NewBody(ast.NewExpr(ast.InternedTerm(true))) - func (w *writer) groupableOneLiner(rule *ast.Rule) bool { // Location required to determine if two rules are adjacent in the policy. // If not, we respect line breaks between rules. 
@@ -667,8 +682,6 @@ func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment) return comments, nil } -var elseVar ast.Value = ast.Var("else") - func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) ([]*ast.Comment, error) { // If there was nothing else on the line before the "else" starts // then preserve this style of else block, otherwise it will be @@ -715,7 +728,7 @@ func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) ([]*ast.Comm rule.Else.Head.Name = "else" // NOTE(sr): whaaat - elseHeadReference := ast.NewTerm(elseVar) // construct a reference for the term + elseHeadReference := ast.VarTerm("else") // construct a reference for the term elseHeadReference.Location = rule.Else.Head.Location // and set the location to match the rule location rule.Else.Head.Reference = ast.Ref{elseHeadReference} @@ -1127,18 +1140,33 @@ func (w *writer) writeWith(with *ast.With, comments []*ast.Comment, indented boo return comments, nil } +// saveComments saves a copy of the comments slice in a pooled slice to and returns it. +// This is to avoid having to create a new slice every time we need to save comments. +// The caller is responsible for putting the slice back in the pool when done. 
+func saveComments(comments []*ast.Comment) *[]*ast.Comment { + cmlen := len(comments) + saved := commentsSlicePool.Get(cmlen) + + copy(*saved, comments) + + return saved +} + func (w *writer) writeTerm(term *ast.Term, comments []*ast.Comment) ([]*ast.Comment, error) { - currentComments := make([]*ast.Comment, len(comments)) - copy(currentComments, comments) + if len(comments) == 0 { + return w.writeTermParens(false, term, comments) + } currentLen := w.buf.Len() + currentComments := saveComments(comments) + defer commentsSlicePool.Put(currentComments) comments, err := w.writeTermParens(false, term, comments) if err != nil { if errors.As(err, &unexpectedCommentError{}) { w.buf.Truncate(currentLen) - comments, uErr := w.writeUnformatted(term.Location, currentComments) + comments, uErr := w.writeUnformatted(term.Location, *currentComments) if uErr != nil { return nil, uErr } @@ -1156,16 +1184,16 @@ func (w *writer) writeUnformatted(location *ast.Location, currentComments []*ast return nil, errors.New("original unformatted text is empty") } - rawRule := string(location.Text) - rowNum := len(strings.Split(rawRule, "\n")) + rowNum := bytes.Count(location.Text, []byte{'\n'}) + 1 - w.write(string(location.Text)) + w.writeBytes(location.Text) comments := make([]*ast.Comment, 0, len(currentComments)) for _, c := range currentComments { // if there is a body then wait to write the last comment if w.writeCommentOnFinalLine && c.Location.Row == location.Row+rowNum-1 { - w.write(" " + string(c.Location.Text)) + w.write(" ") + w.writeBytes(c.Location.Text) continue } @@ -1227,19 +1255,19 @@ func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Co case ast.String: if term.Location.Text[0] == '`' { // To preserve raw strings, we need to output the original text, - w.write(string(term.Location.Text)) + w.writeBytes(term.Location.Text) } else { // x.String() cannot be used by default because it can change the input string "\u0000" to "\x00" - var after, quote 
string + var after, quote []byte var found bool // term.Location.Text could contain the prefix `else :=`, remove it switch term.Location.Text[len(term.Location.Text)-1] { case '"': - quote = "\"" - _, after, found = strings.Cut(string(term.Location.Text), quote) + quote = []byte{'"'} + _, after, found = bytes.Cut(term.Location.Text, quote) case '`': - quote = "`" - _, after, found = strings.Cut(string(term.Location.Text), quote) + quote = []byte{'`'} + _, after, found = bytes.Cut(term.Location.Text, quote) } if !found { @@ -1247,10 +1275,16 @@ func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Co // e.g. partial_set.y to partial_set["y"] w.write(x.String()) } else { - w.write(quote + after) + w.writeBytes(quote) + w.writeBytes(after) } } + case *ast.TemplateString: + comments, err = w.writeTemplateString(x, comments) + if err != nil { + return nil, err + } case ast.Var: w.write(w.formatVar(x)) case ast.Call: @@ -1268,6 +1302,91 @@ func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Co return comments, nil } +func (w *writer) writeTemplateString(ts *ast.TemplateString, comments []*ast.Comment) ([]*ast.Comment, error) { + w.write("$") + if ts.MultiLine { + w.write("`") + } else { + w.write(`"`) + } + + for i, p := range ts.Parts { + switch x := p.(type) { + case *ast.Expr: + w.write("{") + w.up() + + if w.beforeEnd != nil { + // We have a comment on the same line as the opening template-expression brace '{' + w.endLine() + w.startLine() + } else { + // We might have comments to write; the first of which should be on the same line as the opening template-expression brace '{' + before, _, _ := partitionComments(comments, x.Location) + if len(before) > 0 { + w.write(" ") + w.inline = true + if err := w.writeComments(before); err != nil { + return nil, err + } + + comments = comments[len(before):] + } + } + + var err error + comments, err = w.writeExpr(x, comments) + if err != nil { + return comments, err + } + + // 
write trailing comments + if i+1 < len(ts.Parts) { + before, _, _ := partitionComments(comments, ts.Parts[i+1].Loc()) + if len(before) > 0 { + w.endLine() + if err := w.writeComments(before); err != nil { + return nil, err + } + + comments = comments[len(before):] + w.startLine() + } + } + + w.write("}") + + if err := w.down(); err != nil { + return nil, err + } + case *ast.Term: + if s, ok := x.Value.(ast.String); ok { + if ts.MultiLine { + w.write(ast.EscapeTemplateStringStringPart(string(s))) + } else { + str := ast.EscapeTemplateStringStringPart(s.String()) + w.write(str[1 : len(str)-1]) + } + } else { + s := x.String() + s = strings.TrimPrefix(s, "\"") + s = strings.TrimSuffix(s, "\"") + w.write(s) + } + default: + w.write("") + } + } + + if ts.MultiLine { + w.write("`") + } else { + w.write(`"`) + } + + return comments, nil +} + func (w *writer) writeRef(x ast.Ref, comments []*ast.Comment) ([]*ast.Comment, error) { if len(x) > 0 { parens := false @@ -1310,8 +1429,6 @@ func (w *writer) writeBracketed(str string) { w.write("[" + str + "]") } -var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") - func (w *writer) writeRefStringPath(s ast.String, l *ast.Location) { str := string(s) if w.shouldBracketRefTerm(str, l) { @@ -1322,7 +1439,7 @@ func (w *writer) writeRefStringPath(s ast.String, l *ast.Location) { } func (w *writer) shouldBracketRefTerm(s string, l *ast.Location) bool { - if !varRegexp.MatchString(s) { + if !ast.IsVarCompatibleString(s) { return true } @@ -1900,7 +2017,7 @@ func partitionComments(comments []*ast.Comment, l *ast.Location) ([]*ast.Comment var at *ast.Comment before := make([]*ast.Comment, 0, numBefore) - after := comments[0 : 0 : len(comments)-numBefore] + after := make([]*ast.Comment, 0, numAfter) for _, c := range comments { switch cmp := c.Location.Row - l.Row; { @@ -2130,11 +2247,16 @@ func (w *writer) blankLine() { w.write("\n") } -// write the input string and writes it to the buffer. 
+// write writes string s to the buffer. func (w *writer) write(s string) { w.buf.WriteString(s) } +// writeBytes writes []byte b to the buffer. +func (w *writer) writeBytes(b []byte) { + w.buf.Write(b) +} + // writeLine writes the string on a newly started line, then terminate the line. func (w *writer) writeLine(s string) { if !w.inline { diff --git a/vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go b/vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go deleted file mode 100644 index cb756e5020..0000000000 --- a/vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2023 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package hooks - -import ( - "context" - "fmt" - - "github.com/open-policy-agent/opa/v1/config" - topdown_cache "github.com/open-policy-agent/opa/v1/topdown/cache" -) - -// Hook is a hook to be called in some select places in OPA's operation. -// -// The base Hook interface is any, and wherever a hook can occur, the calling code -// will check if your hook implements an appropriate interface. If so, your hook -// is called. -// -// This allows you to only hook in to behavior you care about, and it allows the -// OPA to add more hooks in the future. -// -// All hook interfaces in this package have Hook in the name. Hooks must be safe -// for concurrent use. It is expected that hooks are fast; if a hook needs to take -// time, then copy what you need and ensure the hook is async. -// -// When multiple instances of a hook are provided, they are all going to be executed -// in an unspecified order (it's a map-range call underneath). If you need hooks to -// be run in order, you can wrap them into another hook, and configure that one. -type Hook any - -// Hooks is the type used for every struct in OPA that can work with hooks. 
-type Hooks struct { - m map[Hook]struct{} // we are NOT providing a stable invocation ordering -} - -// New creates a new instance of Hooks. -func New(hs ...Hook) Hooks { - h := Hooks{m: make(map[Hook]struct{}, len(hs))} - for i := range hs { - h.m[hs[i]] = struct{}{} - } - return h -} - -func (hs Hooks) Each(fn func(Hook)) { - for h := range hs.m { - fn(h) - } -} - -func (hs Hooks) Len() int { - return len(hs.m) -} - -// ConfigHook allows inspecting or rewriting the configuration when the plugin -// manager is processing it. -// Note that this hook is not run when the plugin manager is reconfigured. This -// usually only happens when there's a new config from a discovery bundle, and -// for processing _that_, there's `ConfigDiscoveryHook`. -type ConfigHook interface { - OnConfig(context.Context, *config.Config) (*config.Config, error) -} - -// ConfigHook allows inspecting or rewriting the discovered configuration when -// the discovery plugin is processing it. -type ConfigDiscoveryHook interface { - OnConfigDiscovery(context.Context, *config.Config) (*config.Config, error) -} - -// InterQueryCacheHook allows access to the server's inter-query cache instance. -// It's useful for out-of-tree handlers that also need to evaluate something. -// Using this hook, they can share the caches with the rest of OPA. -type InterQueryCacheHook interface { - OnInterQueryCache(context.Context, topdown_cache.InterQueryCache) error -} - -// InterQueryValueCacheHook allows access to the server's inter-query value cache -// instance. 
-type InterQueryValueCacheHook interface { - OnInterQueryValueCache(context.Context, topdown_cache.InterQueryValueCache) error -} - -func (hs Hooks) Validate() error { - for h := range hs.m { - switch h.(type) { - case InterQueryCacheHook, - InterQueryValueCacheHook, - ConfigHook, - ConfigDiscoveryHook: // OK - default: - return fmt.Errorf("unknown hook type %T", h) - } - } - return nil -} diff --git a/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go b/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go index 42a59d031f..d97e3e5409 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go +++ b/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go @@ -495,6 +495,7 @@ func loadOneSchema(path string) (any, error) { } // All returns a Result object loaded (recursively) from the specified paths. +// // Deprecated: Use FileLoader.Filtered() instead. func All(paths []string) (*Result, error) { return NewFileLoader().Filtered(paths, nil) @@ -503,6 +504,7 @@ func All(paths []string) (*Result, error) { // Filtered returns a Result object loaded (recursively) from the specified // paths while applying the given filters. If any filter returns true, the // file/directory is excluded. +// // Deprecated: Use FileLoader.Filtered() instead. func Filtered(paths []string, filter Filter) (*Result, error) { return NewFileLoader().Filtered(paths, filter) @@ -511,6 +513,7 @@ func Filtered(paths []string, filter Filter) (*Result, error) { // AsBundle loads a path as a bundle. If it is a single file // it will be treated as a normal tarball bundle. If a directory // is supplied it will be loaded as an unzipped bundle tree. +// // Deprecated: Use FileLoader.AsBundle() instead. 
func AsBundle(path string) (*bundle.Bundle, error) { return NewFileLoader().AsBundle(path) @@ -631,11 +634,10 @@ func (l *Result) mergeDocument(path string, doc any) error { } func (l *Result) withParent(p string) *Result { - path := append(l.path, p) return &Result{ Documents: l.Documents, Modules: l.Modules, - path: path, + path: append(l.path, p), } } diff --git a/vendor/github.com/open-policy-agent/opa/v1/logging/buffered_logger.go b/vendor/github.com/open-policy-agent/opa/v1/logging/buffered_logger.go new file mode 100644 index 0000000000..1fbd560035 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/logging/buffered_logger.go @@ -0,0 +1,182 @@ +package logging + +import ( + "fmt" + "maps" + "sync" + "time" +) + +type logEntry struct { + level Level + message string + fields map[string]any + time time.Time +} + +// BufferedLogger captures log entries in memory until Flush is called, +// at which point it replays all buffered entries to the target logger. +// After Flush() is called, the BufferedLogger should not be used anymore. +type BufferedLogger struct { + mu sync.Mutex + buffer []logEntry + maxEntries int + currentLevel Level +} + +// NewBufferedLogger creates a new buffered logger that will buffer up to maxEntries. +func NewBufferedLogger(maxEntries int) *BufferedLogger { + if maxEntries <= 0 { + maxEntries = 1000 + } + return &BufferedLogger{ + buffer: make([]logEntry, 0, maxEntries), + maxEntries: maxEntries, + currentLevel: Info, + } +} + +func (b *BufferedLogger) addToBuffer(level Level, format string, args []any, fields map[string]any) { + message := format + if len(args) > 0 { + message = fmt.Sprintf(format, args...) 
+ } + + entry := logEntry{ + level: level, + message: message, + fields: fields, + time: time.Now(), + } + + b.mu.Lock() + defer b.mu.Unlock() + + if len(b.buffer) >= b.maxEntries { + b.buffer = b.buffer[1:] + } + b.buffer = append(b.buffer, entry) +} + +func (*BufferedLogger) logToTarget(target Logger, entry logEntry) { + fields := make(map[string]any, len(entry.fields)) + maps.Copy(fields, entry.fields) + fields["time"] = entry.time + + logger := target.WithFields(fields) + + switch entry.level { + case Debug: + logger.Debug("%s", entry.message) + case Info: + logger.Info("%s", entry.message) + case Warn: + logger.Warn("%s", entry.message) + case Error: + logger.Error("%s", entry.message) + } +} + +func (b *BufferedLogger) Debug(format string, args ...any) { + b.addToBuffer(Debug, format, args, nil) +} + +func (b *BufferedLogger) Info(format string, args ...any) { + b.addToBuffer(Info, format, args, nil) +} + +func (b *BufferedLogger) Warn(format string, args ...any) { + b.addToBuffer(Warn, format, args, nil) +} + +func (b *BufferedLogger) Error(format string, args ...any) { + b.addToBuffer(Error, format, args, nil) +} + +// WithFields returns a new logger with additional fields. +func (b *BufferedLogger) WithFields(fields map[string]any) Logger { + return &bufferedLoggerWithFields{ + parent: b, + fields: fields, + } +} + +// GetLevel returns the current log level. +func (b *BufferedLogger) GetLevel() Level { + b.mu.Lock() + defer b.mu.Unlock() + return b.currentLevel +} + +// SetLevel sets the log level. +func (b *BufferedLogger) SetLevel(level Level) { + b.mu.Lock() + defer b.mu.Unlock() + b.currentLevel = level +} + +// Close discards all buffered entries without flushing them. +// After calling Close, the BufferedLogger should not be used anymore. +func (b *BufferedLogger) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.buffer = nil +} + +// Flush replays all buffered entries to the target logger. 
+// After calling Flush, the BufferedLogger should not be used anymore. +// The caller should switch to using the target logger directly. +func (b *BufferedLogger) Flush(targetLogger Logger) { + if targetLogger == nil { + return + } + + b.mu.Lock() + targetLogger.SetLevel(b.currentLevel) + entries := b.buffer + b.buffer = nil + b.mu.Unlock() + + for _, entry := range entries { + b.logToTarget(targetLogger, entry) + } +} + +type bufferedLoggerWithFields struct { + parent *BufferedLogger + fields map[string]any +} + +func (b *bufferedLoggerWithFields) Debug(format string, args ...any) { + b.parent.addToBuffer(Debug, format, args, b.fields) +} + +func (b *bufferedLoggerWithFields) Info(format string, args ...any) { + b.parent.addToBuffer(Info, format, args, b.fields) +} + +func (b *bufferedLoggerWithFields) Warn(format string, args ...any) { + b.parent.addToBuffer(Warn, format, args, b.fields) +} + +func (b *bufferedLoggerWithFields) Error(format string, args ...any) { + b.parent.addToBuffer(Error, format, args, b.fields) +} + +func (b *bufferedLoggerWithFields) WithFields(fields map[string]any) Logger { + merged := make(map[string]any, len(b.fields)+len(fields)) + maps.Copy(merged, b.fields) + maps.Copy(merged, fields) + return &bufferedLoggerWithFields{ + parent: b.parent, + fields: merged, + } +} + +func (b *bufferedLoggerWithFields) GetLevel() Level { + return b.parent.GetLevel() +} + +func (b *bufferedLoggerWithFields) SetLevel(level Level) { + b.parent.SetLevel(level) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/logging/logging.go b/vendor/github.com/open-policy-agent/opa/v1/logging/logging.go index 5ff27a2116..135785994f 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/logging/logging.go +++ b/vendor/github.com/open-policy-agent/opa/v1/logging/logging.go @@ -2,9 +2,12 @@ package logging import ( "context" + "fmt" "io" + "log/slog" "maps" "net/http" + "time" "github.com/sirupsen/logrus" ) @@ -36,6 +39,23 @@ type Logger interface { 
SetLevel(Level) } +// LoggerWithContext is an optional interface that Logger implementations +// can implement to support extracting trace information from a context. +// Use WithContext to call this method on a Logger if it is supported. +type LoggerWithContext interface { + WithContext(context.Context) Logger +} + +// WithContext returns a logger with context information if the logger +// supports it (i.e., implements LoggerWithContext). Otherwise, the +// logger is returned unchanged. +func WithContext(logger Logger, ctx context.Context) Logger { + if lc, ok := logger.(LoggerWithContext); ok { + return lc.WithContext(ctx) + } + return logger +} + // StandardLogger is the default OPA logger implementation. type StandardLogger struct { logger *logrus.Logger @@ -272,3 +292,179 @@ func BatchDecisionIDFromContext(ctx context.Context) (string, bool) { s, ok := ctx.Value(batchDecisionCtxKey).(string) return s, ok } + +// SlogHandler adapts a Logger to slog.Handler interface +type SlogHandler struct { + logger Logger +} + +// NewSlogHandler creates an slog.Handler from a Logger +func NewSlogHandler(logger Logger) slog.Handler { + return &SlogHandler{logger: logger} +} + +func (h *SlogHandler) Enabled(_ context.Context, level slog.Level) bool { + lvl := h.logger.GetLevel() + switch level { + case slog.LevelDebug: + return lvl >= Debug + case slog.LevelInfo: + return lvl >= Info + case slog.LevelWarn: + return lvl >= Warn + case slog.LevelError: + return lvl >= Error + } + return true +} + +func (h *SlogHandler) Handle(ctx context.Context, record slog.Record) error { + attrs := make(map[string]any) + record.Attrs(func(a slog.Attr) bool { + attrs[a.Key] = a.Value.Any() + return true + }) + + logger := h.logger + if len(attrs) > 0 { + logger = logger.WithFields(attrs) + } + if ctx != nil { + logger = WithContext(logger, ctx) + } + + msg := record.Message + switch record.Level { + case slog.LevelDebug: + logger.Debug(msg) + case slog.LevelInfo: + logger.Info(msg) + case 
slog.LevelWarn: + logger.Warn(msg) + case slog.LevelError: + logger.Error(msg) + default: + logger.Info(msg) + } + return nil +} + +func (h *SlogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + fields := make(map[string]any, len(attrs)) + for _, a := range attrs { + fields[a.Key] = a.Value.Any() + } + return &SlogHandler{ + logger: h.logger.WithFields(fields), + } +} + +func (h *SlogHandler) WithGroup(name string) slog.Handler { + return h +} + +// loggerFromSlogHandler wraps an slog.Handler to implement the Logger interface +type loggerFromSlogHandler struct { + handler slog.Handler + level Level + fields map[string]any + ctx context.Context +} + +var _ LoggerWithContext = (*loggerFromSlogHandler)(nil) + +// NewLoggerFromSlogHandler creates a Logger from an slog.Handler +func NewLoggerFromSlogHandler(handler slog.Handler, level Level) Logger { + return &loggerFromSlogHandler{ + handler: handler, + level: level, + fields: make(map[string]any), + ctx: context.Background(), + } +} + +func (l *loggerFromSlogHandler) log(level slog.Level, format string, args ...any) { + if !l.handler.Enabled(l.ctx, level) { + return + } + msg := format + if len(args) > 0 { + msg = fmt.Sprintf(format, args...) + } + + attrs := make([]slog.Attr, 0, len(l.fields)) + for k, v := range l.fields { + var attr slog.Attr + switch val := v.(type) { + case string: + attr = slog.String(k, val) + case int: + attr = slog.Int(k, val) + case int64: + attr = slog.Int64(k, val) + case uint64: + attr = slog.Uint64(k, val) + case float64: + attr = slog.Float64(k, val) + case bool: + attr = slog.Bool(k, val) + case time.Time: + attr = slog.Time(k, val) + case time.Duration: + attr = slog.Duration(k, val) + default: + attr = slog.Any(k, v) + } + attrs = append(attrs, attr) + } + + record := slog.NewRecord(time.Now(), level, msg, 0) + record.AddAttrs(attrs...) 
+ + _ = l.handler.Handle(l.ctx, record) +} + +func (l *loggerFromSlogHandler) Debug(format string, args ...any) { + l.log(slog.LevelDebug, format, args...) +} + +func (l *loggerFromSlogHandler) Info(format string, args ...any) { + l.log(slog.LevelInfo, format, args...) +} + +func (l *loggerFromSlogHandler) Warn(format string, args ...any) { + l.log(slog.LevelWarn, format, args...) +} + +func (l *loggerFromSlogHandler) Error(format string, args ...any) { + l.log(slog.LevelError, format, args...) +} + +func (l *loggerFromSlogHandler) WithFields(fields map[string]any) Logger { + merged := make(map[string]any, len(l.fields)+len(fields)) + maps.Copy(merged, l.fields) + maps.Copy(merged, fields) + return &loggerFromSlogHandler{ + handler: l.handler, + level: l.level, + fields: merged, + ctx: l.ctx, + } +} + +func (l *loggerFromSlogHandler) WithContext(ctx context.Context) Logger { + return &loggerFromSlogHandler{ + handler: l.handler, + level: l.level, + fields: l.fields, + ctx: ctx, + } +} + +func (l *loggerFromSlogHandler) GetLevel() Level { + return l.level +} + +func (l *loggerFromSlogHandler) SetLevel(level Level) { + l.level = level +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go b/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go index 316ffe7897..481f27337e 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go +++ b/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go @@ -19,21 +19,27 @@ import ( // Well-known metric names. 
const ( - BundleRequest = "bundle_request" - ServerHandler = "server_handler" - ServerQueryCacheHit = "server_query_cache_hit" - SDKDecisionEval = "sdk_decision_eval" - RegoQueryCompile = "rego_query_compile" - RegoQueryEval = "rego_query_eval" - RegoQueryParse = "rego_query_parse" - RegoModuleParse = "rego_module_parse" - RegoDataParse = "rego_data_parse" - RegoModuleCompile = "rego_module_compile" - RegoPartialEval = "rego_partial_eval" - RegoInputParse = "rego_input_parse" - RegoLoadFiles = "rego_load_files" - RegoLoadBundles = "rego_load_bundles" - RegoExternalResolve = "rego_external_resolve" + BundleRequest = "bundle_request" + ServerHandler = "server_handler" + ServerQueryCacheHit = "server_query_cache_hit" + SDKDecisionEval = "sdk_decision_eval" + RegoQueryCompile = "rego_query_compile" + RegoQueryEval = "rego_query_eval" + RegoQueryParse = "rego_query_parse" + RegoModuleParse = "rego_module_parse" + RegoDataParse = "rego_data_parse" + RegoModuleCompile = "rego_module_compile" + RegoPartialEval = "rego_partial_eval" + RegoInputParse = "rego_input_parse" + RegoLoadFiles = "rego_load_files" + RegoLoadBundles = "rego_load_bundles" + RegoExternalResolve = "rego_external_resolve" + CompilePrepPartial = "compile_prep_partial" + CompileEvalConstraints = "compile_eval_constraints" + CompileTranslateQueries = "compile_translate_queries" + CompileExtractAnnotationsUnknowns = "compile_extract_annotations_unknowns" + CompileExtractAnnotationsMask = "compile_extract_annotations_mask" + CompileEvalMaskRule = "compile_eval_mask_rule" ) // Info contains attributes describing the underlying metrics provider. diff --git a/vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go deleted file mode 100644 index ca8df1ee48..0000000000 --- a/vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go +++ /dev/null @@ -1,1195 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. 
-// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package plugins implements plugin management for the policy engine. -package plugins - -import ( - "context" - "errors" - "fmt" - "maps" - mr "math/rand" - "net/http" - "sync" - "time" - - "github.com/open-policy-agent/opa/internal/report" - "github.com/prometheus/client_golang/prometheus" - "go.opentelemetry.io/otel/sdk/trace" - - bundleUtils "github.com/open-policy-agent/opa/internal/bundle" - cfg "github.com/open-policy-agent/opa/internal/config" - initload "github.com/open-policy-agent/opa/internal/runtime/init" - "github.com/open-policy-agent/opa/v1/ast" - "github.com/open-policy-agent/opa/v1/bundle" - "github.com/open-policy-agent/opa/v1/config" - "github.com/open-policy-agent/opa/v1/hooks" - "github.com/open-policy-agent/opa/v1/keys" - "github.com/open-policy-agent/opa/v1/loader" - "github.com/open-policy-agent/opa/v1/logging" - "github.com/open-policy-agent/opa/v1/plugins/rest" - "github.com/open-policy-agent/opa/v1/resolver/wasm" - "github.com/open-policy-agent/opa/v1/storage" - "github.com/open-policy-agent/opa/v1/topdown/cache" - "github.com/open-policy-agent/opa/v1/topdown/print" - "github.com/open-policy-agent/opa/v1/tracing" -) - -// Factory defines the interface OPA uses to instantiate your plugin. -// -// When OPA processes it's configuration it looks for factories that -// have been registered by calling runtime.RegisterPlugin. Factories -// are registered to a name which is used to key into the -// configuration blob. If your plugin has not been configured, your -// factory will not be invoked. -// -// plugins: -// my_plugin1: -// some_key: foo -// # my_plugin2: -// # some_key2: bar -// -// If OPA was started with the configuration above and received two -// calls to runtime.RegisterPlugins (one with NAME "my_plugin1" and -// one with NAME "my_plugin2"), it would only invoke the factory for -// for my_plugin1. 
-// -// OPA instantiates and reconfigures plugins in two steps. First, OPA -// will call Validate to check the configuration. Assuming the -// configuration is valid, your factory should return a configuration -// value that can be used to construct your plugin. Second, OPA will -// call New to instantiate your plugin providing the configuration -// value returned from the Validate call. -// -// Validate receives a slice of bytes representing plugin -// configuration and returns a configuration value that can be used to -// instantiate your plugin. The manager is provided to give access to -// the OPA's compiler, storage layer, and global configuration. Your -// Validate function will typically: -// -// 1. Deserialize the raw config bytes -// 2. Validate the deserialized config for semantic errors -// 3. Inject default values -// 4. Return a deserialized/parsed config -// -// New receives a valid configuration for your plugin and returns a -// plugin object. Your New function will typically: -// -// 1. Cast the config value to it's own type -// 2. Instantiate a plugin object -// 3. Return the plugin object -// 4. Update status via `plugins.Manager#UpdatePluginStatus` -// -// After a plugin has been created subsequent status updates can be -// send anytime the plugin enters a ready or error state. -type Factory interface { - Validate(manager *Manager, config []byte) (any, error) - New(manager *Manager, config any) Plugin -} - -// Plugin defines the interface OPA uses to manage your plugin. -// -// When OPA starts it will start all of the plugins it was configured -// to instantiate. Each time a new plugin is configured (via -// discovery), OPA will start it. You can use the Start call to spawn -// additional goroutines or perform initialization tasks. -// -// Currently OPA will not call Stop on plugins. 
-// -// When OPA receives new configuration for your plugin via discovery -// it will first Validate the configuration using your factory and -// then call Reconfigure. -type Plugin interface { - Start(ctx context.Context) error - Stop(ctx context.Context) - Reconfigure(ctx context.Context, config any) -} - -// Triggerable defines the interface plugins use for manual plugin triggers. -type Triggerable interface { - Trigger(context.Context) error -} - -// State defines the state that a Plugin instance is currently -// in with pre-defined states. -type State string - -const ( - // StateNotReady indicates that the Plugin is not in an error state, but isn't - // ready for normal operation yet. This should only happen at - // initialization time. - StateNotReady State = "NOT_READY" - - // StateOK signifies that the Plugin is operating normally. - StateOK State = "OK" - - // StateErr indicates that the Plugin is in an error state and should not - // be considered as functional. - StateErr State = "ERROR" - - // StateWarn indicates the Plugin is operating, but in a potentially dangerous or - // degraded state. It may be used to indicate manual remediation is needed, or to - // alert admins of some other noteworthy state. - StateWarn State = "WARN" -) - -// TriggerMode defines the trigger mode utilized by a Plugin for bundle download, -// log upload etc. -type TriggerMode string - -const ( - // TriggerPeriodic represents periodic polling mechanism - TriggerPeriodic TriggerMode = "periodic" - - // TriggerManual represents manual triggering mechanism - TriggerManual TriggerMode = "manual" - - // DefaultTriggerMode represents default trigger mechanism - DefaultTriggerMode TriggerMode = "periodic" -) - -// default interval between OPA report uploads -var defaultUploadIntervalSec = int64(3600) - -// Status has a Plugin's current status plus an optional Message. 
-type Status struct { - State State `json:"state"` - Message string `json:"message,omitempty"` -} - -func (s *Status) String() string { - return fmt.Sprintf("{%v %q}", s.State, s.Message) -} - -func (s *Status) Equal(other *Status) bool { - if s == nil || other == nil { - return s == nil && other == nil - } - - return s.State == other.State && s.Message == other.Message -} - -// StatusListener defines a handler to register for status updates. -type StatusListener func(status map[string]*Status) - -// Manager implements lifecycle management of plugins and gives plugins access -// to engine-wide components like storage. -type Manager struct { - Store storage.Store - // Config values should be accessed from the thread-safe GetConfig method. - Config *config.Config - Info *ast.Term - ID string - - compiler *ast.Compiler - compilerMux sync.RWMutex - wasmResolvers []*wasm.Resolver - wasmResolversMtx sync.RWMutex - services map[string]rest.Client - keys map[string]*keys.Config - plugins []namedplugin - registeredTriggers []func(storage.Transaction) - mtx sync.Mutex - pluginStatus map[string]*Status - pluginStatusListeners map[string]StatusListener - initBundles map[string]*bundle.Bundle - initFiles loader.Result - maxErrors int - initialized bool - interQueryBuiltinCacheConfig *cache.Config - gracefulShutdownPeriod int - registeredCacheTriggers []func(*cache.Config) - logger logging.Logger - consoleLogger logging.Logger - serverInitialized chan struct{} - serverInitializedOnce sync.Once - printHook print.Hook - enablePrintStatements bool - router *http.ServeMux - prometheusRegister prometheus.Registerer - tracerProvider *trace.TracerProvider - distributedTacingOpts tracing.Options - registeredNDCacheTriggers []func(bool) - registeredTelemetryGatherers map[string]report.Gatherer - bootstrapConfigLabels map[string]string - hooks hooks.Hooks - enableTelemetry bool - reporter report.Reporter - opaReportNotifyCh chan struct{} - stop chan chan struct{} - parserOptions 
ast.ParserOptions - extraRoutes map[string]ExtraRoute - extraMiddlewares []func(http.Handler) http.Handler - extraAuthorizerRoutes []func(string, []any) bool - bundleActivatorPlugin string -} - -type ( - managerContextKey string - managerWasmResolverKey string -) - -const ( - managerCompilerContextKey = managerContextKey("compiler") - managerWasmResolverContextKey = managerWasmResolverKey("wasmResolvers") -) - -// SetCompilerOnContext puts the compiler into the storage context. Calling this -// function before committing updated policies to storage allows the manager to -// skip parsing and compiling of modules. Instead, the manager will use the -// compiler that was stored on the context. -func SetCompilerOnContext(context *storage.Context, compiler *ast.Compiler) { - context.Put(managerCompilerContextKey, compiler) -} - -// GetCompilerOnContext gets the compiler cached on the storage context. -func GetCompilerOnContext(context *storage.Context) *ast.Compiler { - compiler, ok := context.Get(managerCompilerContextKey).(*ast.Compiler) - if !ok { - return nil - } - return compiler -} - -// SetWasmResolversOnContext puts a set of Wasm Resolvers into the storage -// context. Calling this function before committing updated wasm modules to -// storage allows the manager to skip initializing modules before using them. -// Instead, the manager will use the compiler that was stored on the context. -func SetWasmResolversOnContext(context *storage.Context, rs []*wasm.Resolver) { - context.Put(managerWasmResolverContextKey, rs) -} - -// getWasmResolversOnContext gets the resolvers cached on the storage context. 
-func getWasmResolversOnContext(context *storage.Context) []*wasm.Resolver { - resolvers, ok := context.Get(managerWasmResolverContextKey).([]*wasm.Resolver) - if !ok { - return nil - } - return resolvers -} - -func validateTriggerMode(mode TriggerMode) error { - switch mode { - case TriggerPeriodic, TriggerManual: - return nil - default: - return fmt.Errorf("invalid trigger mode %q (want %q or %q)", mode, TriggerPeriodic, TriggerManual) - } -} - -// ValidateAndInjectDefaultsForTriggerMode validates the trigger mode and injects default values -func ValidateAndInjectDefaultsForTriggerMode(a, b *TriggerMode) (*TriggerMode, error) { - if a == nil && b != nil { - err := validateTriggerMode(*b) - if err != nil { - return nil, err - } - return b, nil - } else if a != nil && b == nil { - err := validateTriggerMode(*a) - if err != nil { - return nil, err - } - return a, nil - } else if a != nil && b != nil { - if *a != *b { - return nil, fmt.Errorf("trigger mode mismatch: %s and %s (hint: check discovery configuration)", *a, *b) - } - err := validateTriggerMode(*a) - if err != nil { - return nil, err - } - return a, nil - } - - t := DefaultTriggerMode - return &t, nil -} - -type namedplugin struct { - name string - plugin Plugin -} - -// Info sets the runtime information on the manager. The runtime information is -// propagated to opa.runtime() built-in function calls. -func Info(term *ast.Term) func(*Manager) { - return func(m *Manager) { - m.Info = term - } -} - -// InitBundles provides the initial set of bundles to load. -func InitBundles(b map[string]*bundle.Bundle) func(*Manager) { - return func(m *Manager) { - m.initBundles = b - } -} - -// InitFiles provides the initial set of other data/policy files to load. -func InitFiles(f loader.Result) func(*Manager) { - return func(m *Manager) { - m.initFiles = f - } -} - -// MaxErrors sets the error limit for the manager's shared compiler. 
-func MaxErrors(n int) func(*Manager) { - return func(m *Manager) { - m.maxErrors = n - } -} - -// GracefulShutdownPeriod passes the configured graceful shutdown period to plugins -func GracefulShutdownPeriod(gracefulShutdownPeriod int) func(*Manager) { - return func(m *Manager) { - m.gracefulShutdownPeriod = gracefulShutdownPeriod - } -} - -// Logger configures the passed logger on the plugin manager (useful to -// configure default fields) -func Logger(logger logging.Logger) func(*Manager) { - return func(m *Manager) { - m.logger = logger - } -} - -// ConsoleLogger sets the passed logger to be used by plugins that are -// configured with console logging enabled. -func ConsoleLogger(logger logging.Logger) func(*Manager) { - return func(m *Manager) { - m.consoleLogger = logger - } -} - -func EnablePrintStatements(yes bool) func(*Manager) { - return func(m *Manager) { - m.enablePrintStatements = yes - } -} - -func PrintHook(h print.Hook) func(*Manager) { - return func(m *Manager) { - m.printHook = h - } -} - -func WithRouter(r *http.ServeMux) func(*Manager) { - return func(m *Manager) { - m.router = r - } -} - -// WithPrometheusRegister sets the passed prometheus.Registerer to be used by plugins -func WithPrometheusRegister(prometheusRegister prometheus.Registerer) func(*Manager) { - return func(m *Manager) { - m.prometheusRegister = prometheusRegister - } -} - -// WithTracerProvider sets the passed *trace.TracerProvider to be used by plugins -func WithTracerProvider(tracerProvider *trace.TracerProvider) func(*Manager) { - return func(m *Manager) { - m.tracerProvider = tracerProvider - } -} - -// WithDistributedTracingOpts sets the options to be used by distributed tracing. -func WithDistributedTracingOpts(tr tracing.Options) func(*Manager) { - return func(m *Manager) { - m.distributedTacingOpts = tr - } -} - -// WithHooks allows passing hooks to the plugin manager. 
-func WithHooks(hs hooks.Hooks) func(*Manager) { - return func(m *Manager) { - m.hooks = hs - } -} - -// WithParserOptions sets the parser options to be used by the plugin manager. -func WithParserOptions(opts ast.ParserOptions) func(*Manager) { - return func(m *Manager) { - m.parserOptions = opts - } -} - -// WithEnableTelemetry controls whether OPA will send telemetry reports to an external service. -func WithEnableTelemetry(enableTelemetry bool) func(*Manager) { - return func(m *Manager) { - m.enableTelemetry = enableTelemetry - } -} - -// WithTelemetryGatherers allows registration of telemetry gatherers which enable injection of additional data in the -// telemetry report -func WithTelemetryGatherers(gs map[string]report.Gatherer) func(*Manager) { - return func(m *Manager) { - m.registeredTelemetryGatherers = gs - } -} - -// WithBundleActivatorPlugin sets the name of the activator plugin to load bundles into the store -func WithBundleActivatorPlugin(bundleActivatorPlugin string) func(*Manager) { - return func(m *Manager) { - m.bundleActivatorPlugin = bundleActivatorPlugin - } -} - -// New creates a new Manager using config. 
-func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*Manager, error) { - parsedConfig, err := config.ParseConfig(raw, id) - if err != nil { - return nil, err - } - - m := &Manager{ - Store: store, - Config: parsedConfig, - ID: id, - pluginStatus: map[string]*Status{}, - pluginStatusListeners: map[string]StatusListener{}, - maxErrors: -1, - serverInitialized: make(chan struct{}), - bootstrapConfigLabels: parsedConfig.Labels, - extraRoutes: map[string]ExtraRoute{}, - } - - for _, f := range opts { - f(m) - } - - if m.parserOptions.RegoVersion == ast.RegoUndefined { - // Default to v1 if rego-version is not set through options - m.parserOptions.RegoVersion = ast.DefaultRegoVersion - } - - if m.logger == nil { - m.logger = logging.Get() - } - - if m.consoleLogger == nil { - m.consoleLogger = logging.New() - } - - m.hooks.Each(func(h hooks.Hook) { - if f, ok := h.(hooks.ConfigHook); ok { - if c, e := f.OnConfig(context.Background(), parsedConfig); e != nil { - err = errors.Join(err, e) - } else { - parsedConfig = c - } - } - }) - if err != nil { - return nil, err - } - - // do after options and overrides - m.keys, err = keys.ParseKeysConfig(parsedConfig.Keys) - if err != nil { - return nil, err - } - - m.interQueryBuiltinCacheConfig, err = cache.ParseCachingConfig(parsedConfig.Caching) - if err != nil { - return nil, err - } - - serviceOpts := m.DefaultServiceOpts(parsedConfig) - - m.services, err = cfg.ParseServicesConfig(serviceOpts) - if err != nil { - return nil, err - } - - if m.enableTelemetry { - reporter, err := report.New(report.Options{Logger: m.logger}) - if err != nil { - return nil, err - } - m.reporter = reporter - - m.reporter.RegisterGatherer("min_compatible_version", func(_ context.Context) (any, error) { - var minimumCompatibleVersion string - if c := m.GetCompiler(); c != nil && c.Required != nil { - minimumCompatibleVersion, _ = c.Required.MinimumCompatibleVersion() - } - return minimumCompatibleVersion, nil - }) - - // 
register any additional gatherers - for k, g := range m.registeredTelemetryGatherers { - m.reporter.RegisterGatherer(k, g) - } - } - - return m, nil -} - -// Init returns an error if the manager could not initialize itself. Init() should -// be called before Start(). Init() is idempotent. -func (m *Manager) Init(ctx context.Context) error { - if m.initialized { - return nil - } - - params := storage.TransactionParams{ - Write: true, - Context: storage.NewContext(), - } - - if m.enableTelemetry { - m.opaReportNotifyCh = make(chan struct{}) - m.stop = make(chan chan struct{}) - go m.sendOPAUpdateLoop(ctx) - } - - err := storage.Txn(ctx, m.Store, params, func(txn storage.Transaction) error { - result, err := initload.InsertAndCompile(ctx, initload.InsertAndCompileOptions{ - Store: m.Store, - Txn: txn, - Files: m.initFiles, - Bundles: m.initBundles, - MaxErrors: m.maxErrors, - EnablePrintStatements: m.enablePrintStatements, - ParserOptions: m.parserOptions, - BundleActivatorPlugin: m.bundleActivatorPlugin, - }) - if err != nil { - return err - } - - SetCompilerOnContext(params.Context, result.Compiler) - - resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, m.Store, txn, nil) - if err != nil { - return err - } - SetWasmResolversOnContext(params.Context, resolvers) - - _, err = m.Store.Register(ctx, txn, storage.TriggerConfig{OnCommit: m.onCommit}) - return err - }) - if err != nil { - if m.stop != nil { - done := make(chan struct{}) - m.stop <- done - <-done - } - - return err - } - - m.initialized = true - return nil -} - -// Labels returns the set of labels from the configuration. -func (m *Manager) Labels() map[string]string { - m.mtx.Lock() - defer m.mtx.Unlock() - - return maps.Clone(m.Config.Labels) -} - -// InterQueryBuiltinCacheConfig returns the configuration for the inter-query caches. 
-func (m *Manager) InterQueryBuiltinCacheConfig() *cache.Config { - m.mtx.Lock() - defer m.mtx.Unlock() - - return m.interQueryBuiltinCacheConfig.Clone() -} - -// GetConfig returns a deep copy of the manager's configuration. -func (m *Manager) GetConfig() *config.Config { - m.mtx.Lock() - defer m.mtx.Unlock() - - return m.Config.Clone() -} - -// Register adds a plugin to the manager. When the manager is started, all of -// the plugins will be started. -func (m *Manager) Register(name string, plugin Plugin) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.plugins = append(m.plugins, namedplugin{ - name: name, - plugin: plugin, - }) - if _, ok := m.pluginStatus[name]; !ok { - m.pluginStatus[name] = &Status{State: StateNotReady} - } -} - -// Plugins returns the list of plugins registered with the manager. -func (m *Manager) Plugins() []string { - m.mtx.Lock() - defer m.mtx.Unlock() - result := make([]string, len(m.plugins)) - for i := range m.plugins { - result[i] = m.plugins[i].name - } - return result -} - -// Plugin returns the plugin registered with name or nil if name is not found. -func (m *Manager) Plugin(name string) Plugin { - m.mtx.Lock() - defer m.mtx.Unlock() - for i := range m.plugins { - if m.plugins[i].name == name { - return m.plugins[i].plugin - } - } - return nil -} - -// AuthPlugin returns the HTTPAuthPlugin registered with name or nil if name is not found. -func (m *Manager) AuthPlugin(name string) rest.HTTPAuthPlugin { - m.mtx.Lock() - defer m.mtx.Unlock() - for i := range m.plugins { - if m.plugins[i].name == name { - return m.plugins[i].plugin.(rest.HTTPAuthPlugin) - } - } - return nil -} - -// GetCompiler returns the manager's compiler. 
-func (m *Manager) GetCompiler() *ast.Compiler { - m.compilerMux.RLock() - defer m.compilerMux.RUnlock() - return m.compiler -} - -func (m *Manager) setCompiler(compiler *ast.Compiler) { - m.compilerMux.Lock() - defer m.compilerMux.Unlock() - m.compiler = compiler -} - -type ExtraRoute struct { - PromName string // name is for prometheus metrics - HandlerFunc http.HandlerFunc -} - -func (m *Manager) ExtraRoutes() map[string]ExtraRoute { - return m.extraRoutes -} - -func (m *Manager) ExtraMiddlewares() []func(http.Handler) http.Handler { - return m.extraMiddlewares -} - -func (m *Manager) ExtraAuthorizerRoutes() []func(string, []any) bool { - return m.extraAuthorizerRoutes -} - -// ExtraRoute registers an extra route to be served by the HTTP -// server later. Using this instead of directly registering routes -// with GetRouter() lets the server apply its handler wrapping for -// Prometheus and OpenTelemetry. -// Caution: This cannot be used to dynamically register and un- -// register HTTP handlers. It's meant as a late-stage set up helper, -// to be called from a plugin's init methods. -func (m *Manager) ExtraRoute(path, name string, hf http.HandlerFunc) { - if _, ok := m.extraRoutes[path]; ok { - panic("extra route already registered: " + path) - } - m.extraRoutes[path] = ExtraRoute{ - PromName: name, - HandlerFunc: hf, - } -} - -// ExtraMiddleware registers extra middlewares (`func(http.Handler) http.Handler`) -// to be injected into the HTTP handler chain in the server later. -// Caution: This cannot be used to dynamically register and un- -// register middlewares. It's meant as a late-stage set up helper, -// to be called from a plugin's init methods. -func (m *Manager) ExtraMiddleware(mw ...func(http.Handler) http.Handler) { - m.extraMiddlewares = append(m.extraMiddlewares, mw...) -} - -// ExtraAuthorizerRoute registers an extra URL path validator function for use -// in the server authorizer. 
These functions designate specific methods and URL -// prefixes or paths where the authorizer should allow request body parsing. -// Caution: This cannot be used to dynamically register and un- -// register path validator functions. It's meant as a late-stage -// set up helper, to be called from a plugin's init methods. -func (m *Manager) ExtraAuthorizerRoute(validatorFunc func(string, []any) bool) { - m.extraAuthorizerRoutes = append(m.extraAuthorizerRoutes, validatorFunc) -} - -// GetRouter returns the managers router if set -func (m *Manager) GetRouter() *http.ServeMux { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.router -} - -// RegisterCompilerTrigger registers for change notifications when the compiler -// is changed. -func (m *Manager) RegisterCompilerTrigger(f func(storage.Transaction)) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.registeredTriggers = append(m.registeredTriggers, f) -} - -// GetWasmResolvers returns the manager's set of Wasm Resolvers. -func (m *Manager) GetWasmResolvers() []*wasm.Resolver { - m.wasmResolversMtx.RLock() - defer m.wasmResolversMtx.RUnlock() - return m.wasmResolvers -} - -func (m *Manager) setWasmResolvers(rs []*wasm.Resolver) { - m.wasmResolversMtx.Lock() - defer m.wasmResolversMtx.Unlock() - m.wasmResolvers = rs -} - -// Start starts the manager. Init() should be called once before Start(). -func (m *Manager) Start(ctx context.Context) error { - if m == nil { - return nil - } - - if !m.initialized { - if err := m.Init(ctx); err != nil { - return err - } - } - - var toStart []Plugin - - func() { - m.mtx.Lock() - defer m.mtx.Unlock() - toStart = make([]Plugin, len(m.plugins)) - for i := range m.plugins { - toStart[i] = m.plugins[i].plugin - } - }() - - for i := range toStart { - if err := toStart[i].Start(ctx); err != nil { - return err - } - } - - return nil -} - -// Stop stops the manager, stopping all the plugins registered with it. 
-// Any plugin that needs to perform cleanup should do so within the duration -// of the graceful shutdown period passed with the context as a timeout. -// Note that a graceful shutdown period configured with the Manager instance -// will override the timeout of the passed in context (if applicable). -func (m *Manager) Stop(ctx context.Context) { - var toStop []Plugin - - func() { - m.mtx.Lock() - defer m.mtx.Unlock() - toStop = make([]Plugin, len(m.plugins)) - for i := range m.plugins { - toStop[i] = m.plugins[i].plugin - } - }() - - var cancel context.CancelFunc - if m.gracefulShutdownPeriod > 0 { - ctx, cancel = context.WithTimeout(ctx, time.Duration(m.gracefulShutdownPeriod)*time.Second) - } else { - ctx, cancel = context.WithCancel(ctx) - } - defer cancel() - for i := range toStop { - toStop[i].Stop(ctx) - } - if c, ok := m.Store.(interface{ Close(context.Context) error }); ok { - if err := c.Close(ctx); err != nil { - m.logger.Error("Error closing store: %v", err) - } - } - - if m.stop != nil { - done := make(chan struct{}) - m.stop <- done - <-done - } -} - -func (m *Manager) DefaultServiceOpts(config *config.Config) cfg.ServiceOptions { - return cfg.ServiceOptions{ - Raw: config.Services, - AuthPlugin: m.AuthPlugin, - Logger: m.logger, - Keys: m.keys, - DistributedTacingOpts: m.distributedTacingOpts, - } -} - -// Reconfigure updates the configuration on the manager. 
-func (m *Manager) Reconfigure(newCfg *config.Config) error { - config := newCfg.Clone() - - opts := m.DefaultServiceOpts(config) - - keys, err := keys.ParseKeysConfig(config.Keys) - if err != nil { - return err - } - opts.Keys = keys - - services, err := cfg.ParseServicesConfig(opts) - if err != nil { - return err - } - - interQueryBuiltinCacheConfig, err := cache.ParseCachingConfig(config.Caching) - if err != nil { - return err - } - - m.mtx.Lock() - defer m.mtx.Unlock() - - // don't overwrite existing labels, only allow additions - always based on the boostrap config - if config.Labels == nil { - config.Labels = m.bootstrapConfigLabels - } else { - maps.Copy(config.Labels, m.bootstrapConfigLabels) - } - - // don't erase persistence directory - if config.PersistenceDirectory == nil { - // update is ok since we have the lock - config.PersistenceDirectory = m.Config.PersistenceDirectory - } - - m.Config = config - m.interQueryBuiltinCacheConfig = interQueryBuiltinCacheConfig - - maps.Copy(m.services, services) - maps.Copy(m.keys, keys) - - for _, trigger := range m.registeredCacheTriggers { - trigger(interQueryBuiltinCacheConfig) - } - - for _, trigger := range m.registeredNDCacheTriggers { - trigger(config.NDBuiltinCache) - } - - return nil -} - -// PluginStatus returns the current statuses of any plugins registered. -func (m *Manager) PluginStatus() map[string]*Status { - m.mtx.Lock() - defer m.mtx.Unlock() - - return m.copyPluginStatus() -} - -// RegisterPluginStatusListener registers a StatusListener to be -// called when plugin status updates occur. -func (m *Manager) RegisterPluginStatusListener(name string, listener StatusListener) { - m.mtx.Lock() - defer m.mtx.Unlock() - - m.pluginStatusListeners[name] = listener -} - -// UnregisterPluginStatusListener removes a StatusListener registered with the -// same name. 
-func (m *Manager) UnregisterPluginStatusListener(name string) { - m.mtx.Lock() - defer m.mtx.Unlock() - - delete(m.pluginStatusListeners, name) -} - -// UpdatePluginStatus updates a named plugins status. Any registered -// listeners will be called with a copy of the new state of all -// plugins. -func (m *Manager) UpdatePluginStatus(pluginName string, status *Status) { - var toNotify map[string]StatusListener - var statuses map[string]*Status - - func() { - m.mtx.Lock() - defer m.mtx.Unlock() - m.pluginStatus[pluginName] = status - toNotify = make(map[string]StatusListener, len(m.pluginStatusListeners)) - maps.Copy(toNotify, m.pluginStatusListeners) - statuses = m.copyPluginStatus() - }() - - for _, l := range toNotify { - l(statuses) - } -} - -func (m *Manager) copyPluginStatus() map[string]*Status { - statusCpy := map[string]*Status{} - for k, v := range m.pluginStatus { - var cpy *Status - if v != nil { - cpy = &Status{ - State: v.State, - Message: v.Message, - } - } - statusCpy[k] = cpy - } - return statusCpy -} - -func (m *Manager) onCommit(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) { - compiler := GetCompilerOnContext(event.Context) - - // If the context does not contain the compiler fallback to loading the - // compiler from the store. Currently the bundle plugin sets the - // compiler on the context but the server does not (nor would users - // implementing their own policy loading.) - if compiler == nil && event.PolicyChanged() { - compiler, _ = loadCompilerFromStore(ctx, m.Store, txn, m.enablePrintStatements, m.ParserOptions()) - } - - if compiler != nil { - m.setCompiler(compiler) - - if m.enableTelemetry && event.PolicyChanged() { - m.opaReportNotifyCh <- struct{}{} - } - - for _, f := range m.registeredTriggers { - f(txn) - } - } - - // Similar to the compiler, look for a set of resolvers on the transaction - // context. If they are not set we may need to reload from the store. 
- resolvers := getWasmResolversOnContext(event.Context) - if resolvers != nil { - m.setWasmResolvers(resolvers) - } else if event.DataChanged() { - if requiresWasmResolverReload(event) { - resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, m.Store, txn, nil) - if err != nil { - panic(err) - } - m.setWasmResolvers(resolvers) - } else { - err := m.updateWasmResolversData(ctx, event) - if err != nil { - panic(err) - } - } - } -} - -func loadCompilerFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, enablePrintStatements bool, popts ast.ParserOptions) (*ast.Compiler, error) { - policies, err := store.ListPolicies(ctx, txn) - if err != nil { - return nil, err - } - modules := map[string]*ast.Module{} - - for _, policy := range policies { - bs, err := store.GetPolicy(ctx, txn, policy) - if err != nil { - return nil, err - } - module, err := ast.ParseModuleWithOpts(policy, string(bs), popts) - if err != nil { - return nil, err - } - modules[policy] = module - } - - compiler := ast.NewCompiler(). - WithEnablePrintStatements(enablePrintStatements) - - if popts.RegoVersion != ast.RegoUndefined { - compiler = compiler.WithDefaultRegoVersion(popts.RegoVersion) - } - - compiler.Compile(modules) - return compiler, nil -} - -func requiresWasmResolverReload(event storage.TriggerEvent) bool { - // If the data changes touched the bundle path (which includes - // the wasm modules) we will reload them. Otherwise update - // data for each module already on the manager. 
- for _, dataEvent := range event.Data { - if dataEvent.Path.HasPrefix(bundle.BundlesBasePath) { - return true - } - } - return false -} - -func (m *Manager) updateWasmResolversData(ctx context.Context, event storage.TriggerEvent) error { - m.wasmResolversMtx.Lock() - defer m.wasmResolversMtx.Unlock() - - for _, resolver := range m.wasmResolvers { - for _, dataEvent := range event.Data { - var err error - if dataEvent.Removed { - err = resolver.RemoveDataPath(ctx, dataEvent.Path) - } else { - err = resolver.SetDataPath(ctx, dataEvent.Path, dataEvent.Data) - } - if err != nil { - return fmt.Errorf("failed to update wasm runtime data: %s", err) - } - } - } - return nil -} - -// PublicKeys returns a public keys that can be used for verifying signed bundles. -func (m *Manager) PublicKeys() map[string]*keys.Config { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.keys == nil { - return make(map[string]*keys.Config) - } - - result := make(map[string]*keys.Config, len(m.keys)) - for k, v := range m.keys { - if v != nil { - copied := *v - result[k] = &copied - } - } - return result -} - -// Client returns a client for communicating with a remote service. -func (m *Manager) Client(name string) rest.Client { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.services[name] -} - -// Services returns a list of services that m can provide clients for. -func (m *Manager) Services() []string { - m.mtx.Lock() - defer m.mtx.Unlock() - s := make([]string, 0, len(m.services)) - for name := range m.services { - s = append(s, name) - } - return s -} - -// Logger gets the standard logger for this plugin manager. -func (m *Manager) Logger() logging.Logger { - return m.logger -} - -// ConsoleLogger gets the console logger for this plugin manager. 
-func (m *Manager) ConsoleLogger() logging.Logger { - return m.consoleLogger -} - -func (m *Manager) PrintHook() print.Hook { - return m.printHook -} - -func (m *Manager) EnablePrintStatements() bool { - return m.enablePrintStatements -} - -// ServerInitialized signals a channel indicating that the OPA -// server has finished initialization. -func (m *Manager) ServerInitialized() { - m.serverInitializedOnce.Do(func() { close(m.serverInitialized) }) -} - -// ServerInitializedChannel returns a receive-only channel that -// is closed when the OPA server has finished initialization. -// Be aware that the socket of the server listener may not be -// open by the time this channel is closed. There is a very -// small window where the socket may still be closed, due to -// a race condition. -func (m *Manager) ServerInitializedChannel() <-chan struct{} { - return m.serverInitialized -} - -// RegisterCacheTrigger accepts a func that receives new inter-query cache config generated by -// a reconfigure of the plugin manager, so that it can be propagated to existing inter-query caches. -func (m *Manager) RegisterCacheTrigger(trigger func(*cache.Config)) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.registeredCacheTriggers = append(m.registeredCacheTriggers, trigger) -} - -// PrometheusRegister gets the prometheus.Registerer for this plugin manager. -func (m *Manager) PrometheusRegister() prometheus.Registerer { - return m.prometheusRegister -} - -// TracerProvider gets the *trace.TracerProvider for this plugin manager. 
-func (m *Manager) TracerProvider() *trace.TracerProvider { - return m.tracerProvider -} - -func (m *Manager) RegisterNDCacheTrigger(trigger func(bool)) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.registeredNDCacheTriggers = append(m.registeredNDCacheTriggers, trigger) -} - -func (m *Manager) sendOPAUpdateLoop(ctx context.Context) { - ticker := time.NewTicker(time.Duration(int64(time.Second) * defaultUploadIntervalSec)) - mr.New(mr.NewSource(time.Now().UnixNano())) - - ctx, cancel := context.WithCancel(ctx) - - var opaReportNotify bool - - for { - select { - case <-m.opaReportNotifyCh: - opaReportNotify = true - case <-ticker.C: - ticker.Stop() - - if opaReportNotify { - opaReportNotify = false - _, err := m.reporter.SendReport(ctx) - if err != nil { - m.logger.WithFields(map[string]any{"err": err}).Debug("Unable to send OPA telemetry report.") - } - } - - newInterval := mr.Int63n(defaultUploadIntervalSec) + defaultUploadIntervalSec - ticker = time.NewTicker(time.Duration(int64(time.Second) * newInterval)) - case done := <-m.stop: - cancel() - ticker.Stop() - done <- struct{}{} - return - } - } -} - -func (m *Manager) ParserOptions() ast.ParserOptions { - return m.parserOptions -} diff --git a/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go deleted file mode 100644 index 3927a25435..0000000000 --- a/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go +++ /dev/null @@ -1,1211 +0,0 @@ -// Copyright 2019 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package rest - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/sha512" - "crypto/tls" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "hash" - "io" - "maps" - "math/big" - "net/http" - "net/url" - "os" - "strings" - "time" - - "github.com/lestrrat-go/jwx/v3/jwa" - "github.com/lestrrat-go/jwx/v3/jws" - "github.com/open-policy-agent/opa/internal/providers/aws" - "github.com/open-policy-agent/opa/internal/uuid" - "github.com/open-policy-agent/opa/v1/keys" - "github.com/open-policy-agent/opa/v1/logging" -) - -const ( - // Default to s3 when the service for sigv4 signing is not specified for backwards compatibility - awsSigv4SigningDefaultService = "s3" - // Default to urn:ietf:params:oauth:client-assertion-type:jwt-bearer for ClientAssertionType when not specified - defaultClientAssertionType = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" -) - -// DefaultTLSConfig defines standard TLS configurations based on the Config -func DefaultTLSConfig(c Config) (*tls.Config, error) { - t := &tls.Config{} - url, err := url.Parse(c.URL) - if err != nil { - return nil, err - } - if url.Scheme == "https" { - t.InsecureSkipVerify = c.AllowInsecureTLS - } - - if c.TLS != nil && c.TLS.CACert != "" { - caCert, err := os.ReadFile(c.TLS.CACert) - if err != nil { - return nil, err - } - - var rootCAs *x509.CertPool - if c.TLS.SystemCARequired { - rootCAs, err = x509.SystemCertPool() - if err != nil { - return nil, err - } - } else { - rootCAs = x509.NewCertPool() - } - - ok := rootCAs.AppendCertsFromPEM(caCert) - if !ok { - return nil, errors.New("unable to parse and append CA certificate to certificate pool") - } - t.RootCAs = rootCAs - } - - return t, nil -} - -// DefaultRoundTripperClient is a reasonable set of defaults for HTTP auth plugins -func DefaultRoundTripperClient(t *tls.Config, timeout int64) *http.Client { - // Ensure we use a http.Transport with 
proper settings: the zero values are not - // a good choice, as they cause leaking connections: - // https://github.com/golang/go/issues/19620 - - // copy, we don't want to alter the default client's Transport - tr := http.DefaultTransport.(*http.Transport).Clone() - tr.ResponseHeaderTimeout = time.Duration(timeout) * time.Second - tr.TLSClientConfig = t - - c := *http.DefaultClient - c.Transport = tr - return &c -} - -// defaultAuthPlugin represents baseline 'no auth' behavior if no alternative plugin is specified for a service -type defaultAuthPlugin struct{} - -func (*defaultAuthPlugin) NewClient(c Config) (*http.Client, error) { - t, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil -} - -func (*defaultAuthPlugin) Prepare(*http.Request) error { - return nil -} - -type serverTLSConfig struct { - CACert string `json:"ca_cert,omitempty"` - SystemCARequired bool `json:"system_ca_required,omitempty"` -} - -// bearerAuthPlugin represents authentication via a bearer token in the HTTP Authorization header -type bearerAuthPlugin struct { - Token string `json:"token"` - TokenPath string `json:"token_path"` - Scheme string `json:"scheme,omitempty"` - - // encode is set to true for the OCIDownloader because - // it expects tokens in plain text but needs them in base64. - encode bool - logger logging.Logger -} - -func (ap *bearerAuthPlugin) NewClient(c Config) (*http.Client, error) { - t, err := DefaultTLSConfig(c) - - ap.logger = c.logger - - if err != nil { - return nil, err - } - - if ap.Token != "" && ap.TokenPath != "" { - return nil, errors.New("invalid config: specify a value for either the \"token\" or \"token_path\" field") - } - - if ap.Scheme == "" { - ap.Scheme = "Bearer" - } - - if c.Type == "oci" { - // Standard rest clients use the bearer token as it is defined in the Config - // but the OCIDownloader needs it encoded to base64 before using to sign a request. 
- ap.encode = true - } - - return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil -} - -func (ap *bearerAuthPlugin) Prepare(req *http.Request) error { - token := ap.Token - if ap.logger == nil { - ap.logger = logging.Get() - } - - if ap.TokenPath != "" { - bytes, err := os.ReadFile(ap.TokenPath) - if err != nil { - return err - } - token = strings.TrimSpace(string(bytes)) - } - - if ap.encode { - token = base64.StdEncoding.EncodeToString([]byte(token)) - } - - if req.Response != nil && (req.Response.StatusCode == http.StatusPermanentRedirect || req.Response.StatusCode == http.StatusTemporaryRedirect) { - ap.logger.Debug("not attaching authorization header as the response contains a redirect") - } else { - ap.logger.Debug("attaching authorization header") - req.Header.Add("Authorization", fmt.Sprintf("%v %v", ap.Scheme, token)) - } - return nil -} - -type tokenEndpointResponse struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - ExpiresIn int64 `json:"expires_in"` -} - -type awsKmsKeyConfig struct { - Name string `json:"name"` - Algorithm string `json:"algorithm"` -} - -type azureKeyVaultConfig struct { - Key string `json:"key"` - KeyVersion string `json:"key_version"` - Alg string `json:"key_algorithm"` - Vault string `json:"vault"` - URL *url.URL - APIVersion string `json:"api_version"` -} - -func convertSignatureToBase64(alg string, der []byte) (string, error) { - r, s, derErr := pointsFromDER(der) - if derErr != nil { - return "", fmt.Errorf("failed to read points from der %v", derErr) - } - - signatureData, err := convertPointsToBase64(alg, r.Bytes(), s.Bytes()) - if err != nil { - return "", err - } - return signatureData, nil -} - -func pointsFromDER(der []byte) (R, S *big.Int, err error) { //nolint:gocritic - R, S = &big.Int{}, &big.Int{} - data := asn1.RawValue{} - if _, err := asn1.Unmarshal(der, &data); err != nil { - return nil, nil, fmt.Errorf("failed to unmarshall the signature from DER format 
%v", err) - - } - // https://docs.aws.amazon.com/kms/latest/APIReference/API_Sign.html#API_Sign_ResponseSyntax - // https://datatracker.ietf.org/doc/html/rfc3279#section-2.2.3 - // The format of our DER string is 0x02 + rlen + r + 0x02 + slen + s - rLen := data.Bytes[1] // The entire length of R + offset of 2 for 0x02 and rlen - r := data.Bytes[2 : rLen+2] - // Ignore the next 0x02 and slen bytes and just take the start of S to the end of the byte array - s := data.Bytes[rLen+4:] - R.SetBytes(r) - S.SetBytes(s) - return -} - -func convertPointsToBase64(alg string, r, s []byte) (string, error) { - curveBits, err := retrieveCurveBits(alg) - if err != nil { - return "", err - } - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes++ - } - // We serialize the outputs (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(r):], r) - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(s):], s) - signatureEnc := append(rBytesPadded, sBytesPadded...) 
- - return base64.RawURLEncoding.EncodeToString(signatureEnc), nil -} - -func retrieveCurveBits(alg string) (int, error) { - var curveBits int - switch alg { - case "ECDSA_SHA_256": - curveBits = 256 - case "ECDSA_SHA_384": - curveBits = 384 - case "ECDSA_SHA_512": - curveBits = 512 - default: - return 0, fmt.Errorf("unsupported sign algorithm %s", alg) - } - return curveBits, nil -} - -func messageDigest(message []byte, alg string) ([]byte, error) { - var digest hash.Hash - - switch alg { - case "ECDSA_SHA_256", "ES256", "ES256K", "PS256", "RS256": - digest = sha256.New() - case "ECDSA_SHA_384", "ES384", "PS384", "RS384": - digest = sha512.New384() - case "ECDSA_SHA_512", "ES512", "PS512", "RS512": - digest = sha512.New() - default: - return []byte{}, fmt.Errorf("unsupported sign algorithm %s", alg) - } - - _, err := digest.Write(message) - if err != nil { - return nil, err - } - return digest.Sum(nil), nil -} - -// oauth2ClientCredentialsAuthPlugin represents authentication via a bearer token in the HTTP Authorization header -// obtained through the OAuth2 client credentials flow -type oauth2ClientCredentialsAuthPlugin struct { - GrantType string `json:"grant_type"` - TokenURL string `json:"token_url"` - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - SigningKeyID string `json:"signing_key"` - Thumbprint string `json:"thumbprint"` - Claims map[string]any `json:"additional_claims"` - IncludeJti bool `json:"include_jti_claim"` - Scopes []string `json:"scopes,omitempty"` - AdditionalHeaders map[string]string `json:"additional_headers,omitempty"` - AdditionalParameters map[string]string `json:"additional_parameters,omitempty"` - AWSKmsKey *awsKmsKeyConfig `json:"aws_kms,omitempty"` - AWSSigningPlugin *awsSigningAuthPlugin `json:"aws_signing,omitempty"` - AzureKeyVault *azureKeyVaultConfig `json:"azure_keyvault,omitempty"` - AzureSigningPlugin *azureSigningAuthPlugin `json:"azure_signing,omitempty"` - ClientAssertionType string 
`json:"client_assertion_type"` - ClientAssertion string `json:"client_assertion"` - ClientAssertionPath string `json:"client_assertion_path"` - - signingKey *keys.Config - signingKeyParsed any - tokenCache *oauth2Token - tlsSkipVerify bool - logger logging.Logger -} - -type oauth2Token struct { - Token string - ExpiresAt time.Time -} - -func (ap *oauth2ClientCredentialsAuthPlugin) createJWSParts(extClaims map[string]any) ([]byte, []byte, string, error) { - now := time.Now() - claims := map[string]any{ - "iat": now.Unix(), - "exp": now.Add(10 * time.Minute).Unix(), - } - maps.Copy(claims, extClaims) - - if len(ap.Scopes) > 0 { - claims["scope"] = strings.Join(ap.Scopes, " ") - } - - if ap.IncludeJti { - jti, err := uuid.New(rand.Reader) - if err != nil { - return nil, nil, "", err - } - claims["jti"] = jti - } - - payload, err := json.Marshal(claims) - if err != nil { - return nil, nil, "", err - } - - var jwsHeaders []byte - var signatureAlg string - switch { - case ap.AWSKmsKey == nil && ap.AzureKeyVault == nil: - signatureAlg = ap.signingKey.Algorithm - case ap.AWSKmsKey != nil && ap.AWSKmsKey.Algorithm != "": - signatureAlg, err = ap.mapKMSAlgToSign(ap.AWSKmsKey.Algorithm) - if err != nil { - return nil, nil, "", err - } - case ap.AzureKeyVault != nil && ap.AzureKeyVault.Alg != "": - signatureAlg = ap.AzureKeyVault.Alg - } - if ap.Thumbprint != "" { - bytes, err := hex.DecodeString(ap.Thumbprint) - if err != nil { - return nil, nil, "", err - } - x5t := base64.URLEncoding.EncodeToString(bytes) - jwsHeaders = fmt.Appendf(nil, `{"typ":"JWT","alg":"%s","x5t":"%s"}`, signatureAlg, x5t) - } else { - jwsHeaders = fmt.Appendf(nil, `{"typ":"JWT","alg":"%s"}`, signatureAlg) - } - - return jwsHeaders, payload, signatureAlg, nil -} - -func (ap *oauth2ClientCredentialsAuthPlugin) createAuthJWT(ctx context.Context, extClaims map[string]any, signingKey any) (*string, error) { - header, payload, alg, err := ap.createJWSParts(extClaims) - if err != nil { - return nil, err - } - 
- var clientAssertion []byte - switch { - case ap.AWSKmsKey != nil: - clientAssertion, err = ap.SignWithKMS(ctx, payload, header) - case ap.AzureKeyVault != nil: - clientAssertion, err = ap.SignWithKeyVault(ctx, payload, header) - default: - // Parse the algorithm string to jwa.SignatureAlgorithm - algObj, ok := jwa.LookupSignatureAlgorithm(alg) - if !ok { - return nil, fmt.Errorf("unknown signature algorithm: %s", alg) - } - - // Parse headers - var headers map[string]interface{} - if err := json.Unmarshal(header, &headers); err != nil { - return nil, err - } - - // Create protected headers - protectedHeaders := jws.NewHeaders() - for k, v := range headers { - if err := protectedHeaders.Set(k, v); err != nil { - return nil, err - } - } - - clientAssertion, err = jws.Sign(payload, - jws.WithKey(algObj, signingKey, jws.WithProtectedHeaders(protectedHeaders))) - } - if err != nil { - return nil, err - } - jwt := string(clientAssertion) - - return &jwt, nil -} - -func (*oauth2ClientCredentialsAuthPlugin) mapKMSAlgToSign(alg string) (string, error) { - switch alg { - case "ECDSA_SHA_256": - return "ES256", nil - case "ECDSA_SHA_384": - return "ES384", nil - case "ECDSA_SHA_512": - return "ES512", nil - default: - return "", fmt.Errorf("unsupported sign algorithm %s", alg) - } -} - -// SignWithKMS will sign the JWT in AWS using the key stored in the supplied kmsArn -func (ap *oauth2ClientCredentialsAuthPlugin) SignWithKMS(ctx context.Context, payload []byte, hdrBuf []byte) ([]byte, error) { - - encodedHdr := base64.RawURLEncoding.EncodeToString(hdrBuf) - encodedPayload := base64.RawURLEncoding.EncodeToString(payload) - input := encodedHdr + "." 
+ encodedPayload - digest, err := messageDigest([]byte(input), ap.AWSKmsKey.Algorithm) - if err != nil { - return nil, err - } - if ap.AWSSigningPlugin != nil { - signature, err := ap.AWSSigningPlugin.SignDigest(ctx, digest, ap.AWSKmsKey.Name, ap.AWSKmsKey.Algorithm) - if err != nil { - return nil, err - } - der, err := base64.StdEncoding.DecodeString(signature) - if err != nil { - return nil, err - } - signatureData, err := convertSignatureToBase64(ap.AWSKmsKey.Algorithm, der) - if err != nil { - return nil, err - } - - signedAssertion := input + "." + signatureData - - return []byte(signedAssertion), nil - } - return nil, errors.New("missing AWS credentials, failed to sign the assertion with kms") -} - -func (ap *oauth2ClientCredentialsAuthPlugin) SignWithKeyVault(ctx context.Context, payload []byte, hdrBuf []byte) ([]byte, error) { - if ap.AzureSigningPlugin == nil { - return nil, errors.New("missing Azure credentials, failed to sign the assertion with KeyVault") - } - - encodedHdr := base64.RawURLEncoding.EncodeToString(hdrBuf) - encodedPayload := base64.RawURLEncoding.EncodeToString(payload) - input := encodedHdr + "." + encodedPayload - digest, err := messageDigest([]byte(input), ap.AzureSigningPlugin.keyVaultSignPlugin.config.Alg) - if err != nil { - fmt.Println("unsupported algorithm", ap.AzureSigningPlugin.keyVaultSignPlugin.config.Alg) - return nil, err - } - - signature, err := ap.AzureSigningPlugin.SignDigest(ctx, digest) - if err != nil { - return nil, err - } - - return []byte(input + "." 
+ signature), nil -} - -func (ap *oauth2ClientCredentialsAuthPlugin) parseSigningKey(c Config) (err error) { - if ap.SigningKeyID == "" { - return errors.New("signing_key required for jwt_bearer grant type") - } - - if val, ok := c.keys[ap.SigningKeyID]; ok { - if val.PrivateKey == "" { - return errors.New("referenced signing_key does not include a private key") - } - ap.signingKey = val - } else { - return errors.New("signing_key refers to non-existent key") - } - - alg, ok := jwa.LookupSignatureAlgorithm(ap.signingKey.Algorithm) - if !ok { - return fmt.Errorf("unknown signature algorithm: %s", ap.signingKey.Algorithm) - } - - // Parse the private key directly - keyData := ap.signingKey.PrivateKey - - // For HMAC algorithms, return the key as bytes - if alg == jwa.HS256() || alg == jwa.HS384() || alg == jwa.HS512() { - ap.signingKeyParsed = []byte(keyData) - return nil - } - - // For RSA/ECDSA algorithms, parse the PEM-encoded key - block, _ := pem.Decode([]byte(keyData)) - if block == nil { - return errors.New("failed to decode PEM key") - } - - switch block.Type { - case "RSA PRIVATE KEY": - ap.signingKeyParsed, err = x509.ParsePKCS1PrivateKey(block.Bytes) - case "PRIVATE KEY": - ap.signingKeyParsed, err = x509.ParsePKCS8PrivateKey(block.Bytes) - case "EC PRIVATE KEY": - ap.signingKeyParsed, err = x509.ParseECPrivateKey(block.Bytes) - default: - return fmt.Errorf("unsupported key type: %s", block.Type) - } - - if err != nil { - return err - } - - return nil -} - -func (ap *oauth2ClientCredentialsAuthPlugin) NewClient(c Config) (*http.Client, error) { - t, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - - if ap.GrantType == "" { - // Use client_credentials as default to not break existing config - ap.GrantType = grantTypeClientCredentials - } else if ap.GrantType != grantTypeClientCredentials && ap.GrantType != grantTypeJwtBearer { - return nil, errors.New("grant_type must be either client_credentials or jwt_bearer") - } - - if ap.GrantType 
== grantTypeJwtBearer || (ap.GrantType == grantTypeClientCredentials && ap.SigningKeyID != "") { - if err = ap.parseSigningKey(c); err != nil { - return nil, err - } - } - - // Inherit skip verify from the "parent" settings. Should this be configurable on the credentials too? - ap.tlsSkipVerify = c.AllowInsecureTLS - - ap.logger = c.logger - - if !strings.HasPrefix(ap.TokenURL, "https://") { - return nil, errors.New("token_url required to use https scheme") - } - if ap.GrantType == grantTypeClientCredentials { - clientCredentialExists := make(map[string]bool) - clientCredentialExists["client_secret"] = ap.ClientSecret != "" - clientCredentialExists["signing_key"] = ap.SigningKeyID != "" - clientCredentialExists["aws_kms"] = ap.AWSKmsKey != nil - clientCredentialExists["azure_keyvault"] = ap.AzureKeyVault != nil - clientCredentialExists["client_assertion"] = ap.ClientAssertion != "" - clientCredentialExists["client_assertion_path"] = ap.ClientAssertionPath != "" - - var notEmptyVarCount int - - for _, credentialSet := range clientCredentialExists { - if credentialSet { - notEmptyVarCount++ - } - } - - if notEmptyVarCount == 0 { - return nil, errors.New("please provide one of client_secret, signing_key, aws_kms, azure_keyvault, client_assertion, or client_assertion_path required") - } - - if notEmptyVarCount > 1 { - return nil, errors.New("can only use one of client_secret, signing_key, aws_kms, azure_keyvault, client_assertion, or client_assertion_path") - } - - switch { - case clientCredentialExists["aws_kms"]: - if ap.AWSSigningPlugin == nil { - return nil, errors.New("aws_kms and aws_signing required") - } - // initialize the awsSigningAuthPlugin - _, err = ap.AWSSigningPlugin.NewClient(c) - if err != nil { - return nil, err - } - case clientCredentialExists["azure_keyvault"]: - _, err := ap.AzureSigningPlugin.NewClient(c) - if err != nil { - return nil, err - } - case clientCredentialExists["client_assertion"]: - if ap.ClientAssertionType == "" { - 
ap.ClientAssertionType = defaultClientAssertionType - } - if ap.ClientID == "" { - return nil, errors.New("client_id and client_assertion required") - } - case clientCredentialExists["client_assertion_path"]: - if ap.ClientAssertionType == "" { - ap.ClientAssertionType = defaultClientAssertionType - } - if ap.ClientID == "" { - return nil, errors.New("client_id and client_assertion_path required") - } - case clientCredentialExists["client_secret"] && ap.ClientID == "": - return nil, errors.New("client_id and client_secret required") - } - } - - return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil -} - -func (ap *oauth2ClientCredentialsAuthPlugin) createTokenReqBody(ctx context.Context) (url.Values, error) { - body := url.Values{} - - if len(ap.Scopes) > 0 { - body.Add("scope", strings.Join(ap.Scopes, " ")) - } - - for k, v := range ap.AdditionalParameters { - body.Set(k, v) - } - - if ap.GrantType == grantTypeJwtBearer { - authJWT, err := ap.createAuthJWT(ctx, ap.Claims, ap.signingKeyParsed) - if err != nil { - return nil, err - } - body.Add("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer") - body.Add("assertion", *authJWT) - return body, nil - } - - body.Add("grant_type", grantTypeClientCredentials) - - switch { - case ap.SigningKeyID != "" || ap.AWSKmsKey != nil || ap.AzureKeyVault != nil: - authJwt, err := ap.createAuthJWT(ctx, ap.Claims, ap.signingKeyParsed) - if err != nil { - return nil, err - } - body.Add("client_assertion_type", defaultClientAssertionType) - body.Add("client_assertion", *authJwt) - - if ap.ClientID != "" { - body.Add("client_id", ap.ClientID) - } - case ap.ClientAssertion != "": - if ap.ClientAssertionType == "" { - ap.ClientAssertionType = defaultClientAssertionType - } - if ap.ClientID != "" { - body.Add("client_id", ap.ClientID) - } - body.Add("client_assertion_type", ap.ClientAssertionType) - body.Add("client_assertion", ap.ClientAssertion) - - case ap.ClientAssertionPath != "": - if 
ap.ClientAssertionType == "" { - ap.ClientAssertionType = defaultClientAssertionType - } - bytes, err := os.ReadFile(ap.ClientAssertionPath) - if err != nil { - return nil, err - } - if ap.ClientID != "" { - body.Add("client_id", ap.ClientID) - } - body.Add("client_assertion_type", ap.ClientAssertionType) - body.Add("client_assertion", strings.TrimSpace(string(bytes))) - } - - return body, nil -} - -// requestToken tries to obtain an access token using either the client credentials flow -// https://tools.ietf.org/html/rfc6749#section-4.4 -// or the JWT authorization grant -// https://tools.ietf.org/html/rfc7523 -func (ap *oauth2ClientCredentialsAuthPlugin) requestToken(ctx context.Context) (*oauth2Token, error) { - body, err := ap.createTokenReqBody(ctx) - if err != nil { - return nil, err - } - - r, err := http.NewRequestWithContext(ctx, http.MethodPost, ap.TokenURL, strings.NewReader(body.Encode())) - if err != nil { - return nil, err - } - r.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - if ap.GrantType == grantTypeClientCredentials && ap.ClientSecret != "" { - r.SetBasicAuth(ap.ClientID, ap.ClientSecret) - } - - for k, v := range ap.AdditionalHeaders { - r.Header.Add(k, v) - } - - client := DefaultRoundTripperClient(&tls.Config{InsecureSkipVerify: ap.tlsSkipVerify}, 10) - response, err := client.Do(r) - if err != nil { - return nil, err - } - defer response.Body.Close() - - bodyRaw, err := io.ReadAll(response.Body) - if err != nil { - return nil, err - } - - if response.StatusCode != 200 { - return nil, fmt.Errorf("error in response from OAuth2 token endpoint: %v", string(bodyRaw)) - } - - var tokenResponse tokenEndpointResponse - err = json.Unmarshal(bodyRaw, &tokenResponse) - if err != nil { - return nil, err - } - - if !strings.EqualFold(tokenResponse.TokenType, "bearer") { - return nil, errors.New("unknown token type returned from token endpoint") - } - - return &oauth2Token{ - Token: strings.TrimSpace(tokenResponse.AccessToken), - 
ExpiresAt: time.Now().Add(time.Duration(tokenResponse.ExpiresIn) * time.Second), - }, nil -} - -func (ap *oauth2ClientCredentialsAuthPlugin) Prepare(req *http.Request) error { - minTokenLifetime := float64(10) - if ap.tokenCache == nil || time.Until(ap.tokenCache.ExpiresAt).Seconds() < minTokenLifetime { - ap.logger.Debug("Requesting token from token_url %v", ap.TokenURL) - token, err := ap.requestToken(req.Context()) - if err != nil { - return err - } - ap.tokenCache = token - } - - req.Header.Add("Authorization", fmt.Sprintf("Bearer %v", ap.tokenCache.Token)) - return nil -} - -// clientTLSAuthPlugin represents authentication via client certificate on a TLS connection -type clientTLSAuthPlugin struct { - Cert string `json:"cert"` - PrivateKey string `json:"private_key"` - PrivateKeyPassphrase string `json:"private_key_passphrase,omitempty"` - CACert string `json:"ca_cert,omitempty"` // Deprecated: Use `services[_].tls.ca_cert` instead - SystemCARequired bool `json:"system_ca_required,omitempty"` // Deprecated: Use `services[_].tls.system_ca_required` instead -} - -func (ap *clientTLSAuthPlugin) NewClient(c Config) (*http.Client, error) { - tlsConfig, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - - if ap.Cert == "" { - return nil, errors.New("client certificate is needed when client TLS is enabled") - } - if ap.PrivateKey == "" { - return nil, errors.New("private key is needed when client TLS is enabled") - } - - var keyPEMBlock []byte - data, err := os.ReadFile(ap.PrivateKey) - if err != nil { - return nil, err - } - - block, _ := pem.Decode(data) - if block == nil { - return nil, errors.New("PEM data could not be found") - } - - // nolint: staticcheck // We don't want to forbid users from using this encryption. 
- if x509.IsEncryptedPEMBlock(block) { - if ap.PrivateKeyPassphrase == "" { - return nil, errors.New("client certificate passphrase is needed, because the certificate is password encrypted") - } - // nolint: staticcheck // We don't want to forbid users from using this encryption. - block, err := x509.DecryptPEMBlock(block, []byte(ap.PrivateKeyPassphrase)) - if err != nil { - return nil, err - } - key, err := x509.ParsePKCS8PrivateKey(block) - if err != nil { - key, err = x509.ParsePKCS1PrivateKey(block) - if err != nil { - return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err) - } - } - rsa, ok := key.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("private key is invalid") - } - keyPEMBlock = pem.EncodeToMemory( - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(rsa), - }, - ) - } else { - keyPEMBlock = data - } - - certPEMBlock, err := os.ReadFile(ap.Cert) - if err != nil { - return nil, err - } - - cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) - if err != nil { - return nil, err - } - tlsConfig.Certificates = []tls.Certificate{cert} - - var client *http.Client - - if c.TLS != nil && c.TLS.CACert != "" { - client = DefaultRoundTripperClient(tlsConfig, *c.ResponseHeaderTimeoutSeconds) - } else { - if ap.CACert != "" { - c.logger.Warn("Deprecated 'services[_].credentials.client_tls.ca_cert' configuration specified. Use 'services[_].tls.ca_cert' instead. 
See https://www.openpolicyagent.org/docs/latest/configuration/#services") - caCert, err := os.ReadFile(ap.CACert) - if err != nil { - return nil, err - } - - var caCertPool *x509.CertPool - if ap.SystemCARequired { - caCertPool, err = x509.SystemCertPool() - if err != nil { - return nil, err - } - } else { - caCertPool = x509.NewCertPool() - } - - ok := caCertPool.AppendCertsFromPEM(caCert) - if !ok { - return nil, errors.New("unable to parse and append CA certificate to certificate pool") - } - tlsConfig.RootCAs = caCertPool - } - - client = DefaultRoundTripperClient(tlsConfig, *c.ResponseHeaderTimeoutSeconds) - } - - return client, nil -} - -func (*clientTLSAuthPlugin) Prepare(_ *http.Request) error { - return nil -} - -// awsSigningAuthPlugin represents authentication using AWS V4 HMAC signing in the Authorization header -type awsSigningAuthPlugin struct { - AWSEnvironmentCredentials *awsEnvironmentCredentialService `json:"environment_credentials,omitempty"` - AWSMetadataCredentials *awsMetadataCredentialService `json:"metadata_credentials,omitempty"` - AWSAssumeRoleCredentials *awsAssumeRoleCredentialService `json:"assume_role_credentials,omitempty"` - AWSWebIdentityCredentials *awsWebIdentityCredentialService `json:"web_identity_credentials,omitempty"` - AWSProfileCredentials *awsProfileCredentialService `json:"profile_credentials,omitempty"` - AWSSSOCredentials *awsSSOCredentialsService `json:"sso_credentials,omitempty"` - - AWSService string `json:"service,omitempty"` - AWSSignatureVersion string `json:"signature_version,omitempty"` - - host string - ecrAuthPlugin *ecrAuthPlugin - kmsSignPlugin *awsKMSSignPlugin - - logger logging.Logger -} - -type awsCredentialServiceChain struct { - awsCredentialServices []awsCredentialService - logger logging.Logger -} - -func (acs *awsCredentialServiceChain) addService(service awsCredentialService) { - acs.awsCredentialServices = append(acs.awsCredentialServices, service) -} - -type awsCredentialCheckErrors 
[]*awsCredentialCheckError - -func (e awsCredentialCheckErrors) Error() string { - - if len(e) == 0 { - return "no error(s)" - } - - if len(e) == 1 { - return fmt.Sprintf("1 error occurred: %v", e[0].Error()) - } - - s := make([]string, len(e)) - for i, err := range e { - s[i] = err.Error() - } - - return fmt.Sprintf("%d errors occurred:\n%s", len(e), strings.Join(s, "\n")) -} - -type awsCredentialCheckError struct { - message string -} - -func newAWSCredentialError(message string) *awsCredentialCheckError { - return &awsCredentialCheckError{ - message: message, - } -} - -func (e *awsCredentialCheckError) Error() string { - return e.message -} - -func (acs *awsCredentialServiceChain) credentials(ctx context.Context) (aws.Credentials, error) { - var errs awsCredentialCheckErrors - - for _, service := range acs.awsCredentialServices { - credential, err := service.credentials(ctx) - if err != nil { - acs.logger.Debug("awsSigningAuthPlugin:%T failed: %v", service, err) - - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return aws.Credentials{}, err - } - - errs = append(errs, newAWSCredentialError(err.Error())) - continue - } - - acs.logger.Debug("awsSigningAuthPlugin:%T successful", service) - return credential, nil - } - - return aws.Credentials{}, fmt.Errorf("all AWS credential providers failed: %v", errs) -} - -func (ap *awsSigningAuthPlugin) awsCredentialService() awsCredentialService { - chain := awsCredentialServiceChain{ - logger: ap.logger, - } - - /* - Here we maintain the order of addition to the chain inline with - the order of credential providers followed by default by the - AWS SDK. 
For example - - https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/DefaultAWSCredentialsProviderChain.html - */ - - if ap.AWSEnvironmentCredentials != nil { - ap.AWSEnvironmentCredentials.logger = ap.logger - chain.addService(ap.AWSEnvironmentCredentials) - } - - if ap.AWSAssumeRoleCredentials != nil { - ap.AWSAssumeRoleCredentials.logger = ap.logger - chain.addService(ap.AWSAssumeRoleCredentials) - } - - if ap.AWSWebIdentityCredentials != nil { - ap.AWSWebIdentityCredentials.logger = ap.logger - chain.addService(ap.AWSWebIdentityCredentials) - } - - if ap.AWSProfileCredentials != nil { - ap.AWSProfileCredentials.logger = ap.logger - chain.addService(ap.AWSProfileCredentials) - } - - if ap.AWSMetadataCredentials != nil { - ap.AWSMetadataCredentials.logger = ap.logger - chain.addService(ap.AWSMetadataCredentials) - } - - if ap.AWSSSOCredentials != nil { - ap.AWSSSOCredentials.logger = ap.logger - chain.addService(ap.AWSSSOCredentials) - } - - return &chain -} - -func (ap *awsSigningAuthPlugin) NewClient(c Config) (*http.Client, error) { - t, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - - url, err := url.Parse(c.URL) - if err != nil { - return nil, err - } - - ap.host = url.Host - - if ap.logger == nil { - ap.logger = c.logger - } - - if err := ap.validateAndSetDefaults(c.Type); err != nil { - return nil, err - } - - return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil -} - -func (ap *awsSigningAuthPlugin) Prepare(req *http.Request) error { - if ap.host != req.URL.Host { - // Return early if the host does not match. - // This can happen when the OCI registry responded with a redirect to another host. - // For instance, ECR redirects to S3 and the ECR auth header should not be included in the S3 request. 
- return nil - } - - switch ap.AWSService { - case "ecr": - return ap.ecrAuthPlugin.Prepare(req) - default: - creds, err := ap.awsCredentialService().credentials(req.Context()) - if err != nil { - return fmt.Errorf("failed to get aws credentials: %w", err) - } - - ap.logger.Debug("Signing request with AWS credentials.") - - return aws.SignRequest(req, ap.AWSService, creds, time.Now(), ap.AWSSignatureVersion) - } -} - -func (ap *awsSigningAuthPlugin) validateAndSetDefaults(serviceType string) error { - cfgs := map[bool]int{} - cfgs[ap.AWSEnvironmentCredentials != nil]++ - cfgs[ap.AWSMetadataCredentials != nil]++ - cfgs[ap.AWSAssumeRoleCredentials != nil]++ - cfgs[ap.AWSWebIdentityCredentials != nil]++ - cfgs[ap.AWSProfileCredentials != nil]++ - cfgs[ap.AWSSSOCredentials != nil]++ - - if cfgs[true] == 0 { - return errors.New("a AWS credential service must be specified when S3 signing is enabled") - } - - if ap.AWSMetadataCredentials != nil { - if ap.AWSMetadataCredentials.RegionName == "" { - return errors.New("at least aws_region must be specified for AWS metadata credential service") - } - } - - if ap.AWSAssumeRoleCredentials != nil { - if err := ap.AWSAssumeRoleCredentials.populateFromEnv(); err != nil { - return err - } - } - - if ap.AWSWebIdentityCredentials != nil { - if err := ap.AWSWebIdentityCredentials.populateFromEnv(); err != nil { - return err - } - } - - ap.AWSService = strings.ToLower(ap.AWSService) - - // Only allow ECR for OCI service types - if serviceType == "oci" { - if ap.AWSService == "" { - ap.AWSService = "ecr" - } - - if ap.AWSService != "ecr" { - return fmt.Errorf(`cannot use aws service %q with service type "oci"`, ap.AWSService) - } - - // We need to setup a special auth plugin for ECR. 
- ap.ecrAuthPlugin = newECRAuthPlugin(ap) - } else { - // Disallow ECR for non-OCI service types - if ap.AWSService == "ecr" { - return errors.New(`aws service "ecr" must be used with service type "oci"`) - } - if ap.AWSService == "kms" && ap.kmsSignPlugin == nil { - // We need a special plugin for KMS. - ap.kmsSignPlugin = newKMSSignPlugin(ap) - } - if ap.AWSService == "" { - ap.AWSService = awsSigv4SigningDefaultService - } - } - - if ap.AWSSignatureVersion == "" { - ap.AWSSignatureVersion = "4" - } - - return nil -} - -func (ap *awsSigningAuthPlugin) SignDigest(ctx context.Context, digest []byte, keyID string, signingAlgorithm string) (string, error) { - switch ap.AWSService { - case "kms": - return ap.kmsSignPlugin.SignDigest(ctx, digest, keyID, signingAlgorithm) - default: - return "", fmt.Errorf(`cannot use SignDigest with aws service %q`, ap.AWSService) - } -} - -type azureSigningAuthPlugin struct { - MIAuthPlugin *azureManagedIdentitiesAuthPlugin `json:"azure_managed_identity,omitempty"` - keyVaultSignPlugin *azureKeyVaultSignPlugin - keyVaultConfig *azureKeyVaultConfig - host string - Service string `json:"service"` - logger logging.Logger -} - -func (ap *azureSigningAuthPlugin) NewClient(c Config) (*http.Client, error) { - t, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - - tknURL, err := url.Parse(c.URL) - if err != nil { - return nil, err - } - - ap.host = tknURL.Host - - if ap.logger == nil { - ap.logger = c.logger - } - - if c.Credentials.OAuth2.AzureKeyVault == nil { - return nil, errors.New("missing keyvault config") - } - ap.keyVaultConfig = c.Credentials.OAuth2.AzureKeyVault - - if err := ap.validateAndSetDefaults(); err != nil { - return nil, err - } - - return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil -} - -func (ap *azureSigningAuthPlugin) validateAndSetDefaults() error { - if ap.MIAuthPlugin == nil { - return errors.New("missing azure managed identity config") - } - ap.MIAuthPlugin.setDefaults() - 
- if ap.keyVaultSignPlugin != nil { - return nil - } - ap.keyVaultConfig.URL = &url.URL{ - Scheme: "https", - Host: ap.keyVaultConfig.Vault + ".vault.azure.net", - } - ap.keyVaultSignPlugin = newKeyVaultSignPlugin(ap.MIAuthPlugin, ap.keyVaultConfig) - ap.keyVaultSignPlugin.setDefaults() - ap.keyVaultConfig = &ap.keyVaultSignPlugin.config - - return nil -} - -func (ap *azureSigningAuthPlugin) Prepare(req *http.Request) error { - switch ap.Service { - case "keyvault": - tkn, err := ap.keyVaultSignPlugin.tokener() - if err != nil { - return err - } - req.Header.Add("Authorization", "Bearer "+tkn) - return nil - default: - return fmt.Errorf("azureSigningAuthPlugin.Prepare() with %s not supported", ap.Service) - } -} - -func (ap *azureSigningAuthPlugin) SignDigest(ctx context.Context, digest []byte) (string, error) { - switch ap.Service { - case "keyvault": - return ap.keyVaultSignPlugin.SignDigest(ctx, digest) - default: - return "", fmt.Errorf(`cannot use SignDigest with azure service %q`, ap.Service) - } -} diff --git a/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go deleted file mode 100644 index 45c708ab80..0000000000 --- a/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go +++ /dev/null @@ -1,1088 +0,0 @@ -// Copyright 2019 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package rest - -import ( - "bytes" - "context" - "crypto/sha1" - "encoding/hex" - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "strings" - "time" - - "github.com/go-ini/ini" - "github.com/open-policy-agent/opa/internal/providers/aws" - "github.com/open-policy-agent/opa/v1/logging" -) - -const ( - // ref. 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - ec2DefaultCredServicePath = "http://169.254.169.254/latest/meta-data/iam/security-credentials/" - - // ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html - ec2DefaultTokenPath = "http://169.254.169.254/latest/api/token" - - // ref. https://docs.aws.amazon.com/AmazonECS/latest/userguide/task-iam-roles.html - ecsDefaultCredServicePath = "http://169.254.170.2" - ecsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" - ecsFullPathEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" - ecsAuthorizationTokenEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" - ecsAuthorizationTokenFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE" - - // ref. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html - stsDefaultDomain = "amazonaws.com" - stsDefaultPath = "https://sts.%s" - stsRegionPath = "https://sts.%s.%s" - - // ref. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html - accessKeyEnvVar = "AWS_ACCESS_KEY_ID" - secretKeyEnvVar = "AWS_SECRET_ACCESS_KEY" - securityTokenEnvVar = "AWS_SECURITY_TOKEN" - sessionTokenEnvVar = "AWS_SESSION_TOKEN" - awsRegionEnvVar = "AWS_REGION" - awsDomainEnvVar = "AWS_DOMAIN" - awsRoleArnEnvVar = "AWS_ROLE_ARN" - awsWebIdentityTokenFileEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE" - awsCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" - awsConfigFileEnvVar = "AWS_CONFIG_FILE" - awsProfileEnvVar = "AWS_PROFILE" - - // ref. 
https://docs.aws.amazon.com/sdkref/latest/guide/settings-global.html - accessKeyGlobalSetting = "aws_access_key_id" - secretKeyGlobalSetting = "aws_secret_access_key" - securityTokenGlobalSetting = "aws_session_token" -) - -// awsCredentialService represents the interface for AWS credential providers -type awsCredentialService interface { - credentials(context.Context) (aws.Credentials, error) -} - -// awsEnvironmentCredentialService represents an static environment-variable credential provider for AWS -type awsEnvironmentCredentialService struct { - logger logging.Logger -} - -func (*awsEnvironmentCredentialService) credentials(context.Context) (aws.Credentials, error) { - var creds aws.Credentials - creds.AccessKey = os.Getenv(accessKeyEnvVar) - if creds.AccessKey == "" { - return creds, errors.New("no " + accessKeyEnvVar + " set in environment") - } - creds.SecretKey = os.Getenv(secretKeyEnvVar) - if creds.SecretKey == "" { - return creds, errors.New("no " + secretKeyEnvVar + " set in environment") - } - creds.RegionName = os.Getenv(awsRegionEnvVar) - if creds.RegionName == "" { - return creds, errors.New("no " + awsRegionEnvVar + " set in environment") - } - // SessionToken is required if using temporary ENV credentials from assumed IAM role - // Missing SessionToken results with 403 s3 error. 
- creds.SessionToken = os.Getenv(sessionTokenEnvVar) - if creds.SessionToken == "" { - // In case of missing SessionToken try to get SecurityToken - // AWS switched to use SessionToken, but SecurityToken was left for backward compatibility - creds.SessionToken = os.Getenv(securityTokenEnvVar) - } - - return creds, nil -} - -type ssoSessionDetails struct { - StartUrl string `json:"startUrl"` - Region string `json:"region"` - Name string - AccountID string - RoleName string - AccessToken string `json:"accessToken"` - ExpiresAt time.Time `json:"expiresAt"` - RegistrationExpiresAt time.Time `json:"registrationExpiresAt"` - RefreshToken string `json:"refreshToken"` - ClientId string `json:"clientId"` - ClientSecret string `json:"clientSecret"` -} - -type awsSSOCredentialsService struct { - Path string `json:"path,omitempty"` - SSOCachePath string `json:"cache_path,omitempty"` - - Profile string `json:"profile,omitempty"` - - logger logging.Logger - - creds aws.Credentials - - credentialsExpiresAt time.Time - - session *ssoSessionDetails -} - -func (cs *awsSSOCredentialsService) configPath() (string, error) { - if len(cs.Path) != 0 { - return cs.Path, nil - } - - if cs.Path = os.Getenv(awsConfigFileEnvVar); len(cs.Path) != 0 { - return cs.Path, nil - } - - homeDir, err := os.UserHomeDir() - if err != nil { - return "", fmt.Errorf("user home directory not found: %w", err) - } - - cs.Path = filepath.Join(homeDir, ".aws", "config") - - return cs.Path, nil -} -func (cs *awsSSOCredentialsService) ssoCachePath() (string, error) { - if len(cs.SSOCachePath) != 0 { - return cs.SSOCachePath, nil - } - - homeDir, err := os.UserHomeDir() - if err != nil { - return "", fmt.Errorf("user home directory not found: %w", err) - } - - cs.Path = filepath.Join(homeDir, ".aws", "sso", "cache") - - return cs.Path, nil -} - -func (cs *awsSSOCredentialsService) cacheKeyFileName() (string, error) { - - val := cs.session.StartUrl - if cs.session.Name != "" { - val = cs.session.Name - } - - hash := 
sha1.New() - hash.Write([]byte(val)) - cacheKey := hex.EncodeToString(hash.Sum(nil)) - - return cacheKey + ".json", nil -} - -func (cs *awsSSOCredentialsService) loadSSOCredentials() error { - ssoCachePath, err := cs.ssoCachePath() - if err != nil { - return fmt.Errorf("failed to get sso cache path: %w", err) - } - - cacheKeyFile, err := cs.cacheKeyFileName() - if err != nil { - return err - } - - cacheFile := path.Join(ssoCachePath, cacheKeyFile) - cache, err := os.ReadFile(cacheFile) - if err != nil { - return fmt.Errorf("failed to load cache file: %v", err) - } - - if err := json.Unmarshal(cache, &cs.session); err != nil { - return fmt.Errorf("failed to unmarshal cache file: %v", err) - } - - return nil - -} - -func (cs *awsSSOCredentialsService) loadSession() error { - configPath, err := cs.configPath() - if err != nil { - return fmt.Errorf("failed to get config path: %w", err) - } - config, err := ini.Load(configPath) - if err != nil { - return fmt.Errorf("failed to load config file: %w", err) - } - - section, err := config.GetSection("profile " + cs.Profile) - - if err != nil { - return fmt.Errorf("failed to find profile %s", cs.Profile) - } - - accountID, err := section.GetKey("sso_account_id") - if err != nil { - return fmt.Errorf("failed to find sso_account_id key in profile %s", cs.Profile) - } - - region, err := section.GetKey("region") - if err != nil { - return fmt.Errorf("failed to find region key in profile %s", cs.Profile) - } - - roleName, err := section.GetKey("sso_role_name") - if err != nil { - return fmt.Errorf("failed to find sso_role_name key in profile %s", cs.Profile) - } - - ssoSession, err := section.GetKey("sso_session") - if err != nil { - return fmt.Errorf("failed to find sso_session key in profile %s", cs.Profile) - } - - sessionName := ssoSession.Value() - - session, err := config.GetSection("sso-session " + sessionName) - if err != nil { - return fmt.Errorf("failed to find sso-session %s", sessionName) - } - - startUrl, err := 
session.GetKey("sso_start_url") - if err != nil { - return fmt.Errorf("failed to find sso_start_url key in sso-session %s", sessionName) - } - - cs.session = &ssoSessionDetails{ - StartUrl: startUrl.Value(), - Name: sessionName, - AccountID: accountID.Value(), - Region: region.Value(), - RoleName: roleName.Value(), - } - - return nil -} - -func (cs *awsSSOCredentialsService) tryRefreshToken() error { - // Check if refresh token is empty - if cs.session.RefreshToken == "" { - return errors.New("refresh token is empty") - } - - // Use the refresh token to get a new access token - // using the clientId, clientSecret and refreshToken from the loaded token - // return the new token - // if error, return error - - type refreshTokenRequest struct { - ClientId string `json:"clientId"` - ClientSecret string `json:"clientSecret"` - RefreshToken string `json:"refreshToken"` - GrantType string `json:"grantType"` - } - - data := refreshTokenRequest{ - ClientId: cs.session.ClientId, - ClientSecret: cs.session.ClientSecret, - RefreshToken: cs.session.RefreshToken, - GrantType: "refresh_token", - } - - body, err := json.Marshal(data) - if err != nil { - return fmt.Errorf("failed to marshal refresh token request: %v", err) - } - - endpoint := fmt.Sprintf("https://oidc.%s.amazonaws.com/token", cs.session.Region) - r, err := http.NewRequest("POST", endpoint, bytes.NewReader(body)) - if err != nil { - return fmt.Errorf("failed to create new request: %v", err) - } - - r.Header.Add("Content-Type", "application/json") - c := &http.Client{} - resp, err := c.Do(r) - if err != nil { - return fmt.Errorf("failed to do request: %v", err) - } - defer resp.Body.Close() - - type refreshTokenResponse struct { - AccessToken string `json:"accessToken"` - ExpiresIn int `json:"expiresIn"` - RefreshToken string `json:"refreshToken"` - } - - refreshedToken := refreshTokenResponse{} - - if err := json.NewDecoder(resp.Body).Decode(&refreshedToken); err != nil { - return fmt.Errorf("failed to decode 
response: %v", err) - } - - cs.session.AccessToken = refreshedToken.AccessToken - cs.session.ExpiresAt = time.Now().Add(time.Duration(refreshedToken.ExpiresIn) * time.Second) - cs.session.RefreshToken = refreshedToken.RefreshToken - - return nil -} - -func (cs *awsSSOCredentialsService) refreshCredentials() error { - url := fmt.Sprintf("https://portal.sso.%s.amazonaws.com/federation/credentials?account_id=%s&role_name=%s", cs.session.Region, cs.session.AccountID, cs.session.RoleName) - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return err - } - - req.Header.Set("Authorization", "Bearer "+cs.session.AccessToken) - req.Header.Set("Content-Type", "application/json") - - client := &http.Client{} - resp, err := client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - type roleCredentials struct { - AccessKeyId string `json:"accessKeyId"` - SecretAccessKey string `json:"secretAccessKey"` - SessionToken string `json:"sessionToken"` - Expiration int64 `json:"expiration"` - } - type getRoleCredentialsResponse struct { - RoleCredentials roleCredentials `json:"roleCredentials"` - } - - var result getRoleCredentialsResponse - - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { - return fmt.Errorf("failed to decode response: %v", err) - } - - cs.creds = aws.Credentials{ - AccessKey: result.RoleCredentials.AccessKeyId, - SecretKey: result.RoleCredentials.SecretAccessKey, - SessionToken: result.RoleCredentials.SessionToken, - RegionName: cs.session.Region, - } - - cs.credentialsExpiresAt = time.Unix(result.RoleCredentials.Expiration, 0) - - return nil -} - -func (cs *awsSSOCredentialsService) loadProfile() { - if cs.Profile != "" { - return - } - - cs.Profile = os.Getenv(awsProfileEnvVar) - - if cs.Profile == "" { - cs.Profile = "default" - } - -} - -func (cs *awsSSOCredentialsService) init() error { - cs.loadProfile() - - if err := cs.loadSession(); err != nil { - return fmt.Errorf("failed to load session: %w", err) 
- } - - if err := cs.loadSSOCredentials(); err != nil { - return fmt.Errorf("failed to load SSO credentials: %w", err) - } - - // this enforces fetching credentials - cs.credentialsExpiresAt = time.Unix(0, 0) - return nil -} - -func (cs *awsSSOCredentialsService) credentials(context.Context) (aws.Credentials, error) { - if cs.session == nil { - if err := cs.init(); err != nil { - return aws.Credentials{}, err - } - } - - if cs.credentialsExpiresAt.Before(time.Now().Add(5 * time.Minute)) { - // Check if the sso token we have is still valid, - // if not, try to refresh it - if cs.session.ExpiresAt.Before(time.Now()) { - // we try and get a new token if we can - if cs.session.RegistrationExpiresAt.Before(time.Now()) { - return aws.Credentials{}, errors.New("cannot refresh token, registration expired") - } - - if err := cs.tryRefreshToken(); err != nil { - return aws.Credentials{}, fmt.Errorf("failed to refresh token: %w", err) - } - } - - if err := cs.refreshCredentials(); err != nil { - return aws.Credentials{}, fmt.Errorf("failed to refresh credentials: %w", err) - } - } - - return cs.creds, nil -} - -// awsProfileCredentialService represents a credential provider for AWS that extracts credentials from the AWS -// credentials file -type awsProfileCredentialService struct { - - // Path to the credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - Path string `json:"path,omitempty"` - - // AWS Profile to extract credentials from the credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. 
- Profile string `json:"profile,omitempty"` - - RegionName string `json:"aws_region"` - - logger logging.Logger -} - -func (cs *awsProfileCredentialService) credentials(context.Context) (aws.Credentials, error) { - var creds aws.Credentials - - filename, err := cs.path() - if err != nil { - return creds, err - } - - cfg, err := ini.Load(filename) - if err != nil { - return creds, fmt.Errorf("failed to read credentials file: %v", err) - } - - profile, err := cfg.GetSection(cs.profile()) - if err != nil { - return creds, fmt.Errorf("failed to get profile: %v", err) - } - - creds.AccessKey = profile.Key(accessKeyGlobalSetting).String() - if creds.AccessKey == "" { - return creds, fmt.Errorf("profile \"%v\" in credentials file %v does not contain \"%v\"", cs.Profile, cs.Path, accessKeyGlobalSetting) - } - - creds.SecretKey = profile.Key(secretKeyGlobalSetting).String() - if creds.SecretKey == "" { - return creds, fmt.Errorf("profile \"%v\" in credentials file %v does not contain \"%v\"", cs.Profile, cs.Path, secretKeyGlobalSetting) - } - - creds.SessionToken = profile.Key(securityTokenGlobalSetting).String() // default to empty string - - if cs.RegionName == "" { - if cs.RegionName = os.Getenv(awsRegionEnvVar); cs.RegionName == "" { - return creds, errors.New("no " + awsRegionEnvVar + " set in environment or configuration") - } - } - creds.RegionName = cs.RegionName - - return creds, nil -} - -func (cs *awsProfileCredentialService) path() (string, error) { - if len(cs.Path) != 0 { - return cs.Path, nil - } - - if cs.Path = os.Getenv(awsCredentialsFileEnvVar); len(cs.Path) != 0 { - return cs.Path, nil - } - - homeDir, err := os.UserHomeDir() - if err != nil { - return "", fmt.Errorf("user home directory not found: %w", err) - } - - cs.Path = filepath.Join(homeDir, ".aws", "credentials") - - return cs.Path, nil -} - -func (cs *awsProfileCredentialService) profile() string { - if cs.Profile != "" { - return cs.Profile - } - - cs.Profile = os.Getenv(awsProfileEnvVar) - - 
if cs.Profile == "" { - cs.Profile = "default" - } - - return cs.Profile -} - -// awsMetadataCredentialService represents an EC2 metadata service credential provider for AWS -type awsMetadataCredentialService struct { - RoleName string `json:"iam_role,omitempty"` - RegionName string `json:"aws_region"` - creds aws.Credentials - expiration time.Time - credServicePath string - tokenPath string - logger logging.Logger -} - -func (cs *awsMetadataCredentialService) urlForMetadataService() (string, error) { - // override default path for testing - if cs.credServicePath != "" { - return cs.credServicePath + cs.RoleName, nil - } - // otherwise, normal flow - // if a role name is provided, look up via the EC2 credential service - if cs.RoleName != "" { - return ec2DefaultCredServicePath + cs.RoleName, nil - } - // otherwise, check environment to see if it looks like we're in an ECS - // container (with implied role association) - if isECS() { - // first check if the relative env var exists; if so we use that otherwise we - // use the "full" var - if _, relativeExists := os.LookupEnv(ecsRelativePathEnvVar); relativeExists { - return ecsDefaultCredServicePath + os.Getenv(ecsRelativePathEnvVar), nil - } - return os.Getenv(ecsFullPathEnvVar), nil - } - // if there's no role name and we don't appear to have a path to the - // ECS container service, then the configuration is invalid - return "", errors.New("metadata endpoint cannot be determined from settings and environment") -} - -func (cs *awsMetadataCredentialService) tokenRequest(ctx context.Context) (*http.Request, error) { - tokenURL := ec2DefaultTokenPath - if cs.tokenPath != "" { - // override for testing - tokenURL = cs.tokenPath - } - req, err := http.NewRequestWithContext(ctx, http.MethodPut, tokenURL, nil) - if err != nil { - return nil, err - } - - // we are going to use the token in the immediate future, so a long TTL is not necessary - req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "60") - return req, nil 
-} - -func (cs *awsMetadataCredentialService) refreshFromService(ctx context.Context) error { - // define the expected JSON payload from the EC2 credential service - // ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - type metadataPayload struct { - Code string - AccessKeyID string `json:"AccessKeyId"` - SecretAccessKey string - Token string - Expiration time.Time - } - - // Short circuit if a reasonable amount of time until credential expiration remains - const tokenExpirationMargin = 5 * time.Minute - - if time.Now().Add(tokenExpirationMargin).Before(cs.expiration) { - cs.logger.Debug("Credentials previously obtained from metadata service still valid.") - return nil - } - - cs.logger.Debug("Obtaining credentials from metadata service.") - metaDataURL, err := cs.urlForMetadataService() - if err != nil { - // configuration issue or missing ECS environment - return err - } - - // construct an HTTP client with a reasonably short timeout - client := &http.Client{Timeout: time.Second * 10} - req, err := http.NewRequestWithContext(ctx, http.MethodGet, metaDataURL, nil) - if err != nil { - return errors.New("unable to construct metadata HTTP request: " + err.Error()) - } - - // if using the AWS_CONTAINER_CREDENTIALS_FULL_URI variable, we need to associate the token - // to the request - if _, useFullPath := os.LookupEnv(ecsFullPathEnvVar); useFullPath { - var token string - tokenFilePath, tokenFilePathExists := os.LookupEnv(ecsAuthorizationTokenFileEnvVar) - - if tokenFilePathExists { - tokenBytes, err := os.ReadFile(tokenFilePath) - if err != nil { - return errors.New("failed to read ECS metadata authorization token from file: " + err.Error()) - } - token = string(tokenBytes) - // If token doesn't exist as a file check if it exists as an environment variable - } else { - var tokenExists bool - token, tokenExists = os.LookupEnv(ecsAuthorizationTokenEnvVar) - if !tokenExists { - return errors.New("unable to get ECS metadata 
authorization token") - } - } - req.Header.Set("Authorization", token) - } - - // if in the EC2 environment, we will use IMDSv2, which requires a session cookie from a - // PUT request on the token endpoint before it will give the credentials, this provides - // protection from SSRF attacks - if !isECS() { - tokenReq, err := cs.tokenRequest(ctx) - if err != nil { - return errors.New("unable to construct metadata token HTTP request: " + err.Error()) - } - body, err := aws.DoRequestWithClient(tokenReq, client, "metadata token", cs.logger) - if err != nil { - return err - } - // token is the body of response; add to header of metadata request - req.Header.Set("X-aws-ec2-metadata-token", string(body)) - } - - body, err := aws.DoRequestWithClient(req, client, "metadata", cs.logger) - if err != nil { - return err - } - - var payload metadataPayload - err = json.Unmarshal(body, &payload) - if err != nil { - return errors.New("failed to parse credential response from metadata service: " + err.Error()) - } - - // Only the EC2 endpoint returns the "Code" element which indicates whether the query was - // successful; the ECS endpoint does not! Some other fields are missing in the ECS payload - // but we do not depend on them. 
- if cs.RoleName != "" && payload.Code != "Success" { - return errors.New("metadata service query did not succeed: " + payload.Code) - } - - cs.expiration = payload.Expiration - cs.creds.AccessKey = payload.AccessKeyID - cs.creds.SecretKey = payload.SecretAccessKey - cs.creds.SessionToken = payload.Token - cs.creds.RegionName = cs.RegionName - - return nil -} - -func (cs *awsMetadataCredentialService) credentials(ctx context.Context) (aws.Credentials, error) { - err := cs.refreshFromService(ctx) - if err != nil { - return cs.creds, err - } - return cs.creds, nil -} - -// awsAssumeRoleCredentialService represents a STS credential service that uses active IAM credentials -// to obtain temporary security credentials generated by AWS STS via AssumeRole API operation -type awsAssumeRoleCredentialService struct { - RegionName string `json:"aws_region"` - RoleArn string `json:"iam_role_arn"` - SessionName string `json:"session_name"` - Domain string `json:"aws_domain"` - AWSSigningPlugin *awsSigningAuthPlugin `json:"aws_signing,omitempty"` - stsURL string - creds aws.Credentials - expiration time.Time - logger logging.Logger -} - -func (cs *awsAssumeRoleCredentialService) populateFromEnv() error { - if cs.AWSSigningPlugin == nil { - return errors.New("a AWS signing plugin must be specified when AssumeRole credential provider is enabled") - } - - switch { - case cs.AWSSigningPlugin.AWSEnvironmentCredentials != nil: - case cs.AWSSigningPlugin.AWSProfileCredentials != nil: - case cs.AWSSigningPlugin.AWSMetadataCredentials != nil: - default: - return errors.New("unsupported AWS signing plugin with AssumeRole credential provider") - } - - if cs.AWSSigningPlugin.AWSMetadataCredentials != nil { - if cs.AWSSigningPlugin.AWSMetadataCredentials.RegionName == "" { - if cs.AWSSigningPlugin.AWSMetadataCredentials.RegionName = os.Getenv(awsRegionEnvVar); cs.AWSSigningPlugin.AWSMetadataCredentials.RegionName == "" { - return errors.New("no " + awsRegionEnvVar + " set in environment or 
configuration") - } - } - } - - if cs.AWSSigningPlugin.AWSSignatureVersion == "" { - cs.AWSSigningPlugin.AWSSignatureVersion = "4" - } - - if cs.Domain == "" { - cs.Domain = os.Getenv(awsDomainEnvVar) - } - - if cs.RegionName == "" { - if cs.RegionName = os.Getenv(awsRegionEnvVar); cs.RegionName == "" { - return errors.New("no " + awsRegionEnvVar + " set in environment or configuration") - } - } - - if cs.RoleArn == "" { - if cs.RoleArn = os.Getenv(awsRoleArnEnvVar); cs.RoleArn == "" { - return errors.New("no " + awsRoleArnEnvVar + " set in environment or configuration") - } - } - - return nil -} - -func (cs *awsAssumeRoleCredentialService) signingCredentials(ctx context.Context) (aws.Credentials, error) { - if cs.AWSSigningPlugin.AWSEnvironmentCredentials != nil { - cs.AWSSigningPlugin.AWSEnvironmentCredentials.logger = cs.logger - return cs.AWSSigningPlugin.AWSEnvironmentCredentials.credentials(ctx) - } - - if cs.AWSSigningPlugin.AWSProfileCredentials != nil { - cs.AWSSigningPlugin.AWSProfileCredentials.logger = cs.logger - return cs.AWSSigningPlugin.AWSProfileCredentials.credentials(ctx) - } - - cs.AWSSigningPlugin.AWSMetadataCredentials.logger = cs.logger - return cs.AWSSigningPlugin.AWSMetadataCredentials.credentials(ctx) -} - -func (cs *awsAssumeRoleCredentialService) stsPath() string { - return getSTSPath(cs.Domain, cs.stsURL, cs.RegionName) -} - -func (cs *awsAssumeRoleCredentialService) refreshFromService(ctx context.Context) error { - // define the expected JSON payload from the EC2 credential service - // ref. 
https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html - type responsePayload struct { - Result struct { - Credentials struct { - SessionToken string - SecretAccessKey string - Expiration time.Time - AccessKeyID string `xml:"AccessKeyId"` - } - } `xml:"AssumeRoleResult"` - } - - // short circuit if a reasonable amount of time until credential expiration remains - if time.Now().Add(time.Minute * 5).Before(cs.expiration) { - cs.logger.Debug("Credentials previously obtained from sts service still valid.") - return nil - } - - cs.logger.Debug("Obtaining credentials from sts for role %s.", cs.RoleArn) - - var sessionName string - if cs.SessionName == "" { - sessionName = "open-policy-agent" - } else { - sessionName = cs.SessionName - } - - queryVals := url.Values{ - "Action": []string{"AssumeRole"}, - "RoleSessionName": []string{sessionName}, - "RoleArn": []string{cs.RoleArn}, - "Version": []string{"2011-06-15"}, - } - stsRequestURL, _ := url.Parse(cs.stsPath()) - - // construct an HTTP client with a reasonably short timeout - client := &http.Client{Timeout: time.Second * 10} - req, err := http.NewRequestWithContext(ctx, http.MethodPost, stsRequestURL.String(), strings.NewReader(queryVals.Encode())) - if err != nil { - return errors.New("unable to construct STS HTTP request: " + err.Error()) - } - - req.Header.Add("Content-Type", "application/x-www-form-urlencoded") - - // Note: Calls to AWS STS AssumeRole must be signed using the access key ID - // and secret access key - signingCreds, err := cs.signingCredentials(ctx) - if err != nil { - return err - } - - err = aws.SignRequest(req, "sts", signingCreds, time.Now(), cs.AWSSigningPlugin.AWSSignatureVersion) - if err != nil { - return err - } - - body, err := aws.DoRequestWithClient(req, client, "STS", cs.logger) - if err != nil { - return err - } - - var payload responsePayload - err = xml.Unmarshal(body, &payload) - if err != nil { - return errors.New("failed to parse credential response from STS 
service: " + err.Error()) - } - - cs.expiration = payload.Result.Credentials.Expiration - cs.creds.AccessKey = payload.Result.Credentials.AccessKeyID - cs.creds.SecretKey = payload.Result.Credentials.SecretAccessKey - cs.creds.SessionToken = payload.Result.Credentials.SessionToken - cs.creds.RegionName = cs.RegionName - - return nil -} - -func (cs *awsAssumeRoleCredentialService) credentials(ctx context.Context) (aws.Credentials, error) { - err := cs.refreshFromService(ctx) - if err != nil { - return cs.creds, err - } - return cs.creds, nil -} - -// awsWebIdentityCredentialService represents an STS WebIdentity credential services -type awsWebIdentityCredentialService struct { - RoleArn string - WebIdentityTokenFile string - RegionName string `json:"aws_region"` - SessionName string `json:"session_name"` - Domain string `json:"aws_domain"` - stsURL string - creds aws.Credentials - expiration time.Time - logger logging.Logger -} - -func (cs *awsWebIdentityCredentialService) populateFromEnv() error { - cs.RoleArn = os.Getenv(awsRoleArnEnvVar) - if cs.RoleArn == "" { - return errors.New("no " + awsRoleArnEnvVar + " set in environment") - } - cs.WebIdentityTokenFile = os.Getenv(awsWebIdentityTokenFileEnvVar) - if cs.WebIdentityTokenFile == "" { - return errors.New("no " + awsWebIdentityTokenFileEnvVar + " set in environment") - } - - if cs.Domain == "" { - cs.Domain = os.Getenv(awsDomainEnvVar) - } - - if cs.RegionName == "" { - if cs.RegionName = os.Getenv(awsRegionEnvVar); cs.RegionName == "" { - return errors.New("no " + awsRegionEnvVar + " set in environment or configuration") - } - } - return nil -} - -func (cs *awsWebIdentityCredentialService) stsPath() string { - return getSTSPath(cs.Domain, cs.stsURL, cs.RegionName) -} - -func (cs *awsWebIdentityCredentialService) refreshFromService(ctx context.Context) error { - // define the expected JSON payload from the EC2 credential service - // ref. 
https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html - type responsePayload struct { - Result struct { - Credentials struct { - SessionToken string - SecretAccessKey string - Expiration time.Time - AccessKeyID string `xml:"AccessKeyId"` - } - } `xml:"AssumeRoleWithWebIdentityResult"` - } - - // short circuit if a reasonable amount of time until credential expiration remains - if time.Now().Add(time.Minute * 5).Before(cs.expiration) { - cs.logger.Debug("Credentials previously obtained from sts service still valid.") - return nil - } - - cs.logger.Debug("Obtaining credentials from sts for role %s.", cs.RoleArn) - - var sessionName string - if cs.SessionName == "" { - sessionName = "open-policy-agent" - } else { - sessionName = cs.SessionName - } - - tokenData, err := os.ReadFile(cs.WebIdentityTokenFile) - if err != nil { - return errors.New("unable to read web token for sts HTTP request: " + err.Error()) - } - - token := string(tokenData) - - queryVals := url.Values{ - "Action": []string{"AssumeRoleWithWebIdentity"}, - "RoleSessionName": []string{sessionName}, - "RoleArn": []string{cs.RoleArn}, - "WebIdentityToken": []string{token}, - "Version": []string{"2011-06-15"}, - } - stsRequestURL, _ := url.Parse(cs.stsPath()) - - // construct an HTTP client with a reasonably short timeout - client := &http.Client{Timeout: time.Second * 10} - req, err := http.NewRequestWithContext(ctx, http.MethodPost, stsRequestURL.String(), strings.NewReader(queryVals.Encode())) - if err != nil { - return errors.New("unable to construct STS HTTP request: " + err.Error()) - } - - req.Header.Add("Content-Type", "application/x-www-form-urlencoded") - - body, err := aws.DoRequestWithClient(req, client, "STS", cs.logger) - if err != nil { - return err - } - - var payload responsePayload - err = xml.Unmarshal(body, &payload) - if err != nil { - return errors.New("failed to parse credential response from STS service: " + err.Error()) - } - - cs.expiration = 
payload.Result.Credentials.Expiration - cs.creds.AccessKey = payload.Result.Credentials.AccessKeyID - cs.creds.SecretKey = payload.Result.Credentials.SecretAccessKey - cs.creds.SessionToken = payload.Result.Credentials.SessionToken - cs.creds.RegionName = cs.RegionName - - return nil -} - -func (cs *awsWebIdentityCredentialService) credentials(ctx context.Context) (aws.Credentials, error) { - err := cs.refreshFromService(ctx) - if err != nil { - return cs.creds, err - } - return cs.creds, nil -} - -func isECS() bool { - // the special relative path URI is set by the container agent in the ECS environment only - _, isECSRelative := os.LookupEnv(ecsRelativePathEnvVar) - _, isECSFull := os.LookupEnv(ecsFullPathEnvVar) - return isECSRelative || isECSFull -} - -// ecrAuthPlugin authorizes requests to AWS ECR. -type ecrAuthPlugin struct { - token aws.ECRAuthorizationToken - - // awsAuthPlugin is used to sign ecr authorization token requests. - awsAuthPlugin *awsSigningAuthPlugin - - // ecr represents the service we request tokens from. - ecr ecr - - logger logging.Logger -} - -type ecr interface { - GetAuthorizationToken(context.Context, aws.Credentials, string) (aws.ECRAuthorizationToken, error) -} - -func newECRAuthPlugin(ap *awsSigningAuthPlugin) *ecrAuthPlugin { - return &ecrAuthPlugin{ - awsAuthPlugin: ap, - ecr: aws.NewECR(ap.logger), - logger: ap.logger, - } -} - -// Prepare should be called with any request to AWS ECR. -// It takes care of retrieving an ECR authorization token to sign -// the request with. 
-func (ap *ecrAuthPlugin) Prepare(r *http.Request) error { - if !ap.token.IsValid() { - ap.logger.Debug("Refreshing ECR auth token") - if err := ap.refreshAuthorizationToken(r.Context()); err != nil { - return err - } - } - - ap.logger.Debug("Signing request with ECR authorization token") - - r.Header.Set("Authorization", "Basic "+ap.token.AuthorizationToken) - return nil -} - -func (ap *ecrAuthPlugin) refreshAuthorizationToken(ctx context.Context) error { - creds, err := ap.awsAuthPlugin.awsCredentialService().credentials(ctx) - if err != nil { - return fmt.Errorf("failed to get aws credentials: %w", err) - } - - token, err := ap.ecr.GetAuthorizationToken(ctx, creds, ap.awsAuthPlugin.AWSSignatureVersion) - if err != nil { - return fmt.Errorf("ecr: failed to get authorization token: %w", err) - } - - ap.token = token - return nil -} - -// awsKMSSignPlugin signs digests using AWS KMS. -type awsKMSSignPlugin struct { - - // awsAuthPlugin is used to sign kms sign requests. - awsAuthPlugin *awsSigningAuthPlugin - - // kms represents the service for signing digests. 
- kms awskms - - logger logging.Logger -} - -type awskms interface { - SignDigest(ctx context.Context, digest []byte, keyID string, signingAlgorithm string, creds aws.Credentials, signatureVersion string) (string, error) -} - -func newKMSSignPlugin(ap *awsSigningAuthPlugin) *awsKMSSignPlugin { - return &awsKMSSignPlugin{ - awsAuthPlugin: ap, - kms: aws.NewKMS(ap.logger), - logger: ap.logger, - } -} - -func (ap *awsKMSSignPlugin) SignDigest(ctx context.Context, digest []byte, keyID string, signingAlgorithm string) (string, error) { - creds, err := ap.awsAuthPlugin.awsCredentialService().credentials(ctx) - if err != nil { - return "", fmt.Errorf("failed to get aws credentials: %w", err) - } - - signature, err := ap.kms.SignDigest(ctx, digest, keyID, signingAlgorithm, creds, ap.awsAuthPlugin.AWSSignatureVersion) - if err != nil { - return "", fmt.Errorf("kms: failed to sign digest: %w", err) - } - - return signature, nil -} - -func getSTSPath(stsDomain, stsURL, regionName string) string { - var domain string - if stsDomain != "" { - domain = strings.ToLower(stsDomain) - } else { - domain = stsDefaultDomain - } - - var stsPath string - switch { - case stsURL != "": - stsPath = stsURL - case regionName != "": - stsPath = fmt.Sprintf(stsRegionPath, strings.ToLower(regionName), domain) - default: - stsPath = fmt.Sprintf(stsDefaultPath, domain) - } - return stsPath -} diff --git a/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/azure.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/azure.go deleted file mode 100644 index 9f7a164327..0000000000 --- a/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/azure.go +++ /dev/null @@ -1,287 +0,0 @@ -package rest - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "os" - "time" -) - -var ( - azureIMDSEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" - defaultAPIVersion = "2018-02-01" - defaultResource = 
"https://storage.azure.com/" - timeout = 5 * time.Second - defaultAPIVersionForAppServiceMsi = "2019-08-01" - defaultKeyVaultAPIVersion = "7.4" -) - -// azureManagedIdentitiesToken holds a token for managed identities for Azure resources -type azureManagedIdentitiesToken struct { - AccessToken string `json:"access_token"` - ExpiresIn string `json:"expires_in"` - ExpiresOn string `json:"expires_on"` - NotBefore string `json:"not_before"` - Resource string `json:"resource"` - TokenType string `json:"token_type"` -} - -// azureManagedIdentitiesError represents an error fetching an azureManagedIdentitiesToken -type azureManagedIdentitiesError struct { - Err string `json:"error"` - Description string `json:"error_description"` - Endpoint string - StatusCode int -} - -func (e *azureManagedIdentitiesError) Error() string { - return fmt.Sprintf("%v %s retrieving azure token from %s: %s", e.StatusCode, e.Err, e.Endpoint, e.Description) -} - -// azureManagedIdentitiesAuthPlugin uses an azureManagedIdentitiesToken.AccessToken for bearer authorization -type azureManagedIdentitiesAuthPlugin struct { - Endpoint string `json:"endpoint"` - APIVersion string `json:"api_version"` - Resource string `json:"resource"` - ObjectID string `json:"object_id"` - ClientID string `json:"client_id"` - MiResID string `json:"mi_res_id"` - UseAppServiceMsi bool `json:"use_app_service_msi,omitempty"` -} - -func (ap *azureManagedIdentitiesAuthPlugin) setDefaults() { - if ap.Endpoint == "" { - identityEndpoint := os.Getenv("IDENTITY_ENDPOINT") - if identityEndpoint != "" { - ap.UseAppServiceMsi = true - ap.Endpoint = identityEndpoint - } else { - ap.Endpoint = azureIMDSEndpoint - } - } - - if ap.Resource == "" { - ap.Resource = defaultResource - } - - if ap.APIVersion == "" { - if ap.UseAppServiceMsi { - ap.APIVersion = defaultAPIVersionForAppServiceMsi - } else { - ap.APIVersion = defaultAPIVersion - } - } - -} - -func (ap *azureManagedIdentitiesAuthPlugin) NewClient(c Config) (*http.Client, error) 
{ - if c.Type == "oci" { - return nil, errors.New("azure managed identities auth: OCI service not supported") - } - ap.setDefaults() - t, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - - return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil -} - -func (ap *azureManagedIdentitiesAuthPlugin) Prepare(req *http.Request) error { - token, err := azureManagedIdentitiesTokenRequest( - ap.Endpoint, ap.APIVersion, ap.Resource, - ap.ObjectID, ap.ClientID, ap.MiResID, - ap.UseAppServiceMsi, - ) - if err != nil { - return err - } - - req.Header.Add("Authorization", "Bearer "+token.AccessToken) - return nil -} - -// azureManagedIdentitiesTokenRequest fetches an azureManagedIdentitiesToken -func azureManagedIdentitiesTokenRequest( - endpoint, apiVersion, resource, objectID, clientID, miResID string, - useAppServiceMsi bool, -) (azureManagedIdentitiesToken, error) { - var token azureManagedIdentitiesToken - e := buildAzureManagedIdentitiesRequestPath(endpoint, apiVersion, resource, objectID, clientID, miResID) - - request, err := http.NewRequest("GET", e, nil) - if err != nil { - return token, err - } - if useAppServiceMsi { - identityHeader := os.Getenv("IDENTITY_HEADER") - if identityHeader == "" { - return token, errors.New("azure managed identities auth: IDENTITY_HEADER env var not found") - } - request.Header.Add("x-identity-header", identityHeader) - } else { - request.Header.Add("Metadata", "true") - } - - httpClient := http.Client{Timeout: timeout} - response, err := httpClient.Do(request) - if err != nil { - return token, err - } - defer response.Body.Close() - - data, err := io.ReadAll(response.Body) - if err != nil { - return token, err - } - - if s := response.StatusCode; s != http.StatusOK { - var azureError azureManagedIdentitiesError - err = json.Unmarshal(data, &azureError) - if err != nil { - return token, err - } - - azureError.Endpoint = e - azureError.StatusCode = s - return token, &azureError - } - - err = 
json.Unmarshal(data, &token) - if err != nil { - return token, err - } - return token, nil -} - -// buildAzureManagedIdentitiesRequestPath constructs the request URL for an Azure managed identities token request -func buildAzureManagedIdentitiesRequestPath( - endpoint, apiVersion, resource, objectID, clientID, miResID string, -) string { - params := url.Values{ - "api-version": []string{apiVersion}, - "resource": []string{resource}, - } - - if objectID != "" { - params.Add("object_id", objectID) - } - - if clientID != "" { - params.Add("client_id", clientID) - } - - if miResID != "" { - params.Add("mi_res_id", miResID) - } - - return endpoint + "?" + params.Encode() -} - -type azureKeyVaultSignPlugin struct { - config azureKeyVaultConfig - tokener func() (string, error) -} - -func newKeyVaultSignPlugin(ap *azureManagedIdentitiesAuthPlugin, cfg *azureKeyVaultConfig) *azureKeyVaultSignPlugin { - resp := &azureKeyVaultSignPlugin{ - tokener: func() (string, error) { - resp, err := azureManagedIdentitiesTokenRequest( - ap.Endpoint, - ap.APIVersion, - cfg.URL.String(), - ap.ObjectID, - ap.ClientID, - ap.MiResID, - ap.UseAppServiceMsi) - if err != nil { - return "", err - } - return resp.AccessToken, nil - }, - config: *cfg, - } - return resp -} - -func (akv *azureKeyVaultSignPlugin) setDefaults() { - if akv.config.APIVersion == "" { - akv.config.APIVersion = defaultKeyVaultAPIVersion - } -} - -type kvRequest struct { - Alg string `json:"alg"` - Value string `json:"value"` -} - -type kvResponse struct { - KID string `json:"kid"` - Value string `json:"value"` -} - -// SignDigest() uses the Microsoft keyvault rest api to sign a byte digest -// https://learn.microsoft.com/en-us/rest/api/keyvault/keys/sign/sign -func (ap *azureKeyVaultSignPlugin) SignDigest(ctx context.Context, digest []byte) (string, error) { - tkn, err := ap.tokener() - if err != nil { - return "", err - } - if ap.config.URL.Host == "" { - return "", errors.New("keyvault host not set") - } - - signingURL := 
ap.config.URL.JoinPath("keys", ap.config.Key, ap.config.KeyVersion, "sign") - q := signingURL.Query() - q.Set("api-version", ap.config.APIVersion) - signingURL.RawQuery = q.Encode() - reqBody, err := json.Marshal(kvRequest{ - Alg: ap.config.Alg, - Value: base64.StdEncoding.EncodeToString(digest)}) - if err != nil { - return "", err - } - - req, err := http.NewRequestWithContext(ctx, http.MethodPost, signingURL.String(), bytes.NewBuffer(reqBody)) - if err != nil { - return "", err - } - - req.Header.Add("Authorization", "Bearer "+tkn) - req.Header.Add("Content-Type", "application/json") - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return "", err - } - - if resp.StatusCode != http.StatusOK { - if resp.Body != nil { - defer resp.Body.Close() - b, _ := io.ReadAll(resp.Body) - return "", fmt.Errorf("non 200 status code, got: %d. Body: %v", resp.StatusCode, string(b)) - } - return "", fmt.Errorf("non 200 status code from keyvault sign, got: %d", resp.StatusCode) - } - defer resp.Body.Close() - - respBytes, err := io.ReadAll(resp.Body) - if err != nil { - return "", errors.New("failed to read keyvault response body") - } - - var res kvResponse - err = json.Unmarshal(respBytes, &res) - if err != nil { - return "", fmt.Errorf("no valid keyvault response, got: %v", string(respBytes)) - } - - return res.Value, nil -} diff --git a/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/gcp.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/gcp.go deleted file mode 100644 index c717105c7f..0000000000 --- a/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/gcp.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package rest - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "strings" - "time" -) - -var ( - defaultGCPMetadataEndpoint = "http://metadata.google.internal" - defaultAccessTokenPath = "/computeMetadata/v1/instance/service-accounts/default/token" - defaultIdentityTokenPath = "/computeMetadata/v1/instance/service-accounts/default/identity" -) - -// AccessToken holds a GCP access token. -type AccessToken struct { - AccessToken string `json:"access_token"` - ExpiresIn int64 `json:"expires_in"` - TokenType string `json:"token_type"` -} - -type gcpMetadataError struct { - err error - endpoint string - statusCode int -} - -func (e *gcpMetadataError) Error() string { - return fmt.Sprintf("error retrieving gcp ID token from %s %d: %v", e.endpoint, e.statusCode, e.err) -} - -func (e *gcpMetadataError) Unwrap() error { return e.err } - -var ( - errGCPMetadataNotFound = errors.New("not found") - errGCPMetadataInvalidRequest = errors.New("invalid request") - errGCPMetadataUnexpected = errors.New("unexpected error") -) - -// gcpMetadataAuthPlugin represents authentication via GCP metadata service. 
-type gcpMetadataAuthPlugin struct { - AccessTokenPath string `json:"access_token_path"` - Audience string `json:"audience"` - Endpoint string `json:"endpoint"` - IdentityTokenPath string `json:"identity_token_path"` - Scopes []string `json:"scopes"` -} - -func (ap *gcpMetadataAuthPlugin) NewClient(c Config) (*http.Client, error) { - if ap.Audience == "" && len(ap.Scopes) == 0 { - return nil, errors.New("audience or scopes is required when gcp metadata is enabled") - } - - if ap.Audience != "" && len(ap.Scopes) > 0 { - return nil, errors.New("either audience or scopes can be set, not both, when gcp metadata is enabled") - } - - if ap.Endpoint == "" { - ap.Endpoint = defaultGCPMetadataEndpoint - } - - if ap.AccessTokenPath == "" { - ap.AccessTokenPath = defaultAccessTokenPath - } - - if ap.IdentityTokenPath == "" { - ap.IdentityTokenPath = defaultIdentityTokenPath - } - - t, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - - return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil -} - -func (ap *gcpMetadataAuthPlugin) Prepare(req *http.Request) error { - var err error - var token string - - if ap.Audience != "" { - token, err = identityTokenFromMetadataService(ap.Endpoint, ap.IdentityTokenPath, ap.Audience) - if err != nil { - return fmt.Errorf("error retrieving identity token from gcp metadata service: %w", err) - } - } - - if len(ap.Scopes) != 0 { - token, err = accessTokenFromMetadataService(ap.Endpoint, ap.AccessTokenPath, ap.Scopes) - if err != nil { - return fmt.Errorf("error retrieving access token from gcp metadata service: %w", err) - } - } - - req.Header.Add("Authorization", fmt.Sprintf("Bearer %v", token)) - return nil -} - -// accessTokenFromMetadataService returns an access token based on the scopes. 
-func accessTokenFromMetadataService(endpoint, path string, scopes []string) (string, error) { - s := strings.Join(scopes, ",") - - e := fmt.Sprintf("%s%s?scopes=%s", endpoint, path, s) - - data, err := gcpMetadataServiceRequest(e) - if err != nil { - return "", err - } - - var accessToken AccessToken - err = json.Unmarshal(data, &accessToken) - if err != nil { - return "", err - } - - return accessToken.AccessToken, nil -} - -// identityTokenFromMetadataService returns an identity token based on the audience. -func identityTokenFromMetadataService(endpoint, path, audience string) (string, error) { - e := fmt.Sprintf("%s%s?audience=%s", endpoint, path, audience) - - data, err := gcpMetadataServiceRequest(e) - if err != nil { - return "", err - } - return string(data), nil -} - -func gcpMetadataServiceRequest(endpoint string) ([]byte, error) { - request, err := http.NewRequest("GET", endpoint, nil) - if err != nil { - return nil, err - } - - request.Header.Add("Metadata-Flavor", "Google") - - timeout := time.Duration(5) * time.Second - httpClient := http.Client{Timeout: timeout} - - response, err := httpClient.Do(request) - if err != nil { - return nil, err - } - defer response.Body.Close() - - switch s := response.StatusCode; s { - case 200: - break - case 400: - return nil, &gcpMetadataError{errGCPMetadataInvalidRequest, endpoint, s} - case 404: - return nil, &gcpMetadataError{errGCPMetadataNotFound, endpoint, s} - default: - return nil, &gcpMetadataError{errGCPMetadataUnexpected, endpoint, s} - } - - data, err := io.ReadAll(response.Body) - if err != nil { - return nil, err - } - - return data, nil -} diff --git a/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go deleted file mode 100644 index f8be30af5e..0000000000 --- a/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. 
-// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package rest implements a REST client for communicating with remote services. -package rest - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "maps" - "net/http" - "net/http/httputil" - "reflect" - "strings" - - "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/v1/keys" - "github.com/open-policy-agent/opa/v1/logging" - "github.com/open-policy-agent/opa/v1/tracing" - "github.com/open-policy-agent/opa/v1/util" -) - -const ( - defaultResponseHeaderTimeoutSeconds = int64(10) - defaultResponseSizeLimitBytes = 1024 - - grantTypeClientCredentials = "client_credentials" - grantTypeJwtBearer = "jwt_bearer" -) - -var maskedHeaderKeys = map[string]struct{}{ - "Authorization": {}, - "X-Amz-Security-Token": {}, -} - -// An HTTPAuthPlugin represents a mechanism to construct and configure HTTP authentication for a REST service -type HTTPAuthPlugin interface { - // implementations can assume NewClient will be called before Prepare - NewClient(Config) (*http.Client, error) - Prepare(*http.Request) error -} - -// Config represents configuration for a REST client. 
-type Config struct { - Name string `json:"name"` - URL string `json:"url"` - Headers map[string]string `json:"headers"` - AllowInsecureTLS bool `json:"allow_insecure_tls,omitempty"` - ResponseHeaderTimeoutSeconds *int64 `json:"response_header_timeout_seconds,omitempty"` - TLS *serverTLSConfig `json:"tls,omitempty"` - Credentials struct { - Bearer *bearerAuthPlugin `json:"bearer,omitempty"` - OAuth2 *oauth2ClientCredentialsAuthPlugin `json:"oauth2,omitempty"` - ClientTLS *clientTLSAuthPlugin `json:"client_tls,omitempty"` - S3Signing *awsSigningAuthPlugin `json:"s3_signing,omitempty"` - GCPMetadata *gcpMetadataAuthPlugin `json:"gcp_metadata,omitempty"` - AzureManagedIdentity *azureManagedIdentitiesAuthPlugin `json:"azure_managed_identity,omitempty"` - Plugin *string `json:"plugin,omitempty"` - } `json:"credentials"` - Type string `json:"type,omitempty"` - keys map[string]*keys.Config - logger logging.Logger -} - -// Equal returns true if this client config is equal to the other. -func (c *Config) Equal(other *Config) bool { - otherWithoutLogger := *other - otherWithoutLogger.logger = c.logger - return reflect.DeepEqual(c, &otherWithoutLogger) -} - -// An AuthPluginLookupFunc can lookup auth plugins by their name. -type AuthPluginLookupFunc func(name string) HTTPAuthPlugin - -// AuthPlugin should be used to get an authentication method from the config. 
-func (c *Config) AuthPlugin(lookup AuthPluginLookupFunc) (HTTPAuthPlugin, error) { - var candidate HTTPAuthPlugin - if c.Credentials.Plugin != nil { - if lookup == nil { - // if no authPluginLookup function is passed we can't resolve the plugin - return nil, errors.New("missing auth plugin lookup function") - } - - candidate := lookup(*c.Credentials.Plugin) - if candidate == nil { - return nil, fmt.Errorf("auth plugin %q not found", *c.Credentials.Plugin) - } - - return candidate, nil - } - // reflection avoids need for this code to change as auth plugins are added - s := reflect.ValueOf(c.Credentials) - for i := range s.NumField() { - if s.Field(i).IsNil() { - continue - } - - if candidate != nil { - return nil, errors.New("a maximum one credential method must be specified") - } - - candidate = s.Field(i).Interface().(HTTPAuthPlugin) - } - - if candidate == nil { - return &defaultAuthPlugin{}, nil - } - return candidate, nil -} - -func (c *Config) authHTTPClient(lookup AuthPluginLookupFunc) (*http.Client, error) { - plugin, err := c.AuthPlugin(lookup) - if err != nil { - return nil, err - } - return plugin.NewClient(*c) -} - -func (c *Config) authPrepare(req *http.Request, lookup AuthPluginLookupFunc) error { - plugin, err := c.AuthPlugin(lookup) - if err != nil { - return err - } - return plugin.Prepare(req) -} - -// Client implements an HTTP/REST client for communicating with remote -// services. -type Client struct { - bytes *[]byte - json *any - config Config - headers map[string]string - authPluginLookup AuthPluginLookupFunc - logger logging.Logger - loggerFields map[string]any - distributedTacingOpts tracing.Options -} - -// Name returns an option that overrides the service name on the client. -func Name(s string) func(*Client) { - return func(c *Client) { - c.config.Name = s - } -} - -// AuthPluginLookup assigns a function to lookup an HTTPAuthPlugin to a new Client. -// It's intended to be used when creating a Client using New(). 
Usually this is passed -// the plugins.AuthPlugin func, which retrieves a registered HTTPAuthPlugin from the -// plugin manager. -func AuthPluginLookup(l AuthPluginLookupFunc) func(*Client) { - return func(c *Client) { - c.authPluginLookup = l - } -} - -// Logger assigns a logger to the client -func Logger(l logging.Logger) func(*Client) { - return func(c *Client) { - c.logger = l - } -} - -// DistributedTracingOpts sets the options to be used by distributed tracing. -func DistributedTracingOpts(tr tracing.Options) func(*Client) { - return func(c *Client) { - c.distributedTacingOpts = tr - } -} - -// New returns a new Client for config. -func New(config []byte, keys map[string]*keys.Config, opts ...func(*Client)) (Client, error) { - var parsedConfig Config - if err := util.Unmarshal(config, &parsedConfig); err != nil { - return Client{}, err - } - - parsedConfig.URL = strings.TrimRight(parsedConfig.URL, "/") - - if parsedConfig.ResponseHeaderTimeoutSeconds == nil { - timeout := defaultResponseHeaderTimeoutSeconds - parsedConfig.ResponseHeaderTimeoutSeconds = &timeout - } - - parsedConfig.keys = keys - - client := Client{ - config: parsedConfig, - } - - for _, f := range opts { - f(&client) - } - - if client.logger == nil { - client.logger = logging.Get() - } - - client.config.logger = client.logger - - return client, nil -} - -// AuthPluginLookup returns the lookup function to find a custom registered -// auth plugin by its name. -func (c Client) AuthPluginLookup() AuthPluginLookupFunc { - return c.authPluginLookup -} - -// Service returns the name of the service this Client is configured for. 
-func (c Client) Service() string { - return c.config.Name -} - -// Config returns this Client's configuration -func (c Client) Config() *Config { - return &c.config -} - -// SetResponseHeaderTimeout sets the "ResponseHeaderTimeout" in the http client's Transport -func (c Client) SetResponseHeaderTimeout(timeout *int64) Client { - c.config.ResponseHeaderTimeoutSeconds = timeout - return c -} - -// Logger returns the logger assigned to the Client -func (c Client) Logger() logging.Logger { - return c.logger -} - -// LoggerFields returns the fields used for log statements used by Client -func (c Client) LoggerFields() map[string]any { - return c.loggerFields -} - -// WithHeader returns a shallow copy of the client with a header to include the -// requests. -func (c Client) WithHeader(k, v string) Client { - if v == "" { - return c - } - if c.headers == nil { - c.headers = map[string]string{} - } - c.headers[k] = v - return c -} - -// WithJSON returns a shallow copy of the client with the JSON value set as the -// message body to include the requests. This function sets the Content-Type -// header. -func (c Client) WithJSON(body any) Client { - c = c.WithHeader("Content-Type", "application/json") - c.json = &body - return c -} - -// WithBytes returns a shallow copy of the client with the bytes set as the -// message body to include in the requests. -func (c Client) WithBytes(body []byte) Client { - c.bytes = &body - return c -} - -// Do executes a request using the client. 
-func (c Client) Do(ctx context.Context, method, path string) (*http.Response, error) { - - httpClient, err := c.config.authHTTPClient(c.authPluginLookup) - if err != nil { - return nil, err - } - - if len(c.distributedTacingOpts) > 0 { - httpClient.Transport = tracing.NewTransport(httpClient.Transport, c.distributedTacingOpts) - } - - path = strings.Trim(path, "/") - - var body io.Reader - - if c.bytes != nil { - body = bytes.NewReader(*c.bytes) - } else if c.json != nil { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(*c.json); err != nil { - return nil, err - } - body = &buf - } - - url := c.config.URL + "/" + path - req, err := http.NewRequestWithContext(ctx, method, url, body) - if err != nil { - return nil, err - } - - headers := map[string]string{ - "User-Agent": version.UserAgent, - } - - // Copy custom headers from config. - maps.Copy(headers, c.config.Headers) - - // Overwrite with headers set directly on client. - maps.Copy(headers, c.headers) - - for key, value := range headers { - req.Header.Add(key, value) - } - - if err = c.config.authPrepare(req, c.authPluginLookup); err != nil { - return nil, err - } - - if c.logger.GetLevel() >= logging.Debug { - c.loggerFields = map[string]any{ - "method": method, - "url": url, - "headers": withMaskedHeaders(req.Header), - } - - c.logger.WithFields(c.loggerFields).Debug("Sending request.") - } - - resp, err := httpClient.Do(req) - - if resp != nil && c.logger.GetLevel() >= logging.Debug { - // Only log for debug purposes. If an error occurred, the caller should handle - // that. In the non-error case, the caller may not do anything. 
- c.loggerFields["status"] = resp.Status - c.loggerFields["headers"] = resp.Header - - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - dump, err := httputil.DumpResponse(resp, true) - if err != nil { - return nil, err - } - - if len(dump) < defaultResponseSizeLimitBytes { - c.loggerFields["response"] = string(dump) - } else { - c.loggerFields["response"] = fmt.Sprintf("%v...", string(dump[:defaultResponseSizeLimitBytes])) - } - } - c.logger.WithFields(c.loggerFields).Debug("Received response.") - } - - return resp, err -} - -func withMaskedHeaders(headers http.Header) http.Header { - masked := make(http.Header) - for k, v := range headers { - if _, ok := maskedHeaderKeys[k]; ok { - masked.Set(k, "REDACTED") - } else { - masked[k] = v - } - } - return masked -} diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go b/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go index 55b5ed7803..0043321acf 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go +++ b/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go @@ -41,3 +41,9 @@ func RegisterPlugin(name string, p TargetPlugin) { } targetPlugins[name] = p } + +func ResetTargetPlugins() { + pluginMtx.Lock() + defer pluginMtx.Unlock() + targetPlugins = map[string]TargetPlugin{} +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go b/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go index 8a47d90a93..a69dba1bb8 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go +++ b/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go @@ -25,8 +25,8 @@ import ( "github.com/open-policy-agent/opa/v1/bundle" "github.com/open-policy-agent/opa/v1/ir" "github.com/open-policy-agent/opa/v1/loader" + "github.com/open-policy-agent/opa/v1/loader/filter" "github.com/open-policy-agent/opa/v1/metrics" - "github.com/open-policy-agent/opa/v1/plugins" "github.com/open-policy-agent/opa/v1/resolver" "github.com/open-policy-agent/opa/v1/storage" 
"github.com/open-policy-agent/opa/v1/storage/inmem" @@ -42,10 +42,7 @@ import ( const ( defaultPartialNamespace = "partial" wasmVarPrefix = "^" -) -// nolint: deadcode,varcheck -const ( targetWasm = "wasm" targetRego = "rego" ) @@ -235,6 +232,7 @@ func EvalInstrument(instrument bool) EvalOption { } // EvalTracer configures a tracer for a Prepared Query's evaluation +// // Deprecated: Use EvalQueryTracer instead. func EvalTracer(tracer topdown.Tracer) EvalOption { return func(e *EvalContext) { @@ -664,12 +662,11 @@ type Rego struct { enablePrintStatements bool distributedTracingOpts tracing.Options strict bool - pluginMgr *plugins.Manager - plugins []TargetPlugin targetPrepState TargetPluginEval regoVersion ast.RegoVersion compilerHook func(*ast.Compiler) evalMode *ast.CompilerEvalMode + filter filter.LoaderFilter } func (r *Rego) RegoVersion() ast.RegoVersion { @@ -1046,6 +1043,12 @@ func LoadBundle(path string) func(r *Rego) { } } +func WithFilter(f filter.LoaderFilter) func(r *Rego) { + return func(r *Rego) { + r.filter = f + } +} + // ParsedBundle returns an argument that adds a bundle to be loaded. func ParsedBundle(name string, b *bundle.Bundle) func(r *Rego) { return func(r *Rego) { @@ -1071,6 +1074,16 @@ func Store(s storage.Store) func(r *Rego) { } } +// Data returns an argument that sets the Rego data document. Data should be +// a map representing the data document. This is a simpler alternative to +// using Store with inmem.NewFromObject for cases where an in-memory store +// with static data is sufficient. +func Data(x map[string]any) func(r *Rego) { + return func(r *Rego) { + r.store = inmem.NewFromObject(x) + } +} + // StoreReadAST returns an argument that sets whether the store should eagerly convert data to AST values. // // Only applicable when no store has been set on the Rego object through the Store option. @@ -1115,6 +1128,7 @@ func Trace(yes bool) func(r *Rego) { } // Tracer returns an argument that adds a query tracer to r. 
+// // Deprecated: Use QueryTracer instead. func Tracer(t topdown.Tracer) func(r *Rego) { return func(r *Rego) { @@ -1354,6 +1368,7 @@ func New(options ...func(r *Rego)) *Rego { callHook := r.compiler == nil // call hook only if we created the compiler here if r.compiler == nil { + //nolint:staticcheck r.compiler = ast.NewCompiler(). WithUnsafeBuiltins(r.unsafeBuiltins). WithBuiltins(r.builtinDecls). @@ -1408,15 +1423,6 @@ func New(options ...func(r *Rego)) *Rego { r.generateJSON = generateJSON } - if r.pluginMgr != nil { - for _, pluginName := range r.pluginMgr.Plugins() { - p := r.pluginMgr.Plugin(pluginName) - if p0, ok := p.(TargetPlugin); ok { - r.plugins = append(r.plugins, p0) - } - } - } - if t := r.targetPlugin(r.target); t != nil { r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR) } @@ -1798,7 +1804,7 @@ func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (Prepa } // nolint: staticcheck // SA4006 false positive - data, err := r.store.Read(ctx, r.txn, storage.Path{}) + data, err := r.store.Read(ctx, r.txn, storage.RootPath) if err != nil { _ = txnClose(ctx, err) // Ignore error return PreparedEvalQuery{}, err @@ -2020,7 +2026,7 @@ func (r *Rego) loadFiles(ctx context.Context, txn storage.Transaction, m metrics } if len(result.Documents) > 0 { - err = r.store.Write(ctx, txn, storage.AddOp, storage.Path{}, result.Documents) + err = r.store.Write(ctx, txn, storage.AddOp, storage.RootPath, result.Documents) if err != nil { return err } @@ -2044,6 +2050,7 @@ func (r *Rego) loadBundles(_ context.Context, _ storage.Transaction, m metrics.M WithSkipBundleVerification(r.skipBundleVerification). WithRegoVersion(r.regoVersion). WithCapabilities(r.capabilities). + WithFilter(r.filter). 
AsBundle(path) if err != nil { return fmt.Errorf("loading error: %s", err) @@ -2201,7 +2208,7 @@ func (r *Rego) compileQuery(query ast.Body, imports []*ast.Import, _ metrics.Met if r.pkg != "" { var err error - pkg, err = ast.ParsePackage(fmt.Sprintf("package %v", r.pkg)) + pkg, err = ast.ParsePackage("package " + r.pkg) if err != nil { return nil, nil, err } @@ -2220,7 +2227,7 @@ func (r *Rego) compileQuery(query ast.Body, imports []*ast.Import, _ metrics.Met WithStrict(false) for _, extra := range extras { - qc = qc.WithStageAfter(extra.after, extra.stage) + qc = qc.WithStageAfterID(extra.after, extra.stage) } compiled, err := qc.Compile(query) @@ -2487,7 +2494,7 @@ func (r *Rego) partialResult(ctx context.Context, pCfg *PrepareConfig) (PartialR Module: module, } module.Rules[i] = rule - if checkPartialResultForRecursiveRefs(body, rule.Path()) { + if checkPartialResultForRecursiveRefs(body, module.Package.Path.Extend(rule.Head.Reference.GroundPrefix())) { return PartialResult{}, Errors{errPartialEvaluationNotEffective} } } @@ -2678,7 +2685,7 @@ func (r *Rego) rewriteQueryToCaptureValue(_ ast.QueryCompiler, query ast.Body) ( expr.Terms = ast.Equality.Expr(terms, capture).Terms r.capture[expr] = capture.Value.(ast.Var) case []*ast.Term: - tpe := r.compiler.TypeEnv.Get(terms[0]) + tpe := r.compiler.TypeEnv.GetByValue(terms[0].Value) if !types.Void(tpe) && types.Arity(tpe) == len(terms)-1 { capture = r.generateTermVar() expr.Terms = append(terms, capture) @@ -2868,7 +2875,7 @@ func (m rawModule) ParseWithOpts(opts ast.ParserOptions) (*ast.Module, error) { } type extraStage struct { - after string + after ast.StageID stage ast.QueryCompilerStageDefinition } diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go b/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go index 983de2223e..f1618799eb 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go +++ b/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go @@ -79,12 
+79,24 @@ func (ev *ExpressionValue) String() string { // return `true` for a query like `data.authz.allow = x`, which always has result // set element with value true, but could also have a binding `x: false`. func (rs ResultSet) Allowed() bool { + x, _ := ResultValue[bool](rs) + return x +} + +// ResultValue is a helper function that'll return a value of type T if all of +// these conditions hold: +// - the result set only has one element +// - there is only one expression in the result set's only element +// - that expression has type T +// - there are no bindings. +func ResultValue[T any](rs ResultSet) (T, bool) { + var zero T if len(rs) == 1 && len(rs[0].Bindings) == 0 { if exprs := rs[0].Expressions; len(exprs) == 1 { - if b, ok := exprs[0].Value.(bool); ok { - return b + if v, ok := exprs[0].Value.(T); ok { + return v, true } } } - return false + return zero, false } diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go index 941cbeef51..40f18ab0de 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go @@ -73,10 +73,9 @@ func (u *updateAST) Apply(v any) any { } func newUpdateAST(data any, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) { - switch data.(type) { case ast.Null, ast.Boolean, ast.Number, ast.String: - return nil, errors.NewNotFoundError(path) + return nil, errors.NotFoundErr } switch data := data.(type) { @@ -94,11 +93,10 @@ func newUpdateAST(data any, op storage.PatchOp, path storage.Path, idx int, valu } func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) { - if idx == len(path)-1 { if path[idx] == "-" || path[idx] == strconv.Itoa(data.Len()) { if op != storage.AddOp { - return nil, invalidPatchError("%v: invalid patch path", path) + return nil, 
errors.NewInvalidPatchError("%v: invalid patch path", path) } cpy := data.Append(ast.NewTerm(value)) @@ -161,7 +159,7 @@ func newUpdateObjectAST(data ast.Object, op storage.PatchOp, path storage.Path, switch op { case storage.ReplaceOp, storage.RemoveOp: if val == nil { - return nil, errors.NewNotFoundError(path) + return nil, errors.NotFoundErr } } return &updateAST{path, op == storage.RemoveOp, value}, nil @@ -171,14 +169,7 @@ func newUpdateObjectAST(data ast.Object, op storage.PatchOp, path storage.Path, return newUpdateAST(val.Value, op, path, idx+1, value) } - return nil, errors.NewNotFoundError(path) -} - -func interfaceToValue(v any) (ast.Value, error) { - if v, ok := v.(ast.Value); ok { - return v, nil - } - return ast.InterfaceToValue(v) + return nil, errors.NotFoundErr } // setInAst updates the value in the AST at the given path with the given value. diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go index 742d6c167f..9fa145a051 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go @@ -27,6 +27,7 @@ import ( "github.com/open-policy-agent/opa/internal/merge" "github.com/open-policy-agent/opa/v1/ast" "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/internal/errors" "github.com/open-policy-agent/opa/v1/util" ) @@ -50,6 +51,7 @@ func NewWithOpts(opts ...Opt) storage.Store { if s.returnASTValuesOnRead { s.data = ast.NewObject() + s.roundTripOnWrite = false } else { s.data = map[string]any{} } @@ -71,7 +73,7 @@ func NewFromObjectWithOpts(data map[string]any, opts ...Opt) storage.Store { if err != nil { panic(err) } - if err := db.Write(ctx, txn, storage.AddOp, storage.Path{}, data); err != nil { + if err := db.Write(ctx, txn, storage.AddOp, storage.RootPath, data); err != nil { panic(err) } if err := db.Commit(ctx, txn); err != 
nil { @@ -89,9 +91,8 @@ func NewFromReader(r io.Reader) storage.Store { // NewFromReader returns a new in-memory store from a reader that produces a // JSON serialized object, with extra options. This function is for test purposes. func NewFromReaderWithOpts(r io.Reader, opts ...Opt) storage.Store { - d := util.NewJSONDecoder(r) var data map[string]any - if err := d.Decode(&data); err != nil { + if err := util.NewJSONDecoder(r).Decode(&data); err != nil { panic(err) } return NewFromObjectWithOpts(data, opts...) @@ -120,35 +121,39 @@ type handle struct { } func (db *store) NewTransaction(_ context.Context, params ...storage.TransactionParams) (storage.Transaction, error) { - var write bool - var ctx *storage.Context + txn := &transaction{ + xid: atomic.AddUint64(&db.xid, uint64(1)), + db: db, + } + if len(params) > 0 { - write = params[0].Write - ctx = params[0].Context + txn.write = params[0].Write + txn.context = params[0].Context } - xid := atomic.AddUint64(&db.xid, uint64(1)) - if write { + + if txn.write { db.wmu.Lock() } else { db.rmu.RLock() } - return newTransaction(xid, write, ctx, db), nil + + return txn, nil } // Truncate implements the storage.Store interface. This method must be called within a transaction. 
func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params storage.TransactionParams, it storage.Iterator) error { var update *storage.Update var err error - mergedData := map[string]any{} underlying, err := db.underlying(txn) if err != nil { return err } + mergedData := map[string]any{} + for { - update, err = it.Next() - if err != nil { + if update, err = it.Next(); err != nil { break } @@ -159,8 +164,7 @@ func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params s } } else { var value any - err = util.Unmarshal(update.Value, &value) - if err != nil { + if err = util.Unmarshal(update.Value, &value); err != nil { return err } @@ -193,11 +197,7 @@ func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params s // For backwards compatibility, check if `RootOverwrite` was configured. if params.RootOverwrite { - newPath, ok := storage.ParsePathEscaped("/") - if !ok { - return fmt.Errorf("storage path invalid: %v", newPath) - } - return underlying.Write(storage.AddOp, newPath, mergedData) + return underlying.Write(storage.AddOp, storage.RootPath, mergedData) } for _, root := range params.BasePaths { @@ -310,12 +310,7 @@ func (db *store) Read(_ context.Context, txn storage.Transaction, path storage.P return nil, err } - v, err := underlying.Read(path) - if err != nil { - return nil, err - } - - return v, nil + return underlying.Read(path) } func (db *store) Write(_ context.Context, txn storage.Transaction, op storage.PatchOp, path storage.Path, value any) error { @@ -323,12 +318,19 @@ func (db *store) Write(_ context.Context, txn storage.Transaction, op storage.Pa if err != nil { return err } + + if db.returnASTValuesOnRead || !util.NeedsRoundTrip(value) { + // Fast path when value is nil, bool, string or json.Number. 
+ return underlying.Write(op, path, value) + } + val := util.Reference(value) if db.roundTripOnWrite { if err := util.RoundTrip(val); err != nil { return err } } + return underlying.Write(op, path, *val) } @@ -347,10 +349,25 @@ func (h *handle) Unregister(_ context.Context, txn storage.Transaction) { } func (db *store) runOnCommitTriggers(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) { - if db.returnASTValuesOnRead && len(db.triggers) > 0 { - // FIXME: Not very performant for large data. + // While it's unlikely, the API allows one trigger to be configured to want + // data conversion, and another that doesn't. So let's handle that properly. + var wantsDataConversion bool + if db.returnASTValuesOnRead && len(event.Data) > 0 { + for _, t := range db.triggers { + if !t.SkipDataConversion { + wantsDataConversion = true + break + } + } + } - dataEvents := make([]storage.DataEvent, 0, len(event.Data)) + var converted storage.TriggerEvent + if wantsDataConversion { + converted = storage.TriggerEvent{ + Policy: event.Policy, + Data: make([]storage.DataEvent, 0, len(event.Data)), + Context: event.Context, + } for _, dataEvent := range event.Data { if astData, ok := dataEvent.Data.(ast.Value); ok { @@ -358,25 +375,21 @@ func (db *store) runOnCommitTriggers(ctx context.Context, txn storage.Transactio if err != nil { panic(err) } - dataEvents = append(dataEvents, storage.DataEvent{ + converted.Data = append(converted.Data, storage.DataEvent{ Path: dataEvent.Path, Data: jsn, Removed: dataEvent.Removed, }) - } else { - dataEvents = append(dataEvents, dataEvent) } } - - event = storage.TriggerEvent{ - Policy: event.Policy, - Data: dataEvents, - Context: event.Context, - } } for _, t := range db.triggers { - t.OnCommit(ctx, txn, event) + if wantsDataConversion && !t.SkipDataConversion { + t.OnCommit(ctx, txn, converted) + } else { + t.OnCommit(ctx, txn, event) + } } } @@ -409,22 +422,12 @@ func (db *store) underlying(txn storage.Transaction) 
(*transaction, error) { return underlying, nil } -const rootMustBeObjectMsg = "root must be object" -const rootCannotBeRemovedMsg = "root cannot be removed" - -func invalidPatchError(f string, a ...any) *storage.Error { - return &storage.Error{ - Code: storage.InvalidPatchErr, - Message: fmt.Sprintf(f, a...), - } -} - func mktree(path []string, value any) (map[string]any, error) { if len(path) == 0 { // For 0 length path the value is the full tree. obj, ok := value.(map[string]any) if !ok { - return nil, invalidPatchError(rootMustBeObjectMsg) + return nil, errors.RootMustBeObjectErr } return obj, nil } diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go index 28e68c20f2..e76bccd013 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go @@ -7,6 +7,7 @@ package inmem import ( "container/list" "encoding/json" + "slices" "strconv" "github.com/open-policy-agent/opa/internal/deepcopy" @@ -34,13 +35,13 @@ import ( // Read transactions do not require any special handling and simply passthrough // to the underlying store. Read transactions do not support upgrade. 
type transaction struct { - xid uint64 - write bool - stale bool db *store updates *list.List - policies map[string]policyUpdate context *storage.Context + policies map[string]policyUpdate + xid uint64 + write bool + stale bool } type policyUpdate struct { @@ -48,28 +49,17 @@ type policyUpdate struct { remove bool } -func newTransaction(xid uint64, write bool, context *storage.Context, db *store) *transaction { - return &transaction{ - xid: xid, - write: write, - db: db, - policies: map[string]policyUpdate{}, - updates: list.New(), - context: context, - } -} - func (txn *transaction) ID() uint64 { return txn.xid } func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value any) error { - if !txn.write { - return &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "data write during read transaction", - } + return &storage.Error{Code: storage.InvalidTransactionErr, Message: "data write during read transaction"} + } + + if txn.updates == nil { + txn.updates = list.New() } if len(path) == 0 { @@ -85,9 +75,20 @@ func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value any) if update.Path().Equal(path) { if update.Remove() { if op != storage.AddOp { - return errors.NewNotFoundError(path) + return errors.NotFoundErr } } + // If the last update has the same path and value, we have nothing to do. + if txn.db.returnASTValuesOnRead { + if astValue, ok := update.Value().(ast.Value); ok { + if equalsValue(value, astValue) { + return nil + } + } + } else if comparableEquals(update.Value(), value) { + return nil + } + txn.updates.Remove(curr) break } @@ -106,7 +107,7 @@ func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value any) // existing update is mutated. 
if path.HasPrefix(update.Path()) { if update.Remove() { - return errors.NewNotFoundError(path) + return errors.NotFoundErr } suffix := path[len(update.Path()):] newUpdate, err := txn.db.newUpdate(update.Value(), op, suffix, 0, value) @@ -129,33 +130,53 @@ func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value any) return nil } +func comparableEquals(a, b any) bool { + switch a := a.(type) { + case nil: + return b == nil + case bool: + if vb, ok := b.(bool); ok { + return vb == a + } + case string: + if vs, ok := b.(string); ok { + return vs == a + } + case json.Number: + if vn, ok := b.(json.Number); ok { + return vn == a + } + } + return false +} + func (txn *transaction) updateRoot(op storage.PatchOp, value any) error { if op == storage.RemoveOp { - return invalidPatchError(rootCannotBeRemovedMsg) + return errors.RootCannotBeRemovedErr } var update any if txn.db.returnASTValuesOnRead { - valueAST, err := interfaceToValue(value) + valueAST, err := ast.InterfaceToValue(value) if err != nil { return err } if _, ok := valueAST.(ast.Object); !ok { - return invalidPatchError(rootMustBeObjectMsg) + return errors.RootMustBeObjectErr } update = &updateAST{ - path: storage.Path{}, + path: storage.RootPath, remove: false, value: valueAST, } } else { if _, ok := value.(map[string]any); !ok { - return invalidPatchError(rootMustBeObjectMsg) + return errors.RootMustBeObjectErr } update = &updateRaw{ - path: storage.Path{}, + path: storage.RootPath, remove: false, value: value, } @@ -163,21 +184,36 @@ func (txn *transaction) updateRoot(op storage.PatchOp, value any) error { txn.updates.Init() txn.updates.PushFront(update) + return nil } func (txn *transaction) Commit() (result storage.TriggerEvent) { result.Context = txn.context - for curr := txn.updates.Front(); curr != nil; curr = curr.Next() { - action := curr.Value.(dataUpdate) - txn.db.data = action.Apply(txn.db.data) - result.Data = append(result.Data, storage.DataEvent{ - Path: action.Path(), - Data: 
action.Value(), - Removed: action.Remove(), - }) + if txn.updates != nil { + if len(txn.db.triggers) > 0 { + result.Data = slices.Grow(result.Data, txn.updates.Len()) + } + + for curr := txn.updates.Front(); curr != nil; curr = curr.Next() { + action := curr.Value.(dataUpdate) + txn.db.data = action.Apply(txn.db.data) + + if len(txn.db.triggers) > 0 { + result.Data = append(result.Data, storage.DataEvent{ + Path: action.Path(), + Data: action.Value(), + Removed: action.Remove(), + }) + } + } + } + + if len(txn.policies) > 0 && len(txn.db.triggers) > 0 { + result.Policy = slices.Grow(result.Policy, len(txn.policies)) } + for id, upd := range txn.policies { if upd.remove { delete(txn.db.policies, id) @@ -185,11 +221,13 @@ func (txn *transaction) Commit() (result storage.TriggerEvent) { txn.db.policies[id] = upd.value } - result.Policy = append(result.Policy, storage.PolicyEvent{ - ID: id, - Data: upd.value, - Removed: upd.remove, - }) + if len(txn.db.triggers) > 0 { + result.Policy = append(result.Policy, storage.PolicyEvent{ + ID: id, + Data: upd.value, + Removed: upd.remove, + }) + } } return result } @@ -218,8 +256,7 @@ func deepcpy(v any) any { } func (txn *transaction) Read(path storage.Path) (any, error) { - - if !txn.write { + if !txn.write || txn.updates == nil { return pointer(txn.db.data, path) } @@ -231,7 +268,7 @@ func (txn *transaction) Read(path storage.Path) (any, error) { if path.HasPrefix(upd.Path()) { if upd.Remove() { - return nil, errors.NewNotFoundError(path) + return nil, errors.NotFoundErr } return pointer(upd.Value(), path[len(upd.Path()):]) } @@ -260,8 +297,7 @@ func (txn *transaction) Read(path storage.Path) (any, error) { return cpy, nil } -func (txn *transaction) ListPolicies() []string { - var ids []string +func (txn *transaction) ListPolicies() (ids []string) { for id := range txn.db.policies { if _, ok := txn.policies[id]; !ok { ids = append(ids, id) @@ -276,11 +312,13 @@ func (txn *transaction) ListPolicies() []string { } func (txn 
*transaction) GetPolicy(id string) ([]byte, error) { - if update, ok := txn.policies[id]; ok { - if !update.remove { - return update.value, nil + if txn.policies != nil { + if update, ok := txn.policies[id]; ok { + if !update.remove { + return update.value, nil + } + return nil, errors.NewNotFoundErrorf("policy id %q", id) } - return nil, errors.NewNotFoundErrorf("policy id %q", id) } if exist, ok := txn.db.policies[id]; ok { return exist, nil @@ -289,24 +327,24 @@ func (txn *transaction) GetPolicy(id string) ([]byte, error) { } func (txn *transaction) UpsertPolicy(id string, bs []byte) error { - if !txn.write { - return &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "policy write during read transaction", - } - } - txn.policies[id] = policyUpdate{bs, false} - return nil + return txn.updatePolicy(id, policyUpdate{bs, false}) } func (txn *transaction) DeletePolicy(id string) error { + return txn.updatePolicy(id, policyUpdate{nil, true}) +} + +func (txn *transaction) updatePolicy(id string, update policyUpdate) error { if !txn.write { - return &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "policy write during read transaction", - } + return &storage.Error{Code: storage.InvalidTransactionErr, Message: "policy write during read transaction"} + } + + if txn.policies == nil { + txn.policies = map[string]policyUpdate{id: update} + } else { + txn.policies[id] = update } - txn.policies[id] = policyUpdate{nil, true} + return nil } @@ -327,13 +365,33 @@ type updateRaw struct { value any // value to add/replace at path (ignored if remove is true) } +func equalsValue(a any, v ast.Value) bool { + if a, ok := a.(ast.Value); ok { + return a.Compare(v) == 0 + } + switch a := a.(type) { + case nil: + return v == ast.NullValue + case bool: + if vb, ok := v.(ast.Boolean); ok { + return bool(vb) == a + } + case string: + if vs, ok := v.(ast.String); ok { + return string(vs) == a + } + } + + return false +} + func (db *store) newUpdate(data any, op 
storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) { if db.returnASTValuesOnRead { - astData, err := interfaceToValue(data) + astData, err := ast.InterfaceToValue(data) if err != nil { return nil, err } - astValue, err := interfaceToValue(value) + astValue, err := ast.InterfaceToValue(value) if err != nil { return nil, err } @@ -343,10 +401,9 @@ func (db *store) newUpdate(data any, op storage.PatchOp, path storage.Path, idx } func newUpdateRaw(data any, op storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) { - switch data.(type) { case nil, bool, json.Number, string: - return nil, errors.NewNotFoundError(path) + return nil, errors.NotFoundErr } switch data := data.(type) { @@ -364,11 +421,10 @@ func newUpdateRaw(data any, op storage.PatchOp, path storage.Path, idx int, valu } func newUpdateArray(data []any, op storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) { - if idx == len(path)-1 { if path[idx] == "-" || path[idx] == strconv.Itoa(len(data)) { if op != storage.AddOp { - return nil, invalidPatchError("%v: invalid patch path", path) + return nil, errors.NewInvalidPatchError("%v: invalid patch path", path) } cpy := make([]any, len(data)+1) copy(cpy, data) @@ -417,7 +473,7 @@ func newUpdateObject(data map[string]any, op storage.PatchOp, path storage.Path, switch op { case storage.ReplaceOp, storage.RemoveOp: if _, ok := data[path[idx]]; !ok { - return nil, errors.NewNotFoundError(path) + return nil, errors.NotFoundErr } } return &updateRaw{path, op == storage.RemoveOp, value}, nil @@ -427,7 +483,7 @@ func newUpdateObject(data map[string]any, op storage.PatchOp, path storage.Path, return newUpdateRaw(data, op, path, idx+1, value) } - return nil, errors.NewNotFoundError(path) + return nil, errors.NotFoundErr } func (u *updateRaw) Remove() bool { diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go b/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go index 
a783caae09..cb2e811dc4 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go @@ -54,6 +54,14 @@ type NonEmptyer interface { NonEmpty(context.Context, Transaction) func([]string) (bool, error) } +// Closer is an optional interface that storage implementations can implement +// to perform cleanup operations when the store is being shut down. +// If a Store implements this interface, Close will be called during +// graceful shutdown of the OPA runtime. +type Closer interface { + Close(context.Context) error +} + // TransactionParams describes a new transaction. type TransactionParams struct { @@ -210,6 +218,10 @@ func (e TriggerEvent) DataChanged() bool { // TriggerConfig contains the trigger registration configuration. type TriggerConfig struct { + // SkipDataConversion when set to true, avoids converting data passed to + // trigger functions from the store to Go types, and instead passes the + // original representation (e.g., ast.Value). + SkipDataConversion bool // OnCommit is invoked when a transaction is successfully committed. 
The // callback is invoked with a handle to the write transaction that diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go index d13fff50fc..a478b9f257 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go @@ -11,27 +11,31 @@ import ( "github.com/open-policy-agent/opa/v1/storage" ) -const ArrayIndexTypeMsg = "array index must be integer" -const DoesNotExistMsg = "document does not exist" -const OutOfRangeMsg = "array index out of range" +const ( + ArrayIndexTypeMsg = "array index must be integer" + DoesNotExistMsg = "document does not exist" + OutOfRangeMsg = "array index out of range" + RootMustBeObjectMsg = "root must be object" + RootCannotBeRemovedMsg = "root cannot be removed" +) -func NewNotFoundError(path storage.Path) *storage.Error { - return NewNotFoundErrorWithHint(path, DoesNotExistMsg) -} +var ( + NotFoundErr = &storage.Error{Code: storage.NotFoundErr, Message: DoesNotExistMsg} + RootMustBeObjectErr = &storage.Error{Code: storage.InvalidPatchErr, Message: RootMustBeObjectMsg} + RootCannotBeRemovedErr = &storage.Error{Code: storage.InvalidPatchErr, Message: RootCannotBeRemovedMsg} +) func NewNotFoundErrorWithHint(path storage.Path, hint string) *storage.Error { - message := path.String() + ": " + hint return &storage.Error{ Code: storage.NotFoundErr, - Message: message, + Message: path.String() + ": " + hint, } } func NewNotFoundErrorf(f string, a ...any) *storage.Error { - msg := fmt.Sprintf(f, a...) 
return &storage.Error{ Code: storage.NotFoundErr, - Message: msg, + Message: fmt.Sprintf(f, a...), } } @@ -41,3 +45,10 @@ func NewWriteConflictError(p storage.Path) *storage.Error { Message: p.String(), } } + +func NewInvalidPatchError(f string, a ...any) *storage.Error { + return &storage.Error{ + Code: storage.InvalidPatchErr, + Message: fmt.Sprintf(f, a...), + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go index c5e380af04..bef39ebf49 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go @@ -21,7 +21,7 @@ func Ptr(data any, path storage.Path) (any, error) { case map[string]any: var ok bool if node, ok = curr[key]; !ok { - return nil, errors.NewNotFoundError(path) + return nil, errors.NotFoundErr } case []any: pos, err := ValidateArrayIndex(curr, key, path) @@ -30,7 +30,7 @@ func Ptr(data any, path storage.Path) (any, error) { } node = curr[pos] default: - return nil, errors.NewNotFoundError(path) + return nil, errors.NotFoundErr } } @@ -38,24 +38,45 @@ func Ptr(data any, path storage.Path) (any, error) { } func ValuePtr(data ast.Value, path storage.Path) (ast.Value, error) { + var keyTerm *ast.Term + + defer func() { + if keyTerm != nil { + ast.TermPtrPool.Put(keyTerm) + } + }() + node := data for i := range path { key := path[i] switch curr := node.(type) { case ast.Object: - // This term is only created for the lookup, which is not.. ideal. - // By using the pool, we can at least avoid allocating the term itself, - // while still having to pay 1 allocation for the value. A better solution - // would be dynamically interned string terms. 
- keyTerm := ast.TermPtrPool.Get() - keyTerm.Value = ast.String(key) - - val := curr.Get(keyTerm) - ast.TermPtrPool.Put(keyTerm) - if val == nil { - return nil, errors.NewNotFoundError(path) + // Note(anders): + // This term is only created for the lookup, which is not great — especially + // considering the path likely was converted from a ref, where we had all + // the terms available already! Without chaging the storage API, our options + // for performant lookups are limitied to using interning or a pool. Prefer + // interning when possible, as that is zero alloc. Using the pool avoids at + // least allocating a new term for every lookup, but still requires an alloc + // for the string Value. + if ast.HasInternedValue(key) { + if val := curr.Get(ast.InternedTerm(key)); val != nil { + node = val.Value + } else { + return nil, errors.NotFoundErr + } + } else { + if keyTerm == nil { + keyTerm = ast.TermPtrPool.Get() + } + // 1 alloc + keyTerm.Value = ast.String(key) + if val := curr.Get(keyTerm); val != nil { + node = val.Value + } else { + return nil, errors.NotFoundErr + } } - node = val.Value case *ast.Array: pos, err := ValidateASTArrayIndex(curr, key, path) if err != nil { @@ -63,7 +84,7 @@ func ValuePtr(data ast.Value, path storage.Path) (ast.Value, error) { } node = curr.Elem(pos).Value default: - return nil, errors.NewNotFoundError(path) + return nil, errors.NotFoundErr } } diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/path.go b/vendor/github.com/open-policy-agent/opa/v1/storage/path.go index f774d2eeda..16bb3e42c5 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/storage/path.go +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/path.go @@ -8,40 +8,40 @@ import ( "errors" "fmt" "net/url" + "slices" "strconv" "strings" "github.com/open-policy-agent/opa/v1/ast" ) +// RootPath refers to the root document in storage. +var RootPath = Path{} + // Path refers to a document in storage. 
type Path []string // ParsePath returns a new path for the given str. func ParsePath(str string) (path Path, ok bool) { - if len(str) == 0 { - return nil, false - } - if str[0] != '/' { + if len(str) == 0 || str[0] != '/' { return nil, false } if len(str) == 1 { return Path{}, true } - parts := strings.Split(str[1:], "/") - return parts, true + + return strings.Split(str[1:], "/"), true } // ParsePathEscaped returns a new path for the given escaped str. func ParsePathEscaped(str string) (path Path, ok bool) { - path, ok = ParsePath(str) - if !ok { - return - } - for i := range path { - segment, err := url.PathUnescape(path[i]) - if err == nil { - path[i] = segment + if path, ok = ParsePath(str); ok { + for i := range path { + if segment, err := url.PathUnescape(path[i]); err == nil { + path[i] = segment + } else { + return nil, false + } } } return @@ -49,7 +49,6 @@ func ParsePathEscaped(str string) (path Path, ok bool) { // NewPathForRef returns a new path for the given ref. func NewPathForRef(ref ast.Ref) (path Path, err error) { - if len(ref) == 0 { return nil, errors.New("empty reference (indicates error in caller)") } @@ -85,36 +84,17 @@ func NewPathForRef(ref ast.Ref) (path Path, err error) { // is less than other, 0 if p is equal to other, or 1 if p is greater than // other. func (p Path) Compare(other Path) (cmp int) { - for i := range min(len(p), len(other)) { - if cmp := strings.Compare(p[i], other[i]); cmp != 0 { - return cmp - } - } - if len(p) < len(other) { - return -1 - } - if len(p) == len(other) { - return 0 - } - return 1 + return slices.Compare(p, other) } // Equal returns true if p is the same as other. func (p Path) Equal(other Path) bool { - return p.Compare(other) == 0 + return slices.Equal(p, other) } // HasPrefix returns true if p starts with other. 
func (p Path) HasPrefix(other Path) bool { - if len(other) > len(p) { - return false - } - for i := range other { - if p[i] != other[i] { - return false - } - } - return true + return len(other) <= len(p) && p[:len(other)].Equal(other) } // Ref returns a ref that represents p rooted at head. diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go index eec49f7b88..356e7b38b0 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go @@ -45,12 +45,13 @@ func builtinSum(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err // Non-integer values found, so we need to sum as floats. sum := big.NewFloat(0) + tmp := new(big.Float) err := a.Iter(func(x *ast.Term) error { n, ok := x.Value.(ast.Number) if !ok { return builtins.NewOperandElementErr(1, a, x.Value, "number") } - sum = new(big.Float).Add(sum, builtins.NumberToFloat(n)) + sum = new(big.Float).Add(sum, builtins.NumberToFloatInto(tmp, n)) return nil }) if err != nil { @@ -74,12 +75,13 @@ func builtinSum(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err } sum := big.NewFloat(0) + tmp := new(big.Float) err := a.Iter(func(x *ast.Term) error { n, ok := x.Value.(ast.Number) if !ok { return builtins.NewOperandElementErr(1, a, x.Value, "number") } - sum = new(big.Float).Add(sum, builtins.NumberToFloat(n)) + sum = new(big.Float).Add(sum, builtins.NumberToFloatInto(tmp, n)) return nil }) if err != nil { @@ -94,12 +96,13 @@ func builtinProduct(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) switch a := operands[0].Value.(type) { case *ast.Array: product := big.NewFloat(1) + tmp := new(big.Float) err := a.Iter(func(x *ast.Term) error { n, ok := x.Value.(ast.Number) if !ok { return builtins.NewOperandElementErr(1, a, x.Value, "number") } - product = new(big.Float).Mul(product, builtins.NumberToFloat(n)) + product 
= new(big.Float).Mul(product, builtins.NumberToFloatInto(tmp, n)) return nil }) if err != nil { @@ -108,12 +111,13 @@ func builtinProduct(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) return iter(ast.NewTerm(builtins.FloatToNumber(product))) case ast.Set: product := big.NewFloat(1) + tmp := new(big.Float) err := a.Iter(func(x *ast.Term) error { n, ok := x.Value.(ast.Number) if !ok { return builtins.NewOperandElementErr(1, a, x.Value, "number") } - product = new(big.Float).Mul(product, builtins.NumberToFloat(n)) + product = new(big.Float).Mul(product, builtins.NumberToFloatInto(tmp, n)) return nil }) if err != nil { @@ -177,7 +181,7 @@ func builtinMin(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err // The null term is considered to be less than any other term, // so in order for min of a set to make sense, we need to check // for it. - if min.Value.Compare(ast.InternedNullTerm.Value) == 0 { + if min.Value.Compare(ast.InternedNullValue) == 0 { return elem, nil } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go index 526e3ed26d..ca40b7793f 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go @@ -43,6 +43,42 @@ func builtinArrayConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T return iter(ast.ArrayTerm(arrC...)) } +func builtinArrayFlatten(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + arr, err := builtins.ArrayOperand(operands[0].Value, 1) + if err != nil { + return err + } + + size := arr.Len() + preAlloc := size + containsArray := false + + for i := range size { + if nested, ok := arr.Elem(i).Value.(*ast.Array); ok { + containsArray = true + preAlloc += nested.Len() - 1 + } + } + + if !containsArray && size == preAlloc { + return iter(operands[0]) // Empty array, or no nested arrays -> nothing to flatten. 
+ } + + flattened := make([]*ast.Term, 0, preAlloc) + for i := range size { + elem := arr.Elem(i) + if nested, ok := elem.Value.(*ast.Array); ok { + for j := range nested.Len() { + flattened = append(flattened, nested.Elem(j)) + } + } else { + flattened = append(flattened, elem) + } + } + + return iter(ast.ArrayTerm(flattened...)) +} + func builtinArraySlice(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { arr, err := builtins.ArrayOperand(operands[0].Value, 1) if err != nil { @@ -59,12 +95,14 @@ func builtinArraySlice(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te return err } + l := arr.Len() + // Clamp stopIndex to avoid out-of-range errors. If negative, clamp to zero. // Otherwise, clamp to length of array. if stopIndex < 0 { stopIndex = 0 - } else if stopIndex > arr.Len() { - stopIndex = arr.Len() + } else if stopIndex > l { + stopIndex = l } // Clamp startIndex to avoid out-of-range errors. If negative, clamp to zero. @@ -75,7 +113,7 @@ func builtinArraySlice(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te startIndex = stopIndex } - if startIndex == 0 && stopIndex >= arr.Len() { + if startIndex == 0 && stopIndex >= l { return iter(operands[0]) } @@ -89,8 +127,16 @@ func builtinArrayReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast. } length := arr.Len() - reversedArr := make([]*ast.Term, length) + if length == 0 { + return iter(ast.InternedEmptyArray) + } + + if length == 1 { + return iter(operands[0]) + } + + reversedArr := make([]*ast.Term, length) for index := range length { reversedArr[index] = arr.Elem(length - index - 1) } @@ -100,6 +146,7 @@ func builtinArrayReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast. 
func init() { RegisterBuiltinFunc(ast.ArrayConcat.Name, builtinArrayConcat) + RegisterBuiltinFunc(ast.ArrayFlatten.Name, builtinArrayFlatten) RegisterBuiltinFunc(ast.ArraySlice.Name, builtinArraySlice) RegisterBuiltinFunc(ast.ArrayReverse.Name, builtinArrayReverse) } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go index 9dd55f1ba7..809a31676c 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go @@ -40,6 +40,14 @@ func newBindings(id uint64, instr *Instrumentation) *bindings { return &bindings{id, values, instr} } +// newBindingsWithSize creates bindings pre-sized for the expected number of entries. +// This avoids over-allocation when the binding count is known in advance (e.g., function arguments). +// For sizeHint <= maxLinearScan, it uses array mode; for larger hints, it pre-allocates a map. +func newBindingsWithSize(id uint64, instr *Instrumentation, sizeHint int) *bindings { + values := newBindingsArrayHashmapWithSize(sizeHint) + return &bindings{id, values, instr} +} + func (u *bindings) Iter(caller *bindings, iter func(*ast.Term, *ast.Term) error) error { var err error @@ -216,16 +224,25 @@ func (vis namespacingVisitor) Visit(x any) bool { switch x := x.(type) { case *ast.ArrayComprehension: x.Term = vis.namespaceTerm(x.Term) - ast.NewGenericVisitor(vis.Visit).Walk(x.Body) + vis := ast.NewGenericVisitor(vis.Visit) + for _, expr := range x.Body { + vis.Walk(expr) + } return true case *ast.SetComprehension: x.Term = vis.namespaceTerm(x.Term) - ast.NewGenericVisitor(vis.Visit).Walk(x.Body) + vis := ast.NewGenericVisitor(vis.Visit) + for _, expr := range x.Body { + vis.Walk(expr) + } return true case *ast.ObjectComprehension: x.Key = vis.namespaceTerm(x.Key) x.Value = vis.namespaceTerm(x.Value) - ast.NewGenericVisitor(vis.Visit).Walk(x.Body) + vis := 
ast.NewGenericVisitor(vis.Visit) + for _, expr := range x.Body { + vis.Walk(expr) + } return true case *ast.Expr: switch terms := x.Terms.(type) { @@ -291,12 +308,16 @@ func (vis namespacingVisitor) namespaceTerm(a *ast.Term) *ast.Term { const maxLinearScan = 16 -// bindingsArrayHashMap uses an array with linear scan instead +// bindingsArrayHashMap uses a dynamically growing slice with linear scan instead // of a hash map for smaller # of entries. Hash maps start to // show off their performance advantage only after 16 keys. +// +// Memory optimization: The slice grows incrementally (2 -> 4 -> 8 -> 16) to avoid +// wasting memory when only a few bindings are used. This is critical for scenarios +// like comprehensions and functions with few arguments that are called thousands of times. type bindingsArrayHashmap struct { - n int // Entries in the array. - a *[maxLinearScan]bindingArrayKeyValue + n int // Entries in the slice. + a []bindingArrayKeyValue m map[ast.Var]bindingArrayKeyValue } @@ -309,29 +330,74 @@ func newBindingsArrayHashmap() bindingsArrayHashmap { return bindingsArrayHashmap{} } +// newBindingsArrayHashmapWithSize creates a bindingsArrayHashmap pre-sized for the expected number of entries. +// This optimization reduces memory waste when the binding count is known in advance. 
+// +// Size selection strategy: +// - sizeHint == 0: lazy allocation (no pre-allocation) +// - sizeHint <= maxLinearScan: pre-allocate slice with exact capacity to avoid reallocation +// - sizeHint > maxLinearScan: pre-allocate map with exact capacity +// +// Memory impact example: +// - Without hint: dynamic growth 0 -> 2 -> 4 -> 8 -> 16 (saves memory for small counts) +// - With hint=2: pre-allocates slice with capacity 2 (exact fit, no waste) +// - With hint=20: pre-allocates map with capacity 20 (saves array allocation + reallocation) +func newBindingsArrayHashmapWithSize(sizeHint int) bindingsArrayHashmap { + if sizeHint <= 0 { + // For unknown sizes, use default lazy allocation with dynamic growth. + return bindingsArrayHashmap{} + } + + if sizeHint <= maxLinearScan { + // For small known sizes, pre-allocate slice with exact capacity to avoid growth overhead. + return bindingsArrayHashmap{ + a: make([]bindingArrayKeyValue, 0, sizeHint), + } + } + + // For larger sizes, pre-allocate map to avoid array allocation + transition cost. 
+ return bindingsArrayHashmap{ + m: make(map[ast.Var]bindingArrayKeyValue, sizeHint), + } +} + func (b *bindingsArrayHashmap) Put(key *ast.Term, value value) { if b.m == nil { - if b.a == nil { - b.a = new([maxLinearScan]bindingArrayKeyValue) - } else if i := b.find(key); i >= 0 { + // Check if key already exists and update value + if i := b.find(key); i >= 0 { b.a[i].value = value return } + // Still room in slice mode (< maxLinearScan) if b.n < maxLinearScan { - b.a[b.n] = bindingArrayKeyValue{key, value} + // Grow slice if needed using exponential growth strategy + if b.n == cap(b.a) { + newCap := cap(b.a) * 2 + if newCap == 0 { + newCap = 2 // Start with 2 elements + } + if newCap > maxLinearScan { + newCap = maxLinearScan + } + newA := make([]bindingArrayKeyValue, b.n, newCap) + copy(newA, b.a) + b.a = newA + } + b.a = append(b.a, bindingArrayKeyValue{key, value}) b.n++ return } - // Array is full, revert to using the hash map instead. - + // Slice is full (reached maxLinearScan), transition to map mode. 
b.m = make(map[ast.Var]bindingArrayKeyValue, maxLinearScan+1) - for _, kv := range *b.a { + for _, kv := range b.a { b.m[kv.key.Value.(ast.Var)] = bindingArrayKeyValue{kv.key, kv.value} } b.m[key.Value.(ast.Var)] = bindingArrayKeyValue{key, value} + // Clear slice to allow GC + b.a = nil b.n = 0 return } @@ -363,7 +429,8 @@ func (b *bindingsArrayHashmap) Delete(key *ast.Term) { if i < n { b.a[i] = b.a[n] } - + // Shrink slice to reflect deletion + b.a = b.a[:n] b.n = n } return @@ -374,9 +441,11 @@ func (b *bindingsArrayHashmap) Delete(key *ast.Term) { func (b *bindingsArrayHashmap) Iter(f func(k *ast.Term, v value) bool) { if b.m == nil { - for i := range b.n { - if f(b.a[i].key, b.a[i].value) { - return + if b.a != nil { + for i := range b.n { + if f(b.a[i].key, b.a[i].value) { + return + } } } return @@ -390,6 +459,9 @@ func (b *bindingsArrayHashmap) Iter(f func(k *ast.Term, v value) bool) { } func (b *bindingsArrayHashmap) find(key *ast.Term) int { + if b.a == nil || b.n == 0 { + return -1 + } v := key.Value.(ast.Var) for i := range b.n { if b.a[i].key.Value.(ast.Var) == v { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go index 7a1bdede6b..c81b772484 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go @@ -209,8 +209,7 @@ func SetOperand(x ast.Value, pos int) (ast.Set, error) { return s, nil } -// StringOperand converts x to a string. If the cast fails, a descriptive error is -// returned. +// StringOperand returns x as [ast.String], or a descriptive error if the conversion fails. 
func StringOperand(x ast.Value, pos int) (ast.String, error) { s, ok := x.(ast.String) if !ok { @@ -219,6 +218,17 @@ func StringOperand(x ast.Value, pos int) (ast.String, error) { return s, nil } +// StringOperandByteSlice returns x a []byte, assuming x is [ast.String], or a descriptive error +// if that is not the case. The returned byte slice points directly at the underlying array backing +// the string, and should not be modified. +func StringOperandByteSlice(x ast.Value, pos int) ([]byte, error) { + s, err := StringOperand(x, pos) + if err != nil { + return nil, err + } + return util.StringToByteSlice(string(s)), nil +} + // ObjectOperand converts x to an object. If the cast fails, a descriptive // error is returned. func ObjectOperand(x ast.Value, pos int) (ast.Object, error) { @@ -241,11 +251,18 @@ func ArrayOperand(x ast.Value, pos int) (*ast.Array, error) { // NumberToFloat converts n to a big float. func NumberToFloat(n ast.Number) *big.Float { - r, ok := new(big.Float).SetString(string(n)) - if !ok { + return NumberToFloatInto(nil, n) +} + +// NumberToFloatInto converts n to a big float, storing it in dst when provided. +func NumberToFloatInto(dst *big.Float, n ast.Number) *big.Float { + if dst == nil { + dst = new(big.Float) + } + if _, ok := dst.SetString(string(n)); !ok { panic("illegal value") } - return r + return dst } // FloatToNumber converts f to a number. diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go index d514bed787..1c2eacc99e 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go @@ -32,7 +32,7 @@ func getDefaultInterQueryBuiltinValueCacheConfig(name string) *NamedValueCacheCo // RegisterDefaultInterQueryBuiltinValueCacheConfig registers a default configuration for the inter-query value cache; // used when none has been explicitly configured. 
-// To disable a named cache when not configured, pass a nil config. +// To disable a named cache when not configured, pass a config with the disabled value set to true. func RegisterDefaultInterQueryBuiltinValueCacheConfig(name string, config *NamedValueCacheConfig) { interQueryBuiltinValueCacheDefaultConfigs[name] = config } @@ -58,7 +58,8 @@ func (c *Config) Clone() *Config { // NamedValueCacheConfig represents the configuration of a named cache that built-in functions can utilize. // A default configuration to be used if not explicitly configured can be registered using RegisterDefaultInterQueryBuiltinValueCacheConfig. type NamedValueCacheConfig struct { - MaxNumEntries *int `json:"max_num_entries,omitempty"` + MaxNumEntries *int `json:"max_num_entries,omitempty"` + Disabled *bool `json:"disabled,omitempty"` } // Clone creates a deep copy of NamedValueCacheConfig. @@ -73,6 +74,10 @@ func (n *NamedValueCacheConfig) Clone() *NamedValueCacheConfig { maxEntries := *n.MaxNumEntries clone.MaxNumEntries = &maxEntries } + if n.Disabled != nil { + disabled := *n.Disabled + clone.Disabled = &disabled + } return clone } @@ -220,9 +225,15 @@ func (c *Config) validateAndInjectDefaults() error { } for name, namedConfig := range c.InterQueryBuiltinValueCache.NamedCacheConfigs { - numEntries := *namedConfig.MaxNumEntries - if numEntries < 0 { - return fmt.Errorf("invalid max_num_entries %v for named cache %v", numEntries, name) + if namedConfig == nil || (namedConfig.MaxNumEntries == nil && namedConfig.Disabled == nil) { + return fmt.Errorf("missing configuration for named cache %v", name) + } + + if namedConfig.MaxNumEntries != nil { + numEntries := *namedConfig.MaxNumEntries + if numEntries < 0 { + return fmt.Errorf("invalid max_num_entries %v for named cache %v", numEntries, name) + } } } @@ -605,6 +616,10 @@ func (c *interQueryBuiltinValueCache) GetCache(name string) InterQueryValueCache return nil } + if config.Disabled != nil && *config.Disabled { + return nil + } + nc = 
&interQueryValueCacheBucket{ items: *newItemsMap(), config: config, diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go index 12a4414963..e6e2bb3ae7 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go @@ -300,7 +300,7 @@ func builtinNetCIDRMerge(_ BuiltinContext, operands []*ast.Term, iter func(*ast. merged := evalNetCIDRMerge(networks) - result := ast.NewSet() + result := ast.NewSetWithCapacity(len(merged)) for _, network := range merged { result.Add(ast.StringTerm(network.String())) } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go index 7767e7ff52..607855632d 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go @@ -9,6 +9,7 @@ import ( "sort" "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" ) // CopyPropagator implements a simple copy propagation optimization to remove @@ -49,17 +50,7 @@ func (l *localVarGenerator) Generate() ast.Var { // New returns a new CopyPropagator that optimizes queries while preserving vars // in the livevars set. func New(livevars ast.VarSet) *CopyPropagator { - - sorted := make([]ast.Var, 0, len(livevars)) - for v := range livevars { - sorted = append(sorted, v) - } - - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].Compare(sorted[j]) < 0 - }) - - return &CopyPropagator{livevars: livevars, sorted: sorted, localvargen: &localVarGenerator{}} + return &CopyPropagator{livevars: livevars, sorted: util.KeysSorted(livevars), localvargen: &localVarGenerator{}} } // WithEnsureNonEmptyBody configures p to ensure that results are always non-empty. 
@@ -344,7 +335,7 @@ func (p *CopyPropagator) livevarRef(a *ast.Term) bool { } for _, v := range p.sorted { - if ref[0].Value.Compare(v) == 0 { + if v.Equal(ref[0].Value) { return true } } @@ -403,7 +394,7 @@ func containedIn(value ast.Value, x any) bool { if v, ok := value.(ast.Ref); ok { match = x.HasPrefix(v) } else { - match = x.Compare(value) == 0 + match = x.Equal(value) } if stop || match { stop = true diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go index 144c01ee95..f4ca23fae5 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go @@ -255,17 +255,17 @@ func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err er } func builtinCryptoX509ParseKeyPair(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - certificate, err := builtins.StringOperand(operands[0].Value, 1) + certificate, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - key, err := builtins.StringOperand(operands[1].Value, 1) + key, err := builtins.StringOperandByteSlice(operands[1].Value, 1) if err != nil { return err } - certs, err := getTLSx509KeyPairFromString([]byte(certificate), []byte(key)) + certs, err := getTLSx509KeyPairFromString(certificate, key) if err != nil { return err } @@ -326,10 +326,7 @@ func builtinCryptoX509ParseCertificateRequest(_ BuiltinContext, operands []*ast. 
} func builtinCryptoJWKFromPrivateKey(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - var x any - - a := operands[0].Value - input, err := builtins.StringOperand(a, 1) + input, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err } @@ -371,6 +368,7 @@ func builtinCryptoJWKFromPrivateKey(_ BuiltinContext, operands []*ast.Term, iter return err } + var x any if err := util.UnmarshalJSON(jsonKey, &x); err != nil { return err } @@ -430,53 +428,51 @@ func toHexEncodedString(src []byte) string { } func builtinCryptoMd5(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - md5sum := md5.Sum([]byte(s)) + md5sum := md5.Sum(bs) return iter(ast.StringTerm(toHexEncodedString(md5sum[:]))) } func builtinCryptoSha1(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - sha1sum := sha1.Sum([]byte(s)) + sha1sum := sha1.Sum(bs) return iter(ast.StringTerm(toHexEncodedString(sha1sum[:]))) } func builtinCryptoSha256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - sha256sum := sha256.Sum256([]byte(s)) + sha256sum := sha256.Sum256(bs) return iter(ast.StringTerm(toHexEncodedString(sha256sum[:]))) } func hmacHelper(operands []*ast.Term, iter func(*ast.Term) error, h func() hash.Hash) error { - a1 := operands[0].Value - message, err := builtins.StringOperand(a1, 1) + message, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - a2 := operands[1].Value - key, err := 
builtins.StringOperand(a2, 2) + key, err := builtins.StringOperandByteSlice(operands[1].Value, 2) if err != nil { return err } - mac := hmac.New(h, []byte(key)) - mac.Write([]byte(message)) + mac := hmac.New(h, key) + mac.Write(message) messageDigest := mac.Sum(nil) return iter(ast.StringTerm(hex.EncodeToString(messageDigest))) @@ -499,21 +495,17 @@ func builtinCryptoHmacSha512(_ BuiltinContext, operands []*ast.Term, iter func(* } func builtinCryptoHmacEqual(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - a1 := operands[0].Value - mac1, err := builtins.StringOperand(a1, 1) + mac1, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - a2 := operands[1].Value - mac2, err := builtins.StringOperand(a2, 2) + mac2, err := builtins.StringOperandByteSlice(operands[1].Value, 2) if err != nil { return err } - res := hmac.Equal([]byte(mac1), []byte(mac2)) - - return iter(ast.InternedTerm(res)) + return iter(ast.InternedTerm(hmac.Equal(mac1, mac2))) } func init() { @@ -668,7 +660,7 @@ func addCACertsFromFile(pool *x509.CertPool, filePath string) (*x509.CertPool, e pool = x509.NewCertPool() } - caCert, err := readCertFromFile(filePath) + caCert, err := os.ReadFile(filePath) if err != nil { return nil, err } @@ -703,17 +695,7 @@ func addCACertsFromEnv(pool *x509.CertPool, envName string) (*x509.CertPool, err return nil, fmt.Errorf("could not add CA certificates from envvar %q: %w", envName, err) } - return pool, err -} - -// ReadCertFromFile reads a cert from file -func readCertFromFile(localCertFile string) ([]byte, error) { - // Read in the cert file - certPEM, err := os.ReadFile(localCertFile) - if err != nil { - return nil, err - } - return certPEM, nil + return pool, nil } var beginPrefix = []byte("-----BEGIN ") @@ -771,13 +753,3 @@ func getTLSx509KeyPairFromString(certPemBlock []byte, keyPemBlock []byte) (*tls. 
return &cert, nil } - -// ReadKeyFromFile reads a key from file -func readKeyFromFile(localKeyFile string) ([]byte, error) { - // Read in the cert file - key, err := os.ReadFile(localKeyFile) - if err != nil { - return nil, err - } - return key, nil -} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go index 541b50d0a9..5ed8df68f3 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go @@ -5,7 +5,6 @@ package topdown import ( - "bytes" "encoding/base64" "encoding/hex" "encoding/json" @@ -21,7 +20,6 @@ import ( ) func builtinJSONMarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - asJSON, err := ast.JSON(operands[0].Value) if err != nil { return err @@ -32,11 +30,10 @@ func builtinJSONMarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T return err } - return iter(ast.StringTerm(string(bs))) + return iter(ast.StringTerm(util.ByteSliceToString(bs))) } func builtinJSONMarshalWithOpts(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - asJSON, err := ast.JSON(operands[0].Value) if err != nil { return err @@ -101,36 +98,34 @@ func builtinJSONMarshalWithOpts(_ BuiltinContext, operands []*ast.Term, iter fun } var bs []byte - if shouldPrettyPrint { bs, err = json.MarshalIndent(asJSON, prefixWith, indentWith) } else { bs, err = json.Marshal(asJSON) } - if err != nil { return err } + s := util.ByteSliceToString(bs) + if shouldPrettyPrint { // json.MarshalIndent() function will not prefix the first line of emitted JSON - return iter(ast.StringTerm(prefixWith + string(bs))) + return iter(ast.StringTerm(prefixWith + s)) } - return iter(ast.StringTerm(string(bs))) + return iter(ast.StringTerm(s)) } func builtinJSONUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - str, err := 
builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } var x any - - if err := util.UnmarshalJSON([]byte(str), &x); err != nil { + if err := util.UnmarshalJSON(bs, &x); err != nil { return err } v, err := ast.InterfaceToValue(x) @@ -141,22 +136,21 @@ func builtinJSONUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast } func builtinJSONIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return iter(ast.InternedTerm(false)) } - return iter(ast.InternedTerm(json.Valid([]byte(str)))) + return iter(ast.InternedTerm(json.Valid(bs))) } func builtinBase64Encode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - return iter(ast.StringTerm(base64.StdEncoding.EncodeToString([]byte(str)))) + return iter(ast.StringTerm(base64.StdEncoding.EncodeToString(bs))) } func builtinBase64Decode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -183,20 +177,20 @@ func builtinBase64IsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast } func builtinBase64UrlEncode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - return iter(ast.StringTerm(base64.URLEncoding.EncodeToString([]byte(str)))) + return iter(ast.StringTerm(base64.URLEncoding.EncodeToString(bs))) } func builtinBase64UrlEncodeNoPad(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - str, err := builtins.StringOperand(operands[0].Value, 1) + 
bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - return iter(ast.StringTerm(base64.RawURLEncoding.EncodeToString([]byte(str)))) + return iter(ast.StringTerm(base64.RawURLEncoding.EncodeToString(bs))) } func builtinBase64UrlDecode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -293,7 +287,7 @@ func builtinURLQueryDecodeObject(_ BuiltinContext, operands []*ast.Term, iter fu return err } - queryObject := ast.NewObject() + queryObject := ast.NewObjectWithCapacity(len(queryParams)) for k, v := range queryParams { paramsArray := make([]*ast.Term, len(v)) for i, param := range v { @@ -306,45 +300,39 @@ func builtinURLQueryDecodeObject(_ BuiltinContext, operands []*ast.Term, iter fu } func builtinYAMLMarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - asJSON, err := ast.JSON(operands[0].Value) if err != nil { return err } - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - if err := encoder.Encode(asJSON); err != nil { - return err - } - - bs, err := yaml.JSONToYAML(buf.Bytes()) + bs, err := yaml.Marshal(asJSON) if err != nil { return err } - return iter(ast.StringTerm(string(bs))) + return iter(ast.StringTerm(util.ByteSliceToString(bs))) } func builtinYAMLUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - bs, err := yaml.YAMLToJSON([]byte(str)) + js, err := yaml.YAMLToJSON(bs) if err != nil { return err } - buf := bytes.NewBuffer(bs) - decoder := util.NewJSONDecoder(buf) + reader := ast.BytesReaderPool.Get() + defer ast.BytesReaderPool.Put(reader) + reader.Reset(js) + var val any - err = decoder.Decode(&val) - if err != nil { + if err = util.NewJSONDecoder(reader).Decode(&val); err != nil { return err } + v, err := ast.InterfaceToValue(val) if err != nil { return 
err @@ -353,22 +341,22 @@ func builtinYAMLUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast } func builtinYAMLIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return iter(ast.InternedTerm(false)) } var x any - err = yaml.Unmarshal([]byte(str), &x) + err = yaml.Unmarshal(bs, &x) return iter(ast.InternedTerm(err == nil)) } func builtinHexEncode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - return iter(ast.StringTerm(hex.EncodeToString([]byte(str)))) + return iter(ast.StringTerm(hex.EncodeToString(bs))) } func builtinHexDecode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go index cadd163198..e80339e312 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go @@ -6,9 +6,9 @@ package topdown import ( "errors" - "fmt" "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" ) // Halt is a special error type that built-in function implementations return to indicate @@ -82,13 +82,27 @@ func (e *Error) Is(target error) bool { } func (e *Error) Error() string { - msg := fmt.Sprintf("%v: %v", e.Code, e.Message) + buf, _ := e.AppendText(make([]byte, 0, e.StringLength())) + return util.ByteSliceToString(buf) +} +func (e *Error) AppendText(buf []byte) ([]byte, error) { if e.Location != nil { - msg = e.Location.String() + ": " + msg + buf, _ := e.Location.AppendText(buf) + buf = append(append(buf, ": "...), e.Code...) 
+ buf = append(append(buf, ": "...), e.Message...) + return buf, nil } - return msg + return append(append(append(buf, e.Code...), ": "...), e.Message...), nil +} + +func (e *Error) StringLength() int { + l := len(e.Code) + 2 + len(e.Message) + if e.Location != nil { + l += e.Location.StringLength() + 2 + } + return l } func (e *Error) Wrap(err error) *Error { @@ -124,11 +138,11 @@ func objectDocKeyConflictErr(loc *ast.Location) error { } } -func unsupportedBuiltinErr(loc *ast.Location) error { +func unsupportedBuiltinErr(loc *ast.Location, name string) error { return &Error{ Code: InternalErr, Location: loc, - Message: "unsupported built-in", + Message: "unsupported built-in: " + name, } } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go index f0f301e6a7..6f93ba530e 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go @@ -32,6 +32,13 @@ type queryIDFactory struct { curr uint64 } +type biunifyArraysRecParams struct { + a, b *ast.Array + b1, b2 *bindings + iter unifyIterator + idx int +} + // Note: The first call to Next() returns 0. 
func (f *queryIDFactory) Next() uint64 { curr := f.curr @@ -106,6 +113,7 @@ type eval struct { tracers []QueryTracer tracingOpts tracing.Options queryID uint64 + timeStart int64 index int genvarid int indexing bool @@ -118,26 +126,54 @@ type eval struct { defined bool } -type evp struct { - pool sync.Pool +type ( + evfp struct{ pool sync.Pool } + evbp struct{ pool sync.Pool } +) + +func (ep *evfp) Put(e *evalFunc) { + if e != nil { + e.e, e.terms, e.ir = nil, nil, nil + ep.pool.Put(e) + } } -func (ep *evp) Put(e *eval) { - ep.pool.Put(e) +func (ep *evfp) Get() *evalFunc { + return ep.pool.Get().(*evalFunc) } -func (ep *evp) Get() *eval { - return ep.pool.Get().(*eval) +func (ep *evbp) Put(e *evalBuiltin) { + if e != nil { + e.e, e.bi, e.bctx, e.f, e.terms = nil, nil, nil, nil, nil + ep.pool.Put(e) + } } -var evalPool = evp{ - pool: sync.Pool{ - New: func() any { - return &eval{} - }, - }, +func (ep *evbp) Get() *evalBuiltin { + return ep.pool.Get().(*evalBuiltin) } +var ( + evalPool = util.NewSyncPool[eval]() + deecPool = util.NewSyncPool[deferredEarlyExitContainer]() + resolverPool = util.NewSyncPool[evalResolver]() + arraysRecPool = util.NewSyncPool[biunifyArraysRecParams]() + evalFuncPool = &evfp{ + pool: sync.Pool{ + New: func() any { + return &evalFunc{} + }, + }, + } + evalBuiltinPool = &evbp{ + pool: sync.Pool{ + New: func() any { + return &evalBuiltin{} + }, + }, + } +) + func (e *eval) Run(iter evalIterator) error { if !e.traceEnabled { // avoid function literal escaping to heap if we don't need the trace @@ -171,16 +207,17 @@ func (e *eval) string(s *strings.Builder) { func (e *eval) builtinFunc(name string) (*ast.Builtin, BuiltinFunc, bool) { decl, ok := ast.BuiltinMap[name] if ok { - f, ok := builtinFunctions[name] - if ok { + if f, ok := builtinFunctions[name]; ok { return decl, f, true } - } else { - bi, ok := e.builtins[name] - if ok { - return bi.Decl, bi.Func, true + if bi, ok := e.builtins[name]; ok { + return decl, bi.Func, true } } + if bi, ok 
:= e.builtins[name]; ok { + return bi.Decl, bi.Func, true + } + return nil, nil, false } @@ -193,12 +230,14 @@ func (e *eval) closure(query ast.Body, cpy *eval) { cpy.findOne = false } -func (e *eval) child(query ast.Body, cpy *eval) { +// childWithBindingSizeHint creates a child evaluator with bindings pre-sized for the expected number of variables. +// This reduces memory waste when evaluating functions or rules with known argument counts. +func (e *eval) childWithBindingSizeHint(query ast.Body, cpy *eval, sizeHint int) { *cpy = *e cpy.index = 0 cpy.query = query cpy.queryID = cpy.queryIDFact.Next() - cpy.bindings = newBindings(cpy.queryID, e.instr) + cpy.bindings = newBindingsWithSize(cpy.queryID, e.instr, sizeHint) cpy.parent = e cpy.findOne = false } @@ -279,7 +318,6 @@ func (e *eval) traceUnify(a, b *ast.Term) { } func (e *eval) traceEvent(op Op, x ast.Node, msg string, target *ast.Ref) { - if !e.traceEnabled { return } @@ -399,9 +437,11 @@ func (e *eval) evalExpr(iter evalIterator) error { } return nil } - expr := e.query[e.index] - e.traceEval(expr) + expr := e.query[e.index] + if e.traceEnabled { + e.traceEval(expr) + } if len(expr.With) > 0 { return e.evalWith(iter) @@ -519,7 +559,7 @@ func (e *eval) evalStep(iter evalIterator) error { // generateVar inlined here to avoid extra allocations in hot path rterm := ast.VarTerm(e.fmtVarTerm()) err = e.unify(terms, rterm, func() error { - if e.saveSet.Contains(rterm, e.bindings) { + if e.saveSet != nil && e.saveSet.Contains(rterm, e.bindings) { return e.saveExpr(ast.NewExpr(rterm), e.bindings, func() error { return iter(e) }) @@ -886,13 +926,12 @@ func (e *eval) evalNotPartialSupport(negationID uint64, expr *ast.Expr, unknowns } func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { - ref := terms[0].Value.(ast.Ref) mock, mocked := e.functionMocks.Get(ref) if mocked { if m, ok := mock.Value.(ast.Ref); ok && isFunction(e.compiler.TypeEnv, m) { // builtin or data function - mockCall := 
append([]*ast.Term{ast.NewTerm(m)}, terms[1:]...) + mockCall := append([]*ast.Term{mock}, terms[1:]...) e.functionMocks.Push() err := e.evalCall(mockCall, func() error { @@ -910,8 +949,8 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { if ref[0].Equal(ast.DefaultRootDocument) { if mocked { - f := e.compiler.TypeEnv.Get(ref).(*types.Function) - return e.evalCallValue(f.Arity(), terms, mock, iter) + arity := e.compiler.TypeEnv.GetByRef(ref).(*types.Function).Arity() + return e.evalCallValue(arity, terms, mock, iter) } var ir *ast.IndexResult @@ -926,18 +965,20 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { return err } - eval := evalFunc{ - e: e, - terms: terms, - ir: ir, - } + eval := evalFuncPool.Get() + defer evalFuncPool.Put(eval) + + eval.e = e + eval.terms = terms + eval.ir = ir + return eval.eval(iter) } builtinName := ref.String() bi, f, ok := e.builtinFunc(builtinName) if !ok { - return unsupportedBuiltinErr(e.query[e.index].Location) + return unsupportedBuiltinErr(e.query[e.index].Location, builtinName) } if mocked { // value replacement of built-in call @@ -951,7 +992,7 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { var bctx *BuiltinContext // Creating a BuiltinContext is expensive, so only do it if the builtin depends on it. 
- if bi.NeedsBuiltInContext() { + if !bi.CanSkipBctx { var parentID uint64 if e.parent != nil { parentID = e.parent.queryID @@ -962,6 +1003,10 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { capabilities = e.compiler.Capabilities() } + if e.time == nil { + e.time = ast.NumberTerm(int64ToJSONNumber(e.timeStart)) + } + bctx = &BuiltinContext{ Context: e.ctx, Metrics: e.metrics, @@ -985,13 +1030,14 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { } } - eval := evalBuiltin{ - e: e, - bi: bi, - bctx: bctx, - f: f, - terms: terms[1:], - } + eval := evalBuiltinPool.Get() + defer evalBuiltinPool.Put(eval) + + eval.e = e + eval.bi = bi + eval.bctx = bctx + eval.f = f + eval.terms = terms[1:] return eval.eval(iter) } @@ -1048,7 +1094,14 @@ func (e *eval) biunify(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) err case ast.Var, ast.Ref, *ast.ArrayComprehension: return e.biunifyValues(a, b, b1, b2, iter) case *ast.Array: - return e.biunifyArrays(vA, vB, b1, b2, iter) + if vA.Len() == vB.Len() { + params := arraysRecPool.Get() + params.a, params.b = vA, vB + params.b1, params.b2 = b1, b2 + params.iter = iter + params.idx = 0 + return e.biunifyArraysRec(params) + } } case ast.Object: switch vB := b.Value.(type) { @@ -1063,19 +1116,15 @@ func (e *eval) biunify(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) err return nil } -func (e *eval) biunifyArrays(a, b *ast.Array, b1, b2 *bindings, iter unifyIterator) error { - if a.Len() != b.Len() { - return nil - } - return e.biunifyArraysRec(a, b, b1, b2, iter, 0) -} - -func (e *eval) biunifyArraysRec(a, b *ast.Array, b1, b2 *bindings, iter unifyIterator, idx int) error { - if idx == a.Len() { - return iter() +func (e *eval) biunifyArraysRec(params *biunifyArraysRecParams) error { + if params.idx == params.a.Len() { + err := params.iter() + arraysRecPool.Put(params) + return err } - return e.biunify(a.Elem(idx), b.Elem(idx), b1, b2, func() error { - return 
e.biunifyArraysRec(a, b, b1, b2, iter, idx+1) + return e.biunify(params.a.Elem(params.idx), params.b.Elem(params.idx), params.b1, params.b2, func() error { + params.idx++ + return e.biunifyArraysRec(params) }) } @@ -1211,7 +1260,6 @@ func (e *eval) biunifyValues(a, b *ast.Term, b1, b2 *bindings, iter unifyIterato } func (e *eval) biunifyRef(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error { - ref := a.Value.(ast.Ref) if ref[0].Equal(ast.DefaultRootDocument) { @@ -1335,7 +1383,7 @@ func (e *eval) buildComprehensionCacheArray(x *ast.ArrayComprehension, keys []*a child := evalPool.Get() defer evalPool.Put(child) - e.child(x.Body, child) + e.childWithBindingSizeHint(x.Body, child, ast.EstimateBodyBindingCount(x.Body)) node := newComprehensionCacheElem() return node, child.Run(func(child *eval) error { values := make([]*ast.Term, len(keys)) @@ -1357,7 +1405,7 @@ func (e *eval) buildComprehensionCacheSet(x *ast.SetComprehension, keys []*ast.T child := evalPool.Get() defer evalPool.Put(child) - e.child(x.Body, child) + e.childWithBindingSizeHint(x.Body, child, ast.EstimateBodyBindingCount(x.Body)) node := newComprehensionCacheElem() return node, child.Run(func(child *eval) error { values := make([]*ast.Term, len(keys)) @@ -1380,7 +1428,7 @@ func (e *eval) buildComprehensionCacheObject(x *ast.ObjectComprehension, keys [] child := evalPool.Get() defer evalPool.Put(child) - e.child(x.Body, child) + e.childWithBindingSizeHint(x.Body, child, ast.EstimateBodyBindingCount(x.Body)) node := newComprehensionCacheElem() return node, child.Run(func(child *eval) error { values := make([]*ast.Term, len(keys)) @@ -1460,36 +1508,50 @@ func (e *eval) amendComprehension(a *ast.Term, b1 *bindings) (*ast.Term, error) } func (e *eval) biunifyComprehensionArray(x *ast.ArrayComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error { - result := ast.NewArray() + var elements []*ast.Term child := evalPool.Get() e.closure(x.Body, child) defer evalPool.Put(child) err := 
child.Run(func(child *eval) error { - result = result.Append(child.bindings.Plug(x.Term)) + elements = append(elements, child.bindings.Plug(x.Term)) return nil }) if err != nil { return err } - return e.biunify(ast.NewTerm(result), b, b1, b2, iter) + + if len(elements) == 0 { + return e.biunify(ast.InternedEmptyArray, b, b1, b2, iter) + } + + return e.biunify(ast.NewTerm(ast.NewArray(elements...)), b, b1, b2, iter) } func (e *eval) biunifyComprehensionSet(x *ast.SetComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error { - result := ast.NewSet() child := evalPool.Get() e.closure(x.Body, child) defer evalPool.Put(child) + var result ast.Set err := child.Run(func(child *eval) error { - result.Add(child.bindings.Plug(x.Term)) + if result == nil { + result = ast.NewSet(child.bindings.Plug(x.Term)) + } else { + result.Add(child.bindings.Plug(x.Term)) + } return nil }) if err != nil { return err } + + if result == nil { + return e.biunify(ast.InternedEmptySet, b, b1, b2, iter) + } + return e.biunify(ast.NewTerm(result), b, b1, b2, iter) } @@ -1499,21 +1561,28 @@ func (e *eval) biunifyComprehensionObject(x *ast.ObjectComprehension, b *ast.Ter e.closure(x.Body, child) - result := ast.NewObject() - + var result ast.Object err := child.Run(func(child *eval) error { key := child.bindings.Plug(x.Key) value := child.bindings.Plug(x.Value) - exist := result.Get(key) - if exist != nil && !exist.Equal(value) { - return objectDocKeyConflictErr(x.Key.Location) + if result == nil { + result = ast.NewObject(ast.Item(key, value)) + } else { + if exist := result.Get(key); exist != nil && !exist.Equal(value) { + return objectDocKeyConflictErr(x.Key.Location) + } + result.Insert(key, value) } - result.Insert(key, value) return nil }) if err != nil { return err } + + if result == nil { + return e.biunify(ast.InternedEmptyObject, b, b1, b2, iter) + } + return e.biunify(ast.NewTerm(result), b, b1, b2, iter) } @@ -1632,12 +1701,12 @@ func (e *eval) getRules(ref ast.Ref, args 
[]*ast.Term) (*ast.IndexResult, error) e.instr.startTimer(evalOpRuleIndex) defer e.instr.stopTimer(evalOpRuleIndex) - index := e.compiler.RuleIndex(ref) + index := e.ruleIndex(ref) if index == nil { return nil, nil } - resolver := resolverPool.Get().(*evalResolver) + resolver := resolverPool.Get() defer func() { resolver.e = nil resolver.args = nil @@ -1646,12 +1715,11 @@ func (e *eval) getRules(ref ast.Ref, args []*ast.Term) (*ast.IndexResult, error) var result *ast.IndexResult var err error + resolver.e = e if e.indexing { - resolver.e = e resolver.args = args result, err = index.Lookup(resolver) } else { - resolver.e = e result, err = index.AllRules(resolver) } if err != nil { @@ -1684,6 +1752,11 @@ func (e *eval) getRules(ref ast.Ref, args []*ast.Term) (*ast.IndexResult, error) return result, err } +// ruleIndex performs a lookup for a RuleIndex in the compiler's RuleTree. +func (e *eval) ruleIndex(ref ast.Ref) ast.RuleIndex { + return e.compiler.RuleIndex(ref) +} + func (e *eval) Resolve(ref ast.Ref) (ast.Value, error) { return (&evalResolver{e: e}).Resolve(ref) } @@ -1693,14 +1766,6 @@ type evalResolver struct { args []*ast.Term } -var ( - resolverPool = sync.Pool{ - New: func() any { - return &evalResolver{} - }, - } -) - func (e *evalResolver) Resolve(ref ast.Ref) (ast.Value, error) { e.e.instr.startTimer(evalOpResolve) @@ -2047,8 +2112,7 @@ type evalFunc struct { terms []*ast.Term } -func (e evalFunc) eval(iter unifyIterator) error { - +func (e *evalFunc) eval(iter unifyIterator) error { if e.ir.Empty() { return nil } @@ -2060,13 +2124,13 @@ func (e evalFunc) eval(iter unifyIterator) error { argCount = len(e.ir.Default.Head.Args) } - if len(e.ir.Else) > 0 && e.e.unknown(e.e.query[e.e.index], e.e.bindings) { - // Partial evaluation of ordered rules is not supported currently. Save the - // expression and continue. This could be revisited in the future. 
- return e.e.saveCall(argCount, e.terms, iter) - } - if e.e.partial() { + if len(e.ir.Else) > 0 && e.e.unknown(e.e.query[e.e.index], e.e.bindings) { + // Partial evaluation of ordered rules is not supported currently. Save the + // expression and continue. This could be revisited in the future. + return e.e.saveCall(argCount, e.terms, iter) + } + var mustGenerateSupport bool if defRule := e.ir.Default; defRule != nil { @@ -2104,7 +2168,7 @@ func (e evalFunc) eval(iter unifyIterator) error { return e.evalValue(iter, argCount, e.ir.EarlyExit) } -func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) error { +func (e *evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) error { var cacheKey ast.Ref if !e.e.partial() { var hit bool @@ -2189,7 +2253,7 @@ func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) erro }) } -func (e evalFunc) evalCache(argCount int, iter unifyIterator) (ast.Ref, bool, error) { +func (e *evalFunc) evalCache(argCount int, iter unifyIterator) (ast.Ref, bool, error) { plen := len(e.terms) if plen == argCount+2 { // func name + output = 2 plen -= 1 @@ -2221,11 +2285,15 @@ func (e evalFunc) evalCache(argCount int, iter unifyIterator) (ast.Ref, bool, er return cacheKey, false, nil } -func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, args []*ast.Term, cacheKey ast.Ref, prev *ast.Term, findOne bool) (*ast.Term, error) { +func (e *evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, args []*ast.Term, cacheKey ast.Ref, prev *ast.Term, findOne bool) (*ast.Term, error) { child := evalPool.Get() defer evalPool.Put(child) - e.e.child(rule.Body, child) + // Optimization: pre-size bindings based on function argument count to reduce memory waste. + // Function argument count is known at compile time and most functions have < 10 arguments. + // This avoids allocating the default 16-slot array when only 2-3 bindings are needed. 
+ sizeHint := len(args) + e.e.childWithBindingSizeHint(rule.Body, child, sizeHint) child.findOne = findOne var result *ast.Term @@ -2283,7 +2351,7 @@ func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, args []*ast.Te return result, err } -func (e evalFunc) partialEvalSupport(declArgsLen int, iter unifyIterator) error { +func (e *evalFunc) partialEvalSupport(declArgsLen int, iter unifyIterator) error { path := e.e.namespaceRef(e.terms[0].Value.(ast.Ref)) if !e.e.saveSupport.Exists(path) { @@ -2311,11 +2379,11 @@ func (e evalFunc) partialEvalSupport(declArgsLen int, iter unifyIterator) error return e.e.saveCall(declArgsLen, append([]*ast.Term{term}, e.terms[1:]...), iter) } -func (e evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) error { +func (e *evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) error { child := evalPool.Get() defer evalPool.Put(child) - e.e.child(rule.Body, child) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) e.e.saveStack.PushQuery(nil) @@ -2390,12 +2458,6 @@ func (dc *deferredEarlyExitContainer) copyError() *deferredEarlyExitError { return &cpy } -var deecPool = sync.Pool{ - New: func() any { - return &deferredEarlyExitContainer{} - }, -} - type evalTree struct { e *eval bindings *bindings @@ -2470,6 +2532,20 @@ func (e evalTree) next(iter unifyIterator, plugged *ast.Term) error { return cpy.eval(iter) } +// enumerateNext is a helper to avoid closure allocation in enumerate loops. +// Method values don't allocate, unlike explicit closures. +// Using a pointer to evalTree avoids copying the 96-byte structure. +// Fields are ordered by size for optimal memory alignment (16 > 8 > 8 bytes). 
+type enumerateNext struct { + iter unifyIterator // 16 bytes (interface) + e *evalTree // 8 bytes (pointer) + key *ast.Term // 8 bytes (pointer) +} + +func (en *enumerateNext) call() error { + return en.e.next(en.iter, en.key) +} + func (e evalTree) enumerate(iter unifyIterator) error { if e.e.inliningControl.Disabled(e.plugged[:e.pos], true) { @@ -2481,18 +2557,21 @@ func (e evalTree) enumerate(iter unifyIterator) error { return err } - dc := deecPool.Get().(*deferredEarlyExitContainer) + dc := deecPool.Get() dc.deferred = nil defer deecPool.Put(dc) + // Use method value to avoid closure allocation. + // Create once and reuse for both doc and virtual doc enumeration. + en := enumerateNext{iter: iter, e: &e, key: nil} + if doc != nil { switch doc := doc.(type) { case *ast.Array: for i := range doc.Len() { k := ast.InternedTerm(i) - err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { - return e.next(iter, k) - }) + en.key = k + err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, en.call) if err := dc.handleErr(err); err != nil { return err @@ -2501,21 +2580,20 @@ func (e evalTree) enumerate(iter unifyIterator) error { case ast.Object: ki := doc.KeysIterator() for k, more := ki.Next(); more; k, more = ki.Next() { - err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { - return e.next(iter, k) - }) + en.key = k + err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, en.call) if err := dc.handleErr(err); err != nil { return err } } case ast.Set: - if err := doc.Iter(func(elem *ast.Term) error { - err := e.e.biunify(elem, e.ref[e.pos], e.bindings, e.bindings, func() error { - return e.next(iter, elem) - }) - return dc.handleErr(err) - }); err != nil { - return err + // Use Slice() to avoid closure allocation in Iter() + for _, elem := range doc.Slice() { + en.key = elem + err := e.e.biunify(elem, e.ref[e.pos], e.bindings, e.bindings, en.call) + if err := dc.handleErr(err); err != nil { + return err + } } } } @@ 
-2528,11 +2606,11 @@ func (e evalTree) enumerate(iter unifyIterator) error { return nil } + // Reuse the same enumerateNext for virtual documents for _, k := range e.node.Sorted { key := ast.NewTerm(k) - if err := e.e.biunify(key, e.ref[e.pos], e.bindings, e.bindings, func() error { - return e.next(iter, key) - }); err != nil { + en.key = key + if err := e.e.biunify(key, e.ref[e.pos], e.bindings, e.bindings, en.call); err != nil { return err } } @@ -2580,9 +2658,7 @@ func (e evalTree) leaves(plugged ast.Ref, node *ast.TreeNode) (ast.Object, error result := ast.NewObject() for _, k := range node.Sorted { - child := node.Children[k] - if child.Hide { continue } @@ -2845,7 +2921,7 @@ func (e evalVirtualPartial) evalAllRulesNoCache(rules []*ast.Rule) (*ast.Term, e defer evalPool.Put(child) for _, rule := range rules { - e.e.child(rule.Body, child) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) err := child.eval(func(*eval) error { child.traceExit(rule) @@ -2881,7 +2957,7 @@ func (e evalVirtualPartial) evalOneRulePreUnify(iter unifyIterator, rule *ast.Ru child := evalPool.Get() defer evalPool.Put(child) - e.e.child(rule.Body, child) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) var defined bool @@ -2976,7 +3052,7 @@ func (e evalVirtualPartial) evalOneRulePostUnify(iter unifyIterator, rule *ast.R child := evalPool.Get() defer evalPool.Put(child) - e.e.child(rule.Body, child) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) var defined bool @@ -3063,7 +3139,7 @@ func (e evalVirtualPartial) partialEvalSupportRule(rule *ast.Rule, _ ast.Ref) (b child := evalPool.Get() defer evalPool.Put(child) - e.e.child(rule.Body, child) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) e.e.saveStack.PushQuery(nil) @@ -3297,15 
+3373,26 @@ func (vcKeyScope) IsGround() bool { } func (q vcKeyScope) String() string { - buf := make([]string, 0, len(q.Ref)) + buf, _ := q.AppendText(make([]byte, 0, 2+q.StringLength())) + return util.ByteSliceToString(buf) +} + +func (q vcKeyScope) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, '<') for _, t := range q.Ref { if _, ok := t.Value.(ast.Var); ok { - buf = append(buf, "_") + buf = append(buf, '_') } else { - buf = append(buf, t.String()) + var err error + if buf, err = t.AppendText(buf); err != nil { + return nil, err + } } + buf = append(buf, ',') } - return fmt.Sprintf("<%s>", strings.Join(buf, ",")) + buf[len(buf)-1] = '>' + + return buf, nil } // reduce removes vars from the tail of the ref. @@ -3346,7 +3433,12 @@ func getNestedObject(ref ast.Ref, rootObj *ast.Object, b *bindings, l *ast.Locat } func hasCollisions(path ast.Ref, visitedRefs *[]ast.Ref, b *bindings) bool { - collisionPathTerm := b.Plug(ast.NewTerm(path)) + // Avoid allocating a new term just for the sake of a lookup + term := ast.TermPtrPool.Get() + term.Value = path + collisionPathTerm := b.Plug(term) + ast.TermPtrPool.Put(term) + collisionPath := collisionPathTerm.Value.(ast.Ref) for _, c := range *visitedRefs { if collisionPath.HasPrefix(c) && !collisionPath.Equal(c) { @@ -3358,15 +3450,15 @@ func hasCollisions(path ast.Ref, visitedRefs *[]ast.Ref, b *bindings) bool { } func (e evalVirtualPartial) reduce(rule *ast.Rule, b *bindings, result *ast.Term, visitedRefs *[]ast.Ref) (*ast.Term, bool, error) { - var exists bool head := rule.Head switch v := result.Value.(type) { case ast.Set: key := b.Plug(head.Key) - exists = v.Contains(key) - v.Add(key) + if exists = v.Contains(key); !exists { + v.Add(key) + } case ast.Object: // data.p.q[r].s.t := 42 {...} // |----|-| @@ -3548,9 +3640,10 @@ func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, p child := evalPool.Get() defer evalPool.Put(child) - e.e.child(rule.Body, child) + 
e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.findOne = findOne child.traceEnter(rule) + var result *ast.Term err := child.eval(func(child *eval) error { child.traceExit(rule) @@ -3569,8 +3662,7 @@ func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, p e.e.virtualCache.Put(e.plugged[:e.pos+1], result) term, termbindings := child.bindings.apply(rule.Head.Value) - err := e.evalTerm(iter, term, termbindings) - if err != nil { + if err := e.evalTerm(iter, term, termbindings); err != nil { return err } @@ -3587,15 +3679,14 @@ func (e evalVirtualComplete) partialEval(iter unifyIterator) error { defer evalPool.Put(child) for _, rule := range e.ir.Rules { - e.e.child(rule.Body, child) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) err := child.eval(func(child *eval) error { child.traceExit(rule) term, termbindings := child.bindings.apply(rule.Head.Value) - err := e.evalTerm(iter, term, termbindings) - if err != nil { + if err := e.evalTerm(iter, term, termbindings); err != nil { return err } @@ -3612,17 +3703,22 @@ func (e evalVirtualComplete) partialEval(iter unifyIterator) error { } func (e evalVirtualComplete) partialEvalSupport(iter unifyIterator) error { - - path := e.e.namespaceRef(e.plugged[:e.pos+1]) + originalPath := e.plugged[:e.pos+1] + namespacePath := e.e.namespaceRef(originalPath) term := ast.NewTerm(e.e.namespaceRef(e.ref)) var defined bool - if e.e.saveSupport.Exists(path) { + if e.e.saveSupport.Exists(namespacePath) { defined = true } else { for i := range e.ir.Rules { - ok, err := e.partialEvalSupportRule(e.ir.Rules[i], path) + // Split the rule from the package + ruleRef := originalPath.Copy()[len(e.ir.Rules[i].Module.Package.Path):] + ruleRef[0].Value = ast.Var(ruleRef[0].Value.(ast.String)) + // Get the namespaced package path without the rule + packagePath := namespacePath.Copy()[:len(namespacePath)-len(ruleRef)] 
+ ok, err := e.partialEvalSupportRule(e.ir.Rules[i], packagePath, ruleRef) if err != nil { return err } @@ -3632,7 +3728,12 @@ func (e evalVirtualComplete) partialEvalSupport(iter unifyIterator) error { } if e.ir.Default != nil { - ok, err := e.partialEvalSupportRule(e.ir.Default, path) + // Split the rule from the package + ruleRef := originalPath.Copy()[len(e.ir.Default.Module.Package.Path):] + ruleRef[0].Value = ast.Var(ruleRef[0].Value.(ast.String)) + // Get the namespaced package path without the rule + packagePath := namespacePath.Copy()[:len(namespacePath)-len(ruleRef)] + ok, err := e.partialEvalSupportRule(e.ir.Default, packagePath, ruleRef) if err != nil { return err } @@ -3649,11 +3750,11 @@ func (e evalVirtualComplete) partialEvalSupport(iter unifyIterator) error { return e.e.saveUnify(term, e.rterm, e.bindings, e.rbindings, iter) } -func (e evalVirtualComplete) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) (bool, error) { +func (e evalVirtualComplete) partialEvalSupportRule(rule *ast.Rule, packagePath ast.Ref, ruleRef ast.Ref) (bool, error) { child := evalPool.Get() defer evalPool.Put(child) - e.e.child(rule.Body, child) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) e.e.saveStack.PushQuery(nil) @@ -3668,7 +3769,6 @@ func (e evalVirtualComplete) partialEvalSupportRule(rule *ast.Rule, path ast.Ref // Skip this rule body if it fails to type-check. // Type-checking failure means the rule body will never succeed. 
if e.e.compiler.PassesTypeCheck(plugged) { - pkg, ruleRef := splitPackageAndRule(path) head := ast.RefHead(ruleRef, child.bindings.PlugNamespaced(rule.Head.Value, e.e.caller.bindings)) if !e.e.inliningControl.shallow { @@ -3678,7 +3778,7 @@ func (e evalVirtualComplete) partialEvalSupportRule(rule *ast.Rule, path ast.Ref plugged = applyCopyPropagation(cp, e.e.instr, plugged) } - e.e.saveSupport.InsertByPkg(pkg, &ast.Rule{ + e.e.saveSupport.InsertByPkg(packagePath, &ast.Rule{ Head: head, Body: plugged, Default: rule.Default, @@ -3875,12 +3975,10 @@ func (e evalTerm) get(plugged *ast.Term) (*ast.Term, *bindings) { } func (e evalTerm) save(iter unifyIterator) error { - v := e.e.generateVar(fmt.Sprintf("ref_%d", e.e.genvarid)) e.e.genvarid++ return e.e.biunify(e.term, v, e.termbindings, e.bindings, func() error { - suffix := e.ref[e.pos:] ref := make(ast.Ref, len(suffix)+1) ref[0] = v @@ -3899,7 +3997,8 @@ type evalEvery struct { func (e evalEvery) eval(iter unifyIterator) error { // unknowns in domain or body: save the expression, PE its body - if e.e.unknown(e.Domain, e.e.bindings) || e.e.unknown(e.Body, e.e.bindings) { + // partial() check to avoid e.Body -> Node boxing allocation + if e.e.partial() && (e.e.unknown(e.Domain, e.e.bindings) || e.e.unknown(e.Body, e.e.bindings)) { return e.save(iter) } @@ -3938,12 +4037,19 @@ func (e evalEvery) eval(iter unifyIterator) error { child.closure(e.Body, body) body.findOne = true - body.traceEnter(e.Body) + + if e.e.traceEnabled { + body.traceEnter(e.Body) + } + done := false err := body.eval(func(*eval) error { - body.traceExit(e.Body) + if e.e.traceEnabled { + body.traceExit(e.Body) + body.traceRedo(e.Body) + } done = true - body.traceRedo(e.Body) + return nil }) if !done { @@ -4232,7 +4338,7 @@ func isFunction(env *ast.TypeEnv, ref any) bool { default: panic("expected ast.Value or *ast.Term") } - _, ok := env.Get(r).(*types.Function) + _, ok := env.GetByRef(r).(*types.Function) return ok } diff --git 
a/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go index 8539a9e0dc..c47f7dc4e6 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go @@ -13,15 +13,15 @@ import ( gqlast "github.com/vektah/gqlparser/v2/ast" gqlparser "github.com/vektah/gqlparser/v2/parser" gqlvalidator "github.com/vektah/gqlparser/v2/validator" - - // Side-effecting import. Triggers GraphQL library's validation rule init() functions. - _ "github.com/vektah/gqlparser/v2/validator/rules" + "github.com/vektah/gqlparser/v2/validator/rules" "github.com/open-policy-agent/opa/v1/ast" "github.com/open-policy-agent/opa/v1/topdown/builtins" "github.com/open-policy-agent/opa/v1/topdown/cache" ) +var defaultRules = rules.NewDefaultRules() + // Parses a GraphQL schema, and returns the GraphQL AST for the schema. func parseSchema(schema string) (*gqlast.SchemaDocument, error) { // NOTE(philipc): We don't include the "built-in schema defs" from the @@ -51,8 +51,7 @@ func parseQuery(query string) (*gqlast.QueryDocument, error) { // just the first error message in the list. func validateQuery(schema *gqlast.Schema, query *gqlast.QueryDocument) error { // Validate the query against the schema, erroring if there's an issue. - err := gqlvalidator.Validate(schema, query) - if err != nil { + if err := gqlvalidator.ValidateWithRules(schema, query, defaultRules); err != nil { return formatGqlParserError(err) } return nil @@ -146,7 +145,7 @@ func pruneIrrelevantGraphQLASTNodes(value ast.Value) ast.Value { // extant ast type! 
switch x := value.(type) { case *ast.Array: - result := ast.NewArray() + result := ast.NewArrayWithCapacity(x.Len()) // Iterate over the array's elements, and do the following: // - Drop any Nulls // - Drop any any empty object/array value (after running the pruner) @@ -173,7 +172,7 @@ func pruneIrrelevantGraphQLASTNodes(value ast.Value) ast.Value { } return result case ast.Object: - result := ast.NewObject() + result := ast.NewObjectWithCapacity(x.Len()) // Iterate over our object's keys, and do the following: // - Drop "Position". // - Drop any key with a Null value. @@ -674,8 +673,7 @@ func cacheKeyWithPrefix(bctx BuiltinContext, t *ast.Term, prefix string) (string const gqlCacheName = "graphql" func init() { - - var defaultCacheEntries int = 10 + var defaultCacheEntries = 10 var graphqlCacheConfig = cache.NamedValueCacheConfig{ MaxNumEntries: &defaultCacheEntries, } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go index 36fa1572ec..79d49f33ca 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go @@ -90,15 +90,14 @@ var cacheableHTTPStatusCodes = [...]int{ } var ( - httpSendNetworkErrTerm = ast.StringTerm(HTTPSendNetworkErr) - httpSendInternalErrTerm = ast.StringTerm(HTTPSendInternalErr) + httpSendNetworkErrTerm, httpSendInternalErrTerm *ast.Term allowedKeys = ast.NewSet() - keyCache = make(map[string]*ast.Term, len(allowedKeyNames)) cacheableCodes = ast.NewSet() requiredKeys = ast.NewSet(ast.InternedTerm("method"), ast.InternedTerm("url")) httpSendLatencyMetricKey = "rego_builtin_http_send" httpSendInterQueryCacheHits = httpSendLatencyMetricKey + "_interquery_cache_hits" + httpSendNetworkRequests = httpSendLatencyMetricKey + "_network_requests" ) type httpSendKey string @@ -221,13 +220,13 @@ func getHTTPResponse(bctx BuiltinContext, req ast.Object) (*ast.Term, error) { func getKeyFromRequest(req 
ast.Object) (ast.Object, error) { // deep copy so changes to key do not reflect in the request object key := req.Copy() - cacheIgnoredHeadersTerm := req.Get(keyCache["cache_ignored_headers"]) - allHeadersTerm := req.Get(ast.StringTerm("headers")) + cacheIgnoredHeadersTerm := req.Get(ast.InternedTerm("cache_ignored_headers")) + allHeadersTerm := req.Get(ast.InternedTerm("headers")) // skip because no headers to delete if cacheIgnoredHeadersTerm == nil || allHeadersTerm == nil { // need to explicitly set cache_ignored_headers to null // equivalent requests might have different sets of exclusion lists - key.Insert(ast.StringTerm("cache_ignored_headers"), ast.InternedNullTerm) + key.Insert(ast.InternedTerm("cache_ignored_headers"), ast.InternedNullTerm) return key, nil } var cacheIgnoredHeaders []string @@ -247,14 +246,22 @@ func getKeyFromRequest(req ast.Object) (ast.Object, error) { if err != nil { return nil, err } - key.Insert(keyCache["headers"], ast.NewTerm(val)) + key.Insert(ast.InternedTerm("headers"), ast.NewTerm(val)) // remove cache_ignored_headers key - key.Insert(keyCache["cache_ignored_headers"], ast.InternedNullTerm) + key.Insert(ast.InternedTerm("cache_ignored_headers"), ast.InternedNullTerm) return key, nil } func init() { - createKeys() + for _, element := range allowedKeyNames { + ast.InternStringTerm(element) + allowedKeys.Add(ast.InternedTerm(element)) + } + + ast.InternStringTerm(HTTPSendNetworkErr, HTTPSendInternalErr) + httpSendNetworkErrTerm = ast.InternedTerm(HTTPSendNetworkErr) + httpSendInternalErrTerm = ast.InternedTerm(HTTPSendInternalErr) + createCacheableHTTPStatusCodes() initDefaults() RegisterBuiltinFunc(ast.HTTPSend.Name, builtinHTTPSend) @@ -278,8 +285,7 @@ func handleHTTPSendErr(bctx BuiltinContext, err error) error { } func initDefaults() { - timeoutDuration := os.Getenv(defaultHTTPRequestTimeoutEnv) - if timeoutDuration != "" { + if timeoutDuration := os.Getenv(defaultHTTPRequestTimeoutEnv); timeoutDuration != "" { var err error 
defaultHTTPRequestTimeout, err = time.ParseDuration(timeoutDuration) if err != nil { @@ -313,7 +319,6 @@ func validateHTTPRequestOperand(term *ast.Term, pos int) (ast.Object, error) { } return obj, nil - } // canonicalizeHeaders returns a copy of the headers where the keys are in @@ -332,7 +337,7 @@ func canonicalizeHeaders(headers map[string]any) map[string]any { // a DialContext that opens a socket (specified in the http call). // The url is expected to contain socket=/path/to/socket (url encoded) // Ex. "unix://localhost/end/point?socket=%2Ftmp%2Fhttp.sock" -func useSocket(rawURL string, tlsConfig *tls.Config) (bool, string, *http.Transport) { +func useSocket(rawURL string) (bool, string, *http.Transport) { u, err := url.Parse(rawURL) if err != nil { return false, "", nil @@ -361,7 +366,6 @@ func useSocket(rawURL string, tlsConfig *tls.Config) (bool, string, *http.Transp tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { return http.DefaultTransport.(*http.Transport).DialContext(ctx, "unix", socket) } - tr.TLSClientConfig = tlsConfig tr.DisableKeepAlives = true return true, u.String(), tr @@ -532,6 +536,10 @@ func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *htt } } + if len(customHeaders) != 0 { + customHeaders = canonicalizeHeaders(customHeaders) + } + isTLS := false client := &http.Client{ Timeout: timeout, @@ -578,13 +586,6 @@ func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *htt tlsConfig.Certificates = append(tlsConfig.Certificates, cert) } - // Use system certs if no CA cert is provided - // or system certs flag is not set - if len(tlsCaCert) == 0 && tlsCaCertFile == "" && tlsCaCertEnvVar == "" && tlsUseSystemCerts == nil { - trueValue := true - tlsUseSystemCerts = &trueValue - } - // Check the system certificates config first so that we // load additional certificated into the correct pool. 
if tlsUseSystemCerts != nil && *tlsUseSystemCerts && runtime.GOOS != "windows" { @@ -628,21 +629,31 @@ func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *htt tlsConfig.RootCAs = pool } + // If Host header is set, use it for TLS server name. + if host, hasHost := customHeaders["Host"]; hasHost { + // Only default the ServerName if the caller has + // specified the host. If we don't specify anything, + // Go will default to the target hostname. This name + // is not the same as the default that Go populates + // `req.Host` with, which is why we don't just set + // this unconditionally. + isTLS = true + tlsConfig.ServerName, _ = host.(string) + } + + if tlsServerName != "" { + isTLS = true + tlsConfig.ServerName = tlsServerName + } + var transport *http.Transport - if isTLS { - if ok, parsedURL, tr := useSocket(url, &tlsConfig); ok { - transport = tr - url = parsedURL - } else { - transport = http.DefaultTransport.(*http.Transport).Clone() - transport.TLSClientConfig = &tlsConfig - transport.DisableKeepAlives = true - } - } else { - if ok, parsedURL, tr := useSocket(url, nil); ok { - transport = tr - url = parsedURL - } + if ok, parsedURL, tr := useSocket(url); ok { + transport = tr + url = parsedURL + } else if isTLS { + transport = http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = &tlsConfig + transport.DisableKeepAlives = true } if bctx.RoundTripper != nil { @@ -675,8 +686,6 @@ func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *htt // Add custom headers if len(customHeaders) != 0 { - customHeaders = canonicalizeHeaders(customHeaders) - for k, v := range customHeaders { header, ok := v.(string) if !ok { @@ -696,21 +705,9 @@ func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *htt if host, hasHost := customHeaders["Host"]; hasHost { host := host.(string) // We already checked that it's a string. 
req.Host = host - - // Only default the ServerName if the caller has - // specified the host. If we don't specify anything, - // Go will default to the target hostname. This name - // is not the same as the default that Go populates - // `req.Host` with, which is why we don't just set - // this unconditionally. - tlsConfig.ServerName = host } } - if tlsServerName != "" { - tlsConfig.ServerName = tlsServerName - } - if len(bctx.DistributedTracingOpts) > 0 { client.Transport = tracing.NewTransport(client.Transport, bctx.DistributedTracingOpts) } @@ -722,7 +719,7 @@ func executeHTTPRequest(req *http.Request, client *http.Client, inputReqObj ast. var err error var retry int - retry, err = getNumberValFromReqObj(inputReqObj, keyCache["max_retry_attempts"]) + retry, err = getNumberValFromReqObj(inputReqObj, ast.InternedTerm("max_retry_attempts")) if err != nil { return nil, err } @@ -1016,15 +1013,6 @@ func insertIntoHTTPSendInterQueryCache(bctx BuiltinContext, key ast.Value, resp return nil } -func createKeys() { - for _, element := range allowedKeyNames { - term := ast.StringTerm(element) - - allowedKeys.Add(term) - keyCache[element] = term - } -} - func createCacheableHTTPStatusCodes() { for _, element := range cacheableHTTPStatusCodes { cacheableCodes.Add(ast.InternedTerm(element)) @@ -1088,7 +1076,7 @@ func getNumberValFromReqObj(req ast.Object, key *ast.Term) (int, error) { } func getCachingMode(req ast.Object) (cachingMode, error) { - key := keyCache["caching_mode"] + key := ast.InternedTerm("caching_mode") var s ast.String var ok bool if v := req.Get(key); v != nil { @@ -1191,7 +1179,8 @@ func newInterQueryCacheData(bctx BuiltinContext, resp *http.Response, respBody [ RespBody: respBody, Status: resp.Status, StatusCode: resp.StatusCode, - Headers: resp.Header} + Headers: resp.Header, + } return &cv, nil } @@ -1221,7 +1210,8 @@ func (c *interQueryCacheData) Clone() (cache.InterQueryCacheValue, error) { RespBody: dup, Status: c.Status, StatusCode: c.StatusCode, - 
Headers: c.Headers.Clone()}, nil + Headers: c.Headers.Clone(), + }, nil } type responseHeaders struct { @@ -1315,7 +1305,7 @@ func parseCacheControlHeader(headers http.Header) map[string]string { ccDirectives := map[string]string{} ccHeader := headers.Get("cache-control") - for _, part := range strings.Split(ccHeader, ",") { + for part := range strings.SplitSeq(ccHeader, ",") { part = strings.Trim(part, " ") if part == "" { continue @@ -1383,7 +1373,6 @@ func parseMaxAgeCacheDirective(cc map[string]string) (deltaSeconds, error) { } func formatHTTPResponseToAST(resp *http.Response, forceJSONDecode, forceYAMLDecode bool) (ast.Value, []byte, error) { - resultRawBody, err := io.ReadAll(resp.Body) if err != nil { return nil, nil, err @@ -1487,11 +1476,11 @@ func (c *interQueryCache) CheckCache() (ast.Value, error) { return resp, nil } - c.forceJSONDecode, err = getBoolValFromReqObj(c.key, keyCache["force_json_decode"]) + c.forceJSONDecode, err = getBoolValFromReqObj(c.key, ast.InternedTerm("force_json_decode")) if err != nil { return nil, handleHTTPSendErr(c.bctx, err) } - c.forceYAMLDecode, err = getBoolValFromReqObj(c.key, keyCache["force_yaml_decode"]) + c.forceYAMLDecode, err = getBoolValFromReqObj(c.key, ast.InternedTerm("force_yaml_decode")) if err != nil { return nil, handleHTTPSendErr(c.bctx, err) } @@ -1535,6 +1524,9 @@ func (c *interQueryCache) ExecuteHTTPRequest() (*http.Response, error) { return nil, handleHTTPSendErr(c.bctx, err) } + // Increment counter for actual network requests + c.bctx.Metrics.Counter(httpSendNetworkRequests).Incr() + return executeHTTPRequest(c.httpReq, c.httpClient, c.req) } @@ -1555,11 +1547,11 @@ func (c *intraQueryCache) CheckCache() (ast.Value, error) { // InsertIntoCache inserts the key set on this object into the cache with the given value func (c *intraQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) { - forceJSONDecode, err := getBoolValFromReqObj(c.key, keyCache["force_json_decode"]) + forceJSONDecode, 
err := getBoolValFromReqObj(c.key, ast.InternedTerm("force_json_decode")) if err != nil { return nil, handleHTTPSendErr(c.bctx, err) } - forceYAMLDecode, err := getBoolValFromReqObj(c.key, keyCache["force_yaml_decode"]) + forceYAMLDecode, err := getBoolValFromReqObj(c.key, ast.InternedTerm("force_yaml_decode")) if err != nil { return nil, handleHTTPSendErr(c.bctx, err) } @@ -1586,16 +1578,20 @@ func (c *intraQueryCache) ExecuteHTTPRequest() (*http.Response, error) { if err != nil { return nil, handleHTTPSendErr(c.bctx, err) } + + // Increment counter for actual network requests + c.bctx.Metrics.Counter(httpSendNetworkRequests).Incr() + return executeHTTPRequest(httpReq, httpClient, c.req) } func useInterQueryCache(req ast.Object) (bool, *forceCacheParams, error) { - value, err := getBoolValFromReqObj(req, keyCache["cache"]) + value, err := getBoolValFromReqObj(req, ast.InternedTerm("cache")) if err != nil { return false, nil, err } - valueForceCache, err := getBoolValFromReqObj(req, keyCache["force_cache"]) + valueForceCache, err := getBoolValFromReqObj(req, ast.InternedTerm("force_cache")) if err != nil { return false, nil, err } @@ -1613,7 +1609,7 @@ type forceCacheParams struct { } func newForceCacheParams(req ast.Object) (*forceCacheParams, error) { - term := req.Get(keyCache["force_cache_duration_seconds"]) + term := req.Get(ast.InternedTerm("force_cache_duration_seconds")) if term == nil { return nil, errors.New("'force_cache' set but 'force_cache_duration_seconds' parameter is missing") } @@ -1631,7 +1627,7 @@ func newForceCacheParams(req ast.Object) (*forceCacheParams, error) { func getRaiseErrorValue(req ast.Object) (bool, error) { result := ast.Boolean(true) var ok bool - if v := req.Get(keyCache["raise_error"]); v != nil { + if v := req.Get(ast.InternedTerm("raise_error")); v != nil { if result, ok = v.Value.(ast.Boolean); !ok { return false, errors.New("invalid value for raise_error field") } diff --git 
a/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go index 1b9ffc2350..5ad2abd8a8 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go @@ -1,5 +1,4 @@ -//go:build !go1.18 || !darwin -// +build !go1.18 !darwin +//go:build !darwin package topdown diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go index ff3058ef40..e2941cbfa1 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - package topdown func fixupDarwinGo118(x, y string) string { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go index 2c7d642883..9bca11cbe4 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go @@ -17,8 +17,7 @@ import ( func builtinJSONRemove(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { // Expect an object and a string or array/set of strings - _, err := builtins.ObjectOperand(operands[0].Value, 1) - if err != nil { + if _, err := builtins.ObjectOperand(operands[0].Value, 1); err != nil { return err } @@ -65,7 +64,7 @@ func jsonRemove(a *ast.Term, b *ast.Term) (*ast.Term, error) { case ast.String, ast.Number, ast.Boolean, ast.Null: return a, nil case ast.Object: - newObj := ast.NewObject() + newObj := ast.NewObjectWithCapacity(aValue.Len()) err := aValue.Iter(func(k *ast.Term, v *ast.Term) error { // recurse and add the diff of sub objects as needed diffValue, err := jsonRemove(v, bObj.Get(k)) @@ -80,7 +79,7 @@ func jsonRemove(a *ast.Term, b *ast.Term) 
(*ast.Term, error) { } return ast.NewTerm(newObj), nil case ast.Set: - newSet := ast.NewSet() + newSet := ast.NewSetWithCapacity(aValue.Len()) err := aValue.Iter(func(v *ast.Term) error { // recurse and add the diff of sub objects as needed diffValue, err := jsonRemove(v, bObj.Get(v)) @@ -97,7 +96,7 @@ func jsonRemove(a *ast.Term, b *ast.Term) (*ast.Term, error) { case *ast.Array: // When indexes are removed we shift left to close empty spots in the array // as per the JSON patch spec. - newArray := ast.NewArray() + newArraySlice := make([]*ast.Term, 0, aValue.Len()) for i := range aValue.Len() { v := aValue.Elem(i) // recurse and add the diff of sub objects as needed @@ -107,10 +106,10 @@ func jsonRemove(a *ast.Term, b *ast.Term) (*ast.Term, error) { return nil, err } if diffValue != nil { - newArray = newArray.Append(diffValue) + newArraySlice = append(newArraySlice, diffValue) } } - return ast.NewTerm(newArray), nil + return ast.ArrayTerm(newArraySlice...), nil default: return nil, fmt.Errorf("invalid value type %T", a) } @@ -139,11 +138,10 @@ func builtinJSONFilter(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te return iter(ast.NewTerm(r)) } -func getJSONPaths(operand ast.Value) ([]ast.Ref, error) { - var paths []ast.Ref - +func getJSONPaths(operand ast.Value) (paths []ast.Ref, err error) { switch v := operand.(type) { case *ast.Array: + paths = make([]ast.Ref, 0, v.Len()) for i := range v.Len() { filter, err := parsePath(v.Elem(i)) if err != nil { @@ -152,16 +150,13 @@ func getJSONPaths(operand ast.Value) ([]ast.Ref, error) { paths = append(paths, filter) } case ast.Set: - err := v.Iter(func(f *ast.Term) error { - filter, err := parsePath(f) + paths = make([]ast.Ref, 0, v.Len()) + for _, item := range v.Slice() { + filter, err := parsePath(item) if err != nil { - return err + return nil, err } paths = append(paths, filter) - return nil - }) - if err != nil { - return nil, err } default: return nil, builtins.NewOperandTypeErr(2, v, "set", "array") @@ 
-170,6 +165,7 @@ func getJSONPaths(operand ast.Value) ([]ast.Ref, error) { return paths, nil } +// parsePath parses a JSON pointer path or array of path segments into an ast.Ref. func parsePath(path *ast.Term) (ast.Ref, error) { // paths can either be a `/` separated json path or // an array or set of values @@ -177,30 +173,43 @@ func parsePath(path *ast.Term) (ast.Ref, error) { switch p := path.Value.(type) { case ast.String: if p == "" { - return ast.Ref{}, nil + return ast.InternedEmptyRefValue.(ast.Ref), nil } - parts := strings.Split(strings.TrimLeft(string(p), "/"), "/") - for _, part := range parts { - part = strings.ReplaceAll(strings.ReplaceAll(part, "~1", "/"), "~0", "~") - pathSegments = append(pathSegments, ast.StringTerm(part)) + + s := strings.TrimLeft(string(p), "/") + n := strings.Count(s, "/") + 1 + + pathSegments = make(ast.Ref, 0, n) + + part, remaining, found := strings.Cut(s, "/") + unescaped := strings.ReplaceAll(strings.ReplaceAll(part, "~1", "/"), "~0", "~") + pathSegments = append(pathSegments, ast.InternedTerm(unescaped)) + + for found { + part, remaining, found = strings.Cut(remaining, "/") + unescaped := strings.ReplaceAll(strings.ReplaceAll(part, "~1", "/"), "~0", "~") + pathSegments = append(pathSegments, ast.InternedTerm(unescaped)) } case *ast.Array: - p.Foreach(func(term *ast.Term) { - pathSegments = append(pathSegments, term) - }) + pathSegments = make(ast.Ref, 0, p.Len()) + for i := range p.Len() { + pathSegments = append(pathSegments, p.Elem(i)) + } default: - return nil, builtins.NewOperandErr(2, "must be one of {set, array} containing string paths or array of path segments but got %v", ast.ValueName(p)) + return nil, builtins.NewOperandErr(2, + "must be one of {set, array} containing string paths or array of path segments but got "+ast.ValueName(p), + ) } return pathSegments, nil } func pathsToObject(paths []ast.Ref) ast.Object { - root := ast.NewObject() + root := ast.NewObjectWithCapacity(len(paths)) for _, path := range 
paths { node := root - var done bool + done := false // If the path is an empty JSON path, skip all further processing. if len(path) == 0 { @@ -209,7 +218,6 @@ func pathsToObject(paths []ast.Ref) ast.Object { // Otherwise, we should have 1+ path segments to work with. for i := 0; i < len(path)-1 && !done; i++ { - k := path[i] child := node.Get(k) @@ -238,106 +246,67 @@ func pathsToObject(paths []ast.Ref) ast.Object { return root } -type jsonPatch struct { - op string - path *ast.Term - from *ast.Term - value *ast.Term -} +func applyPatches(source *ast.Term, operations *ast.Array) (*ast.Term, error) { + et := edittree.EditTreeFromPool(source) + defer edittree.Dispose(et) -func getPatch(o ast.Object) (jsonPatch, error) { - validOps := map[string]struct{}{"add": {}, "remove": {}, "replace": {}, "move": {}, "copy": {}, "test": {}} - var out jsonPatch - var ok bool - getAttribute := func(attr string) (*ast.Term, error) { - if term := o.Get(ast.StringTerm(attr)); term != nil { - return term, nil + for i := range operations.Len() { + object, ok := operations.Elem(i).Value.(ast.Object) + if !ok { + return nil, errors.New("must be an array of JSON-Patch objects, but at least one element is not an object") } - return nil, fmt.Errorf("missing '%s' attribute", attr) - } - - opTerm, err := getAttribute("op") - if err != nil { - return out, err - } - op, ok := opTerm.Value.(ast.String) - if !ok { - return out, errors.New("attribute 'op' must be a string") - } - out.op = string(op) - if _, found := validOps[out.op]; !found { - out.op = "" - return out, fmt.Errorf("unrecognized op '%s'", string(op)) - } - - pathTerm, err := getAttribute("path") - if err != nil { - return out, err - } - out.path = pathTerm - - // Only fetch the "from" parameter for move/copy ops. 
- switch out.op { - case "move", "copy": - fromTerm, err := getAttribute("from") - if err != nil { - return out, err + // Validate + if object.Get(ast.InternedTerm("path")) == nil { + return nil, errors.New("missing required attribute 'path'") } - out.from = fromTerm - } - // Only fetch the "value" parameter for add/replace/test ops. - switch out.op { - case "add", "replace", "test": - valueTerm, err := getAttribute("value") - if err != nil { - return out, err + opTerm := object.Get(ast.InternedTerm("op")) + if opTerm == nil { + return nil, errors.New("missing required attribute 'op'") } - out.value = valueTerm - } - return out, nil -} - -func applyPatches(source *ast.Term, operations *ast.Array) (*ast.Term, error) { - et := edittree.NewEditTree(source) - for i := range operations.Len() { - object, ok := operations.Elem(i).Value.(ast.Object) + opStr, ok := opTerm.Value.(ast.String) if !ok { - return nil, errors.New("must be an array of JSON-Patch objects, but at least one element is not an object") + return nil, errors.New("attribute 'op' must be a string but found: " + ast.ValueName(opTerm.Value)) } - patch, err := getPatch(object) - if err != nil { - return nil, err - } - path, err := parsePath(patch.path) + + path, err := parsePath(object.Get(ast.InternedTerm("path"))) if err != nil { return nil, err } - switch patch.op { + switch string(opStr) { case "add": - _, err = et.InsertAtPath(path, patch.value) - if err != nil { + value := object.Get(ast.InternedTerm("value")) + if value == nil { + return nil, errors.New("missing required attribute 'value'") + } + if _, err = et.InsertAtPath(path, value); err != nil { return nil, err } case "remove": - _, err = et.DeleteAtPath(path) - if err != nil { + if _, err = et.DeleteAtPath(path); err != nil { return nil, err } case "replace": - _, err = et.DeleteAtPath(path) - if err != nil { + if _, err = et.DeleteAtPath(path); err != nil { return nil, err } - _, err = et.InsertAtPath(path, patch.value) - if err != nil { + value 
:= object.Get(ast.InternedTerm("value")) + if value == nil { + return nil, errors.New("missing required attribute 'value'") + } + if _, err = et.InsertAtPath(path, value); err != nil { return nil, err } case "move": - from, err := parsePath(patch.from) + fromValue := object.Get(ast.InternedTerm("from")) + if fromValue == nil { + return nil, errors.New("missing required attribute 'from'") + } + + from, err := parsePath(fromValue) if err != nil { return nil, err } @@ -345,16 +314,18 @@ func applyPatches(source *ast.Term, operations *ast.Array) (*ast.Term, error) { if err != nil { return nil, err } - _, err = et.DeleteAtPath(from) - if err != nil { + if _, err = et.DeleteAtPath(from); err != nil { return nil, err } - _, err = et.InsertAtPath(path, chunk) - if err != nil { + if _, err = et.InsertAtPath(path, chunk); err != nil { return nil, err } case "copy": - from, err := parsePath(patch.from) + fromValue := object.Get(ast.InternedTerm("from")) + if fromValue == nil { + return nil, errors.New("missing required attribute 'from'") + } + from, err := parsePath(fromValue) if err != nil { return nil, err } @@ -362,8 +333,7 @@ func applyPatches(source *ast.Term, operations *ast.Array) (*ast.Term, error) { if err != nil { return nil, err } - _, err = et.InsertAtPath(path, chunk) - if err != nil { + if _, err = et.InsertAtPath(path, chunk); err != nil { return nil, err } case "test": @@ -371,34 +341,41 @@ func applyPatches(source *ast.Term, operations *ast.Array) (*ast.Term, error) { if err != nil { return nil, err } - if !chunk.Equal(patch.value) { - return nil, fmt.Errorf("value from EditTree != patch value.\n\nExpected: %v\n\nFound: %v", patch.value, chunk) + value := object.Get(ast.InternedTerm("value")) + if value == nil { + return nil, errors.New("missing required attribute 'value'") + } + if !chunk.Equal(value) { + return nil, fmt.Errorf("value from EditTree != patch value.\n\nExpected: %v\n\nFound: %v", value, chunk) } + default: + return nil, 
fmt.Errorf("unrecognized op: '%s'", string(opStr)) } } - final := et.Render() - // TODO: Nil check here? - return final, nil + + return et.Render(), nil } func builtinJSONPatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // JSON patch supports arrays, objects as well as values as the target. - target := ast.NewTerm(operands[0].Value) - // Expect an array of operations. operations, err := builtins.ArrayOperand(operands[1].Value, 2) if err != nil { return err } - patched, err := applyPatches(target, operations) + // JSON patch supports arrays, objects as well as values as the target. + patched, err := applyPatches(operands[0], operations) if err != nil { - return nil + return err } return iter(patched) } func init() { + for _, key := range []string{"op", "path", "from", "value", "add", "remove", "replace", "move", "copy", "test"} { + ast.InternStringTerm(key) + } + RegisterBuiltinFunc(ast.JSONFilter.Name, builtinJSONFilter) RegisterBuiltinFunc(ast.JSONRemove.Name, builtinJSONRemove) RegisterBuiltinFunc(ast.JSONPatch.Name, builtinJSONPatch) diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go index 699f1d0d99..e48719d145 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go @@ -27,7 +27,7 @@ func astValueToJSONSchemaLoader(value ast.Value) (gojsonschema.JSONLoader, error return nil, errors.New("invalid JSON string") } loader = gojsonschema.NewStringLoader(string(x)) - case ast.Object: + case ast.Object, *ast.Array: // In case of object serialize it to JSON representation. var data any data, err = ast.JSON(value) @@ -110,7 +110,7 @@ func builtinJSONMatchSchema(bctx BuiltinContext, operands []*ast.Term, iter func } // In case of validation errors produce Rego array of objects to describe the errors. 
- arr := ast.NewArray() + arr := ast.NewArrayWithCapacity(len(result.Errors())) for _, re := range result.Errors() { o := ast.NewObject( [...]*ast.Term{ast.StringTerm("error"), ast.StringTerm(re.String())}, diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/net.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/net.go index 17ed779844..6caa068b47 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/net.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/net.go @@ -49,7 +49,7 @@ func builtinLookupIPAddr(bctx BuiltinContext, operands []*ast.Term, iter func(*a return err } - ret := ast.NewSet() + ret := ast.NewSetWithCapacity(len(addrs)) for _, a := range addrs { ret.Add(ast.StringTerm(a.String())) diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go index a3f8f0854f..fdf6444939 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go @@ -15,8 +15,10 @@ import ( type randIntCachingKey string -var zero = big.NewInt(0) -var one = big.NewInt(1) +var ( + zero = big.NewInt(0) + one = big.NewInt(1) +) func builtinNumbersRange(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { if canGenerateCheapRange(operands) { @@ -45,8 +47,9 @@ func builtinNumbersRangeStep(bctx BuiltinContext, operands []*ast.Term, iter fun if canGenerateCheapRangeStep(operands) { step, _ := builtins.IntOperand(operands[2].Value, 3) if step <= 0 { - return errors.New("numbers.range_step: step must be a positive number above zero") + return errors.New("numbers.range_step: step must be a positive integer") } + return generateCheapRange(operands, step, iter) } @@ -66,7 +69,7 @@ func builtinNumbersRangeStep(bctx BuiltinContext, operands []*ast.Term, iter fun } if step.Cmp(zero) <= 0 { - return errors.New("numbers.range_step: step must be a positive number above zero") + return 
errors.New("numbers.range_step: step must be a positive integer") } ast, err := generateRange(bctx, x, y, step, "numbers.range_step") @@ -93,7 +96,7 @@ func canGenerateCheapRange(operands []*ast.Term) bool { func canGenerateCheapRangeStep(operands []*ast.Term) bool { if canGenerateCheapRange(operands) { - step, err := builtins.IntOperand(operands[1].Value, 3) + step, err := builtins.IntOperand(operands[2].Value, 3) if err == nil && ast.HasInternedIntNumberTerm(step) { return true } @@ -158,11 +161,9 @@ func generateRange(bctx BuiltinContext, x *big.Int, y *big.Int, step *big.Int, f } func builtinRandIntn(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - strOp, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err - } n, err := builtins.IntOperand(operands[1].Value, 2) @@ -178,7 +179,7 @@ func builtinRandIntn(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.T n = -n } - var key = randIntCachingKey(fmt.Sprintf("%s-%d", strOp, n)) + key := randIntCachingKey(fmt.Sprintf("%s-%d", strOp, n)) if val, ok := bctx.Cache.Get(key); ok { return iter(val.(*ast.Term)) diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/object.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/object.go index c6fbe7022f..fe5ccf093f 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/object.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/object.go @@ -52,13 +52,21 @@ func builtinObjectUnionN(_ BuiltinContext, operands []*ast.Term, iter func(*ast. 
// Example: // Input: [{"a": {"b": 2}}, {"a": 4}, {"a": {"c": 3}}] // Want Output: {"a": {"c": 3}} - result := ast.NewObject() - frozenKeys := map[*ast.Term]struct{}{} - for i := arr.Len() - 1; i >= 0; i-- { + + // First pass: count total keys for pre-allocation + totalSize := 0 + for i := range arr.Len() { o, ok := arr.Elem(i).Value.(ast.Object) if !ok { return builtins.NewOperandElementErr(1, arr, arr.Elem(i).Value, "object") } + totalSize += o.Len() + } + + result := ast.NewObjectWithCapacity(totalSize) + frozenKeys := make(map[*ast.Term]struct{}, totalSize) + for i := arr.Len() - 1; i >= 0; i-- { + o := arr.Elem(i).Value.(ast.Object) // Already validated above mergewithOverwriteInPlace(result, o, frozenKeys) } @@ -77,7 +85,9 @@ func builtinObjectRemove(_ BuiltinContext, operands []*ast.Term, iter func(*ast. if err != nil { return err } - r := ast.NewObject() + + // Pre-allocate with obj size (upper bound for result) + r := ast.NewObjectWithCapacity(obj.Len()) obj.Foreach(func(key *ast.Term, value *ast.Term) { if !keysToRemove.Contains(key) { r.Insert(key, value) @@ -100,7 +110,8 @@ func builtinObjectFilter(_ BuiltinContext, operands []*ast.Term, iter func(*ast. return err } - filterObj := ast.NewObject() + // Pre-allocate with keys size (upper bound for filter object) + filterObj := ast.NewObjectWithCapacity(keys.Len()) keys.Foreach(func(key *ast.Term) { filterObj.Insert(key, ast.InternedNullTerm) }) @@ -158,15 +169,19 @@ func builtinObjectKeys(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te } // getObjectKeysParam returns a set of key values -// from a supplied ast array, object, set value +// from a supplied ast array, object, set value. +// The returned set must not be mutated. For Set +// inputs, it may be the original. 
func getObjectKeysParam(arrayOrSet ast.Value) (ast.Set, error) { switch v := arrayOrSet.(type) { case *ast.Array: - keys := ast.NewSet() + keys := ast.NewSetWithCapacity(v.Len()) v.Foreach(keys.Add) return keys, nil case ast.Set: - return ast.NewSet(v.Slice()...), nil + // Return directly. Callers only use this for Contains() checks + // without mutating the set. + return v, nil case ast.Object: return ast.NewSet(v.Keys()...), nil } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go index f852f3e320..acf55f0f3f 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go @@ -28,7 +28,6 @@ func (h printHook) Print(_ print.Context, msg string) error { } func builtinPrint(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - if bctx.PrintHook == nil { return iter(nil) } @@ -40,7 +39,7 @@ func builtinPrint(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term buf := make([]string, arr.Len()) - err = builtinPrintCrossProductOperands(bctx, buf, arr, 0, func(buf []string) error { + err = builtinPrintCrossProductOperands(bctx.Location, buf, arr, 0, func(buf []string) error { pctx := print.Context{ Context: bctx.Context, Location: bctx.Location, @@ -54,20 +53,32 @@ func builtinPrint(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term return iter(nil) } -func builtinPrintCrossProductOperands(bctx BuiltinContext, buf []string, operands *ast.Array, i int, f func([]string) error) error { - +func builtinPrintCrossProductOperands(loc *ast.Location, buf []string, operands *ast.Array, i int, f func([]string) error) error { if i >= operands.Len() { return f(buf) } - xs, ok := operands.Elem(i).Value.(ast.Set) + operand := operands.Elem(i) + + // We allow primitives ... 
+ switch x := operand.Value.(type) { + case ast.String: + buf[i] = string(x) + return builtinPrintCrossProductOperands(loc, buf, operands, i+1, f) + case ast.Number, ast.Boolean, ast.Null: + buf[i] = x.String() + return builtinPrintCrossProductOperands(loc, buf, operands, i+1, f) + } + + // ... but all other operand types must be sets. + xs, ok := operand.Value.(ast.Set) if !ok { - return Halt{Err: internalErr(bctx.Location, fmt.Sprintf("illegal argument type: %v", ast.ValueName(operands.Elem(i).Value)))} + return Halt{Err: internalErr(loc, "illegal argument type: "+ast.ValueName(operand.Value))} } if xs.Len() == 0 { buf[i] = "" - return builtinPrintCrossProductOperands(bctx, buf, operands, i+1, f) + return builtinPrintCrossProductOperands(loc, buf, operands, i+1, f) } return xs.Iter(func(x *ast.Term) error { @@ -77,7 +88,7 @@ func builtinPrintCrossProductOperands(bctx BuiltinContext, buf []string, operand default: buf[i] = v.String() } - return builtinPrintCrossProductOperands(bctx, buf, operands, i+1, f) + return builtinPrintCrossProductOperands(loc, buf, operands, i+1, f) }) } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go index dd84026e4b..29d721e4b2 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go @@ -14,12 +14,7 @@ import ( "github.com/open-policy-agent/opa/v1/topdown/builtins" ) -var awsRequiredConfigKeyNames = ast.NewSet( - ast.StringTerm("aws_service"), - ast.StringTerm("aws_access_key"), - ast.StringTerm("aws_secret_access_key"), - ast.StringTerm("aws_region"), -) +var awsRequiredConfigKeyNames ast.Set func stringFromTerm(t *ast.Term) string { if v, ok := t.Value.(ast.String); ok { @@ -103,7 +98,7 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a if err != nil { return err } - service := 
stringFromTerm(awsConfigObj.Get(ast.StringTerm("aws_service"))) + service := stringFromTerm(awsConfigObj.Get(ast.InternedTerm("aws_service"))) awsCreds := aws.CredentialsFromObject(awsConfigObj) // Timestamp for signing. @@ -130,11 +125,12 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a // Prepare required fields from the HTTP request object. var theURL *url.URL var method string - reqURL := reqObj.Get(ast.StringTerm("url")) - reqMethod := reqObj.Get(ast.StringTerm("method")) + reqURL := reqObj.Get(ast.InternedTerm("url")) + keyMethod := ast.InternedTerm("method") + reqMethod := reqObj.Get(keyMethod) headers := ast.NewObject() - headersTerm := reqObj.Get(ast.StringTerm("headers")) + headersTerm := reqObj.Get(ast.InternedTerm("headers")) if headersTerm != nil { var ok bool headers, ok = headersTerm.Value.(ast.Object) @@ -146,10 +142,10 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a // Check types on the request parameters. 
invalidParameters := ast.NewSet() if _, ok := reqURL.Value.(ast.String); !ok { - invalidParameters.Add(ast.StringTerm("url")) + invalidParameters.Add(ast.InternedTerm("url")) } if _, ok := reqMethod.Value.(ast.String); !ok { - invalidParameters.Add(ast.StringTerm("method")) + invalidParameters.Add(keyMethod) } if invalidParameters.Len() > 0 { return builtins.NewOperandErr(1, "invalid values for required request parameters(s): %v", invalidParameters) @@ -161,8 +157,8 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a } method = stringFromTerm(reqMethod) - bodyTerm := reqObj.Get(ast.StringTerm("body")) - rawBodyTerm := reqObj.Get(ast.StringTerm("raw_body")) + bodyTerm := reqObj.Get(ast.InternedTerm("body")) + rawBodyTerm := reqObj.Get(ast.InternedTerm("raw_body")) body, err := getReqBodyBytes(bodyTerm, rawBodyTerm) if err != nil { return err @@ -173,7 +169,7 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a // if payload signing config is set, pass it down to the signing method disablePayloadSigning := false - t := awsConfigObj.Get(ast.StringTerm("disable_payload_signing")) + t := awsConfigObj.Get(ast.InternedTerm("disable_payload_signing")) if t != nil { if v, ok := t.Value.(ast.Boolean); ok { disablePayloadSigning = bool(v) @@ -188,24 +184,37 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a for k, v := range headersMap { // objectToMap doesn't support arrays if len(v) == 1 { - signedHeadersObj.Insert(ast.StringTerm(k), ast.StringTerm(v[0])) + signedHeadersObj.Insert(ast.InternedTerm(k), ast.StringTerm(v[0])) } } // Set authorization header - signedHeadersObj.Insert(ast.StringTerm("Authorization"), ast.StringTerm(authHeader)) + signedHeadersObj.Insert(ast.InternedTerm("Authorization"), ast.StringTerm(authHeader)) // set aws signature headers for k, v := range awsHeadersMap { - signedHeadersObj.Insert(ast.StringTerm(k), ast.StringTerm(v)) + 
signedHeadersObj.Insert(ast.InternedTerm(k), ast.StringTerm(v)) } // Create new request object with updated headers. out := reqObj.Copy() - out.Insert(ast.StringTerm("headers"), ast.NewTerm(signedHeadersObj)) + out.Insert(ast.InternedTerm("headers"), ast.NewTerm(signedHeadersObj)) return iter(ast.NewTerm(out)) } func init() { + for _, key := range []string{ + "aws_service", "aws_access_key", "aws_secret_access_key", "aws_region", "disable_payload_signing", + } { + ast.InternStringTerm(key) + } + + awsRequiredConfigKeyNames = ast.NewSet( + ast.InternedTerm("aws_service"), + ast.InternedTerm("aws_access_key"), + ast.InternedTerm("aws_secret_access_key"), + ast.InternedTerm("aws_region"), + ) + RegisterBuiltinFunc(ast.ProvidersAWSSignReqObj.Name, builtinAWSSigV4SignReq) } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go index aee6ba12eb..a65f8a312c 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go @@ -121,6 +121,7 @@ func (q *Query) WithInput(input *ast.Term) *Query { } // WithTracer adds a query tracer to use during evaluation. This is optional. +// // Deprecated: Use WithQueryTracer instead. func (q *Query) WithTracer(tracer Tracer) *Query { qt, ok := tracer.(QueryTracer) @@ -133,7 +134,7 @@ func (q *Query) WithTracer(tracer Tracer) *Query { // WithQueryTracer adds a query tracer to use during evaluation. This is optional. // Disabled QueryTracers will be ignored. 
func (q *Query) WithQueryTracer(tracer QueryTracer) *Query { - if !tracer.Enabled() { + if tracer == nil || !tracer.Enabled() { return q } @@ -374,7 +375,7 @@ func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support [] ctx: ctx, metrics: q.metrics, seed: q.seed, - time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())), + timeStart: q.time.UnixNano(), cancel: q.cancel, query: q.query, queryCompiler: q.queryCompiler, @@ -569,7 +570,7 @@ func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error { ctx: ctx, metrics: q.metrics, seed: q.seed, - time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())), + timeStart: q.time.UnixNano(), cancel: q.cancel, query: q.query, queryCompiler: q.queryCompiler, diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go index 1d2906ee2e..0313452033 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go @@ -7,6 +7,7 @@ package topdown import ( "fmt" "regexp" + "regexp/syntax" "sync" gintersect "github.com/yashtewari/glob-intersection" @@ -15,25 +16,24 @@ import ( "github.com/open-policy-agent/opa/v1/topdown/builtins" ) -const regexCacheMaxSize = 100 -const regexInterQueryValueCacheHits = "rego_builtin_regex_interquery_value_cache_hits" +const ( + regexCacheMaxSize = 100 + regexInterQueryValueCacheHits = "rego_builtin_regex_interquery_value_cache_hits" +) -var regexpCacheLock = sync.Mutex{} -var regexpCache map[string]*regexp.Regexp +var ( + regexpCacheLock = sync.RWMutex{} + regexpCache = make(map[string]*regexp.Regexp) +) func builtinRegexIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return iter(ast.InternedTerm(false)) - } - - _, err = regexp.Compile(string(s)) - if err != nil { - return iter(ast.InternedTerm(false)) 
+ if s, err := builtins.StringOperand(operands[0].Value, 1); err == nil { + if _, err = syntax.Parse(string(s), syntax.Perl); err == nil { + return iter(ast.InternedTerm(true)) + } } - return iter(ast.InternedTerm(true)) + return iter(ast.InternedTerm(false)) } func builtinRegexMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -107,7 +107,8 @@ func builtinRegexSplit(bctx BuiltinContext, operands []*ast.Term, iter func(*ast func getRegexp(bctx BuiltinContext, pat string) (*regexp.Regexp, error) { if bctx.InterQueryBuiltinValueCache != nil { // TODO: Use named cache - val, ok := bctx.InterQueryBuiltinValueCache.Get(ast.String(pat)) + var key ast.Value = ast.String(pat) + val, ok := bctx.InterQueryBuiltinValueCache.Get(key) if ok { res, valid := val.(*regexp.Regexp) if !valid { @@ -124,20 +125,23 @@ func getRegexp(bctx BuiltinContext, pat string) (*regexp.Regexp, error) { if err != nil { return nil, err } - bctx.InterQueryBuiltinValueCache.Insert(ast.String(pat), re) + bctx.InterQueryBuiltinValueCache.Insert(key, re) return re, nil } - regexpCacheLock.Lock() - defer regexpCacheLock.Unlock() + regexpCacheLock.RLock() re, ok := regexpCache[pat] + numCached := len(regexpCache) + regexpCacheLock.RUnlock() if !ok { var err error re, err = regexp.Compile(pat) if err != nil { return nil, err } - if len(regexpCache) >= regexCacheMaxSize { + + regexpCacheLock.Lock() + if numCached >= regexCacheMaxSize { // Delete a (semi-)random key to make room for the new one. 
for k := range regexpCache { delete(regexpCache, k) @@ -145,21 +149,24 @@ func getRegexp(bctx BuiltinContext, pat string) (*regexp.Regexp, error) { } } regexpCache[pat] = re + regexpCacheLock.Unlock() } return re, nil } func getRegexpTemplate(pat string, delimStart, delimEnd byte) (*regexp.Regexp, error) { - regexpCacheLock.Lock() - defer regexpCacheLock.Unlock() + regexpCacheLock.RLock() re, ok := regexpCache[pat] + regexpCacheLock.RUnlock() if !ok { var err error re, err = compileRegexTemplate(pat, delimStart, delimEnd) if err != nil { return nil, err } + regexpCacheLock.Lock() regexpCache[pat] = re + regexpCacheLock.Unlock() } return re, nil } @@ -259,7 +266,41 @@ func builtinRegexReplace(bctx BuiltinContext, operands []*ast.Term, iter func(*a return err } - res := re.ReplaceAllString(string(base), string(value)) + // If no cancellation context, use the fast path + if bctx.Cancel == nil { + res := re.ReplaceAllString(string(base), string(value)) + if res == string(base) { + return iter(operands[0]) + } + return iter(ast.InternedTerm(res)) + } + + // Use sink writer for cancellation-aware replacement + sink := newSink(ast.RegexReplace.Name, len(base), bctx.Cancel) + src := []byte(base) + repl := []byte(value) + + // Find all matches at once to preserve anchor behavior: replace("foo", "^[a-z]", "F") => "Foo" + allMatches := re.FindAllSubmatchIndex(src, -1) + + lastEnd := 0 + for _, match := range allMatches { + if _, err := sink.Write(src[lastEnd:match[0]]); err != nil { + return err + } + + if _, err := sink.Write(re.Expand(nil, repl, src, match)); err != nil { + return err + } + + lastEnd = match[1] + } + + if _, err := sink.Write(src[lastEnd:]); err != nil { + return err + } + + res := sink.String() if res == string(base) { return iter(operands[0]) } @@ -268,7 +309,6 @@ func builtinRegexReplace(bctx BuiltinContext, operands []*ast.Term, iter func(*a } func init() { - regexpCache = map[string]*regexp.Regexp{} RegisterBuiltinFunc(ast.RegexIsValid.Name, 
builtinRegexIsValid) RegisterBuiltinFunc(ast.RegexMatch.Name, builtinRegexMatch) RegisterBuiltinFunc(ast.RegexMatchDeprecated.Name, builtinRegexMatch) diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go index 8fff22b1d3..6688c83061 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go @@ -99,7 +99,7 @@ func (t *resolverTrie) mktree(e *eval, in resolver.Input) (ast.Value, error) { } return result.Value, nil } - obj := ast.NewObject() + obj := ast.NewObjectWithCapacity(len(t.children)) for k, child := range t.children { v, err := child.mktree(e, resolver.Input{Ref: append(in.Ref, ast.NewTerm(k)), Input: in.Input, Metrics: in.Metrics}) if err != nil { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go index 3b79ebd586..1b2ac79038 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go @@ -23,34 +23,25 @@ func builtinSemVerCompare(_ BuiltinContext, operands []*ast.Term, iter func(*ast return err } - versionA, err := semver.NewVersion(string(versionStringA)) + versionA, err := semver.Parse(string(versionStringA)) if err != nil { return fmt.Errorf("operand 1: string %s is not a valid SemVer", versionStringA) } - versionB, err := semver.NewVersion(string(versionStringB)) + versionB, err := semver.Parse(string(versionStringB)) if err != nil { return fmt.Errorf("operand 2: string %s is not a valid SemVer", versionStringB) } - result := versionA.Compare(*versionB) - - return iter(ast.InternedTerm(result)) + return iter(ast.InternedTerm(versionA.Compare(versionB))) } func builtinSemVerIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { versionString, err := builtins.StringOperand(operands[0].Value, 
1) - if err != nil { - return iter(ast.InternedTerm(false)) - } - - result := true - - _, err = semver.NewVersion(string(versionString)) - if err != nil { - result = false + if err == nil { + _, err = semver.Parse(string(versionString)) } - return iter(ast.InternedTerm(result)) + return iter(ast.InternedTerm(err == nil)) } func init() { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go index c50efe4a80..6ee467efc8 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go @@ -61,22 +61,34 @@ func builtinSetIntersection(_ BuiltinContext, operands []*ast.Term, iter func(*a // builtinSetUnion returns the union of the given input sets func builtinSetUnion(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // The set union logic here is duplicated and manually inlined on - // purpose. By lifting this logic up a level, and not doing pairwise - // set unions, we avoid a number of heap allocations. This improves - // performance dramatically over the naive approach. - result := ast.NewSet() - + // The set union logic here is manually inlined on purpose. By lifting + // this logic up a level and not doing pairwise set unions, we avoid + // many heap allocations. We also pre-allocate the result set by first + // counting total elements across all input sets. 
inputSet, err := builtins.SetOperand(operands[0].Value, 1) if err != nil { return err } + // First pass: count total elements for pre-allocation + totalSize := 0 err = inputSet.Iter(func(x *ast.Term) error { item, err := builtins.SetOperand(x.Value, 1) if err != nil { return err } + totalSize += item.Len() + return nil + }) + if err != nil { + return err + } + + // Pre-allocate result set with estimated capacity + result := ast.NewSetWithCapacity(totalSize) + + err = inputSet.Iter(func(x *ast.Term) error { + item, _ := builtins.SetOperand(x.Value, 1) // error checked above item.Foreach(result.Add) return nil }) diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/sink.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/sink.go new file mode 100644 index 0000000000..15208086b2 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/sink.go @@ -0,0 +1,73 @@ +package topdown + +import ( + "bytes" + "io" +) + +var _ io.Writer = (*sinkW)(nil) + +type sinkWriter interface { + io.Writer + String() string + Grow(int) + WriteByte(byte) error + WriteString(string) (int, error) +} + +type sinkW struct { + buf *bytes.Buffer + cancel Cancel + err error +} + +func newSink(name string, hint int, c Cancel) sinkWriter { + b := &bytes.Buffer{} + if hint > 0 { + b.Grow(hint) + } + + if c == nil { + return b + } + + return &sinkW{ + cancel: c, + buf: b, + err: Halt{ + Err: &Error{ + Code: CancelErr, + Message: name + ": timed out before finishing", + }, + }, + } +} + +func (sw *sinkW) Grow(n int) { + sw.buf.Grow(n) +} + +func (sw *sinkW) Write(bs []byte) (int, error) { + if sw.cancel.Cancelled() { + return 0, sw.err + } + return sw.buf.Write(bs) +} + +func (sw *sinkW) WriteByte(b byte) error { + if sw.cancel.Cancelled() { + return sw.err + } + return sw.buf.WriteByte(b) +} + +func (sw *sinkW) WriteString(s string) (int, error) { + if sw.cancel.Cancelled() { + return 0, sw.err + } + return sw.buf.WriteString(s) +} + +func (sw *sinkW) String() string { + 
return sw.buf.String() +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go index 53108ca0db..27c841102b 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "math/big" - "sort" "strconv" "strings" "unicode" @@ -18,6 +17,7 @@ import ( "github.com/open-policy-agent/opa/v1/ast" "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/util" ) func builtinAnyPrefixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -151,7 +151,7 @@ func builtinFormatInt(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter return iter(ast.InternedTerm(fmt.Sprintf(format, i))) } -func builtinConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinConcat(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { join, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err @@ -162,11 +162,13 @@ func builtinConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) return iter(term) } + sb := newSink(ast.Concat.Name, 0, bctx.Cancel) + // NOTE(anderseknert): // More or less Go's strings.Join implementation, but where we avoid // creating an intermediate []string slice to pass to that function, // as that's expensive (3.5x more space allocated). Instead we build - // the string directly using a strings.Builder to concatenate the string + // the string directly using the sink to concatenate the string // values from the array/set with the separator. 
n := 0 switch b := operands[1].Value.(type) { @@ -181,25 +183,36 @@ func builtinConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) } sep := string(join) n += len(sep) * (l - 1) - var sb strings.Builder sb.Grow(n) - sb.WriteString(string(b.Elem(0).Value.(ast.String))) + if _, err := sb.WriteString(string(b.Elem(0).Value.(ast.String))); err != nil { + return err + } if sep == "" { for i := 1; i < l; i++ { - sb.WriteString(string(b.Elem(i).Value.(ast.String))) + if _, err := sb.WriteString(string(b.Elem(i).Value.(ast.String))); err != nil { + return err + } } } else if len(sep) == 1 { // when the separator is a single byte, sb.WriteByte is substantially faster bsep := sep[0] for i := 1; i < l; i++ { - sb.WriteByte(bsep) - sb.WriteString(string(b.Elem(i).Value.(ast.String))) + if err := sb.WriteByte(bsep); err != nil { + return err + } + if _, err := sb.WriteString(string(b.Elem(i).Value.(ast.String))); err != nil { + return err + } } } else { // for longer separators, there is no such difference between WriteString and Write for i := 1; i < l; i++ { - sb.WriteString(sep) - sb.WriteString(string(b.Elem(i).Value.(ast.String))) + if _, err := sb.WriteString(sep); err != nil { + return err + } + if _, err := sb.WriteString(string(b.Elem(i).Value.(ast.String))); err != nil { + return err + } } } return iter(ast.InternedTerm(sb.String())) @@ -214,12 +227,15 @@ func builtinConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) sep := string(join) l := b.Len() n += len(sep) * (l - 1) - var sb strings.Builder sb.Grow(n) for i, v := range b.Slice() { - sb.WriteString(string(v.Value.(ast.String))) + if _, err := sb.WriteString(string(v.Value.(ast.String))); err != nil { + return err + } if i < l-1 { - sb.WriteString(sep) + if _, err := sb.WriteString(sep); err != nil { + return err + } } } return iter(ast.InternedTerm(sb.String())) @@ -514,21 +530,15 @@ func builtinSplit(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e return err } - 
if !strings.Contains(string(s), string(d)) { + text, delim := string(s), string(d) + if !strings.Contains(text, delim) { return iter(ast.ArrayTerm(operands[0])) } - elems := strings.Split(string(s), string(d)) - arr := make([]*ast.Term, len(elems)) - - for i := range elems { - arr[i] = ast.InternedTerm(elems[i]) - } - - return iter(ast.ArrayTerm(arr...)) + return iter(ast.ArrayTerm(util.SplitMap(text, delim, ast.InternedTerm)...)) } -func builtinReplace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinReplace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { s, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err @@ -544,7 +554,12 @@ func builtinReplace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) return err } - replaced := strings.ReplaceAll(string(s), string(old), string(n)) + sink := newSink(ast.Replace.Name, len(s), bctx.Cancel) + replacer := strings.NewReplacer(string(old), string(n)) + if _, err := replacer.WriteString(sink, string(s)); err != nil { + return err + } + replaced := sink.String() if replaced == string(s) { return iter(operands[0]) } @@ -552,34 +567,38 @@ func builtinReplace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) return iter(ast.InternedTerm(replaced)) } -func builtinReplaceN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinReplaceN(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { patterns, err := builtins.ObjectOperand(operands[0].Value, 1) if err != nil { return err } - keys := patterns.Keys() - sort.Slice(keys, func(i, j int) bool { return ast.Compare(keys[i].Value, keys[j].Value) < 0 }) s, err := builtins.StringOperand(operands[1].Value, 2) if err != nil { return err } - oldnewArr := make([]string, 0, len(keys)*2) + keys := util.SortedFunc(patterns.Keys(), ast.TermValueCompare) + pairs := make([]string, 0, len(keys)*2) + for _, k := range keys 
{ keyVal, ok := k.Value.(ast.String) if !ok { return builtins.NewOperandErr(1, "non-string key found in pattern object") } - val := patterns.Get(k) // cannot be nil - strVal, ok := val.Value.(ast.String) + strVal, ok := patterns.Get(k).Value.(ast.String) if !ok { return builtins.NewOperandErr(1, "non-string value found in pattern object") } - oldnewArr = append(oldnewArr, string(keyVal), string(strVal)) + pairs = append(pairs, string(keyVal), string(strVal)) } - return iter(ast.InternedTerm(strings.NewReplacer(oldnewArr...).Replace(string(s)))) + sink := newSink(ast.ReplaceN.Name, len(s), bctx.Cancel) + replacer := strings.NewReplacer(pairs...) + if _, err := replacer.WriteString(sink, string(s)); err != nil { + return err + } + return iter(ast.InternedTerm(sink.String())) } func builtinTrim(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -593,12 +612,13 @@ func builtinTrim(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) er return err } - trimmed := strings.Trim(string(s), string(c)) - if trimmed == string(s) { + str := string(s) + trimmed := strings.Trim(str, string(c)) + if trimmed == str { return iter(operands[0]) } - return iter(ast.InternedTerm(strings.Trim(string(s), string(c)))) + return iter(ast.InternedTerm(trimmed)) } func builtinTrimLeft(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go index 29038a6579..524c5bde0d 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go @@ -2,6 +2,7 @@ package topdown import ( "bytes" + "strings" "text/template" "github.com/open-policy-agent/opa/v1/ast" @@ -30,14 +31,13 @@ func renderTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) return err } - // Do not attempt to render if template variable keys are 
missing - tmpl.Option("missingkey=error") var buf bytes.Buffer if err := tmpl.Execute(&buf, templateVariables); err != nil { return err } - return iter(ast.StringTerm(buf.String())) + res := strings.ReplaceAll(buf.String(), "", "") + return iter(ast.StringTerm(res)) } func init() { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/template_string.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/template_string.go new file mode 100644 index 0000000000..0e705b81bf --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/template_string.go @@ -0,0 +1,45 @@ +// Copyright 2025 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "strings" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +func builtinTemplateString(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + arr, err := builtins.ArrayOperand(operands[0].Value, 1) + if err != nil { + return err + } + + buf := make([]string, arr.Len()) + + var count int + err = builtinPrintCrossProductOperands(bctx.Location, buf, arr, 0, func(buf []string) error { + count += 1 + // Precautionary run-time assertion that template-strings can't produce multiple outputs; e.g. for custom relation type built-ins not known at compile-time. 
+ if count > 1 { + return Halt{Err: &Error{ + Code: ConflictErr, + Location: bctx.Location, + Message: "template-strings must not produce multiple outputs", + }} + } + return nil + }) + + if err != nil { + return err + } + + return iter(ast.StringTerm(strings.Join(buf, ""))) +} + +func init() { + RegisterBuiltinFunc(ast.InternalTemplateString.Name, builtinTemplateString) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go index 72d290073c..bb2c9e1c1c 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go @@ -21,6 +21,7 @@ import ( "fmt" "hash" "math/big" + "strconv" "strings" "github.com/lestrrat-go/jwx/v3/jwk" @@ -342,7 +343,7 @@ func getKeysFromCertOrJWK(certificate string) ([]verificationKey, error) { if !ok { continue } - var key interface{} + var key any if err := jwk.Export(k, &key); err != nil { return nil, err } @@ -428,7 +429,7 @@ func builtinJWTVerify(bctx BuiltinContext, jwt ast.Value, keyStr ast.Value, hash // If a match is found, verify using only that key. Only applicable when a JWKS was provided. 
if header.kid != "" { if key := getKeyByKid(header.kid, keys); key != nil { - err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature)) + err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), signature) return done(err == nil) } @@ -440,7 +441,7 @@ func builtinJWTVerify(bctx BuiltinContext, jwt ast.Value, keyStr ast.Value, hash if key.alg == "" { // No algorithm provided for the key - this is likely a certificate and not a JWKS, so // we'll need to verify to find out - err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature)) + err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), signature) if err == nil { return done(true) } @@ -448,7 +449,7 @@ func builtinJWTVerify(bctx BuiltinContext, jwt ast.Value, keyStr ast.Value, hash if header.alg != key.alg { continue } - err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature)) + err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), signature) if err == nil { return done(true) } @@ -509,7 +510,7 @@ func builtinJWTVerifyHS(bctx BuiltinContext, operands []*ast.Term, hashF func() return err } - valid := hmac.Equal([]byte(signature), mac.Sum(nil)) + valid := hmac.Equal(signature, mac.Sum(nil)) putTokenInCache(bctx, jwt, astSecret, nil, nil, valid) @@ -662,7 +663,7 @@ func (constraints *tokenConstraints) validate() error { } // verify verifies a JWT using the constraints and the algorithm from the header -func (constraints *tokenConstraints) verify(kid, alg, header, payload, signature string) error { +func (constraints *tokenConstraints) verify(kid, alg, header, payload string, signature []byte) error { // Construct the payload plaintext := append(append([]byte(header), '.'), []byte(payload)...) 
@@ -670,7 +671,7 @@ func (constraints *tokenConstraints) verify(kid, alg, header, payload, signature if constraints.keys != nil { if kid != "" { if key := getKeyByKid(kid, constraints.keys); key != nil { - err := jwsbb.Verify(key.key, alg, plaintext, []byte(signature)) + err := jwsbb.Verify(key.key, alg, plaintext, signature) if err != nil { return errSignatureNotVerified } @@ -681,7 +682,7 @@ func (constraints *tokenConstraints) verify(kid, alg, header, payload, signature verified := false for _, key := range constraints.keys { if key.alg == "" { - err := jwsbb.Verify(key.key, alg, plaintext, []byte(signature)) + err := jwsbb.Verify(key.key, alg, plaintext, signature) if err == nil { verified = true break @@ -690,7 +691,7 @@ func (constraints *tokenConstraints) verify(kid, alg, header, payload, signature if alg != key.alg { continue } - err := jwsbb.Verify(key.key, alg, plaintext, []byte(signature)) + err := jwsbb.Verify(key.key, alg, plaintext, signature) if err == nil { verified = true break @@ -704,7 +705,7 @@ func (constraints *tokenConstraints) verify(kid, alg, header, payload, signature return nil } if constraints.secret != "" { - err := jwsbb.Verify([]byte(constraints.secret), alg, plaintext, []byte(signature)) + err := jwsbb.Verify([]byte(constraints.secret), alg, plaintext, signature) if err != nil { return errSignatureNotVerified } @@ -1131,8 +1132,8 @@ func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func switch v := nbf.Value.(type) { case ast.Number: // constraints.time is in nanoseconds but nbf Value is in seconds - compareTime := ast.FloatNumberTerm(constraints.time / 1000000000) - if ast.Compare(compareTime, v) == -1 { + compareTime := ast.Number(strconv.FormatFloat(constraints.time/1000000000, 'g', -1, 64)) + if compareTime.Compare(v) == -1 { return iter(unverified) } default: @@ -1170,17 +1171,17 @@ func decodeJWT(a ast.Value) (*JSONWebToken, error) { return &JSONWebToken{header: parts[0], payload: parts[1], signature: 
parts[2]}, nil } -func (token *JSONWebToken) decodeSignature() (string, error) { +func (token *JSONWebToken) decodeSignature() ([]byte, error) { decodedSignature, err := getResult(builtinBase64UrlDecode, ast.StringTerm(token.signature)) if err != nil { - return "", err + return nil, err } - signatureAst, err := builtins.StringOperand(decodedSignature.Value, 1) + signatureBs, err := builtins.StringOperandByteSlice(decodedSignature.Value, 1) if err != nil { - return "", err + return nil, err } - return string(signatureAst), err + return signatureBs, nil } // Extract, validate and return the JWT header as an ast.Object. @@ -1289,7 +1290,11 @@ func createTokenCacheKey(serializedJwt ast.Value, publicKey ast.Value) ast.Value func init() { // By default, the JWT cache is disabled. - cache.RegisterDefaultInterQueryBuiltinValueCacheConfig(tokenCacheName, nil) + disabled := true + var tokenCache = cache.NamedValueCacheConfig{ + Disabled: &disabled, + } + cache.RegisterDefaultInterQueryBuiltinValueCacheConfig(tokenCacheName, &tokenCache) RegisterBuiltinFunc(ast.JWTDecode.Name, builtinJWTDecode) RegisterBuiltinFunc(ast.JWTVerifyRS256.Name, builtinJWTVerifyRS256) diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go index c9df12b4c5..52451dc6bd 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go @@ -170,6 +170,7 @@ func (evt *Event) equalNodes(other *Event) bool { } // Tracer defines the interface for tracing in the top-down evaluation engine. +// // Deprecated: Use QueryTracer instead. type Tracer interface { Enabled() bool @@ -230,6 +231,7 @@ func (b *BufferTracer) Enabled() bool { } // Trace adds the event to the buffer. +// // Deprecated: Use TraceEvent instead. 
func (b *BufferTracer) Trace(evt *Event) { *b = append(*b, evt) @@ -409,7 +411,7 @@ func formatEvent(event *Event, depth int) string { var details any if node, ok := event.Node.(*ast.Rule); ok { - details = node.Path() + details = ast.RulePath(node) } else if event.Ref != nil { details = event.Ref } else { @@ -806,7 +808,7 @@ func printPrettyVars(w *bytes.Buffer, exprVars map[string]varInfo) { w.WriteString("\n\nWhere:\n") for _, info := range byName { - w.WriteString(fmt.Sprintf("\n%s: %s", info.Title(), iStrs.Truncate(info.Value(), maxPrettyExprVarWidth))) + fmt.Fprintf(w, "\n%s: %s", info.Title(), iStrs.Truncate(info.Value(), maxPrettyExprVarWidth)) } return @@ -878,7 +880,7 @@ func printArrows(w *bytes.Buffer, l []varInfo, printValueAt int) { valueStr := iStrs.Truncate(info.Value(), maxPrettyExprVarWidth) if (i > 0 && col == l[i-1].col) || (i < len(l)-1 && col == l[i+1].col) { // There is another var on this column, so we need to include the name to differentiate them. - w.WriteString(fmt.Sprintf("%s: %s", info.Title(), valueStr)) + fmt.Fprintf(w, "%s: %s", info.Title(), valueStr) } else { w.WriteString(valueStr) } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go index 1c8961e71f..9f39f12567 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go @@ -25,21 +25,22 @@ func evalWalk(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error func walk(filter, path *ast.Array, input *ast.Term, iter func(*ast.Term) error) error { if filter == nil || filter.Len() == 0 { - var pathCopy *ast.Array if path == nil { - pathCopy = ast.InternedEmptyArrayValue + if err := iter(ast.ArrayTerm(ast.NewTerm(ast.InternedEmptyArrayValue), input)); err != nil { + return err + } } else { // Shallow copy, as while the array is modified, the elements are not - pathCopy = copyShallow(path) - } - - // TODO(ae): I'd 
*really* like these terms to be retrieved from a sync.Pool, and - // returned after iter is called. However, all my atttempts to do this have failed - // as there seems to be something holding on to these references after the call, - // leading to modifications that entirely alter the results. Perhaps this is not - // possible to do, but if it is,it would be a huge performance win. - if err := iter(ast.ArrayTerm(ast.NewTerm(pathCopy), input)); err != nil { - return err + pathCopy := copyShallow(path) + + // TODO(ae): I'd *really* like these terms to be retrieved from a sync.Pool, and + // returned after iter is called. However, all my atttempts to do this have failed + // as there seems to be something holding on to these references after the call, + // leading to modifications that entirely alter the results. Perhaps this is not + // possible to do, but if it is,it would be a huge performance win. + if err := iter(ast.ArrayTerm(ast.NewTerm(pathCopy), input)); err != nil { + return err + } } } diff --git a/vendor/github.com/open-policy-agent/opa/v1/types/types.go b/vendor/github.com/open-policy-agent/opa/v1/types/types.go index 366903f0cb..fc1db120a0 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/types/types.go +++ b/vendor/github.com/open-policy-agent/opa/v1/types/types.go @@ -219,7 +219,7 @@ func (t *Array) toMap() map[string]any { func (t *Array) String() string { prefix := "array" - buf := []string{} + buf := make([]string, 0, len(t.static)) for _, tpe := range t.static { buf = append(buf, Sprint(tpe)) } @@ -716,6 +716,7 @@ func (t *Function) NamedFuncArgs() FuncArgs { } // Args returns the function's arguments as a slice, ignoring variadic arguments. +// // Deprecated: Use FuncArgs instead. 
func (t *Function) Args() []Type { cpy := make([]Type, len(t.args)) diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/graph.go b/vendor/github.com/open-policy-agent/opa/v1/util/graph.go index f0e8242454..acb62590b6 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/util/graph.go +++ b/vendor/github.com/open-policy-agent/opa/v1/util/graph.go @@ -77,13 +77,10 @@ func dfsRecursive(t Traversal, eq Equals, u, z T, path []T) []T { } for _, v := range t.Edges(u) { if eq(v, z) { - path = append(path, z) - path = append(path, u) - return path + return append(path, z, u) } if p := dfsRecursive(t, eq, v, z, path); len(p) > 0 { - path = append(p, u) - return path + return append(p, u) } } return path diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/json.go b/vendor/github.com/open-policy-agent/opa/v1/util/json.go index fdb2626c78..de95ed50bf 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/util/json.go +++ b/vendor/github.com/open-policy-agent/opa/v1/util/json.go @@ -10,6 +10,7 @@ import ( "fmt" "io" "reflect" + "strconv" "sigs.k8s.io/yaml" @@ -19,15 +20,14 @@ import ( // UnmarshalJSON parses the JSON encoded data and stores the result in the value // pointed to by x. // -// This function is intended to be used in place of the standard json.Marshal -// function when json.Number is required. +// This function is intended to be used in place of the standard [json.Marshal] +// function when [json.Number] is required. func UnmarshalJSON(bs []byte, x any) error { return unmarshalJSON(bs, x, true) } func unmarshalJSON(bs []byte, x any, ext bool) error { - buf := bytes.NewBuffer(bs) - decoder := NewJSONDecoder(buf) + decoder := NewJSONDecoder(bytes.NewBuffer(bs)) if err := decoder.Decode(x); err != nil { if handler := extension.FindExtension(".json"); handler != nil && ext { return handler(bs, x) @@ -49,8 +49,8 @@ func unmarshalJSON(bs []byte, x any, ext bool) error { // NewJSONDecoder returns a new decoder that reads from r. 
// -// This function is intended to be used in place of the standard json.NewDecoder -// when json.Number is required. +// This function is intended to be used in place of the standard [json.NewDecoder] +// when [json.Number] is required. func NewJSONDecoder(r io.Reader) *json.Decoder { decoder := json.NewDecoder(r) decoder.UseNumber() @@ -87,6 +87,55 @@ func MustMarshalJSON(x any) []byte { // rego.Input and inmem's Write operations. Works with both references and // values. func RoundTrip(x *any) error { + // Avoid round-tripping types that won't change as a result of + // marshalling/unmarshalling, as even for those values, round-tripping + // comes with a significant cost. + if x == nil || !NeedsRoundTrip(*x) { + return nil + } + + // For number types, we can write the json.Number representation + // directly into x without marshalling to bytes and back. + a := *x + switch v := a.(type) { + case int: + *x = json.Number(strconv.Itoa(v)) + return nil + case int8: + *x = json.Number(strconv.FormatInt(int64(v), 10)) + return nil + case int16: + *x = json.Number(strconv.FormatInt(int64(v), 10)) + return nil + case int32: + *x = json.Number(strconv.FormatInt(int64(v), 10)) + return nil + case int64: + *x = json.Number(strconv.FormatInt(v, 10)) + return nil + case uint: + *x = json.Number(strconv.FormatUint(uint64(v), 10)) + return nil + case uint8: + *x = json.Number(strconv.FormatUint(uint64(v), 10)) + return nil + case uint16: + *x = json.Number(strconv.FormatUint(uint64(v), 10)) + return nil + case uint32: + *x = json.Number(strconv.FormatUint(uint64(v), 10)) + return nil + case uint64: + *x = json.Number(strconv.FormatUint(v, 10)) + return nil + case float32: + *x = json.Number(strconv.FormatFloat(float64(v), 'f', -1, 32)) + return nil + case float64: + *x = json.Number(strconv.FormatFloat(v, 'f', -1, 64)) + return nil + } + bs, err := json.Marshal(x) if err != nil { return err @@ -94,15 +143,28 @@ func RoundTrip(x *any) error { return UnmarshalJSON(bs, x) } +// 
NeedsRoundTrip returns true if the value won't change as a result of +// a marshalling/unmarshalling round-trip. Since [RoundTrip] itself calls +// this you normally don't need to call this function directly, unless you +// want to make decisions based on the round-tripability of a value without +// actually doing the round-trip. +func NeedsRoundTrip(x any) bool { + switch x.(type) { + case nil, bool, string, json.Number: + return false + } + return true +} + // Reference returns a pointer to its argument unless the argument already is // a pointer. If the argument is **t, or ***t, etc, it will return *t. // // Used for preparing Go types (including pointers to structs) into values to be -// put through util.RoundTrip(). +// put through [RoundTrip]. func Reference(x any) *any { var y any rv := reflect.ValueOf(x) - if rv.Kind() == reflect.Ptr { + if rv.Kind() == reflect.Pointer { return Reference(rv.Elem().Interface()) } if rv.Kind() != reflect.Invalid { diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/performance.go b/vendor/github.com/open-policy-agent/opa/v1/util/performance.go index e9b4468188..3c852638e2 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/util/performance.go +++ b/vendor/github.com/open-policy-agent/opa/v1/util/performance.go @@ -1,11 +1,41 @@ package util import ( - "math" "slices" + "strconv" + "strings" + "sync" "unsafe" ) +// SyncPool is a generic sync.Pool for type T, providing some convenience +// over sync.Pool directly: [SyncPool.Put] ensures that nil values are not +// put into the pool, and [SyncPool.Get] returns a pointer to T without having +// to do a type assertion at the call site. 
+type SyncPool[T any] struct { + pool sync.Pool +} + +func NewSyncPool[T any]() *SyncPool[T] { + return &SyncPool[T]{ + pool: sync.Pool{ + New: func() any { + return new(T) + }, + }, + } +} + +func (p *SyncPool[T]) Get() *T { + return p.pool.Get().(*T) +} + +func (p *SyncPool[T]) Put(x *T) { + if x != nil { + p.pool.Put(x) + } +} + // NewPtrSlice returns a slice of pointers to T with length n, // with only 2 allocations performed no matter the size of n. // See: @@ -42,6 +72,12 @@ func StringToByteSlice[T ~string](s T) []byte { // NumDigitsInt returns the number of digits in n. // This is useful for pre-allocating buffers for string conversion. func NumDigitsInt(n int) int { + return NumDigitsInt64(int64(n)) +} + +// NumDigitsInt64 returns the number of digits in n. +// This is useful for pre-allocating buffers for string conversion. +func NumDigitsInt64(n int64) int { if n == 0 { return 1 } @@ -50,7 +86,12 @@ func NumDigitsInt(n int) int { n = -n } - return int(math.Log10(float64(n))) + 1 + count := 0 + for n > 0 { + n /= 10 + count++ + } + return count } // NumDigitsUint returns the number of digits in n. @@ -60,16 +101,78 @@ func NumDigitsUint(n uint64) int { return 1 } - return int(math.Log10(float64(n))) + 1 -} - -// KeysCount returns the number of keys in m that satisfy predicate p. -func KeysCount[K comparable, V any](m map[K]V, p func(K) bool) int { count := 0 - for k := range m { - if p(k) { - count++ - } + for n > 0 { + n /= 10 + count++ } return count } + +// AppendInt is a less messy version of strconv.AppendInt for base 10 ints. +func AppendInt(buf []byte, n int) []byte { + return strconv.AppendInt(buf, int64(n), 10) +} + +// SplitMap calls fn for each delim-separated part of text and returns a slice of the results. +// Cheaper than calling fn on strings.Split(text, delim), as it avoids allocating an intermediate slice of strings. 
+func SplitMap[T any](text string, delim string, fn func(string) T) []T { + sl := make([]T, 0, strings.Count(text, delim)+1) + for s := range strings.SplitSeq(text, delim) { + sl = append(sl, fn(s)) + } + return sl +} + +// SlicePool is a pool for (pointers to) slices of type T. +// It uses sync.Pool to pool the slices, and grows them as needed. +type SlicePool[T any] struct { + pool sync.Pool +} + +// NewSlicePool creates a new SlicePool for slices of type T with the given initial length. +// This number is only a hint, as the slices will grow as needed. For best performance, store +// slices of similar lengths in the same pool. +func NewSlicePool[T any](length int) *SlicePool[T] { + return &SlicePool[T]{ + pool: sync.Pool{ + New: func() any { + s := make([]T, length) + return &s + }, + }, + } +} + +// Get returns a pointer to a slice of type T with the given length +// from the pool. The slice capacity will grow as needed to accommodate +// the requested length. The returned slice will have all its elements +// set to the zero value of T. Returns a pointer to avoid allocating. +func (sp *SlicePool[T]) Get(length int) *[]T { + s := sp.pool.Get().(*[]T) + d := *s + + if cap(d) < length { + d = slices.Grow(d, length) + } + + d = d[:length] // reslice to requested length, while keeping capacity + + clear(d) + + *s = d + return s +} + +// Put returns a pointer to a slice of type T to the pool. +func (sp *SlicePool[T]) Put(s *[]T) { + if s != nil { + sp.pool.Put(s) + } +} + +// SortedFunc is simply a shorthand for [slices.SortFunc] which also returns the sorted slice. 
+func SortedFunc[T any, S ~[]T](s S, cmp func(a, b T) int) S { + slices.SortFunc(s, cmp) + return s +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go b/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go index ddffe2a4de..97dacd0c96 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go +++ b/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go @@ -3,79 +3,58 @@ package util import ( "bytes" "compress/gzip" - "encoding/binary" "errors" "io" "net/http" "strings" - "sync" "github.com/open-policy-agent/opa/v1/util/decoding" ) -var gzipReaderPool = sync.Pool{ - New: func() any { - reader := new(gzip.Reader) - return reader - }, -} +var gzipReaderPool = NewSyncPool[gzip.Reader]() // Note(philipc): Originally taken from server/server.go -// The DecodingLimitHandler handles validating that the gzip payload is within the -// allowed max size limit. Thus, in the event of a forged payload size trailer, -// the worst that can happen is that we waste memory up to the allowed max gzip -// payload size, but not an unbounded amount of memory, as was potentially -// possible before. +// The DecodingLimitHandler handles setting the max size limits in the context. +// This function enforces those limits. For gzip payloads, we use a LimitReader +// to ensure we don't decompress more than the allowed maximum, preventing +// memory exhaustion from forged gzip trailers. func ReadMaybeCompressedBody(r *http.Request) ([]byte, error) { - var content *bytes.Buffer - // Note(philipc): If the request body is of unknown length (such as what - // happens when 'Transfer-Encoding: chunked' is set), we have to do an - // incremental read of the body. In this case, we can't be too clever, we - // just do the best we can with whatever is streamed over to us. - // Fetch gzip payload size limit from request context. 
- if maxLength, ok := decoding.GetServerDecodingMaxLen(r.Context()); ok { - bs, err := io.ReadAll(io.LimitReader(r.Body, maxLength)) - if err != nil { - return bs, err - } - content = bytes.NewBuffer(bs) - } else { - // Read content from the request body into a buffer of known size. - content = bytes.NewBuffer(make([]byte, 0, r.ContentLength)) - if _, err := io.CopyN(content, r.Body, r.ContentLength); err != nil { - return content.Bytes(), err - } + length := r.ContentLength + if maxLenConf, ok := decoding.GetServerDecodingMaxLen(r.Context()); ok { + length = maxLenConf + } + + content, err := io.ReadAll(io.LimitReader(r.Body, length)) + if err != nil { + return nil, err } - // Decompress gzip content by reading from the buffer. if strings.Contains(r.Header.Get("Content-Encoding"), "gzip") { - // Fetch gzip payload size limit from request context. gzipMaxLength, _ := decoding.GetServerDecodingGzipMaxLen(r.Context()) - // Note(philipc): The last 4 bytes of a well-formed gzip blob will - // always be a little-endian uint32, representing the decompressed - // content size, modulo 2^32. We validate that the size is safe, - // earlier in DecodingLimitHandler. - sizeTrailerField := binary.LittleEndian.Uint32(content.Bytes()[content.Len()-4:]) - if sizeTrailerField > uint32(gzipMaxLength) { - return content.Bytes(), errors.New("gzip payload too large") + gzReader := gzipReaderPool.Get() + defer func() { + gzReader.Close() + gzipReaderPool.Put(gzReader) + }() + + if err := gzReader.Reset(bytes.NewReader(content)); err != nil { + return nil, err } - // Pull a gzip decompressor from the pool, and assign it to the current - // buffer, using Reset(). Later, return it back to the pool for another - // request to use. 
- gzReader := gzipReaderPool.Get().(*gzip.Reader) - if err := gzReader.Reset(content); err != nil { + + decompressed := bytes.NewBuffer(make([]byte, 0, len(content))) + limitReader := io.LimitReader(gzReader, gzipMaxLength+1) + if _, err := decompressed.ReadFrom(limitReader); err != nil { return nil, err } - defer gzReader.Close() - defer gzipReaderPool.Put(gzReader) - decompressedContent := bytes.NewBuffer(make([]byte, 0, sizeTrailerField)) - if _, err := io.CopyN(decompressedContent, gzReader, int64(sizeTrailerField)); err != nil { - return decompressedContent.Bytes(), err + + if int64(decompressed.Len()) > gzipMaxLength { + return nil, errors.New("gzip payload too large") } - return decompressedContent.Bytes(), nil + + return decompressed.Bytes(), nil } // Request was not compressed; return the content bytes. - return content.Bytes(), nil + return content, nil } diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/strings.go b/vendor/github.com/open-policy-agent/opa/v1/util/strings.go new file mode 100644 index 0000000000..8ea0aedc35 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/strings.go @@ -0,0 +1,13 @@ +package util + +import "strings" + +// WithPrefix ensures that the string s starts with the given prefix. +// If s already starts with prefix, it is returned unchanged. 
+func WithPrefix(s, prefix string) string { + if strings.HasPrefix(s, prefix) { + return s + } + + return prefix + s +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/version/version.go b/vendor/github.com/open-policy-agent/opa/v1/version/version.go index ea6d25bbb4..a5e82132b4 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/version/version.go +++ b/vendor/github.com/open-policy-agent/opa/v1/version/version.go @@ -10,7 +10,7 @@ import ( "runtime/debug" ) -var Version = "1.8.0" +var Version = "1.15.1" // GoVersion is the version of Go this was built with var GoVersion = runtime.Version() diff --git a/vendor/github.com/opencontainers/runc/internal/linux/linux.go b/vendor/github.com/opencontainers/runc/internal/linux/linux.go index 88fe5e0dfd..1371315932 100644 --- a/vendor/github.com/opencontainers/runc/internal/linux/linux.go +++ b/vendor/github.com/opencontainers/runc/internal/linux/linux.go @@ -16,7 +16,7 @@ func Dup3(oldfd, newfd, flags int) error { } // Exec wraps [unix.Exec]. -func Exec(cmd string, args []string, env []string) error { +func Exec(cmd string, args, env []string) error { err := retryOnEINTR(func() error { return unix.Exec(cmd, args, env) }) @@ -66,6 +66,22 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from unix.Sockaddr, err error return n, from, err } +// SchedSetaffinity wraps sched_setaffinity syscall without unix.CPUSet size limitation. +func SchedSetaffinity(pid int, buf []byte) error { + err := retryOnEINTR(func() error { + _, _, errno := unix.Syscall( + unix.SYS_SCHED_SETAFFINITY, + uintptr(pid), + uintptr(len(buf)), + uintptr((unsafe.Pointer)(&buf[0]))) + if errno != 0 { + return errno + } + return nil + }) + return os.NewSyscallError("sched_setaffinity", err) +} + // Sendmsg wraps [unix.Sendmsg]. 
func Sendmsg(fd int, p, oob []byte, to unix.Sockaddr, flags int) error { err := retryOnEINTR(func() error { @@ -75,13 +91,9 @@ func Sendmsg(fd int, p, oob []byte, to unix.Sockaddr, flags int) error { } // SetMempolicy wraps set_mempolicy. -func SetMempolicy(mode uint, mask *unix.CPUSet) error { +func SetMempolicy(mode int, mask *unix.CPUSet) error { err := retryOnEINTR(func() error { - _, _, errno := unix.Syscall(unix.SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), unsafe.Sizeof(*mask)*8) - if errno != 0 { - return errno - } - return nil + return unix.SetMemPolicy(mode, mask) }) return os.NewSyscallError("set_mempolicy", err) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/memorypolicy.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/memorypolicy.go index 8c34609006..b7b9c6bce1 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/memorypolicy.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/memorypolicy.go @@ -3,29 +3,31 @@ package configs import "golang.org/x/sys/unix" // Memory policy modes and flags as defined in /usr/include/linux/mempolicy.h - +// +// Deprecated: use constants from [unix] instead. 
+// //nolint:revive,staticcheck,nolintlint // ignore ALL_CAPS errors in consts from numaif.h, will match unix.* in the future const ( - MPOL_DEFAULT = 0 - MPOL_PREFERRED = 1 - MPOL_BIND = 2 - MPOL_INTERLEAVE = 3 - MPOL_LOCAL = 4 - MPOL_PREFERRED_MANY = 5 - MPOL_WEIGHTED_INTERLEAVE = 6 + MPOL_DEFAULT = unix.MPOL_DEFAULT + MPOL_PREFERRED = unix.MPOL_PREFERRED + MPOL_BIND = unix.MPOL_BIND + MPOL_INTERLEAVE = unix.MPOL_INTERLEAVE + MPOL_LOCAL = unix.MPOL_LOCAL + MPOL_PREFERRED_MANY = unix.MPOL_PREFERRED_MANY + MPOL_WEIGHTED_INTERLEAVE = unix.MPOL_WEIGHTED_INTERLEAVE - MPOL_F_STATIC_NODES = 1 << 15 - MPOL_F_RELATIVE_NODES = 1 << 14 - MPOL_F_NUMA_BALANCING = 1 << 13 + MPOL_F_STATIC_NODES = unix.MPOL_F_STATIC_NODES + MPOL_F_RELATIVE_NODES = unix.MPOL_F_RELATIVE_NODES + MPOL_F_NUMA_BALANCING = unix.MPOL_F_NUMA_BALANCING ) // LinuxMemoryPolicy contains memory policy configuration. type LinuxMemoryPolicy struct { // Mode specifies memory policy mode without mode flags. See // set_mempolicy() documentation for details. - Mode uint `json:"mode,omitempty"` + Mode int `json:"mode,omitempty"` // Flags contains mode flags. - Flags uint `json:"flags,omitempty"` + Flags int `json:"flags,omitempty"` // Nodes contains NUMA nodes to which the mode applies. Nodes *unix.CPUSet `json:"nodes,omitempty"` } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/internal/userns/userns_maps_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/internal/userns/userns_maps_linux.go index 7a8c2b023b..c2fb8ca719 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/internal/userns/userns_maps_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/internal/userns/userns_maps_linux.go @@ -39,7 +39,7 @@ func parseIdmapData(data []byte) (ms []configs.IDMap, err error) { // Do something equivalent to nsenter --user= cat , but more // efficiently. Returns the contents of the requested file from within the user // namespace. 
-func spawnUserNamespaceCat(nsPath string, path string) ([]byte, error) { +func spawnUserNamespaceCat(nsPath, path string) ([]byte, error) { rdr, wtr, err := os.Pipe() if err != nil { return nil, fmt.Errorf("create pipe for userns spawn failed: %w", err) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go index 06849c83c1..476f51d588 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go @@ -65,6 +65,7 @@ var archs = map[string]string{ "SCMP_ARCH_RISCV64": "riscv64", "SCMP_ARCH_S390": "s390", "SCMP_ARCH_S390X": "s390x", + "SCMP_ARCH_LOONGARCH64": "loong64", } // KnownArchs returns the list of the known archs. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/specconv/spec_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/specconv/spec_linux.go index 214197c3db..5713460e48 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/specconv/spec_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/specconv/spec_linux.go @@ -42,8 +42,8 @@ var ( flag int } complexFlags map[string]func(*configs.Mount) - mpolModeMap map[string]uint - mpolModeFMap map[string]uint + mpolModeMap map[string]int + mpolModeFMap map[string]int ) func initMaps() { @@ -152,20 +152,20 @@ func initMaps() { }, } - mpolModeMap = map[string]uint{ - string(specs.MpolDefault): configs.MPOL_DEFAULT, - string(specs.MpolPreferred): configs.MPOL_PREFERRED, - string(specs.MpolBind): configs.MPOL_BIND, - string(specs.MpolInterleave): configs.MPOL_INTERLEAVE, - string(specs.MpolLocal): configs.MPOL_LOCAL, - string(specs.MpolPreferredMany): configs.MPOL_PREFERRED_MANY, - string(specs.MpolWeightedInterleave): configs.MPOL_WEIGHTED_INTERLEAVE, + mpolModeMap = map[string]int{ + string(specs.MpolDefault): unix.MPOL_DEFAULT, + string(specs.MpolPreferred): 
unix.MPOL_PREFERRED, + string(specs.MpolBind): unix.MPOL_BIND, + string(specs.MpolInterleave): unix.MPOL_INTERLEAVE, + string(specs.MpolLocal): unix.MPOL_LOCAL, + string(specs.MpolPreferredMany): unix.MPOL_PREFERRED_MANY, + string(specs.MpolWeightedInterleave): unix.MPOL_WEIGHTED_INTERLEAVE, } - mpolModeFMap = map[string]uint{ - string(specs.MpolFStaticNodes): configs.MPOL_F_STATIC_NODES, - string(specs.MpolFRelativeNodes): configs.MPOL_F_RELATIVE_NODES, - string(specs.MpolFNumaBalancing): configs.MPOL_F_NUMA_BALANCING, + mpolModeFMap = map[string]int{ + string(specs.MpolFStaticNodes): unix.MPOL_F_STATIC_NODES, + string(specs.MpolFRelativeNodes): unix.MPOL_F_RELATIVE_NODES, + string(specs.MpolFNumaBalancing): unix.MPOL_F_NUMA_BALANCING, } }) } @@ -758,7 +758,7 @@ func initSystemdProps(spec *specs.Spec) ([]systemdDbus.Property, error) { return nil, fmt.Errorf("annotation %s=%s value parse error: %w", k, v, err) } // Check for Sec suffix. - if trimName := strings.TrimSuffix(name, "Sec"); len(trimName) < len(name) { + if trimName, ok := strings.CutSuffix(name, "Sec"); ok && len(trimName) > 0 { // Check for a lowercase ascii a-z just before Sec. if ch := trimName[len(trimName)-1]; ch >= 'a' && ch <= 'z' { // Convert from Sec to USec. @@ -1164,11 +1164,11 @@ func parseMountOptions(options []string) *configs.Mount { } else { recAttrSet |= f.flag recAttrClr &= ^f.flag - if f.flag&unix.MOUNT_ATTR__ATIME == f.flag { - // https://man7.org/linux/man-pages/man2/mount_setattr.2.html - // "cannot simply specify the access-time setting in attr_set, but must also include MOUNT_ATTR__ATIME in the attr_clr field." - recAttrClr |= unix.MOUNT_ATTR__ATIME - } + } + if f.flag&unix.MOUNT_ATTR__ATIME == f.flag { + // https://man7.org/linux/man-pages/man2/mount_setattr.2.html + // "cannot simply specify the access-time setting in attr_set, but must also include MOUNT_ATTR__ATIME in the attr_clr field." 
+ recAttrClr |= unix.MOUNT_ATTR__ATIME } } else if f, exists := extensionFlags[o]; exists { if f.clear { diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go index 61302592ea..222c7f0cc7 100644 --- a/vendor/github.com/openshift/api/config/v1/register.go +++ b/vendor/github.com/openshift/api/config/v1/register.go @@ -72,6 +72,12 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ImageDigestMirrorSetList{}, &ImageTagMirrorSet{}, &ImageTagMirrorSetList{}, + &ImagePolicy{}, + &ImagePolicyList{}, + &ClusterImagePolicy{}, + &ClusterImagePolicyList{}, + &InsightsDataGather{}, + &InsightsDataGatherList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go index d4d09e7fee..e7106ef7ab 100644 --- a/vendor/github.com/openshift/api/config/v1/types.go +++ b/vendor/github.com/openshift/api/config/v1/types.go @@ -9,7 +9,7 @@ import ( // The namespace must be specified at the point of use. type ConfigMapFileReference struct { Name string `json:"name"` - // Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. + // key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. Key string `json:"key,omitempty"` } @@ -17,7 +17,6 @@ type ConfigMapFileReference struct { // The namespace must be specified at the point of use. type ConfigMapNameReference struct { // name is the metadata.name of the referenced config map - // +kubebuilder:validation:Required // +required Name string `json:"name"` } @@ -26,7 +25,6 @@ type ConfigMapNameReference struct { // The namespace must be specified at the point of use. 
type SecretNameReference struct { // name is the metadata.name of the referenced secret - // +kubebuilder:validation:Required // +required Name string `json:"name"` } @@ -35,47 +33,47 @@ type SecretNameReference struct { type HTTPServingInfo struct { // ServingInfo is the HTTP serving information ServingInfo `json:",inline"` - // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. + // maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. MaxRequestsInFlight int64 `json:"maxRequestsInFlight"` - // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if + // requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if // -1 there is no limit on requests. RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"` } // ServingInfo holds information about serving web pages type ServingInfo struct { - // BindAddress is the ip:port to serve on + // bindAddress is the ip:port to serve on BindAddress string `json:"bindAddress"` - // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // bindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", // "tcp4", and "tcp6" BindNetwork string `json:"bindNetwork"` // CertInfo is the TLS cert info for serving secure traffic. 
// this is anonymous so that we can inline it for serialization CertInfo `json:",inline"` - // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + // clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates // +optional ClientCA string `json:"clientCA,omitempty"` - // NamedCertificates is a list of certificates to use to secure requests to specific hostnames + // namedCertificates is a list of certificates to use to secure requests to specific hostnames NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"` - // MinTLSVersion is the minimum TLS version supported. + // minTLSVersion is the minimum TLS version supported. // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants MinTLSVersion string `json:"minTLSVersion,omitempty"` - // CipherSuites contains an overridden list of ciphers for the server to support. + // cipherSuites contains an overridden list of ciphers for the server to support. 
// Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants CipherSuites []string `json:"cipherSuites,omitempty"` } // CertInfo relates a certificate with a private key type CertInfo struct { - // CertFile is a file containing a PEM-encoded certificate + // certFile is a file containing a PEM-encoded certificate CertFile string `json:"certFile"` - // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + // keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile KeyFile string `json:"keyFile"` } // NamedCertificate specifies a certificate/key, and the names it should be served for type NamedCertificate struct { - // Names is a list of DNS names this certificate should be used to secure + // names is a list of DNS names this certificate should be used to secure // A name can be a normal DNS name, or can contain leading wildcard segments. Names []string `json:"names,omitempty"` // CertInfo is the TLS cert info for serving secure traffic @@ -121,24 +119,24 @@ type StringSource struct { // StringSourceSpec specifies a string value, or external location type StringSourceSpec struct { - // Value specifies the cleartext value, or an encrypted value if keyFile is specified. + // value specifies the cleartext value, or an encrypted value if keyFile is specified. Value string `json:"value"` - // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. + // env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. Env string `json:"env"` - // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified. + // file references a file containing the cleartext value, or an encrypted value if a keyFile is specified. File string `json:"file"` - // KeyFile references a file containing the key to use to decrypt the value. 
+ // keyFile references a file containing the key to use to decrypt the value. KeyFile string `json:"keyFile"` } // RemoteConnectionInfo holds information necessary for establishing a remote connection type RemoteConnectionInfo struct { - // URL is the remote URL to connect to + // url is the remote URL to connect to URL string `json:"url"` - // CA is the CA for verifying TLS connections + // ca is the CA for verifying TLS connections CA string `json:"ca"` // CertInfo is the TLS client cert information to present // this is anonymous so that we can inline it for serialization @@ -160,11 +158,11 @@ type AdmissionConfig struct { // AdmissionPluginConfig holds the necessary configuration options for admission plugins type AdmissionPluginConfig struct { - // Location is the path to a configuration file that contains the plugin's + // location is the path to a configuration file that contains the plugin's // configuration Location string `json:"location"` - // Configuration is an embedded configuration object to be used as the plugin's + // configuration is an embedded configuration object to be used as the plugin's // configuration. If present, it will be used instead of the path to the configuration file. // +nullable // +kubebuilder:pruning:PreserveUnknownFields @@ -205,9 +203,9 @@ type AuditConfig struct { // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB. MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"` - // PolicyFile is a path to the file that defines the audit policy configuration. + // policyFile is a path to the file that defines the audit policy configuration. PolicyFile string `json:"policyFile"` - // PolicyConfiguration is an embedded policy configuration object to be used + // policyConfiguration is an embedded policy configuration object to be used // as the audit policy configuration. If present, it will be used instead of // the path to the policy file. 
// +nullable @@ -225,9 +223,9 @@ type AuditConfig struct { // EtcdConnectionInfo holds information necessary for connecting to an etcd server type EtcdConnectionInfo struct { - // URLs are the URLs for etcd + // urls are the URLs for etcd URLs []string `json:"urls,omitempty"` - // CA is a file containing trusted roots for the etcd server certificates + // ca is a file containing trusted roots for the etcd server certificates CA string `json:"ca"` // CertInfo is the TLS client cert information for securing communication to etcd // this is anonymous so that we can inline it for serialization @@ -237,7 +235,7 @@ type EtcdConnectionInfo struct { type EtcdStorageConfig struct { EtcdConnectionInfo `json:",inline"` - // StoragePrefix is the path within etcd that the OpenShift resources will + // storagePrefix is the path within etcd that the OpenShift resources will // be rooted under. This value, if changed, will mean existing objects in etcd will // no longer be located. StoragePrefix string `json:"storagePrefix"` @@ -286,8 +284,13 @@ type ClientConnectionOverrides struct { } // GenericControllerConfig provides information to configure a controller +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 type GenericControllerConfig struct { - // ServingInfo is the HTTP serving information for the controller's endpoints + metav1.TypeMeta `json:",inline"` + + // servingInfo is the HTTP serving information for the controller's endpoints ServingInfo HTTPServingInfo `json:"servingInfo"` // leaderElection provides information to elect a leader. Only override this if you have a specific need @@ -324,7 +327,6 @@ type RequiredHSTSPolicy struct { // The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. // foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*. 
// +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:Required // +required DomainPatterns []string `json:"domainPatterns"` diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index d815556d28..b8a4399dbc 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -27,7 +27,6 @@ type APIServer struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec APIServerSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -35,6 +34,7 @@ type APIServer struct { Status APIServerStatus `json:"status"` } +// +openshift:validation:FeatureGateAwareXValidation:featureGate=TLSAdherence,rule="has(oldSelf.tlsAdherence) ? has(self.tlsAdherence) : true",message="tlsAdherence may not be removed once set" type APIServerSpec struct { // servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates // will be used for serving secure traffic. @@ -52,17 +52,50 @@ type APIServerSpec struct { // server from JavaScript applications. // The values are regular expressions that correspond to the Golang regular expression language. // +optional + // +listType=atomic AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"` // encryption allows the configuration of encryption of resources at the datastore layer. // +optional Encryption APIServerEncryption `json:"encryption"` // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. // - // If unset, a default (which may change between releases) is chosen. 
Note that only Old, - // Intermediate and Custom profiles are currently supported, and the maximum available - // minTLSVersion is VersionTLS12. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default is the Intermediate profile. // +optional TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + // tlsAdherence controls if components in the cluster adhere to the TLS security profile + // configured on this APIServer resource. + // + // Valid values are "LegacyAdheringComponentsOnly" and "StrictAllComponents". + // + // When set to "LegacyAdheringComponentsOnly", components that already honor the + // cluster-wide TLS profile continue to do so. Components that do not already honor + // it continue to use their individual TLS configurations. + // + // When set to "StrictAllComponents", all components must honor the configured TLS + // profile unless they have a component-specific TLS configuration that overrides + // it. This mode is recommended for security-conscious deployments and is required + // for certain compliance frameworks. + // + // Note: Some components such as Kubelet and IngressController have their own + // dedicated TLS configuration mechanisms via KubeletConfig and IngressController + // CRs respectively. When these component-specific TLS configurations are set, + // they take precedence over the cluster-wide tlsSecurityProfile. When not set, + // these components fall back to the cluster-wide default. + // + // Components that encounter an unknown value for tlsAdherence should treat it + // as "StrictAllComponents" and log a warning to ensure forward compatibility + // while defaulting to the more secure behavior. + // + // This field is optional. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. 
+ // The current default is LegacyAdheringComponentsOnly. + // + // Once set, this field may be changed to a different value, but may not be removed. + // +openshift:enable:FeatureGate=TLSAdherence + // +optional + TLSAdherence TLSAdherencePolicy `json:"tlsAdherence,omitempty"` // audit specifies the settings for audit configuration to be applied to all OpenShift-provided // API servers in the cluster. // +optional @@ -129,7 +162,6 @@ type Audit struct { type AuditCustomRule struct { // group is a name of group a request user must be member of in order to this profile to apply. // - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Group string `json:"group"` @@ -146,9 +178,8 @@ type AuditCustomRule struct { // // If unset, the 'Default' profile is used as the default. // - // +kubebuilder:validation:Required // +required - Profile AuditProfileType `json:"profile,omitempty"` + Profile AuditProfileType `json:"profile"` } type APIServerServingCerts struct { @@ -156,6 +187,8 @@ type APIServerServingCerts struct { // If no named certificates are provided, or no named certificates match the server name as understood by a client, // the defaultServingCertificate will be used. // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"` } @@ -165,6 +198,8 @@ type APIServerNamedServingCert struct { // serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. // Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names. // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=64 Names []string `json:"names,omitempty"` // servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. 
// The secret must exist in the openshift-config namespace and contain the following required fields: @@ -173,6 +208,9 @@ type APIServerNamedServingCert struct { ServingCertificate SecretNameReference `json:"servingCertificate"` } +// APIServerEncryption is used to encrypt sensitive resources on the cluster. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=KMSEncryptionProvider,rule="has(self.type) && self.type == 'KMS' ? has(self.kms) : !has(self.kms)",message="kms config is required when encryption type is KMS, and forbidden otherwise" +// +union type APIServerEncryption struct { // type defines what encryption type should be used to encrypt resources at the datastore layer. // When this field is unset (i.e. when it is set to the empty string), identity is implied. @@ -191,9 +229,24 @@ type APIServerEncryption struct { // +unionDiscriminator // +optional Type EncryptionType `json:"type,omitempty"` + + // kms defines the configuration for the external KMS instance that manages the encryption keys, + // when KMS encryption is enabled sensitive resources will be encrypted using keys managed by an + // externally configured KMS instance. + // + // The Key Management Service (KMS) instance provides symmetric encryption and is responsible for + // managing the lifecyle of the encryption keys outside of the control plane. + // This allows integration with an external provider to manage the data encryption keys securely. 
+ // + // +openshift:enable:FeatureGate=KMSEncryptionProvider + // +unionMember + // +optional + KMS *KMSConfig `json:"kms,omitempty"` } -// +kubebuilder:validation:Enum="";identity;aescbc;aesgcm +// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";identity;aescbc;aesgcm +// +openshift:validation:FeatureGateAwareEnum:featureGate=KMSEncryptionProvider,enum="";identity;aescbc;aesgcm;KMS +// +openshift:validation:FeatureGateAwareEnum:featureGate=KMSEncryption,enum="";identity;aescbc;aesgcm;KMS type EncryptionType string const ( @@ -208,11 +261,45 @@ const ( // aesgcm refers to a type where AES-GCM with random nonce and a 32-byte key // is used to perform encryption at the datastore layer. EncryptionTypeAESGCM EncryptionType = "aesgcm" + + // kms refers to a type of encryption where the encryption keys are managed + // outside the control plane in a Key Management Service instance, + // encryption is still performed at the datastore layer. + EncryptionTypeKMS EncryptionType = "KMS" ) type APIServerStatus struct { } +// TLSAdherencePolicy defines which components adhere to the TLS security profile. +// Implementors should use the ShouldHonorClusterTLSProfile helper function from library-go +// rather than checking these values directly. +// +kubebuilder:validation:Enum=LegacyAdheringComponentsOnly;StrictAllComponents +type TLSAdherencePolicy string + +const ( + // TLSAdherencePolicyNoOpinion represents an empty/unset value for tlsAdherence. + // This value cannot be explicitly set and is only present when the field is omitted. + // When the field is omitted, the cluster defaults to LegacyAdheringComponentsOnly + // behavior. Components should treat this the same as LegacyAdheringComponentsOnly. + TLSAdherencePolicyNoOpinion TLSAdherencePolicy = "" + + // TLSAdherencePolicyLegacyAdheringComponentsOnly maintains backward-compatible behavior. 
+ // Components that already honor the cluster-wide TLS profile (such as kube-apiserver, + // openshift-apiserver, oauth-apiserver, and others) continue to do so. Components that do + // not already honor it continue to use their individual TLS configurations (e.g., + // IngressController.spec.tlsSecurityProfile, KubeletConfig.spec.tlsSecurityProfile, + // or component defaults). No additional components are required to start honoring the + // cluster-wide profile in this mode. + TLSAdherencePolicyLegacyAdheringComponentsOnly TLSAdherencePolicy = "LegacyAdheringComponentsOnly" + + // TLSAdherencePolicyStrictAllComponents means all components must honor the configured TLS + // profile unless they have a component-specific TLS configuration that overrides it. + // This mode is recommended for security-conscious deployments and is required + // for certain compliance frameworks. + TLSAdherencePolicyStrictAllComponents TLSAdherencePolicy = "StrictAllComponents" +) + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index f6f0c12a3b..75e57c3709 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -5,7 +5,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC;ExternalOIDCWithUIDAndExtraClaimMappings;ExternalOIDCWithUpstreamParity,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients 
in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" // Authentication specifies cluster-wide settings for authentication (like OAuth and // webhook token authenticators). The canonical name of an instance is `cluster`. @@ -26,7 +26,6 @@ type Authentication struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec AuthenticationSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -81,8 +80,7 @@ type AuthenticationSpec struct { // +optional ServiceAccountIssuer string `json:"serviceAccountIssuer"` - // OIDCProviders are OIDC identity providers that can issue tokens - // for this cluster + // oidcProviders are OIDC identity providers that can issue tokens for this cluster // Can only be set if "Type" is set to "OIDC". // // At most one provider can be configured. @@ -91,6 +89,9 @@ type AuthenticationSpec struct { // +listMapKey=name // +kubebuilder:validation:MaxItems=1 // +openshift:enable:FeatureGate=ExternalOIDC + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + // +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity + // +optional OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"` } @@ -108,16 +109,18 @@ type AuthenticationStatus struct { // If the config map or expected key is not found, no metadata is served. // If the specified metadata is not valid, no metadata is served. // The namespace for this config map is openshift-config-managed. + // +optional IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"` - // OIDCClients is where participating operators place the current OIDC client status - // for OIDC clients that can be customized by the cluster-admin. 
+ // oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin. // // +listType=map // +listMapKey=componentNamespace // +listMapKey=componentName // +kubebuilder:validation:MaxItems=20 // +openshift:enable:FeatureGate=ExternalOIDC + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + // +optional OIDCClients []OIDCClientStatus `json:"oidcClients"` } @@ -136,13 +139,12 @@ type AuthenticationList struct { } // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";None;IntegratedOAuth -// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC,enum="";None;IntegratedOAuth;OIDC +// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC;ExternalOIDCWithUIDAndExtraClaimMappings,enum="";None;IntegratedOAuth;OIDC type AuthenticationType string const ( // None means that no cluster managed authentication system is in place. - // Note that user login will only work if a manually configured system is in place and - // referenced in authentication spec via oauthMetadata and + // Note that user login will only work if a manually configured system is in place and referenced in authentication spec via oauthMetadata and // webhookTokenAuthenticator/oidcProviders AuthenticationTypeNone AuthenticationType = "None" @@ -181,7 +183,6 @@ type WebhookTokenAuthenticator struct { // The key "kubeConfig" is used to locate the data. // If the secret or expected key is not found, the webhook is not honored. // If the specified kube config data is not valid, the webhook is not honored. - // +kubebuilder:validation:Required // +required KubeConfig SecretNameReference `json:"kubeConfig"` } @@ -195,157 +196,381 @@ const ( ) type OIDCProvider struct { - // Name of the OIDC provider + // name is a required field that configures the unique human-readable identifier associated with the identity provider. 
+ // It is used to distinguish between multiple identity providers and has no impact on token validation or authentication mechanics. + // + // name must not be an empty string (""). // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required Name string `json:"name"` - // Issuer describes atributes of the OIDC token issuer + + // issuer is a required field that configures how the platform interacts with the identity provider and how tokens issued from the identity provider are evaluated by the Kubernetes API server. // - // +kubebuilder:validation:Required // +required Issuer TokenIssuer `json:"issuer"` - // OIDCClients contains configuration for the platform's clients that - // need to request tokens from the issuer + // oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. + // oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. // // +listType=map // +listMapKey=componentNamespace // +listMapKey=componentName // +kubebuilder:validation:MaxItems=20 + // +optional OIDCClients []OIDCClientConfig `json:"oidcClients"` - // ClaimMappings describes rules on how to transform information from an - // ID token into a cluster identity + // claimMappings is a required field that configures the rules to be used by the Kubernetes API server for translating claims in a JWT token, issued by the identity provider, to a cluster identity. + // + // +required ClaimMappings TokenClaimMappings `json:"claimMappings"` - // ClaimValidationRules are rules that are applied to validate token claims to authenticate users. + // claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider. + // + // Validation rules are joined via an AND operation. 
// // +listType=atomic + // +optional ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"` + + // userValidationRules is an optional field that configures the set of rules used to validate the cluster user identity that was constructed via mapping token claims to user identity attributes. + // Rules are CEL expressions that must evaluate to 'true' for authentication to succeed. + // If any rule in the chain of rules evaluates to 'false', authentication will fail. + // When specified, at least one rule must be specified and no more than 64 rules may be specified. + // + // +kubebuilder:validation:MaxItems=64 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=expression + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity + UserValidationRules []TokenUserValidationRule `json:"userValidationRules,omitempty"` } // +kubebuilder:validation:MinLength=1 type TokenAudience string +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="self.?discoveryURL.orValue(\"\").size() > 0 ? (self.issuerURL.size() == 0 || self.discoveryURL.find('^.+[^/]') != self.issuerURL.find('^.+[^/]')) : true",message="discoveryURL must be different from issuerURL" type TokenIssuer struct { - // URL is the serving URL of the token issuer. - // Must use the https:// scheme. - // - // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` - // +kubebuilder:validation:Required + // issuerURL is a required field that configures the URL used to issue tokens by the identity provider. + // The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. + // + // Must be at least 1 character and must not exceed 512 characters in length. + // Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user. 
+ // + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="must be a valid URL" + // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getScheme() == 'https'",message="must use the 'https' scheme" + // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getQuery() == {}",message="must not have a query" + // +kubebuilder:validation:XValidation:rule="self.find('#(.+)$') == ''",message="must not have a fragment" + // +kubebuilder:validation:XValidation:rule="self.find('@') == ''",message="must not have user info" + // +kubebuilder:validation:MaxLength=512 + // +kubebuilder:validation:MinLength=1 // +required URL string `json:"issuerURL"` - // Audiences is an array of audiences that the token was issued for. - // Valid tokens must include at least one of these values in their - // "aud" claim. - // Must be set to exactly one value. + // audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. + // At least one of the entries must match the 'aud' claim in the JWT token. + // + // audiences must contain at least one entry and must not exceed ten entries. // // +listType=set - // +kubebuilder:validation:Required // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=10 // +required Audiences []TokenAudience `json:"audiences"` - // CertificateAuthority is a reference to a config map in the - // configuration namespace. The .data of the configMap must contain - // the "ca-bundle.crt" key. - // If unset, system trust is used instead. + // issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information. + // + // When not specified, the system trust is used. 
+ // + // When specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap. + // + // +optional CertificateAuthority ConfigMapNameReference `json:"issuerCertificateAuthority"` + // discoveryURL is an optional field that, if specified, overrides the default discovery endpoint used to retrieve OIDC configuration metadata. + // By default, the discovery URL is derived from `issuerURL` as "{issuerURL}/.well-known/openid-configuration". + // + // The discoveryURL must be a valid absolute HTTPS URL. + // It must not contain query parameters, user information, or fragments. + // Additionally, it must differ from the value of `issuerURL` (ignoring trailing slashes). + // The discoveryURL value must be at least 1 character long and no longer than 2048 characters. + // + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="discoveryURL must be a valid URL" + // +kubebuilder:validation:XValidation:rule="url(self).getScheme() == 'https'",message="discoveryURL must be a valid https URL" + // +kubebuilder:validation:XValidation:rule="url(self).getQuery().size() == 0",message="discoveryURL must not contain query parameters" + // +kubebuilder:validation:XValidation:rule="self.matches('^[^#]*$')",message="discoveryURL must not contain fragments" + // +kubebuilder:validation:XValidation:rule="!self.matches('^https://.+:.+@.+/.*$')",message="discoveryURL must not contain user info" + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 + DiscoveryURL string `json:"discoveryURL,omitempty"` } type TokenClaimMappings struct { - // Username is a name of the claim that should be used to construct - // usernames for the cluster identity. 
+ // username is a required field that configures how the username of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. // - // Default value: "sub" - Username UsernameClaimMapping `json:"username,omitempty"` + // +required + Username UsernameClaimMapping `json:"username"` - // Groups is a name of the claim that should be used to construct - // groups for the cluster identity. - // The referenced claim must use array of strings values. + // groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. + // + // When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). + // + // For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. + // + // +optional Groups PrefixedClaimMapping `json:"groups,omitempty"` + + // uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity. + // + // When using uid.claim to specify the claim it must be a single string value. + // When using uid.expression the expression must result in a single string value. + // + // When omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. + // + // The current default is to use the 'sub' claim. + // + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + UID *TokenClaimOrExpressionMapping `json:"uid,omitempty"` + + // extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. + // When omitted, no extra attributes will be present on the cluster identity. + // + // key values for extra mappings must be unique. + // A maximum of 32 extra attribute mappings may be provided. 
+ // + // +optional + // +kubebuilder:validation:MaxItems=32 + // +listType=map + // +listMapKey=key + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + Extra []ExtraMapping `json:"extra,omitempty"` } +// TokenClaimMapping allows specifying a JWT token claim to be used when mapping claims from an authentication token to cluster identities. +// +openshift:validation:FeatureGateAwareXValidation:featureGate="",rule="has(self.claim)",message="claim is required" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC,rule="has(self.claim)",message="claim is required" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUIDAndExtraClaimMappings,rule="has(self.claim)",message="claim is required" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="(size(self.?claim.orValue(\"\")) > 0) ? !has(self.expression) : true",message="expression must not be set if claim is specified and is not an empty string" type TokenClaimMapping struct { - // Claim is a JWT token claim to be used in the mapping + // claim is an optional field for specifying the JWT token claim that is used in the mapping. + // The value of this claim will be assigned to the field in which this mapping is associated. + // claim must not exceed 256 characters in length. + // When set to the empty string `""`, this means that no named claim should be used for the group mapping. + // claim is required when the ExternalOIDCWithUpstreamParity feature gate is not enabled. // - // +kubebuilder:validation:Required - // +required + // +optional + // +kubebuilder:validation:MaxLength=256 Claim string `json:"claim"` + + // expression is an optional CEL expression used to derive + // group values from JWT claims. + // + // CEL expressions have access to the token claims through a CEL variable, 'claims'. 
+ // expression must be at least 1 character and must not exceed 1024 characters in length. + // + // When specified, claim must not be set or be explicitly set to the empty string (`""`). + // + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + Expression string `json:"expression,omitempty"` +} + +// TokenClaimOrExpressionMapping allows specifying either a JWT token claim or CEL expression to be used when mapping claims from an authentication token to cluster identities. +// +kubebuilder:validation:XValidation:rule="has(self.claim) ? !has(self.expression) : has(self.expression)",message="precisely one of claim or expression must be set" +type TokenClaimOrExpressionMapping struct { + // claim is an optional field for specifying the JWT token claim that is used in the mapping. + // The value of this claim will be assigned to the field in which this mapping is associated. + // + // Precisely one of claim or expression must be set. + // claim must not be specified when expression is set. + // When specified, claim must be at least 1 character in length and must not exceed 256 characters in length. + // + // +optional + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:MinLength=1 + Claim string `json:"claim,omitempty"` + + // expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims. + // + // CEL expressions have access to the token claims through a CEL variable, 'claims'. + // 'claims' is a map of claim names to claim values. + // For example, the 'sub' claim value can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation ('claims.foo.bar'). + // + // Precisely one of claim or expression must be set. + // expression must not be specified when claim is set. 
+ // When specified, expression must be at least 1 character in length and must not exceed 1024 characters in length. + // + // +optional + // +kubebuilder:validation:MaxLength=1024 + // +kubebuilder:validation:MinLength=1 + Expression string `json:"expression,omitempty"` +} + +// ExtraMapping allows specifying a key and CEL expression to evaluate the keys' value. +// It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token. +type ExtraMapping struct { + // key is a required field that specifies the string to use as the extra attribute key. + // + // key must be a domain-prefix path (e.g 'example.org/foo'). + // key must not exceed 510 characters in length. + // key must contain the '/' character, separating the domain and path characters. + // key must not be empty. + // + // The domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain. + // It must not exceed 253 characters in length. + // It must start and end with an alphanumeric character. + // It must only contain lower case alphanumeric characters and '-' or '.'. + // It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io". + // + // The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. + // It must not exceed 256 characters in length. 
+ // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=510 + // +kubebuilder:validation:XValidation:rule="self.contains('/')",message="key must contain the '/' character" + // + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0].matches(\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\")",message="the domain of the key must consist of only lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0].size() <= 253",message="the domain of the key must not exceed 253 characters in length" + // + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0] != 'kubernetes.io'",message="the domain 'kubernetes.io' is reserved for Kubernetes use" + // +kubebuilder:validation:XValidation:rule="!self.split('/', 2)[0].endsWith('.kubernetes.io')",message="the subdomains '*.kubernetes.io' are reserved for Kubernetes use" + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0] != 'k8s.io'",message="the domain 'k8s.io' is reserved for Kubernetes use" + // +kubebuilder:validation:XValidation:rule="!self.split('/', 2)[0].endsWith('.k8s.io')",message="the subdomains '*.k8s.io' are reserved for Kubernetes use" + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0] != 'openshift.io'",message="the domain 'openshift.io' is reserved for OpenShift use" + // +kubebuilder:validation:XValidation:rule="!self.split('/', 2)[0].endsWith('.openshift.io')",message="the subdomains '*.openshift.io' are reserved for OpenShift use" + // + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[1].matches('[A-Za-z0-9/\\\\-._~%!$&\\'()*+;=:]+')",message="the path of the key must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, apostrophe, '-', '.', '_', '~', '!', '$', '&', '(', ')', '*', '+', ',', ';', '=', and ':'" + // 
+kubebuilder:validation:XValidation:rule="self.split('/', 2)[1].size() <= 256",message="the path of the key must not exceed 256 characters in length" + Key string `json:"key"` + + // valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. + // valueExpression must produce a string or string array value. + // "", [], and null are treated as the extra mapping not being present. + // Empty string values within an array are filtered out. + // + // CEL expressions have access to the token claims through a CEL variable, 'claims'. + // 'claims' is a map of claim names to claim values. + // For example, the 'sub' claim value can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation ('claims.foo.bar'). + // + // valueExpression must not exceed 1024 characters in length. + // valueExpression must not be empty. + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + ValueExpression string `json:"valueExpression"` } +// OIDCClientConfig configures how platform clients interact with identity providers as an authentication method. type OIDCClientConfig struct { - // ComponentName is the name of the component that is supposed to consume this - // client configuration + // componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode. + // + // It is used in combination with componentNamespace as a unique identifier. + // + // componentName must not be an empty string ("") and must not exceed 256 characters in length. 
// // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required // +required ComponentName string `json:"componentName"` - // ComponentNamespace is the namespace of the component that is supposed to consume this - // client configuration + // componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running. + // + // It is used in combination with componentName as a unique identifier. + // + // componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required // +required ComponentNamespace string `json:"componentNamespace"` - // ClientID is the identifier of the OIDC client from the OIDC provider + // clientID is a required field that configures the client identifier, from the identity provider, that the platform component uses for authentication requests made to the identity provider. + // The identity provider must accept this identifier for platform components to be able to use the identity provider as an authentication mode. + // + // clientID must not be an empty string (""). // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required ClientID string `json:"clientID"` - // ClientSecret refers to a secret in the `openshift-config` namespace that - // contains the client secret in the `clientSecret` key of the `.data` field + // clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider. + // + // When not specified, no client secret will be used when making authentication requests to the identity provider. 
+ // + // When specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field. + // + // The client secret will be used when making authentication requests to the identity provider. + // + // Public clients do not require a client secret but private clients do require a client secret to work with the identity provider. + // + // +optional ClientSecret SecretNameReference `json:"clientSecret"` - // ExtraScopes is an optional set of scopes to request tokens with. + // extraScopes is an optional field that configures the extra scopes that should be requested by the platform component when making authentication requests to the identity provider. + // This is useful if you have configured claim mappings that require specific scopes to be requested beyond the standard OIDC scopes. + // + // When omitted, no additional scopes are requested. // // +listType=set + // +optional ExtraScopes []string `json:"extraScopes"` } +// OIDCClientStatus represents the current state +// of platform components and how they interact with +// the configured identity providers. type OIDCClientStatus struct { - // ComponentName is the name of the component that will consume a client configuration. + // componentName is a required field that specifies the name of the platform component using the identity provider as an authentication mode. + // It is used in combination with componentNamespace as a unique identifier. + // + // componentName must not be an empty string ("") and must not exceed 256 characters in length. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required // +required ComponentName string `json:"componentName"` - // ComponentNamespace is the namespace of the component that will consume a client configuration. 
+ // componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running. + // + // It is used in combination with componentName as a unique identifier. + // + // componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required // +required ComponentNamespace string `json:"componentNamespace"` - // CurrentOIDCClients is a list of clients that the component is currently using. + // currentOIDCClients is an optional list of clients that the component is currently using. + // + // Entries must have unique issuerURL/clientID pairs. // // +listType=map // +listMapKey=issuerURL // +listMapKey=clientID + // +optional CurrentOIDCClients []OIDCClientReference `json:"currentOIDCClients"` - // ConsumingUsers is a slice of ServiceAccounts that need to have read - // permission on the `clientSecret` secret. + // consumingUsers is an optional list of ServiceAccounts requiring read permissions on the `clientSecret` secret. + // + // consumingUsers must not exceed 5 entries. // // +kubebuilder:validation:MaxItems=5 // +listType=set + // +optional ConsumingUsers []ConsumingUser `json:"consumingUsers"` - // Conditions are used to communicate the state of the `oidcClients` entry. + // conditions are used to communicate the state of the `oidcClients` entry. // // Supported conditions include Available, Degraded and Progressing. // @@ -355,63 +580,107 @@ type OIDCClientStatus struct { // // +listType=map // +listMapKey=type + // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` } +// OIDCClientReference is a reference to a platform component +// client configuration. 
type OIDCClientReference struct { - // OIDCName refers to the `name` of the provider from `oidcProviders` + // oidcProviderName is a required reference to the 'name' of the identity provider configured in 'oidcProviders' that this client is associated with. + // + // oidcProviderName must not be an empty string (""). // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required OIDCProviderName string `json:"oidcProviderName"` - // URL is the serving URL of the token issuer. - // Must use the https:// scheme. + // issuerURL is a required field that specifies the URL of the identity provider that this client is configured to make requests against. + // + // issuerURL must use the 'https' scheme. // // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` - // +kubebuilder:validation:Required // +required IssuerURL string `json:"issuerURL"` - // ClientID is the identifier of the OIDC client from the OIDC provider + // clientID is a required field that specifies the client identifier, from the identity provider, that the platform component is using for authentication requests made to the identity provider. + // + // clientID must not be empty. // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required ClientID string `json:"clientID"` } // +kubebuilder:validation:XValidation:rule="has(self.prefixPolicy) && self.prefixPolicy == 'Prefix' ? 
(has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)",message="prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" +// +union +// +openshift:validation:FeatureGateAwareXValidation:featureGate="",rule="has(self.claim)",message="claim is required" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC,rule="has(self.claim)",message="claim is required" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUIDAndExtraClaimMappings,rule="has(self.claim)",message="claim is required" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="has(self.claim) ? !has(self.expression) : has(self.expression)",message="precisely one of claim or expression must be set" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="has(self.expression) && size(self.expression) > 0 ? !has(self.prefixPolicy) || self.prefixPolicy != 'Prefix' : true",message="prefixPolicy must not be set to 'Prefix' when expression is set" type UsernameClaimMapping struct { - TokenClaimMapping `json:",inline"` + // claim is an optional field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping. + // claim is required when the ExternalOIDCWithUpstreamParity feature gate is not enabled. + // When the ExternalOIDCWithUpstreamParity feature gate is enabled, claim must not be set when expression is set. + // + // claim must not be an empty string ("") and must not exceed 256 characters. + // + // +optional + // +kubebuilder:validation:MinLength:=1 + // +kubebuilder:validation:MaxLength:=256 + Claim string `json:"claim,omitempty"` - // PrefixPolicy specifies how a prefix should apply. + // expression is an optional CEL expression used to derive + // the username from JWT claims. 
+ // + // CEL expressions have access to the token claims + // through a CEL variable, 'claims'. // - // By default, claims other than `email` will be prefixed with the issuer URL to - // prevent naming clashes with other plugins. + // expression must be at least 1 character and must not exceed 1024 characters in length. + // expression must not be set when claim is set. + // + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + Expression string `json:"expression,omitempty"` + + // prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field. // - // Set to "NoPrefix" to disable prefixing. + // Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). // - // Example: - // (1) `prefix` is set to "myoidc:" and `claim` is set to "username". - // If the JWT claim `username` contains value `userA`, the resulting - // mapped value will be "myoidc:userA". - // (2) `prefix` is set to "myoidc:" and `claim` is set to "email". If the - // JWT `email` claim contains value "userA@myoidc.tld", the resulting - // mapped value will be "myoidc:userA@myoidc.tld". - // (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, - // the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", - // and `claim` is set to: - // (a) "username": the mapped value will be "https://myoidc.tld#userA" - // (b) "email": the mapped value will be "userA@myoidc.tld" + // When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. + // The prefix field must be set when prefixPolicy is 'Prefix'. + // Must not be set to 'Prefix' when expression is set. + // When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. 
+ // When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. + // Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. + // + // As an example, consider the following scenario: + // + // `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, + // the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", + // and `claim` is set to: + // - "username": the mapped value will be "https://myoidc.tld#userA" + // - "email": the mapped value will be "userA@myoidc.tld" // // +kubebuilder:validation:Enum={"", "NoPrefix", "Prefix"} + // +optional + // +unionDiscriminator PrefixPolicy UsernamePrefixPolicy `json:"prefixPolicy"` + // prefix configures the prefix that should be prepended to the value of the JWT claim. + // + // prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. + // + // +optional + // +unionMember Prefix *UsernamePrefix `json:"prefix"` } +// UsernamePrefixPolicy configures how prefixes should be applied to values extracted from the JWT claims during the process of mapping JWT claims to cluster identity attributes. +// +enum type UsernamePrefixPolicy string var ( @@ -426,58 +695,139 @@ var ( Prefix UsernamePrefixPolicy = "Prefix" ) +// UsernamePrefix configures the string that should +// be used as a prefix for username claim mappings. type UsernamePrefix struct { - // +kubebuilder:validation:Required + // prefixString is a required field that configures the prefix that will be applied to cluster identity username attribute during the process of mapping JWT claims to cluster identity attributes. + // + // prefixString must not be an empty string (""). + // // +kubebuilder:validation:MinLength=1 // +required PrefixString string `json:"prefixString"` } +// PrefixedClaimMapping configures a claim mapping +// that allows for an optional prefix. 
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="has(self.expression) && size(self.expression) > 0 ? (!has(self.prefix) || size(self.prefix) == 0) : true",message="prefix must not be set to a non-empty value when expression is set" type PrefixedClaimMapping struct { TokenClaimMapping `json:",inline"` - // Prefix is a string to prefix the value from the token in the result of the - // claim mapping. + // prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes. // - // By default, no prefixing occurs. + // When omitted or set to an empty string (""), no prefix is applied to the cluster identity attribute. + // Must not be set to a non-empty value when expression is set. // - // Example: if `prefix` is set to "myoidc:"" and the `claim` in JWT contains - // an array of strings "a", "b" and "c", the mapping will result in an - // array of string "myoidc:a", "myoidc:b" and "myoidc:c". + // Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of string "myoidc:a", "myoidc:b" and "myoidc:c". + // + // +optional Prefix string `json:"prefix"` } +// TokenValidationRuleType defines the type of token validation rule. +// +enum +// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="RequiredClaim"; +// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC,enum="RequiredClaim"; +// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDCWithUIDAndExtraClaimMappings,enum="RequiredClaim"; +// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDCWithUpstreamParity,enum="RequiredClaim";"CEL" type TokenValidationRuleType string const ( + // TokenValidationRuleTypeRequiredClaim indicates that the token must contain a specific claim. 
+ // Used as a value for TokenValidationRuleType. TokenValidationRuleTypeRequiredClaim = "RequiredClaim" + // TokenValidationRuleTypeCEL indicates that the token validation is defined via a CEL expression. + // Used as a value for TokenValidationRuleType. + TokenValidationRuleTypeCEL = "CEL" ) +// TokenClaimValidationRule represents a validation rule based on token claims. +// If type is RequiredClaim, requiredClaim must be set. +// If Type is CEL, CEL must be set and RequiredClaim must be omitted. +// +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'RequiredClaim' ? has(self.requiredClaim) : !has(self.requiredClaim)",message="requiredClaim must be set when type is 'RequiredClaim', and forbidden otherwise" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="has(self.type) && self.type == 'CEL' ? has(self.cel) : !has(self.cel)",message="cel must be set when type is 'CEL', and forbidden otherwise" type TokenClaimValidationRule struct { - // Type sets the type of the validation rule + // type is an optional field that configures the type of the validation rule. + // + // Allowed values are "RequiredClaim" and "CEL". + // + // When set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value. // - // +kubebuilder:validation:Enum={"RequiredClaim"} - // +kubebuilder:default="RequiredClaim" + // When set to 'CEL', the Kubernetes API server will be configured to validate the incoming JWT against the configured CEL expression. + // +required Type TokenValidationRuleType `json:"type"` - // RequiredClaim allows configuring a required claim name and its expected - // value - RequiredClaim *TokenRequiredClaim `json:"requiredClaim"` + // requiredClaim allows configuring a required claim name and its expected value. 
+ // This field is required when `type` is set to RequiredClaim, and must be omitted when `type` is set to any other value. + // The Kubernetes API server uses this field to validate if an incoming JWT is valid for this identity provider. + // + // +optional + RequiredClaim *TokenRequiredClaim `json:"requiredClaim,omitempty"` + + // cel holds the CEL expression and message for validation. + // Must be set when Type is "CEL", and forbidden otherwise. + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity + CEL TokenClaimValidationCELRule `json:"cel,omitempty,omitzero"` } type TokenRequiredClaim struct { - // Claim is a name of a required claim. Only claims with string values are - // supported. + // claim is a required field that configures the name of the required claim. + // When taken from the JWT claims, claim must be a string value. + // + // claim must not be an empty string (""). // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required Claim string `json:"claim"` - // RequiredValue is the required value for the claim. + // requiredValue is a required field that configures the value that 'claim' must have when taken from the incoming JWT claims. + // If the value in the JWT claims does not match, the token will be rejected for authentication. + // + // requiredValue must not be an empty string (""). // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required RequiredValue string `json:"requiredValue"` } + +type TokenClaimValidationCELRule struct { + // expression is a CEL expression evaluated against token claims. + // expression is required, must be at least 1 character in length and must not exceed 1024 characters. + // The expression must return a boolean value where 'true' signals a valid token and 'false' an invalid one. 
+ // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + // +required + Expression string `json:"expression,omitempty"` + + // message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. + // message must be at least 1 character in length and must not exceed 256 characters. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + Message string `json:"message,omitempty"` +} + +// TokenUserValidationRule provides a CEL-based rule used to validate a token subject. +// Each rule contains a CEL expression that is evaluated against the token’s claims. +type TokenUserValidationRule struct { + // expression is a required CEL expression that performs a validation on cluster user identity attributes like username, groups, etc. + // + // The expression must evaluate to a boolean value. + // When the expression evaluates to 'true', the cluster user identity is considered valid. + // When the expression evaluates to 'false', the cluster user identity is not considered valid. + // expression must be at least 1 character in length and must not exceed 1024 characters. + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + Expression string `json:"expression,omitempty"` + // message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. + // message must be at least 1 character in length and must not exceed 256 characters. 
+ // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + Message string `json:"message,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go index dad47666db..dcde1fc5b8 100644 --- a/vendor/github.com/openshift/api/config/v1/types_build.go +++ b/vendor/github.com/openshift/api/config/v1/types_build.go @@ -29,14 +29,13 @@ type Build struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // Spec holds user-settable values for the build controller configuration - // +kubebuilder:validation:Required + // spec holds user-settable values for the build controller configuration // +required Spec BuildSpec `json:"spec"` } type BuildSpec struct { - // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that + // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that // should be trusted for image pushes and pulls during builds. // The namespace for this config map is openshift-config. // @@ -45,16 +44,16 @@ type BuildSpec struct { // // +optional AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"` - // BuildDefaults controls the default information for Builds + // buildDefaults controls the default information for Builds // +optional BuildDefaults BuildDefaults `json:"buildDefaults"` - // BuildOverrides controls override settings for builds + // buildOverrides controls override settings for builds // +optional BuildOverrides BuildOverrides `json:"buildOverrides"` } type BuildDefaults struct { - // DefaultProxy contains the default proxy settings for all build operations, including image pull/push + // defaultProxy contains the default proxy settings for all build operations, including image pull/push // and source download. 
// // Values can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables @@ -62,55 +61,55 @@ type BuildDefaults struct { // +optional DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"` - // GitProxy contains the proxy settings for git operations only. If set, this will override + // gitProxy contains the proxy settings for git operations only. If set, this will override // any Proxy settings for all git commands, such as git clone. // // Values that are not set here will be inherited from DefaultProxy. // +optional GitProxy *ProxySpec `json:"gitProxy,omitempty"` - // Env is a set of default environment variables that will be applied to the + // env is a set of default environment variables that will be applied to the // build if the specified variables do not exist on the build // +optional Env []corev1.EnvVar `json:"env,omitempty"` - // ImageLabels is a list of docker labels that are applied to the resulting image. + // imageLabels is a list of docker labels that are applied to the resulting image. // User can override a default label by providing a label with the same name in their // Build/BuildConfig. // +optional ImageLabels []ImageLabel `json:"imageLabels,omitempty"` - // Resources defines resource requirements to execute the build. + // resources defines resource requirements to execute the build. // +optional Resources corev1.ResourceRequirements `json:"resources"` } type ImageLabel struct { - // Name defines the name of the label. It must have non-zero length. + // name defines the name of the label. It must have non-zero length. Name string `json:"name"` - // Value defines the literal value of the label. + // value defines the literal value of the label. // +optional Value string `json:"value,omitempty"` } type BuildOverrides struct { - // ImageLabels is a list of docker labels that are applied to the resulting image. + // imageLabels is a list of docker labels that are applied to the resulting image. 
// If user provided a label in their Build/BuildConfig with the same name as one in this // list, the user's label will be overwritten. // +optional ImageLabels []ImageLabel `json:"imageLabels,omitempty"` - // NodeSelector is a selector which must be true for the build pod to fit on a node + // nodeSelector is a selector which must be true for the build pod to fit on a node // +optional NodeSelector map[string]string `json:"nodeSelector,omitempty"` - // Tolerations is a list of Tolerations that will override any existing + // tolerations is a list of Tolerations that will override any existing // tolerations set on a build pod. // +optional Tolerations []corev1.Toleration `json:"tolerations,omitempty"` - // ForcePull overrides, if set, the equivalent value in the builds, + // forcePull overrides, if set, the equivalent value in the builds, // i.e. false disables force pull for all builds, // true enables force pull for all builds, // independently of what each build specifies itself diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go new file mode 100644 index 0000000000..491390098c --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go @@ -0,0 +1,87 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterImagePolicy holds cluster-wide configuration for image signature verification +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clusterimagepolicies,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2310
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +openshift:enable:FeatureGate=SigstoreImageVerification
+// +openshift:compatibility-gen:level=1
+type ClusterImagePolicy struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec contains the configuration for the cluster image policy.
+	// +required
+	Spec ClusterImagePolicySpec `json:"spec"`
+	// status contains the observed state of the resource.
+	// +optional
+	Status ClusterImagePolicyStatus `json:"status"`
+}
+
+// ClusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.
+type ClusterImagePolicySpec struct {
+	// scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2".
+	// Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest).
+	// More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository
+	// namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number).
+	// Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not.
+	// This supports no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored.
+	// In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories
+	// quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation.
+	// If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied.
+	// For additional details about the format, please refer to the document explaining the docker transport field,
+	// which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker
+	// +required
+	// +kubebuilder:validation:MaxItems=256
+	// +listType=set
+	Scopes []ImageScope `json:"scopes"`
+	// policy is a required field that contains configuration to allow scopes to be verified, and defines how
+	// images not matching the verification policy will be treated.
+	// +required
+	Policy ImageSigstoreVerificationPolicy `json:"policy"`
+}
+
+// +k8s:deepcopy-gen=true
+type ClusterImagePolicyStatus struct {
+	// conditions provide details on the status of this API Resource.
+	// +kubebuilder:validation:MaxItems=8
+	// +kubebuilder:validation:MinItems=1
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterImagePolicyList is a list of ClusterImagePolicy resources
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterImagePolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +required
+	metav1.ListMeta `json:"metadata"`
+
+	// items is a list of ClusterImagePolicies
+	// +kubebuilder:validation:MaxItems=1000
+	// +required
+	Items []ClusterImagePolicy `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
index 7951762ccd..8323040389 100644
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
@@ -9,10 +9,9 @@ import (
 // +genclient:nonNamespaced
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
-// ClusterOperator is the Custom Resource object which holds the current state
-// of an operator. This object is used by operators to convey their state to
-// the rest of the cluster.
-//
+// ClusterOperator holds the status of a core or optional OpenShift component
+// managed by the Cluster Version Operator (CVO). This object is used by
+// operators to convey their state to the rest of the cluster.
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1 // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/497 @@ -34,7 +33,6 @@ type ClusterOperator struct { metav1.ObjectMeta `json:"metadata"` // spec holds configuration that could apply to any operator. - // +kubebuilder:validation:Required // +required Spec ClusterOperatorSpec `json:"spec"` @@ -54,6 +52,8 @@ type ClusterOperatorStatus struct { // conditions describes the state of the operator's managed and monitored components. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type // +optional Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` @@ -80,14 +80,12 @@ type ClusterOperatorStatus struct { type OperandVersion struct { // name is the name of the particular operand this version is for. It usually matches container images, not operators. - // +kubebuilder:validation:Required // +required Name string `json:"name"` // version indicates which version of a particular operand is currently being managed. It must always match the Available // operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout // 1.1.0 - // +kubebuilder:validation:Required // +required Version string `json:"version"` } @@ -95,18 +93,15 @@ type OperandVersion struct { // ObjectReference contains enough information to let you inspect or modify the referred object. type ObjectReference struct { // group of the referent. - // +kubebuilder:validation:Required // +required Group string `json:"group"` // resource of the referent. - // +kubebuilder:validation:Required // +required Resource string `json:"resource"` // namespace of the referent. // +optional Namespace string `json:"namespace,omitempty"` // name of the referent. 
-	// +kubebuilder:validation:Required
 	// +required
 	Name string `json:"name"`
 }
@@ -128,17 +123,14 @@ const (
 // +k8s:deepcopy-gen=true
 type ClusterOperatorStatusCondition struct {
 	// type specifies the aspect reported by this condition.
-	// +kubebuilder:validation:Required
 	// +required
 	Type ClusterStatusConditionType `json:"type"`
 
 	// status of the condition, one of True, False, Unknown.
-	// +kubebuilder:validation:Required
 	// +required
 	Status ConditionStatus `json:"status"`
 
 	// lastTransitionTime is the time of the last update to the current status property.
-	// +kubebuilder:validation:Required
 	// +required
 	LastTransitionTime metav1.Time `json:"lastTransitionTime"`
 
@@ -161,15 +153,21 @@ const (
 	// is functional and available in the cluster. Available=False means at least
 	// part of the component is non-functional, and that the condition requires
 	// immediate administrator intervention.
+	// A component must not report Available=False during the course of a normal upgrade.
 	OperatorAvailable ClusterStatusConditionType = "Available"
 
 	// Progressing indicates that the component (operator and all configured operands)
-	// is actively rolling out new code, propagating config changes, or otherwise
+	// is actively rolling out new code, propagating config changes (e.g., a version change), or otherwise
 	// moving from one steady state to another. Operators should not report
-	// progressing when they are reconciling (without action) a previously known
-	// state. If the observed cluster state has changed and the component is
-	// reacting to it (scaling up for instance), Progressing should become true
+	// Progressing when they are reconciling (without action) a previously known
+	// state. Operators should not report Progressing only because DaemonSets owned by them
+	// are adjusting to a new node from cluster scaleup or a node rebooting from cluster upgrade.
+ // If the observed cluster state has changed and the component is + // reacting to it (updated proxy configuration for instance), Progressing should become true // since it is moving from one steady state to another. + // A component in a cluster with less than 250 nodes must complete a version + // change within a limited period of time: 90 minutes for Machine Config Operator and 20 minutes for others. + // Machine Config Operator is given more time as it needs to restart control plane nodes. OperatorProgressing ClusterStatusConditionType = "Progressing" // Degraded indicates that the component (operator and all configured operands) @@ -182,7 +180,7 @@ const ( // Degraded because it may have a lower quality of service. A component may be // Progressing but not Degraded because the transition from one state to // another does not persist over a long enough period to report Degraded. A - // component should not report Degraded during the course of a normal upgrade. + // component must not report Degraded during the course of a normal upgrade. // A component may report Degraded in response to a persistent infrastructure // failure that requires eventual administrator intervention. For example, if // a control plane host is unhealthy and must be replaced. A component should diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index 2b392298e8..f8d45114a8 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -34,7 +34,6 @@ type ClusterVersion struct { // spec is the desired state of the cluster version - the operator will work // to ensure that the desired version is applied to the cluster. 
- // +kubebuilder:validation:Required // +required Spec ClusterVersionSpec `json:"spec"` // status contains information about the available updates and any in-progress @@ -51,7 +50,6 @@ type ClusterVersionSpec struct { // clusterID uniquely identifies this cluster. This is expected to be // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in // hexadecimal values). This is a required field. - // +kubebuilder:validation:Required // +required ClusterID ClusterID `json:"clusterID"` @@ -64,7 +62,7 @@ type ClusterVersionSpec struct { // // Some of the fields are inter-related with restrictions and meanings described here. // 1. image is specified, version is specified, architecture is specified. API validation error. - // 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. + // 2. image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version. // 3. image is specified, version is not specified, architecture is specified. API validation error. // 4. image is specified, version is not specified, architecture is not specified. image is used. // 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. @@ -74,8 +72,10 @@ type ClusterVersionSpec struct { // // If an upgrade fails the operator will halt and report status // about the failing component. Setting the desired update value back to - // the previous version will cause a rollback to be attempted. Not all - // rollbacks will succeed. + // the previous version will cause a rollback to be attempted if the + // previous version is within the current minor version. Not all + // rollbacks will succeed, and some may unrecoverably break the + // cluster. 
// // +optional DesiredUpdate *Update `json:"desiredUpdate,omitempty"` @@ -85,8 +85,8 @@ type ClusterVersionSpec struct { // // +optional Upstream URL `json:"upstream,omitempty"` - // channel is an identifier for explicitly requesting that a non-default - // set of updates be applied to this cluster. The default channel will be + // channel is an identifier for explicitly requesting a non-default set + // of updates to be applied to this cluster. The default channel will // contain stable updates that are appropriate for production clusters. // // +optional @@ -138,7 +138,6 @@ type ClusterVersionStatus struct { // desired is the version that the cluster is reconciling towards. // If the cluster is not yet fully initialized desired will be set // with the information available, which may be an image or a tag. - // +kubebuilder:validation:Required // +required Desired Release `json:"desired"` @@ -156,18 +155,17 @@ type ClusterVersionStatus struct { // observedGeneration reports which version of the spec is being synced. // If this value is not equal to metadata.generation, then the desired // and conditions fields may represent a previous version. - // +kubebuilder:validation:Required // +required ObservedGeneration int64 `json:"observedGeneration"` // versionHash is a fingerprint of the content that the cluster will be // updated with. It is used by the operator to avoid unnecessary work // and is for internal use only. - // +kubebuilder:validation:Required // +required VersionHash string `json:"versionHash"` // capabilities describes the state of optional, core cluster components. + // +optional Capabilities ClusterVersionCapabilitiesStatus `json:"capabilities"` // conditions provides information about the cluster version. The condition @@ -190,7 +188,6 @@ type ClusterVersionStatus struct { // may be empty if no updates are recommended, if the update service // is unavailable, or if an invalid channel has been specified. 
// +nullable - // +kubebuilder:validation:Required // +listType=atomic // +required AvailableUpdates []Release `json:"availableUpdates"` @@ -202,9 +199,23 @@ type ClusterVersionStatus struct { // availableUpdates. This list may be empty if no updates are // recommended, if the update service is unavailable, or if an empty // or invalid channel has been specified. + // +kubebuilder:validation:MaxItems=500 // +listType=atomic // +optional ConditionalUpdates []ConditionalUpdate `json:"conditionalUpdates,omitempty"` + + // conditionalUpdateRisks contains the list of risks associated with conditionalUpdates. + // When performing a conditional update, all its associated risks will be compared with the set of accepted risks in the spec.desiredUpdate.acceptRisks field. + // If all risks for a conditional update are included in the spec.desiredUpdate.acceptRisks set, the conditional update can proceed, otherwise it is blocked. + // The risk names in the list must be unique. + // conditionalUpdateRisks must not contain more than 500 entries. + // +openshift:enable:FeatureGate=ClusterUpdateAcceptRisks + // +kubebuilder:validation:MaxItems=500 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=name + // +optional + ConditionalUpdateRisks []ConditionalUpdateRisk `json:"conditionalUpdateRisks,omitempty"` } // UpdateState is a constant representing whether an update was successfully @@ -226,12 +237,10 @@ type UpdateHistory struct { // indicates the update is not fully applied, while the Completed state // indicates the update was successfully rolled out at least once (all // parts of the update successfully applied). - // +kubebuilder:validation:Required // +required State UpdateState `json:"state"` // startedTime is the time at which the update was started. 
- // +kubebuilder:validation:Required // +required StartedTime metav1.Time `json:"startedTime"` @@ -239,7 +248,6 @@ type UpdateHistory struct { // that is currently being applied will have a null completion time. // Completion time will always be set for entries that are not the current // update (usually to the started time of the next update). - // +kubebuilder:validation:Required // +required // +nullable CompletionTime *metav1.Time `json:"completionTime"` @@ -253,7 +261,6 @@ type UpdateHistory struct { // image is a container image location that contains the update. This value // is always populated. - // +kubebuilder:validation:Required // +required Image string `json:"image"` @@ -261,13 +268,12 @@ type UpdateHistory struct { // before it was installed. If this is false the cluster may not be trusted. // Verified does not cover upgradeable checks that depend on the cluster // state at the time when the update target was accepted. - // +kubebuilder:validation:Required // +required Verified bool `json:"verified"` // acceptedRisks records risks which were accepted to initiate the update. - // For example, it may menition an Upgradeable=False or missing signature - // that was overriden via desiredUpdate.force, or an update that was + // For example, it may mention an Upgradeable=False or missing signature + // that was overridden via desiredUpdate.force, or an update that was // initiated despite not being in the availableUpdates set of recommended // update targets. // +optional @@ -277,6 +283,16 @@ type UpdateHistory struct { // ClusterID is string RFC4122 uuid. type ClusterID string +// UpdateMode defines how an update should be processed. +// +enum +// +kubebuilder:validation:Enum=Preflight +type UpdateMode string + +const ( + // UpdateModePreflight allows an update to be checked for compatibility without committing to updating the cluster. 
+ UpdateModePreflight UpdateMode = "Preflight" +) + // ClusterVersionArchitecture enumerates valid cluster architectures. // +kubebuilder:validation:Enum="Multi";"" type ClusterVersionArchitecture string @@ -671,28 +687,23 @@ type ClusterVersionCapabilitiesStatus struct { // +k8s:deepcopy-gen=true type ComponentOverride struct { // kind indentifies which object to override. - // +kubebuilder:validation:Required // +required Kind string `json:"kind"` // group identifies the API group that the kind is in. - // +kubebuilder:validation:Required // +required Group string `json:"group"` // namespace is the component's namespace. If the resource is cluster // scoped, the namespace should be empty. - // +kubebuilder:validation:Required // +required Namespace string `json:"namespace"` // name is the component's name. - // +kubebuilder:validation:Required // +required Name string `json:"name"` // unmanaged controls if cluster version operator should stop managing the // resources in this cluster. // Default: false - // +kubebuilder:validation:Required // +required Unmanaged bool `json:"unmanaged"` } @@ -701,8 +712,8 @@ type ComponentOverride struct { type URL string // Update represents an administrator update request. -// +kubebuilder:validation:XValidation:rule="has(self.architecture) && has(self.image) ? (self.architecture == '' || self.image == '') : true",message="cannot set both Architecture and Image" -// +kubebuilder:validation:XValidation:rule="has(self.architecture) && self.architecture != '' ? self.version != '' : true",message="Version must be set if Architecture is set" +// +kubebuilder:validation:XValidation:rule="has(self.architecture) && has(self.image) ? (self.architecture == \"\" || self.image == \"\") : true",message="cannot set both Architecture and Image" +// +kubebuilder:validation:XValidation:rule="has(self.architecture) && self.architecture != \"\" ? 
self.version != \"\" : true",message="Version must be set if Architecture is set" // +k8s:deepcopy-gen=true type Update struct { // architecture is an optional field that indicates the desired @@ -718,29 +729,73 @@ type Update struct { Architecture ClusterVersionArchitecture `json:"architecture"` // version is a semantic version identifying the update version. - // version is ignored if image is specified and required if - // architecture is specified. + // version is required if architecture is specified. + // If both version and image are set, the version extracted from the referenced image must match the specified version. // // +optional Version string `json:"version"` // image is a container image location that contains the update. // image should be used when the desired version does not exist in availableUpdates or history. - // When image is set, version is ignored. When image is set, version should be empty. // When image is set, architecture cannot be specified. + // If both version and image are set, the version extracted from the referenced image must match the specified version. // // +optional Image string `json:"image"` // force allows an administrator to update to an image that has failed - // verification or upgradeable checks. This option should only - // be used when the authenticity of the provided image has been verified out - // of band because the provided image will run with full administrative access - // to the cluster. Do not use this flag with images that comes from unknown + // verification or upgradeable checks that are designed to keep your + // cluster safe. Only use this if: + // * you are testing unsigned release images in short-lived test clusters or + // * you are working around a known bug in the cluster-version + // operator and you have verified the authenticity of the provided + // image yourself. + // The provided image will run with full administrative access + // to the cluster. 
Do not use this flag with images that come from unknown // or potentially malicious sources. // // +optional Force bool `json:"force"` + + // acceptRisks is an optional set of names of conditional update risks that are considered acceptable. + // A conditional update is performed only if all of its risks are acceptable. + // This list may contain entries that apply to current, previous or future updates. + // The entries therefore may not map directly to a risk in .status.conditionalUpdateRisks. + // acceptRisks must not contain more than 1000 entries. + // Entries in this list must be unique. + // +openshift:enable:FeatureGate=ClusterUpdateAcceptRisks + // +kubebuilder:validation:MaxItems=1000 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=name + // +optional + AcceptRisks []AcceptRisk `json:"acceptRisks,omitempty"` + + // mode determines how an update should be processed. + // The only valid value is "Preflight". + // When omitted, the cluster performs a normal update by applying the specified version or image to the cluster. + // This is the standard update behavior. + // When set to "Preflight", the cluster runs compatibility checks against the target release without + // performing an actual update. Compatibility results, including any detected risks, are reported + // in status.conditionalUpdates and status.conditionalUpdateRisks alongside risks from the update + // recommendation service. + // This allows administrators to assess update readiness and address issues before committing to the update. + // Preflight mode is particularly useful for skip-level updates where upgrade compatibility needs to be + // verified across multiple minor versions. + // When mode is set to "Preflight", the same rules for version, image, and architecture apply as for normal updates. 
+ // +openshift:enable:FeatureGate=ClusterUpdatePreflight + // +optional + Mode UpdateMode `json:"mode,omitempty"` +} + +// AcceptRisk represents a risk that is considered acceptable. +type AcceptRisk struct { + // name is the name of the acceptable risk. + // It must be a non-empty string and must not exceed 256 characters. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +required + Name string `json:"name,omitempty"` } // Release represents an OpenShift release image and associated metadata. @@ -793,17 +848,30 @@ const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates" // may not be recommended for the current cluster. type ConditionalUpdate struct { // release is the target of the update. - // +kubebuilder:validation:Required // +required Release Release `json:"release"` + // riskNames represents the set of the names of conditionalUpdateRisks that are relevant to this update for some clusters. + // The Applies condition of each conditionalUpdateRisks entry declares if that risk applies to this cluster. + // A conditional update is accepted only if each of its risks either does not apply to the cluster or is considered acceptable by the cluster administrator. + // The latter means that the risk names are included in value of the spec.desiredUpdate.acceptRisks field. + // Entries must be unique and must not exceed 256 characters. + // riskNames must not contain more than 500 entries. + // +openshift:enable:FeatureGate=ClusterUpdateAcceptRisks + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:items:MaxLength=256 + // +kubebuilder:validation:MaxItems=500 + // +listType=set + // +optional + RiskNames []string `json:"riskNames,omitempty"` + // risks represents the range of issues associated with // updating to the target release. The cluster-version // operator will evaluate all entries, and only recommend the // update if there is at least one entry and all entries // recommend the update. 
- // +kubebuilder:validation:Required // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=200 // +patchMergeKey=name // +patchStrategy=merge // +listType=map @@ -814,19 +882,31 @@ type ConditionalUpdate struct { // conditions represents the observations of the conditional update's // current status. Known types are: // * Recommended, for whether the update is recommended for the current cluster. - // +patchMergeKey=type - // +patchStrategy=merge // +listType=map // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` } // ConditionalUpdateRisk represents a reason and cluster-state // for not recommending a conditional update. // +k8s:deepcopy-gen=true type ConditionalUpdateRisk struct { + // conditions represents the observations of the conditional update + // risk's current status. Known types are: + // * Applies, for whether the risk applies to the current cluster. + // The condition's types in the list must be unique. + // conditions must not contain more than one entry. + // +openshift:enable:FeatureGate=ClusterUpdateAcceptRisks + // +kubebuilder:validation:XValidation:rule="self.exists_one(x, x.type == 'Applies')",message="must contain a condition of type 'Applies'" + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + // url contains information about this risk. - // +kubebuilder:validation:Required // +kubebuilder:validation:Format=uri // +kubebuilder:validation:MinLength=1 // +required @@ -835,7 +915,6 @@ type ConditionalUpdateRisk struct { // name is the CamelCase reason for not recommending a // conditional update, in the event that matchingRules match the // cluster state. 
- // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Name string `json:"name"` @@ -845,7 +924,6 @@ type ConditionalUpdateRisk struct { // state. This is only to be consumed by humans. It may // contain Line Feed characters (U+000A), which should be // rendered as new lines. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Message string `json:"message"` @@ -856,7 +934,6 @@ type ConditionalUpdateRisk struct { // operator will walk the slice in order, and stop after the // first it can successfully evaluate. If no condition can be // successfully evaluated, the update will not be recommended. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinItems=1 // +listType=atomic // +required @@ -871,24 +948,22 @@ type ConditionalUpdateRisk struct { type ClusterCondition struct { // type represents the cluster-condition type. This defines // the members and semantics of any additional properties. - // +kubebuilder:validation:Required // +kubebuilder:validation:Enum={"Always","PromQL"} // +required Type string `json:"type"` - // promQL represents a cluster condition based on PromQL. + // promql represents a cluster condition based on PromQL. // +optional PromQL *PromQLClusterCondition `json:"promql,omitempty"` } // PromQLClusterCondition represents a cluster condition based on PromQL. type PromQLClusterCondition struct { - // PromQL is a PromQL query classifying clusters. This query + // promql is a PromQL query classifying clusters. This query // query should return a 1 in the match case and a 0 in the // does-not-match case. Queries which return no time // series, or which return values besides 0 or 1, are // evaluation failures. 
- // +kubebuilder:validation:Required // +required PromQL string `json:"promql"` } @@ -917,7 +992,7 @@ type SignatureStore struct { // // +kubebuilder:validation:Type=string // +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL" - // +kubebuilder:validation:Required + // +required URL string `json:"url"` // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go index e8f197b344..dc6967bf15 100644 --- a/vendor/github.com/openshift/api/config/v1/types_console.go +++ b/vendor/github.com/openshift/api/config/v1/types_console.go @@ -28,7 +28,6 @@ type Console struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ConsoleSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -46,6 +45,7 @@ type ConsoleSpec struct { type ConsoleStatus struct { // The URL for the console. This will be derived from the host for the route that // is created for the console. + // +optional ConsoleURL string `json:"consoleURL"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go index 5daa5d78d2..efbdc3ae54 100644 --- a/vendor/github.com/openshift/api/config/v1/types_dns.go +++ b/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -24,7 +24,6 @@ type DNS struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec DNSSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -121,7 +120,7 @@ type DNSPlatformSpec struct { // and must handle unrecognized platforms with best-effort defaults. 
// // +unionDiscriminator - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:XValidation:rule="self in ['','AWS']",message="allowed values are '' and 'AWS'" Type PlatformType `json:"type"` @@ -135,7 +134,14 @@ type AWSDNSSpec struct { // privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing // operations on the cluster's private hosted zone specified in the cluster DNS config. // When left empty, no role should be assumed. - // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$` + // + // The ARN must follow the format: arn::iam:::role/, where: + // is the AWS partition (aws, aws-cn, aws-us-gov, or aws-eusc), + // is a 12-digit numeric identifier for the AWS account, + // is the IAM role name. + // + // +openshift:validation:FeatureGateAwareXValidation:featureGate="",rule=`matches(self, '^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role/.*$')`,message=`privateZoneIAMRole must be a valid AWS IAM role ARN in the format: arn::iam:::role/` + // +openshift:validation:FeatureGateAwareXValidation:featureGate=AWSEuropeanSovereignCloudInstall,rule=`matches(self, '^arn:(aws|aws-cn|aws-us-gov|aws-eusc):iam::[0-9]{12}:role/.*$')`,message=`privateZoneIAMRole must be a valid AWS IAM role ARN in the format: arn::iam:::role/` // +optional PrivateZoneIAMRole string `json:"privateZoneIAMRole"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 88d94ac527..e111d518ab 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -26,7 +26,6 @@ type FeatureGate struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required // +kubebuilder:validation:XValidation:rule="has(oldSelf.featureSet) ? 
has(self.featureSet) : true",message=".spec.featureSet cannot be removed" Spec FeatureGateSpec `json:"spec"` @@ -54,8 +53,12 @@ var ( // your cluster may fail in an unrecoverable way. CustomNoUpgrade FeatureSet = "CustomNoUpgrade" + // OKD turns on features for OKD. Turning this feature set ON is supported for OKD clusters, but NOT for OpenShift clusters. + // Once enabled, this feature set cannot be changed back to Default, but can be changed to other feature sets and it allows upgrades. + OKD FeatureSet = "OKD" + // AllFixedFeatureSets are the featuresets that have known featuregates. Custom doesn't for instance. LatencySensitive is dead - AllFixedFeatureSets = []FeatureSet{Default, TechPreviewNoUpgrade, DevPreviewNoUpgrade} + AllFixedFeatureSets = []FeatureSet{Default, TechPreviewNoUpgrade, DevPreviewNoUpgrade, OKD} ) type FeatureGateSpec struct { @@ -68,10 +71,11 @@ type FeatureGateSelection struct { // Turning on or off features may cause irreversible changes in your cluster which cannot be undone. // +unionDiscriminator // +optional - // +kubebuilder:validation:Enum=CustomNoUpgrade;DevPreviewNoUpgrade;TechPreviewNoUpgrade;"" + // +kubebuilder:validation:Enum=CustomNoUpgrade;DevPreviewNoUpgrade;TechPreviewNoUpgrade;OKD;"" // +kubebuilder:validation:XValidation:rule="oldSelf == 'CustomNoUpgrade' ? self == 'CustomNoUpgrade' : true",message="CustomNoUpgrade may not be changed" // +kubebuilder:validation:XValidation:rule="oldSelf == 'TechPreviewNoUpgrade' ? self == 'TechPreviewNoUpgrade' : true",message="TechPreviewNoUpgrade may not be changed" // +kubebuilder:validation:XValidation:rule="oldSelf == 'DevPreviewNoUpgrade' ? self == 'DevPreviewNoUpgrade' : true",message="DevPreviewNoUpgrade may not be changed" + // +kubebuilder:validation:XValidation:rule="oldSelf == 'OKD' ? self != '' : true",message="OKD cannot transition to Default" FeatureSet FeatureSet `json:"featureSet,omitempty"` // customNoUpgrade allows the enabling or disabling of any feature. 
Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. @@ -100,6 +104,7 @@ type FeatureGateStatus struct { // Known .status.conditions.type are: "DeterminationDegraded" // +listType=map // +listMapKey=type + // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` // featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion. @@ -112,12 +117,12 @@ type FeatureGateStatus struct { // Only featureGates with .version in the ClusterVersion.status will be present in this list. // +listType=map // +listMapKey=version + // +optional FeatureGates []FeatureGateDetails `json:"featureGates"` } type FeatureGateDetails struct { // version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field. - // +kubebuilder:validation:Required // +required Version string `json:"version"` // enabled is a list of all feature gates that are enabled in the cluster for the named version. @@ -130,7 +135,7 @@ type FeatureGateDetails struct { type FeatureGateAttributes struct { // name is the name of the FeatureGate. - // +kubebuilder:validation:Required + // +required Name FeatureGateName `json:"name"` // possible (probable?) future additions include diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go index d3c694a56f..82f46c8b6c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image.go +++ b/vendor/github.com/openshift/api/config/v1/types_image.go @@ -29,7 +29,6 @@ type Image struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -162,6 +161,8 @@ type RegistryLocation struct { } // RegistrySources holds cluster-wide information about how to handle the registries config. 
+// +// +kubebuilder:validation:XValidation:rule="has(self.blockedRegistries) ? !has(self.allowedRegistries) : true",message="Only one of blockedRegistries or allowedRegistries may be set" type RegistrySources struct { // insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections. // +optional diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go index 74df4027f9..0bd0d77705 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go @@ -25,7 +25,6 @@ type ImageContentPolicy struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageContentPolicySpec `json:"spec"` } @@ -76,7 +75,6 @@ type ImageContentPolicyList struct { type RepositoryDigestMirrors struct { // source is the repository that users refer to, e.g. in image pull specifications. // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$` Source string `json:"source"` // allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests. 
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go index 43d748c0c3..df2258d12f 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go @@ -25,7 +25,6 @@ type ImageDigestMirrorSet struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageDigestMirrorSetSpec `json:"spec"` // status contains the observed state of the resource. @@ -110,7 +109,6 @@ type ImageDigestMirrors struct { // for more information about the format, see the document about the location field: // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$` Source string `json:"source"` // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. 
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_policy.go new file mode 100644 index 0000000000..3cc46141c9 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image_policy.go @@ -0,0 +1,322 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImagePolicy holds namespace-wide configuration for image signature verification +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=imagepolicies,scope=Namespaced +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2310 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=SigstoreImageVerification +// +openshift:compatibility-gen:level=1 +type ImagePolicy struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata"` + + // spec holds user settable values for configuration + // +required + Spec ImagePolicySpec `json:"spec"` + // status contains the observed state of the resource. + // +optional + Status ImagePolicyStatus `json:"status"` +} + +// ImagePolicySpec is the specification of the ImagePolicy CRD. +type ImagePolicySpec struct { + // scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2". + // Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. 
For example, docker.io/library/busybox:latest (not busybox:latest). + // More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository + // namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). + // Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. + // This supports no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. + // In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories + // quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. + // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. + // For additional details about the format, please refer to the document explaining the docker transport field, + // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker + // +required + // +kubebuilder:validation:MaxItems=256 + // +listType=set + Scopes []ImageScope `json:"scopes"` + // policy is a required field that contains configuration to allow scopes to be verified, and defines how + // images not matching the verification policy will be treated.
+ // +required + Policy ImageSigstoreVerificationPolicy `json:"policy"` +} + +// +kubebuilder:validation:XValidation:rule="size(self.split('/')[0].split('.')) == 1 ? self.split('/')[0].split('.')[0].split(':')[0] == 'localhost' : true",message="invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" +// +kubebuilder:validation:XValidation:rule=`self.contains('*') ? self.matches('^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$') : true`,message="invalid image scope with wildcard, a wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching" +// +kubebuilder:validation:XValidation:rule=`!self.contains('*') ? self.matches('^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$') : true`,message="invalid repository namespace or image specification in the image scope" +// +kubebuilder:validation:MaxLength=512 +type ImageScope string + +// ImageSigstoreVerificationPolicy defines the verification policy for the items in the scopes list. +type ImageSigstoreVerificationPolicy struct { + // rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval. + // This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated. + // +required + RootOfTrust PolicyRootOfTrust `json:"rootOfTrust"` + // signedIdentity is an optional field specifies what image identity the signature claims about the image. 
This is useful when the image identity in the signature differs from the original image spec, such as when mirror registry is configured for the image scope, the signature from the mirror registry contains the image identity of the mirror instead of the original scope. + // The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is "MatchRepoDigestOrExact". + // +optional + SignedIdentity *PolicyIdentity `json:"signedIdentity,omitempty"` +} + +// PolicyRootOfTrust defines the root of trust based on the selected policyType. +// +union +// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'PublicKey' ? has(self.publicKey) : !has(self.publicKey)",message="publicKey is required when policyType is PublicKey, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'FulcioCAWithRekor' ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)",message="fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=SigstoreImageVerificationPKI,rule="has(self.policyType) && self.policyType == 'PKI' ? has(self.pki) : !has(self.pki)",message="pki is required when policyType is PKI, and forbidden otherwise" +type PolicyRootOfTrust struct { + // policyType is a required field specifies the type of the policy for verification. This field must correspond to how the policy was generated. + // Allowed values are "PublicKey", "FulcioCAWithRekor", and "PKI". + // When set to "PublicKey", the policy relies on a sigstore publicKey and may optionally use a Rekor verification. + // When set to "FulcioCAWithRekor", the policy is based on the Fulcio certification and incorporates a Rekor verification. 
+ // When set to "PKI", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). + // +unionDiscriminator + // +required + PolicyType PolicyType `json:"policyType"` + // publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification. + // publicKey is required when policyType is PublicKey, and forbidden otherwise. + // +optional + PublicKey *ImagePolicyPublicKeyRootOfTrust `json:"publicKey,omitempty"` + // fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key. + // fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise + // For more information about Fulcio and Rekor, please refer to the document at: + // https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor + // +optional + FulcioCAWithRekor *ImagePolicyFulcioCAWithRekorRootOfTrust `json:"fulcioCAWithRekor,omitempty"` + // pki defines the root of trust configuration based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates. + // pki is required when policyType is PKI, and forbidden otherwise. + // +optional + // +openshift:enable:FeatureGate=SigstoreImageVerificationPKI + PKI *ImagePolicyPKIRootOfTrust `json:"pki,omitempty"` +} + +// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=PublicKey;FulcioCAWithRekor +// +openshift:validation:FeatureGateAwareEnum:featureGate=SigstoreImageVerificationPKI,enum=PublicKey;FulcioCAWithRekor;PKI +type PolicyType string + +const ( + PublicKeyRootOfTrust PolicyType = "PublicKey" + FulcioCAWithRekorRootOfTrust PolicyType = "FulcioCAWithRekor" + PKIRootOfTrust PolicyType = "PKI" +) + +// ImagePolicyPublicKeyRootOfTrust defines the root of trust based on a sigstore public key. 
+type ImagePolicyPublicKeyRootOfTrust struct { + // keyData is a required field contains inline base64-encoded data for the PEM format public key. + // keyData must be at most 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:MinLength=68 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the keyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the keyData must end with base64 encoding of '-----END PUBLIC KEY-----'." + KeyData []byte `json:"keyData"` + // rekorKeyData is an optional field contains inline base64-encoded data for the PEM format from the Rekor public key. + // rekorKeyData must be at most 8192 characters. + // +optional + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the rekorKeyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the rekorKeyData must end with base64 encoding of '-----END PUBLIC KEY-----'." + RekorKeyData []byte `json:"rekorKeyData,omitempty"` +} + +// ImagePolicyFulcioCAWithRekorRootOfTrust defines the root of trust based on the Fulcio certificate and the Rekor public key. +type ImagePolicyFulcioCAWithRekorRootOfTrust struct { + // fulcioCAData is a required field contains inline base64-encoded data for the PEM format fulcio CA. + // fulcioCAData must be at most 8192 characters. 
+ // +required + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the fulcioCAData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the fulcioCAData must end with base64 encoding of '-----END CERTIFICATE-----'." + FulcioCAData []byte `json:"fulcioCAData"` + // rekorKeyData is a required field that contains inline base64-encoded data for the PEM format from the Rekor public key. + // rekorKeyData must be at most 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the rekorKeyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the rekorKeyData must end with base64 encoding of '-----END PUBLIC KEY-----'." + RekorKeyData []byte `json:"rekorKeyData"` + // fulcioSubject is a required field that specifies the OIDC issuer and the email of the Fulcio authentication configuration. + // +required + FulcioSubject PolicyFulcioSubject `json:"fulcioSubject"` +} + +// PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration. +type PolicyFulcioSubject struct { + // oidcIssuer is a required field that contains the expected OIDC issuer. The oidcIssuer must be a valid URL and at most 2048 characters in length. + // It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. + // When Fulcio issues certificates, it includes a value based on a URL inside the client-provided ID token.
+ // Example: "https://expected.OIDC.issuer/" + // +required + // +kubebuilder:validation:MaxLength=2048 + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="oidcIssuer must be a valid URL" + OIDCIssuer string `json:"oidcIssuer"` + // signedEmail is a required field holds the email address that the Fulcio certificate is issued for. + // The signedEmail must be a valid email address and at most 320 characters in length. + // Example: "expected-signing-user@example.com" + // +required + // +kubebuilder:validation:MaxLength=320 + // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address" + SignedEmail string `json:"signedEmail"` +} + +// ImagePolicyPKIRootOfTrust defines the root of trust based on Root CA(s) and corresponding intermediate certificates. +type ImagePolicyPKIRootOfTrust struct { + // caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:MinLength=72 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the caRootsData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the caRootsData must end with base64 encoding of '-----END CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).findAll('-----BEGIN CERTIFICATE-----').size() == string(self).findAll('-----END CERTIFICATE-----').size()",message="caRootsData must be base64 encoding of valid PEM format data contain the same number of '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers." 
+ CertificateAuthorityRootsData []byte `json:"caRootsData"` + // caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. + // caIntermediatesData requires caRootsData to be set. + // +optional + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the caIntermediatesData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the caIntermediatesData must end with base64 encoding of '-----END CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).findAll('-----BEGIN CERTIFICATE-----').size() == string(self).findAll('-----END CERTIFICATE-----').size()",message="caIntermediatesData must be base64 encoding of valid PEM format data contain the same number of '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers." + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:MinLength=72 + CertificateAuthorityIntermediatesData []byte `json:"caIntermediatesData,omitempty"` + + // pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued. + // +required + PKICertificateSubject PKICertificateSubject `json:"pkiCertificateSubject"` +} + +// PKICertificateSubject defines the requirements imposed on the subject to which the certificate was issued. 
+// +kubebuilder:validation:XValidation:rule="has(self.email) || has(self.hostname)", message="at least one of email or hostname must be set in pkiCertificateSubject" +// +openshift:enable:FeatureGate=SigstoreImageVerificationPKI +type PKICertificateSubject struct { + // email specifies the expected email address imposed on the subject to which the certificate was issued, and must match the email address listed in the Subject Alternative Name (SAN) field of the certificate. + // The email must be a valid email address and at most 320 characters in length. + // +optional + // +kubebuilder:validation:MaxLength:=320 + // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address" + Email string `json:"email,omitempty"` + // hostname specifies the expected hostname imposed on the subject to which the certificate was issued, and it must match the hostname listed in the Subject Alternative Name (SAN) DNS field of the certificate. + // The hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.', and at most 253 characters in length. + // It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk. + // +optional + // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:XValidation:rule="self.startsWith('*.') ? !format.dns1123Subdomain().validate(self.replace('*.', '', 1)).hasValue() : !format.dns1123Subdomain().validate(self).hasValue()",message="hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.'. It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk." + Hostname string `json:"hostname,omitempty"` +} + +// PolicyIdentity defines image identity the signature claims about the image. When omitted, the default matchPolicy is "MatchRepoDigestOrExact". +// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'ExactRepository') ? 
has(self.exactRepository) : !has(self.exactRepository)",message="exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'RemapIdentity') ? has(self.remapIdentity) : !has(self.remapIdentity)",message="remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise" +// +union +type PolicyIdentity struct { + // matchPolicy is a required field that specifies the matching strategy to verify the image identity in the signature against the image scope. + // Allowed values are "MatchRepoDigestOrExact", "MatchRepository", "ExactRepository", "RemapIdentity". When omitted, the default value is "MatchRepoDigestOrExact". + // When set to "MatchRepoDigestOrExact", the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity. + // When set to "MatchRepository", the identity in the signature must be in the same repository as the image identity. + // When set to "ExactRepository", the exactRepository must be specified. The identity in the signature must be in the same repository as a specific identity specified by "repository". + // When set to "RemapIdentity", the remapIdentity must be specified. The signature must be the same as the remapped image identity. Remapped image identity is obtained by replacing the "prefix" with the specified “signedPrefix” if the image identity matches the specified remapPrefix. + // +unionDiscriminator + // +required + MatchPolicy IdentityMatchPolicy `json:"matchPolicy"` + // exactRepository specifies the repository that must be exactly matched by the identity in the signature. + // exactRepository is required if matchPolicy is set to "ExactRepository". 
It is used to verify that the signature claims an identity matching this exact repository, rather than the original image identity. + // +optional + PolicyMatchExactRepository *PolicyMatchExactRepository `json:"exactRepository,omitempty"` + // remapIdentity specifies the prefix remapping rule for verifying image identity. + // remapIdentity is required if matchPolicy is set to "RemapIdentity". It is used to verify that the signature claims a different registry/repository prefix than the original image. + // +optional + PolicyMatchRemapIdentity *PolicyMatchRemapIdentity `json:"remapIdentity,omitempty"` +} + +// +kubebuilder:validation:MaxLength=512 +// +kubebuilder:validation:XValidation:rule=`self.matches('.*:([\\w][\\w.-]{0,127})$')? self.matches('^(localhost:[0-9]+)$'): true`,message="invalid repository or prefix in the signedIdentity, should not include the tag or digest" +// +kubebuilder:validation:XValidation:rule=`self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$')`,message="invalid repository or prefix in the signedIdentity. The repository or prefix must starts with 'localhost' or a valid '.' separated domain. If contains registry paths, the path component names must start with at least one letter or number, with following parts able to be separated by one period, one or two underscore and multiple dashes." +type IdentityRepositoryPrefix string + +type PolicyMatchExactRepository struct { + // repository is the reference of the image identity to be matched. + // repository is required if matchPolicy is set to "ExactRepository". + // The value should be a repository name (by omitting the tag or digest) in a registry implementing the "Docker Registry HTTP API V2". 
For example, docker.io/library/busybox + // +required + Repository IdentityRepositoryPrefix `json:"repository"` +} + +type PolicyMatchRemapIdentity struct { + // prefix is required if matchPolicy is set to "RemapIdentity". + // prefix is the prefix of the image identity to be matched. + // If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used as unchanged and no remapping takes place). + // This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure. + // The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, + // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. + // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. + // +required + Prefix IdentityRepositoryPrefix `json:"prefix"` + // signedPrefix is required if matchPolicy is set to "RemapIdentity". + // signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as "prefix". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, + // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. + // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. + // +required + SignedPrefix IdentityRepositoryPrefix `json:"signedPrefix"` +} + +// IdentityMatchPolicy defines the type of matching for "matchPolicy". 
+// +kubebuilder:validation:Enum=MatchRepoDigestOrExact;MatchRepository;ExactRepository;RemapIdentity +type IdentityMatchPolicy string + +const ( + IdentityMatchPolicyMatchRepoDigestOrExact IdentityMatchPolicy = "MatchRepoDigestOrExact" + IdentityMatchPolicyMatchRepository IdentityMatchPolicy = "MatchRepository" + IdentityMatchPolicyExactRepository IdentityMatchPolicy = "ExactRepository" + IdentityMatchPolicyRemapIdentity IdentityMatchPolicy = "RemapIdentity" +) + +// +k8s:deepcopy-gen=true +type ImagePolicyStatus struct { + // conditions provide details on the status of this API Resource. + // condition type 'Pending' indicates that the customer resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid. + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImagePolicyList is a list of ImagePolicy resources +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImagePolicyList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +required + metav1.ListMeta `json:"metadata"` + + // items is a list of ImagePolicies + // +kubebuilder:validation:MaxItems=1000 + // +required + Items []ImagePolicy `json:"items"` +} + +const ( + // ImagePolicyPending indicates that the customer resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid. 
+ ImagePolicyPending = "Pending" + // ImagePolicyApplied indicates that the policy has been applied + ImagePolicyApplied = "Applied" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go index ca8d35515e..b7e1a6a873 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go @@ -25,7 +25,6 @@ type ImageTagMirrorSet struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageTagMirrorSetSpec `json:"spec"` // status contains the observed state of the resource. @@ -95,7 +94,6 @@ type ImageTagMirrors struct { // for more information about the format, see the document about the location field: // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$` Source string `json:"source"` // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. 
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 1ebe2dfb9c..160f8fd4c0 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -27,7 +27,6 @@ type Infrastructure struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec InfrastructureSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -63,11 +62,13 @@ type InfrastructureStatus struct { // infrastructureName uniquely identifies a cluster with a human friendly name. // Once set it should not be changed. Must be of max length 27 and must have only // alphanumeric or hyphen characters. + // +optional InfrastructureName string `json:"infrastructureName"` // platform is the underlying infrastructure provider for the cluster. // // Deprecated: Use platformStatus.type instead. + // +optional Platform PlatformType `json:"platform,omitempty"` // platformStatus holds status information specific to the underlying @@ -79,17 +80,20 @@ type InfrastructureStatus struct { // etcd servers and clients. // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery // deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release. + // +optional EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"` // apiServerURL is a valid URI with scheme 'https', address and // optionally a port (defaulting to 443). apiServerURL can be used by components like the web console // to tell users where to find the Kubernetes API. 
+ // +optional APIServerURL string `json:"apiServerURL"` // apiServerInternalURL is a valid URI with scheme 'https', // address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components // like kubelets, to contact the Kubernetes API server using the // infrastructure provider rather than Kubernetes networking. + // +optional APIServerInternalURL string `json:"apiServerInternalURI"` // controlPlaneTopology expresses the expectations for operands that normally run on control nodes. @@ -98,8 +102,12 @@ type InfrastructureStatus struct { // and the operators should not configure the operand for highly-available operation // The 'External' mode indicates that the control plane is hosted externally to the cluster and that // its components are not visible within the cluster. + // The 'HighlyAvailableArbiter' mode indicates that the control plane will consist of 2 control-plane nodes + // that run conventional services and 1 smaller sized arbiter node that runs a bare minimum of services to maintain quorum. // +kubebuilder:default=HighlyAvailable - // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate=DualReplica,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;DualReplica;External + // +optional ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"` // infrastructureTopology expresses the expectations for infrastructure services that do not run on control @@ -111,7 +119,8 @@ type InfrastructureStatus struct { // NOTE: External topology mode is not applicable for this field. 
// +kubebuilder:default=HighlyAvailable // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica - InfrastructureTopology TopologyMode `json:"infrastructureTopology"` + // +optional + InfrastructureTopology TopologyMode `json:"infrastructureTopology,omitempty"` // cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. // CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. @@ -136,9 +145,15 @@ const ( // "HighlyAvailable" is for operators to configure high-availability as much as possible. HighlyAvailableTopologyMode TopologyMode = "HighlyAvailable" + // "HighlyAvailableArbiter" is for operators to configure for an arbiter HA deployment. + HighlyAvailableArbiterMode TopologyMode = "HighlyAvailableArbiter" + // "SingleReplica" is for operators to avoid spending resources for high-availability purpose. SingleReplicaTopologyMode TopologyMode = "SingleReplica" + // "DualReplica" is for operators to configure for two node topology. + DualReplicaTopologyMode TopologyMode = "DualReplica" + // "External" indicates that the component is running externally to the cluster. When specified // as the control plane topology, operators should avoid scheduling workloads to masters or assume // that any of the control plane components such as kubernetes API server or etcd are visible within @@ -168,6 +183,17 @@ const ( LoadBalancerTypeOpenShiftManagedDefault PlatformLoadBalancerType = "OpenShiftManagedDefault" ) +// DNSRecordsType defines whether api, api-int, and ingress records are provided by +// the internal DNS infrastructure or must be configured external to the cluster. +// +kubebuilder:validation:Enum=Internal;External +// +enum +type DNSRecordsType string + +const ( + DNSRecordsTypeExternal DNSRecordsType = "External" + DNSRecordsTypeInternal DNSRecordsType = "Internal" +) + // PlatformType is a specific supported infrastructure provider. 
// +kubebuilder:validation:Enum="";AWS;Azure;BareMetal;GCP;Libvirt;OpenStack;None;VSphere;oVirt;IBMCloud;KubeVirt;EquinixMetal;PowerVS;AlibabaCloud;Nutanix;External type PlatformType string @@ -257,7 +283,7 @@ const ( // ExternalPlatformSpec holds the desired state for the generic External infrastructure provider. type ExternalPlatformSpec struct { - // PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. + // platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. // This field is solely for informational and reporting purposes and is not expected to be used for decision-making. // +kubebuilder:default:="Unknown" // +default="Unknown" @@ -276,62 +302,63 @@ type PlatformSpec struct { // balancers, dynamic volume provisioning, machine creation and deletion, and // other integrations are enabled. If None, no infrastructure automation is // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", - // "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", - // "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, - // and must handle unrecognized platforms as None if they do not support that platform. + // "OpenStack", "VSphere", "oVirt", "IBMCloud", "KubeVirt", "EquinixMetal", + // "PowerVS", "AlibabaCloud", "Nutanix", "External", and "None". Individual + // components may not support all platforms, and must handle unrecognized + // platforms as None if they do not support that platform. // // +unionDiscriminator Type PlatformType `json:"type"` - // AWS contains settings specific to the Amazon Web Services infrastructure provider. + // aws contains settings specific to the Amazon Web Services infrastructure provider. 
// +optional AWS *AWSPlatformSpec `json:"aws,omitempty"` - // Azure contains settings specific to the Azure infrastructure provider. + // azure contains settings specific to the Azure infrastructure provider. // +optional Azure *AzurePlatformSpec `json:"azure,omitempty"` - // GCP contains settings specific to the Google Cloud Platform infrastructure provider. + // gcp contains settings specific to the Google Cloud Platform infrastructure provider. // +optional GCP *GCPPlatformSpec `json:"gcp,omitempty"` - // BareMetal contains settings specific to the BareMetal platform. + // baremetal contains settings specific to the BareMetal platform. // +optional BareMetal *BareMetalPlatformSpec `json:"baremetal,omitempty"` - // OpenStack contains settings specific to the OpenStack infrastructure provider. + // openstack contains settings specific to the OpenStack infrastructure provider. // +optional OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"` - // Ovirt contains settings specific to the oVirt infrastructure provider. + // ovirt contains settings specific to the oVirt infrastructure provider. // +optional Ovirt *OvirtPlatformSpec `json:"ovirt,omitempty"` - // VSphere contains settings specific to the VSphere infrastructure provider. + // vsphere contains settings specific to the VSphere infrastructure provider. // +optional VSphere *VSpherePlatformSpec `json:"vsphere,omitempty"` - // IBMCloud contains settings specific to the IBMCloud infrastructure provider. + // ibmcloud contains settings specific to the IBMCloud infrastructure provider. // +optional IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` - // Kubevirt contains settings specific to the kubevirt infrastructure provider. + // kubevirt contains settings specific to the kubevirt infrastructure provider. // +optional Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"` - // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. 
+ // equinixMetal contains settings specific to the Equinix Metal infrastructure provider. // +optional EquinixMetal *EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"` - // PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. + // powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. // +optional PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"` - // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + // alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. // +optional AlibabaCloud *AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"` - // Nutanix contains settings specific to the Nutanix infrastructure provider. + // nutanix contains settings specific to the Nutanix infrastructure provider. // +optional Nutanix *NutanixPlatformSpec `json:"nutanix,omitempty"` @@ -401,59 +428,59 @@ type PlatformStatus struct { // Currently this value cannot be changed once set. Type PlatformType `json:"type"` - // AWS contains settings specific to the Amazon Web Services infrastructure provider. + // aws contains settings specific to the Amazon Web Services infrastructure provider. // +optional AWS *AWSPlatformStatus `json:"aws,omitempty"` - // Azure contains settings specific to the Azure infrastructure provider. + // azure contains settings specific to the Azure infrastructure provider. // +optional Azure *AzurePlatformStatus `json:"azure,omitempty"` - // GCP contains settings specific to the Google Cloud Platform infrastructure provider. + // gcp contains settings specific to the Google Cloud Platform infrastructure provider. // +optional GCP *GCPPlatformStatus `json:"gcp,omitempty"` - // BareMetal contains settings specific to the BareMetal platform. + // baremetal contains settings specific to the BareMetal platform. 
// +optional BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"` - // OpenStack contains settings specific to the OpenStack infrastructure provider. + // openstack contains settings specific to the OpenStack infrastructure provider. // +optional OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"` - // Ovirt contains settings specific to the oVirt infrastructure provider. + // ovirt contains settings specific to the oVirt infrastructure provider. // +optional Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"` - // VSphere contains settings specific to the VSphere infrastructure provider. + // vsphere contains settings specific to the VSphere infrastructure provider. // +optional VSphere *VSpherePlatformStatus `json:"vsphere,omitempty"` - // IBMCloud contains settings specific to the IBMCloud infrastructure provider. + // ibmcloud contains settings specific to the IBMCloud infrastructure provider. // +optional IBMCloud *IBMCloudPlatformStatus `json:"ibmcloud,omitempty"` - // Kubevirt contains settings specific to the kubevirt infrastructure provider. + // kubevirt contains settings specific to the kubevirt infrastructure provider. // +optional Kubevirt *KubevirtPlatformStatus `json:"kubevirt,omitempty"` - // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + // equinixMetal contains settings specific to the Equinix Metal infrastructure provider. // +optional EquinixMetal *EquinixMetalPlatformStatus `json:"equinixMetal,omitempty"` - // PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. + // powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider. // +optional PowerVS *PowerVSPlatformStatus `json:"powervs,omitempty"` - // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + // alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. 
// +optional AlibabaCloud *AlibabaCloudPlatformStatus `json:"alibabaCloud,omitempty"` - // Nutanix contains settings specific to the Nutanix infrastructure provider. + // nutanix contains settings specific to the Nutanix infrastructure provider. // +optional Nutanix *NutanixPlatformStatus `json:"nutanix,omitempty"` - // External contains settings specific to the generic External infrastructure provider. + // external contains settings specific to the generic External infrastructure provider. // +optional External *ExternalPlatformStatus `json:"external,omitempty"` } @@ -476,6 +503,21 @@ type AWSServiceEndpoint struct { URL string `json:"url"` } +// IPFamilyType represents the IP protocol family that cloud platform resources should use. +// +kubebuilder:validation:Enum=IPv4;DualStackIPv6Primary;DualStackIPv4Primary +type IPFamilyType string + +const ( + // IPv4 indicates that cloud platform resources should use IPv4 addressing only. + IPv4 IPFamilyType = "IPv4" + + // DualStackIPv6Primary indicates that cloud platform resources should use dual-stack networking with IPv6 as primary. + DualStackIPv6Primary IPFamilyType = "DualStackIPv6Primary" + + // DualStackIPv4Primary indicates that cloud platform resources should use dual-stack networking with IPv4 as primary. + DualStackIPv4Primary IPFamilyType = "DualStackIPv4Primary" +) + // AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider. // This only includes fields that can be modified in the cluster. type AWSPlatformSpec struct { @@ -492,7 +534,7 @@ type AWSPlatformStatus struct { // region holds the default AWS region for new AWS resources created by the cluster. Region string `json:"region"` - // ServiceEndpoints list contains custom endpoints which will override default + // serviceEndpoints list contains custom endpoints which will override default // service endpoint of AWS Services. // There must be only one ServiceEndpoint for a service. 
// +listType=atomic @@ -517,28 +559,42 @@ type AWSPlatformStatus struct { // // +default={"dnsType": "PlatformDefault"} // +kubebuilder:default={"dnsType": "PlatformDefault"} - // +openshift:enable:FeatureGate=AWSClusterHostedDNS + // +openshift:enable:FeatureGate=AWSClusterHostedDNSInstall // +optional // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` + + // ipFamily specifies the IP protocol family that should be used for AWS + // network resources. This controls whether AWS resources are created with + // IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + // protocol family. + // + // +default="IPv4" + // +kubebuilder:default="IPv4" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="ipFamily is immutable once set" + // +openshift:enable:FeatureGate=AWSDualStackInstall + // +optional + IPFamily IPFamilyType `json:"ipFamily,omitempty"` } // AWSResourceTag is a tag to apply to AWS resources created for the cluster. type AWSResourceTag struct { - // key is the key of the tag - // +kubebuilder:validation:Required + // key sets the key of the AWS resource tag key-value pair. Key is required when defining an AWS resource tag. + // Key should consist of between 1 and 128 characters, and may + // contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 - // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` + // +kubebuilder:validation:XValidation:rule=`self.matches('^[0-9A-Za-z_.:/=+-@ ]+$')`,message="invalid AWS resource tag key. The string can contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', '@'" // +required Key string `json:"key"` - // value is the value of the tag. + // value sets the value of the AWS resource tag key-value pair. 
Value is required when defining an AWS resource tag. + // Value should consist of between 1 and 256 characters, and may + // contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'. // Some AWS service do not support empty values. Since tags are added to resources in many services, the // length of the tag value must meet the requirements of all services. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` + // +kubebuilder:validation:XValidation:rule=`self.matches('^[0-9A-Za-z_.:/=+-@ ]+$')`,message="invalid AWS resource tag value. The string can contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', '@'" // +required Value string `json:"value"` } @@ -577,6 +633,31 @@ type AzurePlatformStatus struct { // +listType=atomic // +optional ResourceTags []AzureResourceTag `json:"resourceTags,omitempty"` + + // cloudLoadBalancerConfig holds configuration related to DNS and cloud + // load balancers. It allows configuration of in-cluster DNS as an alternative + // to the platform default DNS implementation. + // When using the ClusterHosted DNS type, Load Balancer IP addresses + // must be provided for the API and internal API load balancers as well as the + // ingress load balancer. + // + // +default={"dnsType": "PlatformDefault"} + // +kubebuilder:default={"dnsType": "PlatformDefault"} + // +openshift:enable:FeatureGate=AzureClusterHostedDNSInstall + // +optional + CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` + + // ipFamily specifies the IP protocol family that should be used for Azure + // network resources. This controls whether Azure resources are created with + // IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + // protocol family. 
+ // + // +default="IPv4" + // +kubebuilder:default="IPv4" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="ipFamily is immutable once set" + // +openshift:enable:FeatureGate=AzureDualStackInstall + // +optional + IPFamily IPFamilyType `json:"ipFamily,omitempty"` } // AzureResourceTag is a tag to apply to Azure resources created for the cluster. @@ -584,14 +665,14 @@ type AzureResourceTag struct { // key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key // must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric // characters and the following special characters `_ . -`. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +kubebuilder:validation:Pattern=`^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$` Key string `json:"key"` // value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value // must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.=+-@]+$` @@ -619,13 +700,51 @@ const ( AzureStackCloud AzureCloudEnvironment = "AzureStackCloud" ) +// Start: TOMBSTONE + +// GCPServiceEndpointName is the name of the GCP Service Endpoint. +// +kubebuilder:validation:Enum=Compute;Container;CloudResourceManager;DNS;File;IAM;IAMCredentials;OAuth;ServiceUsage;Storage;STS +//type GCPServiceEndpointName string + +// GCPServiceEndpoint store the configuration of a custom url to +// override existing defaults of GCP Services. +// type GCPServiceEndpoint struct { +// name is the name of the GCP service whose endpoint is being overridden. +// This must be provided and cannot be empty. 
+// +// Allowed values are Compute, Container, CloudResourceManager, DNS, File, IAM, ServiceUsage, +// Storage, and TagManager. +// +// As an example, when setting the name to Compute all requests made by the caller to the GCP Compute +// Service will be directed to the endpoint specified in the url field. +// +// +required +// Name GCPServiceEndpointName `json:"name"` + +// url is a fully qualified URI that overrides the default endpoint for a client using the GCP service specified +// in the name field. +// url is required, must use the scheme https, must not be more than 253 characters in length, +// and must be a valid URL according to Go's net/url package (https://pkg.go.dev/net/url#URL) +// +// An example of a valid endpoint that overrides the Compute Service: "https://compute-myendpoint1.p.googleapis.com" +// +// +required +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:XValidation:rule="isURL(self)",message="must be a valid URL" +// +kubebuilder:validation:XValidation:rule="isURL(self) ? (url(self).getScheme() == \"https\") : true",message="scheme must be https" +// +kubebuilder:validation:XValidation:rule="url(self).getEscapedPath() == \"\" || url(self).getEscapedPath() == \"/\"",message="url must consist only of a scheme and domain. The url path must be empty." +// URL string `json:"url"` +//} + +// End: TOMBSTONE + // GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. // This only includes fields that can be modified in the cluster. type GCPPlatformSpec struct{} // GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider. 
-// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation" -// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation" type GCPPlatformStatus struct { // resourceGroupName is the Project ID for new GCP resources created for the cluster. ProjectID string `json:"projectID"` @@ -642,7 +761,6 @@ type GCPPlatformStatus struct { // +listType=map // +listMapKey=key // +optional - // +openshift:enable:FeatureGate=GCPLabelsTags ResourceLabels []GCPResourceLabel `json:"resourceLabels,omitempty"` // resourceTags is a list of additional tags to apply to GCP resources created for the cluster. @@ -653,7 +771,6 @@ type GCPPlatformStatus struct { // +listType=map // +listMapKey=key // +optional - // +openshift:enable:FeatureGate=GCPLabelsTags ResourceTags []GCPResourceTag `json:"resourceTags,omitempty"` // This field was introduced and removed under tech preview. 
@@ -670,10 +787,26 @@ type GCPPlatformStatus struct { // // +default={"dnsType": "PlatformDefault"} // +kubebuilder:default={"dnsType": "PlatformDefault"} - // +openshift:enable:FeatureGate=GCPClusterHostedDNS + // +openshift:enable:FeatureGate=GCPClusterHostedDNSInstall // +optional // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` + + // This field was introduced and removed under tech preview. + // serviceEndpoints specifies endpoints that override the default endpoints + // used when creating clients to interact with GCP services. + // When not specified, the default endpoint for the GCP region will be used. + // Only 1 endpoint override is permitted for each GCP service. + // The maximum number of endpoint overrides allowed is 11. + // To avoid conflicts with serialisation, this field name may never be used again. + // Tombstone the field as a reminder. + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=11 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x.name == y.name))",message="only 1 endpoint override is permitted per GCP service name" + // +optional + // +openshift:enable:FeatureGate=GCPCustomAPIEndpointsInstall + // ServiceEndpoints []GCPServiceEndpoint `json:"serviceEndpoints,omitempty"` } // GCPResourceLabel is a label to apply to GCP resources created for the cluster. @@ -683,7 +816,7 @@ type GCPResourceLabel struct { // and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` // and `openshift-io`. 
// +kubebuilder:validation:XValidation:rule="!self.startsWith('openshift-io') && !self.startsWith('kubernetes-io')",message="label keys must not start with either `openshift-io` or `kubernetes-io`" - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]{0,62}$` @@ -691,7 +824,7 @@ type GCPResourceLabel struct { // value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. // Value must contain only lowercase letters, numeric characters, and the following special characters `_-`. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[0-9a-z_-]{1,63}$` @@ -707,7 +840,7 @@ type GCPResourceTag struct { // An OrganizationID must consist of decimal numbers, and cannot have leading zeroes. // A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, // and hyphens, and must start with a letter, and cannot end with a hyphen. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=32 // +kubebuilder:validation:Pattern=`(^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$)` @@ -716,7 +849,7 @@ type GCPResourceTag struct { // key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. // Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase // alphanumeric characters, and the following special characters `._-`. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$` @@ -725,7 +858,7 @@ type GCPResourceTag struct { // value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. // Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase // alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$` @@ -873,6 +1006,7 @@ type BareMetalPlatformSpec struct { // BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. // For more information about the network architecture used with the BareMetal platform type, see: // https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md +// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" type BareMetalPlatformStatus struct { // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used // by components inside the cluster, like kubelets using the infrastructure rather @@ -922,10 +1056,25 @@ type BareMetalPlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. 
// +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *BareMetalPlatformLoadBalancer `json:"loadBalancer,omitempty"` + // dnsRecordsType determines whether records for api, api-int, and ingress + // are provided by the internal DNS service or externally. + // Allowed values are `Internal`, `External`, and omitted. + // When set to `Internal`, records are provided by the internal infrastructure and + // no additional user configuration is required for the cluster to function. + // When set to `External`, records are not provided by the internal infrastructure + // and must be configured by the user on a DNS server outside the cluster. + // Cluster nodes must use this external server for their upstream DNS requests. + // This value may only be set when loadBalancer.type is set to UserManaged. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `Internal`. + // +openshift:enable:FeatureGate=OnPremDNSRecords + // +optional + DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"` + // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. // +listType=atomic // +kubebuilder:validation:MaxItems=32 @@ -1002,6 +1151,7 @@ type OpenStackPlatformSpec struct { } // OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider. 
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" type OpenStackPlatformStatus struct { // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used // by components inside the cluster, like kubelets using the infrastructure rather @@ -1058,6 +1208,22 @@ type OpenStackPlatformStatus struct { // +optional LoadBalancer *OpenStackPlatformLoadBalancer `json:"loadBalancer,omitempty"` + // dnsRecordsType determines whether records for api, api-int, and ingress + // are provided by the internal DNS service or externally. + // Allowed values are `Internal`, `External`, and omitted. + // When set to `Internal`, records are provided by the internal infrastructure and + // no additional user configuration is required for the cluster to function. + // When set to `External`, records are not provided by the internal infrastructure + // and must be configured by the user on a DNS server outside the cluster. + // Cluster nodes must use this external server for their upstream DNS requests. + // This value may only be set when loadBalancer.type is set to UserManaged. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `Internal`. + // +openshift:enable:FeatureGate=OnPremDNSRecords + // +optional + DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"` + // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. 
// +listType=atomic // +kubebuilder:validation:MaxItems=32 @@ -1092,6 +1258,7 @@ type OvirtPlatformLoadBalancer struct { type OvirtPlatformSpec struct{} // OvirtPlatformStatus holds the current status of the oVirt infrastructure provider. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" type OvirtPlatformStatus struct { // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used // by components inside the cluster, like kubelets using the infrastructure rather @@ -1136,9 +1303,24 @@ type OvirtPlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *OvirtPlatformLoadBalancer `json:"loadBalancer,omitempty"` + + // dnsRecordsType determines whether records for api, api-int, and ingress + // are provided by the internal DNS service or externally. + // Allowed values are `Internal`, `External`, and omitted. + // When set to `Internal`, records are provided by the internal infrastructure and + // no additional user configuration is required for the cluster to function. + // When set to `External`, records are not provided by the internal infrastructure + // and must be configured by the user on a DNS server outside the cluster. + // Cluster nodes must use this external server for their upstream DNS requests. + // This value may only be set when loadBalancer.type is set to UserManaged. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. 
These defaults are subject to change over time. + // The current default is `Internal`. + // +openshift:enable:FeatureGate=OnPremDNSRecords + // +optional + DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"` } // VSpherePlatformLoadBalancer defines the load balancer used by the cluster on VSphere platform. @@ -1189,7 +1371,7 @@ const ( type VSpherePlatformFailureDomainSpec struct { // name defines the arbitrary but unique name // of a failure domain. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 Name string `json:"name"` @@ -1199,7 +1381,7 @@ type VSpherePlatformFailureDomainSpec struct { // category in vCenter must be named openshift-region. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=80 - // +kubebuilder:validation:Required + // +required Region string `json:"region"` // zone defines the name of a zone tag that will @@ -1207,7 +1389,7 @@ type VSpherePlatformFailureDomainSpec struct { // category in vCenter must be named openshift-zone. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=80 - // +kubebuilder:validation:Required + // +required Zone string `json:"zone"` // regionAffinity holds the type of region, Datacenter or ComputeCluster. @@ -1226,15 +1408,15 @@ type VSpherePlatformFailureDomainSpec struct { ZoneAffinity *VSphereFailureDomainZoneAffinity `json:"zoneAffinity,omitempty"` // server is the fully-qualified domain name or the IP address of the vCenter server. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=255 // --- // + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname Server string `json:"server"` - // Topology describes a given failure domain using vSphere constructs - // +kubebuilder:validation:Required + // topology describes a given failure domain using vSphere constructs + // +required Topology VSpherePlatformTopology `json:"topology"` } @@ -1243,7 +1425,7 @@ type VSpherePlatformFailureDomainSpec struct { type VSpherePlatformTopology struct { // datacenter is the name of vCenter datacenter in which virtual machines will be located. // The maximum length of the datacenter name is 80 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=80 Datacenter string `json:"datacenter"` @@ -1251,7 +1433,7 @@ type VSpherePlatformTopology struct { // in which virtual machine will be located. // The absolute path is of the form //host/. // The maximum length of the path is 2048 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=2048 // +kubebuilder:validation:Pattern=`^/.*?/host/.*?` ComputeCluster string `json:"computeCluster"` @@ -1264,7 +1446,7 @@ type VSpherePlatformTopology struct { // `govc ls 'network/*'` // Networks should be in the form of an absolute path: // //network/. - // +kubebuilder:validation:Required + // +required // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 // +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiNetworks,maxItems=10 // +kubebuilder:validation:MinItems=1 @@ -1275,7 +1457,7 @@ type VSpherePlatformTopology struct { // virtual machine is located. // The absolute path is of the form //datastore/ // The maximum length of the path is 2048 characters. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=2048 // +kubebuilder:validation:Pattern=`^/.*?/datastore/.*?` Datastore string `json:"datastore"` @@ -1306,7 +1488,6 @@ type VSpherePlatformTopology struct { // VSpherePlatformFailureDomainSpec. // For example, for zone=zonea, region=region1, and infrastructure name=test, // the template path would be calculated as //vm/test-rhcos-region1-zonea. - // +openshift:enable:FeatureGate=VSphereControlPlaneMachineSet // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=2048 // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?` @@ -1327,7 +1508,7 @@ type VSphereFailureDomainZoneAffinity struct { // When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and // this means the zone is defined by the grouping of those fields. // +kubebuilder:validation:Enum:=HostGroup;ComputeCluster - // +kubebuilder:validation:Required + // +required // +unionDiscriminator Type VSphereFailureDomainZoneType `json:"type"` @@ -1348,7 +1529,7 @@ type VSphereFailureDomainRegionAffinity struct { // When set to Datacenter, this means the vCenter Datacenter defined is the region. // When set to ComputeCluster, this means the vCenter cluster defined is the region. // +kubebuilder:validation:Enum:=ComputeCluster;Datacenter - // +kubebuilder:validation:Required + // +required // +unionDiscriminator Type VSphereFailureDomainRegionType `json:"type"` } @@ -1362,7 +1543,7 @@ type VSphereFailureDomainHostGroup struct { // This field is required when the VSphereFailureDomain ZoneType is HostGroup // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=80 - // +kubebuilder:validation:Required + // +required VMGroup string `json:"vmGroup"` // hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. 
@@ -1370,7 +1551,7 @@ type VSphereFailureDomainHostGroup struct { // This field is required when the VSphereFailureDomain ZoneType is HostGroup // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=80 - // +kubebuilder:validation:Required + // +required HostGroup string `json:"hostGroup"` // vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. @@ -1378,7 +1559,7 @@ type VSphereFailureDomainHostGroup struct { // This field is required when the VSphereFailureDomain ZoneType is HostGroup // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=80 - // +kubebuilder:validation:Required + // +required VMHostRule string `json:"vmHostRule"` } @@ -1387,7 +1568,7 @@ type VSphereFailureDomainHostGroup struct { type VSpherePlatformVCenterSpec struct { // server is the fully-qualified domain name or the IP address of the vCenter server. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=255 // --- // + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname @@ -1408,7 +1589,7 @@ type VSpherePlatformVCenterSpec struct { // be used by the Cloud Controller Manager. // Each datacenter listed here should be used within // a topology. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinItems=1 // +listType=set Datacenters []string `json:"datacenters"` @@ -1474,8 +1655,7 @@ type VSpherePlatformSpec struct { // + If VCenters is not defined use the existing cloud-config configmap defined // + in openshift-config. // +kubebuilder:validation:MinItems=0 - // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 - // +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiVCenters,maxItems=3 + // +kubebuilder:validation:MaxItems=3 // +kubebuilder:validation:XValidation:rule="size(self) != size(oldSelf) ? 
size(oldSelf) == 0 && size(self) < 2 : true",message="vcenters cannot be added or removed once set" // +listType=atomic // +optional @@ -1538,6 +1718,7 @@ type VSpherePlatformSpec struct { } // VSpherePlatformStatus holds the current status of the vSphere infrastructure provider. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" type VSpherePlatformStatus struct { // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used // by components inside the cluster, like kubelets using the infrastructure rather @@ -1587,10 +1768,25 @@ type VSpherePlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *VSpherePlatformLoadBalancer `json:"loadBalancer,omitempty"` + // dnsRecordsType determines whether records for api, api-int, and ingress + // are provided by the internal DNS service or externally. + // Allowed values are `Internal`, `External`, and omitted. + // When set to `Internal`, records are provided by the internal infrastructure and + // no additional user configuration is required for the cluster to function. + // When set to `External`, records are not provided by the internal infrastructure + // and must be configured by the user on a DNS server outside the cluster. + // Cluster nodes must use this external server for their upstream DNS requests. + // This value may only be set when loadBalancer.type is set to UserManaged. 
+ // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `Internal`. + // +openshift:enable:FeatureGate=OnPremDNSRecords + // +optional + DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"` + // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. // +listType=atomic // +kubebuilder:validation:MaxItems=32 @@ -1609,45 +1805,67 @@ type IBMCloudServiceEndpoint struct { // Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured // with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com` // - // +kubebuilder:validation:Required + // +required Name IBMCloudServiceName `json:"name"` // url is fully qualified URI with scheme https, that overrides the default generated // endpoint for a client. - // This must be provided and cannot be empty. + // This must be provided and cannot be empty. The path must follow the pattern + // /v[0,9]+ or /api/v[0,9]+ // - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Type=string + // +kubebuilder:validation:MaxLength=300 // +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL" + // +openshift:validation:FeatureGateAwareXValidation:featureGate=DyanmicServiceEndpointIBMCloud,rule="url(self).getScheme() == \"https\"",message="url must use https scheme" + // +openshift:validation:FeatureGateAwareXValidation:featureGate=DyanmicServiceEndpointIBMCloud,rule=`matches((url(self).getEscapedPath()), '^/(api/)?v[0-9]+/{0,1}$')`,message="url path must match /v[0,9]+ or /api/v[0,9]+" URL string `json:"url"` } // IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. // This only includes fields that can be modified in the cluster. 
-type IBMCloudPlatformSpec struct{} +type IBMCloudPlatformSpec struct { + // serviceEndpoints is a list of custom endpoints which will override the default + // service endpoints of an IBM service. These endpoints are used by components + // within the cluster when trying to reach the IBM Cloud Services that have been + // overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + // endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus + // are updated to reflect the same custom endpoints. + // A maximum of 13 service endpoints overrides are supported. + // +kubebuilder:validation:MaxItems=13 + // +listType=map + // +listMapKey=name + // +optional + // +openshift:enable:FeatureGate=DyanmicServiceEndpointIBMCloud + ServiceEndpoints []IBMCloudServiceEndpoint `json:"serviceEndpoints,omitempty"` +} // IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider. type IBMCloudPlatformStatus struct { - // Location is where the cluster has been deployed + // location is where the cluster has been deployed Location string `json:"location,omitempty"` - // ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. + // resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. 
ResourceGroupName string `json:"resourceGroupName,omitempty"` - // ProviderType indicates the type of cluster that was created + // providerType indicates the type of cluster that was created ProviderType IBMCloudProviderType `json:"providerType,omitempty"` - // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + // cisInstanceCRN is the CRN of the Cloud Internet Services instance managing // the DNS zone for the cluster's base domain CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` - // DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + // dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone // for the cluster's base domain DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"` // serviceEndpoints is a list of custom endpoints which will override the default - // service endpoints of an IBM Cloud service. These endpoints are consumed by - // components within the cluster to reach the respective IBM Cloud Services. + // service endpoints of an IBM service. These endpoints are used by components + // within the cluster when trying to reach the IBM Cloud Services that have been + // overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + // endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus + // are updated to reflect the same custom endpoints. 
+ // +openshift:validation:FeatureGateAwareMaxItems:featureGate=DyanmicServiceEndpointIBMCloud,maxItems=13 // +listType=map // +listMapKey=name // +optional @@ -1697,7 +1915,7 @@ type PowerVSServiceEndpoint struct { // ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller // Power Cloud - https://cloud.ibm.com/apidocs/power-cloud // - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Enum=CIS;COS;COSConfig;DNSServices;GlobalCatalog;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;Power;ResourceController;ResourceManager;VPC Name string `json:"name"` @@ -1705,7 +1923,7 @@ type PowerVSServiceEndpoint struct { // endpoint for a client. // This must be provided and cannot be empty. // - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Type=string // +kubebuilder:validation:Format=uri // +kubebuilder:validation:Pattern=`^https://` @@ -1752,11 +1970,11 @@ type PowerVSPlatformStatus struct { // +optional ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"` - // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + // cisInstanceCRN is the CRN of the Cloud Internet Services instance managing // the DNS zone for the cluster's base domain CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` - // DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + // dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone // for the cluster's base domain DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"` } @@ -1768,7 +1986,6 @@ type AlibabaCloudPlatformSpec struct{} // AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider. type AlibabaCloudPlatformStatus struct { // region specifies the region for Alibaba Cloud resources created for the cluster. 
- // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^[0-9A-Za-z-]+$` // +required Region string `json:"region"` @@ -1787,13 +2004,11 @@ type AlibabaCloudPlatformStatus struct { // AlibabaCloudResourceTag is the set of tags to add to apply to resources. type AlibabaCloudResourceTag struct { // key is the key of the tag. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +required Key string `json:"key"` // value is the value of the tag. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +required @@ -1828,7 +2043,7 @@ type NutanixPlatformSpec struct { // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the // proxy spec.noProxy list. - // +kubebuilder:validation:Required + // +required PrismCentral NutanixPrismEndpoint `json:"prismCentral"` // prismElements holds one or more endpoint address and port data to access the Nutanix @@ -1836,7 +2051,7 @@ type NutanixPlatformSpec struct { // Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) // used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) // spread over multiple Prism Elements (clusters) of the Prism Central. - // +kubebuilder:validation:Required + // +required // +listType=map // +listMapKey=name PrismElements []NutanixPrismElementEndpoint `json:"prismElements"` @@ -1858,7 +2073,7 @@ type NutanixFailureDomain struct { // It must consist of only lower case alphanumeric characters and hyphens (-). // It must start and end with an alphanumeric character. // This value is arbitrary and is used to identify the failure domain within the platform. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=64 // +kubebuilder:validation:Pattern=`[a-z0-9]([-a-z0-9]*[a-z0-9])?` @@ -1867,14 +2082,14 @@ type NutanixFailureDomain struct { // cluster is to identify the cluster (the Prism Element under management of the Prism Central), // in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained // from the Prism Central console or using the prism_central API. - // +kubebuilder:validation:Required + // +required Cluster NutanixResourceIdentifier `json:"cluster"` // subnets holds a list of identifiers (one or more) of the cluster's network subnets // If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. // for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be // obtained from the Prism Central console or using the prism_central API. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinItems=1 // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 // +openshift:validation:FeatureGateAwareMaxItems:featureGate=NutanixMultiSubnets,maxItems=32 @@ -1902,7 +2117,7 @@ const ( type NutanixResourceIdentifier struct { // type is the identifier type to use for this resource. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Type NutanixIdentifierType `json:"type"` // uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. 
@@ -1917,12 +2132,12 @@ type NutanixResourceIdentifier struct { // NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster) type NutanixPrismEndpoint struct { // address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=256 Address string `json:"address"` // port is the port number to access the Nutanix Prism Central or Element (cluster) - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 Port int32 `json:"port"` @@ -1932,7 +2147,7 @@ type NutanixPrismEndpoint struct { type NutanixPrismElementEndpoint struct { // name is the name of the Prism Element (cluster). This value will correspond with // the cluster field configured on other resources (eg Machines, PVCs, etc). - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=256 Name string `json:"name"` @@ -1940,11 +2155,12 @@ type NutanixPrismElementEndpoint struct { // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the // proxy spec.noProxy list. - // +kubebuilder:validation:Required + // +required Endpoint NutanixPrismEndpoint `json:"endpoint"` } // NutanixPlatformStatus holds the current status of the Nutanix infrastructure provider. 
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" type NutanixPlatformStatus struct { // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used // by components inside the cluster, like kubelets using the infrastructure rather @@ -1986,9 +2202,24 @@ type NutanixPlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *NutanixPlatformLoadBalancer `json:"loadBalancer,omitempty"` + + // dnsRecordsType determines whether records for api, api-int, and ingress + // are provided by the internal DNS service or externally. + // Allowed values are `Internal`, `External`, and omitted. + // When set to `Internal`, records are provided by the internal infrastructure and + // no additional user configuration is required for the cluster to function. + // When set to `External`, records are not provided by the internal infrastructure + // and must be configured by the user on a DNS server outside the cluster. + // Cluster nodes must use this external server for their upstream DNS requests. + // This value may only be set when loadBalancer.type is set to UserManaged. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `Internal`. 
+ // +openshift:enable:FeatureGate=OnPremDNSRecords + // +optional + DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go index 302913a16f..26e0ebf218 100644 --- a/vendor/github.com/openshift/api/config/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -27,7 +27,6 @@ type Ingress struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec IngressSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -44,6 +43,7 @@ type IngressSpec struct { // default ingresscontroller domain will follow this pattern: "*.". // // Once set, changing domain is not currently supported. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="domain is immutable once set" Domain string `json:"domain"` // appsDomain is an optional domain to use instead of the one specified @@ -150,8 +150,8 @@ type AWSIngressSpec struct { // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb // +unionDiscriminator // +kubebuilder:validation:Enum:=NLB;Classic - // +kubebuilder:validation:Required - Type AWSLBType `json:"type,omitempty"` + // +required + Type AWSLBType `json:"type"` } type AWSLBType string @@ -223,7 +223,6 @@ type ComponentRouteSpec struct { // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required // +required Namespace string `json:"namespace"` @@ -233,12 +232,10 @@ type ComponentRouteSpec struct { // entry in the list of status.componentRoutes if the route is to be customized. 
// +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required // +required Name string `json:"name"` // hostname is the hostname that should be used by the route. - // +kubebuilder:validation:Required // +required Hostname Hostname `json:"hostname"` @@ -260,7 +257,6 @@ type ComponentRouteStatus struct { // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required // +required Namespace string `json:"namespace"` @@ -271,12 +267,10 @@ type ComponentRouteStatus struct { // entry in the list of spec.componentRoutes if the route is to be customized. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required // +required Name string `json:"name"` // defaultHostname is the hostname of this route prior to customization. - // +kubebuilder:validation:Required // +required DefaultHostname Hostname `json:"defaultHostname"` @@ -310,7 +304,6 @@ type ComponentRouteStatus struct { // relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:Required // +required RelatedObjects []ObjectReference `json:"relatedObjects"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_insights.go b/vendor/github.com/openshift/api/config/v1/types_insights.go new file mode 100644 index 0000000000..710d4303da --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_insights.go @@ -0,0 +1,231 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// InsightsDataGather provides data gather configuration options for the Insights Operator. 
+// +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=insightsdatagathers,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2448 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=InsightsConfig +// +openshift:capability=Insights +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type InsightsDataGather struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec holds user settable values for configuration + // +required + Spec InsightsDataGatherSpec `json:"spec,omitempty,omitzero"` +} + +// InsightsDataGatherSpec contains the configuration for the data gathering. +type InsightsDataGatherSpec struct { + // gatherConfig is a required spec attribute that includes all the configuration options related to gathering of the Insights data and its uploading to the ingress. + // +required + GatherConfig GatherConfig `json:"gatherConfig,omitempty,omitzero"` +} + +// GatherConfig provides data gathering configuration options. +type GatherConfig struct { + // dataPolicy is an optional list of DataPolicyOptions that allows user to enable additional obfuscation of the Insights archive data. + // It may not exceed 2 items and must not contain duplicates. + // Valid values are ObfuscateNetworking and WorkloadNames. + // When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. 
+ // When set to WorkloadNames, the gathered data about cluster resources will not contain the workload names for your deployments. Resources UIDs will be used instead. + // When omitted no obfuscation is applied. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))",message="dataPolicy items must be unique" + // +listType=atomic + // +optional + DataPolicy []DataPolicyOption `json:"dataPolicy,omitempty"` + // gatherers is a required field that specifies the configuration of the gatherers. + // +required + Gatherers Gatherers `json:"gatherers,omitempty,omitzero"` + // storage is an optional field that allows user to define persistent storage for gathering jobs to store the Insights data archive. + // If omitted, the gathering job will use ephemeral storage. + // +optional + Storage Storage `json:"storage,omitempty,omitzero"` +} + +// Gatherers specifies the configuration of the gatherers +// +kubebuilder:validation:XValidation:rule="has(self.mode) && self.mode == 'Custom' ? has(self.custom) : !has(self.custom)",message="custom is required when mode is Custom, and forbidden otherwise" +// +union +type Gatherers struct { + // mode is a required field that specifies the mode for gatherers. Allowed values are All, None, and Custom. + // When set to All, all gatherers will run and gather data. + // When set to None, all gatherers will be disabled and no data will be gathered. + // When set to Custom, the custom configuration from the custom field will be applied. + // +unionDiscriminator + // +required + Mode GatheringMode `json:"mode,omitempty"` + // custom provides gathering configuration. + // It is required when mode is Custom, and forbidden otherwise. + // Custom configuration allows user to disable only a subset of gatherers. + // Gatherers that are not explicitly disabled in custom configuration will run. 
+ // +unionMember + // +optional + Custom Custom `json:"custom,omitempty,omitzero"` +} + +// Custom provides the custom configuration of gatherers +type Custom struct { + // configs is a required list of gatherers configurations that can be used to enable or disable specific gatherers. + // It may not exceed 100 items and each gatherer can be present only once. + // It is possible to disable an entire set of gatherers while allowing a specific function within that set. + // The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + // Run the following command to get the names of last active gatherers: + // "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=100 + // +listType=map + // +listMapKey=name + // +required + Configs []GathererConfig `json:"configs,omitempty"` +} + +// GatheringMode defines the valid gathering modes. +// +kubebuilder:validation:Enum=All;None;Custom +type GatheringMode string + +const ( + // Enabled enables all gatherers + GatheringModeAll GatheringMode = "All" + // Disabled disables all gatherers + GatheringModeNone GatheringMode = "None" + // Custom applies the configuration from GatheringConfig. + GatheringModeCustom GatheringMode = "Custom" +) + +// DataPolicyOption declares valid data policy options +// +kubebuilder:validation:Enum=ObfuscateNetworking;WorkloadNames +type DataPolicyOption string + +const ( + // IP addresses and cluster domain name are obfuscated + DataPolicyOptionObfuscateNetworking DataPolicyOption = "ObfuscateNetworking" + // Data from Deployment Validation Operator are obfuscated + DataPolicyOptionObfuscateWorkloadNames DataPolicyOption = "WorkloadNames" +) + +// Storage provides persistent storage configuration options for gathering jobs. 
+// If the type is set to PersistentVolume, then the PersistentVolume must be defined. +// If the type is set to Ephemeral, then the PersistentVolume must not be defined. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'PersistentVolume' ? has(self.persistentVolume) : !has(self.persistentVolume)",message="persistentVolume is required when type is PersistentVolume, and forbidden otherwise" +// +union +type Storage struct { + // type is a required field that specifies the type of storage that will be used to store the Insights data archive. + // Valid values are "PersistentVolume" and "Ephemeral". + // When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job. + // When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field. + // +unionDiscriminator + // +required + Type StorageType `json:"type,omitempty"` + // persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. + // The PersistentVolume must be created in the openshift-insights namespace. + // +unionMember + // +optional + PersistentVolume PersistentVolumeConfig `json:"persistentVolume,omitempty,omitzero"` +} + +// StorageType declares valid storage types +// +kubebuilder:validation:Enum=PersistentVolume;Ephemeral +type StorageType string + +const ( + // StorageTypePersistentVolume storage type + StorageTypePersistentVolume StorageType = "PersistentVolume" + // StorageTypeEphemeral storage type + StorageTypeEphemeral StorageType = "Ephemeral" +) + +// PersistentVolumeConfig provides configuration options for PersistentVolume storage. +type PersistentVolumeConfig struct { + // claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. 
+ // The PersistentVolumeClaim must be created in the openshift-insights namespace. + // +required + Claim PersistentVolumeClaimReference `json:"claim,omitempty,omitzero"` + // mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default mount path is /var/lib/insights-operator + // The path may not exceed 1024 characters and must not contain a colon. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + // +kubebuilder:validation:XValidation:rule="!self.contains(':')",message="mountPath must not contain a colon" + // +optional + MountPath string `json:"mountPath,omitempty"` +} + +// PersistentVolumeClaimReference is a reference to a PersistentVolumeClaim. +type PersistentVolumeClaimReference struct { + // name is the name of the PersistentVolumeClaim that will be used to store the Insights data archive. + // It is a string that follows the DNS1123 subdomain format. + // It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character. + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + // +required + Name string `json:"name,omitempty"` +} + +// GathererConfig allows to configure specific gatherers +type GathererConfig struct { + // name is the required name of a specific gatherer. + // It may not exceed 256 characters. + // The format for a gatherer name is: {gatherer}/{function} where the function is optional. 
+ // Gatherer consists of a lowercase letters only that may include underscores (_). + // Function consists of a lowercase letters only that may include underscores (_) and is separated from the gatherer by a forward slash (/). + // The particular gatherers can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + // Run the following command to get the names of last active gatherers: + // "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:XValidation:rule=`self.matches("^[a-z]+[_a-z]*[a-z]([/a-z][_a-z]*)?[a-z]$")`,message=`gatherer name must be in the format of {gatherer}/{function} where the gatherer and function are lowercase letters only that may include underscores (_) and are separated by a forward slash (/) if the function is provided` + // +required + Name string `json:"name,omitempty"` + // state is a required field that allows you to configure specific gatherer. Valid values are "Enabled" and "Disabled". + // When set to Enabled the gatherer will run. + // When set to Disabled the gatherer will not run. + // +required + State GathererState `json:"state,omitempty"` +} + +// GathererState declares valid gatherer state types. +// +kubebuilder:validation:Enum=Enabled;Disabled +type GathererState string + +const ( + // GathererStateEnabled gatherer state, which means that the gatherer will run. + GathererStateEnabled GathererState = "Enabled" + // GathererStateDisabled gatherer state, which means that the gatherer will not run. + GathererStateDisabled GathererState = "Disabled" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InsightsDataGatherList is a collection of items +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type InsightsDataGatherList struct { + metav1.TypeMeta `json:",inline"` + // metadata is the required standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +required + metav1.ListMeta `json:"metadata,omitempty"` + // items is the required list of InsightsDataGather objects + // it may not exceed 100 items + // +kubebuilder:validation:MinItems=0 + // +kubebuilder:validation:MaxItems=100 + // +required + Items []InsightsDataGather `json:"items,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_kmsencryption.go b/vendor/github.com/openshift/api/config/v1/types_kmsencryption.go new file mode 100644 index 0000000000..3293204fa4 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_kmsencryption.go @@ -0,0 +1,55 @@ +package v1 + +// KMSConfig defines the configuration for the KMS instance +// that will be used with KMSEncryptionProvider encryption +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'AWS' ? has(self.aws) : !has(self.aws)",message="aws config is required when kms provider type is AWS, and forbidden otherwise" +// +union +type KMSConfig struct { + // type defines the kind of platform for the KMS provider. + // Available provider types are AWS only. + // + // +unionDiscriminator + // +required + Type KMSProviderType `json:"type"` + + // aws defines the key config for using an AWS KMS instance + // for the encryption. The AWS KMS instance is managed + // by the user outside the purview of the control plane. + // + // +unionMember + // +optional + AWS *AWSKMSConfig `json:"aws,omitempty"` +} + +// AWSKMSConfig defines the KMS config specific to AWS KMS provider +type AWSKMSConfig struct { + // keyARN specifies the Amazon Resource Name (ARN) of the AWS KMS key used for encryption. 
+ // The value must adhere to the format `arn:aws:kms:::key/`, where: + // - `` is the AWS region consisting of lowercase letters and hyphens followed by a number. + // - `` is a 12-digit numeric identifier for the AWS account. + // - `` is a unique identifier for the KMS key, consisting of lowercase hexadecimal characters and hyphens. + // + // +kubebuilder:validation:MaxLength=128 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:XValidation:rule="self.matches('^arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key/[a-f0-9-]+$')",message="keyARN must follow the format `arn:aws:kms:::key/`. The account ID must be a 12 digit number and the region and key ID should consist only of lowercase hexadecimal characters and hyphens (-)." + // +required + KeyARN string `json:"keyARN"` + // region specifies the AWS region where the KMS instance exists, and follows the format + // `--`, e.g.: `us-east-1`. + // Only lowercase letters and hyphens followed by numbers are allowed. + // + // +kubebuilder:validation:MaxLength=64 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:XValidation:rule="self.matches('^[a-z0-9]+(-[a-z0-9]+)*$')",message="region must be a valid AWS region, consisting of lowercase characters, digits and hyphens (-) only." + // +required + Region string `json:"region"` +} + +// KMSProviderType is a specific supported KMS provider +// +kubebuilder:validation:Enum=AWS +type KMSProviderType string + +const ( + // AWSKMSProvider represents a supported KMS provider for use with AWS KMS + AWSKMSProvider KMSProviderType = "AWS" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index 1eeae69dac..fb8ed2fff7 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -30,7 +30,6 @@ type Network struct { // As a general rule, this SHOULD NOT be read directly. 
Instead, you should // consume the NetworkStatus, as it indicates the currently deployed configuration. // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. - // +kubebuilder:validation:Required // +required Spec NetworkSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -42,7 +41,7 @@ type Network struct { // As a general rule, this SHOULD NOT be read directly. Instead, you should // consume the NetworkStatus, as it indicates the currently deployed configuration. // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. -// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkDiagnosticsConfig,rule="!has(self.networkDiagnostics) || !has(self.networkDiagnostics.mode) || self.networkDiagnostics.mode!='Disabled' || !has(self.networkDiagnostics.sourcePlacement) && !has(self.networkDiagnostics.targetPlacement)",message="cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement when networkDiagnostics.mode is Disabled" +// +kubebuilder:validation:XValidation:rule="!has(self.networkDiagnostics) || !has(self.networkDiagnostics.mode) || self.networkDiagnostics.mode!='Disabled' || !has(self.networkDiagnostics.sourcePlacement) && !has(self.networkDiagnostics.targetPlacement)",message="cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement when networkDiagnostics.mode is Disabled" type NetworkSpec struct { // IP address pool to use for pod IPs. // This field is immutable after installation. @@ -55,7 +54,7 @@ type NetworkSpec struct { // +listType=atomic ServiceNetwork []string `json:"serviceNetwork"` - // NetworkType is the plugin that is to be deployed (e.g. OVNKubernetes). + // networkType is the plugin that is to be deployed (e.g. OVNKubernetes). 
// This should match a value that the cluster-network-operator understands, // or else no networking will be installed. // Currently supported values are: @@ -86,7 +85,6 @@ type NetworkSpec struct { // the network diagnostics feature will be disabled. // // +optional - // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig NetworkDiagnostics NetworkDiagnostics `json:"networkDiagnostics"` } @@ -94,31 +92,33 @@ type NetworkSpec struct { type NetworkStatus struct { // IP address pool to use for pod IPs. // +listType=atomic + // +optional ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"` // IP address pool for services. // Currently, we only support a single entry here. // +listType=atomic + // +optional ServiceNetwork []string `json:"serviceNetwork,omitempty"` - // NetworkType is the plugin that is deployed (e.g. OVNKubernetes). + // networkType is the plugin that is deployed (e.g. OVNKubernetes). + // +optional NetworkType string `json:"networkType,omitempty"` - // ClusterNetworkMTU is the MTU for inter-pod networking. + // clusterNetworkMTU is the MTU for inter-pod networking. + // +optional ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"` - // Migration contains the cluster network migration configuration. + // migration contains the cluster network migration configuration. + // +optional Migration *NetworkMigration `json:"migration,omitempty"` // conditions represents the observations of a network.config current state. 
// Known .status.conditions.type are: "NetworkDiagnosticsAvailable" // +optional - // +patchMergeKey=type - // +patchStrategy=merge // +listType=map // +listMapKey=type - // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs @@ -185,35 +185,35 @@ type NetworkList struct { // NetworkMigration represents the network migration status. type NetworkMigration struct { - // NetworkType is the target plugin that is being deployed. + // networkType is the target plugin that is being deployed. // DEPRECATED: network type migration is no longer supported, // so this should always be unset. // +optional NetworkType string `json:"networkType,omitempty"` - // MTU is the MTU configuration that is being deployed. + // mtu is the MTU configuration that is being deployed. // +optional MTU *MTUMigration `json:"mtu,omitempty"` } // MTUMigration contains infomation about MTU migration. type MTUMigration struct { - // Network contains MTU migration configuration for the default network. + // network contains MTU migration configuration for the default network. // +optional Network *MTUMigrationValues `json:"network,omitempty"` - // Machine contains MTU migration configuration for the machine's uplink. + // machine contains MTU migration configuration for the machine's uplink. // +optional Machine *MTUMigrationValues `json:"machine,omitempty"` } // MTUMigrationValues contains the values for a MTU migration. type MTUMigrationValues struct { - // To is the MTU to migrate to. + // to is the MTU to migrate to. // +kubebuilder:validation:Minimum=0 To *uint32 `json:"to"` - // From is the MTU to migrate from. + // from is the MTU to migrate from. 
// +kubebuilder:validation:Minimum=0 // +optional From *uint32 `json:"from,omitempty"` diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go index a50328c91f..2f627be11e 100644 --- a/vendor/github.com/openshift/api/config/v1/types_node.go +++ b/vendor/github.com/openshift/api/config/v1/types_node.go @@ -28,7 +28,6 @@ type Node struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec NodeSpec `json:"spec"` @@ -38,11 +37,11 @@ type Node struct { } type NodeSpec struct { - // CgroupMode determines the cgroups version on the node + // cgroupMode determines the cgroups version on the node // +optional CgroupMode CgroupMode `json:"cgroupMode,omitempty"` - // WorkerLatencyProfile determins the how fast the kubelet is updating + // workerLatencyProfile determins the how fast the kubelet is updating // the status and corresponding reaction of the cluster // +optional WorkerLatencyProfile WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` @@ -69,22 +68,19 @@ type NodeSpec struct { type NodeStatus struct { // conditions contain the details and the current state of the nodes.config object - // +patchMergeKey=type - // +patchStrategy=merge // +listType=map // +listMapKey=type // +optional - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } -// +kubebuilder:validation:Enum=v1;v2;"" +// +kubebuilder:validation:Enum=v2;"" type CgroupMode string const ( CgroupModeEmpty CgroupMode = "" // Empty string indicates to honor user set value on the system that should not be overridden by OpenShift - CgroupModeV1 CgroupMode = "v1" CgroupModeV2 CgroupMode = "v2" - CgroupModeDefault CgroupMode = CgroupModeV1 + CgroupModeDefault CgroupMode = CgroupModeV2 ) // 
+kubebuilder:validation:Enum=Default;MediumUpdateAverageReaction;LowUpdateSlowReaction diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go index dce08a17f5..20845e4dbe 100644 --- a/vendor/github.com/openshift/api/config/v1/types_oauth.go +++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go @@ -27,7 +27,6 @@ type OAuth struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec OAuthSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go index 1fddfa51e5..a4971a20c5 100644 --- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go +++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go @@ -28,6 +28,7 @@ type OperatorHubSpec struct { type OperatorHubStatus struct { // sources encapsulates the result of applying the configuration for each // hub source + // +optional Sources []HubSourceStatus `json:"sources,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go index 78fd3f41a0..3d219862be 100644 --- a/vendor/github.com/openshift/api/config/v1/types_project.go +++ b/vendor/github.com/openshift/api/config/v1/types_project.go @@ -24,7 +24,6 @@ type Project struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ProjectSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. 
diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go index 2dfc66b1cd..ed40176ce3 100644 --- a/vendor/github.com/openshift/api/config/v1/types_proxy.go +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -25,8 +25,7 @@ type Proxy struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // Spec holds user-settable values for the proxy configuration - // +kubebuilder:validation:Required + // spec holds user-settable values for the proxy configuration // +required Spec ProxySpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go index 2749f4f70d..a81ed9f30c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -25,7 +25,6 @@ type Scheduler struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec SchedulerSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -49,7 +48,9 @@ type SchedulerSpec struct { // +optional Profile SchedulerProfile `json:"profile,omitempty"` // profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles. - // +openshift:enable:FeatureGate=DynamicResourceAllocation + // Deprecated: no longer needed, since DRA is GA starting with 4.21, and + // is enabled by' default in the cluster, this field will be removed in 4.24. 
+ // +openshift:enable:FeatureGate=HyperShiftOnlyDynamicResourceAllocation // +optional ProfileCustomizations ProfileCustomizations `json:"profileCustomizations"` // defaultNodeSelector helps set the cluster-wide default node selector to @@ -74,7 +75,7 @@ type SchedulerSpec struct { // would not be applied. // +optional DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"` - // MastersSchedulable allows masters nodes to be schedulable. When this flag is + // mastersSchedulable allows masters nodes to be schedulable. When this flag is // turned on, all the master nodes in the cluster will be made schedulable, // so that workload pods can run on them. The default value for this field is false, // meaning none of the master nodes are schedulable. diff --git a/vendor/github.com/openshift/api/config/v1/types_testreporting.go b/vendor/github.com/openshift/api/config/v1/types_testreporting.go index 4d642e060b..00953957f4 100644 --- a/vendor/github.com/openshift/api/config/v1/types_testreporting.go +++ b/vendor/github.com/openshift/api/config/v1/types_testreporting.go @@ -15,7 +15,6 @@ type TestReporting struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required // +required Spec TestReportingSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -24,20 +23,20 @@ type TestReporting struct { } type TestReportingSpec struct { - // TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing. + // testsForFeatureGates is a list, indexed by FeatureGate and includes information about testing. TestsForFeatureGates []FeatureGateTests `json:"testsForFeatureGates"` } type FeatureGateTests struct { - // FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance. 
+ // featureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance. FeatureGate string `json:"featureGate"` - // Tests contains an item for every TestName + // tests contains an item for every TestName Tests []TestDetails `json:"tests"` } type TestDetails struct { - // TestName is the name of the test as it appears in junit XMLs. + // testName is the name of the test as it appears in junit XMLs. // It does not include the suite name since the same test can be executed in many suites. TestName string `json:"testName"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go index b18ef647c2..48657b0894 100644 --- a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go +++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go @@ -4,178 +4,115 @@ package v1 // is used by operators to apply TLS security settings to operands. // +union type TLSSecurityProfile struct { - // type is one of Old, Intermediate, Modern or Custom. Custom provides - // the ability to specify individual TLS security profile parameters. - // Old, Intermediate and Modern are TLS security profiles based on: + // type is one of Old, Intermediate, Modern or Custom. Custom provides the + // ability to specify individual TLS security profile parameters. // - // https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + // The profiles are based on version 5.7 of the Mozilla Server Side TLS + // configuration guidelines. The cipher lists consist of the configuration's + // "ciphersuites" followed by the Go-specific "ciphers" from the guidelines. + // See: https://ssl-config.mozilla.org/guidelines/5.7.json // - // The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers - // are found to be insecure. 
Depending on precisely which ciphers are available to a process, the list may be - // reduced. - // - // Note that the Modern profile is currently not supported because it is not - // yet well adopted by common software libraries. + // The profiles are intent based, so they may change over time as new ciphers are + // developed and existing ciphers are found to be insecure. Depending on + // precisely which ciphers are available to a process, the list may be reduced. // // +unionDiscriminator // +optional Type TLSProfileType `json:"type"` - // old is a TLS security profile based on: - // - // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility - // - // and looks like this (yaml): + + // old is a TLS profile for use when services need to be accessed by very old + // clients or libraries and should be used only as a last resort. // + // This profile is equivalent to a Custom profile specified as: + // minTLSVersion: VersionTLS10 // ciphers: - // // - TLS_AES_128_GCM_SHA256 - // // - TLS_AES_256_GCM_SHA384 - // // - TLS_CHACHA20_POLY1305_SHA256 - // // - ECDHE-ECDSA-AES128-GCM-SHA256 - // // - ECDHE-RSA-AES128-GCM-SHA256 - // // - ECDHE-ECDSA-AES256-GCM-SHA384 - // // - ECDHE-RSA-AES256-GCM-SHA384 - // // - ECDHE-ECDSA-CHACHA20-POLY1305 - // // - ECDHE-RSA-CHACHA20-POLY1305 - // - // - DHE-RSA-AES128-GCM-SHA256 - // - // - DHE-RSA-AES256-GCM-SHA384 - // - // - DHE-RSA-CHACHA20-POLY1305 - // // - ECDHE-ECDSA-AES128-SHA256 - // // - ECDHE-RSA-AES128-SHA256 - // // - ECDHE-ECDSA-AES128-SHA - // // - ECDHE-RSA-AES128-SHA - // - // - ECDHE-ECDSA-AES256-SHA384 - // - // - ECDHE-RSA-AES256-SHA384 - // // - ECDHE-ECDSA-AES256-SHA - // // - ECDHE-RSA-AES256-SHA - // - // - DHE-RSA-AES128-SHA256 - // - // - DHE-RSA-AES256-SHA256 - // // - AES128-GCM-SHA256 - // // - AES256-GCM-SHA384 - // // - AES128-SHA256 - // - // - AES256-SHA256 - // // - AES128-SHA - // // - AES256-SHA - // // - DES-CBC3-SHA // - // minTLSVersion: VersionTLS10 - // // +optional // 
+nullable Old *OldTLSProfile `json:"old,omitempty"` - // intermediate is a TLS security profile based on: - // - // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 - // - // and looks like this (yaml): + + // intermediate is a TLS profile for use when you do not need compatibility with + // legacy clients and want to remain highly secure while being compatible with + // most clients currently in use. // + // This profile is equivalent to a Custom profile specified as: + // minTLSVersion: VersionTLS12 // ciphers: - // // - TLS_AES_128_GCM_SHA256 - // // - TLS_AES_256_GCM_SHA384 - // // - TLS_CHACHA20_POLY1305_SHA256 - // // - ECDHE-ECDSA-AES128-GCM-SHA256 - // // - ECDHE-RSA-AES128-GCM-SHA256 - // // - ECDHE-ECDSA-AES256-GCM-SHA384 - // // - ECDHE-RSA-AES256-GCM-SHA384 - // // - ECDHE-ECDSA-CHACHA20-POLY1305 - // // - ECDHE-RSA-CHACHA20-POLY1305 // - // - DHE-RSA-AES128-GCM-SHA256 - // - // - DHE-RSA-AES256-GCM-SHA384 - // - // minTLSVersion: VersionTLS12 - // // +optional // +nullable Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"` - // modern is a TLS security profile based on: - // - // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility - // - // and looks like this (yaml): + + // modern is a TLS security profile for use with clients that support TLS 1.3 and + // do not need backward compatibility for older clients. // + // This profile is equivalent to a Custom profile specified as: + // minTLSVersion: VersionTLS13 // ciphers: - // // - TLS_AES_128_GCM_SHA256 - // // - TLS_AES_256_GCM_SHA384 - // // - TLS_CHACHA20_POLY1305_SHA256 // - // minTLSVersion: VersionTLS13 - // // +optional // +nullable Modern *ModernTLSProfile `json:"modern,omitempty"` + // custom is a user-defined TLS security profile. Be extremely careful using a custom // profile as invalid configurations can be catastrophic. 
An example custom profile // looks like this: // + // minTLSVersion: VersionTLS11 // ciphers: - // // - ECDHE-ECDSA-CHACHA20-POLY1305 - // // - ECDHE-RSA-CHACHA20-POLY1305 - // // - ECDHE-RSA-AES128-GCM-SHA256 - // // - ECDHE-ECDSA-AES128-GCM-SHA256 // - // minTLSVersion: VersionTLS11 - // // +optional // +nullable Custom *CustomTLSProfile `json:"custom,omitempty"` } -// OldTLSProfile is a TLS security profile based on: -// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility +// OldTLSProfile is a TLS security profile based on the "old" configuration of +// the Mozilla Server Side TLS configuration guidelines. type OldTLSProfile struct{} -// IntermediateTLSProfile is a TLS security profile based on: -// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 +// IntermediateTLSProfile is a TLS security profile based on the "intermediate" +// configuration of the Mozilla Server Side TLS configuration guidelines. type IntermediateTLSProfile struct{} -// ModernTLSProfile is a TLS security profile based on: -// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility +// ModernTLSProfile is a TLS security profile based on the "modern" configuration +// of the Mozilla Server Side TLS configuration guidelines. type ModernTLSProfile struct{} // CustomTLSProfile is a user-defined TLS security profile. Be extremely careful @@ -189,28 +126,33 @@ type CustomTLSProfile struct { type TLSProfileType string const ( - // Old is a TLS security profile based on: - // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + // TLSProfileOldType sets parameters based on the "old" configuration of + // the Mozilla Server Side TLS configuration guidelines. 
TLSProfileOldType TLSProfileType = "Old" - // Intermediate is a TLS security profile based on: - // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 + + // TLSProfileIntermediateType sets parameters based on the "intermediate" + // configuration of the Mozilla Server Side TLS configuration guidelines. TLSProfileIntermediateType TLSProfileType = "Intermediate" - // Modern is a TLS security profile based on: - // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + + // TLSProfileModernType sets parameters based on the "modern" configuration + // of the Mozilla Server Side TLS configuration guidelines. TLSProfileModernType TLSProfileType = "Modern" - // Custom is a TLS security profile that allows for user-defined parameters. + + // TLSProfileCustomType is a TLS security profile that allows for user-defined parameters. TLSProfileCustomType TLSProfileType = "Custom" ) // TLSProfileSpec is the desired behavior of a TLSSecurityProfile. type TLSProfileSpec struct { // ciphers is used to specify the cipher algorithms that are negotiated - // during the TLS handshake. Operators may remove entries their operands - // do not support. For example, to use DES-CBC3-SHA (yaml): + // during the TLS handshake. Operators may remove entries that their operands + // do not support. For example, to use only ECDHE-RSA-AES128-GCM-SHA256 (yaml): // // ciphers: - // - DES-CBC3-SHA + // - ECDHE-RSA-AES128-GCM-SHA256 // + // TLS 1.3 cipher suites (e.g. TLS_AES_128_GCM_SHA256) are not configurable + // and are always enabled when TLS 1.3 is negotiated. 
// +listType=atomic Ciphers []string `json:"ciphers"` // minTLSVersion is used to specify the minimal version of the TLS protocol @@ -219,8 +161,6 @@ type TLSProfileSpec struct { // // minTLSVersion: VersionTLS11 // - // NOTE: currently the highest minTLSVersion allowed is VersionTLS12 - // MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"` } @@ -245,11 +185,18 @@ const ( VersionTLS13 TLSProtocolVersion = "VersionTLS13" ) -// TLSProfiles Contains a map of TLSProfileType names to TLSProfileSpec. +// TLSProfiles contains a map of TLSProfileType names to TLSProfileSpec. +// +// These profiles are based on version 5.7 of the Mozilla Server Side TLS +// configuration guidelines. See: https://ssl-config.mozilla.org/guidelines/5.7.json +// +// Each Ciphers slice is the configuration's "ciphersuites" followed by the +// Go-specific "ciphers" from the guidelines JSON. // -// NOTE: The caller needs to make sure to check that these constants are valid for their binary. Not all -// entries map to values for all binaries. In the case of ties, the kube-apiserver wins. Do not fail, -// just be sure to whitelist only and everything will be ok. +// NOTE: The caller needs to make sure to check that these constants are valid +// for their binary. Not all entries map to values for all binaries. In the case +// of ties, the kube-apiserver wins. Do not fail, just be sure to include only +// valid entries and everything will be ok. 
var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ TLSProfileOldType: { Ciphers: []string{ @@ -262,23 +209,15 @@ var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-RSA-CHACHA20-POLY1305", - "DHE-RSA-AES128-GCM-SHA256", - "DHE-RSA-AES256-GCM-SHA384", - "DHE-RSA-CHACHA20-POLY1305", "ECDHE-ECDSA-AES128-SHA256", "ECDHE-RSA-AES128-SHA256", "ECDHE-ECDSA-AES128-SHA", "ECDHE-RSA-AES128-SHA", - "ECDHE-ECDSA-AES256-SHA384", - "ECDHE-RSA-AES256-SHA384", "ECDHE-ECDSA-AES256-SHA", "ECDHE-RSA-AES256-SHA", - "DHE-RSA-AES128-SHA256", - "DHE-RSA-AES256-SHA256", "AES128-GCM-SHA256", "AES256-GCM-SHA384", "AES128-SHA256", - "AES256-SHA256", "AES128-SHA", "AES256-SHA", "DES-CBC3-SHA", @@ -296,8 +235,6 @@ var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-RSA-CHACHA20-POLY1305", - "DHE-RSA-AES128-GCM-SHA256", - "DHE-RSA-AES256-GCM-SHA384", }, MinTLSVersion: VersionTLS12, }, diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index b013d4595e..a604d2f634 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 @@ -42,6 +42,11 @@ func (in *APIServer) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) { *out = *in + if in.KMS != nil { + in, out := &in.KMS, &out.KMS + *out = new(KMSConfig) + (*in).DeepCopyInto(*out) + } return } @@ -143,7 +148,7 @@ func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) { *out = make([]string, len(*in)) copy(*out, *in) } - out.Encryption = in.Encryption + in.Encryption.DeepCopyInto(&out.Encryption) if in.TLSSecurityProfile != nil { in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile *out = new(TLSSecurityProfile) @@ -211,6 +216,22 @@ func (in *AWSIngressSpec) DeepCopy() *AWSIngressSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSKMSConfig) DeepCopyInto(out *AWSKMSConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSKMSConfig. +func (in *AWSKMSConfig) DeepCopy() *AWSKMSConfig { + if in == nil { + return nil + } + out := new(AWSKMSConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) { *out = *in @@ -295,6 +316,22 @@ func (in *AWSServiceEndpoint) DeepCopy() *AWSServiceEndpoint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceptRisk) DeepCopyInto(out *AcceptRisk) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceptRisk. +func (in *AcceptRisk) DeepCopy() *AcceptRisk { + if in == nil { + return nil + } + out := new(AcceptRisk) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) { *out = *in @@ -595,6 +632,11 @@ func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) { *out = make([]AzureResourceTag, len(*in)) copy(*out, *in) } + if in.CloudLoadBalancerConfig != nil { + in, out := &in.CloudLoadBalancerConfig, &out.CloudLoadBalancerConfig + *out = new(CloudLoadBalancerConfig) + (*in).DeepCopyInto(*out) + } return } @@ -1003,6 +1045,112 @@ func (in *ClusterCondition) DeepCopy() *ClusterCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicy) DeepCopyInto(out *ClusterImagePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicy. +func (in *ClusterImagePolicy) DeepCopy() *ClusterImagePolicy { + if in == nil { + return nil + } + out := new(ClusterImagePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImagePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicyList) DeepCopyInto(out *ClusterImagePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterImagePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyList. 
+func (in *ClusterImagePolicyList) DeepCopy() *ClusterImagePolicyList { + if in == nil { + return nil + } + out := new(ClusterImagePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImagePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicySpec) DeepCopyInto(out *ClusterImagePolicySpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ImageScope, len(*in)) + copy(*out, *in) + } + in.Policy.DeepCopyInto(&out.Policy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicySpec. +func (in *ClusterImagePolicySpec) DeepCopy() *ClusterImagePolicySpec { + if in == nil { + return nil + } + out := new(ClusterImagePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicyStatus) DeepCopyInto(out *ClusterImagePolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyStatus. +func (in *ClusterImagePolicyStatus) DeepCopy() *ClusterImagePolicyStatus { + if in == nil { + return nil + } + out := new(ClusterImagePolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { *out = *in @@ -1261,7 +1409,7 @@ func (in *ClusterVersionSpec) DeepCopyInto(out *ClusterVersionSpec) { if in.DesiredUpdate != nil { in, out := &in.DesiredUpdate, &out.DesiredUpdate *out = new(Update) - **out = **in + (*in).DeepCopyInto(*out) } if in.Capabilities != nil { in, out := &in.Capabilities, &out.Capabilities @@ -1324,6 +1472,13 @@ func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ConditionalUpdateRisks != nil { + in, out := &in.ConditionalUpdateRisks, &out.ConditionalUpdateRisks + *out = make([]ConditionalUpdateRisk, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1412,6 +1567,11 @@ func (in *ComponentRouteStatus) DeepCopy() *ComponentRouteStatus { func (in *ConditionalUpdate) DeepCopyInto(out *ConditionalUpdate) { *out = *in in.Release.DeepCopyInto(&out.Release) + if in.RiskNames != nil { + in, out := &in.RiskNames, &out.RiskNames + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.Risks != nil { in, out := &in.Risks, &out.Risks *out = make([]ConditionalUpdateRisk, len(*in)) @@ -1442,6 +1602,13 @@ func (in *ConditionalUpdate) DeepCopy() *ConditionalUpdate { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConditionalUpdateRisk) DeepCopyInto(out *ConditionalUpdateRisk) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.MatchingRules != nil { in, out := &in.MatchingRules, &out.MatchingRules *out = make([]ClusterCondition, len(*in)) @@ -1604,6 +1771,27 @@ func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Custom) DeepCopyInto(out *Custom) { + *out = *in + if in.Configs != nil { + in, out := &in.Configs, &out.Configs + *out = make([]GathererConfig, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Custom. +func (in *Custom) DeepCopy() *Custom { + if in == nil { + return nil + } + out := new(Custom) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomFeatureGates) DeepCopyInto(out *CustomFeatureGates) { *out = *in @@ -2000,6 +2188,22 @@ func (in *ExternalPlatformStatus) DeepCopy() *ExternalPlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping. +func (in *ExtraMapping) DeepCopy() *ExtraMapping { + if in == nil { + return nil + } + out := new(ExtraMapping) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FeatureGate) DeepCopyInto(out *FeatureGate) { *out = *in @@ -2271,6 +2475,62 @@ func (in *GCPResourceTag) DeepCopy() *GCPResourceTag { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatherConfig) DeepCopyInto(out *GatherConfig) { + *out = *in + if in.DataPolicy != nil { + in, out := &in.DataPolicy, &out.DataPolicy + *out = make([]DataPolicyOption, len(*in)) + copy(*out, *in) + } + in.Gatherers.DeepCopyInto(&out.Gatherers) + out.Storage = in.Storage + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatherConfig. 
+func (in *GatherConfig) DeepCopy() *GatherConfig { + if in == nil { + return nil + } + out := new(GatherConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GathererConfig) DeepCopyInto(out *GathererConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GathererConfig. +func (in *GathererConfig) DeepCopy() *GathererConfig { + if in == nil { + return nil + } + out := new(GathererConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gatherers) DeepCopyInto(out *Gatherers) { + *out = *in + in.Custom.DeepCopyInto(&out.Custom) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gatherers. +func (in *Gatherers) DeepCopy() *Gatherers { + if in == nil { + return nil + } + out := new(Gatherers) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) { *out = *in @@ -2300,6 +2560,7 @@ func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GenericControllerConfig) DeepCopyInto(out *GenericControllerConfig) { *out = *in + out.TypeMeta = in.TypeMeta in.ServingInfo.DeepCopyInto(&out.ServingInfo) out.LeaderElection = in.LeaderElection out.Authentication = in.Authentication @@ -2450,6 +2711,11 @@ func (in *HubSourceStatus) DeepCopy() *HubSourceStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *IBMCloudPlatformSpec) DeepCopyInto(out *IBMCloudPlatformSpec) { *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]IBMCloudServiceEndpoint, len(*in)) + copy(*out, *in) + } return } @@ -2841,24 +3107,232 @@ func (in *ImageList) DeepCopyInto(out *ImageList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. -func (in *ImageList) DeepCopy() *ImageList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. +func (in *ImageList) DeepCopy() *ImageList { + if in == nil { + return nil + } + out := new(ImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicy) DeepCopyInto(out *ImagePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicy. +func (in *ImagePolicy) DeepCopy() *ImagePolicy { + if in == nil { + return nil + } + out := new(ImagePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePolicyFulcioCAWithRekorRootOfTrust) DeepCopyInto(out *ImagePolicyFulcioCAWithRekorRootOfTrust) { + *out = *in + if in.FulcioCAData != nil { + in, out := &in.FulcioCAData, &out.FulcioCAData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.FulcioSubject = in.FulcioSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyFulcioCAWithRekorRootOfTrust. +func (in *ImagePolicyFulcioCAWithRekorRootOfTrust) DeepCopy() *ImagePolicyFulcioCAWithRekorRootOfTrust { + if in == nil { + return nil + } + out := new(ImagePolicyFulcioCAWithRekorRootOfTrust) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyList) DeepCopyInto(out *ImagePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImagePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyList. +func (in *ImagePolicyList) DeepCopy() *ImagePolicyList { + if in == nil { + return nil + } + out := new(ImagePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePolicyPKIRootOfTrust) DeepCopyInto(out *ImagePolicyPKIRootOfTrust) { + *out = *in + if in.CertificateAuthorityRootsData != nil { + in, out := &in.CertificateAuthorityRootsData, &out.CertificateAuthorityRootsData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CertificateAuthorityIntermediatesData != nil { + in, out := &in.CertificateAuthorityIntermediatesData, &out.CertificateAuthorityIntermediatesData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.PKICertificateSubject = in.PKICertificateSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyPKIRootOfTrust. +func (in *ImagePolicyPKIRootOfTrust) DeepCopy() *ImagePolicyPKIRootOfTrust { + if in == nil { + return nil + } + out := new(ImagePolicyPKIRootOfTrust) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyPublicKeyRootOfTrust) DeepCopyInto(out *ImagePolicyPublicKeyRootOfTrust) { + *out = *in + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyPublicKeyRootOfTrust. +func (in *ImagePolicyPublicKeyRootOfTrust) DeepCopy() *ImagePolicyPublicKeyRootOfTrust { + if in == nil { + return nil + } + out := new(ImagePolicyPublicKeyRootOfTrust) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePolicySpec) DeepCopyInto(out *ImagePolicySpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ImageScope, len(*in)) + copy(*out, *in) + } + in.Policy.DeepCopyInto(&out.Policy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicySpec. +func (in *ImagePolicySpec) DeepCopy() *ImagePolicySpec { + if in == nil { + return nil + } + out := new(ImagePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyStatus) DeepCopyInto(out *ImagePolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyStatus. +func (in *ImagePolicyStatus) DeepCopy() *ImagePolicyStatus { + if in == nil { + return nil + } + out := new(ImagePolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSigstoreVerificationPolicy) DeepCopyInto(out *ImageSigstoreVerificationPolicy) { + *out = *in + in.RootOfTrust.DeepCopyInto(&out.RootOfTrust) + if in.SignedIdentity != nil { + in, out := &in.SignedIdentity, &out.SignedIdentity + *out = new(PolicyIdentity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSigstoreVerificationPolicy. 
+func (in *ImageSigstoreVerificationPolicy) DeepCopy() *ImageSigstoreVerificationPolicy { if in == nil { return nil } - out := new(ImageList) + out := new(ImageSigstoreVerificationPolicy) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { *out = *in @@ -3263,6 +3737,83 @@ func (in *IngressStatus) DeepCopy() *IngressStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGather) DeepCopyInto(out *InsightsDataGather) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGather. +func (in *InsightsDataGather) DeepCopy() *InsightsDataGather { + if in == nil { + return nil + } + out := new(InsightsDataGather) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsDataGather) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InsightsDataGatherList) DeepCopyInto(out *InsightsDataGatherList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InsightsDataGather, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherList. +func (in *InsightsDataGatherList) DeepCopy() *InsightsDataGatherList { + if in == nil { + return nil + } + out := new(InsightsDataGatherList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsDataGatherList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGatherSpec) DeepCopyInto(out *InsightsDataGatherSpec) { + *out = *in + in.GatherConfig.DeepCopyInto(&out.GatherConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherSpec. +func (in *InsightsDataGatherSpec) DeepCopy() *InsightsDataGatherSpec { + if in == nil { + return nil + } + out := new(InsightsDataGatherSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) { *out = *in @@ -3279,6 +3830,27 @@ func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KMSConfig) DeepCopyInto(out *KMSConfig) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSKMSConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfig. +func (in *KMSConfig) DeepCopy() *KMSConfig { + if in == nil { + return nil + } + out := new(KMSConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) { *out = *in @@ -4271,6 +4843,11 @@ func (in *OIDCProvider) DeepCopyInto(out *OIDCProvider) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.UserValidationRules != nil { + in, out := &in.UserValidationRules, &out.UserValidationRules + *out = make([]TokenUserValidationRule, len(*in)) + copy(*out, *in) + } return } @@ -4648,6 +5225,55 @@ func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PKICertificateSubject) DeepCopyInto(out *PKICertificateSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PKICertificateSubject. +func (in *PKICertificateSubject) DeepCopy() *PKICertificateSubject { + if in == nil { + return nil + } + out := new(PKICertificateSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimReference) DeepCopyInto(out *PersistentVolumeClaimReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimReference. 
+func (in *PersistentVolumeClaimReference) DeepCopy() *PersistentVolumeClaimReference { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeConfig) DeepCopyInto(out *PersistentVolumeConfig) { + *out = *in + out.Claim = in.Claim + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeConfig. +func (in *PersistentVolumeConfig) DeepCopy() *PersistentVolumeConfig { + if in == nil { + return nil + } + out := new(PersistentVolumeConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { *out = *in @@ -4689,7 +5315,7 @@ func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { if in.IBMCloud != nil { in, out := &in.IBMCloud, &out.IBMCloud *out = new(IBMCloudPlatformSpec) - **out = **in + (*in).DeepCopyInto(*out) } if in.Kubevirt != nil { in, out := &in.Kubevirt, &out.Kubevirt @@ -4820,6 +5446,111 @@ func (in *PlatformStatus) DeepCopy() *PlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyFulcioSubject) DeepCopyInto(out *PolicyFulcioSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyFulcioSubject. +func (in *PolicyFulcioSubject) DeepCopy() *PolicyFulcioSubject { + if in == nil { + return nil + } + out := new(PolicyFulcioSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyIdentity) DeepCopyInto(out *PolicyIdentity) { + *out = *in + if in.PolicyMatchExactRepository != nil { + in, out := &in.PolicyMatchExactRepository, &out.PolicyMatchExactRepository + *out = new(PolicyMatchExactRepository) + **out = **in + } + if in.PolicyMatchRemapIdentity != nil { + in, out := &in.PolicyMatchRemapIdentity, &out.PolicyMatchRemapIdentity + *out = new(PolicyMatchRemapIdentity) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyIdentity. +func (in *PolicyIdentity) DeepCopy() *PolicyIdentity { + if in == nil { + return nil + } + out := new(PolicyIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMatchExactRepository) DeepCopyInto(out *PolicyMatchExactRepository) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchExactRepository. +func (in *PolicyMatchExactRepository) DeepCopy() *PolicyMatchExactRepository { + if in == nil { + return nil + } + out := new(PolicyMatchExactRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMatchRemapIdentity) DeepCopyInto(out *PolicyMatchRemapIdentity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchRemapIdentity. +func (in *PolicyMatchRemapIdentity) DeepCopy() *PolicyMatchRemapIdentity { + if in == nil { + return nil + } + out := new(PolicyMatchRemapIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyRootOfTrust) DeepCopyInto(out *PolicyRootOfTrust) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(ImagePolicyPublicKeyRootOfTrust) + (*in).DeepCopyInto(*out) + } + if in.FulcioCAWithRekor != nil { + in, out := &in.FulcioCAWithRekor, &out.FulcioCAWithRekor + *out = new(ImagePolicyFulcioCAWithRekorRootOfTrust) + (*in).DeepCopyInto(*out) + } + if in.PKI != nil { + in, out := &in.PKI, &out.PKI + *out = new(ImagePolicyPKIRootOfTrust) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRootOfTrust. +func (in *PolicyRootOfTrust) DeepCopy() *PolicyRootOfTrust { + if in == nil { + return nil + } + out := new(PolicyRootOfTrust) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) { *out = *in @@ -5457,6 +6188,23 @@ func (in *SignatureStore) DeepCopy() *SignatureStore { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + out.PersistentVolume = in.PersistentVolume + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StringSource) DeepCopyInto(out *StringSource) { *out = *in @@ -5659,6 +6407,16 @@ func (in *TokenClaimMappings) DeepCopyInto(out *TokenClaimMappings) { *out = *in in.Username.DeepCopyInto(&out.Username) out.Groups = in.Groups + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(TokenClaimOrExpressionMapping) + **out = **in + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make([]ExtraMapping, len(*in)) + copy(*out, *in) + } return } @@ -5672,6 +6430,38 @@ func (in *TokenClaimMappings) DeepCopy() *TokenClaimMappings { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimOrExpressionMapping) DeepCopyInto(out *TokenClaimOrExpressionMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimOrExpressionMapping. +func (in *TokenClaimOrExpressionMapping) DeepCopy() *TokenClaimOrExpressionMapping { + if in == nil { + return nil + } + out := new(TokenClaimOrExpressionMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimValidationCELRule) DeepCopyInto(out *TokenClaimValidationCELRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimValidationCELRule. +func (in *TokenClaimValidationCELRule) DeepCopy() *TokenClaimValidationCELRule { + if in == nil { + return nil + } + out := new(TokenClaimValidationCELRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) { *out = *in @@ -5680,6 +6470,7 @@ func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) *out = new(TokenRequiredClaim) **out = **in } + out.CEL = in.CEL return } @@ -5752,9 +6543,30 @@ func (in *TokenRequiredClaim) DeepCopy() *TokenRequiredClaim { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenUserValidationRule) DeepCopyInto(out *TokenUserValidationRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenUserValidationRule. +func (in *TokenUserValidationRule) DeepCopy() *TokenUserValidationRule { + if in == nil { + return nil + } + out := new(TokenUserValidationRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Update) DeepCopyInto(out *Update) { *out = *in + if in.AcceptRisks != nil { + in, out := &in.AcceptRisks, &out.AcceptRisks + *out = make([]AcceptRisk, len(*in)) + copy(*out, *in) + } return } @@ -5792,7 +6604,6 @@ func (in *UpdateHistory) DeepCopy() *UpdateHistory { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *UsernameClaimMapping) DeepCopyInto(out *UsernameClaimMapping) { *out = *in - out.TokenClaimMapping = in.TokenClaimMapping if in.Prefix != nil { in, out := &in.Prefix, &out.Prefix *out = new(UsernamePrefix) diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index b2bc82e1a9..173dd1daf2 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -5,7 +5,10 @@ apiservers.config.openshift.io: CRDName: apiservers.config.openshift.io Capability: "" Category: "" - FeatureGates: [] + FeatureGates: + - KMSEncryption + - KMSEncryptionProvider + - TLSAdherence FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" @@ -29,6 +32,8 @@ authentications.config.openshift.io: Category: "" FeatureGates: - ExternalOIDC + - ExternalOIDCWithUIDAndExtraClaimMappings + - ExternalOIDCWithUpstreamParity FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" @@ -64,6 +69,30 @@ builds.config.openshift.io: TopLevelFeatureGates: [] Version: v1 +clusterimagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2310 + CRDName: clusterimagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SigstoreImageVerification + - SigstoreImageVerificationPKI + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterImagePolicy + Labels: {} + PluralName: clusterimagepolicies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1 + clusteroperators.config.openshift.io: Annotations: 
include.release.openshift.io/self-managed-high-availability: "true" @@ -115,6 +144,8 @@ clusterversions.config.openshift.io: Capability: "" Category: "" FeatureGates: + - ClusterUpdateAcceptRisks + - ClusterUpdatePreflight - ImageStreamImportMode - SignatureStores FilenameOperatorName: cluster-version-operator @@ -175,7 +206,8 @@ dnses.config.openshift.io: CRDName: dnses.config.openshift.io Capability: "" Category: "" - FeatureGates: [] + FeatureGates: + - AWSEuropeanSovereignCloudInstall FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" @@ -280,6 +312,30 @@ imagedigestmirrorsets.config.openshift.io: TopLevelFeatureGates: [] Version: v1 +imagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2310 + CRDName: imagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SigstoreImageVerification + - SigstoreImageVerificationPKI + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImagePolicy + Labels: {} + PluralName: imagepolicies + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1 + imagetagmirrorsets.config.openshift.io: Annotations: release.openshift.io/bootstrap-required: "true" @@ -311,15 +367,17 @@ infrastructures.config.openshift.io: Capability: "" Category: "" FeatureGates: - - AWSClusterHostedDNS - - BareMetalLoadBalancer - - GCPClusterHostedDNS - - GCPLabelsTags + - AWSClusterHostedDNSInstall + - AWSDualStackInstall + - AzureClusterHostedDNSInstall + - AzureDualStackInstall + - DualReplica + - DyanmicServiceEndpointIBMCloud + - GCPClusterHostedDNSInstall - NutanixMultiSubnets - - VSphereControlPlaneMachineSet + - OnPremDNSRecords - VSphereHostVMGroupZonal - VSphereMultiNetworks - - VSphereMultiVCenters FilenameOperatorName: config-operator 
FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" @@ -356,6 +414,29 @@ ingresses.config.openshift.io: TopLevelFeatureGates: [] Version: v1 +insightsdatagathers.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2448 + CRDName: insightsdatagathers.config.openshift.io + Capability: Insights + Category: "" + FeatureGates: + - InsightsConfig + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: false + KindName: InsightsDataGather + Labels: {} + PluralName: insightsdatagathers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - InsightsConfig + Version: v1 + networks.config.openshift.io: Annotations: release.openshift.io/bootstrap-required: "true" @@ -363,8 +444,7 @@ networks.config.openshift.io: CRDName: networks.config.openshift.io Capability: "" Category: "" - FeatureGates: - - NetworkDiagnosticsConfig + FeatureGates: [] FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" @@ -497,7 +577,7 @@ schedulers.config.openshift.io: Capability: "" Category: "" FeatureGates: - - DynamicResourceAllocation + - HyperShiftOnlyDynamicResourceAllocation FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index a89678edb9..4a5346dba8 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -22,8 +22,8 @@ func (AdmissionConfig) SwaggerDoc() map[string]string { var map_AdmissionPluginConfig = map[string]string{ "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins", - "location": 
"Location is the path to a configuration file that contains the plugin's configuration", - "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", + "location": "location is the path to a configuration file that contains the plugin's configuration", + "configuration": "configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", } func (AdmissionPluginConfig) SwaggerDoc() map[string]string { @@ -37,8 +37,8 @@ var map_AuditConfig = map[string]string{ "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.", "maximumRetainedFiles": "Maximum number of old log files to retain.", "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.", - "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.", - "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", + "policyFile": "policyFile is a path to the file that defines the audit policy configuration.", + "policyConfiguration": "policyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. 
If present, it will be used instead of the path to the policy file.", "logFormat": "Format of saved audits (legacy or json).", "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.", "webHookMode": "Strategy for sending audit events (block or batch).", @@ -50,8 +50,8 @@ func (AuditConfig) SwaggerDoc() map[string]string { var map_CertInfo = map[string]string{ "": "CertInfo relates a certificate with a private key", - "certFile": "CertFile is a file containing a PEM-encoded certificate", - "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "certFile": "certFile is a file containing a PEM-encoded certificate", + "keyFile": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", } func (CertInfo) SwaggerDoc() map[string]string { @@ -71,7 +71,7 @@ func (ClientConnectionOverrides) SwaggerDoc() map[string]string { var map_ConfigMapFileReference = map[string]string{ "": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.", - "key": "Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.", + "key": "key allows pointing to a specific key/value inside of the configmap. 
This is useful for logical file references.", } func (ConfigMapFileReference) SwaggerDoc() map[string]string { @@ -107,8 +107,8 @@ func (DelegatedAuthorization) SwaggerDoc() map[string]string { var map_EtcdConnectionInfo = map[string]string{ "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server", - "urls": "URLs are the URLs for etcd", - "ca": "CA is a file containing trusted roots for the etcd server certificates", + "urls": "urls are the URLs for etcd", + "ca": "ca is a file containing trusted roots for the etcd server certificates", } func (EtcdConnectionInfo) SwaggerDoc() map[string]string { @@ -116,7 +116,7 @@ func (EtcdConnectionInfo) SwaggerDoc() map[string]string { } var map_EtcdStorageConfig = map[string]string{ - "storagePrefix": "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.", + "storagePrefix": "storagePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.", } func (EtcdStorageConfig) SwaggerDoc() map[string]string { @@ -137,8 +137,8 @@ func (GenericAPIServerConfig) SwaggerDoc() map[string]string { } var map_GenericControllerConfig = map[string]string{ - "": "GenericControllerConfig provides information to configure a controller", - "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints", + "": "GenericControllerConfig provides information to configure a controller\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "servingInfo": "servingInfo is the HTTP serving information for the controller's endpoints", "leaderElection": "leaderElection provides information to elect a leader. 
Only override this if you have a specific need", "authentication": "authentication allows configuration of authentication for the endpoints", "authorization": "authorization allows configuration of authentication for the endpoints", @@ -150,8 +150,8 @@ func (GenericControllerConfig) SwaggerDoc() map[string]string { var map_HTTPServingInfo = map[string]string{ "": "HTTPServingInfo holds configuration for serving HTTP", - "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", - "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", + "maxRequestsInFlight": "maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", + "requestTimeoutSeconds": "requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", } func (HTTPServingInfo) SwaggerDoc() map[string]string { @@ -193,7 +193,7 @@ func (MaxAgePolicy) SwaggerDoc() map[string]string { var map_NamedCertificate = map[string]string{ "": "NamedCertificate specifies a certificate/key, and the names it should be served for", - "names": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", + "names": "names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", } func (NamedCertificate) SwaggerDoc() map[string]string { @@ -202,8 +202,8 @@ func (NamedCertificate) SwaggerDoc() map[string]string { var map_RemoteConnectionInfo = map[string]string{ "": "RemoteConnectionInfo holds information necessary for establishing a remote connection", - "url": "URL is the remote URL to connect to", - "ca": "CA is the CA for verifying TLS connections", + "url": 
"url is the remote URL to connect to", + "ca": "ca is the CA for verifying TLS connections", } func (RemoteConnectionInfo) SwaggerDoc() map[string]string { @@ -233,12 +233,12 @@ func (SecretNameReference) SwaggerDoc() map[string]string { var map_ServingInfo = map[string]string{ "": "ServingInfo holds information about serving web pages", - "bindAddress": "BindAddress is the ip:port to serve on", - "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", - "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", - "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", - "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", - "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", + "bindAddress": "bindAddress is the ip:port to serve on", + "bindNetwork": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "clientCA": "clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "namedCertificates": "namedCertificates is a list of certificates to use to secure requests to specific hostnames", + "minTLSVersion": "minTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "cipherSuites": "cipherSuites contains an overridden list of ciphers for the server to support. 
Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", } func (ServingInfo) SwaggerDoc() map[string]string { @@ -255,10 +255,10 @@ func (StringSource) SwaggerDoc() map[string]string { var map_StringSourceSpec = map[string]string{ "": "StringSourceSpec specifies a string value, or external location", - "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", - "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", - "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", - "keyFile": "KeyFile references a file containing the key to use to decrypt the value.", + "value": "value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "env": "env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "file": "file references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "keyFile": "keyFile references a file containing the key to use to decrypt the value.", } func (StringSourceSpec) SwaggerDoc() map[string]string { @@ -277,7 +277,9 @@ func (APIServer) SwaggerDoc() map[string]string { } var map_APIServerEncryption = map[string]string{ + "": "APIServerEncryption is used to encrypt sensitive resources on the cluster.", "type": "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices.\n\nWhen encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. 
The current authoritative list is:\n\n 1. secrets\n 2. configmaps\n 3. routes.route.openshift.io\n 4. oauthaccesstokens.oauth.openshift.io\n 5. oauthauthorizetokens.oauth.openshift.io", + "kms": "kms defines the configuration for the external KMS instance that manages the encryption keys, when KMS encryption is enabled sensitive resources will be encrypted using keys managed by an externally configured KMS instance.\n\nThe Key Management Service (KMS) instance provides symmetric encryption and is responsible for managing the lifecyle of the encryption keys outside of the control plane. This allows integration with an external provider to manage the data encryption keys securely.", } func (APIServerEncryption) SwaggerDoc() map[string]string { @@ -316,7 +318,8 @@ var map_APIServerSpec = map[string]string{ "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.", "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. 
The values are regular expressions that correspond to the Golang regular expression language.", "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.", - "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available minTLSVersion is VersionTLS12.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nWhen omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is the Intermediate profile.", + "tlsAdherence": "tlsAdherence controls if components in the cluster adhere to the TLS security profile configured on this APIServer resource.\n\nValid values are \"LegacyAdheringComponentsOnly\" and \"StrictAllComponents\".\n\nWhen set to \"LegacyAdheringComponentsOnly\", components that already honor the cluster-wide TLS profile continue to do so. Components that do not already honor it continue to use their individual TLS configurations.\n\nWhen set to \"StrictAllComponents\", all components must honor the configured TLS profile unless they have a component-specific TLS configuration that overrides it. This mode is recommended for security-conscious deployments and is required for certain compliance frameworks.\n\nNote: Some components such as Kubelet and IngressController have their own dedicated TLS configuration mechanisms via KubeletConfig and IngressController CRs respectively. When these component-specific TLS configurations are set, they take precedence over the cluster-wide tlsSecurityProfile. 
When not set, these components fall back to the cluster-wide default.\n\nComponents that encounter an unknown value for tlsAdherence should treat it as \"StrictAllComponents\" and log a warning to ensure forward compatibility while defaulting to the more secure behavior.\n\nThis field is optional. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is LegacyAdheringComponentsOnly.\n\nOnce set, this field may be changed to a different value, but may not be removed.", "audit": "audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster.", } @@ -369,7 +372,7 @@ var map_AuthenticationSpec = map[string]string{ "webhookTokenAuthenticators": "webhookTokenAuthenticators is DEPRECATED, setting it has no effect.", "webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.\n\nCan only be set if \"Type\" is set to \"None\".", "serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. 
This allows internal components to transition to use new service account issuer without service distruption.", - "oidcProviders": "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.", + "oidcProviders": "oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.", } func (AuthenticationSpec) SwaggerDoc() map[string]string { @@ -378,7 +381,7 @@ func (AuthenticationSpec) SwaggerDoc() map[string]string { var map_AuthenticationStatus = map[string]string{ "integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. 
The namespace for this config map is openshift-config-managed.", - "oidcClients": "OIDCClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.", + "oidcClients": "oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.", } func (AuthenticationStatus) SwaggerDoc() map[string]string { @@ -394,12 +397,23 @@ func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { return map_DeprecatedWebhookTokenAuthenticator } +var map_ExtraMapping = map[string]string{ + "": "ExtraMapping allows specifying a key and CEL expression to evaluate the keys' value. It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token.", + "key": "key is a required field that specifies the string to use as the extra attribute key.\n\nkey must be a domain-prefix path (e.g 'example.org/foo'). key must not exceed 510 characters in length. key must contain the '/' character, separating the domain and path characters. key must not be empty.\n\nThe domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain. It must not exceed 253 characters in length. It must start and end with an alphanumeric character. It must only contain lower case alphanumeric characters and '-' or '.'. It must not use the reserved domains, or be subdomains of, \"kubernetes.io\", \"k8s.io\", and \"openshift.io\".\n\nThe path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. 
It must not exceed 256 characters in length.", + "valueExpression": "valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. valueExpression must produce a string or string array value. \"\", [], and null are treated as the extra mapping not being present. Empty string values within an array are filtered out.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nvalueExpression must not exceed 1024 characters in length. valueExpression must not be empty.", +} + +func (ExtraMapping) SwaggerDoc() map[string]string { + return map_ExtraMapping +} + var map_OIDCClientConfig = map[string]string{ - "componentName": "ComponentName is the name of the component that is supposed to consume this client configuration", - "componentNamespace": "ComponentNamespace is the namespace of the component that is supposed to consume this client configuration", - "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider", - "clientSecret": "ClientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field", - "extraScopes": "ExtraScopes is an optional set of scopes to request tokens with.", + "": "OIDCClientConfig configures how platform clients interact with identity providers as an authentication method.", + "componentName": "componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode.\n\nIt is used in combination with componentNamespace as a unique identifier.\n\ncomponentName must not be an empty string (\"\") and must not exceed 256 characters in length.", + "componentNamespace": 
"componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running.\n\nIt is used in combination with componentName as a unique identifier.\n\ncomponentNamespace must not be an empty string (\"\") and must not exceed 63 characters in length.", + "clientID": "clientID is a required field that configures the client identifier, from the identity provider, that the platform component uses for authentication requests made to the identity provider. The identity provider must accept this identifier for platform components to be able to use the identity provider as an authentication mode.\n\nclientID must not be an empty string (\"\").", + "clientSecret": "clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider.\n\nWhen not specified, no client secret will be used when making authentication requests to the identity provider.\n\nWhen specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field.\n\nThe client secret will be used when making authentication requests to the identity provider.\n\nPublic clients do not require a client secret but private clients do require a client secret to work with the identity provider.", + "extraScopes": "extraScopes is an optional field that configures the extra scopes that should be requested by the platform component when making authentication requests to the identity provider. 
This is useful if you have configured claim mappings that require specific scopes to be requested beyond the standard OIDC scopes.\n\nWhen omitted, no additional scopes are requested.", } func (OIDCClientConfig) SwaggerDoc() map[string]string { @@ -407,9 +421,10 @@ func (OIDCClientConfig) SwaggerDoc() map[string]string { } var map_OIDCClientReference = map[string]string{ - "oidcProviderName": "OIDCName refers to the `name` of the provider from `oidcProviders`", - "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.", - "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider", + "": "OIDCClientReference is a reference to a platform component client configuration.", + "oidcProviderName": "oidcProviderName is a required reference to the 'name' of the identity provider configured in 'oidcProviders' that this client is associated with.\n\noidcProviderName must not be an empty string (\"\").", + "issuerURL": "issuerURL is a required field that specifies the URL of the identity provider that this client is configured to make requests against.\n\nissuerURL must use the 'https' scheme.", + "clientID": "clientID is a required field that specifies the client identifier, from the identity provider, that the platform component is using for authentication requests made to the identity provider.\n\nclientID must not be empty.", } func (OIDCClientReference) SwaggerDoc() map[string]string { @@ -417,11 +432,12 @@ func (OIDCClientReference) SwaggerDoc() map[string]string { } var map_OIDCClientStatus = map[string]string{ - "componentName": "ComponentName is the name of the component that will consume a client configuration.", - "componentNamespace": "ComponentNamespace is the namespace of the component that will consume a client configuration.", - "currentOIDCClients": "CurrentOIDCClients is a list of clients that the component is currently using.", - "consumingUsers": "ConsumingUsers is a slice of ServiceAccounts that need 
to have read permission on the `clientSecret` secret.", - "conditions": "Conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", + "": "OIDCClientStatus represents the current state of platform components and how they interact with the configured identity providers.", + "componentName": "componentName is a required field that specifies the name of the platform component using the identity provider as an authentication mode. It is used in combination with componentNamespace as a unique identifier.\n\ncomponentName must not be an empty string (\"\") and must not exceed 256 characters in length.", + "componentNamespace": "componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running.\n\nIt is used in combination with componentName as a unique identifier.\n\ncomponentNamespace must not be an empty string (\"\") and must not exceed 63 characters in length.", + "currentOIDCClients": "currentOIDCClients is an optional list of clients that the component is currently using.\n\nEntries must have unique issuerURL/clientID pairs.", + "consumingUsers": "consumingUsers is an optional list of ServiceAccounts requiring read permissions on the `clientSecret` secret.\n\nconsumingUsers must not exceed 5 entries.", + "conditions": "conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. 
If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", } func (OIDCClientStatus) SwaggerDoc() map[string]string { @@ -429,11 +445,12 @@ func (OIDCClientStatus) SwaggerDoc() map[string]string { } var map_OIDCProvider = map[string]string{ - "name": "Name of the OIDC provider", - "issuer": "Issuer describes atributes of the OIDC token issuer", - "oidcClients": "OIDCClients contains configuration for the platform's clients that need to request tokens from the issuer", - "claimMappings": "ClaimMappings describes rules on how to transform information from an ID token into a cluster identity", - "claimValidationRules": "ClaimValidationRules are rules that are applied to validate token claims to authenticate users.", + "name": "name is a required field that configures the unique human-readable identifier associated with the identity provider. It is used to distinguish between multiple identity providers and has no impact on token validation or authentication mechanics.\n\nname must not be an empty string (\"\").", + "issuer": "issuer is a required field that configures how the platform interacts with the identity provider and how tokens issued from the identity provider are evaluated by the Kubernetes API server.", + "oidcClients": "oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. 
oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs.", + "claimMappings": "claimMappings is a required field that configures the rules to be used by the Kubernetes API server for translating claims in a JWT token, issued by the identity provider, to a cluster identity.", + "claimValidationRules": "claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider.\n\nValidation rules are joined via an AND operation.", + "userValidationRules": "userValidationRules is an optional field that configures the set of rules used to validate the cluster user identity that was constructed via mapping token claims to user identity attributes. Rules are CEL expressions that must evaluate to 'true' for authentication to succeed. If any rule in the chain of rules evaluates to 'false', authentication will fail. When specified, at least one rule must be specified and no more than 64 rules may be specified.", } func (OIDCProvider) SwaggerDoc() map[string]string { @@ -441,7 +458,8 @@ func (OIDCProvider) SwaggerDoc() map[string]string { } var map_PrefixedClaimMapping = map[string]string{ - "prefix": "Prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", + "": "PrefixedClaimMapping configures a claim mapping that allows for an optional prefix.", + "prefix": "prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes.\n\nWhen omitted or set to an empty string (\"\"), no prefix is applied to the cluster identity attribute. 
Must not be set to a non-empty value when expression is set.\n\nExample: if `prefix` is set to \"myoidc:\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", } func (PrefixedClaimMapping) SwaggerDoc() map[string]string { @@ -449,7 +467,9 @@ func (PrefixedClaimMapping) SwaggerDoc() map[string]string { } var map_TokenClaimMapping = map[string]string{ - "claim": "Claim is a JWT token claim to be used in the mapping", + "": "TokenClaimMapping allows specifying a JWT token claim to be used when mapping claims from an authentication token to cluster identities.", + "claim": "claim is an optional field for specifying the JWT token claim that is used in the mapping. The value of this claim will be assigned to the field in which this mapping is associated. claim must not exceed 256 characters in length. When set to the empty string `\"\"`, this means that no named claim should be used for the group mapping. claim is required when the ExternalOIDCWithUpstreamParity feature gate is not enabled.", + "expression": "expression is an optional CEL expression used to derive group values from JWT claims.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'.\n\nexpression must be at least 1 character and must not exceed 1024 characters in length.\n\nWhen specified, claim must not be set or be explicitly set to the empty string (`\"\"`).", } func (TokenClaimMapping) SwaggerDoc() map[string]string { @@ -457,17 +477,40 @@ func (TokenClaimMapping) SwaggerDoc() map[string]string { } var map_TokenClaimMappings = map[string]string{ - "username": "Username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"", - "groups": "Groups is a name of the claim that should be used to construct groups for the cluster identity. 
The referenced claim must use array of strings values.", + "username": "username is a required field that configures how the username of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider.", + "groups": "groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider.\n\nWhen referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (',').\n\nFor example - '\"example\"' and '\"exampleOne\", \"exampleTwo\", \"exampleThree\"' are valid claim values.", + "uid": "uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity.\n\nWhen using uid.claim to specify the claim it must be a single string value. When using uid.expression the expression must result in a single string value.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time.\n\nThe current default is to use the 'sub' claim.", + "extra": "extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity.\n\nkey values for extra mappings must be unique. A maximum of 32 extra attribute mappings may be provided.", } func (TokenClaimMappings) SwaggerDoc() map[string]string { return map_TokenClaimMappings } +var map_TokenClaimOrExpressionMapping = map[string]string{ + "": "TokenClaimOrExpressionMapping allows specifying either a JWT token claim or CEL expression to be used when mapping claims from an authentication token to cluster identities.", + "claim": "claim is an optional field for specifying the JWT token claim that is used in the mapping. 
The value of this claim will be assigned to the field in which this mapping is associated.\n\nPrecisely one of claim or expression must be set. claim must not be specified when expression is set. When specified, claim must be at least 1 character in length and must not exceed 256 characters in length.", + "expression": "expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nPrecisely one of claim or expression must be set. expression must not be specified when claim is set. When specified, expression must be at least 1 character in length and must not exceed 1024 characters in length.", +} + +func (TokenClaimOrExpressionMapping) SwaggerDoc() map[string]string { + return map_TokenClaimOrExpressionMapping +} + +var map_TokenClaimValidationCELRule = map[string]string{ + "expression": "expression is a CEL expression evaluated against token claims. expression is required, must be at least 1 character in length and must not exceed 1024 characters. The expression must return a boolean value where 'true' signals a valid token and 'false' an invalid one.", + "message": "message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. 
message must be at least 1 character in length and must not exceed 256 characters.", +} + +func (TokenClaimValidationCELRule) SwaggerDoc() map[string]string { + return map_TokenClaimValidationCELRule +} + var map_TokenClaimValidationRule = map[string]string{ - "type": "Type sets the type of the validation rule", - "requiredClaim": "RequiredClaim allows configuring a required claim name and its expected value", + "": "TokenClaimValidationRule represents a validation rule based on token claims. If type is RequiredClaim, requiredClaim must be set. If Type is CEL, CEL must be set and RequiredClaim must be omitted.", + "type": "type is an optional field that configures the type of the validation rule.\n\nAllowed values are \"RequiredClaim\" and \"CEL\".\n\nWhen set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value.\n\nWhen set to 'CEL', the Kubernetes API server will be configured to validate the incoming JWT against the configured CEL expression.", + "requiredClaim": "requiredClaim allows configuring a required claim name and its expected value. This field is required when `type` is set to RequiredClaim, and must be omitted when `type` is set to any other value. The Kubernetes API server uses this field to validate if an incoming JWT is valid for this identity provider.", + "cel": "cel holds the CEL expression and message for validation. Must be set when Type is \"CEL\", and forbidden otherwise.", } func (TokenClaimValidationRule) SwaggerDoc() map[string]string { @@ -475,9 +518,10 @@ func (TokenClaimValidationRule) SwaggerDoc() map[string]string { } var map_TokenIssuer = map[string]string{ - "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.", - "audiences": "Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. 
Must be set to exactly one value.", - "issuerCertificateAuthority": "CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the \"ca-bundle.crt\" key. If unset, system trust is used instead.", + "issuerURL": "issuerURL is a required field that configures the URL used to issue tokens by the identity provider. The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers.\n\nMust be at least 1 character and must not exceed 512 characters in length. Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user.", + "audiences": "audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. At least one of the entries must match the 'aud' claim in the JWT token.\n\naudiences must contain at least one entry and must not exceed ten entries.", + "issuerCertificateAuthority": "issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information.\n\nWhen not specified, the system trust is used.\n\nWhen specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap.", + "discoveryURL": "discoveryURL is an optional field that, if specified, overrides the default discovery endpoint used to retrieve OIDC configuration metadata. By default, the discovery URL is derived from `issuerURL` as \"{issuerURL}/.well-known/openid-configuration\".\n\nThe discoveryURL must be a valid absolute HTTPS URL. It must not contain query parameters, user information, or fragments. 
Additionally, it must differ from the value of `issuerURL` (ignoring trailing slashes). The discoveryURL value must be at least 1 character long and no longer than 2048 characters.", } func (TokenIssuer) SwaggerDoc() map[string]string { @@ -485,22 +529,44 @@ func (TokenIssuer) SwaggerDoc() map[string]string { } var map_TokenRequiredClaim = map[string]string{ - "claim": "Claim is a name of a required claim. Only claims with string values are supported.", - "requiredValue": "RequiredValue is the required value for the claim.", + "claim": "claim is a required field that configures the name of the required claim. When taken from the JWT claims, claim must be a string value.\n\nclaim must not be an empty string (\"\").", + "requiredValue": "requiredValue is a required field that configures the value that 'claim' must have when taken from the incoming JWT claims. If the value in the JWT claims does not match, the token will be rejected for authentication.\n\nrequiredValue must not be an empty string (\"\").", } func (TokenRequiredClaim) SwaggerDoc() map[string]string { return map_TokenRequiredClaim } +var map_TokenUserValidationRule = map[string]string{ + "": "TokenUserValidationRule provides a CEL-based rule used to validate a token subject. Each rule contains a CEL expression that is evaluated against the token’s claims.", + "expression": "expression is a required CEL expression that performs a validation on cluster user identity attributes like username, groups, etc.\n\nThe expression must evaluate to a boolean value. When the expression evaluates to 'true', the cluster user identity is considered valid. When the expression evaluates to 'false', the cluster user identity is not considered valid. expression must be at least 1 character in length and must not exceed 1024 characters.", + "message": "message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. 
message must be at least 1 character in length and must not exceed 256 characters.", +} + +func (TokenUserValidationRule) SwaggerDoc() map[string]string { + return map_TokenUserValidationRule +} + var map_UsernameClaimMapping = map[string]string{ - "prefixPolicy": "PrefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"", + "claim": "claim is an optional field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping. claim is required when the ExternalOIDCWithUpstreamParity feature gate is not enabled. When the ExternalOIDCWithUpstreamParity feature gate is enabled, claim must not be set when expression is set.\n\nclaim must not be an empty string (\"\") and must not exceed 256 characters.", + "expression": "expression is an optional CEL expression used to derive the username from JWT claims.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'.\n\nexpression must be at least 1 character and must not exceed 1024 characters in length. 
expression must not be set when claim is set.", + "prefixPolicy": "prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field.\n\nAllowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string).\n\nWhen set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. The prefix field must be set when prefixPolicy is 'Prefix'. Must not be set to 'Prefix' when expression is set. When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'.\n\nAs an example, consider the following scenario:\n\n `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n - \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n - \"email\": the mapped value will be \"userA@myoidc.tld\"", + "prefix": "prefix configures the prefix that should be prepended to the value of the JWT claim.\n\nprefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise.", } func (UsernameClaimMapping) SwaggerDoc() map[string]string { return map_UsernameClaimMapping } +var map_UsernamePrefix = map[string]string{ + "": "UsernamePrefix configures the string that should be used as a prefix for username claim mappings.", + "prefixString": "prefixString is a required field that configures the prefix that will be applied to cluster identity username attribute during the process of mapping JWT claims to cluster identity attributes.\n\nprefixString must not be an empty string (\"\").", +} + +func (UsernamePrefix) SwaggerDoc() map[string]string { + return 
map_UsernamePrefix +} + var map_WebhookTokenAuthenticator = map[string]string{ "": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator", "kubeConfig": "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config.\n\nFor further details, see:\n\nhttps://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication\n\nThe key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored.", @@ -513,7 +579,7 @@ func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { var map_Build = map[string]string{ "": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec holds user-settable values for the build controller configuration", + "spec": "spec holds user-settable values for the build controller configuration", } func (Build) SwaggerDoc() map[string]string { @@ -521,11 +587,11 @@ func (Build) SwaggerDoc() map[string]string { } var map_BuildDefaults = map[string]string{ - "defaultProxy": "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.", - "gitProxy": "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.", - "env": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", - "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", - "resources": "Resources defines resource requirements to execute the build.", + "defaultProxy": "defaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.", + "gitProxy": "gitProxy contains the proxy settings for git operations only. 
If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.", + "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", + "imageLabels": "imageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", + "resources": "resources defines resource requirements to execute the build.", } func (BuildDefaults) SwaggerDoc() map[string]string { @@ -542,10 +608,10 @@ func (BuildList) SwaggerDoc() map[string]string { } var map_BuildOverrides = map[string]string{ - "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", - "nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node", - "tolerations": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", - "forcePull": "ForcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself", + "imageLabels": "imageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", + "tolerations": "tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", + "forcePull": "forcePull overrides, if set, the equivalent value in the builds, i.e. 
false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself", } func (BuildOverrides) SwaggerDoc() map[string]string { @@ -553,9 +619,9 @@ func (BuildOverrides) SwaggerDoc() map[string]string { } var map_BuildSpec = map[string]string{ - "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.", - "buildDefaults": "BuildDefaults controls the default information for Builds", - "buildOverrides": "BuildOverrides controls override settings for builds", + "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.", + "buildDefaults": "buildDefaults controls the default information for Builds", + "buildOverrides": "buildOverrides controls override settings for builds", } func (BuildSpec) SwaggerDoc() map[string]string { @@ -563,16 +629,55 @@ func (BuildSpec) SwaggerDoc() map[string]string { } var map_ImageLabel = map[string]string{ - "name": "Name defines the name of the label. It must have non-zero length.", - "value": "Value defines the literal value of the label.", + "name": "name defines the name of the label. 
It must have non-zero length.", + "value": "value defines the literal value of the label.", } func (ImageLabel) SwaggerDoc() map[string]string { return map_ImageLabel } +var map_ClusterImagePolicy = map[string]string{ + "": "ClusterImagePolicy holds cluster-wide configuration for image signature verification\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the configuration for the cluster image policy.", + "status": "status contains the observed state of the resource.", +} + +func (ClusterImagePolicy) SwaggerDoc() map[string]string { + return map_ClusterImagePolicy +} + +var map_ClusterImagePolicyList = map[string]string{ + "": "ClusterImagePolicyList is a list of ClusterImagePolicy resources\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ClusterImagePolicies", +} + +func (ClusterImagePolicyList) SwaggerDoc() map[string]string { + return map_ClusterImagePolicyList +} + +var map_ClusterImagePolicySpec = map[string]string{ + "": "ClusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.", + "scopes": "scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). 
More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. This supports no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. 
For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "policy": "policy is a required field that contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", +} + +func (ClusterImagePolicySpec) SwaggerDoc() map[string]string { + return map_ClusterImagePolicySpec +} + +var map_ClusterImagePolicyStatus = map[string]string{ + "conditions": "conditions provide details on the status of this API Resource.", +} + +func (ClusterImagePolicyStatus) SwaggerDoc() map[string]string { + return map_ClusterImagePolicyStatus +} + var map_ClusterOperator = map[string]string{ - "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ClusterOperator holds the status of a core or optional OpenShift component managed by the Cluster Version Operator (CVO). This object is used by operators to convey their state to the rest of the cluster. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec holds configuration that could apply to any operator.", "status": "status holds the information about the state of an operator. 
It is consistent with status information across the Kubernetes ecosystem.", @@ -645,10 +750,19 @@ func (OperandVersion) SwaggerDoc() map[string]string { return map_OperandVersion } +var map_AcceptRisk = map[string]string{ + "": "AcceptRisk represents a risk that is considered acceptable.", + "name": "name is the name of the acceptable risk. It must be a non-empty string and must not exceed 256 characters.", +} + +func (AcceptRisk) SwaggerDoc() map[string]string { + return map_AcceptRisk +} + var map_ClusterCondition = map[string]string{ "": "ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate.", "type": "type represents the cluster-condition type. This defines the members and semantics of any additional properties.", - "promql": "promQL represents a cluster condition based on PromQL.", + "promql": "promql represents a cluster condition based on PromQL.", } func (ClusterCondition) SwaggerDoc() map[string]string { @@ -698,9 +812,9 @@ func (ClusterVersionList) SwaggerDoc() map[string]string { var map_ClusterVersionSpec = map[string]string{ "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.", "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.", - "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). 
The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.", + "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. 
image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted if the previous version is within the current minor version. Not all rollbacks will succeed, and some may unrecoverably break the cluster.", "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.", - "channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters.", + "channel": "channel is an identifier for explicitly requesting a non-default set of updates to be applied to this cluster. The default channel will contain stable updates that are appropriate for production clusters.", "capabilities": "capabilities configures the installation of optional, core cluster components. 
A null value here is identical to an empty object; see the child properties for default semantics.", "signatureStores": "signatureStores contains the upstream URIs to verify release signatures and optional reference to a config map by name containing the PEM-encoded CA bundle.\n\nBy default, CVO will use existing signature stores if this property is empty. The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature in these stores in parallel only when local ConfigMaps did not include a valid signature. Validation will fail if none of the signature stores reply with valid signature before timeout. Setting signatureStores will replace the default signature stores with custom signature stores. Default stores can be used with custom signature stores by adding them manually.\n\nA maximum of 32 signature stores may be configured.", "overrides": "overrides is list of overides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.", @@ -711,15 +825,16 @@ func (ClusterVersionSpec) SwaggerDoc() map[string]string { } var map_ClusterVersionStatus = map[string]string{ - "": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.", - "desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.", - "history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. 
Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.", - "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.", - "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.", - "capabilities": "capabilities describes the state of optional, core cluster components.", - "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", - "availableUpdates": "availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.", - "conditionalUpdates": "conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. 
This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified.", + "": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.", + "desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.", + "history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.", + "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.", + "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.", + "capabilities": "capabilities describes the state of optional, core cluster components.", + "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. 
Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", + "availableUpdates": "availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.", + "conditionalUpdates": "conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified.", + "conditionalUpdateRisks": "conditionalUpdateRisks contains the list of risks associated with conditionalUpdates. When performing a conditional update, all its associated risks will be compared with the set of accepted risks in the spec.desiredUpdate.acceptRisks field. If all risks for a conditional update are included in the spec.desiredUpdate.acceptRisks set, the conditional update can proceed, otherwise it is blocked. The risk names in the list must be unique. conditionalUpdateRisks must not contain more than 500 entries.", } func (ClusterVersionStatus) SwaggerDoc() map[string]string { @@ -742,6 +857,7 @@ func (ComponentOverride) SwaggerDoc() map[string]string { var map_ConditionalUpdate = map[string]string{ "": "ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster.", "release": "release is the target of the update.", + "riskNames": "riskNames represents the set of the names of conditionalUpdateRisks that are relevant to this update for some clusters. 
The Applies condition of each conditionalUpdateRisks entry declares if that risk applies to this cluster. A conditional update is accepted only if each of its risks either does not apply to the cluster or is considered acceptable by the cluster administrator. The latter means that the risk names are included in value of the spec.desiredUpdate.acceptRisks field. Entries must be unique and must not exceed 256 characters. riskNames must not contain more than 500 entries.", "risks": "risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update.", "conditions": "conditions represents the observations of the conditional update's current status. Known types are: * Recommended, for whether the update is recommended for the current cluster.", } @@ -752,6 +868,7 @@ func (ConditionalUpdate) SwaggerDoc() map[string]string { var map_ConditionalUpdateRisk = map[string]string{ "": "ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update.", + "conditions": "conditions represents the observations of the conditional update risk's current status. Known types are: * Applies, for whether the risk applies to the current cluster. The condition's types in the list must be unique. conditions must not contain more than one entry.", "url": "url contains information about this risk.", "name": "name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state.", "message": "message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. 
It may contain Line Feed characters (U+000A), which should be rendered as new lines.", @@ -764,7 +881,7 @@ func (ConditionalUpdateRisk) SwaggerDoc() map[string]string { var map_PromQLClusterCondition = map[string]string{ "": "PromQLClusterCondition represents a cluster condition based on PromQL.", - "promql": "PromQL is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.", + "promql": "promql is a PromQL query classifying clusters. This query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.", } func (PromQLClusterCondition) SwaggerDoc() map[string]string { @@ -797,9 +914,11 @@ func (SignatureStore) SwaggerDoc() map[string]string { var map_Update = map[string]string{ "": "Update represents an administrator update request.", "architecture": "architecture is an optional field that indicates the desired value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. architecture can only be set to Multi thereby only allowing updates from single to multi architecture. If architecture is set, image cannot be set and version must be set. Valid values are 'Multi' and empty.", - "version": "version is a semantic version identifying the update version. version is ignored if image is specified and required if architecture is specified.", - "image": "image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, version is ignored. When image is set, version should be empty. 
When image is set, architecture cannot be specified.", - "force": "force allows an administrator to update to an image that has failed verification or upgradeable checks. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources.", + "version": "version is a semantic version identifying the update version. version is required if architecture is specified. If both version and image are set, the version extracted from the referenced image must match the specified version.", + "image": "image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, architecture cannot be specified. If both version and image are set, the version extracted from the referenced image must match the specified version.", + "force": "force allows an administrator to update to an image that has failed verification or upgradeable checks that are designed to keep your cluster safe. Only use this if: * you are testing unsigned release images in short-lived test clusters or * you are working around a known bug in the cluster-version\n operator and you have verified the authenticity of the provided\n image yourself.\nThe provided image will run with full administrative access to the cluster. Do not use this flag with images that come from unknown or potentially malicious sources.", + "acceptRisks": "acceptRisks is an optional set of names of conditional update risks that are considered acceptable. A conditional update is performed only if all of its risks are acceptable. This list may contain entries that apply to current, previous or future updates. The entries therefore may not map directly to a risk in .status.conditionalUpdateRisks. 
acceptRisks must not contain more than 1000 entries. Entries in this list must be unique.", + "mode": "mode determines how an update should be processed. The only valid value is \"Preflight\". When omitted, the cluster performs a normal update by applying the specified version or image to the cluster. This is the standard update behavior. When set to \"Preflight\", the cluster runs compatibility checks against the target release without performing an actual update. Compatibility results, including any detected risks, are reported in status.conditionalUpdates and status.conditionalUpdateRisks alongside risks from the update recommendation service. This allows administrators to assess update readiness and address issues before committing to the update. Preflight mode is particularly useful for skip-level updates where upgrade compatibility needs to be verified across multiple minor versions. When mode is set to \"Preflight\", the same rules for version, image, and architecture apply as for normal updates.", } func (Update) SwaggerDoc() map[string]string { @@ -814,7 +933,7 @@ var map_UpdateHistory = map[string]string{ "version": "version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.", "image": "image is a container image location that contains the update. This value is always populated.", "verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted.", - "acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. 
For example, it may menition an Upgradeable=False or missing signature that was overriden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.", + "acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may mention an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.", } func (UpdateHistory) SwaggerDoc() map[string]string { @@ -869,7 +988,7 @@ func (ConsoleStatus) SwaggerDoc() map[string]string { var map_AWSDNSSpec = map[string]string{ "": "AWSDNSSpec contains DNS configuration specific to the Amazon Web Services cloud provider.", - "privateZoneIAMRole": "privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed.", + "privateZoneIAMRole": "privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed.\n\nThe ARN must follow the format: arn::iam:::role/, where: is the AWS partition (aws, aws-cn, aws-us-gov, or aws-eusc), is a 12-digit numeric identifier for the AWS account, is the IAM role name.", } func (AWSDNSSpec) SwaggerDoc() map[string]string { @@ -1135,6 +1254,147 @@ func (ImageDigestMirrors) SwaggerDoc() map[string]string { return map_ImageDigestMirrors } +var map_ImagePolicy = map[string]string{ + "": "ImagePolicy holds namespace-wide configuration for image signature verification\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status contains the observed state of the resource.", +} + +func (ImagePolicy) SwaggerDoc() map[string]string { + return map_ImagePolicy +} + +var map_ImagePolicyFulcioCAWithRekorRootOfTrust = map[string]string{ + "": "ImagePolicyFulcioCAWithRekorRootOfTrust defines the root of trust based on the Fulcio certificate and the Rekor public key.", + "fulcioCAData": "fulcioCAData is a required field contains inline base64-encoded data for the PEM format fulcio CA. fulcioCAData must be at most 8192 characters. ", + "rekorKeyData": "rekorKeyData is a required field contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters. ", + "fulcioSubject": "fulcioSubject is a required field specifies OIDC issuer and the email of the Fulcio authentication configuration.", +} + +func (ImagePolicyFulcioCAWithRekorRootOfTrust) SwaggerDoc() map[string]string { + return map_ImagePolicyFulcioCAWithRekorRootOfTrust +} + +var map_ImagePolicyList = map[string]string{ + "": "ImagePolicyList is a list of ImagePolicy resources\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ImagePolicies", +} + +func (ImagePolicyList) SwaggerDoc() map[string]string { + return map_ImagePolicyList +} + +var map_ImagePolicyPKIRootOfTrust = map[string]string{ + "": "ImagePolicyPKIRootOfTrust defines the root of trust based on Root CA(s) and corresponding intermediate certificates.", + "caRootsData": "caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. ", + "caIntermediatesData": "caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. caIntermediatesData requires caRootsData to be set. ", + "pkiCertificateSubject": "pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", +} + +func (ImagePolicyPKIRootOfTrust) SwaggerDoc() map[string]string { + return map_ImagePolicyPKIRootOfTrust +} + +var map_ImagePolicyPublicKeyRootOfTrust = map[string]string{ + "": "ImagePolicyPublicKeyRootOfTrust defines the root of trust based on a sigstore public key.", + "keyData": "keyData is a required field contains inline base64-encoded data for the PEM format public key. keyData must be at most 8192 characters. ", + "rekorKeyData": "rekorKeyData is an optional field contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters. 
", +} + +func (ImagePolicyPublicKeyRootOfTrust) SwaggerDoc() map[string]string { + return map_ImagePolicyPublicKeyRootOfTrust +} + +var map_ImagePolicySpec = map[string]string{ + "": "ImagePolicySpec is the specification of the ImagePolicy CRD.", + "scopes": "scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. This support no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. 
For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "policy": "policy is a required field that contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", +} + +func (ImagePolicySpec) SwaggerDoc() map[string]string { + return map_ImagePolicySpec +} + +var map_ImagePolicyStatus = map[string]string{ + "conditions": "conditions provide details on the status of this API Resource. condition type 'Pending' indicates that the customer resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid.", +} + +func (ImagePolicyStatus) SwaggerDoc() map[string]string { + return map_ImagePolicyStatus +} + +var map_ImageSigstoreVerificationPolicy = map[string]string{ + "": "ImageSigstoreVerificationPolicy defines the verification policy for the items in the scopes list.", + "rootOfTrust": "rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval. This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated.", + "signedIdentity": "signedIdentity is an optional field specifies what image identity the signature claims about the image. This is useful when the image identity in the signature differs from the original image spec, such as when mirror registry is configured for the image scope, the signature from the mirror registry contains the image identity of the mirror instead of the original scope. 
The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is \"MatchRepoDigestOrExact\".", +} + +func (ImageSigstoreVerificationPolicy) SwaggerDoc() map[string]string { + return map_ImageSigstoreVerificationPolicy +} + +var map_PKICertificateSubject = map[string]string{ + "": "PKICertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", + "email": "email specifies the expected email address imposed on the subject to which the certificate was issued, and must match the email address listed in the Subject Alternative Name (SAN) field of the certificate. The email must be a valid email address and at most 320 characters in length.", + "hostname": "hostname specifies the expected hostname imposed on the subject to which the certificate was issued, and it must match the hostname listed in the Subject Alternative Name (SAN) DNS field of the certificate. The hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.', and at most 253 characters in length. It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk.", +} + +func (PKICertificateSubject) SwaggerDoc() map[string]string { + return map_PKICertificateSubject +} + +var map_PolicyFulcioSubject = map[string]string{ + "": "PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.", + "oidcIssuer": "oidcIssuer is a required filed contains the expected OIDC issuer. The oidcIssuer must be a valid URL and at most 2048 characters in length. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token. 
Example: \"https://expected.OIDC.issuer/\"", + "signedEmail": "signedEmail is a required field holds the email address that the Fulcio certificate is issued for. The signedEmail must be a valid email address and at most 320 characters in length. Example: \"expected-signing-user@example.com\"", +} + +func (PolicyFulcioSubject) SwaggerDoc() map[string]string { + return map_PolicyFulcioSubject +} + +var map_PolicyIdentity = map[string]string{ + "": "PolicyIdentity defines image identity the signature claims about the image. When omitted, the default matchPolicy is \"MatchRepoDigestOrExact\".", + "matchPolicy": "matchPolicy is a required filed specifies matching strategy to verify the image identity in the signature against the image scope. Allowed values are \"MatchRepoDigestOrExact\", \"MatchRepository\", \"ExactRepository\", \"RemapIdentity\". When omitted, the default value is \"MatchRepoDigestOrExact\". When set to \"MatchRepoDigestOrExact\", the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity. When set to \"MatchRepository\", the identity in the signature must be in the same repository as the image identity. When set to \"ExactRepository\", the exactRepository must be specified. The identity in the signature must be in the same repository as a specific identity specified by \"repository\". When set to \"RemapIdentity\", the remapIdentity must be specified. The signature must be in the same as the remapped image identity. Remapped image identity is obtained by replacing the \"prefix\" with the specified “signedPrefix” if the the image identity matches the specified remapPrefix.", + "exactRepository": "exactRepository specifies the repository that must be exactly matched by the identity in the signature. exactRepository is required if matchPolicy is set to \"ExactRepository\". 
It is used to verify that the signature claims an identity matching this exact repository, rather than the original image identity.", + "remapIdentity": "remapIdentity specifies the prefix remapping rule for verifying image identity. remapIdentity is required if matchPolicy is set to \"RemapIdentity\". It is used to verify that the signature claims a different registry/repository prefix than the original image.", +} + +func (PolicyIdentity) SwaggerDoc() map[string]string { + return map_PolicyIdentity +} + +var map_PolicyMatchExactRepository = map[string]string{ + "repository": "repository is the reference of the image identity to be matched. repository is required if matchPolicy is set to \"ExactRepository\". The value should be a repository name (by omitting the tag or digest) in a registry implementing the \"Docker Registry HTTP API V2\". For example, docker.io/library/busybox", +} + +func (PolicyMatchExactRepository) SwaggerDoc() map[string]string { + return map_PolicyMatchExactRepository +} + +var map_PolicyMatchRemapIdentity = map[string]string{ + "prefix": "prefix is required if matchPolicy is set to \"RemapIdentity\". prefix is the prefix of the image identity to be matched. If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used as unchanged and no remapping takes place). This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure. The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. 
For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.", + "signedPrefix": "signedPrefix is required if matchPolicy is set to \"RemapIdentity\". signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as \"prefix\". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.", +} + +func (PolicyMatchRemapIdentity) SwaggerDoc() map[string]string { + return map_PolicyMatchRemapIdentity +} + +var map_PolicyRootOfTrust = map[string]string{ + "": "PolicyRootOfTrust defines the root of trust based on the selected policyType.", + "policyType": "policyType is a required field specifies the type of the policy for verification. This field must correspond to how the policy was generated. Allowed values are \"PublicKey\", \"FulcioCAWithRekor\", and \"PKI\". When set to \"PublicKey\", the policy relies on a sigstore publicKey and may optionally use a Rekor verification. When set to \"FulcioCAWithRekor\", the policy is based on the Fulcio certification and incorporates a Rekor verification. When set to \"PKI\", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI).", + "publicKey": "publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification. 
publicKey is required when policyType is PublicKey, and forbidden otherwise.", + "fulcioCAWithRekor": "fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key. fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise For more information about Fulcio and Rekor, please refer to the document at: https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor", + "pki": "pki defines the root of trust configuration based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates. pki is required when policyType is PKI, and forbidden otherwise.", +} + +func (PolicyRootOfTrust) SwaggerDoc() map[string]string { + return map_PolicyRootOfTrust +} + var map_ImageTagMirrorSet = map[string]string{ "": "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -1187,9 +1447,10 @@ func (AWSPlatformSpec) SwaggerDoc() map[string]string { var map_AWSPlatformStatus = map[string]string{ "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.", "region": "region holds the default AWS region for new AWS resources created by the cluster.", - "serviceEndpoints": "ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. 
There must be only one ServiceEndpoint for a service.", + "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", + "ipFamily": "ipFamily specifies the IP protocol family that should be used for AWS network resources. This controls whether AWS resources are created with IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary protocol family.", } func (AWSPlatformStatus) SwaggerDoc() map[string]string { @@ -1198,8 +1459,8 @@ func (AWSPlatformStatus) SwaggerDoc() map[string]string { var map_AWSResourceTag = map[string]string{ "": "AWSResourceTag is a tag to apply to AWS resources created for the cluster.", - "key": "key is the key of the tag", - "value": "value is the value of the tag. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.", + "key": "key sets the key of the AWS resource tag key-value pair. Key is required when defining an AWS resource tag. 
Key should consist of between 1 and 128 characters, and may contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'.", + "value": "value sets the value of the AWS resource tag key-value pair. Value is required when defining an AWS resource tag. Value should consist of between 1 and 256 characters, and may contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.", } func (AWSResourceTag) SwaggerDoc() map[string]string { @@ -1260,6 +1521,8 @@ var map_AzurePlatformStatus = map[string]string{ "cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`.", "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack.", "resourceTags": "resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.", + "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. 
When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", + "ipFamily": "ipFamily specifies the IP protocol family that should be used for Azure network resources. This controls whether Azure resources are created with IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary protocol family.", } func (AzurePlatformStatus) SwaggerDoc() map[string]string { @@ -1304,6 +1567,7 @@ var map_BareMetalPlatformStatus = map[string]string{ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. 
When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.", "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.", } @@ -1361,7 +1625,7 @@ func (EquinixMetalPlatformStatus) SwaggerDoc() map[string]string { var map_ExternalPlatformSpec = map[string]string{ "": "ExternalPlatformSpec holds the desired state for the generic External infrastructure provider.", - "platformName": "PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making.", + "platformName": "platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making.", } func (ExternalPlatformSpec) SwaggerDoc() map[string]string { @@ -1420,7 +1684,8 @@ func (GCPResourceTag) SwaggerDoc() map[string]string { } var map_IBMCloudPlatformSpec = map[string]string{ - "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.", + "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. 
Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. A maximum of 13 service endpoints overrides are supported.", } func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string { @@ -1429,12 +1694,12 @@ func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string { var map_IBMCloudPlatformStatus = map[string]string{ "": "IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.", - "location": "Location is where the cluster has been deployed", - "resourceGroupName": "ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", - "providerType": "ProviderType indicates the type of cluster that was created", - "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", - "dnsInstanceCRN": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", - "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM Cloud service. These endpoints are consumed by components within the cluster to reach the respective IBM Cloud Services.", + "location": "location is where the cluster has been deployed", + "resourceGroupName": "resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", + "providerType": "providerType indicates the type of cluster that was created", + "cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", + "dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. 
These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints.", } func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { @@ -1444,7 +1709,7 @@ func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { var map_IBMCloudServiceEndpoint = map[string]string{ "": "IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services.", "name": "name is the name of the IBM Cloud service. Possible values are: CIS, COS, COSConfig, DNSServices, GlobalCatalog, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`", - "url": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", + "url": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. The path must follow the pattern /v[0,9]+ or /api/v[0,9]+", } func (IBMCloudServiceEndpoint) SwaggerDoc() map[string]string { @@ -1489,7 +1754,7 @@ var map_InfrastructureStatus = map[string]string{ "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. 
For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.", "apiServerURL": "apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.", "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.", - "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster.", + "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. 
The 'HighlyAvailableArbiter' mode indicates that the control plane will consist of 2 control-plane nodes that run conventional services and 1 smaller sized arbiter node that runs a bare minimum of services to maintain quorum.", "infrastructureTopology": "infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation NOTE: External topology mode is not applicable for this field.", "cpuPartitioning": "cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. Valid values are \"None\" and \"AllNodes\". When omitted, the default value is \"None\". The default value of \"None\" indicates that no nodes will be setup with CPU partitioning. The \"AllNodes\" value indicates that all nodes have been setup with CPU partitioning, and can then be further configured via the PerformanceProfile API.", } @@ -1554,6 +1819,7 @@ var map_NutanixPlatformStatus = map[string]string{ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. 
In dual stack clusters this list contains two IPs otherwise only one.", "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.", } func (NutanixPlatformStatus) SwaggerDoc() map[string]string { @@ -1620,6 +1886,7 @@ var map_OpenStackPlatformStatus = map[string]string{ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. 
In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.", "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.", } @@ -1652,6 +1919,7 @@ var map_OvirtPlatformStatus = map[string]string{ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", "nodeDNSIP": "deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.", "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. 
When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.", } func (OvirtPlatformStatus) SwaggerDoc() map[string]string { @@ -1660,20 +1928,20 @@ func (OvirtPlatformStatus) SwaggerDoc() map[string]string { var map_PlatformSpec = map[string]string{ "": "PlatformSpec holds the desired state specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.", - "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". 
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", - "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", - "azure": "Azure contains settings specific to the Azure infrastructure provider.", - "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", - "baremetal": "BareMetal contains settings specific to the BareMetal platform.", - "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.", - "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.", - "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.", - "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", - "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", - "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", - "powervs": "PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.", - "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", - "nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"IBMCloud\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\", \"External\", and \"None\". 
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", + "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "azure contains settings specific to the Azure infrastructure provider.", + "gcp": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "baremetal contains settings specific to the BareMetal platform.", + "openstack": "openstack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "vsphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "nutanix": "nutanix contains settings specific to the Nutanix infrastructure provider.", "external": "ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately.", } @@ -1684,20 +1952,20 @@ func (PlatformSpec) SwaggerDoc() map[string]string { var map_PlatformStatus = map[string]string{ "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.", "type": "type is the underlying infrastructure provider for the cluster. 
This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.\n\nThis value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set.", - "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", - "azure": "Azure contains settings specific to the Azure infrastructure provider.", - "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", - "baremetal": "BareMetal contains settings specific to the BareMetal platform.", - "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.", - "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.", - "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.", - "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", - "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", - "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", - "powervs": "PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider.", - "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", - "nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.", - "external": "External contains settings specific to the 
generic External infrastructure provider.", + "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "azure contains settings specific to the Azure infrastructure provider.", + "gcp": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "baremetal contains settings specific to the BareMetal platform.", + "openstack": "openstack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "vsphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "nutanix": "nutanix contains settings specific to the Nutanix infrastructure provider.", + "external": "external contains settings specific to the generic External infrastructure provider.", } func (PlatformStatus) SwaggerDoc() map[string]string { @@ -1719,8 +1987,8 @@ var map_PowerVSPlatformStatus = map[string]string{ "zone": "zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported", "resourceGroup": "resourceGroup is the resource group name for new IBMCloud resources created for a cluster. The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. 
More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. When omitted, the image registry operator won't be able to configure storage, which results in the image registry cluster operator not being in an available state.", "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.", - "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", - "dnsInstanceCRN": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", + "cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", + "dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", } func (PowerVSPlatformStatus) SwaggerDoc() map[string]string { @@ -1775,7 +2043,7 @@ var map_VSpherePlatformFailureDomainSpec = map[string]string{ "regionAffinity": "regionAffinity holds the type of region, Datacenter or ComputeCluster. When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology.", "zoneAffinity": "zoneAffinity holds the type of the zone and the hostGroup which vmGroup and the hostGroup names in vCenter corresponds to a vm-host group of type Virtual Machine and Host respectively. 
Is also contains the vmHostRule which is an affinity vm-host rule in vCenter.", "server": "server is the fully-qualified domain name or the IP address of the vCenter server.", - "topology": "Topology describes a given failure domain using vSphere constructs", + "topology": "topology describes a given failure domain using vSphere constructs", } func (VSpherePlatformFailureDomainSpec) SwaggerDoc() map[string]string { @@ -1834,6 +2102,7 @@ var map_VSpherePlatformStatus = map[string]string{ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. 
When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.", "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.", } @@ -1962,6 +2231,124 @@ func (LoadBalancer) SwaggerDoc() map[string]string { return map_LoadBalancer } +var map_Custom = map[string]string{ + "": "Custom provides the custom configuration of gatherers", + "configs": "configs is a required list of gatherers configurations that can be used to enable or disable specific gatherers. It may not exceed 100 items and each gatherer can be present only once. It is possible to disable an entire set of gatherers while allowing a specific function within that set. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\"", +} + +func (Custom) SwaggerDoc() map[string]string { + return map_Custom +} + +var map_GatherConfig = map[string]string{ + "": "GatherConfig provides data gathering configuration options.", + "dataPolicy": "dataPolicy is an optional list of DataPolicyOptions that allows user to enable additional obfuscation of the Insights archive data. It may not exceed 2 items and must not contain duplicates. Valid values are ObfuscateNetworking and WorkloadNames. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When set to WorkloadNames, the gathered data about cluster resources will not contain the workload names for your deployments. Resources UIDs will be used instead. 
When omitted no obfuscation is applied.", + "gatherers": "gatherers is a required field that specifies the configuration of the gatherers.", + "storage": "storage is an optional field that allows user to define persistent storage for gathering jobs to store the Insights data archive. If omitted, the gathering job will use ephemeral storage.", +} + +func (GatherConfig) SwaggerDoc() map[string]string { + return map_GatherConfig +} + +var map_GathererConfig = map[string]string{ + "": "GathererConfig allows configuring specific gatherers", + "name": "name is the required name of a specific gatherer. It may not exceed 256 characters. The format for a gatherer name is: {gatherer}/{function} where the function is optional. Gatherer consists of lowercase letters only that may include underscores (_). Function consists of lowercase letters only that may include underscores (_) and is separated from the gatherer by a forward slash (/). The particular gatherers can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\"", + "state": "state is a required field that allows you to configure a specific gatherer. Valid values are \"Enabled\" and \"Disabled\". When set to Enabled the gatherer will run. When set to Disabled the gatherer will not run.", +} + +func (GathererConfig) SwaggerDoc() map[string]string { + return map_GathererConfig +} + +var map_Gatherers = map[string]string{ + "": "Gatherers specifies the configuration of the gatherers", + "mode": "mode is a required field that specifies the mode for gatherers. Allowed values are All, None, and Custom. When set to All, all gatherers will run and gather data. When set to None, all gatherers will be disabled and no data will be gathered. 
When set to Custom, the custom configuration from the custom field will be applied.", + "custom": "custom provides gathering configuration. It is required when mode is Custom, and forbidden otherwise. Custom configuration allows user to disable only a subset of gatherers. Gatherers that are not explicitly disabled in custom configuration will run.", +} + +func (Gatherers) SwaggerDoc() map[string]string { + return map_Gatherers +} + +var map_InsightsDataGather = map[string]string{ + "": "InsightsDataGather provides data gather configuration options for the Insights Operator.\n\n\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", +} + +func (InsightsDataGather) SwaggerDoc() map[string]string { + return map_InsightsDataGather +} + +var map_InsightsDataGatherList = map[string]string{ + "": "InsightsDataGatherList is a collection of items Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the required standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the required list of InsightsDataGather objects it may not exceed 100 items", +} + +func (InsightsDataGatherList) SwaggerDoc() map[string]string { + return map_InsightsDataGatherList +} + +var map_InsightsDataGatherSpec = map[string]string{ + "": "InsightsDataGatherSpec contains the configuration for the data gathering.", + "gatherConfig": "gatherConfig is a required spec attribute that includes all the configuration options related to gathering of the Insights data and its uploading to the ingress.", +} + +func (InsightsDataGatherSpec) SwaggerDoc() map[string]string { + return map_InsightsDataGatherSpec +} + +var map_PersistentVolumeClaimReference = map[string]string{ + "": "PersistentVolumeClaimReference is a reference to a PersistentVolumeClaim.", + "name": "name is the name of the PersistentVolumeClaim that will be used to store the Insights data archive. It is a string that follows the DNS1123 subdomain format. It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character.", +} + +func (PersistentVolumeClaimReference) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimReference +} + +var map_PersistentVolumeConfig = map[string]string{ + "": "PersistentVolumeConfig provides configuration options for PersistentVolume storage.", + "claim": "claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. The PersistentVolumeClaim must be created in the openshift-insights namespace.", + "mountPath": "mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. 
When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default mount path is /var/lib/insights-operator The path may not exceed 1024 characters and must not contain a colon.", +} + +func (PersistentVolumeConfig) SwaggerDoc() map[string]string { + return map_PersistentVolumeConfig +} + +var map_Storage = map[string]string{ + "": "Storage provides persistent storage configuration options for gathering jobs. If the type is set to PersistentVolume, then the PersistentVolume must be defined. If the type is set to Ephemeral, then the PersistentVolume must not be defined.", + "type": "type is a required field that specifies the type of storage that will be used to store the Insights data archive. Valid values are \"PersistentVolume\" and \"Ephemeral\". When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job. When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field.", + "persistentVolume": "persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. The PersistentVolume must be created in the openshift-insights namespace.", +} + +func (Storage) SwaggerDoc() map[string]string { + return map_Storage +} + +var map_AWSKMSConfig = map[string]string{ + "": "AWSKMSConfig defines the KMS config specific to AWS KMS provider", + "keyARN": "keyARN specifies the Amazon Resource Name (ARN) of the AWS KMS key used for encryption. The value must adhere to the format `arn:aws:kms:::key/`, where: - `` is the AWS region consisting of lowercase letters and hyphens followed by a number. - `` is a 12-digit numeric identifier for the AWS account. 
- `` is a unique identifier for the KMS key, consisting of lowercase hexadecimal characters and hyphens.", + "region": "region specifies the AWS region where the KMS instance exists, and follows the format `--`, e.g.: `us-east-1`. Only lowercase letters and hyphens followed by numbers are allowed.", +} + +func (AWSKMSConfig) SwaggerDoc() map[string]string { + return map_AWSKMSConfig +} + +var map_KMSConfig = map[string]string{ + "": "KMSConfig defines the configuration for the KMS instance that will be used with KMSEncryptionProvider encryption", + "type": "type defines the kind of platform for the KMS provider. Available provider types are AWS only.", + "aws": "aws defines the key config for using an AWS KMS instance for the encryption. The AWS KMS instance is managed by the user outside the purview of the control plane.", +} + +func (KMSConfig) SwaggerDoc() map[string]string { + return map_KMSConfig +} + var map_ClusterNetworkEntry = map[string]string{ "": "ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated.", "cidr": "The complete block for pod IPs.", @@ -1994,8 +2381,8 @@ func (ExternalIPPolicy) SwaggerDoc() map[string]string { var map_MTUMigration = map[string]string{ "": "MTUMigration contains infomation about MTU migration.", - "network": "Network contains MTU migration configuration for the default network.", - "machine": "Machine contains MTU migration configuration for the machine's uplink.", + "network": "network contains MTU migration configuration for the default network.", + "machine": "machine contains MTU migration configuration for the machine's uplink.", } func (MTUMigration) SwaggerDoc() map[string]string { @@ -2004,8 +2391,8 @@ func (MTUMigration) SwaggerDoc() map[string]string { var map_MTUMigrationValues = map[string]string{ "": "MTUMigrationValues contains the values for a MTU migration.", - "to": "To is the MTU to migrate to.", - "from": "From is the MTU to migrate from.", + "to": "to is the MTU to 
migrate to.", + "from": "from is the MTU to migrate from.", } func (MTUMigrationValues) SwaggerDoc() map[string]string { @@ -2064,8 +2451,8 @@ func (NetworkList) SwaggerDoc() map[string]string { var map_NetworkMigration = map[string]string{ "": "NetworkMigration represents the network migration status.", - "networkType": "NetworkType is the target plugin that is being deployed. DEPRECATED: network type migration is no longer supported, so this should always be unset.", - "mtu": "MTU is the MTU configuration that is being deployed.", + "networkType": "networkType is the target plugin that is being deployed. DEPRECATED: network type migration is no longer supported, so this should always be unset.", + "mtu": "mtu is the MTU configuration that is being deployed.", } func (NetworkMigration) SwaggerDoc() map[string]string { @@ -2076,7 +2463,7 @@ var map_NetworkSpec = map[string]string{ "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.", "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.", - "networkType": "NetworkType is the plugin that is to be deployed (e.g. OVNKubernetes). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OVNKubernetes This field is immutable after installation.", + "networkType": "networkType is the plugin that is to be deployed (e.g. OVNKubernetes). This should match a value that the cluster-network-operator understands, or else no networking will be installed. 
Currently supported values are: - OVNKubernetes This field is immutable after installation.", "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.", "serviceNodePortRange": "The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed.", "networkDiagnostics": "networkDiagnostics defines network diagnostics configuration.\n\nTakes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. If networkDiagnostics is not specified or is empty, and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, the network diagnostics feature will be disabled.", @@ -2090,9 +2477,9 @@ var map_NetworkStatus = map[string]string{ "": "NetworkStatus is the current network configuration.", "clusterNetwork": "IP address pool to use for pod IPs.", "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.", - "networkType": "NetworkType is the plugin that is deployed (e.g. OVNKubernetes).", - "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.", - "migration": "Migration contains the cluster network migration configuration.", + "networkType": "networkType is the plugin that is deployed (e.g. OVNKubernetes).", + "clusterNetworkMTU": "clusterNetworkMTU is the MTU for inter-pod networking.", + "migration": "migration contains the cluster network migration configuration.", "conditions": "conditions represents the observations of a network.config current state. 
Known .status.conditions.type are: \"NetworkDiagnosticsAvailable\"", } @@ -2121,8 +2508,8 @@ func (NodeList) SwaggerDoc() map[string]string { } var map_NodeSpec = map[string]string{ - "cgroupMode": "CgroupMode determines the cgroups version on the node", - "workerLatencyProfile": "WorkerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster", + "cgroupMode": "cgroupMode determines the cgroups version on the node", + "workerLatencyProfile": "workerLatencyProfile determines how fast the kubelet is updating the status and corresponding reaction of the cluster", "minimumKubeletVersion": "minimumKubeletVersion is the lowest version of a kubelet that can join the cluster. Specifically, the apiserver will deny most authorization requests of kubelets that are older than the specified version, only allowing the kubelet to get and update its node object, and perform subjectaccessreviews. This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads, and will eventually be marked as not ready. Its max length is 8, so maximum version allowed is either \"9.999.99\" or \"99.99.99\". Since the kubelet reports the version of the kubernetes release, not Openshift, this field references the underlying kubernetes version this version of Openshift is based off of. In other words: if an admin wishes to ensure no nodes run an older version than Openshift 4.17, then they should set the minimumKubeletVersion to 1.30.0. When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version. Thus, a kubelet with version \"1.0.0-ec.0\" will be compatible with minimumKubeletVersion \"1.0.0\" or earlier.", } @@ -2470,7 +2857,7 @@ func (TemplateReference) SwaggerDoc() map[string]string { var map_Proxy = map[string]string{ "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. 
The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec holds user-settable values for the proxy configuration", + "spec": "spec holds user-settable values for the proxy configuration", "status": "status holds observed values from the cluster. They may not be overridden.", } @@ -2543,9 +2930,9 @@ func (SchedulerList) SwaggerDoc() map[string]string { var map_SchedulerSpec = map[string]string{ "policy": "DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.", "profile": "profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods.\n\nValid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\"", - "profileCustomizations": "profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles.", + "profileCustomizations": "profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles. Deprecated: no longer needed, since DRA is GA starting with 4.21, and is enabled by default in the cluster, this field will be removed in 4.24.", "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. 
This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod's selector. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.", - "mastersSchedulable": "MastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.", + "mastersSchedulable": "mastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. 
Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.", } func (SchedulerSpec) SwaggerDoc() map[string]string { @@ -2553,8 +2940,8 @@ func (SchedulerSpec) SwaggerDoc() map[string]string { } var map_FeatureGateTests = map[string]string{ - "featureGate": "FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.", - "tests": "Tests contains an item for every TestName", + "featureGate": "featureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.", + "tests": "tests contains an item for every TestName", } func (FeatureGateTests) SwaggerDoc() map[string]string { @@ -2562,7 +2949,7 @@ func (FeatureGateTests) SwaggerDoc() map[string]string { } var map_TestDetails = map[string]string{ - "testName": "TestName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.", + "testName": "testName is the name of the test as it appears in junit XMLs. 
It does not include the suite name since the same test can be executed in many suites.", } func (TestDetails) SwaggerDoc() map[string]string { @@ -2580,7 +2967,7 @@ func (TestReporting) SwaggerDoc() map[string]string { } var map_TestReportingSpec = map[string]string{ - "testsForFeatureGates": "TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.", + "testsForFeatureGates": "testsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.", } func (TestReportingSpec) SwaggerDoc() map[string]string { @@ -2596,7 +2983,7 @@ func (CustomTLSProfile) SwaggerDoc() map[string]string { } var map_IntermediateTLSProfile = map[string]string{ - "": "IntermediateTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29", + "": "IntermediateTLSProfile is a TLS security profile based on the \"intermediate\" configuration of the Mozilla Server Side TLS configuration guidelines.", } func (IntermediateTLSProfile) SwaggerDoc() map[string]string { @@ -2604,7 +2991,7 @@ func (IntermediateTLSProfile) SwaggerDoc() map[string]string { } var map_ModernTLSProfile = map[string]string{ - "": "ModernTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility", + "": "ModernTLSProfile is a TLS security profile based on the \"modern\" configuration of the Mozilla Server Side TLS configuration guidelines.", } func (ModernTLSProfile) SwaggerDoc() map[string]string { @@ -2612,7 +2999,7 @@ func (ModernTLSProfile) SwaggerDoc() map[string]string { } var map_OldTLSProfile = map[string]string{ - "": "OldTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility", + "": "OldTLSProfile is a TLS security profile based on the \"old\" configuration of the Mozilla Server Side TLS configuration guidelines.", } func (OldTLSProfile) 
SwaggerDoc() map[string]string { @@ -2621,8 +3008,8 @@ func (OldTLSProfile) SwaggerDoc() map[string]string { var map_TLSProfileSpec = map[string]string{ "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.", - "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA", - "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: VersionTLS11\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12", + "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries that their operands do not support. For example, to use only ECDHE-RSA-AES128-GCM-SHA256 (yaml):\n\n ciphers:\n - ECDHE-RSA-AES128-GCM-SHA256\n\nTLS 1.3 cipher suites (e.g. TLS_AES_128_GCM_SHA256) are not configurable and are always enabled when TLS 1.3 is negotiated.", + "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: VersionTLS11", } func (TLSProfileSpec) SwaggerDoc() map[string]string { @@ -2631,11 +3018,11 @@ func (TLSProfileSpec) SwaggerDoc() map[string]string { var map_TLSSecurityProfile = map[string]string{ "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.", - "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. 
Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced.\n\nNote that the Modern profile is currently not supported because it is not yet well adopted by common software libraries.", - "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES256-GCM-SHA384\n\n - ECDHE-RSA-AES256-GCM-SHA384\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - DHE-RSA-AES128-GCM-SHA256\n\n - DHE-RSA-AES256-GCM-SHA384\n\n - DHE-RSA-CHACHA20-POLY1305\n\n - ECDHE-ECDSA-AES128-SHA256\n\n - ECDHE-RSA-AES128-SHA256\n\n - ECDHE-ECDSA-AES128-SHA\n\n - ECDHE-RSA-AES128-SHA\n\n - ECDHE-ECDSA-AES256-SHA384\n\n - ECDHE-RSA-AES256-SHA384\n\n - ECDHE-ECDSA-AES256-SHA\n\n - ECDHE-RSA-AES256-SHA\n\n - DHE-RSA-AES128-SHA256\n\n - DHE-RSA-AES256-SHA256\n\n - AES128-GCM-SHA256\n\n - AES256-GCM-SHA384\n\n - AES128-SHA256\n\n - AES256-SHA256\n\n - AES128-SHA\n\n - AES256-SHA\n\n - DES-CBC3-SHA\n\n minTLSVersion: VersionTLS10", - "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES256-GCM-SHA384\n\n - ECDHE-RSA-AES256-GCM-SHA384\n\n - 
ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - DHE-RSA-AES128-GCM-SHA256\n\n - DHE-RSA-AES256-GCM-SHA384\n\n minTLSVersion: VersionTLS12", - "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n minTLSVersion: VersionTLS13", - "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this:\n\n ciphers:\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n minTLSVersion: VersionTLS11", + "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters.\n\nThe profiles are based on version 5.7 of the Mozilla Server Side TLS configuration guidelines. The cipher lists consist of the configuration's \"ciphersuites\" followed by the Go-specific \"ciphers\" from the guidelines. See: https://ssl-config.mozilla.org/guidelines/5.7.json\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. 
Depending on precisely which ciphers are available to a process, the list may be reduced.", + "old": "old is a TLS profile for use when services need to be accessed by very old clients or libraries and should be used only as a last resort.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS10\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA", + "intermediate": "intermediate is a TLS profile for use when you do not need compatibility with legacy clients and want to remain highly secure while being compatible with most clients currently in use.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS12\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305", + "modern": "modern is a TLS security profile for use with clients that support TLS 1.3 and do not need backward compatibility for older clients.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS13\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256", + "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
An example custom profile looks like this:\n\n minTLSVersion: VersionTLS11\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256", } func (TLSSecurityProfile) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/pelletier/go-toml/v2/.gitignore b/vendor/github.com/pelletier/go-toml/v2/.gitignore index 4b7c4eda3a..eaf580dfd8 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.gitignore +++ b/vendor/github.com/pelletier/go-toml/v2/.gitignore @@ -5,3 +5,4 @@ cmd/tomljson/tomljson cmd/tomltestgen/tomltestgen dist tests/ +test-results diff --git a/vendor/github.com/pelletier/go-toml/v2/.golangci.toml b/vendor/github.com/pelletier/go-toml/v2/.golangci.toml index 067db55174..7d2e5b04cf 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.golangci.toml +++ b/vendor/github.com/pelletier/go-toml/v2/.golangci.toml @@ -1,84 +1,76 @@ -[service] -golangci-lint-version = "1.39.0" - -[linters-settings.wsl] -allow-assign-and-anything = true - -[linters-settings.exhaustive] -default-signifies-exhaustive = true +version = "2" [linters] -disable-all = true +default = "none" enable = [ "asciicheck", "bodyclose", - "cyclop", - "deadcode", - "depguard", "dogsled", "dupl", "durationcheck", "errcheck", "errorlint", "exhaustive", - # "exhaustivestruct", - "exportloopref", "forbidigo", - # "forcetypeassert", - "funlen", - "gci", - # "gochecknoglobals", "gochecknoinits", - "gocognit", "goconst", "gocritic", - "gocyclo", - "godot", - "godox", - # "goerr113", - "gofmt", - "gofumpt", + "godoclint", "goheader", - "goimports", - "golint", - "gomnd", - # "gomoddirectives", "gomodguard", "goprintffuncname", "gosec", - "gosimple", "govet", - # "ifshort", "importas", "ineffassign", "lll", "makezero", + "mirror", "misspell", "nakedret", - "nestif", "nilerr", - # "nlreturn", "noctx", "nolintlint", - #"paralleltest", + "perfsprint", "prealloc", "predeclared", "revive", "rowserrcheck", "sqlclosecheck", 
"staticcheck", - "structcheck", - "stylecheck", - # "testpackage", "thelper", "tparallel", - "typecheck", "unconvert", "unparam", "unused", - "varcheck", + "usetesting", "wastedassign", "whitespace", - # "wrapcheck", - # "wsl" +] + +[linters.settings.exhaustive] +default-signifies-exhaustive = true + +[linters.settings.lll] +line-length = 150 + +[[linters.exclusions.rules]] +path = ".test.go" +linters = ["goconst", "gosec"] + +[[linters.exclusions.rules]] +path = "main.go" +linters = ["forbidigo"] + +[[linters.exclusions.rules]] +path = "internal" +linters = ["revive"] +text = "(exported|indent-error-flow): " + +[formatters] +enable = [ + "gci", + "gofmt", + "gofumpt", + "goimports", ] diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml index 47f0f59142..3e19ea710a 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml +++ b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -22,7 +22,6 @@ builds: - linux_riscv64 - windows_amd64 - windows_arm64 - - windows_arm - darwin_amd64 - darwin_arm64 - id: tomljson @@ -42,7 +41,6 @@ builds: - linux_riscv64 - windows_amd64 - windows_arm64 - - windows_arm - darwin_amd64 - darwin_arm64 - id: jsontoml @@ -62,7 +60,6 @@ builds: - linux_arm - windows_amd64 - windows_arm64 - - windows_arm - darwin_amd64 - darwin_arm64 universal_binaries: diff --git a/vendor/github.com/pelletier/go-toml/v2/AGENTS.md b/vendor/github.com/pelletier/go-toml/v2/AGENTS.md new file mode 100644 index 0000000000..dafe44d764 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/AGENTS.md @@ -0,0 +1,64 @@ +# Agent Guidelines for go-toml + +This file provides guidelines for AI agents contributing to go-toml. All agents must follow these rules derived from [CONTRIBUTING.md](./CONTRIBUTING.md). + +## Project Overview + +go-toml is a TOML library for Go. 
The goal is to provide an easy-to-use and efficient TOML implementation that gets the job done without getting in the way. + +## Code Change Rules + +### Backward Compatibility + +- **No backward-incompatible changes** unless explicitly discussed and approved +- Avoid breaking people's programs unless absolutely necessary + +### Testing Requirements + +- **All bug fixes must include regression tests** +- **All new code must be tested** +- Run tests before submitting: `go test -race ./...` +- Test coverage must not decrease. Check with: + ```bash + go test -covermode=atomic -coverprofile=coverage.out + go tool cover -func=coverage.out + ``` +- All lines of code touched by changes should be covered by tests + +### Performance Requirements + +- go-toml aims to stay efficient; avoid performance regressions +- Run benchmarks to verify: `go test ./... -bench=. -count=10` +- Compare results using [benchstat](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) + +### Documentation + +- New features or feature extensions must include documentation +- Documentation lives in [README.md](./README.md) and throughout source code + +### Code Style + +- Follow existing code format and structure +- Code must pass `go fmt` +- Code must pass linting with the same golangci-lint version as CI (see version in `.github/workflows/lint.yml`): + ```bash + # Install specific version (check lint.yml for current version) + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $(go env GOPATH)/bin + # Run linter + golangci-lint run ./... + ``` + +### Commit Messages + +- Commit messages must explain **why** the change is needed +- Keep messages clear and informative even if details are in the PR description + +## Pull Request Checklist + +Before submitting: + +1. Tests pass (`go test -race ./...`) +2. No backward-incompatible changes (unless discussed) +3. Relevant documentation added/updated +4. No performance regression (verify with benchmarks) +5. 
Title is clear and understandable for changelog diff --git a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md index 96ecf9e2b3..28b88ec334 100644 --- a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md +++ b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md @@ -33,7 +33,7 @@ The documentation is present in the [README][readme] and thorough the source code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change to the documentation, create a pull request with your proposed changes. For simple changes like that, the easiest way to go is probably the "Fork this -project and edit the file" button on Github, displayed at the top right of the +project and edit the file" button on GitHub, displayed at the top right of the file. Unless it's a trivial change (for example a typo), provide a little bit of context in your pull request description or commit message. @@ -92,6 +92,48 @@ However, given GitHub's new policy to _not_ run Actions on pull requests until a maintainer clicks on button, it is highly recommended that you run them locally as you make changes. +### Test across Go versions + +The repository includes tooling to test go-toml across multiple Go versions +(1.11 through 1.25) both locally and in GitHub Actions. + +#### Local testing with Docker + +Prerequisites: Docker installed and running, Bash shell, `rsync` command. + +```bash +# Test all Go versions in parallel (default) +./test-go-versions.sh + +# Test specific versions +./test-go-versions.sh 1.21 1.22 1.23 + +# Test sequentially (slower but uses less resources) +./test-go-versions.sh --sequential + +# Verbose output with custom results directory +./test-go-versions.sh --verbose --output ./my-results 1.24 1.25 + +# Show all options +./test-go-versions.sh --help +``` + +The script creates Docker containers for each Go version and runs the full test +suite. 
Results are saved to a `test-results/` directory with individual logs and +a comprehensive summary report. + +The script only exits with a non-zero status code if either of the two most +recent Go versions fail. + +#### GitHub Actions testing (maintainers) + +1. Go to the **Actions** tab in the GitHub repository +2. Select **"Go Versions Compatibility Test"** from the workflow list +3. Click **"Run workflow"** +4. Optionally customize: + - **Go versions**: Space-separated list (e.g., `1.21 1.22 1.23`) + - **Execution mode**: Parallel (faster) or sequential (more stable) + ### Check coverage We use `go tool cover` to compute test coverage. Most code editors have a way to @@ -111,7 +153,7 @@ code lowers the coverage. Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's builtin benchmark systems. Because of their noisy nature, containers provided by -Github Actions cannot be reliably used for benchmarking. As a result, you are +GitHub Actions cannot be reliably used for benchmarking. As a result, you are responsible for checking that your changes do not incur a performance penalty. You can run their following to execute benchmarks: @@ -168,13 +210,13 @@ Checklist: 1. Decide on the next version number. Use semver. Review commits since last version to assess. 2. Tag release. For example: -``` -git checkout v2 -git pull -git tag v2.2.0 -git push --tags -``` -3. CI automatically builds a draft Github release. Review it and edit as + ``` + git checkout v2 + git pull + git tag v2.2.0 + git push --tags + ``` +3. CI automatically builds a draft GitHub release. Review it and edit as necessary. Look for "Other changes". That would indicate a pull request not labeled properly. Tweak labels and pull request titles until changelog looks good for users. 
diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md index 0755e55642..61cdd181f3 100644 --- a/vendor/github.com/pelletier/go-toml/v2/README.md +++ b/vendor/github.com/pelletier/go-toml/v2/README.md @@ -107,7 +107,11 @@ type MyConfig struct { ### Unmarshaling [`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its -content. For example: +content. + +Note that the struct variable names are _capitalized_, while the variables in the toml document are _lowercase_. + +For example: ```go doc := ` @@ -133,6 +137,62 @@ fmt.Println("tags:", cfg.Tags) [unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal + +Here is an example using tables with some simple nesting: + +```go +doc := ` +age = 45 +fruits = ["apple", "pear"] + +# these are very important! +[my-variables] +first = 1 +second = 0.2 +third = "abc" + +# this is not so important. +[my-variables.b] +bfirst = 123 +` + +var Document struct { + Age int + Fruits []string + + Myvariables struct { + First int + Second float64 + Third string + + B struct { + Bfirst int + } + } `toml:"my-variables"` +} + +err := toml.Unmarshal([]byte(doc), &Document) +if err != nil { + panic(err) +} + +fmt.Println("age:", Document.Age) +fmt.Println("fruits:", Document.Fruits) +fmt.Println("my-variables.first:", Document.Myvariables.First) +fmt.Println("my-variables.second:", Document.Myvariables.Second) +fmt.Println("my-variables.third:", Document.Myvariables.Third) +fmt.Println("my-variables.B.Bfirst:", Document.Myvariables.B.Bfirst) + +// Output: +// age: 45 +// fruits: [apple pear] +// my-variables.first: 1 +// my-variables.second: 0.2 +// my-variables.third: abc +// my-variables.B.Bfirst: 123 +``` + + ### Marshaling [`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure @@ -175,17 +235,17 @@ the AST level. See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable. 
Execution time speedup compared to other Go TOML libraries: - - - - - - - - - - - + + + + + + + + + + +
Benchmarkgo-toml v1BurntSushi/toml
Marshal/HugoFrontMatter-21.9x2.2x
Marshal/ReferenceFile/map-21.7x2.1x
Marshal/ReferenceFile/struct-22.2x3.0x
Unmarshal/HugoFrontMatter-22.9x2.7x
Unmarshal/ReferenceFile/map-22.6x2.7x
Unmarshal/ReferenceFile/struct-24.6x5.1x
Benchmarkgo-toml v1BurntSushi/toml
Marshal/HugoFrontMatter-22.1x2.0x
Marshal/ReferenceFile/map-22.0x2.0x
Marshal/ReferenceFile/struct-22.3x2.5x
Unmarshal/HugoFrontMatter-23.3x2.8x
Unmarshal/ReferenceFile/map-22.9x3.0x
Unmarshal/ReferenceFile/struct-24.8x5.0x
See more

The table above has the results of the most common use-cases. The table below @@ -193,22 +253,22 @@ contains the results of all benchmarks, including unrealistic ones. It is provided for completeness.

- - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + +
Benchmarkgo-toml v1BurntSushi/toml
Marshal/SimpleDocument/map-21.8x2.7x
Marshal/SimpleDocument/struct-22.7x3.8x
Unmarshal/SimpleDocument/map-23.8x3.0x
Unmarshal/SimpleDocument/struct-25.6x4.1x
UnmarshalDataset/example-23.0x3.2x
UnmarshalDataset/code-22.3x2.9x
UnmarshalDataset/twitter-22.6x2.7x
UnmarshalDataset/citm_catalog-22.2x2.3x
UnmarshalDataset/canada-21.8x1.5x
UnmarshalDataset/config-24.1x2.9x
geomean2.7x2.8x
Benchmarkgo-toml v1BurntSushi/toml
Marshal/SimpleDocument/map-22.0x2.9x
Marshal/SimpleDocument/struct-22.5x3.6x
Unmarshal/SimpleDocument/map-24.2x3.4x
Unmarshal/SimpleDocument/struct-25.9x4.4x
UnmarshalDataset/example-23.2x2.9x
UnmarshalDataset/code-22.4x2.8x
UnmarshalDataset/twitter-22.7x2.5x
UnmarshalDataset/citm_catalog-22.3x2.3x
UnmarshalDataset/canada-21.9x1.5x
UnmarshalDataset/config-25.4x3.0x
geomean2.9x2.8x

This table can be generated with ./ci.sh benchmark -a -html.

diff --git a/vendor/github.com/pelletier/go-toml/v2/ci.sh b/vendor/github.com/pelletier/go-toml/v2/ci.sh index 86217a9b09..30c23d1a17 100644 --- a/vendor/github.com/pelletier/go-toml/v2/ci.sh +++ b/vendor/github.com/pelletier/go-toml/v2/ci.sh @@ -147,7 +147,7 @@ bench() { pushd "$dir" if [ "${replace}" != "" ]; then - find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \; + find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2\"|${replace}\"|g" {} \; go get "${replace}" fi @@ -195,6 +195,11 @@ for line in reversed(lines[2:]): "%.1fx" % (float(line[3])/v2), # v1 "%.1fx" % (float(line[7])/v2), # bs ]) + +if not results: + print("No benchmark results to display.", file=sys.stderr) + sys.exit(1) + # move geomean to the end results.append(results[0]) del results[0] diff --git a/vendor/github.com/pelletier/go-toml/v2/decode.go b/vendor/github.com/pelletier/go-toml/v2/decode.go index f0ec3b1705..f3f14eff18 100644 --- a/vendor/github.com/pelletier/go-toml/v2/decode.go +++ b/vendor/github.com/pelletier/go-toml/v2/decode.go @@ -230,8 +230,8 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { return t, nil, err } - if t.Second > 60 { - return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater 60") + if t.Second > 59 { + return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater than 59") } b = b[8:] @@ -279,7 +279,6 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { return t, b, nil } -//nolint:cyclop func parseFloat(b []byte) (float64, error) { if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' { return math.NaN(), nil diff --git a/vendor/github.com/pelletier/go-toml/v2/errors.go b/vendor/github.com/pelletier/go-toml/v2/errors.go index 309733f1f9..d68835dfa6 100644 --- a/vendor/github.com/pelletier/go-toml/v2/errors.go +++ b/vendor/github.com/pelletier/go-toml/v2/errors.go @@ -2,10 +2,10 @@ package toml 
import ( "fmt" + "reflect" "strconv" "strings" - "github.com/pelletier/go-toml/v2/internal/danger" "github.com/pelletier/go-toml/v2/unstable" ) @@ -54,6 +54,18 @@ func (s *StrictMissingError) String() string { return buf.String() } +// Unwrap returns wrapped decode errors +// +// Implements errors.Join() interface. +func (s *StrictMissingError) Unwrap() []error { + errs := make([]error, len(s.Errors)) + for i := range s.Errors { + errs[i] = &s.Errors[i] + } + return errs +} + +// Key represents a TOML key as a sequence of key parts. type Key []string // Error returns the error message contained in the DecodeError. @@ -78,7 +90,7 @@ func (e *DecodeError) Key() Key { return e.key } -// decodeErrorFromHighlight creates a DecodeError referencing a highlighted +// wrapDecodeError creates a DecodeError referencing a highlighted // range of bytes from document. // // highlight needs to be a sub-slice of document, or this function panics. @@ -88,7 +100,7 @@ func (e *DecodeError) Key() Key { // //nolint:funlen func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError { - offset := danger.SubsliceOffset(document, de.Highlight) + offset := subsliceOffset(document, de.Highlight) errMessage := de.Error() errLine, errColumn := positionAtEnd(document[:offset]) @@ -248,5 +260,24 @@ func positionAtEnd(b []byte) (row int, column int) { } } - return + return row, column +} + +// subsliceOffset returns the byte offset of subslice within data. +// subslice must share the same backing array as data. +func subsliceOffset(data []byte, subslice []byte) int { + if len(subslice) == 0 { + return 0 + } + + // Use reflect to get the data pointers of both slices. + // This is safe because we're only reading the pointer values for comparison. 
+ dataPtr := reflect.ValueOf(data).Pointer() + subPtr := reflect.ValueOf(subslice).Pointer() + + offset := int(subPtr - dataPtr) + if offset < 0 || offset > len(data) { + panic("subslice is not within data") + } + return offset } diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go b/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go index 80f698db4b..50a6d17029 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go +++ b/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go @@ -1,6 +1,6 @@ package characters -var invalidAsciiTable = [256]bool{ +var invalidASCIITable = [256]bool{ 0x00: true, 0x01: true, 0x02: true, @@ -37,6 +37,6 @@ var invalidAsciiTable = [256]bool{ 0x7F: true, } -func InvalidAscii(b byte) bool { - return invalidAsciiTable[b] +func InvalidASCII(b byte) bool { + return invalidASCIITable[b] } diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go b/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go index db4f45acbf..7c5cb55e42 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go +++ b/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go @@ -1,20 +1,12 @@ +// Package characters provides functions for working with string encodings. 
package characters import ( "unicode/utf8" ) -type utf8Err struct { - Index int - Size int -} - -func (u utf8Err) Zero() bool { - return u.Size == 0 -} - -// Verified that a given string is only made of valid UTF-8 characters allowed -// by the TOML spec: +// Utf8TomlValidAlreadyEscaped verifies that a given string is only made of +// valid UTF-8 characters allowed by the TOML spec: // // Any Unicode character may be used except those that must be escaped: // quotation mark, backslash, and the control characters other than tab (U+0000 @@ -23,8 +15,8 @@ func (u utf8Err) Zero() bool { // It is a copy of the Go 1.17 utf8.Valid implementation, tweaked to exit early // when a character is not allowed. // -// The returned utf8Err is Zero() if the string is valid, or contains the byte -// index and size of the invalid character. +// The returned slice is empty if the string is valid, or contains the bytes +// of the invalid character. // // quotation mark => already checked // backslash => already checked @@ -32,9 +24,8 @@ func (u utf8Err) Zero() bool { // 0x9 => tab, ok // 0xA - 0x1F => invalid // 0x7F => invalid -func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { +func Utf8TomlValidAlreadyEscaped(p []byte) []byte { // Fast path. Check for and skip 8 bytes of ASCII characters per iteration. - offset := 0 for len(p) >= 8 { // Combining two 32 bit loads allows the same code to be used // for 32 and 64 bit platforms. 
@@ -48,24 +39,19 @@ func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { } for i, b := range p[:8] { - if InvalidAscii(b) { - err.Index = offset + i - err.Size = 1 - return + if InvalidASCII(b) { + return p[i : i+1] } } p = p[8:] - offset += 8 } n := len(p) for i := 0; i < n; { pi := p[i] if pi < utf8.RuneSelf { - if InvalidAscii(pi) { - err.Index = offset + i - err.Size = 1 - return + if InvalidASCII(pi) { + return p[i : i+1] } i++ continue @@ -73,44 +59,34 @@ func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { x := first[pi] if x == xx { // Illegal starter byte. - err.Index = offset + i - err.Size = 1 - return + return p[i : i+1] } size := int(x & 7) if i+size > n { // Short or invalid. - err.Index = offset + i - err.Size = n - i - return + return p[i:n] } accept := acceptRanges[x>>4] if c := p[i+1]; c < accept.lo || accept.hi < c { - err.Index = offset + i - err.Size = 2 - return - } else if size == 2 { + return p[i : i+2] + } else if size == 2 { //revive:disable:empty-block } else if c := p[i+2]; c < locb || hicb < c { - err.Index = offset + i - err.Size = 3 - return - } else if size == 3 { + return p[i : i+3] + } else if size == 3 { //revive:disable:empty-block } else if c := p[i+3]; c < locb || hicb < c { - err.Index = offset + i - err.Size = 4 - return + return p[i : i+4] } i += size } - return + return nil } -// Return the size of the next rune if valid, 0 otherwise. +// Utf8ValidNext returns the size of the next rune if valid, 0 otherwise. 
func Utf8ValidNext(p []byte) int { c := p[0] if c < utf8.RuneSelf { - if InvalidAscii(c) { + if InvalidASCII(c) { return 0 } return 1 @@ -129,10 +105,10 @@ func Utf8ValidNext(p []byte) int { accept := acceptRanges[x>>4] if c := p[1]; c < accept.lo || accept.hi < c { return 0 - } else if size == 2 { + } else if size == 2 { //nolint:revive } else if c := p[2]; c < locb || hicb < c { return 0 - } else if size == 3 { + } else if size == 3 { //nolint:revive } else if c := p[3]; c < locb || hicb < c { return 0 } diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go deleted file mode 100644 index e38e1131b8..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go +++ /dev/null @@ -1,65 +0,0 @@ -package danger - -import ( - "fmt" - "reflect" - "unsafe" -) - -const maxInt = uintptr(int(^uint(0) >> 1)) - -func SubsliceOffset(data []byte, subslice []byte) int { - datap := (*reflect.SliceHeader)(unsafe.Pointer(&data)) - hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice)) - - if hlp.Data < datap.Data { - panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data)) - } - offset := hlp.Data - datap.Data - - if offset > maxInt { - panic(fmt.Errorf("slice offset larger than int (%d)", offset)) - } - - intoffset := int(offset) - - if intoffset > datap.Len { - panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len)) - } - - if intoffset+hlp.Len > datap.Len { - panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len)) - } - - return intoffset -} - -func BytesRange(start []byte, end []byte) []byte { - if start == nil || end == nil { - panic("cannot call BytesRange with nil") - } - startp := (*reflect.SliceHeader)(unsafe.Pointer(&start)) - endp := (*reflect.SliceHeader)(unsafe.Pointer(&end)) - - if startp.Data > endp.Data { - panic(fmt.Errorf("start 
pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data)) - } - - l := startp.Len - endLen := int(endp.Data-startp.Data) + endp.Len - if endLen > l { - l = endLen - } - - if l > startp.Cap { - panic(fmt.Errorf("range length is larger than capacity")) - } - - return start[:l] -} - -func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer { - // TODO: replace with unsafe.Add when Go 1.17 is released - // https://github.com/golang/go/issues/40481 - return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset)) -} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go deleted file mode 100644 index 9d41c28a2f..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go +++ /dev/null @@ -1,23 +0,0 @@ -package danger - -import ( - "reflect" - "unsafe" -) - -// typeID is used as key in encoder and decoder caches to enable using -// the optimize runtime.mapaccess2_fast64 function instead of the more -// expensive lookup if we were to use reflect.Type as map key. -// -// typeID holds the pointer to the reflect.Type value, which is unique -// in the program. -// -// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61 -type TypeID unsafe.Pointer - -func MakeTypeID(t reflect.Type) TypeID { - // reflect.Type has the fields: - // typ unsafe.Pointer - // ptr unsafe.Pointer - return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1]) -} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go index 149b17f538..6344fd047d 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go @@ -36,7 +36,7 @@ func (t *KeyTracker) Pop(node *unstable.Node) { } } -// Key returns the current key +// Key returns the current key. 
func (t *KeyTracker) Key() []string { k := make([]string, len(t.k)) copy(k, t.k) diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go index 76df2d5b6a..2062358001 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go @@ -288,11 +288,12 @@ func (s *SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) { idx = s.create(parentIdx, k, tableKind, false, true) } else { entry := s.entries[idx] - if it.IsLast() { + switch { + case it.IsLast(): return false, fmt.Errorf("toml: key %s is already defined", string(k)) - } else if entry.kind != tableKind { + case entry.kind != tableKind: return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) - } else if entry.explicit { + case entry.explicit: return false, fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k)) } } @@ -309,16 +310,16 @@ func (s *SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) { return s.checkInlineTable(value) case unstable.Array: return s.checkArray(value) + default: + return false, nil } - - return false, nil } func (s *SeenTracker) checkArray(node *unstable.Node) (first bool, err error) { it := node.Children() for it.Next() { n := it.Node() - switch n.Kind { + switch n.Kind { //nolint:exhaustive case unstable.InlineTable: first, err = s.checkInlineTable(n) if err != nil { diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go index bf0317392f..ed510382c0 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go @@ -1 +1,2 @@ +// Package tracker provides functions for keeping track of AST nodes. 
package tracker diff --git a/vendor/github.com/pelletier/go-toml/v2/localtime.go b/vendor/github.com/pelletier/go-toml/v2/localtime.go index a856bfdb0d..502ef2f2f1 100644 --- a/vendor/github.com/pelletier/go-toml/v2/localtime.go +++ b/vendor/github.com/pelletier/go-toml/v2/localtime.go @@ -45,7 +45,7 @@ func (d *LocalDate) UnmarshalText(b []byte) error { type LocalTime struct { Hour int // Hour of the day: [0; 24[ Minute int // Minute of the hour: [0; 60[ - Second int // Second of the minute: [0; 60[ + Second int // Second of the minute: [0; 59] Nanosecond int // Nanoseconds within the second: [0, 1000000000[ Precision int // Number of digits to display for Nanosecond. } diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go index 161acd9343..ca462d40e0 100644 --- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding" "encoding/json" + "errors" "fmt" "io" "math" @@ -42,7 +43,7 @@ type Encoder struct { arraysMultiline bool indentSymbol string indentTables bool - marshalJsonNumbers bool + marshalJSONNumbers bool } // NewEncoder returns a new Encoder that writes to w. @@ -89,14 +90,14 @@ func (enc *Encoder) SetIndentTables(indent bool) *Encoder { return enc } -// SetMarshalJsonNumbers forces the encoder to serialize `json.Number` as a +// SetMarshalJSONNumbers forces the encoder to serialize `json.Number` as a // float or integer instead of relying on TextMarshaler to emit a string. // // *Unstable:* This method does not follow the compatibility guarantees of // semver. It can be changed or removed without a new major version being // issued. 
-func (enc *Encoder) SetMarshalJsonNumbers(indent bool) *Encoder { - enc.marshalJsonNumbers = indent +func (enc *Encoder) SetMarshalJSONNumbers(indent bool) *Encoder { + enc.marshalJSONNumbers = indent return enc } @@ -161,6 +162,8 @@ func (enc *Encoder) SetMarshalJsonNumbers(indent bool) *Encoder { // // The "omitempty" option prevents empty values or groups from being emitted. // +// The "omitzero" option prevents zero values or groups from being emitted. +// // The "commented" option prefixes the value and all its children with a comment // symbol. // @@ -177,7 +180,7 @@ func (enc *Encoder) Encode(v interface{}) error { ctx.inline = enc.tablesInline if v == nil { - return fmt.Errorf("toml: cannot encode a nil interface") + return errors.New("toml: cannot encode a nil interface") } b, err := enc.encode(b, ctx, reflect.ValueOf(v)) @@ -196,6 +199,7 @@ func (enc *Encoder) Encode(v interface{}) error { type valueOptions struct { multiline bool omitempty bool + omitzero bool commented bool comment string } @@ -266,16 +270,15 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e case LocalDateTime: return append(b, x.String()...), nil case json.Number: - if enc.marshalJsonNumbers { + if enc.marshalJSONNumbers { if x == "" { /// Useful zero value. 
return append(b, "0"...), nil } else if v, err := x.Int64(); err == nil { return enc.encode(b, ctx, reflect.ValueOf(v)) } else if f, err := x.Float64(); err == nil { return enc.encode(b, ctx, reflect.ValueOf(f)) - } else { - return nil, fmt.Errorf("toml: unable to convert %q to int64 or float64", x) } + return nil, fmt.Errorf("toml: unable to convert %q to int64 or float64", x) } } @@ -309,7 +312,7 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e return enc.encodeSlice(b, ctx, v) case reflect.Interface: if v.IsNil() { - return nil, fmt.Errorf("toml: encoding a nil interface is not supported") + return nil, errors.New("toml: encoding a nil interface is not supported") } return enc.encode(b, ctx, v.Elem()) @@ -326,28 +329,30 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e case reflect.Float32: f := v.Float() - if math.IsNaN(f) { + switch { + case math.IsNaN(f): b = append(b, "nan"...) - } else if f > math.MaxFloat32 { + case f > math.MaxFloat32: b = append(b, "inf"...) - } else if f < -math.MaxFloat32 { + case f < -math.MaxFloat32: b = append(b, "-inf"...) - } else if math.Trunc(f) == f { + case math.Trunc(f) == f: b = strconv.AppendFloat(b, f, 'f', 1, 32) - } else { + default: b = strconv.AppendFloat(b, f, 'f', -1, 32) } case reflect.Float64: f := v.Float() - if math.IsNaN(f) { + switch { + case math.IsNaN(f): b = append(b, "nan"...) - } else if f > math.MaxFloat64 { + case f > math.MaxFloat64: b = append(b, "inf"...) - } else if f < -math.MaxFloat64 { + case f < -math.MaxFloat64: b = append(b, "-inf"...) 
- } else if math.Trunc(f) == f { + case math.Trunc(f) == f: b = strconv.AppendFloat(b, f, 'f', 1, 64) - } else { + default: b = strconv.AppendFloat(b, f, 'f', -1, 64) } case reflect.Bool: @@ -384,6 +389,31 @@ func shouldOmitEmpty(options valueOptions, v reflect.Value) bool { return options.omitempty && isEmptyValue(v) } +func shouldOmitZero(options valueOptions, v reflect.Value) bool { + if !options.omitzero { + return false + } + + // Check if the type implements isZeroer interface (has a custom IsZero method). + if v.Type().Implements(isZeroerType) { + return v.Interface().(isZeroer).IsZero() + } + + // Check if pointer type implements isZeroer. + if reflect.PointerTo(v.Type()).Implements(isZeroerType) { + if v.CanAddr() { + return v.Addr().Interface().(isZeroer).IsZero() + } + // Create a temporary addressable copy to call the pointer receiver method. + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + return pv.Interface().(isZeroer).IsZero() + } + + // Fall back to reflect's IsZero for types without custom IsZero method. 
+ return v.IsZero() +} + func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) { var err error @@ -434,8 +464,9 @@ func isEmptyValue(v reflect.Value) bool { return v.Float() == 0 case reflect.Interface, reflect.Ptr: return v.IsNil() + default: + return false } - return false } func isEmptyStruct(v reflect.Value) bool { @@ -479,7 +510,7 @@ func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byt func needsQuoting(v string) bool { // TODO: vectorize for _, b := range []byte(v) { - if b == '\'' || b == '\r' || b == '\n' || characters.InvalidAscii(b) { + if b == '\'' || b == '\r' || b == '\n' || characters.InvalidASCII(b) { return true } } @@ -517,12 +548,26 @@ func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byt del = 0x7f ) - for _, r := range []byte(v) { + bv := []byte(v) + for i := 0; i < len(bv); i++ { + r := bv[i] switch r { case '\\': b = append(b, `\\`...) case '"': - b = append(b, `\"`...) + if multiline { + // Quotation marks do not need to be quoted in multiline strings unless + // it contains 3 consecutive. If 3+ quotes appear, quote all of them + // because it's visually better + if i+2 > len(bv) || bv[i+1] != '"' || bv[i+2] != '"' { + b = append(b, r) + } else { + b = append(b, `\"\"\"`...) + i += 2 + } + } else { + b = append(b, `\"`...) + } case '\b': b = append(b, `\b`...) case '\f': @@ -559,9 +604,9 @@ func (enc *Encoder) encodeUnquotedKey(b []byte, v string) []byte { return append(b, v...) } -func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error) { +func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) []byte { if len(ctx.parentKey) == 0 { - return b, nil + return b } b = enc.encodeComment(ctx.indent, ctx.options.comment, b) @@ -581,10 +626,9 @@ func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error) b = append(b, "]\n"...) 
- return b, nil + return b } -//nolint:cyclop func (enc *Encoder) encodeKey(b []byte, k string) []byte { needsQuotation := false cannotUseLiteral := false @@ -621,30 +665,33 @@ func (enc *Encoder) encodeKey(b []byte, k string) []byte { func (enc *Encoder) keyToString(k reflect.Value) (string, error) { keyType := k.Type() - switch { - case keyType.Kind() == reflect.String: - return k.String(), nil - - case keyType.Implements(textMarshalerType): + if keyType.Implements(textMarshalerType) { keyB, err := k.Interface().(encoding.TextMarshaler).MarshalText() if err != nil { return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err) } return string(keyB), nil + } + + switch keyType.Kind() { + case reflect.String: + return k.String(), nil - case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return strconv.FormatInt(k.Int(), 10), nil - case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return strconv.FormatUint(k.Uint(), 10), nil - case keyType.Kind() == reflect.Float32: + case reflect.Float32: return strconv.FormatFloat(k.Float(), 'f', -1, 32), nil - case keyType.Kind() == reflect.Float64: + case reflect.Float64: return strconv.FormatFloat(k.Float(), 'f', -1, 64), nil + + default: + return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) } - return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) } func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { @@ -657,8 +704,18 @@ func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte 
for iter.Next() { v := iter.Value() - if isNil(v) { - continue + // Handle nil values: convert nil pointers to zero value, + // skip nil interfaces and nil maps. + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + v = reflect.Zero(v.Type().Elem()) + } + case reflect.Interface, reflect.Map: + if v.IsNil() { + continue + } + default: } k, err := enc.keyToString(iter.Key()) @@ -748,9 +805,8 @@ func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { walkStruct(ctx, t, f.Elem()) } continue - } else { - k = fieldType.Name } + k = fieldType.Name } if isNil(f) { @@ -760,6 +816,7 @@ func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { options := valueOptions{ multiline: opts.multiline, omitempty: opts.omitempty, + omitzero: opts.omitzero, commented: opts.commented, comment: fieldType.Tag.Get("comment"), } @@ -820,6 +877,7 @@ type tagOptions struct { multiline bool inline bool omitempty bool + omitzero bool commented bool } @@ -832,7 +890,7 @@ func parseTag(tag string) (string, tagOptions) { } raw := tag[idx+1:] - tag = string(tag[:idx]) + tag = tag[:idx] for raw != "" { var o string i := strings.Index(raw, ",") @@ -848,6 +906,8 @@ func parseTag(tag string) (string, tagOptions) { opts.inline = true case "omitempty": opts.omitempty = true + case "omitzero": + opts.omitzero = true case "commented": opts.commented = true } @@ -866,10 +926,7 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro } if !ctx.skipTableHeader { - b, err = enc.encodeTableHeader(ctx, b) - if err != nil { - return nil, err - } + b = enc.encodeTableHeader(ctx, b) if enc.indentTables && len(ctx.parentKey) > 0 { ctx.indent++ @@ -882,6 +939,9 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro if shouldOmitEmpty(kv.Options, kv.Value) { continue } + if kv.Options.omitzero && shouldOmitZero(kv.Options, kv.Value) { + continue + } hasNonEmptyKV = true ctx.setKey(kv.Key) @@ -901,6 +961,9 @@ func (enc *Encoder) encodeTable(b []byte, 
ctx encoderCtx, t table) ([]byte, erro if shouldOmitEmpty(table.Options, table.Value) { continue } + if table.Options.omitzero && shouldOmitZero(table.Options, table.Value) { + continue + } if first { first = false if hasNonEmptyKV { @@ -935,6 +998,9 @@ func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte if shouldOmitEmpty(kv.Options, kv.Value) { continue } + if kv.Options.omitzero && shouldOmitZero(kv.Options, kv.Value) { + continue + } if first { first = false @@ -963,11 +1029,14 @@ func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { if !v.IsValid() { return false } - if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { + t := v.Type() + if t == timeType || t.Implements(textMarshalerType) { + return false + } + if v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PointerTo(t).Implements(textMarshalerType) { return false } - t := v.Type() switch t.Kind() { case reflect.Map, reflect.Struct: return !ctx.inline diff --git a/vendor/github.com/pelletier/go-toml/v2/strict.go b/vendor/github.com/pelletier/go-toml/v2/strict.go index 802e7e4d15..2a147c0260 100644 --- a/vendor/github.com/pelletier/go-toml/v2/strict.go +++ b/vendor/github.com/pelletier/go-toml/v2/strict.go @@ -1,7 +1,6 @@ package toml import ( - "github.com/pelletier/go-toml/v2/internal/danger" "github.com/pelletier/go-toml/v2/internal/tracker" "github.com/pelletier/go-toml/v2/unstable" ) @@ -13,6 +12,9 @@ type strict struct { key tracker.KeyTracker missing []unstable.ParserError + + // Reference to the document for computing key ranges. 
+ doc []byte } func (s *strict) EnterTable(node *unstable.Node) { @@ -53,7 +55,7 @@ func (s *strict) MissingTable(node *unstable.Node) { } s.missing = append(s.missing, unstable.ParserError{ - Highlight: keyLocation(node), + Highlight: s.keyLocation(node), Message: "missing table", Key: s.key.Key(), }) @@ -65,7 +67,7 @@ func (s *strict) MissingField(node *unstable.Node) { } s.missing = append(s.missing, unstable.ParserError{ - Highlight: keyLocation(node), + Highlight: s.keyLocation(node), Message: "missing field", Key: s.key.Key(), }) @@ -88,7 +90,7 @@ func (s *strict) Error(doc []byte) error { return err } -func keyLocation(node *unstable.Node) []byte { +func (s *strict) keyLocation(node *unstable.Node) []byte { k := node.Key() hasOne := k.Next() @@ -96,12 +98,17 @@ func keyLocation(node *unstable.Node) []byte { panic("should not be called with empty key") } - start := k.Node().Data - end := k.Node().Data + // Get the range from the first key to the last key. + firstRaw := k.Node().Raw + lastRaw := firstRaw for k.Next() { - end = k.Node().Data + lastRaw = k.Node().Raw } - return danger.BytesRange(start, end) + // Compute the slice from the document using the ranges. 
+ start := firstRaw.Offset + end := lastRaw.Offset + lastRaw.Length + + return s.doc[start:end] } diff --git a/vendor/github.com/pelletier/go-toml/v2/test-go-versions.sh b/vendor/github.com/pelletier/go-toml/v2/test-go-versions.sh new file mode 100644 index 0000000000..5fe5c77722 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/test-go-versions.sh @@ -0,0 +1,597 @@ +#!/usr/bin/env bash + +set -uo pipefail + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Go versions to test (1.11 through 1.26) +GO_VERSIONS=( + "1.11" + "1.12" + "1.13" + "1.14" + "1.15" + "1.16" + "1.17" + "1.18" + "1.19" + "1.20" + "1.21" + "1.22" + "1.23" + "1.24" + "1.25" + "1.26" +) + +# Default values +PARALLEL=true +VERBOSE=false +OUTPUT_DIR="test-results" +DOCKER_TIMEOUT="10m" + +usage() { + cat << EOF +Usage: $0 [OPTIONS] [GO_VERSIONS...] + +Test go-toml across multiple Go versions using Docker containers. + +The script reports the lowest continuous supported Go version (where all subsequent +versions pass) and only exits with non-zero status if either of the two most recent +Go versions fail, indicating immediate attention is needed. + +Note: For Go versions < 1.21, the script automatically updates go.mod to match the +target version, but older versions may still fail due to missing standard library +features (e.g., the 'slices' package introduced in Go 1.21). 
+ +OPTIONS: + -h, --help Show this help message + -s, --sequential Run tests sequentially instead of in parallel + -v, --verbose Enable verbose output + -o, --output DIR Output directory for test results (default: test-results) + -t, --timeout TIME Docker timeout for each test (default: 10m) + --list List available Go versions and exit + +ARGUMENTS: + GO_VERSIONS Specific Go versions to test (default: all supported versions) + Examples: 1.21 1.22 1.23 + +EXAMPLES: + $0 # Test all Go versions in parallel + $0 --sequential # Test all Go versions sequentially + $0 1.21 1.22 1.23 # Test specific versions + $0 --verbose --output ./results 1.25 1.26 # Verbose output to custom directory + +EXIT CODES: + 0 Recent Go versions pass (good compatibility) + 1 Recent Go versions fail (needs attention) or script error + +EOF +} + +log() { + echo -e "${BLUE}[$(date +'%H:%M:%S')]${NC} $*" >&2 +} + +log_success() { + echo -e "${GREEN}[$(date +'%H:%M:%S')] ✓${NC} $*" >&2 +} + +log_error() { + echo -e "${RED}[$(date +'%H:%M:%S')] ✗${NC} $*" >&2 +} + +log_warning() { + echo -e "${YELLOW}[$(date +'%H:%M:%S')] ⚠${NC} $*" >&2 +} + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + usage + exit 0 + ;; + -s|--sequential) + PARALLEL=false + shift + ;; + -v|--verbose) + VERBOSE=true + shift + ;; + -o|--output) + OUTPUT_DIR="$2" + shift 2 + ;; + -t|--timeout) + DOCKER_TIMEOUT="$2" + shift 2 + ;; + --list) + echo "Available Go versions:" + printf '%s\n' "${GO_VERSIONS[@]}" + exit 0 + ;; + -*) + echo "Unknown option: $1" >&2 + usage + exit 1 + ;; + *) + # Remaining arguments are Go versions + break + ;; + esac +done + +# If specific versions provided, use those instead of defaults +if [[ $# -gt 0 ]]; then + GO_VERSIONS=("$@") +fi + +# Validate Go versions +for version in "${GO_VERSIONS[@]}"; do + if ! [[ "$version" =~ ^1\.(1[1-9]|2[0-6])$ ]]; then + log_error "Invalid Go version: $version. 
Supported versions: 1.11-1.26" + exit 1 + fi +done + +# Check if Docker is available +if ! command -v docker &> /dev/null; then + log_error "Docker is required but not installed or not in PATH" + exit 1 +fi + +# Check if Docker daemon is running +if ! docker info &> /dev/null; then + log_error "Docker daemon is not running" + exit 1 +fi + +# Create output directory +mkdir -p "$OUTPUT_DIR" + +# Function to test a single Go version +test_go_version() { + local go_version="$1" + local container_name="go-toml-test-${go_version}" + local result_file="${OUTPUT_DIR}/go-${go_version}.txt" + local dockerfile_content + + log "Testing Go $go_version..." + + # Create a temporary Dockerfile for this version + # For Go versions < 1.21, we need to update go.mod to match the Go version + local needs_go_mod_update=false + if [[ $(echo "$go_version 1.21" | tr ' ' '\n' | sort -V | head -n1) == "$go_version" && "$go_version" != "1.21" ]]; then + needs_go_mod_update=true + fi + + dockerfile_content="FROM golang:${go_version}-alpine + +# Install git (required for go mod) +RUN apk add --no-cache git + +# Set working directory +WORKDIR /app + +# Copy source code +COPY . ." + + # Add go.mod update step for older Go versions + if [[ "$needs_go_mod_update" == true ]]; then + dockerfile_content="$dockerfile_content + +# Update go.mod to match Go version (required for Go < 1.21) +RUN if [ -f go.mod ]; then sed -i 's/^go [0-9]\\+\\.[0-9]\\+\\(\\.[0-9]\\+\\)\\?/go $go_version/' go.mod; fi + +# Note: Go versions < 1.21 may fail due to missing standard library packages (e.g., slices) +# This is expected for projects that use Go 1.21+ features" + fi + + dockerfile_content="$dockerfile_content + +# Run tests +CMD [\"sh\", \"-c\", \"go version && echo '--- Running go test ./... 
---' && go test ./...\"]" + + # Create temporary directory for this test + local temp_dir + temp_dir=$(mktemp -d) + + # Copy source to temp directory (excluding test results and git) + rsync -a --exclude="$OUTPUT_DIR" --exclude=".git" --exclude="*.test" . "$temp_dir/" + + # Create Dockerfile in temp directory + echo "$dockerfile_content" > "$temp_dir/Dockerfile" + + # Build and run container + local exit_code=0 + local output + + if $VERBOSE; then + log "Building Docker image for Go $go_version..." + fi + + # Capture both stdout and stderr, and the exit code + if output=$(cd "$temp_dir" && timeout "$DOCKER_TIMEOUT" docker build -t "$container_name" . 2>&1 && \ + timeout "$DOCKER_TIMEOUT" docker run --rm "$container_name" 2>&1); then + log_success "Go $go_version: PASSED" + echo "PASSED" > "${result_file}.status" + else + exit_code=$? + log_error "Go $go_version: FAILED (exit code: $exit_code)" + echo "FAILED" > "${result_file}.status" + fi + + # Save full output + echo "$output" > "$result_file" + + # Clean up + docker rmi "$container_name" &> /dev/null || true + rm -rf "$temp_dir" + + if $VERBOSE; then + echo "--- Go $go_version output ---" + echo "$output" + echo "--- End Go $go_version output ---" + fi + + return $exit_code +} + +# Function to run tests in parallel +run_parallel() { + local pids=() + local failed_versions=() + + log "Starting parallel tests for ${#GO_VERSIONS[@]} Go versions..." + + # Start all tests in background + for version in "${GO_VERSIONS[@]}"; do + test_go_version "$version" & + pids+=($!) + done + + # Wait for all tests to complete + for i in "${!pids[@]}"; do + local pid=${pids[$i]} + local version=${GO_VERSIONS[$i]} + + if ! wait $pid; then + failed_versions+=("$version") + fi + done + + return ${#failed_versions[@]} +} + +# Function to run tests sequentially +run_sequential() { + local failed_versions=() + + log "Starting sequential tests for ${#GO_VERSIONS[@]} Go versions..." + + for version in "${GO_VERSIONS[@]}"; do + if ! 
test_go_version "$version"; then + failed_versions+=("$version") + fi + done + + return ${#failed_versions[@]} +} + +# Main execution +main() { + local start_time + start_time=$(date +%s) + + log "Starting Go version compatibility tests..." + log "Testing versions: ${GO_VERSIONS[*]}" + log "Output directory: $OUTPUT_DIR" + log "Parallel execution: $PARALLEL" + + local failed_count + if $PARALLEL; then + run_parallel + failed_count=$? + else + run_sequential + failed_count=$? + fi + + local end_time + end_time=$(date +%s) + local duration=$((end_time - start_time)) + + # Collect results for display + local passed_versions=() + local failed_versions=() + local unknown_versions=() + local passed_count=0 + + for version in "${GO_VERSIONS[@]}"; do + local status_file="${OUTPUT_DIR}/go-${version}.txt.status" + if [[ -f "$status_file" ]]; then + local status + status=$(cat "$status_file") + if [[ "$status" == "PASSED" ]]; then + passed_versions+=("$version") + ((passed_count++)) + else + failed_versions+=("$version") + fi + else + unknown_versions+=("$version") + fi + done + + # Generate summary report + local summary_file="${OUTPUT_DIR}/summary.txt" + { + echo "Go Version Compatibility Test Summary" + echo "=====================================" + echo "Date: $(date)" + echo "Duration: ${duration}s" + echo "Parallel: $PARALLEL" + echo "" + echo "Results:" + + for version in "${GO_VERSIONS[@]}"; do + local status_file="${OUTPUT_DIR}/go-${version}.txt.status" + if [[ -f "$status_file" ]]; then + local status + status=$(cat "$status_file") + if [[ "$status" == "PASSED" ]]; then + echo " Go $version: ✓ PASSED" + else + echo " Go $version: ✗ FAILED" + fi + else + echo " Go $version: ? 
UNKNOWN (no status file)" + fi + done + + echo "" + echo "Summary: $passed_count/${#GO_VERSIONS[@]} versions passed" + + if [[ $failed_count -gt 0 ]]; then + echo "" + echo "Failed versions details:" + for version in "${failed_versions[@]}"; do + echo "" + echo "--- Go $version (FAILED) ---" + local result_file="${OUTPUT_DIR}/go-${version}.txt" + if [[ -f "$result_file" ]]; then + tail -n 30 "$result_file" + fi + done + fi + } > "$summary_file" + + # Find lowest continuous supported version and check recent versions + local lowest_continuous_version="" + local recent_versions_failed=false + + # Sort versions to ensure proper order + local sorted_versions=() + for version in "${GO_VERSIONS[@]}"; do + sorted_versions+=("$version") + done + # Sort versions numerically (1.11, 1.12, ..., 1.25) + IFS=$'\n' sorted_versions=($(sort -V <<< "${sorted_versions[*]}")) + + # Find lowest continuous supported version (all versions from this point onwards pass) + for version in "${sorted_versions[@]}"; do + local status_file="${OUTPUT_DIR}/go-${version}.txt.status" + local all_subsequent_pass=true + + # Check if this version and all subsequent versions pass + local found_current=false + for check_version in "${sorted_versions[@]}"; do + if [[ "$check_version" == "$version" ]]; then + found_current=true + fi + + if [[ "$found_current" == true ]]; then + local check_status_file="${OUTPUT_DIR}/go-${check_version}.txt.status" + if [[ -f "$check_status_file" ]]; then + local status + status=$(cat "$check_status_file") + if [[ "$status" != "PASSED" ]]; then + all_subsequent_pass=false + break + fi + else + all_subsequent_pass=false + break + fi + fi + done + + if [[ "$all_subsequent_pass" == true ]]; then + lowest_continuous_version="$version" + break + fi + done + + # Check if the two most recent versions failed + local num_versions=${#sorted_versions[@]} + if [[ $num_versions -ge 2 ]]; then + local second_recent="${sorted_versions[$((num_versions-2))]}" + local 
most_recent="${sorted_versions[$((num_versions-1))]}" + + local second_recent_status_file="${OUTPUT_DIR}/go-${second_recent}.txt.status" + local most_recent_status_file="${OUTPUT_DIR}/go-${most_recent}.txt.status" + + local second_recent_failed=false + local most_recent_failed=false + + if [[ -f "$second_recent_status_file" ]]; then + local status + status=$(cat "$second_recent_status_file") + if [[ "$status" != "PASSED" ]]; then + second_recent_failed=true + fi + else + second_recent_failed=true + fi + + if [[ -f "$most_recent_status_file" ]]; then + local status + status=$(cat "$most_recent_status_file") + if [[ "$status" != "PASSED" ]]; then + most_recent_failed=true + fi + else + most_recent_failed=true + fi + + if [[ "$second_recent_failed" == true || "$most_recent_failed" == true ]]; then + recent_versions_failed=true + fi + elif [[ $num_versions -eq 1 ]]; then + # Only one version tested, check if it's the most recent and failed + local only_version="${sorted_versions[0]}" + local only_status_file="${OUTPUT_DIR}/go-${only_version}.txt.status" + + if [[ -f "$only_status_file" ]]; then + local status + status=$(cat "$only_status_file") + if [[ "$status" != "PASSED" ]]; then + recent_versions_failed=true + fi + else + recent_versions_failed=true + fi + fi + + # Display summary + echo "" + log "Test completed in ${duration}s" + log "Summary report: $summary_file" + + echo "" + echo "========================================" + echo " FINAL RESULTS" + echo "========================================" + echo "" + + # Display passed versions + if [[ ${#passed_versions[@]} -gt 0 ]]; then + log_success "PASSED (${#passed_versions[@]}/${#GO_VERSIONS[@]}):" + # Sort passed versions for display + local sorted_passed=() + for version in "${sorted_versions[@]}"; do + for passed_version in "${passed_versions[@]}"; do + if [[ "$version" == "$passed_version" ]]; then + sorted_passed+=("$version") + break + fi + done + done + for version in "${sorted_passed[@]}"; do + echo -e " 
${GREEN}✓${NC} Go $version" + done + echo "" + fi + + # Display failed versions + if [[ ${#failed_versions[@]} -gt 0 ]]; then + log_error "FAILED (${#failed_versions[@]}/${#GO_VERSIONS[@]}):" + # Sort failed versions for display + local sorted_failed=() + for version in "${sorted_versions[@]}"; do + for failed_version in "${failed_versions[@]}"; do + if [[ "$version" == "$failed_version" ]]; then + sorted_failed+=("$version") + break + fi + done + done + for version in "${sorted_failed[@]}"; do + echo -e " ${RED}✗${NC} Go $version" + done + echo "" + + # Show failure details + echo "========================================" + echo " FAILURE DETAILS" + echo "========================================" + echo "" + + for version in "${sorted_failed[@]}"; do + echo -e "${RED}--- Go $version FAILURE LOGS (last 30 lines) ---${NC}" + local result_file="${OUTPUT_DIR}/go-${version}.txt" + if [[ -f "$result_file" ]]; then + tail -n 30 "$result_file" | sed 's/^/ /' + else + echo " No log file found: $result_file" + fi + echo "" + done + fi + + # Display unknown versions + if [[ ${#unknown_versions[@]} -gt 0 ]]; then + log_warning "UNKNOWN (${#unknown_versions[@]}/${#GO_VERSIONS[@]}):" + for version in "${unknown_versions[@]}"; do + echo -e " ${YELLOW}?${NC} Go $version (no status file)" + done + echo "" + fi + + echo "========================================" + echo " COMPATIBILITY SUMMARY" + echo "========================================" + echo "" + + if [[ -n "$lowest_continuous_version" ]]; then + log_success "Lowest continuous supported version: Go $lowest_continuous_version" + echo " (All versions from Go $lowest_continuous_version onwards pass)" + else + log_error "No continuous version support found" + echo " (No version has all subsequent versions passing)" + fi + + echo "" + echo "========================================" + echo "Full detailed logs available in: $OUTPUT_DIR" + echo "========================================" + + # Determine exit code based on recent 
versions + if [[ "$recent_versions_failed" == true ]]; then + log_error "OVERALL RESULT: Recent Go versions failed - this needs attention!" + if [[ -n "$lowest_continuous_version" ]]; then + echo "Note: Continuous support starts from Go $lowest_continuous_version" + fi + exit 1 + else + log_success "OVERALL RESULT: Recent Go versions pass - compatibility looks good!" + if [[ -n "$lowest_continuous_version" ]]; then + echo "Continuous support starts from Go $lowest_continuous_version" + fi + exit 0 + fi +} + +# Trap to clean up on exit +cleanup() { + # Kill any remaining background processes + jobs -p | xargs -r kill 2>/dev/null || true + + # Clean up any remaining Docker containers + docker ps -q --filter "name=go-toml-test-" | xargs -r docker stop 2>/dev/null || true + docker images -q --filter "reference=go-toml-test-*" | xargs -r docker rmi 2>/dev/null || true +} + +trap cleanup EXIT + +# Run main function +main diff --git a/vendor/github.com/pelletier/go-toml/v2/types.go b/vendor/github.com/pelletier/go-toml/v2/types.go index 3c6b8fe570..6d12fe5802 100644 --- a/vendor/github.com/pelletier/go-toml/v2/types.go +++ b/vendor/github.com/pelletier/go-toml/v2/types.go @@ -6,9 +6,18 @@ import ( "time" ) -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() -var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() -var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() -var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil)) -var sliceInterfaceType = reflect.TypeOf([]interface{}(nil)) -var stringType = reflect.TypeOf("") +// isZeroer is used to check if a type has a custom IsZero method. +// This allows custom types to define their own zero-value semantics. 
+type isZeroer interface { + IsZero() bool +} + +var ( + timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem() + mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil)) + sliceInterfaceType = reflect.TypeOf([]interface{}(nil)) + stringType = reflect.TypeOf("") +) diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index 189be525e1..e7db8128ce 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -12,7 +12,6 @@ import ( "sync/atomic" "time" - "github.com/pelletier/go-toml/v2/internal/danger" "github.com/pelletier/go-toml/v2/internal/tracker" "github.com/pelletier/go-toml/v2/unstable" ) @@ -57,13 +56,18 @@ func (d *Decoder) DisallowUnknownFields() *Decoder { // EnableUnmarshalerInterface allows to enable unmarshaler interface. // -// With this feature enabled, types implementing the unstable/Unmarshaler +// With this feature enabled, types implementing the unstable.Unmarshaler // interface can be decoded from any structure of the document. It allows types // that don't have a straightforward TOML representation to provide their own // decoding logic. // -// Currently, types can only decode from a single value. Tables and array tables -// are not supported. +// The UnmarshalTOML method receives raw TOML bytes: +// - For single values: the raw value bytes (e.g., `"hello"` for a string) +// - For tables: all key-value lines belonging to that table +// - For inline tables/arrays: the raw bytes of the inline structure +// +// The unstable.RawMessage type can be used to capture raw TOML bytes for +// later processing, similar to json.RawMessage. 
// // *Unstable:* This method does not follow the compatibility guarantees of // semver. It can be changed or removed without a new major version being @@ -123,6 +127,7 @@ func (d *Decoder) Decode(v interface{}) error { dec := decoder{ strict: strict{ Enabled: d.strict, + doc: b, }, unmarshalerInterface: d.unmarshalerInterface, } @@ -226,7 +231,7 @@ func (d *decoder) FromParser(v interface{}) error { } if r.IsNil() { - return fmt.Errorf("toml: decoding pointer target cannot be nil") + return errors.New("toml: decoding pointer target cannot be nil") } r = r.Elem() @@ -273,7 +278,7 @@ func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) err var err error var first bool // used for to clear array tables on first use - if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) { + if !d.skipUntilTable || expr.Kind != unstable.KeyValue { first, err = d.seen.CheckExpression(expr) if err != nil { return err @@ -378,7 +383,7 @@ func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflec case reflect.Array: idx := d.arrayIndex(true, v) if idx >= v.Len() { - return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) + return v, fmt.Errorf("%w at position %d", d.typeMismatchError("array table", v.Type()), idx) } elem := v.Index(idx) _, err := d.handleArrayTable(key, elem) @@ -416,27 +421,51 @@ func (d *decoder) handleArrayTableCollection(key unstable.Iterator, v reflect.Va return v, nil case reflect.Slice: - elem := v.Index(v.Len() - 1) + // Create a new element when the slice is empty; otherwise operate on + // the last element. 
+ var ( + elem reflect.Value + created bool + ) + if v.Len() == 0 { + created = true + elemType := v.Type().Elem() + if elemType.Kind() == reflect.Interface { + elem = makeMapStringInterface() + } else { + elem = reflect.New(elemType).Elem() + } + } else { + elem = v.Index(v.Len() - 1) + } + x, err := d.handleArrayTable(key, elem) if err != nil || d.skipUntilTable { return reflect.Value{}, err } if x.IsValid() { - elem.Set(x) + if created { + elem = x + } else { + elem.Set(x) + } } + if created { + return reflect.Append(v, elem), nil + } return v, err case reflect.Array: idx := d.arrayIndex(false, v) if idx >= v.Len() { - return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) + return v, fmt.Errorf("%w at position %d", d.typeMismatchError("array table", v.Type()), idx) } elem := v.Index(idx) _, err := d.handleArrayTable(key, elem) return v, err + default: + return d.handleArrayTable(key, v) } - - return d.handleArrayTable(key, v) } func (d *decoder) handleKeyPart(key unstable.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) { @@ -470,7 +499,8 @@ func (d *decoder) handleKeyPart(key unstable.Iterator, v reflect.Value, nextFn h mv := v.MapIndex(mk) set := false - if !mv.IsValid() { + switch { + case !mv.IsValid(): // If there is no value in the map, create a new one according to // the map type. 
If the element type is interface, create either a // map[string]interface{} or a []interface{} depending on whether @@ -483,13 +513,13 @@ func (d *decoder) handleKeyPart(key unstable.Iterator, v reflect.Value, nextFn h mv = reflect.New(t).Elem() } set = true - } else if mv.Kind() == reflect.Interface { + case mv.Kind() == reflect.Interface: mv = mv.Elem() if !mv.IsValid() { mv = makeFn() } set = true - } else if !mv.CanAddr() { + case !mv.CanAddr(): vt := v.Type() t := vt.Elem() oldmv := mv @@ -574,18 +604,28 @@ func (d *decoder) handleArrayTablePart(key unstable.Iterator, v reflect.Value) ( // cannot handle it. func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { if v.Kind() == reflect.Slice { - if v.Len() == 0 { - return reflect.Value{}, unstable.NewParserError(key.Node().Data, "cannot store a table in a slice") - } - elem := v.Index(v.Len() - 1) - x, err := d.handleTable(key, elem) - if err != nil { - return reflect.Value{}, err + // For non-empty slices, work with the last element + if v.Len() > 0 { + elem := v.Index(v.Len() - 1) + x, err := d.handleTable(key, elem) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + elem.Set(x) + } + return reflect.Value{}, nil } - if x.IsValid() { - elem.Set(x) + // Empty slice - check if it implements Unmarshaler (e.g., RawMessage) + // and we're at the end of the key path + if d.unmarshalerInterface && !key.Next() { + if v.CanAddr() && v.Addr().CanInterface() { + if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok { + return d.handleKeyValuesUnmarshaler(outi) + } + } } - return reflect.Value{}, nil + return reflect.Value{}, unstable.NewParserError(key.Node().Data, "cannot store a table in a slice") } if key.Next() { // Still scoping the key @@ -599,6 +639,24 @@ func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.V // Handle root expressions until the end of the document or the next // non-key-value. 
func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { + // Check if target implements Unmarshaler before processing key-values. + // This allows types to handle entire tables themselves. + if d.unmarshalerInterface { + vv := v + for vv.Kind() == reflect.Ptr { + if vv.IsNil() { + vv.Set(reflect.New(vv.Type().Elem())) + } + vv = vv.Elem() + } + if vv.CanAddr() && vv.Addr().CanInterface() { + if outi, ok := vv.Addr().Interface().(unstable.Unmarshaler); ok { + // Collect all key-value expressions for this table + return d.handleKeyValuesUnmarshaler(outi) + } + } + } + var rv reflect.Value for d.nextExpr() { expr := d.expr() @@ -628,6 +686,41 @@ func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { return rv, nil } +// handleKeyValuesUnmarshaler collects all key-value expressions for a table +// and passes them to the Unmarshaler as raw TOML bytes. +func (d *decoder) handleKeyValuesUnmarshaler(u unstable.Unmarshaler) (reflect.Value, error) { + // Collect raw bytes from all key-value expressions for this table. + // We use the Raw field on each KeyValue expression to preserve the + // original formatting (whitespace, quoting style, etc.) from the document. + var buf []byte + + for d.nextExpr() { + expr := d.expr() + if expr.Kind != unstable.KeyValue { + d.stashExpr() + break + } + + _, err := d.seen.CheckExpression(expr) + if err != nil { + return reflect.Value{}, err + } + + // Use the raw bytes from the original document to preserve formatting + if expr.Raw.Length > 0 { + raw := d.p.Raw(expr.Raw) + buf = append(buf, raw...) 
+ } + buf = append(buf, '\n') + } + + if err := u.UnmarshalTOML(buf); err != nil { + return reflect.Value{}, err + } + + return reflect.Value{}, nil +} + type ( handlerFn func(key unstable.Iterator, v reflect.Value) (reflect.Value, error) valueMakerFn func() reflect.Value @@ -672,14 +765,21 @@ func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error { if d.unmarshalerInterface { if v.CanAddr() && v.Addr().CanInterface() { if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok { - return outi.UnmarshalTOML(value) + // Pass raw bytes from the original document + return outi.UnmarshalTOML(d.p.Raw(value.Raw)) } } } - ok, err := d.tryTextUnmarshaler(value, v) - if ok || err != nil { - return err + // Only try TextUnmarshaler for scalar types. For Array and InlineTable, + // fall through to struct/map unmarshaling to allow flexible unmarshaling + // where a type can implement UnmarshalText for string values but still + // be populated field-by-field from a table. See issue #974. 
+ if value.Kind != unstable.Array && value.Kind != unstable.InlineTable { + ok, err := d.tryTextUnmarshaler(value, v) + if ok || err != nil { + return err + } } switch value.Kind { @@ -821,6 +921,9 @@ func (d *decoder) unmarshalDateTime(value *unstable.Node, v reflect.Value) error return err } + if v.Kind() != reflect.Interface && v.Type() != timeType { + return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("datetime", v.Type())) + } v.Set(reflect.ValueOf(dt)) return nil } @@ -831,14 +934,14 @@ func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) erro return err } + if v.Kind() != reflect.Interface && v.Type() != timeType { + return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("local date", v.Type())) + } if v.Type() == timeType { - cast := ld.AsTime(time.Local) - v.Set(reflect.ValueOf(cast)) + v.Set(reflect.ValueOf(ld.AsTime(time.Local))) return nil } - v.Set(reflect.ValueOf(ld)) - return nil } @@ -852,6 +955,9 @@ func (d *decoder) unmarshalLocalTime(value *unstable.Node, v reflect.Value) erro return unstable.NewParserError(rest, "extra characters at the end of a local time") } + if v.Kind() != reflect.Interface { + return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("local time", v.Type())) + } v.Set(reflect.ValueOf(lt)) return nil } @@ -866,15 +972,14 @@ func (d *decoder) unmarshalLocalDateTime(value *unstable.Node, v reflect.Value) return unstable.NewParserError(rest, "extra characters at the end of a local date time") } + if v.Kind() != reflect.Interface && v.Type() != timeType { + return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("local datetime", v.Type())) + } if v.Type() == timeType { - cast := ldt.AsTime(time.Local) - - v.Set(reflect.ValueOf(cast)) + v.Set(reflect.ValueOf(ldt.AsTime(time.Local))) return nil } - v.Set(reflect.ValueOf(ldt)) - return nil } @@ -929,8 +1034,9 @@ const ( // compile time, so it is computed during 
initialization. var maxUint int64 = math.MaxInt64 -func init() { +func init() { //nolint:gochecknoinits m := uint64(^uint(0)) + // #nosec G115 if m < uint64(maxUint) { maxUint = int64(m) } @@ -1010,7 +1116,7 @@ func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error case reflect.Interface: r = reflect.ValueOf(i) default: - return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("integer", v.Type())) + return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("integer", v.Type())) } if !r.Type().AssignableTo(v.Type()) { @@ -1029,7 +1135,7 @@ func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error { case reflect.Interface: v.Set(reflect.ValueOf(string(value.Data))) default: - return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("string", v.Type())) + return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("string", v.Type())) } return nil @@ -1080,35 +1186,39 @@ func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err) } return mk.Elem(), nil + } - case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + switch keyType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: key, err := strconv.ParseInt(string(data), 10, 64) if err != nil { return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from integer: %w", stringType, err) } return reflect.ValueOf(key).Convert(keyType), nil - case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: key, err := 
strconv.ParseUint(string(data), 10, 64) if err != nil { return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from unsigned integer: %w", stringType, err) } return reflect.ValueOf(key).Convert(keyType), nil - case keyType.Kind() == reflect.Float32: + case reflect.Float32: key, err := strconv.ParseFloat(string(data), 32) if err != nil { return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) } return reflect.ValueOf(float32(key)), nil - case keyType.Kind() == reflect.Float64: + case reflect.Float64: key, err := strconv.ParseFloat(string(data), 64) if err != nil { return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) } return reflect.ValueOf(float64(key)), nil + + default: + return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType) } - return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType) } func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { @@ -1154,6 +1264,18 @@ func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node case reflect.Struct: path, found := structFieldPath(v, string(key.Node().Data)) if !found { + // If no matching struct field is found but the target implements the + // unstable.Unmarshaler interface (and it is enabled), delegate the + // decoding of this value to the custom unmarshaler. + if d.unmarshalerInterface { + if v.CanAddr() && v.Addr().CanInterface() { + if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok { + // Pass raw bytes from the original document + return reflect.Value{}, outi.UnmarshalTOML(d.p.Raw(value.Raw)) + } + } + } + // Otherwise, keep previous behavior and skip until the next table. 
d.skipUntilTable = true break } @@ -1259,13 +1381,13 @@ func fieldByIndex(v reflect.Value, path []int) reflect.Value { type fieldPathsMap = map[string][]int -var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap +var globalFieldPathsCache atomic.Value // map[reflect.Type]fieldPathsMap func structFieldPath(v reflect.Value, name string) ([]int, bool) { t := v.Type() - cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap) - fieldPaths, ok := cache[danger.MakeTypeID(t)] + cache, _ := globalFieldPathsCache.Load().(map[reflect.Type]fieldPathsMap) + fieldPaths, ok := cache[t] if !ok { fieldPaths = map[string][]int{} @@ -1276,8 +1398,8 @@ func structFieldPath(v reflect.Value, name string) ([]int, bool) { fieldPaths[strings.ToLower(name)] = path }) - newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1) - newCache[danger.MakeTypeID(t)] = fieldPaths + newCache := make(map[reflect.Type]fieldPathsMap, len(cache)+1) + newCache[t] = fieldPaths for k, v := range cache { newCache[k] = v } @@ -1301,7 +1423,9 @@ func forEachField(t reflect.Type, path []int, do func(name string, path []int)) continue } - fieldPath := append(path, i) + fieldPath := make([]int, 0, len(path)+1) + fieldPath = append(fieldPath, path...) + fieldPath = append(fieldPath, i) fieldPath = fieldPath[:len(fieldPath):len(fieldPath)] name := f.Tag.Get("toml") diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go b/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go index f526bf2c09..6b21592d6e 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go @@ -1,10 +1,8 @@ package unstable import ( + "errors" "fmt" - "unsafe" - - "github.com/pelletier/go-toml/v2/internal/danger" ) // Iterator over a sequence of nodes. 
@@ -19,30 +17,43 @@ import ( // // do something with n // } type Iterator struct { + nodes *[]Node + idx int32 started bool - node *Node } // Next moves the iterator forward and returns true if points to a // node, false otherwise. func (c *Iterator) Next() bool { + if c.nodes == nil { + return false + } + nodes := *c.nodes if !c.started { c.started = true - } else if c.node.Valid() { - c.node = c.node.Next() + } else { + idx := c.idx + if idx >= 0 && int(idx) < len(nodes) { + c.idx = nodes[idx].next + } } - return c.node.Valid() + return c.idx >= 0 && int(c.idx) < len(nodes) } // IsLast returns true if the current node of the iterator is the last // one. Subsequent calls to Next() will return false. func (c *Iterator) IsLast() bool { - return c.node.next == 0 + return c.nodes == nil || c.idx < 0 || (*c.nodes)[c.idx].next < 0 } // Node returns a pointer to the node pointed at by the iterator. func (c *Iterator) Node() *Node { - return c.node + if c.nodes == nil || c.idx < 0 { + return nil + } + n := &(*c.nodes)[c.idx] + n.nodes = c.nodes + return n } // Node in a TOML expression AST. @@ -65,11 +76,12 @@ type Node struct { Raw Range // Raw bytes from the input. Data []byte // Node value (either allocated or referencing the input). - // References to other nodes, as offsets in the backing array - // from this node. References can go backward, so those can be - // negative. - next int // 0 if last element - child int // 0 if no child + // Absolute indices into the backing nodes slice. -1 means none. + next int32 + child int32 + + // Reference to the backing nodes slice for navigation. + nodes *[]Node } // Range of bytes in the document. @@ -80,24 +92,24 @@ type Range struct { // Next returns a pointer to the next node, or nil if there is no next node. 
func (n *Node) Next() *Node { - if n.next == 0 { + if n.next < 0 { return nil } - ptr := unsafe.Pointer(n) - size := unsafe.Sizeof(Node{}) - return (*Node)(danger.Stride(ptr, size, n.next)) + next := &(*n.nodes)[n.next] + next.nodes = n.nodes + return next } // Child returns a pointer to the first child node of this node. Other children -// can be accessed calling Next on the first child. Returns an nil if this Node +// can be accessed calling Next on the first child. Returns nil if this Node // has no child. func (n *Node) Child() *Node { - if n.child == 0 { + if n.child < 0 { return nil } - ptr := unsafe.Pointer(n) - size := unsafe.Sizeof(Node{}) - return (*Node)(danger.Stride(ptr, size, n.child)) + child := &(*n.nodes)[n.child] + child.nodes = n.nodes + return child } // Valid returns true if the node's kind is set (not to Invalid). @@ -111,13 +123,14 @@ func (n *Node) Valid() bool { func (n *Node) Key() Iterator { switch n.Kind { case KeyValue: - value := n.Child() - if !value.Valid() { - panic(fmt.Errorf("KeyValue should have at least two children")) + child := n.child + if child < 0 { + panic(errors.New("KeyValue should have at least two children")) } - return Iterator{node: value.Next()} + valueNode := &(*n.nodes)[child] + return Iterator{nodes: n.nodes, idx: valueNode.next} case Table, ArrayTable: - return Iterator{node: n.Child()} + return Iterator{nodes: n.nodes, idx: n.child} default: panic(fmt.Errorf("Key() is not supported on a %s", n.Kind)) } @@ -132,5 +145,5 @@ func (n *Node) Value() *Node { // Children returns an iterator over a node's children. 
func (n *Node) Children() Iterator { - return Iterator{node: n.Child()} + return Iterator{nodes: n.nodes, idx: n.child} } diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go b/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go index 9538e30df9..e4354985be 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go @@ -7,15 +7,6 @@ type root struct { nodes []Node } -// Iterator over the top level nodes. -func (r *root) Iterator() Iterator { - it := Iterator{} - if len(r.nodes) > 0 { - it.node = &r.nodes[0] - } - return it -} - func (r *root) at(idx reference) *Node { return &r.nodes[idx] } @@ -33,12 +24,10 @@ type builder struct { lastIdx int } -func (b *builder) Tree() *root { - return &b.tree -} - func (b *builder) NodeAt(ref reference) *Node { - return b.tree.at(ref) + n := b.tree.at(ref) + n.nodes = &b.tree.nodes + return n } func (b *builder) Reset() { @@ -48,24 +37,28 @@ func (b *builder) Reset() { func (b *builder) Push(n Node) reference { b.lastIdx = len(b.tree.nodes) + n.next = -1 + n.child = -1 b.tree.nodes = append(b.tree.nodes, n) return reference(b.lastIdx) } func (b *builder) PushAndChain(n Node) reference { newIdx := len(b.tree.nodes) + n.next = -1 + n.child = -1 b.tree.nodes = append(b.tree.nodes, n) if b.lastIdx >= 0 { - b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx + b.tree.nodes[b.lastIdx].next = int32(newIdx) //nolint:gosec // TOML ASTs are small } b.lastIdx = newIdx return reference(b.lastIdx) } func (b *builder) AttachChild(parent reference, child reference) { - b.tree.nodes[parent].child = int(child) - int(parent) + b.tree.nodes[parent].child = int32(child) //nolint:gosec // TOML ASTs are small } func (b *builder) Chain(from reference, to reference) { - b.tree.nodes[from].next = int(to) - int(from) + b.tree.nodes[from].next = int32(to) //nolint:gosec // TOML ASTs are small } diff --git 
a/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go b/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go index ff9df1bef8..f87a95a78d 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go @@ -6,28 +6,40 @@ import "fmt" type Kind int const ( - // Meta + // Invalid represents an invalid meta node. Invalid Kind = iota + // Comment represents a comment meta node. Comment + // Key represents a key meta node. Key - // Top level structures + // Table represents a top-level table. Table + // ArrayTable represents a top-level array table. ArrayTable + // KeyValue represents a top-level key value. KeyValue - // Containers values + // Array represents an array container value. Array + // InlineTable represents an inline table container value. InlineTable - // Values + // String represents a string value. String + // Bool represents a boolean value. Bool + // Float represents a floating point value. Float + // Integer represents an integer value. Integer + // LocalDate represents a a local date value. LocalDate + // LocalTime represents a local time value. LocalTime + // LocalDateTime represents a local date/time value. LocalDateTime + // DateTime represents a data/time value. DateTime ) diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go index 50358a44ff..e7c68dc5c2 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go @@ -6,7 +6,6 @@ import ( "unicode" "github.com/pelletier/go-toml/v2/internal/characters" - "github.com/pelletier/go-toml/v2/internal/danger" ) // ParserError describes an error relative to the content of the document. @@ -70,11 +69,26 @@ func (p *Parser) Data() []byte { // panics. 
func (p *Parser) Range(b []byte) Range { return Range{ - Offset: uint32(danger.SubsliceOffset(p.data, b)), - Length: uint32(len(b)), + Offset: uint32(p.subsliceOffset(b)), //nolint:gosec // TOML documents are small + Length: uint32(len(b)), //nolint:gosec // TOML documents are small } } +// rangeOfToken computes the Range of a token given the remaining bytes after the token. +// This is used when the token was extracted from the beginning of some position, +// and 'rest' is what remains after the token. +func (p *Parser) rangeOfToken(token, rest []byte) Range { + offset := len(p.data) - len(token) - len(rest) + return Range{Offset: uint32(offset), Length: uint32(len(token))} //nolint:gosec // TOML documents are small +} + +// subsliceOffset returns the byte offset of subslice b within p.data. +// b must be a suffix (tail) of p.data. +func (p *Parser) subsliceOffset(b []byte) int { + // b is a suffix of p.data, so its offset is len(p.data) - len(b) + return len(p.data) - len(b) +} + // Raw returns the slice corresponding to the bytes in the given range. func (p *Parser) Raw(raw Range) []byte { return p.data[raw.Offset : raw.Offset+raw.Length] @@ -158,9 +172,17 @@ type Shape struct { End Position } -func (p *Parser) position(b []byte) Position { - offset := danger.SubsliceOffset(p.data, b) +// Shape returns the shape of the given range in the input. Will +// panic if the range is not a subslice of the input. +func (p *Parser) Shape(r Range) Shape { + return Shape{ + Start: p.positionAt(int(r.Offset)), + End: p.positionAt(int(r.Offset + r.Length)), + } +} +// positionAt returns the position at the given byte offset in the document. +func (p *Parser) positionAt(offset int) Position { lead := p.data[:offset] return Position{ @@ -170,16 +192,6 @@ func (p *Parser) position(b []byte) Position { } } -// Shape returns the shape of the given range in the input. Will -// panic if the range is not a subslice of the input. 
-func (p *Parser) Shape(r Range) Shape { - raw := p.Raw(r) - return Shape{ - Start: p.position(raw), - End: p.position(raw[r.Length:]), - } -} - func (p *Parser) parseNewline(b []byte) ([]byte, error) { if b[0] == '\n' { return b[1:], nil @@ -199,7 +211,7 @@ func (p *Parser) parseComment(b []byte) (reference, []byte, error) { if p.KeepComments && err == nil { ref = p.builder.Push(Node{ Kind: Comment, - Raw: p.Range(data), + Raw: p.rangeOfToken(data, rest), Data: data, }) } @@ -316,6 +328,9 @@ func (p *Parser) parseStdTable(b []byte) (reference, []byte, error) { func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) { // keyval = key keyval-sep val + // Track the start position for Raw range + startB := b + ref := p.builder.Push(Node{ Kind: KeyValue, }) @@ -330,7 +345,7 @@ func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) { b = p.parseWhitespace(b) if len(b) == 0 { - return invalidReference, nil, NewParserError(b, "expected = after a key, but the document ends there") + return invalidReference, nil, NewParserError(startB[:len(startB)-len(b)], "expected = after a key, but the document ends there") } b, err = expect('=', b) @@ -348,6 +363,11 @@ func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) { p.builder.Chain(valRef, key) p.builder.AttachChild(ref, valRef) + // Set Raw to span the entire key-value expression. + // Access the node directly in the slice to avoid the write barrier + // that NodeAt's nodes-pointer setup would trigger. 
+ p.builder.tree.nodes[ref].Raw = p.rangeOfToken(startB[:len(startB)-len(b)], b) + return ref, b, err } @@ -376,7 +396,7 @@ func (p *Parser) parseVal(b []byte) (reference, []byte, error) { if err == nil { ref = p.builder.Push(Node{ Kind: String, - Raw: p.Range(raw), + Raw: p.rangeOfToken(raw, b), Data: v, }) } @@ -394,7 +414,7 @@ func (p *Parser) parseVal(b []byte) (reference, []byte, error) { if err == nil { ref = p.builder.Push(Node{ Kind: String, - Raw: p.Range(raw), + Raw: p.rangeOfToken(raw, b), Data: v, }) } @@ -456,7 +476,7 @@ func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) { // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] parent := p.builder.Push(Node{ Kind: InlineTable, - Raw: p.Range(b[:1]), + Raw: p.rangeOfToken(b[:1], b[1:]), }) first := true @@ -542,7 +562,7 @@ func (p *Parser) parseValArray(b []byte) (reference, []byte, error) { var err error for len(b) > 0 { - cref := invalidReference + var cref reference cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) if err != nil { return parent, nil, err @@ -611,12 +631,13 @@ func (p *Parser) parseOptionalWhitespaceCommentNewline(b []byte) (reference, []b latestCommentRef := invalidReference addComment := func(ref reference) { - if rootCommentRef == invalidReference { + switch { + case rootCommentRef == invalidReference: rootCommentRef = ref - } else if latestCommentRef == invalidReference { + case latestCommentRef == invalidReference: p.builder.AttachChild(rootCommentRef, ref) latestCommentRef = ref - } else { + default: p.builder.Chain(latestCommentRef, ref) latestCommentRef = ref } @@ -704,11 +725,11 @@ func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er if !escaped { str := token[startIdx:endIdx] - verr := characters.Utf8TomlValidAlreadyEscaped(str) - if verr.Zero() { + highlight := characters.Utf8TomlValidAlreadyEscaped(str) + if len(highlight) == 0 { return token, str, rest, nil } - return nil, nil, nil, 
NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + return nil, nil, nil, NewParserError(highlight, "invalid UTF-8") } var builder bytes.Buffer @@ -744,7 +765,7 @@ func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er i += j for ; i < len(token)-3; i++ { c := token[i] - if !(c == '\n' || c == '\r' || c == ' ' || c == '\t') { + if c != '\n' && c != '\r' && c != ' ' && c != '\t' { i-- break } @@ -820,7 +841,7 @@ func (p *Parser) parseKey(b []byte) (reference, []byte, error) { ref := p.builder.Push(Node{ Kind: Key, - Raw: p.Range(raw), + Raw: p.rangeOfToken(raw, b), Data: key, }) @@ -836,7 +857,7 @@ func (p *Parser) parseKey(b []byte) (reference, []byte, error) { p.builder.PushAndChain(Node{ Kind: Key, - Raw: p.Range(raw), + Raw: p.rangeOfToken(raw, b), Data: key, }) } else { @@ -897,11 +918,11 @@ func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { // validate the string and return a direct reference to the buffer. if !escaped { str := token[startIdx:endIdx] - verr := characters.Utf8TomlValidAlreadyEscaped(str) - if verr.Zero() { + highlight := characters.Utf8TomlValidAlreadyEscaped(str) + if len(highlight) == 0 { return token, str, rest, nil } - return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + return nil, nil, nil, NewParserError(highlight, "invalid UTF-8") } i := startIdx @@ -972,7 +993,7 @@ func hexToRune(b []byte, length int) (rune, error) { var r uint32 for i, c := range b { - d := uint32(0) + var d uint32 switch { case '0' <= c && c <= '9': d = uint32(c - '0') @@ -1013,7 +1034,7 @@ func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) return p.builder.Push(Node{ Kind: Float, Data: b[:3], - Raw: p.Range(b[:3]), + Raw: p.rangeOfToken(b[:3], b[3:]), }), b[3:], nil case 'n': if !scanFollowsNan(b) { @@ -1023,7 +1044,7 @@ func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) return 
p.builder.Push(Node{ Kind: Float, Data: b[:3], - Raw: p.Range(b[:3]), + Raw: p.rangeOfToken(b[:3], b[3:]), }), b[3:], nil case '+', '-': return p.scanIntOrFloat(b) @@ -1076,7 +1097,7 @@ byteLoop: } case c == 'T' || c == 't' || c == ':' || c == '.': hasTime = true - case c == '+' || c == '-' || c == 'Z' || c == 'z': + case c == '+' || c == 'Z' || c == 'z': hasTz = true case c == ' ': if !seenSpace && i+1 < len(b) && isDigit(b[i+1]) { @@ -1148,7 +1169,7 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { return p.builder.Push(Node{ Kind: Integer, Data: b[:i], - Raw: p.Range(b[:i]), + Raw: p.rangeOfToken(b[:i], b[i:]), }), b[i:], nil } @@ -1172,7 +1193,7 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { return p.builder.Push(Node{ Kind: Float, Data: b[:i+3], - Raw: p.Range(b[:i+3]), + Raw: p.rangeOfToken(b[:i+3], b[i+3:]), }), b[i+3:], nil } @@ -1184,7 +1205,7 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { return p.builder.Push(Node{ Kind: Float, Data: b[:i+3], - Raw: p.Range(b[:i+3]), + Raw: p.rangeOfToken(b[:i+3], b[i+3:]), }), b[i+3:], nil } @@ -1207,7 +1228,7 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { return p.builder.Push(Node{ Kind: kind, Data: b[:i], - Raw: p.Range(b[:i]), + Raw: p.rangeOfToken(b[:i], b[i:]), }), b[i:], nil } diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go index 00cfd6de45..5a79da88e8 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go @@ -1,7 +1,32 @@ package unstable -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a TOML document. +// Unmarshaler is implemented by types that can unmarshal a TOML +// description of themselves. 
The input is a valid TOML document +// containing the relevant portion of the parsed document. +// +// For tables (including split tables defined in multiple places), +// the data contains the raw key-value bytes from the original document +// with adjusted table headers to be relative to the unmarshaling target. type Unmarshaler interface { - UnmarshalTOML(value *Node) error + UnmarshalTOML(data []byte) error +} + +// RawMessage is a raw encoded TOML value. It implements Unmarshaler +// and can be used to delay TOML decoding or capture raw content. +// +// Example usage: +// +// type Config struct { +// Plugin RawMessage `toml:"plugin"` +// } +// +// var cfg Config +// toml.NewDecoder(r).EnableUnmarshalerInterface().Decode(&cfg) +// // cfg.Plugin now contains the raw TOML bytes for [plugin] +type RawMessage []byte + +// UnmarshalTOML implements Unmarshaler. +func (m *RawMessage) UnmarshalTOML(data []byte) error { + *m = append((*m)[0:0], data...) + return nil } diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go index 9254d1a4b0..d80946fd4d 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go @@ -203,6 +203,10 @@ type AlertmanagerSpec struct { // +optional //nolint:kubeapilinter // standard Kubernetes node selector format NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // schedulerName defines the scheduler to use for Pod scheduling. If not specified, the default scheduler is used. + // +optional + // +kubebuilder:validation:MinLength=1 + SchedulerName string `json:"schedulerName,omitempty"` // resources defines the resource requests and limits of the Pods. 
// +optional Resources v1.ResourceRequirements `json:"resources,omitempty"` @@ -270,27 +274,43 @@ type AlertmanagerSpec struct { // +optional UpdateStrategy *StatefulSetUpdateStrategy `json:"updateStrategy,omitempty"` - // containers allows injecting additional containers. This is meant to - // allow adding an authentication proxy to an Alertmanager pod. - // Containers described here modify an operator generated container if they - // share the same name and modifications are done via a strategic merge - // patch. The current container names are: `alertmanager` and - // `config-reloader`. Overriding containers is entirely outside the scope - // of what the maintainers will support and by doing so, you accept that - // this behaviour may break at any time without notice. + // containers allows injecting additional containers or modifying operator + // generated containers. This can be used to allow adding an authentication + // proxy to the Pods or to change the behavior of an operator generated + // container. Containers described here modify an operator generated + // container if they share the same name and modifications are done via a + // strategic merge patch. + // + // The names of containers managed by the operator are: + // * `alertmanager` + // * `config-reloader` + // * `thanos-sidecar` + // + // Overriding containers which are managed by the operator require careful + // testing, especially when upgrading to a new version of the operator. + // // +optional Containers []v1.Container `json:"containers,omitempty"` - // initContainers allows adding initContainers to the pod definition. Those can be used to e.g. - // fetch secrets for injection into the Alertmanager configuration from external sources. Any - // errors during the execution of an initContainer will lead to a restart of the Pod. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - // InitContainers described here modify an operator - // generated init containers if they share the same name and modifications are - // done via a strategic merge patch. The current init container name is: - // `init-config-reloader`. Overriding init containers is entirely outside the - // scope of what the maintainers will support and by doing so, you accept that - // this behaviour may break at any time without notice. + + // initContainers allows injecting initContainers to the Pod definition. Those + // can be used to e.g. fetch secrets for injection into the Prometheus + // configuration from external sources. Any errors during the execution of + // an initContainer will lead to a restart of the Pod. More info: + // https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // InitContainers described here modify an operator generated init + // containers if they share the same name and modifications are done via a + // strategic merge patch. + // + // The names of init container name managed by the operator are: + // * `init-config-reloader`. + // + // Overriding init containers which are managed by the operator require + // careful testing, especially when upgrading to a new version of the + // operator. + // // +optional InitContainers []v1.Container `json:"initContainers,omitempty"` + // priorityClassName assigned to the Pods // +optional PriorityClassName string `json:"priorityClassName,omitempty"` @@ -358,6 +378,17 @@ type AlertmanagerSpec struct { // +listMapKey=ip // +optional HostAliases []HostAlias `json:"hostAliases,omitempty"` + // hostNetwork controls whether the pod may use the node network namespace. + // + // Make sure to understand the security implications if you want to enable + // it (https://kubernetes.io/docs/concepts/configuration/overview/). 
+ // + // When hostNetwork is enabled, this will set the DNS policy to + // `ClusterFirstWithHostNet` automatically (unless `.spec.dnsPolicy` is set + // to a different value). + // + // +optional + HostNetwork bool `json:"hostNetwork,omitempty"` // nolint:kubeapilinter // web defines the web command line flags when starting Alertmanager. // +optional Web *AlertmanagerWebSpec `json:"web,omitempty"` @@ -569,6 +600,10 @@ func (a *Alertmanager) ExpectedReplicas() int { return int(*a.Spec.Replicas) } +func (a *Alertmanager) GetAvailableReplicas() int { return int(a.Status.AvailableReplicas) } +func (a *Alertmanager) GetUpdatedReplicas() int { return int(a.Status.UpdatedReplicas) } +func (a *Alertmanager) GetConditions() []Condition { return a.Status.Conditions } + func (a *Alertmanager) SetReplicas(i int) { a.Status.Replicas = int32(i) } func (a *Alertmanager) SetUpdatedReplicas(i int) { a.Status.UpdatedReplicas = int32(i) } func (a *Alertmanager) SetAvailableReplicas(i int) { a.Status.AvailableReplicas = int32(i) } @@ -645,6 +680,14 @@ type GlobalSMTPConfig struct { // tlsConfig defines the default TLS configuration for SMTP receivers // +optional TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` + + // forceImplicitTLS defines whether to force use of implicit TLS (direct TLS connection) for better security. + // true: force use of implicit TLS (direct TLS connection on any port) + // false: force disable implicit TLS (use explicit TLS/STARTTLS if required) + // nil (default): auto-detect based on port (465=implicit, other=explicit) for backward compatibility + // It requires Alertmanager >= v0.31.0. + // +optional + ForceImplicitTLS *bool `json:"forceImplicitTLS,omitempty"` // nolint:kubeapilinter } // GlobalTelegramConfig configures global Telegram parameters. 
@@ -767,7 +810,3 @@ type ClusterTLSConfig struct { // +required ClientTLS SafeTLSConfig `json:"client"` } - -// URL represents a valid URL -// +kubebuilder:validation:Pattern:="^(http|https)://.+$" -type URL string diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go index 812f6efbc8..e0e62c2f34 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go @@ -86,6 +86,12 @@ func (l *Prometheus) GetStatus() PrometheusStatus { return l.Status } +func (p *Prometheus) ExpectedReplicas() int { return p.Spec.CommonPrometheusFields.ExpectedReplicas() } + +func (p *Prometheus) GetAvailableReplicas() int { return int(p.Status.AvailableReplicas) } +func (p *Prometheus) GetUpdatedReplicas() int { return int(p.Status.UpdatedReplicas) } +func (p *Prometheus) GetConditions() []Condition { return p.Status.Conditions } + // +kubebuilder:validation:Enum=OnResource;OnShard type AdditionalLabelSelectors string @@ -433,6 +439,11 @@ type CommonPrometheusFields struct { //nolint:kubeapilinter NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // schedulerName defines the scheduler to use for Pod scheduling. If not specified, the default scheduler is used. + // +optional + // +kubebuilder:validation:MinLength=1 + SchedulerName string `json:"schedulerName,omitempty"` + // serviceAccountName is the name of the ServiceAccount to use to run the // Prometheus Pods. 
// +optional @@ -538,13 +549,14 @@ type CommonPrometheusFields struct { // * `config-reloader` // * `thanos-sidecar` // - // Overriding containers is entirely outside the scope of what the - // maintainers will support and by doing so, you accept that this behaviour - // may break at any time without notice. + // Overriding containers which are managed by the operator require careful + // testing, especially when upgrading to a new version of the operator. + // // +optional Containers []v1.Container `json:"containers,omitempty"` + // initContainers allows injecting initContainers to the Pod definition. Those - // can be used to e.g. fetch secrets for injection into the Prometheus + // can be used to e.g. fetch secrets for injection into the Prometheus // configuration from external sources. Any errors during the execution of // an initContainer will lead to a restart of the Pod. More info: // https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ @@ -555,9 +567,10 @@ type CommonPrometheusFields struct { // The names of init container name managed by the operator are: // * `init-config-reloader`. // - // Overriding init containers is entirely outside the scope of what the - // maintainers will support and by doing so, you accept that this behaviour - // may break at any time without notice. + // Overriding init containers which are managed by the operator require + // careful testing, especially when upgrading to a new version of the + // operator. 
+ // // +optional InitContainers []v1.Container `json:"initContainers,omitempty"` @@ -1013,6 +1026,18 @@ type CommonPrometheusFields struct { HostUsers *bool `json:"hostUsers,omitempty"` // nolint:kubeapilinter } +func (cpf CommonPrometheusFields) ExpectedReplicas() int { + replicas := 1 + if cpf.Replicas != nil { + replicas = int(*cpf.Replicas) + } + shards := 1 + if cpf.Shards != nil { + shards = int(*cpf.Shards) + } + return replicas * shards +} + // Specifies the validation scheme for metric and label names. // // Supported values are: @@ -1554,7 +1579,7 @@ type ThanosSpec struct { // grpcServerTlsConfig defines the TLS parameters for the gRPC server providing the StoreAPI. // - // Note: Currently only the `caFile`, `certFile`, and `keyFile` fields are supported. + // Note: Currently only the `minVersion`, `caFile`, `certFile`, and `keyFile` fields are supported. // // +optional GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"` @@ -1620,9 +1645,10 @@ type ThanosSpec struct { // +k8s:openapi-gen=true type RemoteWriteSpec struct { // url defines the URL of the endpoint to send samples to. - // +kubebuilder:validation:MinLength=1 + // + // It must use the HTTP or HTTPS scheme. // +required - URL string `json:"url"` + URL URL `json:"url"` // name of the remote write queue, it must be unique if specified. The // name is used in metrics and logging in order to differentiate queues. 
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go index 49dd72f0bd..a78f5a3d18 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go @@ -122,6 +122,11 @@ type ThanosRulerSpec struct { //nolint:kubeapilinter NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // schedulerName defines the scheduler to use for Pod scheduling. If not specified, the default scheduler is used. + // +optional + // +kubebuilder:validation:MinLength=1 + SchedulerName string `json:"schedulerName,omitempty"` + // resources defines the resource requirements for single Pods. // If not provided, no requests/limits will be set // +optional @@ -366,22 +371,29 @@ type ThanosRulerSpec struct { // +optional Retention Duration `json:"retention,omitempty"` - // containers allows injecting additional containers or modifying operator generated - // containers. This can be used to allow adding an authentication proxy to a ThanosRuler pod or - // to change the behavior of an operator generated container. Containers described here modify - // an operator generated container if they share the same name and modifications are done via a - // strategic merge patch. The current container names are: `thanos-ruler` and `config-reloader`. - // Overriding containers is entirely outside the scope of what the maintainers will support and by doing - // so, you accept that this behaviour may break at any time without notice. + // containers allows injecting additional containers or modifying operator + // generated containers. This can be used to allow adding an authentication + // proxy to the Pods or to change the behavior of an operator generated + // container. 
Containers described here modify an operator generated + // container if they share the same name and modifications are done via a + // strategic merge patch. + // + // The names of containers managed by the operator are: + // * `thanos-ruler` + // * `config-reloader` + // + // Overriding containers which are managed by the operator require careful + // testing, especially when upgrading to a new version of the operator. + // // +optional Containers []v1.Container `json:"containers,omitempty"` - // initContainers allows adding initContainers to the pod definition. Those can be used to e.g. - // fetch secrets for injection into the ThanosRuler configuration from external sources. Any - // errors during the execution of an initContainer will lead to a restart of the Pod. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - // Using initContainers for any use case other then secret fetching is entirely outside the scope - // of what the maintainers will support and by doing so, you accept that this behaviour may break - // at any time without notice. + + // initContainers allows injecting initContainers to the Pod definition. + // Those can be used to e.g. fetch secrets for injection into the + // configuration from external sources. Any errors during the execution of + // an initContainer will lead to a restart of the Pod. More info: + // https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // // +optional InitContainers []v1.Container `json:"initContainers,omitempty"` @@ -440,8 +452,9 @@ type ThanosRulerSpec struct { // grpcServerTlsConfig defines the gRPC server from which Thanos Querier reads // recorded rule data. - // Note: Currently only the CAFile, CertFile, and KeyFile fields are supported. - // Maps to the '--grpc-server-tls-*' CLI args. + // + // Note: Currently only the `minVersion`, `caFile`, `certFile`, and `keyFile` fields are supported. 
+ // // +optional GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"` @@ -595,6 +608,10 @@ func (tr *ThanosRuler) ExpectedReplicas() int { return int(*tr.Spec.Replicas) } +func (tr *ThanosRuler) GetAvailableReplicas() int { return int(tr.Status.AvailableReplicas) } +func (tr *ThanosRuler) GetUpdatedReplicas() int { return int(tr.Status.UpdatedReplicas) } +func (tr *ThanosRuler) GetConditions() []Condition { return tr.Status.Conditions } + func (tr *ThanosRuler) SetReplicas(i int) { tr.Status.Replicas = int32(i) } func (tr *ThanosRuler) SetUpdatedReplicas(i int) { tr.Status.UpdatedReplicas = int32(i) } func (tr *ThanosRuler) SetAvailableReplicas(i int) { tr.Status.AvailableReplicas = int32(i) } diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go index 2f1cd1f1e6..36db7f95d3 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go @@ -34,6 +34,10 @@ const ( Version = "v1" ) +// URL represents a valid URL +// +kubebuilder:validation:Pattern:="^(http|https)://.+$" +type URL string + // ByteSize is a valid memory size type based on powers-of-2, so 1KB is 1024B. // Supported units: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB, EB, EiB Ex: `512MB`. // +kubebuilder:validation:Pattern:="(^0|([0-9]*[.])?[0-9]+((K|M|G|T|E|P)i?)?B)$" @@ -49,12 +53,6 @@ func (bs *ByteSize) IsEmpty() bool { // +kubebuilder:validation:Pattern:="^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$" type Duration string -// DurationPointer is a helper function to parse a Duration string into a *Duration. 
-func DurationPointer(s string) *Duration { - d := Duration(s) - return &d -} - // NonEmptyDuration is a valid time duration that can be parsed by Prometheus model.ParseDuration() function. // Compared to Duration, NonEmptyDuration enforces a minimum length of 1. // Supported units: y, w, d, h, m, s, ms diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go index 73314958dc..fec9816d4a 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go @@ -1576,6 +1576,11 @@ func (in *GlobalSMTPConfig) DeepCopyInto(out *GlobalSMTPConfig) { *out = new(SafeTLSConfig) (*in).DeepCopyInto(*out) } + if in.ForceImplicitTLS != nil { + in, out := &in.ForceImplicitTLS, &out.ForceImplicitTLS + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalSMTPConfig. 
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 23ecd4505b..eac920ba80 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -34,6 +34,14 @@ linters: capital: true misspell: locale: US + revive: + rules: + - name: var-naming + # TODO(SuperQ): See: https://github.com/prometheus/prometheus/issues/17766 + arguments: + - [] + - [] + - - skip-package-name-checks: true exclusions: presets: - comments diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 6f61bec48f..cce3ef1d16 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -1,4 +1,4 @@ -# Copyright 2018 The Prometheus Authors +# Copyright The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -55,13 +55,13 @@ ifneq ($(shell command -v gotestsum 2> /dev/null),) endif endif -PROMU_VERSION ?= 0.17.0 +PROMU_VERSION ?= 0.18.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v2.1.5 +GOLANGCI_LINT_VERSION ?= v2.10.1 GOLANGCI_FMT_OPTS ?= # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. @@ -82,11 +82,50 @@ endif PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) -DOCKERFILE_PATH ?= ./Dockerfile DOCKERBUILD_CONTEXT ?= ./ DOCKER_REPO ?= prom +# Check if deprecated DOCKERFILE_PATH is set +ifdef DOCKERFILE_PATH +$(error DOCKERFILE_PATH is deprecated. 
Use DOCKERFILE_VARIANTS ?= $(DOCKERFILE_PATH) in the Makefile) +endif + DOCKER_ARCHS ?= amd64 +DOCKERFILE_VARIANTS ?= Dockerfile $(wildcard Dockerfile.*) + +# Function to extract variant from Dockerfile label. +# Returns the variant name from io.prometheus.image.variant label, or "default" if not found. +define dockerfile_variant +$(strip $(or $(shell sed -n 's/.*io\.prometheus\.image\.variant="\([^"]*\)".*/\1/p' $(1)),default)) +endef + +# Check for duplicate variant names (including default for Dockerfiles without labels). +DOCKERFILE_VARIANT_NAMES := $(foreach df,$(DOCKERFILE_VARIANTS),$(call dockerfile_variant,$(df))) +DOCKERFILE_VARIANT_NAMES_SORTED := $(sort $(DOCKERFILE_VARIANT_NAMES)) +ifneq ($(words $(DOCKERFILE_VARIANT_NAMES)),$(words $(DOCKERFILE_VARIANT_NAMES_SORTED))) +$(error Duplicate variant names found. Each Dockerfile must have a unique io.prometheus.image.variant label, and only one can be without a label (default)) +endif + +# Build variant:dockerfile pairs for shell iteration. +DOCKERFILE_VARIANTS_WITH_NAMES := $(foreach df,$(DOCKERFILE_VARIANTS),$(call dockerfile_variant,$(df)):$(df)) + +# Shell helper to check whether a dockerfile/arch pair is excluded. +define dockerfile_arch_is_excluded +case " $(DOCKERFILE_ARCH_EXCLUSIONS) " in \ + *" $$dockerfile:$(1) "*) true ;; \ + *) false ;; \ +esac +endef + +# Shell helper to check whether a registry/arch pair is excluded. 
+# Extracts registry from DOCKER_REPO (e.g., quay.io/prometheus -> quay.io) +define registry_arch_is_excluded +registry=$$(echo "$(DOCKER_REPO)" | cut -d'/' -f1); \ +case " $(DOCKER_REGISTRY_ARCH_EXCLUSIONS) " in \ + *" $$registry:$(1) "*) true ;; \ + *) false ;; \ +esac +endef BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) @@ -112,7 +151,7 @@ common-all: precheck style check_license lint yamllint unused build test .PHONY: common-style common-style: @echo ">> checking code style" - @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ + @fmtRes=$$($(GOFMT) -d $$(git ls-files '*.go' ':!:vendor/*' || find . -path ./vendor -prune -o -name '*.go' -print)); \ if [ -n "$${fmtRes}" ]; then \ echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ echo "Please ensure you are using $$($(GO) version) for formatting code."; \ @@ -122,13 +161,19 @@ common-style: .PHONY: common-check_license common-check_license: @echo ">> checking license header" - @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ + @licRes=$$(for file in $$(git ls-files '*.go' ':!:vendor/*' || find . 
-path ./vendor -prune -o -type f -iname '*.go' -print) ; do \ awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ done); \ if [ -n "$${licRes}" ]; then \ echo "license header checking failed:"; echo "$${licRes}"; \ exit 1; \ fi + @echo ">> checking for copyright years 2026 or later" + @futureYearRes=$$(git grep -E 'Copyright (202[6-9]|20[3-9][0-9])' -- '*.go' ':!:vendor/*' || true); \ + if [ -n "$${futureYearRes}" ]; then \ + echo "Files with copyright year 2026 or later found (should use 'Copyright The Prometheus Authors'):"; echo "$${futureYearRes}"; \ + exit 1; \ + fi .PHONY: common-deps common-deps: @@ -220,28 +265,194 @@ common-docker-repo-name: .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ - -f $(DOCKERFILE_PATH) \ - --build-arg ARCH="$*" \ - --build-arg OS="linux" \ - $(DOCKERBUILD_CONTEXT) + @for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \ + dockerfile=$${variant#*:}; \ + variant_name=$${variant%%:*}; \ + if $(call dockerfile_arch_is_excluded,$*); then \ + echo "Skipping $$variant_name variant for linux-$* (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + distroless_arch="$*"; \ + if [ "$*" = "armv7" ]; then \ + distroless_arch="arm"; \ + fi; \ + if [ "$$dockerfile" = "Dockerfile" ]; then \ + echo "Building default variant ($$variant_name) for linux-$* using $$dockerfile"; \ + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ + -f $$dockerfile \ + --build-arg ARCH="$*" \ + --build-arg OS="linux" \ + --build-arg DISTROLESS_ARCH="$$distroless_arch" \ + $(DOCKERBUILD_CONTEXT); \ + if [ "$$variant_name" != "default" ]; then \ + echo "Tagging default variant with $$variant_name suffix"; \ + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ + 
"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \ + fi; \ + else \ + echo "Building $$variant_name variant for linux-$* using $$dockerfile"; \ + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" \ + -f $$dockerfile \ + --build-arg ARCH="$*" \ + --build-arg OS="linux" \ + --build-arg DISTROLESS_ARCH="$$distroless_arch" \ + $(DOCKERBUILD_CONTEXT); \ + fi; \ + done .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" + @for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \ + dockerfile=$${variant#*:}; \ + variant_name=$${variant%%:*}; \ + if $(call dockerfile_arch_is_excluded,$*); then \ + echo "Skipping push for $$variant_name variant on linux-$* (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if $(call registry_arch_is_excluded,$*); then \ + echo "Skipping push for $$variant_name variant on linux-$* to $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \ + echo "Pushing $$variant_name variant for linux-$*"; \ + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \ + fi; \ + if [ "$$dockerfile" = "Dockerfile" ]; then \ + echo "Pushing default variant ($$variant_name) for linux-$*"; \ + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"; \ + fi; \ + if [ "$(DOCKER_IMAGE_TAG)" = "latest" ]; then \ + if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \ + echo "Pushing $$variant_name variant version tags for linux-$*"; \ + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \ 
+ fi; \ + if [ "$$dockerfile" = "Dockerfile" ]; then \ + echo "Pushing default variant version tag for linux-$*"; \ + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"; \ + fi; \ + fi; \ + done DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" + @for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \ + dockerfile=$${variant#*:}; \ + variant_name=$${variant%%:*}; \ + if $(call dockerfile_arch_is_excluded,$*); then \ + echo "Skipping tag for $$variant_name variant on linux-$* (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if $(call registry_arch_is_excluded,$*); then \ + echo "Skipping tag for $$variant_name variant on linux-$* for $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \ + echo "Tagging $$variant_name variant for linux-$* as latest"; \ + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest-$$variant_name"; \ + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \ + fi; \ + if [ "$$dockerfile" = "Dockerfile" ]; then \ + echo "Tagging default variant ($$variant_name) for linux-$* as latest"; \ + docker tag 
"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"; \ + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"; \ + fi; \ + done .PHONY: common-docker-manifest common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" + @for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \ + dockerfile=$${variant#*:}; \ + variant_name=$${variant%%:*}; \ + if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \ + echo "Creating manifest for $$variant_name variant"; \ + refs=""; \ + for arch in $(DOCKER_ARCHS); do \ + if $(call dockerfile_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for $$variant_name (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if $(call registry_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for $$variant_name on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + refs="$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \ + done; \ + if [ -z "$$refs" ]; then \ + echo "Skipping manifest for $$variant_name variant (no supported architectures)"; \ + continue; \ + fi; \ + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" $$refs; \ + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \ + fi; \ + if [ "$$dockerfile" = 
"Dockerfile" ]; then \ + echo "Creating default variant ($$variant_name) manifest"; \ + refs=""; \ + for arch in $(DOCKER_ARCHS); do \ + if $(call dockerfile_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for default variant (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if $(call registry_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for default variant on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + refs="$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:$(SANITIZED_DOCKER_IMAGE_TAG)"; \ + done; \ + if [ -z "$$refs" ]; then \ + echo "Skipping default variant manifest (no supported architectures)"; \ + continue; \ + fi; \ + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $$refs; \ + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"; \ + fi; \ + if [ "$(DOCKER_IMAGE_TAG)" = "latest" ]; then \ + if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \ + echo "Creating manifest for $$variant_name variant version tag"; \ + refs=""; \ + for arch in $(DOCKER_ARCHS); do \ + if $(call dockerfile_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for $$variant_name version tag (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if $(call registry_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for $$variant_name version tag on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + refs="$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \ + done; \ + if [ -z "$$refs" ]; then \ + echo "Skipping version-tag manifest for $$variant_name variant (no supported architectures)"; \ + continue; \ + fi; \ + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a 
"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name" $$refs; \ + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \ + fi; \ + if [ "$$dockerfile" = "Dockerfile" ]; then \ + echo "Creating default variant version tag manifest"; \ + refs=""; \ + for arch in $(DOCKER_ARCHS); do \ + if $(call dockerfile_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for default variant version tag (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if $(call registry_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for default variant version tag on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + refs="$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:v$(DOCKER_MAJOR_VERSION_TAG)"; \ + done; \ + if [ -z "$$refs" ]; then \ + echo "Skipping default variant version-tag manifest (no supported architectures)"; \ + continue; \ + fi; \ + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)" $$refs; \ + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)"; \ + fi; \ + fi; \ + done .PHONY: promu promu: $(PROMU) @@ -266,6 +477,10 @@ $(GOLANGCI_LINT): | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif +.PHONY: common-print-golangci-lint-version +common-print-golangci-lint-version: + @echo $(GOLANGCI_LINT_VERSION) + .PHONY: precheck precheck:: diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 5fe6cecd3d..4b23d8d6b5 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -12,7 +12,6 @@ // limitations under the License. 
//go:build linux -// +build linux package procfs @@ -502,7 +501,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { return cpuinfo, nil } -func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode +func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { //nolint:unused return nil, errors.New("not implemented") } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go index 8f155551e5..b09035ff38 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -12,8 +12,6 @@ // limitations under the License. //go:build linux && (arm || arm64) -// +build linux -// +build arm arm64 package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go index e81a5db949..7bb20211f9 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go @@ -12,7 +12,6 @@ // limitations under the License. //go:build linux -// +build linux package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go index 4be2b1cc54..fd75d0f79d 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -12,8 +12,6 @@ // limitations under the License. //go:build linux && (mips || mipsle || mips64 || mips64le) -// +build linux -// +build mips mipsle mips64 mips64le package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go index e713bae8df..3d36ba0e6b 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -12,7 +12,6 @@ // limitations under the License. 
//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x -// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go index 0825aa1a83..b3425051ef 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -12,8 +12,6 @@ // limitations under the License. //go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go index 496770b05f..72598230c3 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -12,8 +12,6 @@ // limitations under the License. //go:build linux && (riscv || riscv64) -// +build linux -// +build riscv riscv64 package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go index b3228ce3d8..50a8239cbc 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -12,7 +12,6 @@ // limitations under the License. //go:build linux -// +build linux package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go index 575eb022eb..00edb30a5c 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -12,8 +12,6 @@ // limitations under the License. 
//go:build linux && (386 || amd64) -// +build linux -// +build 386 amd64 package procfs diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 3c53023c54..0bef25bdd9 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -12,7 +12,6 @@ // limitations under the License. //go:build !freebsd && !linux -// +build !freebsd,!linux package procfs diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go index 80fce48478..d183330390 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -12,7 +12,6 @@ // limitations under the License. //go:build freebsd || linux -// +build freebsd linux package procfs diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index 8318d8dfd5..f6a4a4de62 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -12,8 +12,6 @@ // limitations under the License. //go:build (linux || darwin) && !appengine -// +build linux darwin -// +build !appengine package util diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go index 15bb096ee2..c80e082cb9 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -12,7 +12,6 @@ // limitations under the License. 
//go:build (linux && appengine) || (!linux && !darwin) -// +build linux,appengine !linux,!darwin package util diff --git a/vendor/github.com/prometheus/procfs/kernel_hung.go b/vendor/github.com/prometheus/procfs/kernel_hung.go index 539c111514..0c7a69f99f 100644 --- a/vendor/github.com/prometheus/procfs/kernel_hung.go +++ b/vendor/github.com/prometheus/procfs/kernel_hung.go @@ -12,7 +12,6 @@ // limitations under the License. //go:build !windows -// +build !windows package procfs diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go index b66565a104..e7c5b8cf2b 100644 --- a/vendor/github.com/prometheus/procfs/kernel_random.go +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -12,7 +12,6 @@ // limitations under the License. //go:build !windows -// +build !windows package procfs diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go index 610ea78e56..2c7f9bc7c3 100644 --- a/vendor/github.com/prometheus/procfs/net_tcp.go +++ b/vendor/github.com/prometheus/procfs/net_tcp.go @@ -25,6 +25,7 @@ type ( // NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp. +// // Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCP() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp")) @@ -32,6 +33,7 @@ func (fs FS) NetTCP() (NetTCP, error) { // NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp6. +// // Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp6")) @@ -39,6 +41,7 @@ func (fs FS) NetTCP6() (NetTCP, error) { // NetTCPSummary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp. 
+// // Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp")) @@ -46,6 +49,7 @@ func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { // NetTCP6Summary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp6. +// // Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp6")) diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go index b942c50723..643b500d5d 100644 --- a/vendor/github.com/prometheus/procfs/proc_interrupts.go +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -42,7 +42,7 @@ type Interrupts map[string]Interrupt // Interrupts creates a new instance from a given Proc instance. func (p Proc) Interrupts() (Interrupts, error) { - data, err := util.ReadFileNoStat(p.path("interrupts")) + data, err := util.ReadFileNoStat(p.fs.proc.Path("interrupts")) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index cc519f92f9..08b89a6eb9 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -12,8 +12,6 @@ // limitations under the License. 
//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris -// +build !js package procfs diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index 3e48afd1d3..f637309b3d 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -12,7 +12,6 @@ // limitations under the License. //go:build !windows -// +build !windows package procfs diff --git a/vendor/github.com/prometheus/procfs/proc_statm.go b/vendor/github.com/prometheus/procfs/proc_statm.go index b0a9360167..6bcc97ec9c 100644 --- a/vendor/github.com/prometheus/procfs/proc_statm.go +++ b/vendor/github.com/prometheus/procfs/proc_statm.go @@ -45,6 +45,7 @@ type ProcStatm struct { } // NewStatm returns the current status information of the process. +// // Deprecated: Use p.Statm() instead. func (p Proc) NewStatm() (ProcStatm, error) { return p.Statm() diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 1ed2bced41..12d65581c8 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -83,6 +83,19 @@ type ProcStatus struct { // CpusAllowedList: List of cpu cores processes are allowed to run on. CpusAllowedList []uint64 + + // CapInh is the bitmap of inheritable capabilities + // + // See: https://www.kernel.org/doc/man-pages/online/pages/man7/capabilities.7.html + CapInh uint64 + // CapPrm is the bitmap of permitted capabilities + CapPrm uint64 + // CapEff is the bitmap of effective capabilities + CapEff uint64 + // CapBnd is the bitmap of bounding capabilities + CapBnd uint64 + // CapAmb is the bitmap of ambient capabilities + CapAmb uint64 } // NewStatus returns the current status information of the process. 
@@ -190,6 +203,36 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.NonVoluntaryCtxtSwitches = vUint case "Cpus_allowed_list": s.CpusAllowedList = calcCpusAllowedList(vString) + case "CapInh": + var err error + s.CapInh, err = strconv.ParseUint(vString, 16, 64) + if err != nil { + return err + } + case "CapPrm": + var err error + s.CapPrm, err = strconv.ParseUint(vString, 16, 64) + if err != nil { + return err + } + case "CapEff": + var err error + s.CapEff, err = strconv.ParseUint(vString, 16, 64) + if err != nil { + return err + } + case "CapBnd": + var err error + s.CapBnd, err = strconv.ParseUint(vString, 16, 64) + if err != nil { + return err + } + case "CapAmb": + var err error + s.CapAmb, err = strconv.ParseUint(vString, 16, 64) + if err != nil { + return err + } } return nil diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index 2a8d763909..52180c03e2 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -12,7 +12,6 @@ // limitations under the License. //go:build !windows -// +build !windows package procfs diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index 806e171147..63d1898bc8 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -12,7 +12,6 @@ // limitations under the License. //go:build !windows -// +build !windows package procfs diff --git a/vendor/github.com/protocolbuffers/txtpbfmt/ast/ast.go b/vendor/github.com/protocolbuffers/txtpbfmt/ast/ast.go index 1d285fb5d2..6a7abeafdf 100644 --- a/vendor/github.com/protocolbuffers/txtpbfmt/ast/ast.go +++ b/vendor/github.com/protocolbuffers/txtpbfmt/ast/ast.go @@ -77,6 +77,8 @@ type Node struct { // Used when we want to break between the field name and values when a // single-line node exceeds the requested wrap column. 
PutSingleValueOnNextLine bool + // Field number from proto definition (0 if unknown/not applicable). + FieldNumber int32 } // NodeLess is a sorting function that compares two *Nodes, possibly using the parent Node @@ -183,21 +185,24 @@ func getFieldValueForByFieldValue(n *Node) *Value { return n.Values[0] } -// ByFieldValue is a NodeLess function that orders adjacent scalar nodes with the same name by -// their scalar value. -func ByFieldValue(_, ni, nj *Node, isWholeSlice bool) bool { - if isWholeSlice { - return false - } - vi := getFieldValueForByFieldValue(ni) - vj := getFieldValueForByFieldValue(nj) - if vi == nil { - return vj != nil - } - if vj == nil { - return false +// ByFieldValue returns a NodeLess function that orders adjacent scalar nodes +// with the same name by their scalar value. The values are passed through +// `projection` before sorting. +func ByFieldValue(projection func(string) string) NodeLess { + return func(_, ni, nj *Node, isWholeSlice bool) bool { + if isWholeSlice { + return false + } + vi := getFieldValueForByFieldValue(ni) + vj := getFieldValueForByFieldValue(nj) + if vi == nil { + return vj != nil + } + if vj == nil { + return false + } + return projection(vi.Value) < projection(vj.Value) } - return vi.Value < vj.Value } func getChildValueByFieldSubfield(field, subfield string, n *Node) *Value { @@ -245,8 +250,9 @@ func ByFieldSubfield(field, subfield string) NodeLess { // ByFieldSubfieldPath returns a NodeLess function that orders adjacent message nodes with the given // field name by the given subfield path value. If no field name is provided, it compares the -// subfields of any adjacent nodes with matching names. -func ByFieldSubfieldPath(field string, subfieldPath []string) NodeLess { +// subfields of any adjacent nodes with matching names. Values are passed +// through `projection` before sorting. 
+func ByFieldSubfieldPath(field string, subfieldPath []string, projection func(string) string) NodeLess { return func(_, ni, nj *Node, isWholeSlice bool) bool { if isWholeSlice { return false @@ -259,8 +265,49 @@ func ByFieldSubfieldPath(field string, subfieldPath []string) NodeLess { if vj == nil { return false } - return vi.Value < vj.Value + return projection(vi.Value) < projection(vj.Value) + } +} + +// ByFieldNumber is a NodeLess function that orders fields by their field numbers. +// Field numbers are populated during parsing from descriptor information. +func ByFieldNumber(_, ni, nj *Node, isWholeSlice bool) bool { + if !isWholeSlice { + return false + } + + numI, numJ := ni.FieldNumber, nj.FieldNumber + + // If both have field numbers, sort by field number + if numI > 0 && numJ > 0 { + return numI < numJ + } + + // If only one has field number, prioritize it + if numI > 0 && numJ == 0 { + return true // ni has priority + } + if numI == 0 && numJ > 0 { + return false // nj has priority } + + // If neither has field number, fall back to alphabetical order + return ni.Name < nj.Name +} + +// Formatter is a function that can format nodes in the AST. +type Formatter func([]*Node) error + +var extraFormatters []Formatter + +// RegisterFormatter registers an extra formatter that will be called after parsing. +func RegisterFormatter(f Formatter) { + extraFormatters = append(extraFormatters, f) +} + +// GetFormatters returns all registered formatters. +func GetFormatters() []Formatter { + return extraFormatters } // getChildValue returns the Value of the child with the given field name, diff --git a/vendor/github.com/protocolbuffers/txtpbfmt/config/config.go b/vendor/github.com/protocolbuffers/txtpbfmt/config/config.go index 429093cee0..c2b78952a3 100644 --- a/vendor/github.com/protocolbuffers/txtpbfmt/config/config.go +++ b/vendor/github.com/protocolbuffers/txtpbfmt/config/config.go @@ -24,6 +24,15 @@ type Config struct { // Sort fields by field name. 
SortFieldsByFieldName bool + // Sort fields by field number from proto definition. + SortFieldsByFieldNumber bool + + // Path to protobuf descriptor file (.desc). + ProtoDescriptor string + + // Full message type name for field number lookup (required, e.g. google.protobuf.Any). + MessageFullName string + // Sort adjacent scalar fields of the same field name by their contents. SortRepeatedFieldsByContent bool @@ -35,6 +44,11 @@ type Config struct { // Sort the Sort* fields by descending order instead of ascending order. ReverseSort bool + // Sort content fields in a way that's suitable for DNS names. It splits the + // value around '.' characters, reverses the substrings, and concatenates to + // generate the sort key. + DNSSortOrder bool + // Map from Node.Name to the order of all fields within that node. See AddFieldSortOrder(). FieldSortOrder map[string][]string @@ -73,6 +87,11 @@ type Config struct { // Use single quotes around strings that contain double but not single quotes. SmartQuotes bool + // Use a short representation for repeated primitive fields (`x: 1 x: 2` vs `x: [1, 2]`). If this + // field is true, all repeated primitive fields will use the short representation; otherwise, the + // latter will be used only if it's being used in the input textproto. + UseShortRepeatedPrimitiveFields bool + // Logger enables logging when it is non-nil. // If the log messages aren't going to be useful, it's best to leave Logger // set to nil, as otherwise log messages will be constructed. diff --git a/vendor/github.com/protocolbuffers/txtpbfmt/descriptor/descriptor.go b/vendor/github.com/protocolbuffers/txtpbfmt/descriptor/descriptor.go new file mode 100644 index 0000000000..f763d8f0e3 --- /dev/null +++ b/vendor/github.com/protocolbuffers/txtpbfmt/descriptor/descriptor.go @@ -0,0 +1,83 @@ +// Package descriptor provides functionality to load and parse Protocol Buffer descriptor files. 
+package descriptor + +import ( + "fmt" + "os" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// Loader provides functionality to load field numbers from descriptor files. +type Loader struct { + descriptorFile string + files *protoregistry.Files +} + +// NewLoader creates a new descriptor loader for the given descriptor file. +func NewLoader(descriptorFile string) (*Loader, error) { + if descriptorFile == "" { + return nil, fmt.Errorf("descriptor file is required") + } + + data, err := os.ReadFile(descriptorFile) + if err != nil { + return nil, fmt.Errorf("failed to read descriptor file %s: %v", descriptorFile, err) + } + + fileDescSet := &descriptorpb.FileDescriptorSet{} + if err := proto.Unmarshal(data, fileDescSet); err != nil { + return nil, fmt.Errorf("failed to unmarshal descriptor file %s: %v", descriptorFile, err) + } + + files, err := protodesc.NewFiles(fileDescSet) + if err != nil { + return nil, fmt.Errorf("failed to create files from descriptor file %s: %v", descriptorFile, err) + } + + return &Loader{ + descriptorFile: descriptorFile, + files: files, + }, nil +} + +// GetRootMessageDescriptor returns the root message descriptor for the specified messageFullName. +// messageFullName is required and must be a valid full name (e.g., "google.protobuf.Any"). 
+func (l *Loader) GetRootMessageDescriptor(messageFullName string) (protoreflect.MessageDescriptor, error) { + if l.files == nil { + return nil, fmt.Errorf("descriptor not loaded, call NewLoader() first") + } + + if messageFullName == "" { + // Collect available messages to help user + var availableMessages []string + l.files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { + messages := fd.Messages() + for i := 0; i < messages.Len(); i++ { + msg := messages.Get(i) + availableMessages = append(availableMessages, string(msg.FullName())) + } + return true + }) + + if len(availableMessages) == 0 { + return nil, fmt.Errorf("No messages found in descriptor") + } + return nil, fmt.Errorf("message_full_name is required. Available messages: %v", availableMessages) + } + + // Find specific message type + desc, err := l.files.FindDescriptorByName(protoreflect.FullName(messageFullName)) + if err != nil { + return nil, fmt.Errorf("message type %s not found: %v", messageFullName, err) + } + if msgDesc, ok := desc.(protoreflect.MessageDescriptor); ok { + return msgDesc, nil + } + return nil, fmt.Errorf("%s is not a message type", messageFullName) +} diff --git a/vendor/github.com/protocolbuffers/txtpbfmt/impl/impl.go b/vendor/github.com/protocolbuffers/txtpbfmt/impl/impl.go index 18a43c64d5..22422c9a46 100644 --- a/vendor/github.com/protocolbuffers/txtpbfmt/impl/impl.go +++ b/vendor/github.com/protocolbuffers/txtpbfmt/impl/impl.go @@ -9,8 +9,10 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/reflect/protoreflect" "github.com/protocolbuffers/txtpbfmt/ast" "github.com/protocolbuffers/txtpbfmt/config" + "github.com/protocolbuffers/txtpbfmt/descriptor" "github.com/protocolbuffers/txtpbfmt/quote" "github.com/protocolbuffers/txtpbfmt/sort" "github.com/protocolbuffers/txtpbfmt/wrap" @@ -52,25 +54,30 @@ func (s *bracketState) processChar(c byte, i int, in []byte, allowTripleQuotedSt if s.insideComment { return } - delim := string(c) - tripleQuoted := false - if 
allowTripleQuotedStrings && i+3 <= len(in) { - triple := string(in[i : i+3]) - if triple == `"""` || triple == `'''` { - delim = triple - tripleQuoted = true - } + s.handleQuotes(c, i, in, allowTripleQuotedStrings) + } +} + +func (s *bracketState) handleQuotes(c byte, i int, in []byte, allowTripleQuotedStrings bool) { + delim := string(c) + tripleQuoted := false + if allowTripleQuotedStrings && i+3 <= len(in) { + triple := string(in[i : i+3]) + if triple == `"""` || triple == `'''` { + delim = triple + tripleQuoted = true } - if s.insideString { - if s.stringDelimiter == delim && (s.insideTripleQuotedString || !s.isEscapedChar) { - s.insideString = false - s.insideTripleQuotedString = false - } - } else { - s.insideString = true - s.insideTripleQuotedString = tripleQuoted - s.stringDelimiter = delim + } + + if s.insideString { + if s.stringDelimiter == delim && (s.insideTripleQuotedString || !s.isEscapedChar) { + s.insideString = false + s.insideTripleQuotedString = false } + } else { + s.insideString = true + s.insideTripleQuotedString = tripleQuoted + s.stringDelimiter = delim } } @@ -148,19 +155,44 @@ func ParseWithMetaCommentConfig(in []byte, c config.Config) ([]*ast.Node, error) if err != nil { return nil, err } + + // Load descriptor if field number sorting is enabled + var rootDesc protoreflect.MessageDescriptor + if c.SortFieldsByFieldNumber { + if c.ProtoDescriptor == "" { + return nil, fmt.Errorf("proto_descriptor is required when using sort_fields_by_field_number") + } + + loader, err := descriptor.NewLoader(c.ProtoDescriptor) + if err != nil { + return nil, fmt.Errorf("failed to create descriptor loader: %v", err) + } + + // Get root message descriptor + rootDesc, err = loader.GetRootMessageDescriptor(c.MessageFullName) + if err != nil { + return nil, fmt.Errorf("failed to get root message descriptor: %v", err) + } + } + if p.config.InfoLevel() { p.config.Infof("p.in: %q", string(p.in)) p.config.Infof("p.length: %v", p.length) } // Although unnamed 
nodes aren't strictly allowed, some formats represent a // list of protos as a list of unnamed top-level nodes. - nodes, _, err := p.parse( /*isRoot=*/ true) + nodes, _, err := p.parse( /*isRoot=*/ true, rootDesc) if err != nil { return nil, err } if p.index < p.length { return nil, fmt.Errorf("parser didn't consume all input. Stopped at %s", p.errorContext()) } + for _, f := range ast.GetFormatters() { + if err := f(nodes); err != nil { + return nil, err + } + } if err := wrap.Strings(nodes, 0, c); err != nil { return nil, err } @@ -174,7 +206,7 @@ func ParseWithMetaCommentConfig(in []byte, c config.Config) ([]*ast.Node, error) // have the equal sign. Currently there are only two MetaComments that are in the former format: // // "sort_repeated_fields_by_subfield": If this appears multiple times, then they will all be added -// to the config and the order is perserved. +// to the config and the order is preserved. // "wrap_strings_at_column": The is expected to be an integer. If it is not, then it will be // ignored. If this appears multiple times, only the last one saved. func addToConfig(metaComment string, c *config.Config) error { @@ -209,6 +241,8 @@ func addToConfig(metaComment string, c *config.Config) error { c.SortRepeatedFieldsBySubfield = append(c.SortRepeatedFieldsBySubfield, val) case "reverse_sort": c.ReverseSort = true + case "dns_sort_order": + c.DNSSortOrder = true case "wrap_strings_at_column": // If multiple of this MetaComment exists in the file, take the last one. if !hasEqualSign { @@ -225,6 +259,8 @@ func addToConfig(metaComment string, c *config.Config) error { c.WrapStringsAfterNewlines = true case "wrap_strings_without_wordwrap": c.WrapStringsWithoutWordwrap = true + case "use_short_repeated_primitive_fields": + c.UseShortRepeatedPrimitiveFields = true case "on": // This doesn't change the overall config. case "off": // This doesn't change the overall config. 
default: @@ -286,6 +322,35 @@ func newParser(in []byte, c config.Config) (*parser, error) { return parser, nil } +// getFieldNumber returns the field number for a given field name in the descriptor. +func getFieldNumber(desc protoreflect.MessageDescriptor, fieldName string) int32 { + if desc == nil { + return 0 + } + + field := desc.Fields().ByTextName(fieldName) + if field == nil { + return 0 + } + return int32(field.Number()) +} + +// findChildDescriptor finds the descriptor for a nested message field. +func (p *parser) findChildDescriptor(desc protoreflect.MessageDescriptor, fieldName string) protoreflect.MessageDescriptor { + if desc == nil { + return nil + } + + field := desc.Fields().ByTextName(fieldName) + if field == nil { + return nil + } + if field.Kind() == protoreflect.MessageKind { + return field.Message() + } + return nil +} + func (p *parser) nextInputIs(b byte) bool { return p.index < p.length && p.in[p.index] == b } @@ -396,7 +461,7 @@ func (p *parser) consumeOptionalSeparator() error { // format (sequence of messages, each of which passes proto.UnmarshalText()). // endPos is the position of the first character on the first line // after parsed nodes: that's the position to append more children. -func (p *parser) parse(isRoot bool) (result []*ast.Node, endPos ast.Position, err error) { +func (p *parser) parse(isRoot bool, desc protoreflect.MessageDescriptor) (result []*ast.Node, endPos ast.Position, err error) { var res []*ast.Node res = []*ast.Node{} // empty children is different from nil children for ld := p.getLoopDetector(); p.index < p.length; { @@ -441,26 +506,8 @@ func (p *parser) parse(isRoot bool) (result []*ast.Node, endPos ast.Position, er comments = append(comments, c...) } - if endPos := p.position(); p.consume('}') || p.consume('>') || p.consume(']') { - // Handle comments after last child. 
- - if len(comments) > 0 { - res = append(res, &ast.Node{Start: startPos, PreComments: comments}) - } - - // endPos points at the closing brace, but we should rather return the position - // of the first character after the previous item. Therefore let's rewind a bit: - for endPos.Byte > 0 && p.in[endPos.Byte-1] == ' ' { - endPos.Byte-- - endPos.Column-- - } - - if err = p.consumeOptionalSeparator(); err != nil { - return nil, ast.Position{}, err - } - - // Done parsing children. - return res, endPos, nil + if end, endPos, err := p.handleEndOfMessage(startPos, comments, &res); end { + return res, endPos, err } nd := &ast.Node{ @@ -491,10 +538,9 @@ func (p *parser) parse(isRoot bool) (result []*ast.Node, endPos ast.Position, er } // Handle end of file. - if p.index >= p.length { - nd.End = p.position() - if len(nd.PreComments) > 0 { - res = append(res, nd) + if end, err := p.handleEndOfFile(nd, &res); end { + if err != nil { + return nil, ast.Position{}, err } break } @@ -503,37 +549,19 @@ func (p *parser) parse(isRoot bool) (result []*ast.Node, endPos ast.Position, er return nil, ast.Position{}, err } + // Set field number from descriptor if available + nd.FieldNumber = getFieldNumber(desc, nd.Name) + // Skip separator. preCommentsBeforeColon, _ := p.skipWhiteSpaceAndReadComments(true /* multiLine */) nd.SkipColon = !p.consume(':') previousPos := p.position() preCommentsAfterColon, _ := p.skipWhiteSpaceAndReadComments(true /* multiLine */) - if p.consume('{') || p.consume('<') { - if err := p.parseMessage(nd); err != nil { - return nil, ast.Position{}, err - } - } else if p.consume('[') { - if err := p.parseList(nd, preCommentsBeforeColon, preCommentsAfterColon); err != nil { - return nil, ast.Position{}, err - } - if nd.ValuesAsList { - res = append(res, nd) - continue - } - } else { - // Rewind comments. - p.rollbackPosition(previousPos) - // Handle Values. 
- var err error - nd.Values, err = p.readValues() - if err != nil { - return nil, ast.Position{}, err - } - if err := p.consumeOptionalSeparator(); err != nil { - return nil, ast.Position{}, err - } + if err := p.parseFieldValue(nd, desc, preCommentsBeforeColon, preCommentsAfterColon, previousPos); err != nil { + return nil, ast.Position{}, err } + if p.config.InfoLevel() && p.index < p.length { p.config.Infof("p.in[p.index]: %q", string(p.in[p.index])) } @@ -542,6 +570,70 @@ func (p *parser) parse(isRoot bool) (result []*ast.Node, endPos ast.Position, er return res, p.position(), nil } +func (p *parser) parseFieldValue(nd *ast.Node, desc protoreflect.MessageDescriptor, preCommentsBeforeColon, preCommentsAfterColon []string, previousPos ast.Position) error { + if p.consume('{') || p.consume('<') { + if err := p.parseMessage(nd, desc); err != nil { + return err + } + } else if p.consume('[') { + if err := p.parseList(nd, preCommentsBeforeColon, preCommentsAfterColon); err != nil { + return err + } + if nd.ValuesAsList { + return nil + } + } else { + // Rewind comments. + p.rollbackPosition(previousPos) + // Handle Values. + var err error + nd.Values, err = p.readValues() + if err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } + return nil +} + +func (p *parser) handleEndOfFile(nd *ast.Node, res *[]*ast.Node) (bool, error) { + if p.index >= p.length { + nd.End = p.position() + if len(nd.PreComments) > 0 { + *res = append(*res, nd) + } + return true, nil + } + return false, nil +} + +func (p *parser) handleEndOfMessage(startPos ast.Position, comments []string, res *[]*ast.Node) (bool, ast.Position, error) { + if endPos := p.position(); p.consume('}') || p.consume('>') || p.consume(']') { + // Handle comments after last child. 
+ + if len(comments) > 0 { + *res = append(*res, &ast.Node{Start: startPos, PreComments: comments}) + } + + // endPos points at the closing brace, but we should rather return the position + // of the first character after the previous item. Therefore let's rewind a bit: + for endPos.Byte > 0 && p.in[endPos.Byte-1] == ' ' { + endPos.Byte-- + endPos.Column-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return true, ast.Position{}, err + } + + // Done parsing children. + return true, endPos, nil + } + return false, ast.Position{}, nil +} + func (p *parser) parseFieldName(nd *ast.Node, isRoot bool) error { if p.consume('[') { // Read Name (of proto extension). @@ -560,14 +652,15 @@ func (p *parser) parseFieldName(nd *ast.Node, isRoot bool) error { return nil } -func (p *parser) parseMessage(nd *ast.Node) error { +func (p *parser) parseMessage(nd *ast.Node, desc protoreflect.MessageDescriptor) error { if p.config.SkipAllColons { nd.SkipColon = true } nd.ChildrenSameLine = p.bracketSameLine[p.index-1] nd.IsAngleBracket = p.config.PreserveAngleBrackets && p.in[p.index-1] == '<' // Recursive call to parse child nodes. - nodes, lastPos, err := p.parse( /*isRoot=*/ false) + childDesc := p.findChildDescriptor(desc, nd.Name) + nodes, lastPos, err := p.parse( /*isRoot=*/ false, childDesc) if err != nil { return err } @@ -591,62 +684,71 @@ func (p *parser) parseList(nd *ast.Node, preCommentsBeforeColon, preCommentsAfte if p.nextInputIs('{') { // Handle list of nodes. - nd.ChildrenAsList = true - - nodes, lastPos, err := p.parse( /*isRoot=*/ true) - if err != nil { - return err - } - if len(nodes) > 0 { - nodes[0].PreComments = preComments - } - - nd.Children = nodes - nd.End = lastPos - nd.ClosingBraceComment = p.readInlineComment() - nd.ChildrenSameLine = openBracketLine == p.line + return p.parseListOfNodes(nd, preComments, openBracketLine) } else { // Handle list of values. - nd.ValuesAsList = true // We found values in list - keep it as list. 
+ return p.parseListOfValues(nd, preComments, openBracketLine) + } +} - for ld := p.getLoopDetector(); !p.consume(']') && p.index < p.length; { - if err := ld.iter(); err != nil { - return err - } +func (p *parser) parseListOfNodes(nd *ast.Node, preComments []string, openBracketLine int) error { + nd.ChildrenAsList = true - // Read each value in the list. - vals, err := p.readValues() - if err != nil { - return err - } - if len(vals) != 1 { - return fmt.Errorf("multiple-string value not supported (%v). Please add comma explicitly, see http://b/162070952", vals) - } - if len(preComments) > 0 { - // If we read preComments before readValues(), they should go first, - // but avoid copy overhead if there are none. - vals[0].PreComments = append(preComments, vals[0].PreComments...) - } + nodes, lastPos, err := p.parse( /*isRoot=*/ true, nil) + if err != nil { + return err + } + if len(nodes) > 0 { + nodes[0].PreComments = preComments + } - // Skip separator. - _, _ = p.skipWhiteSpaceAndReadComments(false /* multiLine */) - if p.consume(',') { - vals[0].InlineComment = p.readInlineComment() - } + nd.Children = nodes + nd.End = lastPos + nd.ClosingBraceComment = p.readInlineComment() + nd.ChildrenSameLine = openBracketLine == p.line + return nil +} - nd.Values = append(nd.Values, vals...) +func (p *parser) parseListOfValues(nd *ast.Node, preComments []string, openBracketLine int) error { + nd.ValuesAsList = true // We found values in list - keep it as list. - preComments, _ = p.skipWhiteSpaceAndReadComments(true /* multiLine */) + for ld := p.getLoopDetector(); !p.consume(']') && p.index < p.length; { + if err := ld.iter(); err != nil { + return err } - nd.ChildrenSameLine = openBracketLine == p.line - // Handle comments after last line (or for empty list) - nd.PostValuesComments = preComments - nd.ClosingBraceComment = p.readInlineComment() - - if err := p.consumeOptionalSeparator(); err != nil { + // Read each value in the list. 
+ vals, err := p.readValues() + if err != nil { return err } + if len(vals) != 1 { + return fmt.Errorf("multiple-string value not supported (%v). Please add comma explicitly, see http://b/162070952", vals) + } + if len(preComments) > 0 { + // If we read preComments before readValues(), they should go first, + // but avoid copy overhead if there are none. + vals[0].PreComments = append(preComments, vals[0].PreComments...) + } + + // Skip separator. + _, _ = p.skipWhiteSpaceAndReadComments(false /* multiLine */) + if p.consume(',') { + vals[0].InlineComment = p.readInlineComment() + } + + nd.Values = append(nd.Values, vals...) + + preComments, _ = p.skipWhiteSpaceAndReadComments(true /* multiLine */) + } + nd.ChildrenSameLine = openBracketLine == p.line + + // Handle comments after last line (or for empty list) + nd.PostValuesComments = preComments + nd.ClosingBraceComment = p.readInlineComment() + + if err := p.consumeOptionalSeparator(); err != nil { + return err } return nil } diff --git a/vendor/github.com/protocolbuffers/txtpbfmt/sort/sort.go b/vendor/github.com/protocolbuffers/txtpbfmt/sort/sort.go index 8f61504a7b..4748a63f2c 100644 --- a/vendor/github.com/protocolbuffers/txtpbfmt/sort/sort.go +++ b/vendor/github.com/protocolbuffers/txtpbfmt/sort/sort.go @@ -32,6 +32,19 @@ func (e *UnsortedFieldsError) Error() string { return fmt.Sprintf("fields parsed that were not specified in the parser.AddFieldSortOrder() call:\n%s", strings.Join(errs, "\n")) } +func identityProjection(s string) string { + return s +} + +func dnsProjection(s string) string { + parts := strings.Split(s, ".") + // Reverse `parts`. + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + return strings.Join(parts, ".") +} + // nodeSortFunction sorts the given nodes, using the parent node as context. parent can be nil. 
type nodeSortFunction func(parent *ast.Node, nodes []*ast.Node) error @@ -43,11 +56,11 @@ type valuesSortFunction func(values []*ast.Value) // Process sorts and filters the given nodes. func Process(parent *ast.Node, nodes []*ast.Node, c config.Config) error { - return process(parent, nodes, nodeSortFunctionConfig(c), nodeFilterFunctionConfig(c), valuesSortFunctionConfig(c)) + return process(parent, nodes, nodeSortFunctionConfig(c), nodeFilterFunctionConfig(c), valuesSortFunctionConfig(c), c) } // process sorts and filters the given nodes. -func process(parent *ast.Node, nodes []*ast.Node, sortFunction nodeSortFunction, filterFunction nodeFilterFunction, valuesSortFunction valuesSortFunction) error { +func process(parent *ast.Node, nodes []*ast.Node, sortFunction nodeSortFunction, filterFunction nodeFilterFunction, valuesSortFunction valuesSortFunction, c config.Config) error { if len(nodes) == 0 { return nil } @@ -55,7 +68,7 @@ func process(parent *ast.Node, nodes []*ast.Node, sortFunction nodeSortFunction, filterFunction(nodes) } for _, nd := range nodes { - err := process(nd, nd.Children, sortFunction, filterFunction, valuesSortFunction) + err := process(nd, nd.Children, sortFunction, filterFunction, valuesSortFunction, c) if err != nil { return err } @@ -64,11 +77,46 @@ func process(parent *ast.Node, nodes []*ast.Node, sortFunction nodeSortFunction, } } if sortFunction != nil { - return sortFunction(parent, nodes) + if err := sortFunction(parent, nodes); err != nil { + return err + } + } + if c.UseShortRepeatedPrimitiveFields { + groupRepeatedPrimitiveFields(nodes) } return nil } +func isPrimitive(n *ast.Node) bool { + return len(n.Children) == 0 && len(n.Values) == 1 +} + +func groupRepeatedPrimitiveFields(nodes []*ast.Node) { + for i := 0; i < len(nodes); { + node := nodes[i] + if node.Deleted || !isPrimitive(node) { + i++ + continue + } + j := i + 1 + for ; j < len(nodes); j++ { + if nodes[j].Deleted || !isPrimitive(nodes[j]) || nodes[j].Name != node.Name || 
len(nodes[j].PreComments) > 0 || len(nodes[j].PostValuesComments) > 0 { + break + } + } + if j > i+1 { + // Found group of repeated primitive fields: nodes[i...j-1] + node.ValuesAsList = true + node.ChildrenSameLine = true + for k := i + 1; k < j; k++ { + node.Values = append(node.Values, nodes[k].Values...) + nodes[k].Deleted = true + } + } + i = j + } +} + // removeDuplicates marks duplicate key:value pairs from nodes as Deleted. func removeDuplicates(nodes []*ast.Node) { type nameAndValue struct { @@ -130,13 +178,21 @@ func nodeSortFunctionConfig(c config.Config) nodeSortFunction { if c.SortFieldsByFieldName { sorter = ast.ChainNodeLess(sorter, ast.ByFieldName) } + if c.SortFieldsByFieldNumber { + sorter = ast.ChainNodeLess(sorter, ast.ByFieldNumber) + } + projection := identityProjection + if c.DNSSortOrder { + projection = dnsProjection + } if c.SortRepeatedFieldsByContent { - sorter = ast.ChainNodeLess(sorter, ast.ByFieldValue) + sorter = ast.ChainNodeLess(sorter, ast.ByFieldValue(projection)) } for _, sf := range c.SortRepeatedFieldsBySubfield { field, subfieldPath := parseSubfieldSpec(sf) if len(subfieldPath) > 0 { - sorter = ast.ChainNodeLess(sorter, ast.ByFieldSubfieldPath(field, subfieldPath)) + sorter = ast.ChainNodeLess(sorter, ast.ByFieldSubfieldPath(field, subfieldPath, + projection)) } } if sorter != nil { diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md index 27ddfee8b8..6492bfe851 100644 --- a/vendor/github.com/rcrowley/go-metrics/README.md +++ b/vendor/github.com/rcrowley/go-metrics/README.md @@ -7,6 +7,15 @@ Go port of Coda Hale's Metrics library: . Documentation: . +Archived as of April 1 2025 +----- +This repository is no longer maintained. 
The authors recommend you explore the +following newer, more widely adopted libraries for your Go instrumentation +needs: + +* [OpenTelemetry Go SDK](https://opentelemetry.io/docs/languages/go/instrumentation/#metrics) +* [Prometheus Go Client Library](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) + Usage ----- diff --git a/vendor/github.com/sagikazarmark/locafero/.envrc b/vendor/github.com/sagikazarmark/locafero/.envrc index 5c95dc7989..a0350624bd 100644 --- a/vendor/github.com/sagikazarmark/locafero/.envrc +++ b/vendor/github.com/sagikazarmark/locafero/.envrc @@ -1,4 +1,4 @@ -if ! has nix_direnv_version || ! nix_direnv_version 3.1.0; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.1.0/direnvrc" "sha256-yMJ2OVMzrFaDPn7q8nCBZFRYpL/f0RcHzhmw/i6btJM=" -fi -use flake . --impure +export DIRENV_WARN_TIMEOUT=20s + +eval "$(devenv direnvrc)" +use devenv diff --git a/vendor/github.com/sagikazarmark/locafero/.gitignore b/vendor/github.com/sagikazarmark/locafero/.gitignore index 8f07e60163..102d6bf47f 100644 --- a/vendor/github.com/sagikazarmark/locafero/.gitignore +++ b/vendor/github.com/sagikazarmark/locafero/.gitignore @@ -1,8 +1,5 @@ -/.devenv/ +# Devenv +/.devenv* /.direnv/ -/.task/ -/bin/ -/build/ -/tmp/ -/var/ -/vendor/ +devenv.local.nix +.pre-commit-config.yaml diff --git a/vendor/github.com/sagikazarmark/locafero/devenv.lock b/vendor/github.com/sagikazarmark/locafero/devenv.lock new file mode 100644 index 0000000000..c71c5dc48b --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/devenv.lock @@ -0,0 +1,103 @@ +{ + "nodes": { + "devenv": { + "locked": { + "dir": "src/modules", + "lastModified": 1758285954, + "owner": "cachix", + "repo": "devenv", + "rev": "0f0e4c6921995ae9334b546eac71a5800621baa8", + "type": "github" + }, + "original": { + "dir": "src/modules", + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 
1747046372, + "owner": "edolstra", + "repo": "flake-compat", + "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "git-hooks": { + "inputs": { + "flake-compat": "flake-compat", + "gitignore": "gitignore", + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1758108966, + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "54df955a695a84cd47d4a43e08e1feaf90b1fd9b", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "git-hooks.nix", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "git-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1709087332, + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1755783167, + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "4a880fb247d24fbca57269af672e8f78935b0328", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "git-hooks": "git-hooks", + "nixpkgs": "nixpkgs", + "pre-commit-hooks": [ + "git-hooks" + ] + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/sagikazarmark/locafero/devenv.nix b/vendor/github.com/sagikazarmark/locafero/devenv.nix new file mode 100644 index 0000000000..ffd26a0d17 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/devenv.nix @@ -0,0 +1,17 @@ +{ pkgs, ... 
}: + +{ + cachix.pull = [ "sagikazarmark-dev" ]; + + languages = { + go = { + enable = true; + package = pkgs.go_1_25; + }; + }; + + packages = with pkgs; [ + just + golangci-lint + ]; +} diff --git a/vendor/github.com/sagikazarmark/locafero/devenv.yaml b/vendor/github.com/sagikazarmark/locafero/devenv.yaml new file mode 100644 index 0000000000..68616a49cd --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/devenv.yaml @@ -0,0 +1,4 @@ +# yaml-language-server: $schema=https://devenv.sh/devenv.schema.json +inputs: + nixpkgs: + url: github:cachix/devenv-nixpkgs/rolling diff --git a/vendor/github.com/sagikazarmark/locafero/finder.go b/vendor/github.com/sagikazarmark/locafero/finder.go index ce43c78264..5e86d31b0e 100644 --- a/vendor/github.com/sagikazarmark/locafero/finder.go +++ b/vendor/github.com/sagikazarmark/locafero/finder.go @@ -7,8 +7,9 @@ import ( "path/filepath" "strings" - "github.com/sourcegraph/conc/pool" "github.com/spf13/afero" + + "github.com/sagikazarmark/locafero/internal/queue" ) // Finder looks for files and directories in an [afero.Fs] filesystem. @@ -43,12 +44,11 @@ type Finder struct { // Find looks for files and directories in an [afero.Fs] filesystem. 
func (f Finder) Find(fsys afero.Fs) ([]string, error) { - // Arbitrary go routine limit (TODO: make this a parameter) - p := pool.NewWithResults[[]searchResult]().WithMaxGoroutines(5).WithErrors().WithFirstError() + q := queue.NewEager[[]searchResult]() for _, searchPath := range f.Paths { for _, searchName := range f.Names { - p.Go(func() ([]searchResult, error) { + q.Add(func() ([]searchResult, error) { // If the name contains any glob character, perform a glob match if strings.ContainsAny(searchName, globMatch) { return globWalkSearch(fsys, searchPath, searchName, f.Type) @@ -59,7 +59,7 @@ func (f Finder) Find(fsys afero.Fs) ([]string, error) { } } - searchResults, err := flatten(p.Wait()) + searchResults, err := flatten(q.Wait()) if err != nil { return nil, err } diff --git a/vendor/github.com/sagikazarmark/locafero/flake.lock b/vendor/github.com/sagikazarmark/locafero/flake.lock deleted file mode 100644 index b14a842c2f..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/flake.lock +++ /dev/null @@ -1,255 +0,0 @@ -{ - "nodes": { - "cachix": { - "inputs": { - "devenv": [ - "devenv" - ], - "flake-compat": [ - "devenv" - ], - "git-hooks": [ - "devenv", - "git-hooks" - ], - "nixpkgs": [ - "devenv", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1748883665, - "narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=", - "owner": "cachix", - "repo": "cachix", - "rev": "f707778d902af4d62d8dd92c269f8e70de09acbe", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "latest", - "repo": "cachix", - "type": "github" - } - }, - "devenv": { - "inputs": { - "cachix": "cachix", - "flake-compat": "flake-compat", - "git-hooks": "git-hooks", - "nix": "nix", - "nixpkgs": "nixpkgs" - }, - "locked": { - "lastModified": 1753981111, - "narHash": "sha256-uBJOyMxOkGRmxhD2M5rbN2aV6oP1T2AKq5oBaHHC4mw=", - "owner": "cachix", - "repo": "devenv", - "rev": "d4d70df706b153b601a87ab8e81c88a0b1a373b6", - "type": "github" - }, - "original": { - "owner": 
"cachix", - "repo": "devenv", - "type": "github" - } - }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1747046372, - "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-parts": { - "inputs": { - "nixpkgs-lib": [ - "devenv", - "nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1733312601, - "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "flake-parts_2": { - "inputs": { - "nixpkgs-lib": "nixpkgs-lib" - }, - "locked": { - "lastModified": 1753121425, - "narHash": "sha256-TVcTNvOeWWk1DXljFxVRp+E0tzG1LhrVjOGGoMHuXio=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "644e0fc48951a860279da645ba77fe4a6e814c5e", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "git-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1750779888, - "narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=", - "owner": "cachix", - "repo": "git-hooks.nix", - "rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "git-hooks.nix", - "type": "github" - } - }, - "gitignore": { - "inputs": { - "nixpkgs": [ - "devenv", - "git-hooks", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1709087332, - "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", - "owner": "hercules-ci", - "repo": 
"gitignore.nix", - "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "gitignore.nix", - "type": "github" - } - }, - "nix": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-parts": "flake-parts", - "git-hooks-nix": [ - "devenv", - "git-hooks" - ], - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-23-11": [ - "devenv" - ], - "nixpkgs-regression": [ - "devenv" - ] - }, - "locked": { - "lastModified": 1752773918, - "narHash": "sha256-dOi/M6yNeuJlj88exI+7k154z+hAhFcuB8tZktiW7rg=", - "owner": "cachix", - "repo": "nix", - "rev": "031c3cf42d2e9391eee373507d8c12e0f9606779", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "devenv-2.30", - "repo": "nix", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1750441195, - "narHash": "sha256-yke+pm+MdgRb6c0dPt8MgDhv7fcBbdjmv1ZceNTyzKg=", - "owner": "cachix", - "repo": "devenv-nixpkgs", - "rev": "0ceffe312871b443929ff3006960d29b120dc627", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "rolling", - "repo": "devenv-nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "lastModified": 1751159883, - "narHash": "sha256-urW/Ylk9FIfvXfliA1ywh75yszAbiTEVgpPeinFyVZo=", - "owner": "nix-community", - "repo": "nixpkgs.lib", - "rev": "14a40a1d7fb9afa4739275ac642ed7301a9ba1ab", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nixpkgs.lib", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1753939845, - "narHash": "sha256-K2ViRJfdVGE8tpJejs8Qpvvejks1+A4GQej/lBk5y7I=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "94def634a20494ee057c76998843c015909d6311", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "root": { - "inputs": { - "devenv": "devenv", - "flake-parts": "flake-parts_2", - "nixpkgs": 
"nixpkgs_2" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/vendor/github.com/sagikazarmark/locafero/flake.nix b/vendor/github.com/sagikazarmark/locafero/flake.nix deleted file mode 100644 index bdb10dbe4f..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/flake.nix +++ /dev/null @@ -1,42 +0,0 @@ -{ - inputs = { - nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; - flake-parts.url = "github:hercules-ci/flake-parts"; - devenv.url = "github:cachix/devenv"; - }; - - outputs = - inputs@{ flake-parts, ... }: - flake-parts.lib.mkFlake { inherit inputs; } { - imports = [ - inputs.devenv.flakeModule - ]; - - systems = [ - "x86_64-linux" - "aarch64-darwin" - ]; - - perSystem = - { pkgs, ... }: - { - devenv.shells = { - default = { - languages = { - go.enable = true; - go.package = pkgs.lib.mkDefault pkgs.go_1_24; - }; - - packages = with pkgs; [ - just - - golangci-lint - ]; - - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - }; - }; - }; -} diff --git a/vendor/github.com/sagikazarmark/locafero/internal/queue/eager.go b/vendor/github.com/sagikazarmark/locafero/internal/queue/eager.go new file mode 100644 index 0000000000..d0c0338ace --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/internal/queue/eager.go @@ -0,0 +1,53 @@ +package queue + +import "sync" + +// NewEager creates a new eager queue. +func NewEager[T any]() Queue[T] { + return &Eager[T]{} +} + +// Eager is a queue that processes items eagerly. +type Eager[T any] struct { + results []T + error error + + mu sync.Mutex +} + +// Add implements the [Queue] interface. +func (p *Eager[T]) Add(fn func() (T, error)) { + p.mu.Lock() + defer p.mu.Unlock() + + // Return early if there's an error + if p.error != nil { + return + } + + result, err := fn() + if err != nil { + p.error = err + + return + } + + p.results = append(p.results, result) +} + +// Wait implements the [Queue] interface. 
+func (p *Eager[T]) Wait() ([]T, error) { + p.mu.Lock() + defer p.mu.Unlock() + + if p.error != nil { + return nil, p.error + } + + results := p.results + + // Reset results for reuse + p.results = nil + + return results, nil +} diff --git a/vendor/github.com/sagikazarmark/locafero/internal/queue/queue.go b/vendor/github.com/sagikazarmark/locafero/internal/queue/queue.go new file mode 100644 index 0000000000..3a4966943a --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/internal/queue/queue.go @@ -0,0 +1,8 @@ +// Package queue provides a generic queue implementation. +package queue + +// Queue represents a generic queue. +type Queue[T any] interface { + Add(func() (T, error)) + Wait() ([]T, error) +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go index abc860a491..a94e739f2c 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go @@ -96,7 +96,6 @@ func encodeCanonical(obj interface{}, result *strings.Builder) (err error) { if i < (len(mapKeys) - 1) { result.WriteString(",") } - i++ } result.WriteString("}") diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go index ed223e90b5..8e48cc6fe1 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go @@ -38,7 +38,7 @@ type Signature struct { } /* -PAE implementes the DSSE Pre-Authentic Encoding +PAE implements the DSSE Pre-Authentic Encoding https://github.com/secure-systems-lab/dsse/blob/master/protocol.md#signature-definition */ func PAE(payloadType string, payload []byte) []byte { diff --git 
a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go index 244a806774..427ef5686f 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go @@ -47,7 +47,7 @@ algorithms to sign the data. The threshold parameter is legacy and is ignored. Deprecated: This function simply calls NewEnvelopeSigner, and that function should be preferred. */ -func NewMultiEnvelopeSigner(threshold int, p ...Signer) (*EnvelopeSigner, error) { +func NewMultiEnvelopeSigner(_ int, p ...Signer) (*EnvelopeSigner, error) { return NewEnvelopeSigner(p...) } diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go index d04246747c..034e4faaf3 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go @@ -43,8 +43,8 @@ func (ev *EnvelopeVerifier) Verify(ctx context.Context, e *Envelope) ([]Accepted // If *any* signature is found to be incorrect, it is skipped var acceptedKeys []AcceptedKey usedKeyids := make(map[string]string) - unverified_providers := make([]Verifier, len(ev.providers)) - copy(unverified_providers, ev.providers) + unverifiedProviders := make([]Verifier, len(ev.providers)) + copy(unverifiedProviders, ev.providers) for _, s := range e.Signatures { sig, err := b64Decode(s.Sig) if err != nil { @@ -55,7 +55,7 @@ func (ev *EnvelopeVerifier) Verify(ctx context.Context, e *Envelope) ([]Accepted // If provider and signature include key IDs but do not match skip. // If a provider recognizes the key, we exit // the loop and use the result. 
- providers := unverified_providers + providers := unverifiedProviders for i, v := range providers { keyID, err := v.KeyID() @@ -81,7 +81,7 @@ func (ev *EnvelopeVerifier) Verify(ctx context.Context, e *Envelope) ([]Accepted KeyID: keyID, Sig: s, } - unverified_providers = removeIndex(providers, i) + unverifiedProviders = removeIndex(providers, i) // See https://github.com/in-toto/in-toto/pull/251 if _, ok := usedKeyids[keyID]; ok { diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go index 691091af99..4227941d96 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go @@ -57,7 +57,7 @@ func NewECDSASignerVerifierFromSSLibKey(key *SSLibKey) (*ECDSASignerVerifier, er } // Sign creates a signature for `data`. -func (sv *ECDSASignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { +func (sv *ECDSASignerVerifier) Sign(_ context.Context, data []byte) ([]byte, error) { if sv.private == nil { return nil, ErrNotPrivateKey } @@ -68,7 +68,7 @@ func (sv *ECDSASignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, e } // Verify verifies the `sig` value passed in against `data`. -func (sv *ECDSASignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { +func (sv *ECDSASignerVerifier) Verify(_ context.Context, data []byte, sig []byte) error { hashedData := getECDSAHashedData(data, sv.curveSize) if ok := ecdsa.VerifyASN1(sv.public, hashedData, sig); !ok { @@ -93,8 +93,7 @@ func (sv *ECDSASignerVerifier) Public() crypto.PublicKey { // LoadECDSAKeyFromFile returns an SSLibKey instance for an ECDSA key stored in // a file in the custom securesystemslib format. // -// Deprecated: use LoadKey(). The custom serialization format has been -// deprecated. Use +// Deprecated: use LoadKey(). 
The custom serialization format is deprecated. Use // https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py // to convert your key. func LoadECDSAKeyFromFile(path string) (*SSLibKey, error) { diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go index d954e14b74..348351e8cd 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go @@ -57,7 +57,7 @@ func NewED25519SignerVerifierFromSSLibKey(key *SSLibKey) (*ED25519SignerVerifier } // Sign creates a signature for `data`. -func (sv *ED25519SignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { +func (sv *ED25519SignerVerifier) Sign(_ context.Context, data []byte) ([]byte, error) { if len(sv.private) == 0 { return nil, ErrNotPrivateKey } @@ -67,7 +67,7 @@ func (sv *ED25519SignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, } // Verify verifies the `sig` value passed in against `data`. -func (sv *ED25519SignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { +func (sv *ED25519SignerVerifier) Verify(_ context.Context, data []byte, sig []byte) error { if ok := ed25519.Verify(sv.public, data, sig); ok { return nil } @@ -89,8 +89,7 @@ func (sv *ED25519SignerVerifier) Public() crypto.PublicKey { // LoadED25519KeyFromFile returns an SSLibKey instance for an ED25519 key stored // in a file in the custom securesystemslib format. // -// Deprecated: use LoadKey(). The custom serialization format has been -// deprecated. Use +// Deprecated: use LoadKey(). The custom serialization format is deprecated. Use // https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py // to convert your key. 
func LoadED25519KeyFromFile(path string) (*SSLibKey, error) { diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go index 2abfcb27c4..8aaa531c06 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go @@ -59,7 +59,7 @@ func NewRSAPSSSignerVerifierFromSSLibKey(key *SSLibKey) (*RSAPSSSignerVerifier, } // Sign creates a signature for `data`. -func (sv *RSAPSSSignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { +func (sv *RSAPSSSignerVerifier) Sign(_ context.Context, data []byte) ([]byte, error) { if sv.private == nil { return nil, ErrNotPrivateKey } @@ -70,7 +70,7 @@ func (sv *RSAPSSSignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, } // Verify verifies the `sig` value passed in against `data`. -func (sv *RSAPSSSignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { +func (sv *RSAPSSSignerVerifier) Verify(_ context.Context, data []byte, sig []byte) error { hashedData := hashBeforeSigning(data, sha256.New()) if err := rsa.VerifyPSS(sv.public, crypto.SHA256, hashedData, sig, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}); err != nil { @@ -95,8 +95,7 @@ func (sv *RSAPSSSignerVerifier) Public() crypto.PublicKey { // LoadRSAPSSKeyFromFile returns an SSLibKey instance for an RSA key stored in a // file. // -// Deprecated: use LoadKey(). The custom serialization format has been -// deprecated. Use +// Deprecated: use LoadKey(). The custom serialization format is deprecated. Use // https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py // to convert your key. 
func LoadRSAPSSKeyFromFile(path string) (*SSLibKey, error) { diff --git a/vendor/github.com/segmentio/asm/LICENSE b/vendor/github.com/segmentio/asm/LICENSE index 29e1ab6b05..5e93dab621 100644 --- a/vendor/github.com/segmentio/asm/LICENSE +++ b/vendor/github.com/segmentio/asm/LICENSE @@ -1,21 +1,16 @@ -MIT License +MIT No Attribution -Copyright (c) 2021 Segment +Copyright 2023 Segment -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a copy of this +software and associated documentation files (the "Software"), to deal in the Software +without restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/segmentio/asm/base64/decode_arm64.s b/vendor/github.com/segmentio/asm/base64/decode_arm64.s index 4374d5ce17..67d206f8cc 100644 --- a/vendor/github.com/segmentio/asm/base64/decode_arm64.s +++ b/vendor/github.com/segmentio/asm/base64/decode_arm64.s @@ -130,7 +130,12 @@ loop: ADVANCE_LOOP(loop) // Store results and continue done: - RETURN() + // RETURN() replacing the macro to please go vet. + SUB R0, R3; + SUB R1, R4; + MOVD R3, ret+56(FP); + MOVD R4, ret1+64(FP); + RET // func decodeStdARM64(dst []byte, src []byte, lut *int8) (int, int) @@ -145,7 +150,12 @@ loop: ADVANCE_LOOP(loop) // Store results and continue done: - RETURN() + // RETURN() replacing the macro to please go vet. 
+ SUB R0, R3; + SUB R1, R4; + MOVD R3, ret+56(FP); + MOVD R4, ret1+64(FP); + RET DATA ·mask_lut+0x00(SB)/1, $0xa8 diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/entry.go index 2914bffbed..2d43793309 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/entry.go +++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/entry.go @@ -57,6 +57,24 @@ func init() { type V001Entry struct { DSSEObj models.DSSEV001Schema env *dsse.Envelope + + // memory optimization: extract and store these during Unmarshal + // so we can clear the huge payload from memory + extractedIndexKeys []string + isInsertable bool +} + +type indexKeyExtract struct { + Subject []struct { + Digest map[string]string `json:"digest"` + } `json:"subject"` + Predicate json.RawMessage `json:"predicate"` +} + +type materialsExtract struct { + Materials []struct { + Digest map[string]string `json:"digest"` + } `json:"materials"` } func (v V001Entry) APIVersion() string { @@ -108,58 +126,13 @@ func (v V001Entry) IndexKeys() ([]string, error) { return result, nil } - switch v.env.PayloadType { - case in_toto.PayloadType: - - if v.env.Payload == "" { - log.Logger.Info("DSSEObj DSSE payload is empty") - return result, nil - } - decodedPayload, err := v.env.DecodeB64Payload() - if err != nil { - return result, fmt.Errorf("could not decode envelope payload: %w", err) - } - statement, err := parseStatement(decodedPayload) - if err != nil { - return result, err - } - for _, s := range statement.Subject { - for alg, ds := range s.Digest { - result = append(result, alg+":"+ds) - } - } - // Not all in-toto statements will contain a SLSA provenance predicate. - // See https://github.com/in-toto/attestation/blob/main/spec/README.md#predicate - // for other predicates. 
- if predicate, err := parseSlsaPredicate(decodedPayload); err == nil { - if predicate.Predicate.Materials != nil { - for _, s := range predicate.Predicate.Materials { - for alg, ds := range s.Digest { - result = append(result, alg+":"+ds) - } - } - } - } - default: + if v.env.PayloadType == in_toto.PayloadType { + result = append(result, v.extractedIndexKeys...) + } else { log.Logger.Infof("Unknown DSSE envelope payloadType: %s", v.env.PayloadType) } - return result, nil -} - -func parseStatement(p []byte) (*in_toto.Statement, error) { - ps := in_toto.Statement{} - if err := json.Unmarshal(p, &ps); err != nil { - return nil, err - } - return &ps, nil -} -func parseSlsaPredicate(p []byte) (*in_toto.ProvenanceStatement, error) { - predicate := in_toto.ProvenanceStatement{} - if err := json.Unmarshal(p, &predicate); err != nil { - return nil, err - } - return &predicate, nil + return result, nil } // DecodeEntry performs direct decode into the provided output pointer @@ -344,6 +317,28 @@ func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error { return err } + // extraction of index keys - done here so we can clear the huge strings from memory + if env.PayloadType == in_toto.PayloadType { + var extract indexKeyExtract + if err := json.Unmarshal(decodedPayload, &extract); err == nil { + for _, s := range extract.Subject { + for alg, ds := range s.Digest { + v.extractedIndexKeys = append(v.extractedIndexKeys, alg+":"+ds) + } + } + if extract.Predicate != nil { + var materials materialsExtract + if err := json.Unmarshal(extract.Predicate, &materials); err == nil { + for _, m := range materials.Materials { + for alg, ds := range m.Digest { + v.extractedIndexKeys = append(v.extractedIndexKeys, alg+":"+ds) + } + } + } + } + } + } + payloadHash := sha256.Sum256(decodedPayload) dsseObj.PayloadHash = &models.DSSEV001SchemaPayloadHash{ Algorithm: conv.Pointer(models.DSSEV001SchemaPayloadHashAlgorithmSha256), @@ -359,6 +354,11 @@ func (v *V001Entry) Unmarshal(pe 
models.ProposedEntry) error { // we've gotten through all processing without error, now update the object we're unmarshalling into v.DSSEObj = *dsseObj v.env = env + v.isInsertable = true + + // memory optimization: clear huge strings/buffers + v.env.Payload = "" + v.DSSEObj.ProposedContent = nil return nil } @@ -533,6 +533,9 @@ func (v V001Entry) ArtifactHash() (string, error) { } func (v V001Entry) Insertable() (bool, error) { + if v.isInsertable { + return true, nil + } if v.DSSEObj.ProposedContent == nil { return false, errors.New("missing proposed content") } diff --git a/vendor/github.com/sigstore/rekor/pkg/verify/verify.go b/vendor/github.com/sigstore/rekor/pkg/verify/verify.go index 61846923b7..7081d34d68 100644 --- a/vendor/github.com/sigstore/rekor/pkg/verify/verify.go +++ b/vendor/github.com/sigstore/rekor/pkg/verify/verify.go @@ -145,7 +145,10 @@ func VerifyInclusion(ctx context.Context, e *models.LogEntryAnon) error { hashes := [][]byte{} for _, h := range e.Verification.InclusionProof.Hashes { - hb, _ := hex.DecodeString(h) + hb, err := hex.DecodeString(h) + if err != nil { + return err + } hashes = append(hashes, hb) } @@ -155,7 +158,11 @@ func VerifyInclusion(ctx context.Context, e *models.LogEntryAnon) error { } // Verify the inclusion proof. 
- entryBytes, err := base64.StdEncoding.DecodeString(e.Body.(string)) + b, ok := e.Body.(string) + if !ok { + return fmt.Errorf("entry body must be a string, was %T", e.Body) + } + entryBytes, err := base64.StdEncoding.DecodeString(b) if err != nil { return err } diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauth/interactive.go b/vendor/github.com/sigstore/sigstore/pkg/oauth/interactive.go index d9ee1234af..d24d89d746 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauth/interactive.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauth/interactive.go @@ -16,15 +16,14 @@ package oauth import ( - "bytes" "fmt" - "text/template" + "strings" ) // GetInteractiveSuccessHTML is the page displayed upon success when using a web browser during an interactive Oauth token flow. // The page will close automatically if autoclose is true with the timeout specified. func GetInteractiveSuccessHTML(autoclose bool, timeout int) (string, error) { - const successTemplate = ` + const successTemplateHead = ` Sigstore Authentication @@ -90,10 +89,11 @@ func GetInteractiveSuccessHTML(autoclose bool, timeout int) (string, error) { +` - {{ if .Autoclose -}} + const autocloseScript = ` - {{- end }} + + +` + + const successTemplateTail = ` ` - // Parse the template - tmpl, err := template.New("success").Parse(successTemplate) - if err != nil { - return "", fmt.Errorf("error parsing success template: %w", err) - } - // Pass autoclose and timeout to the template - data := struct { - Autoclose bool - Timeout int - }{ - autoclose, - timeout, - } - var htmlPage bytes.Buffer - if err := tmpl.Execute(&htmlPage, data); err != nil { - return "", fmt.Errorf("error executing template: %w", err) + + var sb strings.Builder + + sb.WriteString(successTemplateHead) + + if autoclose { + fmt.Fprintf(&sb, autocloseScript, timeout) } - return htmlPage.String(), nil + + sb.WriteString(successTemplateTail) + + return sb.String(), nil } const ( diff --git 
a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go index 3dad8c34f4..3d44044028 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go @@ -32,12 +32,14 @@ import ( const ( // SigstoreDeviceURL specifies the Device Code endpoint for the public good Sigstore service - /* #nosec */ + // // Deprecated: this constant (while correct) should not be used + /* #nosec */ SigstoreDeviceURL = "https://oauth2.sigstore.dev/auth/device/code" // SigstoreTokenURL specifies the Token endpoint for the public good Sigstore service - /* #nosec */ + // // Deprecated: this constant (while correct) should not be used + /* #nosec */ SigstoreTokenURL = "https://oauth2.sigstore.dev/auth/device/token" ) @@ -64,6 +66,7 @@ type DeviceFlowTokenGetter struct { } // NewDeviceFlowTokenGetter creates a new DeviceFlowTokenGetter that retrieves an OIDC Identity Token using a Device Code Grant +// // Deprecated: NewDeviceFlowTokenGetter is deprecated; use NewDeviceFlowTokenGetterForIssuer() instead func NewDeviceFlowTokenGetter(issuer, codeURL, _ string) *DeviceFlowTokenGetter { return &DeviceFlowTokenGetter{ diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go index de21064e45..3a4052d20f 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go @@ -201,6 +201,7 @@ func startRedirectListener(state, htmlPage, redirectURL string, doneCh chan stri } m.HandleFunc(urlListener.Path, func(w http.ResponseWriter, r *http.Request) { + r.Body = http.MaxBytesReader(w, r.Body, 1<<20) // even though these are fetched from the FormValue method, // these are supplied as query parameters if r.FormValue("state") != state { diff --git 
a/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go b/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go index 02c032b02d..802256ac65 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go @@ -62,7 +62,7 @@ type AlgorithmDetails struct { // The underlying type of these parameters is dependent on the keyType. // For example, ECDSA algorithms will store an elliptic curve here whereas, RSA keys will store the key size. // Algorithms that don't require any extra parameters leave this set to nil. - extraKeyParams interface{} + extraKeyParams any // flagValue is a string representation of the signature algorithm that follows the naming conventions of CLI // arguments that are used for Sigstore services. @@ -157,7 +157,7 @@ var supportedAlgorithms = []AlgorithmDetails{ {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(4096), "rsa-sign-pkcs1-4096-sha256"}, {v1.PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(2048), "rsa-sign-pss-2048-sha256"}, {v1.PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(3072), "rsa-sign-pss-3072-sha256"}, - {v1.PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(4096), "rsa-sign-pss-4092-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(4096), "rsa-sign-pss-4096-sha256"}, {v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256, ECDSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, elliptic.P256(), "ecdsa-sha2-256-nistp256"}, {v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384, ECDSA, crypto.SHA384, v1.HashAlgorithm_SHA2_384, elliptic.P384(), "ecdsa-sha2-384-nistp384"}, {v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_256, ECDSA, crypto.SHA256, 
v1.HashAlgorithm_SHA2_256, elliptic.P384(), "ecdsa-sha2-256-nistp384"}, //nolint:staticcheck diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/encoding/options.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/encoding/options.go index 09e4671ad1..7e6de0782e 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/encoding/options.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/encoding/options.go @@ -17,7 +17,7 @@ package encoding // We have some type assertions that seem like they may panic, but this is just to satisfy -// golanci-lint's forcetypeassert linter. If they were to ever fail, unit tests would also fail. +// golangci-lint's forcetypeassert linter. If they were to ever fail, unit tests would also fail. // We know the asserted types are valid because otherwise we would have compiler failures. import ( @@ -55,9 +55,9 @@ func PackRPCOptions(opts []signature.RPCOption) *common.RPCOptions { func UnpackRPCOptions(commonOpts *common.RPCOptions) []signature.RPCOption { opts := []signature.RPCOption{} if commonOpts.CtxDeadline != nil { - // no need fot this package to cancel the context early, + // no need for this package to cancel the context early, // and users may still check if the deadline is exceeded with ctx.Err(). - ctx, _ := context.WithDeadline(context.Background(), *commonOpts.CtxDeadline) //nolint:govet + ctx, _ := context.WithDeadline(context.Background(), *commonOpts.CtxDeadline) //nolint:govet,gosec opts = append(opts, options.WithContext(ctx)) } if commonOpts.KeyVersion != nil { @@ -90,7 +90,7 @@ func PackMessageOptions(opts []signature.MessageOption) *common.MessageOptions { // PackPublicKeyOptions extracts properties of all of opts into struct ready for serializing. 
func PackPublicKeyOptions(opts []signature.PublicKeyOption) *common.PublicKeyOptions { - rpcOpts := []signature.RPCOption{} + rpcOpts := make([]signature.RPCOption, 0, len(opts)) for _, opt := range opts { rpcOpts = append(rpcOpts, opt) } @@ -101,8 +101,9 @@ func PackPublicKeyOptions(opts []signature.PublicKeyOption) *common.PublicKeyOpt // UnpackPublicKeyOptions builds the []signature.PublicKeyOption from common.PublicKeyOptions. func UnpackPublicKeyOptions(commonOpts *common.PublicKeyOptions) []signature.PublicKeyOption { - opts := []signature.PublicKeyOption{} - for _, opt := range UnpackRPCOptions(&commonOpts.RPCOptions) { + rpcOpts := UnpackRPCOptions(&commonOpts.RPCOptions) + opts := make([]signature.PublicKeyOption, 0, len(rpcOpts)) + for _, opt := range rpcOpts { opt, ok := opt.(signature.PublicKeyOption) if !ok { panic("cannot assert as PublicKeyOption") @@ -124,13 +125,13 @@ func UnpackMessageOptions(commonOpts *common.MessageOptions) []signature.Message return opts } -// PackSignOptions extracts properties of all of opts into struct ready for serializing, +// PackSignOptions extracts properties of all of opts into struct ready for serializing. func PackSignOptions(opts []signature.SignOption) *common.SignOptions { - rpcOpts := []signature.RPCOption{} + rpcOpts := make([]signature.RPCOption, 0, len(opts)) for _, opt := range opts { rpcOpts = append(rpcOpts, opt) } - messageOpts := []signature.MessageOption{} + messageOpts := make([]signature.MessageOption, 0, len(opts)) for _, opt := range opts { messageOpts = append(messageOpts, opt) } @@ -140,17 +141,19 @@ func PackSignOptions(opts []signature.SignOption) *common.SignOptions { } } -// UnpackSignOptions builds the []]signature.SignOption from common.SignOptions. +// UnpackSignOptions builds the []signature.SignOption from common.SignOptions. 
func UnpackSignOptions(commonOpts *common.SignOptions) []signature.SignOption { - opts := []signature.SignOption{} - for _, opt := range UnpackRPCOptions(&commonOpts.RPCOptions) { + rpcOpts := UnpackRPCOptions(&commonOpts.RPCOptions) + msgOpts := UnpackMessageOptions(&commonOpts.MessageOptions) + opts := make([]signature.SignOption, 0, len(rpcOpts)+len(msgOpts)) + for _, opt := range rpcOpts { opt, ok := opt.(signature.SignOption) if !ok { panic("cannot assert as SignOption") } opts = append(opts, opt) } - for _, opt := range UnpackMessageOptions(&commonOpts.MessageOptions) { + for _, opt := range msgOpts { opt, ok := opt.(signature.SignOption) if !ok { panic("cannot assert as SignOption") @@ -160,13 +163,13 @@ func UnpackSignOptions(commonOpts *common.SignOptions) []signature.SignOption { return opts } -// PackVerifyOptions extracts properties of all of opts into struct ready for serializing, +// PackVerifyOptions extracts properties of all of opts into struct ready for serializing. func PackVerifyOptions(opts []signature.VerifyOption) *common.VerifyOptions { - rpcOpts := []signature.RPCOption{} + rpcOpts := make([]signature.RPCOption, 0, len(opts)) for _, opt := range opts { rpcOpts = append(rpcOpts, opt) } - messageOpts := []signature.MessageOption{} + messageOpts := make([]signature.MessageOption, 0, len(opts)) for _, opt := range opts { messageOpts = append(messageOpts, opt) } @@ -176,17 +179,19 @@ func PackVerifyOptions(opts []signature.VerifyOption) *common.VerifyOptions { } } -// UnpackVerifyOptions builds the []]signature.VerifyOption from common.VerifyOptions. +// UnpackVerifyOptions builds the []signature.VerifyOption from common.VerifyOptions. 
func UnpackVerifyOptions(commonOpts *common.VerifyOptions) []signature.VerifyOption { - opts := []signature.VerifyOption{} - for _, opt := range UnpackRPCOptions(&commonOpts.RPCOptions) { + rpcOpts := UnpackRPCOptions(&commonOpts.RPCOptions) + msgOpts := UnpackMessageOptions(&commonOpts.MessageOptions) + opts := make([]signature.VerifyOption, 0, len(rpcOpts)+len(msgOpts)) + for _, opt := range rpcOpts { opt, ok := opt.(signature.VerifyOption) if !ok { panic("cannot assert as VerifyOption") } opts = append(opts, opt) } - for _, opt := range UnpackMessageOptions(&commonOpts.MessageOptions) { + for _, opt := range msgOpts { opt, ok := opt.(signature.VerifyOption) if !ok { panic("cannot assert as VerifyOption") diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/message.go b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go index 44771ff3da..bd715b0c54 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/message.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go @@ -21,18 +21,14 @@ import ( "errors" "fmt" "io" + "slices" ) func isSupportedAlg(alg crypto.Hash, supportedAlgs []crypto.Hash) bool { if supportedAlgs == nil { return true } - for _, supportedAlg := range supportedAlgs { - if alg == supportedAlg { - return true - } - } - return false + return slices.Contains(supportedAlgs, alg) } // ComputeDigestForSigning calculates the digest value for the specified message using a hash function selected by the following process: diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go index cab6f5b98a..58cbff7973 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go @@ -29,8 +29,8 @@ const CosignSignatureType = "cosign container image signature" // SimpleContainerImage describes the structure of a basic container image signature 
payload, as defined at: // https://github.com/containers/image/blob/main/docs/containers-signature.5.md#json-data-format type SimpleContainerImage struct { - Critical Critical `json:"critical"` // Critical data critical to correctly evaluating the validity of the signature - Optional map[string]interface{} `json:"optional"` // Optional optional metadata about the image + Critical Critical `json:"critical"` // Critical data critical to correctly evaluating the validity of the signature + Optional map[string]any `json:"optional"` // Optional optional metadata about the image } // Critical data critical to correctly evaluating the validity of a signature @@ -65,7 +65,7 @@ type Cosign struct { // - Older versions of cosign generate signatures where ClaimedIdentity only contains a registry/…/repo ; signature consumers should allow users // to determine whether such images should be accepted (and, long-term, the default SHOULD be to reject them) ClaimedIdentity string - Annotations map[string]interface{} + Annotations map[string]any } // SimpleContainerImage returns information about a container image in the github.com/containers/image/signature format diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/util.go b/vendor/github.com/sigstore/sigstore/pkg/signature/util.go index 3f8beff49c..e4d7c4190d 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/util.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/util.go @@ -28,7 +28,7 @@ import ( ) // SignImage signs a container manifest using the specified signer object -func SignImage(signer SignerVerifier, image name.Digest, optionalAnnotations map[string]interface{}) (payload, signature []byte, err error) { +func SignImage(signer SignerVerifier, image name.Digest, optionalAnnotations map[string]any) (payload, signature []byte, err error) { imgPayload := sigpayload.Cosign{ Image: image, Annotations: optionalAnnotations, @@ -45,7 +45,7 @@ func SignImage(signer SignerVerifier, image name.Digest, 
optionalAnnotations map } // VerifyImageSignature verifies a signature over a container manifest -func VerifyImageSignature(signer SignerVerifier, payload, signature []byte) (image name.Digest, annotations map[string]interface{}, err error) { +func VerifyImageSignature(signer SignerVerifier, payload, signature []byte) (image name.Digest, annotations map[string]any, err error) { if err := signer.VerifySignature(bytes.NewReader(signature), bytes.NewReader(payload)); err != nil { return name.Digest{}, nil, fmt.Errorf("signature verification failed: %w", err) } diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify.go b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify.go index 4f6c77c792..f4952d16be 100644 --- a/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify.go +++ b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify.go @@ -237,7 +237,8 @@ func verifyNonce(requestNonce *big.Int, opts VerifyOpts) error { return nil } -// VerifyTimestampResponse the timestamp response using a timestamp certificate chain. +// VerifyTimestampResponse verifies the timestamp response using a timestamp certificate chain. +// Note: This function does not perform CRL/OCSP certificate revocation checks. 
func VerifyTimestampResponse(tsrBytes []byte, artifact io.Reader, opts VerifyOpts) (*timestamp.Timestamp, error) { // Verify the status of the TSR does not contain an error // handled by the timestamp.ParseResponse function diff --git a/vendor/github.com/sourcegraph/conc/.golangci.yml b/vendor/github.com/sourcegraph/conc/.golangci.yml deleted file mode 100644 index ae65a760a9..0000000000 --- a/vendor/github.com/sourcegraph/conc/.golangci.yml +++ /dev/null @@ -1,11 +0,0 @@ -linters: - disable-all: true - enable: - - errcheck - - godot - - gosimple - - govet - - ineffassign - - staticcheck - - typecheck - - unused diff --git a/vendor/github.com/sourcegraph/conc/Makefile b/vendor/github.com/sourcegraph/conc/Makefile deleted file mode 100644 index 3e0720a123..0000000000 --- a/vendor/github.com/sourcegraph/conc/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -.DEFAULT_GOAL := help - -GO_BIN ?= $(shell go env GOPATH)/bin - -.PHONY: help -help: - @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' - -$(GO_BIN)/golangci-lint: - @echo "==> Installing golangci-lint within "${GO_BIN}"" - @go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@latest - -.PHONY: lint -lint: $(GO_BIN)/golangci-lint ## Run linting on Go files - @echo "==> Linting Go source files" - @golangci-lint run -v --fix -c .golangci.yml ./... - -.PHONY: test -test: ## Run tests - go test -race -v ./... -coverprofile ./coverage.txt - -.PHONY: bench -bench: ## Run benchmarks. See https://pkg.go.dev/cmd/go#hdr-Testing_flags - go test ./... -bench . 
-benchtime 5s -timeout 0 -run=XXX -cpu 1 -benchmem diff --git a/vendor/github.com/sourcegraph/conc/README.md b/vendor/github.com/sourcegraph/conc/README.md deleted file mode 100644 index 1c87c3c969..0000000000 --- a/vendor/github.com/sourcegraph/conc/README.md +++ /dev/null @@ -1,464 +0,0 @@ -![conch](https://user-images.githubusercontent.com/12631702/210295964-785cc63d-d697-420c-99ff-f492eb81dec9.svg) - -# `conc`: better structured concurrency for go - -[![Go Reference](https://pkg.go.dev/badge/github.com/sourcegraph/conc.svg)](https://pkg.go.dev/github.com/sourcegraph/conc) -[![Sourcegraph](https://img.shields.io/badge/view%20on-sourcegraph-A112FE?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAEZklEQVRoQ+2aXWgUZxSG3292sxtNN43BhBakFPyhxSujRSxiU1pr7SaGXqgUxOIEW0IFkeYighYUxAuLUlq0lrq2iCDpjWtmFVtoG6QVNOCFVShVLyxIk0DVjZLMxt3xTGTccd2ZOd/8JBHci0CY9zvnPPN+/7sCIXwKavOwAcy2QgngQiIztDSE0OwQlDPYR1ebiaH6J5kZChyfW12gRG4QVgGTBfMchMbFP9Sn5nlZL2D0JjLD6710lc+z0NfqSGTXQRQ4bX07Mq423yoBL3OSyHSvUxirMuaEvgbJWrdcvkHMoJwxYuq4INUhyuWvQa1jvdMGxAvCxJlyEC9XOBCWL04wwRzpbDoDQ7wfZJzIQLi5Eggk6DiRhZgWIAbE3NrM4A3LPT8Q7UgqAqLqTmLSHLGPkyzG/qXEczhd0q6RH+zaSBfaUoc4iQx19pIClIscrTkNZzG6gd7qMY6eC2Hqyo705ZfTf+eqJmhMzcSbYtQpOXc92ZsZjLVAL4YNUQbJ5Ttg4CQrQdGYj44Xr9m1XJCzmZusFDJOWNpHjmh5x624a2ZFtOKDVL+uNo2TuXE3bZQQZUf8gtgqP31uI94Z/rMqix+IGiRfWw3xN9dCgVx+L3WrHm4Dju6PXz/EkjuXJ6R+IGgyOE1TbZqTq9y1eo0EZo7oMo1ktPu3xjHvuiLT5AFNszUyDULtWpzE2/fEsey8O5TbWuGWwxrs5rS7nFNMWJrNh2No74s9Ec4vRNmRRzPXMP19fBMSVsGcOJ98G8N3Wl2gXcbTjbX7vUBxLaeASDQCm5Cu/0E2tvtb0Ea+BowtskFD0wvlc6Rf2M+Jx7dTu7ubFr2dnKDRaMQe2v/tcIrNB7FH0O50AcrBaApmRDVwFO31ql3pD8QW4dP0feNwl/Q+kFEtRyIGyaWXnpy1OO0qNJWHo1y6iCmAGkBb/Ru+HenDWIF2mo4r8G+tRRzoniSn2uqFLxANhe9LKHVyTbz6egk9+x5w5fK6ulSNNMhZ/Feno+GebLZV6isTTa6k5qNl5RnZ5u56Ib6SBvFzaWBBVFZzvnERWlt/Cg4l27XChLCqFyLekjhy6xJyoytgjPf7opIB8QPx7sYFiMXHPGt76m741MhCKMZfng0nBOIjmoJPsLqWHwgFpe6V6qtfcopxveR2Oy+J0ntIN/zCWkf8QNAJ7y6d8Bq4lxLc2/qJl5K7t432XwcqX5CrI34gzATWuYILQtdQPyePDK3iuOekCR3Efjhig1B1Uq5UoXE
EoZX7d1q535J5S9VOeFyYyEBku5XTMXXKQTToX5Rg7OI44nbW5oKYeYK4EniMeF0YFNSmb+grhc84LyRCEP1/OurOcipCQbKxDeK2V5FcVyIDMQvsgz5gwFhcWWwKyRlvQ3gv29RwWoDYAbIofNyBxI9eDlQ+n3YgsgCWnr4MStGXQXmv9pF2La/k3OccV54JEBM4yp9EsXa/3LfO0dGPcYq0Y7DfZB8nJzZw2rppHgKgVHs8L5wvRwAAAABJRU5ErkJggg==)](https://sourcegraph.com/github.com/sourcegraph/conc) -[![Go Report Card](https://goreportcard.com/badge/github.com/sourcegraph/conc)](https://goreportcard.com/report/github.com/sourcegraph/conc) -[![codecov](https://codecov.io/gh/sourcegraph/conc/branch/main/graph/badge.svg?token=MQZTEA1QWT)](https://codecov.io/gh/sourcegraph/conc) -[![Discord](https://img.shields.io/badge/discord-chat-%235765F2)](https://discord.gg/bvXQXmtRjN) - -`conc` is your toolbelt for structured concurrency in go, making common tasks -easier and safer. - -```sh -go get github.com/sourcegraph/conc -``` - -# At a glance - -- Use [`conc.WaitGroup`](https://pkg.go.dev/github.com/sourcegraph/conc#WaitGroup) if you just want a safer version of `sync.WaitGroup` -- Use [`pool.Pool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool) if you want a concurrency-limited task runner -- Use [`pool.ResultPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultPool) if you want a concurrent task runner that collects task results -- Use [`pool.(Result)?ErrorPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool) if your tasks are fallible -- Use [`pool.(Result)?ContextPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ContextPool) if your tasks should be canceled on failure -- Use [`stream.Stream`](https://pkg.go.dev/github.com/sourcegraph/conc/stream#Stream) if you want to process an ordered stream of tasks in parallel with serial callbacks -- Use [`iter.Map`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#Map) if you want to concurrently map a slice -- Use [`iter.ForEach`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#ForEach) if you want to concurrently iterate over a slice -- Use 
[`panics.Catcher`](https://pkg.go.dev/github.com/sourcegraph/conc/panics#Catcher) if you want to catch panics in your own goroutines - -All pools are created with -[`pool.New()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#New) -or -[`pool.NewWithResults[T]()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#NewWithResults), -then configured with methods: - -- [`p.WithMaxGoroutines()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.MaxGoroutines) configures the maximum number of goroutines in the pool -- [`p.WithErrors()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithErrors) configures the pool to run tasks that return errors -- [`p.WithContext(ctx)`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithContext) configures the pool to run tasks that should be canceled on first error -- [`p.WithFirstError()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool.WithFirstError) configures error pools to only keep the first returned error rather than an aggregated error -- [`p.WithCollectErrored()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultContextPool.WithCollectErrored) configures result pools to collect results even when the task errored - -# Goals - -The main goals of the package are: -1) Make it harder to leak goroutines -2) Handle panics gracefully -3) Make concurrent code easier to read - -## Goal #1: Make it harder to leak goroutines - -A common pain point when working with goroutines is cleaning them up. It's -really easy to fire off a `go` statement and fail to properly wait for it to -complete. - -`conc` takes the opinionated stance that all concurrency should be scoped. -That is, goroutines should have an owner and that owner should always -ensure that its owned goroutines exit properly. - -In `conc`, the owner of a goroutine is always a `conc.WaitGroup`. 
Goroutines -are spawned in a `WaitGroup` with `(*WaitGroup).Go()`, and -`(*WaitGroup).Wait()` should always be called before the `WaitGroup` goes out -of scope. - -In some cases, you might want a spawned goroutine to outlast the scope of the -caller. In that case, you could pass a `WaitGroup` into the spawning function. - -```go -func main() { - var wg conc.WaitGroup - defer wg.Wait() - - startTheThing(&wg) -} - -func startTheThing(wg *conc.WaitGroup) { - wg.Go(func() { ... }) -} -``` - -For some more discussion on why scoped concurrency is nice, check out [this -blog -post](https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/). - -## Goal #2: Handle panics gracefully - -A frequent problem with goroutines in long-running applications is handling -panics. A goroutine spawned without a panic handler will crash the whole process -on panic. This is usually undesirable. - -However, if you do add a panic handler to a goroutine, what do you do with the -panic once you catch it? Some options: -1) Ignore it -2) Log it -3) Turn it into an error and return that to the goroutine spawner -4) Propagate the panic to the goroutine spawner - -Ignoring panics is a bad idea since panics usually mean there is actually -something wrong and someone should fix it. - -Just logging panics isn't great either because then there is no indication to the spawner -that something bad happened, and it might just continue on as normal even though your -program is in a really bad state. - -Both (3) and (4) are reasonable options, but both require the goroutine to have -an owner that can actually receive the message that something went wrong. This -is generally not true with a goroutine spawned with `go`, but in the `conc` -package, all goroutines have an owner that must collect the spawned goroutine. -In the conc package, any call to `Wait()` will panic if any of the spawned goroutines -panicked. 
Additionally, it decorates the panic value with a stacktrace from the child -goroutine so that you don't lose information about what caused the panic. - -Doing this all correctly every time you spawn something with `go` is not -trivial and it requires a lot of boilerplate that makes the important parts of -the code more difficult to read, so `conc` does this for you. - - - - - - - - - - -
stdlibconc
- -```go -type caughtPanicError struct { - val any - stack []byte -} - -func (e *caughtPanicError) Error() string { - return fmt.Sprintf( - "panic: %q\n%s", - e.val, - string(e.stack) - ) -} - -func main() { - done := make(chan error) - go func() { - defer func() { - if v := recover(); v != nil { - done <- &caughtPanicError{ - val: v, - stack: debug.Stack() - } - } else { - done <- nil - } - }() - doSomethingThatMightPanic() - }() - err := <-done - if err != nil { - panic(err) - } -} -``` - - -```go -func main() { - var wg conc.WaitGroup - wg.Go(doSomethingThatMightPanic) - // panics with a nice stacktrace - wg.Wait() -} -``` -
- -## Goal #3: Make concurrent code easier to read - -Doing concurrency correctly is difficult. Doing it in a way that doesn't -obfuscate what the code is actually doing is more difficult. The `conc` package -attempts to make common operations easier by abstracting as much boilerplate -complexity as possible. - -Want to run a set of concurrent tasks with a bounded set of goroutines? Use -`pool.New()`. Want to process an ordered stream of results concurrently, but -still maintain order? Try `stream.New()`. What about a concurrent map over -a slice? Take a peek at `iter.Map()`. - -Browse some examples below for some comparisons with doing these by hand. - -# Examples - -Each of these examples forgoes propagating panics for simplicity. To see -what kind of complexity that would add, check out the "Goal #2" header above. - -Spawn a set of goroutines and waiting for them to finish: - - - - - - - - - - -
stdlibconc
- -```go -func main() { - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // crashes on panic! - doSomething() - }() - } - wg.Wait() -} -``` - - -```go -func main() { - var wg conc.WaitGroup - for i := 0; i < 10; i++ { - wg.Go(doSomething) - } - wg.Wait() -} -``` -
- -Process each element of a stream in a static pool of goroutines: - - - - - - - - - - -
stdlibconc
- -```go -func process(stream chan int) { - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for elem := range stream { - handle(elem) - } - }() - } - wg.Wait() -} -``` - - -```go -func process(stream chan int) { - p := pool.New().WithMaxGoroutines(10) - for elem := range stream { - elem := elem - p.Go(func() { - handle(elem) - }) - } - p.Wait() -} -``` -
- -Process each element of a slice in a static pool of goroutines: - - - - - - - - - - -
stdlibconc
- -```go -func process(values []int) { - feeder := make(chan int, 8) - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for elem := range feeder { - handle(elem) - } - }() - } - - for _, value := range values { - feeder <- value - } - close(feeder) - wg.Wait() -} -``` - - -```go -func process(values []int) { - iter.ForEach(values, handle) -} -``` -
- -Concurrently map a slice: - - - - - - - - - - -
stdlibconc
- -```go -func concMap( - input []int, - f func(int) int, -) []int { - res := make([]int, len(input)) - var idx atomic.Int64 - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - for { - i := int(idx.Add(1) - 1) - if i >= len(input) { - return - } - - res[i] = f(input[i]) - } - }() - } - wg.Wait() - return res -} -``` - - -```go -func concMap( - input []int, - f func(*int) int, -) []int { - return iter.Map(input, f) -} -``` -
- -Process an ordered stream concurrently: - - - - - - - - - - - -
stdlibconc
- -```go -func mapStream( - in chan int, - out chan int, - f func(int) int, -) { - tasks := make(chan func()) - taskResults := make(chan chan int) - - // Worker goroutines - var workerWg sync.WaitGroup - for i := 0; i < 10; i++ { - workerWg.Add(1) - go func() { - defer workerWg.Done() - for task := range tasks { - task() - } - }() - } - - // Ordered reader goroutines - var readerWg sync.WaitGroup - readerWg.Add(1) - go func() { - defer readerWg.Done() - for result := range taskResults { - item := <-result - out <- item - } - }() - - // Feed the workers with tasks - for elem := range in { - resultCh := make(chan int, 1) - taskResults <- resultCh - tasks <- func() { - resultCh <- f(elem) - } - } - - // We've exhausted input. - // Wait for everything to finish - close(tasks) - workerWg.Wait() - close(taskResults) - readerWg.Wait() -} -``` - - -```go -func mapStream( - in chan int, - out chan int, - f func(int) int, -) { - s := stream.New().WithMaxGoroutines(10) - for elem := range in { - elem := elem - s.Go(func() stream.Callback { - res := f(elem) - return func() { out <- res } - }) - } - s.Wait() -} -``` -
- -# Status - -This package is currently pre-1.0. There are likely to be minor breaking -changes before a 1.0 release as we stabilize the APIs and tweak defaults. -Please open an issue if you have questions, concerns, or requests that you'd -like addressed before the 1.0 release. Currently, a 1.0 is targeted for -March 2023. diff --git a/vendor/github.com/sourcegraph/conc/panics/panics.go b/vendor/github.com/sourcegraph/conc/panics/panics.go deleted file mode 100644 index abbed7fa05..0000000000 --- a/vendor/github.com/sourcegraph/conc/panics/panics.go +++ /dev/null @@ -1,102 +0,0 @@ -package panics - -import ( - "fmt" - "runtime" - "runtime/debug" - "sync/atomic" -) - -// Catcher is used to catch panics. You can execute a function with Try, -// which will catch any spawned panic. Try can be called any number of times, -// from any number of goroutines. Once all calls to Try have completed, you can -// get the value of the first panic (if any) with Recovered(), or you can just -// propagate the panic (re-panic) with Repanic(). -type Catcher struct { - recovered atomic.Pointer[Recovered] -} - -// Try executes f, catching any panic it might spawn. It is safe -// to call from multiple goroutines simultaneously. -func (p *Catcher) Try(f func()) { - defer p.tryRecover() - f() -} - -func (p *Catcher) tryRecover() { - if val := recover(); val != nil { - rp := NewRecovered(1, val) - p.recovered.CompareAndSwap(nil, &rp) - } -} - -// Repanic panics if any calls to Try caught a panic. It will panic with the -// value of the first panic caught, wrapped in a panics.Recovered with caller -// information. -func (p *Catcher) Repanic() { - if val := p.Recovered(); val != nil { - panic(val) - } -} - -// Recovered returns the value of the first panic caught by Try, or nil if -// no calls to Try panicked. -func (p *Catcher) Recovered() *Recovered { - return p.recovered.Load() -} - -// NewRecovered creates a panics.Recovered from a panic value and a collected -// stacktrace. 
The skip parameter allows the caller to skip stack frames when -// collecting the stacktrace. Calling with a skip of 0 means include the call to -// NewRecovered in the stacktrace. -func NewRecovered(skip int, value any) Recovered { - // 64 frames should be plenty - var callers [64]uintptr - n := runtime.Callers(skip+1, callers[:]) - return Recovered{ - Value: value, - Callers: callers[:n], - Stack: debug.Stack(), - } -} - -// Recovered is a panic that was caught with recover(). -type Recovered struct { - // The original value of the panic. - Value any - // The caller list as returned by runtime.Callers when the panic was - // recovered. Can be used to produce a more detailed stack information with - // runtime.CallersFrames. - Callers []uintptr - // The formatted stacktrace from the goroutine where the panic was recovered. - // Easier to use than Callers. - Stack []byte -} - -// String renders a human-readable formatting of the panic. -func (p *Recovered) String() string { - return fmt.Sprintf("panic: %v\nstacktrace:\n%s\n", p.Value, p.Stack) -} - -// AsError casts the panic into an error implementation. The implementation -// is unwrappable with the cause of the panic, if the panic was provided one. -func (p *Recovered) AsError() error { - if p == nil { - return nil - } - return &ErrRecovered{*p} -} - -// ErrRecovered wraps a panics.Recovered in an error implementation. -type ErrRecovered struct{ Recovered } - -var _ error = (*ErrRecovered)(nil) - -func (p *ErrRecovered) Error() string { return p.String() } - -func (p *ErrRecovered) Unwrap() error { - if err, ok := p.Value.(error); ok { - return err - } - return nil -} diff --git a/vendor/github.com/sourcegraph/conc/panics/try.go b/vendor/github.com/sourcegraph/conc/panics/try.go deleted file mode 100644 index 4ded92a1cb..0000000000 --- a/vendor/github.com/sourcegraph/conc/panics/try.go +++ /dev/null @@ -1,11 +0,0 @@ -package panics - -// Try executes f, catching and returning any panic it might spawn. 
-// -// The recovered panic can be propagated with panic(), or handled as a normal error with -// (*panics.Recovered).AsError(). -func Try(f func()) *Recovered { - var c Catcher - c.Try(f) - return c.Recovered() -} diff --git a/vendor/github.com/sourcegraph/conc/pool/context_pool.go b/vendor/github.com/sourcegraph/conc/pool/context_pool.go deleted file mode 100644 index 85c34e5aef..0000000000 --- a/vendor/github.com/sourcegraph/conc/pool/context_pool.go +++ /dev/null @@ -1,104 +0,0 @@ -package pool - -import ( - "context" -) - -// ContextPool is a pool that runs tasks that take a context. -// A new ContextPool should be created with `New().WithContext(ctx)`. -// -// The configuration methods (With*) will panic if they are used after calling -// Go() for the first time. -type ContextPool struct { - errorPool ErrorPool - - ctx context.Context - cancel context.CancelFunc - - cancelOnError bool -} - -// Go submits a task. If it returns an error, the error will be -// collected and returned by Wait(). If all goroutines in the pool -// are busy, a call to Go() will block until the task can be started. -func (p *ContextPool) Go(f func(ctx context.Context) error) { - p.errorPool.Go(func() error { - if p.cancelOnError { - // If we are cancelling on error, then we also want to cancel if a - // panic is raised. To do this, we need to recover, cancel, and then - // re-throw the caught panic. - defer func() { - if r := recover(); r != nil { - p.cancel() - panic(r) - } - }() - } - - err := f(p.ctx) - if err != nil && p.cancelOnError { - // Leaky abstraction warning: We add the error directly because - // otherwise, canceling could cause another goroutine to exit and - // return an error before this error was added, which breaks the - // expectations of WithFirstError(). - p.errorPool.addErr(err) - p.cancel() - return nil - } - return err - }) -} - -// Wait cleans up all spawned goroutines, propagates any panics, and -// returns an error if any of the tasks errored. 
-func (p *ContextPool) Wait() error { - // Make sure we call cancel after pool is done to avoid memory leakage. - defer p.cancel() - return p.errorPool.Wait() -} - -// WithFirstError configures the pool to only return the first error -// returned by a task. By default, Wait() will return a combined error. -// This is particularly useful for (*ContextPool).WithCancelOnError(), -// where all errors after the first are likely to be context.Canceled. -func (p *ContextPool) WithFirstError() *ContextPool { - p.panicIfInitialized() - p.errorPool.WithFirstError() - return p -} - -// WithCancelOnError configures the pool to cancel its context as soon as -// any task returns an error or panics. By default, the pool's context is not -// canceled until the parent context is canceled. -// -// In this case, all errors returned from the pool after the first will -// likely be context.Canceled - you may want to also use -// (*ContextPool).WithFirstError() to configure the pool to only return -// the first error. -func (p *ContextPool) WithCancelOnError() *ContextPool { - p.panicIfInitialized() - p.cancelOnError = true - return p -} - -// WithFailFast is an alias for the combination of WithFirstError and -// WithCancelOnError. By default, the errors from all tasks are returned and -// the pool's context is not canceled until the parent context is canceled. -func (p *ContextPool) WithFailFast() *ContextPool { - p.panicIfInitialized() - p.WithFirstError() - p.WithCancelOnError() - return p -} - -// WithMaxGoroutines limits the number of goroutines in a pool. -// Defaults to unlimited. Panics if n < 1. 
-func (p *ContextPool) WithMaxGoroutines(n int) *ContextPool { - p.panicIfInitialized() - p.errorPool.WithMaxGoroutines(n) - return p -} - -func (p *ContextPool) panicIfInitialized() { - p.errorPool.panicIfInitialized() -} diff --git a/vendor/github.com/sourcegraph/conc/pool/error_pool.go b/vendor/github.com/sourcegraph/conc/pool/error_pool.go deleted file mode 100644 index e1789e61b6..0000000000 --- a/vendor/github.com/sourcegraph/conc/pool/error_pool.go +++ /dev/null @@ -1,100 +0,0 @@ -package pool - -import ( - "context" - "errors" - "sync" -) - -// ErrorPool is a pool that runs tasks that may return an error. -// Errors are collected and returned by Wait(). -// -// The configuration methods (With*) will panic if they are used after calling -// Go() for the first time. -// -// A new ErrorPool should be created using `New().WithErrors()`. -type ErrorPool struct { - pool Pool - - onlyFirstError bool - - mu sync.Mutex - errs []error -} - -// Go submits a task to the pool. If all goroutines in the pool -// are busy, a call to Go() will block until the task can be started. -func (p *ErrorPool) Go(f func() error) { - p.pool.Go(func() { - p.addErr(f()) - }) -} - -// Wait cleans up any spawned goroutines, propagating any panics and -// returning any errors from tasks. -func (p *ErrorPool) Wait() error { - p.pool.Wait() - - errs := p.errs - p.errs = nil // reset errs - - if len(errs) == 0 { - return nil - } else if p.onlyFirstError { - return errs[0] - } else { - return errors.Join(errs...) - } -} - -// WithContext converts the pool to a ContextPool for tasks that should -// run under the same context, such that they each respect shared cancellation. -// For example, WithCancelOnError can be configured on the returned pool to -// signal that all goroutines should be cancelled upon the first error. 
-func (p *ErrorPool) WithContext(ctx context.Context) *ContextPool { - p.panicIfInitialized() - ctx, cancel := context.WithCancel(ctx) - return &ContextPool{ - errorPool: p.deref(), - ctx: ctx, - cancel: cancel, - } -} - -// WithFirstError configures the pool to only return the first error -// returned by a task. By default, Wait() will return a combined error. -func (p *ErrorPool) WithFirstError() *ErrorPool { - p.panicIfInitialized() - p.onlyFirstError = true - return p -} - -// WithMaxGoroutines limits the number of goroutines in a pool. -// Defaults to unlimited. Panics if n < 1. -func (p *ErrorPool) WithMaxGoroutines(n int) *ErrorPool { - p.panicIfInitialized() - p.pool.WithMaxGoroutines(n) - return p -} - -// deref is a helper that creates a shallow copy of the pool with the same -// settings. We don't want to just dereference the pointer because that makes -// the copylock lint angry. -func (p *ErrorPool) deref() ErrorPool { - return ErrorPool{ - pool: p.pool.deref(), - onlyFirstError: p.onlyFirstError, - } -} - -func (p *ErrorPool) panicIfInitialized() { - p.pool.panicIfInitialized() -} - -func (p *ErrorPool) addErr(err error) { - if err != nil { - p.mu.Lock() - p.errs = append(p.errs, err) - p.mu.Unlock() - } -} diff --git a/vendor/github.com/sourcegraph/conc/pool/pool.go b/vendor/github.com/sourcegraph/conc/pool/pool.go deleted file mode 100644 index 8f4494efb1..0000000000 --- a/vendor/github.com/sourcegraph/conc/pool/pool.go +++ /dev/null @@ -1,174 +0,0 @@ -package pool - -import ( - "context" - "sync" - - "github.com/sourcegraph/conc" -) - -// New creates a new Pool. -func New() *Pool { - return &Pool{} -} - -// Pool is a pool of goroutines used to execute tasks concurrently. -// -// Tasks are submitted with Go(). Once all your tasks have been submitted, you -// must call Wait() to clean up any spawned goroutines and propagate any -// panics. -// -// Goroutines are started lazily, so creating a new pool is cheap. 
There will -// never be more goroutines spawned than there are tasks submitted. -// -// The configuration methods (With*) will panic if they are used after calling -// Go() for the first time. -// -// Pool is efficient, but not zero cost. It should not be used for very short -// tasks. Startup and teardown come with an overhead of around 1µs, and each -// task has an overhead of around 300ns. -type Pool struct { - handle conc.WaitGroup - limiter limiter - tasks chan func() - initOnce sync.Once -} - -// Go submits a task to be run in the pool. If all goroutines in the pool -// are busy, a call to Go() will block until the task can be started. -func (p *Pool) Go(f func()) { - p.init() - - if p.limiter == nil { - // No limit on the number of goroutines. - select { - case p.tasks <- f: - // A goroutine was available to handle the task. - default: - // No goroutine was available to handle the task. - // Spawn a new one and send it the task. - p.handle.Go(func() { - p.worker(f) - }) - } - } else { - select { - case p.limiter <- struct{}{}: - // If we are below our limit, spawn a new worker rather - // than waiting for one to become available. - p.handle.Go(func() { - p.worker(f) - }) - case p.tasks <- f: - // A worker is available and has accepted the task. - return - } - } - -} - -// Wait cleans up spawned goroutines, propagating any panics that were -// raised by a tasks. -func (p *Pool) Wait() { - p.init() - - close(p.tasks) - - // After Wait() returns, reset the struct so tasks will be reinitialized on - // next use. This better matches the behavior of sync.WaitGroup - defer func() { p.initOnce = sync.Once{} }() - - p.handle.Wait() -} - -// MaxGoroutines returns the maximum size of the pool. -func (p *Pool) MaxGoroutines() int { - return p.limiter.limit() -} - -// WithMaxGoroutines limits the number of goroutines in a pool. -// Defaults to unlimited. Panics if n < 1. 
-func (p *Pool) WithMaxGoroutines(n int) *Pool { - p.panicIfInitialized() - if n < 1 { - panic("max goroutines in a pool must be greater than zero") - } - p.limiter = make(limiter, n) - return p -} - -// init ensures that the pool is initialized before use. This makes the -// zero value of the pool usable. -func (p *Pool) init() { - p.initOnce.Do(func() { - p.tasks = make(chan func()) - }) -} - -// panicIfInitialized will trigger a panic if a configuration method is called -// after the pool has started any goroutines for the first time. In the case that -// new settings are needed, a new pool should be created. -func (p *Pool) panicIfInitialized() { - if p.tasks != nil { - panic("pool can not be reconfigured after calling Go() for the first time") - } -} - -// WithErrors converts the pool to an ErrorPool so the submitted tasks can -// return errors. -func (p *Pool) WithErrors() *ErrorPool { - p.panicIfInitialized() - return &ErrorPool{ - pool: p.deref(), - } -} - -// deref is a helper that creates a shallow copy of the pool with the same -// settings. We don't want to just dereference the pointer because that makes -// the copylock lint angry. -func (p *Pool) deref() Pool { - p.panicIfInitialized() - return Pool{ - limiter: p.limiter, - } -} - -// WithContext converts the pool to a ContextPool for tasks that should -// run under the same context, such that they each respect shared cancellation. -// For example, WithCancelOnError can be configured on the returned pool to -// signal that all goroutines should be cancelled upon the first error. -func (p *Pool) WithContext(ctx context.Context) *ContextPool { - p.panicIfInitialized() - ctx, cancel := context.WithCancel(ctx) - return &ContextPool{ - errorPool: p.WithErrors().deref(), - ctx: ctx, - cancel: cancel, - } -} - -func (p *Pool) worker(initialFunc func()) { - // The only time this matters is if the task panics. - // This makes it possible to spin up new workers in that case. 
- defer p.limiter.release() - - if initialFunc != nil { - initialFunc() - } - - for f := range p.tasks { - f() - } -} - -type limiter chan struct{} - -func (l limiter) limit() int { - return cap(l) -} - -func (l limiter) release() { - if l != nil { - <-l - } -} diff --git a/vendor/github.com/sourcegraph/conc/pool/result_context_pool.go b/vendor/github.com/sourcegraph/conc/pool/result_context_pool.go deleted file mode 100644 index 6bc30dd63c..0000000000 --- a/vendor/github.com/sourcegraph/conc/pool/result_context_pool.go +++ /dev/null @@ -1,85 +0,0 @@ -package pool - -import ( - "context" -) - -// ResultContextPool is a pool that runs tasks that take a context and return a -// result. The context passed to the task will be canceled if any of the tasks -// return an error, which makes its functionality different than just capturing -// a context with the task closure. -// -// The configuration methods (With*) will panic if they are used after calling -// Go() for the first time. -type ResultContextPool[T any] struct { - contextPool ContextPool - agg resultAggregator[T] - collectErrored bool -} - -// Go submits a task to the pool. If all goroutines in the pool -// are busy, a call to Go() will block until the task can be started. -func (p *ResultContextPool[T]) Go(f func(context.Context) (T, error)) { - idx := p.agg.nextIndex() - p.contextPool.Go(func(ctx context.Context) error { - res, err := f(ctx) - p.agg.save(idx, res, err != nil) - return err - }) -} - -// Wait cleans up all spawned goroutines, propagates any panics, and -// returns an error if any of the tasks errored. -func (p *ResultContextPool[T]) Wait() ([]T, error) { - err := p.contextPool.Wait() - results := p.agg.collect(p.collectErrored) - p.agg = resultAggregator[T]{} - return results, err -} - -// WithCollectErrored configures the pool to still collect the result of a task -// even if the task returned an error. 
By default, the result of tasks that errored -// are ignored and only the error is collected. -func (p *ResultContextPool[T]) WithCollectErrored() *ResultContextPool[T] { - p.panicIfInitialized() - p.collectErrored = true - return p -} - -// WithFirstError configures the pool to only return the first error -// returned by a task. By default, Wait() will return a combined error. -func (p *ResultContextPool[T]) WithFirstError() *ResultContextPool[T] { - p.panicIfInitialized() - p.contextPool.WithFirstError() - return p -} - -// WithCancelOnError configures the pool to cancel its context as soon as -// any task returns an error. By default, the pool's context is not -// canceled until the parent context is canceled. -func (p *ResultContextPool[T]) WithCancelOnError() *ResultContextPool[T] { - p.panicIfInitialized() - p.contextPool.WithCancelOnError() - return p -} - -// WithFailFast is an alias for the combination of WithFirstError and -// WithCancelOnError. By default, the errors from all tasks are returned and -// the pool's context is not canceled until the parent context is canceled. -func (p *ResultContextPool[T]) WithFailFast() *ResultContextPool[T] { - p.panicIfInitialized() - p.contextPool.WithFailFast() - return p -} - -// WithMaxGoroutines limits the number of goroutines in a pool. -// Defaults to unlimited. Panics if n < 1. 
-func (p *ResultContextPool[T]) WithMaxGoroutines(n int) *ResultContextPool[T] { - p.panicIfInitialized() - p.contextPool.WithMaxGoroutines(n) - return p -} - -func (p *ResultContextPool[T]) panicIfInitialized() { - p.contextPool.panicIfInitialized() -} diff --git a/vendor/github.com/sourcegraph/conc/pool/result_error_pool.go b/vendor/github.com/sourcegraph/conc/pool/result_error_pool.go deleted file mode 100644 index 832cd9bb47..0000000000 --- a/vendor/github.com/sourcegraph/conc/pool/result_error_pool.go +++ /dev/null @@ -1,80 +0,0 @@ -package pool - -import ( - "context" -) - -// ResultErrorPool is a pool that executes tasks that return a generic result -// type and an error. Tasks are executed in the pool with Go(), then the -// results of the tasks are returned by Wait(). -// -// The order of the results is guaranteed to be the same as the order the -// tasks were submitted. -// -// The configuration methods (With*) will panic if they are used after calling -// Go() for the first time. -type ResultErrorPool[T any] struct { - errorPool ErrorPool - agg resultAggregator[T] - collectErrored bool -} - -// Go submits a task to the pool. If all goroutines in the pool -// are busy, a call to Go() will block until the task can be started. -func (p *ResultErrorPool[T]) Go(f func() (T, error)) { - idx := p.agg.nextIndex() - p.errorPool.Go(func() error { - res, err := f() - p.agg.save(idx, res, err != nil) - return err - }) -} - -// Wait cleans up any spawned goroutines, propagating any panics and -// returning the results and any errors from tasks. -func (p *ResultErrorPool[T]) Wait() ([]T, error) { - err := p.errorPool.Wait() - results := p.agg.collect(p.collectErrored) - p.agg = resultAggregator[T]{} // reset for reuse - return results, err -} - -// WithCollectErrored configures the pool to still collect the result of a task -// even if the task returned an error. By default, the result of tasks that errored -// are ignored and only the error is collected. 
-func (p *ResultErrorPool[T]) WithCollectErrored() *ResultErrorPool[T] { - p.panicIfInitialized() - p.collectErrored = true - return p -} - -// WithContext converts the pool to a ResultContextPool for tasks that should -// run under the same context, such that they each respect shared cancellation. -// For example, WithCancelOnError can be configured on the returned pool to -// signal that all goroutines should be cancelled upon the first error. -func (p *ResultErrorPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] { - p.panicIfInitialized() - return &ResultContextPool[T]{ - contextPool: *p.errorPool.WithContext(ctx), - } -} - -// WithFirstError configures the pool to only return the first error -// returned by a task. By default, Wait() will return a combined error. -func (p *ResultErrorPool[T]) WithFirstError() *ResultErrorPool[T] { - p.panicIfInitialized() - p.errorPool.WithFirstError() - return p -} - -// WithMaxGoroutines limits the number of goroutines in a pool. -// Defaults to unlimited. Panics if n < 1. -func (p *ResultErrorPool[T]) WithMaxGoroutines(n int) *ResultErrorPool[T] { - p.panicIfInitialized() - p.errorPool.WithMaxGoroutines(n) - return p -} - -func (p *ResultErrorPool[T]) panicIfInitialized() { - p.errorPool.panicIfInitialized() -} diff --git a/vendor/github.com/sourcegraph/conc/pool/result_pool.go b/vendor/github.com/sourcegraph/conc/pool/result_pool.go deleted file mode 100644 index f73a77261e..0000000000 --- a/vendor/github.com/sourcegraph/conc/pool/result_pool.go +++ /dev/null @@ -1,142 +0,0 @@ -package pool - -import ( - "context" - "sort" - "sync" -) - -// NewWithResults creates a new ResultPool for tasks with a result of type T. -// -// The configuration methods (With*) will panic if they are used after calling -// Go() for the first time. -func NewWithResults[T any]() *ResultPool[T] { - return &ResultPool[T]{ - pool: *New(), - } -} - -// ResultPool is a pool that executes tasks that return a generic result type. 
-// Tasks are executed in the pool with Go(), then the results of the tasks are -// returned by Wait(). -// -// The order of the results is guaranteed to be the same as the order the -// tasks were submitted. -type ResultPool[T any] struct { - pool Pool - agg resultAggregator[T] -} - -// Go submits a task to the pool. If all goroutines in the pool -// are busy, a call to Go() will block until the task can be started. -func (p *ResultPool[T]) Go(f func() T) { - idx := p.agg.nextIndex() - p.pool.Go(func() { - p.agg.save(idx, f(), false) - }) -} - -// Wait cleans up all spawned goroutines, propagating any panics, and returning -// a slice of results from tasks that did not panic. -func (p *ResultPool[T]) Wait() []T { - p.pool.Wait() - results := p.agg.collect(true) - p.agg = resultAggregator[T]{} // reset for reuse - return results -} - -// MaxGoroutines returns the maximum size of the pool. -func (p *ResultPool[T]) MaxGoroutines() int { - return p.pool.MaxGoroutines() -} - -// WithErrors converts the pool to an ResultErrorPool so the submitted tasks -// can return errors. -func (p *ResultPool[T]) WithErrors() *ResultErrorPool[T] { - p.panicIfInitialized() - return &ResultErrorPool[T]{ - errorPool: *p.pool.WithErrors(), - } -} - -// WithContext converts the pool to a ResultContextPool for tasks that should -// run under the same context, such that they each respect shared cancellation. -// For example, WithCancelOnError can be configured on the returned pool to -// signal that all goroutines should be cancelled upon the first error. -func (p *ResultPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] { - p.panicIfInitialized() - return &ResultContextPool[T]{ - contextPool: *p.pool.WithContext(ctx), - } -} - -// WithMaxGoroutines limits the number of goroutines in a pool. -// Defaults to unlimited. Panics if n < 1. 
-func (p *ResultPool[T]) WithMaxGoroutines(n int) *ResultPool[T] { - p.panicIfInitialized() - p.pool.WithMaxGoroutines(n) - return p -} - -func (p *ResultPool[T]) panicIfInitialized() { - p.pool.panicIfInitialized() -} - -// resultAggregator is a utility type that lets us safely append from multiple -// goroutines. The zero value is valid and ready to use. -type resultAggregator[T any] struct { - mu sync.Mutex - len int - results []T - errored []int -} - -// nextIndex reserves a slot for a result. The returned value should be passed -// to save() when adding a result to the aggregator. -func (r *resultAggregator[T]) nextIndex() int { - r.mu.Lock() - defer r.mu.Unlock() - - nextIdx := r.len - r.len += 1 - return nextIdx -} - -func (r *resultAggregator[T]) save(i int, res T, errored bool) { - r.mu.Lock() - defer r.mu.Unlock() - - if i >= len(r.results) { - old := r.results - r.results = make([]T, r.len) - copy(r.results, old) - } - - r.results[i] = res - - if errored { - r.errored = append(r.errored, i) - } -} - -// collect returns the set of aggregated results. -func (r *resultAggregator[T]) collect(collectErrored bool) []T { - if !r.mu.TryLock() { - panic("collect should not be called until all goroutines have exited") - } - - if collectErrored || len(r.errored) == 0 { - return r.results - } - - filtered := r.results[:0] - sort.Ints(r.errored) - for i, e := range r.errored { - if i == 0 { - filtered = append(filtered, r.results[:e]...) - } else { - filtered = append(filtered, r.results[r.errored[i-1]+1:e]...) - } - } - return filtered -} diff --git a/vendor/github.com/sourcegraph/conc/waitgroup.go b/vendor/github.com/sourcegraph/conc/waitgroup.go deleted file mode 100644 index 47b1bc1a5c..0000000000 --- a/vendor/github.com/sourcegraph/conc/waitgroup.go +++ /dev/null @@ -1,52 +0,0 @@ -package conc - -import ( - "sync" - - "github.com/sourcegraph/conc/panics" -) - -// NewWaitGroup creates a new WaitGroup. 
-func NewWaitGroup() *WaitGroup { - return &WaitGroup{} -} - -// WaitGroup is the primary building block for scoped concurrency. -// Goroutines can be spawned in the WaitGroup with the Go method, -// and calling Wait() will ensure that each of those goroutines exits -// before continuing. Any panics in a child goroutine will be caught -// and propagated to the caller of Wait(). -// -// The zero value of WaitGroup is usable, just like sync.WaitGroup. -// Also like sync.WaitGroup, it must not be copied after first use. -type WaitGroup struct { - wg sync.WaitGroup - pc panics.Catcher -} - -// Go spawns a new goroutine in the WaitGroup. -func (h *WaitGroup) Go(f func()) { - h.wg.Add(1) - go func() { - defer h.wg.Done() - h.pc.Try(f) - }() -} - -// Wait will block until all goroutines spawned with Go exit and will -// propagate any panics spawned in a child goroutine. -func (h *WaitGroup) Wait() { - h.wg.Wait() - - // Propagate a panic if we caught one from a child goroutine. - h.pc.Repanic() -} - -// WaitAndRecover will block until all goroutines spawned with Go exit and -// will return a *panics.Recovered if one of the child goroutines panics. -func (h *WaitGroup) WaitAndRecover() *panics.Recovered { - h.wg.Wait() - - // Return a recovered panic if we caught one from a child goroutine. 
- return h.pc.Recovered() -} diff --git a/vendor/github.com/stoewer/go-strcase/.gitignore b/vendor/github.com/stoewer/go-strcase/.gitignore deleted file mode 100644 index db5247b944..0000000000 --- a/vendor/github.com/stoewer/go-strcase/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -vendor -doc - -# Temporary files -*~ -*.swp - -# Editor and IDE config -.idea -*.iml -.vscode diff --git a/vendor/github.com/stoewer/go-strcase/.golangci.yml b/vendor/github.com/stoewer/go-strcase/.golangci.yml deleted file mode 100644 index 0e75d86ae0..0000000000 --- a/vendor/github.com/stoewer/go-strcase/.golangci.yml +++ /dev/null @@ -1,19 +0,0 @@ -version: "2" - -linters: - enable: - - dupl - - goconst - - gocyclo - - godox - - gosec - - lll - - misspell - - prealloc - - staticcheck - - unconvert - - unparam - -formatters: - enable: - - gofmt diff --git a/vendor/github.com/stoewer/go-strcase/LICENSE b/vendor/github.com/stoewer/go-strcase/LICENSE deleted file mode 100644 index a105a3819a..0000000000 --- a/vendor/github.com/stoewer/go-strcase/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2017, Adrian Stoewer - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/stoewer/go-strcase/README.md b/vendor/github.com/stoewer/go-strcase/README.md deleted file mode 100644 index 84a640e714..0000000000 --- a/vendor/github.com/stoewer/go-strcase/README.md +++ /dev/null @@ -1,50 +0,0 @@ -[![GH Actions](https://github.com/stoewer/go-strcase/actions/workflows/lint-test.yml/badge.svg?branch=master)](https://github.com/stoewer/go-strcase/actions) -[![codecov](https://codecov.io/github/stoewer/go-strcase/branch/master/graph/badge.svg?token=c0UokYnop5)](https://codecov.io/github/stoewer/go-strcase) -[![GoDoc](https://godoc.org/github.com/stoewer/go-strcase?status.svg)](https://pkg.go.dev/github.com/stoewer/go-strcase) ---- - -Go strcase -========== - -The package `strcase` converts between different kinds of naming formats such as camel case -(`CamelCase`), snake case (`snake_case`) or kebab case (`kebab-case`). -The package is designed to work only with strings consisting of standard ASCII letters. -Unicode is currently not supported. - -Versioning and stability ------------------------- - -Although the master branch is supposed to remain always backward compatible, the repository -contains version tags in order to support vendoring tools. -The tag names follow semantic versioning conventions and have the following format `v1.0.0`. -This package supports Go modules introduced with version 1.11. 
- -Example -------- - -```go -import "github.com/stoewer/go-strcase" - -var snake = strcase.SnakeCase("CamelCase") -``` - -Dependencies ------------- - -### Build dependencies - -* none - -### Test dependencies - -* `github.com/stretchr/testify` - -Run linters and unit tests --------------------------- - -To run the static code analysis, linters and tests use the following commands: - -``` -golangci-lint run --config .golangci.yml ./... -go test ./... -``` diff --git a/vendor/github.com/stoewer/go-strcase/camel.go b/vendor/github.com/stoewer/go-strcase/camel.go deleted file mode 100644 index 7a9bec7c10..0000000000 --- a/vendor/github.com/stoewer/go-strcase/camel.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. - -package strcase - -import ( - "strings" -) - -// UpperCamelCase converts a string into camel case starting with a upper case letter. -func UpperCamelCase(s string) string { - return camelCase(s, true) -} - -// LowerCamelCase converts a string into camel case starting with a lower case letter. 
-func LowerCamelCase(s string) string { - return camelCase(s, false) -} - -func camelCase(s string, upper bool) string { - s = strings.TrimSpace(s) - buffer := make([]rune, 0, len(s)) - - stringIter(s, func(prev, curr, next rune) { - if !isDelimiter(curr) { - if isDelimiter(prev) || (upper && prev == 0) { - buffer = append(buffer, toUpper(curr)) - } else if isLower(prev) { - buffer = append(buffer, curr) - } else if isUpper(prev) && isUpper(curr) && isLower(next) { - // Assume a case like "R" for "XRequestId" - buffer = append(buffer, curr) - } else if isUpper(curr) && isDigit(prev) { - // Preserve uppercase letters after numbers - buffer = append(buffer, curr) - } else { - buffer = append(buffer, toLower(curr)) - } - } - }) - - return string(buffer) -} diff --git a/vendor/github.com/stoewer/go-strcase/doc.go b/vendor/github.com/stoewer/go-strcase/doc.go deleted file mode 100644 index 3e441ca3ef..0000000000 --- a/vendor/github.com/stoewer/go-strcase/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. - -// Package strcase converts between different kinds of naming formats such as camel case -// (CamelCase), snake case (snake_case) or kebab case (kebab-case). The package is designed -// to work only with strings consisting of standard ASCII letters. Unicode is currently not -// supported. -package strcase diff --git a/vendor/github.com/stoewer/go-strcase/helper.go b/vendor/github.com/stoewer/go-strcase/helper.go deleted file mode 100644 index 96e79d6e13..0000000000 --- a/vendor/github.com/stoewer/go-strcase/helper.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. - -package strcase - -// isLower checks if a character is lower case. More precisely it evaluates if it is -// in the range of ASCII character 'a' to 'z'. 
-func isLower(ch rune) bool { - return ch >= 'a' && ch <= 'z' -} - -// toLower converts a character in the range of ASCII characters 'A' to 'Z' to its lower -// case counterpart. Other characters remain the same. -func toLower(ch rune) rune { - if ch >= 'A' && ch <= 'Z' { - return ch + 32 - } - return ch -} - -// isLower checks if a character is upper case. More precisely it evaluates if it is -// in the range of ASCII characters 'A' to 'Z'. -func isUpper(ch rune) bool { - return ch >= 'A' && ch <= 'Z' -} - -// toLower converts a character in the range of ASCII characters 'a' to 'z' to its lower -// case counterpart. Other characters remain the same. -func toUpper(ch rune) rune { - if ch >= 'a' && ch <= 'z' { - return ch - 32 - } - return ch -} - -// isSpace checks if a character is some kind of whitespace. -func isSpace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// isDigit checks if a character is a digit. More precisely it evaluates if it is -// in the range of ASCII characters '0' to '9'. -func isDigit(ch rune) bool { - return ch >= '0' && ch <= '9' -} - -// isDelimiter checks if a character is some kind of whitespace or '_' or '-'. -func isDelimiter(ch rune) bool { - return ch == '-' || ch == '_' || isSpace(ch) -} - -// iterFunc is a callback that is called fro a specific position in a string. Its arguments are the -// rune at the respective string position as well as the previous and the next rune. If curr is at the -// first position of the string prev is zero. If curr is at the end of the string next is zero. -type iterFunc func(prev, curr, next rune) - -// stringIter iterates over a string, invoking the callback for every single rune in the string. 
-func stringIter(s string, callback iterFunc) { - var prev rune - var curr rune - for _, next := range s { - if curr == 0 { - prev = curr - curr = next - continue - } - - callback(prev, curr, next) - - prev = curr - curr = next - } - - if len(s) > 0 { - callback(prev, curr, 0) - } -} diff --git a/vendor/github.com/stoewer/go-strcase/kebab.go b/vendor/github.com/stoewer/go-strcase/kebab.go deleted file mode 100644 index e9a6487579..0000000000 --- a/vendor/github.com/stoewer/go-strcase/kebab.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. - -package strcase - -// KebabCase converts a string into kebab case. -func KebabCase(s string) string { - return delimiterCase(s, '-', false) -} - -// UpperKebabCase converts a string into kebab case with capital letters. -func UpperKebabCase(s string) string { - return delimiterCase(s, '-', true) -} diff --git a/vendor/github.com/stoewer/go-strcase/snake.go b/vendor/github.com/stoewer/go-strcase/snake.go deleted file mode 100644 index 1b216e20cf..0000000000 --- a/vendor/github.com/stoewer/go-strcase/snake.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. - -package strcase - -import ( - "strings" -) - -// SnakeCase converts a string into snake case. -func SnakeCase(s string) string { - return delimiterCase(s, '_', false) -} - -// UpperSnakeCase converts a string into snake case with capital letters. -func UpperSnakeCase(s string) string { - return delimiterCase(s, '_', true) -} - -// delimiterCase converts a string into snake_case or kebab-case depending on the delimiter passed -// as second argument. When upperCase is true the result will be UPPER_SNAKE_CASE or UPPER-KEBAB-CASE. 
-func delimiterCase(s string, delimiter rune, upperCase bool) string { - s = strings.TrimSpace(s) - buffer := make([]rune, 0, len(s)+3) - - adjustCase := toLower - if upperCase { - adjustCase = toUpper - } - - var prev rune - var curr rune - for _, next := range s { - if isDelimiter(curr) { - if !isDelimiter(prev) { - buffer = append(buffer, delimiter) - } - } else if isUpper(curr) { - if isLower(prev) || (isUpper(prev) && isLower(next)) { - buffer = append(buffer, delimiter) - } - buffer = append(buffer, adjustCase(curr)) - } else if curr != 0 { - buffer = append(buffer, adjustCase(curr)) - } - prev = curr - curr = next - } - - if len(s) > 0 { - if isUpper(curr) && isLower(prev) && prev != 0 { - buffer = append(buffer, delimiter) - } - buffer = append(buffer, adjustCase(curr)) - } - - return string(buffer) -} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go index 2123e57dfc..6972de7a5b 100644 --- a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go @@ -84,7 +84,12 @@ func (cfg *UpdaterConfig) EnsurePathsExist() error { } for _, path := range []string{cfg.LocalMetadataDir, cfg.LocalTargetsDir} { - if err := os.MkdirAll(path, os.ModePerm); err != nil { + // Use 0700 for cache directories: only the owner can read, write, and + // access the directory. This prevents other users on shared systems from + // reading or writing to the TUF cache, which could be a security risk. + // If different permissions are needed, pre-create the directories with + // the desired permissions before calling this function. 
+ if err := os.MkdirAll(path, 0700); err != nil { return err } } diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go index 57e38612be..c6b148807e 100644 --- a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go @@ -118,16 +118,17 @@ func KeyFromPublicKey(k crypto.PublicKey) (*Key, error) { return key, nil } -// ID returns the keyID value for the given Key -func (k *Key) ID() string { +// ID returns the keyID value for the given Key, or an error if the key +// cannot be canonically encoded. +func (k *Key) ID() (string, error) { // the identifier is a hexdigest of the SHA-256 hash of the canonical form of the key if k.id == "" { data, err := cjson.EncodeCanonical(k) if err != nil { - panic(fmt.Errorf("error creating key ID: %w", err)) + return "", fmt.Errorf("error creating key ID: %w", err) } digest := sha256.Sum256(data) k.id = hex.EncodeToString(digest[:]) } - return k.id + return k.id, nil } diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go index bd3f1e44bd..7ebd1bca53 100644 --- a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go @@ -21,6 +21,7 @@ import ( "encoding/hex" "encoding/json" "errors" + "fmt" ) // The following marshal/unmarshal methods override the default behavior for for each TUF type @@ -522,6 +523,15 @@ func (role *SuccinctRoles) UnmarshalJSON(data []byte) error { } *role = SuccinctRoles(a) + // Validate BitLength: must be between 1 and 32 inclusive. 
+ // - BitLength determines the number of bins as 2^BitLength + // - We use the leftmost BitLength bits of a SHA-256 hash (32 bits max from 4 bytes) + // - BitLength < 1 would result in 0 or fractional bins + // - BitLength > 32 would cause a negative shift value in GetRolesForTarget + if role.BitLength < 1 || role.BitLength > 32 { + return fmt.Errorf("invalid bit_length: %d, must be between 1 and 32", role.BitLength) + } + var dict map[string]any if err := json.Unmarshal(data, &dict); err != nil { return err diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go index 0d0afd850e..6308d3cba0 100644 --- a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go @@ -226,15 +226,19 @@ func (meta *Metadata[T]) Sign(signer signature.Signer) (*Signature, error) { if err != nil { return nil, err } + keyID, err := key.ID() + if err != nil { + return nil, err + } // build signature sig := &Signature{ - KeyID: key.ID(), + KeyID: keyID, Signature: sb, } // update the Signatures part meta.Signatures = append(meta.Signatures, *sig) // return the new signature - log.Info("Signed metadata with key", "ID", key.ID()) + log.Info("Signed metadata with key", "ID", keyID) return sig, nil } @@ -292,6 +296,13 @@ func (meta *Metadata[T]) VerifyDelegate(delegatedRole string, delegatedMetadata if len(roleKeyIDs) == 0 { return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)} } + + if roleThreshold < 1 { + return &ErrValue{Msg: fmt.Sprintf("insufficient threshold (%d) configured for %s", + roleThreshold, + delegatedRole)} + } + // loop through each role keyID for _, keyID := range roleKeyIDs { key, ok := keys[keyID] @@ -619,7 +630,7 @@ func (role *SuccinctRoles) GetRoles() []string { res := []string{} suffixLen, numberOfBins := role.GetSuffixLen() - for binNumber := 0; binNumber < 
numberOfBins; binNumber++ { + for binNumber := range numberOfBins { suffix := fmt.Sprintf("%0*x", suffixLen, binNumber) res = append(res, fmt.Sprintf("%s-%s", role.NamePrefix, suffix)) } @@ -660,20 +671,24 @@ func (role *SuccinctRoles) IsDelegatedRole(roleName string) bool { } // AddKey adds new signing key for delegated role "role" -// keyID: Identifier of the key to be added for “role“. -// key: Signing key to be added for “role“. -// role: Name of the role, for which “key“ is added. +// keyID: Identifier of the key to be added for "role". +// key: Signing key to be added for "role". +// role: Name of the role, for which "key" is added. func (signed *RootType) AddKey(key *Key, role string) error { // verify role is present if _, ok := signed.Roles[role]; !ok { return &ErrValue{Msg: fmt.Sprintf("role %s doesn't exist", role)} } + keyID, err := key.ID() + if err != nil { + return err + } // add keyID to role - if !slices.Contains(signed.Roles[role].KeyIDs, key.ID()) { - signed.Roles[role].KeyIDs = append(signed.Roles[role].KeyIDs, key.ID()) + if !slices.Contains(signed.Roles[role].KeyIDs, keyID) { + signed.Roles[role].KeyIDs = append(signed.Roles[role].KeyIDs, keyID) } // update Keys - signed.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? + signed.Keys[keyID] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? return nil } @@ -710,14 +725,18 @@ func (signed *RootType) RevokeKey(keyID, role string) error { } // AddKey adds new signing key for delegated role "role" -// key: Signing key to be added for “role“. -// role: Name of the role, for which “key“ is added. +// key: Signing key to be added for "role". +// role: Name of the role, for which "key" is added. // If SuccinctRoles is used then the "role" argument can be ignored. 
func (signed *TargetsType) AddKey(key *Key, role string) error { // check if Delegations are even present if signed.Delegations == nil { return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} } + keyID, err := key.ID() + if err != nil { + return err + } // standard delegated roles if signed.Delegations.Roles != nil { // loop through all delegated roles @@ -727,12 +746,12 @@ func (signed *TargetsType) AddKey(key *Key, role string) error { if d.Name == role { isDelegatedRole = true // add key if keyID is not already part of keyIDs for that role - if !slices.Contains(d.KeyIDs, key.ID()) { - signed.Delegations.Roles[i].KeyIDs = append(signed.Delegations.Roles[i].KeyIDs, key.ID()) - signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? + if !slices.Contains(d.KeyIDs, keyID) { + signed.Delegations.Roles[i].KeyIDs = append(signed.Delegations.Roles[i].KeyIDs, keyID) + signed.Delegations.Keys[keyID] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? return nil } - log.Info("Delegated role already has keyID", "role", role, "ID", key.ID()) + log.Info("Delegated role already has keyID", "role", role, "ID", keyID) } } if !isDelegatedRole { @@ -740,15 +759,15 @@ func (signed *TargetsType) AddKey(key *Key, role string) error { } } else if signed.Delegations.SuccinctRoles != nil { // add key if keyID is not already part of keyIDs for the SuccinctRoles role - if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, key.ID()) { - signed.Delegations.SuccinctRoles.KeyIDs = append(signed.Delegations.SuccinctRoles.KeyIDs, key.ID()) - signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? 
+ if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, keyID) { + signed.Delegations.SuccinctRoles.KeyIDs = append(signed.Delegations.SuccinctRoles.KeyIDs, keyID) + signed.Delegations.Keys[keyID] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? return nil } - log.Info("SuccinctRoles role already has keyID", "ID", key.ID()) + log.Info("SuccinctRoles role already has keyID", "ID", keyID) } - signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? + signed.Delegations.Keys[keyID] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? return nil } @@ -900,7 +919,15 @@ func checkType[T Roles](data []byte) error { if err := json.Unmarshal(data, &m); err != nil { return err } - signedType := m["signed"].(map[string]any)["_type"].(string) + signed, ok := m["signed"].(map[string]any) + if !ok { + return &ErrValue{Msg: "metadata 'signed' field is missing or not an object"} + } + signedType, ok := signed["_type"].(string) + if !ok { + return &ErrValue{Msg: "no _type found in signed"} + } + switch i.(type) { case *RootType: if ROOT != signedType { diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go index 0726f31f88..3ae32781cd 100644 --- a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go @@ -24,7 +24,11 @@ import ( "github.com/theupdateframework/go-tuf/v2/metadata" ) -// TrustedMetadata struct for storing trusted metadata +// TrustedMetadata struct for storing trusted metadata. +// +// Thread Safety: TrustedMetadata is NOT safe for concurrent use. 
If multiple +// goroutines need to access a TrustedMetadata instance concurrently, external +// synchronization is required (e.g., a sync.Mutex). type TrustedMetadata struct { Root *metadata.Metadata[metadata.RootType] Snapshot *metadata.Metadata[metadata.SnapshotType] diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go index bd533b63c3..d84c869df2 100644 --- a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go @@ -58,6 +58,11 @@ import ( // target file is already locally cached. // - DownloadTarget() downloads a target file and ensures it is // verified correct by the metadata. +// +// Thread Safety: Updater is NOT safe for concurrent use. If multiple goroutines +// need to use an Updater concurrently, external synchronization is required +// (e.g., a sync.Mutex). Alternatively, create separate Updater instances for +// each goroutine. type Updater struct { trusted *trustedmetadata.TrustedMetadata cfg *config.UpdaterConfig @@ -570,7 +575,7 @@ func (update *Updater) preOrderDepthFirstWalk(targetFilePath string) (*metadata. // onto delegationsToVisit. Roles are popped from the end of // the list slices.Reverse(childRolesToVisit) - delegationsToVisit = append(delegationsToVisit, childRolesToVisit...) 
+ delegationsToVisit = slices.Concat(delegationsToVisit, childRolesToVisit) } } if len(delegationsToVisit) > 0 { @@ -605,7 +610,7 @@ func (update *Updater) persistMetadata(roleName string, data []byte) error { if errRemove != nil { log.Info("Failed to delete temporary file", "name", file.Name()) } - return err + return errors.Join(err, errRemove) } // write the data content to the temporary file _, err = file.Write(data) @@ -616,7 +621,7 @@ func (update *Updater) persistMetadata(roleName string, data []byte) error { if errRemove != nil { log.Info("Failed to delete temporary file", "name", file.Name()) } - return err + return errors.Join(err, errRemove) } // can't move/rename an open file on windows, so close it first diff --git a/vendor/github.com/valyala/fastjson/arena.go b/vendor/github.com/valyala/fastjson/arena.go index 9fe21a48c8..1a512d5f35 100644 --- a/vendor/github.com/valyala/fastjson/arena.go +++ b/vendor/github.com/valyala/fastjson/arena.go @@ -8,10 +8,10 @@ import ( // // Typical Arena lifecycle: // -// 1) Construct Values via the Arena and Value.Set* calls. -// 2) Marshal the constructed Values with Value.MarshalTo call. -// 3) Reset all the constructed Values at once by Arena.Reset call. -// 4) Go to 1 and re-use the Arena. +// 1. Construct Values via the Arena and Value.Set* calls. +// 2. Marshal the constructed Values with Value.MarshalTo call. +// 3. Reset all the constructed Values at once by Arena.Reset call. +// 4. Go to 1 and re-use the Arena. // // It is unsafe calling Arena methods from concurrent goroutines. // Use per-goroutine Arenas or ArenaPool instead. diff --git a/vendor/github.com/valyala/fastjson/doc.go b/vendor/github.com/valyala/fastjson/doc.go index 8076189cfe..3dbff364d1 100644 --- a/vendor/github.com/valyala/fastjson/doc.go +++ b/vendor/github.com/valyala/fastjson/doc.go @@ -4,6 +4,5 @@ Package fastjson provides fast JSON parsing. 
Arbitrary JSON may be parsed by fastjson without the need for creating structs or for generating go code. Just parse JSON and get the required fields with Get* functions. - */ package fastjson diff --git a/vendor/github.com/valyala/fastjson/fuzz.go b/vendor/github.com/valyala/fastjson/fuzz.go index 9130797c70..d9da1f1abc 100644 --- a/vendor/github.com/valyala/fastjson/fuzz.go +++ b/vendor/github.com/valyala/fastjson/fuzz.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz package fastjson diff --git a/vendor/github.com/valyala/fastjson/parser.go b/vendor/github.com/valyala/fastjson/parser.go index 885e1841ef..f9f5ce4989 100644 --- a/vendor/github.com/valyala/fastjson/parser.go +++ b/vendor/github.com/valyala/fastjson/parser.go @@ -32,7 +32,7 @@ func (p *Parser) Parse(s string) (*Value, error) { p.b = append(p.b[:0], s...) p.c.reset() - v, tail, err := parseValue(b2s(p.b), &p.c, 0) + v, tail, err := p.c.parseValue(b2s(p.b), 0) if err != nil { return nil, fmt.Errorf("cannot parse JSON: %s; unparsed tail: %q", err, startEndString(tail)) } @@ -57,7 +57,11 @@ type cache struct { } func (c *cache) reset() { - c.vs = c.vs[:0] + vs := c.vs + for i := range vs { + vs[i].reset() + } + c.vs = vs[:0] } func (c *cache) getValue() *Value { @@ -66,7 +70,6 @@ func (c *cache) getValue() *Value { } else { c.vs = append(c.vs, Value{}) } - // Do not reset the value, since the caller must properly init it. return &c.vs[len(c.vs)-1] } @@ -98,7 +101,7 @@ type kv struct { // MaxDepth is the maximum depth for nested JSON. 
const MaxDepth = 300 -func parseValue(s string, c *cache, depth int) (*Value, string, error) { +func (c *cache) parseValue(s string, depth int) (*Value, string, error) { if len(s) == 0 { return nil, s, fmt.Errorf("cannot parse empty string") } @@ -108,14 +111,14 @@ func parseValue(s string, c *cache, depth int) (*Value, string, error) { } if s[0] == '{' { - v, tail, err := parseObject(s[1:], c, depth) + v, tail, err := c.parseObject(s[1:], depth) if err != nil { return nil, tail, fmt.Errorf("cannot parse object: %s", err) } return v, tail, nil } if s[0] == '[' { - v, tail, err := parseArray(s[1:], c, depth) + v, tail, err := c.parseArray(s[1:], depth) if err != nil { return nil, tail, fmt.Errorf("cannot parse array: %s", err) } @@ -167,7 +170,7 @@ func parseValue(s string, c *cache, depth int) (*Value, string, error) { return v, tail, nil } -func parseArray(s string, c *cache, depth int) (*Value, string, error) { +func (c *cache) parseArray(s string, depth int) (*Value, string, error) { s = skipWS(s) if len(s) == 0 { return nil, s, fmt.Errorf("missing ']'") @@ -188,7 +191,7 @@ func parseArray(s string, c *cache, depth int) (*Value, string, error) { var err error s = skipWS(s) - v, s, err = parseValue(s, c, depth) + v, s, err = c.parseValue(s, depth) if err != nil { return nil, s, fmt.Errorf("cannot parse array value: %s", err) } @@ -210,7 +213,7 @@ func parseArray(s string, c *cache, depth int) (*Value, string, error) { } } -func parseObject(s string, c *cache, depth int) (*Value, string, error) { +func (c *cache) parseObject(s string, depth int) (*Value, string, error) { s = skipWS(s) if len(s) == 0 { return nil, s, fmt.Errorf("missing '}'") @@ -247,7 +250,7 @@ func parseObject(s string, c *cache, depth int) (*Value, string, error) { // Parse value s = skipWS(s) - kv.v, s, err = parseValue(s, c, depth) + kv.v, s, err = c.parseValue(s, depth) if err != nil { return nil, s, fmt.Errorf("cannot parse object value: %s", err) } @@ -283,7 +286,7 @@ func hasSpecialChars(s 
string) bool { if strings.IndexByte(s, '"') >= 0 || strings.IndexByte(s, '\\') >= 0 { return true } - for i := 0; i < len(s); i++ { + for i := range len(s) { if s[i] < 0x20 { return true } @@ -375,7 +378,7 @@ func unescapeStringBestEffort(s string) string { // parseRawKey is similar to parseRawString, but is optimized // for small-sized keys without escape sequences. func parseRawKey(s string) (string, string, error) { - for i := 0; i < len(s); i++ { + for i := range len(s) { if s[i] == '"' { // Fast path. return s[:i], s[i+1:], nil @@ -424,7 +427,7 @@ func parseRawNumber(s string) (string, string, error) { // The caller must ensure len(s) > 0 // Find the end of the number. - for i := 0; i < len(s); i++ { + for i := range len(s) { ch := s[i] if (ch >= '0' && ch <= '9') || ch == '.' || ch == '-' || ch == 'e' || ch == 'E' || ch == '+' { continue @@ -455,14 +458,19 @@ type Object struct { } func (o *Object) reset() { + // o.kvs entries can point to external byte slices. Clear these references, so GC could free memory. + clear(o.kvs) o.kvs = o.kvs[:0] + o.keysUnescaped = false } // MarshalTo appends marshaled o to dst and returns the result. func (o *Object) MarshalTo(dst []byte) []byte { dst = append(dst, '{') - for i, kv := range o.kvs { + kvs := o.kvs + for i := range kvs { + kv := &kvs[i] if o.keysUnescaped { dst = escapeString(dst, kv.k) } else { @@ -525,7 +533,9 @@ func (o *Object) Len() int { func (o *Object) Get(key string) *Value { if !o.keysUnescaped && strings.IndexByte(key, '\\') < 0 { // Fast path - try searching for the key without object keys unescaping. - for _, kv := range o.kvs { + kvs := o.kvs + for i := range kvs { + kv := &kvs[i] if kv.k == key { return kv.v } @@ -535,7 +545,9 @@ func (o *Object) Get(key string) *Value { // Slow path - unescape object keys. 
o.unescapeKeys() - for _, kv := range o.kvs { + kvs := o.kvs + for i := range kvs { + kv := &kvs[i] if kv.k == key { return kv.v } @@ -554,7 +566,9 @@ func (o *Object) Visit(f func(key []byte, v *Value)) { o.unescapeKeys() - for _, kv := range o.kvs { + kvs := o.kvs + for i := range kvs { + kv := &kvs[i] f(s2b(kv.k), kv.v) } } @@ -572,6 +586,16 @@ type Value struct { t Type } +func (v *Value) reset() { + v.o.reset() + + clear(v.a) + v.a = v.a[:0] + + v.s = "" + v.t = 0 +} + // MarshalTo appends marshaled v to dst and returns the result. func (v *Value) MarshalTo(dst []byte) []byte { switch v.t { diff --git a/vendor/github.com/valyala/fastjson/pool.go b/vendor/github.com/valyala/fastjson/pool.go index 00cfb42fa6..3f40fb4e21 100644 --- a/vendor/github.com/valyala/fastjson/pool.go +++ b/vendor/github.com/valyala/fastjson/pool.go @@ -48,5 +48,6 @@ func (ap *ArenaPool) Get() *Arena { // // a and objects created by a cannot be used after a is put into ap. func (ap *ArenaPool) Put(a *Arena) { + a.Reset() ap.pool.Put(a) } diff --git a/vendor/github.com/valyala/fastjson/scanner.go b/vendor/github.com/valyala/fastjson/scanner.go index 89b38816f0..5db3f14edb 100644 --- a/vendor/github.com/valyala/fastjson/scanner.go +++ b/vendor/github.com/valyala/fastjson/scanner.go @@ -65,7 +65,7 @@ func (sc *Scanner) Next() bool { } sc.c.reset() - v, tail, err := parseValue(sc.s, &sc.c, 0) + v, tail, err := sc.c.parseValue(sc.s, 0) if err != nil { sc.err = err return false diff --git a/vendor/github.com/valyala/fastjson/update.go b/vendor/github.com/valyala/fastjson/update.go index f8099bdbb9..0b64e384df 100644 --- a/vendor/github.com/valyala/fastjson/update.go +++ b/vendor/github.com/valyala/fastjson/update.go @@ -12,7 +12,9 @@ func (o *Object) Del(key string) { } if !o.keysUnescaped && strings.IndexByte(key, '\\') < 0 { // Fast path - try searching for the key without object keys unescaping. 
- for i, kv := range o.kvs { + kvs := o.kvs + for i := range kvs { + kv := &kvs[i] if kv.k == key { o.kvs = append(o.kvs[:i], o.kvs[i+1:]...) return @@ -23,7 +25,9 @@ func (o *Object) Del(key string) { // Slow path - unescape object keys before item search. o.unescapeKeys() - for i, kv := range o.kvs { + kvs := o.kvs + for i := range kvs { + kv := &kvs[i] if kv.k == key { o.kvs = append(o.kvs[:i], o.kvs[i+1:]...) return @@ -62,8 +66,9 @@ func (o *Object) Set(key string, value *Value) { o.unescapeKeys() // Try substituting already existing entry with the given key. - for i := range o.kvs { - kv := &o.kvs[i] + kvs := o.kvs + for i := range kvs { + kv := &kvs[i] if kv.k == key { kv.v = value return @@ -106,5 +111,8 @@ func (v *Value) SetArrayItem(idx int, value *Value) { for idx >= len(v.a) { v.a = append(v.a, valueNull) } + if value == nil { + value = valueNull + } v.a[idx] = value } diff --git a/vendor/github.com/valyala/fastjson/validate.go b/vendor/github.com/valyala/fastjson/validate.go index 196f1c3dc6..8f1173cf61 100644 --- a/vendor/github.com/valyala/fastjson/validate.go +++ b/vendor/github.com/valyala/fastjson/validate.go @@ -51,7 +51,7 @@ func validateValue(s string) (string, error) { return tail, fmt.Errorf("cannot parse string: %s", err) } // Scan the string for control chars. - for i := 0; i < len(sv); i++ { + for i := range len(sv) { if sv[i] < 0x20 { return tail, fmt.Errorf("string cannot contain control char 0x%02X", sv[i]) } @@ -142,7 +142,7 @@ func validateObject(s string) (string, error) { return s, fmt.Errorf("cannot parse object key: %s", err) } // Scan the key for control chars. - for i := 0; i < len(key); i++ { + for i := range len(key) { if key[i] < 0x20 { return s, fmt.Errorf("object key cannot contain control char 0x%02X", key[i]) } @@ -177,7 +177,7 @@ func validateObject(s string) (string, error) { // validateKey is similar to validateString, but is optimized // for typical object keys, which are quite small and have no escape sequences. 
func validateKey(s string) (string, string, error) { - for i := 0; i < len(s); i++ { + for i := range len(s) { if s[i] == '"' { // Fast path - the key doesn't contain escape sequences. return s[:i], s[i+1:], nil diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/argmap.go b/vendor/github.com/vektah/gqlparser/v2/ast/argmap.go index 43f6a3d6fc..1b65c27693 100644 --- a/vendor/github.com/vektah/gqlparser/v2/ast/argmap.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/argmap.go @@ -1,11 +1,15 @@ package ast -func arg2map(defs ArgumentDefinitionList, args ArgumentList, vars map[string]interface{}) map[string]interface{} { - result := map[string]interface{}{} +func arg2map( + defs ArgumentDefinitionList, + args ArgumentList, + vars map[string]any, +) map[string]any { + result := map[string]any{} var err error for _, argDef := range defs { - var val interface{} + var val any var hasValue bool if argValue := args.ForName(argDef.Name); argValue != nil { diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/definition.go b/vendor/github.com/vektah/gqlparser/v2/ast/definition.go index 9ceebf1bee..426db7588c 100644 --- a/vendor/github.com/vektah/gqlparser/v2/ast/definition.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/definition.go @@ -1,5 +1,7 @@ package ast +import "slices" + type DefinitionKind string const ( @@ -54,12 +56,7 @@ func (d *Definition) IsInputType() bool { } func (d *Definition) OneOf(types ...string) bool { - for _, t := range types { - if d.Name == t { - return true - } - } - return false + return slices.Contains(types, d.Name) } type FieldDefinition struct { diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/directive.go b/vendor/github.com/vektah/gqlparser/v2/ast/directive.go index b11867c2e4..de8d984028 100644 --- a/vendor/github.com/vektah/gqlparser/v2/ast/directive.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/directive.go @@ -3,7 +3,7 @@ package ast type DirectiveLocation string const ( - // Executable + // Executable. 
LocationQuery DirectiveLocation = `QUERY` LocationMutation DirectiveLocation = `MUTATION` LocationSubscription DirectiveLocation = `SUBSCRIPTION` @@ -12,7 +12,7 @@ const ( LocationFragmentSpread DirectiveLocation = `FRAGMENT_SPREAD` LocationInlineFragment DirectiveLocation = `INLINE_FRAGMENT` - // Type System + // Type System. LocationSchema DirectiveLocation = `SCHEMA` LocationScalar DirectiveLocation = `SCALAR` LocationObject DirectiveLocation = `OBJECT` @@ -38,6 +38,9 @@ type Directive struct { Location DirectiveLocation } -func (d *Directive) ArgumentMap(vars map[string]interface{}) map[string]interface{} { +func (d *Directive) ArgumentMap(vars map[string]any) map[string]any { + if d.Definition == nil { + return nil + } return arg2map(d.Definition.Arguments, d.Arguments, vars) } diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/document.go b/vendor/github.com/vektah/gqlparser/v2/ast/document.go index e2520ffb7c..66d55b43b4 100644 --- a/vendor/github.com/vektah/gqlparser/v2/ast/document.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/document.go @@ -42,7 +42,7 @@ type Schema struct { Comment *CommentGroup } -// AddTypes is the helper to add types definition to the schema +// AddTypes is the helper to add types definition to the schema. func (s *Schema) AddTypes(defs ...*Definition) { if s.Types == nil { s.Types = make(map[string]*Definition) @@ -56,7 +56,7 @@ func (s *Schema) AddPossibleType(name string, def *Definition) { s.PossibleTypes[name] = append(s.PossibleTypes[name], def) } -// GetPossibleTypes will enumerate all the definitions for a given interface or union +// GetPossibleTypes will enumerate all the definitions for a given interface or union. 
func (s *Schema) GetPossibleTypes(def *Definition) []*Definition { return s.PossibleTypes[def.Name] } @@ -65,7 +65,8 @@ func (s *Schema) AddImplements(name string, iface *Definition) { s.Implements[name] = append(s.Implements[name], iface) } -// GetImplements returns all the interface and union definitions that the given definition satisfies +// GetImplements returns all the interface and union definitions that the given definition +// satisfies. func (s *Schema) GetImplements(def *Definition) []*Definition { return s.Implements[def.Name] } diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/dumper.go b/vendor/github.com/vektah/gqlparser/v2/ast/dumper.go index e9ea88a12a..26cf693d9c 100644 --- a/vendor/github.com/vektah/gqlparser/v2/ast/dumper.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/dumper.go @@ -8,8 +8,8 @@ import ( "strings" ) -// Dump turns ast into a stable string format for assertions in tests -func Dump(i interface{}) string { +// Dump turns ast into a stable string format for assertions in tests. +func Dump(i any) string { v := reflect.ValueOf(i) d := dumper{Buffer: &bytes.Buffer{}} @@ -126,7 +126,6 @@ func isZero(v reflect.Value) bool { return v.IsNil() case reflect.Func, reflect.Map: return v.IsNil() - case reflect.Array, reflect.Slice: if v.IsNil() { return true @@ -144,10 +143,13 @@ func isZero(v reflect.Value) bool { return z case reflect.String: return v.String() == "" + case reflect.Bool: + // Never consider Bool field as zero value. + // Always include them in AST dump. 
+ return false + default: + return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) } - - // Compare other types directly: - return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type())) } func (d *dumper) dumpPtr(v reflect.Value) { diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/path.go b/vendor/github.com/vektah/gqlparser/v2/ast/path.go index f40aa953dd..63f53a67a2 100644 --- a/vendor/github.com/vektah/gqlparser/v2/ast/path.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/path.go @@ -27,7 +27,7 @@ func (path Path) String() string { for i, v := range path { switch v := v.(type) { case PathIndex: - str.WriteString(fmt.Sprintf("[%d]", v)) + fmt.Fprintf(&str, "[%d]", v) case PathName: if i != 0 { str.WriteByte('.') @@ -41,7 +41,7 @@ func (path Path) String() string { } func (path *Path) UnmarshalJSON(b []byte) error { - var vs []interface{} + var vs []any err := json.Unmarshal(b, &vs) if err != nil { return err diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/selection.go b/vendor/github.com/vektah/gqlparser/v2/ast/selection.go index 1858dc2136..b6e6890d22 100644 --- a/vendor/github.com/vektah/gqlparser/v2/ast/selection.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/selection.go @@ -36,6 +36,9 @@ type Argument struct { Comment *CommentGroup } -func (f *Field) ArgumentMap(vars map[string]interface{}) map[string]interface{} { +func (f *Field) ArgumentMap(vars map[string]any) map[string]any { + if f.Definition == nil { + return nil + } return arg2map(f.Definition.Arguments, f.Arguments, vars) } diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/source.go b/vendor/github.com/vektah/gqlparser/v2/ast/source.go index 2949f83f7b..213a63a3c5 100644 --- a/vendor/github.com/vektah/gqlparser/v2/ast/source.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/source.go @@ -1,6 +1,6 @@ package ast -// Source covers a single *.graphql file +// Source covers a single *.graphql file. 
type Source struct { // Name is the filename of the source Name string diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/value.go b/vendor/github.com/vektah/gqlparser/v2/ast/value.go index 45fa8016b5..83b471633d 100644 --- a/vendor/github.com/vektah/gqlparser/v2/ast/value.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/value.go @@ -29,9 +29,10 @@ type Value struct { Comment *CommentGroup // Require validation - Definition *Definition - VariableDefinition *VariableDefinition - ExpectedType *Type + Definition *Definition + VariableDefinition *VariableDefinition + ExpectedType *Type + ExpectedTypeHasDefault bool } type ChildValue struct { @@ -41,7 +42,7 @@ type ChildValue struct { Comment *CommentGroup } -func (v *Value) Value(vars map[string]interface{}) (interface{}, error) { +func (v *Value) Value(vars map[string]any) (any, error) { if v == nil { return nil, nil } @@ -65,7 +66,7 @@ func (v *Value) Value(vars map[string]interface{}) (interface{}, error) { case NullValue: return nil, nil case ListValue: - var val []interface{} + var val []any for _, elem := range v.Children { elemVal, err := elem.Value.Value(vars) if err != nil { @@ -75,7 +76,7 @@ func (v *Value) Value(vars map[string]interface{}) (interface{}, error) { } return val, nil case ObjectValue: - val := map[string]interface{}{} + val := map[string]any{} for _, elem := range v.Children { elemVal, err := elem.Value.Value(vars) if err != nil { diff --git a/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go b/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go index d9f2028871..b2ba01bbe5 100644 --- a/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go +++ b/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go @@ -11,12 +11,12 @@ import ( // Error is the standard graphql error type described in https://spec.graphql.org/draft/#sec-Errors type Error struct { - Err error `json:"-"` - Message string `json:"message"` - Path ast.Path `json:"path,omitempty"` - Locations []Location 
`json:"locations,omitempty"` - Extensions map[string]interface{} `json:"extensions,omitempty"` - Rule string `json:"-"` + Err error `json:"-"` + Message string `json:"message"` + Path ast.Path `json:"path,omitempty"` + Locations []Location `json:"locations,omitempty"` + Extensions map[string]any `json:"extensions,omitempty"` + Rule string `json:"-"` } func (err *Error) SetFile(file string) { @@ -24,7 +24,7 @@ func (err *Error) SetFile(file string) { return } if err.Extensions == nil { - err.Extensions = map[string]interface{}{} + err.Extensions = map[string]any{} } err.Extensions["file"] = file @@ -99,7 +99,7 @@ func (errs List) Is(target error) bool { return false } -func (errs List) As(target interface{}) bool { +func (errs List) As(target any) bool { for _, err := range errs { if errors.As(err, target) { return true @@ -141,7 +141,8 @@ func WrapIfUnwrapped(err error) *Error { if err == nil { return nil } - if gqlErr, ok := err.(*Error); ok { + gqlErr := &Error{} + if errors.As(err, &gqlErr) { return gqlErr } return &Error{ @@ -150,20 +151,20 @@ func WrapIfUnwrapped(err error) *Error { } } -func Errorf(message string, args ...interface{}) *Error { +func Errorf(message string, args ...any) *Error { return &Error{ Message: fmt.Sprintf(message, args...), } } -func ErrorPathf(path ast.Path, message string, args ...interface{}) *Error { +func ErrorPathf(path ast.Path, message string, args ...any) *Error { return &Error{ Message: fmt.Sprintf(message, args...), Path: path, } } -func ErrorPosf(pos *ast.Position, message string, args ...interface{}) *Error { +func ErrorPosf(pos *ast.Position, message string, args ...any) *Error { if pos == nil { return ErrorLocf( "", @@ -182,10 +183,10 @@ func ErrorPosf(pos *ast.Position, message string, args ...interface{}) *Error { ) } -func ErrorLocf(file string, line int, col int, message string, args ...interface{}) *Error { - var extensions map[string]interface{} +func ErrorLocf(file string, line, col int, message string, args 
...any) *Error { + var extensions map[string]any if file != "" { - extensions = map[string]interface{}{"file": file} + extensions = map[string]any{"file": file} } return &Error{ Message: fmt.Sprintf(message, args...), diff --git a/vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go index 1cbb4a0308..7a82a42b33 100644 --- a/vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go +++ b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go @@ -2,13 +2,14 @@ package lexer import ( "bytes" + "slices" "unicode/utf8" "github.com/vektah/gqlparser/v2/ast" "github.com/vektah/gqlparser/v2/gqlerror" ) -// Lexer turns graphql request and schema strings into tokens +// Lexer turns graphql request and schema strings into tokens. type Lexer struct { *ast.Source // An offset into the string in bytes @@ -32,7 +33,7 @@ func New(src *ast.Source) Lexer { } } -// take one rune from input and advance end +// take one rune from input and advance end. func (s *Lexer) peek() (rune, int) { return utf8.DecodeRuneInString(s.Input[s.end:]) } @@ -55,7 +56,7 @@ func (s *Lexer) makeValueToken(kind Type, value string) (Token, error) { }, nil } -func (s *Lexer) makeError(format string, args ...interface{}) (Token, *gqlerror.Error) { +func (s *Lexer) makeError(format string, args ...any) (Token, *gqlerror.Error) { column := s.endRunes - s.lineStartRunes + 1 return Token{ Kind: Invalid, @@ -122,7 +123,59 @@ func (s *Lexer) ReadToken() (Token, error) { case '#': return s.readComment() - case '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z': + case '_', + 'a', + 'b', + 'c', + 'd', + 'e', + 'f', + 'g', + 'h', + 'i', + 'j', + 'k', + 'l', + 'm', + 'n', + 'o', + 'p', + 'q', + 'r', + 's', + 't', + 'u', + 'v', + 'w', + 'x', + 'y', + 'z', + 'A', + 
'B', + 'C', + 'D', + 'E', + 'F', + 'G', + 'H', + 'I', + 'J', + 'K', + 'L', + 'M', + 'N', + 'O', + 'P', + 'Q', + 'R', + 'S', + 'T', + 'U', + 'V', + 'W', + 'X', + 'Y', + 'Z': return s.readName() case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': @@ -144,14 +197,16 @@ func (s *Lexer) ReadToken() (Token, error) { } if r == '\'' { - return s.makeError(`Unexpected single quote character ('), did you mean to use a double quote (")?`) + return s.makeError( + `Unexpected single quote character ('), did you mean to use a double quote (")?`, + ) } return s.makeError(`Cannot parse the unexpected character "%s".`, string(r)) } // ws reads from body starting at startPosition until it finds a non-whitespace -// or commented character, and updates the token end to include all whitespace +// or commented character, and updates the token end to include all whitespace. func (s *Lexer) ws() { for s.end < len(s.Input) { switch s.Input[s.end] { @@ -189,7 +244,7 @@ func (s *Lexer) ws() { // readComment from the input // -// #[\u0009\u0020-\uFFFF]* +// #[\u0009\u0020-\uFFFF]*. func (s *Lexer) readComment() (Token, error) { for s.end < len(s.Input) { r, w := s.peek() @@ -256,23 +311,21 @@ func (s *Lexer) readNumber() (Token, error) { return s.makeToken(Int) } -// acceptByte if it matches any of given bytes, returning true if it found anything +// acceptByte if it matches any of given bytes, returning true if it found anything. func (s *Lexer) acceptByte(bytes ...uint8) bool { if s.end >= len(s.Input) { return false } - for _, accepted := range bytes { - if s.Input[s.end] == accepted { - s.end++ - s.endRunes++ - return true - } + if slices.Contains(bytes, s.Input[s.end]) { + s.end++ + s.endRunes++ + return true } return false } -// acceptDigits from the input, returning the number of digits it found +// acceptDigits from the input, returning the number of digits it found. 
func (s *Lexer) acceptDigits() int { consumed := 0 for s.end < len(s.Input) && s.Input[s.end] >= '0' && s.Input[s.end] <= '9' { @@ -285,7 +338,7 @@ func (s *Lexer) acceptDigits() int { } // describeNext peeks at the input and returns a human readable string. This should will alloc -// and should only be used in errors +// and should only be used in errors. func (s *Lexer) describeNext() string { if s.end < len(s.Input) { return `"` + string(s.Input[s.end]) + `"` @@ -295,7 +348,7 @@ func (s *Lexer) describeNext() string { // readString from the input // -// "([^"\\\u000A\u000D]|(\\(u[0-9a-fA-F]{4}|["\\/bfnrt])))*" +// "([^"\\\u000A\u000D]|(\\(u[0-9a-fA-F]{4}|["\\/bfnrt])))*". func (s *Lexer) readString() (Token, error) { inputLen := len(s.Input) @@ -332,7 +385,8 @@ func (s *Lexer) readString() (Token, error) { case '"': t, err := s.makeToken(String) - // the token should not include the quotes in its value, but should cover them in its position + // the token should not include the quotes in its value, but should cover them in its + // position t.Pos.Start-- t.Pos.End++ @@ -370,7 +424,10 @@ func (s *Lexer) readString() (Token, error) { if !ok { s.end++ s.endRunes++ - return s.makeError("Invalid character escape sequence: \\%s.", s.Input[s.end:s.end+5]) + return s.makeError( + "Invalid character escape sequence: \\%s.", + s.Input[s.end:s.end+5], + ) } buf.WriteRune(r) s.end += 6 @@ -405,7 +462,7 @@ func (s *Lexer) readString() (Token, error) { // readBlockString from the input // -// """("?"?(\\"""|\\(?!=""")|[^"\\]))*""" +// """("?"?(\\"""|\\(?!=""")|[^"\\]))*""". 
func (s *Lexer) readBlockString() (Token, error) { inputLen := len(s.Input) @@ -433,7 +490,7 @@ func (s *Lexer) readBlockString() (Token, error) { // If we have at least 3 quotes, use the last 3 as the closing quote if quoteCount >= 3 { // Add any extra quotes to the buffer (except the last 3) - for j := 0; j < quoteCount-3; j++ { + for range quoteCount - 3 { buf.WriteByte('"') } @@ -508,7 +565,7 @@ func unhex(b string) (v rune, ok bool) { // readName from the input // -// [_A-Za-z][_0-9A-Za-z]* +// [_A-Za-z][_0-9A-Za-z]*. func (s *Lexer) readName() (Token, error) { for s.end < len(s.Input) { r, w := s.peek() diff --git a/vendor/github.com/vektah/gqlparser/v2/parser/parser.go b/vendor/github.com/vektah/gqlparser/v2/parser/parser.go index 2aba983796..b6a306406c 100644 --- a/vendor/github.com/vektah/gqlparser/v2/parser/parser.go +++ b/vendor/github.com/vektah/gqlparser/v2/parser/parser.go @@ -91,7 +91,7 @@ func (p *parser) peek() lexer.Token { return p.peekToken } -func (p *parser) error(tok lexer.Token, format string, args ...interface{}) { +func (p *parser) error(tok lexer.Token, format string, args ...any) { if p.err != nil { return } @@ -165,7 +165,7 @@ func (p *parser) unexpectedToken(tok lexer.Token) { p.error(tok, "Unexpected %s", tok.String()) } -func (p *parser) many(start lexer.Type, end lexer.Type, cb func()) { +func (p *parser) many(start, end lexer.Type, cb func()) { hasDef := p.skip(start) if !hasDef { return @@ -177,7 +177,7 @@ func (p *parser) many(start lexer.Type, end lexer.Type, cb func()) { p.next() } -func (p *parser) some(start lexer.Type, end lexer.Type, cb func()) *ast.CommentGroup { +func (p *parser) some(start, end lexer.Type, cb func()) *ast.CommentGroup { hasDef := p.skip(start) if !hasDef { return nil diff --git a/vendor/github.com/vektah/gqlparser/v2/parser/query.go b/vendor/github.com/vektah/gqlparser/v2/parser/query.go index 47ac214a91..271a2ffe81 100644 --- a/vendor/github.com/vektah/gqlparser/v2/parser/query.go +++ 
b/vendor/github.com/vektah/gqlparser/v2/parser/query.go @@ -1,9 +1,8 @@ package parser import ( - "github.com/vektah/gqlparser/v2/lexer" - . "github.com/vektah/gqlparser/v2/ast" //nolint:staticcheck // bad, yeah + "github.com/vektah/gqlparser/v2/lexer" ) func ParseQuery(source *Source) (*QueryDocument, error) { @@ -259,7 +258,12 @@ func (p *parser) parseValueLiteral(isConst bool) *Value { p.unexpectedError() return nil } - return &Value{Position: &token.Pos, Comment: p.comment, Raw: p.parseVariable(), Kind: Variable} + return &Value{ + Position: &token.Pos, + Comment: p.comment, + Raw: p.parseVariable(), + Kind: Variable, + } case lexer.Int: kind = IntValue case lexer.Float: diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/core/helpers.go b/vendor/github.com/vektah/gqlparser/v2/validator/core/helpers.go index b395a8402b..f977d00a81 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/core/helpers.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/core/helpers.go @@ -8,11 +8,12 @@ import ( "strings" "github.com/agnivade/levenshtein" + "github.com/vektah/gqlparser/v2/ast" "github.com/vektah/gqlparser/v2/gqlerror" ) -func Message(msg string, args ...interface{}) ErrorOption { +func Message(msg string, args ...any) ErrorOption { return func(err *gqlerror.Error) { err.Message += fmt.Sprintf(msg, args...) 
} @@ -33,7 +34,7 @@ func At(position *ast.Position) ErrorOption { } } -func SuggestListQuoted(prefix string, typed string, suggestions []string) ErrorOption { +func SuggestListQuoted(prefix, typed string, suggestions []string) ErrorOption { suggested := SuggestionList(typed, suggestions) return func(err *gqlerror.Error) { if len(suggested) > 0 { @@ -42,7 +43,7 @@ func SuggestListQuoted(prefix string, typed string, suggestions []string) ErrorO } } -func SuggestListUnquoted(prefix string, typed string, suggestions []string) ErrorOption { +func SuggestListUnquoted(prefix, typed string, suggestions []string) ErrorOption { suggested := SuggestionList(typed, suggestions) return func(err *gqlerror.Error) { if len(suggested) > 0 { @@ -51,7 +52,7 @@ func SuggestListUnquoted(prefix string, typed string, suggestions []string) Erro } } -func Suggestf(suggestion string, args ...interface{}) ErrorOption { +func Suggestf(suggestion string, args ...any) ErrorOption { return func(err *gqlerror.Error) { err.Message += " Did you mean " + fmt.Sprintf(suggestion, args...) + "?" } @@ -117,12 +118,8 @@ func SuggestionList(input string, options []string) []string { func calcThreshold(a string) (threshold int) { // the logic is copied from here // https://github.com/graphql/graphql-js/blob/47bd8c8897c72d3efc17ecb1599a95cee6bac5e8/src/jsutils/suggestionList.ts#L14 - threshold = int(math.Floor(float64(len(a))*0.4) + 1) - - if threshold < 1 { - threshold = 1 - } - return + threshold = max(int(math.Floor(float64(len(a))*0.4)+1), 1) + return threshold } // Computes the lexical distance between strings A and B. @@ -136,7 +133,7 @@ func calcThreshold(a string) (threshold int) { // as a single edit which helps identify mis-cased values with an edit distance // of 1. // -// This distance can be useful for detecting typos in input or sorting +// This distance can be useful for detecting typos in input or sorting. 
func lexicalDistance(a, b string) int { if a == b { return 0 diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/core/walk.go b/vendor/github.com/vektah/gqlparser/v2/validator/core/walk.go index 09a3016fd4..4e4c9b30f1 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/core/walk.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/core/walk.go @@ -142,7 +142,11 @@ func (w *Walker) walkFragment(it *ast.FragmentDefinition) { } } -func (w *Walker) walkDirectives(parentDef *ast.Definition, directives []*ast.Directive, location ast.DirectiveLocation) { +func (w *Walker) walkDirectives( + parentDef *ast.Definition, + directives []*ast.Directive, + location ast.DirectiveLocation, +) { for _, dir := range directives { def := w.Schema.Directives[dir.Name] dir.Definition = def @@ -182,6 +186,8 @@ func (w *Walker) walkValue(value *ast.Value) { fieldDef := value.Definition.Fields.ForName(child.Name) if fieldDef != nil { child.Value.ExpectedType = fieldDef.Type + child.Value.ExpectedTypeHasDefault = fieldDef.DefaultValue != nil && + fieldDef.DefaultValue.Kind != ast.NullValue child.Value.Definition = w.Schema.Types[fieldDef.Type.Name()] } } @@ -208,6 +214,8 @@ func (w *Walker) walkValue(value *ast.Value) { func (w *Walker) walkArgument(argDef *ast.ArgumentDefinition, arg *ast.Argument) { if argDef != nil { arg.Value.ExpectedType = argDef.Type + arg.Value.ExpectedTypeHasDefault = argDef.DefaultValue != nil && + argDef.DefaultValue.Kind != ast.NullValue arg.Value.Definition = w.Schema.Types[argDef.Type.Name()] } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go index e4a67eb0cd..1c6f55877e 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go @@ -3,10 +3,8 @@ package rules import ( "fmt" "sort" - "strings" 
"github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) @@ -17,12 +15,24 @@ func ruleFuncFieldsOnCorrectType(observers *Events, addError AddErrFunc, disable return } - message := fmt.Sprintf(`Cannot query field "%s" on type "%s".`, field.Name, field.ObjectDefinition.Name) + message := fmt.Sprintf( + `Cannot query field "%s" on type "%s".`, + field.Name, + field.ObjectDefinition.Name, + ) if !disableSuggestion { - if suggestedTypeNames := getSuggestedTypeNames(walker, field.ObjectDefinition, field.Name); suggestedTypeNames != nil { - message += " Did you mean to use an inline fragment on " + QuotedOrList(suggestedTypeNames...) + "?" - } else if suggestedFieldNames := getSuggestedFieldNames(field.ObjectDefinition, field.Name); suggestedFieldNames != nil { + if suggestedTypeNames := getSuggestedTypeNames( + walker, + field.ObjectDefinition, + field.Name, + ); suggestedTypeNames != nil { + message += " Did you mean to use an inline fragment on " + QuotedOrList( + suggestedTypeNames...) + "?" + } else if suggestedFieldNames := getSuggestedFieldNames( + field.ObjectDefinition, + field.Name, + ); suggestedFieldNames != nil { message += " Did you mean " + QuotedOrList(suggestedFieldNames...) + "?" } } @@ -89,7 +99,7 @@ func getSuggestedTypeNames(walker *Walker, parent *ast.Definition, name string) if diff != 0 { return diff < 0 } - return strings.Compare(typeA, typeB) < 0 + return typeA < typeB }) return suggestedTypes @@ -99,8 +109,8 @@ func getSuggestedTypeNames(walker *Walker, parent *ast.Definition, name string) // where max is set to the slice’s length, // we ensure that appending elements results // in a slice backed by a distinct array. -// This method prevents the shared array issue -func concatSlice(first []string, second []string) []string { +// This method prevents the shared array issue. 
+func concatSlice(first, second []string) []string { n := len(first) return append(first[:n:n], second...) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go index 8fb2692589..bd1f84944b 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) @@ -18,7 +17,10 @@ var FragmentsOnCompositeTypesRule = Rule{ return } - message := fmt.Sprintf(`Fragment cannot condition on non composite type "%s".`, inlineFragment.TypeCondition) + message := fmt.Sprintf( + `Fragment cannot condition on non composite type "%s".`, + inlineFragment.TypeCondition, + ) addError( Message("%s", message), @@ -27,11 +29,16 @@ var FragmentsOnCompositeTypesRule = Rule{ }) observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) { - if fragment.Definition == nil || fragment.TypeCondition == "" || fragment.Definition.IsCompositeType() { + if fragment.Definition == nil || fragment.TypeCondition == "" || + fragment.Definition.IsCompositeType() { return } - message := fmt.Sprintf(`Fragment "%s" cannot condition on non composite type "%s".`, fragment.Name, fragment.TypeCondition) + message := fmt.Sprintf( + `Fragment "%s" cannot condition on non composite type "%s".`, + fragment.Name, + fragment.TypeCondition, + ) addError( Message("%s", message), diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go index 4c065a715e..af7ca6db17 100644 --- 
a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) @@ -21,7 +20,12 @@ func ruleFuncKnownArgumentNames(observers *Events, addError AddErrFunc, disableS if disableSuggestion { addError( - Message(`Unknown argument "%s" on field "%s.%s".`, arg.Name, field.ObjectDefinition.Name, field.Name), + Message( + `Unknown argument "%s" on field "%s.%s".`, + arg.Name, + field.ObjectDefinition.Name, + field.Name, + ), At(field.Position), ) } else { @@ -30,7 +34,12 @@ func ruleFuncKnownArgumentNames(observers *Events, addError AddErrFunc, disableS suggestions = append(suggestions, argDef.Name) } addError( - Message(`Unknown argument "%s" on field "%s.%s".`, arg.Name, field.ObjectDefinition.Name, field.Name), + Message( + `Unknown argument "%s" on field "%s.%s".`, + arg.Name, + field.ObjectDefinition.Name, + field.Name, + ), SuggestListQuoted("Did you mean", arg.Name, suggestions), At(field.Position), ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go index 2430d29b23..b81b93ed72 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go @@ -1,8 +1,9 @@ package rules import ( - "github.com/vektah/gqlparser/v2/ast" + "slices" + "github.com/vektah/gqlparser/v2/ast" //nolint:staticcheck // Validator rules each use dot imports for convenience. . 
"github.com/vektah/gqlparser/v2/validator/core" ) @@ -25,10 +26,8 @@ var KnownDirectivesRule = Rule{ return } - for _, loc := range directive.Definition.Locations { - if loc == directive.Location { - return - } + if slices.Contains(directive.Definition.Locations, directive.Location) { + return } // position must be exists if directive.Definition != nil @@ -40,7 +39,11 @@ var KnownDirectivesRule = Rule{ if !seen[tmp] { addError( - Message(`Directive "@%s" may not be used on %s.`, directive.Name, directive.Location), + Message( + `Directive "@%s" may not be used on %s.`, + directive.Name, + directive.Location, + ), At(directive.Position), ) seen[tmp] = true diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go index c9b9f90d4a..c55cd658c4 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go index b67da68ce7..bc2d2d00b9 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . 
"github.com/vektah/gqlparser/v2/validator/core" ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go index a0f10fba75..5a85d3bfe7 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go index dfa851c577..890f71f7ed 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/max_introspection_depth.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/max_introspection_depth.go index 651b23b4e3..012a47448d 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/max_introspection_depth.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/max_introspection_depth.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . 
"github.com/vektah/gqlparser/v2/validator/core" ) @@ -29,7 +28,11 @@ var MaxIntrospectionDepth = Rule{ }, } -func checkDepthSelectionSet(selectionSet ast.SelectionSet, visitedFragments map[string]bool, depth int) bool { +func checkDepthSelectionSet( + selectionSet ast.SelectionSet, + visitedFragments map[string]bool, + depth int, +) bool { for _, child := range selectionSet { if field, ok := child.(*ast.Field); ok { if checkDepthField(field, visitedFragments, depth) { @@ -63,7 +66,11 @@ func checkDepthField(field *ast.Field, visitedFragments map[string]bool, depth i return checkDepthSelectionSet(field.SelectionSet, visitedFragments, depth) } -func checkDepthFragmentSpread(fragmentSpread *ast.FragmentSpread, visitedFragments map[string]bool, depth int) bool { +func checkDepthFragmentSpread( + fragmentSpread *ast.FragmentSpread, + visitedFragments map[string]bool, + depth int, +) bool { fragmentName := fragmentSpread.Name if visited, ok := visitedFragments[fragmentName]; ok && visited { // Fragment cycles are handled by `NoFragmentCyclesRule`. diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go index fb3ac6ad3c..ead6ae9152 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go @@ -5,7 +5,6 @@ import ( "strings" "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . 
"github.com/vektah/gqlparser/v2/validator/core" ) @@ -55,7 +54,11 @@ var NoFragmentCyclesRule = Rule{ via = fmt.Sprintf(" via %s", strings.Join(fragmentNames, ", ")) } addError( - Message(`Cannot spread fragment "%s" within itself%s.`, spreadName, via), + Message( + `Cannot spread fragment "%s" within itself%s.`, + spreadName, + via, + ), At(spreadNode.Position), ) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go index 562d7f19ce..76cafb805a 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) @@ -11,13 +10,18 @@ var NoUndefinedVariablesRule = Rule{ Name: "NoUndefinedVariables", RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnValue(func(walker *Walker, value *ast.Value) { - if walker.CurrentOperation == nil || value.Kind != ast.Variable || value.VariableDefinition != nil { + if walker.CurrentOperation == nil || value.Kind != ast.Variable || + value.VariableDefinition != nil { return } if walker.CurrentOperation.Name != "" { addError( - Message(`Variable "%s" is not defined by operation "%s".`, value, walker.CurrentOperation.Name), + Message( + `Variable "%s" is not defined by operation "%s".`, + value, + walker.CurrentOperation.Name, + ), At(value.Position), ) } else { diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go index 6d27e11e9e..d28389a42e 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go +++ 
b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go index a4ce07090c..b393c5b272 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) @@ -18,7 +17,11 @@ var NoUnusedVariablesRule = Rule{ if operation.Name != "" { addError( - Message(`Variable "$%s" is never used in operation "%s".`, varDef.Variable, operation.Name), + Message( + `Variable "$%s" is never used in operation "%s".`, + varDef.Variable, + operation.Name, + ), At(varDef.Position), ) } else { diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go index 9e843e760b..77014e3ec7 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go @@ -6,7 +6,6 @@ import ( "reflect" "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . 
"github.com/vektah/gqlparser/v2/validator/core" ) @@ -82,7 +81,8 @@ var OverlappingFieldsCanBeMergedRule = Rule{ }) observers.OnField(func(walker *Walker, field *ast.Field) { if walker.CurrentOperation == nil { - // When checking both Operation and Fragment, errors are duplicated when processing FragmentDefinition referenced from Operation + // When checking both Operation and Fragment, errors are duplicated when processing + // FragmentDefinition referenced from Operation return } m.walker = walker @@ -112,7 +112,11 @@ type pairSet struct { data map[string]map[string]bool } -func (pairSet *pairSet) Add(a *ast.FragmentSpread, b *ast.FragmentSpread, areMutuallyExclusive bool) { +func (pairSet *pairSet) Add( + a *ast.FragmentSpread, + b *ast.FragmentSpread, + areMutuallyExclusive bool, +) { add := func(a *ast.FragmentSpread, b *ast.FragmentSpread) { m := pairSet.data[a.Name] if m == nil { @@ -125,7 +129,11 @@ func (pairSet *pairSet) Add(a *ast.FragmentSpread, b *ast.FragmentSpread, areMut add(b, a) } -func (pairSet *pairSet) Has(a *ast.FragmentSpread, b *ast.FragmentSpread, areMutuallyExclusive bool) bool { +func (pairSet *pairSet) Has( + a *ast.FragmentSpread, + b *ast.FragmentSpread, + areMutuallyExclusive bool, +) bool { am, ok := pairSet.data[a.Name] if !ok { return false @@ -224,7 +232,11 @@ func (m *ConflictMessage) addFieldsConflictMessage(addError AddErrFunc) { var buf bytes.Buffer m.String(&buf) addError( - Message(`Fields "%s" conflict because %s. Use different aliases on the fields to fetch both if this was intentional.`, m.ResponseName, buf.String()), + Message( + `Fields "%s" conflict because %s. 
Use different aliases on the fields to fetch both if this was intentional.`, + m.ResponseName, + buf.String(), + ), At(m.Position), ) } @@ -240,7 +252,9 @@ type overlappingFieldsCanBeMergedManager struct { comparedFragments map[string]bool } -func (m *overlappingFieldsCanBeMergedManager) findConflictsWithinSelectionSet(selectionSet ast.SelectionSet) []*ConflictMessage { +func (m *overlappingFieldsCanBeMergedManager) findConflictsWithinSelectionSet( + selectionSet ast.SelectionSet, +) []*ConflictMessage { if len(selectionSet) == 0 { return nil } @@ -271,7 +285,12 @@ func (m *overlappingFieldsCanBeMergedManager) findConflictsWithinSelectionSet(se return conflicts.Conflicts } -func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFieldsAndFragment(conflicts *conflictMessageContainer, areMutuallyExclusive bool, fieldsMap *sequentialFieldsMap, fragmentSpread *ast.FragmentSpread) { +func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFieldsAndFragment( + conflicts *conflictMessageContainer, + areMutuallyExclusive bool, + fieldsMap *sequentialFieldsMap, + fragmentSpread *ast.FragmentSpread, +) { if m.comparedFragments[fragmentSpread.Name] { return } @@ -299,11 +318,21 @@ func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFieldsAndFr if fragmentSpread.Name == baseFragmentSpread.Name { continue } - m.collectConflictsBetweenFieldsAndFragment(conflicts, areMutuallyExclusive, fieldsMap, fragmentSpread) + m.collectConflictsBetweenFieldsAndFragment( + conflicts, + areMutuallyExclusive, + fieldsMap, + fragmentSpread, + ) } } -func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFragments(conflicts *conflictMessageContainer, areMutuallyExclusive bool, fragmentSpreadA *ast.FragmentSpread, fragmentSpreadB *ast.FragmentSpread) { +func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFragments( + conflicts *conflictMessageContainer, + areMutuallyExclusive bool, + fragmentSpreadA *ast.FragmentSpread, + 
fragmentSpreadB *ast.FragmentSpread, +) { var check func(fragmentSpreadA *ast.FragmentSpread, fragmentSpreadB *ast.FragmentSpread) check = func(fragmentSpreadA *ast.FragmentSpread, fragmentSpreadB *ast.FragmentSpread) { if fragmentSpreadA.Name == fragmentSpreadB.Name { @@ -322,8 +351,12 @@ func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFragments(c return } - fieldsMapA, fragmentSpreadsA := getFieldsAndFragmentNames(fragmentSpreadA.Definition.SelectionSet) - fieldsMapB, fragmentSpreadsB := getFieldsAndFragmentNames(fragmentSpreadB.Definition.SelectionSet) + fieldsMapA, fragmentSpreadsA := getFieldsAndFragmentNames( + fragmentSpreadA.Definition.SelectionSet, + ) + fieldsMapB, fragmentSpreadsB := getFieldsAndFragmentNames( + fragmentSpreadB.Definition.SelectionSet, + ) // (F) First, collect all conflicts between these two collections of fields // (not including any nested fragments). @@ -344,7 +377,11 @@ func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFragments(c check(fragmentSpreadA, fragmentSpreadB) } -func (m *overlappingFieldsCanBeMergedManager) findConflictsBetweenSubSelectionSets(areMutuallyExclusive bool, selectionSetA ast.SelectionSet, selectionSetB ast.SelectionSet) *conflictMessageContainer { +func (m *overlappingFieldsCanBeMergedManager) findConflictsBetweenSubSelectionSets( + areMutuallyExclusive bool, + selectionSetA ast.SelectionSet, + selectionSetB ast.SelectionSet, +) *conflictMessageContainer { var conflicts conflictMessageContainer fieldsMapA, fragmentSpreadsA := getFieldsAndFragmentNames(selectionSetA) @@ -357,14 +394,24 @@ func (m *overlappingFieldsCanBeMergedManager) findConflictsBetweenSubSelectionSe // those referenced by each fragment name associated with the second. 
for _, fragmentSpread := range fragmentSpreadsB { m.comparedFragments = make(map[string]bool) - m.collectConflictsBetweenFieldsAndFragment(&conflicts, areMutuallyExclusive, fieldsMapA, fragmentSpread) + m.collectConflictsBetweenFieldsAndFragment( + &conflicts, + areMutuallyExclusive, + fieldsMapA, + fragmentSpread, + ) } // (I) Then collect conflicts between the second collection of fields and // those referenced by each fragment name associated with the first. for _, fragmentSpread := range fragmentSpreadsA { m.comparedFragments = make(map[string]bool) - m.collectConflictsBetweenFieldsAndFragment(&conflicts, areMutuallyExclusive, fieldsMapB, fragmentSpread) + m.collectConflictsBetweenFieldsAndFragment( + &conflicts, + areMutuallyExclusive, + fieldsMapB, + fragmentSpread, + ) } // (J) Also collect conflicts between any fragment names by the first and @@ -372,7 +419,12 @@ func (m *overlappingFieldsCanBeMergedManager) findConflictsBetweenSubSelectionSe // names to each item in the second set of names. 
for _, fragmentSpreadA := range fragmentSpreadsA { for _, fragmentSpreadB := range fragmentSpreadsB { - m.collectConflictsBetweenFragments(&conflicts, areMutuallyExclusive, fragmentSpreadA, fragmentSpreadB) + m.collectConflictsBetweenFragments( + &conflicts, + areMutuallyExclusive, + fragmentSpreadA, + fragmentSpreadB, + ) } } @@ -383,7 +435,10 @@ func (m *overlappingFieldsCanBeMergedManager) findConflictsBetweenSubSelectionSe return &conflicts } -func (m *overlappingFieldsCanBeMergedManager) collectConflictsWithin(conflicts *conflictMessageContainer, fieldsMap *sequentialFieldsMap) { +func (m *overlappingFieldsCanBeMergedManager) collectConflictsWithin( + conflicts *conflictMessageContainer, + fieldsMap *sequentialFieldsMap, +) { for _, fields := range fieldsMap.Iterator() { for idx, fieldA := range fields { for _, fieldB := range fields[idx+1:] { @@ -396,7 +451,12 @@ func (m *overlappingFieldsCanBeMergedManager) collectConflictsWithin(conflicts * } } -func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetween(conflicts *conflictMessageContainer, parentFieldsAreMutuallyExclusive bool, fieldsMapA *sequentialFieldsMap, fieldsMapB *sequentialFieldsMap) { +func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetween( + conflicts *conflictMessageContainer, + parentFieldsAreMutuallyExclusive bool, + fieldsMapA *sequentialFieldsMap, + fieldsMapB *sequentialFieldsMap, +) { for _, fieldsEntryA := range fieldsMapA.KeyValueIterator() { fieldsB, ok := fieldsMapB.Get(fieldsEntryA.ResponseName) if !ok { @@ -413,7 +473,11 @@ func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetween(conflicts } } -func (m *overlappingFieldsCanBeMergedManager) findConflict(parentFieldsAreMutuallyExclusive bool, fieldA *ast.Field, fieldB *ast.Field) *ConflictMessage { +func (m *overlappingFieldsCanBeMergedManager) findConflict( + parentFieldsAreMutuallyExclusive bool, + fieldA *ast.Field, + fieldB *ast.Field, +) *ConflictMessage { if fieldA.ObjectDefinition == nil || 
fieldB.ObjectDefinition == nil { return nil } @@ -437,8 +501,12 @@ func (m *overlappingFieldsCanBeMergedManager) findConflict(parentFieldsAreMutual if fieldA.Name != fieldB.Name { return &ConflictMessage{ ResponseName: fieldNameA, - Message: fmt.Sprintf(`"%s" and "%s" are different fields`, fieldA.Name, fieldB.Name), - Position: fieldB.Position, + Message: fmt.Sprintf( + `"%s" and "%s" are different fields`, + fieldA.Name, + fieldB.Name, + ), + Position: fieldB.Position, } } @@ -452,18 +520,27 @@ func (m *overlappingFieldsCanBeMergedManager) findConflict(parentFieldsAreMutual } } - if fieldA.Definition != nil && fieldB.Definition != nil && doTypesConflict(m.walker, fieldA.Definition.Type, fieldB.Definition.Type) { + if fieldA.Definition != nil && fieldB.Definition != nil && + doTypesConflict(m.walker, fieldA.Definition.Type, fieldB.Definition.Type) { return &ConflictMessage{ ResponseName: fieldNameA, - Message: fmt.Sprintf(`they return conflicting types "%s" and "%s"`, fieldA.Definition.Type.String(), fieldB.Definition.Type.String()), - Position: fieldB.Position, + Message: fmt.Sprintf( + `they return conflicting types "%s" and "%s"`, + fieldA.Definition.Type.String(), + fieldB.Definition.Type.String(), + ), + Position: fieldB.Position, } } // Collect and compare sub-fields. Use the same "visited fragment names" list // for both collections so fields in a fragment reference are never // compared to themselves. 
- conflicts := m.findConflictsBetweenSubSelectionSets(areMutuallyExclusive, fieldA.SelectionSet, fieldB.SelectionSet) + conflicts := m.findConflictsBetweenSubSelectionSets( + areMutuallyExclusive, + fieldA.SelectionSet, + fieldB.SelectionSet, + ) if conflicts == nil { return nil } @@ -474,7 +551,7 @@ func (m *overlappingFieldsCanBeMergedManager) findConflict(parentFieldsAreMutual } } -func sameArguments(args1 []*ast.Argument, args2 []*ast.Argument) bool { +func sameArguments(args1, args2 []*ast.Argument) bool { if len(args1) != len(args2) { return false } @@ -493,7 +570,7 @@ func sameArguments(args1 []*ast.Argument, args2 []*ast.Argument) bool { return true } -func sameValue(value1 *ast.Value, value2 *ast.Value) bool { +func sameValue(value1, value2 *ast.Value) bool { if value1.Kind != value2.Kind { return false } @@ -503,7 +580,7 @@ func sameValue(value1 *ast.Value, value2 *ast.Value) bool { return true } -func doTypesConflict(walker *Walker, type1 *ast.Type, type2 *ast.Type) bool { +func doTypesConflict(walker *Walker, type1, type2 *ast.Type) bool { if type1.Elem != nil { if type2.Elem != nil { return doTypesConflict(walker, type1.Elem, type2.Elem) @@ -522,14 +599,17 @@ func doTypesConflict(walker *Walker, type1 *ast.Type, type2 *ast.Type) bool { t1 := walker.Schema.Types[type1.NamedType] t2 := walker.Schema.Types[type2.NamedType] - if (t1.Kind == ast.Scalar || t1.Kind == ast.Enum) && (t2.Kind == ast.Scalar || t2.Kind == ast.Enum) { + if (t1.Kind == ast.Scalar || t1.Kind == ast.Enum) && + (t2.Kind == ast.Scalar || t2.Kind == ast.Enum) { return t1.Name != t2.Name } return false } -func getFieldsAndFragmentNames(selectionSet ast.SelectionSet) (*sequentialFieldsMap, []*ast.FragmentSpread) { +func getFieldsAndFragmentNames( + selectionSet ast.SelectionSet, +) (*sequentialFieldsMap, []*ast.FragmentSpread) { fieldsMap := sequentialFieldsMap{ data: make(map[string][]*ast.Field), } diff --git 
a/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go index f932ac8c2e..94cc6356c6 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) @@ -49,7 +48,11 @@ var PossibleFragmentSpreadsRule = Rule{ observers.OnInlineFragment(func(walker *Walker, inlineFragment *ast.InlineFragment) { validate(walker, inlineFragment.ObjectDefinition, inlineFragment.TypeCondition, func() { addError( - Message(`Fragment cannot be spread here as objects of type "%s" can never be of type "%s".`, inlineFragment.ObjectDefinition.Name, inlineFragment.TypeCondition), + Message( + `Fragment cannot be spread here as objects of type "%s" can never be of type "%s".`, + inlineFragment.ObjectDefinition.Name, + inlineFragment.TypeCondition, + ), At(inlineFragment.Position), ) }) @@ -59,12 +62,22 @@ var PossibleFragmentSpreadsRule = Rule{ if fragmentSpread.Definition == nil { return } - validate(walker, fragmentSpread.ObjectDefinition, fragmentSpread.Definition.TypeCondition, func() { - addError( - Message(`Fragment "%s" cannot be spread here as objects of type "%s" can never be of type "%s".`, fragmentSpread.Name, fragmentSpread.ObjectDefinition.Name, fragmentSpread.Definition.TypeCondition), - At(fragmentSpread.Position), - ) - }) + validate( + walker, + fragmentSpread.ObjectDefinition, + fragmentSpread.Definition.TypeCondition, + func() { + addError( + Message( + `Fragment "%s" cannot be spread here as objects of type "%s" can never be of type "%s".`, + fragmentSpread.Name, + fragmentSpread.ObjectDefinition.Name, + 
fragmentSpread.Definition.TypeCondition, + ), + At(fragmentSpread.Position), + ) + }, + ) }) }, } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/rules.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/rules.go index 803543ed17..e94151b06e 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/rules.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/rules.go @@ -77,6 +77,7 @@ func (r *Rules) AddRule(name string, ruleFunc core.RuleFunc) { // GetInner returns the internal rule map. // If the map is not initialized, it returns an empty map. +// This returns a copy of the rules map, not the original map. func (r *Rules) GetInner() map[string]core.RuleFunc { if r == nil { return nil // impossible nonsense, hopefully @@ -84,7 +85,13 @@ func (r *Rules) GetInner() map[string]core.RuleFunc { if r.rules == nil { return make(map[string]core.RuleFunc) } - return r.rules + + rules := make(map[string]core.RuleFunc) + for k, v := range r.rules { + rules[k] = v + } + + return rules } // RemoveRule removes a rule with the specified name from the rule set. diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go index e4f210d757..95fd2298f4 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . 
"github.com/vektah/gqlparser/v2/validator/core" ) @@ -22,14 +21,22 @@ var ScalarLeafsRule = Rule{ if fieldType.IsLeafType() && len(field.SelectionSet) > 0 { addError( - Message(`Field "%s" must not have a selection since type "%s" has no subfields.`, field.Name, fieldType.Name), + Message( + `Field "%s" must not have a selection since type "%s" has no subfields.`, + field.Name, + fieldType.Name, + ), At(field.Position), ) } if !fieldType.IsLeafType() && len(field.SelectionSet) == 0 { addError( - Message(`Field "%s" of type "%s" must have a selection of subfields.`, field.Name, field.Definition.Type.String()), + Message( + `Field "%s" of type "%s" must have a selection of subfields.`, + field.Name, + field.Definition.Type.String(), + ), Suggestf(`"%s { ... }"`, field.Name), At(field.Position), ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go index feed91d5ce..2d4322da1b 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go @@ -5,7 +5,6 @@ import ( "strings" "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go index 2ed1da2b34..882a8cb168 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . 
"github.com/vektah/gqlparser/v2/validator/core" ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go index 0f57702814..a1a0101671 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) @@ -16,7 +15,10 @@ var UniqueDirectivesPerLocationRule = Rule{ for _, dir := range directives { if dir.Name != "repeatable" && seen[dir.Name] { addError( - Message(`The directive "@%s" can only be used once at this location.`, dir.Name), + Message( + `The directive "@%s" can only be used once at this location.`, + dir.Name, + ), At(dir.Position), ) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go index 136b0fdb5a..7b1a40ceb1 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . 
"github.com/vektah/gqlparser/v2/validator/core" ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go index 41d8d667aa..67c92de180 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go index ae4c54eede..199a4b0f5c 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go index 4d4a6a87f7..6a33d0e936 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . 
"github.com/vektah/gqlparser/v2/validator/core" ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go index 43c1a1bfb5..5126245e54 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go @@ -6,7 +6,6 @@ import ( "strconv" "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) @@ -19,7 +18,11 @@ func ruleFuncValuesOfCorrectType(observers *Events, addError AddErrFunc, disable if value.Kind == ast.NullValue && value.ExpectedType.NonNull { addError( - Message(`Expected value of type "%s", found %s.`, value.ExpectedType.String(), value.String()), + Message( + `Expected value of type "%s", found %s.`, + value.ExpectedType.String(), + value.String(), + ), At(value.Position), ) } @@ -66,13 +69,21 @@ func ruleFuncValuesOfCorrectType(observers *Events, addError AddErrFunc, disable if value.Definition.Kind == ast.Enum { if disableSuggestion { addError( - Message(`Enum "%s" cannot represent non-enum value: %s.`, value.ExpectedType.String(), value.String()), + Message( + `Enum "%s" cannot represent non-enum value: %s.`, + value.ExpectedType.String(), + value.String(), + ), At(value.Position), ) } else { rawValStr := fmt.Sprint(rawVal) addError( - Message(`Enum "%s" cannot represent non-enum value: %s.`, value.ExpectedType.String(), value.String()), + Message( + `Enum "%s" cannot represent non-enum value: %s.`, + value.ExpectedType.String(), + value.String(), + ), SuggestListQuoted("Did you mean the enum value", rawValStr, possibleEnums), At(value.Position), ) @@ -92,20 +103,32 @@ func ruleFuncValuesOfCorrectType(observers *Events, addError AddErrFunc, disable rawValStr := fmt.Sprint(rawVal) addError( 
unexpectedTypeMessageOnly(value), - SuggestListUnquoted("Did you mean the enum value", rawValStr, possibleEnums), + SuggestListUnquoted( + "Did you mean the enum value", + rawValStr, + possibleEnums, + ), At(value.Position), ) } } else if value.Definition.EnumValues.ForName(value.Raw) == nil { if disableSuggestion { addError( - Message(`Value "%s" does not exist in "%s" enum.`, value.String(), value.ExpectedType.String()), + Message( + `Value "%s" does not exist in "%s" enum.`, + value.String(), + value.ExpectedType.String(), + ), At(value.Position), ) } else { rawValStr := fmt.Sprint(rawVal) addError( - Message(`Value "%s" does not exist in "%s" enum.`, value.String(), value.ExpectedType.String()), + Message( + `Value "%s" does not exist in "%s" enum.`, + value.String(), + value.ExpectedType.String(), + ), SuggestListQuoted("Did you mean the enum value", rawValStr, possibleEnums), At(value.Position), ) @@ -124,7 +147,12 @@ func ruleFuncValuesOfCorrectType(observers *Events, addError AddErrFunc, disable fieldValue := value.Children.ForName(field.Name) if fieldValue == nil && field.DefaultValue == nil { addError( - Message(`Field "%s.%s" of required type "%s" was not provided.`, value.Definition.Name, field.Name, field.Type.String()), + Message( + `Field "%s.%s" of required type "%s" was not provided.`, + value.Definition.Name, + field.Name, + field.Type.String(), + ), At(value.Position), ) continue @@ -137,7 +165,10 @@ func ruleFuncValuesOfCorrectType(observers *Events, addError AddErrFunc, disable func() { if len(value.Children) != 1 { addError( - Message(`OneOf Input Object "%s" must specify exactly one key.`, value.Definition.Name), + Message( + `OneOf Input Object "%s" must specify exactly one key.`, + value.Definition.Name, + ), At(value.Position), ) return @@ -147,7 +178,11 @@ func ruleFuncValuesOfCorrectType(observers *Events, addError AddErrFunc, disable isNullLiteral := fieldValue == nil || fieldValue.Kind == ast.NullValue if isNullLiteral { addError( - 
Message(`Field "%s.%s" must be non-null.`, value.Definition.Name, value.Definition.Fields[0].Name), + Message( + `Field "%s.%s" must be non-null.`, + value.Definition.Name, + value.Definition.Fields[0].Name, + ), At(fieldValue.Position), ) return @@ -159,7 +194,11 @@ func ruleFuncValuesOfCorrectType(observers *Events, addError AddErrFunc, disable isNullableVariable := !fieldValue.VariableDefinition.Type.NonNull if isNullableVariable { addError( - Message(`Variable "%s" must be non-nullable to be used for OneOf Input Object "%s".`, variableName, value.Definition.Name), + Message( + `Variable "%s" must be non-nullable to be used for OneOf Input Object "%s".`, + variableName, + value.Definition.Name, + ), At(fieldValue.Position), ) } @@ -172,7 +211,11 @@ func ruleFuncValuesOfCorrectType(observers *Events, addError AddErrFunc, disable if value.Definition.Fields.ForName(fieldValue.Name) == nil { if disableSuggestion { addError( - Message(`Field "%s" is not defined by type "%s".`, fieldValue.Name, value.Definition.Name), + Message( + `Field "%s" is not defined by type "%s".`, + fieldValue.Name, + value.Definition.Name, + ), At(fieldValue.Position), ) } else { @@ -182,7 +225,11 @@ func ruleFuncValuesOfCorrectType(observers *Events, addError AddErrFunc, disable } addError( - Message(`Field "%s" is not defined by type "%s".`, fieldValue.Name, value.Definition.Name), + Message( + `Field "%s" is not defined by type "%s".`, + fieldValue.Name, + value.Definition.Name, + ), SuggestListQuoted("Did you mean", fieldValue.Name, suggestions), At(fieldValue.Position), ) @@ -223,7 +270,12 @@ func unexpectedTypeMessage(addError AddErrFunc, v *ast.Value) { func unexpectedTypeMessageOnly(v *ast.Value) ErrorOption { switch v.ExpectedType.String() { case "Int", "Int!": - if _, err := strconv.ParseInt(v.Raw, 10, 32); err != nil && errors.Is(err, strconv.ErrRange) { + if _, err := strconv.ParseInt( + v.Raw, + 10, + 32, + ); err != nil && + errors.Is(err, strconv.ErrRange) { return 
Message(`Int cannot represent non 32-bit signed integer value: %s`, v.String()) } return Message(`Int cannot represent non-integer value: %s`, v.String()) @@ -236,11 +288,20 @@ func unexpectedTypeMessageOnly(v *ast.Value) ErrorOption { case "ID", "ID!": return Message(`ID cannot represent a non-string and non-integer value: %s`, v.String()) // case "Enum": - // return Message(`Enum "%s" cannot represent non-enum value: %s`, v.ExpectedType.String(), v.String()) + // return Message(`Enum "%s" cannot represent non-enum value: %s`, v.ExpectedType.String(), + // v.String()) default: if v.Definition.Kind == ast.Enum { - return Message(`Enum "%s" cannot represent non-enum value: %s.`, v.ExpectedType.String(), v.String()) + return Message( + `Enum "%s" cannot represent non-enum value: %s.`, + v.ExpectedType.String(), + v.String(), + ) } - return Message(`Expected value of type "%s", found %s.`, v.ExpectedType.String(), v.String()) + return Message( + `Expected value of type "%s", found %s.`, + v.ExpectedType.String(), + v.String(), + ) } } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go index 77f116bb50..b0670404ef 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . 
"github.com/vektah/gqlparser/v2/validator/core" ) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go index b2af7e1923..d3d36a293f 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go @@ -2,7 +2,6 @@ package rules import ( "github.com/vektah/gqlparser/v2/ast" - //nolint:staticcheck // Validator rules each use dot imports for convenience. . "github.com/vektah/gqlparser/v2/validator/core" ) @@ -11,7 +10,9 @@ var VariablesInAllowedPositionRule = Rule{ Name: "VariablesInAllowedPosition", RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnValue(func(walker *Walker, value *ast.Value) { - if value.Kind != ast.Variable || value.ExpectedType == nil || value.VariableDefinition == nil || walker.CurrentOperation == nil { + if value.Kind != ast.Variable || value.ExpectedType == nil || + value.VariableDefinition == nil || + walker.CurrentOperation == nil { return } @@ -19,12 +20,18 @@ var VariablesInAllowedPositionRule = Rule{ // todo: move me into walk // If there is a default non nullable types can be null - if value.VariableDefinition.DefaultValue != nil && value.VariableDefinition.DefaultValue.Kind != ast.NullValue { + if value.VariableDefinition.DefaultValue != nil && + value.VariableDefinition.DefaultValue.Kind != ast.NullValue { if value.ExpectedType.NonNull { tmp.NonNull = false } } + // If the expected type has a default, the given variable can be null + if value.ExpectedTypeHasDefault { + tmp.NonNull = false + } + if !value.VariableDefinition.Type.IsCompatible(&tmp) { addError( Message( diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/schema.go b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go index a8754afc2b..d40b9f192c 100644 --- 
a/vendor/github.com/vektah/gqlparser/v2/validator/schema.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go @@ -1,6 +1,7 @@ package validator import ( + "slices" "sort" "strconv" "strings" @@ -48,7 +49,13 @@ func ValidateSchemaDocument(sd *SchemaDocument) (*Schema, error) { } if def.Kind != ext.Kind { - return nil, gqlerror.ErrorPosf(ext.Position, "Cannot extend type %s because the base type is a %s, not %s.", ext.Name, def.Kind, ext.Kind) + return nil, gqlerror.ErrorPosf( + ext.Position, + "Cannot extend type %s because the base type is a %s, not %s.", + ext.Name, + def.Kind, + ext.Kind, + ) } def.Directives = append(def.Directives, ext.Directives...) @@ -95,14 +102,21 @@ func ValidateSchemaDocument(sd *SchemaDocument) (*Schema, error) { // version of gqlparser, in which case they're in trouble // anyway. default: - return nil, gqlerror.ErrorPosf(dir.Position, "Cannot redeclare directive %s.", dir.Name) + return nil, gqlerror.ErrorPosf( + dir.Position, + "Cannot redeclare directive %s.", + dir.Name, + ) } } schema.Directives[dir.Name] = sd.Directives[i] } if len(sd.Schema) > 1 { - return nil, gqlerror.ErrorPosf(sd.Schema[1].Position, "Cannot have multiple schema entry points, consider schema extensions instead.") + return nil, gqlerror.ErrorPosf( + sd.Schema[1].Position, + "Cannot have multiple schema entry points, consider schema extensions instead.", + ) } if len(sd.Schema) == 1 { @@ -110,7 +124,12 @@ func ValidateSchemaDocument(sd *SchemaDocument) (*Schema, error) { for _, entrypoint := range sd.Schema[0].OperationTypes { def := schema.Types[entrypoint.Type] if def == nil { - return nil, gqlerror.ErrorPosf(entrypoint.Position, "Schema root %s refers to a type %s that does not exist.", entrypoint.Operation, entrypoint.Type) + return nil, gqlerror.ErrorPosf( + entrypoint.Position, + "Schema root %s refers to a type %s that does not exist.", + entrypoint.Operation, + entrypoint.Type, + ) } switch entrypoint.Operation { case Query: @@ -121,7 +140,12 
@@ func ValidateSchemaDocument(sd *SchemaDocument) (*Schema, error) { schema.Subscription = def } } - if err := validateDirectives(&schema, sd.Schema[0].Directives, LocationSchema, nil); err != nil { + if err := validateDirectives( + &schema, + sd.Schema[0].Directives, + LocationSchema, + nil, + ); err != nil { return nil, err } schema.SchemaDirectives = append(schema.SchemaDirectives, sd.Schema[0].Directives...) @@ -131,7 +155,12 @@ func ValidateSchemaDocument(sd *SchemaDocument) (*Schema, error) { for _, entrypoint := range ext.OperationTypes { def := schema.Types[entrypoint.Type] if def == nil { - return nil, gqlerror.ErrorPosf(entrypoint.Position, "Schema root %s refers to a type %s that does not exist.", entrypoint.Operation, entrypoint.Type) + return nil, gqlerror.ErrorPosf( + entrypoint.Position, + "Schema root %s refers to a type %s that does not exist.", + entrypoint.Operation, + entrypoint.Type, + ) } switch entrypoint.Operation { case Query: @@ -259,7 +288,13 @@ func validateDefinition(schema *Schema, def *Definition) *gqlerror.Error { return gqlerror.ErrorPosf(def.Position, "Undefined type %s.", strconv.Quote(typ)) } if !isValidKind(typDef.Kind, Object) { - return gqlerror.ErrorPosf(def.Position, "%s type %s must be %s.", def.Kind, strconv.Quote(typ), kindList(Object)) + return gqlerror.ErrorPosf( + def.Position, + "%s type %s must be %s.", + def.Kind, + strconv.Quote(typ), + kindList(Object), + ) } } @@ -272,37 +307,75 @@ func validateDefinition(schema *Schema, def *Definition) *gqlerror.Error { switch def.Kind { case Object, Interface: if len(def.Fields) == 0 { - return gqlerror.ErrorPosf(def.Position, "%s %s: must define one or more fields.", def.Kind, def.Name) + return gqlerror.ErrorPosf( + def.Position, + "%s %s: must define one or more fields.", + def.Kind, + def.Name, + ) } for _, field := range def.Fields { if typ, ok := schema.Types[field.Type.Name()]; ok { if !isValidKind(typ.Kind, Scalar, Object, Interface, Union, Enum) { - return 
gqlerror.ErrorPosf(field.Position, "%s %s: field must be one of %s.", def.Kind, def.Name, kindList(Scalar, Object, Interface, Union, Enum)) + return gqlerror.ErrorPosf( + field.Position, + "%s %s: field must be one of %s.", + def.Kind, + def.Name, + kindList(Scalar, Object, Interface, Union, Enum), + ) } } } case Enum: if len(def.EnumValues) == 0 { - return gqlerror.ErrorPosf(def.Position, "%s %s: must define one or more unique enum values.", def.Kind, def.Name) + return gqlerror.ErrorPosf( + def.Position, + "%s %s: must define one or more unique enum values.", + def.Kind, + def.Name, + ) } for _, value := range def.EnumValues { for _, nonEnum := range [3]string{"true", "false", "null"} { if value.Name == nonEnum { - return gqlerror.ErrorPosf(def.Position, "%s %s: non-enum value %s.", def.Kind, def.Name, value.Name) + return gqlerror.ErrorPosf( + def.Position, + "%s %s: non-enum value %s.", + def.Kind, + def.Name, + value.Name, + ) } } - if err := validateDirectives(schema, value.Directives, LocationEnumValue, nil); err != nil { + if err := validateDirectives( + schema, + value.Directives, + LocationEnumValue, + nil, + ); err != nil { return err } } case InputObject: if len(def.Fields) == 0 { - return gqlerror.ErrorPosf(def.Position, "%s %s: must define one or more input fields.", def.Kind, def.Name) + return gqlerror.ErrorPosf( + def.Position, + "%s %s: must define one or more input fields.", + def.Kind, + def.Name, + ) } for _, field := range def.Fields { if typ, ok := schema.Types[field.Type.Name()]; ok { if !isValidKind(typ.Kind, Scalar, Enum, InputObject) { - return gqlerror.ErrorPosf(field.Position, "%s %s: field must be one of %s.", typ.Kind, field.Name, kindList(Scalar, Enum, InputObject)) + return gqlerror.ErrorPosf( + field.Position, + "%s %s: field must be one of %s.", + typ.Kind, + field.Name, + kindList(Scalar, Enum, InputObject), + ) } } } @@ -311,7 +384,12 @@ func validateDefinition(schema *Schema, def *Definition) *gqlerror.Error { for idx, field1 
:= range def.Fields { for _, field2 := range def.Fields[idx+1:] { if field1.Name == field2.Name { - return gqlerror.ErrorPosf(field2.Position, "Field %s.%s can only be defined once.", def.Name, field2.Name) + return gqlerror.ErrorPosf( + field2.Position, + "Field %s.%s can only be defined once.", + def.Name, + field2.Name, + ) } } } @@ -334,7 +412,11 @@ func validateTypeRef(schema *Schema, typ *Type) *gqlerror.Error { return nil } -func validateArgs(schema *Schema, args ArgumentDefinitionList, currentDirective *DirectiveDefinition) *gqlerror.Error { +func validateArgs( + schema *Schema, + args ArgumentDefinitionList, + currentDirective *DirectiveDefinition, +) *gqlerror.Error { for _, arg := range args { if err := validateName(arg.Position, arg.Name); err != nil { // now, GraphQL spec doesn't have reserved argument name @@ -353,45 +435,71 @@ func validateArgs(schema *Schema, args ArgumentDefinitionList, currentDirective def.Kind, ) } - if err := validateDirectives(schema, arg.Directives, LocationArgumentDefinition, currentDirective); err != nil { + if err := validateDirectives( + schema, + arg.Directives, + LocationArgumentDefinition, + currentDirective, + ); err != nil { return err } } return nil } -func validateDirectives(schema *Schema, dirs DirectiveList, location DirectiveLocation, currentDirective *DirectiveDefinition) *gqlerror.Error { +func validateDirectives( + schema *Schema, + dirs DirectiveList, + location DirectiveLocation, + currentDirective *DirectiveDefinition, +) *gqlerror.Error { for _, dir := range dirs { if err := validateName(dir.Position, dir.Name); err != nil { // now, GraphQL spec doesn't have reserved directive name return err } if currentDirective != nil && dir.Name == currentDirective.Name { - return gqlerror.ErrorPosf(dir.Position, "Directive %s cannot refer to itself.", currentDirective.Name) + return gqlerror.ErrorPosf( + dir.Position, + "Directive %s cannot refer to itself.", + currentDirective.Name, + ) } dirDefinition := 
schema.Directives[dir.Name] if dirDefinition == nil { return gqlerror.ErrorPosf(dir.Position, "Undefined directive %s.", dir.Name) } - validKind := false - for _, dirLocation := range dirDefinition.Locations { - if dirLocation == location { - validKind = true - break - } - } + validKind := slices.Contains(dirDefinition.Locations, location) if !validKind { - return gqlerror.ErrorPosf(dir.Position, "Directive %s is not applicable on %s.", dir.Name, location) + return gqlerror.ErrorPosf( + dir.Position, + "Directive %s is not applicable on %s.", + dir.Name, + location, + ) } for _, arg := range dir.Arguments { if dirDefinition.Arguments.ForName(arg.Name) == nil { - return gqlerror.ErrorPosf(arg.Position, "Undefined argument %s for directive %s.", arg.Name, dir.Name) + return gqlerror.ErrorPosf( + arg.Position, + "Undefined argument %s for directive %s.", + arg.Name, + dir.Name, + ) } } for _, schemaArg := range dirDefinition.Arguments { if schemaArg.Type.NonNull && schemaArg.DefaultValue == nil { - if arg := dir.Arguments.ForName(schemaArg.Name); arg == nil || arg.Value.Kind == NullValue { - return gqlerror.ErrorPosf(dir.Position, "Argument %s for directive %s cannot be null.", schemaArg.Name, dir.Name) + if arg := dir.Arguments.ForName( + schemaArg.Name, + ); arg == nil || + arg.Value.Kind == NullValue { + return gqlerror.ErrorPosf( + dir.Position, + "Argument %s for directive %s cannot be null.", + schemaArg.Name, + dir.Name, + ) } } } @@ -408,7 +516,12 @@ func validateImplements(schema *Schema, def *Definition, intfName string) *gqler return gqlerror.ErrorPosf(def.Position, "Undefined type %s.", strconv.Quote(intfName)) } if intf.Kind != Interface { - return gqlerror.ErrorPosf(def.Position, "%s is a non interface type %s.", strconv.Quote(intfName), intf.Kind) + return gqlerror.ErrorPosf( + def.Position, + "%s is a non interface type %s.", + strconv.Quote(intfName), + intf.Kind, + ) } for _, requiredField := range intf.Fields { foundField := 
def.Fields.ForName(requiredField.Name) @@ -429,24 +542,37 @@ func validateImplements(schema *Schema, def *Definition, intfName string) *gqler for _, requiredArg := range requiredField.Arguments { foundArg := foundField.Arguments.ForName(requiredArg.Name) if foundArg == nil { - return gqlerror.ErrorPosf(foundField.Position, + return gqlerror.ErrorPosf( + foundField.Position, `For %s to implement %s the field %s must have the same arguments but it is missing %s.`, - def.Name, intf.Name, requiredField.Name, requiredArg.Name, + def.Name, + intf.Name, + requiredField.Name, + requiredArg.Name, ) } if !requiredArg.Type.IsCompatible(foundArg.Type) { - return gqlerror.ErrorPosf(foundArg.Position, + return gqlerror.ErrorPosf( + foundArg.Position, `For %s to implement %s the field %s must have the same arguments but %s has the wrong type.`, - def.Name, intf.Name, requiredField.Name, requiredArg.Name, + def.Name, + intf.Name, + requiredField.Name, + requiredArg.Name, ) } } for _, foundArgs := range foundField.Arguments { - if requiredField.Arguments.ForName(foundArgs.Name) == nil && foundArgs.Type.NonNull && foundArgs.DefaultValue == nil { - return gqlerror.ErrorPosf(foundArgs.Position, + if requiredField.Arguments.ForName(foundArgs.Name) == nil && foundArgs.Type.NonNull && + foundArgs.DefaultValue == nil { + return gqlerror.ErrorPosf( + foundArgs.Position, `For %s to implement %s any additional arguments on %s must be optional or have a default value but %s is required.`, - def.Name, intf.Name, foundField.Name, foundArgs.Name, + def.Name, + intf.Name, + foundField.Name, + foundArgs.Name, ) } } @@ -456,7 +582,11 @@ func validateImplements(schema *Schema, def *Definition, intfName string) *gqler // validateTypeImplementsAncestors // https://github.com/graphql/graphql-js/blob/47bd8c8897c72d3efc17ecb1599a95cee6bac5e8/src/type/validate.ts#L428 -func validateTypeImplementsAncestors(schema *Schema, def *Definition, intfName string) *gqlerror.Error { +func 
validateTypeImplementsAncestors( + schema *Schema, + def *Definition, + intfName string, +) *gqlerror.Error { intf := schema.Types[intfName] if intf == nil { return gqlerror.ErrorPosf(def.Position, "Undefined type %s.", strconv.Quote(intfName)) @@ -479,15 +609,10 @@ func validateTypeImplementsAncestors(schema *Schema, def *Definition, intfName s } func containsString(slice []string, want string) bool { - for _, str := range slice { - if want == str { - return true - } - } - return false + return slices.Contains(slice, want) } -func isCovariant(schema *Schema, required *Type, actual *Type) bool { +func isCovariant(schema *Schema, required, actual *Type) bool { if required.NonNull && !actual.NonNull { return false } @@ -513,18 +638,17 @@ func isCovariant(schema *Schema, required *Type, actual *Type) bool { func validateName(pos *Position, name string) *gqlerror.Error { if strings.HasPrefix(name, "__") { - return gqlerror.ErrorPosf(pos, `Name "%s" must not begin with "__", which is reserved by GraphQL introspection.`, name) + return gqlerror.ErrorPosf( + pos, + `Name "%s" must not begin with "__", which is reserved by GraphQL introspection.`, + name, + ) } return nil } func isValidKind(kind DefinitionKind, valid ...DefinitionKind) bool { - for _, k := range valid { - if kind == k { - return true - } - } - return false + return slices.Contains(valid, kind) } func kindList(kinds ...DefinitionKind) string { diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/validator.go b/vendor/github.com/vektah/gqlparser/v2/validator/validator.go index 1214ed16e6..9fb40d6a18 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/validator.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/validator.go @@ -2,6 +2,7 @@ package validator import ( "sort" + //nolint:staticcheck // bad, yeah . 
"github.com/vektah/gqlparser/v2/ast" "github.com/vektah/gqlparser/v2/gqlerror" @@ -24,7 +25,7 @@ var ( OrList = core.OrList ) -// Walk is an alias for core.Walk +// Walk is an alias for core.Walk. func Walk(schema *Schema, document *QueryDocument, observers *Events) { core.Walk(schema, document, observers) } @@ -49,9 +50,9 @@ func AddRule(name string, ruleFunc RuleFunc) { // RemoveRule removes an existing rule from the rule set // if one of the same name exists. -// The rule set is global, so it is not safe for concurrent changes +// The rule set is global, so it is not safe for concurrent changes. func RemoveRule(name string) { - var result []Rule // nolint:prealloc // using initialized with len(rules) produces a race condition + var result []Rule //nolint:prealloc // using initialized with len(rules) produces a race condition for _, r := range specifiedRules { if r.Name == name { continue @@ -64,10 +65,10 @@ func RemoveRule(name string) { // ReplaceRule replaces an existing rule from the rule set // if one of the same name exists. // If no match is found, it will add a new rule to the rule set. -// The rule set is global, so it is not safe for concurrent changes +// The rule set is global, so it is not safe for concurrent changes. 
func ReplaceRule(name string, ruleFunc RuleFunc) { var found bool - var result []Rule // nolint:prealloc // using initialized with len(rules) produces a race condition + var result []Rule //nolint:prealloc // using initialized with len(rules) produces a race condition for _, r := range specifiedRules { if r.Name == name { found = true @@ -117,7 +118,11 @@ func Validate(schema *Schema, doc *QueryDocument, rules ...Rule) gqlerror.List { return errs } -func ValidateWithRules(schema *Schema, doc *QueryDocument, rules *validatorrules.Rules) gqlerror.List { +func ValidateWithRules( + schema *Schema, + doc *QueryDocument, + rules *validatorrules.Rules, +) gqlerror.List { if rules == nil { rules = validatorrules.NewDefaultRules() } @@ -134,7 +139,7 @@ func ValidateWithRules(schema *Schema, doc *QueryDocument, rules *validatorrules } observers := &core.Events{} - var currentRules []Rule // nolint:prealloc // would require extra local refs for len + var currentRules []Rule //nolint:prealloc // would require extra local refs for len for name, ruleFunc := range rules.GetInner() { currentRules = append(currentRules, Rule{Name: name, RuleFunc: ruleFunc}) // ensure deterministic order evaluation diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/vars.go b/vendor/github.com/vektah/gqlparser/v2/validator/vars.go index 205a7fb516..50e2cdb295 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/vars.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/vars.go @@ -2,6 +2,7 @@ package validator import ( "encoding/json" + "errors" "fmt" "reflect" "strconv" @@ -12,11 +13,15 @@ import ( ) //nolint:staticcheck // We do not care about capitalized error strings -var ErrUnexpectedType = fmt.Errorf("Unexpected Type") +var ErrUnexpectedType = errors.New("Unexpected Type") -// VariableValues coerces and validates variable values -func VariableValues(schema *ast.Schema, op *ast.OperationDefinition, variables map[string]interface{}) (map[string]interface{}, error) { - 
coercedVars := map[string]interface{}{} +// VariableValues coerces and validates variable values. +func VariableValues( + schema *ast.Schema, + op *ast.OperationDefinition, + variables map[string]any, +) (map[string]any, error) { + coercedVars := map[string]any{} validator := varValidator{ path: ast.Path{ast.PathName("variable")}, @@ -60,13 +65,23 @@ func VariableValues(schema *ast.Schema, op *ast.OperationDefinition, variables m case "Int": n, err := jsonNumber.Int64() if err != nil { - return nil, gqlerror.ErrorPathf(validator.path, "cannot use value %d as %s", n, v.Type.NamedType) + return nil, gqlerror.ErrorPathf( + validator.path, + "cannot use value %d as %s", + n, + v.Type.NamedType, + ) } rv = reflect.ValueOf(n) case "Float": f, err := jsonNumber.Float64() if err != nil { - return nil, gqlerror.ErrorPathf(validator.path, "cannot use value %f as %s", f, v.Type.NamedType) + return nil, gqlerror.ErrorPathf( + validator.path, + "cannot use value %f as %s", + f, + v.Type.NamedType, + ) } rv = reflect.ValueOf(f) } @@ -93,7 +108,10 @@ type varValidator struct { schema *ast.Schema } -func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflect.Value, *gqlerror.Error) { +func (v *varValidator) validateVarType( + typ *ast.Type, + val reflect.Value, +) (reflect.Value, *gqlerror.Error) { currentPath := v.path resetPath := func() { v.path = currentPath @@ -137,7 +155,8 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec switch def.Kind { case ast.Enum: kind := val.Type().Kind() - if kind != reflect.Int && kind != reflect.Int32 && kind != reflect.Int64 && kind != reflect.String { + if kind != reflect.Int && kind != reflect.Int32 && kind != reflect.Int64 && + kind != reflect.String { return val, gqlerror.ErrorPathf(v.path, "enums must be ints or strings") } isValidEnum := false @@ -154,11 +173,17 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec kind := val.Type().Kind() switch 
typ.NamedType { case "Int": - if kind == reflect.Int || kind == reflect.Int32 || kind == reflect.Int64 || kind == reflect.Float32 || kind == reflect.Float64 || IsValidIntString(val, kind) { + if kind == reflect.Int || kind == reflect.Int32 || kind == reflect.Int64 || + kind == reflect.Float32 || + kind == reflect.Float64 || + IsValidIntString(val, kind) { return val, nil } case "Float": - if kind == reflect.Float32 || kind == reflect.Float64 || kind == reflect.Int || kind == reflect.Int32 || kind == reflect.Int64 || IsValidFloatString(val, kind) { + if kind == reflect.Float32 || kind == reflect.Float64 || kind == reflect.Int || + kind == reflect.Int32 || + kind == reflect.Int64 || + IsValidFloatString(val, kind) { return val, nil } case "String": @@ -172,7 +197,8 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec } case "ID": - if kind == reflect.Int || kind == reflect.Int32 || kind == reflect.Int64 || kind == reflect.String { + if kind == reflect.Int || kind == reflect.Int32 || kind == reflect.Int64 || + kind == reflect.String { return val, nil } default: diff --git a/vendor/github.com/xrash/smetrics/jaro.go b/vendor/github.com/xrash/smetrics/jaro.go index 75f924e117..fc1ad19527 100644 --- a/vendor/github.com/xrash/smetrics/jaro.go +++ b/vendor/github.com/xrash/smetrics/jaro.go @@ -75,7 +75,7 @@ func Jaro(a, b string) float64 { } // The number of unaligned matches divided by two, is the number of _transpositions_. - transpositions := math.Floor(float64(unaligned / 2)) + transpositions := math.Floor(float64(unaligned) / 2) // Jaro distance is the average between these three numbers: // 1. 
matches / length of string A diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/.gitignore b/vendor/gitlab.com/gitlab-org/api/client-go/.gitignore index ec1d0dab94..6dc065c6b3 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/.gitignore +++ b/vendor/gitlab.com/gitlab-org/api/client-go/.gitignore @@ -33,8 +33,11 @@ _testmain.go vendor .go/ .golangci-lint/ +scripts/commitlint/node_modules # reports gl-code-quality-report.json .mise/ +Gitlab-license.txt +.vscode/ diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/.gitlab-ci.yml b/vendor/gitlab.com/gitlab-org/api/client-go/.gitlab-ci.yml index 0e6f4bdb38..76df79623a 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/.gitlab-ci.yml +++ b/vendor/gitlab.com/gitlab-org/api/client-go/.gitlab-ci.yml @@ -7,9 +7,11 @@ workflow: include: - template: Jobs/SAST.gitlab-ci.yml - - component: ${CI_SERVER_FQDN}/gitlab-org/components/danger-review/danger-review@2.0.0 + - component: ${CI_SERVER_FQDN}/components/dependency-scanning/main@0.8.0 + - component: ${CI_SERVER_FQDN}/components/dependency-scanning/main@0.8.0 + - component: ${CI_SERVER_FQDN}/gitlab-com/gl-infra/common-ci-tasks/danger@v3.5.3 inputs: - job_stage: lint + stage: lint job_allow_failure: true # NOTE: the two includes below are a hack to conditionally set the tags node @@ -18,17 +20,38 @@ include: # groups. However, there is no easy way to conditionally set tags or even variables without # jeopardizing existing (complex) workflow:rules or job:rules. Thus, we resort to # this nasty conditionally include hack. 
- - local: '.gitlab/ci/gitlab-go-runner-tags.gitlab-ci.yml' + - local: ".gitlab/ci/gitlab-go-runner-tags.gitlab-ci.yml" rules: - if: $CI_PROJECT_ROOT_NAMESPACE == 'gitlab-org' || $CI_PROJECT_ROOT_NAMESPACE == 'gitlab-community' - - local: '.gitlab/ci/community-go-runner-tags.gitlab-ci.yml' + - local: ".gitlab/ci/community-go-runner-tags.gitlab-ci.yml" rules: - if: $CI_PROJECT_ROOT_NAMESPACE != 'gitlab-org' && $CI_PROJECT_ROOT_NAMESPACE != 'gitlab-community' + + # Add Work Type Classification labels to merge requests, + # see https://gitlab.com/gitlab-com/gl-infra/common-ci-tasks/-/blob/main/autolabels.md + - project: "gitlab-com/gl-infra/common-ci-tasks" + ref: v2.93.0 + file: "autolabels.yml" + inputs: + stage: lint + + + # Add Work Type Classification labels to merge requests, + # see https://gitlab.com/gitlab-com/gl-infra/common-ci-tasks/-/blob/main/autolabels.md + - project: "gitlab-com/gl-infra/common-ci-tasks" + ref: v2.93.0 + file: "autolabels.yml" + inputs: + stage: lint + # Analyze commits to determine whether to cut a release # see https://gitlab.com/gitlab-com/gl-infra/common-ci-tasks/-/blob/main/semantic-release.md - - project: 'gitlab-com/gl-infra/common-ci-tasks' - ref: v2.85.0 # renovate:managed - file: 'semantic-release.yml' + - project: "gitlab-com/gl-infra/common-ci-tasks" + ref: v2.93.0 + file: "semantic-release.yml" + - project: "gitlab-com/gl-infra/common-ci-tasks" + ref: v2.93.0 + file: "semantic-release.yml" inputs: stage: deploy validate_stage: lint @@ -43,8 +66,8 @@ stages: parallel: matrix: - GOLANG_IMAGE_VERSION: - - '1.23' - '1.24' + - '1.25' .go:base: extends: @@ -71,17 +94,17 @@ stages: - $GOLANGCI_LINT_CACHE/ - key: prefix: mise- - files: ['.tool-versions'] + files: [".tool-versions"] paths: - $MISE_DATA_DIR # We only need to run Go-related jobs when actual Go files changed # or when running either on the default branch or for a tag. 
rules: - - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + - if: "$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH" - if: $CI_COMMIT_TAG - changes: - - '**/*.go' + - "**/*.go" - testdata/** - go.mod - go.sum @@ -97,8 +120,8 @@ golangci-lint: stage: lint needs: [] variables: - REPORT_FILENAME: 'gl-code-quality-report.json' - image: golangci/golangci-lint:v2.4.0 + REPORT_FILENAME: "gl-code-quality-report.json" + image: golangci/golangci-lint:v2.10.1 script: - golangci-lint run artifacts: @@ -134,28 +157,37 @@ verify-generated-code: exit 1; } -commitlint: - stage: lint +Download GraphQL schema: + stage: test needs: [] - rules: - - if: $CI_PIPELINE_SOURCE == 'merge_request_event' - image: - name: commitlint/commitlint:19.9.1 - entrypoint: [""] + image: node:25-alpine script: - - commitlint --from ${CI_MERGE_REQUEST_DIFF_BASE_SHA} --to ${CI_COMMIT_SHA} + - | + if [ ! -f schema/gitlab.graphql ]; then + mkdir schema + npm install -g get-graphql-schema + get-graphql-schema "${CI_API_GRAPHQL_URL}" --sdl >schema/gitlab.graphql + fi + cache: + key: $CI_COMMIT_REF_SLUG + paths: + - schema/gitlab.graphql + policy: pull-push + artifacts: + paths: + - schema/gitlab.graphql tests:unit: extends: - - .go:base - .go:versions stage: test - needs: [] + needs: + - Download GraphQL schema image: golang:$GOLANG_IMAGE_VERSION variables: # configure tooling versions - GOTESTSUM_VERSION: 'v1.12.0' - GOCOVER_COBERTURA_VERSION: 'v1.2.1-0.20240107185409-0818f3538137' + GOTESTSUM_VERSION: 'v1.13.0' + GOCOVER_COBERTURA_VERSION: 'v1.4.0' # configure artifact files JUNIT_FILENAME: tests.xml @@ -179,6 +211,61 @@ tests:unit: coverage_format: cobertura when: always +tests:integration: + stage: test + rules: + - if: $ENABLE_EE_ACCEPTANCE_TESTS == "true" + needs: [] + image: golang:1.25-bookworm + timeout: 2 hours + services: + - docker:29.2.1-dind + variables: + # Docker-in-docker configuration + DOCKER_HOST: tcp://docker:2376 + DOCKER_TLS_VERIFY: 1 + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_CERT_PATH: 
"$DOCKER_TLS_CERTDIR/client" + + # GitLab test configuration + GITLAB_BASE_URL: http://docker:8095/api/v4 + + # Since we're not using the makefile, this doesn't default to the testing token + # properly. + GITLAB_TOKEN: "glpat-ACCTEST1234567890123" + + # configure tooling versions + GOTESTSUM_VERSION: 'v1.12.0' + GOCOVER_COBERTURA_VERSION: 'v1.2.1-0.20240107185409-0818f3538137' + MISE_VERSION: v2025.6.1 + + # configure artifact files + JUNIT_FILENAME: integration-tests.xml + COVERPROFILE_FILENAME: integration-coverage.out + COVERPROFILE_XML_FILENAME: integration-coverage.xml + before_script: + # Install docker + compose + - curl -fsSL https://get.docker.com | sh + # Install mise + - curl --retry 3 https://mise.run | MISE_VERSION=${MISE_VERSION} MISE_INSTALL_PATH=/usr/local/bin/mise sh + - eval "$(mise activate bash --shims)" + + # Install Go and other tools with mise + - mise install + + # Copy the license to the correct location + - test -f "${GITLAB_LICENSE}" && cp "${GITLAB_LICENSE}" "${CI_PROJECT_DIR}/Gitlab-license.txt" + + # Start GitLab instance + - make testacc-up SERVICE=gitlab-ee + script: + - go run gotest.tools/gotestsum@${GOTESTSUM_VERSION} --format=standard-quiet --junitfile=$JUNIT_FILENAME -- -race -coverprofile=$COVERPROFILE_FILENAME -covermode=atomic -tags=integration ./gitlab_test/... + - grep -v '_generated.go' "$COVERPROFILE_FILENAME" | grep -v '_mock.go' > "${COVERPROFILE_FILENAME}.tmp" + - mv "${COVERPROFILE_FILENAME}.tmp" "$COVERPROFILE_FILENAME" + - go run github.com/boumenot/gocover-cobertura@${GOCOVER_COBERTURA_VERSION} < $COVERPROFILE_FILENAME > $COVERPROFILE_XML_FILENAME + - go tool cover -func $COVERPROFILE_FILENAME + + # Update rules on SAST to ensure the jobs show up in the pipeline # this prevents forks that don't have `ultimate` from skipping SAST scans # since gitlab-advaced-sast replaces semgrep. 
@@ -187,12 +274,11 @@ semgrep-sast: rules: - when: always -# Patch common-ci semantic release jobs -semantic_release_check: - before_script: - - npm install -g semantic-release @semantic-release/gitlab @semantic-release/git @semantic-release/changelog - -semantic_release: - script: - - npm install -g semantic-release @semantic-release/gitlab @semantic-release/git @semantic-release/changelog - - semantic-release +# Update rules on dependency scanning so that it only runs when on the +# canonical project or the community fork. This ensures that when people +# use personal forks, it doesn't fail stating the forks are unlicensed. +dependency-scanning: + rules: + - if: $CI_PROJECT_ID == "65271576" # Canonical repository + - if: $CI_PROJECT_ID == "65275361" # Community Fork + - when: never diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/.golangci.yml b/vendor/gitlab.com/gitlab-org/api/client-go/.golangci.yml index 48438462dd..7df81bdf51 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/.golangci.yml +++ b/vendor/gitlab.com/gitlab-org/api/client-go/.golangci.yml @@ -18,19 +18,26 @@ output: linters: enable: - asciicheck + - canonicalheader - dogsled - dupword - errorlint - goconst + - godoclint - misspell + - modernize - nakedret - nolintlint - revive + - sloglint - staticcheck - testifylint - unconvert - usestdlibvars + - usetesting - whitespace + - tparallel + - paralleltest disable: - errcheck settings: @@ -38,14 +45,31 @@ linters: locale: US ignore-rules: - noteable + modernize: + disable: + - omitzero revive: enable-all-rules: false rules: + - name: comment-spacings - name: deep-exit + - name: error-strings - name: get-return + - name: identical-switch-branches + - name: increment-decrement + - name: indent-error-flow - name: redundant-import-alias + - name: struct-tag - name: time-date + - name: unnecessary-stmt + - name: unused-parameter + - name: use-errors-new - name: var-naming + arguments: + - [] + - [] + - - 
skip-package-name-collision-with-go-std: true + staticcheck: checks: - ST1020 @@ -65,6 +89,7 @@ linters: - error-nil - expected-actual - formatter + - go-require - len - negative-positive - nil-compare @@ -74,6 +99,10 @@ linters: http-method: false http-status-code: false time-date-month: true + usetesting: + context-background: true + context-todo: true + os-temp-dir: true # List of regexps of issue texts to exclude. exclusions: @@ -91,28 +120,11 @@ linters: - source: "// GitLab API docs:" linters: - staticcheck - - text: "var-naming: struct field PipelineId should be PipelineID" - path: commits\.go - linters: - - revive - - text: "var-naming: struct field SelectiveSyncNamespaceIds should be SelectiveSyncNamespaceIDs" - path: geo_nodes\.go - linters: - - revive - - text: "var-naming: struct field RefsUrl should be RefsURL" - path: import\.go - linters: - - revive - - text: "var-naming: struct field BitbucketServerUrl should be BitbucketServerURL" - path: import\.go - linters: - - revive - - text: "var-naming: struct field CrlUrl should be CrlURL" - path: tags\.go + - path: _test\.go$ + text: unused-parameter linters: - revive - - text: "var-naming: type ServicePingNonSqlMetrics should be ServicePingNonSQLMetrics" - path: usage_data\.go + - path: ^testing/ linters: - revive paths: diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/.releaserc.json b/vendor/gitlab.com/gitlab-org/api/client-go/.releaserc.json index c65b5cd952..ab9aa899cd 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/.releaserc.json +++ b/vendor/gitlab.com/gitlab-org/api/client-go/.releaserc.json @@ -1,23 +1,7 @@ { "branches": ["main"], "plugins": [ - [ - "@semantic-release/commit-analyzer", - { - "preset": "angular", - "releaseRules": [ - {"type": "breaking", "release": "major"}, - {"type": "feat", "release": "minor"}, - {"type": "fix", "release": "patch"}, - {"type": "refactor", "release": "patch"}, - {"type": "chore", "release": "patch"}, - {"type": "docs", "release": "patch"}, - 
{"type": "style", "release": false}, - {"type": "test", "release": false}, - {"scope": "no-release", "release": false} - ] - } - ], + "@gitlab/semantic-release-merge-request-analyzer", "@semantic-release/release-notes-generator", "@semantic-release/changelog", "@semantic-release/gitlab", @@ -30,4 +14,3 @@ ] ] } - diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/.tool-versions b/vendor/gitlab.com/gitlab-org/api/client-go/.tool-versions index f499c6fcd8..afedc36bfb 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/.tool-versions +++ b/vendor/gitlab.com/gitlab-org/api/client-go/.tool-versions @@ -1,4 +1,5 @@ -golang 1.23 -golangci-lint 2.3.1 +golang 1.24 +golangci-lint 2.10.1 gofumpt 0.8.0 buf 1.55.1 +node 24.8.0 diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/AGENTS.md b/vendor/gitlab.com/gitlab-org/api/client-go/AGENTS.md new file mode 100644 index 0000000000..26c8909681 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/AGENTS.md @@ -0,0 +1,401 @@ +# AI Agent Guidelines for GitLab client-go + +This document provides comprehensive guidelines for AI agents working with the GitLab client-go repository. It covers development practices, testing requirements, code formatting, API alignment, and code generation procedures. + +## Repository Overview + +The GitLab client-go is a Go client library for the GitLab API, enabling Go programs to interact with GitLab in a simple and uniform way. The repository follows strict Go best practices and maintains close alignment with GitLab's official API documentation. + +## Development Workflow + +### Prerequisites + +When asked to modify code, read CONTRIBUTING.md and README.md for examples and formatting instructions. 
Where the +instructions in CONTRIBUTING.md and README.md conflict with information in AGENTS.md, prefer the instructions in +CONTRIBUTING.md and README.md over the instructions in AGENTS.md + +When asked to perform analysis on the codebase instead of changing code, skipping the read of CONTRIBUTING.md and README.md +is allowed and preferred, since understanding contributing guidelines is not required to perform analysis. + +### Required Tools + +- **Go** - Use the version specified in go.mod +- **gofumpt** - Code formatter +- **golangci-lint** - Linting tool +- **buf** - Protocol buffer tools for code generation +- **gomock** - Mock generation + +### Running Tests + +```bash +# Run all tests with race detection +mise exec -- make test + +# Run the complete reviewable process (includes tests) +mise exec -- make reviewable +``` + +### Test Patterns + +- Tests are parallelized using `t.Parallel()` +- Mock HTTP handlers are used for API testing +- Test data is stored in `testdata/` directory +- Each service method should have corresponding test coverage + - **CRITICAL** - When fixing bugs or creating new features, ensure new test scenarios are added to cover the new logic. +- When writing a test, write Gherkin comments in-line with the test to make the tests easier to read. This means adding GIVEN/WHEN/THEN comments in tests. +- All tests use the `testing` package with `testify/assert` + +Do not use `reflect.DeepEqual` in tests, use testify instead. 
+ +```go +// This is an example of the incorrect test setup +want := &MyStruct{ + MyField: "Meow Kitty" +} +if !reflect.DeepEqual(want, otherStruct) { + t.Errorf("MyStruct returned %+v, want %+v", otherStruct, want) +} + +// Instead, use this +want := &MyStruct{ + MyField: "Meow Kitty" +} +assert.Equal(t, want, otherStruct) +``` + +### Test Structure Example + +```go +func TestGetUser(t *testing.T) { + t.Parallel() + mux, client := setup(t) + + path := "/api/v4/users/1" + mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, http.MethodGet) + mustWriteHTTPResponse(t, w, "testdata/get_user.json") + }) + + user, _, err := client.Users.GetUser(1, GetUsersOptions{}) + assert.NoError(t, err) + // ... assertions +} +``` + +## Code Formatting and Linting + +### Formatting + +The project uses `gofumpt` for code formatting: + +```bash +# Format all Go files +mise exec -- make fmt +``` + +**Formatting Rules:** +- Line width for comments: < 80 characters +- Line width for code: < 100 characters (where sensible) +- Use `gofumpt` for consistent formatting +- Follow Go best practices + +### Linting + +```bash +# Run all linters +mise exec -- make lint +``` + +**Linting Configuration:** +- Uses `golangci-lint` with custom configuration in `.golangci.yml` +- Enabled linters: asciicheck, dogsled, dupword, errorlint, goconst, misspell, + nakedret, nolintlint, revive, staticcheck, testifylint, unconvert, + usestdlibvars, whitespace +- Excludes generated files and examples directory + +## Mock generation + +This repository uses gomock to generate testing structs, which are in the `testing/` folder. These need to be kept up-to-date with function signatures to that the Service implementations match the interfaces that have generated mocks. 
+ +### Available Generation Commands + +```bash +# Generate all code (protobuf, mocks, testing client) +mise exec -- make generate + +# Clean generated files +mise exec -- make clean +``` + +### Generation Scripts + +1. **`scripts/generate_testing_client.sh`** - Generates testing client with mocks +2. **`scripts/generate_mock_api.sh`** - Generates mock interfaces for all services +3. **`scripts/generate_service_interface_map.sh`** - Generates service interface mapping + +### When to Regenerate + +- After adding new service interfaces +- After modifying existing interfaces +- Before committing changes +- When mock generation fails + +## Function Comment Formatting + +### Required Comment Structure + +Every public function, type, and method must have properly formatted comments: + +```go +// FunctionName performs a specific action with the given parameters. +// +// GitLab API docs: https://docs.gitlab.com/api/endpoint/ +func (s *ServiceName) FunctionName(param Type, opt *Options, options ...RequestOptionFunc) (*ReturnType, *Response, error) { + // Implementation +} +``` + +### Comment Guidelines + +1. **Function Comments:** + - Start with function name (no "The" or "This function") + - Use present tense ("performs", "returns", "creates") + - Keep under 80 characters per line + - Include GitLab API documentation link + +2. **Type Comments:** + - Start with type name + - Describe the purpose and usage + - Include GitLab API documentation link + +3. 
**Struct Field Comments:** + - Use `json:"field_name"` tags + - Include `url:"field_name,omitempty"` for query parameters + - Document complex fields + +### GitLab API Documentation Alignment + +**CRITICAL: All code must align with GitLab's official API documentation.** + +#### API Documentation References + +Every function must reference the corresponding GitLab API documentation: + +```go +// GitLab API docs: https://docs.gitlab.com/api/users/ +// GitLab API docs: https://docs.gitlab.com/api/projects/#list-all-projects +// GitLab API docs: https://docs.gitlab.com/api/commits/#get-the-diff-of-a-commit +``` + +#### Field Ordering + +Struct fields and methods should be ordered to match the GitLab API documentation: + +```go +type CreateProjectOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` + // ... other fields in API documentation order +} +``` + +#### Parameter Validation + +- Use `any` type for project/group IDs to support both int64 and string. +- Implement proper parameter parsing with `parseID()` function +- Validate required parameters before making API calls + +## Code Structure and Patterns + +### Service Structure + +Each GitLab API service follows this pattern: + +```go +type ( + ServiceNameInterface interface { + MethodName(opt *MethodOptions, options ...RequestOptionFunc) (*ReturnType, *Response, error) + // ... other methods + } + + // ServiceName handles communication with the service related methods + // of the GitLab API. 
+ // + // GitLab API docs: https://docs.gitlab.com/api/service/ + ServiceName struct { + client *Client + } +) + +var _ ServiceNameInterface = (*ServiceName)(nil) +``` + +### Request Options Pattern + +All API methods should accept `options ...RequestOptionFunc`: + +```go +func (s *ServiceName) MethodName(opt *MethodOptions, options ...RequestOptionFunc) (*ReturnType, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + + u := fmt.Sprintf("projects/%s/endpoint", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var result *ReturnType + resp, err := s.client.Do(req, &result) + if err != nil { + return nil, resp, err + } + + return result, resp, nil +} +``` + +### Error Handling + +- Always return `(*Type, *Response, error)` tuple +- Use `PathEscape()` for URL path parameters +- Use `url.PathEscape()` for query parameters +- Handle `parseID()` errors for project/group IDs + +### Type Usage + +- Do not use `interface{}`, use the `any` alias instead! +- Do not use `int`, use `int64` instead! This applies to both slices and maps. + +## Pre-commit Checklist + +**CRITICAL: Tests MUST be run for every build or code modification.** +**CRITICAL: Linting MUST pass for every build or code modification.** +**CRITICAL: Mock generation should be run any time function signatures change** + +You can accomplish all three of these by running `mise exec -- make reviewable`, which will do: + +1. `mise exec -- make setup` - Install dependencies +2. `mise exec -- make generate` - Generate required code +3. `mise exec -- make fmt` - Format code +4. `mise exec -- make lint` - Run linters +5. `mise exec -- make test` - Run tests + +## Code Generation Guidelines + +### When Adding New Services + +1. Create the service file (e.g., `new_service.go`) +2. Define the interface and struct following the established pattern +3. 
Implement all methods with proper error handling +4. Add comprehensive tests in `new_service_test.go` +5. Run `mise exec -- make generate` to update mocks and testing client +6. Ensure all tests pass with `mise exec -- make test` + +### Mock Generation + +The repository uses `gomock` for generating mocks: + +```bash +# Generate mocks for all interfaces +mise exec -- make generate +``` + +Mocks are automatically generated in the `testing/` package and should not be manually edited. + +## File Organization + +### Service Files + +- One service per file (e.g., `users.go`, `projects.go`) +- Corresponding test file (e.g., `users_test.go`) +- Interface definition at the top of the file +- Service struct and implementation below + +### Generated Files + +- `testing/*_mock.go` - Generated mock files +- `testing/*_generated.go` - Generated testing client files +- `*_generated_test.go` - Generated test files + +**Never edit generated files manually.** + +## Common Patterns and Best Practices + +### Pointer Usage + +Use pointers for optional fields in structs: + +```go +type CreateUserOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Email *string `url:"email,omitempty" json:"email,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` +} +``` + +### Time Handling + +Use `*time.Time` for time fields and `ISOTime` for custom time types that only support year-month-day formatting: + +```go +type User struct { + CreatedAt *time.Time `json:"created_at"` + LastActivityOn *ISOTime `json:"last_activity_on"` +} +``` + +### Response Handling + +Always return the full response for pagination and metadata: + +```go +users, resp, err := client.Users.ListUsers(&gitlab.ListUsersOptions{}) +if err != nil { + return err +} + +// Access pagination info +fmt.Printf("Total pages: %d\n", resp.TotalPages) +``` + +## Troubleshooting + +### Common Issues + +1. 
**Tests failing after changes:** + - Run `mise exec -- make generate` to update mocks + - Check for linting errors with `mise exec -- make lint` + - Ensure all imports are correct + +2. **Linting errors:** + - Run `mise exec -- make fmt` to fix formatting issues + - Check `.golangci.yml` for specific rule configurations + - Address any static analysis warnings + +3. **Generation failures:** + - Ensure all interfaces are properly defined + - Check that service files follow the correct pattern + - Verify that all required tools are installed + +### Getting Help + +- Check existing issues in the [issue tracker](https://gitlab.com/gitlab-org/api/client-go/-/issues) +- Review the [contributing guide](CONTRIBUTING.md) +- Examine similar implementations in the codebase +- Refer to [GitLab API documentation](https://docs.gitlab.com/ee/api/) + +## Summary + +When working with this repository: + +1. **Always run tests** - `mise exec -- make test` is mandatory +2. **Follow formatting rules** - Use `gofumpt` and respect line limits +3. **Align with GitLab API docs** - Every function must reference official documentation +4. **Generate code when needed** - Run `mise exec -- make generate` after interface changes +5. **Use proper commenting** - Include GitLab API links and follow format guidelines +6. 
**Maintain consistency** - Follow established patterns and conventions diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/CHANGELOG.md b/vendor/gitlab.com/gitlab-org/api/client-go/CHANGELOG.md index 8369948d39..123f38d425 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/CHANGELOG.md +++ b/vendor/gitlab.com/gitlab-org/api/client-go/CHANGELOG.md @@ -1,3 +1,1427 @@ +## 1.46.0 + +### 🚀 Features + +- Update urls and add missing ([!2785](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2785)) by [Mohamed Mongy](https://gitlab.com/mohamedmongy96) + + + +# [1.46.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.45.0...v1.46.0) (2026-03-01) + +## 1.45.0 + +### 🚀 Features + +- Add LockMembershipsToSAML support to Application Settings ([!2791](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2791)) by [Mohamed Mongy](https://gitlab.com/mohamedmongy96) + +### 🔄 Other Changes + +- test(integration): Use epic IID instead of ID in `DeleteEpic` cleanup. ([!2794](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2794)) by [Florian Forster](https://gitlab.com/fforster) + + + +# [1.45.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.44.0...v1.45.0) (2026-02-27) + + +### Bug Fixes + +* **test:** Use epic IID instead of ID in `DeleteEpic` cleanup. 
([49dea05](https://gitlab.com/gitlab-org/api/client-go/commit/49dea0587894cd75d5962e69080974fccedde406)) + +## 1.44.0 + +### 🚀 Features + +- Implement runner controller instance-level runner scope support ([!2765](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2765)) by [Timo Furrer](https://gitlab.com/timofurrer) + +### 🔄 Other Changes + +- chore(deps): update module github.com/graph-gophers/graphql-go to v1.9.0 ([!2789](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2789)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.44.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.43.0...v1.44.0) (2026-02-26) + +## 1.43.0 + +### 🚀 Features + +- feat(pagination): Add `ScanAndCollectN` to collect at most _n_ results. ([!2788](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2788)) by [Florian Forster](https://gitlab.com/fforster) + + + +# [1.43.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.42.0...v1.43.0) (2026-02-25) + + +### Features + +* **pagination:** Add `ScanAndCollectN` to collect at most _n_ results. 
([f821c08](https://gitlab.com/gitlab-org/api/client-go/commit/f821c08c2a460755a0ae4db08fa468b54cbb4be1)) + +## 1.42.0 + +### 🚀 Features + +- feat: Add public_email to CreateUserOptions ([!2787](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2787)) by [Mohamed Othman](https://gitlab.com/mohamed.othman27) + + + +# [1.42.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.41.1...v1.42.0) (2026-02-24) + + +### Features + +* Add public_email to CreateUserOptions ([ab1ec31](https://gitlab.com/gitlab-org/api/client-go/commit/ab1ec3131687de457c8518c60150c254cc56fd83)) + +## 1.41.1 + +### 🐛 Bug Fixes + +- fix: Fixed a set of endpoints where inputs were escaped and should not be escaped ([!2772](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2772)) by [kilianpaquier](https://gitlab.com/u.kilianpaquier) + +### 🔄 Other Changes + +- Add `primary_domain` and `pages_primary_domain` to Pages structs ([!2786](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2786)) by [Mohamed Mongy](https://gitlab.com/mohamedmongy96) + + + +## [1.41.1](https://gitlab.com/gitlab-org/api/client-go/compare/v1.41.0...v1.41.1) (2026-02-24) + + +### Bug Fixes + +* Fixed a set of endpoints where inputs were escaped and should not be escaped ([d6d7b17](https://gitlab.com/gitlab-org/api/client-go/commit/d6d7b17f0c4d63c2613ae2aed2ea2901e87c7b8b)) + +## 1.41.0 + +### 🚀 Features + +- feat: Add missing event toggles to Group Slack integration ([!2784](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2784)) by [Mohamed Othman](https://gitlab.com/mohamed.othman27) + +### 🔄 Other Changes + +- chore(deps): update module buf.build/go/protovalidate to v1.1.3 ([!2783](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2783)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.41.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.40.1...v1.41.0) (2026-02-22) + + +### Features + +* Add missing event toggles to 
Group Slack integration ([a4e84a2](https://gitlab.com/gitlab-org/api/client-go/commit/a4e84a27f22083a40f351591c5a851ba19b6a7dc)) + +## 1.40.1 + +### 🐛 Bug Fixes + +- Add missing group API parameters to Go SDK structs ([!2764](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2764)) by [Mohamed Mongy](https://gitlab.com/mohamedmongy96) + + + +## [1.40.1](https://gitlab.com/gitlab-org/api/client-go/compare/v1.40.0...v1.40.1) (2026-02-21) + +## 1.40.0 + +### 🚀 Features + +- feat: Add visibility option to listgroupoptions ([!2775](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2775)) by [Mohamed Othman](https://gitlab.com/mohamed.othman27) +- Add missing parameters to MergeRequestDiff struct ([!2767](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2767)) by [Alekhin Sergey](https://gitlab.com/a.sergey) + +### 🔄 Other Changes + +- chore(oauth): use go:embed to extract the html ([!2740](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2740)) by [Tomas Vik](https://gitlab.com/viktomas) + + + +# [1.40.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.39.0...v1.40.0) (2026-02-21) + + +### Features + +* Add visibility option to listgroupoptions ([ca08a62](https://gitlab.com/gitlab-org/api/client-go/commit/ca08a62935f8d946dc52e35fcef5528c5950c104)) + +## 1.39.0 + +### 🚀 Features + +- feat: Add hide backlog and closed list properties to IssueBoards ([!2780](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2780)) by [Mohamed Othman](https://gitlab.com/mohamed.othman27) +- feat(groups): Add provider to AddGroupSAMLLinkOptions ([!2776](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2776)) by [Mohamed Othman](https://gitlab.com/mohamed.othman27) + + + +# [1.39.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.38.0...v1.39.0) (2026-02-19) + + +### Features + +* Add hide backlog and closed list properties to IssueBoards 
([a66984e](https://gitlab.com/gitlab-org/api/client-go/commit/a66984ee5934bc55b9618f83d16272b6a4ebf94f)) +* **groups:** Add provider to AddGroupSAMLLinkOptions ([bb97c7f](https://gitlab.com/gitlab-org/api/client-go/commit/bb97c7f334ab6cab3eb7153457f14a71b9ff0c55)) + +## 1.38.0 + +### 🚀 Features + +- feat(events): Add missing parameters for label operations and update documentation links ([!2781](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2781)) by [Mohamed Mongy](https://gitlab.com/mohamedmongy96) +- feat(labels): add missing params and edit links ([!2778](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2778)) by [Mohamed Mongy](https://gitlab.com/mohamedmongy96) + +### 🔄 Other Changes + +- docs: Fix broken GitLab docs anchors for alert_management API ([!2777](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2777)) by [Mohamed Othman](https://gitlab.com/mohamed.othman27) +- docs: Fix broken documentation links in attestations.go ([!2779](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2779)) by [Mohamed Othman](https://gitlab.com/mohamed.othman27) + + + +# [1.38.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.37.0...v1.38.0) (2026-02-19) + + +### Features + +* **events:** Add missing parameters for label operations and update documentation links ([11b9f08](https://gitlab.com/gitlab-org/api/client-go/commit/11b9f08b37a4c2ada9413259282f163f28b94051)) +* **labels:** add missing params and edit links ([ec1b92b](https://gitlab.com/gitlab-org/api/client-go/commit/ec1b92bff403c10446ab1ff6566a3a638871bb7e)) + +## 1.37.0 + +### 🚀 Features + +- Support system & system_action fields for merge event attributes ([!2737](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2737)) by [Artem Mikheev](https://gitlab.com/renbou) + +### 🔄 Other Changes + +- Update links of geo_sites.go ([!2782](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2782)) by [Mohamed 
Mongy](https://gitlab.com/mohamedmongy96) +- chore(deps): update dependency golangci-lint to v2.10.1 ([!2770](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2770)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update golangci/golangci-lint docker tag to v2.10.1 ([!2771](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2771)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update golangci/golangci-lint docker tag to v2.10.0 ([!2769](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2769)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update dependency golangci-lint to v2.10.0 ([!2768](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2768)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.37.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.36.0...v1.37.0) (2026-02-19) + +## 1.36.0 + +### 🚀 Features + +- feat: add support for google chat APIs ([!2766](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2766)) by [Zubeen](https://gitlab.com/syedzubeen) + +### 🔄 Other Changes + +- chore(deps): update module buf.build/go/protovalidate to v1.1.2 ([!2757](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2757)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.36.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.35.0...v1.36.0) (2026-02-17) + + +### Features + +* add support for google chat APIs ([81e58cb](https://gitlab.com/gitlab-org/api/client-go/commit/81e58cbc5296f1ed7651498de367ee42f1a46b1f)) + +## 1.35.0 + +### 🚀 Features + +- feat(groups): add code_owner_approval_required in a group's default_branch_protection_defaults ([!2725](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2725)) by [Preethi Atchudan](https://gitlab.com/preethiatchudan) + +### 🐛 Bug Fixes + 
+- fix(integration): Add missing json tags to ms teams struct ([!2703](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2703)) by [aishahsofea](https://gitlab.com/aishahsofea) + +### 🔄 Other Changes + +- chore(deps): update module buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go to v1.36.11-20260209202127-80ab13bee0bf.1 ([!2749](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2749)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update node docker tag to v25 ([!2762](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2762)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.35.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.34.0...v1.35.0) (2026-02-16) + + +### Bug Fixes + +* **integration:** Add missing json tags to ms teams struct ([dafd6fd](https://gitlab.com/gitlab-org/api/client-go/commit/dafd6fd9937246278d151e0858aa6cd2a0e8343a)) + +## 1.34.0 + +### 🚀 Features + +- feat(workitems): Add an initial "Work Items" service with "Get" and "List" methods. ([!2719](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2719)) by [Florian Forster](https://gitlab.com/fforster) + +### 🔄 Other Changes + +- refactor: migrate to math/rand/v2 ([!2759](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2759)) by [Ville Skyttä](https://gitlab.com/scop) + + + +# [1.34.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.33.0...v1.34.0) (2026-02-13) + + +### Bug Fixes + +* **workitems:** Use `int64` for global work item IDs. ([f04e3d0](https://gitlab.com/gitlab-org/api/client-go/commit/f04e3d08a0e73f535f8049bab43b25753f62cbc0)) + + +### Features + +* **request_options:** Add boolean return value to `WithNext`. 
([1cd1e1e](https://gitlab.com/gitlab-org/api/client-go/commit/1cd1e1e5ca3ad9c330ada3cbac4f48f22eab9e92)) +* **workitems:** Add comprehensive filtering to `ListWorkItemsOptions` ([052a897](https://gitlab.com/gitlab-org/api/client-go/commit/052a897891791acba55afb2fdc5e686ca14ad1df)) +* **workitems:** Add pagination support to `ListWorkItems`. ([cfdf5ee](https://gitlab.com/gitlab-org/api/client-go/commit/cfdf5ee61077951a6504b08dfe27033e9bccec5a)) +* **workitems:** Add WorkItems service with Get methods ([00925c2](https://gitlab.com/gitlab-org/api/client-go/commit/00925c26114c6a1fb2ad9758ce2ac8658e087f01)), closes [gitlab-org/api/client-go#2213](https://gitlab.com/gitlab-org/api/client-go/issues/2213) +* **workitems:** Implement the `ListWorkItems` method. ([4f8a709](https://gitlab.com/gitlab-org/api/client-go/commit/4f8a7092a23298e3de951564cd0c46a8481c28d7)) + +## 1.33.0 + +### 🚀 Features + +- Support unauthenticated clients via Unauthenticated auth source ([!2761](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2761)) by [Timo Furrer](https://gitlab.com/timofurrer) + + + +# [1.33.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.32.0...v1.33.0) (2026-02-13) + +## 1.32.0 + +### 🚀 Features + +- Implement endpoints for runner controller scopes ([!2758](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2758)) by [Timo Furrer](https://gitlab.com/timofurrer) + +### 🔄 Other Changes + +- test(namespaces): Address test feedback to simplify the test ([!2744](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2744)) by [Patrick Rice](https://gitlab.com/PatrickRice) +- chore(deps): update golangci/golangci-lint docker tag to v2.9.0 ([!2755](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2755)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update dependency golangci-lint to v2.9.0 ([!2754](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2754)) by [GitLab Dependency 
Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.32.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.31.0...v1.32.0) (2026-02-13) + +## 1.31.0 + +### 🚀 Features + +- Add missing fields to emoji and milestone event types ([!2704](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2704)) by [Huijie Shi](https://gitlab.com/lcdlyxrqy) + + + +# [1.31.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.30.0...v1.31.0) (2026-02-11) + +## 1.30.0 + +### 🚀 Features + +- Add missing query params to ListGroupsOptions ([!2726](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2726)) by [Mohamed Mongy](https://gitlab.com/mohamedmongy96) + +### 🔄 Other Changes + +- chore(deps): update module buf.build/go/protovalidate to v1.1.1 ([!2750](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2750)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- docs(no-release): update url for community fork ([!2748](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2748)) by [Oleksandr Redko](https://gitlab.com/alexandear) + + + +# [1.30.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.29.0...v1.30.0) (2026-02-10) + +## 1.29.0 + +### 🚀 Features + +- Update runner controllers to match latest state ([!2747](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2747)) by [Timo Furrer](https://gitlab.com/timofurrer) + +### 🔄 Other Changes + +- chore(deps): migrate from gopkg.in/yaml.v3 to go.yaml.in/yaml/v3 ([!2639](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2639)) by [Ville Skyttä](https://gitlab.com/scop) + + + +# [1.29.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.28.1...v1.29.0) (2026-02-09) + +## 1.28.1 + +### 🐛 Bug Fixes + +- Fix error where GetNamespace double escaped URL-encoded projects ([!2743](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2743)) by [Patrick Rice](https://gitlab.com/PatrickRice) + +### 🔄 Other Changes + +- 
refactor: moved comments to interface ([!2716](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2716)) by [Zubeen](https://gitlab.com/syedzubeen) + + + +## [1.28.1](https://gitlab.com/gitlab-org/api/client-go/compare/v1.28.0...v1.28.1) (2026-02-06) + +## 1.28.0 + +### 🚀 Features + +- Add `destroy` attribute for pipeline schedule inputs ([!2702](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2702)) by [long nguyen huy](https://gitlab.com/n.h.long.9697) + +### 🔄 Other Changes + +- Migrate bytes endpoints to new `do` pattern ([!2738](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2738)) by [Timo Furrer](https://gitlab.com/timofurrer) +- docs(users): document the `Locked` and `State` fields ([!2741](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2741)) by [Florian Forster](https://gitlab.com/fforster) +- ci: migrate to Danger from `common-ci-tasks` ([!2742](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2742)) by [Florian Forster](https://gitlab.com/fforster) +- chore(oauth): improve the look of the OAuth confirmation page ([!2739](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2739)) by [Tomas Vik](https://gitlab.com/viktomas) + + + +# [1.28.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.27.0...v1.28.0) (2026-02-05) + +## 1.27.0 + +### 🚀 Features + +- Trim leading `@` in user ids in `do()` requests paths ([!2736](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2736)) by [Timo Furrer](https://gitlab.com/timofurrer) + +### 🔄 Other Changes + +- Migrate endpoints with special status code handling to new `do` pattern ([!2733](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2733)) by [Timo Furrer](https://gitlab.com/timofurrer) +- Support file uploads in `do()` request handler ([!2732](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2732)) by [Timo Furrer](https://gitlab.com/timofurrer) +- Migrate more endpoints to the `do()` pattern 
([!2731](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2731)) by [Timo Furrer](https://gitlab.com/timofurrer) +- Revert "refactor(http): preserve response body without copying in multiple services" ([!2730](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2730)) by [Timo Furrer](https://gitlab.com/timofurrer) +- chore(deps): update docker docker tag to v29.2.1 ([!2729](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2729)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.27.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.26.0...v1.27.0) (2026-02-04) + +## 1.26.0 + +### 🚀 Features + +- Add slack integration support ([!2692](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2692)) by [Hamza Hassanain](https://gitlab.com/HamzaHassanain) + +### 🔄 Other Changes + +- refactor(no-release): fix minor revive issues ([!2711](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2711)) by [Oleksandr Redko](https://gitlab.com/alexandear) + + + +# [1.26.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.25.0...v1.26.0) (2026-02-03) + +## 1.25.0 + +### 🚀 Features + +- feat(hooks): Add webexintegration ([!2707](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2707)) by [Preethi Atchudan](https://gitlab.com/preethiatchudan) + +### 🔄 Other Changes + +- fix: Fix broken GitLab docs anchors for Wikis API ([!2723](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2723)) by [Mohamed Othman](https://gitlab.com/mohamed.othman27) +- refactor: moved comments to interface 7 ([!2715](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2715)) by [Zubeen](https://gitlab.com/syedzubeen) +- chore(deps): update module github.com/google/cel-go to v0.27.0 ([!2721](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2721)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- refactor: moved comments to interface 1 
([!2706](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2706)) by [Zubeen](https://gitlab.com/syedzubeen) +- Refactor low complexity endpoints to use new `do` request function ([!2718](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2718)) by [Timo Furrer](https://gitlab.com/timofurrer) +- Add some additional test coverage for functions before migrating to `do` ([!2720](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2720)) by [Patrick Rice](https://gitlab.com/PatrickRice) + + + +# [1.25.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.24.0...v1.25.0) (2026-02-01) + + +### Bug Fixes + +* Fix broken GitLab docs anchors for Wikis API ([bdbb5c0](https://gitlab.com/gitlab-org/api/client-go/commit/bdbb5c0e93847846f6f786c93d649bec18db38e4)) + + +### Features + +* **hooks:** Add webexintegration ([857ac6a](https://gitlab.com/gitlab-org/api/client-go/commit/857ac6a82ff63a65ae4df221cf8347fed8946f53)) + +## 1.24.0 + +### 🚀 Features + +- Add assignee_id to issues api ([!2673](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2673)) by [David Schneider](https://gitlab.com/dvob) + + + +# [1.24.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.23.0...v1.24.0) (2026-01-29) + +## 1.23.0 + +### 🚀 Features + +- feat: add group protected branches service ([!2685](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2685)) by [Prakash Divy](https://gitlab.com/prakashdivyy) + +### 🔄 Other Changes + +- chore(no-release): refactor to slog.DiscardHandler ([!2710](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2710)) by [Oleksandr Redko](https://gitlab.com/alexandear) + + + +# [1.23.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.22.1...v1.23.0) (2026-01-28) + + +### Features + +* add group protected branches service ([c7ffe6f](https://gitlab.com/gitlab-org/api/client-go/commit/c7ffe6ff7bc12996ce27df767a706a253a3ce00b)) + +## 1.22.1 + +### 🐛 Bug Fixes + +- fix: Type Mismatch in 
UpdateSettingsOptions for SentryEnabled ([!2690](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2690)) by [Zubeen](https://gitlab.com/syedzubeen) + +### 🔄 Other Changes + +- fix: URL tags for throttle protected path settings in UpdateSettingsOptions ([!2705](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2705)) by [Zubeen](https://gitlab.com/syedzubeen) +- test: Refactor tests to use testify assertions 7 ([!2700](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2700)) by [Zubeen](https://gitlab.com/syedzubeen) +- test: Refactor tests to use testify assertions 4 ([!2696](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2696)) by [Zubeen](https://gitlab.com/syedzubeen) +- test: Refactor tests to use testify assertions 3 ([!2695](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2695)) by [Zubeen](https://gitlab.com/syedzubeen) +- test(no-release): Refactor tests to use testify assertions 2 ([!2694](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2694)) by [Zubeen](https://gitlab.com/syedzubeen) +- test(no-release): Refactor tests to use testify assertions ([!2693](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2693)) by [Zubeen](https://gitlab.com/syedzubeen) +- test(no-release): Refactor tests to use testify assertions 6 ([!2699](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2699)) by [Zubeen](https://gitlab.com/syedzubeen) + + + +## [1.22.1](https://gitlab.com/gitlab-org/api/client-go/compare/v1.22.0...v1.22.1) (2026-01-28) + + +### Bug Fixes + +* Type Mismatch in UpdateSettingsOptions for SentryEnabled ([c2d3ca9](https://gitlab.com/gitlab-org/api/client-go/commit/c2d3ca98450719f615a951930153ad9fc2585b19)) +* URL tags for throttle protected path settings in UpdateSettingsOptions ([a4a525d](https://gitlab.com/gitlab-org/api/client-go/commit/a4a525dce32ba6aa80f45b48fbc0261e59cdabd3)) + +## 1.22.0 + +### 🚀 Features + +- feat(project_mirror): add 
ForceSyncProjectMirror ([!2683](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2683)) by [Prakash Divy](https://gitlab.com/prakashdivyy) + +### 🔄 Other Changes + +- test: Refactor tests to use testify assertions 5 ([!2697](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2697)) by [Zubeen](https://gitlab.com/syedzubeen) + + + +# [1.22.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.21.0...v1.22.0) (2026-01-28) + + +### Features + +* **project_mirror:** add ForceSyncProjectMirror ([b13fcb7](https://gitlab.com/gitlab-org/api/client-go/commit/b13fcb79e6ffb454dc9fd7e332bde90c79a62376)) + +## 1.21.0 + +### 🚀 Features + +- feat(settings): Add AnonymousSearchesAllowed field support ([!2678](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2678)) by [Seif Hatem](https://gitlab.com/seif-hatem) + +### 🔄 Other Changes + +- feat: improve URL validation and error handling in client initialization ([!2656](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2656)) by [Amer Khaled](https://gitlab.com/amrkhald777) + + + +# [1.21.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.20.0...v1.21.0) (2026-01-27) + + +### Features + +* improve URL validation and error handling in client initialization ([9417155](https://gitlab.com/gitlab-org/api/client-go/commit/9417155f9c8a5d7c044d052e61d8da5c91bbe57d)) +* **settings:** Add AnonymousSearchesAllowed field support ([7185888](https://gitlab.com/gitlab-org/api/client-go/commit/7185888208173e18216ecb353fdfebe91423f0c4)) + +## 1.20.0 + +### 🚀 Features + +- feat: update events ([!2689](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2689)) by [Huijie Shi](https://gitlab.com/lcdlyxrqy) + +### 🔄 Other Changes + +- chore(no-release): remove redundant build tag ([!2701](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2701)) by [Oleksandr Redko](https://gitlab.com/alexandear) +- chore(deps): update docker docker tag to v29.2.0 
([!2698](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2698)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.20.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.19.0...v1.20.0) (2026-01-27) + + +### Features + +* update events ([46ba91c](https://gitlab.com/gitlab-org/api/client-go/commit/46ba91cabfe7c13cf4f80738d48ca60b810f520a)) + +## 1.19.0 + +### 🚀 Features + +- feat(integrations): Add Chat & Notify integrations ([!2691](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2691)) by [Hamza Hassanain](https://gitlab.com/HamzaHassanain) + +### 🔄 Other Changes + +- refactor: use do function for requests ([!2674](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2674)) by [Timo Furrer](https://gitlab.com/timofurrer) +- chore(docs): Update adding API support guide for new coding patterns ([!2688](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2688)) by [Heidi Berry](https://gitlab.com/heidi.berry) + + + +# [1.19.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.18.0...v1.19.0) (2026-01-26) + + +### Features + +* **integrations:** Add Chat & Notify integrations ([cc692ed](https://gitlab.com/gitlab-org/api/client-go/commit/cc692edd6d8dfed55fd411559af7e53b55d4e2dd)) +* **mocks:** add streaming methods for various services ([889b407](https://gitlab.com/gitlab-org/api/client-go/commit/889b407e48432b32b4c1589102ceed6fadb857db)) + +## 1.18.0 + +### 🚀 Features + +- feat(settings): Added support for inactive_resource_access_tokens_delete_after_days to the... 
([!2686](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2686)) by [Preethi Atchudan](https://gitlab.com/preethiatchudan) + +### 🔄 Other Changes + +- Add missing tests for refactored functions ([!2676](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2676)) by [Patrick Rice](https://gitlab.com/PatrickRice) + + + +# [1.18.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.17.0...v1.18.0) (2026-01-25) + + +### Features + +* **settings:** Added support for inactive_resource_access_tokens_delete_after_days to the... ([52b60c3](https://gitlab.com/gitlab-org/api/client-go/commit/52b60c3345ef56cc18ae7e8e1e2dd7c9f7f71344)) + +## 1.17.0 + +### 🚀 Features + +- Add support for Group Mattermost integrations ([!2675](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2675)) by [Hamza Hassanain](https://gitlab.com/HamzaHassanain) + + + +# [1.17.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.16.0...v1.17.0) (2026-01-23) + +## 1.16.0 + +### 🚀 Features + +- Add environment, deployed_after, and deployed_before params to merge requests options struct ([!2672](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2672)) by [Filip Aleksic](https://gitlab.com/faleksic) + +### 🔄 Other Changes + +- chore(deps): update module golang.org/x/oauth2 to v0.34.0 ([!2640](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2640)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.16.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.15.0...v1.16.0) (2026-01-20) + +## 1.15.0 + +### 🚀 Features + +- Add ExpiresAt field to ProjectSharedWithGroup struct ([!2671](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2671)) by [cindy](https://gitlab.com/wscix) + +### 🔄 Other Changes + +- feat: convert examples to testable examples for pkg.go.dev ([!2655](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2655)) by [Amer Khaled](https://gitlab.com/amrkhald777) +- 
refactor(no-release): enable usetesting linter ([!2664](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2664)) by [Oleksandr Redko](https://gitlab.com/alexandear) +- chore(deps): update docker docker tag to v29.1.5 ([!2665](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2665)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- Draft: Users: Fix GetUsersOptions naming inconsistency ([!2667](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2667)) by [Seif Hatem](https://gitlab.com/seif-hatem) + + + +# [1.15.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.14.0...v1.15.0) (2026-01-20) + + +### Bug Fixes + +* Deprecate incorrect fields returned in response for Emails on Push integration ([71d747d](https://gitlab.com/gitlab-org/api/client-go/commit/71d747da9a297451911b0c4eb4850632a588e3e8)) + + +### Features + +* convert examples to testable examples for pkg.go.dev ([fee39f1](https://gitlab.com/gitlab-org/api/client-go/commit/fee39f1f21b264765bbbed80ba23265bd3f633a9)) +* **issue_links:** Add ID field to IssueLink struct ([8f813a8](https://gitlab.com/gitlab-org/api/client-go/commit/8f813a8a2e73c41bc81403aceb82d7d94e9ff684)) + +## 1.14.0 + +### 🚀 Features + +- feat(hooks): Add project hook support for vulnerability events and branch filter strategy ([!2658](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2658)) by [Heidi Berry](https://gitlab.com/heidi.berry) +- Add max_artifacts_size parameter to groups and projects ([!2652](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2652)) by [Betty Godier](https://gitlab.com/betty-godier) + + + +# [1.14.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.13.0...v1.14.0) (2026-01-13) + + +### Features + +* **hooks:** Add project hook support for vulnerability events and branch filter strategy ([4f6d252](https://gitlab.com/gitlab-org/api/client-go/commit/4f6d252a47411602ac6757400e6b5479d807cdb8)) + +## 1.13.0 + +### 🚀 Features 
+ +- feat(groups): add Active parameter to ListGroupProjects ([!2657](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2657)) by [Kai Armstrong](https://gitlab.com/phikai) + +### 🔄 Other Changes + +- chore(deps): update docker docker tag to v29.1.4 ([!2651](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2651)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.13.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.12.0...v1.13.0) (2026-01-12) + + +### Features + +* **groups:** add Active parameter to ListGroupProjects ([dec511a](https://gitlab.com/gitlab-org/api/client-go/commit/dec511a199b0adb7ba87f5a02a50651049b68b71)) + +## 1.12.0 + +### 🚀 Features + +- feat: add EmojiEvents field support to Project Webhooks ([!2653](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2653)) by [Yugan](https://gitlab.com/yugannkt) + +### 🔄 Other Changes + +- chore(deps): update dependency golangci-lint to v2.8.0 ([!2650](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2650)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- refactor(no-release): use errors.New instead of fmt.Errorf ([!2644](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2644)) by [Oleksandr Redko](https://gitlab.com/alexandear) + + + +# [1.12.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.11.0...v1.12.0) (2026-01-11) + + +### Features + +* add EmojiEvents field support to Project Webhooks ([2bcfa1f](https://gitlab.com/gitlab-org/api/client-go/commit/2bcfa1fd77756a3ccdb2bcf685736ee839b745be)) + +## 1.11.0 + +### 🚀 Features + +- feat(groups): add support for merge related settings ([!2625](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2625)) by [Zubeen](https://gitlab.com/syedzubeen) + +### 🐛 Bug Fixes + +- fix(api): typo in ms teams slug ([!2643](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2643)) by 
[aishahsofea](https://gitlab.com/aishahsofea) + +### 🔄 Other Changes + +- chore(deps): update module golang.org/x/text to v0.32.0 ([!2642](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2642)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update module golang.org/x/sys to v0.39.0 ([!2641](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2641)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update module github.com/godbus/dbus/v5 to v5.2.2 ([!2637](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2637)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update module github.com/google/go-querystring to v1.2.0 ([!2638](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2638)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.11.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.10.0...v1.11.0) (2026-01-05) + + +### Bug Fixes + +* **api:** typo in ms teams slug ([1ed6c95](https://gitlab.com/gitlab-org/api/client-go/commit/1ed6c9509b23db53c3988a2dde2f11d22c8be5f9)) + + +### Features + +* **groups:** add support for merge related settings ([cb8412f](https://gitlab.com/gitlab-org/api/client-go/commit/cb8412fc495d19ee6e44819a2f69fd213d19a199)) + +## 1.10.0 + +### 🚀 Features + +- feat: implement Runner Controller API ([!2634](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2634)) by [Duo Developer](https://gitlab.com/duo-developer) + +### 🔄 Other Changes + +- chore(deps): update module github.com/godbus/dbus/v5 to v5.2.1 ([!2635](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2635)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.10.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.9.1...v1.10.0) (2025-12-19) + + +### Features + +* implement Runner Controller API 
([66f19f4](https://gitlab.com/gitlab-org/api/client-go/commit/66f19f4073ce87566c7751e0987f857eeb008849)) + +## 1.9.1 + +### 🐛 Bug Fixes + +- fix: use parameters in config.NewClient and Jobs.DownloadArtifactsFile ([!2633](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2633)) by [Oleksandr Redko](https://gitlab.com/alexandear) + +### 🔄 Other Changes + +- test: fix TestCreateMergeRequestContextCommits failing locally ([!2631](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2631)) by [Oleksandr Redko](https://gitlab.com/alexandear) +- Code Refactor Using Request Handlers - 8 ([!2523](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2523)) by [Yashesvinee V](https://gitlab.com/yashes7516) +- Code Refactor Using Request Handlers - 6 ([!2521](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2521)) by [Yashesvinee V](https://gitlab.com/yashes7516) + + + +## [1.9.1](https://gitlab.com/gitlab-org/api/client-go/compare/v1.9.0...v1.9.1) (2025-12-17) + + +### Bug Fixes + +* use parameters in config.NewClient and Jobs.DownloadArtifactsFile ([28b7cd7](https://gitlab.com/gitlab-org/api/client-go/commit/28b7cd72f06777a2d3ec7772870c26565140341a)) + +## 1.9.0 + +### 🚀 Features + +- feat(api): add support for matrix project integration ([!2630](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2630)) by [aishahsofea](https://gitlab.com/aishahsofea) + + + +# [1.9.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.8.2...v1.9.0) (2025-12-16) + + +### Features + +* **api:** add support for matrix project integration ([0a5b11b](https://gitlab.com/gitlab-org/api/client-go/commit/0a5b11b9e2e405fb0a22009d60ce38091cc96625)) + +## 1.8.2 + +### 🐛 Bug Fixes + +- fix: correct omitempty tag in VariableFilter.EnvironmentScope field ([!2629](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2629)) by [Oleksandr Redko](https://gitlab.com/alexandear) + +### 🔄 Other Changes + +- feat(protectedTags): add support for 
`deploy_key_id` to `protected_tags` ([!2624](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2624)) by [Zubeen](https://gitlab.com/syedzubeen) +- chore(deps): update docker docker tag to v29.1.3 ([!2623](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2623)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update module buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go to v1.36.11-20251209175733-2a1774d88802.1 ([!2622](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2622)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update module google.golang.org/protobuf to v1.36.11 ([!2621](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2621)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +## [1.8.2](https://gitlab.com/gitlab-org/api/client-go/compare/v1.8.1...v1.8.2) (2025-12-15) + + +### Bug Fixes + +* correct omitempty tag in VariableFilter.EnvironmentScope field ([c117da1](https://gitlab.com/gitlab-org/api/client-go/commit/c117da1b123251ba86271d1ce3bf9750617e344f)) + + +### Features + +* **protectedTags:** add support for `deploy_key_id` to `protected_tags` ([c0fc3db](https://gitlab.com/gitlab-org/api/client-go/commit/c0fc3db793b51bfabb0ac8bb42442e6916b9df3f)) + +## 1.8.1 + +### 🐛 Bug Fixes + +- fix(epics): handle datetime format in ISOTime UnmarshalJSON ([!2612](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2612)) by [Zubeen](https://gitlab.com/syedzubeen) + +### 🔄 Other Changes + +- chore(deps): update module buf.build/go/protovalidate to v1.1.0 ([!2619](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2619)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore: add deprecation notice for PersonalAccessTokens.RevokePersonalAccessToken ([!2615](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2615)) by 
[aishahsofea](https://gitlab.com/aishahsofea) +- chore(deps): update golangci/golangci-lint docker tag to v2.7.2 ([!2613](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2613)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): do not use the experimental package ([!2614](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2614)) by [Mikhail Mazurskiy](https://gitlab.com/ash2k) +- test: Replace SkipIfRunningCE with SkipIfNotLicensed ([!2616](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2616)) by [Patrick Rice](https://gitlab.com/PatrickRice) + + + +## [1.8.1](https://gitlab.com/gitlab-org/api/client-go/compare/v1.8.0...v1.8.1) (2025-12-10) + + +### Bug Fixes + +* **epics:** handle datetime format in ISOTime UnmarshalJSON ([257e0ac](https://gitlab.com/gitlab-org/api/client-go/commit/257e0acd29daf887456d924c0063b52ebc2e808f)) + +## 1.8.0 + +### 🚀 Features + +- feat(hooks): add support for all hook event types ([!2606](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2606)) by [Heidi Berry](https://gitlab.com/heidi.berry) + + + +# [1.8.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.7.0...v1.8.0) (2025-12-08) + + +### Features + +* **hooks:** add support for all hook event types ([c3c9ca2](https://gitlab.com/gitlab-org/api/client-go/commit/c3c9ca275969adffca37908d63e5c70f634d7bbe)) + +## 1.7.0 + +### 🚀 Features + +- feat(users): Add support for a user to see only one file diff per page ([!2597](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2597)) by [Zubeen](https://gitlab.com/syedzubeen) + + + +# [1.7.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.6.0...v1.7.0) (2025-12-06) + + +### Features + +* **users:** Add support for a user to see only one file diff per page ([e2a9e09](https://gitlab.com/gitlab-org/api/client-go/commit/e2a9e09e79e7949e0b19dcfc97e3b7b533541856)) + +## 1.6.0 + +### 🚀 Features + +- feat: add admin compliance policy 
settings API ([!2610](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2610)) by [Hannes Lange](https://gitlab.com/hlange4) + +### 🔄 Other Changes + +- doc: fix typo ([!2603](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2603)) by [Guilhem Bonnefille](https://gitlab.com/gbonnefille) +- chore(deps): update golangci/golangci-lint docker tag to v2.7.1 ([!2611](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2611)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update docker docker tag to v29.1.2 ([!2609](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2609)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update golangci/golangci-lint docker tag to v2.7.0 ([!2608](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2608)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.6.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.5.0...v1.6.0) (2025-12-05) + + +### Features + +* add admin compliance policy settings API ([5c17773](https://gitlab.com/gitlab-org/api/client-go/commit/5c17773ca94ddece28978c7396bddcc6c65fb6a7)) + +## 1.5.0 + +### 🚀 Features + +- feat(Project Mirrors): Add missing Mirror attributes when reading or updating Project Mirrors ([!2600](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2600)) by [Patrick Rice](https://gitlab.com/PatrickRice) + + + +# [1.5.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.4.1...v1.5.0) (2025-12-03) + + +### Features + +* **Project Mirrors:** Add missing Mirror attributes when reading or updating Project Mirrors ([a49b32d](https://gitlab.com/gitlab-org/api/client-go/commit/a49b32df59aeae97247d21a83be3fab97da1bbfe)) + +## 1.4.1 + +### 🐛 Bug Fixes + +- Encode package managers as CSV in query for dependencies list ([!2604](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2604)) by [Timo 
Furrer](https://gitlab.com/timofurrer) + + + +## [1.4.1](https://gitlab.com/gitlab-org/api/client-go/compare/v1.4.0...v1.4.1) (2025-12-02) + +## 1.4.0 + +### 🚀 Features + +- feat(integrations): Add attestations integrations ([!2582](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2582)) by [Sam Roque-Worcel](https://gitlab.com/sroque-worcel) + +### 🔄 Other Changes + +- chore(deps): update docker docker tag to v29.1.1 ([!2602](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2602)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.4.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.3.1...v1.4.0) (2025-12-02) + + +### Features + +* **integrations:** Add attestations integrations ([4f50db4](https://gitlab.com/gitlab-org/api/client-go/commit/4f50db4acfb19212bfdfc12eb808dbc7ed8d7ad2)) + +## 1.3.1 + +### 🐛 Bug Fixes + +- fix(merge_requests): Reinstate missing request option ([!2601](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2601)) by [Heidi Berry](https://gitlab.com/heidi.berry) + + + +## [1.3.1](https://gitlab.com/gitlab-org/api/client-go/compare/v1.3.0...v1.3.1) (2025-12-01) + + +### Bug Fixes + +* **merge_requests:** Reinstate missing request option ([f5f912d](https://gitlab.com/gitlab-org/api/client-go/commit/f5f912ddc2dfb1af88de8710bde783f3f7ccd7c2)) + +## 1.3.0 + +### 🚀 Features + +- feat(credentials): Add support for revoking group PATs, listing/deleting group SSH keys ([!2594](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2594)) by [Heidi Berry](https://gitlab.com/heidi.berry) + +### 🔄 Other Changes + +- refactor: moved comments to interface ([!2595](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2595)) by [Zubeen](https://gitlab.com/syedzubeen) +- refactor(users): moved comments to interface ([!2596](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2596)) by [Zubeen](https://gitlab.com/syedzubeen) +- refactor: moved comments to 
interface ([!2599](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2599)) by [Zubeen](https://gitlab.com/syedzubeen) +- Simplify more request functions, introducing NoEscape ([!2592](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2592)) by [Timo Furrer](https://gitlab.com/timofurrer) + + + +# [1.3.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.2.0...v1.3.0) (2025-11-30) + + +### Features + +* **credentials:** Add support for revoking group PATs, listing/deleting group SSH keys ([3439f4f](https://gitlab.com/gitlab-org/api/client-go/commit/3439f4f0345b97dea0abf926ecaac9d3a7eb6769)) + +## 1.2.0 + +### 🚀 Features + +- feat(credentials): Add support for listing all SaaS enterprise user personal access tokens ([!2593](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2593)) by [Heidi Berry](https://gitlab.com/heidi.berry) + +### 🔄 Other Changes + +- Code Refactor Using Request Handlers - 10 ([!2525](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2525)) by [Yashesvinee V](https://gitlab.com/yashes7516) + + + +# [1.2.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.1.0...v1.2.0) (2025-11-27) + + +### Features + +* **credentials:** Add support for listing all SaaS enterprise user personal access tokens ([3697779](https://gitlab.com/gitlab-org/api/client-go/commit/369777938e435b043e37460ff1feffedd84b7dd1)) + +## 1.1.0 + +### 🚀 Features + +- feat(service_account): allow providing email when update a Service Account ([!2589](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2589)) by [kilianpaquier](https://gitlab.com/u.kilianpaquier) + +### 🔄 Other Changes + +- Bump dependencies ([!2591](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2591)) by [Timo Furrer](https://gitlab.com/timofurrer) +- chore(deps): update docker docker tag to v29 ([!2586](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2586)) by [GitLab Dependency 
Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [1.1.0](https://gitlab.com/gitlab-org/api/client-go/compare/v1.0.1...v1.1.0) (2025-11-26) + + +### Features + +* **service_account:** allow providing email when update a Service Account ([324d080](https://gitlab.com/gitlab-org/api/client-go/commit/324d0806a5cd8cb6ae7f68381d09cf5e2a31a0cc)) + +## 1.0.1 + +### 🐛 Bug Fixes + +- fix: fix ReviewerID() and let it accept int64 ([!2587](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2587)) by [Ilya Savitsky](https://gitlab.com/ipsavitsky234) + + + +## [1.0.1](https://gitlab.com/gitlab-org/api/client-go/compare/v1.0.0...v1.0.1) (2025-11-25) + + +### Bug Fixes + +* fix ReviewerID() and let it accept int64 ([6a6d439](https://gitlab.com/gitlab-org/api/client-go/commit/6a6d43952b70191358e7b726eff4f7f24a0f7ff6)) + +## 1.0.0 + +### 💥 Breaking Changes + +- Release client-go 1.0 ([!2575](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2575)) by [Patrick Rice](https://gitlab.com/PatrickRice) + + + +# [1.0.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.161.1...v1.0.0) (2025-11-24) + + +* Merge branch 'release-client-1.0' into 'main' ([f06b8c2](https://gitlab.com/gitlab-org/api/client-go/commit/f06b8c2cb4446e2e76a13bbc707c64e22a64d477)) + + +### Bug Fixes + +* **issues:** use AssigneeIDValue for ListProjectIssuesOptions.AssigneeID ([1dcb219](https://gitlab.com/gitlab-org/api/client-go/commit/1dcb219c343bc5b5622ff49933199c003a231bd4)) + + +### Features + +* **ListOptions:** Update ListOptions to use composition instead of aliasing ([60beef3](https://gitlab.com/gitlab-org/api/client-go/commit/60beef36d0f93a7dc66749f55d98defbc1b3fe28)) + + +### BREAKING CHANGES + +* Release 1.0 +* **ListOptions:** ListOptions implementation changed from aliasing to composition +Changelog: Improvements + +## 0.161.1 + +### 🐛 Bug Fixes + +- fix(users): Fix a bug where error parsing causes user blocking to not function properly 
([!2584](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2584)) by [Patrick Rice](https://gitlab.com/PatrickRice) + + + +## [0.161.1](https://gitlab.com/gitlab-org/api/client-go/compare/v0.161.0...v0.161.1) (2025-11-24) + + +### Bug Fixes + +* **users:** Fix a bug where error parsing causes user blocking to not function properly ([2ad5506](https://gitlab.com/gitlab-org/api/client-go/commit/2ad55065d624d27d1f539a3c41489989b9a0d036)) + +## 0.161.0 + +### 🚀 Features + +- fix: return detailed API errors for BlockUser instead of generic LDAP message ([!2581](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2581)) by [Zubeen](https://gitlab.com/syedzubeen) + + + +# [0.161.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.160.2...v0.161.0) (2025-11-24) + + +### Bug Fixes + +* return detailed API errors for BlockUser instead of generic LDAP message ([2ba9fa6](https://gitlab.com/gitlab-org/api/client-go/commit/2ba9fa6995de6cadf0dae1bf600979b73ee471ce)) + +## 0.160.2 + +### 🐛 Bug Fixes + +- Fix double escaping in paths ([!2583](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2583)) by [Timo Furrer](https://gitlab.com/timofurrer) + + + +## [0.160.2](https://gitlab.com/gitlab-org/api/client-go/compare/v0.160.1...v0.160.2) (2025-11-24) + +## 0.160.1 + +### 🐛 Bug Fixes + +- fix: update input field from "key" to "name" in pipeline schedules to prevent an API error ([!2580](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2580)) by [Zubeen](https://gitlab.com/syedzubeen) + +### 🔄 Other Changes + +- Code Refactor Using Request Handlers - 9 ([!2524](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2524)) by [Yashesvinee V](https://gitlab.com/yashes7516) +- Code Refactor Using Request Handlers - 7 ([!2522](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2522)) by [Yashesvinee V](https://gitlab.com/yashes7516) +- Code Refactor Using Request Handlers - 5 
([!2518](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2518)) by [Yashesvinee V](https://gitlab.com/yashes7516) +- Code Refactor Using Request Handlers - 2 ([!2515](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2515)) by [Yashesvinee V](https://gitlab.com/yashes7516) +- Code Refactor Using Request Handlers - 4 ([!2517](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2517)) by [Yashesvinee V](https://gitlab.com/yashes7516) +- Code Refactor Using Request Handlers - 3 ([!2516](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2516)) by [Yashesvinee V](https://gitlab.com/yashes7516) +- chore(deps): update module github.com/godbus/dbus/v5 to v5.2.0 ([!2576](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2576)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update golangci/golangci-lint docker tag to v2.6.2 ([!2577](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2577)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- Code Refactor Using Request Handlers - 1 ([!2514](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2514)) by [Yashesvinee V](https://gitlab.com/yashes7516) + + + +## [0.160.1](https://gitlab.com/gitlab-org/api/client-go/compare/v0.160.0...v0.160.1) (2025-11-19) + + +### Bug Fixes + +* update input field from "key" to "name" in pipeline schedules to prevent an API error ([062133f](https://gitlab.com/gitlab-org/api/client-go/commit/062133f0c24b32ca6ae64a9f7b80fd3fa7e58256)) + +## 0.160.0 + +### 🚀 Features + +- feat (project_members): Add show_seat_info option to ProjectMembers ([!2572](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2572)) by [Zubeen](https://gitlab.com/syedzubeen) + +### 🔄 Other Changes + +- refactor: fix modernize lint issues ([!2574](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2574)) by [Oleksandr Redko](https://gitlab.com/alexandear) +- 
chore(deps): update module cel.dev/expr to v0.25.1 ([!2573](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2573)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- docs(no-release): format examples, update pkg doc url ([!2543](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2543)) by [Oleksandr Redko](https://gitlab.com/alexandear) + + + +# [0.160.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.159.0...v0.160.0) (2025-11-12) + +## 0.159.0 + +### 🚀 Features + +- feat(integrations): add group integration API endpoints for Jira ([!2563](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2563)) by [Harsh Rai](https://gitlab.com/harshrai654) + +### 🔄 Other Changes + +- chore(deps): update golangci/golangci-lint docker tag to v2.6.1 ([!2564](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2564)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [0.159.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.158.0...v0.159.0) (2025-11-04) + + +### Features + +* **integrations:** add group integration API endpoints for Jira ([09e18ee](https://gitlab.com/gitlab-org/api/client-go/commit/09e18ee598bb7805ac8221f6a05426b1785f9011)) + +## 0.158.0 + +### 🚀 Features + +- Add support to send variables for GraphQL queries ([!2562](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2562)) by [rafasf](https://gitlab.com/rafasf) + +### 🔄 Other Changes + +- chore(deps): update module cel.dev/expr to v0.25.0 ([!2560](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2560)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(no-release): standardize GitLab name capitalization ([!2551](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2551)) by [Zubeen](https://gitlab.com/syedzubeen) +- chore(deps): update golangci/golangci-lint docker tag to v2.6.0 
([!2558](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2558)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- refactor: moved comments to interface 2 ([!2557](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2557)) by [Zubeen](https://gitlab.com/syedzubeen) +- refactor: moved comments to interface ([!2556](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2556)) by [Zubeen](https://gitlab.com/syedzubeen) +- refactor(test): avoid panic in tests with goroutines ([!2553](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2553)) by [Oleksandr Redko](https://gitlab.com/alexandear) + + + +# [0.158.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.157.1...v0.158.0) (2025-11-03) + +## 0.157.1 + +### 🐛 Bug Fixes + +- fix(protected_packages): fix invalid types ([!2554](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2554)) by [Ruwen Schwedewsky](https://gitlab.com/RuwenSchwedewskySinch) + +### 🔄 Other Changes + +- chore: Update review instructions for mentioning GitLab ([!2552](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2552)) by [Zubeen](https://gitlab.com/syedzubeen) +- Implement do function to reduce boilerplate ([!2550](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2550)) by [Timo Furrer](https://gitlab.com/timofurrer) +- refactor(test): migrate to testify assertions 4 ([!2548](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2548)) by [Zubeen](https://gitlab.com/syedzubeen) +- refactor(test): migrate to testify assertions 2 ([!2546](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2546)) by [Zubeen](https://gitlab.com/syedzubeen) +- refactor(test): migrate to testify assertions ([!2545](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2545)) by [Zubeen](https://gitlab.com/syedzubeen) +- refactor(test): migrate to testify assertions 5 
([!2549](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2549)) by [Zubeen](https://gitlab.com/syedzubeen) +- test: add unit tests for cluster agents and deployments ([!2499](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2499)) by [Zubeen](https://gitlab.com/syedzubeen) +- refactor(test): migrate to testify assertions 3 ([!2547](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2547)) by [Zubeen](https://gitlab.com/syedzubeen) +- Fix: Helper Functions for Code Refactoring ([!2544](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2544)) by [Yashesvinee V](https://gitlab.com/yashes7516) +- test: adds UT for formatPackageURL ([!2527](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2527)) by [Zubeen](https://gitlab.com/syedzubeen) +- test: adds UT for getEpicLinks ([!2526](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2526)) by [Zubeen](https://gitlab.com/syedzubeen) +- test: add test for ApproveOrRejectProjectDeployment ([!2498](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2498)) by [Zubeen](https://gitlab.com/syedzubeen) +- test: adds UTs for packages ([!2529](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2529)) by [Zubeen](https://gitlab.com/syedzubeen) + + + +## [0.157.1](https://gitlab.com/gitlab-org/api/client-go/compare/v0.157.0...v0.157.1) (2025-10-28) + + +### Bug Fixes + +* **no-release:** Helper Functions for Code Refactoring ([6feffea](https://gitlab.com/gitlab-org/api/client-go/commit/6feffea6696a8e333fd0811eee8501e58ba743e3)) +* **protected_packages:** fix invalid types ([c09943b](https://gitlab.com/gitlab-org/api/client-go/commit/c09943b0dde510dca32a2544a9c0f75f85943d96)) + +## 0.157.0 + +### 🚀 Features + +- Add merge requests commit api ([!2539](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2539)) by [Ilya Savitsky](https://gitlab.com/ipsavitsky234) + +### 🔄 Other Changes + +- test: adds missing UTs for notifications 
([!2528](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2528)) by [Zubeen](https://gitlab.com/syedzubeen) +- chore: Update review instructions ([!2537](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2537)) by [Patrick Rice](https://gitlab.com/PatrickRice) +- chore(no-release): Fix godoc comments; enable godoclint ([!2535](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2535)) by [Oleksandr Redko](https://gitlab.com/alexandear) + + + +# [0.157.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.156.0...v0.157.0) (2025-10-13) + +## 0.156.0 + +### 🚀 Features + +- feat(api): add support for test report summary api ([!2487](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2487)) by [Daniela Filipe Bento](https://gitlab.com/danifbento) + + + +# [0.156.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.155.0...v0.156.0) (2025-10-10) + + +### Features + +* **api:** add support for test report summary api ([8a0c6dd](https://gitlab.com/gitlab-org/api/client-go/commit/8a0c6dde10a4c9c034274a439eaa060dc6e40995)) + +## 0.155.0 + +### 🚀 Features + +- feat(group_relations_export): Added Group Relations API integration ([!2508](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2508)) by [Jose Gabriel Companioni Benitez](https://gitlab.com/elC0mpa) + +### 🔄 Other Changes + +- chore: use local protoc plugin with buf ([!2536](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2536)) by [Timo Furrer](https://gitlab.com/timofurrer) +- chore(no-release): Change generated file comment ([!2532](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2532)) by [Oleksandr Redko](https://gitlab.com/alexandear) +- docs(no-release): Fix the comment for EnvVarGitLabContext ([!2533](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2533)) by [Oleksandr Redko](https://gitlab.com/alexandear) +- feat(client_options): Added unit tests 
([!2510](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2510)) by [Jose Gabriel Companioni Benitez](https://gitlab.com/elC0mpa) + + + +# [0.155.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.154.0...v0.155.0) (2025-10-09) + + +### Features + +* **client_options:** Added unit tests ([c148031](https://gitlab.com/gitlab-org/api/client-go/commit/c14803189aa47a0cc9e64e9b455b93e6d4c4e4b9)) +* **group_relations_export:** Added Group Relations API integration ([956e039](https://gitlab.com/gitlab-org/api/client-go/commit/956e03950d6bc03c56fa1ea4c5d6e06bfd0b264f)) + +## 0.154.0 + +### 🚀 Features + +- feat(protected_packages): Add api integration ([!2520](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2520)) by [Jose Gabriel Companioni Benitez](https://gitlab.com/elC0mpa) + + + +# [0.154.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.153.0...v0.154.0) (2025-10-08) + + +### Features + +* **protected_packages:** Add api integration ([2de15c7](https://gitlab.com/gitlab-org/api/client-go/commit/2de15c7875e232b0b0b1e5e5bb8e184cd11d0774)) + +## 0.153.0 + +### 🚀 Features + +- feat(project_Statistics): Added api integration ([!2512](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2512)) by [Jose Gabriel Companioni Benitez](https://gitlab.com/elC0mpa) + +### 🔄 Other Changes + +- refactor: moved comments to interface ([!2509](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2509)) by [ajey muthiah](https://gitlab.com/ajeymuthiah) +- chore(no-release): Helper Functions for Code Refactoring ([!2503](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2503)) by [Yashesvinee V](https://gitlab.com/yashes7516) +- Add t.Parallel() to all tests and enable linters ([!2513](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2513)) by [Timo Furrer](https://gitlab.com/timofurrer) +- ci: Remove the `commitlint` job. 
([!2511](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2511)) by [Florian Forster](https://gitlab.com/fforster) +- refactor: moved comments to interface ([!2507](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2507)) by [ajey muthiah](https://gitlab.com/ajeymuthiah) + + + +# [0.153.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.152.0...v0.153.0) (2025-10-08) + + +### Features + +* **project_Statistics:** Added api integration ([75b5a03](https://gitlab.com/gitlab-org/api/client-go/commit/75b5a03010a39d5353c975a558fda0b6f00cb697)) + +## 0.152.0 + +### 🚀 Features + +- feat(api): add api support for listing users who starred a project ([!2486](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2486)) by [ajey muthiah](https://gitlab.com/ajeymuthiah) + +### 🔄 Other Changes + +- chore(no-release): Update Duo Review Instructions ([!2502](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2502)) by [Patrick Rice](https://gitlab.com/PatrickRice) +- feat(model_registry_api): Added api integration ([!2501](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2501)) by [Jose Gabriel Companioni Benitez](https://gitlab.com/elC0mpa) +- feat(no-release): Add AGENTS.md file ([!2479](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2479)) by [Patrick Rice](https://gitlab.com/PatrickRice) +- chore(no-release): Disable dependency scanning on personal forks ([!2500](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2500)) by [Patrick Rice](https://gitlab.com/PatrickRice) + + + +# [0.152.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.151.0...v0.152.0) (2025-10-06) + + +### Features + +* **api:** add api support for listing users who starred a project ([0cdb4ce](https://gitlab.com/gitlab-org/api/client-go/commit/0cdb4ce5399b43e47bf120a90b16d00c022e194c)) +* **model_registry_api:** Added api integration 
([065dd63](https://gitlab.com/gitlab-org/api/client-go/commit/065dd639bc8bd0f44cab4d92dbe3ea7f134b913f)) +* **no-release:** Add AGENTS.md file ([b9febab](https://gitlab.com/gitlab-org/api/client-go/commit/b9febab3181c3f87edd1fd99b5e596f76bc8b7cc)) + +## 0.151.0 + +### 🚀 Features + +- feat(api): add api support for delete enterprise user ([!2492](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2492)) by [ajey muthiah](https://gitlab.com/ajeymuthiah) + +### 🔄 Other Changes + +- docs(no-release): Make it easier to find the docs on issues ([!2497](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2497)) by [Heidi Berry](https://gitlab.com/heidi.berry) + + + +# [0.151.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.150.0...v0.151.0) (2025-10-04) + + +### Features + +* **api:** add api support for delete enterprise user ([36ca8ab](https://gitlab.com/gitlab-org/api/client-go/commit/36ca8ab7672c352a073d59dacae3d763d4089abb)) + +## 0.150.0 + +### 🚀 Features + +- feat: add Project Aliases API support ([!2493](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2493)) by [Yashesvinee V](https://gitlab.com/yashes7516) + +### 🔄 Other Changes + +- chore(deps): update module buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go to v1.36.10-20250912141014-52f32327d4b0.1 ([!2495](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2495)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- chore(deps): update module github.com/danieljoos/wincred to v1.2.3 ([!2494](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2494)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) + + + +# [0.150.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.149.0...v0.150.0) (2025-10-03) + + +### Features + +* add Project Aliases API support ([4ece88e](https://gitlab.com/gitlab-org/api/client-go/commit/4ece88e6a8cfa0f53e68184b2905d4c2fb6e857a)) + +## 0.149.0 + +### 🚀 Features + 
+- feat(no-release): Add dependency scanning ([!2480](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2480)) by [Patrick Rice](https://gitlab.com/PatrickRice) + +### 🔄 Other Changes + +- ci(semantic-release): migrate to `@gitlab/semantic-release-merge-request-analyzer` ([!2490](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2490)) by [Florian Forster](https://gitlab.com/fforster) +- ci: add the `autolabels` job ([!2489](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2489)) by [Florian Forster](https://gitlab.com/fforster) +- chore(deps): update module google.golang.org/protobuf to v1.36.10 ([!2488](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2488)) by [GitLab Dependency Bot](https://gitlab.com/gitlab-dependency-update-bot) +- refactor(no-release): added tests for delete project hook method ([!2482](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2482)) by [Jose Gabriel Companioni Benitez](https://gitlab.com/elC0mpa) +- docs(no-release): Add guide for adding new APIs and issue templates ([!2478](https://gitlab.com/gitlab-org/api/client-go/-/merge_requests/2478)) by [Heidi Berry](https://gitlab.com/heidi.berry) + + + +# [0.149.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.148.1...v0.149.0) (2025-10-02) + + +### Features + +* **no-release:** Add dependency scanning ([8b0ee10](https://gitlab.com/gitlab-org/api/client-go/commit/8b0ee10acb8adceb5d34be2165b7d587b1e42e49)) + +## [0.148.1](https://gitlab.com/gitlab-org/api/client-go/compare/v0.148.0...v0.148.1) (2025-09-26) + + +### Bug Fixes + +* label unmarshaling for `BasicMergeRequest` list operations ([e80c453](https://gitlab.com/gitlab-org/api/client-go/commit/e80c453aa6a5a265ec8748ae3f3f761a70f4470e)) + +# [0.148.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.147.1...v0.148.0) (2025-09-23) + + +### Features + +* **ResourceGroup:** add `newest_ready_first` to resource group `process_mode` 
([fc8f743](https://gitlab.com/gitlab-org/api/client-go/commit/fc8f7431da4ca8594723105473687e8f1378df2b)) + +## [0.147.1](https://gitlab.com/gitlab-org/api/client-go/compare/v0.147.0...v0.147.1) (2025-09-22) + + +### Bug Fixes + +* **client:** use default retry policy from retryablehttp ([2a72511](https://gitlab.com/gitlab-org/api/client-go/commit/2a725113118608712f668b159ca2dab11f4e588e)) + +# [0.147.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.146.0...v0.147.0) (2025-09-22) + + +### Features + +* **Project:** add resource_group_default_process_mode ([7804faf](https://gitlab.com/gitlab-org/api/client-go/commit/7804fafa18cc15fec8a0886a081bf3311d72eb1f)) + +# [0.146.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.145.0...v0.146.0) (2025-09-18) + + +### Features + +* **pipelines:** Add compile-time type-safe pipeline inputs support ([4b30e60](https://gitlab.com/gitlab-org/api/client-go/commit/4b30e60260e4f06e7684352693aac49abd748579)), closes [gitlab-org/api/client-go#2154](https://gitlab.com/gitlab-org/api/client-go/issues/2154) +* **PipelinesService:** Add support for pipeline inputs with type validation ([ab3056f](https://gitlab.com/gitlab-org/api/client-go/commit/ab3056f403ec0268e14b312de3f5b51b115ad97a)), closes [gitlab-org/api/client-go#2154](https://gitlab.com/gitlab-org/api/client-go/issues/2154) +* **PipelineTriggersService:** Add support for pipeline inputs to trigger API ([9ad770e](https://gitlab.com/gitlab-org/api/client-go/commit/9ad770e49e59b2a41c665dfc4781f3b56650e813)), closes [gitlab-org/api/client-go#2154](https://gitlab.com/gitlab-org/api/client-go/issues/2154) + +# [0.145.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.144.1...v0.145.0) (2025-09-15) + + +### Features + +* Add missing created_by field to ProjectMembers and GroupMembers ([5348e01](https://gitlab.com/gitlab-org/api/client-go/commit/5348e01913c358c53bdd3da46b069713273d6802)) + +## 
[0.144.1](https://gitlab.com/gitlab-org/api/client-go/compare/v0.144.0...v0.144.1) (2025-09-13) + +# [0.144.0](https://gitlab.com/gitlab-org/api/client-go/compare/v0.143.3...v0.144.0) (2025-09-12) + + +### Features + +* **client:** add http.RoundTripper Middleware Configuration Option to Client ([88f9d10](https://gitlab.com/gitlab-org/api/client-go/commit/88f9d1055acbd5e060ab13947b856ccc3a03da6f)) + ## [0.143.3](https://gitlab.com/gitlab-org/api/client-go/compare/v0.143.2...v0.143.3) (2025-09-10) ## [0.143.2](https://gitlab.com/gitlab-org/api/client-go/compare/v0.143.1...v0.143.2) (2025-09-09) diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/CONTRIBUTING.md b/vendor/gitlab.com/gitlab-org/api/client-go/CONTRIBUTING.md index fb7d5c273d..550771cc13 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/CONTRIBUTING.md +++ b/vendor/gitlab.com/gitlab-org/api/client-go/CONTRIBUTING.md @@ -23,7 +23,7 @@ this project only supports what is in the public API docs. ### Use community fork to contribute To contribute to this project we recommend that you use the -[community fork](https://gitlab.com/gitlab-community/api/client-go). +[community fork](https://gitlab.com/gitlab-community/gitlab-org/api/client-go). Have a look at the [community fork README](https://gitlab.com/gitlab-community#gitlab-community-forks) to learn more about what it is and why you should prefer it over @@ -43,16 +43,24 @@ New `struct` fields or methods should be placed (as much as possible) in the sam order as the ordering used in the public API docs. The idea is that this makes it easier to find things. -### Setting up your local development environment to contribute +## Setting up your local development environment to contribute 1. Install dependencies: + ```sh make setup ``` + 1. Make your changes on your feature branch in the community fork or your personal fork 1. Run the reviewable command, which tests, lints and formats the code: + ```sh make reviewable ``` + 1. 
Push your feature branch upstream 1. Open up your merge request + +## Development Guides + +For more detailed advice on adding new features, please see our dedicated [guide for adding new APIs or Endpoints](./docs/guides/AddingAPISupport.md). diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/Makefile b/vendor/gitlab.com/gitlab-org/api/client-go/Makefile index 1ae79c8868..9568285685 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/Makefile +++ b/vendor/gitlab.com/gitlab-org/api/client-go/Makefile @@ -10,7 +10,7 @@ reviewable: setup generate fmt lint test ## Run before committing. fmt: ## Format code @buf format -w - @gofumpt -l -w *.go testing/*.go examples/*.go + @gofumpt -l -w *.go testing/*.go lint: ## Run linter @golangci-lint run @@ -37,3 +37,18 @@ clean: ## Remove generated files test: ## Run tests go test ./... -race + +test-integration: ## Run integration tests + go test ./... -race -tags=integration + +testacc-up: ## Launch a GitLab instance. + GITLAB_TOKEN=$(GITLAB_TOKEN) $(CONTAINER_COMPOSE_ENGINE) up -d $(SERVICE) + GITLAB_BASE_URL=$(GITLAB_BASE_URL) GITLAB_TOKEN=$(GITLAB_TOKEN) ./scripts/await_healthy.sh + +testacc-down: ## Teardown a GitLab instance. + $(CONTAINER_COMPOSE_ENGINE) down --volumes + +SERVICE ?= gitlab-ee-no-license +GITLAB_TOKEN ?= glpat-ACCTEST1234567890123 +GITLAB_BASE_URL ?= http://127.0.0.1:8095/api/v4 +CONTAINER_COMPOSE_ENGINE ?= $(shell docker compose version >/dev/null 2>&1 && echo 'docker compose' || echo 'docker-compose') diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/README.md b/vendor/gitlab.com/gitlab-org/api/client-go/README.md index d8dc8d1a02..13ba22e476 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/README.md +++ b/vendor/gitlab.com/gitlab-org/api/client-go/README.md @@ -7,6 +7,12 @@ A GitLab API client enabling Go programs to interact with GitLab in a simple and [[_TOC_]] +## `client-go` 1.0 Has Been Released! 
+ +We're pleased to announce that version 1.0 has been released for the `client-go` library, which comes with +a backwards-compatibility guarantee. For more details of our major release plans moving forward, and for +migration guides, please see the [Migration Guides](#migration-guides) section below! + ## Usage ```go @@ -20,7 +26,7 @@ users: ```go git, err := gitlab.NewClient("yourtokengoeshere") if err != nil { - log.Fatalf("Failed to create client: %v", err) + log.Fatalf("Failed to create client: %v", err) } users, _, err := git.Users.ListUsers(&gitlab.ListUsersOptions{}) ``` @@ -80,10 +86,10 @@ func main() { // Add a new snippet s := &gitlab.CreateProjectSnippetOptions{ - Title: gitlab.Ptr("Dummy Snippet"), - FileName: gitlab.Ptr("snippet.go"), - Content: gitlab.Ptr("package main...."), - Visibility: gitlab.Ptr(gitlab.PublicVisibility), + Title: gitlab.Ptr("Dummy Snippet"), + FileName: gitlab.Ptr("snippet.go"), + Content: gitlab.Ptr("package main...."), + Visibility: gitlab.Ptr(gitlab.PublicVisibility), } _, _, err = git.ProjectSnippets.CreateSnippet(project.ID, s) if err != nil { @@ -225,7 +231,7 @@ func main() { } ``` -For complete usage of go-gitlab, see the full [package docs](https://godoc.org/gitlab.com/gitlab-org/api/client-go). +For complete usage of go-gitlab, see the full [package docs](https://pkg.go.dev/gitlab.com/gitlab-org/api/client-go). ## Installation @@ -244,22 +250,22 @@ You can use them like this: ```go func TestMockExample(t *testing.T) { - client := gitlabtesting.NewTestClient(t) - opts := &gitlab.ListAgentsOptions{} - expectedResp := &gitlab.Response{} - pid := 1 - // Setup expectations - client.MockClusterAgents.EXPECT(). - ListAgents(pid, opts). - Return([]*gitlab.Agent{{ID: 1}}, expectedResp, nil) - - // Use the client in your test - // You'd probably call your own code here that gets the client injected. - // You can also retrieve a `gitlab.Client` object from `client.Client`. 
- agents, resp, err := client.ClusterAgents.ListAgents(pid, opts) - assert.NoError(t, err) - assert.Equal(t, expectedResp, resp) - assert.Len(t, agents, 1) + client := gitlabtesting.NewTestClient(t) + opts := &gitlab.ListAgentsOptions{} + expectedResp := &gitlab.Response{} + pid := 1 + // Setup expectations + client.MockClusterAgents.EXPECT(). + ListAgents(pid, opts). + Return([]*gitlab.Agent{{ID: 1}}, expectedResp, nil) + + // Use the client in your test + // You'd probably call your own code here that gets the client injected. + // You can also retrieve a `gitlab.Client` object from `client.Client`. + agents, resp, err := client.ClusterAgents.ListAgents(pid, opts) + assert.NoError(t, err) + assert.Equal(t, expectedResp, resp) + assert.Len(t, agents, 1) } ``` @@ -293,6 +299,13 @@ and the unit test matrix in [`.gitlab-ci.yml`](/.gitlab-ci.yml). You may also use https://endoflife.date/go to quickly discover the supported Go versions. +### Migration Guides + +`client-go` will release a major update roughly once every 6 months to align to the Go release cycle. Each +major release will publish a migration guide to help users migrate from the previous major release. + +- [Migration Guide for 0.X -> 1.X](docs/release-1.0-migration.md) + ## Contributing Contributions are always welcome. For more information, check out the diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go b/vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go index 1c2ef31c70..b37cd94ddd 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go @@ -17,21 +17,80 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( AccessRequestsServiceInterface interface { + // ListProjectAccessRequests gets a list of access requests + // viewable by the authenticated user. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/access_requests/#list-access-requests-for-a-group-or-project + // ListProjectAccessRequests gets a list of access requests + // viewable by the authenticated user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/access_requests/#list-access-requests-for-a-group-or-project ListProjectAccessRequests(pid any, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) + + // ListGroupAccessRequests gets a list of access requests + // viewable by the authenticated user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/access_requests/#list-access-requests-for-a-group-or-project + + // ListGroupAccessRequests gets a list of access requests + // viewable by the authenticated user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/access_requests/#list-access-requests-for-a-group-or-project ListGroupAccessRequests(gid any, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) + + // RequestProjectAccess requests access for the authenticated user + // to a group or project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/access_requests/#request-access-to-a-group-or-project + + // RequestProjectAccess requests access for the authenticated user + // to a group or project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/access_requests/#request-access-to-a-group-or-project RequestProjectAccess(pid any, options ...RequestOptionFunc) (*AccessRequest, *Response, error) + + // RequestGroupAccess requests access for the authenticated user + // to a group or project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/access_requests/#request-access-to-a-group-or-project RequestGroupAccess(gid any, options ...RequestOptionFunc) (*AccessRequest, *Response, error) - ApproveProjectAccessRequest(pid any, user int, opt *ApproveAccessRequestOptions, options ...RequestOptionFunc) (*AccessRequest, *Response, error) - ApproveGroupAccessRequest(gid any, user int, opt *ApproveAccessRequestOptions, options ...RequestOptionFunc) (*AccessRequest, *Response, error) - DenyProjectAccessRequest(pid any, user int, options ...RequestOptionFunc) (*Response, error) - DenyGroupAccessRequest(gid any, user int, options ...RequestOptionFunc) (*Response, error) + + // ApproveProjectAccessRequest approves an access request for the given user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/access_requests/#approve-an-access-request + ApproveProjectAccessRequest(pid any, user int64, opt *ApproveAccessRequestOptions, options ...RequestOptionFunc) (*AccessRequest, *Response, error) + + // ApproveGroupAccessRequest approves an access request for the given user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/access_requests/#approve-an-access-request + ApproveGroupAccessRequest(gid any, user int64, opt *ApproveAccessRequestOptions, options ...RequestOptionFunc) (*AccessRequest, *Response, error) + + // DenyProjectAccessRequest denies an access request for the given user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/access_requests/#deny-an-access-request + DenyProjectAccessRequest(pid any, user int64, options ...RequestOptionFunc) (*Response, error) + + // DenyGroupAccessRequest denies an access request for the given user. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/access_requests/#deny-an-access-request + DenyGroupAccessRequest(gid any, user int64, options ...RequestOptionFunc) (*Response, error) } // AccessRequestsService handles communication with the project/group @@ -50,7 +109,7 @@ var _ AccessRequestsServiceInterface = (*AccessRequestsService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/access_requests/ type AccessRequest struct { - ID int `json:"id"` + ID int64 `json:"id"` Username string `json:"username"` Name string `json:"name"` State string `json:"state"` @@ -64,110 +123,42 @@ type AccessRequest struct { // // GitLab API docs: // https://docs.gitlab.com/api/access_requests/#list-access-requests-for-a-group-or-project -type ListAccessRequestsOptions ListOptions +type ListAccessRequestsOptions struct { + ListOptions +} -// ListProjectAccessRequests gets a list of access requests -// viewable by the authenticated user. -// -// GitLab API docs: -// https://docs.gitlab.com/api/access_requests/#list-access-requests-for-a-group-or-project func (s *AccessRequestsService) ListProjectAccessRequests(pid any, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_requests", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ars []*AccessRequest - resp, err := s.client.Do(req, &ars) - if err != nil { - return nil, resp, err - } - - return ars, resp, nil + return do[[]*AccessRequest](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/access_requests", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// ListGroupAccessRequests gets a list of access requests -// viewable by the authenticated user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/access_requests/#list-access-requests-for-a-group-or-project func (s *AccessRequestsService) ListGroupAccessRequests(gid any, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/access_requests", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ars []*AccessRequest - resp, err := s.client.Do(req, &ars) - if err != nil { - return nil, resp, err - } - - return ars, resp, nil + return do[[]*AccessRequest](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/access_requests", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// RequestProjectAccess requests access for the authenticated user -// to a group or project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/access_requests/#request-access-to-a-group-or-project func (s *AccessRequestsService) RequestProjectAccess(pid any, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_requests", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - ar := new(AccessRequest) - resp, err := s.client.Do(req, ar) - if err != nil { - return nil, resp, err - } - - return ar, resp, nil + return do[*AccessRequest](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/access_requests", ProjectID{pid}), + withRequestOpts(options...), + ) } -// RequestGroupAccess requests access for the authenticated user -// to a group or project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/access_requests/#request-access-to-a-group-or-project func (s *AccessRequestsService) RequestGroupAccess(gid any, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/access_requests", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - ar := new(AccessRequest) - resp, err := s.client.Do(req, ar) - if err != nil { - return nil, resp, err - } - - return ar, resp, nil + return do[*AccessRequest](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/access_requests", GroupID{gid}), + withRequestOpts(options...), + ) } // ApproveAccessRequestOptions represents the available @@ -179,90 +170,38 @@ type ApproveAccessRequestOptions struct { AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` } -// ApproveProjectAccessRequest approves an access request for the given user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/access_requests/#approve-an-access-request -func (s *AccessRequestsService) ApproveProjectAccessRequest(pid any, user int, opt *ApproveAccessRequestOptions, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_requests/%d/approve", PathEscape(project), user) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ar := new(AccessRequest) - resp, err := s.client.Do(req, ar) - if err != nil { - return nil, resp, err - } - - return ar, resp, nil +func (s *AccessRequestsService) ApproveProjectAccessRequest(pid any, user int64, opt *ApproveAccessRequestOptions, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { + return do[*AccessRequest](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/access_requests/%d/approve", ProjectID{pid}, user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// ApproveGroupAccessRequest approves an access request for the given user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/access_requests/#approve-an-access-request -func (s *AccessRequestsService) ApproveGroupAccessRequest(gid any, user int, opt *ApproveAccessRequestOptions, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/access_requests/%d/approve", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ar := new(AccessRequest) - resp, err := s.client.Do(req, ar) - if err != nil { - return nil, resp, err - } - - return ar, resp, nil +func (s *AccessRequestsService) ApproveGroupAccessRequest(gid any, user int64, opt *ApproveAccessRequestOptions, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { + return do[*AccessRequest](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/access_requests/%d/approve", GroupID{gid}, user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DenyProjectAccessRequest denies an access request for the given user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/access_requests/#deny-an-access-request -func (s *AccessRequestsService) DenyProjectAccessRequest(pid any, user int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/access_requests/%d", PathEscape(project), user) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *AccessRequestsService) DenyProjectAccessRequest(pid any, user int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/access_requests/%d", ProjectID{pid}, user), + withRequestOpts(options...), + ) + return resp, err } -// DenyGroupAccessRequest denies an access request for the given user. -// -// GitLab API docs: -// https://docs.gitlab.com/api/access_requests/#deny-an-access-request -func (s *AccessRequestsService) DenyGroupAccessRequest(gid any, user int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/access_requests/%d", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *AccessRequestsService) DenyGroupAccessRequest(gid any, user int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/access_requests/%d", GroupID{gid}, user), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/admin_compliance_policy_settings.go b/vendor/gitlab.com/gitlab-org/api/client-go/admin_compliance_policy_settings.go new file mode 100644 index 0000000000..2da145eb83 --- /dev/null +++ 
b/vendor/gitlab.com/gitlab-org/api/client-go/admin_compliance_policy_settings.go @@ -0,0 +1,85 @@ +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "net/http" +) + +type ( + AdminCompliancePolicySettingsServiceInterface interface { + // GetCompliancePolicySettings gets the current security policy settings for the GitLab instance. + // + // GitLab API docs: + // https://docs.gitlab.com/api/compliance_policy_settings/#get-security-policy-settings + GetCompliancePolicySettings(options ...RequestOptionFunc) (*AdminCompliancePolicySettings, *Response, error) + + // UpdateCompliancePolicySettings updates the security policy settings for the GitLab instance. + // + // GitLab API docs: + // https://docs.gitlab.com/api/compliance_policy_settings/#update-security-policy-settings + UpdateCompliancePolicySettings(opt *UpdateAdminCompliancePolicySettingsOptions, options ...RequestOptionFunc) (*AdminCompliancePolicySettings, *Response, error) + } + + // AdminCompliancePolicySettingsService handles communication with the + // admin compliance policy settings related methods of the GitLab API. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/compliance_policy_settings/ + AdminCompliancePolicySettingsService struct { + client *Client + } +) + +var _ AdminCompliancePolicySettingsServiceInterface = (*AdminCompliancePolicySettingsService)(nil) + +// AdminCompliancePolicySettings represents the GitLab admin compliance policy settings. +// +// GitLab API docs: +// https://docs.gitlab.com/api/compliance_policy_settings/ +type AdminCompliancePolicySettings struct { + CSPNamespaceID *int64 `json:"csp_namespace_id"` +} + +func (s AdminCompliancePolicySettings) String() string { + return Stringify(s) +} + +func (s *AdminCompliancePolicySettingsService) GetCompliancePolicySettings(options ...RequestOptionFunc) (*AdminCompliancePolicySettings, *Response, error) { + return do[*AdminCompliancePolicySettings](s.client, + withMethod(http.MethodGet), + withPath("admin/security/compliance_policy_settings"), + withRequestOpts(options...), + ) +} + +// UpdateAdminCompliancePolicySettingsOptions represents the available +// UpdateCompliancePolicySettings() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/compliance_policy_settings/#update-security-policy-settings +type UpdateAdminCompliancePolicySettingsOptions struct { + CSPNamespaceID *int64 `url:"csp_namespace_id,omitempty" json:"csp_namespace_id,omitempty"` +} + +func (s *AdminCompliancePolicySettingsService) UpdateCompliancePolicySettings(opt *UpdateAdminCompliancePolicySettingsOptions, options ...RequestOptionFunc) (*AdminCompliancePolicySettings, *Response, error) { + return do[*AdminCompliancePolicySettings](s.client, + withMethod(http.MethodPut), + withPath("admin/security/compliance_policy_settings"), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/alert_management.go b/vendor/gitlab.com/gitlab-org/api/client-go/alert_management.go index c7e8b955c9..c52ba35553 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/alert_management.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/alert_management.go @@ -15,7 +15,6 @@ package gitlab import ( - "fmt" "io" "net/http" "time" @@ -23,10 +22,29 @@ import ( type ( AlertManagementServiceInterface interface { - UploadMetricImage(pid any, alertIID int, content io.Reader, filename string, opt *UploadMetricImageOptions, options ...RequestOptionFunc) (*MetricImage, *Response, error) - ListMetricImages(pid any, alertIID int, opt *ListMetricImagesOptions, options ...RequestOptionFunc) ([]*MetricImage, *Response, error) - UpdateMetricImage(pid any, alertIID int, id int, opt *UpdateMetricImageOptions, options ...RequestOptionFunc) (*MetricImage, *Response, error) - DeleteMetricImage(pid any, alertIID int, id int, options ...RequestOptionFunc) (*Response, error) + // UploadMetricImage uploads a metric image to a project alert. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/alert_management_alerts/#upload-metric-image + UploadMetricImage(pid any, alertIID int64, content io.Reader, filename string, opt *UploadMetricImageOptions, options ...RequestOptionFunc) (*MetricImage, *Response, error) + + // ListMetricImages lists all the metric images for a project alert. + // + // GitLab API docs: + // https://docs.gitlab.com/api/alert_management_alerts/#list-all-metric-images + ListMetricImages(pid any, alertIID int64, opt *ListMetricImagesOptions, options ...RequestOptionFunc) ([]*MetricImage, *Response, error) + + // UpdateMetricImage updates a metric image for a project alert. + // + // GitLab API docs: + // https://docs.gitlab.com/api/alert_management_alerts/#update-a-metric-image + UpdateMetricImage(pid any, alertIID int64, id int64, opt *UpdateMetricImageOptions, options ...RequestOptionFunc) (*MetricImage, *Response, error) + + // DeleteMetricImage deletes a metric image for a project alert. + // + // GitLab API docs: + // https://docs.gitlab.com/api/alert_management_alerts/#delete-a-metric-image + DeleteMetricImage(pid any, alertIID int64, id int64, options ...RequestOptionFunc) (*Response, error) } // AlertManagementService handles communication with the alert management @@ -46,7 +64,7 @@ var _ AlertManagementServiceInterface = (*AlertManagementService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/alert_management_alerts/ type MetricImage struct { - ID int `json:"id"` + ID int64 `json:"id"` CreatedAt *time.Time `json:"created_at"` Filename string `json:"filename"` FilePath string `json:"file_path"` @@ -63,29 +81,14 @@ type UploadMetricImageOptions struct { URLText *string `url:"url_text,omitempty" json:"url_text,omitempty"` } -// UploadMetricImage uploads a metric image to a project alert. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/alert_management_alerts/#upload-metric-image -func (s *AlertManagementService) UploadMetricImage(pid any, alertIID int, content io.Reader, filename string, opt *UploadMetricImageOptions, options ...RequestOptionFunc) (*MetricImage, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/alert_management_alerts/%d/metric_images", PathEscape(project), alertIID) - - req, err := s.client.UploadRequest(http.MethodPost, u, content, filename, UploadFile, opt, options) - if err != nil { - return nil, nil, err - } - - mi := new(MetricImage) - resp, err := s.client.Do(req, mi) - if err != nil { - return nil, resp, err - } - - return mi, resp, nil +func (s *AlertManagementService) UploadMetricImage(pid any, alertIID int64, content io.Reader, filename string, opt *UploadMetricImageOptions, options ...RequestOptionFunc) (*MetricImage, *Response, error) { + return do[*MetricImage](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/alert_management_alerts/%d/metric_images", ProjectID{pid}, alertIID), + withUpload(content, filename, UploadFile), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListMetricImagesOptions represents the available ListMetricImages() options. @@ -96,29 +99,13 @@ type ListMetricImagesOptions struct { ListOptions } -// ListMetricImages lists all the metric images for a project alert. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/alert_management_alerts/#list-metric-images -func (s *AlertManagementService) ListMetricImages(pid any, alertIID int, opt *ListMetricImagesOptions, options ...RequestOptionFunc) ([]*MetricImage, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/alert_management_alerts/%d/metric_images", PathEscape(project), alertIID) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var mis []*MetricImage - resp, err := s.client.Do(req, &mis) - if err != nil { - return nil, resp, err - } - - return mis, resp, nil +func (s *AlertManagementService) ListMetricImages(pid any, alertIID int64, opt *ListMetricImagesOptions, options ...RequestOptionFunc) ([]*MetricImage, *Response, error) { + return do[[]*MetricImage](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/alert_management_alerts/%d/metric_images", ProjectID{pid}, alertIID), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateMetricImageOptions represents the available UpdateMetricImage() options. @@ -130,46 +117,20 @@ type UpdateMetricImageOptions struct { URLText *string `url:"url_text,omitempty" json:"url_text,omitempty"` } -// UpdateMetricImage updates a metric image for a project alert. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/alert_management_alerts/#update-metric-image -func (s *AlertManagementService) UpdateMetricImage(pid any, alertIID int, id int, opt *UpdateMetricImageOptions, options ...RequestOptionFunc) (*MetricImage, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/alert_management_alerts/%d/metric_images/%d", PathEscape(project), alertIID, id) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - mi := new(MetricImage) - resp, err := s.client.Do(req, mi) - if err != nil { - return nil, resp, err - } - - return mi, resp, nil +func (s *AlertManagementService) UpdateMetricImage(pid any, alertIID int64, id int64, opt *UpdateMetricImageOptions, options ...RequestOptionFunc) (*MetricImage, *Response, error) { + return do[*MetricImage](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/alert_management_alerts/%d/metric_images/%d", ProjectID{pid}, alertIID, id), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteMetricImage deletes a metric image for a project alert. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/alert_management_alerts/#delete-metric-image -func (s *AlertManagementService) DeleteMetricImage(pid any, alertIID int, id int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/alert_management_alerts/%d/metric_images/%d", PathEscape(project), alertIID, id) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *AlertManagementService) DeleteMetricImage(pid any, alertIID int64, id int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/alert_management_alerts/%d/metric_images/%d", ProjectID{pid}, alertIID, id), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/appearance.go b/vendor/gitlab.com/gitlab-org/api/client-go/appearance.go index 6b4724e50f..c7167ef95c 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/appearance.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/appearance.go @@ -20,13 +20,22 @@ import "net/http" type ( AppearanceServiceInterface interface { + // GetAppearance gets the current appearance configuration of the GitLab instance. + // + // GitLab API docs: + // https://docs.gitlab.com/api/appearance/#get-details-on-current-application-appearance GetAppearance(options ...RequestOptionFunc) (*Appearance, *Response, error) + + // ChangeAppearance changes the appearance configuration. + // + // GitLab API docs: + // https://docs.gitlab.com/api/appearance/#update-application-appearance ChangeAppearance(opt *ChangeAppearanceOptions, options ...RequestOptionFunc) (*Appearance, *Response, error) } - // AppearanceService handles communication with appearance of the Gitlab API. 
+ // AppearanceService handles communication with appearance of the GitLab API. // - // Gitlab API docs: https://docs.gitlab.com/api/appearance/ + // GitLab API docs: https://docs.gitlab.com/api/appearance/ AppearanceService struct { client *Client } @@ -36,7 +45,7 @@ var _ AppearanceServiceInterface = (*AppearanceService)(nil) // Appearance represents a GitLab appearance. // -// Gitlab API docs: https://docs.gitlab.com/api/appearance/ +// GitLab API docs: https://docs.gitlab.com/api/appearance/ type Appearance struct { Title string `json:"title"` Description string `json:"description"` @@ -57,23 +66,12 @@ type Appearance struct { EmailHeaderAndFooterEnabled bool `json:"email_header_and_footer_enabled"` } -// GetAppearance gets the current appearance configuration of the GitLab instance. -// -// Gitlab API docs: -// https://docs.gitlab.com/api/appearance/#get-details-on-current-application-appearance func (s *AppearanceService) GetAppearance(options ...RequestOptionFunc) (*Appearance, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "application/appearance", nil, options) - if err != nil { - return nil, nil, err - } - - as := new(Appearance) - resp, err := s.client.Do(req, as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil + return do[*Appearance](s.client, + withMethod(http.MethodGet), + withPath("application/appearance"), + withRequestOpts(options...), + ) } // ChangeAppearanceOptions represents the available ChangeAppearance() options. @@ -101,21 +99,11 @@ type ChangeAppearanceOptions struct { URL *string `url:"url,omitempty" json:"url,omitempty"` } -// ChangeAppearance changes the appearance configuration. 
-// -// Gitlab API docs: -// https://docs.gitlab.com/api/appearance/#update-application-appearance func (s *AppearanceService) ChangeAppearance(opt *ChangeAppearanceOptions, options ...RequestOptionFunc) (*Appearance, *Response, error) { - req, err := s.client.NewRequest(http.MethodPut, "application/appearance", opt, options) - if err != nil { - return nil, nil, err - } - - as := new(Appearance) - resp, err := s.client.Do(req, as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil + return do[*Appearance](s.client, + withMethod(http.MethodPut), + withPath("application/appearance"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/application_statistics.go b/vendor/gitlab.com/gitlab-org/api/client-go/application_statistics.go index 5442852c50..f1be661333 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/application_statistics.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/application_statistics.go @@ -18,6 +18,10 @@ import "net/http" type ( ApplicationStatisticsServiceInterface interface { + // GetApplicationStatistics gets details on the current application statistics. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/statistics/#get-details-on-current-application-statistics GetApplicationStatistics(options ...RequestOptionFunc) (*ApplicationStatistics, *Response, error) } @@ -36,33 +40,23 @@ var _ ApplicationStatisticsServiceInterface = (*ApplicationStatisticsService)(ni // // GitLab API docs: https://docs.gitlab.com/api/statistics/ type ApplicationStatistics struct { - Forks int `url:"forks" json:"forks"` - Issues int `url:"issues" json:"issues"` - MergeRequests int `url:"merge_requests" json:"merge_requests"` - Notes int `url:"notes" json:"notes"` - Snippets int `url:"snippets" json:"snippets"` - SSHKeys int `url:"ssh_keys" json:"ssh_keys"` - Milestones int `url:"milestones" json:"milestones"` - Users int `url:"users" json:"users"` - Groups int `url:"groups" json:"groups"` - Projects int `url:"projects" json:"projects"` - ActiveUsers int `url:"active_users" json:"active_users"` + Forks int64 `url:"forks" json:"forks"` + Issues int64 `url:"issues" json:"issues"` + MergeRequests int64 `url:"merge_requests" json:"merge_requests"` + Notes int64 `url:"notes" json:"notes"` + Snippets int64 `url:"snippets" json:"snippets"` + SSHKeys int64 `url:"ssh_keys" json:"ssh_keys"` + Milestones int64 `url:"milestones" json:"milestones"` + Users int64 `url:"users" json:"users"` + Groups int64 `url:"groups" json:"groups"` + Projects int64 `url:"projects" json:"projects"` + ActiveUsers int64 `url:"active_users" json:"active_users"` } -// GetApplicationStatistics gets details on the current application statistics. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/statistics/#get-details-on-current-application-statistics func (s *ApplicationStatisticsService) GetApplicationStatistics(options ...RequestOptionFunc) (*ApplicationStatistics, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "application/statistics", nil, options) - if err != nil { - return nil, nil, err - } - - statistics := new(ApplicationStatistics) - resp, err := s.client.Do(req, statistics) - if err != nil { - return nil, resp, err - } - return statistics, resp, nil + return do[*ApplicationStatistics](s.client, + withMethod(http.MethodGet), + withPath("application/statistics"), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/applications.go b/vendor/gitlab.com/gitlab-org/api/client-go/applications.go index 06dae54cbe..c550ea5e1e 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/applications.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/applications.go @@ -17,21 +17,34 @@ package gitlab import ( - "fmt" "net/http" ) type ( ApplicationsServiceInterface interface { + // CreateApplication creates a new application owned by the authenticated user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/applications/#create-an-application CreateApplication(opt *CreateApplicationOptions, options ...RequestOptionFunc) (*Application, *Response, error) + + // ListApplications get a list of administrables applications by the authenticated user + // + // GitLab API docs: + // https://docs.gitlab.com/api/applications/#list-all-applications ListApplications(opt *ListApplicationsOptions, options ...RequestOptionFunc) ([]*Application, *Response, error) - DeleteApplication(application int, options ...RequestOptionFunc) (*Response, error) + + // DeleteApplication removes a specific application. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/applications/#delete-an-application + DeleteApplication(application int64, options ...RequestOptionFunc) (*Response, error) } // ApplicationsService handles communication with administrables applications - // of the Gitlab API. + // of the GitLab API. // - // Gitlab API docs: https://docs.gitlab.com/api/applications/ + // GitLab API docs: https://docs.gitlab.com/api/applications/ ApplicationsService struct { client *Client } @@ -41,7 +54,7 @@ var _ ApplicationsServiceInterface = (*ApplicationsService)(nil) // Application represents a GitLab application type Application struct { - ID int `json:"id"` + ID int64 `json:"id"` ApplicationID string `json:"application_id"` ApplicationName string `json:"application_name"` Secret string `json:"secret"` @@ -60,57 +73,35 @@ type CreateApplicationOptions struct { Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` } -// CreateApplication creates a new application owned by the authenticated user. -// -// Gitlab API docs: https://docs.gitlab.com/api/applications/#create-an-application func (s *ApplicationsService) CreateApplication(opt *CreateApplicationOptions, options ...RequestOptionFunc) (*Application, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "applications", opt, options) - if err != nil { - return nil, nil, err - } - - a := new(Application) - resp, err := s.client.Do(req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil + return do[*Application](s.client, + withMethod(http.MethodPost), + withPath("applications"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListApplicationsOptions represents the available // ListApplications() options. 
-type ListApplicationsOptions ListOptions +type ListApplicationsOptions struct { + ListOptions +} -// ListApplications get a list of administrables applications by the authenticated user -// -// Gitlab API docs : https://docs.gitlab.com/api/applications/#list-all-applications func (s *ApplicationsService) ListApplications(opt *ListApplicationsOptions, options ...RequestOptionFunc) ([]*Application, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "applications", opt, options) - if err != nil { - return nil, nil, err - } - - var as []*Application - resp, err := s.client.Do(req, &as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil + return do[[]*Application](s.client, + withMethod(http.MethodGet), + withPath("applications"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteApplication removes a specific application. -// -// GitLab API docs: -// https://docs.gitlab.com/api/applications/#delete-an-application -func (s *ApplicationsService) DeleteApplication(application int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("applications/%d", application) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ApplicationsService) DeleteApplication(application int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("applications/%d", application), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/attestations.go b/vendor/gitlab.com/gitlab-org/api/client-go/attestations.go new file mode 100644 index 0000000000..0cfa14119e --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/attestations.go @@ -0,0 +1,64 @@ +package gitlab + +import ( + "bytes" + "net/http" + "time" +) + +type ( + AttestationsServiceInterface interface { + // ListAttestations 
gets a list of all attestations
+		//
+		// GitLab API docs: https://docs.gitlab.com/api/attestations/#list-all-attestations
+		ListAttestations(pid any, subjectDigest string, options ...RequestOptionFunc) ([]*Attestation, *Response, error)
+
+		// DownloadAttestation downloads the attestation file for the given attestation IID.
+		//
+		// GitLab API docs: https://docs.gitlab.com/api/attestations/#download-an-attestation
+		DownloadAttestation(pid any, attestationIID int64, options ...RequestOptionFunc) ([]byte, *Response, error)
+	}
+
+	// AttestationsService handles communication with the attestations related methods
+	// of the GitLab API.
+	//
+	// GitLab API docs: https://docs.gitlab.com/api/attestations
+	AttestationsService struct {
+		client *Client
+	}
+)
+
+var _ AttestationsServiceInterface = (*AttestationsService)(nil)
+
+type Attestation struct {
+	ID            int64      `json:"id"`
+	IID           int64      `json:"iid"`
+	ProjectID     int64      `json:"project_id"`
+	BuildID       int64      `json:"build_id"`
+	Status        string     `json:"status"`
+	CreatedAt     *time.Time `json:"created_at"`
+	UpdatedAt     *time.Time `json:"updated_at"`
+	ExpireAt      *time.Time `json:"expire_at"`
+	PredicateKind string     `json:"predicate_kind"`
+	PredicateType string     `json:"predicate_type"`
+	SubjectDigest string     `json:"subject_digest"`
+	DownloadURL   string     `json:"download_url"`
+}
+
+func (s *AttestationsService) ListAttestations(pid any, subjectDigest string, options ...RequestOptionFunc) ([]*Attestation, *Response, error) {
+	return do[[]*Attestation](s.client,
+		withMethod(http.MethodGet),
+		withPath("projects/%s/attestations/%s", ProjectID{pid}, subjectDigest),
+		withRequestOpts(options...),
+	)
+}
+
+func (s *AttestationsService) DownloadAttestation(pid any, attestationIID int64, options ...RequestOptionFunc) ([]byte, *Response, error) {
+	b, resp, err := do[bytes.Buffer](s.client,
+		withMethod(http.MethodGet),
+		withPath("projects/%s/attestations/%d/download", ProjectID{pid}, attestationIID),
+		withRequestOpts(options...),
+	)
+
+	return b.Bytes(), resp, err
+}
diff --git 
a/vendor/gitlab.com/gitlab-org/api/client-go/audit_events.go b/vendor/gitlab.com/gitlab-org/api/client-go/audit_events.go
index 3a61d8a4f9..6e98b8e88b 100644
--- a/vendor/gitlab.com/gitlab-org/api/client-go/audit_events.go
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/audit_events.go
@@ -1,19 +1,48 @@
 package gitlab
 
 import (
-	"fmt"
 	"net/http"
 	"time"
 )
 
 type (
 	AuditEventsServiceInterface interface {
+		// ListInstanceAuditEvents gets a list of audit events for instance.
+		// Authentication as Administrator is required.
+		//
+		// GitLab API docs:
+		// https://docs.gitlab.com/api/audit_events/#retrieve-all-instance-audit-events
 		ListInstanceAuditEvents(opt *ListAuditEventsOptions, options ...RequestOptionFunc) ([]*AuditEvent, *Response, error)
-		GetInstanceAuditEvent(event int, options ...RequestOptionFunc) (*AuditEvent, *Response, error)
+
+		// GetInstanceAuditEvent gets a specific instance audit event.
+		// Authentication as Administrator is required.
+		//
+		// GitLab API docs:
+		// https://docs.gitlab.com/api/audit_events/#retrieve-single-instance-audit-event
+		GetInstanceAuditEvent(event int64, options ...RequestOptionFunc) (*AuditEvent, *Response, error)
+
+		// ListGroupAuditEvents gets a list of audit events for the specified group
+		// viewable by the authenticated user.
+		//
+		// GitLab API docs:
+		// https://docs.gitlab.com/api/audit_events/#retrieve-all-group-audit-events
 		ListGroupAuditEvents(gid any, opt *ListAuditEventsOptions, options ...RequestOptionFunc) ([]*AuditEvent, *Response, error)
-		GetGroupAuditEvent(gid any, event int, options ...RequestOptionFunc) (*AuditEvent, *Response, error)
+
+		// GetGroupAuditEvent gets a specific group audit event. 
+ // + // GitLab API docs: https://docs.gitlab.com/api/audit_events/#retrieve-a-specific-group-audit-event + GetGroupAuditEvent(gid any, event int64, options ...RequestOptionFunc) (*AuditEvent, *Response, error) + + // ListProjectAuditEvents gets a list of audit events for the specified project + // viewable by the authenticated user. + // + // GitLab API docs: https://docs.gitlab.com/api/audit_events/#retrieve-all-project-audit-events ListProjectAuditEvents(pid any, opt *ListAuditEventsOptions, options ...RequestOptionFunc) ([]*AuditEvent, *Response, error) - GetProjectAuditEvent(pid any, event int, options ...RequestOptionFunc) (*AuditEvent, *Response, error) + + // GetProjectAuditEvent gets a specific project audit event. + // + // GitLab API docs: https://docs.gitlab.com/api/audit_events/#retrieve-a-specific-project-audit-event + GetProjectAuditEvent(pid any, event int64, options ...RequestOptionFunc) (*AuditEvent, *Response, error) } // AuditEventsService handles communication with the project/group/instance @@ -31,9 +65,9 @@ var _ AuditEventsServiceInterface = (*AuditEventsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/audit_events/ type AuditEvent struct { - ID int `json:"id"` - AuthorID int `json:"author_id"` - EntityID int `json:"entity_id"` + ID int64 `json:"id"` + AuthorID int64 `json:"author_id"` + EntityID int64 `json:"entity_id"` EntityType string `json:"entity_type"` EventName string `json:"event_name"` Details AuditEventDetails `json:"details"` @@ -77,141 +111,53 @@ type ListAuditEventsOptions struct { CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` } -// ListInstanceAuditEvents gets a list of audit events for instance. -// Authentication as Administrator is required. 
-// -// GitLab API docs: https://docs.gitlab.com/api/audit_events/#retrieve-all-instance-audit-events func (s *AuditEventsService) ListInstanceAuditEvents(opt *ListAuditEventsOptions, options ...RequestOptionFunc) ([]*AuditEvent, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "audit_events", opt, options) - if err != nil { - return nil, nil, err - } - - var aes []*AuditEvent - resp, err := s.client.Do(req, &aes) - if err != nil { - return nil, resp, err - } - - return aes, resp, nil + return do[[]*AuditEvent](s.client, + withMethod(http.MethodGet), + withPath("audit_events"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetInstanceAuditEvent gets a specific instance audit event. -// Authentication as Administrator is required. -// -// GitLab API docs: https://docs.gitlab.com/api/audit_events/#retrieve-single-instance-audit-event -func (s *AuditEventsService) GetInstanceAuditEvent(event int, options ...RequestOptionFunc) (*AuditEvent, *Response, error) { - u := fmt.Sprintf("audit_events/%d", event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ae := new(AuditEvent) - resp, err := s.client.Do(req, ae) - if err != nil { - return nil, resp, err - } - - return ae, resp, nil +func (s *AuditEventsService) GetInstanceAuditEvent(event int64, options ...RequestOptionFunc) (*AuditEvent, *Response, error) { + return do[*AuditEvent](s.client, + withMethod(http.MethodGet), + withPath("audit_events/%d", event), + withRequestOpts(options...), + ) } -// ListGroupAuditEvents gets a list of audit events for the specified group -// viewable by the authenticated user. 
-// -// GitLab API docs: https://docs.gitlab.com/api/audit_events/#retrieve-all-group-audit-events func (s *AuditEventsService) ListGroupAuditEvents(gid any, opt *ListAuditEventsOptions, options ...RequestOptionFunc) ([]*AuditEvent, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/audit_events", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var aes []*AuditEvent - resp, err := s.client.Do(req, &aes) - if err != nil { - return nil, resp, err - } - - return aes, resp, nil + return do[[]*AuditEvent](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/audit_events", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetGroupAuditEvent gets a specific group audit event. -// -// GitLab API docs: https://docs.gitlab.com/api/audit_events/#retrieve-a-specific-group-audit-event -func (s *AuditEventsService) GetGroupAuditEvent(gid any, event int, options ...RequestOptionFunc) (*AuditEvent, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/audit_events/%d", PathEscape(group), event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ae := new(AuditEvent) - resp, err := s.client.Do(req, ae) - if err != nil { - return nil, resp, err - } - - return ae, resp, nil +func (s *AuditEventsService) GetGroupAuditEvent(gid any, event int64, options ...RequestOptionFunc) (*AuditEvent, *Response, error) { + return do[*AuditEvent](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/audit_events/%d", GroupID{gid}, event), + withRequestOpts(options...), + ) } -// ListProjectAuditEvents gets a list of audit events for the specified project -// viewable by the authenticated user. 
-// -// GitLab API docs: https://docs.gitlab.com/api/audit_events/#retrieve-all-project-audit-events func (s *AuditEventsService) ListProjectAuditEvents(pid any, opt *ListAuditEventsOptions, options ...RequestOptionFunc) ([]*AuditEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/audit_events", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var aes []*AuditEvent - resp, err := s.client.Do(req, &aes) - if err != nil { - return nil, resp, err - } - - return aes, resp, nil + return do[[]*AuditEvent](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/audit_events", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetProjectAuditEvent gets a specific project audit event. -// -// GitLab API docs: -// https://docs.gitlab.com/api/audit_events/#retrieve-a-specific-project-audit-event -func (s *AuditEventsService) GetProjectAuditEvent(pid any, event int, options ...RequestOptionFunc) (*AuditEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/audit_events/%d", PathEscape(project), event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ae := new(AuditEvent) - resp, err := s.client.Do(req, ae) - if err != nil { - return nil, resp, err - } - - return ae, resp, nil +func (s *AuditEventsService) GetProjectAuditEvent(pid any, event int64, options ...RequestOptionFunc) (*AuditEvent, *Response, error) { + return do[*AuditEvent](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/audit_events/%d", ProjectID{pid}, event), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/avatar.go b/vendor/gitlab.com/gitlab-org/api/client-go/avatar.go index e0bf2d6f72..9b2ed7d0a9 
100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/avatar.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/avatar.go @@ -22,6 +22,10 @@ import ( type ( AvatarRequestsServiceInterface interface { + // GetAvatar gets the avatar URL for a user with the given email address. + // + // GitLab API docs: + // https://docs.gitlab.com/api/avatar/#get-details-on-an-account-avatar GetAvatar(opt *GetAvatarOptions, options ...RequestOptionFunc) (*Avatar, *Response, error) } @@ -49,24 +53,14 @@ type Avatar struct { // https://docs.gitlab.com/api/avatar/#get-details-on-an-account-avatar type GetAvatarOptions struct { Email *string `url:"email,omitempty" json:"email,omitempty"` - Size *int `url:"size,omitempty" json:"size,omitempty"` + Size *int64 `url:"size,omitempty" json:"size,omitempty"` } -// GetAvatar gets the avatar URL for a user with the given email address. -// -// GitLab API docs: -// https://docs.gitlab.com/api/avatar/#get-details-on-an-account-avatar func (s *AvatarRequestsService) GetAvatar(opt *GetAvatarOptions, options ...RequestOptionFunc) (*Avatar, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "avatar", opt, options) - if err != nil { - return nil, nil, err - } - - avatar := new(Avatar) - response, err := s.client.Do(req, avatar) - if err != nil { - return nil, response, err - } - - return avatar, response, nil + return do[*Avatar](s.client, + withMethod(http.MethodGet), + withPath("avatar"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/award_emojis.go b/vendor/gitlab.com/gitlab-org/api/client-go/award_emojis.go index 360a16f3eb..26a2e28c23 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/award_emojis.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/award_emojis.go @@ -17,37 +17,161 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( AwardEmojiServiceInterface interface { - ListMergeRequestAwardEmoji(pid any, mergeRequestIID int, opt 
*ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) - ListIssueAwardEmoji(pid any, issueIID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) - ListSnippetAwardEmoji(pid any, snippetID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) - GetMergeRequestAwardEmoji(pid any, mergeRequestIID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) - GetIssueAwardEmoji(pid any, issueIID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) - GetSnippetAwardEmoji(pid any, snippetID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) - CreateMergeRequestAwardEmoji(pid any, mergeRequestIID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) - CreateIssueAwardEmoji(pid any, issueIID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) - CreateSnippetAwardEmoji(pid any, snippetID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) - DeleteIssueAwardEmoji(pid any, issueIID, awardID int, options ...RequestOptionFunc) (*Response, error) - DeleteMergeRequestAwardEmoji(pid any, mergeRequestIID, awardID int, options ...RequestOptionFunc) (*Response, error) - DeleteSnippetAwardEmoji(pid any, snippetID, awardID int, options ...RequestOptionFunc) (*Response, error) - ListIssuesAwardEmojiOnNote(pid any, issueID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) - ListMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) - ListSnippetAwardEmojiOnNote(pid any, snippetIID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) - GetIssuesAwardEmojiOnNote(pid 
any, issueID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) - GetMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) - GetSnippetAwardEmojiOnNote(pid any, snippetIID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) - CreateIssuesAwardEmojiOnNote(pid any, issueID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) - CreateMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) - CreateSnippetAwardEmojiOnNote(pid any, snippetIID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) - DeleteIssuesAwardEmojiOnNote(pid any, issueID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) - DeleteMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) - DeleteSnippetAwardEmojiOnNote(pid any, snippetIID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) + // ListMergeRequestAwardEmoji gets a list of all award emoji on the merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#list-an-awardables-emoji-reactions + ListMergeRequestAwardEmoji(pid any, mergeRequestIID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) + + // ListIssueAwardEmoji gets a list of all award emoji on the issue. + // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#list-an-awardables-emoji-reactions + ListIssueAwardEmoji(pid any, issueIID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) + + // ListSnippetAwardEmoji gets a list of all award emoji on the snippet. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#list-an-awardables-emoji-reactions + ListSnippetAwardEmoji(pid any, snippetID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) + + // GetMergeRequestAwardEmoji get an award emoji from merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#get-single-emoji-reaction + GetMergeRequestAwardEmoji(pid any, mergeRequestIID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) + + // GetIssueAwardEmoji get an award emoji from issue. + // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#get-single-emoji-reaction + GetIssueAwardEmoji(pid any, issueIID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) + + // GetSnippetAwardEmoji get an award emoji from snippet. + // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#get-single-emoji-reaction + GetSnippetAwardEmoji(pid any, snippetID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) + + // CreateMergeRequestAwardEmoji get an award emoji from merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#add-a-new-emoji-reaction + CreateMergeRequestAwardEmoji(pid any, mergeRequestIID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) + + // CreateIssueAwardEmoji get an award emoji from issue. + // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#add-a-new-emoji-reaction + CreateIssueAwardEmoji(pid any, issueIID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) + + // CreateSnippetAwardEmoji get an award emoji from snippet. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#add-a-new-emoji-reaction + CreateSnippetAwardEmoji(pid any, snippetID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) + + // DeleteIssueAwardEmoji delete award emoji on an issue. + // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#delete-an-emoji-reaction + DeleteIssueAwardEmoji(pid any, issueIID, awardID int64, options ...RequestOptionFunc) (*Response, error) + + // DeleteMergeRequestAwardEmoji delete award emoji on a merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#delete-an-emoji-reaction + DeleteMergeRequestAwardEmoji(pid any, mergeRequestIID, awardID int64, options ...RequestOptionFunc) (*Response, error) + + // DeleteSnippetAwardEmoji delete award emoji on a snippet. + // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#delete-an-emoji-reaction + DeleteSnippetAwardEmoji(pid any, snippetID, awardID int64, options ...RequestOptionFunc) (*Response, error) + + // ListIssuesAwardEmojiOnNote gets a list of all award emoji on a note from the + // issue. + // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#list-a-comments-emoji-reactions + ListIssuesAwardEmojiOnNote(pid any, issueID, noteID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) + + // ListMergeRequestAwardEmojiOnNote gets a list of all award emoji on a note + // from the merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#list-a-comments-emoji-reactions + ListMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) + + // ListSnippetAwardEmojiOnNote gets a list of all award emoji on a note from the + // snippet. 
+ //
+ // GitLab API docs:
+ // https://docs.gitlab.com/api/emoji_reactions/#list-a-comments-emoji-reactions
+ ListSnippetAwardEmojiOnNote(pid any, snippetIID, noteID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error)
+
+ // GetIssuesAwardEmojiOnNote gets an award emoji on a note from an issue.
+ //
+ // GitLab API docs:
+ // https://docs.gitlab.com/api/emoji_reactions/#get-an-emoji-reaction-for-a-comment
+ GetIssuesAwardEmojiOnNote(pid any, issueID, noteID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error)
+
+ // GetMergeRequestAwardEmojiOnNote gets an award emoji on a note from a
+ // merge request.
+ //
+ // GitLab API docs:
+ // https://docs.gitlab.com/api/emoji_reactions/#get-an-emoji-reaction-for-a-comment
+ GetMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error)
+
+ // GetSnippetAwardEmojiOnNote gets an award emoji on a note from a snippet.
+ //
+ // GitLab API docs:
+ // https://docs.gitlab.com/api/emoji_reactions/#get-an-emoji-reaction-for-a-comment
+ GetSnippetAwardEmojiOnNote(pid any, snippetIID, noteID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error)
+
+ // CreateIssuesAwardEmojiOnNote creates an award emoji on a note from an issue.
+ //
+ // GitLab API docs:
+ // https://docs.gitlab.com/api/emoji_reactions/#add-a-new-emoji-reaction-to-a-comment
+ CreateIssuesAwardEmojiOnNote(pid any, issueID, noteID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error)
+
+ // CreateMergeRequestAwardEmojiOnNote creates an award emoji on a note from a
+ // merge request.
+ //
+ // GitLab API docs:
+ // https://docs.gitlab.com/api/emoji_reactions/#add-a-new-emoji-reaction-to-a-comment
+ CreateMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error)
+
+ // CreateSnippetAwardEmojiOnNote creates an award emoji on a note from a snippet.
+ //
+ // GitLab API docs:
+ // https://docs.gitlab.com/api/emoji_reactions/#add-a-new-emoji-reaction-to-a-comment
+ CreateSnippetAwardEmojiOnNote(pid any, snippetIID, noteID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error)
+
+ // DeleteIssuesAwardEmojiOnNote deletes an award emoji on a note from an issue.
+ //
+ // GitLab API docs:
+ // https://docs.gitlab.com/api/emoji_reactions/#delete-an-emoji-reaction-from-a-comment
+ DeleteIssuesAwardEmojiOnNote(pid any, issueID, noteID, awardID int64, options ...RequestOptionFunc) (*Response, error)
+
+ // DeleteMergeRequestAwardEmojiOnNote deletes an award emoji on a note from a
+ // merge request.
+ //
+ // GitLab API docs:
+ // https://docs.gitlab.com/api/emoji_reactions/#delete-an-emoji-reaction-from-a-comment
+ DeleteMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID, awardID int64, options ...RequestOptionFunc) (*Response, error)
+
+ // DeleteSnippetAwardEmojiOnNote deletes an award emoji on a note from a snippet.
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/emoji_reactions/#delete-an-emoji-reaction-from-a-comment + DeleteSnippetAwardEmojiOnNote(pid any, snippetIID, noteID, awardID int64, options ...RequestOptionFunc) (*Response, error) } // AwardEmojiService handles communication with the emoji awards related methods @@ -65,19 +189,12 @@ var _ AwardEmojiServiceInterface = (*AwardEmojiService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/emoji_reactions/ type AwardEmoji struct { - ID int `json:"id"` - Name string `json:"name"` - User struct { - Name string `json:"name"` - Username string `json:"username"` - ID int `json:"id"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - } `json:"user"` + ID int64 `json:"id"` + Name string `json:"name"` + User BasicUser `json:"user"` CreatedAt *time.Time `json:"created_at"` UpdatedAt *time.Time `json:"updated_at"` - AwardableID int `json:"awardable_id"` + AwardableID int64 `json:"awardable_id"` AwardableType string `json:"awardable_type"` } @@ -92,105 +209,49 @@ const ( // // GitLab API docs: // https://docs.gitlab.com/api/emoji_reactions/ -type ListAwardEmojiOptions ListOptions +type ListAwardEmojiOptions struct { + ListOptions +} -// ListMergeRequestAwardEmoji gets a list of all award emoji on the merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#list-an-awardables-emoji-reactions -func (s *AwardEmojiService) ListMergeRequestAwardEmoji(pid any, mergeRequestIID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) ListMergeRequestAwardEmoji(pid any, mergeRequestIID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { return s.listAwardEmoji(pid, awardMergeRequest, mergeRequestIID, opt, options...) } -// ListIssueAwardEmoji gets a list of all award emoji on the issue. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#list-an-awardables-emoji-reactions -func (s *AwardEmojiService) ListIssueAwardEmoji(pid any, issueIID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) ListIssueAwardEmoji(pid any, issueIID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { return s.listAwardEmoji(pid, awardIssue, issueIID, opt, options...) } -// ListSnippetAwardEmoji gets a list of all award emoji on the snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#list-an-awardables-emoji-reactions -func (s *AwardEmojiService) ListSnippetAwardEmoji(pid any, snippetID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) ListSnippetAwardEmoji(pid any, snippetID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { return s.listAwardEmoji(pid, awardSnippets, snippetID, opt, options...) 
} -func (s *AwardEmojiService) listAwardEmoji(pid any, resource string, resourceID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/award_emoji", - PathEscape(project), - resource, - resourceID, +func (s *AwardEmojiService) listAwardEmoji(pid any, resource string, resourceID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { + return do[[]*AwardEmoji](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/%s/%d/award_emoji", ProjectID{pid}, resource, resourceID), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var as []*AwardEmoji - resp, err := s.client.Do(req, &as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil } -// GetMergeRequestAwardEmoji get an award emoji from merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#get-single-emoji-reaction -func (s *AwardEmojiService) GetMergeRequestAwardEmoji(pid any, mergeRequestIID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) GetMergeRequestAwardEmoji(pid any, mergeRequestIID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { return s.getAwardEmoji(pid, awardMergeRequest, mergeRequestIID, awardID, options...) } -// GetIssueAwardEmoji get an award emoji from issue. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#get-single-emoji-reaction -func (s *AwardEmojiService) GetIssueAwardEmoji(pid any, issueIID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) GetIssueAwardEmoji(pid any, issueIID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { return s.getAwardEmoji(pid, awardIssue, issueIID, awardID, options...) } -// GetSnippetAwardEmoji get an award emoji from snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#get-single-emoji-reaction -func (s *AwardEmojiService) GetSnippetAwardEmoji(pid any, snippetID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) GetSnippetAwardEmoji(pid any, snippetID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { return s.getAwardEmoji(pid, awardSnippets, snippetID, awardID, options...) } -func (s *AwardEmojiService) getAwardEmoji(pid any, resource string, resourceID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/award_emoji/%d", - PathEscape(project), - resource, - resourceID, - awardID, +func (s *AwardEmojiService) getAwardEmoji(pid any, resource string, resourceID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { + return do[*AwardEmoji](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/%s/%d/award_emoji/%d", ProjectID{pid}, resource, resourceID, awardID), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - a := new(AwardEmoji) - resp, err := s.client.Do(req, &a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil } // CreateAwardEmojiOptions represents the available 
options for awarding emoji @@ -202,76 +263,36 @@ type CreateAwardEmojiOptions struct { Name string `json:"name"` } -// CreateMergeRequestAwardEmoji get an award emoji from merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#add-a-new-emoji-reaction -func (s *AwardEmojiService) CreateMergeRequestAwardEmoji(pid any, mergeRequestIID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) CreateMergeRequestAwardEmoji(pid any, mergeRequestIID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { return s.createAwardEmoji(pid, awardMergeRequest, mergeRequestIID, opt, options...) } -// CreateIssueAwardEmoji get an award emoji from issue. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#add-a-new-emoji-reaction -func (s *AwardEmojiService) CreateIssueAwardEmoji(pid any, issueIID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) CreateIssueAwardEmoji(pid any, issueIID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { return s.createAwardEmoji(pid, awardIssue, issueIID, opt, options...) } -// CreateSnippetAwardEmoji get an award emoji from snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#add-a-new-emoji-reaction -func (s *AwardEmojiService) CreateSnippetAwardEmoji(pid any, snippetID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) CreateSnippetAwardEmoji(pid any, snippetID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { return s.createAwardEmoji(pid, awardSnippets, snippetID, opt, options...) 
} -func (s *AwardEmojiService) createAwardEmoji(pid any, resource string, resourceID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/award_emoji", - PathEscape(project), - resource, - resourceID, +func (s *AwardEmojiService) createAwardEmoji(pid any, resource string, resourceID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { + return do[*AwardEmoji](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/%s/%d/award_emoji", ProjectID{pid}, resource, resourceID), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - a := new(AwardEmoji) - resp, err := s.client.Do(req, &a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil } -// DeleteIssueAwardEmoji delete award emoji on an issue. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#delete-an-emoji-reaction -func (s *AwardEmojiService) DeleteIssueAwardEmoji(pid any, issueIID, awardID int, options ...RequestOptionFunc) (*Response, error) { +func (s *AwardEmojiService) DeleteIssueAwardEmoji(pid any, issueIID, awardID int64, options ...RequestOptionFunc) (*Response, error) { return s.deleteAwardEmoji(pid, awardIssue, issueIID, awardID, options...) } -// DeleteMergeRequestAwardEmoji delete award emoji on a merge request. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#delete-an-emoji-reaction -func (s *AwardEmojiService) DeleteMergeRequestAwardEmoji(pid any, mergeRequestIID, awardID int, options ...RequestOptionFunc) (*Response, error) { +func (s *AwardEmojiService) DeleteMergeRequestAwardEmoji(pid any, mergeRequestIID, awardID int64, options ...RequestOptionFunc) (*Response, error) { return s.deleteAwardEmoji(pid, awardMergeRequest, mergeRequestIID, awardID, options...) } -// DeleteSnippetAwardEmoji delete award emoji on a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#delete-an-emoji-reaction -func (s *AwardEmojiService) DeleteSnippetAwardEmoji(pid any, snippetID, awardID int, options ...RequestOptionFunc) (*Response, error) { +func (s *AwardEmojiService) DeleteSnippetAwardEmoji(pid any, snippetID, awardID int64, options ...RequestOptionFunc) (*Response, error) { return s.deleteAwardEmoji(pid, awardSnippets, snippetID, awardID, options...) } @@ -279,145 +300,66 @@ func (s *AwardEmojiService) DeleteSnippetAwardEmoji(pid any, snippetID, awardID // // GitLab API docs: // https://docs.gitlab.com/api/emoji_reactions/#delete-an-emoji-reaction -func (s *AwardEmojiService) deleteAwardEmoji(pid any, resource string, resourceID, awardID int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/award_emoji/%d", PathEscape(project), resource, - resourceID, awardID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) +func (s *AwardEmojiService) deleteAwardEmoji(pid any, resource string, resourceID, awardID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/%s/%d/award_emoji/%d", ProjectID{pid}, resource, resourceID, awardID), + 
withRequestOpts(options...), + ) + return resp, err } -// ListIssuesAwardEmojiOnNote gets a list of all award emoji on a note from the -// issue. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#list-a-comments-emoji-reactions -func (s *AwardEmojiService) ListIssuesAwardEmojiOnNote(pid any, issueID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) ListIssuesAwardEmojiOnNote(pid any, issueID, noteID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { return s.listAwardEmojiOnNote(pid, awardIssue, issueID, noteID, opt, options...) } -// ListMergeRequestAwardEmojiOnNote gets a list of all award emoji on a note -// from the merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#list-a-comments-emoji-reactions -func (s *AwardEmojiService) ListMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) ListMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { return s.listAwardEmojiOnNote(pid, awardMergeRequest, mergeRequestIID, noteID, opt, options...) } -// ListSnippetAwardEmojiOnNote gets a list of all award emoji on a note from the -// snippet. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#list-a-comments-emoji-reactions -func (s *AwardEmojiService) ListSnippetAwardEmojiOnNote(pid any, snippetIID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) ListSnippetAwardEmojiOnNote(pid any, snippetIID, noteID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { return s.listAwardEmojiOnNote(pid, awardSnippets, snippetIID, noteID, opt, options...) } -func (s *AwardEmojiService) listAwardEmojiOnNote(pid any, resources string, resourceID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/notes/%d/award_emoji", PathEscape(project), resources, - resourceID, noteID) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var as []*AwardEmoji - resp, err := s.client.Do(req, &as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil +func (s *AwardEmojiService) listAwardEmojiOnNote(pid any, resources string, resourceID, noteID int64, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { + return do[[]*AwardEmoji](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/%s/%d/notes/%d/award_emoji", ProjectID{pid}, resources, resourceID, noteID), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetIssuesAwardEmojiOnNote gets an award emoji on a note from an issue. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#get-an-emoji-reaction-for-a-comment -func (s *AwardEmojiService) GetIssuesAwardEmojiOnNote(pid any, issueID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) GetIssuesAwardEmojiOnNote(pid any, issueID, noteID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { return s.getSingleNoteAwardEmoji(pid, awardIssue, issueID, noteID, awardID, options...) } -// GetMergeRequestAwardEmojiOnNote gets an award emoji on a note from a -// merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#get-an-emoji-reaction-for-a-comment -func (s *AwardEmojiService) GetMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) GetMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { return s.getSingleNoteAwardEmoji(pid, awardMergeRequest, mergeRequestIID, noteID, awardID, options...) } -// GetSnippetAwardEmojiOnNote gets an award emoji on a note from a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#get-an-emoji-reaction-for-a-comment -func (s *AwardEmojiService) GetSnippetAwardEmojiOnNote(pid any, snippetIID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) GetSnippetAwardEmojiOnNote(pid any, snippetIID, noteID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { return s.getSingleNoteAwardEmoji(pid, awardSnippets, snippetIID, noteID, awardID, options...) 
} -func (s *AwardEmojiService) getSingleNoteAwardEmoji(pid any, resource string, resourceID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/notes/%d/award_emoji/%d", - PathEscape(project), - resource, - resourceID, - noteID, - awardID, +func (s *AwardEmojiService) getSingleNoteAwardEmoji(pid any, resource string, resourceID, noteID, awardID int64, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { + return do[*AwardEmoji](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/%s/%d/notes/%d/award_emoji/%d", ProjectID{pid}, resource, resourceID, noteID, awardID), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - a := new(AwardEmoji) - resp, err := s.client.Do(req, &a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil } -// CreateIssuesAwardEmojiOnNote gets an award emoji on a note from an issue. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#add-a-new-emoji-reaction-to-a-comment -func (s *AwardEmojiService) CreateIssuesAwardEmojiOnNote(pid any, issueID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) CreateIssuesAwardEmojiOnNote(pid any, issueID, noteID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { return s.createAwardEmojiOnNote(pid, awardIssue, issueID, noteID, opt, options...) } -// CreateMergeRequestAwardEmojiOnNote gets an award emoji on a note from a -// merge request. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#add-a-new-emoji-reaction-to-a-comment -func (s *AwardEmojiService) CreateMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) CreateMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { return s.createAwardEmojiOnNote(pid, awardMergeRequest, mergeRequestIID, noteID, opt, options...) } -// CreateSnippetAwardEmojiOnNote gets an award emoji on a note from a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#add-a-new-emoji-reaction-to-a-comment -func (s *AwardEmojiService) CreateSnippetAwardEmojiOnNote(pid any, snippetIID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { +func (s *AwardEmojiService) CreateSnippetAwardEmojiOnNote(pid any, snippetIID, noteID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { return s.createAwardEmojiOnNote(pid, awardSnippets, snippetIID, noteID, opt, options...) 
} @@ -425,75 +367,33 @@ func (s *AwardEmojiService) CreateSnippetAwardEmojiOnNote(pid any, snippetIID, n // // GitLab API docs: // https://docs.gitlab.com/api/emoji_reactions/#add-a-new-emoji-reaction-to-a-comment -func (s *AwardEmojiService) createAwardEmojiOnNote(pid any, resource string, resourceID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/notes/%d/award_emoji", - PathEscape(project), - resource, - resourceID, - noteID, +func (s *AwardEmojiService) createAwardEmojiOnNote(pid any, resource string, resourceID, noteID int64, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { + return do[*AwardEmoji](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/%s/%d/notes/%d/award_emoji", ProjectID{pid}, resource, resourceID, noteID), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - a := new(AwardEmoji) - resp, err := s.client.Do(req, &a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil } -// DeleteIssuesAwardEmojiOnNote deletes an award emoji on a note from an issue. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#delete-an-emoji-reaction-from-a-comment -func (s *AwardEmojiService) DeleteIssuesAwardEmojiOnNote(pid any, issueID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) { +func (s *AwardEmojiService) DeleteIssuesAwardEmojiOnNote(pid any, issueID, noteID, awardID int64, options ...RequestOptionFunc) (*Response, error) { return s.deleteAwardEmojiOnNote(pid, awardIssue, issueID, noteID, awardID, options...) } -// DeleteMergeRequestAwardEmojiOnNote deletes an award emoji on a note from a -// merge request. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#delete-an-emoji-reaction-from-a-comment -func (s *AwardEmojiService) DeleteMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) { +func (s *AwardEmojiService) DeleteMergeRequestAwardEmojiOnNote(pid any, mergeRequestIID, noteID, awardID int64, options ...RequestOptionFunc) (*Response, error) { return s.deleteAwardEmojiOnNote(pid, awardMergeRequest, mergeRequestIID, noteID, awardID, options...) } -// DeleteSnippetAwardEmojiOnNote deletes an award emoji on a note from a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/api/emoji_reactions/#delete-an-emoji-reaction-from-a-comment -func (s *AwardEmojiService) DeleteSnippetAwardEmojiOnNote(pid any, snippetIID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) { +func (s *AwardEmojiService) DeleteSnippetAwardEmojiOnNote(pid any, snippetIID, noteID, awardID int64, options ...RequestOptionFunc) (*Response, error) { return s.deleteAwardEmojiOnNote(pid, awardSnippets, snippetIID, noteID, awardID, options...) 
}

-func (s *AwardEmojiService) deleteAwardEmojiOnNote(pid any, resource string, resourceID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, err
- }
- u := fmt.Sprintf("projects/%s/%s/%d/notes/%d/award_emoji/%d",
- PathEscape(project),
- resource,
- resourceID,
- noteID,
- awardID,
+func (s *AwardEmojiService) deleteAwardEmojiOnNote(pid any, resource string, resourceID, noteID, awardID int64, options ...RequestOptionFunc) (*Response, error) {
+ _, resp, err := do[none](s.client,
+ withMethod(http.MethodDelete),
+ withPath("projects/%s/%s/%d/notes/%d/award_emoji/%d", ProjectID{pid}, resource, resourceID, noteID, awardID),
+ withRequestOpts(options...),
 )
-
- req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
+ return resp, err
}
diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/boards.go b/vendor/gitlab.com/gitlab-org/api/client-go/boards.go
index 1d1558d029..0cbf49b7cb 100644
--- a/vendor/gitlab.com/gitlab-org/api/client-go/boards.go
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/boards.go
@@ -17,22 +17,72 @@ package gitlab

 import (
- "fmt"
 "net/http"
)

type (
 IssueBoardsServiceInterface interface {
+ // CreateIssueBoard creates a new issue board.
+ //
+ // GitLab API docs:
+ // https://docs.gitlab.com/api/boards/#create-an-issue-board
 CreateIssueBoard(pid any, opt *CreateIssueBoardOptions, options ...RequestOptionFunc) (*IssueBoard, *Response, error)
- UpdateIssueBoard(pid any, board int, opt *UpdateIssueBoardOptions, options ...RequestOptionFunc) (*IssueBoard, *Response, error)
- DeleteIssueBoard(pid any, board int, options ...RequestOptionFunc) (*Response, error)
+
+ // UpdateIssueBoard updates an issue board.
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/boards/#update-an-issue-board + UpdateIssueBoard(pid any, board int64, opt *UpdateIssueBoardOptions, options ...RequestOptionFunc) (*IssueBoard, *Response, error) + + // DeleteIssueBoard deletes an issue board. + // + // GitLab API docs: + // https://docs.gitlab.com/api/boards/#delete-an-issue-board + DeleteIssueBoard(pid any, board int64, options ...RequestOptionFunc) (*Response, error) + + // ListIssueBoards gets a list of all issue boards in a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/boards/#list-project-issue-boards ListIssueBoards(pid any, opt *ListIssueBoardsOptions, options ...RequestOptionFunc) ([]*IssueBoard, *Response, error) - GetIssueBoard(pid any, board int, options ...RequestOptionFunc) (*IssueBoard, *Response, error) - GetIssueBoardLists(pid any, board int, opt *GetIssueBoardListsOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) - GetIssueBoardList(pid any, board, list int, options ...RequestOptionFunc) (*BoardList, *Response, error) - CreateIssueBoardList(pid any, board int, opt *CreateIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) - UpdateIssueBoardList(pid any, board, list int, opt *UpdateIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) - DeleteIssueBoardList(pid any, board, list int, options ...RequestOptionFunc) (*Response, error) + + // GetIssueBoard gets a single issue board of a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/boards/#show-a-single-issue-board + GetIssueBoard(pid any, board int64, options ...RequestOptionFunc) (*IssueBoard, *Response, error) + + // GetIssueBoardLists gets a list of the issue board's lists. Does not include + // backlog and closed lists. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/boards/#list-board-lists-in-a-project-issue-board + GetIssueBoardLists(pid any, board int64, opt *GetIssueBoardListsOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) + + // GetIssueBoardList gets a single issue board list. + // + // GitLab API docs: + // https://docs.gitlab.com/api/boards/#show-a-single-board-list + GetIssueBoardList(pid any, board, list int64, options ...RequestOptionFunc) (*BoardList, *Response, error) + + // CreateIssueBoardList creates a new issue board list. + // + // GitLab API docs: + // https://docs.gitlab.com/api/boards/#create-a-board-list + CreateIssueBoardList(pid any, board int64, opt *CreateIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) + + // UpdateIssueBoardList updates the position of an existing issue board list. + // + // GitLab API docs: + // https://docs.gitlab.com/api/boards/#reorder-a-list-in-a-board + UpdateIssueBoardList(pid any, board, list int64, opt *UpdateIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) + + // DeleteIssueBoardList soft deletes an issue board list. Only for admins and + // project owners. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/boards/#delete-a-board-list-from-a-board + DeleteIssueBoardList(pid any, board, list int64, options ...RequestOptionFunc) (*Response, error) } // IssueBoardsService handles communication with the issue board related @@ -50,21 +104,16 @@ var _ IssueBoardsServiceInterface = (*IssueBoardsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/boards/ type IssueBoard struct { - ID int `json:"id"` - Name string `json:"name"` - Project *Project `json:"project"` - Milestone *Milestone `json:"milestone"` - Assignee *struct { - ID int `json:"id"` - Username string `json:"username"` - Name string `json:"name"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - } `json:"assignee"` - Lists []*BoardList `json:"lists"` - Weight int `json:"weight"` - Labels []*LabelDetails `json:"labels"` + ID int64 `json:"id"` + Name string `json:"name"` + Project *Project `json:"project"` + Milestone *Milestone `json:"milestone"` + Assignee *BasicUser `json:"assignee"` + Lists []*BoardList `json:"lists"` + Weight int64 `json:"weight"` + Labels []*LabelDetails `json:"labels"` + HideBacklogList bool `json:"hide_backlog_list"` + HideClosedList bool `json:"hide_closed_list"` } func (b IssueBoard) String() string { @@ -75,24 +124,33 @@ func (b IssueBoard) String() string { // // GitLab API docs: https://docs.gitlab.com/api/boards/ type BoardList struct { - ID int `json:"id"` - Assignee *struct { - ID int `json:"id"` - Name string `json:"name"` - Username string `json:"username"` - } `json:"assignee"` - Iteration *ProjectIteration `json:"iteration"` - Label *Label `json:"label"` - MaxIssueCount int `json:"max_issue_count"` - MaxIssueWeight int `json:"max_issue_weight"` - Milestone *Milestone `json:"milestone"` - Position int `json:"position"` + ID int64 `json:"id"` + Assignee *BoardListAssignee `json:"assignee"` + Iteration *ProjectIteration `json:"iteration"` + Label *Label 
`json:"label"` + MaxIssueCount int64 `json:"max_issue_count"` + MaxIssueWeight int64 `json:"max_issue_weight"` + Milestone *Milestone `json:"milestone"` + Position int64 `json:"position"` } func (b BoardList) String() string { return Stringify(b) } +// BoardListAssignee represents a GitLab board list assignee. +// +// GitLab API docs: https://docs.gitlab.com/api/boards/ +type BoardListAssignee struct { + ID int64 `json:"id"` + Name string `json:"name"` + Username string `json:"username"` +} + +func (a BoardListAssignee) String() string { + return Stringify(a) +} + // CreateIssueBoardOptions represents the available CreateIssueBoard() options. // // GitLab API docs: https://docs.gitlab.com/api/boards/#create-an-issue-board @@ -100,192 +158,92 @@ type CreateIssueBoardOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` } -// CreateIssueBoard creates a new issue board. -// -// GitLab API docs: https://docs.gitlab.com/api/boards/#create-an-issue-board func (s *IssueBoardsService) CreateIssueBoard(pid any, opt *CreateIssueBoardOptions, options ...RequestOptionFunc) (*IssueBoard, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/boards", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - board := new(IssueBoard) - resp, err := s.client.Do(req, board) - if err != nil { - return nil, resp, err - } - - return board, resp, nil + return do[*IssueBoard](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/boards", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateIssueBoardOptions represents the available UpdateIssueBoard() options. 
// // GitLab API docs: https://docs.gitlab.com/api/boards/#update-an-issue-board type UpdateIssueBoardOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` - Labels *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"` - Weight *int `url:"weight,omitempty" json:"weight,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + AssigneeID *int64 `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + MilestoneID *int64 `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` + Labels *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"` + Weight *int64 `url:"weight,omitempty" json:"weight,omitempty"` + HideBacklogList *bool `url:"hide_backlog_list,omitempty" json:"hide_backlog_list,omitempty"` + HideClosedList *bool `url:"hide_closed_list,omitempty" json:"hide_closed_list,omitempty"` } -// UpdateIssueBoard update an issue board. 
-// -// GitLab API docs: https://docs.gitlab.com/api/boards/#update-an-issue-board -func (s *IssueBoardsService) UpdateIssueBoard(pid any, board int, opt *UpdateIssueBoardOptions, options ...RequestOptionFunc) (*IssueBoard, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d", PathEscape(project), board) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - is := new(IssueBoard) - resp, err := s.client.Do(req, is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil +func (s *IssueBoardsService) UpdateIssueBoard(pid any, board int64, opt *UpdateIssueBoardOptions, options ...RequestOptionFunc) (*IssueBoard, *Response, error) { + return do[*IssueBoard](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/boards/%d", ProjectID{pid}, board), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteIssueBoard deletes an issue board. -// -// GitLab API docs: https://docs.gitlab.com/api/boards/#delete-an-issue-board -func (s *IssueBoardsService) DeleteIssueBoard(pid any, board int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d", PathEscape(project), board) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *IssueBoardsService) DeleteIssueBoard(pid any, board int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/boards/%d", ProjectID{pid}, board), + withRequestOpts(options...), + ) + return resp, err } // ListIssueBoardsOptions represents the available ListIssueBoards() options. 
// // GitLab API docs: https://docs.gitlab.com/api/boards/#list-project-issue-boards -type ListIssueBoardsOptions ListOptions +type ListIssueBoardsOptions struct { + ListOptions +} -// ListIssueBoards gets a list of all issue boards in a project. -// -// GitLab API docs: https://docs.gitlab.com/api/boards/#list-project-issue-boards func (s *IssueBoardsService) ListIssueBoards(pid any, opt *ListIssueBoardsOptions, options ...RequestOptionFunc) ([]*IssueBoard, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/boards", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var is []*IssueBoard - resp, err := s.client.Do(req, &is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil + return do[[]*IssueBoard](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/boards", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetIssueBoard gets a single issue board of a project. 
-// -// GitLab API docs: https://docs.gitlab.com/api/boards/#show-a-single-issue-board -func (s *IssueBoardsService) GetIssueBoard(pid any, board int, options ...RequestOptionFunc) (*IssueBoard, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d", PathEscape(project), board) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ib := new(IssueBoard) - resp, err := s.client.Do(req, ib) - if err != nil { - return nil, resp, err - } - - return ib, resp, nil +func (s *IssueBoardsService) GetIssueBoard(pid any, board int64, options ...RequestOptionFunc) (*IssueBoard, *Response, error) { + return do[*IssueBoard](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/boards/%d", ProjectID{pid}, board), + withRequestOpts(options...), + ) } // GetIssueBoardListsOptions represents the available GetIssueBoardLists() options. // // GitLab API docs: https://docs.gitlab.com/api/boards/#list-board-lists-in-a-project-issue-board -type GetIssueBoardListsOptions ListOptions - -// GetIssueBoardLists gets a list of the issue board's lists. Does not include -// backlog and closed lists. 
-// -// GitLab API docs: https://docs.gitlab.com/api/boards/#list-board-lists-in-a-project-issue-board -func (s *IssueBoardsService) GetIssueBoardLists(pid any, board int, opt *GetIssueBoardListsOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d/lists", PathEscape(project), board) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var bl []*BoardList - resp, err := s.client.Do(req, &bl) - if err != nil { - return nil, resp, err - } - - return bl, resp, nil +type GetIssueBoardListsOptions struct { + ListOptions } -// GetIssueBoardList gets a single issue board list. -// -// GitLab API docs: https://docs.gitlab.com/api/boards/#show-a-single-board-list -func (s *IssueBoardsService) GetIssueBoardList(pid any, board, list int, options ...RequestOptionFunc) (*BoardList, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d/lists/%d", - PathEscape(project), - board, - list, +func (s *IssueBoardsService) GetIssueBoardLists(pid any, board int64, opt *GetIssueBoardListsOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) { + return do[[]*BoardList](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/boards/%d/lists", ProjectID{pid}, board), + withAPIOpts(opt), + withRequestOpts(options...), ) +} - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - bl := new(BoardList) - resp, err := s.client.Do(req, bl) - if err != nil { - return nil, resp, err - } - - return bl, resp, nil +func (s *IssueBoardsService) GetIssueBoardList(pid any, board, list int64, options ...RequestOptionFunc) (*BoardList, *Response, error) { + return do[*BoardList](s.client, + withMethod(http.MethodGet), + 
withPath("projects/%s/boards/%d/lists/%d", ProjectID{pid}, board, list), + withRequestOpts(options...), + ) } // CreateIssueBoardListOptions represents the available CreateIssueBoardList() @@ -293,34 +251,19 @@ func (s *IssueBoardsService) GetIssueBoardList(pid any, board, list int, options // // GitLab API docs: https://docs.gitlab.com/api/boards/#create-a-board-list type CreateIssueBoardListOptions struct { - LabelID *int `url:"label_id,omitempty" json:"label_id,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` - IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` + LabelID *int64 `url:"label_id,omitempty" json:"label_id,omitempty"` + AssigneeID *int64 `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + MilestoneID *int64 `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` + IterationID *int64 `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` } -// CreateIssueBoardList creates a new issue board list. 
-// -// GitLab API docs: https://docs.gitlab.com/api/boards/#create-a-board-list -func (s *IssueBoardsService) CreateIssueBoardList(pid any, board int, opt *CreateIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d/lists", PathEscape(project), board) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - bl := new(BoardList) - resp, err := s.client.Do(req, bl) - if err != nil { - return nil, resp, err - } - - return bl, resp, nil +func (s *IssueBoardsService) CreateIssueBoardList(pid any, board int64, opt *CreateIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) { + return do[*BoardList](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/boards/%d/lists", ProjectID{pid}, board), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateIssueBoardListOptions represents the available UpdateIssueBoardList() @@ -328,57 +271,23 @@ func (s *IssueBoardsService) CreateIssueBoardList(pid any, board int, opt *Creat // // GitLab API docs: https://docs.gitlab.com/api/boards/#reorder-a-list-in-a-board type UpdateIssueBoardListOptions struct { - Position *int `url:"position" json:"position"` + Position *int64 `url:"position" json:"position"` } -// UpdateIssueBoardList updates the position of an existing issue board list. 
-// -// GitLab API docs: https://docs.gitlab.com/api/boards/#reorder-a-list-in-a-board -func (s *IssueBoardsService) UpdateIssueBoardList(pid any, board, list int, opt *UpdateIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d/lists/%d", - PathEscape(project), - board, - list, +func (s *IssueBoardsService) UpdateIssueBoardList(pid any, board, list int64, opt *UpdateIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) { + return do[*BoardList](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/boards/%d/lists/%d", ProjectID{pid}, board, list), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - bl := new(BoardList) - resp, err := s.client.Do(req, bl) - if err != nil { - return nil, resp, err - } - - return bl, resp, nil } -// DeleteIssueBoardList soft deletes an issue board list. Only for admins and -// project owners. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/boards/#delete-a-board-list-from-a-board -func (s *IssueBoardsService) DeleteIssueBoardList(pid any, board, list int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d/lists/%d", - PathEscape(project), - board, - list, +func (s *IssueBoardsService) DeleteIssueBoardList(pid any, board, list int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/boards/%d/lists/%d", ProjectID{pid}, board, list), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/branches.go b/vendor/gitlab.com/gitlab-org/api/client-go/branches.go index 1c753b17fe..528532c058 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/branches.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/branches.go @@ -17,17 +17,39 @@ package gitlab import ( - "fmt" "net/http" - "net/url" ) type ( BranchesServiceInterface interface { + // ListBranches gets a list of repository branches from a project, sorted by name alphabetically. + // + // GitLab API docs: + // https://docs.gitlab.com/api/branches/#list-repository-branches ListBranches(pid any, opts *ListBranchesOptions, options ...RequestOptionFunc) ([]*Branch, *Response, error) + + // GetBranch gets a single project repository branch. + // + // GitLab API docs: + // https://docs.gitlab.com/api/branches/#get-single-repository-branch GetBranch(pid any, branch string, options ...RequestOptionFunc) (*Branch, *Response, error) + + // CreateBranch creates branch from commit SHA or existing branch. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/branches/#create-repository-branch CreateBranch(pid any, opt *CreateBranchOptions, options ...RequestOptionFunc) (*Branch, *Response, error) + + // DeleteBranch deletes an existing branch. + // + // GitLab API docs: + // https://docs.gitlab.com/api/branches/#delete-repository-branch DeleteBranch(pid any, branch string, options ...RequestOptionFunc) (*Response, error) + + // DeleteMergedBranches deletes all branches that are merged into the project's default branch. + // + // GitLab API docs: + // https://docs.gitlab.com/api/branches/#delete-merged-branches DeleteMergedBranches(pid any, options ...RequestOptionFunc) (*Response, error) } @@ -71,55 +93,21 @@ type ListBranchesOptions struct { Regex *string `url:"regex,omitempty" json:"regex,omitempty"` } -// ListBranches gets a list of repository branches from a project, sorted by -// name alphabetically. -// -// GitLab API docs: -// https://docs.gitlab.com/api/branches/#list-repository-branches func (s *BranchesService) ListBranches(pid any, opts *ListBranchesOptions, options ...RequestOptionFunc) ([]*Branch, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/branches", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var b []*Branch - resp, err := s.client.Do(req, &b) - if err != nil { - return nil, resp, err - } - - return b, resp, nil + return do[[]*Branch](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/repository/branches", ProjectID{pid}), + withAPIOpts(opts), + withRequestOpts(options...), + ) } -// GetBranch gets a single project repository branch. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/branches/#get-single-repository-branch func (s *BranchesService) GetBranch(pid any, branch string, options ...RequestOptionFunc) (*Branch, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/branches/%s", PathEscape(project), url.PathEscape(branch)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - b := new(Branch) - resp, err := s.client.Do(req, b) - if err != nil { - return nil, resp, err - } - - return b, resp, nil + return do[*Branch](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/repository/branches/%s", ProjectID{pid}, branch), + withRequestOpts(options...), + ) } // CreateBranchOptions represents the available CreateBranch() options. @@ -131,65 +119,29 @@ type CreateBranchOptions struct { Ref *string `url:"ref,omitempty" json:"ref,omitempty"` } -// CreateBranch creates branch from commit SHA or existing branch. -// -// GitLab API docs: -// https://docs.gitlab.com/api/branches/#create-repository-branch func (s *BranchesService) CreateBranch(pid any, opt *CreateBranchOptions, options ...RequestOptionFunc) (*Branch, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/branches", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - b := new(Branch) - resp, err := s.client.Do(req, b) - if err != nil { - return nil, resp, err - } - - return b, resp, nil + return do[*Branch](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/repository/branches", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteBranch deletes an existing branch. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/branches/#delete-repository-branch func (s *BranchesService) DeleteBranch(pid any, branch string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/repository/branches/%s", PathEscape(project), url.PathEscape(branch)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/repository/branches/%s", ProjectID{pid}, branch), + withRequestOpts(options...), + ) + return resp, err } -// DeleteMergedBranches deletes all branches that are merged into the project's default branch. -// -// GitLab API docs: -// https://docs.gitlab.com/api/branches/#delete-merged-branches func (s *BranchesService) DeleteMergedBranches(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/repository/merged_branches", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/repository/merged_branches", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/broadcast_messages.go b/vendor/gitlab.com/gitlab-org/api/client-go/broadcast_messages.go index 2e42200860..e40db9d3ce 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/broadcast_messages.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/broadcast_messages.go @@ -17,18 +17,41 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( BroadcastMessagesServiceInterface interface { + // ListBroadcastMessages gets a list 
of all broadcasted messages. + // + // GitLab API docs: + // https://docs.gitlab.com/api/broadcast_messages/#get-all-broadcast-messages ListBroadcastMessages(opt *ListBroadcastMessagesOptions, options ...RequestOptionFunc) ([]*BroadcastMessage, *Response, error) - GetBroadcastMessage(broadcast int, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) + + // GetBroadcastMessage gets a single broadcast message. + // + // GitLab API docs: + // https://docs.gitlab.com/api/broadcast_messages/#get-a-specific-broadcast-message + GetBroadcastMessage(broadcast int64, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) + + // CreateBroadcastMessage creates a message to broadcast. + // + // GitLab API docs: + // https://docs.gitlab.com/api/broadcast_messages/#create-a-broadcast-message CreateBroadcastMessage(opt *CreateBroadcastMessageOptions, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) - UpdateBroadcastMessage(broadcast int, opt *UpdateBroadcastMessageOptions, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) - DeleteBroadcastMessage(broadcast int, options ...RequestOptionFunc) (*Response, error) + + // UpdateBroadcastMessage updates a broadcasted message. + // + // GitLab API docs: + // https://docs.gitlab.com/api/broadcast_messages/#update-a-broadcast-message + UpdateBroadcastMessage(broadcast int64, opt *UpdateBroadcastMessageOptions, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) + + // DeleteBroadcastMessage deletes a broadcasted message. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/broadcast_messages/#delete-a-broadcast-message + DeleteBroadcastMessage(broadcast int64, options ...RequestOptionFunc) (*Response, error) } // BroadcastMessagesService handles communication with the broadcast @@ -51,7 +74,7 @@ type BroadcastMessage struct { StartsAt *time.Time `json:"starts_at"` EndsAt *time.Time `json:"ends_at"` Font string `json:"font"` - ID int `json:"id"` + ID int64 `json:"id"` Active bool `json:"active"` TargetAccessLevels []AccessLevelValue `json:"target_access_levels"` TargetPath string `json:"target_path"` @@ -65,46 +88,25 @@ type BroadcastMessage struct { // // GitLab API docs: // https://docs.gitlab.com/api/broadcast_messages/#get-all-broadcast-messages -type ListBroadcastMessagesOptions ListOptions +type ListBroadcastMessagesOptions struct { + ListOptions +} -// ListBroadcastMessages gets a list of all broadcasted messages. -// -// GitLab API docs: -// https://docs.gitlab.com/api/broadcast_messages/#get-all-broadcast-messages func (s *BroadcastMessagesService) ListBroadcastMessages(opt *ListBroadcastMessagesOptions, options ...RequestOptionFunc) ([]*BroadcastMessage, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "broadcast_messages", opt, options) - if err != nil { - return nil, nil, err - } - - var bs []*BroadcastMessage - resp, err := s.client.Do(req, &bs) - if err != nil { - return nil, resp, err - } - - return bs, resp, nil + return do[[]*BroadcastMessage](s.client, + withMethod(http.MethodGet), + withPath("broadcast_messages"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetBroadcastMessage gets a single broadcast message. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/broadcast_messages/#get-a-specific-broadcast-message -func (s *BroadcastMessagesService) GetBroadcastMessage(broadcast int, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) { - u := fmt.Sprintf("broadcast_messages/%d", broadcast) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - b := new(BroadcastMessage) - resp, err := s.client.Do(req, &b) - if err != nil { - return nil, resp, err - } - - return b, resp, nil +func (s *BroadcastMessagesService) GetBroadcastMessage(broadcast int64, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) { + return do[*BroadcastMessage](s.client, + withMethod(http.MethodGet), + withPath("broadcast_messages/%d", broadcast), + withRequestOpts(options...), + ) } // CreateBroadcastMessageOptions represents the available CreateBroadcastMessage() @@ -124,23 +126,13 @@ type CreateBroadcastMessageOptions struct { Theme *string `url:"theme,omitempty" json:"theme,omitempty"` } -// CreateBroadcastMessage creates a message to broadcast. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/broadcast_messages/#create-a-broadcast-message func (s *BroadcastMessagesService) CreateBroadcastMessage(opt *CreateBroadcastMessageOptions, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "broadcast_messages", opt, options) - if err != nil { - return nil, nil, err - } - - b := new(BroadcastMessage) - resp, err := s.client.Do(req, &b) - if err != nil { - return nil, resp, err - } - - return b, resp, nil + return do[*BroadcastMessage](s.client, + withMethod(http.MethodPost), + withPath("broadcast_messages"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateBroadcastMessageOptions represents the available CreateBroadcastMessage() @@ -160,38 +152,20 @@ type UpdateBroadcastMessageOptions struct { Theme *string `url:"theme,omitempty" json:"theme,omitempty"` } -// UpdateBroadcastMessage update a broadcasted message. -// -// GitLab API docs: -// https://docs.gitlab.com/api/broadcast_messages/#update-a-broadcast-message -func (s *BroadcastMessagesService) UpdateBroadcastMessage(broadcast int, opt *UpdateBroadcastMessageOptions, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) { - u := fmt.Sprintf("broadcast_messages/%d", broadcast) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - b := new(BroadcastMessage) - resp, err := s.client.Do(req, &b) - if err != nil { - return nil, resp, err - } - - return b, resp, nil +func (s *BroadcastMessagesService) UpdateBroadcastMessage(broadcast int64, opt *UpdateBroadcastMessageOptions, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) { + return do[*BroadcastMessage](s.client, + withMethod(http.MethodPut), + withPath("broadcast_messages/%d", broadcast), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteBroadcastMessage deletes a broadcasted message. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/broadcast_messages/#delete-a-broadcast-message -func (s *BroadcastMessagesService) DeleteBroadcastMessage(broadcast int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("broadcast_messages/%d", broadcast) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *BroadcastMessagesService) DeleteBroadcastMessage(broadcast int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("broadcast_messages/%d", broadcast), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/buf.gen.yaml b/vendor/gitlab.com/gitlab-org/api/client-go/buf.gen.yaml index 897db7737b..dd0e5916d0 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/buf.gen.yaml +++ b/vendor/gitlab.com/gitlab-org/api/client-go/buf.gen.yaml @@ -1,6 +1,6 @@ version: v2 plugins: - - remote: buf.build/protocolbuffers/go:v1.36.6 + - local: ["go", "run", "google.golang.org/protobuf/cmd/protoc-gen-go@v1.36.10"] out: . 
opt: - paths=source_relative diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/bulk_imports.go b/vendor/gitlab.com/gitlab-org/api/client-go/bulk_imports.go index 70464498e5..fe5f319dc0 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/bulk_imports.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/bulk_imports.go @@ -52,7 +52,7 @@ type BulkImportStartMigrationOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/bulk_imports/#start-a-new-group-or-project-migration type BulkImportStartMigrationResponse struct { - ID int `json:"id"` + ID int64 `json:"id"` Status string `json:"status"` SourceType string `json:"source_type"` SourceURL string `json:"source_url"` @@ -61,20 +61,11 @@ type BulkImportStartMigrationResponse struct { HasFailures bool `json:"has_failures"` } -// StartMigration starts a migration. -// -// GitLab API docs: https://docs.gitlab.com/api/bulk_imports/#start-a-new-group-or-project-migration func (b *BulkImportsService) StartMigration(startMigrationOptions *BulkImportStartMigrationOptions, options ...RequestOptionFunc) (*BulkImportStartMigrationResponse, *Response, error) { - request, err := b.client.NewRequest(http.MethodPost, "bulk_imports", startMigrationOptions, options) - if err != nil { - return nil, nil, err - } - - startMigrationResponse := new(BulkImportStartMigrationResponse) - response, err := b.client.Do(request, startMigrationResponse) - if err != nil { - return nil, response, err - } - - return startMigrationResponse, response, nil + return do[*BulkImportStartMigrationResponse](b.client, + withMethod(http.MethodPost), + withPath("bulk_imports"), + withAPIOpts(startMigrationOptions), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/ci_yml_templates.go b/vendor/gitlab.com/gitlab-org/api/client-go/ci_yml_templates.go index 168ca1fc43..f57621ee6b 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/ci_yml_templates.go +++ 
b/vendor/gitlab.com/gitlab-org/api/client-go/ci_yml_templates.go @@ -17,13 +17,21 @@ package gitlab import ( - "fmt" "net/http" ) type ( CIYMLTemplatesServiceInterface interface { + // ListAllTemplates get all GitLab CI YML templates. + // + // GitLab API docs: + // https://docs.gitlab.com/api/templates/gitlab_ci_ymls/#list-gitlab-ci-yaml-templates ListAllTemplates(opt *ListCIYMLTemplatesOptions, options ...RequestOptionFunc) ([]*CIYMLTemplateListItem, *Response, error) + + // GetTemplate get a single GitLab CI YML template. + // + // GitLab API docs: + // https://docs.gitlab.com/api/templates/gitlab_ci_ymls/#single-gitlab-ci-yaml-template GetTemplate(key string, options ...RequestOptionFunc) (*CIYMLTemplate, *Response, error) } @@ -61,44 +69,23 @@ type CIYMLTemplateListItem struct { // // GitLab API docs: // https://docs.gitlab.com/api/templates/gitlab_ci_ymls/#list-gitlab-ci-yaml-templates -type ListCIYMLTemplatesOptions ListOptions +type ListCIYMLTemplatesOptions struct { + ListOptions +} -// ListAllTemplates get all GitLab CI YML templates. -// -// GitLab API docs: -// https://docs.gitlab.com/api/templates/gitlab_ci_ymls/#list-gitlab-ci-yaml-templates func (s *CIYMLTemplatesService) ListAllTemplates(opt *ListCIYMLTemplatesOptions, options ...RequestOptionFunc) ([]*CIYMLTemplateListItem, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "templates/gitlab_ci_ymls", opt, options) - if err != nil { - return nil, nil, err - } - - var cts []*CIYMLTemplateListItem - resp, err := s.client.Do(req, &cts) - if err != nil { - return nil, resp, err - } - - return cts, resp, nil + return do[[]*CIYMLTemplateListItem](s.client, + withMethod(http.MethodGet), + withPath("templates/gitlab_ci_ymls"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetTemplate get a single GitLab CI YML template. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/templates/gitlab_ci_ymls/#single-gitlab-ci-yaml-template func (s *CIYMLTemplatesService) GetTemplate(key string, options ...RequestOptionFunc) (*CIYMLTemplate, *Response, error) { - u := fmt.Sprintf("templates/gitlab_ci_ymls/%s", PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ct := new(CIYMLTemplate) - resp, err := s.client.Do(req, ct) - if err != nil { - return nil, resp, err - } - - return ct, resp, nil + return do[*CIYMLTemplate](s.client, + withMethod(http.MethodGet), + withPath("templates/gitlab_ci_ymls/%s", key), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/client_options.go b/vendor/gitlab.com/gitlab-org/api/client-go/client_options.go index 05c9a5f0ff..b416acac13 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/client_options.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/client_options.go @@ -17,6 +17,8 @@ package gitlab import ( + "errors" + "log/slog" "net/http" "time" @@ -149,6 +151,19 @@ func WithUserAgent(userAgent string) ClientOptionFunc { } } +// WithURLWarningLogger sets a custom logger for URL validation warnings. +// By default, warnings are logged using slog.Default(). +// Pass slog.New(slog.DiscardHandler) to disable warnings. +func WithURLWarningLogger(logger *slog.Logger) ClientOptionFunc { + return func(c *Client) error { + if logger == nil { + return errors.New("logger cannot be nil, use slog.New(slog.DiscardHandler) to discard warnings") + } + c.urlWarningLogger = logger + return nil + } +} + // WithCookieJar can be used to configure a cookie jar. func WithCookieJar(jar http.CookieJar) ClientOptionFunc { return func(c *Client) error { @@ -156,3 +171,15 @@ func WithCookieJar(jar http.CookieJar) ClientOptionFunc { return nil } } + +// WithInterceptor registers an Interceptor in the client's http request call pipeline. 
+// It returns a ClientOptionFunc that adds the interceptor to the client. +func WithInterceptor(i Interceptor) ClientOptionFunc { + return func(c *Client) error { + if i == nil { + return errors.New("interceptor cannot be nil") + } + c.interceptors = append(c.interceptors, i) + return nil + } +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/cluster_agents.go b/vendor/gitlab.com/gitlab-org/api/client-go/cluster_agents.go index 88855174d7..bd124bc447 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/cluster_agents.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/cluster_agents.go @@ -17,21 +17,63 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( ClusterAgentsServiceInterface interface { + // ListAgents returns a list of agents registered for the project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/cluster_agents/#list-the-agents-for-a-project + // ListAgents returns a list of agents registered for the project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/cluster_agents/#list-the-agents-for-a-project ListAgents(pid any, opt *ListAgentsOptions, options ...RequestOptionFunc) ([]*Agent, *Response, error) - GetAgent(pid any, id int, options ...RequestOptionFunc) (*Agent, *Response, error) + + // GetAgent gets a single agent details. + // + // GitLab API docs: + // https://docs.gitlab.com/api/cluster_agents/#get-details-about-an-agent + GetAgent(pid any, id int64, options ...RequestOptionFunc) (*Agent, *Response, error) + + // RegisterAgent registers an agent to the project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/cluster_agents/#register-an-agent-with-a-project RegisterAgent(pid any, opt *RegisterAgentOptions, options ...RequestOptionFunc) (*Agent, *Response, error) - DeleteAgent(pid any, id int, options ...RequestOptionFunc) (*Response, error) - ListAgentTokens(pid any, aid int, opt *ListAgentTokensOptions, options ...RequestOptionFunc) ([]*AgentToken, *Response, error) - GetAgentToken(pid any, aid int, id int, options ...RequestOptionFunc) (*AgentToken, *Response, error) - CreateAgentToken(pid any, aid int, opt *CreateAgentTokenOptions, options ...RequestOptionFunc) (*AgentToken, *Response, error) - RevokeAgentToken(pid any, aid int, id int, options ...RequestOptionFunc) (*Response, error) + + // DeleteAgent deletes an existing agent registration. + // + // GitLab API docs: + // https://docs.gitlab.com/api/cluster_agents/#delete-a-registered-agent + DeleteAgent(pid any, id int64, options ...RequestOptionFunc) (*Response, error) + + // ListAgentTokens returns a list of tokens for an agent. + // + // GitLab API docs: + // https://docs.gitlab.com/api/cluster_agents/#list-tokens-for-an-agent + ListAgentTokens(pid any, aid int64, opt *ListAgentTokensOptions, options ...RequestOptionFunc) ([]*AgentToken, *Response, error) + + // GetAgentToken gets a single agent token. + // + // GitLab API docs: + // https://docs.gitlab.com/api/cluster_agents/#get-a-single-agent-token + GetAgentToken(pid any, aid int64, id int64, options ...RequestOptionFunc) (*AgentToken, *Response, error) + + // CreateAgentToken creates a new token for an agent. + // + // GitLab API docs: + // https://docs.gitlab.com/api/cluster_agents/#create-an-agent-token + CreateAgentToken(pid any, aid int64, opt *CreateAgentTokenOptions, options ...RequestOptionFunc) (*AgentToken, *Response, error) + + // RevokeAgentToken revokes an agent token. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/cluster_agents/#revoke-an-agent-token + RevokeAgentToken(pid any, aid int64, id int64, options ...RequestOptionFunc) (*Response, error) } // ClusterAgentsService handles communication with the cluster agents related @@ -49,15 +91,15 @@ var _ ClusterAgentsServiceInterface = (*ClusterAgentsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/cluster_agents/ type Agent struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` CreatedAt *time.Time `json:"created_at"` - CreatedByUserID int `json:"created_by_user_id"` + CreatedByUserID int64 `json:"created_by_user_id"` ConfigProject ConfigProject `json:"config_project"` } type ConfigProject struct { - ID int `json:"id"` + ID int64 `json:"id"` Description string `json:"description"` Name string `json:"name"` NameWithNamespace string `json:"name_with_namespace"` @@ -75,13 +117,13 @@ func (a Agent) String() string { // GitLab API docs: // https://docs.gitlab.com/api/cluster_agents/#list-tokens-for-an-agent type AgentToken struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Description string `json:"description"` - AgentID int `json:"agent_id"` + AgentID int64 `json:"agent_id"` Status string `json:"status"` CreatedAt *time.Time `json:"created_at"` - CreatedByUserID int `json:"created_by_user_id"` + CreatedByUserID int64 `json:"created_by_user_id"` LastUsedAt *time.Time `json:"last_used_at"` Token string `json:"token"` } @@ -94,56 +136,23 @@ func (a AgentToken) String() string { // // GitLab API docs: // https://docs.gitlab.com/api/cluster_agents/#list-the-agents-for-a-project -type ListAgentsOptions ListOptions +type ListAgentsOptions struct { + ListOptions +} -// ListAgents returns a list of agents registered for the project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/cluster_agents/#list-the-agents-for-a-project func (s *ClusterAgentsService) ListAgents(pid any, opt *ListAgentsOptions, options ...RequestOptionFunc) ([]*Agent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - uri := fmt.Sprintf("projects/%s/cluster_agents", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, uri, opt, options) - if err != nil { - return nil, nil, err - } - - var as []*Agent - resp, err := s.client.Do(req, &as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil + return do[[]*Agent](s.client, + withPath("projects/%s/cluster_agents", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetAgent gets a single agent details. -// -// GitLab API docs: -// https://docs.gitlab.com/api/cluster_agents/#get-details-about-an-agent -func (s *ClusterAgentsService) GetAgent(pid any, id int, options ...RequestOptionFunc) (*Agent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - uri := fmt.Sprintf("projects/%s/cluster_agents/%d", PathEscape(project), id) - - req, err := s.client.NewRequest(http.MethodGet, uri, nil, options) - if err != nil { - return nil, nil, err - } - - a := new(Agent) - resp, err := s.client.Do(req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil +func (s *ClusterAgentsService) GetAgent(pid any, id int64, options ...RequestOptionFunc) (*Agent, *Response, error) { + return do[*Agent](s.client, + withPath("projects/%s/cluster_agents/%d", ProjectID{pid}, id), + withRequestOpts(options...), + ) } // RegisterAgentOptions represents the available RegisterAgent() @@ -155,104 +164,47 @@ type RegisterAgentOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` } -// RegisterAgent registers an agent to the project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/cluster_agents/#register-an-agent-with-a-project func (s *ClusterAgentsService) RegisterAgent(pid any, opt *RegisterAgentOptions, options ...RequestOptionFunc) (*Agent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - uri := fmt.Sprintf("projects/%s/cluster_agents", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, uri, opt, options) - if err != nil { - return nil, nil, err - } - - a := new(Agent) - resp, err := s.client.Do(req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil + return do[*Agent](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/cluster_agents", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteAgent deletes an existing agent registration. -// -// GitLab API docs: -// https://docs.gitlab.com/api/cluster_agents/#delete-a-registered-agent -func (s *ClusterAgentsService) DeleteAgent(pid any, id int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - uri := fmt.Sprintf("projects/%s/cluster_agents/%d", PathEscape(project), id) - - req, err := s.client.NewRequest(http.MethodDelete, uri, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ClusterAgentsService) DeleteAgent(pid any, id int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/cluster_agents/%d", ProjectID{pid}, id), + withRequestOpts(options...), + ) + return resp, err } // ListAgentTokensOptions represents the available ListAgentTokens() options. // // GitLab API docs: // https://docs.gitlab.com/api/cluster_agents/#list-tokens-for-an-agent -type ListAgentTokensOptions ListOptions - -// ListAgentTokens returns a list of tokens for an agent. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/cluster_agents/#list-tokens-for-an-agent -func (s *ClusterAgentsService) ListAgentTokens(pid any, aid int, opt *ListAgentTokensOptions, options ...RequestOptionFunc) ([]*AgentToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens", PathEscape(project), aid) - - req, err := s.client.NewRequest(http.MethodGet, uri, opt, options) - if err != nil { - return nil, nil, err - } - - var ats []*AgentToken - resp, err := s.client.Do(req, &ats) - if err != nil { - return nil, resp, err - } - - return ats, resp, nil +type ListAgentTokensOptions struct { + ListOptions } -// GetAgentToken gets a single agent token. -// -// GitLab API docs: -// https://docs.gitlab.com/api/cluster_agents/#get-a-single-agent-token -func (s *ClusterAgentsService) GetAgentToken(pid any, aid int, id int, options ...RequestOptionFunc) (*AgentToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens/%d", PathEscape(project), aid, id) - - req, err := s.client.NewRequest(http.MethodGet, uri, nil, options) - if err != nil { - return nil, nil, err - } - - at := new(AgentToken) - resp, err := s.client.Do(req, at) - if err != nil { - return nil, resp, err - } +func (s *ClusterAgentsService) ListAgentTokens(pid any, aid int64, opt *ListAgentTokensOptions, options ...RequestOptionFunc) ([]*AgentToken, *Response, error) { + return do[[]*AgentToken](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/cluster_agents/%d/tokens", ProjectID{pid}, aid), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} - return at, resp, nil +func (s *ClusterAgentsService) GetAgentToken(pid any, aid int64, id int64, options ...RequestOptionFunc) (*AgentToken, *Response, error) { + return do[*AgentToken](s.client, + withMethod(http.MethodGet), + 
withPath("projects/%s/cluster_agents/%d/tokens/%d", ProjectID{pid}, aid, id), + withRequestOpts(options...), + ) } // CreateAgentTokenOptions represents the available CreateAgentToken() options. @@ -264,46 +216,20 @@ type CreateAgentTokenOptions struct { Description *string `url:"description,omitempty" json:"description,omitempty"` } -// CreateAgentToken creates a new token for an agent. -// -// GitLab API docs: -// https://docs.gitlab.com/api/cluster_agents/#create-an-agent-token -func (s *ClusterAgentsService) CreateAgentToken(pid any, aid int, opt *CreateAgentTokenOptions, options ...RequestOptionFunc) (*AgentToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens", PathEscape(project), aid) - - req, err := s.client.NewRequest(http.MethodPost, uri, opt, options) - if err != nil { - return nil, nil, err - } - - at := new(AgentToken) - resp, err := s.client.Do(req, at) - if err != nil { - return nil, resp, err - } - - return at, resp, nil +func (s *ClusterAgentsService) CreateAgentToken(pid any, aid int64, opt *CreateAgentTokenOptions, options ...RequestOptionFunc) (*AgentToken, *Response, error) { + return do[*AgentToken](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/cluster_agents/%d/tokens", ProjectID{pid}, aid), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// RevokeAgentToken revokes an agent token. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/cluster_agents/#revoke-an-agent-token -func (s *ClusterAgentsService) RevokeAgentToken(pid any, aid int, id int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens/%d", PathEscape(project), aid, id) - - req, err := s.client.NewRequest(http.MethodDelete, uri, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ClusterAgentsService) RevokeAgentToken(pid any, aid int64, id int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/cluster_agents/%d/tokens/%d", ProjectID{pid}, aid, id), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/commitlint.config.mjs b/vendor/gitlab.com/gitlab-org/api/client-go/commitlint.config.mjs deleted file mode 100644 index e957841b39..0000000000 --- a/vendor/gitlab.com/gitlab-org/api/client-go/commitlint.config.mjs +++ /dev/null @@ -1,8 +0,0 @@ -export default { - extends: ['@commitlint/config-conventional'], - "rules": { - "body-max-line-length": [0, "always", 100], - "subject-case": [0, "always", ["sentence-case"]], - "header-max-length": [2, "always", 200] - } -}; diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/commits.go b/vendor/gitlab.com/gitlab-org/api/client-go/commits.go index c139bdf867..36e678461e 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/commits.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/commits.go @@ -17,9 +17,8 @@ package gitlab import ( - "fmt" + "errors" "net/http" - "net/url" "time" ) @@ -29,18 +28,85 @@ import ( // GitLab API docs: https://docs.gitlab.com/api/commits/ type ( CommitsServiceInterface interface { + // ListCommits gets a list of repository commits in a project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/commits/#list-repository-commits ListCommits(pid any, opt *ListCommitsOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) + + // GetCommitRefs gets all references (from branches or tags) a commit is pushed to. + // + // GitLab API docs: + // https://docs.gitlab.com/api/commits/#get-references-a-commit-is-pushed-to GetCommitRefs(pid any, sha string, opt *GetCommitRefsOptions, options ...RequestOptionFunc) ([]*CommitRef, *Response, error) + + // GetCommit gets a specific commit identified by the commit hash or name of a + // branch or tag. + // + // GitLab API docs: + // https://docs.gitlab.com/api/commits/#get-a-single-commit GetCommit(pid any, sha string, opt *GetCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) + + // CreateCommit creates a commit with multiple files and actions. + // + // GitLab API docs: + // https://docs.gitlab.com/api/commits/#create-a-commit-with-multiple-files-and-actions CreateCommit(pid any, opt *CreateCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) + + // GetCommitDiff gets the diff of a commit in a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/commits/#get-the-diff-of-a-commit GetCommitDiff(pid any, sha string, opt *GetCommitDiffOptions, options ...RequestOptionFunc) ([]*Diff, *Response, error) + + // GetCommitComments gets the comments of a commit in a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/commits/#get-the-comments-of-a-commit GetCommitComments(pid any, sha string, opt *GetCommitCommentsOptions, options ...RequestOptionFunc) ([]*CommitComment, *Response, error) + + // PostCommitComment adds a comment to a commit. Optionally you can post + // comments on a specific line of a commit. Therefore both path, line_new and + // line_old are required. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/commits/#post-comment-to-commit PostCommitComment(pid any, sha string, opt *PostCommitCommentOptions, options ...RequestOptionFunc) (*CommitComment, *Response, error) + + // GetCommitStatuses gets the statuses of a commit in a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/commits/#list-the-statuses-of-a-commit GetCommitStatuses(pid any, sha string, opt *GetCommitStatusesOptions, options ...RequestOptionFunc) ([]*CommitStatus, *Response, error) + + // SetCommitStatus sets the status of a commit in a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/commits/#set-the-pipeline-status-of-a-commit SetCommitStatus(pid any, sha string, opt *SetCommitStatusOptions, options ...RequestOptionFunc) (*CommitStatus, *Response, error) + + // ListMergeRequestsByCommit gets merge request associated with a commit. + // + // GitLab API docs: + // https://docs.gitlab.com/api/commits/#list-merge-requests-associated-with-a-commit ListMergeRequestsByCommit(pid any, sha string, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) + + // CherryPickCommit cherry picks a commit to a given branch. + // + // GitLab API docs: + // https://docs.gitlab.com/api/commits/#cherry-pick-a-commit CherryPickCommit(pid any, sha string, opt *CherryPickCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) + + // RevertCommit reverts a commit in a given branch. + // + // GitLab API docs: + // https://docs.gitlab.com/api/commits/#revert-a-commit RevertCommit(pid any, sha string, opt *RevertCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) + + // GetGPGSignature gets a GPG signature of a commit. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/commits/#get-signature-of-a-commit GetGPGSignature(pid any, sha string, options ...RequestOptionFunc) (*GPGSignature, *Response, error) } @@ -74,7 +140,7 @@ type Commit struct { Stats *CommitStats `json:"stats"` Status *BuildStateValue `json:"status"` LastPipeline *PipelineInfo `json:"last_pipeline"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` Trailers map[string]string `json:"trailers"` ExtendedTrailers map[string]string `json:"extended_trailers"` WebURL string `json:"web_url"` @@ -84,9 +150,9 @@ type Commit struct { // // GitLab API docs: https://docs.gitlab.com/api/commits/ type CommitStats struct { - Additions int `json:"additions"` - Deletions int `json:"deletions"` - Total int `json:"total"` + Additions int64 `json:"additions"` + Deletions int64 `json:"deletions"` + Total int64 `json:"total"` } func (c Commit) String() string { @@ -109,28 +175,13 @@ type ListCommitsOptions struct { Trailers *bool `url:"trailers,omitempty" json:"trailers,omitempty"` } -// ListCommits gets a list of repository commits in a project. -// -// GitLab API docs: https://docs.gitlab.com/api/commits/#list-repository-commits func (s *CommitsService) ListCommits(pid any, opt *ListCommitsOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var c []*Commit - resp, err := s.client.Do(req, &c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil + return do[[]*Commit](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/repository/commits", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CommitRef represents the reference of branches/tags in a commit. 
@@ -151,29 +202,13 @@ type GetCommitRefsOptions struct { Type *string `url:"type,omitempty" json:"type,omitempty"` } -// GetCommitRefs gets all references (from branches or tags) a commit is pushed to -// -// GitLab API docs: -// https://docs.gitlab.com/api/commits/#get-references-a-commit-is-pushed-to func (s *CommitsService) GetCommitRefs(pid any, sha string, opt *GetCommitRefsOptions, options ...RequestOptionFunc) ([]*CommitRef, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/refs", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var cs []*CommitRef - resp, err := s.client.Do(req, &cs) - if err != nil { - return nil, resp, err - } - - return cs, resp, nil + return do[[]*CommitRef](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/repository/commits/%s/refs", ProjectID{pid}, sha), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetCommitOptions represents the available GetCommit() options. @@ -184,32 +219,17 @@ type GetCommitOptions struct { Stats *bool `url:"stats,omitempty" json:"stats,omitempty"` } -// GetCommit gets a specific commit identified by the commit hash or name of a -// branch or tag. 
-// -// GitLab API docs: https://docs.gitlab.com/api/commits/#get-a-single-commit func (s *CommitsService) GetCommit(pid any, sha string, opt *GetCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } if sha == "" { - return nil, nil, fmt.Errorf("SHA must be a non-empty string") + return nil, nil, errors.New("SHA must be a non-empty string") } - u := fmt.Sprintf("projects/%s/repository/commits/%s", PathEscape(project), url.PathEscape(sha)) - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil + return do[*Commit](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/repository/commits/%s", ProjectID{pid}, sha), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateCommitOptions represents the available options for a new commit. @@ -242,28 +262,13 @@ type CommitActionOptions struct { ExecuteFilemode *bool `url:"execute_filemode,omitempty" json:"execute_filemode,omitempty"` } -// CreateCommit creates a commit with multiple files and actions. 
-// -// GitLab API docs: https://docs.gitlab.com/api/commits/#create-a-commit-with-multiple-files-and-actions func (s *CommitsService) CreateCommit(pid any, opt *CreateCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(req, &c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil + return do[*Commit](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/repository/commits", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // Diff represents a GitLab diff. @@ -293,29 +298,13 @@ type GetCommitDiffOptions struct { Unidiff *bool `url:"unidiff,omitempty" json:"unidiff,omitempty"` } -// GetCommitDiff gets the diff of a commit in a project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/commits/#get-the-diff-of-a-commit func (s *CommitsService) GetCommitDiff(pid any, sha string, opt *GetCommitDiffOptions, options ...RequestOptionFunc) ([]*Diff, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/diff", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var d []*Diff - resp, err := s.client.Do(req, &d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil + return do[[]*Diff](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/repository/commits/%s/diff", ProjectID{pid}, sha), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CommitComment represents a GitLab commit comment. 
@@ -324,14 +313,14 @@ func (s *CommitsService) GetCommitDiff(pid any, sha string, opt *GetCommitDiffOp type CommitComment struct { Note string `json:"note"` Path string `json:"path"` - Line int `json:"line"` + Line int64 `json:"line"` LineType string `json:"line_type"` Author Author `json:"author"` } // Author represents a GitLab commit author type Author struct { - ID int `json:"id"` + ID int64 `json:"id"` Username string `json:"username"` Email string `json:"email"` Name string `json:"name"` @@ -348,31 +337,17 @@ func (c CommitComment) String() string { // // GitLab API docs: // https://docs.gitlab.com/api/commits/#get-the-comments-of-a-commit -type GetCommitCommentsOptions ListOptions +type GetCommitCommentsOptions struct { + ListOptions +} -// GetCommitComments gets the comments of a commit in a project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/commits/#get-the-comments-of-a-commit func (s *CommitsService) GetCommitComments(pid any, sha string, opt *GetCommitCommentsOptions, options ...RequestOptionFunc) ([]*CommitComment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/comments", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var c []*CommitComment - resp, err := s.client.Do(req, &c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil + return do[[]*CommitComment](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/repository/commits/%s/comments", ProjectID{pid}, sha), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // PostCommitCommentOptions represents the available PostCommitComment() @@ -383,35 +358,17 @@ func (s *CommitsService) GetCommitComments(pid any, sha string, opt *GetCommitCo type PostCommitCommentOptions struct { Note *string `url:"note,omitempty" json:"note,omitempty"` Path 
*string `url:"path" json:"path"` - Line *int `url:"line" json:"line"` + Line *int64 `url:"line" json:"line"` LineType *string `url:"line_type" json:"line_type"` } -// PostCommitComment adds a comment to a commit. Optionally you can post -// comments on a specific line of a commit. Therefor both path, line_new and -// line_old are required. -// -// GitLab API docs: -// https://docs.gitlab.com/api/commits/#post-comment-to-commit func (s *CommitsService) PostCommitComment(pid any, sha string, opt *PostCommitCommentOptions, options ...RequestOptionFunc) (*CommitComment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/comments", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - c := new(CommitComment) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil + return do[*CommitComment](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/repository/commits/%s/comments", ProjectID{pid}, sha), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetCommitStatusesOptions represents the available GetCommitStatuses() options. 
@@ -422,7 +379,7 @@ type GetCommitStatusesOptions struct { Ref *string `url:"ref,omitempty" json:"ref,omitempty"` Stage *string `url:"stage,omitempty" json:"stage,omitempty"` Name *string `url:"name,omitempty" json:"name,omitempty"` - PipelineID *int `url:"pipeline_id,omitempty" json:"pipeline_id,omitempty"` + PipelineID *int64 `url:"pipeline_id,omitempty" json:"pipeline_id,omitempty"` All *bool `url:"all,omitempty" json:"all,omitempty"` } @@ -430,7 +387,7 @@ type GetCommitStatusesOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/commits/#commit-status type CommitStatus struct { - ID int `json:"id"` + ID int64 `json:"id"` SHA string `json:"sha"` Ref string `json:"ref"` Status string `json:"status"` @@ -440,34 +397,19 @@ type CommitStatus struct { Name string `json:"name"` AllowFailure bool `json:"allow_failure"` Coverage float64 `json:"coverage"` - PipelineId int `json:"pipeline_id"` + PipelineID int64 `json:"pipeline_id"` Author Author `json:"author"` Description string `json:"description"` TargetURL string `json:"target_url"` } -// GetCommitStatuses gets the statuses of a commit in a project. 
-// -// GitLab API docs: https://docs.gitlab.com/api/commits/#list-the-statuses-of-a-commit func (s *CommitsService) GetCommitStatuses(pid any, sha string, opt *GetCommitStatusesOptions, options ...RequestOptionFunc) ([]*CommitStatus, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/statuses", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var cs []*CommitStatus - resp, err := s.client.Do(req, &cs) - if err != nil { - return nil, resp, err - } - - return cs, resp, nil + return do[[]*CommitStatus](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/repository/commits/%s/statuses", ProjectID{pid}, sha), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // SetCommitStatusOptions represents the available SetCommitStatus() options. @@ -481,56 +423,24 @@ type SetCommitStatusOptions struct { TargetURL *string `url:"target_url,omitempty" json:"target_url,omitempty"` Description *string `url:"description,omitempty" json:"description,omitempty"` Coverage *float64 `url:"coverage,omitempty" json:"coverage,omitempty"` - PipelineID *int `url:"pipeline_id,omitempty" json:"pipeline_id,omitempty"` + PipelineID *int64 `url:"pipeline_id,omitempty" json:"pipeline_id,omitempty"` } -// SetCommitStatus sets the status of a commit in a project. 
-// -// GitLab API docs: https://docs.gitlab.com/api/commits/#set-the-pipeline-status-of-a-commit func (s *CommitsService) SetCommitStatus(pid any, sha string, opt *SetCommitStatusOptions, options ...RequestOptionFunc) (*CommitStatus, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/statuses/%s", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - cs := new(CommitStatus) - resp, err := s.client.Do(req, &cs) - if err != nil { - return nil, resp, err - } - - return cs, resp, nil + return do[*CommitStatus](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/statuses/%s", ProjectID{pid}, sha), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// ListMergeRequestsByCommit gets merge request associated with a commit. -// -// GitLab API docs: -// https://docs.gitlab.com/api/commits/#list-merge-requests-associated-with-a-commit func (s *CommitsService) ListMergeRequestsByCommit(pid any, sha string, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/merge_requests", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var mrs []*BasicMergeRequest - resp, err := s.client.Do(req, &mrs) - if err != nil { - return nil, resp, err - } - - return mrs, resp, nil + return do[[]*BasicMergeRequest](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/repository/commits/%s/merge_requests", ProjectID{pid}, sha), + withRequestOpts(options...), + ) } // CherryPickCommitOptions represents the available CherryPickCommit() options. 
@@ -542,94 +452,47 @@ type CherryPickCommitOptions struct { Message *string `url:"message,omitempty" json:"message,omitempty"` } -// CherryPickCommit cherry picks a commit to a given branch. -// -// GitLab API docs: https://docs.gitlab.com/api/commits/#cherry-pick-a-commit -func (s *CommitsService) CherryPickCommit(pid any, sha string, opt *CherryPickCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/cherry_pick", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(req, &c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - // RevertCommitOptions represents the available RevertCommit() options. -// // GitLab API docs: https://docs.gitlab.com/api/commits/#revert-a-commit type RevertCommitOptions struct { Branch *string `url:"branch,omitempty" json:"branch,omitempty"` } -// RevertCommit reverts a commit in a given branch. 
-// -// GitLab API docs: https://docs.gitlab.com/api/commits/#revert-a-commit -func (s *CommitsService) RevertCommit(pid any, sha string, opt *RevertCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/revert", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(req, &c) - if err != nil { - return nil, resp, err - } +func (s *CommitsService) CherryPickCommit(pid any, sha string, opt *CherryPickCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { + return do[*Commit](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/repository/commits/%s/cherry_pick", ProjectID{pid}, sha), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} - return c, resp, nil +func (s *CommitsService) RevertCommit(pid any, sha string, opt *RevertCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { + return do[*Commit](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/repository/commits/%s/revert", ProjectID{pid}, sha), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GPGSignature represents a Gitlab commit's GPG Signature. +// GPGSignature represents a GitLab commit's GPG Signature. // // GitLab API docs: // https://docs.gitlab.com/api/commits/#get-signature-of-a-commit type GPGSignature struct { - KeyID int `json:"gpg_key_id"` + KeyID int64 `json:"gpg_key_id"` KeyPrimaryKeyID string `json:"gpg_key_primary_keyid"` KeyUserName string `json:"gpg_key_user_name"` KeyUserEmail string `json:"gpg_key_user_email"` VerificationStatus string `json:"verification_status"` - KeySubkeyID int `json:"gpg_key_subkey_id"` + KeySubkeyID int64 `json:"gpg_key_subkey_id"` } -// GetGPGSignature gets a GPG signature of a commit. 
-// -// GitLab API docs: https://docs.gitlab.com/api/commits/#get-signature-of-a-commit func (s *CommitsService) GetGPGSignature(pid any, sha string, options ...RequestOptionFunc) (*GPGSignature, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/signature", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - sig := new(GPGSignature) - resp, err := s.client.Do(req, &sig) - if err != nil { - return nil, resp, err - } - - return sig, resp, nil + return do[*GPGSignature](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/repository/commits/%s/signature", ProjectID{pid}, sha), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/container_registry.go b/vendor/gitlab.com/gitlab-org/api/client-go/container_registry.go index 4e473ee0e4..5ebe2664db 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/container_registry.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/container_registry.go @@ -17,21 +17,59 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( ContainerRegistryServiceInterface interface { - ListProjectRegistryRepositories(pid any, opt *ListRegistryRepositoriesOptions, options ...RequestOptionFunc) ([]*RegistryRepository, *Response, error) - ListGroupRegistryRepositories(gid any, opt *ListRegistryRepositoriesOptions, options ...RequestOptionFunc) ([]*RegistryRepository, *Response, error) + // ListProjectRegistryRepositories gets a list of registry repositories in a project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/container_registry/#within-a-project + ListProjectRegistryRepositories(pid any, opt *ListProjectRegistryRepositoriesOptions, options ...RequestOptionFunc) ([]*RegistryRepository, *Response, error) + + // ListGroupRegistryRepositories gets a list of registry repositories in a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/container_registry/#within-a-group + ListGroupRegistryRepositories(gid any, opt *ListGroupRegistryRepositoriesOptions, options ...RequestOptionFunc) ([]*RegistryRepository, *Response, error) + + // GetSingleRegistryRepository gets the details of single registry repository. + // + // GitLab API docs: + // https://docs.gitlab.com/api/container_registry/#get-details-of-a-single-repository GetSingleRegistryRepository(pid any, opt *GetSingleRegistryRepositoryOptions, options ...RequestOptionFunc) (*RegistryRepository, *Response, error) - DeleteRegistryRepository(pid any, repository int, options ...RequestOptionFunc) (*Response, error) - ListRegistryRepositoryTags(pid any, repository int, opt *ListRegistryRepositoryTagsOptions, options ...RequestOptionFunc) ([]*RegistryRepositoryTag, *Response, error) - GetRegistryRepositoryTagDetail(pid any, repository int, tagName string, options ...RequestOptionFunc) (*RegistryRepositoryTag, *Response, error) - DeleteRegistryRepositoryTag(pid any, repository int, tagName string, options ...RequestOptionFunc) (*Response, error) - DeleteRegistryRepositoryTags(pid any, repository int, opt *DeleteRegistryRepositoryTagsOptions, options ...RequestOptionFunc) (*Response, error) + + // DeleteRegistryRepository deletes a repository in a registry. + // + // GitLab API docs: + // https://docs.gitlab.com/api/container_registry/#delete-registry-repository + DeleteRegistryRepository(pid any, repository int64, options ...RequestOptionFunc) (*Response, error) + + // ListRegistryRepositoryTags gets a list of tags for given registry repository. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/container_registry/#list-registry-repository-tags + ListRegistryRepositoryTags(pid any, repository int64, opt *ListRegistryRepositoryTagsOptions, options ...RequestOptionFunc) ([]*RegistryRepositoryTag, *Response, error) + + // GetRegistryRepositoryTagDetail get details of a registry repository tag. + // + // GitLab API docs: + // https://docs.gitlab.com/api/container_registry/#get-details-of-a-registry-repository-tag + GetRegistryRepositoryTagDetail(pid any, repository int64, tagName string, options ...RequestOptionFunc) (*RegistryRepositoryTag, *Response, error) + + // DeleteRegistryRepositoryTag deletes a registry repository tag. + // + // GitLab API docs: + // https://docs.gitlab.com/api/container_registry/#delete-a-registry-repository-tag + DeleteRegistryRepositoryTag(pid any, repository int64, tagName string, options ...RequestOptionFunc) (*Response, error) + + // DeleteRegistryRepositoryTags deletes repository tags in bulk based on given criteria. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/container_registry/#delete-registry-repository-tags-in-bulk + DeleteRegistryRepositoryTags(pid any, repository int64, opt *DeleteRegistryRepositoryTagsOptions, options ...RequestOptionFunc) (*Response, error) } // ContainerRegistryService handles communication with the container registry @@ -49,15 +87,15 @@ var _ ContainerRegistryServiceInterface = (*ContainerRegistryService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/container_registry/ type RegistryRepository struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Path string `json:"path"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` Location string `json:"location"` CreatedAt *time.Time `json:"created_at"` CleanupPolicyStartedAt *time.Time `json:"cleanup_policy_started_at"` Status *ContainerRegistryStatus `json:"status"` - TagsCount int `json:"tags_count"` + TagsCount int64 `json:"tags_count"` Tags []*RegistryRepositoryTag `json:"tags"` } @@ -76,74 +114,50 @@ type RegistryRepositoryTag struct { ShortRevision string `json:"short_revision"` Digest string `json:"digest"` CreatedAt *time.Time `json:"created_at"` - TotalSize int `json:"total_size"` + TotalSize int64 `json:"total_size"` } func (s RegistryRepositoryTag) String() string { return Stringify(s) } -// ListRegistryRepositoriesOptions represents the available -// ListRegistryRepositories() options. +// ListProjectRegistryRepositoriesOptions represents the available +// ListProjectRegistryRepositories() options. // // GitLab API docs: // https://docs.gitlab.com/api/container_registry/#list-registry-repositories -type ListRegistryRepositoriesOptions struct { +type ListProjectRegistryRepositoriesOptions struct { ListOptions - // Deprecated: These options are deprecated for ListGroupRegistryRepositories calls. 
(Removed in GitLab 15.0) Tags *bool `url:"tags,omitempty" json:"tags,omitempty"` TagsCount *bool `url:"tags_count,omitempty" json:"tags_count,omitempty"` } -// ListProjectRegistryRepositories gets a list of registry repositories in a project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/container_registry/#within-a-project -func (s *ContainerRegistryService) ListProjectRegistryRepositories(pid any, opt *ListRegistryRepositoriesOptions, options ...RequestOptionFunc) ([]*RegistryRepository, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/registry/repositories", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var repos []*RegistryRepository - resp, err := s.client.Do(req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// ListGroupRegistryRepositories gets a list of registry repositories in a group. +// ListGroupRegistryRepositoriesOptions represents the available +// ListGroupRegistryRepositories() options. 
// // GitLab API docs: // https://docs.gitlab.com/api/container_registry/#within-a-group -func (s *ContainerRegistryService) ListGroupRegistryRepositories(gid any, opt *ListRegistryRepositoriesOptions, options ...RequestOptionFunc) ([]*RegistryRepository, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/registry/repositories", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } +type ListGroupRegistryRepositoriesOptions struct { + ListOptions +} - var repos []*RegistryRepository - resp, err := s.client.Do(req, &repos) - if err != nil { - return nil, resp, err - } +func (s *ContainerRegistryService) ListProjectRegistryRepositories(pid any, opt *ListProjectRegistryRepositoriesOptions, options ...RequestOptionFunc) ([]*RegistryRepository, *Response, error) { + return do[[]*RegistryRepository](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/registry/repositories", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} - return repos, resp, nil +func (s *ContainerRegistryService) ListGroupRegistryRepositories(gid any, opt *ListGroupRegistryRepositoriesOptions, options ...RequestOptionFunc) ([]*RegistryRepository, *Response, error) { + return do[[]*RegistryRepository](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/registry/repositories", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetSingleRegistryRepositoryOptions represents the available @@ -156,48 +170,22 @@ type GetSingleRegistryRepositoryOptions struct { TagsCount *bool `url:"tags_count,omitempty" json:"tags_count,omitempty"` } -// GetSingleRegistryRepository gets the details of single registry repository. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/container_registry/#get-details-of-a-single-repository func (s *ContainerRegistryService) GetSingleRegistryRepository(pid any, opt *GetSingleRegistryRepositoryOptions, options ...RequestOptionFunc) (*RegistryRepository, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("registry/repositories/%s", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - repo := new(RegistryRepository) - resp, err := s.client.Do(req, repo) - if err != nil { - return nil, resp, err - } - - return repo, resp, nil + return do[*RegistryRepository](s.client, + withMethod(http.MethodGet), + withPath("registry/repositories/%s", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteRegistryRepository deletes a repository in a registry. -// -// GitLab API docs: -// https://docs.gitlab.com/api/container_registry/#delete-registry-repository -func (s *ContainerRegistryService) DeleteRegistryRepository(pid any, repository int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/registry/repositories/%d", PathEscape(project), repository) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ContainerRegistryService) DeleteRegistryRepository(pid any, repository int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/registry/repositories/%d", ProjectID{pid}, repository), + withRequestOpts(options...), + ) + return resp, err } // ListRegistryRepositoryTagsOptions represents the available @@ -205,86 +193,34 @@ func (s *ContainerRegistryService) DeleteRegistryRepository(pid any, 
repository // // GitLab API docs: // https://docs.gitlab.com/api/container_registry/#list-registry-repository-tags -type ListRegistryRepositoryTagsOptions ListOptions +type ListRegistryRepositoryTagsOptions struct { + ListOptions +} -// ListRegistryRepositoryTags gets a list of tags for given registry repository. -// -// GitLab API docs: -// https://docs.gitlab.com/api/container_registry/#list-registry-repository-tags -func (s *ContainerRegistryService) ListRegistryRepositoryTags(pid any, repository int, opt *ListRegistryRepositoryTagsOptions, options ...RequestOptionFunc) ([]*RegistryRepositoryTag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/registry/repositories/%d/tags", - PathEscape(project), - repository, +func (s *ContainerRegistryService) ListRegistryRepositoryTags(pid any, repository int64, opt *ListRegistryRepositoryTagsOptions, options ...RequestOptionFunc) ([]*RegistryRepositoryTag, *Response, error) { + return do[[]*RegistryRepositoryTag](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/registry/repositories/%d/tags", ProjectID{pid}, repository), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var tags []*RegistryRepositoryTag - resp, err := s.client.Do(req, &tags) - if err != nil { - return nil, resp, err - } - - return tags, resp, nil } -// GetRegistryRepositoryTagDetail get details of a registry repository tag -// -// GitLab API docs: -// https://docs.gitlab.com/api/container_registry/#get-details-of-a-registry-repository-tag -func (s *ContainerRegistryService) GetRegistryRepositoryTagDetail(pid any, repository int, tagName string, options ...RequestOptionFunc) (*RegistryRepositoryTag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := 
fmt.Sprintf("projects/%s/registry/repositories/%d/tags/%s", - PathEscape(project), - repository, - tagName, +func (s *ContainerRegistryService) GetRegistryRepositoryTagDetail(pid any, repository int64, tagName string, options ...RequestOptionFunc) (*RegistryRepositoryTag, *Response, error) { + return do[*RegistryRepositoryTag](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/registry/repositories/%d/tags/%s", ProjectID{pid}, repository, tagName), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - tag := new(RegistryRepositoryTag) - resp, err := s.client.Do(req, &tag) - if err != nil { - return nil, resp, err - } - - return tag, resp, nil } -// DeleteRegistryRepositoryTag deletes a registry repository tag. -// -// GitLab API docs: -// https://docs.gitlab.com/api/container_registry/#delete-a-registry-repository-tag -func (s *ContainerRegistryService) DeleteRegistryRepositoryTag(pid any, repository int, tagName string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/registry/repositories/%d/tags/%s", - PathEscape(project), - repository, - tagName, +func (s *ContainerRegistryService) DeleteRegistryRepositoryTag(pid any, repository int64, tagName string, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/registry/repositories/%d/tags/%s", ProjectID{pid}, repository, tagName), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + return resp, err } // DeleteRegistryRepositoryTagsOptions represents the available @@ -295,32 +231,19 @@ func (s *ContainerRegistryService) DeleteRegistryRepositoryTag(pid any, reposito type 
DeleteRegistryRepositoryTagsOptions struct { NameRegexpDelete *string `url:"name_regex_delete,omitempty" json:"name_regex_delete,omitempty"` NameRegexpKeep *string `url:"name_regex_keep,omitempty" json:"name_regex_keep,omitempty"` - KeepN *int `url:"keep_n,omitempty" json:"keep_n,omitempty"` + KeepN *int64 `url:"keep_n,omitempty" json:"keep_n,omitempty"` OlderThan *string `url:"older_than,omitempty" json:"older_than,omitempty"` // Deprecated: NameRegexp is deprecated in favor of NameRegexpDelete. NameRegexp *string `url:"name_regex,omitempty" json:"name_regex,omitempty"` } -// DeleteRegistryRepositoryTags deletes repository tags in bulk based on -// given criteria. -// -// GitLab API docs: -// https://docs.gitlab.com/api/container_registry/#delete-registry-repository-tags-in-bulk -func (s *ContainerRegistryService) DeleteRegistryRepositoryTags(pid any, repository int, opt *DeleteRegistryRepositoryTagsOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/registry/repositories/%d/tags", - PathEscape(project), - repository, +func (s *ContainerRegistryService) DeleteRegistryRepositoryTags(pid any, repository int64, opt *DeleteRegistryRepositoryTagsOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/registry/repositories/%d/tags", ProjectID{pid}, repository), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/container_registry_protection_rules.go b/vendor/gitlab.com/gitlab-org/api/client-go/container_registry_protection_rules.go index 5871f39830..3f505e547e 100644 --- 
a/vendor/gitlab.com/gitlab-org/api/client-go/container_registry_protection_rules.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/container_registry_protection_rules.go @@ -15,16 +15,49 @@ package gitlab import ( - "fmt" "net/http" ) type ( ContainerRegistryProtectionRulesServiceInterface interface { + // ListContainerRegistryProtectionRules gets a list of container repository + // protection rules from a project’s container registry. + // + // GitLab API docs: + // https://docs.gitlab.com/api/container_repository_protection_rules/#list-container-repository-protection-rules + // ListContainerRegistryProtectionRules gets a list of container repository + // protection rules from a project’s container registry. + // + // GitLab API docs: + // https://docs.gitlab.com/api/container_repository_protection_rules/#list-container-repository-protection-rules ListContainerRegistryProtectionRules(pid any, options ...RequestOptionFunc) ([]*ContainerRegistryProtectionRule, *Response, error) + + // CreateContainerRegistryProtectionRule creates a container repository + // protection rule for a project’s container registry. + // + // GitLab API docs: + // https://docs.gitlab.com/api/container_repository_protection_rules/#create-a-container-repository-protection-rule + + // CreateContainerRegistryProtectionRule creates a container repository + // protection rule for a project’s container registry. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/container_repository_protection_rules/#create-a-container-repository-protection-rule CreateContainerRegistryProtectionRule(pid any, opt *CreateContainerRegistryProtectionRuleOptions, options ...RequestOptionFunc) (*ContainerRegistryProtectionRule, *Response, error) - UpdateContainerRegistryProtectionRule(pid any, ruleID int, opt *UpdateContainerRegistryProtectionRuleOptions, options ...RequestOptionFunc) (*ContainerRegistryProtectionRule, *Response, error) - DeleteContainerRegistryProtectionRule(pid any, ruleID int, options ...RequestOptionFunc) (*Response, error) + + // UpdateContainerRegistryProtectionRule updates a container repository protection + // rule for a project’s container registry. + // + // GitLab API docs: + // https://docs.gitlab.com/api/container_repository_protection_rules/#update-a-container-repository-protection-rule + UpdateContainerRegistryProtectionRule(pid any, ruleID int64, opt *UpdateContainerRegistryProtectionRuleOptions, options ...RequestOptionFunc) (*ContainerRegistryProtectionRule, *Response, error) + + // DeleteContainerRegistryProtectionRule deletes a container repository protection + // rule from a project’s container registry. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/container_repository_protection_rules/#delete-a-container-repository-protection-rule + DeleteContainerRegistryProtectionRule(pid any, ruleID int64, options ...RequestOptionFunc) (*Response, error) } // ContainerRegistryProtectionRulesService handles communication with @@ -46,8 +79,8 @@ var _ ContainerRegistryProtectionRulesServiceInterface = (*ContainerRegistryProt // GitLab API docs: // https://docs.gitlab.com/api/container_repository_protection_rules/ type ContainerRegistryProtectionRule struct { - ID int `json:"id"` - ProjectID int `json:"project_id"` + ID int64 `json:"id"` + ProjectID int64 `json:"project_id"` RepositoryPathPattern string `json:"repository_path_pattern"` MinimumAccessLevelForPush ProtectionRuleAccessLevel `json:"minimum_access_level_for_push"` MinimumAccessLevelForDelete ProtectionRuleAccessLevel `json:"minimum_access_level_for_delete"` @@ -57,30 +90,12 @@ func (s ContainerRegistryProtectionRule) String() string { return Stringify(s) } -// ListContainerRegistryProtectionRules gets a list of container repository -// protection rules from a project’s container registry. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/container_repository_protection_rules/#list-container-repository-protection-rules func (s *ContainerRegistryProtectionRulesService) ListContainerRegistryProtectionRules(pid any, options ...RequestOptionFunc) ([]*ContainerRegistryProtectionRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/registry/protection/repository/rules", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var rules []*ContainerRegistryProtectionRule - resp, err := s.client.Do(req, &rules) - if err != nil { - return nil, resp, err - } - - return rules, resp, nil + return do[[]*ContainerRegistryProtectionRule](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/registry/protection/repository/rules", ProjectID{pid}), + withRequestOpts(options...), + ) } // CreateContainerRegistryProtectionRuleOptions represents the available @@ -94,30 +109,13 @@ type CreateContainerRegistryProtectionRuleOptions struct { MinimumAccessLevelForDelete *ProtectionRuleAccessLevel `url:"minimum_access_level_for_delete,omitempty" json:"minimum_access_level_for_delete,omitempty"` } -// CreateContainerRegistryProtectionRule creates a container repository -// protection rule for a project’s container registry. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/container_repository_protection_rules/#create-a-container-repository-protection-rule func (s *ContainerRegistryProtectionRulesService) CreateContainerRegistryProtectionRule(pid any, opt *CreateContainerRegistryProtectionRuleOptions, options ...RequestOptionFunc) (*ContainerRegistryProtectionRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/registry/protection/repository/rules", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - rule := new(ContainerRegistryProtectionRule) - resp, err := s.client.Do(req, rule) - if err != nil { - return nil, resp, err - } - - return rule, resp, nil + return do[*ContainerRegistryProtectionRule](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/registry/protection/repository/rules", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateContainerRegistryProtectionRuleOptions represents the available @@ -131,48 +129,20 @@ type UpdateContainerRegistryProtectionRuleOptions struct { MinimumAccessLevelForDelete *ProtectionRuleAccessLevel `url:"minimum_access_level_for_delete,omitempty" json:"minimum_access_level_for_delete,omitempty"` } -// UpdateContainerRegistryProtectionRule updates a container repository protection -// rule for a project’s container registry. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/container_repository_protection_rules/#update-a-container-repository-protection-rule -func (s *ContainerRegistryProtectionRulesService) UpdateContainerRegistryProtectionRule(pid any, ruleID int, opt *UpdateContainerRegistryProtectionRuleOptions, options ...RequestOptionFunc) (*ContainerRegistryProtectionRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/registry/protection/repository/rules/%d", PathEscape(project), ruleID) - - req, err := s.client.NewRequest(http.MethodPatch, u, opt, options) - if err != nil { - return nil, nil, err - } - - rule := new(ContainerRegistryProtectionRule) - resp, err := s.client.Do(req, rule) - if err != nil { - return nil, resp, err - } - - return rule, resp, nil +func (s *ContainerRegistryProtectionRulesService) UpdateContainerRegistryProtectionRule(pid any, ruleID int64, opt *UpdateContainerRegistryProtectionRuleOptions, options ...RequestOptionFunc) (*ContainerRegistryProtectionRule, *Response, error) { + return do[*ContainerRegistryProtectionRule](s.client, + withMethod(http.MethodPatch), + withPath("projects/%s/registry/protection/repository/rules/%d", ProjectID{pid}, ruleID), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteContainerRegistryProtectionRule deletes a container repository protection -// rule from a project’s container registry. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/container_repository_protection_rules/#delete-a-container-repository-protection-rule -func (s *ContainerRegistryProtectionRulesService) DeleteContainerRegistryProtectionRule(pid any, ruleID int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/registry/protection/repository/rules/%d", PathEscape(project), ruleID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ContainerRegistryProtectionRulesService) DeleteContainerRegistryProtectionRule(pid any, ruleID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/registry/protection/repository/rules/%d", ProjectID{pid}, ruleID), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/custom_attributes.go b/vendor/gitlab.com/gitlab-org/api/client-go/custom_attributes.go index 92e3babcec..3e3958674f 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/custom_attributes.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/custom_attributes.go @@ -17,24 +17,82 @@ package gitlab import ( - "fmt" "net/http" ) type ( CustomAttributesServiceInterface interface { - ListCustomUserAttributes(user int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) - ListCustomGroupAttributes(group int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) - ListCustomProjectAttributes(project int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) - GetCustomUserAttribute(user int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) - GetCustomGroupAttribute(group int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) - 
GetCustomProjectAttribute(project int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) - SetCustomUserAttribute(user int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) - SetCustomGroupAttribute(group int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) - SetCustomProjectAttribute(project int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) - DeleteCustomUserAttribute(user int, key string, options ...RequestOptionFunc) (*Response, error) - DeleteCustomGroupAttribute(group int, key string, options ...RequestOptionFunc) (*Response, error) - DeleteCustomProjectAttribute(project int, key string, options ...RequestOptionFunc) (*Response, error) + // ListCustomUserAttributes lists the custom attributes of the specified user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/custom_attributes/#list-custom-attributes + ListCustomUserAttributes(user int64, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) + + // ListCustomGroupAttributes lists the custom attributes of the specified group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/custom_attributes/#list-custom-attributes + ListCustomGroupAttributes(group int64, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) + + // ListCustomProjectAttributes lists the custom attributes of the specified project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/custom_attributes/#list-custom-attributes + ListCustomProjectAttributes(project int64, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) + + // GetCustomUserAttribute returns the user attribute with a specific key. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/custom_attributes/#single-custom-attribute + GetCustomUserAttribute(user int64, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) + + // GetCustomGroupAttribute returns the group attribute with a specific key. + // + // GitLab API docs: + // https://docs.gitlab.com/api/custom_attributes/#single-custom-attribute + GetCustomGroupAttribute(group int64, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) + + // GetCustomProjectAttribute returns the project attribute with a specific key. + // + // GitLab API docs: + // https://docs.gitlab.com/api/custom_attributes/#single-custom-attribute + GetCustomProjectAttribute(project int64, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) + + // SetCustomUserAttribute sets the custom attributes of the specified user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/custom_attributes/#set-custom-attribute + SetCustomUserAttribute(user int64, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) + + // SetCustomGroupAttribute sets the custom attributes of the specified group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/custom_attributes/#set-custom-attribute + SetCustomGroupAttribute(group int64, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) + + // SetCustomProjectAttribute sets the custom attributes of the specified project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/custom_attributes/#set-custom-attribute + SetCustomProjectAttribute(project int64, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) + + // DeleteCustomUserAttribute removes the custom attribute of the specified user. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/custom_attributes/#delete-custom-attribute + DeleteCustomUserAttribute(user int64, key string, options ...RequestOptionFunc) (*Response, error) + + // DeleteCustomGroupAttribute removes the custom attribute of the specified group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/custom_attributes/#delete-custom-attribute + DeleteCustomGroupAttribute(group int64, key string, options ...RequestOptionFunc) (*Response, error) + + // DeleteCustomProjectAttribute removes the custom attribute of the specified project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/custom_attributes/#delete-custom-attribute + DeleteCustomProjectAttribute(project int64, key string, options ...RequestOptionFunc) (*Response, error) } // CustomAttributesService handles communication with the group, project and @@ -56,152 +114,99 @@ type CustomAttribute struct { Value string `json:"value"` } -// ListCustomUserAttributes lists the custom attributes of the specified user. -// -// GitLab API docs: -// https://docs.gitlab.com/api/custom_attributes/#list-custom-attributes -func (s *CustomAttributesService) ListCustomUserAttributes(user int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { +func (s *CustomAttributesService) ListCustomUserAttributes(user int64, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { return s.listCustomAttributes("users", user, options...) } -// ListCustomGroupAttributes lists the custom attributes of the specified group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/custom_attributes/#list-custom-attributes -func (s *CustomAttributesService) ListCustomGroupAttributes(group int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { +func (s *CustomAttributesService) ListCustomGroupAttributes(group int64, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { return s.listCustomAttributes("groups", group, options...) } -// ListCustomProjectAttributes lists the custom attributes of the specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/custom_attributes/#list-custom-attributes -func (s *CustomAttributesService) ListCustomProjectAttributes(project int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { +func (s *CustomAttributesService) ListCustomProjectAttributes(project int64, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { return s.listCustomAttributes("projects", project, options...) } -func (s *CustomAttributesService) listCustomAttributes(resource string, id int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { - u := fmt.Sprintf("%s/%d/custom_attributes", resource, id) - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var cas []*CustomAttribute - resp, err := s.client.Do(req, &cas) +func (s *CustomAttributesService) listCustomAttributes(resource string, id int64, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { + res, resp, err := do[[]*CustomAttribute](s.client, + withMethod(http.MethodGet), + withPath("%s/%d/custom_attributes", resource, id), + withAPIOpts(nil), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - return cas, resp, nil + return res, resp, nil } -// GetCustomUserAttribute returns the user attribute with a specific key. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/custom_attributes/#single-custom-attribute -func (s *CustomAttributesService) GetCustomUserAttribute(user int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { +func (s *CustomAttributesService) GetCustomUserAttribute(user int64, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { return s.getCustomAttribute("users", user, key, options...) } -// GetCustomGroupAttribute returns the group attribute with a specific key. -// -// GitLab API docs: -// https://docs.gitlab.com/api/custom_attributes/#single-custom-attribute -func (s *CustomAttributesService) GetCustomGroupAttribute(group int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { +func (s *CustomAttributesService) GetCustomGroupAttribute(group int64, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { return s.getCustomAttribute("groups", group, key, options...) } -// GetCustomProjectAttribute returns the project attribute with a specific key. -// -// GitLab API docs: -// https://docs.gitlab.com/api/custom_attributes/#single-custom-attribute -func (s *CustomAttributesService) GetCustomProjectAttribute(project int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { +func (s *CustomAttributesService) GetCustomProjectAttribute(project int64, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { return s.getCustomAttribute("projects", project, key, options...) 
} -func (s *CustomAttributesService) getCustomAttribute(resource string, id int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { - u := fmt.Sprintf("%s/%d/custom_attributes/%s", resource, id, key) - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var ca *CustomAttribute - resp, err := s.client.Do(req, &ca) +func (s *CustomAttributesService) getCustomAttribute(resource string, id int64, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { + res, resp, err := do[*CustomAttribute](s.client, + withMethod(http.MethodGet), + withPath("%s/%d/custom_attributes/%s", resource, id, key), + withAPIOpts(nil), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - return ca, resp, nil + return res, resp, nil } -// SetCustomUserAttribute sets the custom attributes of the specified user. -// -// GitLab API docs: -// https://docs.gitlab.com/api/custom_attributes/#set-custom-attribute -func (s *CustomAttributesService) SetCustomUserAttribute(user int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { +func (s *CustomAttributesService) SetCustomUserAttribute(user int64, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { return s.setCustomAttribute("users", user, c, options...) } -// SetCustomGroupAttribute sets the custom attributes of the specified group. -// -// GitLab API docs: -// https://docs.gitlab.com/api/custom_attributes/#set-custom-attribute -func (s *CustomAttributesService) SetCustomGroupAttribute(group int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { +func (s *CustomAttributesService) SetCustomGroupAttribute(group int64, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { return s.setCustomAttribute("groups", group, c, options...) 
} -// SetCustomProjectAttribute sets the custom attributes of the specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/custom_attributes/#set-custom-attribute -func (s *CustomAttributesService) SetCustomProjectAttribute(project int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { +func (s *CustomAttributesService) SetCustomProjectAttribute(project int64, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { return s.setCustomAttribute("projects", project, c, options...) } -func (s *CustomAttributesService) setCustomAttribute(resource string, id int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { - u := fmt.Sprintf("%s/%d/custom_attributes/%s", resource, id, c.Key) - req, err := s.client.NewRequest(http.MethodPut, u, c, options) - if err != nil { - return nil, nil, err - } - - ca := new(CustomAttribute) - resp, err := s.client.Do(req, ca) +func (s *CustomAttributesService) setCustomAttribute(resource string, id int64, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { + res, resp, err := do[*CustomAttribute](s.client, + withMethod(http.MethodPut), + withPath("%s/%d/custom_attributes/%s", resource, id, c.Key), + withAPIOpts(c), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - return ca, resp, nil + return res, resp, nil } -// DeleteCustomUserAttribute removes the custom attribute of the specified user. -// -// GitLab API docs: -// https://docs.gitlab.com/api/custom_attributes/#delete-custom-attribute -func (s *CustomAttributesService) DeleteCustomUserAttribute(user int, key string, options ...RequestOptionFunc) (*Response, error) { +func (s *CustomAttributesService) DeleteCustomUserAttribute(user int64, key string, options ...RequestOptionFunc) (*Response, error) { return s.deleteCustomAttribute("users", user, key, options...) 
} -// DeleteCustomGroupAttribute removes the custom attribute of the specified group. -// -// GitLab API docs: -// https://docs.gitlab.com/api/custom_attributes/#delete-custom-attribute -func (s *CustomAttributesService) DeleteCustomGroupAttribute(group int, key string, options ...RequestOptionFunc) (*Response, error) { +func (s *CustomAttributesService) DeleteCustomGroupAttribute(group int64, key string, options ...RequestOptionFunc) (*Response, error) { return s.deleteCustomAttribute("groups", group, key, options...) } -// DeleteCustomProjectAttribute removes the custom attribute of the specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/custom_attributes/#delete-custom-attribute -func (s *CustomAttributesService) DeleteCustomProjectAttribute(project int, key string, options ...RequestOptionFunc) (*Response, error) { +func (s *CustomAttributesService) DeleteCustomProjectAttribute(project int64, key string, options ...RequestOptionFunc) (*Response, error) { return s.deleteCustomAttribute("projects", project, key, options...) 
} -func (s *CustomAttributesService) deleteCustomAttribute(resource string, id int, key string, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("%s/%d/custom_attributes/%s", resource, id, key) - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) +func (s *CustomAttributesService) deleteCustomAttribute(resource string, id int64, key string, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("%s/%d/custom_attributes/%s", resource, id, key), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/database_migrations.go b/vendor/gitlab.com/gitlab-org/api/client-go/database_migrations.go index 058e32cc74..a7ca83f6a1 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/database_migrations.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/database_migrations.go @@ -15,13 +15,18 @@ package gitlab import ( - "fmt" "net/http" ) type ( DatabaseMigrationsServiceInterface interface { - MarkMigrationAsSuccessful(version int, opt *MarkMigrationAsSuccessfulOptions, options ...RequestOptionFunc) (*Response, error) + // MarkMigrationAsSuccessful marks pending migrations as successfully executed + // to prevent them from being executed by the db:migrate tasks. Use this API to + // skip failing migrations after they are determined to be safe to skip. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/database_migrations/#mark-a-migration-as-successful + MarkMigrationAsSuccessful(version int64, opt *MarkMigrationAsSuccessfulOptions, options ...RequestOptionFunc) (*Response, error) } // DatabaseMigrationsService handles communication with the database @@ -44,19 +49,12 @@ type MarkMigrationAsSuccessfulOptions struct { Database string `url:"database,omitempty" json:"database,omitempty"` } -// MarkMigrationAsSuccessful marks pending migrations as successfully executed -// to prevent them from being executed by the db:migrate tasks. Use this API to -// skip failing migrations after they are determined to be safe to skip. -// -// GitLab API docs: -// https://docs.gitlab.com/api/database_migrations/#mark-a-migration-as-successful -func (s *DatabaseMigrationsService) MarkMigrationAsSuccessful(version int, opt *MarkMigrationAsSuccessfulOptions, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("admin/migrations/%d/mark", version) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *DatabaseMigrationsService) MarkMigrationAsSuccessful(version int64, opt *MarkMigrationAsSuccessfulOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("admin/migrations/%d/mark", version), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/dependencies.go b/vendor/gitlab.com/gitlab-org/api/client-go/dependencies.go index e4b85381ed..076d0b6bd7 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/dependencies.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/dependencies.go @@ -14,13 +14,14 @@ package gitlab -import ( - "fmt" - "net/http" -) - type ( DependenciesServiceInterface interface { + // ListProjectDependencies Get a list of project 
dependencies. This API partially + // mirroring Dependency List feature. This list can be generated only for languages + // and package managers supported by Gemnasium. + // + // GitLab API docs: + // https://docs.gitlab.com/api/dependencies/#list-project-dependencies ListProjectDependencies(pid any, opt *ListProjectDependenciesOptions, options ...RequestOptionFunc) ([]*Dependency, *Response, error) } @@ -53,7 +54,7 @@ type Dependency struct { type DependencyVulnerability struct { Name string `url:"name" json:"name"` Severity string `url:"severity" json:"severity"` - ID int `url:"id" json:"id"` + ID int64 `url:"id" json:"id"` URL string `url:"url" json:"url"` } @@ -72,32 +73,13 @@ type DependencyLicense struct { // https://docs.gitlab.com/api/dependencies/#list-project-dependencies type ListProjectDependenciesOptions struct { ListOptions - PackageManager []*DependencyPackageManagerValue `url:"package_manager,omitempty" json:"package_manager,omitempty"` + PackageManager []*DependencyPackageManagerValue `url:"package_manager,comma,omitempty" json:"package_manager,omitempty"` } -// ListProjectDependencies Get a list of project dependencies. This API partially -// mirroring Dependency List feature. This list can be generated only for languages -// and package managers supported by Gemnasium. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/dependencies/#list-project-dependencies func (s *DependenciesService) ListProjectDependencies(pid any, opt *ListProjectDependenciesOptions, options ...RequestOptionFunc) ([]*Dependency, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/dependencies", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var dependencies []*Dependency - resp, err := s.client.Do(req, &dependencies) - if err != nil { - return nil, resp, err - } - - return dependencies, resp, nil + return do[[]*Dependency](s.client, + withPath("projects/%s/dependencies", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/dependency_list_export.go b/vendor/gitlab.com/gitlab-org/api/client-go/dependency_list_export.go index ed475fd46e..6f82f59633 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/dependency_list_export.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/dependency_list_export.go @@ -2,16 +2,44 @@ package gitlab import ( "bytes" - "fmt" "io" "net/http" ) type ( DependencyListExportServiceInterface interface { - CreateDependencyListExport(pipelineID int, opt *CreateDependencyListExportOptions, options ...RequestOptionFunc) (*DependencyListExport, *Response, error) - GetDependencyListExport(id int, options ...RequestOptionFunc) (*DependencyListExport, *Response, error) - DownloadDependencyListExport(id int, options ...RequestOptionFunc) (io.Reader, *Response, error) + // CreateDependencyListExport creates a new CycloneDX JSON export for all the project dependencies + // detected in a pipeline. + // + // If an authenticated user does not have permission to read_dependency, this request returns a 403 + // Forbidden status code. + // + // SBOM exports can be only accessed by the export’s author. 
+ // + // GitLab docs: + // https://docs.gitlab.com/api/dependency_list_export/#create-a-dependency-list-export + CreateDependencyListExport(pipelineID int64, opt *CreateDependencyListExportOptions, options ...RequestOptionFunc) (*DependencyListExport, *Response, error) + + // GetDependencyListExport gets metadata about a single dependency list export. + // + // GitLab docs: + // https://docs.gitlab.com/api/dependency_list_export/#get-single-dependency-list-export + GetDependencyListExport(id int64, options ...RequestOptionFunc) (*DependencyListExport, *Response, error) + + // DownloadDependencyListExport downloads a single dependency list export. + // + // The github.com/CycloneDX/cyclonedx-go package can be used to parse the data from the returned io.Reader. + // + // sbom := new(cdx.BOM) + // decoder := cdx.NewBOMDecoder(reader, cdx.BOMFileFormatJSON) + // + // if err = decoder.Decode(sbom); err != nil { + // panic(err) + // } + // + // GitLab docs: + // https://docs.gitlab.com/api/dependency_list_export/#download-dependency-list-export + DownloadDependencyListExport(id int64, options ...RequestOptionFunc) (io.Reader, *Response, error) } // DependencyListExportService handles communication with the dependency list export @@ -39,7 +67,7 @@ type CreateDependencyListExportOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/dependency_list_export/#create-a-dependency-list-export type DependencyListExport struct { - ID int `json:"id"` + ID int64 `json:"id"` HasFinished bool `json:"has_finished"` Self string `json:"self"` Download string `json:"download"` @@ -47,20 +75,7 @@ type DependencyListExport struct { const defaultExportType = "sbom" -// CreateDependencyListExport creates a new CycloneDX JSON export for all the project dependencies -// detected in a pipeline. -// -// If an authenticated user does not have permission to read_dependency, this request returns a 403 -// Forbidden status code. 
-// -// SBOM exports can be only accessed by the export’s author. -// -// GitLab docs: -// https://docs.gitlab.com/api/dependency_list_export/#create-a-dependency-list-export -func (s *DependencyListExportService) CreateDependencyListExport(pipelineID int, opt *CreateDependencyListExportOptions, options ...RequestOptionFunc) (*DependencyListExport, *Response, error) { - // POST /pipelines/:id/dependency_list_exports - createExportPath := fmt.Sprintf("pipelines/%d/dependency_list_exports", pipelineID) - +func (s *DependencyListExportService) CreateDependencyListExport(pipelineID int64, opt *CreateDependencyListExportOptions, options ...RequestOptionFunc) (*DependencyListExport, *Response, error) { if opt == nil { opt = &CreateDependencyListExportOptions{} } @@ -68,69 +83,28 @@ func (s *DependencyListExportService) CreateDependencyListExport(pipelineID int, opt.ExportType = Ptr(defaultExportType) } - req, err := s.client.NewRequest(http.MethodPost, createExportPath, opt, options) - if err != nil { - return nil, nil, err - } - - export := new(DependencyListExport) - resp, err := s.client.Do(req, &export) - if err != nil { - return nil, resp, err - } - - return export, resp, nil + return do[*DependencyListExport](s.client, + withMethod(http.MethodPost), + withPath("pipelines/%d/dependency_list_exports", pipelineID), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetDependencyListExport gets metadata about a single dependency list export. 
-// -// GitLab docs: -// https://docs.gitlab.com/api/dependency_list_export/#get-single-dependency-list-export -func (s *DependencyListExportService) GetDependencyListExport(id int, options ...RequestOptionFunc) (*DependencyListExport, *Response, error) { - // GET /dependency_list_exports/:id - getExportPath := fmt.Sprintf("dependency_list_exports/%d", id) - - req, err := s.client.NewRequest(http.MethodGet, getExportPath, nil, options) - if err != nil { - return nil, nil, err - } - - export := new(DependencyListExport) - resp, err := s.client.Do(req, &export) - if err != nil { - return nil, resp, err - } - - return export, resp, nil +func (s *DependencyListExportService) GetDependencyListExport(id int64, options ...RequestOptionFunc) (*DependencyListExport, *Response, error) { + return do[*DependencyListExport](s.client, + withPath("dependency_list_exports/%d", id), + withRequestOpts(options...), + ) } -// DownloadDependencyListExport downloads a single dependency list export. -// -// The github.com/CycloneDX/cyclonedx-go package can be used to parse the data from the returned io.Reader. 
-// -// sbom := new(cdx.BOM) -// decoder := cdx.NewBOMDecoder(reader, cdx.BOMFileFormatJSON) -// -// if err = decoder.Decode(sbom); err != nil { -// panic(err) -// } -// -// GitLab docs: -// https://docs.gitlab.com/api/dependency_list_export/#download-dependency-list-export -func (s *DependencyListExportService) DownloadDependencyListExport(id int, options ...RequestOptionFunc) (io.Reader, *Response, error) { - // GET /dependency_list_exports/:id/download - downloadExportPath := fmt.Sprintf("dependency_list_exports/%d/download", id) - - req, err := s.client.NewRequest(http.MethodGet, downloadExportPath, nil, options) - if err != nil { - return nil, nil, err - } - - var sbomBuffer bytes.Buffer - resp, err := s.client.Do(req, &sbomBuffer) +func (s *DependencyListExportService) DownloadDependencyListExport(id int64, options ...RequestOptionFunc) (io.Reader, *Response, error) { + buf, resp, err := do[bytes.Buffer](s.client, + withPath("dependency_list_exports/%d/download", id), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return &sbomBuffer, resp, nil + return &buf, resp, nil } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/dependency_proxy.go b/vendor/gitlab.com/gitlab-org/api/client-go/dependency_proxy.go index a475cb5fd4..a9516e640a 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/dependency_proxy.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/dependency_proxy.go @@ -14,13 +14,15 @@ package gitlab -import ( - "fmt" - "net/http" -) +import "net/http" type ( DependencyProxyServiceInterface interface { + // PurgeGroupDependencyProxy schedules for deletion the cached manifests and blobs + // for a group. This endpoint requires the Owner role for the group. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/dependency_proxy/#purge-the-dependency-proxy-for-a-group PurgeGroupDependencyProxy(gid any, options ...RequestOptionFunc) (*Response, error) } @@ -35,22 +37,11 @@ type ( var _ DependencyProxyServiceInterface = (*DependencyProxyService)(nil) -// PurgeGroupDependencyProxy schedules for deletion the cached manifests and blobs -// for a group. This endpoint requires the Owner role for the group. -// -// GitLab API docs: -// https://docs.gitlab.com/api/dependency_proxy/#purge-the-dependency-proxy-for-a-group func (s *DependencyProxyService) PurgeGroupDependencyProxy(gid any, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/dependency_proxy/cache", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/dependency_proxy/cache", GroupID{gid}), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/deploy_keys.go b/vendor/gitlab.com/gitlab-org/api/client-go/deploy_keys.go index 8f54a13447..78c9889954 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/deploy_keys.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/deploy_keys.go @@ -17,22 +17,86 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( DeployKeysServiceInterface interface { + // ListAllDeployKeys gets a list of all deploy keys. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_keys/#list-all-deploy-keys + // ListAllDeployKeys gets a list of all deploy keys. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_keys/#list-all-deploy-keys ListAllDeployKeys(opt *ListInstanceDeployKeysOptions, options ...RequestOptionFunc) ([]*InstanceDeployKey, *Response, error) + + // AddInstanceDeployKey creates a deploy key for the GitLab instance. + // Requires administrator access. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_keys/#add-deploy-key + + // AddInstanceDeployKey creates a deploy key for the GitLab instance. + // Requires administrator access. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_keys/#add-deploy-key AddInstanceDeployKey(opt *AddInstanceDeployKeyOptions, options ...RequestOptionFunc) (*InstanceDeployKey, *Response, error) + + // ListProjectDeployKeys gets a list of a project's deploy keys. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_keys/#list-deploy-keys-for-project + + // ListProjectDeployKeys gets a list of a project's deploy keys. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_keys/#list-deploy-keys-for-project ListProjectDeployKeys(pid any, opt *ListProjectDeployKeysOptions, options ...RequestOptionFunc) ([]*ProjectDeployKey, *Response, error) + + // ListUserProjectDeployKeys gets a list of a user's deploy keys. + // + // uid can be either a user ID (int) or a username (string). If a username + // is provided with a leading "@" (e.g., "@johndoe"), it will be trimmed. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_keys/#list-project-deploy-keys-for-user ListUserProjectDeployKeys(uid any, opt *ListUserProjectDeployKeysOptions, options ...RequestOptionFunc) ([]*ProjectDeployKey, *Response, error) - GetDeployKey(pid any, deployKey int, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) + + // GetDeployKey gets a single deploy key. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_keys/#get-a-single-deploy-key + GetDeployKey(pid any, deployKey int64, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) + + // AddDeployKey creates a new deploy key for a project. If the deploy key already + // exists in another project, it will be joined to the project but only if + // the original one is accessible by the same user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_keys/#add-deploy-key-for-a-project AddDeployKey(pid any, opt *AddDeployKeyOptions, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) - DeleteDeployKey(pid any, deployKey int, options ...RequestOptionFunc) (*Response, error) - EnableDeployKey(pid any, deployKey int, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) - UpdateDeployKey(pid any, deployKey int, opt *UpdateDeployKeyOptions, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) + + // DeleteDeployKey deletes a deploy key from a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_keys/#delete-deploy-key + DeleteDeployKey(pid any, deployKey int64, options ...RequestOptionFunc) (*Response, error) + + // EnableDeployKey enables a deploy key. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_keys/#enable-a-deploy-key + EnableDeployKey(pid any, deployKey int64, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) + + // UpdateDeployKey updates a deploy key for a project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_keys/#update-deploy-key + UpdateDeployKey(pid any, deployKey int64, opt *UpdateDeployKeyOptions, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) } // DeployKeysService handles communication with the keys related methods @@ -49,7 +113,7 @@ var _ DeployKeysServiceInterface = (*DeployKeysService)(nil) // InstanceDeployKey represents a GitLab deploy key with the associated // projects it has write access to. type InstanceDeployKey struct { - ID int `json:"id"` + ID int64 `json:"id"` Title string `json:"title"` CreatedAt *time.Time `json:"created_at"` ExpiresAt *time.Time `json:"expires_at"` @@ -66,7 +130,7 @@ func (k InstanceDeployKey) String() string { // DeployKeyProject refers to a project an InstanceDeployKey has write access to. type DeployKeyProject struct { - ID int `json:"id"` + ID int64 `json:"id"` Description string `json:"description"` Name string `json:"name"` NameWithNamespace string `json:"name_with_namespace"` @@ -81,7 +145,7 @@ func (k DeployKeyProject) String() string { // ProjectDeployKey represents a GitLab project deploy key. 
type ProjectDeployKey struct { - ID int `json:"id"` + ID int64 `json:"id"` Title string `json:"title"` Key string `json:"key"` Fingerprint string `json:"fingerprint"` @@ -105,23 +169,12 @@ type ListInstanceDeployKeysOptions struct { Public *bool `url:"public,omitempty" json:"public,omitempty"` } -// ListAllDeployKeys gets a list of all deploy keys -// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_keys/#list-all-deploy-keys func (s *DeployKeysService) ListAllDeployKeys(opt *ListInstanceDeployKeysOptions, options ...RequestOptionFunc) ([]*InstanceDeployKey, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "deploy_keys", opt, options) - if err != nil { - return nil, nil, err - } - - var ks []*InstanceDeployKey - resp, err := s.client.Do(req, &ks) - if err != nil { - return nil, resp, err - } - - return ks, resp, nil + return do[[]*InstanceDeployKey](s.client, + withPath("deploy_keys"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // AddInstanceDeployKeyOptions represents the available AddInstanceDeployKey() @@ -135,24 +188,13 @@ type AddInstanceDeployKeyOptions struct { ExpiresAt *time.Time `url:"expires_at,omitempty" json:"expires_at,omitempty"` } -// AddInstanceDeployKey creates a deploy key for the GitLab instance. -// Requires administrator access. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_keys/#add-deploy-key func (s *DeployKeysService) AddInstanceDeployKey(opt *AddInstanceDeployKeyOptions, options ...RequestOptionFunc) (*InstanceDeployKey, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "deploy_keys", opt, options) - if err != nil { - return nil, nil, err - } - - key := new(InstanceDeployKey) - resp, err := s.client.Do(req, &key) - if err != nil { - return nil, resp, err - } - - return key, resp, nil + return do[*InstanceDeployKey](s.client, + withMethod(http.MethodPost), + withPath("deploy_keys"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListProjectDeployKeysOptions represents the available ListProjectDeployKeys() @@ -160,31 +202,16 @@ func (s *DeployKeysService) AddInstanceDeployKey(opt *AddInstanceDeployKeyOption // // GitLab API docs: // https://docs.gitlab.com/api/deploy_keys/#list-deploy-keys-for-project -type ListProjectDeployKeysOptions ListOptions +type ListProjectDeployKeysOptions struct { + ListOptions +} -// ListProjectDeployKeys gets a list of a project's deploy keys -// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_keys/#list-deploy-keys-for-project func (s *DeployKeysService) ListProjectDeployKeys(pid any, opt *ListProjectDeployKeysOptions, options ...RequestOptionFunc) ([]*ProjectDeployKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_keys", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ks []*ProjectDeployKey - resp, err := s.client.Do(req, &ks) - if err != nil { - return nil, resp, err - } - - return ks, resp, nil + return do[[]*ProjectDeployKey](s.client, + withPath("projects/%s/deploy_keys", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListUserProjectDeployKeysOptions represents the available 
ListUserProjectDeployKeys() @@ -192,56 +219,30 @@ func (s *DeployKeysService) ListProjectDeployKeys(pid any, opt *ListProjectDeplo // // GitLab API docs: // https://docs.gitlab.com/api/deploy_keys/#list-project-deploy-keys-for-user -type ListUserProjectDeployKeysOptions ListOptions +type ListUserProjectDeployKeysOptions struct { + ListOptions +} -// ListUserProjectDeployKeys gets a list of a user's deploy keys +// ListUserProjectDeployKeys gets a list of a user's deploy keys. +// +// uid can be either a user ID (int) or a username (string). If a username +// is provided with a leading "@" (e.g., "@johndoe"), it will be trimmed. // // GitLab API docs: // https://docs.gitlab.com/api/deploy_keys/#list-project-deploy-keys-for-user func (s *DeployKeysService) ListUserProjectDeployKeys(uid any, opt *ListUserProjectDeployKeysOptions, options ...RequestOptionFunc) ([]*ProjectDeployKey, *Response, error) { - user, err := parseID(uid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("users/%s/project_deploy_keys", PathEscape(user)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ks []*ProjectDeployKey - resp, err := s.client.Do(req, &ks) - if err != nil { - return nil, resp, err - } - - return ks, resp, nil + return do[[]*ProjectDeployKey](s.client, + withPath("users/%s/project_deploy_keys", UserID{uid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetDeployKey gets a single deploy key. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_keys/#get-a-single-deploy-key -func (s *DeployKeysService) GetDeployKey(pid any, deployKey int, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_keys/%d", PathEscape(project), deployKey) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(ProjectDeployKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil +func (s *DeployKeysService) GetDeployKey(pid any, deployKey int64, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) { + return do[*ProjectDeployKey](s.client, + withPath("projects/%s/deploy_keys/%d", ProjectID{pid}, deployKey), + withRequestOpts(options...), + ) } // AddDeployKeyOptions represents the available ADDDeployKey() options. @@ -255,75 +256,30 @@ type AddDeployKeyOptions struct { ExpiresAt *time.Time `url:"expires_at,omitempty" json:"expires_at,omitempty"` } -// AddDeployKey creates a new deploy key for a project. If deploy key already -// exists in another project - it will be joined to project but only if -// original one is accessible by the same user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_keys/#add-deploy-key-for-a-project func (s *DeployKeysService) AddDeployKey(pid any, opt *AddDeployKeyOptions, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_keys", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - k := new(ProjectDeployKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil + return do[*ProjectDeployKey](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/deploy_keys", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteDeployKey deletes a deploy key from a project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_keys/#delete-deploy-key -func (s *DeployKeysService) DeleteDeployKey(pid any, deployKey int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/deploy_keys/%d", PathEscape(project), deployKey) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *DeployKeysService) DeleteDeployKey(pid any, deployKey int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/deploy_keys/%d", ProjectID{pid}, deployKey), + withRequestOpts(options...), + ) + return resp, err } -// EnableDeployKey enables a deploy key. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_keys/#enable-a-deploy-key -func (s *DeployKeysService) EnableDeployKey(pid any, deployKey int, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_keys/%d/enable", PathEscape(project), deployKey) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(ProjectDeployKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil +func (s *DeployKeysService) EnableDeployKey(pid any, deployKey int64, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) { + return do[*ProjectDeployKey](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/deploy_keys/%d/enable", ProjectID{pid}, deployKey), + withRequestOpts(options...), + ) } // UpdateDeployKeyOptions represents the available UpdateDeployKey() options. @@ -335,27 +291,11 @@ type UpdateDeployKeyOptions struct { CanPush *bool `url:"can_push,omitempty" json:"can_push,omitempty"` } -// UpdateDeployKey updates a deploy key for a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_keys/#update-deploy-key -func (s *DeployKeysService) UpdateDeployKey(pid any, deployKey int, opt *UpdateDeployKeyOptions, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_keys/%d", PathEscape(project), deployKey) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - k := new(ProjectDeployKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil +func (s *DeployKeysService) UpdateDeployKey(pid any, deployKey int64, opt *UpdateDeployKeyOptions, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) { + return do[*ProjectDeployKey](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/deploy_keys/%d", ProjectID{pid}, deployKey), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/deploy_tokens.go b/vendor/gitlab.com/gitlab-org/api/client-go/deploy_tokens.go index f7128a69f7..8083c845ec 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/deploy_tokens.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/deploy_tokens.go @@ -17,22 +17,74 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( DeployTokensServiceInterface interface { + // ListAllDeployTokens gets a list of all deploy tokens. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_tokens/#list-all-deploy-tokens + // ListAllDeployTokens gets a list of all deploy tokens. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_tokens/#list-all-deploy-tokens ListAllDeployTokens(options ...RequestOptionFunc) ([]*DeployToken, *Response, error) + + // ListProjectDeployTokens gets a list of a project's deploy tokens. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_tokens/#list-project-deploy-tokens + + // ListProjectDeployTokens gets a list of a project's deploy tokens. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_tokens/#list-project-deploy-tokens ListProjectDeployTokens(pid any, opt *ListProjectDeployTokensOptions, options ...RequestOptionFunc) ([]*DeployToken, *Response, error) - GetProjectDeployToken(pid any, deployToken int, options ...RequestOptionFunc) (*DeployToken, *Response, error) + + // GetProjectDeployToken gets a single deploy token. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_tokens/#get-a-project-deploy-token + GetProjectDeployToken(pid any, deployToken int64, options ...RequestOptionFunc) (*DeployToken, *Response, error) + + // CreateProjectDeployToken creates a new deploy token for a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_tokens/#create-a-project-deploy-token CreateProjectDeployToken(pid any, opt *CreateProjectDeployTokenOptions, options ...RequestOptionFunc) (*DeployToken, *Response, error) - DeleteProjectDeployToken(pid any, deployToken int, options ...RequestOptionFunc) (*Response, error) + + // DeleteProjectDeployToken removes a deploy token from the project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_tokens/#delete-a-project-deploy-token + DeleteProjectDeployToken(pid any, deployToken int64, options ...RequestOptionFunc) (*Response, error) + + // ListGroupDeployTokens gets a list of a group’s deploy tokens. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_tokens/#list-group-deploy-tokens ListGroupDeployTokens(gid any, opt *ListGroupDeployTokensOptions, options ...RequestOptionFunc) ([]*DeployToken, *Response, error) - GetGroupDeployToken(gid any, deployToken int, options ...RequestOptionFunc) (*DeployToken, *Response, error) + + // GetGroupDeployToken gets a single deploy token. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_tokens/#get-a-group-deploy-token + GetGroupDeployToken(gid any, deployToken int64, options ...RequestOptionFunc) (*DeployToken, *Response, error) + + // CreateGroupDeployToken creates a new deploy token for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_tokens/#create-a-group-deploy-token CreateGroupDeployToken(gid any, opt *CreateGroupDeployTokenOptions, options ...RequestOptionFunc) (*DeployToken, *Response, error) - DeleteGroupDeployToken(gid any, deployToken int, options ...RequestOptionFunc) (*Response, error) + + // DeleteGroupDeployToken removes a deploy token from the group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deploy_tokens/#delete-a-group-deploy-token + DeleteGroupDeployToken(gid any, deployToken int64, options ...RequestOptionFunc) (*Response, error) } // DeployTokensService handles communication with the deploy tokens related methods @@ -46,7 +98,7 @@ type ( // DeployToken represents a GitLab deploy token. type DeployToken struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Username string `json:"username"` ExpiresAt *time.Time `json:"expires_at"` @@ -60,23 +112,11 @@ func (k DeployToken) String() string { return Stringify(k) } -// ListAllDeployTokens gets a list of all deploy tokens. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_tokens/#list-all-deploy-tokens func (s *DeployTokensService) ListAllDeployTokens(options ...RequestOptionFunc) ([]*DeployToken, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "deploy_tokens", nil, options) - if err != nil { - return nil, nil, err - } - - var ts []*DeployToken - resp, err := s.client.Do(req, &ts) - if err != nil { - return nil, resp, err - } - - return ts, resp, nil + return do[[]*DeployToken](s.client, + withPath("deploy_tokens"), + withRequestOpts(options...), + ) } // ListProjectDeployTokensOptions represents the available ListProjectDeployTokens() @@ -84,56 +124,23 @@ func (s *DeployTokensService) ListAllDeployTokens(options ...RequestOptionFunc) // // GitLab API docs: // https://docs.gitlab.com/api/deploy_tokens/#list-project-deploy-tokens -type ListProjectDeployTokensOptions ListOptions +type ListProjectDeployTokensOptions struct { + ListOptions +} -// ListProjectDeployTokens gets a list of a project's deploy tokens. -// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_tokens/#list-project-deploy-tokens func (s *DeployTokensService) ListProjectDeployTokens(pid any, opt *ListProjectDeployTokensOptions, options ...RequestOptionFunc) ([]*DeployToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_tokens", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ts []*DeployToken - resp, err := s.client.Do(req, &ts) - if err != nil { - return nil, resp, err - } - - return ts, resp, nil + return do[[]*DeployToken](s.client, + withPath("projects/%s/deploy_tokens", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetProjectDeployToken gets a single deploy token. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_tokens/#get-a-project-deploy-token -func (s *DeployTokensService) GetProjectDeployToken(pid any, deployToken int, options ...RequestOptionFunc) (*DeployToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_tokens/%d", PathEscape(project), deployToken) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(DeployToken) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil +func (s *DeployTokensService) GetProjectDeployToken(pid any, deployToken int64, options ...RequestOptionFunc) (*DeployToken, *Response, error) { + return do[*DeployToken](s.client, + withPath("projects/%s/deploy_tokens/%d", ProjectID{pid}, deployToken), + withRequestOpts(options...), + ) } // CreateProjectDeployTokenOptions represents the available CreateProjectDeployToken() options. @@ -147,48 +154,22 @@ type CreateProjectDeployTokenOptions struct { Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` } -// CreateProjectDeployToken creates a new deploy token for a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_tokens/#create-a-project-deploy-token func (s *DeployTokensService) CreateProjectDeployToken(pid any, opt *CreateProjectDeployTokenOptions, options ...RequestOptionFunc) (*DeployToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_tokens", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(DeployToken) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil + return do[*DeployToken](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/deploy_tokens", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteProjectDeployToken removes a deploy token from the project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_tokens/#delete-a-project-deploy-token -func (s *DeployTokensService) DeleteProjectDeployToken(pid any, deployToken int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/deploy_tokens/%d", PathEscape(project), deployToken) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *DeployTokensService) DeleteProjectDeployToken(pid any, deployToken int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/deploy_tokens/%d", ProjectID{pid}, deployToken), + withRequestOpts(options...), + ) + return resp, err } // ListGroupDeployTokensOptions represents the available ListGroupDeployTokens() @@ -196,56 +177,23 @@ func (s *DeployTokensService) DeleteProjectDeployToken(pid any, deployToken int, // // GitLab API docs: // 
https://docs.gitlab.com/api/deploy_tokens/#list-group-deploy-tokens -type ListGroupDeployTokensOptions ListOptions +type ListGroupDeployTokensOptions struct { + ListOptions +} -// ListGroupDeployTokens gets a list of a group’s deploy tokens. -// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_tokens/#list-group-deploy-tokens func (s *DeployTokensService) ListGroupDeployTokens(gid any, opt *ListGroupDeployTokensOptions, options ...RequestOptionFunc) ([]*DeployToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/deploy_tokens", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ts []*DeployToken - resp, err := s.client.Do(req, &ts) - if err != nil { - return nil, resp, err - } - - return ts, resp, nil + return do[[]*DeployToken](s.client, + withPath("groups/%s/deploy_tokens", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetGroupDeployToken gets a single deploy token. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_tokens/#get-a-group-deploy-token -func (s *DeployTokensService) GetGroupDeployToken(gid any, deployToken int, options ...RequestOptionFunc) (*DeployToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/deploy_tokens/%d", PathEscape(group), deployToken) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(DeployToken) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil +func (s *DeployTokensService) GetGroupDeployToken(gid any, deployToken int64, options ...RequestOptionFunc) (*DeployToken, *Response, error) { + return do[*DeployToken](s.client, + withPath("groups/%s/deploy_tokens/%d", GroupID{gid}, deployToken), + withRequestOpts(options...), + ) } // CreateGroupDeployTokenOptions represents the available CreateGroupDeployToken() options. @@ -259,46 +207,20 @@ type CreateGroupDeployTokenOptions struct { Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` } -// CreateGroupDeployToken creates a new deploy token for a group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_tokens/#create-a-group-deploy-token func (s *DeployTokensService) CreateGroupDeployToken(gid any, opt *CreateGroupDeployTokenOptions, options ...RequestOptionFunc) (*DeployToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/deploy_tokens", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(DeployToken) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil + return do[*DeployToken](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/deploy_tokens", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteGroupDeployToken removes a deploy token from the group. -// -// GitLab API docs: -// https://docs.gitlab.com/api/deploy_tokens/#delete-a-group-deploy-token -func (s *DeployTokensService) DeleteGroupDeployToken(gid any, deployToken int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/deploy_tokens/%d", PathEscape(group), deployToken) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *DeployTokensService) DeleteGroupDeployToken(gid any, deployToken int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/deploy_tokens/%d", GroupID{gid}, deployToken), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/deployments.go b/vendor/gitlab.com/gitlab-org/api/client-go/deployments.go index 7368477c9c..77405df2f5 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/deployments.go +++ 
b/vendor/gitlab.com/gitlab-org/api/client-go/deployments.go @@ -16,7 +16,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -24,12 +23,45 @@ import ( type ( // DeploymentsServiceInterface defines all the API methods for the DeploymentsService DeploymentsServiceInterface interface { + // ListProjectDeployments gets a list of deployments in a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deployments/#list-project-deployments + // ListProjectDeployments gets a list of deployments in a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deployments/#list-project-deployments ListProjectDeployments(pid any, opts *ListProjectDeploymentsOptions, options ...RequestOptionFunc) ([]*Deployment, *Response, error) - GetProjectDeployment(pid any, deployment int, options ...RequestOptionFunc) (*Deployment, *Response, error) + + // GetProjectDeployment gets a specific deployment for a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deployments/#get-a-specific-deployment + GetProjectDeployment(pid any, deployment int64, options ...RequestOptionFunc) (*Deployment, *Response, error) + + // CreateProjectDeployment creates a project deployment. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deployments/#create-a-deployment CreateProjectDeployment(pid any, opt *CreateProjectDeploymentOptions, options ...RequestOptionFunc) (*Deployment, *Response, error) - UpdateProjectDeployment(pid any, deployment int, opt *UpdateProjectDeploymentOptions, options ...RequestOptionFunc) (*Deployment, *Response, error) - ApproveOrRejectProjectDeployment(pid any, deployment int, opt *ApproveOrRejectProjectDeploymentOptions, options ...RequestOptionFunc) (*Response, error) - DeleteProjectDeployment(pid any, deployment int, options ...RequestOptionFunc) (*Response, error) + + // UpdateProjectDeployment updates a project deployment. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/deployments/#update-a-deployment + UpdateProjectDeployment(pid any, deployment int64, opt *UpdateProjectDeploymentOptions, options ...RequestOptionFunc) (*Deployment, *Response, error) + + // ApproveOrRejectProjectDeployment approves or rejects a blocked deployment. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deployments/#approve-or-reject-a-blocked-deployment + ApproveOrRejectProjectDeployment(pid any, deployment int64, opt *ApproveOrRejectProjectDeploymentOptions, options ...RequestOptionFunc) (*Response, error) + + // DeleteProjectDeployment deletes a specific deployment. + // + // GitLab API docs: + // https://docs.gitlab.com/api/deployments/#delete-a-specific-deployment + DeleteProjectDeployment(pid any, deployment int64, options ...RequestOptionFunc) (*Response, error) } // DeploymentsService handles communication with the deployment related methods @@ -43,41 +75,47 @@ type ( var _ DeploymentsServiceInterface = (*DeploymentsService)(nil) -// Deployment represents the Gitlab deployment +// Deployment represents the GitLab deployment type Deployment struct { - ID int `json:"id"` - IID int `json:"iid"` - Ref string `json:"ref"` - SHA string `json:"sha"` - Status string `json:"status"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - User *ProjectUser `json:"user"` - Environment *Environment `json:"environment"` - Deployable struct { - ID int `json:"id"` - Status string `json:"status"` - Stage string `json:"stage"` - Name string `json:"name"` - Ref string `json:"ref"` - Tag bool `json:"tag"` - Coverage float64 `json:"coverage"` - CreatedAt *time.Time `json:"created_at"` - StartedAt *time.Time `json:"started_at"` - FinishedAt *time.Time `json:"finished_at"` - Duration float64 `json:"duration"` - User *User `json:"user"` - Commit *Commit `json:"commit"` - Pipeline struct { - ID int `json:"id"` - SHA string `json:"sha"` - Ref string `json:"ref"` - 
Status string `json:"status"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - } `json:"pipeline"` - Runner *Runner `json:"runner"` - } `json:"deployable"` + ID int64 `json:"id"` + IID int64 `json:"iid"` + Ref string `json:"ref"` + SHA string `json:"sha"` + Status string `json:"status"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + User *ProjectUser `json:"user"` + Environment *Environment `json:"environment"` + Deployable DeploymentDeployable `json:"deployable"` +} + +// DeploymentDeployable represents the Gitlab deployment deployable +type DeploymentDeployable struct { + ID int64 `json:"id"` + Status string `json:"status"` + Stage string `json:"stage"` + Name string `json:"name"` + Ref string `json:"ref"` + Tag bool `json:"tag"` + Coverage float64 `json:"coverage"` + CreatedAt *time.Time `json:"created_at"` + StartedAt *time.Time `json:"started_at"` + FinishedAt *time.Time `json:"finished_at"` + Duration float64 `json:"duration"` + User *User `json:"user"` + Commit *Commit `json:"commit"` + Pipeline DeploymentDeployablePipeline `json:"pipeline"` + Runner *Runner `json:"runner"` +} + +// DeploymentDeployablePipeline represents the Gitlab deployment deployable pipeline +type DeploymentDeployablePipeline struct { + ID int64 `json:"id"` + SHA string `json:"sha"` + Ref string `json:"ref"` + Status string `json:"status"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` } // ListProjectDeploymentsOptions represents the available ListProjectDeployments() options. 
@@ -91,63 +129,28 @@ type ListProjectDeploymentsOptions struct { Environment *string `url:"environment,omitempty" json:"environment,omitempty"` Status *string `url:"status,omitempty" json:"status,omitempty"` - // Only for Gitlab versions less than 14 + // Only for GitLab versions less than 14 UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - // Only for Gitlab 14 or higher + // Only for GitLab 14 or higher FinishedAfter *time.Time `url:"finished_after,omitempty" json:"finished_after,omitempty"` FinishedBefore *time.Time `url:"finished_before,omitempty" json:"finished_before,omitempty"` } -// ListProjectDeployments gets a list of deployments in a project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/deployments/#list-project-deployments func (s *DeploymentsService) ListProjectDeployments(pid any, opts *ListProjectDeploymentsOptions, options ...RequestOptionFunc) ([]*Deployment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deployments", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var ds []*Deployment - resp, err := s.client.Do(req, &ds) - if err != nil { - return nil, resp, err - } - - return ds, resp, nil + return do[[]*Deployment](s.client, + withPath("projects/%s/deployments", ProjectID{pid}), + withAPIOpts(opts), + withRequestOpts(options...), + ) } -// GetProjectDeployment get a deployment for a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deployments/#get-a-specific-deployment -func (s *DeploymentsService) GetProjectDeployment(pid any, deployment int, options ...RequestOptionFunc) (*Deployment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deployments/%d", PathEscape(project), deployment) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - d := new(Deployment) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil +func (s *DeploymentsService) GetProjectDeployment(pid any, deployment int64, options ...RequestOptionFunc) (*Deployment, *Response, error) { + return do[*Deployment](s.client, + withPath("projects/%s/deployments/%d", ProjectID{pid}, deployment), + withRequestOpts(options...), + ) } // CreateProjectDeploymentOptions represents the available @@ -163,29 +166,13 @@ type CreateProjectDeploymentOptions struct { Status *DeploymentStatusValue `url:"status,omitempty" json:"status,omitempty"` } -// CreateProjectDeployment creates a project deployment. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deployments/#create-a-deployment func (s *DeploymentsService) CreateProjectDeployment(pid any, opt *CreateProjectDeploymentOptions, options ...RequestOptionFunc) (*Deployment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deployments", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - d := new(Deployment) - resp, err := s.client.Do(req, &d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil + return do[*Deployment](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/deployments", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateProjectDeploymentOptions represents the available @@ -197,29 +184,13 @@ type UpdateProjectDeploymentOptions struct { Status *DeploymentStatusValue `url:"status,omitempty" json:"status,omitempty"` } -// UpdateProjectDeployment updates a project deployment. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deployments/#update-a-deployment -func (s *DeploymentsService) UpdateProjectDeployment(pid any, deployment int, opt *UpdateProjectDeploymentOptions, options ...RequestOptionFunc) (*Deployment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deployments/%d", PathEscape(project), deployment) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - d := new(Deployment) - resp, err := s.client.Do(req, &d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil +func (s *DeploymentsService) UpdateProjectDeployment(pid any, deployment int64, opt *UpdateProjectDeploymentOptions, options ...RequestOptionFunc) (*Deployment, *Response, error) { + return do[*Deployment](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/deployments/%d", ProjectID{pid}, deployment), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ApproveOrRejectProjectDeploymentOptions represents the available @@ -233,42 +204,21 @@ type ApproveOrRejectProjectDeploymentOptions struct { RepresentedAs *string `url:"represented_as,omitempty" json:"represented_as,omitempty"` } -// ApproveOrRejectProjectDeployment approve or reject a blocked deployment. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deployments/#approve-or-reject-a-blocked-deployment -func (s *DeploymentsService) ApproveOrRejectProjectDeployment(pid any, deployment int, - opt *ApproveOrRejectProjectDeploymentOptions, options ...RequestOptionFunc, -) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/deployments/%d/approval", PathEscape(project), deployment) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *DeploymentsService) ApproveOrRejectProjectDeployment(pid any, deployment int64, opt *ApproveOrRejectProjectDeploymentOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/deployments/%d/approval", ProjectID{pid}, deployment), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } -// DeleteProjectDeployment delete a project deployment. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/deployments/#delete-a-specific-deployment -func (s *DeploymentsService) DeleteProjectDeployment(pid any, deployment int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/deployments/%d", PathEscape(project), deployment) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *DeploymentsService) DeleteProjectDeployment(pid any, deployment int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/deployments/%d", ProjectID{pid}, deployment), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/deployments_merge_requests.go b/vendor/gitlab.com/gitlab-org/api/client-go/deployments_merge_requests.go index a2b0e717c0..4d418e51db 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/deployments_merge_requests.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/deployments_merge_requests.go @@ -11,17 +11,17 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -package gitlab -import ( - "fmt" - "net/http" -) +package gitlab type ( // DeploymentMergeRequestsServiceInterface defines all the API methods for the DeploymentMergeRequestsService DeploymentMergeRequestsServiceInterface interface { - ListDeploymentMergeRequests(pid any, deployment int, opts *ListMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) + // ListDeploymentMergeRequests get the merge requests associated with deployment. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/deployments/#list-of-merge-requests-associated-with-a-deployment + ListDeploymentMergeRequests(pid any, deployment int64, opts *ListMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) } // DeploymentMergeRequestsService handles communication with the deployment's @@ -36,27 +36,10 @@ type ( var _ DeploymentMergeRequestsServiceInterface = (*DeploymentMergeRequestsService)(nil) -// ListDeploymentMergeRequests get the merge requests associated with deployment. -// -// GitLab API docs: -// https://docs.gitlab.com/api/deployments/#list-of-merge-requests-associated-with-a-deployment -func (s *DeploymentMergeRequestsService) ListDeploymentMergeRequests(pid any, deployment int, opts *ListMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deployments/%d/merge_requests", PathEscape(project), deployment) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var mrs []*MergeRequest - resp, err := s.client.Do(req, &mrs) - if err != nil { - return nil, resp, err - } - - return mrs, resp, nil +func (s *DeploymentMergeRequestsService) ListDeploymentMergeRequests(pid any, deployment int64, opts *ListMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { + return do[[]*MergeRequest](s.client, + withPath("projects/%s/deployments/%d/merge_requests", ProjectID{pid}, deployment), + withAPIOpts(opts), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/discussions.go b/vendor/gitlab.com/gitlab-org/api/client-go/discussions.go index f73a865ad4..e305dcaff9 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/discussions.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/discussions.go @@ -17,7 +17,6 @@ package 
gitlab import ( - "fmt" "net/http" "time" ) @@ -25,37 +24,205 @@ import ( type ( // DiscussionsServiceInterface defines all the API methods for the DiscussionsService DiscussionsServiceInterface interface { - ListIssueDiscussions(pid any, issue int, opt *ListIssueDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) - GetIssueDiscussion(pid any, issue int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) - CreateIssueDiscussion(pid any, issue int, opt *CreateIssueDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) - AddIssueDiscussionNote(pid any, issue int, discussion string, opt *AddIssueDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - UpdateIssueDiscussionNote(pid any, issue int, discussion string, note int, opt *UpdateIssueDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - DeleteIssueDiscussionNote(pid any, issue int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) - ListSnippetDiscussions(pid any, snippet int, opt *ListSnippetDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) - GetSnippetDiscussion(pid any, snippet int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) - CreateSnippetDiscussion(pid any, snippet int, opt *CreateSnippetDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) - AddSnippetDiscussionNote(pid any, snippet int, discussion string, opt *AddSnippetDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - UpdateSnippetDiscussionNote(pid any, snippet int, discussion string, note int, opt *UpdateSnippetDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - DeleteSnippetDiscussionNote(pid any, snippet int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) - ListGroupEpicDiscussions(gid any, epic 
int, opt *ListGroupEpicDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) - GetEpicDiscussion(gid any, epic int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) - CreateEpicDiscussion(gid any, epic int, opt *CreateEpicDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) - AddEpicDiscussionNote(gid any, epic int, discussion string, opt *AddEpicDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - UpdateEpicDiscussionNote(gid any, epic int, discussion string, note int, opt *UpdateEpicDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - DeleteEpicDiscussionNote(gid any, epic int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) - ListMergeRequestDiscussions(pid any, mergeRequest int, opt *ListMergeRequestDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) - GetMergeRequestDiscussion(pid any, mergeRequest int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) - CreateMergeRequestDiscussion(pid any, mergeRequest int, opt *CreateMergeRequestDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) - ResolveMergeRequestDiscussion(pid any, mergeRequest int, discussion string, opt *ResolveMergeRequestDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) - AddMergeRequestDiscussionNote(pid any, mergeRequest int, discussion string, opt *AddMergeRequestDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - UpdateMergeRequestDiscussionNote(pid any, mergeRequest int, discussion string, note int, opt *UpdateMergeRequestDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - DeleteMergeRequestDiscussionNote(pid any, mergeRequest int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) + // ListIssueDiscussions gets a 
list of all discussions for a single issue. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#list-project-issue-discussion-items + ListIssueDiscussions(pid any, issue int64, opt *ListIssueDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) + + // GetIssueDiscussion returns a single discussion for a specific project issue. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#get-single-issue-discussion-item + GetIssueDiscussion(pid any, issue int64, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) + + // CreateIssueDiscussion creates a new discussion to a single project issue. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#create-new-issue-thread + CreateIssueDiscussion(pid any, issue int64, opt *CreateIssueDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) + + // AddIssueDiscussionNote creates a new note in an existing discussion of an issue. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#add-note-to-existing-issue-thread + AddIssueDiscussionNote(pid any, issue int64, discussion string, opt *AddIssueDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + + // UpdateIssueDiscussionNote modifies an existing note in a discussion of an issue. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#modify-existing-issue-thread-note + UpdateIssueDiscussionNote(pid any, issue int64, discussion string, note int64, opt *UpdateIssueDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + + // DeleteIssueDiscussionNote deletes a note from a discussion of an issue. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#delete-an-issue-thread-note + DeleteIssueDiscussionNote(pid any, issue int64, discussion string, note int64, options ...RequestOptionFunc) (*Response, error) + + // ListSnippetDiscussions gets all discussions for a snippet. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#list-project-snippet-discussion-items + ListSnippetDiscussions(pid any, snippet int64, opt *ListSnippetDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) + + // GetSnippetDiscussion returns a single discussion for a snippet. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#get-single-snippet-discussion-item + GetSnippetDiscussion(pid any, snippet int64, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) + + // CreateSnippetDiscussion creates a new discussion for a snippet. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#create-new-snippet-thread + CreateSnippetDiscussion(pid any, snippet int64, opt *CreateSnippetDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) + + // AddSnippetDiscussionNote adds a new note to a snippet discussion. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#add-note-to-existing-snippet-thread + AddSnippetDiscussionNote(pid any, snippet int64, discussion string, opt *AddSnippetDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + + // UpdateSnippetDiscussionNote modifies an existing note in a snippet discussion. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#modify-existing-snippet-thread-note + UpdateSnippetDiscussionNote(pid any, snippet int64, discussion string, note int64, opt *UpdateSnippetDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + + // DeleteSnippetDiscussionNote deletes a note from a snippet discussion. 
+ // + // GitLab API docs: https://docs.gitlab.com/api/discussions/#delete-a-snippet-thread-note + DeleteSnippetDiscussionNote(pid any, snippet int64, discussion string, note int64, options ...RequestOptionFunc) (*Response, error) + + // ListGroupEpicDiscussions gets all discussions for a group epic. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#list-group-epic-discussion-items + ListGroupEpicDiscussions(gid any, epic int64, opt *ListGroupEpicDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) + + // GetEpicDiscussion returns a single discussion for a group epic. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#get-single-epic-discussion-item + GetEpicDiscussion(gid any, epic int64, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) + + // CreateEpicDiscussion creates a new discussion for a group epic. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#create-new-epic-thread + CreateEpicDiscussion(gid any, epic int64, opt *CreateEpicDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) + + // AddEpicDiscussionNote adds a new note to an epic discussion. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#add-note-to-existing-epic-thread + AddEpicDiscussionNote(gid any, epic int64, discussion string, opt *AddEpicDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + + // UpdateEpicDiscussionNote modifies an existing note in an epic discussion. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#modify-existing-epic-thread-note + UpdateEpicDiscussionNote(gid any, epic int64, discussion string, note int64, opt *UpdateEpicDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + + // DeleteEpicDiscussionNote deletes a note from an epic discussion. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#delete-an-epic-thread-note + DeleteEpicDiscussionNote(gid any, epic int64, discussion string, note int64, options ...RequestOptionFunc) (*Response, error) + + // ListMergeRequestDiscussions gets all discussions for a merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#list-project-merge-request-discussion-items + ListMergeRequestDiscussions(pid any, mergeRequest int64, opt *ListMergeRequestDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) + + // GetMergeRequestDiscussion returns a single discussion for a merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#get-single-merge-request-discussion-item + GetMergeRequestDiscussion(pid any, mergeRequest int64, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) + + // CreateMergeRequestDiscussion creates a new discussion for a merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#create-new-merge-request-thread + CreateMergeRequestDiscussion(pid any, mergeRequest int64, opt *CreateMergeRequestDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) + + // ResolveMergeRequestDiscussion resolves or unresolves a merge request discussion. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#resolve-a-merge-request-thread + ResolveMergeRequestDiscussion(pid any, mergeRequest int64, discussion string, opt *ResolveMergeRequestDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) + + // AddMergeRequestDiscussionNote adds a new note to a merge request discussion. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#add-note-to-existing-merge-request-thread + AddMergeRequestDiscussionNote(pid any, mergeRequest int64, discussion string, opt *AddMergeRequestDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + + // UpdateMergeRequestDiscussionNote modifies an existing note in a merge request discussion. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#modify-an-existing-merge-request-thread-note + UpdateMergeRequestDiscussionNote(pid any, mergeRequest int64, discussion string, note int64, opt *UpdateMergeRequestDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + + // DeleteMergeRequestDiscussionNote deletes a note from a merge request discussion. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#delete-a-merge-request-thread-note + DeleteMergeRequestDiscussionNote(pid any, mergeRequest int64, discussion string, note int64, options ...RequestOptionFunc) (*Response, error) + + // ListCommitDiscussions gets all discussions for a commit. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#list-project-commit-discussion-items ListCommitDiscussions(pid any, commit string, opt *ListCommitDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) + + // GetCommitDiscussion returns a single discussion for a commit. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#get-single-commit-discussion-item GetCommitDiscussion(pid any, commit string, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) + + // CreateCommitDiscussion creates a new discussion for a commit. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#create-new-commit-thread CreateCommitDiscussion(pid any, commit string, opt *CreateCommitDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) + + // AddCommitDiscussionNote adds a new note to a commit discussion. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#add-note-to-existing-commit-thread AddCommitDiscussionNote(pid any, commit string, discussion string, opt *AddCommitDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - UpdateCommitDiscussionNote(pid any, commit string, discussion string, note int, opt *UpdateCommitDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - DeleteCommitDiscussionNote(pid any, commit string, discussion string, note int, options ...RequestOptionFunc) (*Response, error) + + // UpdateCommitDiscussionNote modifies an existing note in a commit discussion. + // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#modify-an-existing-commit-thread-note + UpdateCommitDiscussionNote(pid any, commit string, discussion string, note int64, opt *UpdateCommitDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + + // DeleteCommitDiscussionNote deletes a note from a commit discussion. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/discussions/#delete-a-commit-thread-note + DeleteCommitDiscussionNote(pid any, commit string, discussion string, note int64, options ...RequestOptionFunc) (*Response, error) } // DiscussionsService handles communication with the discussions related @@ -87,61 +254,23 @@ func (d Discussion) String() string { // // GitLab API docs: // https://docs.gitlab.com/api/discussions/#list-project-issue-discussion-items -type ListIssueDiscussionsOptions ListOptions - -// ListIssueDiscussions gets a list of all discussions for a single -// issue. -// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#list-project-issue-discussion-items -func (s *DiscussionsService) ListIssueDiscussions(pid any, issue int, opt *ListIssueDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/discussions", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ds []*Discussion - resp, err := s.client.Do(req, &ds) - if err != nil { - return nil, resp, err - } - - return ds, resp, nil +type ListIssueDiscussionsOptions struct { + ListOptions } -// GetIssueDiscussion returns a single discussion for a specific project issue. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#get-single-issue-discussion-item -func (s *DiscussionsService) GetIssueDiscussion(pid any, issue int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/discussions/%s", - PathEscape(project), - issue, - discussion, +func (s *DiscussionsService) ListIssueDiscussions(pid any, issue int64, opt *ListIssueDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { + return do[[]*Discussion](s.client, + withPath("projects/%s/issues/%d/discussions", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), ) +} - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil +func (s *DiscussionsService) GetIssueDiscussion(pid any, issue int64, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { + return do[*Discussion](s.client, + withPath("projects/%s/issues/%d/discussions/%s", ProjectID{pid}, issue, discussion), + withRequestOpts(options...), + ) } // CreateIssueDiscussionOptions represents the available CreateIssueDiscussion() @@ -154,29 +283,13 @@ type CreateIssueDiscussionOptions struct { CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` } -// CreateIssueDiscussion creates a new discussion to a single project issue. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#create-new-issue-thread -func (s *DiscussionsService) CreateIssueDiscussion(pid any, issue int, opt *CreateIssueDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/discussions", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil +func (s *DiscussionsService) CreateIssueDiscussion(pid any, issue int64, opt *CreateIssueDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { + return do[*Discussion](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/issues/%d/discussions", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // AddIssueDiscussionNoteOptions represents the available AddIssueDiscussionNote() @@ -189,33 +302,13 @@ type AddIssueDiscussionNoteOptions struct { CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` } -// AddIssueDiscussionNote creates a new discussion to a single project issue. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#add-note-to-existing-issue-thread -func (s *DiscussionsService) AddIssueDiscussionNote(pid any, issue int, discussion string, opt *AddIssueDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/discussions/%s/notes", - PathEscape(project), - issue, - discussion, +func (s *DiscussionsService) AddIssueDiscussionNote(pid any, issue int64, discussion string, opt *AddIssueDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/issues/%d/discussions/%s/notes", ProjectID{pid}, issue, discussion), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil } // UpdateIssueDiscussionNoteOptions represents the available @@ -228,58 +321,22 @@ type UpdateIssueDiscussionNoteOptions struct { CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` } -// UpdateIssueDiscussionNote modifies existing discussion of an issue. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#modify-existing-issue-thread-note -func (s *DiscussionsService) UpdateIssueDiscussionNote(pid any, issue int, discussion string, note int, opt *UpdateIssueDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/discussions/%s/notes/%d", - PathEscape(project), - issue, - discussion, - note, +func (s *DiscussionsService) UpdateIssueDiscussionNote(pid any, issue int64, discussion string, note int64, opt *UpdateIssueDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/issues/%d/discussions/%s/notes/%d", ProjectID{pid}, issue, discussion, note), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil } -// DeleteIssueDiscussionNote deletes an existing discussion of an issue. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#delete-an-issue-thread-note -func (s *DiscussionsService) DeleteIssueDiscussionNote(pid any, issue int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/discussions/%s/notes/%d", - PathEscape(project), - issue, - discussion, - note, +func (s *DiscussionsService) DeleteIssueDiscussionNote(pid any, issue int64, discussion string, note int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/issues/%d/discussions/%s/notes/%d", ProjectID{pid}, issue, discussion, note), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + return resp, err } // ListSnippetDiscussionsOptions represents the available ListSnippetDiscussions() @@ -287,61 +344,23 @@ func (s *DiscussionsService) DeleteIssueDiscussionNote(pid any, issue int, discu // // GitLab API docs: // https://docs.gitlab.com/api/discussions/#list-project-snippet-discussion-items -type ListSnippetDiscussionsOptions ListOptions - -// ListSnippetDiscussions gets a list of all discussions for a single -// snippet. Snippet discussions are comments users can post to a snippet. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#list-project-snippet-discussion-items -func (s *DiscussionsService) ListSnippetDiscussions(pid any, snippet int, opt *ListSnippetDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/discussions", PathEscape(project), snippet) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ds []*Discussion - resp, err := s.client.Do(req, &ds) - if err != nil { - return nil, resp, err - } - - return ds, resp, nil +type ListSnippetDiscussionsOptions struct { + ListOptions } -// GetSnippetDiscussion returns a single discussion for a given snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#get-single-snippet-discussion-item -func (s *DiscussionsService) GetSnippetDiscussion(pid any, snippet int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/discussions/%s", - PathEscape(project), - snippet, - discussion, +func (s *DiscussionsService) ListSnippetDiscussions(pid any, snippet int64, opt *ListSnippetDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { + return do[[]*Discussion](s.client, + withPath("projects/%s/snippets/%d/discussions", ProjectID{pid}, snippet), + withAPIOpts(opt), + withRequestOpts(options...), ) +} - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil +func (s *DiscussionsService) GetSnippetDiscussion(pid any, snippet int64, discussion string, options ...RequestOptionFunc) 
(*Discussion, *Response, error) { + return do[*Discussion](s.client, + withPath("projects/%s/snippets/%d/discussions/%s", ProjectID{pid}, snippet, discussion), + withRequestOpts(options...), + ) } // CreateSnippetDiscussionOptions represents the available @@ -354,30 +373,13 @@ type CreateSnippetDiscussionOptions struct { CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` } -// CreateSnippetDiscussion creates a new discussion for a single snippet. -// Snippet discussions are comments users can post to a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#create-new-snippet-thread -func (s *DiscussionsService) CreateSnippetDiscussion(pid any, snippet int, opt *CreateSnippetDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/discussions", PathEscape(project), snippet) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil +func (s *DiscussionsService) CreateSnippetDiscussion(pid any, snippet int64, opt *CreateSnippetDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { + return do[*Discussion](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/snippets/%d/discussions", ProjectID{pid}, snippet), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // AddSnippetDiscussionNoteOptions represents the available @@ -390,34 +392,13 @@ type AddSnippetDiscussionNoteOptions struct { CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` } -// AddSnippetDiscussionNote creates a new discussion to a single project -// snippet. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#add-note-to-existing-snippet-thread -func (s *DiscussionsService) AddSnippetDiscussionNote(pid any, snippet int, discussion string, opt *AddSnippetDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/discussions/%s/notes", - PathEscape(project), - snippet, - discussion, +func (s *DiscussionsService) AddSnippetDiscussionNote(pid any, snippet int64, discussion string, opt *AddSnippetDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/snippets/%d/discussions/%s/notes", ProjectID{pid}, snippet, discussion), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil } // UpdateSnippetDiscussionNoteOptions represents the available @@ -430,58 +411,22 @@ type UpdateSnippetDiscussionNoteOptions struct { CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` } -// UpdateSnippetDiscussionNote modifies existing discussion of a snippet. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#modify-existing-snippet-thread-note -func (s *DiscussionsService) UpdateSnippetDiscussionNote(pid any, snippet int, discussion string, note int, opt *UpdateSnippetDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/discussions/%s/notes/%d", - PathEscape(project), - snippet, - discussion, - note, +func (s *DiscussionsService) UpdateSnippetDiscussionNote(pid any, snippet int64, discussion string, note int64, opt *UpdateSnippetDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/snippets/%d/discussions/%s/notes/%d", ProjectID{pid}, snippet, discussion, note), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil } -// DeleteSnippetDiscussionNote deletes an existing discussion of a snippet. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#delete-a-snippet-thread-note -func (s *DiscussionsService) DeleteSnippetDiscussionNote(pid any, snippet int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/discussions/%s/notes/%d", - PathEscape(project), - snippet, - discussion, - note, +func (s *DiscussionsService) DeleteSnippetDiscussionNote(pid any, snippet int64, discussion string, note int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/snippets/%d/discussions/%s/notes/%d", ProjectID{pid}, snippet, discussion, note), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + return resp, err } // ListGroupEpicDiscussionsOptions represents the available @@ -489,64 +434,23 @@ func (s *DiscussionsService) DeleteSnippetDiscussionNote(pid any, snippet int, d // // GitLab API docs: // https://docs.gitlab.com/api/discussions/#list-group-epic-discussion-items -type ListGroupEpicDiscussionsOptions ListOptions +type ListGroupEpicDiscussionsOptions struct { + ListOptions +} -// ListGroupEpicDiscussions gets a list of all discussions for a single -// epic. Epic discussions are comments users can post to a epic. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#list-group-epic-discussion-items -func (s *DiscussionsService) ListGroupEpicDiscussions(gid any, epic int, opt *ListGroupEpicDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/discussions", - PathEscape(group), - epic, +func (s *DiscussionsService) ListGroupEpicDiscussions(gid any, epic int64, opt *ListGroupEpicDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { + return do[[]*Discussion](s.client, + withPath("groups/%s/epics/%d/discussions", GroupID{gid}, epic), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ds []*Discussion - resp, err := s.client.Do(req, &ds) - if err != nil { - return nil, resp, err - } - - return ds, resp, nil } -// GetEpicDiscussion returns a single discussion for a given epic. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#get-single-epic-discussion-item -func (s *DiscussionsService) GetEpicDiscussion(gid any, epic int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/discussions/%s", - PathEscape(group), - epic, - discussion, +func (s *DiscussionsService) GetEpicDiscussion(gid any, epic int64, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { + return do[*Discussion](s.client, + withPath("groups/%s/epics/%d/discussions/%s", GroupID{gid}, epic, discussion), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil } // CreateEpicDiscussionOptions represents the available CreateEpicDiscussion() @@ -559,33 +463,13 @@ type CreateEpicDiscussionOptions struct { CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` } -// CreateEpicDiscussion creates a new discussion for a single epic. Epic -// discussions are comments users can post to a epic. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#create-new-epic-thread -func (s *DiscussionsService) CreateEpicDiscussion(gid any, epic int, opt *CreateEpicDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/discussions", - PathEscape(group), - epic, +func (s *DiscussionsService) CreateEpicDiscussion(gid any, epic int64, opt *CreateEpicDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { + return do[*Discussion](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/epics/%d/discussions", GroupID{gid}, epic), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil } // AddEpicDiscussionNoteOptions represents the available @@ -598,33 +482,13 @@ type AddEpicDiscussionNoteOptions struct { CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` } -// AddEpicDiscussionNote creates a new discussion to a single project epic. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#add-note-to-existing-epic-thread -func (s *DiscussionsService) AddEpicDiscussionNote(gid any, epic int, discussion string, opt *AddEpicDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/discussions/%s/notes", - PathEscape(group), - epic, - discussion, +func (s *DiscussionsService) AddEpicDiscussionNote(gid any, epic int64, discussion string, opt *AddEpicDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/epics/%d/discussions/%s/notes", GroupID{gid}, epic, discussion), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil } // UpdateEpicDiscussionNoteOptions represents the available UpdateEpicDiscussion() @@ -637,58 +501,22 @@ type UpdateEpicDiscussionNoteOptions struct { CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` } -// UpdateEpicDiscussionNote modifies existing discussion of an epic. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#modify-existing-epic-thread-note -func (s *DiscussionsService) UpdateEpicDiscussionNote(gid any, epic int, discussion string, note int, opt *UpdateEpicDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/discussions/%s/notes/%d", - PathEscape(group), - epic, - discussion, - note, +func (s *DiscussionsService) UpdateEpicDiscussionNote(gid any, epic int64, discussion string, note int64, opt *UpdateEpicDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/epics/%d/discussions/%s/notes/%d", GroupID{gid}, epic, discussion, note), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil } -// DeleteEpicDiscussionNote deletes an existing discussion of a epic. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#delete-an-epic-thread-note -func (s *DiscussionsService) DeleteEpicDiscussionNote(gid any, epic int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/discussions/%s/notes/%d", - PathEscape(group), - epic, - discussion, - note, +func (s *DiscussionsService) DeleteEpicDiscussionNote(gid any, epic int64, discussion string, note int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/epics/%d/discussions/%s/notes/%d", GroupID{gid}, epic, discussion, note), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + return resp, err } // ListMergeRequestDiscussionsOptions represents the available @@ -696,65 +524,23 @@ func (s *DiscussionsService) DeleteEpicDiscussionNote(gid any, epic int, discuss // // GitLab API docs: // https://docs.gitlab.com/api/discussions/#list-project-merge-request-discussion-items -type ListMergeRequestDiscussionsOptions ListOptions +type ListMergeRequestDiscussionsOptions struct { + ListOptions +} -// ListMergeRequestDiscussions gets a list of all discussions for a single -// merge request. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#list-project-merge-request-discussion-items -func (s *DiscussionsService) ListMergeRequestDiscussions(pid any, mergeRequest int, opt *ListMergeRequestDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions", - PathEscape(project), - mergeRequest, +func (s *DiscussionsService) ListMergeRequestDiscussions(pid any, mergeRequest int64, opt *ListMergeRequestDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { + return do[[]*Discussion](s.client, + withPath("projects/%s/merge_requests/%d/discussions", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ds []*Discussion - resp, err := s.client.Do(req, &ds) - if err != nil { - return nil, resp, err - } - - return ds, resp, nil } -// GetMergeRequestDiscussion returns a single discussion for a given merge -// request. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#get-single-merge-request-discussion-item -func (s *DiscussionsService) GetMergeRequestDiscussion(pid any, mergeRequest int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s", - PathEscape(project), - mergeRequest, - discussion, +func (s *DiscussionsService) GetMergeRequestDiscussion(pid any, mergeRequest int64, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { + return do[*Discussion](s.client, + withPath("projects/%s/merge_requests/%d/discussions/%s", ProjectID{pid}, mergeRequest, discussion), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil } // CreateMergeRequestDiscussionOptions represents the available @@ -777,11 +563,11 @@ type PositionOptions struct { NewPath *string `url:"new_path,omitempty" json:"new_path,omitempty"` OldPath *string `url:"old_path,omitempty" json:"old_path,omitempty"` PositionType *string `url:"position_type,omitempty" json:"position_type"` - NewLine *int `url:"new_line,omitempty" json:"new_line,omitempty"` - OldLine *int `url:"old_line,omitempty" json:"old_line,omitempty"` + NewLine *int64 `url:"new_line,omitempty" json:"new_line,omitempty"` + OldLine *int64 `url:"old_line,omitempty" json:"old_line,omitempty"` LineRange *LineRangeOptions `url:"line_range,omitempty" json:"line_range,omitempty"` - Width *int `url:"width,omitempty" json:"width,omitempty"` - Height *int `url:"height,omitempty" json:"height,omitempty"` + Width *int64 `url:"width,omitempty" json:"width,omitempty"` + Height *int64 `url:"height,omitempty" json:"height,omitempty"` X 
*float64 `url:"x,omitempty" json:"x,omitempty"` Y *float64 `url:"y,omitempty" json:"y,omitempty"` } @@ -796,37 +582,17 @@ type LineRangeOptions struct { type LinePositionOptions struct { LineCode *string `url:"line_code,omitempty" json:"line_code,omitempty"` Type *string `url:"type,omitempty" json:"type,omitempty"` - OldLine *int `url:"old_line,omitempty" json:"old_line,omitempty"` - NewLine *int `url:"new_line,omitempty" json:"new_line,omitempty"` + OldLine *int64 `url:"old_line,omitempty" json:"old_line,omitempty"` + NewLine *int64 `url:"new_line,omitempty" json:"new_line,omitempty"` } -// CreateMergeRequestDiscussion creates a new discussion for a single merge -// request. -// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#create-new-merge-request-thread -func (s *DiscussionsService) CreateMergeRequestDiscussion(pid any, mergeRequest int, opt *CreateMergeRequestDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions", - PathEscape(project), - mergeRequest, +func (s *DiscussionsService) CreateMergeRequestDiscussion(pid any, mergeRequest int64, opt *CreateMergeRequestDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { + return do[*Discussion](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/discussions", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil } // ResolveMergeRequestDiscussionOptions represents the available @@ -838,34 +604,13 @@ type ResolveMergeRequestDiscussionOptions struct { Resolved *bool `url:"resolved,omitempty" 
json:"resolved,omitempty"` } -// ResolveMergeRequestDiscussion resolves/unresolves whole discussion of a merge -// request. -// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#resolve-a-merge-request-thread -func (s *DiscussionsService) ResolveMergeRequestDiscussion(pid any, mergeRequest int, discussion string, opt *ResolveMergeRequestDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s", - PathEscape(project), - mergeRequest, - discussion, +func (s *DiscussionsService) ResolveMergeRequestDiscussion(pid any, mergeRequest int64, discussion string, opt *ResolveMergeRequestDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { + return do[*Discussion](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/merge_requests/%d/discussions/%s", ProjectID{pid}, mergeRequest, discussion), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil } // AddMergeRequestDiscussionNoteOptions represents the available @@ -878,34 +623,13 @@ type AddMergeRequestDiscussionNoteOptions struct { CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` } -// AddMergeRequestDiscussionNote creates a new discussion to a single project -// merge request. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#add-note-to-existing-merge-request-thread -func (s *DiscussionsService) AddMergeRequestDiscussionNote(pid any, mergeRequest int, discussion string, opt *AddMergeRequestDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s/notes", - PathEscape(project), - mergeRequest, - discussion, +func (s *DiscussionsService) AddMergeRequestDiscussionNote(pid any, mergeRequest int64, discussion string, opt *AddMergeRequestDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/discussions/%s/notes", ProjectID{pid}, mergeRequest, discussion), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil } // UpdateMergeRequestDiscussionNoteOptions represents the available @@ -919,60 +643,22 @@ type UpdateMergeRequestDiscussionNoteOptions struct { Resolved *bool `url:"resolved,omitempty" json:"resolved,omitempty"` } -// UpdateMergeRequestDiscussionNote modifies existing discussion of a merge -// request. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#modify-an-existing-merge-request-thread-note -func (s *DiscussionsService) UpdateMergeRequestDiscussionNote(pid any, mergeRequest int, discussion string, note int, opt *UpdateMergeRequestDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s/notes/%d", - PathEscape(project), - mergeRequest, - discussion, - note, +func (s *DiscussionsService) UpdateMergeRequestDiscussionNote(pid any, mergeRequest int64, discussion string, note int64, opt *UpdateMergeRequestDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/merge_requests/%d/discussions/%s/notes/%d", ProjectID{pid}, mergeRequest, discussion, note), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil } -// DeleteMergeRequestDiscussionNote deletes an existing discussion of a merge -// request. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#delete-a-merge-request-thread-note -func (s *DiscussionsService) DeleteMergeRequestDiscussionNote(pid any, mergeRequest int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s/notes/%d", - PathEscape(project), - mergeRequest, - discussion, - note, +func (s *DiscussionsService) DeleteMergeRequestDiscussionNote(pid any, mergeRequest int64, discussion string, note int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/merge_requests/%d/discussions/%s/notes/%d", ProjectID{pid}, mergeRequest, discussion, note), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + return resp, err } // ListCommitDiscussionsOptions represents the available @@ -980,65 +666,23 @@ func (s *DiscussionsService) DeleteMergeRequestDiscussionNote(pid any, mergeRequ // // GitLab API docs: // https://docs.gitlab.com/api/discussions/#list-project-commit-discussion-items -type ListCommitDiscussionsOptions ListOptions +type ListCommitDiscussionsOptions struct { + ListOptions +} -// ListCommitDiscussions gets a list of all discussions for a single -// commit. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#list-project-commit-discussion-items func (s *DiscussionsService) ListCommitDiscussions(pid any, commit string, opt *ListCommitDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions", - PathEscape(project), - commit, + return do[[]*Discussion](s.client, + withPath("projects/%s/repository/commits/%s/discussions", ProjectID{pid}, commit), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ds []*Discussion - resp, err := s.client.Do(req, &ds) - if err != nil { - return nil, resp, err - } - - return ds, resp, nil } -// GetCommitDiscussion returns a single discussion for a specific project -// commit. -// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#get-single-commit-discussion-item func (s *DiscussionsService) GetCommitDiscussion(pid any, commit string, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions/%s", - PathEscape(project), - commit, - discussion, + return do[*Discussion](s.client, + withPath("projects/%s/repository/commits/%s/discussions/%s", ProjectID{pid}, commit, discussion), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil } // CreateCommitDiscussionOptions represents the available @@ -1052,32 +696,13 @@ type CreateCommitDiscussionOptions struct { Position *NotePosition 
`url:"position,omitempty" json:"position,omitempty"` } -// CreateCommitDiscussion creates a new discussion to a single project commit. -// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#create-new-commit-thread func (s *DiscussionsService) CreateCommitDiscussion(pid any, commit string, opt *CreateCommitDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions", - PathEscape(project), - commit, + return do[*Discussion](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/repository/commits/%s/discussions", ProjectID{pid}, commit), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil } // AddCommitDiscussionNoteOptions represents the available @@ -1090,33 +715,13 @@ type AddCommitDiscussionNoteOptions struct { CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` } -// AddCommitDiscussionNote creates a new discussion to a single project commit. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#add-note-to-existing-commit-thread func (s *DiscussionsService) AddCommitDiscussionNote(pid any, commit string, discussion string, opt *AddCommitDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions/%s/notes", - PathEscape(project), - commit, - discussion, + return do[*Note](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/repository/commits/%s/discussions/%s/notes", ProjectID{pid}, commit, discussion), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil } // UpdateCommitDiscussionNoteOptions represents the available @@ -1129,56 +734,20 @@ type UpdateCommitDiscussionNoteOptions struct { CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` } -// UpdateCommitDiscussionNote modifies existing discussion of a commit. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#modify-an-existing-commit-thread-note -func (s *DiscussionsService) UpdateCommitDiscussionNote(pid any, commit string, discussion string, note int, opt *UpdateCommitDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions/%s/notes/%d", - PathEscape(project), - commit, - discussion, - note, +func (s *DiscussionsService) UpdateCommitDiscussionNote(pid any, commit string, discussion string, note int64, opt *UpdateCommitDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/repository/commits/%s/discussions/%s/notes/%d", ProjectID{pid}, commit, discussion, note), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil } -// DeleteCommitDiscussionNote deletes an existing discussion of an commit. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/discussions/#delete-a-commit-thread-note -func (s *DiscussionsService) DeleteCommitDiscussionNote(pid any, commit string, discussion string, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions/%s/notes/%d", - PathEscape(project), - commit, - discussion, - note, +func (s *DiscussionsService) DeleteCommitDiscussionNote(pid any, commit string, discussion string, note int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/repository/commits/%s/discussions/%s/notes/%d", ProjectID{pid}, commit, discussion, note), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/docker-compose.yml b/vendor/gitlab.com/gitlab-org/api/client-go/docker-compose.yml new file mode 100644 index 0000000000..d5fdf197e1 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/docker-compose.yml @@ -0,0 +1,61 @@ +# Acceptance tests depend on a running GitLab instance. +# Only one of these services should be run at a time. 
+services: + gitlab-ee-no-license: + image: docker.io/gitlab/gitlab-ee:${GITLAB_CE_VERSION:-latest} + platform: linux/amd64 + shm_size: 256m + ports: + - 8095:80 + environment: + GITLAB_ROOT_PASSWORD: "FoQ7PaTZqWaftrZ7zvAK" + # The token uses the value of GITLAB_TOKEN, which is set in the makefile (unless overridden) + GITLAB_TOKEN: $GITLAB_TOKEN + labels: + client-go/owned: "" + volumes: + - config-ce:/etc/gitlab + - logs-ce:/var/log/gitlab + - data-ce:/var/opt/gitlab + - ${PWD}/scripts/healthcheck_and_setup.sh:/healthcheck-and-setup.sh:Z + - ${PWD}/scripts/gitlab.rb:/etc/gitlab/gitlab.rb:ro + - ${PWD}/certs:/etc/gitlab/ssl:ro + healthcheck: + test: /healthcheck-and-setup.sh + interval: 60s + timeout: 10m + + gitlab-ee: + image: docker.io/gitlab/gitlab-ee:${GITLAB_EE_VERSION:-latest} + platform: linux/amd64 + shm_size: 256m + ports: + - 8095:80 + environment: + GITLAB_ROOT_PASSWORD: "FoQ7PaTZqWaftrZ7zvAK" + # The token uses the value of GITLAB_TOKEN, which is set in the makefile (unless overridden) + GITLAB_TOKEN: $GITLAB_TOKEN + GITLAB_LICENSE_FILE: /Gitlab-license.txt + labels: + client-go/owned: "" + volumes: + - config-ee:/etc/gitlab + - logs-ee:/var/log/gitlab + - data-ee:/var/opt/gitlab + - ${PWD}/scripts/healthcheck_and_setup.sh:/healthcheck-and-setup.sh:Z + - ${PWD}/Gitlab-license.txt:/Gitlab-license.txt:Z + - ${PWD}/scripts/gitlab.rb:/etc/gitlab/gitlab.rb:ro + - ${PWD}/certs:/etc/gitlab/ssl:ro + healthcheck: + test: /healthcheck-and-setup.sh + interval: 60s + timeout: 10m + +volumes: + config-ce: + logs-ce: + data-ce: + config-ee: + logs-ee: + data-ee: + diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/dockerfile_templates.go b/vendor/gitlab.com/gitlab-org/api/client-go/dockerfile_templates.go index 0885b3b887..438cba9053 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/dockerfile_templates.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/dockerfile_templates.go @@ -16,16 +16,19 @@ package gitlab -import ( - "fmt" - "net/http" - "net/url" 
-) - type ( // DockerfileTemplatesServiceInterface defines all the API methods for the DockerfileTemplatesService DockerfileTemplatesServiceInterface interface { + // ListTemplates get a list of available Dockerfile templates. + // + // GitLab API docs: + // https://docs.gitlab.com/api/templates/dockerfiles/#list-dockerfile-templates ListTemplates(opt *ListDockerfileTemplatesOptions, options ...RequestOptionFunc) ([]*DockerfileTemplateListItem, *Response, error) + + // GetTemplate get a single Dockerfile template. + // + // GitLab API docs: + // https://docs.gitlab.com/api/templates/dockerfiles/#single-dockerfile-template GetTemplate(key string, options ...RequestOptionFunc) (*DockerfileTemplate, *Response, error) } @@ -60,44 +63,21 @@ type DockerfileTemplateListItem struct { // // GitLab API docs: // https://docs.gitlab.com/api/templates/dockerfiles/#list-dockerfile-templates -type ListDockerfileTemplatesOptions ListOptions +type ListDockerfileTemplatesOptions struct { + ListOptions +} -// ListTemplates get a list of available Dockerfile templates. -// -// GitLab API docs: -// https://docs.gitlab.com/api/templates/dockerfiles/#list-dockerfile-templates func (s *DockerfileTemplatesService) ListTemplates(opt *ListDockerfileTemplatesOptions, options ...RequestOptionFunc) ([]*DockerfileTemplateListItem, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "templates/dockerfiles", opt, options) - if err != nil { - return nil, nil, err - } - - var gs []*DockerfileTemplateListItem - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil + return do[[]*DockerfileTemplateListItem](s.client, + withPath("templates/dockerfiles"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetTemplate get a single Dockerfile template. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/templates/dockerfiles/#single-dockerfile-template func (s *DockerfileTemplatesService) GetTemplate(key string, options ...RequestOptionFunc) (*DockerfileTemplate, *Response, error) { - u := fmt.Sprintf("templates/dockerfiles/%s", url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - g := new(DockerfileTemplate) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil + return do[*DockerfileTemplate](s.client, + withPath("templates/dockerfiles/%s", key), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/dora_metrics.go b/vendor/gitlab.com/gitlab-org/api/client-go/dora_metrics.go index fd04b352d8..5db875ad5d 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/dora_metrics.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/dora_metrics.go @@ -16,22 +16,26 @@ package gitlab -import ( - "fmt" - "net/http" -) - type ( // DORAMetricsServiceInterface defines all the API methods for the DORAMetricsService DORAMetricsServiceInterface interface { + // GetProjectDORAMetrics gets the DORA metrics for a project. + // + // GitLab API Docs: + // https://docs.gitlab.com/api/dora/metrics/#get-project-level-dora-metrics GetProjectDORAMetrics(pid any, opt GetDORAMetricsOptions, options ...RequestOptionFunc) ([]DORAMetric, *Response, error) + + // GetGroupDORAMetrics gets the DORA metrics for a group. + // + // GitLab API Docs: + // https://docs.gitlab.com/api/dora/metrics/#get-group-level-dora-metrics GetGroupDORAMetrics(gid any, opt GetDORAMetricsOptions, options ...RequestOptionFunc) ([]DORAMetric, *Response, error) } // DORAMetricsService handles communication with the DORA metrics related methods // of the GitLab API. 
// - // Gitlab API docs: https://docs.gitlab.com/api/dora/metrics/ + // GitLab API docs: https://docs.gitlab.com/api/dora/metrics/ DORAMetricsService struct { client *Client } @@ -41,7 +45,7 @@ var _ DORAMetricsServiceInterface = (*DORAMetricsService)(nil) // DORAMetric represents a single DORA metric data point. // -// Gitlab API docs: https://docs.gitlab.com/api/dora/metrics/ +// GitLab API docs: https://docs.gitlab.com/api/dora/metrics/ type DORAMetric struct { Date string `json:"date"` Value float64 `json:"value"` @@ -66,52 +70,18 @@ type GetDORAMetricsOptions struct { StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` } -// GetProjectDORAMetrics gets the DORA metrics for a project. -// -// GitLab API Docs: -// https://docs.gitlab.com/api/dora/metrics/#get-project-level-dora-metrics func (s *DORAMetricsService) GetProjectDORAMetrics(pid any, opt GetDORAMetricsOptions, options ...RequestOptionFunc) ([]DORAMetric, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/dora/metrics", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var metrics []DORAMetric - resp, err := s.client.Do(req, &metrics) - if err != nil { - return nil, resp, err - } - - return metrics, resp, err + return do[[]DORAMetric](s.client, + withPath("projects/%s/dora/metrics", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetGroupDORAMetrics gets the DORA metrics for a group. 
-// -// GitLab API Docs: -// https://docs.gitlab.com/api/dora/metrics/#get-group-level-dora-metrics func (s *DORAMetricsService) GetGroupDORAMetrics(gid any, opt GetDORAMetricsOptions, options ...RequestOptionFunc) ([]DORAMetric, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/dora/metrics", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var metrics []DORAMetric - resp, err := s.client.Do(req, &metrics) - if err != nil { - return nil, resp, err - } - - return metrics, resp, err + return do[[]DORAMetric](s.client, + withPath("groups/%s/dora/metrics", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/draft_notes.go b/vendor/gitlab.com/gitlab-org/api/client-go/draft_notes.go index 4520965626..e82570cf27 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/draft_notes.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/draft_notes.go @@ -17,20 +17,53 @@ package gitlab import ( - "fmt" "net/http" ) type ( // DraftNotesServiceInterface defines all the API methods for the DraftNotesService DraftNotesServiceInterface interface { - ListDraftNotes(pid any, mergeRequest int, opt *ListDraftNotesOptions, options ...RequestOptionFunc) ([]*DraftNote, *Response, error) - GetDraftNote(pid any, mergeRequest int, note int, options ...RequestOptionFunc) (*DraftNote, *Response, error) - CreateDraftNote(pid any, mergeRequest int, opt *CreateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) - UpdateDraftNote(pid any, mergeRequest int, note int, opt *UpdateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) - DeleteDraftNote(pid any, mergeRequest int, note int, options ...RequestOptionFunc) (*Response, error) - PublishDraftNote(pid any, mergeRequest int, note int, options 
...RequestOptionFunc) (*Response, error) - PublishAllDraftNotes(pid any, mergeRequest int, options ...RequestOptionFunc) (*Response, error) + // ListDraftNotes gets a list of all draft notes for a merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/draft_notes/#list-all-merge-request-draft-notes + ListDraftNotes(pid any, mergeRequest int64, opt *ListDraftNotesOptions, options ...RequestOptionFunc) ([]*DraftNote, *Response, error) + + // GetDraftNote gets a single draft note for a merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/draft_notes/#get-a-single-draft-note + GetDraftNote(pid any, mergeRequest int64, note int64, options ...RequestOptionFunc) (*DraftNote, *Response, error) + + // CreateDraftNote creates a draft note for a merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/draft_notes/#create-a-draft-note + CreateDraftNote(pid any, mergeRequest int64, opt *CreateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) + + // UpdateDraftNote updates a draft note for a merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/draft_notes/#update-a-draft-note + UpdateDraftNote(pid any, mergeRequest int64, note int64, opt *UpdateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) + + // DeleteDraftNote deletes a single draft note for a merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/draft_notes/#delete-a-draft-note + DeleteDraftNote(pid any, mergeRequest int64, note int64, options ...RequestOptionFunc) (*Response, error) + + // PublishDraftNote publishes a single draft note for a merge request. + // + // GitLab API docs: + // https://docs.gitlab.com/api/draft_notes/#publish-a-draft-note + PublishDraftNote(pid any, mergeRequest int64, note int64, options ...RequestOptionFunc) (*Response, error) + + // PublishAllDraftNotes publishes all draft notes for a merge request that belong to the user. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/draft_notes/#bulk-publish-all-draft-notes + PublishAllDraftNotes(pid any, mergeRequest int64, options ...RequestOptionFunc) (*Response, error) } // DraftNotesService handles communication with the draft notes related methods @@ -46,9 +79,9 @@ type ( var _ DraftNotesServiceInterface = (*DraftNotesService)(nil) type DraftNote struct { - ID int `json:"id"` - AuthorID int `json:"author_id"` - MergeRequestID int `json:"merge_request_id"` + ID int64 `json:"id"` + AuthorID int64 `json:"author_id"` + MergeRequestID int64 `json:"merge_request_id"` ResolveDiscussion bool `json:"resolve_discussion"` DiscussionID string `json:"discussion_id"` Note string `json:"note"` @@ -68,60 +101,26 @@ type ListDraftNotesOptions struct { Sort *string `url:"sort,omitempty" json:"sort,omitempty"` } -// ListDraftNotes gets a list of all draft notes for a merge request. -// -// Gitlab API docs: -// https://docs.gitlab.com/api/draft_notes/#list-all-merge-request-draft-notes -func (s *DraftNotesService) ListDraftNotes(pid any, mergeRequest int, opt *ListDraftNotesOptions, options ...RequestOptionFunc) ([]*DraftNote, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var n []*DraftNote - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *DraftNotesService) ListDraftNotes(pid any, mergeRequest int64, opt *ListDraftNotesOptions, options ...RequestOptionFunc) ([]*DraftNote, *Response, error) { + return do[[]*DraftNote](s.client, + withPath("projects/%s/merge_requests/%d/draft_notes", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetDraftNote gets a single draft note for a merge 
request. -// -// Gitlab API docs: -// https://docs.gitlab.com/api/draft_notes/#get-a-single-draft-note -func (s *DraftNotesService) GetDraftNote(pid any, mergeRequest int, note int, options ...RequestOptionFunc) (*DraftNote, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d", PathEscape(project), mergeRequest, note) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - n := new(DraftNote) - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *DraftNotesService) GetDraftNote(pid any, mergeRequest int64, note int64, options ...RequestOptionFunc) (*DraftNote, *Response, error) { + return do[*DraftNote](s.client, + withPath("projects/%s/merge_requests/%d/draft_notes/%d", ProjectID{pid}, mergeRequest, note), + withRequestOpts(options...), + ) } // CreateDraftNoteOptions represents the available CreateDraftNote() // options. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/draft_notes/#create-a-draft-note type CreateDraftNoteOptions struct { Note *string `url:"note" json:"note"` @@ -131,118 +130,58 @@ type CreateDraftNoteOptions struct { Position *PositionOptions `url:"position,omitempty" json:"position,omitempty"` } -// CreateDraftNote creates a draft note for a merge request. 
-// -// Gitlab API docs: -// https://docs.gitlab.com/api/draft_notes/#create-a-draft-note -func (s *DraftNotesService) CreateDraftNote(pid any, mergeRequest int, opt *CreateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(DraftNote) - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *DraftNotesService) CreateDraftNote(pid any, mergeRequest int64, opt *CreateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) { + return do[*DraftNote](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/draft_notes", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateDraftNoteOptions represents the available UpdateDraftNote() // options. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/draft_notes/#create-a-draft-note type UpdateDraftNoteOptions struct { Note *string `url:"note,omitempty" json:"note,omitempty"` Position *PositionOptions `url:"position,omitempty" json:"position,omitempty"` } -// UpdateDraftNote updates a draft note for a merge request. 
-// -// Gitlab API docs: https://docs.gitlab.com/api/draft_notes/#create-a-draft-note -func (s *DraftNotesService) UpdateDraftNote(pid any, mergeRequest int, note int, opt *UpdateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d", PathEscape(project), mergeRequest, note) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(DraftNote) - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *DraftNotesService) UpdateDraftNote(pid any, mergeRequest int64, note int64, opt *UpdateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) { + return do[*DraftNote](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/merge_requests/%d/draft_notes/%d", ProjectID{pid}, mergeRequest, note), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteDraftNote deletes a single draft note for a merge request. 
-// -// Gitlab API docs: -// https://docs.gitlab.com/api/draft_notes/#delete-a-draft-note -func (s *DraftNotesService) DeleteDraftNote(pid any, mergeRequest int, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d", PathEscape(project), mergeRequest, note) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *DraftNotesService) DeleteDraftNote(pid any, mergeRequest int64, note int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/merge_requests/%d/draft_notes/%d", ProjectID{pid}, mergeRequest, note), + withRequestOpts(options...), + ) + return resp, err } -// PublishDraftNote publishes a single draft note for a merge request. -// -// Gitlab API docs: -// https://docs.gitlab.com/api/draft_notes/#publish-a-draft-note -func (s *DraftNotesService) PublishDraftNote(pid any, mergeRequest int, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d/publish", PathEscape(project), mergeRequest, note) - - req, err := s.client.NewRequest(http.MethodPut, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *DraftNotesService) PublishDraftNote(pid any, mergeRequest int64, note int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/merge_requests/%d/draft_notes/%d/publish", ProjectID{pid}, mergeRequest, note), + withRequestOpts(options...), + ) + return resp, err } -// PublishAllDraftNotes publishes all draft notes for a merge request that belong to the user. 
-// -// Gitlab API docs: -// https://docs.gitlab.com/api/draft_notes/#publish-a-draft-note -func (s *DraftNotesService) PublishAllDraftNotes(pid any, mergeRequest int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/bulk_publish", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *DraftNotesService) PublishAllDraftNotes(pid any, mergeRequest int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/draft_notes/bulk_publish", ProjectID{pid}, mergeRequest), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/enterprise_users.go b/vendor/gitlab.com/gitlab-org/api/client-go/enterprise_users.go index e50b5c5112..74f6c93fc8 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/enterprise_users.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/enterprise_users.go @@ -15,16 +15,20 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( EnterpriseUsersServiceInterface interface { + // ListEnterpriseUsers lists all enterprise users for a given top-level group. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/group_enterprise_users/#list-all-enterprise-users ListEnterpriseUsers(gid any, opt *ListEnterpriseUsersOptions, options ...RequestOptionFunc) ([]*User, *Response, error) - GetEnterpriseUser(gid any, uid int, options ...RequestOptionFunc) (*User, *Response, error) - Disable2FAForEnterpriseUser(gid any, uid int, options ...RequestOptionFunc) (*Response, error) + GetEnterpriseUser(gid any, uid int64, options ...RequestOptionFunc) (*User, *Response, error) + Disable2FAForEnterpriseUser(gid any, uid int64, options ...RequestOptionFunc) (*Response, error) + DeleteEnterpriseUser(gid any, uid int64, deleteOptions *DeleteEnterpriseUserOptions, options ...RequestOptionFunc) (*Response, error) } // EnterpriseUsersService handles communication with the enterprise users @@ -54,54 +58,23 @@ type ListEnterpriseUsersOptions struct { TwoFactor string `url:"two_factor,omitempty" json:"two_factor,omitempty"` } -// ListEnterpriseUsers lists all enterprise users for a given top-level group. -// -// GitLab API docs: -// https://docs.gitlab.com/api/group_enterprise_users/#list-all-enterprise-users func (s *EnterpriseUsersService) ListEnterpriseUsers(gid any, opt *ListEnterpriseUsersOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/enterprise_users", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var users []*User - resp, err := s.client.Do(req, &users) - if err != nil { - return nil, resp, err - } - - return users, resp, nil + return do[[]*User](s.client, + withPath("groups/%s/enterprise_users", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetEnterpriseUser gets details on a specified enterprise user. 
// // GitLab API docs: // https://docs.gitlab.com/api/group_enterprise_users/#get-details-on-an-enterprise-user -func (s *EnterpriseUsersService) GetEnterpriseUser(gid any, uid int, options ...RequestOptionFunc) (*User, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/enterprise_users/%d", PathEscape(group), uid) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - user := new(User) - resp, err := s.client.Do(req, &user) - if err != nil { - return nil, resp, err - } - - return user, resp, nil +func (s *EnterpriseUsersService) GetEnterpriseUser(gid any, uid int64, options ...RequestOptionFunc) (*User, *Response, error) { + return do[*User](s.client, + withPath("groups/%s/enterprise_users/%d", GroupID{gid}, uid), + withRequestOpts(options...), + ) } // Disable2FAForEnterpriseUser disables two-factor authentication (2FA) for a @@ -109,17 +82,33 @@ func (s *EnterpriseUsersService) GetEnterpriseUser(gid any, uid int, options ... 
// // GitLab API docs: // https://docs.gitlab.com/api/group_enterprise_users/#disable-two-factor-authentication-for-an-enterprise-user -func (s *EnterpriseUsersService) Disable2FAForEnterpriseUser(gid any, uid int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/enterprise_users/%d/disable_two_factor", PathEscape(group), uid) +func (s *EnterpriseUsersService) Disable2FAForEnterpriseUser(gid any, uid int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPatch), + withPath("groups/%s/enterprise_users/%d/disable_two_factor", GroupID{gid}, uid), + withRequestOpts(options...), + ) + return resp, err +} - req, err := s.client.NewRequest(http.MethodPatch, u, nil, options) - if err != nil { - return nil, err - } +// DeleteEnterpriseUserOptions represents the available DeleteEnterpriseUser options. +// +// GitLab API docs: +// https://docs.gitlab.com/api/group_enterprise_users/#delete-an-enterprise-user +type DeleteEnterpriseUserOptions struct { + HardDelete *bool `url:"hard_delete,omitempty" json:"hard_delete,omitempty"` +} - return s.client.Do(req, nil) +// DeleteEnterpriseUser deletes a specified enterprise user. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/group_enterprise_users/#delete-an-enterprise-user +func (s *EnterpriseUsersService) DeleteEnterpriseUser(gid any, uid int64, opt *DeleteEnterpriseUserOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/enterprise_users/%d", GroupID{gid}, uid), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/environments.go b/vendor/gitlab.com/gitlab-org/api/client-go/environments.go index 9d8f9fedc6..6cf8512b09 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/environments.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/environments.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -25,12 +24,17 @@ import ( type ( // EnvironmentsServiceInterface defines all the API methods for the EnvironmentsService EnvironmentsServiceInterface interface { + // ListEnvironments gets a list of environments from a project, sorted by name + // alphabetically. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/environments/#list-environments ListEnvironments(pid any, opts *ListEnvironmentsOptions, options ...RequestOptionFunc) ([]*Environment, *Response, error) - GetEnvironment(pid any, environment int, options ...RequestOptionFunc) (*Environment, *Response, error) + GetEnvironment(pid any, environment int64, options ...RequestOptionFunc) (*Environment, *Response, error) CreateEnvironment(pid any, opt *CreateEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) - EditEnvironment(pid any, environment int, opt *EditEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) - DeleteEnvironment(pid any, environment int, options ...RequestOptionFunc) (*Response, error) - StopEnvironment(pid any, environmentID int, opt *StopEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) + EditEnvironment(pid any, environment int64, opt *EditEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) + DeleteEnvironment(pid any, environment int64, options ...RequestOptionFunc) (*Response, error) + StopEnvironment(pid any, environmentID int64, opt *StopEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) } // EnvironmentsService handles communication with the environment related methods @@ -48,7 +52,7 @@ var _ EnvironmentsServiceInterface = (*EnvironmentsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/environments/ type Environment struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Slug string `json:"slug"` Description string `json:"description"` @@ -81,55 +85,19 @@ type ListEnvironmentsOptions struct { States *string `url:"states,omitempty" json:"states,omitempty"` } -// ListEnvironments gets a list of environments from a project, sorted by name -// alphabetically. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/environments/#list-environments func (s *EnvironmentsService) ListEnvironments(pid any, opts *ListEnvironmentsOptions, options ...RequestOptionFunc) ([]*Environment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/environments", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var envs []*Environment - resp, err := s.client.Do(req, &envs) - if err != nil { - return nil, resp, err - } - - return envs, resp, nil + return do[[]*Environment](s.client, + withPath("projects/%s/environments", ProjectID{pid}), + withAPIOpts(opts), + withRequestOpts(options...), + ) } -// GetEnvironment gets a specific environment from a project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/environments/#get-a-specific-environment -func (s *EnvironmentsService) GetEnvironment(pid any, environment int, options ...RequestOptionFunc) (*Environment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/environments/%d", PathEscape(project), environment) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - env := new(Environment) - resp, err := s.client.Do(req, env) - if err != nil { - return nil, resp, err - } - - return env, resp, nil +func (s *EnvironmentsService) GetEnvironment(pid any, environment int64, options ...RequestOptionFunc) (*Environment, *Response, error) { + return do[*Environment](s.client, + withPath("projects/%s/environments/%d", ProjectID{pid}, environment), + withRequestOpts(options...), + ) } // CreateEnvironmentOptions represents the available CreateEnvironment() options. 
@@ -141,37 +109,19 @@ type CreateEnvironmentOptions struct { Description *string `url:"description,omitempty" json:"description,omitempty"` ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` Tier *string `url:"tier,omitempty" json:"tier,omitempty"` - ClusterAgentID *int `url:"cluster_agent_id,omitempty" json:"cluster_agent_id,omitempty"` + ClusterAgentID *int64 `url:"cluster_agent_id,omitempty" json:"cluster_agent_id,omitempty"` KubernetesNamespace *string `url:"kubernetes_namespace,omitempty" json:"kubernetes_namespace,omitempty"` FluxResourcePath *string `url:"flux_resource_path,omitempty" json:"flux_resource_path,omitempty"` AutoStopSetting *string `url:"auto_stop_setting,omitempty" json:"auto_stop_setting,omitempty"` } -// CreateEnvironment adds an environment to a project. This method is idempotent -// and can be called multiple times with the same parameters. Creating an environment -// that already exists does not affect the existing association. -// -// GitLab API docs: -// https://docs.gitlab.com/api/environments/#create-a-new-environment func (s *EnvironmentsService) CreateEnvironment(pid any, opt *CreateEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/environments", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - env := new(Environment) - resp, err := s.client.Do(req, env) - if err != nil { - return nil, resp, err - } - - return env, resp, nil + return do[*Environment](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/environments", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditEnvironmentOptions represents the available EditEnvironment() options. 
@@ -183,54 +133,28 @@ type EditEnvironmentOptions struct { Description *string `url:"description,omitempty" json:"description,omitempty"` ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` Tier *string `url:"tier,omitempty" json:"tier,omitempty"` - ClusterAgentID *int `url:"cluster_agent_id,omitempty" json:"cluster_agent_id,omitempty"` + ClusterAgentID *int64 `url:"cluster_agent_id,omitempty" json:"cluster_agent_id,omitempty"` KubernetesNamespace *string `url:"kubernetes_namespace,omitempty" json:"kubernetes_namespace,omitempty"` FluxResourcePath *string `url:"flux_resource_path,omitempty" json:"flux_resource_path,omitempty"` AutoStopSetting *string `url:"auto_stop_setting,omitempty" json:"auto_stop_setting,omitempty"` } -// EditEnvironment updates a project team environment to a specified access level.. -// -// GitLab API docs: -// https://docs.gitlab.com/api/environments/#update-an-existing-environment -func (s *EnvironmentsService) EditEnvironment(pid any, environment int, opt *EditEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/environments/%d", PathEscape(project), environment) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - env := new(Environment) - resp, err := s.client.Do(req, env) - if err != nil { - return nil, resp, err - } - - return env, resp, nil +func (s *EnvironmentsService) EditEnvironment(pid any, environment int64, opt *EditEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) { + return do[*Environment](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/environments/%d", ProjectID{pid}, environment), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteEnvironment removes an environment from a project team. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/environments/#delete-an-environment -func (s *EnvironmentsService) DeleteEnvironment(pid any, environment int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/environments/%d", PathEscape(project), environment) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *EnvironmentsService) DeleteEnvironment(pid any, environment int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/environments/%d", ProjectID{pid}, environment), + withRequestOpts(options...), + ) + return resp, err } // StopEnvironmentOptions represents the available StopEnvironment() options. @@ -241,27 +165,11 @@ type StopEnvironmentOptions struct { Force *bool `url:"force,omitempty" json:"force,omitempty"` } -// StopEnvironment stops an environment within a specific project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/environments/#stop-an-environment -func (s *EnvironmentsService) StopEnvironment(pid any, environmentID int, opt *StopEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/environments/%d/stop", PathEscape(project), environmentID) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - env := new(Environment) - resp, err := s.client.Do(req, env) - if err != nil { - return nil, resp, err - } - - return env, resp, nil +func (s *EnvironmentsService) StopEnvironment(pid any, environmentID int64, opt *StopEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) { + return do[*Environment](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/environments/%d/stop", ProjectID{pid}, environmentID), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/epic_issues.go b/vendor/gitlab.com/gitlab-org/api/client-go/epic_issues.go index 625d86c11c..738db84eef 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/epic_issues.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/epic_issues.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" ) @@ -26,13 +25,13 @@ type ( // Will be removed in v5 of the API, use Work Items API instead EpicIssuesServiceInterface interface { // Will be removed in v5 of the API, use Work Items API instead - ListEpicIssues(gid any, epic int, opt *ListOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) + ListEpicIssues(gid any, epic int64, opt *ListOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) // Will be removed in v5 of the API, use Work Items API instead - AssignEpicIssue(gid any, epic, issue int, options ...RequestOptionFunc) (*EpicIssueAssignment, 
*Response, error) + AssignEpicIssue(gid any, epic, issue int64, options ...RequestOptionFunc) (*EpicIssueAssignment, *Response, error) // Will be removed in v5 of the API, use Work Items API instead - RemoveEpicIssue(gid any, epic, epicIssue int, options ...RequestOptionFunc) (*EpicIssueAssignment, *Response, error) + RemoveEpicIssue(gid any, epic, epicIssue int64, options ...RequestOptionFunc) (*EpicIssueAssignment, *Response, error) // Will be removed in v5 of the API, use Work Items API instead - UpdateEpicIssueAssignment(gid any, epic, epicIssue int, opt *UpdateEpicIssueAssignmentOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) + UpdateEpicIssueAssignment(gid any, epic, epicIssue int64, opt *UpdateEpicIssueAssignmentOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) } // EpicIssuesService handles communication with the epic issue related methods @@ -49,12 +48,12 @@ type ( var _ EpicIssuesServiceInterface = (*EpicIssuesService)(nil) // EpicIssueAssignment contains both the epic and issue objects returned from -// Gitlab with the assignment ID. +// GitLab with the assignment ID. 
// Will be removed in v5 of the API, use Work Items API instead // // GitLab API docs: https://docs.gitlab.com/api/epic_issues/ type EpicIssueAssignment struct { - ID int `json:"id"` + ID int64 `json:"id"` Epic *Epic `json:"epic"` Issue *Issue `json:"issue"` } @@ -64,25 +63,12 @@ type EpicIssueAssignment struct { // // Gitlab API docs: // https://docs.gitlab.com/api/epic_issues/#list-issues-for-an-epic -func (s *EpicIssuesService) ListEpicIssues(gid any, epic int, opt *ListOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/issues", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var is []*Issue - resp, err := s.client.Do(req, &is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil +func (s *EpicIssuesService) ListEpicIssues(gid any, epic int64, opt *ListOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + return do[[]*Issue](s.client, + withPath("groups/%s/epics/%d/issues", GroupID{gid}, epic), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // AssignEpicIssue assigns an existing issue to an epic. 
@@ -90,25 +76,12 @@ func (s *EpicIssuesService) ListEpicIssues(gid any, epic int, opt *ListOptions, // // Gitlab API Docs: // https://docs.gitlab.com/api/epic_issues/#assign-an-issue-to-the-epic -func (s *EpicIssuesService) AssignEpicIssue(gid any, epic, issue int, options ...RequestOptionFunc) (*EpicIssueAssignment, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/issues/%d", PathEscape(group), epic, issue) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - a := new(EpicIssueAssignment) - resp, err := s.client.Do(req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil +func (s *EpicIssuesService) AssignEpicIssue(gid any, epic, issue int64, options ...RequestOptionFunc) (*EpicIssueAssignment, *Response, error) { + return do[*EpicIssueAssignment](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/epics/%d/issues/%d", GroupID{gid}, epic, issue), + withRequestOpts(options...), + ) } // RemoveEpicIssue removes an issue from an epic. @@ -116,66 +89,31 @@ func (s *EpicIssuesService) AssignEpicIssue(gid any, epic, issue int, options .. 
// // Gitlab API Docs: // https://docs.gitlab.com/api/epic_issues/#remove-an-issue-from-the-epic -func (s *EpicIssuesService) RemoveEpicIssue(gid any, epic, epicIssue int, options ...RequestOptionFunc) (*EpicIssueAssignment, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/issues/%d", PathEscape(group), epic, epicIssue) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, nil, err - } - - a := new(EpicIssueAssignment) - resp, err := s.client.Do(req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil +func (s *EpicIssuesService) RemoveEpicIssue(gid any, epic, epicIssue int64, options ...RequestOptionFunc) (*EpicIssueAssignment, *Response, error) { + return do[*EpicIssueAssignment](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/epics/%d/issues/%d", GroupID{gid}, epic, epicIssue), + withRequestOpts(options...), + ) } -// UpdateEpicIssueAssignmentOptions describes the UpdateEpicIssueAssignment() -// options. -// Will be removed in v5 of the API, use Work Items API instead -// -// Gitlab API Docs: -// https://docs.gitlab.com/api/epic_issues/#update-epic---issue-association type UpdateEpicIssueAssignmentOptions struct { *ListOptions - MoveBeforeID *int `url:"move_before_id,omitempty" json:"move_before_id,omitempty"` - MoveAfterID *int `url:"move_after_id,omitempty" json:"move_after_id,omitempty"` + MoveBeforeID *int64 `url:"move_before_id,omitempty" json:"move_before_id,omitempty"` + MoveAfterID *int64 `url:"move_after_id,omitempty" json:"move_after_id,omitempty"` } -// UpdateEpicIsssueAssignmentOptions is kept for backwards compatibility. -// Deprecated: use UpdateEpicIssueAssignmentOptions instead. -type UpdateEpicIsssueAssignmentOptions = UpdateEpicIssueAssignmentOptions - // UpdateEpicIssueAssignment moves an issue before or after another issue in an // epic issue list. 
// Will be removed in v5 of the API, use Work Items API instead // // Gitlab API Docs: // https://docs.gitlab.com/api/epic_issues/#update-epic---issue-association -func (s *EpicIssuesService) UpdateEpicIssueAssignment(gid any, epic, epicIssue int, opt *UpdateEpicIssueAssignmentOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/issues/%d", PathEscape(group), epic, epicIssue) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - var is []*Issue - resp, err := s.client.Do(req, &is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil +func (s *EpicIssuesService) UpdateEpicIssueAssignment(gid any, epic, epicIssue int64, opt *UpdateEpicIssueAssignmentOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + return do[[]*Issue](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/epics/%d/issues/%d", GroupID{gid}, epic, epicIssue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/epics.go b/vendor/gitlab.com/gitlab-org/api/client-go/epics.go index fa3f6b6149..9f4818a7dc 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/epics.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/epics.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -26,18 +25,29 @@ type ( // EpicsServiceInterface defines all the API methods for the EpicsService // Will be removed in v5 of the API, use Work Items API instead EpicsServiceInterface interface { + // ListGroupEpics gets a list of group epics. This function accepts pagination + // parameters page and per_page to return the list of group epics. 
// Will be removed in v5 of the API, use Work Items API instead + // + // GitLab API docs: https://docs.gitlab.com/api/epics/#list-epics-for-a-group ListGroupEpics(gid any, opt *ListGroupEpicsOptions, options ...RequestOptionFunc) ([]*Epic, *Response, error) + + // GetEpic gets a single group epic. // Will be removed in v5 of the API, use Work Items API instead - GetEpic(gid any, epic int, options ...RequestOptionFunc) (*Epic, *Response, error) + GetEpic(gid any, epic int64, options ...RequestOptionFunc) (*Epic, *Response, error) // Will be removed in v5 of the API, use Work Items API instead - GetEpicLinks(gid any, epic int, options ...RequestOptionFunc) ([]*Epic, *Response, error) + GetEpicLinks(gid any, epic int64, options ...RequestOptionFunc) ([]*Epic, *Response, error) // Will be removed in v5 of the API, use Work Items API instead + // + // GitLab API docs: https://docs.gitlab.com/api/epics/#new-epic CreateEpic(gid any, opt *CreateEpicOptions, options ...RequestOptionFunc) (*Epic, *Response, error) + + // UpdateEpic updates an existing group epic. This function is also used + // to mark an epic as closed. // Will be removed in v5 of the API, use Work Items API instead - UpdateEpic(gid any, epic int, opt *UpdateEpicOptions, options ...RequestOptionFunc) (*Epic, *Response, error) + UpdateEpic(gid any, epic int64, opt *UpdateEpicOptions, options ...RequestOptionFunc) (*Epic, *Response, error) // Will be removed in v5 of the API, use Work Items API instead - DeleteEpic(gid any, epic int, options ...RequestOptionFunc) (*Response, error) + DeleteEpic(gid any, epic int64, options ...RequestOptionFunc) (*Response, error) } // EpicsService handles communication with the epic related methods @@ -56,7 +66,7 @@ var _ EpicsServiceInterface = (*EpicsService)(nil) // EpicAuthor represents a author of the epic. 
// Will be removed in v5 of the API, use Work Items API instead type EpicAuthor struct { - ID int `json:"id"` + ID int64 `json:"id"` State string `json:"state"` WebURL string `json:"web_url"` Name string `json:"name"` @@ -69,10 +79,10 @@ type EpicAuthor struct { // // GitLab API docs: https://docs.gitlab.com/api/epics/ type Epic struct { - ID int `json:"id"` - IID int `json:"iid"` - GroupID int `json:"group_id"` - ParentID int `json:"parent_id"` + ID int64 `json:"id"` + IID int64 `json:"iid"` + GroupID int64 `json:"group_id"` + ParentID int64 `json:"parent_id"` Title string `json:"title"` Description string `json:"description"` State string `json:"state"` @@ -91,9 +101,9 @@ type Epic struct { UpdatedAt *time.Time `json:"updated_at"` ClosedAt *time.Time `json:"closed_at"` Labels []string `json:"labels"` - Upvotes int `json:"upvotes"` - Downvotes int `json:"downvotes"` - UserNotesCount int `json:"user_notes_count"` + Upvotes int64 `json:"upvotes"` + Downvotes int64 `json:"downvotes"` + UserNotesCount int64 `json:"user_notes_count"` URL string `json:"url"` } @@ -110,7 +120,7 @@ func (e Epic) String() string { // GitLab API docs: https://docs.gitlab.com/api/epics/#list-epics-for-a-group type ListGroupEpicsOptions struct { ListOptions - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorID *int64 `url:"author_id,omitempty" json:"author_id,omitempty"` Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` @@ -126,80 +136,34 @@ type ListGroupEpicsOptions struct { MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` } -// ListGroupEpics gets a list of group epics. This function accepts pagination -// parameters page and per_page to return the list of group epics. 
-// Will be removed in v5 of the API, use Work Items API instead -// -// GitLab API docs: https://docs.gitlab.com/api/epics/#list-epics-for-a-group func (s *EpicsService) ListGroupEpics(gid any, opt *ListGroupEpicsOptions, options ...RequestOptionFunc) ([]*Epic, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var es []*Epic - resp, err := s.client.Do(req, &es) - if err != nil { - return nil, resp, err - } - - return es, resp, nil + return do[[]*Epic](s.client, + withPath("groups/%s/epics", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetEpic gets a single group epic. // Will be removed in v5 of the API, use Work Items API instead // // GitLab API docs: https://docs.gitlab.com/api/epics/#single-epic -func (s *EpicsService) GetEpic(gid any, epic int, options ...RequestOptionFunc) (*Epic, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - e := new(Epic) - resp, err := s.client.Do(req, e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil +func (s *EpicsService) GetEpic(gid any, epic int64, options ...RequestOptionFunc) (*Epic, *Response, error) { + return do[*Epic](s.client, + withPath("groups/%s/epics/%d", GroupID{gid}, epic), + withRequestOpts(options...), + ) } // GetEpicLinks gets all child epics of an epic. 
// Will be removed in v5 of the API, use Work Items API instead // // GitLab API docs: https://docs.gitlab.com/api/epic_links/ -func (s *EpicsService) GetEpicLinks(gid any, epic int, options ...RequestOptionFunc) ([]*Epic, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/epics", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var e []*Epic - resp, err := s.client.Do(req, &e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil +func (s *EpicsService) GetEpicLinks(gid any, epic int64, options ...RequestOptionFunc) ([]*Epic, *Response, error) { + return do[[]*Epic](s.client, + withPath("groups/%s/epics/%d/epics", GroupID{gid}, epic), + withRequestOpts(options...), + ) } // CreateEpicOptions represents the available CreateEpic() options. @@ -217,32 +181,16 @@ type CreateEpicOptions struct { StartDateFixed *ISOTime `url:"start_date_fixed,omitempty" json:"start_date_fixed,omitempty"` DueDateIsFixed *bool `url:"due_date_is_fixed,omitempty" json:"due_date_is_fixed,omitempty"` DueDateFixed *ISOTime `url:"due_date_fixed,omitempty" json:"due_date_fixed,omitempty"` - ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` + ParentID *int64 `url:"parent_id,omitempty" json:"parent_id,omitempty"` } -// CreateEpic creates a new group epic. 
-// Will be removed in v5 of the API, use Work Items API instead -// -// GitLab API docs: https://docs.gitlab.com/api/epics/#new-epic func (s *EpicsService) CreateEpic(gid any, opt *CreateEpicOptions, options ...RequestOptionFunc) (*Epic, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - e := new(Epic) - resp, err := s.client.Do(req, e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil + return do[*Epic](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/epics", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateEpicOptions represents the available UpdateEpic() options. @@ -256,7 +204,7 @@ type UpdateEpicOptions struct { DueDateFixed *ISOTime `url:"due_date_fixed,omitempty" json:"due_date_fixed,omitempty"` DueDateIsFixed *bool `url:"due_date_is_fixed,omitempty" json:"due_date_is_fixed,omitempty"` Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` + ParentID *int64 `url:"parent_id,omitempty" json:"parent_id,omitempty"` RemoveLabels *LabelOptions `url:"remove_labels,omitempty" json:"remove_labels,omitempty"` StartDateFixed *ISOTime `url:"start_date_fixed,omitempty" json:"start_date_fixed,omitempty"` StartDateIsFixed *bool `url:"start_date_is_fixed,omitempty" json:"start_date_is_fixed,omitempty"` @@ -271,42 +219,24 @@ type UpdateEpicOptions struct { // Will be removed in v5 of the API, use Work Items API instead // // GitLab API docs: https://docs.gitlab.com/api/epics/#update-epic -func (s *EpicsService) UpdateEpic(gid any, epic int, opt *UpdateEpicOptions, options ...RequestOptionFunc) (*Epic, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := 
fmt.Sprintf("groups/%s/epics/%d", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - e := new(Epic) - resp, err := s.client.Do(req, e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil +func (s *EpicsService) UpdateEpic(gid any, epic int64, opt *UpdateEpicOptions, options ...RequestOptionFunc) (*Epic, *Response, error) { + return do[*Epic](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/epics/%d", GroupID{gid}, epic), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteEpic deletes a single group epic. // Will be removed in v5 of the API, use Work Items API instead // // GitLab API docs: https://docs.gitlab.com/api/epics/#delete-epic -func (s *EpicsService) DeleteEpic(gid any, epic int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *EpicsService) DeleteEpic(gid any, epic int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/epics/%d", GroupID{gid}, epic), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/error_tracking.go b/vendor/gitlab.com/gitlab-org/api/client-go/error_tracking.go index 14de8ea88e..d39a10610a 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/error_tracking.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/error_tracking.go @@ -16,19 +16,41 @@ package gitlab -import ( - "fmt" - "net/http" -) +import "net/http" type ( // ErrorTrackingServiceInterface defines all the API methods for the ErrorTrackingService ErrorTrackingServiceInterface interface { + // 
GetErrorTrackingSettings gets error tracking settings. + // + // GitLab API docs: + // https://docs.gitlab.com/api/error_tracking/#get-error-tracking-settings GetErrorTrackingSettings(pid any, options ...RequestOptionFunc) (*ErrorTrackingSettings, *Response, error) + + // EnableDisableErrorTracking allows you to enable or disable the error tracking + // settings for a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/error_tracking/#enable-or-disable-the-error-tracking-project-settings EnableDisableErrorTracking(pid any, opt *EnableDisableErrorTrackingOptions, options ...RequestOptionFunc) (*ErrorTrackingSettings, *Response, error) + + // ListClientKeys lists error tracking project client keys. + // + // GitLab API docs: + // https://docs.gitlab.com/api/error_tracking/#list-project-client-keys ListClientKeys(pid any, opt *ListClientKeysOptions, options ...RequestOptionFunc) ([]*ErrorTrackingClientKey, *Response, error) + + // CreateClientKey creates a new client key for a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/error_tracking/#create-a-client-key CreateClientKey(pid any, options ...RequestOptionFunc) (*ErrorTrackingClientKey, *Response, error) - DeleteClientKey(pid any, keyID int, options ...RequestOptionFunc) (*Response, error) + + // DeleteClientKey removes a client key from the project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/error_tracking/#delete-a-client-key + DeleteClientKey(pid any, keyID int64, options ...RequestOptionFunc) (*Response, error) } // ErrorTrackingService handles communication with the error tracking @@ -47,7 +69,7 @@ var _ ErrorTrackingServiceInterface = (*ErrorTrackingService)(nil) // GitLab docs: // https://docs.gitlab.com/api/error_tracking/#error-tracking-client-keys type ErrorTrackingClientKey struct { - ID int `json:"id"` + ID int64 `json:"id"` Active bool `json:"active"` PublicKey string `json:"public_key"` SentryDsn string `json:"sentry_dsn"` @@ -72,29 +94,11 @@ func (p ErrorTrackingSettings) String() string { return Stringify(p) } -// GetErrorTrackingSettings gets error tracking settings. -// -// GitLab API docs: -// https://docs.gitlab.com/api/error_tracking/#get-error-tracking-settings func (s *ErrorTrackingService) GetErrorTrackingSettings(pid any, options ...RequestOptionFunc) (*ErrorTrackingSettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/error_tracking/settings", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ets := new(ErrorTrackingSettings) - resp, err := s.client.Do(req, ets) - if err != nil { - return nil, resp, err - } - - return ets, resp, nil + return do[*ErrorTrackingSettings](s.client, + withPath("projects/%s/error_tracking/settings", ProjectID{pid}), + withRequestOpts(options...), + ) } // EnableDisableErrorTrackingOptions represents the available @@ -107,103 +111,48 @@ type EnableDisableErrorTrackingOptions struct { Integrated *bool `url:"integrated,omitempty" json:"integrated,omitempty"` } -// EnableDisableErrorTracking allows you to enable or disable the error tracking -// settings for a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/error_tracking/#enable-or-disable-the-error-tracking-project-settings func (s *ErrorTrackingService) EnableDisableErrorTracking(pid any, opt *EnableDisableErrorTrackingOptions, options ...RequestOptionFunc) (*ErrorTrackingSettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/error_tracking/settings", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPatch, u, opt, options) - if err != nil { - return nil, nil, err - } - - ets := new(ErrorTrackingSettings) - resp, err := s.client.Do(req, &ets) - if err != nil { - return nil, resp, err - } - - return ets, resp, nil + return do[*ErrorTrackingSettings](s.client, + withMethod(http.MethodPatch), + withPath("projects/%s/error_tracking/settings", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListClientKeysOptions represents the available ListClientKeys() options. // // GitLab API docs: // https://docs.gitlab.com/api/error_tracking/#list-project-client-keys -type ListClientKeysOptions ListOptions +type ListClientKeysOptions struct { + ListOptions +} -// ListClientKeys lists error tracking project client keys. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/error_tracking/#list-project-client-keys func (s *ErrorTrackingService) ListClientKeys(pid any, opt *ListClientKeysOptions, options ...RequestOptionFunc) ([]*ErrorTrackingClientKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/error_tracking/client_keys", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var cks []*ErrorTrackingClientKey - resp, err := s.client.Do(req, &cks) - if err != nil { - return nil, resp, err - } - - return cks, resp, nil + return do[[]*ErrorTrackingClientKey](s.client, + withPath("projects/%s/error_tracking/client_keys", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// CreateClientKey creates a new client key for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/error_tracking/#create-a-client-key func (s *ErrorTrackingService) CreateClientKey(pid any, options ...RequestOptionFunc) (*ErrorTrackingClientKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/error_tracking/client_keys", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - ck := new(ErrorTrackingClientKey) - resp, err := s.client.Do(req, ck) - if err != nil { - return nil, resp, err - } - - return ck, resp, nil + return do[*ErrorTrackingClientKey](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/error_tracking/client_keys", ProjectID{pid}), + withRequestOpts(options...), + ) } // DeleteClientKey removes a client key from the project. 
// // GitLab API docs: // https://docs.gitlab.com/api/error_tracking/#delete-a-client-key -func (s *ErrorTrackingService) DeleteClientKey(pid any, keyID int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/error_tracking/client_keys/%d", PathEscape(project), keyID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ErrorTrackingService) DeleteClientKey(pid any, keyID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/error_tracking/client_keys/%d", ProjectID{pid}, keyID), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/event_parsing.go b/vendor/gitlab.com/gitlab-org/api/client-go/event_parsing.go index cae15b4bce..c446da3f35 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/event_parsing.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/event_parsing.go @@ -18,11 +18,12 @@ package gitlab import ( "encoding/json" + "errors" "fmt" "net/http" ) -// EventType represents a Gitlab event type. +// EventType represents a GitLab event type. type EventType string // List of available event types. 
@@ -31,13 +32,16 @@ const ( EventConfidentialNote EventType = "Confidential Note Hook" EventTypeBuild EventType = "Build Hook" EventTypeDeployment EventType = "Deployment Hook" + EventTypeEmoji EventType = "Emoji Hook" EventTypeFeatureFlag EventType = "Feature Flag Hook" EventTypeIssue EventType = "Issue Hook" EventTypeJob EventType = "Job Hook" EventTypeMember EventType = "Member Hook" EventTypeMergeRequest EventType = "Merge Request Hook" + EventTypeMilestone EventType = "Milestone Hook" EventTypeNote EventType = "Note Hook" EventTypePipeline EventType = "Pipeline Hook" + EventTypeProject EventType = "Project Hook" EventTypePush EventType = "Push Hook" EventTypeRelease EventType = "Release Hook" EventTypeResourceAccessToken EventType = "Resource Access Token Hook" @@ -45,6 +49,7 @@ const ( EventTypeSubGroup EventType = "Subgroup Hook" EventTypeSystemHook EventType = "System Hook" EventTypeTagPush EventType = "Tag Push Hook" + EventTypeVulnerability EventType = "Vulnerability Hook" EventTypeWikiPage EventType = "Wiki Page Hook" ) @@ -62,10 +67,12 @@ const ( ) type noteEvent struct { - ObjectKind string `json:"object_kind"` - ObjectAttributes struct { - NoteableType string `json:"noteable_type"` - } `json:"object_attributes"` + ObjectKind string `json:"object_kind"` + ObjectAttributes noteEventObjectAttributes `json:"object_attributes"` +} + +type noteEventObjectAttributes struct { + NoteableType string `json:"noteable_type"` } type serviceEvent struct { @@ -221,6 +228,8 @@ func ParseWebhook(eventType EventType, payload []byte) (event any, err error) { event = &BuildEvent{} case EventTypeDeployment: event = &DeploymentEvent{} + case EventTypeEmoji: + event = &EmojiEvent{} case EventTypeFeatureFlag: event = &FeatureFlagEvent{} case EventTypeIssue, EventConfidentialIssue: @@ -231,6 +240,8 @@ func ParseWebhook(eventType EventType, payload []byte) (event any, err error) { event = &MemberEvent{} case EventTypeMergeRequest: event = &MergeEvent{} + case 
EventTypeMilestone: + event = &MilestoneWebhookEvent{} case EventTypeNote, EventConfidentialNote: note := ¬eEvent{} err := json.Unmarshal(payload, note) @@ -256,6 +267,8 @@ func ParseWebhook(eventType EventType, payload []byte) (event any, err error) { } case EventTypePipeline: event = &PipelineEvent{} + case EventTypeProject: + event = &ProjectWebhookEvent{} case EventTypePush: event = &PushEvent{} case EventTypeRelease: @@ -276,7 +289,7 @@ func ParseWebhook(eventType EventType, payload []byte) (event any, err error) { case projectEvent: event = &ProjectResourceAccessTokenEvent{} default: - return nil, fmt.Errorf("unexpected resource access token payload") + return nil, errors.New("unexpected resource access token payload") } case EventTypeServiceHook: service := &serviceEvent{} @@ -298,6 +311,8 @@ func ParseWebhook(eventType EventType, payload []byte) (event any, err error) { event = &SubGroupEvent{} case EventTypeTagPush: event = &TagEvent{} + case EventTypeVulnerability: + event = &VulnerabilityEvent{} case EventTypeWikiPage: event = &WikiPageEvent{} default: diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/event_systemhook_types.go b/vendor/gitlab.com/gitlab-org/api/client-go/event_systemhook_types.go index 50940e8326..fdbc13514e 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/event_systemhook_types.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/event_systemhook_types.go @@ -44,7 +44,7 @@ type ProjectSystemEvent struct { Name string `json:"name"` Path string `json:"path"` PathWithNamespace string `json:"path_with_namespace"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` OwnerName string `json:"owner_name"` OwnerEmail string `json:"owner_email"` ProjectVisibility string `json:"project_visibility"` @@ -60,7 +60,7 @@ type GroupSystemEvent struct { Name string `json:"name"` Path string `json:"path"` PathWithNamespace string `json:"full_path"` - GroupID int `json:"group_id"` + GroupID int64 `json:"group_id"` OwnerName 
string `json:"owner_name"` OwnerEmail string `json:"owner_email"` ProjectVisibility string `json:"project_visibility"` @@ -74,7 +74,7 @@ type GroupSystemEvent struct { // https://docs.gitlab.com/administration/system_hooks/ type KeySystemEvent struct { BaseSystemEvent - ID int `json:"id"` + ID int64 `json:"id"` Username string `json:"username"` Key string `json:"key"` } @@ -85,7 +85,7 @@ type KeySystemEvent struct { // https://docs.gitlab.com/administration/system_hooks/ type UserSystemEvent struct { BaseSystemEvent - ID int `json:"user_id"` + ID int64 `json:"user_id"` Name string `json:"name"` Username string `json:"username"` OldUsername string `json:"old_username,omitempty"` @@ -99,11 +99,11 @@ type UserSystemEvent struct { // https://docs.gitlab.com/administration/system_hooks/ type UserGroupSystemEvent struct { BaseSystemEvent - ID int `json:"user_id"` + ID int64 `json:"user_id"` Name string `json:"user_name"` Username string `json:"user_username"` Email string `json:"user_email"` - GroupID int `json:"group_id"` + GroupID int64 `json:"group_id"` GroupName string `json:"group_name"` GroupPath string `json:"group_path"` GroupAccess string `json:"group_access"` @@ -115,11 +115,11 @@ type UserGroupSystemEvent struct { // https://docs.gitlab.com/administration/system_hooks/ type UserTeamSystemEvent struct { BaseSystemEvent - ID int `json:"user_id"` + ID int64 `json:"user_id"` Name string `json:"user_name"` Username string `json:"user_username"` Email string `json:"user_email"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` ProjectName string `json:"project_name"` ProjectPath string `json:"project_path"` ProjectPathWithNamespace string `json:"project_path_with_namespace"` @@ -133,41 +133,59 @@ type UserTeamSystemEvent struct { // https://docs.gitlab.com/administration/system_hooks/#push-events type PushSystemEvent struct { BaseSystemEvent - Before string `json:"before"` - After string `json:"after"` - Ref string `json:"ref"` - CheckoutSHA 
string `json:"checkout_sha"` - UserID int `json:"user_id"` - UserName string `json:"user_name"` - UserUsername string `json:"user_username"` - UserEmail string `json:"user_email"` - UserAvatar string `json:"user_avatar"` - ProjectID int `json:"project_id"` - Project struct { - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL string `json:"avatar_url"` - GitHTTPURL string `json:"git_http_url"` - GitSSHURL string `json:"git_ssh_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - } `json:"project"` - Commits []struct { - ID string `json:"id"` - Message string `json:"message"` - Timestamp time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - } `json:"commits"` - TotalCommitsCount int `json:"total_commits_count"` + Before string `json:"before"` + After string `json:"after"` + Ref string `json:"ref"` + CheckoutSHA string `json:"checkout_sha"` + UserID int64 `json:"user_id"` + UserName string `json:"user_name"` + UserUsername string `json:"user_username"` + UserEmail string `json:"user_email"` + UserAvatar string `json:"user_avatar"` + ProjectID int64 `json:"project_id"` + Project PushSystemEventProject `json:"project"` + Commits []PushSystemEventCommit `json:"commits"` + TotalCommitsCount int64 `json:"total_commits_count"` +} + +// PushSystemEventProject represents a push system event's project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/administration/system_hooks/#push-events +type PushSystemEventProject struct { + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` + GitHTTPURL string `json:"git_http_url"` + GitSSHURL string `json:"git_ssh_url"` + Namespace string `json:"namespace"` + VisibilityLevel int64 `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` +} + +// PushSystemEventCommit represents a push system event's commit. +// +// GitLab API docs: +// https://docs.gitlab.com/administration/system_hooks/#push-events +type PushSystemEventCommit struct { + ID string `json:"id"` + Message string `json:"message"` + Timestamp time.Time `json:"timestamp"` + URL string `json:"url"` + Author PushSystemEventCommitAuthor `json:"author"` +} + +// PushSystemEventCommitAuthor represents a push system event's commit author. +// +// GitLab API docs: +// https://docs.gitlab.com/administration/system_hooks/#push-events +type PushSystemEventCommitAuthor struct { + Name string `json:"name"` + Email string `json:"email"` } // TagPushSystemEvent represents a tag push system event. 
@@ -176,41 +194,59 @@ type PushSystemEvent struct { // https://docs.gitlab.com/administration/system_hooks/#tag-events type TagPushSystemEvent struct { BaseSystemEvent - Before string `json:"before"` - After string `json:"after"` - Ref string `json:"ref"` - CheckoutSHA string `json:"checkout_sha"` - UserID int `json:"user_id"` - UserName string `json:"user_name"` - UserUsername string `json:"user_username"` - UserEmail string `json:"user_email"` - UserAvatar string `json:"user_avatar"` - ProjectID int `json:"project_id"` - Project struct { - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL string `json:"avatar_url"` - GitHTTPURL string `json:"git_http_url"` - GitSSHURL string `json:"git_ssh_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - } `json:"project"` - Commits []struct { - ID string `json:"id"` - Message string `json:"message"` - Timestamp time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - } `json:"commits"` - TotalCommitsCount int `json:"total_commits_count"` + Before string `json:"before"` + After string `json:"after"` + Ref string `json:"ref"` + CheckoutSHA string `json:"checkout_sha"` + UserID int64 `json:"user_id"` + UserName string `json:"user_name"` + UserUsername string `json:"user_username"` + UserEmail string `json:"user_email"` + UserAvatar string `json:"user_avatar"` + ProjectID int64 `json:"project_id"` + Project TagPushSystemEventProject `json:"project"` + Commits []TagPushSystemEventCommit `json:"commits"` + TotalCommitsCount int64 `json:"total_commits_count"` +} + +// TagPushSystemEventProject represents a tag push system event's project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/administration/system_hooks/#tag-events +type TagPushSystemEventProject struct { + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` + GitHTTPURL string `json:"git_http_url"` + GitSSHURL string `json:"git_ssh_url"` + Namespace string `json:"namespace"` + VisibilityLevel int64 `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` +} + +// TagPushSystemEventCommit represents a tag push system event's commit. +// +// GitLab API docs: +// https://docs.gitlab.com/administration/system_hooks/#tag-events +type TagPushSystemEventCommit struct { + ID string `json:"id"` + Message string `json:"message"` + Timestamp time.Time `json:"timestamp"` + URL string `json:"url"` + Author TagPushSystemEventCommitAuthor `json:"author"` +} + +// TagPushSystemEventCommitAuthor represents a tag push system event's commit author. +// +// GitLab API docs: +// https://docs.gitlab.com/administration/system_hooks/#tag-events +type TagPushSystemEventCommitAuthor struct { + Name string `json:"name"` + Email string `json:"email"` } // RepositoryUpdateSystemEvent represents a repository updated system event. 
@@ -219,31 +255,43 @@ type TagPushSystemEvent struct { // https://docs.gitlab.com/administration/system_hooks/#repository-update-events type RepositoryUpdateSystemEvent struct { BaseSystemEvent - UserID int `json:"user_id"` - UserName string `json:"user_name"` - UserEmail string `json:"user_email"` - UserAvatar string `json:"user_avatar"` - ProjectID int `json:"project_id"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL string `json:"avatar_url"` - GitHTTPURL string `json:"git_http_url"` - GitSSHURL string `json:"git_ssh_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - CiConfigPath string `json:"ci_config_path"` - Homepage string `json:"homepage"` - URL string `json:"url"` - } `json:"project"` - Changes []struct { - Before string `json:"before"` - After string `json:"after"` - Ref string `json:"ref"` - } `json:"changes"` - Refs []string `json:"refs"` + UserID int64 `json:"user_id"` + UserName string `json:"user_name"` + UserEmail string `json:"user_email"` + UserAvatar string `json:"user_avatar"` + ProjectID int64 `json:"project_id"` + Project RepositoryUpdateSystemEventProject `json:"project"` + Changes []RepositoryUpdateSystemEventChange `json:"changes"` + Refs []string `json:"refs"` +} + +// RepositoryUpdateSystemEventProject represents a repository updated system event's project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/administration/system_hooks/#repository-update-events +type RepositoryUpdateSystemEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` + GitHTTPURL string `json:"git_http_url"` + GitSSHURL string `json:"git_ssh_url"` + Namespace string `json:"namespace"` + VisibilityLevel int64 `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CiConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` +} + +// RepositoryUpdateSystemEventChange represents a repository updated system event's change. +// +// GitLab API docs: +// https://docs.gitlab.com/administration/system_hooks/#repository-update-events +type RepositoryUpdateSystemEventChange struct { + Before string `json:"before"` + After string `json:"after"` + Ref string `json:"ref"` } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/event_webhook_types.go b/vendor/gitlab.com/gitlab-org/api/client-go/event_webhook_types.go index 780747329d..2123753216 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/event_webhook_types.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/event_webhook_types.go @@ -28,7 +28,7 @@ import ( // There are no GitLab API docs on the subject, but the mappings can be found in // GitLab's codebase: // https://gitlab.com/gitlab-org/gitlab-foss/-/blob/ba5be4989e/app/models/concerns/issuable.rb#L39-42 -type StateID int +type StateID int64 const ( StateIDNone StateID = 0 @@ -43,35 +43,37 @@ const ( // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#job-events type BuildEvent struct { - ObjectKind string `json:"object_kind"` - Ref string `json:"ref"` - Tag bool `json:"tag"` - BeforeSHA string `json:"before_sha"` - SHA string `json:"sha"` - BuildID int 
`json:"build_id"` - BuildName string `json:"build_name"` - BuildStage string `json:"build_stage"` - BuildStatus string `json:"build_status"` - BuildCreatedAt string `json:"build_created_at"` - BuildStartedAt string `json:"build_started_at"` - BuildFinishedAt string `json:"build_finished_at"` - BuildDuration float64 `json:"build_duration"` - BuildAllowFailure bool `json:"build_allow_failure"` - ProjectID int `json:"project_id"` - ProjectName string `json:"project_name"` - User *EventUser `json:"user"` - Commit struct { - ID int `json:"id"` - SHA string `json:"sha"` - Message string `json:"message"` - AuthorName string `json:"author_name"` - AuthorEmail string `json:"author_email"` - Status string `json:"status"` - Duration int `json:"duration"` - StartedAt string `json:"started_at"` - FinishedAt string `json:"finished_at"` - } `json:"commit"` - Repository *Repository `json:"repository"` + ObjectKind string `json:"object_kind"` + Ref string `json:"ref"` + Tag bool `json:"tag"` + BeforeSHA string `json:"before_sha"` + SHA string `json:"sha"` + BuildID int64 `json:"build_id"` + BuildName string `json:"build_name"` + BuildStage string `json:"build_stage"` + BuildStatus string `json:"build_status"` + BuildCreatedAt string `json:"build_created_at"` + BuildStartedAt string `json:"build_started_at"` + BuildFinishedAt string `json:"build_finished_at"` + BuildDuration float64 `json:"build_duration"` + BuildAllowFailure bool `json:"build_allow_failure"` + ProjectID int64 `json:"project_id"` + ProjectName string `json:"project_name"` + User *EventUser `json:"user"` + Commit BuildEventCommit `json:"commit"` + Repository *Repository `json:"repository"` +} + +type BuildEventCommit struct { + ID int64 `json:"id"` + SHA string `json:"sha"` + Message string `json:"message"` + AuthorName string `json:"author_name"` + AuthorEmail string `json:"author_email"` + Status string `json:"status"` + Duration int64 `json:"duration"` + StartedAt string `json:"started_at"` + FinishedAt string 
`json:"finished_at"` } // CommitCommentEvent represents a comment on a commit event. @@ -79,57 +81,65 @@ type BuildEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#comment-on-a-commit type CommitCommentEvent struct { - ObjectKind string `json:"object_kind"` - EventType string `json:"event_type"` - User *User `json:"user"` - ProjectID int `json:"project_id"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Repository *Repository `json:"repository"` - ObjectAttributes struct { - ID int `json:"id"` - Note string `json:"note"` - NoteableType string `json:"noteable_type"` - AuthorID int `json:"author_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ProjectID int `json:"project_id"` - Attachment string `json:"attachment"` - LineCode string `json:"line_code"` - CommitID string `json:"commit_id"` - NoteableID int `json:"noteable_id"` - System bool `json:"system"` - StDiff *Diff `json:"st_diff"` - Description string `json:"description"` - Action CommentEventAction `json:"action"` - URL string `json:"url"` - } `json:"object_attributes"` - Commit *struct { - ID string `json:"id"` - Title string `json:"title"` - Message string `json:"message"` - Timestamp *time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - } `json:"commit"` + ObjectKind 
string `json:"object_kind"` + EventType string `json:"event_type"` + User *User `json:"user"` + ProjectID int64 `json:"project_id"` + Project CommitCommentEventProject `json:"project"` + Repository *Repository `json:"repository"` + ObjectAttributes CommitCommentEventObjectAttributes `json:"object_attributes"` + Commit *CommitCommentEventCommit `json:"commit"` +} + +type CommitCommentEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` +} + +type CommitCommentEventObjectAttributes struct { + ID int64 `json:"id"` + Note string `json:"note"` + NoteableType string `json:"noteable_type"` + AuthorID int64 `json:"author_id"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ProjectID int64 `json:"project_id"` + Attachment string `json:"attachment"` + LineCode string `json:"line_code"` + CommitID string `json:"commit_id"` + NoteableID int64 `json:"noteable_id"` + System bool `json:"system"` + StDiff *Diff `json:"st_diff"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` +} + +type CommitCommentEventCommit struct { + ID string `json:"id"` + Title string `json:"title"` + Message string `json:"message"` + Timestamp *time.Time `json:"timestamp"` + URL string `json:"url"` + Author EventCommitAuthor `json:"author"` +} + +type EventCommitAuthor struct { + Name string `json:"name"` + Email string `json:"email"` } // DeploymentEvent represents a deployment event. 
@@ -137,39 +147,41 @@ type CommitCommentEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#deployment-events type DeploymentEvent struct { - ObjectKind string `json:"object_kind"` - Status string `json:"status"` - StatusChangedAt string `json:"status_changed_at"` - DeploymentID int `json:"deployment_id"` - DeployableID int `json:"deployable_id"` - DeployableURL string `json:"deployable_url"` - Environment string `json:"environment"` - EnvironmentSlug string `json:"environment_slug"` - EnvironmentExternalURL string `json:"environment_external_url"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL *string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - CIConfigPath string `json:"ci_config_path"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - } `json:"project"` - Ref string `json:"ref"` - ShortSHA string `json:"short_sha"` - User *EventUser `json:"user"` - UserURL string `json:"user_url"` - CommitURL string `json:"commit_url"` - CommitTitle string `json:"commit_title"` + ObjectKind string `json:"object_kind"` + Status string `json:"status"` + StatusChangedAt string `json:"status_changed_at"` + DeploymentID int64 `json:"deployment_id"` + DeployableID int64 `json:"deployable_id"` + DeployableURL string `json:"deployable_url"` + Environment string `json:"environment"` + EnvironmentSlug string `json:"environment_slug"` + EnvironmentExternalURL string `json:"environment_external_url"` + Project DeploymentEventProject `json:"project"` + Ref string `json:"ref"` + ShortSHA string 
`json:"short_sha"` + User *EventUser `json:"user"` + UserURL string `json:"user_url"` + CommitURL string `json:"commit_url"` + CommitTitle string `json:"commit_title"` +} + +type DeploymentEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL *string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + VisibilityLevel int64 `json:"visibility_level"` + CIConfigPath string `json:"ci_config_path"` } // FeatureFlagEvent represents a feature flag event. @@ -177,33 +189,37 @@ type DeploymentEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#feature-flag-events type FeatureFlagEvent struct { - ObjectKind string `json:"object_kind"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL *string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - CIConfigPath string `json:"ci_config_path"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - } `json:"project"` - User *EventUser `json:"user"` - UserURL string `json:"user_url"` - ObjectAttributes struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - Active bool `json:"active"` - } 
`json:"object_attributes"` + ObjectKind string `json:"object_kind"` + Project FeatureFlagEventProject `json:"project"` + User *EventUser `json:"user"` + UserURL string `json:"user_url"` + ObjectAttributes FeatureFlagEventObjectAttributes `json:"object_attributes"` +} + +type FeatureFlagEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL *string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + VisibilityLevel int64 `json:"visibility_level"` + CIConfigPath string `json:"ci_config_path"` +} + +type FeatureFlagEventObjectAttributes struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Active bool `json:"active"` } // GroupResourceAccessTokenEvent represents a resource access token event for a @@ -212,20 +228,27 @@ type FeatureFlagEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#project-and-group-access-token-events type GroupResourceAccessTokenEvent struct { - EventName string `json:"event_name"` - ObjectKind string `json:"object_kind"` - Group struct { - GroupID int `json:"group_id"` - GroupName string `json:"group_name"` - GroupPath string `json:"group_path"` - } `json:"group"` - ObjectAttributes struct { - ID int `json:"id"` - UserID int `json:"user_id"` - Name string `json:"name"` - CreatedAt string `json:"created_at"` - ExpiresAt *ISOTime `json:"expires_at"` - } `json:"object_attributes"` + EventName string `json:"event_name"` + ObjectKind string `json:"object_kind"` + Group GroupResourceAccessTokenEventGroup `json:"group"` + 
ObjectAttributes GroupResourceAccessTokenEventObjectAttributes `json:"object_attributes"` +} + +// GroupResourceAccessTokenEventGroup represents a group in a resource access +// token event. +type GroupResourceAccessTokenEventGroup struct { + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + GroupPath string `json:"group_path"` + FullPath string `json:"full_path"` +} + +type GroupResourceAccessTokenEventObjectAttributes struct { + ID int64 `json:"id"` + UserID int64 `json:"user_id"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + ExpiresAt *ISOTime `json:"expires_at"` } // IssueCommentEvent represents a comment on an issue event. @@ -233,74 +256,80 @@ type GroupResourceAccessTokenEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#comment-on-an-issue type IssueCommentEvent struct { - ObjectKind string `json:"object_kind"` - EventType string `json:"event_type"` - User *User `json:"user"` - ProjectID int `json:"project_id"` - Project struct { - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Repository *Repository `json:"repository"` - ObjectAttributes struct { - ID int `json:"id"` - Note string `json:"note"` - NoteableType string `json:"noteable_type"` - AuthorID int `json:"author_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ProjectID int `json:"project_id"` - Attachment string `json:"attachment"` - LineCode string 
`json:"line_code"` - CommitID string `json:"commit_id"` - DiscussionID string `json:"discussion_id"` - NoteableID int `json:"noteable_id"` - System bool `json:"system"` - StDiff []*Diff `json:"st_diff"` - Description string `json:"description"` - Action CommentEventAction `json:"action"` - URL string `json:"url"` - } `json:"object_attributes"` - Issue struct { - ID int `json:"id"` - IID int `json:"iid"` - ProjectID int `json:"project_id"` - MilestoneID int `json:"milestone_id"` - AuthorID int `json:"author_id"` - Position int `json:"position"` - BranchName string `json:"branch_name"` - Description string `json:"description"` - State string `json:"state"` - Title string `json:"title"` - Labels []*EventLabel `json:"labels"` - LastEditedAt string `json:"last_edit_at"` - LastEditedByID int `json:"last_edited_by_id"` - UpdatedAt string `json:"updated_at"` - UpdatedByID int `json:"updated_by_id"` - CreatedAt string `json:"created_at"` - ClosedAt string `json:"closed_at"` - DueDate *ISOTime `json:"due_date"` - URL string `json:"url"` - TimeEstimate int `json:"time_estimate"` - Confidential bool `json:"confidential"` - TotalTimeSpent int `json:"total_time_spent"` - HumanTotalTimeSpent string `json:"human_total_time_spent"` - HumanTimeEstimate string `json:"human_time_estimate"` - AssigneeIDs []int `json:"assignee_ids"` - AssigneeID int `json:"assignee_id"` - } `json:"issue"` + ObjectKind string `json:"object_kind"` + EventType string `json:"event_type"` + User *User `json:"user"` + ProjectID int64 `json:"project_id"` + Project IssueCommentEventProject `json:"project"` + Repository *Repository `json:"repository"` + ObjectAttributes IssueCommentEventObjectAttributes `json:"object_attributes"` + Issue IssueCommentEventIssue `json:"issue"` +} + +type IssueCommentEventProject struct { + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + 
Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` +} + +type IssueCommentEventObjectAttributes struct { + ID int64 `json:"id"` + Note string `json:"note"` + NoteableType string `json:"noteable_type"` + AuthorID int64 `json:"author_id"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ProjectID int64 `json:"project_id"` + Attachment string `json:"attachment"` + LineCode string `json:"line_code"` + CommitID string `json:"commit_id"` + DiscussionID string `json:"discussion_id"` + NoteableID int64 `json:"noteable_id"` + System bool `json:"system"` + StDiff []*Diff `json:"st_diff"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` +} + +type IssueCommentEventIssue struct { + ID int64 `json:"id"` + IID int64 `json:"iid"` + ProjectID int64 `json:"project_id"` + MilestoneID int64 `json:"milestone_id"` + AuthorID int64 `json:"author_id"` + Position int64 `json:"position"` + BranchName string `json:"branch_name"` + Description string `json:"description"` + State string `json:"state"` + Title string `json:"title"` + Labels []*EventLabel `json:"labels"` + LastEditedAt string `json:"last_edit_at"` + LastEditedByID int64 `json:"last_edited_by_id"` + UpdatedAt string `json:"updated_at"` + UpdatedByID int64 `json:"updated_by_id"` + CreatedAt string `json:"created_at"` + ClosedAt string `json:"closed_at"` + DueDate *ISOTime `json:"due_date"` + URL string `json:"url"` + TimeEstimate int64 `json:"time_estimate"` + Confidential bool `json:"confidential"` + TotalTimeSpent int64 `json:"total_time_spent"` + HumanTotalTimeSpent string `json:"human_total_time_spent"` + HumanTimeEstimate string 
`json:"human_time_estimate"` + AssigneeIDs []int64 `json:"assignee_ids"` + AssigneeID int64 `json:"assignee_id"` } // IssueEvent represents a issue event. @@ -308,109 +337,135 @@ type IssueCommentEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#work-item-events type IssueEvent struct { - ObjectKind string `json:"object_kind"` - EventType string `json:"event_type"` - User *EventUser `json:"user"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Repository *Repository `json:"repository"` - ObjectAttributes struct { - ID int `json:"id"` - Title string `json:"title"` - AssigneeIDs []int `json:"assignee_ids"` - AssigneeID int `json:"assignee_id"` - AuthorID int `json:"author_id"` - ProjectID int `json:"project_id"` - CreatedAt string `json:"created_at"` // Should be *time.Time (see Gitlab issue #21468) - UpdatedAt string `json:"updated_at"` // Should be *time.Time (see Gitlab issue #21468) - UpdatedByID int `json:"updated_by_id"` - LastEditedAt string `json:"last_edited_at"` - LastEditedByID int `json:"last_edited_by_id"` - RelativePosition int `json:"relative_position"` - BranchName string `json:"branch_name"` - Description string `json:"description"` - MilestoneID int `json:"milestone_id"` - StateID StateID `json:"state_id"` - Confidential bool `json:"confidential"` - DiscussionLocked bool `json:"discussion_locked"` - DueDate *ISOTime `json:"due_date"` - MovedToID 
int `json:"moved_to_id"` - DuplicatedToID int `json:"duplicated_to_id"` - TimeEstimate int `json:"time_estimate"` - TotalTimeSpent int `json:"total_time_spent"` - TimeChange int `json:"time_change"` - HumanTotalTimeSpent string `json:"human_total_time_spent"` - HumanTimeEstimate string `json:"human_time_estimate"` - HumanTimeChange string `json:"human_time_change"` - Weight int `json:"weight"` - IID int `json:"iid"` - URL string `json:"url"` - State string `json:"state"` - Action string `json:"action"` - Severity string `json:"severity"` - EscalationStatus string `json:"escalation_status"` - EscalationPolicy struct { - ID int `json:"id"` - Name string `json:"name"` - } `json:"escalation_policy"` - Labels []*EventLabel `json:"labels"` - } `json:"object_attributes"` - Assignee *EventUser `json:"assignee"` - Assignees *[]EventUser `json:"assignees"` - Labels []*EventLabel `json:"labels"` - Changes struct { - Assignees struct { - Previous []*EventUser `json:"previous"` - Current []*EventUser `json:"current"` - } `json:"assignees"` - Description struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"description"` - Labels struct { - Previous []*EventLabel `json:"previous"` - Current []*EventLabel `json:"current"` - } `json:"labels"` - Title struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"title"` - ClosedAt struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"closed_at"` - StateID struct { - Previous StateID `json:"previous"` - Current StateID `json:"current"` - } `json:"state_id"` - UpdatedAt struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"updated_at"` - UpdatedByID struct { - Previous int `json:"previous"` - Current int `json:"current"` - } `json:"updated_by_id"` - TotalTimeSpent struct { - Previous int `json:"previous"` - Current int `json:"current"` - } `json:"total_time_spent"` - } `json:"changes"` + ObjectKind 
string `json:"object_kind"` + EventType string `json:"event_type"` + User *EventUser `json:"user"` + Project IssueEventProject `json:"project"` + Repository *Repository `json:"repository"` + ObjectAttributes IssueEventObjectAttributes `json:"object_attributes"` + Assignee *EventUser `json:"assignee"` + Assignees *[]EventUser `json:"assignees"` + Labels []*EventLabel `json:"labels"` + Changes IssueEventChanges `json:"changes"` +} + +type IssueEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` +} + +type IssueEventObjectAttributes struct { + ID int64 `json:"id"` + Title string `json:"title"` + AssigneeIDs []int64 `json:"assignee_ids"` + AssigneeID int64 `json:"assignee_id"` + AuthorID int64 `json:"author_id"` + ProjectID int64 `json:"project_id"` + CreatedAt string `json:"created_at"` // Should be *time.Time (see Gitlab issue #21468) + UpdatedAt string `json:"updated_at"` // Should be *time.Time (see Gitlab issue #21468) + UpdatedByID int64 `json:"updated_by_id"` + LastEditedAt string `json:"last_edited_at"` + LastEditedByID int64 `json:"last_edited_by_id"` + RelativePosition int64 `json:"relative_position"` + BranchName string `json:"branch_name"` + Description string `json:"description"` + MilestoneID int64 `json:"milestone_id"` + StateID StateID `json:"state_id"` + Confidential bool `json:"confidential"` + DiscussionLocked bool `json:"discussion_locked"` + DueDate *ISOTime `json:"due_date"` + MovedToID int64 `json:"moved_to_id"` + 
DuplicatedToID int64 `json:"duplicated_to_id"` + TimeEstimate int64 `json:"time_estimate"` + TotalTimeSpent int64 `json:"total_time_spent"` + TimeChange int64 `json:"time_change"` + HumanTotalTimeSpent string `json:"human_total_time_spent"` + HumanTimeEstimate string `json:"human_time_estimate"` + HumanTimeChange string `json:"human_time_change"` + Weight int64 `json:"weight"` + IID int64 `json:"iid"` + URL string `json:"url"` + State string `json:"state"` + Action string `json:"action"` + Severity string `json:"severity"` + EscalationStatus string `json:"escalation_status"` + EscalationPolicy IssueEventObjectAttributesEscalationPolicy `json:"escalation_policy"` + Labels []*EventLabel `json:"labels"` +} + +type IssueEventObjectAttributesEscalationPolicy struct { + ID int64 `json:"id"` + Name string `json:"name"` +} + +type IssueEventChanges struct { + Assignees EventChangesAssignees `json:"assignees"` + Description EventChangesDescription `json:"description"` + Labels EventChangesLabels `json:"labels"` + Title EventChangesTitle `json:"title"` + ClosedAt IssueEventChangesClosedAt `json:"closed_at"` + StateID EventChangesStateID `json:"state_id"` + UpdatedAt EventChangesUpdatedAt `json:"updated_at"` + UpdatedByID EventChangesUpdatedByID `json:"updated_by_id"` + TotalTimeSpent IssueEventChangesTotalTimeSpent `json:"total_time_spent"` +} + +type EventChangesAssignees struct { + Previous []*EventUser `json:"previous"` + Current []*EventUser `json:"current"` +} + +type EventChangesDescription struct { + Previous string `json:"previous"` + Current string `json:"current"` +} + +type EventChangesLabels struct { + Previous []*EventLabel `json:"previous"` + Current []*EventLabel `json:"current"` +} + +type EventChangesTitle struct { + Previous string `json:"previous"` + Current string `json:"current"` +} + +type IssueEventChangesClosedAt struct { + Previous string `json:"previous"` + Current string `json:"current"` +} + +type EventChangesStateID struct { + Previous StateID 
`json:"previous"` + Current StateID `json:"current"` +} + +type EventChangesUpdatedAt struct { + Previous string `json:"previous"` + Current string `json:"current"` +} + +type EventChangesUpdatedByID struct { + Previous int64 `json:"previous"` + Current int64 `json:"current"` +} + +type IssueEventChangesTotalTimeSpent struct { + Previous int64 `json:"previous"` + Current int64 `json:"current"` } // JobEvent represents a job event. @@ -418,63 +473,73 @@ type IssueEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#job-events type JobEvent struct { - ObjectKind string `json:"object_kind"` - Ref string `json:"ref"` - Tag bool `json:"tag"` - BeforeSHA string `json:"before_sha"` - SHA string `json:"sha"` - BuildID int `json:"build_id"` - BuildName string `json:"build_name"` - BuildStage string `json:"build_stage"` - BuildStatus string `json:"build_status"` - BuildCreatedAt string `json:"build_created_at"` - BuildStartedAt string `json:"build_started_at"` - BuildFinishedAt string `json:"build_finished_at"` - BuildDuration float64 `json:"build_duration"` - BuildQueuedDuration float64 `json:"build_queued_duration"` - BuildAllowFailure bool `json:"build_allow_failure"` - BuildFailureReason string `json:"build_failure_reason"` - RetriesCount int `json:"retries_count"` - PipelineID int `json:"pipeline_id"` - ProjectID int `json:"project_id"` - ProjectName string `json:"project_name"` - User *EventUser `json:"user"` - Commit struct { - ID int `json:"id"` - Name string `json:"name"` - SHA string `json:"sha"` - Message string `json:"message"` - AuthorName string `json:"author_name"` - AuthorEmail string `json:"author_email"` - AuthorURL string `json:"author_url"` - Status string `json:"status"` - Duration int `json:"duration"` - StartedAt string `json:"started_at"` - FinishedAt string `json:"finished_at"` - } `json:"commit"` - Repository *Repository `json:"repository"` - Runner struct { - ID int `json:"id"` - Active bool 
`json:"active"` - RunnerType string `json:"runner_type"` - IsShared bool `json:"is_shared"` - Description string `json:"description"` - Tags []string `json:"tags"` - } `json:"runner"` - Environment struct { - Name string `json:"name"` - Action string `json:"action"` - DeploymentTier string `json:"deployment_tier"` - } `json:"environment"` - SourcePipeline struct { - Project struct { - ID int `json:"id"` - WebURL string `json:"web_url"` - PathWithNamespace string `json:"path_with_namespace"` - } `json:"project"` - PipelineID int `json:"pipeline_id"` - JobID int `json:"job_id"` - } `json:"source_pipeline"` + ObjectKind string `json:"object_kind"` + Ref string `json:"ref"` + Tag bool `json:"tag"` + BeforeSHA string `json:"before_sha"` + SHA string `json:"sha"` + BuildID int64 `json:"build_id"` + BuildName string `json:"build_name"` + BuildStage string `json:"build_stage"` + BuildStatus string `json:"build_status"` + BuildCreatedAt string `json:"build_created_at"` + BuildStartedAt string `json:"build_started_at"` + BuildFinishedAt string `json:"build_finished_at"` + BuildDuration float64 `json:"build_duration"` + BuildQueuedDuration float64 `json:"build_queued_duration"` + BuildAllowFailure bool `json:"build_allow_failure"` + BuildFailureReason string `json:"build_failure_reason"` + RetriesCount int64 `json:"retries_count"` + PipelineID int64 `json:"pipeline_id"` + ProjectID int64 `json:"project_id"` + ProjectName string `json:"project_name"` + User *EventUser `json:"user"` + Commit JobEventCommit `json:"commit"` + Repository *Repository `json:"repository"` + Runner JobEventRunner `json:"runner"` + Environment EventEnvironment `json:"environment"` + SourcePipeline EventSourcePipeline `json:"source_pipeline"` +} + +type JobEventCommit struct { + ID int64 `json:"id"` + Name string `json:"name"` + SHA string `json:"sha"` + Message string `json:"message"` + AuthorName string `json:"author_name"` + AuthorEmail string `json:"author_email"` + AuthorURL string 
`json:"author_url"` + Status string `json:"status"` + Duration int64 `json:"duration"` + StartedAt string `json:"started_at"` + FinishedAt string `json:"finished_at"` +} + +type JobEventRunner struct { + ID int64 `json:"id"` + Active bool `json:"active"` + RunnerType string `json:"runner_type"` + IsShared bool `json:"is_shared"` + Description string `json:"description"` + Tags []string `json:"tags"` +} + +type EventEnvironment struct { + Name string `json:"name"` + Action string `json:"action"` + DeploymentTier string `json:"deployment_tier"` +} + +type EventSourcePipeline struct { + Project EventSourcePipelineProject `json:"project"` + PipelineID int64 `json:"pipeline_id"` + JobID int64 `json:"job_id"` +} + +type EventSourcePipelineProject struct { + ID int64 `json:"id"` + WebURL string `json:"web_url"` + PathWithNamespace string `json:"path_with_namespace"` } // MemberEvent represents a member event. @@ -486,11 +551,11 @@ type MemberEvent struct { UpdatedAt *time.Time `json:"updated_at"` GroupName string `json:"group_name"` GroupPath string `json:"group_path"` - GroupID int `json:"group_id"` + GroupID int64 `json:"group_id"` UserUsername string `json:"user_username"` UserName string `json:"user_name"` UserEmail string `json:"user_email"` - UserID int `json:"user_id"` + UserID int64 `json:"user_id"` GroupAccess string `json:"group_access"` GroupPlan string `json:"group_plan"` ExpiresAt *time.Time `json:"expires_at"` @@ -502,111 +567,116 @@ type MemberEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#comment-on-a-merge-request type MergeCommentEvent struct { - ObjectKind string `json:"object_kind"` - EventType string `json:"event_type"` - User *EventUser `json:"user"` - ProjectID int `json:"project_id"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string 
`json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - ObjectAttributes struct { - Attachment string `json:"attachment"` - AuthorID int `json:"author_id"` - ChangePosition *NotePosition `json:"change_position"` - CommitID string `json:"commit_id"` - CreatedAt string `json:"created_at"` - DiscussionID string `json:"discussion_id"` - ID int `json:"id"` - LineCode string `json:"line_code"` - Note string `json:"note"` - NoteableID int `json:"noteable_id"` - NoteableType string `json:"noteable_type"` - OriginalPosition *NotePosition `json:"original_position"` - Position *NotePosition `json:"position"` - ProjectID int `json:"project_id"` - ResolvedAt string `json:"resolved_at"` - ResolvedByID int `json:"resolved_by_id"` - ResolvedByPush bool `json:"resolved_by_push"` - StDiff *Diff `json:"st_diff"` - System bool `json:"system"` - Type string `json:"type"` - UpdatedAt string `json:"updated_at"` - UpdatedByID int `json:"updated_by_id"` - Description string `json:"description"` - Action CommentEventAction `json:"action"` - URL string `json:"url"` - } `json:"object_attributes"` - Repository *Repository `json:"repository"` - MergeRequest struct { - ID int `json:"id"` - TargetBranch string `json:"target_branch"` - SourceBranch string `json:"source_branch"` - SourceProjectID int `json:"source_project_id"` - AuthorID int `json:"author_id"` - AssigneeID int `json:"assignee_id"` - AssigneeIDs []int `json:"assignee_ids"` - ReviewerIDs []int `json:"reviewer_ids"` - Title string `json:"title"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - MilestoneID int `json:"milestone_id"` - State string `json:"state"` - 
MergeStatus string `json:"merge_status"` - TargetProjectID int `json:"target_project_id"` - IID int `json:"iid"` - Description string `json:"description"` - Position int `json:"position"` - Labels []*EventLabel `json:"labels"` - LockedAt string `json:"locked_at"` - UpdatedByID int `json:"updated_by_id"` - MergeError string `json:"merge_error"` - MergeParams *MergeParams `json:"merge_params"` - MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"` - MergeUserID int `json:"merge_user_id"` - MergeCommitSHA string `json:"merge_commit_sha"` - DeletedAt string `json:"deleted_at"` - InProgressMergeCommitSHA string `json:"in_progress_merge_commit_sha"` - LockVersion int `json:"lock_version"` - ApprovalsBeforeMerge string `json:"approvals_before_merge"` - RebaseCommitSHA string `json:"rebase_commit_sha"` - TimeEstimate int `json:"time_estimate"` - Squash bool `json:"squash"` - LastEditedAt string `json:"last_edited_at"` - LastEditedByID int `json:"last_edited_by_id"` - Source *Repository `json:"source"` - Target *Repository `json:"target"` - LastCommit struct { - ID string `json:"id"` - Title string `json:"title"` - Message string `json:"message"` - Timestamp *time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - } `json:"last_commit"` - WorkInProgress bool `json:"work_in_progress"` - TotalTimeSpent int `json:"total_time_spent"` - HeadPipelineID int `json:"head_pipeline_id"` - Assignee *EventUser `json:"assignee"` - DetailedMergeStatus string `json:"detailed_merge_status"` - URL string `json:"url"` - } `json:"merge_request"` + ObjectKind string `json:"object_kind"` + EventType string `json:"event_type"` + User *EventUser `json:"user"` + ProjectID int64 `json:"project_id"` + Project MergeCommentEventProject `json:"project"` + ObjectAttributes MergeCommentEventObjectAttributes `json:"object_attributes"` + Repository *Repository `json:"repository"` + MergeRequest 
MergeCommentEventMergeRequest `json:"merge_request"` +} + +type MergeCommentEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` +} + +type MergeCommentEventObjectAttributes struct { + Attachment string `json:"attachment"` + AuthorID int64 `json:"author_id"` + ChangePosition *NotePosition `json:"change_position"` + CommitID string `json:"commit_id"` + CreatedAt string `json:"created_at"` + DiscussionID string `json:"discussion_id"` + ID int64 `json:"id"` + LineCode string `json:"line_code"` + Note string `json:"note"` + NoteableID int64 `json:"noteable_id"` + NoteableType string `json:"noteable_type"` + OriginalPosition *NotePosition `json:"original_position"` + Position *NotePosition `json:"position"` + ProjectID int64 `json:"project_id"` + ResolvedAt string `json:"resolved_at"` + ResolvedByID int64 `json:"resolved_by_id"` + ResolvedByPush bool `json:"resolved_by_push"` + StDiff *Diff `json:"st_diff"` + System bool `json:"system"` + Type string `json:"type"` + UpdatedAt string `json:"updated_at"` + UpdatedByID int64 `json:"updated_by_id"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` +} + +type MergeCommentEventMergeRequest struct { + ID int64 `json:"id"` + TargetBranch string `json:"target_branch"` + SourceBranch string `json:"source_branch"` + SourceProjectID int64 `json:"source_project_id"` + AuthorID int64 `json:"author_id"` + AssigneeID int64 `json:"assignee_id"` + 
AssigneeIDs []int64 `json:"assignee_ids"` + ReviewerIDs []int64 `json:"reviewer_ids"` + Title string `json:"title"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + MilestoneID int64 `json:"milestone_id"` + State string `json:"state"` + MergeStatus string `json:"merge_status"` + TargetProjectID int64 `json:"target_project_id"` + IID int64 `json:"iid"` + Description string `json:"description"` + Position int64 `json:"position"` + Labels []*EventLabel `json:"labels"` + LockedAt string `json:"locked_at"` + UpdatedByID int64 `json:"updated_by_id"` + MergeError string `json:"merge_error"` + MergeParams *MergeParams `json:"merge_params"` + MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"` + MergeUserID int64 `json:"merge_user_id"` + MergeCommitSHA string `json:"merge_commit_sha"` + DeletedAt string `json:"deleted_at"` + InProgressMergeCommitSHA string `json:"in_progress_merge_commit_sha"` + LockVersion int64 `json:"lock_version"` + ApprovalsBeforeMerge string `json:"approvals_before_merge"` + RebaseCommitSHA string `json:"rebase_commit_sha"` + TimeEstimate int64 `json:"time_estimate"` + Squash bool `json:"squash"` + LastEditedAt string `json:"last_edited_at"` + LastEditedByID int64 `json:"last_edited_by_id"` + Source *Repository `json:"source"` + Target *Repository `json:"target"` + LastCommit EventMergeRequestLastCommit `json:"last_commit"` + WorkInProgress bool `json:"work_in_progress"` + TotalTimeSpent int64 `json:"total_time_spent"` + HeadPipelineID int64 `json:"head_pipeline_id"` + Assignee *EventUser `json:"assignee"` + DetailedMergeStatus string `json:"detailed_merge_status"` + URL string `json:"url"` +} + +type EventMergeRequestLastCommit struct { + ID string `json:"id"` + Title string `json:"title"` + Message string `json:"message"` + Timestamp *time.Time `json:"timestamp"` + URL string `json:"url"` + Author EventCommitAuthor `json:"author"` } // MergeEvent represents a merge event. 
@@ -614,173 +684,170 @@ type MergeCommentEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#merge-request-events type MergeEvent struct { - ObjectKind string `json:"object_kind"` - EventType string `json:"event_type"` - User *EventUser `json:"user"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - CIConfigPath string `json:"ci_config_path"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - ObjectAttributes struct { - ID int `json:"id"` - TargetBranch string `json:"target_branch"` - SourceBranch string `json:"source_branch"` - SourceProjectID int `json:"source_project_id"` - AuthorID int `json:"author_id"` - AssigneeID int `json:"assignee_id"` - AssigneeIDs []int `json:"assignee_ids"` - ReviewerIDs []int `json:"reviewer_ids"` - Title string `json:"title"` - CreatedAt string `json:"created_at"` // Should be *time.Time (see Gitlab issue #21468) - UpdatedAt string `json:"updated_at"` // Should be *time.Time (see Gitlab issue #21468) - StCommits []*Commit `json:"st_commits"` - StDiffs []*Diff `json:"st_diffs"` - LastEditedAt string `json:"last_edited_at"` - LastEditedByID int `json:"last_edited_by_id"` - MilestoneID int `json:"milestone_id"` - StateID StateID `json:"state_id"` - State string `json:"state"` - MergeStatus string `json:"merge_status"` - TargetProjectID int `json:"target_project_id"` - IID int `json:"iid"` - Description string `json:"description"` - Position int `json:"position"` - LockedAt string 
`json:"locked_at"` - UpdatedByID int `json:"updated_by_id"` - MergeError string `json:"merge_error"` - MergeParams *MergeParams `json:"merge_params"` - MergeWhenBuildSucceeds bool `json:"merge_when_build_succeeds"` - MergeUserID int `json:"merge_user_id"` - MergeCommitSHA string `json:"merge_commit_sha"` - DeletedAt string `json:"deleted_at"` - ApprovalsBeforeMerge string `json:"approvals_before_merge"` - RebaseCommitSHA string `json:"rebase_commit_sha"` - InProgressMergeCommitSHA string `json:"in_progress_merge_commit_sha"` - LockVersion int `json:"lock_version"` - TimeEstimate int `json:"time_estimate"` - Source *Repository `json:"source"` - Target *Repository `json:"target"` - HeadPipelineID *int `json:"head_pipeline_id"` - LastCommit struct { - ID string `json:"id"` - Message string `json:"message"` - Title string `json:"title"` - Timestamp *time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - } `json:"last_commit"` - BlockingDiscussionsResolved bool `json:"blocking_discussions_resolved"` - WorkInProgress bool `json:"work_in_progress"` - Draft bool `json:"draft"` - TotalTimeSpent int `json:"total_time_spent"` - TimeChange int `json:"time_change"` - HumanTotalTimeSpent string `json:"human_total_time_spent"` - HumanTimeChange string `json:"human_time_change"` - HumanTimeEstimate string `json:"human_time_estimate"` - FirstContribution bool `json:"first_contribution"` - URL string `json:"url"` - Labels []*EventLabel `json:"labels"` - Action string `json:"action"` - DetailedMergeStatus string `json:"detailed_merge_status"` - OldRev string `json:"oldrev"` - } `json:"object_attributes"` - Repository *Repository `json:"repository"` - Labels []*EventLabel `json:"labels"` - Changes struct { - Assignees struct { - Previous []*EventUser `json:"previous"` - Current []*EventUser `json:"current"` - } `json:"assignees"` - Reviewers struct { - Previous []*EventUser 
`json:"previous"` - Current []*EventUser `json:"current"` - } `json:"reviewers"` - Description struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"description"` - Draft struct { - Previous bool `json:"previous"` - Current bool `json:"current"` - } `json:"draft"` - Labels struct { - Previous []*EventLabel `json:"previous"` - Current []*EventLabel `json:"current"` - } `json:"labels"` - LastEditedAt struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"last_edited_at"` - LastEditedByID struct { - Previous int `json:"previous"` - Current int `json:"current"` - } `json:"last_edited_by_id"` - MergeStatus struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"merge_status"` - MilestoneID struct { - Previous int `json:"previous"` - Current int `json:"current"` - } `json:"milestone_id"` - SourceBranch struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"source_branch"` - SourceProjectID struct { - Previous int `json:"previous"` - Current int `json:"current"` - } `json:"source_project_id"` - StateID struct { - Previous StateID `json:"previous"` - Current StateID `json:"current"` - } `json:"state_id"` - TargetBranch struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"target_branch"` - TargetProjectID struct { - Previous int `json:"previous"` - Current int `json:"current"` - } `json:"target_project_id"` - Title struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"title"` - UpdatedAt struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"updated_at"` - UpdatedByID struct { - Previous int `json:"previous"` - Current int `json:"current"` - } `json:"updated_by_id"` - } `json:"changes"` - Assignees []*EventUser `json:"assignees"` - Reviewers []*EventUser `json:"reviewers"` -} - -// EventUser represents a user record in an event 
and is used as an even + ObjectKind string `json:"object_kind"` + EventType string `json:"event_type"` + User *EventUser `json:"user"` + Project MergeEventProject `json:"project"` + ObjectAttributes MergeEventObjectAttributes `json:"object_attributes"` + Repository *Repository `json:"repository"` + Labels []*EventLabel `json:"labels"` + Changes MergeEventChanges `json:"changes"` + Assignees []*EventUser `json:"assignees"` + Reviewers []*EventUser `json:"reviewers"` +} + +type MergeEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CIConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` +} + +type MergeEventObjectAttributes struct { + ID int64 `json:"id"` + TargetBranch string `json:"target_branch"` + SourceBranch string `json:"source_branch"` + SourceProjectID int64 `json:"source_project_id"` + AuthorID int64 `json:"author_id"` + AssigneeID int64 `json:"assignee_id"` + AssigneeIDs []int64 `json:"assignee_ids"` + ReviewerIDs []int64 `json:"reviewer_ids"` + Title string `json:"title"` + CreatedAt string `json:"created_at"` // Should be *time.Time (see Gitlab issue #21468) + UpdatedAt string `json:"updated_at"` // Should be *time.Time (see Gitlab issue #21468) + StCommits []*Commit `json:"st_commits"` + StDiffs []*Diff `json:"st_diffs"` + LastEditedAt string `json:"last_edited_at"` + LastEditedByID int64 `json:"last_edited_by_id"` + MilestoneID int64 `json:"milestone_id"` + StateID StateID `json:"state_id"` + State string `json:"state"` + MergeStatus 
string `json:"merge_status"` + TargetProjectID int64 `json:"target_project_id"` + IID int64 `json:"iid"` + Description string `json:"description"` + Position int64 `json:"position"` + LockedAt string `json:"locked_at"` + UpdatedByID int64 `json:"updated_by_id"` + MergeError string `json:"merge_error"` + MergeParams *MergeParams `json:"merge_params"` + MergeWhenBuildSucceeds bool `json:"merge_when_build_succeeds"` + MergeUserID int64 `json:"merge_user_id"` + MergeCommitSHA string `json:"merge_commit_sha"` + DeletedAt string `json:"deleted_at"` + ApprovalsBeforeMerge string `json:"approvals_before_merge"` + RebaseCommitSHA string `json:"rebase_commit_sha"` + InProgressMergeCommitSHA string `json:"in_progress_merge_commit_sha"` + LockVersion int64 `json:"lock_version"` + TimeEstimate int64 `json:"time_estimate"` + Source *Repository `json:"source"` + Target *Repository `json:"target"` + HeadPipelineID *int64 `json:"head_pipeline_id"` + LastCommit EventMergeRequestLastCommit `json:"last_commit"` + BlockingDiscussionsResolved bool `json:"blocking_discussions_resolved"` + WorkInProgress bool `json:"work_in_progress"` + Draft bool `json:"draft"` + TotalTimeSpent int64 `json:"total_time_spent"` + TimeChange int64 `json:"time_change"` + HumanTotalTimeSpent string `json:"human_total_time_spent"` + HumanTimeChange string `json:"human_time_change"` + HumanTimeEstimate string `json:"human_time_estimate"` + FirstContribution bool `json:"first_contribution"` + URL string `json:"url"` + Labels []*EventLabel `json:"labels"` + Action string `json:"action"` + DetailedMergeStatus string `json:"detailed_merge_status"` + OldRev string `json:"oldrev"` + System bool `json:"system"` + SystemAction string `json:"system_action"` +} + +type MergeEventChanges struct { + Assignees EventChangesAssignees `json:"assignees"` + Reviewers MergeEventChangesReviewers `json:"reviewers"` + Description EventChangesDescription `json:"description"` + Draft MergeEventChangesDraft `json:"draft"` + Labels 
EventChangesLabels `json:"labels"` + LastEditedAt MergeEventChangesLastEditedAt `json:"last_edited_at"` + LastEditedByID MergeEventChangesLastEditedByID `json:"last_edited_by_id"` + MergeStatus MergeEventChangesMergeStatus `json:"merge_status"` + MilestoneID MergeEventChangesMilestoneID `json:"milestone_id"` + SourceBranch MergeEventChangesSourceBranch `json:"source_branch"` + SourceProjectID MergeEventChangesSourceProjectID `json:"source_project_id"` + StateID EventChangesStateID `json:"state_id"` + TargetBranch MergeEventChangesTargetBranch `json:"target_branch"` + TargetProjectID MergeEventChangesTargetProjectID `json:"target_project_id"` + Title EventChangesTitle `json:"title"` + UpdatedAt EventChangesUpdatedAt `json:"updated_at"` + UpdatedByID EventChangesUpdatedByID `json:"updated_by_id"` +} + +type MergeEventChangesReviewers struct { + Previous []*EventUser `json:"previous"` + Current []*EventUser `json:"current"` +} + +type MergeEventChangesDraft struct { + Previous bool `json:"previous"` + Current bool `json:"current"` +} + +type MergeEventChangesLastEditedAt struct { + Previous string `json:"previous"` + Current string `json:"current"` +} + +type MergeEventChangesLastEditedByID struct { + Previous int64 `json:"previous"` + Current int64 `json:"current"` +} + +type MergeEventChangesMergeStatus struct { + Previous string `json:"previous"` + Current string `json:"current"` +} + +type MergeEventChangesMilestoneID struct { + Previous int64 `json:"previous"` + Current int64 `json:"current"` +} + +type MergeEventChangesSourceBranch struct { + Previous string `json:"previous"` + Current string `json:"current"` +} + +type MergeEventChangesSourceProjectID struct { + Previous int64 `json:"previous"` + Current int64 `json:"current"` +} + +type MergeEventChangesTargetBranch struct { + Previous string `json:"previous"` + Current string `json:"current"` +} + +type MergeEventChangesTargetProjectID struct { + Previous int64 `json:"previous"` + Current int64 
`json:"current"` +} + +// EventUser represents a user record in an event and is used as an event // initiator or a merge assignee. type EventUser struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Username string `json:"username"` AvatarURL string `json:"avatar_url"` @@ -832,113 +899,114 @@ func (p *MergeParams) UnmarshalJSON(b []byte) error { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#pipeline-events type PipelineEvent struct { - ObjectKind string `json:"object_kind"` - ObjectAttributes struct { - ID int `json:"id"` - IID int `json:"iid"` - Name string `json:"name"` - Ref string `json:"ref"` - Tag bool `json:"tag"` - SHA string `json:"sha"` - BeforeSHA string `json:"before_sha"` - Source string `json:"source"` - Status string `json:"status"` - DetailedStatus string `json:"detailed_status"` - Stages []string `json:"stages"` - CreatedAt string `json:"created_at"` - FinishedAt string `json:"finished_at"` - Duration int `json:"duration"` - QueuedDuration int `json:"queued_duration"` - URL string `json:"url"` - Variables []struct { - Key string `json:"key"` - Value string `json:"value"` - } `json:"variables"` - } `json:"object_attributes"` - MergeRequest struct { - ID int `json:"id"` - IID int `json:"iid"` - Title string `json:"title"` - SourceBranch string `json:"source_branch"` - SourceProjectID int `json:"source_project_id"` - TargetBranch string `json:"target_branch"` - TargetProjectID int `json:"target_project_id"` - State string `json:"state"` - MergeRequestStatus string `json:"merge_status"` - DetailedMergeStatus string `json:"detailed_merge_status"` - URL string `json:"url"` - } `json:"merge_request"` - User *EventUser `json:"user"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string 
`json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Commit struct { - ID string `json:"id"` - Message string `json:"message"` - Title string `json:"title"` - Timestamp *time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - } `json:"commit"` - SourcePipeline struct { - Project struct { - ID int `json:"id"` - WebURL string `json:"web_url"` - PathWithNamespace string `json:"path_with_namespace"` - } `json:"project"` - PipelineID int `json:"pipeline_id"` - JobID int `json:"job_id"` - } `json:"source_pipeline"` - Builds []struct { - ID int `json:"id"` - Stage string `json:"stage"` - Name string `json:"name"` - Status string `json:"status"` - CreatedAt string `json:"created_at"` - StartedAt string `json:"started_at"` - FinishedAt string `json:"finished_at"` - Duration float64 `json:"duration"` - QueuedDuration float64 `json:"queued_duration"` - FailureReason string `json:"failure_reason"` - When string `json:"when"` - Manual bool `json:"manual"` - AllowFailure bool `json:"allow_failure"` - User *EventUser `json:"user"` - Runner struct { - ID int `json:"id"` - Description string `json:"description"` - Active bool `json:"active"` - IsShared bool `json:"is_shared"` - RunnerType string `json:"runner_type"` - Tags []string `json:"tags"` - } `json:"runner"` - ArtifactsFile struct { - Filename string `json:"filename"` - Size int `json:"size"` - } `json:"artifacts_file"` - Environment struct { - Name string `json:"name"` - Action string `json:"action"` - DeploymentTier string `json:"deployment_tier"` - } `json:"environment"` - } `json:"builds"` + ObjectKind string 
`json:"object_kind"` + ObjectAttributes PipelineEventObjectAttributes `json:"object_attributes"` + MergeRequest PipelineEventMergeRequest `json:"merge_request"` + User *EventUser `json:"user"` + Project PipelineEventProject `json:"project"` + Commit PipelineEventCommit `json:"commit"` + SourcePipeline EventSourcePipeline `json:"source_pipeline"` + Builds []PipelineEventBuild `json:"builds"` +} + +type PipelineEventObjectAttributes struct { + ID int64 `json:"id"` + IID int64 `json:"iid"` + Name string `json:"name"` + Ref string `json:"ref"` + Tag bool `json:"tag"` + SHA string `json:"sha"` + BeforeSHA string `json:"before_sha"` + Source string `json:"source"` + Status string `json:"status"` + DetailedStatus string `json:"detailed_status"` + Stages []string `json:"stages"` + CreatedAt string `json:"created_at"` + FinishedAt string `json:"finished_at"` + Duration int64 `json:"duration"` + QueuedDuration int64 `json:"queued_duration"` + URL string `json:"url"` + Variables []PipelineEventObjectAttributesVariable `json:"variables"` +} + +type PipelineEventObjectAttributesVariable struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type PipelineEventMergeRequest struct { + ID int64 `json:"id"` + IID int64 `json:"iid"` + Title string `json:"title"` + SourceBranch string `json:"source_branch"` + SourceProjectID int64 `json:"source_project_id"` + TargetBranch string `json:"target_branch"` + TargetProjectID int64 `json:"target_project_id"` + State string `json:"state"` + MergeRequestStatus string `json:"merge_status"` + DetailedMergeStatus string `json:"detailed_merge_status"` + URL string `json:"url"` +} + +type PipelineEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + 
DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` +} + +type PipelineEventCommit struct { + ID string `json:"id"` + Message string `json:"message"` + Title string `json:"title"` + Timestamp *time.Time `json:"timestamp"` + URL string `json:"url"` + Author EventCommitAuthor `json:"author"` +} + +type PipelineEventBuild struct { + ID int64 `json:"id"` + Stage string `json:"stage"` + Name string `json:"name"` + Status string `json:"status"` + CreatedAt string `json:"created_at"` + StartedAt string `json:"started_at"` + FinishedAt string `json:"finished_at"` + Duration float64 `json:"duration"` + QueuedDuration float64 `json:"queued_duration"` + FailureReason string `json:"failure_reason"` + When string `json:"when"` + Manual bool `json:"manual"` + AllowFailure bool `json:"allow_failure"` + User *EventUser `json:"user"` + Runner PipelineEventBuildRunner `json:"runner"` + ArtifactsFile PipelineEventBuildArtifactsFile `json:"artifacts_file"` + Environment EventEnvironment `json:"environment"` +} + +type PipelineEventBuildRunner struct { + ID int64 `json:"id"` + Description string `json:"description"` + Active bool `json:"active"` + IsShared bool `json:"is_shared"` + RunnerType string `json:"runner_type"` + Tags []string `json:"tags"` +} + +type PipelineEventBuildArtifactsFile struct { + Filename string `json:"filename"` + Size int64 `json:"size"` } // ProjectResourceAccessTokenEvent represents a resource access token event for @@ -947,33 +1015,37 @@ type PipelineEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#project-and-group-access-token-events type ProjectResourceAccessTokenEvent struct { - EventName string `json:"event_name"` - ObjectKind string `json:"object_kind"` - Project struct { - ID int `json:"id"` - 
Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - CIConfigPath string `json:"ci_config_path"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - } `json:"project"` - ObjectAttributes struct { - ID int `json:"id"` - UserID int `json:"user_id"` - Name string `json:"name"` - CreatedAt string `json:"created_at"` - ExpiresAt *ISOTime `json:"expires_at"` - } `json:"object_attributes"` + EventName string `json:"event_name"` + ObjectKind string `json:"object_kind"` + Project ProjectResourceAccessTokenEventProject `json:"project"` + ObjectAttributes ProjectResourceAccessTokenEventObjectAttributes `json:"object_attributes"` +} + +type ProjectResourceAccessTokenEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + VisibilityLevel int64 `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CIConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` +} + +type ProjectResourceAccessTokenEventObjectAttributes struct { + ID int64 `json:"id"` + UserID int64 `json:"user_id"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + ExpiresAt *ISOTime `json:"expires_at"` } // PushEvent represents a 
push event. @@ -981,52 +1053,53 @@ type ProjectResourceAccessTokenEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#push-events type PushEvent struct { - ObjectKind string `json:"object_kind"` - EventName string `json:"event_name"` - Before string `json:"before"` - After string `json:"after"` - Ref string `json:"ref"` - RefProtected bool `json:"ref_protected"` - CheckoutSHA string `json:"checkout_sha"` - UserID int `json:"user_id"` - UserName string `json:"user_name"` - UserUsername string `json:"user_username"` - UserEmail string `json:"user_email"` - UserAvatar string `json:"user_avatar"` - ProjectID int `json:"project_id"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Repository *Repository `json:"repository"` - Commits []*struct { - ID string `json:"id"` - Message string `json:"message"` - Title string `json:"title"` - Timestamp *time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - Added []string `json:"added"` - Modified []string `json:"modified"` - Removed []string `json:"removed"` - } `json:"commits"` - TotalCommitsCount int `json:"total_commits_count"` + ObjectKind string `json:"object_kind"` + EventName string `json:"event_name"` + Before string `json:"before"` + After string `json:"after"` + Ref string `json:"ref"` + RefProtected bool 
`json:"ref_protected"` + CheckoutSHA string `json:"checkout_sha"` + UserID int64 `json:"user_id"` + UserName string `json:"user_name"` + UserUsername string `json:"user_username"` + UserEmail string `json:"user_email"` + UserAvatar string `json:"user_avatar"` + ProjectID int64 `json:"project_id"` + Project PushEventProject `json:"project"` + Repository *Repository `json:"repository"` + Commits []*PushEventCommit `json:"commits"` + TotalCommitsCount int64 `json:"total_commits_count"` +} + +type PushEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` +} + +type PushEventCommit struct { + ID string `json:"id"` + Message string `json:"message"` + Title string `json:"title"` + Timestamp *time.Time `json:"timestamp"` + URL string `json:"url"` + Author EventCommitAuthor `json:"author"` + Added []string `json:"added"` + Modified []string `json:"modified"` + Removed []string `json:"removed"` } // ReleaseEvent represents a release event @@ -1034,58 +1107,65 @@ type PushEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#release-events type ReleaseEvent struct { - ID int `json:"id"` - CreatedAt string `json:"created_at"` // Should be *time.Time (see Gitlab issue #21468) - Description string `json:"description"` - Name string `json:"name"` - Tag string `json:"tag"` - ReleasedAt string `json:"released_at"` // Should be *time.Time (see Gitlab issue #21468) - ObjectKind string `json:"object_kind"` - 
Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL *string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - CIConfigPath string `json:"ci_config_path"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - } `json:"project"` + ID int64 `json:"id"` + CreatedAt string `json:"created_at"` // Should be *time.Time (see Gitlab issue #21468) + Description string `json:"description"` + Name string `json:"name"` + Tag string `json:"tag"` + ReleasedAt string `json:"released_at"` // Should be *time.Time (see Gitlab issue #21468) + ObjectKind string `json:"object_kind"` + Project ReleaseEventProject `json:"project"` + URL string `json:"url"` + Action string `json:"action"` + Assets ReleaseEventAssets `json:"assets"` + Commit ReleaseEventCommit `json:"commit"` +} + +type ReleaseEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL *string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + VisibilityLevel int64 `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CIConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` +} + +type ReleaseEventAssets struct { + Count int64 `json:"count"` + Links []ReleaseEventAssetsLink `json:"links"` + Sources []ReleaseEventAssetsSource 
`json:"sources"` +} + +type ReleaseEventAssetsLink struct { + ID int64 `json:"id"` + External bool `json:"external"` + LinkType string `json:"link_type"` + Name string `json:"name"` + URL string `json:"url"` +} + +type ReleaseEventAssetsSource struct { + Format string `json:"format"` URL string `json:"url"` - Action string `json:"action"` - Assets struct { - Count int `json:"count"` - Links []struct { - ID int `json:"id"` - External bool `json:"external"` - LinkType string `json:"link_type"` - Name string `json:"name"` - URL string `json:"url"` - } `json:"links"` - Sources []struct { - Format string `json:"format"` - URL string `json:"url"` - } `json:"sources"` - } `json:"assets"` - Commit struct { - ID string `json:"id"` - Message string `json:"message"` - Title string `json:"title"` - Timestamp string `json:"timestamp"` // Should be *time.Time (see Gitlab issue #21468) - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - } `json:"commit"` +} + +type ReleaseEventCommit struct { + ID string `json:"id"` + Message string `json:"message"` + Title string `json:"title"` + Timestamp string `json:"timestamp"` // Should be *time.Time (see Gitlab issue #21468) + URL string `json:"url"` + Author EventCommitAuthor `json:"author"` } // SnippetCommentEvent represents a comment on a snippet event. 
@@ -1093,61 +1173,67 @@ type ReleaseEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#comment-on-a-code-snippet type SnippetCommentEvent struct { - ObjectKind string `json:"object_kind"` - EventType string `json:"event_type"` - User *EventUser `json:"user"` - ProjectID int `json:"project_id"` - Project struct { - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Repository *Repository `json:"repository"` - ObjectAttributes struct { - ID int `json:"id"` - Note string `json:"note"` - NoteableType string `json:"noteable_type"` - AuthorID int `json:"author_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ProjectID int `json:"project_id"` - Attachment string `json:"attachment"` - LineCode string `json:"line_code"` - CommitID string `json:"commit_id"` - NoteableID int `json:"noteable_id"` - System bool `json:"system"` - StDiff *Diff `json:"st_diff"` - Description string `json:"description"` - Action CommentEventAction `json:"action"` - URL string `json:"url"` - } `json:"object_attributes"` - Snippet *struct { - ID int `json:"id"` - Title string `json:"title"` - Content string `json:"content"` - AuthorID int `json:"author_id"` - ProjectID int `json:"project_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - Filename string `json:"file_name"` - ExpiresAt string `json:"expires_at"` - Type string `json:"type"` - VisibilityLevel int 
`json:"visibility_level"` - Description string `json:"description"` - Secret bool `json:"secret"` - RepositoryReadOnly bool `json:"repository_read_only"` - } `json:"snippet"` + ObjectKind string `json:"object_kind"` + EventType string `json:"event_type"` + User *EventUser `json:"user"` + ProjectID int64 `json:"project_id"` + Project SnippetCommentEventProject `json:"project"` + Repository *Repository `json:"repository"` + ObjectAttributes SnippetCommentEventObjectAttributes `json:"object_attributes"` + Snippet *SnippetCommentEventSnippet `json:"snippet"` +} + +type SnippetCommentEventProject struct { + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` +} + +type SnippetCommentEventObjectAttributes struct { + ID int64 `json:"id"` + Note string `json:"note"` + NoteableType string `json:"noteable_type"` + AuthorID int64 `json:"author_id"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ProjectID int64 `json:"project_id"` + Attachment string `json:"attachment"` + LineCode string `json:"line_code"` + CommitID string `json:"commit_id"` + NoteableID int64 `json:"noteable_id"` + System bool `json:"system"` + StDiff *Diff `json:"st_diff"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` +} + +type SnippetCommentEventSnippet struct { + ID int64 `json:"id"` + Title string `json:"title"` + Content string `json:"content"` + AuthorID int64 `json:"author_id"` + ProjectID int64 `json:"project_id"` + 
CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Filename string `json:"file_name"` + ExpiresAt string `json:"expires_at"` + Type string `json:"type"` + VisibilityLevel int64 `json:"visibility_level"` + Description string `json:"description"` + Secret bool `json:"secret"` + RepositoryReadOnly bool `json:"repository_read_only"` } // SubGroupEvent represents a subgroup event. @@ -1161,8 +1247,8 @@ type SubGroupEvent struct { Name string `json:"name"` Path string `json:"path"` FullPath string `json:"full_path"` - GroupID int `json:"group_id"` - ParentGroupID int `json:"parent_group_id"` + GroupID int64 `json:"group_id"` + ParentGroupID int64 `json:"parent_group_id"` ParentName string `json:"parent_name"` ParentPath string `json:"parent_path"` ParentFullPath string `json:"parent_full_path"` @@ -1173,52 +1259,53 @@ type SubGroupEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#tag-events type TagEvent struct { - ObjectKind string `json:"object_kind"` - EventName string `json:"event_name"` - Before string `json:"before"` - After string `json:"after"` - Ref string `json:"ref"` - CheckoutSHA string `json:"checkout_sha"` - UserID int `json:"user_id"` - UserName string `json:"user_name"` - UserUsername string `json:"user_username"` - UserAvatar string `json:"user_avatar"` - UserEmail string `json:"user_email"` - ProjectID int `json:"project_id"` - Message string `json:"message"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string 
`json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Repository *Repository `json:"repository"` - Commits []*struct { - ID string `json:"id"` - Message string `json:"message"` - Title string `json:"title"` - Timestamp *time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - Added []string `json:"added"` - Modified []string `json:"modified"` - Removed []string `json:"removed"` - } `json:"commits"` - TotalCommitsCount int `json:"total_commits_count"` + ObjectKind string `json:"object_kind"` + EventName string `json:"event_name"` + Before string `json:"before"` + After string `json:"after"` + Ref string `json:"ref"` + CheckoutSHA string `json:"checkout_sha"` + UserID int64 `json:"user_id"` + UserName string `json:"user_name"` + UserUsername string `json:"user_username"` + UserAvatar string `json:"user_avatar"` + UserEmail string `json:"user_email"` + ProjectID int64 `json:"project_id"` + Message string `json:"message"` + Project TagEventProject `json:"project"` + Repository *Repository `json:"repository"` + Commits []*TagEventCommit `json:"commits"` + TotalCommitsCount int64 `json:"total_commits_count"` +} + +type TagEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` +} + +type TagEventCommit struct { + ID string `json:"id"` + Message string `json:"message"` + Title string `json:"title"` + Timestamp *time.Time 
`json:"timestamp"` + URL string `json:"url"` + Author EventCommitAuthor `json:"author"` + Added []string `json:"added"` + Modified []string `json:"modified"` + Removed []string `json:"removed"` } // WikiPageEvent represents a wiki page event. @@ -1226,41 +1313,384 @@ type TagEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#wiki-page-events type WikiPageEvent struct { - ObjectKind string `json:"object_kind"` - User *EventUser `json:"user"` - Project struct { - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Wiki struct { - WebURL string `json:"web_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - } `json:"wiki"` - ObjectAttributes struct { - Title string `json:"title"` - Content string `json:"content"` - Format string `json:"format"` - Message string `json:"message"` - Slug string `json:"slug"` - URL string `json:"url"` - Action string `json:"action"` - DiffURL string `json:"diff_url"` - } `json:"object_attributes"` + ObjectKind string `json:"object_kind"` + User *EventUser `json:"user"` + Project WikiPageEventProject `json:"project"` + Wiki WikiPageEventWiki `json:"wiki"` + ObjectAttributes WikiPageEventObjectAttributes `json:"object_attributes"` +} + +type WikiPageEventProject struct { + Name string `json:"name"` + Description string `json:"description"` + 
AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` +} + +type WikiPageEventWiki struct { + WebURL string `json:"web_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` +} + +type WikiPageEventObjectAttributes struct { + Title string `json:"title"` + Content string `json:"content"` + Format string `json:"format"` + Message string `json:"message"` + Slug string `json:"slug"` + URL string `json:"url"` + Action string `json:"action"` + DiffURL string `json:"diff_url"` +} + +// EmojiEvent represents an emoji event. +// +// GitLab API docs: +// https://docs.gitlab.com/user/project/integrations/webhook_events/#emoji-events +type EmojiEvent struct { + ObjectKind string `json:"object_kind"` + EventType string `json:"event_type"` + User EventUser `json:"user"` + ProjectID int64 `json:"project_id"` + Project EmojiEventProject `json:"project"` + ObjectAttributes EmojiEventObjectAttributes `json:"object_attributes"` + Note *EmojiEventNote `json:"note,omitempty"` + Issue *EmojiEventIssue `json:"issue,omitempty"` + MergeRequest *EmojiEventMergeRequest `json:"merge_request,omitempty"` + ProjectSnippet *EmojiEventSnippet `json:"project_snippet,omitempty"` + Commit *EmojiEventCommit `json:"commit,omitempty"` +} + +// EmojiEventProject represents a project in an emoji event. 
+type EmojiEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + VisibilityLevel int64 `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CIConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` +} + +type EmojiEventObjectAttributes struct { + UserID int64 `json:"user_id"` + CreatedAt string `json:"created_at"` + ID int64 `json:"id"` + Name string `json:"name"` + AwardableType string `json:"awardable_type"` + AwardableID int64 `json:"awardable_id"` + UpdatedAt string `json:"updated_at"` + Action string `json:"action"` + AwardedOnURL string `json:"awarded_on_url"` +} + +type EmojiEventNote struct { + Attachment *string `json:"attachment"` + AuthorID int64 `json:"author_id"` + ChangePosition *NotePosition `json:"change_position"` + CommitID *string `json:"commit_id"` + CreatedAt string `json:"created_at"` + DiscussionID string `json:"discussion_id"` + ID int64 `json:"id"` + LineCode *string `json:"line_code"` + Note string `json:"note"` + NoteableID int64 `json:"noteable_id"` + NoteableType string `json:"noteable_type"` + OriginalPosition *NotePosition `json:"original_position"` + Position *NotePosition `json:"position"` + ProjectID int64 `json:"project_id"` + ResolvedAt *string `json:"resolved_at"` + ResolvedByID *int64 `json:"resolved_by_id"` + ResolvedByPush *bool `json:"resolved_by_push"` + StDiff *Diff `json:"st_diff"` + System bool `json:"system"` + Type *string `json:"type"` + UpdatedAt string `json:"updated_at"` + UpdatedByID *int64 `json:"updated_by_id"` + Description string `json:"description"` + URL string 
`json:"url"` +} + +type EmojiEventIssue struct { + ID int64 `json:"id"` + IID int64 `json:"iid"` + ProjectID int64 `json:"project_id"` + AuthorID int64 `json:"author_id"` + ClosedAt *string `json:"closed_at"` + Confidential bool `json:"confidential"` + CreatedAt string `json:"created_at"` + Description string `json:"description"` + DiscussionLocked *bool `json:"discussion_locked"` + DueDate *ISOTime `json:"due_date"` + LastEditedAt *string `json:"last_edited_at"` + LastEditedByID *int64 `json:"last_edited_by_id"` + MilestoneID *int64 `json:"milestone_id"` + MovedToID *int64 `json:"moved_to_id"` + DuplicatedToID *int64 `json:"duplicated_to_id"` + RelativePosition int64 `json:"relative_position"` + StateID StateID `json:"state_id"` + TimeEstimate int64 `json:"time_estimate"` + Title string `json:"title"` + UpdatedAt string `json:"updated_at"` + UpdatedByID *int64 `json:"updated_by_id"` + Weight *int64 `json:"weight"` + HealthStatus *string `json:"health_status"` + URL string `json:"url"` + TotalTimeSpent int64 `json:"total_time_spent"` + TimeChange int64 `json:"time_change"` + HumanTotalTimeSpent *string `json:"human_total_time_spent"` + HumanTimeChange *string `json:"human_time_change"` + HumanTimeEstimate *string `json:"human_time_estimate"` + AssigneeIDs []int64 `json:"assignee_ids"` + AssigneeID *int64 `json:"assignee_id"` + Labels []*EventLabel `json:"labels"` + State string `json:"state"` + Severity string `json:"severity"` +} + +// EmojiEventMergeRequest represents a merge request in an emoji event. 
+type EmojiEventMergeRequest struct { + ID int64 `json:"id"` + TargetBranch string `json:"target_branch"` + SourceBranch string `json:"source_branch"` + SourceProjectID int64 `json:"source_project_id"` + AuthorID int64 `json:"author_id"` + AssigneeID int64 `json:"assignee_id"` + AssigneeIDs []int64 `json:"assignee_ids"` + ReviewerIDs []int64 `json:"reviewer_ids"` + Title string `json:"title"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + MilestoneID int64 `json:"milestone_id"` + State string `json:"state"` + MergeStatus string `json:"merge_status"` + TargetProjectID int64 `json:"target_project_id"` + IID int64 `json:"iid"` + Description string `json:"description"` + Position int64 `json:"position"` + Labels []*EventLabel `json:"labels"` + LockedAt string `json:"locked_at"` + UpdatedByID int64 `json:"updated_by_id"` + MergeError string `json:"merge_error"` + MergeParams *MergeParams `json:"merge_params"` + MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"` + MergeUserID int64 `json:"merge_user_id"` + MergeCommitSHA string `json:"merge_commit_sha"` + DeletedAt string `json:"deleted_at"` + InProgressMergeCommitSHA string `json:"in_progress_merge_commit_sha"` + LockVersion int64 `json:"lock_version"` + ApprovalsBeforeMerge string `json:"approvals_before_merge"` + RebaseCommitSHA string `json:"rebase_commit_sha"` + TimeEstimate int64 `json:"time_estimate"` + Squash bool `json:"squash"` + LastEditedAt string `json:"last_edited_at"` + LastEditedByID int64 `json:"last_edited_by_id"` + Source *Repository `json:"source"` + Target *Repository `json:"target"` + LastCommit EventMergeRequestLastCommit `json:"last_commit"` + WorkInProgress bool `json:"work_in_progress"` + TotalTimeSpent int64 `json:"total_time_spent"` + HeadPipelineID int64 `json:"head_pipeline_id"` + Assignee *EventUser `json:"assignee"` + DetailedMergeStatus string `json:"detailed_merge_status"` + URL string `json:"url"` +} + +// EmojiEventSnippet represents a 
snippet in an emoji event. +type EmojiEventSnippet struct { + ID int64 `json:"id"` + Title string `json:"title"` + Content string `json:"content"` + AuthorID int64 `json:"author_id"` + ProjectID int64 `json:"project_id"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Filename string `json:"file_name"` + ExpiresAt string `json:"expires_at"` + Type string `json:"type"` + VisibilityLevel int64 `json:"visibility_level"` + Description string `json:"description"` + Secret bool `json:"secret"` + RepositoryReadOnly bool `json:"repository_read_only"` +} + +// EmojiEventCommit represents a commit in an emoji event. +type EmojiEventCommit struct { + ID string `json:"id"` + Title string `json:"title"` + Message string `json:"message"` + Timestamp *time.Time `json:"timestamp"` + URL string `json:"url"` + Author EventCommitAuthor `json:"author"` +} + +// MilestoneWebhookEvent represents a milestone webhook event. +// +// GitLab API docs: +// https://docs.gitlab.com/user/project/integrations/webhook_events/#milestone-events +type MilestoneWebhookEvent struct { + ObjectKind string `json:"object_kind"` + EventType string `json:"event_type"` + Project MilestoneEventProject `json:"project"` + Group *MilestoneEventGroup `json:"group,omitempty"` + ObjectAttributes MilestoneEventObjectAttributes `json:"object_attributes"` + Action string `json:"action"` +} + +type MilestoneEventObjectAttributes struct { + ID int64 `json:"id"` + IID int64 `json:"iid"` + Title string `json:"title"` + Description string `json:"description"` + State string `json:"state"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + DueDate *ISOTime `json:"due_date"` + StartDate *ISOTime `json:"start_date"` + GroupID *int64 `json:"group_id"` + ProjectID int64 `json:"project_id"` +} + +// MilestoneEventGroup represents a group in a milestone event. 
+type MilestoneEventGroup struct { + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + GroupPath string `json:"group_path"` + FullPath string `json:"full_path"` +} + +// MilestoneEventProject represents a project in a milestone event. +type MilestoneEventProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + VisibilityLevel int64 `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CIConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` +} + +// ProjectWebhookEvent represents a project webhook event for group webhooks. +// +// GitLab API docs: +// https://docs.gitlab.com/user/project/integrations/webhook_events/#project-events +type ProjectWebhookEvent struct { + EventName string `json:"event_name"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Name string `json:"name"` + Path string `json:"path"` + PathWithNamespace string `json:"path_with_namespace"` + ProjectID int64 `json:"project_id"` + ProjectNamespaceID int64 `json:"project_namespace_id"` + Owners []ProjectEventOwner `json:"owners"` + ProjectVisibility string `json:"project_visibility"` + OldPathWithNamespace string `json:"old_path_with_namespace,omitempty"` +} + +type ProjectEventOwner struct { + Name string `json:"name"` + Email string `json:"email"` +} + +// VulnerabilityEvent represents a vulnerability event. 
+// +// GitLab API docs: +// https://docs.gitlab.com/user/project/integrations/webhook_events/#vulnerability-events +type VulnerabilityEvent struct { + ObjectKind string `json:"object_kind"` + ObjectAttributes VulnerabilityEventObjectAttributes `json:"object_attributes"` +} + +type VulnerabilityEventObjectAttributes struct { + ID int64 `json:"id"` + URL string `json:"url"` + Title string `json:"title"` + State string `json:"state"` + ProjectID int64 `json:"project_id"` + Location VulnerabilityEventLocation `json:"location"` + CVSS []VulnerabilityEventCVSS `json:"cvss"` + Severity string `json:"severity"` + SeverityOverridden bool `json:"severity_overridden"` + Identifiers []VulnerabilityEventIdentifier `json:"identifiers"` + Issues []VulnerabilityEventIssue `json:"issues"` + ReportType string `json:"report_type"` + Confidence string `json:"confidence"` + ConfidenceOverridden bool `json:"confidence_overridden"` + ConfirmedAt string `json:"confirmed_at"` + ConfirmedByID int64 `json:"confirmed_by_id"` + DismissedAt string `json:"dismissed_at"` + DismissedByID int64 `json:"dismissed_by_id"` + ResolvedAt string `json:"resolved_at"` + ResolvedByID int64 `json:"resolved_by_id"` + AutoResolved bool `json:"auto_resolved"` + ResolvedOnDefaultBranch bool `json:"resolved_on_default_branch"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` +} + +type VulnerabilityEventLocation struct { + File string `json:"file"` + Dependency VulnerabilityEventLocationDependency `json:"dependency"` +} + +type VulnerabilityEventLocationDependency struct { + Package VulnerabilityEventLocationDependencyPackage `json:"package"` + Version string `json:"version"` +} + +type VulnerabilityEventLocationDependencyPackage struct { + Name string `json:"name"` +} + +type VulnerabilityEventCVSS struct { + Vector string `json:"vector"` + Vendor string `json:"vendor"` +} + +type VulnerabilityEventIdentifier struct { + Name string `json:"name"` + ExternalID string 
`json:"external_id"` + ExternalType string `json:"external_type"` + URL string `json:"url"` +} + +type VulnerabilityEventIssue struct { + Title string `json:"title"` + URL string `json:"url"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` } // EventLabel represents a label inside a webhook event. @@ -1268,14 +1698,14 @@ type WikiPageEvent struct { // GitLab API docs: // https://docs.gitlab.com/user/project/integrations/webhook_events/#work-item-events type EventLabel struct { - ID int `json:"id"` + ID int64 `json:"id"` Title string `json:"title"` Color string `json:"color"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` CreatedAt string `json:"created_at"` UpdatedAt string `json:"updated_at"` Template bool `json:"template"` Description string `json:"description"` Type string `json:"type"` - GroupID int `json:"group_id"` + GroupID int64 `json:"group_id"` } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/events.go b/vendor/gitlab.com/gitlab-org/api/client-go/events.go index 5107a5aa66..75df366580 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/events.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/events.go @@ -17,15 +17,23 @@ package gitlab import ( - "fmt" - "net/http" "time" ) type ( // EventsServiceInterface defines all the API methods for the EventsService EventsServiceInterface interface { + // ListCurrentUserContributionEvents retrieves all events + // for the currently authenticated user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/events/#list-all-events ListCurrentUserContributionEvents(opt *ListContributionEventsOptions, options ...RequestOptionFunc) ([]*ContributionEvent, *Response, error) + + // ListProjectVisibleEvents gets the events for the specified project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/events/#list-a-projects-visible-events ListProjectVisibleEvents(pid any, opt *ListProjectVisibleEventsOptions, options ...RequestOptionFunc) ([]*ProjectEvent, *Response, error) } @@ -45,35 +53,34 @@ var _ EventsServiceInterface = (*EventsService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/events/#get-user-contribution-events type ContributionEvent struct { - ID int `json:"id"` - Title string `json:"title"` - ProjectID int `json:"project_id"` - ActionName string `json:"action_name"` - TargetID int `json:"target_id"` - TargetIID int `json:"target_iid"` - TargetType string `json:"target_type"` - AuthorID int `json:"author_id"` - TargetTitle string `json:"target_title"` - CreatedAt *time.Time `json:"created_at"` - PushData struct { - CommitCount int `json:"commit_count"` - Action string `json:"action"` - RefType string `json:"ref_type"` - CommitFrom string `json:"commit_from"` - CommitTo string `json:"commit_to"` - Ref string `json:"ref"` - CommitTitle string `json:"commit_title"` - } `json:"push_data"` - Note *Note `json:"note"` - Author struct { - Name string `json:"name"` - Username string `json:"username"` - ID int `json:"id"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - } `json:"author"` - AuthorUsername string `json:"author_username"` + ID int64 `json:"id"` + Title string `json:"title"` + ProjectID int64 `json:"project_id"` + ActionName string `json:"action_name"` + TargetID int64 `json:"target_id"` + TargetIID int64 `json:"target_iid"` + TargetType string `json:"target_type"` + AuthorID int64 `json:"author_id"` + TargetTitle string `json:"target_title"` + CreatedAt *time.Time `json:"created_at"` + PushData ContributionEventPushData `json:"push_data"` + Note *Note `json:"note"` + Author BasicUser `json:"author"` + AuthorUsername string `json:"author_username"` +} + +// ContributionEventPushData represents a user's contribution push 
data. +// +// GitLab API docs: +// https://docs.gitlab.com/api/events/#get-contribution-events-for-a-user +type ContributionEventPushData struct { + CommitCount int64 `json:"commit_count"` + Action string `json:"action"` + RefType string `json:"ref_type"` + CommitFrom string `json:"commit_from"` + CommitTo string `json:"commit_to"` + Ref string `json:"ref"` + CommitTitle string `json:"commit_title"` } // ListContributionEventsOptions represents the options for GetUserContributionEvents @@ -87,125 +94,136 @@ type ListContributionEventsOptions struct { Before *ISOTime `url:"before,omitempty" json:"before,omitempty"` After *ISOTime `url:"after,omitempty" json:"after,omitempty"` Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Scope *string `url:"scope,omitempty" json:"scope,omitempty"` } -// ListUserContributionEvents retrieves user contribution events -// for the specified user, sorted from newest to oldest. -// -// GitLab API docs: -// https://docs.gitlab.com/api/events/#get-user-contribution-events func (s *UsersService) ListUserContributionEvents(uid any, opt *ListContributionEventsOptions, options ...RequestOptionFunc) ([]*ContributionEvent, *Response, error) { user, err := parseID(uid) if err != nil { return nil, nil, err } - u := fmt.Sprintf("users/%s/events", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var cs []*ContributionEvent - resp, err := s.client.Do(req, &cs) - if err != nil { - return nil, resp, err - } - return cs, resp, nil + return do[[]*ContributionEvent](s.client, + withPath("users/%s/events", user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// ListCurrentUserContributionEvents gets a list currently authenticated user's events -// -// GitLab API docs: https://docs.gitlab.com/api/events/#list-currently-authenticated-users-events func (s *EventsService) ListCurrentUserContributionEvents(opt *ListContributionEventsOptions, options 
...RequestOptionFunc) ([]*ContributionEvent, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "events", opt, options) - if err != nil { - return nil, nil, err - } - - var cs []*ContributionEvent - resp, err := s.client.Do(req, &cs) - if err != nil { - return nil, resp, err - } - - return cs, resp, nil + return do[[]*ContributionEvent](s.client, + withPath("events"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ProjectEvent represents a GitLab project event. // // GitLab API docs: -// https://docs.gitlab.com/api/events/#list-a-projects-visible-events +// https://docs.gitlab.com/api/events/#list-all-visible-events-for-a-project type ProjectEvent struct { - ID int `json:"id"` - Title string `json:"title"` - ProjectID int `json:"project_id"` - ActionName string `json:"action_name"` - TargetID int `json:"target_id"` - TargetIID int `json:"target_iid"` - TargetType string `json:"target_type"` - AuthorID int `json:"author_id"` - TargetTitle string `json:"target_title"` - CreatedAt string `json:"created_at"` - Author struct { - Name string `json:"name"` - Username string `json:"username"` - ID int `json:"id"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - } `json:"author"` - AuthorUsername string `json:"author_username"` - Data struct { - Before string `json:"before"` - After string `json:"after"` - Ref string `json:"ref"` - UserID int `json:"user_id"` - UserName string `json:"user_name"` - Repository *Repository `json:"repository"` - Commits []*Commit `json:"commits"` - TotalCommitsCount int `json:"total_commits_count"` - } `json:"data"` - Note struct { - ID int `json:"id"` - Body string `json:"body"` - Attachment string `json:"attachment"` - Author struct { - ID int `json:"id"` - Username string `json:"username"` - Email string `json:"email"` - Name string `json:"name"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - } 
`json:"author"` - CreatedAt *time.Time `json:"created_at"` - System bool `json:"system"` - NoteableID int `json:"noteable_id"` - NoteableType string `json:"noteable_type"` - NoteableIID int `json:"noteable_iid"` - } `json:"note"` - PushData struct { - CommitCount int `json:"commit_count"` - Action string `json:"action"` - RefType string `json:"ref_type"` - CommitFrom string `json:"commit_from"` - CommitTo string `json:"commit_to"` - Ref string `json:"ref"` - CommitTitle string `json:"commit_title"` - } `json:"push_data"` + ID int64 `json:"id"` + Title string `json:"title"` + ProjectID int64 `json:"project_id"` + ActionName string `json:"action_name"` + TargetID int64 `json:"target_id"` + TargetIID int64 `json:"target_iid"` + TargetType string `json:"target_type"` + AuthorID int64 `json:"author_id"` + TargetTitle string `json:"target_title"` + CreatedAt string `json:"created_at"` + Author BasicUser `json:"author"` + AuthorUsername string `json:"author_username"` + Data ProjectEventData `json:"data"` + Note ProjectEventNote `json:"note"` + PushData ProjectEventPushData `json:"push_data"` } func (s ProjectEvent) String() string { return Stringify(s) } +// ProjectEventData represents the GitLab project event data. +// +// GitLab API docs: +// https://docs.gitlab.com/api/events/#list-all-visible-events-for-a-project +type ProjectEventData struct { + Before string `json:"before"` + After string `json:"after"` + Ref string `json:"ref"` + UserID int64 `json:"user_id"` + UserName string `json:"user_name"` + Repository *Repository `json:"repository"` + Commits []*Commit `json:"commits"` + TotalCommitsCount int64 `json:"total_commits_count"` +} + +func (d ProjectEventData) String() string { + return Stringify(d) +} + +// ProjectEventNote represents a GitLab project event note. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/events/#list-all-visible-events-for-a-project +type ProjectEventNote struct { + ID int64 `json:"id"` + Body string `json:"body"` + Attachment string `json:"attachment"` + Author ProjectEventNoteAuthor `json:"author"` + CreatedAt *time.Time `json:"created_at"` + System bool `json:"system"` + NoteableID int64 `json:"noteable_id"` + NoteableType string `json:"noteable_type"` + NoteableIID int64 `json:"noteable_iid"` +} + +func (n ProjectEventNote) String() string { + return Stringify(n) +} + +// ProjectEventNoteAuthor represents a GitLab project event note author. +// +// GitLab API docs: +// https://docs.gitlab.com/api/events/#list-all-visible-events-for-a-project +type ProjectEventNoteAuthor struct { + ID int64 `json:"id"` + Username string `json:"username"` + Email string `json:"email"` + Name string `json:"name"` + State string `json:"state"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` +} + +func (a ProjectEventNoteAuthor) String() string { + return Stringify(a) +} + +// ProjectEventPushData represents a GitLab project event push data. +// +// GitLab API docs: +// https://docs.gitlab.com/api/events/#list-all-visible-events-for-a-project +type ProjectEventPushData struct { + CommitCount int64 `json:"commit_count"` + Action string `json:"action"` + RefType string `json:"ref_type"` + CommitFrom string `json:"commit_from"` + CommitTo string `json:"commit_to"` + Ref string `json:"ref"` + CommitTitle string `json:"commit_title"` +} + +func (d ProjectEventPushData) String() string { + return Stringify(d) +} + // ListProjectVisibleEventsOptions represents the available // ListProjectVisibleEvents() options. 
// // GitLab API docs: -// https://docs.gitlab.com/api/events/#list-a-projects-visible-events +// https://docs.gitlab.com/api/events/#list-all-visible-events-for-a-project type ListProjectVisibleEventsOptions struct { ListOptions Action *EventTypeValue `url:"action,omitempty" json:"action,omitempty"` @@ -215,27 +233,10 @@ type ListProjectVisibleEventsOptions struct { Sort *string `url:"sort,omitempty" json:"sort,omitempty"` } -// ListProjectVisibleEvents gets the events for the specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/events/#list-a-projects-visible-events func (s *EventsService) ListProjectVisibleEvents(pid any, opt *ListProjectVisibleEventsOptions, options ...RequestOptionFunc) ([]*ProjectEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/events", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*ProjectEvent - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[[]*ProjectEvent](s.client, + withPath("projects/%s/events", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/external_status_checks.go b/vendor/gitlab.com/gitlab-org/api/client-go/external_status_checks.go index aff042add7..227f17bfb7 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/external_status_checks.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/external_status_checks.go @@ -1,7 +1,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -9,28 +8,98 @@ import ( type ( // ExternalStatusChecksServiceInterface defines all the API methods for the ExternalStatusChecksService ExternalStatusChecksServiceInterface interface { + // CreateExternalStatusCheck creates an external status check. 
// Deprecated: to be removed in 1.0; use CreateProjectExternalStatusCheck instead + // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#create-external-status-check-service CreateExternalStatusCheck(pid any, opt *CreateExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) + + // DeleteExternalStatusCheck deletes an external status check. // Deprecated: to be removed in 1.0; use DeleteProjectExternalStatusCheck instead - DeleteExternalStatusCheck(pid any, check int, options ...RequestOptionFunc) (*Response, error) + // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#delete-external-status-check-service + DeleteExternalStatusCheck(pid any, check int64, options ...RequestOptionFunc) (*Response, error) + + // UpdateExternalStatusCheck updates an external status check. // Deprecated: to be removed in 1.0; use UpdateProjectExternalStatusCheck instead - UpdateExternalStatusCheck(pid any, check int, opt *UpdateExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) + // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#update-external-status-check-service + UpdateExternalStatusCheck(pid any, check int64, opt *UpdateExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) + + // ListMergeStatusChecks lists the external status checks that apply to it + // and their status for a single merge request. // Deprecated: to be removed in 1.0; use ListProjectMergeRequestExternalStatusChecks instead - ListMergeStatusChecks(pid any, mr int, opt *ListOptions, options ...RequestOptionFunc) ([]*MergeStatusCheck, *Response, error) + // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#list-status-checks-for-a-merge-request + ListMergeStatusChecks(pid any, mr int64, opt *ListOptions, options ...RequestOptionFunc) ([]*MergeStatusCheck, *Response, error) + + // ListProjectStatusChecks lists the project external status checks. 
// Deprecated: to be removed in 1.0; use ListProjectExternalStatusChecks instead + // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#get-project-external-status-check-services ListProjectStatusChecks(pid any, opt *ListOptions, options ...RequestOptionFunc) ([]*ProjectStatusCheck, *Response, error) + + // RetryFailedStatusCheckForAMergeRequest retries the specified failed external status check. // Deprecated: to be removed in 1.0; use RetryFailedExternalStatusCheckForProjectMergeRequest instead - RetryFailedStatusCheckForAMergeRequest(pid any, mergeRequest int, externalStatusCheck int, options ...RequestOptionFunc) (*Response, error) - // Deprecated: to be removed in 1.0; use SetProjectMergeRequestExternalStatusCheckStatus instead - SetExternalStatusCheckStatus(pid any, mergeRequest int, opt *SetExternalStatusCheckStatusOptions, options ...RequestOptionFunc) (*Response, error) + // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#retry-failed-status-check-for-a-merge-request + RetryFailedStatusCheckForAMergeRequest(pid any, mergeRequest int64, externalStatusCheck int64, options ...RequestOptionFunc) (*Response, error) - ListProjectMergeRequestExternalStatusChecks(pid any, mr int, opt *ListProjectMergeRequestExternalStatusChecksOptions, options ...RequestOptionFunc) ([]*MergeStatusCheck, *Response, error) + // SetExternalStatusCheckStatus sets the status of an external status check. + // Deprecated: to be removed in 1.0; use SetProjectMergeRequestExternalStatusCheckStatus instead + // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#set-status-of-an-external-status-check + SetExternalStatusCheckStatus(pid any, mergeRequest int64, opt *SetExternalStatusCheckStatusOptions, options ...RequestOptionFunc) (*Response, error) + + // ListProjectMergeRequestExternalStatusChecks lists the external status checks that apply to it + // and their status for a single merge request. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#list-status-checks-for-a-merge-request + ListProjectMergeRequestExternalStatusChecks(pid any, mr int64, opt *ListProjectMergeRequestExternalStatusChecksOptions, options ...RequestOptionFunc) ([]*MergeStatusCheck, *Response, error) + + // ListProjectExternalStatusChecks lists the project external status checks. + // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#get-project-external-status-check-services ListProjectExternalStatusChecks(pid any, opt *ListProjectExternalStatusChecksOptions, options ...RequestOptionFunc) ([]*ProjectStatusCheck, *Response, error) - RetryFailedExternalStatusCheckForProjectMergeRequest(pid any, mergeRequest int, externalStatusCheck int, opt *RetryFailedExternalStatusCheckForProjectMergeRequestOptions, options ...RequestOptionFunc) (*Response, error) + + // RetryFailedExternalStatusCheckForProjectMergeRequest retries the specified failed external status check. + // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#retry-failed-status-check-for-a-merge-request + RetryFailedExternalStatusCheckForProjectMergeRequest(pid any, mergeRequest int64, externalStatusCheck int64, opt *RetryFailedExternalStatusCheckForProjectMergeRequestOptions, options ...RequestOptionFunc) (*Response, error) + + // CreateProjectExternalStatusCheck creates an external status check. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#create-external-status-check-service CreateProjectExternalStatusCheck(pid any, opt *CreateProjectExternalStatusCheckOptions, options ...RequestOptionFunc) (*ProjectStatusCheck, *Response, error) - UpdateProjectExternalStatusCheck(pid any, check int, opt *UpdateProjectExternalStatusCheckOptions, options ...RequestOptionFunc) (*ProjectStatusCheck, *Response, error) - DeleteProjectExternalStatusCheck(pid any, check int, opt *DeleteProjectExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) - SetProjectMergeRequestExternalStatusCheckStatus(pid any, mergeRequest int, opt *SetProjectMergeRequestExternalStatusCheckStatusOptions, options ...RequestOptionFunc) (*Response, error) + + // UpdateProjectExternalStatusCheck updates an external status check. + // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#update-external-status-check-service + UpdateProjectExternalStatusCheck(pid any, check int64, opt *UpdateProjectExternalStatusCheckOptions, options ...RequestOptionFunc) (*ProjectStatusCheck, *Response, error) + + // DeleteProjectExternalStatusCheck deletes an external status check. + // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#delete-external-status-check-service + DeleteProjectExternalStatusCheck(pid any, check int64, opt *DeleteProjectExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) + + // SetProjectMergeRequestExternalStatusCheckStatus sets the status of an external status check. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/status_checks/#set-status-of-an-external-status-check + SetProjectMergeRequestExternalStatusCheckStatus(pid any, mergeRequest int64, opt *SetProjectMergeRequestExternalStatusCheckStatusOptions, options ...RequestOptionFunc) (*Response, error) } // ExternalStatusChecksService handles communication with the external @@ -45,24 +114,24 @@ type ( var _ ExternalStatusChecksServiceInterface = (*ExternalStatusChecksService)(nil) type MergeStatusCheck struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` ExternalURL string `json:"external_url"` Status string `json:"status"` } type ProjectStatusCheck struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` ExternalURL string `json:"external_url"` HMAC bool `json:"hmac"` ProtectedBranches []StatusCheckProtectedBranch `json:"protected_branches"` } type StatusCheckProtectedBranch struct { - ID int `json:"id"` - ProjectID int `json:"project_id"` + ID int64 `json:"id"` + ProjectID int64 `json:"project_id"` Name string `json:"name"` CreatedAt *time.Time `json:"created_at"` UpdatedAt *time.Time `json:"updated_at"` @@ -75,25 +144,12 @@ type StatusCheckProtectedBranch struct { // // GitLab API docs: // https://docs.gitlab.com/api/status_checks/#list-status-checks-for-a-merge-request -func (s *ExternalStatusChecksService) ListMergeStatusChecks(pid any, mr int, opt *ListOptions, options ...RequestOptionFunc) ([]*MergeStatusCheck, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/status_checks", PathEscape(project), mr) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var mscs []*MergeStatusCheck - resp, err := s.client.Do(req, &mscs) - if err != nil { - return nil, resp, err - } - - return mscs, 
resp, nil +func (s *ExternalStatusChecksService) ListMergeStatusChecks(pid any, mr int64, opt *ListOptions, options ...RequestOptionFunc) ([]*MergeStatusCheck, *Response, error) { + return do[[]*MergeStatusCheck](s.client, + withPath("projects/%s/merge_requests/%d/status_checks", ProjectID{pid}, mr), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // SetExternalStatusCheckStatusOptions represents the available @@ -104,7 +160,7 @@ func (s *ExternalStatusChecksService) ListMergeStatusChecks(pid any, mr int, opt // https://docs.gitlab.com/api/status_checks/#set-status-of-an-external-status-check type SetExternalStatusCheckStatusOptions struct { SHA *string `url:"sha,omitempty" json:"sha,omitempty"` - ExternalStatusCheckID *int `url:"external_status_check_id,omitempty" json:"external_status_check_id,omitempty"` + ExternalStatusCheckID *int64 `url:"external_status_check_id,omitempty" json:"external_status_check_id,omitempty"` Status *string `url:"status,omitempty" json:"status,omitempty"` } @@ -113,45 +169,22 @@ type SetExternalStatusCheckStatusOptions struct { // // Gitlab API docs: // https://docs.gitlab.com/api/status_checks/#set-status-of-an-external-status-check -func (s *ExternalStatusChecksService) SetExternalStatusCheckStatus(pid any, mergeRequest int, opt *SetExternalStatusCheckStatusOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/status_check_responses", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ExternalStatusChecksService) SetExternalStatusCheckStatus(pid any, mergeRequest int64, opt *SetExternalStatusCheckStatusOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + 
withPath("projects/%s/merge_requests/%d/status_check_responses", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } -// ListProjectStatusChecks lists the project external status checks. -// Deprecated: to be removed in 1.0; use ListProjectExternalStatusChecks instead -// -// GitLab API docs: -// https://docs.gitlab.com/api/status_checks/#get-project-external-status-check-services func (s *ExternalStatusChecksService) ListProjectStatusChecks(pid any, opt *ListOptions, options ...RequestOptionFunc) ([]*ProjectStatusCheck, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/external_status_checks", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pscs []*ProjectStatusCheck - resp, err := s.client.Do(req, &pscs) - if err != nil { - return nil, resp, err - } - - return pscs, resp, nil + return do[[]*ProjectStatusCheck](s.client, + withPath("projects/%s/external_status_checks", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateExternalStatusCheckOptions represents the available @@ -161,29 +194,19 @@ func (s *ExternalStatusChecksService) ListProjectStatusChecks(pid any, opt *List // GitLab API docs: // https://docs.gitlab.com/api/status_checks/#create-external-status-check-service type CreateExternalStatusCheckOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` - ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` + ProtectedBranchIDs *[]int64 `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` } 
-// CreateExternalStatusCheck creates an external status check. -// Deprecated: to be removed in 1.0; use CreateProjectExternalStatusCheck instead -// -// Gitlab API docs: -// https://docs.gitlab.com/api/status_checks/#create-external-status-check-service func (s *ExternalStatusChecksService) CreateExternalStatusCheck(pid any, opt *CreateExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/external_status_checks", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/external_status_checks", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // DeleteExternalStatusCheck deletes an external status check. @@ -191,19 +214,13 @@ func (s *ExternalStatusChecksService) CreateExternalStatusCheck(pid any, opt *Cr // // Gitlab API docs: // https://docs.gitlab.com/api/status_checks/#delete-external-status-check-service -func (s *ExternalStatusChecksService) DeleteExternalStatusCheck(pid any, check int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/external_status_checks/%d", PathEscape(project), check) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ExternalStatusChecksService) DeleteExternalStatusCheck(pid any, check int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/external_status_checks/%d", ProjectID{pid}, check), + withRequestOpts(options...), + ) + return resp, err } // 
UpdateExternalStatusCheckOptions represents the available @@ -213,9 +230,9 @@ func (s *ExternalStatusChecksService) DeleteExternalStatusCheck(pid any, check i // GitLab API docs: // https://docs.gitlab.com/api/status_checks/#update-external-status-check-service type UpdateExternalStatusCheckOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` - ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` + ProtectedBranchIDs *[]int64 `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` } // UpdateExternalStatusCheck updates an external status check. @@ -223,19 +240,14 @@ type UpdateExternalStatusCheckOptions struct { // // Gitlab API docs: // https://docs.gitlab.com/api/status_checks/#update-external-status-check-service -func (s *ExternalStatusChecksService) UpdateExternalStatusCheck(pid any, check int, opt *UpdateExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/external_status_checks/%d", PathEscape(project), check) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ExternalStatusChecksService) UpdateExternalStatusCheck(pid any, check int64, opt *UpdateExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/external_status_checks/%d", ProjectID{pid}, check), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // RetryFailedStatusCheckForAMergeRequest retries the specified failed external 
status check. @@ -243,19 +255,13 @@ func (s *ExternalStatusChecksService) UpdateExternalStatusCheck(pid any, check i // // Gitlab API docs: // https://docs.gitlab.com/api/status_checks/#retry-failed-status-check-for-a-merge-request -func (s *ExternalStatusChecksService) RetryFailedStatusCheckForAMergeRequest(pid any, mergeRequest int, externalStatusCheck int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/status_checks/%d/retry", PathEscape(project), mergeRequest, externalStatusCheck) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ExternalStatusChecksService) RetryFailedStatusCheckForAMergeRequest(pid any, mergeRequest int64, externalStatusCheck int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/status_checks/%d/retry", ProjectID{pid}, mergeRequest, externalStatusCheck), + withRequestOpts(options...), + ) + return resp, err } // ListProjectMergeRequestExternalStatusChecksOptions represents the available @@ -272,25 +278,12 @@ type ListProjectMergeRequestExternalStatusChecksOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/status_checks/#list-status-checks-for-a-merge-request -func (s *ExternalStatusChecksService) ListProjectMergeRequestExternalStatusChecks(pid any, mr int, opt *ListProjectMergeRequestExternalStatusChecksOptions, options ...RequestOptionFunc) ([]*MergeStatusCheck, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/status_checks", PathEscape(project), mr) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var mscs 
[]*MergeStatusCheck - resp, err := s.client.Do(req, &mscs) - if err != nil { - return nil, resp, err - } - - return mscs, resp, nil +func (s *ExternalStatusChecksService) ListProjectMergeRequestExternalStatusChecks(pid any, mr int64, opt *ListProjectMergeRequestExternalStatusChecksOptions, options ...RequestOptionFunc) ([]*MergeStatusCheck, *Response, error) { + return do[[]*MergeStatusCheck](s.client, + withPath("projects/%s/merge_requests/%d/status_checks", ProjectID{pid}, mr), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListProjectExternalStatusChecksOptions represents the available @@ -302,29 +295,12 @@ type ListProjectExternalStatusChecksOptions struct { ListOptions } -// ListProjectExternalStatusChecks lists the project external status checks. -// -// GitLab API docs: -// https://docs.gitlab.com/api/status_checks/#get-project-external-status-check-services func (s *ExternalStatusChecksService) ListProjectExternalStatusChecks(pid any, opt *ListProjectExternalStatusChecksOptions, options ...RequestOptionFunc) ([]*ProjectStatusCheck, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/external_status_checks", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pscs []*ProjectStatusCheck - resp, err := s.client.Do(req, &pscs) - if err != nil { - return nil, resp, err - } - - return pscs, resp, nil + return do[[]*ProjectStatusCheck](s.client, + withPath("projects/%s/external_status_checks", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateProjectExternalStatusCheckOptions represents the available @@ -333,35 +309,19 @@ func (s *ExternalStatusChecksService) ListProjectExternalStatusChecks(pid any, o // GitLab API docs: // https://docs.gitlab.com/api/status_checks/#create-external-status-check-service type CreateProjectExternalStatusCheckOptions struct { 
- Name *string `url:"name,omitempty" json:"name,omitempty"` - ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` - SharedSecret *string `url:"shared_secret,omitempty" json:"shared_secret,omitempty"` - ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` + SharedSecret *string `url:"shared_secret,omitempty" json:"shared_secret,omitempty"` + ProtectedBranchIDs *[]int64 `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` } -// CreateProjectExternalStatusCheck creates an external status check. -// -// Gitlab API docs: -// https://docs.gitlab.com/api/status_checks/#create-external-status-check-service func (s *ExternalStatusChecksService) CreateProjectExternalStatusCheck(pid any, opt *CreateProjectExternalStatusCheckOptions, options ...RequestOptionFunc) (*ProjectStatusCheck, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/external_status_checks", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - statusCheck := new(ProjectStatusCheck) - resp, err := s.client.Do(req, statusCheck) - if err != nil { - return nil, resp, err - } - - return statusCheck, resp, nil + return do[*ProjectStatusCheck](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/external_status_checks", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteProjectExternalStatusCheckOptions represents the available @@ -375,19 +335,14 @@ type DeleteProjectExternalStatusCheckOptions struct{} // // Gitlab API docs: // https://docs.gitlab.com/api/status_checks/#delete-external-status-check-service -func (s *ExternalStatusChecksService) 
DeleteProjectExternalStatusCheck(pid any, check int, opt *DeleteProjectExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/external_status_checks/%d", PathEscape(project), check) - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ExternalStatusChecksService) DeleteProjectExternalStatusCheck(pid any, check int64, opt *DeleteProjectExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/external_status_checks/%d", ProjectID{pid}, check), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // UpdateProjectExternalStatusCheckOptions represents the available @@ -396,35 +351,23 @@ func (s *ExternalStatusChecksService) DeleteProjectExternalStatusCheck(pid any, // GitLab API docs: // https://docs.gitlab.com/api/status_checks/#update-external-status-check-service type UpdateProjectExternalStatusCheckOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` - SharedSecret *string `url:"shared_secret,omitempty" json:"shared_secret,omitempty"` - ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` + SharedSecret *string `url:"shared_secret,omitempty" json:"shared_secret,omitempty"` + ProtectedBranchIDs *[]int64 `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` } // UpdateProjectExternalStatusCheck updates an external status check. 
// // Gitlab API docs: // https://docs.gitlab.com/api/status_checks/#update-external-status-check-service -func (s *ExternalStatusChecksService) UpdateProjectExternalStatusCheck(pid any, check int, opt *UpdateProjectExternalStatusCheckOptions, options ...RequestOptionFunc) (*ProjectStatusCheck, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/external_status_checks/%d", PathEscape(project), check) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - statusCheck := new(ProjectStatusCheck) - resp, err := s.client.Do(req, statusCheck) - if err != nil { - return nil, resp, err - } - - return statusCheck, resp, nil +func (s *ExternalStatusChecksService) UpdateProjectExternalStatusCheck(pid any, check int64, opt *UpdateProjectExternalStatusCheckOptions, options ...RequestOptionFunc) (*ProjectStatusCheck, *Response, error) { + return do[*ProjectStatusCheck](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/external_status_checks/%d", ProjectID{pid}, check), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RetryFailedExternalStatusCheckForProjectMergeRequestOptions represents the available @@ -438,19 +381,14 @@ type RetryFailedExternalStatusCheckForProjectMergeRequestOptions struct{} // // Gitlab API docs: // https://docs.gitlab.com/api/status_checks/#retry-failed-status-check-for-a-merge-request -func (s *ExternalStatusChecksService) RetryFailedExternalStatusCheckForProjectMergeRequest(pid any, mergeRequest int, externalStatusCheck int, opt *RetryFailedExternalStatusCheckForProjectMergeRequestOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/status_checks/%d/retry", PathEscape(project), mergeRequest, externalStatusCheck) - - req, err := s.client.NewRequest(http.MethodPost, u, 
opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ExternalStatusChecksService) RetryFailedExternalStatusCheckForProjectMergeRequest(pid any, mergeRequest int64, externalStatusCheck int64, opt *RetryFailedExternalStatusCheckForProjectMergeRequestOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/status_checks/%d/retry", ProjectID{pid}, mergeRequest, externalStatusCheck), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // SetProjectMergeRequestExternalStatusCheckStatusOptions represents the available @@ -460,7 +398,7 @@ func (s *ExternalStatusChecksService) RetryFailedExternalStatusCheckForProjectMe // https://docs.gitlab.com/api/status_checks/#set-status-of-an-external-status-check type SetProjectMergeRequestExternalStatusCheckStatusOptions struct { SHA *string `url:"sha,omitempty" json:"sha,omitempty"` - ExternalStatusCheckID *int `url:"external_status_check_id,omitempty" json:"external_status_check_id,omitempty"` + ExternalStatusCheckID *int64 `url:"external_status_check_id,omitempty" json:"external_status_check_id,omitempty"` Status *string `url:"status,omitempty" json:"status,omitempty"` } @@ -468,17 +406,12 @@ type SetProjectMergeRequestExternalStatusCheckStatusOptions struct { // // Gitlab API docs: // https://docs.gitlab.com/api/status_checks/#set-status-of-an-external-status-check -func (s *ExternalStatusChecksService) SetProjectMergeRequestExternalStatusCheckStatus(pid any, mergeRequest int, opt *SetProjectMergeRequestExternalStatusCheckStatusOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/status_check_responses", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - 
return nil, err - } - - return s.client.Do(req, nil) +func (s *ExternalStatusChecksService) SetProjectMergeRequestExternalStatusCheckStatus(pid any, mergeRequest int64, opt *SetProjectMergeRequestExternalStatusCheckStatusOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/status_check_responses", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/feature_flag_user_lists.go b/vendor/gitlab.com/gitlab-org/api/client-go/feature_flag_user_lists.go index 6208158c6d..e0445460f1 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/feature_flag_user_lists.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/feature_flag_user_lists.go @@ -15,7 +15,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -24,9 +23,9 @@ type ( FeatureFlagUserListsServiceInterface interface { ListFeatureFlagUserLists(pid any, opt *ListFeatureFlagUserListsOptions, options ...RequestOptionFunc) ([]*FeatureFlagUserList, *Response, error) CreateFeatureFlagUserList(pid any, opt *CreateFeatureFlagUserListOptions, options ...RequestOptionFunc) (*FeatureFlagUserList, *Response, error) - GetFeatureFlagUserList(pid any, iid int, options ...RequestOptionFunc) (*FeatureFlagUserList, *Response, error) - UpdateFeatureFlagUserList(pid any, iid int, opt *UpdateFeatureFlagUserListOptions, options ...RequestOptionFunc) (*FeatureFlagUserList, *Response, error) - DeleteFeatureFlagUserList(pid any, iid int, options ...RequestOptionFunc) (*Response, error) + GetFeatureFlagUserList(pid any, iid int64, options ...RequestOptionFunc) (*FeatureFlagUserList, *Response, error) + UpdateFeatureFlagUserList(pid any, iid int64, opt *UpdateFeatureFlagUserListOptions, options ...RequestOptionFunc) (*FeatureFlagUserList, *Response, error) + DeleteFeatureFlagUserList(pid any, iid int64, options 
...RequestOptionFunc) (*Response, error) } // FeatureFlagUserListsService handles communication with the feature flag @@ -46,9 +45,9 @@ var _ FeatureFlagUserListsServiceInterface = (*FeatureFlagUserListsService)(nil) type FeatureFlagUserList struct { Name string `url:"name" json:"name"` UserXIDs string `url:"user_xids" json:"user_xids"` - ID int `url:"id" json:"id"` - IID int `url:"iid" json:"iid"` - ProjectID int `url:"project_id" json:"project_id"` + ID int64 `url:"id" json:"id"` + IID int64 `url:"iid" json:"iid"` + ProjectID int64 `url:"project_id" json:"project_id"` CreatedAt *time.Time `url:"created_at" json:"created_at"` UpdatedAt *time.Time `url:"updated_at" json:"updated_at"` } @@ -69,24 +68,11 @@ type ListFeatureFlagUserListsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/feature_flag_user_lists/#list-all-feature-flag-user-lists-for-a-project func (s *FeatureFlagUserListsService) ListFeatureFlagUserLists(pid any, opt *ListFeatureFlagUserListsOptions, options ...RequestOptionFunc) ([]*FeatureFlagUserList, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags_user_lists", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var lists []*FeatureFlagUserList - resp, err := s.client.Do(req, &lists) - if err != nil { - return nil, resp, err - } - - return lists, resp, nil + return do[[]*FeatureFlagUserList](s.client, + withPath("projects/%s/feature_flags_user_lists", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateFeatureFlagUserListOptions represents the available @@ -104,49 +90,23 @@ type CreateFeatureFlagUserListOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/feature_flag_user_lists/#create-a-feature-flag-user-list func (s *FeatureFlagUserListsService) CreateFeatureFlagUserList(pid any, opt 
*CreateFeatureFlagUserListOptions, options ...RequestOptionFunc) (*FeatureFlagUserList, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags_user_lists", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - list := new(FeatureFlagUserList) - resp, err := s.client.Do(req, list) - if err != nil { - return nil, resp, err - } - - return list, resp, nil + return do[*FeatureFlagUserList](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/feature_flags_user_lists", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetFeatureFlagUserList gets a feature flag user list. // // GitLab API docs: // https://docs.gitlab.com/api/feature_flag_user_lists/#get-a-feature-flag-user-list -func (s *FeatureFlagUserListsService) GetFeatureFlagUserList(pid any, iid int, options ...RequestOptionFunc) (*FeatureFlagUserList, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags_user_lists/%d", PathEscape(project), iid) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - list := new(FeatureFlagUserList) - resp, err := s.client.Do(req, list) - if err != nil { - return nil, resp, err - } - - return list, resp, nil +func (s *FeatureFlagUserListsService) GetFeatureFlagUserList(pid any, iid int64, options ...RequestOptionFunc) (*FeatureFlagUserList, *Response, error) { + return do[*FeatureFlagUserList](s.client, + withPath("projects/%s/feature_flags_user_lists/%d", ProjectID{pid}, iid), + withRequestOpts(options...), + ) } // UpdateFeatureFlagUserListOptions represents the available @@ -163,42 +123,24 @@ type UpdateFeatureFlagUserListOptions struct { // // GitLab API docs: // 
https://docs.gitlab.com/api/feature_flag_user_lists/#update-a-feature-flag-user-list -func (s *FeatureFlagUserListsService) UpdateFeatureFlagUserList(pid any, iid int, opt *UpdateFeatureFlagUserListOptions, options ...RequestOptionFunc) (*FeatureFlagUserList, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags_user_lists/%d", PathEscape(project), iid) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - list := new(FeatureFlagUserList) - resp, err := s.client.Do(req, list) - if err != nil { - return nil, resp, err - } - - return list, resp, nil +func (s *FeatureFlagUserListsService) UpdateFeatureFlagUserList(pid any, iid int64, opt *UpdateFeatureFlagUserListOptions, options ...RequestOptionFunc) (*FeatureFlagUserList, *Response, error) { + return do[*FeatureFlagUserList](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/feature_flags_user_lists/%d", ProjectID{pid}, iid), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteFeatureFlagUserList deletes a feature flag user list. 
// // GitLab API docs: // https://docs.gitlab.com/api/feature_flag_user_lists/#delete-feature-flag-user-list -func (s *FeatureFlagUserListsService) DeleteFeatureFlagUserList(pid any, iid int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags_user_lists/%d", PathEscape(project), iid) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *FeatureFlagUserListsService) DeleteFeatureFlagUserList(pid any, iid int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/feature_flags_user_lists/%d", ProjectID{pid}, iid), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/feature_flags.go b/vendor/gitlab.com/gitlab-org/api/client-go/feature_flags.go index d0a5d84d71..9fa0d30a9a 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/feature_flags.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/feature_flags.go @@ -16,11 +16,7 @@ package gitlab -import ( - "fmt" - "net/http" - "net/url" -) +import "net/http" type ( // FeaturesServiceInterface defines all the API methods for the FeaturesService @@ -69,17 +65,10 @@ func (f Feature) String() string { // GitLab API docs: // https://docs.gitlab.com/api/features/#list-all-features func (s *FeaturesService) ListFeatures(options ...RequestOptionFunc) ([]*Feature, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "features", nil, options) - if err != nil { - return nil, nil, err - } - - var f []*Feature - resp, err := s.client.Do(req, &f) - if err != nil { - return nil, resp, err - } - return f, resp, nil + return do[[]*Feature](s.client, + withPath("features"), + withRequestOpts(options...), + ) } // FeatureDefinition represents a Feature 
Definition. @@ -106,17 +95,10 @@ func (fd FeatureDefinition) String() string { // GitLab API docs: // https://docs.gitlab.com/api/features/#list-all-feature-definitions func (s *FeaturesService) ListFeatureDefinitions(options ...RequestOptionFunc) ([]*FeatureDefinition, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "features/definitions", nil, options) - if err != nil { - return nil, nil, err - } - - var fd []*FeatureDefinition - resp, err := s.client.Do(req, &fd) - if err != nil { - return nil, resp, err - } - return fd, resp, nil + return do[[]*FeatureDefinition](s.client, + withPath("features/definitions"), + withRequestOpts(options...), + ) } // SetFeatureFlagOptions represents the available options for @@ -141,19 +123,12 @@ type SetFeatureFlagOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/features/#set-or-create-a-feature func (s *FeaturesService) SetFeatureFlag(name string, opt *SetFeatureFlagOptions, options ...RequestOptionFunc) (*Feature, *Response, error) { - u := fmt.Sprintf("features/%s", url.PathEscape(name)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - f := &Feature{} - resp, err := s.client.Do(req, f) - if err != nil { - return nil, resp, err - } - return f, resp, nil + return do[*Feature](s.client, + withMethod(http.MethodPost), + withPath("features/%s", name), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteFeatureFlag deletes a feature flag. 
@@ -161,12 +136,10 @@ func (s *FeaturesService) SetFeatureFlag(name string, opt *SetFeatureFlagOptions // GitLab API docs: // https://docs.gitlab.com/api/features/#delete-a-feature func (s *FeaturesService) DeleteFeatureFlag(name string, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("features/%s", url.PathEscape(name)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("features/%s", name), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/freeze_periods.go b/vendor/gitlab.com/gitlab-org/api/client-go/freeze_periods.go index 8662ac4df4..bb41500c7c 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/freeze_periods.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/freeze_periods.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -30,10 +29,10 @@ type ( // FreezePeriodsServiceInterface defines all the API methods for the FreezePeriodsService FreezePeriodsServiceInterface interface { ListFreezePeriods(pid any, opt *ListFreezePeriodsOptions, options ...RequestOptionFunc) ([]*FreezePeriod, *Response, error) - GetFreezePeriod(pid any, freezePeriod int, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) + GetFreezePeriod(pid any, freezePeriod int64, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) CreateFreezePeriodOptions(pid any, opt *CreateFreezePeriodOptions, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) - UpdateFreezePeriodOptions(pid any, freezePeriod int, opt *UpdateFreezePeriodOptions, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) - DeleteFreezePeriod(pid any, freezePeriod int, options ...RequestOptionFunc) (*Response, error) + UpdateFreezePeriodOptions(pid any, freezePeriod int64, opt *UpdateFreezePeriodOptions, 
options ...RequestOptionFunc) (*FreezePeriod, *Response, error) + DeleteFreezePeriod(pid any, freezePeriod int64, options ...RequestOptionFunc) (*Response, error) } // FreezePeriodsService handles the communication with the freeze periods @@ -52,7 +51,7 @@ var _ FreezePeriodsServiceInterface = (*FreezePeriodsService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/freeze_periods/#list-freeze-periods type FreezePeriod struct { - ID int `json:"id"` + ID int64 `json:"id"` FreezeStart string `json:"freeze_start"` FreezeEnd string `json:"freeze_end"` CronTimezone string `json:"cron_timezone"` @@ -65,56 +64,31 @@ type FreezePeriod struct { // // GitLab API docs: // https://docs.gitlab.com/api/freeze_periods/#list-freeze-periods -type ListFreezePeriodsOptions ListOptions +type ListFreezePeriodsOptions struct { + ListOptions +} // ListFreezePeriods gets a list of project freeze periods. // // GitLab API docs: // https://docs.gitlab.com/api/freeze_periods/#list-freeze-periods func (s *FreezePeriodsService) ListFreezePeriods(pid any, opt *ListFreezePeriodsOptions, options ...RequestOptionFunc) ([]*FreezePeriod, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/freeze_periods", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var fp []*FreezePeriod - resp, err := s.client.Do(req, &fp) - if err != nil { - return nil, resp, err - } - - return fp, resp, nil + return do[[]*FreezePeriod](s.client, + withPath("projects/%s/freeze_periods", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetFreezePeriod gets a specific freeze period for a project. 
// // GitLab API docs: // https://docs.gitlab.com/api/freeze_periods/#get-a-freeze-period-by-a-freeze_period_id -func (s *FreezePeriodsService) GetFreezePeriod(pid any, freezePeriod int, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/freeze_periods/%d", PathEscape(project), freezePeriod) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - fp := new(FreezePeriod) - resp, err := s.client.Do(req, fp) - if err != nil { - return nil, resp, err - } - - return fp, resp, nil +func (s *FreezePeriodsService) GetFreezePeriod(pid any, freezePeriod int64, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) { + return do[*FreezePeriod](s.client, + withPath("projects/%s/freeze_periods/%d", ProjectID{pid}, freezePeriod), + withRequestOpts(options...), + ) } // CreateFreezePeriodOptions represents the available CreateFreezePeriodOptions() @@ -133,24 +107,12 @@ type CreateFreezePeriodOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/freeze_periods/#create-a-freeze-period func (s *FreezePeriodsService) CreateFreezePeriodOptions(pid any, opt *CreateFreezePeriodOptions, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/freeze_periods", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - fp := new(FreezePeriod) - resp, err := s.client.Do(req, fp) - if err != nil { - return nil, resp, err - } - - return fp, resp, nil + return do[*FreezePeriod](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/freeze_periods", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateFreezePeriodOptions represents the available 
UpdateFreezePeriodOptions() @@ -168,25 +130,13 @@ type UpdateFreezePeriodOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/freeze_periods/#update-a-freeze-period -func (s *FreezePeriodsService) UpdateFreezePeriodOptions(pid any, freezePeriod int, opt *UpdateFreezePeriodOptions, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/freeze_periods/%d", PathEscape(project), freezePeriod) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - fp := new(FreezePeriod) - resp, err := s.client.Do(req, fp) - if err != nil { - return nil, resp, err - } - - return fp, resp, nil +func (s *FreezePeriodsService) UpdateFreezePeriodOptions(pid any, freezePeriod int64, opt *UpdateFreezePeriodOptions, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) { + return do[*FreezePeriod](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/freeze_periods/%d", ProjectID{pid}, freezePeriod), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteFreezePeriod removes a freeze period from a project. 
This is an @@ -195,17 +145,11 @@ func (s *FreezePeriodsService) UpdateFreezePeriodOptions(pid any, freezePeriod i // // GitLab API docs: // https://docs.gitlab.com/api/freeze_periods/#delete-a-freeze-period -func (s *FreezePeriodsService) DeleteFreezePeriod(pid any, freezePeriod int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/freeze_periods/%d", PathEscape(project), freezePeriod) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *FreezePeriodsService) DeleteFreezePeriod(pid any, freezePeriod int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/freeze_periods/%d", ProjectID{pid}, freezePeriod), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/generic_packages.go b/vendor/gitlab.com/gitlab-org/api/client-go/generic_packages.go index e80fb5bbd8..e9771e57b1 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/generic_packages.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/generic_packages.go @@ -49,27 +49,33 @@ var _ GenericPackagesServiceInterface = (*GenericPackagesService)(nil) // GitLab API docs: // https://docs.gitlab.com/user/packages/generic_packages/#publish-a-single-file type GenericPackagesFile struct { - ID int `json:"id"` - PackageID int `json:"package_id"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - Size int `json:"size"` - FileStore int `json:"file_store"` - FileMD5 string `json:"file_md5"` - FileSHA1 string `json:"file_sha1"` - FileName string `json:"file_name"` - File struct { - URL string `json:"url"` - } `json:"file"` - FileSHA256 string `json:"file_sha256"` - VerificationRetryAt *time.Time `json:"verification_retry_at"` - 
VerifiedAt *time.Time `json:"verified_at"` - VerificationFailure bool `json:"verification_failure"` - VerificationRetryCount int `json:"verification_retry_count"` - VerificationChecksum string `json:"verification_checksum"` - VerificationState int `json:"verification_state"` - VerificationStartedAt *time.Time `json:"verification_started_at"` - NewFilePath string `json:"new_file_path"` + ID int64 `json:"id"` + PackageID int64 `json:"package_id"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + Size int64 `json:"size"` + FileStore int64 `json:"file_store"` + FileMD5 string `json:"file_md5"` + FileSHA1 string `json:"file_sha1"` + FileName string `json:"file_name"` + File GenericPackagesFileURL `json:"file"` + FileSHA256 string `json:"file_sha256"` + VerificationRetryAt *time.Time `json:"verification_retry_at"` + VerifiedAt *time.Time `json:"verified_at"` + VerificationFailure bool `json:"verification_failure"` + VerificationRetryCount int64 `json:"verification_retry_count"` + VerificationChecksum string `json:"verification_checksum"` + VerificationState int64 `json:"verification_state"` + VerificationStartedAt *time.Time `json:"verification_started_at"` + NewFilePath string `json:"new_file_path"` +} + +// GenericPackagesFileURL represents a GitLab generic package file URL. +// +// GitLab API docs: +// https://docs.gitlab.com/user/packages/generic_packages/#publish-a-single-file +type GenericPackagesFileURL struct { + URL string `json:"url"` } // FormatPackageURL returns the GitLab Package Registry URL for the given artifact metadata, without the BaseURL. 
@@ -142,28 +148,12 @@ func (s *GenericPackagesService) PublishPackageFile(pid any, packageName, packag // GitLab docs: // https://docs.gitlab.com/user/packages/generic_packages/#download-a-single-file func (s *GenericPackagesService) DownloadPackageFile(pid any, packageName, packageVersion, fileName string, options ...RequestOptionFunc) ([]byte, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/packages/generic/%s/%s/%s", - PathEscape(project), - PathEscape(packageName), - PathEscape(packageVersion), - PathEscape(fileName), + buf, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/packages/generic/%s/%s/%s", ProjectID{pid}, packageName, packageVersion, fileName), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var f bytes.Buffer - resp, err := s.client.Do(req, &f) if err != nil { return nil, resp, err } - - return f.Bytes(), resp, err + return buf.Bytes(), resp, nil } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/geo_nodes.go b/vendor/gitlab.com/gitlab-org/api/client-go/geo_nodes.go index 124cc83127..670343a1a5 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/geo_nodes.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/geo_nodes.go @@ -16,10 +16,7 @@ package gitlab -import ( - "fmt" - "net/http" -) +import "net/http" type ( // Deprecated: will be removed in v5 of the API, use Geo Sites API instead @@ -29,17 +26,17 @@ type ( // Deprecated: will be removed in v5 of the API, use Geo Sites API instead ListGeoNodes(*ListGeoNodesOptions, ...RequestOptionFunc) ([]*GeoNode, *Response, error) // Deprecated: will be removed in v5 of the API, use Geo Sites API instead - GetGeoNode(int, ...RequestOptionFunc) (*GeoNode, *Response, error) + GetGeoNode(int64, ...RequestOptionFunc) (*GeoNode, *Response, error) // Deprecated: will be removed in v5 of the API, use Geo 
Sites API instead - EditGeoNode(int, *UpdateGeoNodesOptions, ...RequestOptionFunc) (*GeoNode, *Response, error) + EditGeoNode(int64, *UpdateGeoNodesOptions, ...RequestOptionFunc) (*GeoNode, *Response, error) // Deprecated: will be removed in v5 of the API, use Geo Sites API instead - DeleteGeoNode(int, ...RequestOptionFunc) (*Response, error) + DeleteGeoNode(int64, ...RequestOptionFunc) (*Response, error) // Deprecated: will be removed in v5 of the API, use Geo Sites API instead - RepairGeoNode(int, ...RequestOptionFunc) (*GeoNode, *Response, error) + RepairGeoNode(int64, ...RequestOptionFunc) (*GeoNode, *Response, error) // Deprecated: will be removed in v5 of the API, use Geo Sites API instead RetrieveStatusOfAllGeoNodes(...RequestOptionFunc) ([]*GeoNodeStatus, *Response, error) // Deprecated: will be removed in v5 of the API, use Geo Sites API instead - RetrieveStatusOfGeoNode(int, ...RequestOptionFunc) (*GeoNodeStatus, *Response, error) + RetrieveStatusOfGeoNode(int64, ...RequestOptionFunc) (*GeoNodeStatus, *Response, error) } // GeoNodesService handles communication with Geo Nodes related methods @@ -60,21 +57,21 @@ var _ GeoNodesServiceInterface = (*GeoNodesService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/geo_nodes/ type GeoNode struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` URL string `json:"url"` InternalURL string `json:"internal_url"` Primary bool `json:"primary"` Enabled bool `json:"enabled"` Current bool `json:"current"` - FilesMaxCapacity int `json:"files_max_capacity"` - ReposMaxCapacity int `json:"repos_max_capacity"` - VerificationMaxCapacity int `json:"verification_max_capacity"` + FilesMaxCapacity int64 `json:"files_max_capacity"` + ReposMaxCapacity int64 `json:"repos_max_capacity"` + VerificationMaxCapacity int64 `json:"verification_max_capacity"` SelectiveSyncType string `json:"selective_sync_type"` SelectiveSyncShards []string `json:"selective_sync_shards"` - SelectiveSyncNamespaceIds []int 
`json:"selective_sync_namespace_ids"` - MinimumReverificationInterval int `json:"minimum_reverification_interval"` - ContainerRepositoriesMaxCapacity int `json:"container_repositories_max_capacity"` + SelectiveSyncNamespaceIDs []int64 `json:"selective_sync_namespace_ids"` + MinimumReverificationInterval int64 `json:"minimum_reverification_interval"` + ContainerRepositoriesMaxCapacity int64 `json:"container_repositories_max_capacity"` SyncObjectStorage bool `json:"sync_object_storage"` CloneProtocol string `json:"clone_protocol"` WebEditURL string `json:"web_edit_url"` @@ -103,15 +100,15 @@ type CreateGeoNodesOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` URL *string `url:"url,omitempty" json:"url,omitempty"` InternalURL *string `url:"internal_url,omitempty" json:"internal_url,omitempty"` - FilesMaxCapacity *int `url:"files_max_capacity,omitempty" json:"files_max_capacity,omitempty"` - ReposMaxCapacity *int `url:"repos_max_capacity,omitempty" json:"repos_max_capacity,omitempty"` - VerificationMaxCapacity *int `url:"verification_max_capacity,omitempty" json:"verification_max_capacity,omitempty"` - ContainerRepositoriesMaxCapacity *int `url:"container_repositories_max_capacity,omitempty" json:"container_repositories_max_capacity,omitempty"` + FilesMaxCapacity *int64 `url:"files_max_capacity,omitempty" json:"files_max_capacity,omitempty"` + ReposMaxCapacity *int64 `url:"repos_max_capacity,omitempty" json:"repos_max_capacity,omitempty"` + VerificationMaxCapacity *int64 `url:"verification_max_capacity,omitempty" json:"verification_max_capacity,omitempty"` + ContainerRepositoriesMaxCapacity *int64 `url:"container_repositories_max_capacity,omitempty" json:"container_repositories_max_capacity,omitempty"` SyncObjectStorage *bool `url:"sync_object_storage,omitempty" json:"sync_object_storage,omitempty"` SelectiveSyncType *string `url:"selective_sync_type,omitempty" json:"selective_sync_type,omitempty"` SelectiveSyncShards *[]string 
`url:"selective_sync_shards,omitempty" json:"selective_sync_shards,omitempty"` - SelectiveSyncNamespaceIds *[]int `url:"selective_sync_namespace_ids,omitempty" json:"selective_sync_namespace_ids,omitempty"` - MinimumReverificationInterval *int `url:"minimum_reverification_interval,omitempty" json:"minimum_reverification_interval,omitempty"` + SelectiveSyncNamespaceIDs *[]int64 `url:"selective_sync_namespace_ids,omitempty" json:"selective_sync_namespace_ids,omitempty"` + MinimumReverificationInterval *int64 `url:"minimum_reverification_interval,omitempty" json:"minimum_reverification_interval,omitempty"` } // CreateGeoNode creates a new Geo Node. @@ -120,18 +117,12 @@ type CreateGeoNodesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/geo_nodes/#create-a-new-geo-node func (s *GeoNodesService) CreateGeoNode(opt *CreateGeoNodesOptions, options ...RequestOptionFunc) (*GeoNode, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "geo_nodes", opt, options) - if err != nil { - return nil, nil, err - } - - g := new(GeoNode) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil + return do[*GeoNode](s.client, + withMethod(http.MethodPost), + withPath("geo_nodes"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListGeoNodesOptions represents the available ListGeoNodes() options. @@ -139,7 +130,9 @@ func (s *GeoNodesService) CreateGeoNode(opt *CreateGeoNodesOptions, options ...R // // GitLab API docs: // https://docs.gitlab.com/api/geo_nodes/#retrieve-configuration-about-all-geo-nodes -type ListGeoNodesOptions ListOptions +type ListGeoNodesOptions struct { + ListOptions +} // ListGeoNodes gets a list of geo nodes. 
// Deprecated: will be removed in v5 of the API, use Geo Sites API instead @@ -147,18 +140,11 @@ type ListGeoNodesOptions ListOptions // GitLab API docs: // https://docs.gitlab.com/api/geo_nodes/#retrieve-configuration-about-all-geo-nodes func (s *GeoNodesService) ListGeoNodes(opt *ListGeoNodesOptions, options ...RequestOptionFunc) ([]*GeoNode, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "geo_nodes", opt, options) - if err != nil { - return nil, nil, err - } - - var gs []*GeoNode - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil + return do[[]*GeoNode](s.client, + withPath("geo_nodes"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGeoNode gets a specific geo node. @@ -166,21 +152,11 @@ func (s *GeoNodesService) ListGeoNodes(opt *ListGeoNodesOptions, options ...Requ // // GitLab API docs: // https://docs.gitlab.com/api/geo_nodes/#retrieve-configuration-about-a-specific-geo-node -func (s *GeoNodesService) GetGeoNode(id int, options ...RequestOptionFunc) (*GeoNode, *Response, error) { - u := fmt.Sprintf("geo_nodes/%d", id) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - g := new(GeoNode) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil +func (s *GeoNodesService) GetGeoNode(id int64, options ...RequestOptionFunc) (*GeoNode, *Response, error) { + return do[*GeoNode](s.client, + withPath("geo_nodes/%d", id), + withRequestOpts(options...), + ) } // UpdateGeoNodesOptions represents the available EditGeoNode() options. 
@@ -189,20 +165,20 @@ func (s *GeoNodesService) GetGeoNode(id int, options ...RequestOptionFunc) (*Geo // GitLab API docs: // https://docs.gitlab.com/api/geo_nodes/#edit-a-geo-node type UpdateGeoNodesOptions struct { - ID *int `url:"primary,omitempty" json:"primary,omitempty"` + ID *int64 `url:"primary,omitempty" json:"primary,omitempty"` Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"` Name *string `url:"name,omitempty" json:"name,omitempty"` URL *string `url:"url,omitempty" json:"url,omitempty"` InternalURL *string `url:"internal_url,omitempty" json:"internal_url,omitempty"` - FilesMaxCapacity *int `url:"files_max_capacity,omitempty" json:"files_max_capacity,omitempty"` - ReposMaxCapacity *int `url:"repos_max_capacity,omitempty" json:"repos_max_capacity,omitempty"` - VerificationMaxCapacity *int `url:"verification_max_capacity,omitempty" json:"verification_max_capacity,omitempty"` - ContainerRepositoriesMaxCapacity *int `url:"container_repositories_max_capacity,omitempty" json:"container_repositories_max_capacity,omitempty"` + FilesMaxCapacity *int64 `url:"files_max_capacity,omitempty" json:"files_max_capacity,omitempty"` + ReposMaxCapacity *int64 `url:"repos_max_capacity,omitempty" json:"repos_max_capacity,omitempty"` + VerificationMaxCapacity *int64 `url:"verification_max_capacity,omitempty" json:"verification_max_capacity,omitempty"` + ContainerRepositoriesMaxCapacity *int64 `url:"container_repositories_max_capacity,omitempty" json:"container_repositories_max_capacity,omitempty"` SyncObjectStorage *bool `url:"sync_object_storage,omitempty" json:"sync_object_storage,omitempty"` SelectiveSyncType *string `url:"selective_sync_type,omitempty" json:"selective_sync_type,omitempty"` SelectiveSyncShards *[]string `url:"selective_sync_shards,omitempty" json:"selective_sync_shards,omitempty"` - SelectiveSyncNamespaceIds *[]int `url:"selective_sync_namespace_ids,omitempty" json:"selective_sync_namespace_ids,omitempty"` - MinimumReverificationInterval *int 
`url:"minimum_reverification_interval,omitempty" json:"minimum_reverification_interval,omitempty"` + SelectiveSyncNamespaceIDs *[]int64 `url:"selective_sync_namespace_ids,omitempty" json:"selective_sync_namespace_ids,omitempty"` + MinimumReverificationInterval *int64 `url:"minimum_reverification_interval,omitempty" json:"minimum_reverification_interval,omitempty"` } // EditGeoNode updates settings of an existing Geo node. @@ -210,21 +186,13 @@ type UpdateGeoNodesOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/geo_nodes/#edit-a-geo-node -func (s *GeoNodesService) EditGeoNode(id int, opt *UpdateGeoNodesOptions, options ...RequestOptionFunc) (*GeoNode, *Response, error) { - u := fmt.Sprintf("geo_nodes/%d", id) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - g := new(GeoNode) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil +func (s *GeoNodesService) EditGeoNode(id int64, opt *UpdateGeoNodesOptions, options ...RequestOptionFunc) (*GeoNode, *Response, error) { + return do[*GeoNode](s.client, + withMethod(http.MethodPut), + withPath("geo_nodes/%d", id), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteGeoNode removes the Geo node. 
@@ -232,15 +200,13 @@ func (s *GeoNodesService) EditGeoNode(id int, opt *UpdateGeoNodesOptions, option // // GitLab API docs: // https://docs.gitlab.com/api/geo_nodes/#delete-a-geo-node -func (s *GeoNodesService) DeleteGeoNode(id int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("geo_nodes/%d", id) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GeoNodesService) DeleteGeoNode(id int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("geo_nodes/%d", id), + withRequestOpts(options...), + ) + return resp, err } // RepairGeoNode to repair the OAuth authentication of a Geo node. @@ -248,21 +214,12 @@ func (s *GeoNodesService) DeleteGeoNode(id int, options ...RequestOptionFunc) (* // // GitLab API docs: // https://docs.gitlab.com/api/geo_nodes/#repair-a-geo-node -func (s *GeoNodesService) RepairGeoNode(id int, options ...RequestOptionFunc) (*GeoNode, *Response, error) { - u := fmt.Sprintf("geo_nodes/%d/repair", id) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - g := new(GeoNode) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil +func (s *GeoNodesService) RepairGeoNode(id int64, options ...RequestOptionFunc) (*GeoNode, *Response, error) { + return do[*GeoNode](s.client, + withMethod(http.MethodPost), + withPath("geo_nodes/%d/repair", id), + withRequestOpts(options...), + ) } // GeoNodeStatus represents the status of Geo Node. 
@@ -271,162 +228,162 @@ func (s *GeoNodesService) RepairGeoNode(id int, options ...RequestOptionFunc) (* // GitLab API docs: // https://docs.gitlab.com/api/geo_nodes/#retrieve-status-about-all-geo-nodes type GeoNodeStatus struct { - GeoNodeID int `json:"geo_node_id"` + GeoNodeID int64 `json:"geo_node_id"` Healthy bool `json:"healthy"` Health string `json:"health"` HealthStatus string `json:"health_status"` MissingOauthApplication bool `json:"missing_oauth_application"` - AttachmentsCount int `json:"attachments_count"` - AttachmentsSyncedCount int `json:"attachments_synced_count"` - AttachmentsFailedCount int `json:"attachments_failed_count"` - AttachmentsSyncedMissingOnPrimaryCount int `json:"attachments_synced_missing_on_primary_count"` + AttachmentsCount int64 `json:"attachments_count"` + AttachmentsSyncedCount int64 `json:"attachments_synced_count"` + AttachmentsFailedCount int64 `json:"attachments_failed_count"` + AttachmentsSyncedMissingOnPrimaryCount int64 `json:"attachments_synced_missing_on_primary_count"` AttachmentsSyncedInPercentage string `json:"attachments_synced_in_percentage"` - DbReplicationLagSeconds int `json:"db_replication_lag_seconds"` - LfsObjectsCount int `json:"lfs_objects_count"` - LfsObjectsSyncedCount int `json:"lfs_objects_synced_count"` - LfsObjectsFailedCount int `json:"lfs_objects_failed_count"` - LfsObjectsSyncedMissingOnPrimaryCount int `json:"lfs_objects_synced_missing_on_primary_count"` + DbReplicationLagSeconds int64 `json:"db_replication_lag_seconds"` + LfsObjectsCount int64 `json:"lfs_objects_count"` + LfsObjectsSyncedCount int64 `json:"lfs_objects_synced_count"` + LfsObjectsFailedCount int64 `json:"lfs_objects_failed_count"` + LfsObjectsSyncedMissingOnPrimaryCount int64 `json:"lfs_objects_synced_missing_on_primary_count"` LfsObjectsSyncedInPercentage string `json:"lfs_objects_synced_in_percentage"` - JobArtifactsCount int `json:"job_artifacts_count"` - JobArtifactsSyncedCount int `json:"job_artifacts_synced_count"` - 
JobArtifactsFailedCount int `json:"job_artifacts_failed_count"` - JobArtifactsSyncedMissingOnPrimaryCount int `json:"job_artifacts_synced_missing_on_primary_count"` + JobArtifactsCount int64 `json:"job_artifacts_count"` + JobArtifactsSyncedCount int64 `json:"job_artifacts_synced_count"` + JobArtifactsFailedCount int64 `json:"job_artifacts_failed_count"` + JobArtifactsSyncedMissingOnPrimaryCount int64 `json:"job_artifacts_synced_missing_on_primary_count"` JobArtifactsSyncedInPercentage string `json:"job_artifacts_synced_in_percentage"` - ContainerRepositoriesCount int `json:"container_repositories_count"` - ContainerRepositoriesSyncedCount int `json:"container_repositories_synced_count"` - ContainerRepositoriesFailedCount int `json:"container_repositories_failed_count"` + ContainerRepositoriesCount int64 `json:"container_repositories_count"` + ContainerRepositoriesSyncedCount int64 `json:"container_repositories_synced_count"` + ContainerRepositoriesFailedCount int64 `json:"container_repositories_failed_count"` ContainerRepositoriesSyncedInPercentage string `json:"container_repositories_synced_in_percentage"` - DesignRepositoriesCount int `json:"design_repositories_count"` - DesignRepositoriesSyncedCount int `json:"design_repositories_synced_count"` - DesignRepositoriesFailedCount int `json:"design_repositories_failed_count"` + DesignRepositoriesCount int64 `json:"design_repositories_count"` + DesignRepositoriesSyncedCount int64 `json:"design_repositories_synced_count"` + DesignRepositoriesFailedCount int64 `json:"design_repositories_failed_count"` DesignRepositoriesSyncedInPercentage string `json:"design_repositories_synced_in_percentage"` - ProjectsCount int `json:"projects_count"` - RepositoriesCount int `json:"repositories_count"` - RepositoriesFailedCount int `json:"repositories_failed_count"` - RepositoriesSyncedCount int `json:"repositories_synced_count"` + ProjectsCount int64 `json:"projects_count"` + RepositoriesCount int64 `json:"repositories_count"` + 
RepositoriesFailedCount int64 `json:"repositories_failed_count"` + RepositoriesSyncedCount int64 `json:"repositories_synced_count"` RepositoriesSyncedInPercentage string `json:"repositories_synced_in_percentage"` - WikisCount int `json:"wikis_count"` - WikisFailedCount int `json:"wikis_failed_count"` - WikisSyncedCount int `json:"wikis_synced_count"` + WikisCount int64 `json:"wikis_count"` + WikisFailedCount int64 `json:"wikis_failed_count"` + WikisSyncedCount int64 `json:"wikis_synced_count"` WikisSyncedInPercentage string `json:"wikis_synced_in_percentage"` - ReplicationSlotsCount int `json:"replication_slots_count"` - ReplicationSlotsUsedCount int `json:"replication_slots_used_count"` + ReplicationSlotsCount int64 `json:"replication_slots_count"` + ReplicationSlotsUsedCount int64 `json:"replication_slots_used_count"` ReplicationSlotsUsedInPercentage string `json:"replication_slots_used_in_percentage"` - ReplicationSlotsMaxRetainedWalBytes int `json:"replication_slots_max_retained_wal_bytes"` - RepositoriesCheckedCount int `json:"repositories_checked_count"` - RepositoriesCheckedFailedCount int `json:"repositories_checked_failed_count"` + ReplicationSlotsMaxRetainedWalBytes int64 `json:"replication_slots_max_retained_wal_bytes"` + RepositoriesCheckedCount int64 `json:"repositories_checked_count"` + RepositoriesCheckedFailedCount int64 `json:"repositories_checked_failed_count"` RepositoriesCheckedInPercentage string `json:"repositories_checked_in_percentage"` - RepositoriesChecksummedCount int `json:"repositories_checksummed_count"` - RepositoriesChecksumFailedCount int `json:"repositories_checksum_failed_count"` + RepositoriesChecksummedCount int64 `json:"repositories_checksummed_count"` + RepositoriesChecksumFailedCount int64 `json:"repositories_checksum_failed_count"` RepositoriesChecksummedInPercentage string `json:"repositories_checksummed_in_percentage"` - WikisChecksummedCount int `json:"wikis_checksummed_count"` - WikisChecksumFailedCount int 
`json:"wikis_checksum_failed_count"` + WikisChecksummedCount int64 `json:"wikis_checksummed_count"` + WikisChecksumFailedCount int64 `json:"wikis_checksum_failed_count"` WikisChecksummedInPercentage string `json:"wikis_checksummed_in_percentage"` - RepositoriesVerifiedCount int `json:"repositories_verified_count"` - RepositoriesVerificationFailedCount int `json:"repositories_verification_failed_count"` + RepositoriesVerifiedCount int64 `json:"repositories_verified_count"` + RepositoriesVerificationFailedCount int64 `json:"repositories_verification_failed_count"` RepositoriesVerifiedInPercentage string `json:"repositories_verified_in_percentage"` - RepositoriesChecksumMismatchCount int `json:"repositories_checksum_mismatch_count"` - WikisVerifiedCount int `json:"wikis_verified_count"` - WikisVerificationFailedCount int `json:"wikis_verification_failed_count"` + RepositoriesChecksumMismatchCount int64 `json:"repositories_checksum_mismatch_count"` + WikisVerifiedCount int64 `json:"wikis_verified_count"` + WikisVerificationFailedCount int64 `json:"wikis_verification_failed_count"` WikisVerifiedInPercentage string `json:"wikis_verified_in_percentage"` - WikisChecksumMismatchCount int `json:"wikis_checksum_mismatch_count"` - RepositoriesRetryingVerificationCount int `json:"repositories_retrying_verification_count"` - WikisRetryingVerificationCount int `json:"wikis_retrying_verification_count"` - LastEventID int `json:"last_event_id"` - LastEventTimestamp int `json:"last_event_timestamp"` - CursorLastEventID int `json:"cursor_last_event_id"` - CursorLastEventTimestamp int `json:"cursor_last_event_timestamp"` - LastSuccessfulStatusCheckTimestamp int `json:"last_successful_status_check_timestamp"` + WikisChecksumMismatchCount int64 `json:"wikis_checksum_mismatch_count"` + RepositoriesRetryingVerificationCount int64 `json:"repositories_retrying_verification_count"` + WikisRetryingVerificationCount int64 `json:"wikis_retrying_verification_count"` + LastEventID int64 
`json:"last_event_id"` + LastEventTimestamp int64 `json:"last_event_timestamp"` + CursorLastEventID int64 `json:"cursor_last_event_id"` + CursorLastEventTimestamp int64 `json:"cursor_last_event_timestamp"` + LastSuccessfulStatusCheckTimestamp int64 `json:"last_successful_status_check_timestamp"` Version string `json:"version"` Revision string `json:"revision"` - MergeRequestDiffsCount int `json:"merge_request_diffs_count"` - MergeRequestDiffsChecksumTotalCount int `json:"merge_request_diffs_checksum_total_count"` - MergeRequestDiffsChecksummedCount int `json:"merge_request_diffs_checksummed_count"` - MergeRequestDiffsChecksumFailedCount int `json:"merge_request_diffs_checksum_failed_count"` - MergeRequestDiffsSyncedCount int `json:"merge_request_diffs_synced_count"` - MergeRequestDiffsFailedCount int `json:"merge_request_diffs_failed_count"` - MergeRequestDiffsRegistryCount int `json:"merge_request_diffs_registry_count"` - MergeRequestDiffsVerificationTotalCount int `json:"merge_request_diffs_verification_total_count"` - MergeRequestDiffsVerifiedCount int `json:"merge_request_diffs_verified_count"` - MergeRequestDiffsVerificationFailedCount int `json:"merge_request_diffs_verification_failed_count"` + MergeRequestDiffsCount int64 `json:"merge_request_diffs_count"` + MergeRequestDiffsChecksumTotalCount int64 `json:"merge_request_diffs_checksum_total_count"` + MergeRequestDiffsChecksummedCount int64 `json:"merge_request_diffs_checksummed_count"` + MergeRequestDiffsChecksumFailedCount int64 `json:"merge_request_diffs_checksum_failed_count"` + MergeRequestDiffsSyncedCount int64 `json:"merge_request_diffs_synced_count"` + MergeRequestDiffsFailedCount int64 `json:"merge_request_diffs_failed_count"` + MergeRequestDiffsRegistryCount int64 `json:"merge_request_diffs_registry_count"` + MergeRequestDiffsVerificationTotalCount int64 `json:"merge_request_diffs_verification_total_count"` + MergeRequestDiffsVerifiedCount int64 `json:"merge_request_diffs_verified_count"` + 
MergeRequestDiffsVerificationFailedCount int64 `json:"merge_request_diffs_verification_failed_count"` MergeRequestDiffsSyncedInPercentage string `json:"merge_request_diffs_synced_in_percentage"` MergeRequestDiffsVerifiedInPercentage string `json:"merge_request_diffs_verified_in_percentage"` - PackageFilesCount int `json:"package_files_count"` - PackageFilesChecksumTotalCount int `json:"package_files_checksum_total_count"` - PackageFilesChecksummedCount int `json:"package_files_checksummed_count"` - PackageFilesChecksumFailedCount int `json:"package_files_checksum_failed_count"` - PackageFilesSyncedCount int `json:"package_files_synced_count"` - PackageFilesFailedCount int `json:"package_files_failed_count"` - PackageFilesRegistryCount int `json:"package_files_registry_count"` - PackageFilesVerificationTotalCount int `json:"package_files_verification_total_count"` - PackageFilesVerifiedCount int `json:"package_files_verified_count"` - PackageFilesVerificationFailedCount int `json:"package_files_verification_failed_count"` + PackageFilesCount int64 `json:"package_files_count"` + PackageFilesChecksumTotalCount int64 `json:"package_files_checksum_total_count"` + PackageFilesChecksummedCount int64 `json:"package_files_checksummed_count"` + PackageFilesChecksumFailedCount int64 `json:"package_files_checksum_failed_count"` + PackageFilesSyncedCount int64 `json:"package_files_synced_count"` + PackageFilesFailedCount int64 `json:"package_files_failed_count"` + PackageFilesRegistryCount int64 `json:"package_files_registry_count"` + PackageFilesVerificationTotalCount int64 `json:"package_files_verification_total_count"` + PackageFilesVerifiedCount int64 `json:"package_files_verified_count"` + PackageFilesVerificationFailedCount int64 `json:"package_files_verification_failed_count"` PackageFilesSyncedInPercentage string `json:"package_files_synced_in_percentage"` PackageFilesVerifiedInPercentage string `json:"package_files_verified_in_percentage"` - PagesDeploymentsCount int 
`json:"pages_deployments_count"` - PagesDeploymentsChecksumTotalCount int `json:"pages_deployments_checksum_total_count"` - PagesDeploymentsChecksummedCount int `json:"pages_deployments_checksummed_count"` - PagesDeploymentsChecksumFailedCount int `json:"pages_deployments_checksum_failed_count"` - PagesDeploymentsSyncedCount int `json:"pages_deployments_synced_count"` - PagesDeploymentsFailedCount int `json:"pages_deployments_failed_count"` - PagesDeploymentsRegistryCount int `json:"pages_deployments_registry_count"` - PagesDeploymentsVerificationTotalCount int `json:"pages_deployments_verification_total_count"` - PagesDeploymentsVerifiedCount int `json:"pages_deployments_verified_count"` - PagesDeploymentsVerificationFailedCount int `json:"pages_deployments_verification_failed_count"` + PagesDeploymentsCount int64 `json:"pages_deployments_count"` + PagesDeploymentsChecksumTotalCount int64 `json:"pages_deployments_checksum_total_count"` + PagesDeploymentsChecksummedCount int64 `json:"pages_deployments_checksummed_count"` + PagesDeploymentsChecksumFailedCount int64 `json:"pages_deployments_checksum_failed_count"` + PagesDeploymentsSyncedCount int64 `json:"pages_deployments_synced_count"` + PagesDeploymentsFailedCount int64 `json:"pages_deployments_failed_count"` + PagesDeploymentsRegistryCount int64 `json:"pages_deployments_registry_count"` + PagesDeploymentsVerificationTotalCount int64 `json:"pages_deployments_verification_total_count"` + PagesDeploymentsVerifiedCount int64 `json:"pages_deployments_verified_count"` + PagesDeploymentsVerificationFailedCount int64 `json:"pages_deployments_verification_failed_count"` PagesDeploymentsSyncedInPercentage string `json:"pages_deployments_synced_in_percentage"` PagesDeploymentsVerifiedInPercentage string `json:"pages_deployments_verified_in_percentage"` - TerraformStateVersionsCount int `json:"terraform_state_versions_count"` - TerraformStateVersionsChecksumTotalCount int 
`json:"terraform_state_versions_checksum_total_count"` - TerraformStateVersionsChecksummedCount int `json:"terraform_state_versions_checksummed_count"` - TerraformStateVersionsChecksumFailedCount int `json:"terraform_state_versions_checksum_failed_count"` - TerraformStateVersionsSyncedCount int `json:"terraform_state_versions_synced_count"` - TerraformStateVersionsFailedCount int `json:"terraform_state_versions_failed_count"` - TerraformStateVersionsRegistryCount int `json:"terraform_state_versions_registry_count"` - TerraformStateVersionsVerificationTotalCount int `json:"terraform_state_versions_verification_total_count"` - TerraformStateVersionsVerifiedCount int `json:"terraform_state_versions_verified_count"` - TerraformStateVersionsVerificationFailedCount int `json:"terraform_state_versions_verification_failed_count"` + TerraformStateVersionsCount int64 `json:"terraform_state_versions_count"` + TerraformStateVersionsChecksumTotalCount int64 `json:"terraform_state_versions_checksum_total_count"` + TerraformStateVersionsChecksummedCount int64 `json:"terraform_state_versions_checksummed_count"` + TerraformStateVersionsChecksumFailedCount int64 `json:"terraform_state_versions_checksum_failed_count"` + TerraformStateVersionsSyncedCount int64 `json:"terraform_state_versions_synced_count"` + TerraformStateVersionsFailedCount int64 `json:"terraform_state_versions_failed_count"` + TerraformStateVersionsRegistryCount int64 `json:"terraform_state_versions_registry_count"` + TerraformStateVersionsVerificationTotalCount int64 `json:"terraform_state_versions_verification_total_count"` + TerraformStateVersionsVerifiedCount int64 `json:"terraform_state_versions_verified_count"` + TerraformStateVersionsVerificationFailedCount int64 `json:"terraform_state_versions_verification_failed_count"` TerraformStateVersionsSyncedInPercentage string `json:"terraform_state_versions_synced_in_percentage"` TerraformStateVersionsVerifiedInPercentage string 
`json:"terraform_state_versions_verified_in_percentage"` - SnippetRepositoriesCount int `json:"snippet_repositories_count"` - SnippetRepositoriesChecksumTotalCount int `json:"snippet_repositories_checksum_total_count"` - SnippetRepositoriesChecksummedCount int `json:"snippet_repositories_checksummed_count"` - SnippetRepositoriesChecksumFailedCount int `json:"snippet_repositories_checksum_failed_count"` - SnippetRepositoriesSyncedCount int `json:"snippet_repositories_synced_count"` - SnippetRepositoriesFailedCount int `json:"snippet_repositories_failed_count"` - SnippetRepositoriesRegistryCount int `json:"snippet_repositories_registry_count"` - SnippetRepositoriesVerificationTotalCount int `json:"snippet_repositories_verification_total_count"` - SnippetRepositoriesVerifiedCount int `json:"snippet_repositories_verified_count"` - SnippetRepositoriesVerificationFailedCount int `json:"snippet_repositories_verification_failed_count"` + SnippetRepositoriesCount int64 `json:"snippet_repositories_count"` + SnippetRepositoriesChecksumTotalCount int64 `json:"snippet_repositories_checksum_total_count"` + SnippetRepositoriesChecksummedCount int64 `json:"snippet_repositories_checksummed_count"` + SnippetRepositoriesChecksumFailedCount int64 `json:"snippet_repositories_checksum_failed_count"` + SnippetRepositoriesSyncedCount int64 `json:"snippet_repositories_synced_count"` + SnippetRepositoriesFailedCount int64 `json:"snippet_repositories_failed_count"` + SnippetRepositoriesRegistryCount int64 `json:"snippet_repositories_registry_count"` + SnippetRepositoriesVerificationTotalCount int64 `json:"snippet_repositories_verification_total_count"` + SnippetRepositoriesVerifiedCount int64 `json:"snippet_repositories_verified_count"` + SnippetRepositoriesVerificationFailedCount int64 `json:"snippet_repositories_verification_failed_count"` SnippetRepositoriesSyncedInPercentage string `json:"snippet_repositories_synced_in_percentage"` SnippetRepositoriesVerifiedInPercentage string 
`json:"snippet_repositories_verified_in_percentage"` - GroupWikiRepositoriesCount int `json:"group_wiki_repositories_count"` - GroupWikiRepositoriesChecksumTotalCount int `json:"group_wiki_repositories_checksum_total_count"` - GroupWikiRepositoriesChecksummedCount int `json:"group_wiki_repositories_checksummed_count"` - GroupWikiRepositoriesChecksumFailedCount int `json:"group_wiki_repositories_checksum_failed_count"` - GroupWikiRepositoriesSyncedCount int `json:"group_wiki_repositories_synced_count"` - GroupWikiRepositoriesFailedCount int `json:"group_wiki_repositories_failed_count"` - GroupWikiRepositoriesRegistryCount int `json:"group_wiki_repositories_registry_count"` - GroupWikiRepositoriesVerificationTotalCount int `json:"group_wiki_repositories_verification_total_count"` - GroupWikiRepositoriesVerifiedCount int `json:"group_wiki_repositories_verified_count"` - GroupWikiRepositoriesVerificationFailedCount int `json:"group_wiki_repositories_verification_failed_count"` + GroupWikiRepositoriesCount int64 `json:"group_wiki_repositories_count"` + GroupWikiRepositoriesChecksumTotalCount int64 `json:"group_wiki_repositories_checksum_total_count"` + GroupWikiRepositoriesChecksummedCount int64 `json:"group_wiki_repositories_checksummed_count"` + GroupWikiRepositoriesChecksumFailedCount int64 `json:"group_wiki_repositories_checksum_failed_count"` + GroupWikiRepositoriesSyncedCount int64 `json:"group_wiki_repositories_synced_count"` + GroupWikiRepositoriesFailedCount int64 `json:"group_wiki_repositories_failed_count"` + GroupWikiRepositoriesRegistryCount int64 `json:"group_wiki_repositories_registry_count"` + GroupWikiRepositoriesVerificationTotalCount int64 `json:"group_wiki_repositories_verification_total_count"` + GroupWikiRepositoriesVerifiedCount int64 `json:"group_wiki_repositories_verified_count"` + GroupWikiRepositoriesVerificationFailedCount int64 `json:"group_wiki_repositories_verification_failed_count"` GroupWikiRepositoriesSyncedInPercentage string 
`json:"group_wiki_repositories_synced_in_percentage"` GroupWikiRepositoriesVerifiedInPercentage string `json:"group_wiki_repositories_verified_in_percentage"` - PipelineArtifactsCount int `json:"pipeline_artifacts_count"` - PipelineArtifactsChecksumTotalCount int `json:"pipeline_artifacts_checksum_total_count"` - PipelineArtifactsChecksummedCount int `json:"pipeline_artifacts_checksummed_count"` - PipelineArtifactsChecksumFailedCount int `json:"pipeline_artifacts_checksum_failed_count"` - PipelineArtifactsSyncedCount int `json:"pipeline_artifacts_synced_count"` - PipelineArtifactsFailedCount int `json:"pipeline_artifacts_failed_count"` - PipelineArtifactsRegistryCount int `json:"pipeline_artifacts_registry_count"` - PipelineArtifactsVerificationTotalCount int `json:"pipeline_artifacts_verification_total_count"` - PipelineArtifactsVerifiedCount int `json:"pipeline_artifacts_verified_count"` - PipelineArtifactsVerificationFailedCount int `json:"pipeline_artifacts_verification_failed_count"` + PipelineArtifactsCount int64 `json:"pipeline_artifacts_count"` + PipelineArtifactsChecksumTotalCount int64 `json:"pipeline_artifacts_checksum_total_count"` + PipelineArtifactsChecksummedCount int64 `json:"pipeline_artifacts_checksummed_count"` + PipelineArtifactsChecksumFailedCount int64 `json:"pipeline_artifacts_checksum_failed_count"` + PipelineArtifactsSyncedCount int64 `json:"pipeline_artifacts_synced_count"` + PipelineArtifactsFailedCount int64 `json:"pipeline_artifacts_failed_count"` + PipelineArtifactsRegistryCount int64 `json:"pipeline_artifacts_registry_count"` + PipelineArtifactsVerificationTotalCount int64 `json:"pipeline_artifacts_verification_total_count"` + PipelineArtifactsVerifiedCount int64 `json:"pipeline_artifacts_verified_count"` + PipelineArtifactsVerificationFailedCount int64 `json:"pipeline_artifacts_verification_failed_count"` PipelineArtifactsSyncedInPercentage string `json:"pipeline_artifacts_synced_in_percentage"` PipelineArtifactsVerifiedInPercentage 
string `json:"pipeline_artifacts_verified_in_percentage"` - UploadsCount int `json:"uploads_count"` - UploadsSyncedCount int `json:"uploads_synced_count"` - UploadsFailedCount int `json:"uploads_failed_count"` - UploadsRegistryCount int `json:"uploads_registry_count"` + UploadsCount int64 `json:"uploads_count"` + UploadsSyncedCount int64 `json:"uploads_synced_count"` + UploadsFailedCount int64 `json:"uploads_failed_count"` + UploadsRegistryCount int64 `json:"uploads_registry_count"` UploadsSyncedInPercentage string `json:"uploads_synced_in_percentage"` } @@ -436,18 +393,10 @@ type GeoNodeStatus struct { // GitLab API docs: // https://docs.gitlab.com/api/geo_nodes/#retrieve-status-about-all-geo-nodes func (s *GeoNodesService) RetrieveStatusOfAllGeoNodes(options ...RequestOptionFunc) ([]*GeoNodeStatus, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "geo_nodes/status", nil, options) - if err != nil { - return nil, nil, err - } - - var gnss []*GeoNodeStatus - resp, err := s.client.Do(req, &gnss) - if err != nil { - return nil, resp, err - } - - return gnss, resp, nil + return do[[]*GeoNodeStatus](s.client, + withPath("geo_nodes/status"), + withRequestOpts(options...), + ) } // RetrieveStatusOfGeoNode get the of status of a specific Geo Nodes. 
@@ -455,19 +404,9 @@ func (s *GeoNodesService) RetrieveStatusOfAllGeoNodes(options ...RequestOptionFu // // GitLab API docs: // https://docs.gitlab.com/api/geo_nodes/#retrieve-status-about-a-specific-geo-node -func (s *GeoNodesService) RetrieveStatusOfGeoNode(id int, options ...RequestOptionFunc) (*GeoNodeStatus, *Response, error) { - u := fmt.Sprintf("geo_nodes/%d/status", id) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gns := new(GeoNodeStatus) - resp, err := s.client.Do(req, gns) - if err != nil { - return nil, resp, err - } - - return gns, resp, nil +func (s *GeoNodesService) RetrieveStatusOfGeoNode(id int64, options ...RequestOptionFunc) (*GeoNodeStatus, *Response, error) { + return do[*GeoNodeStatus](s.client, + withPath("geo_nodes/%d/status", id), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/geo_sites.go b/vendor/gitlab.com/gitlab-org/api/client-go/geo_sites.go index bd0f9f2b38..f5818a3bcf 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/geo_sites.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/geo_sites.go @@ -15,21 +15,52 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( GeoSitesServiceInterface interface { + // CreateGeoSite creates a new Geo Site. + // + // GitLab API docs: + // https://docs.gitlab.com/api/geo_sites/#create-a-new-geo-site CreateGeoSite(*CreateGeoSitesOptions, ...RequestOptionFunc) (*GeoSite, *Response, error) + // ListGeoSites gets a list of geo sites. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/geo_sites/#retrieve-configuration-about-all-geo-sites ListGeoSites(*ListGeoSitesOptions, ...RequestOptionFunc) ([]*GeoSite, *Response, error) - GetGeoSite(int, ...RequestOptionFunc) (*GeoSite, *Response, error) - EditGeoSite(int, *EditGeoSiteOptions, ...RequestOptionFunc) (*GeoSite, *Response, error) - DeleteGeoSite(int, ...RequestOptionFunc) (*Response, error) - RepairGeoSite(int, ...RequestOptionFunc) (*GeoSite, *Response, error) + // GetGeoSite gets a specific geo site. + // + // GitLab API docs: + // https://docs.gitlab.com/api/geo_sites/#retrieve-configuration-about-a-specific-geo-site + GetGeoSite(int64, ...RequestOptionFunc) (*GeoSite, *Response, error) + // EditGeoSite updates settings of an existing Geo site. + // + // GitLab API docs: + // https://docs.gitlab.com/api/geo_sites/#edit-a-geo-site + EditGeoSite(int64, *EditGeoSiteOptions, ...RequestOptionFunc) (*GeoSite, *Response, error) + // DeleteGeoSite removes the Geo site. + // + // GitLab API docs: + // https://docs.gitlab.com/api/geo_sites/#delete-a-geo-site + DeleteGeoSite(int64, ...RequestOptionFunc) (*Response, error) + // RepairGeoSite to repair the OAuth authentication of a Geo site. + // + // GitLab API docs: + // https://docs.gitlab.com/api/geo_sites/#repair-a-geo-site + RepairGeoSite(int64, ...RequestOptionFunc) (*GeoSite, *Response, error) + // ListStatusOfAllGeoSites get the list of status of all Geo Sites. + // + // GitLab API docs: + // https://docs.gitlab.com/api/geo_sites/#retrieve-status-about-all-geo-sites ListStatusOfAllGeoSites(*ListStatusOfAllGeoSitesOptions, ...RequestOptionFunc) ([]*GeoSiteStatus, *Response, error) - GetStatusOfGeoSite(int, ...RequestOptionFunc) (*GeoSiteStatus, *Response, error) + // GetStatusOfGeoSite gets the status of a specific Geo Site. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/geo_sites/#retrieve-status-about-a-specific-geo-site + GetStatusOfGeoSite(int64, ...RequestOptionFunc) (*GeoSiteStatus, *Response, error) } // GeoSitesService handles communication with Geo Sites related methods @@ -47,21 +78,21 @@ var _ GeoSitesServiceInterface = (*GeoSitesService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/geo_sites/ type GeoSite struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` URL string `json:"url"` InternalURL string `json:"internal_url"` Primary bool `json:"primary"` Enabled bool `json:"enabled"` Current bool `json:"current"` - FilesMaxCapacity int `json:"files_max_capacity"` - ReposMaxCapacity int `json:"repos_max_capacity"` - VerificationMaxCapacity int `json:"verification_max_capacity"` - ContainerRepositoriesMaxCapacity int `json:"container_repositories_max_capacity"` + FilesMaxCapacity int64 `json:"files_max_capacity"` + ReposMaxCapacity int64 `json:"repos_max_capacity"` + VerificationMaxCapacity int64 `json:"verification_max_capacity"` + ContainerRepositoriesMaxCapacity int64 `json:"container_repositories_max_capacity"` SelectiveSyncType string `json:"selective_sync_type"` SelectiveSyncShards []string `json:"selective_sync_shards"` - SelectiveSyncNamespaceIDs []int `json:"selective_sync_namespace_ids"` - MinimumReverificationInterval int `json:"minimum_reverification_interval"` + SelectiveSyncNamespaceIDs []int64 `json:"selective_sync_namespace_ids"` + MinimumReverificationInterval int64 `json:"minimum_reverification_interval"` SyncObjectStorage bool `json:"sync_object_storage"` WebEditURL string `json:"web_edit_url"` WebGeoReplicationDetailsURL string `json:"web_geo_replication_details_url"` @@ -80,327 +111,263 @@ type GeoSiteLinks struct { // CreateGeoSitesOptions represents the available CreateGeoSite() options. 
// // GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#create-a-new-geo-site +// https://docs.gitlab.com/api/geo_sites/#create-a-geo-site type CreateGeoSitesOptions struct { Primary *bool `url:"primary,omitempty" json:"primary,omitempty"` Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"` Name *string `url:"name,omitempty" json:"name,omitempty"` URL *string `url:"url,omitempty" json:"url,omitempty"` InternalURL *string `url:"internal_url,omitempty" json:"internal_url,omitempty"` - FilesMaxCapacity *int `url:"files_max_capacity,omitempty" json:"files_max_capacity,omitempty"` - ReposMaxCapacity *int `url:"repos_max_capacity,omitempty" json:"repos_max_capacity,omitempty"` - VerificationMaxCapacity *int `url:"verification_max_capacity,omitempty" json:"verification_max_capacity,omitempty"` - ContainerRepositoriesMaxCapacity *int `url:"container_repositories_max_capacity,omitempty" json:"container_repositories_max_capacity,omitempty"` + FilesMaxCapacity *int64 `url:"files_max_capacity,omitempty" json:"files_max_capacity,omitempty"` + ReposMaxCapacity *int64 `url:"repos_max_capacity,omitempty" json:"repos_max_capacity,omitempty"` + VerificationMaxCapacity *int64 `url:"verification_max_capacity,omitempty" json:"verification_max_capacity,omitempty"` + ContainerRepositoriesMaxCapacity *int64 `url:"container_repositories_max_capacity,omitempty" json:"container_repositories_max_capacity,omitempty"` SyncObjectStorage *bool `url:"sync_object_storage,omitempty" json:"sync_object_storage,omitempty"` SelectiveSyncType *string `url:"selective_sync_type,omitempty" json:"selective_sync_type,omitempty"` SelectiveSyncShards *[]string `url:"selective_sync_shards,omitempty" json:"selective_sync_shards,omitempty"` - SelectiveSyncNamespaceIDs *[]int `url:"selective_sync_namespace_ids,omitempty" json:"selective_sync_namespace_ids,omitempty"` - MinimumReverificationInterval *int `url:"minimum_reverification_interval,omitempty" 
json:"minimum_reverification_interval,omitempty"` + SelectiveSyncNamespaceIDs *[]int64 `url:"selective_sync_namespace_ids,omitempty" json:"selective_sync_namespace_ids,omitempty"` + MinimumReverificationInterval *int64 `url:"minimum_reverification_interval,omitempty" json:"minimum_reverification_interval,omitempty"` } -// CreateGeoSite creates a new Geo Site. -// -// GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#create-a-new-geo-site func (s *GeoSitesService) CreateGeoSite(opt *CreateGeoSitesOptions, options ...RequestOptionFunc) (*GeoSite, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "geo_sites", opt, options) - if err != nil { - return nil, nil, err - } - - site := new(GeoSite) - resp, err := s.client.Do(req, site) - if err != nil { - return nil, resp, err - } - - return site, resp, nil + return do[*GeoSite](s.client, + withMethod(http.MethodPost), + withPath("geo_sites"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListGeoSitesOptions represents the available ListGeoSites() options. // // GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#retrieve-configuration-about-all-geo-sites -type ListGeoSitesOptions ListOptions +// https://docs.gitlab.com/api/geo_sites/#list-all-geo-sites +type ListGeoSitesOptions struct { + ListOptions +} -// ListGeoSites gets a list of geo sites. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#retrieve-configuration-about-all-geo-sites func (s *GeoSitesService) ListGeoSites(opt *ListGeoSitesOptions, options ...RequestOptionFunc) ([]*GeoSite, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "geo_sites", opt, options) - if err != nil { - return nil, nil, err - } - - var sites []*GeoSite - resp, err := s.client.Do(req, &sites) - if err != nil { - return nil, resp, err - } - - return sites, resp, nil + return do[[]*GeoSite](s.client, + withPath("geo_sites"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetGeoSite gets a specific geo site. -// -// GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#retrieve-configuration-about-a-specific-geo-site -func (s *GeoSitesService) GetGeoSite(id int, options ...RequestOptionFunc) (*GeoSite, *Response, error) { - u := fmt.Sprintf("geo_sites/%d", id) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - site := new(GeoSite) - resp, err := s.client.Do(req, site) - if err != nil { - return nil, resp, err - } - - return site, resp, nil +func (s *GeoSitesService) GetGeoSite(id int64, options ...RequestOptionFunc) (*GeoSite, *Response, error) { + return do[*GeoSite](s.client, + withPath("geo_sites/%d", id), + withRequestOpts(options...), + ) } // EditGeoSiteOptions represents the available EditGeoSite() options. 
// // GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#edit-a-geo-site +// https://docs.gitlab.com/api/geo_sites/#update-a-geo-site type EditGeoSiteOptions struct { Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"` Name *string `url:"name,omitempty" json:"name,omitempty"` URL *string `url:"url,omitempty" json:"url,omitempty"` InternalURL *string `url:"internal_url,omitempty" json:"internal_url,omitempty"` - FilesMaxCapacity *int `url:"files_max_capacity,omitempty" json:"files_max_capacity,omitempty"` - ReposMaxCapacity *int `url:"repos_max_capacity,omitempty" json:"repos_max_capacity,omitempty"` - VerificationMaxCapacity *int `url:"verification_max_capacity,omitempty" json:"verification_max_capacity,omitempty"` - ContainerRepositoriesMaxCapacity *int `url:"container_repositories_max_capacity,omitempty" json:"container_repositories_max_capacity,omitempty"` + FilesMaxCapacity *int64 `url:"files_max_capacity,omitempty" json:"files_max_capacity,omitempty"` + ReposMaxCapacity *int64 `url:"repos_max_capacity,omitempty" json:"repos_max_capacity,omitempty"` + VerificationMaxCapacity *int64 `url:"verification_max_capacity,omitempty" json:"verification_max_capacity,omitempty"` + ContainerRepositoriesMaxCapacity *int64 `url:"container_repositories_max_capacity,omitempty" json:"container_repositories_max_capacity,omitempty"` SelectiveSyncType *string `url:"selective_sync_type,omitempty" json:"selective_sync_type,omitempty"` SelectiveSyncShards *[]string `url:"selective_sync_shards,omitempty" json:"selective_sync_shards,omitempty"` - SelectiveSyncNamespaceIDs *[]int `url:"selective_sync_namespace_ids,omitempty" json:"selective_sync_namespace_ids,omitempty"` - MinimumReverificationInterval *int `url:"minimum_reverification_interval,omitempty" json:"minimum_reverification_interval,omitempty"` + SelectiveSyncNamespaceIDs *[]int64 `url:"selective_sync_namespace_ids,omitempty" json:"selective_sync_namespace_ids,omitempty"` + MinimumReverificationInterval 
*int64 `url:"minimum_reverification_interval,omitempty" json:"minimum_reverification_interval,omitempty"` } -// EditGeoSite updates settings of an existing Geo site. -// -// GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#edit-a-geo-site -func (s *GeoSitesService) EditGeoSite(id int, opt *EditGeoSiteOptions, options ...RequestOptionFunc) (*GeoSite, *Response, error) { - u := fmt.Sprintf("geo_sites/%d", id) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - site := new(GeoSite) - resp, err := s.client.Do(req, site) - if err != nil { - return nil, resp, err - } - - return site, resp, nil +func (s *GeoSitesService) EditGeoSite(id int64, opt *EditGeoSiteOptions, options ...RequestOptionFunc) (*GeoSite, *Response, error) { + return do[*GeoSite](s.client, + withMethod(http.MethodPut), + withPath("geo_sites/%d", id), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteGeoSite removes the Geo site. -// -// GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#delete-a-geo-site -func (s *GeoSitesService) DeleteGeoSite(id int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("geo_sites/%d", id) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GeoSitesService) DeleteGeoSite(id int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("geo_sites/%d", id), + withRequestOpts(options...), + ) + return resp, err } -// RepairGeoSite to repair the OAuth authentication of a Geo site. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#repair-a-geo-site -func (s *GeoSitesService) RepairGeoSite(id int, options ...RequestOptionFunc) (*GeoSite, *Response, error) { - u := fmt.Sprintf("geo_sites/%d/repair", id) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - site := new(GeoSite) - resp, err := s.client.Do(req, site) - if err != nil { - return nil, resp, err - } - - return site, resp, nil +func (s *GeoSitesService) RepairGeoSite(id int64, options ...RequestOptionFunc) (*GeoSite, *Response, error) { + return do[*GeoSite](s.client, + withMethod(http.MethodPost), + withPath("geo_sites/%d/repair", id), + withRequestOpts(options...), + ) } // GeoSiteStatus represents the status of Geo Site. // // GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#retrieve-status-about-all-geo-sites +// https://docs.gitlab.com/api/geo_sites/#list-all-geo-site-statuses type GeoSiteStatus struct { - GeoNodeID int `json:"geo_node_id"` - ProjectsCount int `json:"projects_count"` + GeoNodeID int64 `json:"geo_node_id"` + ProjectsCount int64 `json:"projects_count"` ContainerRepositoriesReplicationEnabled bool `json:"container_repositories_replication_enabled"` - LFSObjectsCount int `json:"lfs_objects_count"` - LFSObjectsChecksumTotalCount int `json:"lfs_objects_checksum_total_count"` - LFSObjectsChecksummedCount int `json:"lfs_objects_checksummed_count"` - LFSObjectsChecksumFailedCount int `json:"lfs_objects_checksum_failed_count"` - LFSObjectsSyncedCount int `json:"lfs_objects_synced_count"` - LFSObjectsFailedCount int `json:"lfs_objects_failed_count"` - LFSObjectsRegistryCount int `json:"lfs_objects_registry_count"` - LFSObjectsVerificationTotalCount int `json:"lfs_objects_verification_total_count"` - LFSObjectsVerifiedCount int `json:"lfs_objects_verified_count"` - LFSObjectsVerificationFailedCount int `json:"lfs_objects_verification_failed_count"` - MergeRequestDiffsCount int 
`json:"merge_request_diffs_count"` - MergeRequestDiffsChecksumTotalCount int `json:"merge_request_diffs_checksum_total_count"` - MergeRequestDiffsChecksummedCount int `json:"merge_request_diffs_checksummed_count"` - MergeRequestDiffsChecksumFailedCount int `json:"merge_request_diffs_checksum_failed_count"` - MergeRequestDiffsSyncedCount int `json:"merge_request_diffs_synced_count"` - MergeRequestDiffsFailedCount int `json:"merge_request_diffs_failed_count"` - MergeRequestDiffsRegistryCount int `json:"merge_request_diffs_registry_count"` - MergeRequestDiffsVerificationTotalCount int `json:"merge_request_diffs_verification_total_count"` - MergeRequestDiffsVerifiedCount int `json:"merge_request_diffs_verified_count"` - MergeRequestDiffsVerificationFailedCount int `json:"merge_request_diffs_verification_failed_count"` - PackageFilesCount int `json:"package_files_count"` - PackageFilesChecksumTotalCount int `json:"package_files_checksum_total_count"` - PackageFilesChecksummedCount int `json:"package_files_checksummed_count"` - PackageFilesChecksumFailedCount int `json:"package_files_checksum_failed_count"` - PackageFilesSyncedCount int `json:"package_files_synced_count"` - PackageFilesFailedCount int `json:"package_files_failed_count"` - PackageFilesRegistryCount int `json:"package_files_registry_count"` - PackageFilesVerificationTotalCount int `json:"package_files_verification_total_count"` - PackageFilesVerifiedCount int `json:"package_files_verified_count"` - PackageFilesVerificationFailedCount int `json:"package_files_verification_failed_count"` - TerraformStateVersionsCount int `json:"terraform_state_versions_count"` - TerraformStateVersionsChecksumTotalCount int `json:"terraform_state_versions_checksum_total_count"` - TerraformStateVersionsChecksummedCount int `json:"terraform_state_versions_checksummed_count"` - TerraformStateVersionsChecksumFailedCount int `json:"terraform_state_versions_checksum_failed_count"` - TerraformStateVersionsSyncedCount int 
`json:"terraform_state_versions_synced_count"` - TerraformStateVersionsFailedCount int `json:"terraform_state_versions_failed_count"` - TerraformStateVersionsRegistryCount int `json:"terraform_state_versions_registry_count"` - TerraformStateVersionsVerificationTotalCount int `json:"terraform_state_versions_verification_total_count"` - TerraformStateVersionsVerifiedCount int `json:"terraform_state_versions_verified_count"` - TerraformStateVersionsVerificationFailedCount int `json:"terraform_state_versions_verification_failed_count"` - SnippetRepositoriesCount int `json:"snippet_repositories_count"` - SnippetRepositoriesChecksumTotalCount int `json:"snippet_repositories_checksum_total_count"` - SnippetRepositoriesChecksummedCount int `json:"snippet_repositories_checksummed_count"` - SnippetRepositoriesChecksumFailedCount int `json:"snippet_repositories_checksum_failed_count"` - SnippetRepositoriesSyncedCount int `json:"snippet_repositories_synced_count"` - SnippetRepositoriesFailedCount int `json:"snippet_repositories_failed_count"` - SnippetRepositoriesRegistryCount int `json:"snippet_repositories_registry_count"` - SnippetRepositoriesVerificationTotalCount int `json:"snippet_repositories_verification_total_count"` - SnippetRepositoriesVerifiedCount int `json:"snippet_repositories_verified_count"` - SnippetRepositoriesVerificationFailedCount int `json:"snippet_repositories_verification_failed_count"` - GroupWikiRepositoriesCount int `json:"group_wiki_repositories_count"` - GroupWikiRepositoriesChecksumTotalCount int `json:"group_wiki_repositories_checksum_total_count"` - GroupWikiRepositoriesChecksummedCount int `json:"group_wiki_repositories_checksummed_count"` - GroupWikiRepositoriesChecksumFailedCount int `json:"group_wiki_repositories_checksum_failed_count"` - GroupWikiRepositoriesSyncedCount int `json:"group_wiki_repositories_synced_count"` - GroupWikiRepositoriesFailedCount int `json:"group_wiki_repositories_failed_count"` - GroupWikiRepositoriesRegistryCount 
int `json:"group_wiki_repositories_registry_count"` - GrupWikiRepositoriesVerificationTotalCount int `json:"group_wiki_repositories_verification_total_count"` - GroupWikiRepositoriesVerifiedCount int `json:"group_wiki_repositories_verified_count"` - GroupWikiRepositoriesVerificationFailedCount int `json:"group_wiki_repositories_verification_failed_count"` - PipelineArtifactsCount int `json:"pipeline_artifacts_count"` - PipelineArtifactsChecksumTotalCount int `json:"pipeline_artifacts_checksum_total_count"` - PipelineArtifactsChecksummedCount int `json:"pipeline_artifacts_checksummed_count"` - PipelineArtifactsChecksumFailedCount int `json:"pipeline_artifacts_checksum_failed_count"` - PipelineArtifactsSyncedCount int `json:"pipeline_artifacts_synced_count"` - PipelineArtifactsFailedCount int `json:"pipeline_artifacts_failed_count"` - PipelineArtifactsRegistryCount int `json:"pipeline_artifacts_registry_count"` - PipelineArtifactsVerificationTotalCount int `json:"pipeline_artifacts_verification_total_count"` - PipelineArtifactsVerifiedCount int `json:"pipeline_artifacts_verified_count"` - PipelineArtifactsVerificationFailedCount int `json:"pipeline_artifacts_verification_failed_count"` - PagesDeploymentsCount int `json:"pages_deployments_count"` - PagesDeploymentsChecksumTotalCount int `json:"pages_deployments_checksum_total_count"` - PagesDeploymentsChecksummedCount int `json:"pages_deployments_checksummed_count"` - PagesDeploymentsChecksumFailedCount int `json:"pages_deployments_checksum_failed_count"` - PagesDeploymentsSyncedCount int `json:"pages_deployments_synced_count"` - PagesDeploymentsFailedCount int `json:"pages_deployments_failed_count"` - PagesDeploymentsRegistryCount int `json:"pages_deployments_registry_count"` - PagesDeploymentsVerificationTotalCount int `json:"pages_deployments_verification_total_count"` - PagesDeploymentsVerifiedCount int `json:"pages_deployments_verified_count"` - PagesDeploymentsVerificationFailedCount int 
`json:"pages_deployments_verification_failed_count"` - UploadsCount int `json:"uploads_count"` - UploadsChecksumTotalCount int `json:"uploads_checksum_total_count"` - UploadsChecksummedCount int `json:"uploads_checksummed_count"` - UploadsChecksumFailedCount int `json:"uploads_checksum_failed_count"` - UploadsSyncedCount int `json:"uploads_synced_count"` - UploadsFailedCount int `json:"uploads_failed_count"` - UploadsRegistryCount int `json:"uploads_registry_count"` - UploadsVerificationTotalCount int `json:"uploads_verification_total_count"` - UploadsVerifiedCount int `json:"uploads_verified_count"` - UploadsVerificationFailedCount int `json:"uploads_verification_failed_count"` - JobArtifactsCount int `json:"job_artifacts_count"` - JobArtifactsChecksumTotalCount int `json:"job_artifacts_checksum_total_count"` - JobArtifactsChecksummedCount int `json:"job_artifacts_checksummed_count"` - JobArtifactsChecksumFailedCount int `json:"job_artifacts_checksum_failed_count"` - JobArtifactsSyncedCount int `json:"job_artifacts_synced_count"` - JobArtifactsFailedCount int `json:"job_artifacts_failed_count"` - JobArtifactsRegistryCount int `json:"job_artifacts_registry_count"` - JobArtifactsVerificationTotalCount int `json:"job_artifacts_verification_total_count"` - JobArtifactsVerifiedCount int `json:"job_artifacts_verified_count"` - JobArtifactsVerificationFailedCount int `json:"job_artifacts_verification_failed_count"` - CISecureFilesCount int `json:"ci_secure_files_count"` - CISecureFilesChecksumTotalCount int `json:"ci_secure_files_checksum_total_count"` - CISecureFilesChecksummedCount int `json:"ci_secure_files_checksummed_count"` - CISecureFilesChecksumFailedCount int `json:"ci_secure_files_checksum_failed_count"` - CISecureFilesSyncedCount int `json:"ci_secure_files_synced_count"` - CISecureFilesFailedCount int `json:"ci_secure_files_failed_count"` - CISecureFilesRegistryCount int `json:"ci_secure_files_registry_count"` - CISecureFilesVerificationTotalCount int 
`json:"ci_secure_files_verification_total_count"` - CISecureFilesVerifiedCount int `json:"ci_secure_files_verified_count"` - CISecureFilesVerificationFailedCount int `json:"ci_secure_files_verification_failed_count"` - ContainerRepositoriesCount int `json:"container_repositories_count"` - ContainerRepositoriesChecksumTotalCount int `json:"container_repositories_checksum_total_count"` - ContainerRepositoriesChecksummedCount int `json:"container_repositories_checksummed_count"` - ContainerRepositoriesChecksumFailedCount int `json:"container_repositories_checksum_failed_count"` - ContainerRepositoriesSyncedCount int `json:"container_repositories_synced_count"` - ContainerRepositoriesFailedCount int `json:"container_repositories_failed_count"` - ContainerRepositoriesRegistryCount int `json:"container_repositories_registry_count"` - ContainerRepositoriesVerificationTotalCount int `json:"container_repositories_verification_total_count"` - ContainerRepositoriesVerifiedCount int `json:"container_repositories_verified_count"` - ContainerRepositoriesVerificationFailedCount int `json:"container_repositories_verification_failed_count"` - DependencyProxyBlobsCount int `json:"dependency_proxy_blobs_count"` - DependencyProxyBlobsChecksumTotalCount int `json:"dependency_proxy_blobs_checksum_total_count"` - DependencyProxyBlobsChecksummedCount int `json:"dependency_proxy_blobs_checksummed_count"` - DependencyProxyBlobsChecksumFailedCount int `json:"dependency_proxy_blobs_checksum_failed_count"` - DependencyProxyBlobsSyncedCount int `json:"dependency_proxy_blobs_synced_count"` - DependencyProxyBlobsFailedCount int `json:"dependency_proxy_blobs_failed_count"` - DependencyProxyBlobsRegistryCount int `json:"dependency_proxy_blobs_registry_count"` - DependencyProxyBlobsVerificationTotalCount int `json:"dependency_proxy_blobs_verification_total_count"` - DependencyProxyBlobsVerifiedCount int `json:"dependency_proxy_blobs_verified_count"` - DependencyProxyBlobsVerificationFailedCount int 
`json:"dependency_proxy_blobs_verification_failed_count"` - DependencyProxyManifestsCount int `json:"dependency_proxy_manifests_count"` - DependencyProxyManifestsChecksumTotalCount int `json:"dependency_proxy_manifests_checksum_total_count"` - DependencyProxyManifestsChecksummedCount int `json:"dependency_proxy_manifests_checksummed_count"` - DependencyProxyManifestsChecksumFailedCount int `json:"dependency_proxy_manifests_checksum_failed_count"` - DependencyProxyManifestsSyncedCount int `json:"dependency_proxy_manifests_synced_count"` - DependencyProxyManifestsFailedCount int `json:"dependency_proxy_manifests_failed_count"` - DependencyProxyManifestsRegistryCount int `json:"dependency_proxy_manifests_registry_count"` - DependencyProxyManifestsVerificationTotalCount int `json:"dependency_proxy_manifests_verification_total_count"` - DependencyProxyManifestsVerifiedCount int `json:"dependency_proxy_manifests_verified_count"` - DependencyProxyManifestsVerificationFailedCount int `json:"dependency_proxy_manifests_verification_failed_count"` - ProjectWikiRepositoriesCount int `json:"project_wiki_repositories_count"` - ProjectWikiRepositoriesChecksumTotalCount int `json:"project_wiki_repositories_checksum_total_count"` - ProjectWikiRepositoriesChecksummedCount int `json:"project_wiki_repositories_checksummed_count"` - ProjectWikiRepositoriesChecksumFailedCount int `json:"project_wiki_repositories_checksum_failed_count"` - ProjectWikiRepositoriesSyncedCount int `json:"project_wiki_repositories_synced_count"` - ProjectWikiRepositoriesFailedCount int `json:"project_wiki_repositories_failed_count"` - ProjectWikiRepositoriesRegistryCount int `json:"project_wiki_repositories_registry_count"` - ProjectWikiRepositoriesVerificationTotalCount int `json:"project_wiki_repositories_verification_total_count"` - ProjectWikiRepositoriesVerifiedCount int `json:"project_wiki_repositories_verified_count"` - ProjectWikiRepositoriesVerificationFailedCount int 
`json:"project_wiki_repositories_verification_failed_count"` - GitFetchEventCountWeekly int `json:"git_fetch_event_count_weekly"` - GitPushEventCountWeekly int `json:"git_push_event_count_weekly"` - ProxyRemoteRequestsEventCountWeekly int `json:"proxy_remote_requests_event_count_weekly"` - ProxyLocalRequestsEventCountWeekly int `json:"proxy_local_requests_event_count_weekly"` + LFSObjectsCount int64 `json:"lfs_objects_count"` + LFSObjectsChecksumTotalCount int64 `json:"lfs_objects_checksum_total_count"` + LFSObjectsChecksummedCount int64 `json:"lfs_objects_checksummed_count"` + LFSObjectsChecksumFailedCount int64 `json:"lfs_objects_checksum_failed_count"` + LFSObjectsSyncedCount int64 `json:"lfs_objects_synced_count"` + LFSObjectsFailedCount int64 `json:"lfs_objects_failed_count"` + LFSObjectsRegistryCount int64 `json:"lfs_objects_registry_count"` + LFSObjectsVerificationTotalCount int64 `json:"lfs_objects_verification_total_count"` + LFSObjectsVerifiedCount int64 `json:"lfs_objects_verified_count"` + LFSObjectsVerificationFailedCount int64 `json:"lfs_objects_verification_failed_count"` + MergeRequestDiffsCount int64 `json:"merge_request_diffs_count"` + MergeRequestDiffsChecksumTotalCount int64 `json:"merge_request_diffs_checksum_total_count"` + MergeRequestDiffsChecksummedCount int64 `json:"merge_request_diffs_checksummed_count"` + MergeRequestDiffsChecksumFailedCount int64 `json:"merge_request_diffs_checksum_failed_count"` + MergeRequestDiffsSyncedCount int64 `json:"merge_request_diffs_synced_count"` + MergeRequestDiffsFailedCount int64 `json:"merge_request_diffs_failed_count"` + MergeRequestDiffsRegistryCount int64 `json:"merge_request_diffs_registry_count"` + MergeRequestDiffsVerificationTotalCount int64 `json:"merge_request_diffs_verification_total_count"` + MergeRequestDiffsVerifiedCount int64 `json:"merge_request_diffs_verified_count"` + MergeRequestDiffsVerificationFailedCount int64 `json:"merge_request_diffs_verification_failed_count"` + PackageFilesCount 
int64 `json:"package_files_count"` + PackageFilesChecksumTotalCount int64 `json:"package_files_checksum_total_count"` + PackageFilesChecksummedCount int64 `json:"package_files_checksummed_count"` + PackageFilesChecksumFailedCount int64 `json:"package_files_checksum_failed_count"` + PackageFilesSyncedCount int64 `json:"package_files_synced_count"` + PackageFilesFailedCount int64 `json:"package_files_failed_count"` + PackageFilesRegistryCount int64 `json:"package_files_registry_count"` + PackageFilesVerificationTotalCount int64 `json:"package_files_verification_total_count"` + PackageFilesVerifiedCount int64 `json:"package_files_verified_count"` + PackageFilesVerificationFailedCount int64 `json:"package_files_verification_failed_count"` + TerraformStateVersionsCount int64 `json:"terraform_state_versions_count"` + TerraformStateVersionsChecksumTotalCount int64 `json:"terraform_state_versions_checksum_total_count"` + TerraformStateVersionsChecksummedCount int64 `json:"terraform_state_versions_checksummed_count"` + TerraformStateVersionsChecksumFailedCount int64 `json:"terraform_state_versions_checksum_failed_count"` + TerraformStateVersionsSyncedCount int64 `json:"terraform_state_versions_synced_count"` + TerraformStateVersionsFailedCount int64 `json:"terraform_state_versions_failed_count"` + TerraformStateVersionsRegistryCount int64 `json:"terraform_state_versions_registry_count"` + TerraformStateVersionsVerificationTotalCount int64 `json:"terraform_state_versions_verification_total_count"` + TerraformStateVersionsVerifiedCount int64 `json:"terraform_state_versions_verified_count"` + TerraformStateVersionsVerificationFailedCount int64 `json:"terraform_state_versions_verification_failed_count"` + SnippetRepositoriesCount int64 `json:"snippet_repositories_count"` + SnippetRepositoriesChecksumTotalCount int64 `json:"snippet_repositories_checksum_total_count"` + SnippetRepositoriesChecksummedCount int64 `json:"snippet_repositories_checksummed_count"` + 
SnippetRepositoriesChecksumFailedCount int64 `json:"snippet_repositories_checksum_failed_count"` + SnippetRepositoriesSyncedCount int64 `json:"snippet_repositories_synced_count"` + SnippetRepositoriesFailedCount int64 `json:"snippet_repositories_failed_count"` + SnippetRepositoriesRegistryCount int64 `json:"snippet_repositories_registry_count"` + SnippetRepositoriesVerificationTotalCount int64 `json:"snippet_repositories_verification_total_count"` + SnippetRepositoriesVerifiedCount int64 `json:"snippet_repositories_verified_count"` + SnippetRepositoriesVerificationFailedCount int64 `json:"snippet_repositories_verification_failed_count"` + GroupWikiRepositoriesCount int64 `json:"group_wiki_repositories_count"` + GroupWikiRepositoriesChecksumTotalCount int64 `json:"group_wiki_repositories_checksum_total_count"` + GroupWikiRepositoriesChecksummedCount int64 `json:"group_wiki_repositories_checksummed_count"` + GroupWikiRepositoriesChecksumFailedCount int64 `json:"group_wiki_repositories_checksum_failed_count"` + GroupWikiRepositoriesSyncedCount int64 `json:"group_wiki_repositories_synced_count"` + GroupWikiRepositoriesFailedCount int64 `json:"group_wiki_repositories_failed_count"` + GroupWikiRepositoriesRegistryCount int64 `json:"group_wiki_repositories_registry_count"` + GrupWikiRepositoriesVerificationTotalCount int64 `json:"group_wiki_repositories_verification_total_count"` + GroupWikiRepositoriesVerifiedCount int64 `json:"group_wiki_repositories_verified_count"` + GroupWikiRepositoriesVerificationFailedCount int64 `json:"group_wiki_repositories_verification_failed_count"` + PipelineArtifactsCount int64 `json:"pipeline_artifacts_count"` + PipelineArtifactsChecksumTotalCount int64 `json:"pipeline_artifacts_checksum_total_count"` + PipelineArtifactsChecksummedCount int64 `json:"pipeline_artifacts_checksummed_count"` + PipelineArtifactsChecksumFailedCount int64 `json:"pipeline_artifacts_checksum_failed_count"` + PipelineArtifactsSyncedCount int64 
`json:"pipeline_artifacts_synced_count"` + PipelineArtifactsFailedCount int64 `json:"pipeline_artifacts_failed_count"` + PipelineArtifactsRegistryCount int64 `json:"pipeline_artifacts_registry_count"` + PipelineArtifactsVerificationTotalCount int64 `json:"pipeline_artifacts_verification_total_count"` + PipelineArtifactsVerifiedCount int64 `json:"pipeline_artifacts_verified_count"` + PipelineArtifactsVerificationFailedCount int64 `json:"pipeline_artifacts_verification_failed_count"` + PagesDeploymentsCount int64 `json:"pages_deployments_count"` + PagesDeploymentsChecksumTotalCount int64 `json:"pages_deployments_checksum_total_count"` + PagesDeploymentsChecksummedCount int64 `json:"pages_deployments_checksummed_count"` + PagesDeploymentsChecksumFailedCount int64 `json:"pages_deployments_checksum_failed_count"` + PagesDeploymentsSyncedCount int64 `json:"pages_deployments_synced_count"` + PagesDeploymentsFailedCount int64 `json:"pages_deployments_failed_count"` + PagesDeploymentsRegistryCount int64 `json:"pages_deployments_registry_count"` + PagesDeploymentsVerificationTotalCount int64 `json:"pages_deployments_verification_total_count"` + PagesDeploymentsVerifiedCount int64 `json:"pages_deployments_verified_count"` + PagesDeploymentsVerificationFailedCount int64 `json:"pages_deployments_verification_failed_count"` + UploadsCount int64 `json:"uploads_count"` + UploadsChecksumTotalCount int64 `json:"uploads_checksum_total_count"` + UploadsChecksummedCount int64 `json:"uploads_checksummed_count"` + UploadsChecksumFailedCount int64 `json:"uploads_checksum_failed_count"` + UploadsSyncedCount int64 `json:"uploads_synced_count"` + UploadsFailedCount int64 `json:"uploads_failed_count"` + UploadsRegistryCount int64 `json:"uploads_registry_count"` + UploadsVerificationTotalCount int64 `json:"uploads_verification_total_count"` + UploadsVerifiedCount int64 `json:"uploads_verified_count"` + UploadsVerificationFailedCount int64 `json:"uploads_verification_failed_count"` + 
JobArtifactsCount int64 `json:"job_artifacts_count"` + JobArtifactsChecksumTotalCount int64 `json:"job_artifacts_checksum_total_count"` + JobArtifactsChecksummedCount int64 `json:"job_artifacts_checksummed_count"` + JobArtifactsChecksumFailedCount int64 `json:"job_artifacts_checksum_failed_count"` + JobArtifactsSyncedCount int64 `json:"job_artifacts_synced_count"` + JobArtifactsFailedCount int64 `json:"job_artifacts_failed_count"` + JobArtifactsRegistryCount int64 `json:"job_artifacts_registry_count"` + JobArtifactsVerificationTotalCount int64 `json:"job_artifacts_verification_total_count"` + JobArtifactsVerifiedCount int64 `json:"job_artifacts_verified_count"` + JobArtifactsVerificationFailedCount int64 `json:"job_artifacts_verification_failed_count"` + CISecureFilesCount int64 `json:"ci_secure_files_count"` + CISecureFilesChecksumTotalCount int64 `json:"ci_secure_files_checksum_total_count"` + CISecureFilesChecksummedCount int64 `json:"ci_secure_files_checksummed_count"` + CISecureFilesChecksumFailedCount int64 `json:"ci_secure_files_checksum_failed_count"` + CISecureFilesSyncedCount int64 `json:"ci_secure_files_synced_count"` + CISecureFilesFailedCount int64 `json:"ci_secure_files_failed_count"` + CISecureFilesRegistryCount int64 `json:"ci_secure_files_registry_count"` + CISecureFilesVerificationTotalCount int64 `json:"ci_secure_files_verification_total_count"` + CISecureFilesVerifiedCount int64 `json:"ci_secure_files_verified_count"` + CISecureFilesVerificationFailedCount int64 `json:"ci_secure_files_verification_failed_count"` + ContainerRepositoriesCount int64 `json:"container_repositories_count"` + ContainerRepositoriesChecksumTotalCount int64 `json:"container_repositories_checksum_total_count"` + ContainerRepositoriesChecksummedCount int64 `json:"container_repositories_checksummed_count"` + ContainerRepositoriesChecksumFailedCount int64 `json:"container_repositories_checksum_failed_count"` + ContainerRepositoriesSyncedCount int64 
`json:"container_repositories_synced_count"` + ContainerRepositoriesFailedCount int64 `json:"container_repositories_failed_count"` + ContainerRepositoriesRegistryCount int64 `json:"container_repositories_registry_count"` + ContainerRepositoriesVerificationTotalCount int64 `json:"container_repositories_verification_total_count"` + ContainerRepositoriesVerifiedCount int64 `json:"container_repositories_verified_count"` + ContainerRepositoriesVerificationFailedCount int64 `json:"container_repositories_verification_failed_count"` + DependencyProxyBlobsCount int64 `json:"dependency_proxy_blobs_count"` + DependencyProxyBlobsChecksumTotalCount int64 `json:"dependency_proxy_blobs_checksum_total_count"` + DependencyProxyBlobsChecksummedCount int64 `json:"dependency_proxy_blobs_checksummed_count"` + DependencyProxyBlobsChecksumFailedCount int64 `json:"dependency_proxy_blobs_checksum_failed_count"` + DependencyProxyBlobsSyncedCount int64 `json:"dependency_proxy_blobs_synced_count"` + DependencyProxyBlobsFailedCount int64 `json:"dependency_proxy_blobs_failed_count"` + DependencyProxyBlobsRegistryCount int64 `json:"dependency_proxy_blobs_registry_count"` + DependencyProxyBlobsVerificationTotalCount int64 `json:"dependency_proxy_blobs_verification_total_count"` + DependencyProxyBlobsVerifiedCount int64 `json:"dependency_proxy_blobs_verified_count"` + DependencyProxyBlobsVerificationFailedCount int64 `json:"dependency_proxy_blobs_verification_failed_count"` + DependencyProxyManifestsCount int64 `json:"dependency_proxy_manifests_count"` + DependencyProxyManifestsChecksumTotalCount int64 `json:"dependency_proxy_manifests_checksum_total_count"` + DependencyProxyManifestsChecksummedCount int64 `json:"dependency_proxy_manifests_checksummed_count"` + DependencyProxyManifestsChecksumFailedCount int64 `json:"dependency_proxy_manifests_checksum_failed_count"` + DependencyProxyManifestsSyncedCount int64 `json:"dependency_proxy_manifests_synced_count"` + DependencyProxyManifestsFailedCount 
int64 `json:"dependency_proxy_manifests_failed_count"` + DependencyProxyManifestsRegistryCount int64 `json:"dependency_proxy_manifests_registry_count"` + DependencyProxyManifestsVerificationTotalCount int64 `json:"dependency_proxy_manifests_verification_total_count"` + DependencyProxyManifestsVerifiedCount int64 `json:"dependency_proxy_manifests_verified_count"` + DependencyProxyManifestsVerificationFailedCount int64 `json:"dependency_proxy_manifests_verification_failed_count"` + ProjectWikiRepositoriesCount int64 `json:"project_wiki_repositories_count"` + ProjectWikiRepositoriesChecksumTotalCount int64 `json:"project_wiki_repositories_checksum_total_count"` + ProjectWikiRepositoriesChecksummedCount int64 `json:"project_wiki_repositories_checksummed_count"` + ProjectWikiRepositoriesChecksumFailedCount int64 `json:"project_wiki_repositories_checksum_failed_count"` + ProjectWikiRepositoriesSyncedCount int64 `json:"project_wiki_repositories_synced_count"` + ProjectWikiRepositoriesFailedCount int64 `json:"project_wiki_repositories_failed_count"` + ProjectWikiRepositoriesRegistryCount int64 `json:"project_wiki_repositories_registry_count"` + ProjectWikiRepositoriesVerificationTotalCount int64 `json:"project_wiki_repositories_verification_total_count"` + ProjectWikiRepositoriesVerifiedCount int64 `json:"project_wiki_repositories_verified_count"` + ProjectWikiRepositoriesVerificationFailedCount int64 `json:"project_wiki_repositories_verification_failed_count"` + GitFetchEventCountWeekly int64 `json:"git_fetch_event_count_weekly"` + GitPushEventCountWeekly int64 `json:"git_push_event_count_weekly"` + ProxyRemoteRequestsEventCountWeekly int64 `json:"proxy_remote_requests_event_count_weekly"` + ProxyLocalRequestsEventCountWeekly int64 `json:"proxy_local_requests_event_count_weekly"` RepositoriesCheckedInPercentage string `json:"repositories_checked_in_percentage"` ReplicationSlotsUsedInPercentage string `json:"replication_slots_used_in_percentage"` 
LFSObjectsSyncedInPercentage string `json:"lfs_objects_synced_in_percentage"` @@ -433,21 +400,21 @@ type GeoSiteStatus struct { DependencyProxyManifestsVerifiedInPercentage string `json:"dependency_proxy_manifests_verified_in_percentage"` ProjectWikiRepositoriesSyncedInPercentage string `json:"project_wiki_repositories_synced_in_percentage"` ProjectWikiRepositoriesVerifiedInPercentage string `json:"project_wiki_repositories_verified_in_percentage"` - ReplicationSlotsCount int `json:"replication_slots_count"` - ReplicationSlotsUsedCount int `json:"replication_slots_used_count"` + ReplicationSlotsCount int64 `json:"replication_slots_count"` + ReplicationSlotsUsedCount int64 `json:"replication_slots_used_count"` Healthy bool `json:"healthy"` Health string `json:"health"` HealthStatus string `json:"health_status"` MissingOAuthApplication bool `json:"missing_oauth_application"` - DBReplicationLagSeconds int `json:"db_replication_lag_seconds"` - ReplicationSlotsMaxRetainedWalBytes int `json:"replication_slots_max_retained_wal_bytes"` - RepositoriesCheckedCount int `json:"repositories_checked_count"` - RepositoriesCheckedFailedCount int `json:"repositories_checked_failed_count"` - LastEventID int `json:"last_event_id"` - LastEventTimestamp int `json:"last_event_timestamp"` - CursorLastEventID int `json:"cursor_last_event_id"` - CursorLastEventTimestamp int `json:"cursor_last_event_timestamp"` - LastSuccessfulStatusCheckTimestamp int `json:"last_successful_status_check_timestamp"` + DBReplicationLagSeconds int64 `json:"db_replication_lag_seconds"` + ReplicationSlotsMaxRetainedWalBytes int64 `json:"replication_slots_max_retained_wal_bytes"` + RepositoriesCheckedCount int64 `json:"repositories_checked_count"` + RepositoriesCheckedFailedCount int64 `json:"repositories_checked_failed_count"` + LastEventID int64 `json:"last_event_id"` + LastEventTimestamp int64 `json:"last_event_timestamp"` + CursorLastEventID int64 `json:"cursor_last_event_id"` + CursorLastEventTimestamp int64 
`json:"cursor_last_event_timestamp"` + LastSuccessfulStatusCheckTimestamp int64 `json:"last_successful_status_check_timestamp"` Version string `json:"version"` Revision string `json:"revision"` SelectiveSyncType string `json:"selective_sync_type"` @@ -460,7 +427,7 @@ type GeoSiteStatus struct { // GeoSiteStatusLink represents the links for a GitLab Geo Site status. // // GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#retrieve-status-about-all-geo-sites +// https://docs.gitlab.com/api/geo_sites/#list-all-geo-site-statuses type GeoSiteStatusLink struct { Self string `json:"self"` Site string `json:"site"` @@ -469,45 +436,22 @@ type GeoSiteStatusLink struct { // ListStatusOfAllGeoSitesOptions represents the available ListStatusOfAllGeoSites() options. // // GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#retrieve-status-about-all-geo-sites -type ListStatusOfAllGeoSitesOptions ListOptions +// https://docs.gitlab.com/api/geo_sites/#list-all-geo-site-statuses +type ListStatusOfAllGeoSitesOptions struct { + ListOptions +} -// ListStatusOfAllGeoSites get the list of status of all Geo Sites. -// -// GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#retrieve-status-about-all-geo-sites func (s *GeoSitesService) ListStatusOfAllGeoSites(opt *ListStatusOfAllGeoSitesOptions, options ...RequestOptionFunc) ([]*GeoSiteStatus, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "geo_sites/status", nil, options) - if err != nil { - return nil, nil, err - } - - var statuses []*GeoSiteStatus - resp, err := s.client.Do(req, &statuses) - if err != nil { - return nil, resp, err - } - - return statuses, resp, nil + return do[[]*GeoSiteStatus](s.client, + withPath("geo_sites/status"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetStatusOfGeoSite get the of status of a specific Geo Sites. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/geo_sites/#retrieve-status-about-a-specific-geo-site -func (s *GeoSitesService) GetStatusOfGeoSite(id int, options ...RequestOptionFunc) (*GeoSiteStatus, *Response, error) { - u := fmt.Sprintf("geo_sites/%d/status", id) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - status := new(GeoSiteStatus) - resp, err := s.client.Do(req, status) - if err != nil { - return nil, resp, err - } - - return status, resp, nil +func (s *GeoSitesService) GetStatusOfGeoSite(id int64, options ...RequestOptionFunc) (*GeoSiteStatus, *Response, error) { + return do[*GeoSiteStatus](s.client, + withPath("geo_sites/%d/status", id), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/gitignore_templates.go b/vendor/gitlab.com/gitlab-org/api/client-go/gitignore_templates.go index 7ffbede558..44b9b47782 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/gitignore_templates.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/gitignore_templates.go @@ -16,12 +16,6 @@ package gitlab -import ( - "fmt" - "net/http" - "net/url" -) - type ( // GitIgnoreTemplatesServiceInterface defines all the API methods for the GitIgnoreTemplatesService GitIgnoreTemplatesServiceInterface interface { @@ -60,25 +54,20 @@ type GitIgnoreTemplateListItem struct { // // GitLab API docs: // https://docs.gitlab.com/api/templates/gitignores/#get-all-gitignore-templates -type ListTemplatesOptions ListOptions +type ListTemplatesOptions struct { + ListOptions +} // ListTemplates get a list of available git ignore templates // // GitLab API docs: // https://docs.gitlab.com/api/templates/gitignores/#get-all-gitignore-templates func (s *GitIgnoreTemplatesService) ListTemplates(opt *ListTemplatesOptions, options ...RequestOptionFunc) ([]*GitIgnoreTemplateListItem, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "templates/gitignores", opt, 
options) - if err != nil { - return nil, nil, err - } - - var gs []*GitIgnoreTemplateListItem - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil + return do[[]*GitIgnoreTemplateListItem](s.client, + withPath("templates/gitignores"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetTemplate get a git ignore template @@ -86,18 +75,8 @@ func (s *GitIgnoreTemplatesService) ListTemplates(opt *ListTemplatesOptions, opt // GitLab API docs: // https://docs.gitlab.com/api/templates/gitignores/#get-a-single-gitignore-template func (s *GitIgnoreTemplatesService) GetTemplate(key string, options ...RequestOptionFunc) (*GitIgnoreTemplate, *Response, error) { - u := fmt.Sprintf("templates/gitignores/%s", url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - g := new(GitIgnoreTemplate) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil + return do[*GitIgnoreTemplate](s.client, + withPath("templates/gitignores/%s", key), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/gitlab.go b/vendor/gitlab.com/gitlab-org/api/client-go/gitlab.go index b6ce7f06fa..53ea1d4477 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/gitlab.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/gitlab.go @@ -24,10 +24,12 @@ import ( "errors" "fmt" "io" + "log/slog" "maps" "math" - "math/rand" + "math/rand/v2" "mime/multipart" + "net" "net/http" "net/url" "sort" @@ -49,11 +51,11 @@ const ( apiVersionPath = "api/v4/" userAgent = "go-gitlab" - headerRateLimit = "RateLimit-Limit" - headerRateReset = "RateLimit-Reset" + headerRateLimit = "Ratelimit-Limit" + headerRateReset = "Ratelimit-Reset" - AccessTokenHeaderName = "PRIVATE-TOKEN" - JobTokenHeaderName = "JOB-TOKEN" + AccessTokenHeaderName = "Private-Token" + JobTokenHeaderName = "Job-Token" ) // AuthType 
represents an authentication type within GitLab. @@ -71,7 +73,28 @@ const ( PrivateToken ) -var ErrNotFound = errors.New("404 Not Found") +var ( + // ErrNotFound is returned for 404 Not Found errors + ErrNotFound = errors.New("404 Not Found") + + // errUnauthenticated is an internal sentinel error to indicate that the auth source doesn't use any authentication + errUnauthenticated = errors.New("unauthenticated") +) + +// URLValidationError wraps URL parsing errors with helpful context +type URLValidationError struct { + URL string + Err error + Hint string +} + +func (e *URLValidationError) Error() string { + msg := fmt.Sprintf("invalid base URL %q: %v", e.URL, e.Err) + if e.Hint != "" { + msg += fmt.Sprintf(" (hint: %s)", e.Hint) + } + return msg +} // A Client manages communication with the GitLab API. type Client struct { @@ -106,6 +129,13 @@ type Client struct { // Default request options applied to every request. defaultRequestOptions []RequestOptionFunc + // interceptors contain the stack of *http.Client round tripper builder func + // which are used to decorate the http.Client#Transport value. + interceptors []Interceptor + + // urlWarningLogger is used to print URL validation warnings + urlWarningLogger *slog.Logger + // User agent used when communicating with the GitLab API. UserAgent string @@ -114,10 +144,12 @@ type Client struct { // Services used for talking to different parts of the GitLab API. 
AccessRequests AccessRequestsServiceInterface + AdminCompliancePolicySettings AdminCompliancePolicySettingsServiceInterface AlertManagement AlertManagementServiceInterface Appearance AppearanceServiceInterface Applications ApplicationsServiceInterface ApplicationStatistics ApplicationStatisticsServiceInterface + Attestations AttestationsServiceInterface AuditEvents AuditEventsServiceInterface Avatar AvatarRequestsServiceInterface AwardEmoji AwardEmojiServiceInterface @@ -161,6 +193,7 @@ type Client struct { GroupActivityAnalytics GroupActivityAnalyticsServiceInterface GroupBadges GroupBadgesServiceInterface GroupCluster GroupClustersServiceInterface + GroupCredentials GroupCredentialsServiceInterface GroupEpicBoards GroupEpicBoardsServiceInterface GroupImportExport GroupImportExportServiceInterface Integrations IntegrationsServiceInterface @@ -171,6 +204,8 @@ type Client struct { GroupMembers GroupMembersServiceInterface GroupMilestones GroupMilestonesServiceInterface GroupProtectedEnvironments GroupProtectedEnvironmentsServiceInterface + GroupProtectedBranches GroupProtectedBranchesServiceInterface + GroupRelationsExport GroupRelationsExportServiceInterface GroupReleases GroupReleasesServiceInterface GroupRepositoryStorageMove GroupRepositoryStorageMoveServiceInterface GroupSCIM GroupSCIMServiceInterface @@ -196,10 +231,12 @@ type Client struct { MemberRolesService MemberRolesServiceInterface MergeRequestApprovals MergeRequestApprovalsServiceInterface MergeRequestApprovalSettings MergeRequestApprovalSettingsServiceInterface + MergeRequestContextCommits MergeRequestContextCommitsServiceInterface MergeRequests MergeRequestsServiceInterface MergeTrains MergeTrainsServiceInterface Metadata MetadataServiceInterface Milestones MilestonesServiceInterface + ModelRegistry ModelRegistryServiceInterface Namespaces NamespacesServiceInterface Notes NotesServiceInterface NotificationSettings NotificationSettingsServiceInterface @@ -212,6 +249,7 @@ type Client struct { Pipelines 
PipelinesServiceInterface PlanLimits PlanLimitsServiceInterface ProjectAccessTokens ProjectAccessTokensServiceInterface + ProjectAliases ProjectAliasesServiceInterface ProjectBadges ProjectBadgesServiceInterface ProjectCluster ProjectClustersServiceInterface ProjectFeatureFlags ProjectFeatureFlagServiceInterface @@ -223,12 +261,14 @@ type Client struct { ProjectRepositoryStorageMove ProjectRepositoryStorageMoveServiceInterface ProjectSecuritySettings ProjectSecuritySettingsServiceInterface ProjectSnippets ProjectSnippetsServiceInterface + ProjectStatistics ProjectStatisticsServiceInterface ProjectTemplates ProjectTemplatesServiceInterface ProjectVariables ProjectVariablesServiceInterface ProjectVulnerabilities ProjectVulnerabilitiesServiceInterface Projects ProjectsServiceInterface ProtectedBranches ProtectedBranchesServiceInterface ProtectedEnvironments ProtectedEnvironmentsServiceInterface + ProtectedPackages ProtectedPackagesServiceInterface ProtectedTags ProtectedTagsServiceInterface ReleaseLinks ReleaseLinksServiceInterface Releases ReleasesServiceInterface @@ -241,6 +281,9 @@ type Client struct { ResourceMilestoneEvents ResourceMilestoneEventsServiceInterface ResourceStateEvents ResourceStateEventsServiceInterface ResourceWeightEvents ResourceWeightEventsServiceInterface + RunnerControllers RunnerControllersServiceInterface + RunnerControllerScopes RunnerControllerScopesServiceInterface + RunnerControllerTokens RunnerControllerTokensServiceInterface Runners RunnersServiceInterface Search SearchServiceInterface SecureFiles SecureFilesServiceInterface @@ -259,17 +302,44 @@ type Client struct { Validate ValidateServiceInterface Version VersionServiceInterface Wikis WikisServiceInterface + WorkItems WorkItemsServiceInterface } +// Interceptor is used to build a *http.Client request pipeline, +// +// It receives the next RoundTripper in the chain and returns a new one that +// will be used for the request. 
+// +// This next RoundTripper might or might not be the actual transporter, +// which actually does the request call, +// but it is safe to assume that calling the next will result in the expected HTTP call. +// +// Example: +// +// // Simple logger interceptor. +// logger := func(next http.RoundTripper) http.RoundTripper { +// return roundtripperFunc(func(req *http.Request) (*http.Response, error) { +// fmt.Printf("Request: %s %s\n", req.Method, req.URL) +// resp, err := next.RoundTrip(req) +// if err == nil { +// fmt.Printf("Response status: %d\n", resp.StatusCode) +// } +// return resp, err +// }) +// } +// +// The Interceptor type lets you add such middlewares to a client by chaining them. +type Interceptor func(next http.RoundTripper) http.RoundTripper + // ListOptions specifies the optional parameters to various List methods that // support pagination. type ListOptions struct { // For keyset-based paginated result sets, the value must be `"keyset"` Pagination string `url:"pagination,omitempty" json:"pagination,omitempty"` // For offset-based and keyset-based paginated result sets, the number of results to include per page. - PerPage int `url:"per_page,omitempty" json:"per_page,omitempty"` + PerPage int64 `url:"per_page,omitempty" json:"per_page,omitempty"` // For offset-based paginated result sets, page of results to retrieve. - Page int `url:"page,omitempty" json:"page,omitempty"` + Page int64 `url:"page,omitempty" json:"page,omitempty"` // For keyset-based paginated result sets, tree record ID at which to fetch the next page. PageToken string `url:"page_token,omitempty" json:"page_token,omitempty"` // For keyset-based paginated result sets, name of the column by which to order @@ -337,8 +407,9 @@ func NewOAuthClient(token string, options ...ClientOptionFunc) (*Client, error) // NewAuthSourceClient returns a new GitLab API client that uses the AuthSource for authentication. 
func NewAuthSourceClient(as AuthSource, options ...ClientOptionFunc) (*Client, error) { c := &Client{ - UserAgent: userAgent, - authSource: as, + UserAgent: userAgent, + authSource: as, + urlWarningLogger: slog.Default(), } // Configure the HTTP client. @@ -365,6 +436,8 @@ func NewAuthSourceClient(as AuthSource, options ...ClientOptionFunc) (*Client, e } } + decorateHTTPClientTransportWithInterceptors(c) + // Wire up the cookie jar. // The ClientOptionFunc can't do it directly, // because the user may also specify HTTPClient @@ -393,10 +466,12 @@ func NewAuthSourceClient(as AuthSource, options ...ClientOptionFunc) (*Client, e // Create all the public services. c.AccessRequests = &AccessRequestsService{client: c} + c.AdminCompliancePolicySettings = &AdminCompliancePolicySettingsService{client: c} c.AlertManagement = &AlertManagementService{client: c} c.Appearance = &AppearanceService{client: c} c.Applications = &ApplicationsService{client: c} c.ApplicationStatistics = &ApplicationStatisticsService{client: c} + c.Attestations = &AttestationsService{client: c} c.AuditEvents = &AuditEventsService{client: c} c.Avatar = &AvatarRequestsService{client: c} c.AwardEmoji = &AwardEmojiService{client: c} @@ -440,6 +515,7 @@ func NewAuthSourceClient(as AuthSource, options ...ClientOptionFunc) (*Client, e c.GroupActivityAnalytics = &GroupActivityAnalyticsService{client: c} c.GroupBadges = &GroupBadgesService{client: c} c.GroupCluster = &GroupClustersService{client: c} + c.GroupCredentials = &GroupCredentialsService{client: c} c.GroupEpicBoards = &GroupEpicBoardsService{client: c} c.GroupImportExport = &GroupImportExportService{client: c} c.Integrations = &IntegrationsService{client: c} @@ -450,6 +526,8 @@ func NewAuthSourceClient(as AuthSource, options ...ClientOptionFunc) (*Client, e c.GroupMembers = &GroupMembersService{client: c} c.GroupMilestones = &GroupMilestonesService{client: c} c.GroupProtectedEnvironments = &GroupProtectedEnvironmentsService{client: c} + 
c.GroupProtectedBranches = &GroupProtectedBranchesService{client: c} + c.GroupRelationsExport = &GroupRelationsExportService{client: c} c.GroupReleases = &GroupReleasesService{client: c} c.GroupRepositoryStorageMove = &GroupRepositoryStorageMoveService{client: c} c.GroupSCIM = &GroupSCIMService{client: c} @@ -475,10 +553,12 @@ func NewAuthSourceClient(as AuthSource, options ...ClientOptionFunc) (*Client, e c.MemberRolesService = &MemberRolesService{client: c} c.MergeRequestApprovals = &MergeRequestApprovalsService{client: c} c.MergeRequestApprovalSettings = &MergeRequestApprovalSettingsService{client: c} + c.MergeRequestContextCommits = &MergeRequestContextCommitsService{client: c} c.MergeRequests = &MergeRequestsService{client: c, timeStats: timeStats} c.MergeTrains = &MergeTrainsService{client: c} c.Metadata = &MetadataService{client: c} c.Milestones = &MilestonesService{client: c} + c.ModelRegistry = &ModelRegistryService{client: c} c.Namespaces = &NamespacesService{client: c} c.Notes = &NotesService{client: c} c.NotificationSettings = &NotificationSettingsService{client: c} @@ -491,6 +571,7 @@ func NewAuthSourceClient(as AuthSource, options ...ClientOptionFunc) (*Client, e c.Pipelines = &PipelinesService{client: c} c.PlanLimits = &PlanLimitsService{client: c} c.ProjectAccessTokens = &ProjectAccessTokensService{client: c} + c.ProjectAliases = &ProjectAliasesService{client: c} c.ProjectBadges = &ProjectBadgesService{client: c} c.ProjectCluster = &ProjectClustersService{client: c} c.ProjectFeatureFlags = &ProjectFeatureFlagService{client: c} @@ -502,12 +583,14 @@ func NewAuthSourceClient(as AuthSource, options ...ClientOptionFunc) (*Client, e c.ProjectRepositoryStorageMove = &ProjectRepositoryStorageMoveService{client: c} c.ProjectSecuritySettings = &ProjectSecuritySettingsService{client: c} c.ProjectSnippets = &ProjectSnippetsService{client: c} + c.ProjectStatistics = &ProjectStatisticsService{client: c} c.ProjectTemplates = &ProjectTemplatesService{client: c} 
c.ProjectVariables = &ProjectVariablesService{client: c} c.ProjectVulnerabilities = &ProjectVulnerabilitiesService{client: c} c.Projects = &ProjectsService{client: c} c.ProtectedBranches = &ProtectedBranchesService{client: c} c.ProtectedEnvironments = &ProtectedEnvironmentsService{client: c} + c.ProtectedPackages = &ProtectedPackagesService{client: c} c.ProtectedTags = &ProtectedTagsService{client: c} c.ReleaseLinks = &ReleaseLinksService{client: c} c.Releases = &ReleasesService{client: c} @@ -520,6 +603,9 @@ func NewAuthSourceClient(as AuthSource, options ...ClientOptionFunc) (*Client, e c.ResourceMilestoneEvents = &ResourceMilestoneEventsService{client: c} c.ResourceStateEvents = &ResourceStateEventsService{client: c} c.ResourceWeightEvents = &ResourceWeightEventsService{client: c} + c.RunnerControllers = &RunnerControllersService{client: c} + c.RunnerControllerScopes = &RunnerControllerScopesService{client: c} + c.RunnerControllerTokens = &RunnerControllerTokensService{client: c} c.Runners = &RunnersService{client: c} c.Search = &SearchService{client: c} c.SecureFiles = &SecureFilesService{client: c} @@ -538,26 +624,124 @@ func NewAuthSourceClient(as AuthSource, options ...ClientOptionFunc) (*Client, e c.Validate = &ValidateService{client: c} c.Version = &VersionService{client: c} c.Wikis = &WikisService{client: c} + c.WorkItems = &WorkItemsService{client: c} return c, nil } +func decorateHTTPClientTransportWithInterceptors(c *Client) { + if len(c.interceptors) == 0 { + return + } + c.client.HTTPClient.Transport = chainInterceptors(c.client.HTTPClient.Transport, c.interceptors...) 
+} + +func chainInterceptors(rt http.RoundTripper, interceptors ...Interceptor) http.RoundTripper { + for i := len(interceptors) - 1; i >= 0; i-- { + rt = interceptors[i](rt) + } + return rt +} + func (c *Client) HTTPClient() *http.Client { return c.client.HTTPClient } // retryHTTPCheck provides a callback for Client.CheckRetry which -// will retry both rate limit (429) and server (>= 500) errors. +// respects default retries and retries what is safe to retry. func (c *Client) retryHTTPCheck(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry if retries are disabled completely + if c.disableRetries { + return false, nil + } + + // do not retry on context.Canceled or context.DeadlineExceeded if ctx.Err() != nil { return false, ctx.Err() } + if err != nil { - return false, err + // We should be able to retry requests with assumed idempotent HTTP methods. + // In a future iteration we might want to annotate requests that are idempotent + // to further improve this logic here. + if resp != nil && resp.Request != nil { + switch resp.Request.Method { + case http.MethodConnect, http.MethodOptions, http.MethodTrace, http.MethodHead, http.MethodGet: + return true, nil + } + } + + // Only retry errors that we know happened before writing to the wire + var urlErr *url.Error + var netOpErr *net.OpError + var dnsErr *net.DNSError + // see Go src/net/http/transport.go + var potentialTLSHandshakeErr interface { + Timeout() bool + Temporary() bool + } + + switch { + // DNS errors are safe - they happen before any connection + case errors.As(err, &dnsErr): + // NXDOMAIN should not be retried + if dnsErr.IsNotFound { + return false, err + } + // Other DNS errors are safe to retry + return true, nil + + // Direct net.OpError for dial operations + case errors.As(err, &netOpErr): + // from the comments in the implementation of net.OpError.Temporary + // it seems that it's safe to retry temporary OpErrors. 
+ if netOpErr.Temporary() { + return true, nil + } + + if strings.EqualFold(netOpErr.Op, "dial") { + return true, nil + } + + // Connection refused errors are safe - they happen at TCP establishment + case errors.As(err, &urlErr): + if strings.Contains(urlErr.Error(), "connection refused") { + return true, nil + } + + // TLS handshake errors are safe if we can identify them + case errors.As(err, &potentialTLSHandshakeErr): + // Check if this is a TLS handshake timeout specifically + if strings.Contains(err.Error(), "net/http: TLS handshake timeout") { + return true, nil + } + } + + // we are conservative here and do not want to retry any "unknown" errors, because + // they could have happened when the connection was already established and the request + // partially fulfilled. We don't have the insights here if the request + // was idempotent or not, so we reject a retry. + return false, nil } - if !c.disableRetries && (resp.StatusCode == 429 || resp.StatusCode >= 500) { + + // 429 Too Many Requests is recoverable. Sometimes the server puts + // a Retry-After response header to indicate when the server is + // available to start processing request from client. + if resp.StatusCode == http.StatusTooManyRequests { + return true, nil + } + + // Check the response code. We retry on 500-range responses to allow + // the server time to recover, as 500's are typically not permanent + // errors and may relate to outages on the server side. This will catch + // invalid response codes as well, like 0 and 999. 
+ // Status code 0 is especially important for AWS ALB: + // https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-troubleshooting.html#response-code-000 + if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) { + // return true, fmt.Errorf("unexpected HTTP status %s", resp.Status) return true, nil } + return false, nil } @@ -577,18 +761,15 @@ func (c *Client) retryHTTPBackoff(min, max time.Duration, attemptNum int, resp * } // rateLimitBackoff provides a callback for Client.Backoff which will use the -// RateLimit-Reset header to determine the time to wait. We add some jitter +// Ratelimit-Reset header to determine the time to wait. We add some jitter // to prevent a thundering herd. // // min and max are mainly used for bounding the jitter that will be added to // the reset time retrieved from the headers. But if the final wait time is // less then min, min will be used instead. func rateLimitBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { - // rnd is used to generate pseudo-random numbers. - rnd := rand.New(rand.NewSource(time.Now().UnixNano())) - // First create some jitter bounded by the min and max durations. - jitter := time.Duration(rnd.Float64() * float64(max-min)) + jitter := time.Duration(rand.Float64() * float64(max-min)) if resp != nil { if v := resp.Header.Get(headerRateReset); v != "" { @@ -599,7 +780,7 @@ func rateLimitBackoff(min, max time.Duration, attemptNum int, resp *http.Respons } } } else { - // In case the RateLimit-Reset header is not set, back off an additional + // In case the Ratelimit-Reset header is not set, back off an additional // 100% exponentially. With the default milliseconds being set to 100 for // `min`, this makes the 5th retry wait 3.2 seconds (3,200 ms) by default. 
min = time.Duration(float64(min) * math.Pow(2, float64(attemptNum))) @@ -646,16 +827,71 @@ func (c *Client) BaseURL() *url.URL { return &u } +// validateBaseURL checks for common real-world mistakes and returns them as errors. +// Returns the parsed URL if validation succeeds. +func validateBaseURL(baseURL string) (*url.URL, error) { + if baseURL == "" { + return nil, &URLValidationError{ + URL: baseURL, + Err: errors.New("empty URL"), + Hint: `provide a valid GitLab instance URL (e.g., "https://gitlab.com")`, + } + } + + if !strings.Contains(baseURL, "://") { + return nil, &URLValidationError{ + URL: baseURL, + Err: errors.New("missing scheme"), + Hint: fmt.Sprintf(`try "https://%s"`, baseURL), + } + } + + parsedURL, err := url.Parse(baseURL) + if err != nil { + return nil, &URLValidationError{ + URL: baseURL, + Err: err, + Hint: `possible issues: + - missing hostname + - invalid characters/spaces + - invalid port (must be 1-65535) + - query parameters (?) + - fragments (#) + - invalid URL encoding`, + } + } + + if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { + return nil, &URLValidationError{ + URL: baseURL, + Err: fmt.Errorf("unsupported scheme %q", parsedURL.Scheme), + Hint: fmt.Sprintf(`GitLab API requires http or https (try "https://%s")`, parsedURL.Host), + } + } + + return parsedURL, nil +} + // setBaseURL sets the base URL for API requests to a custom endpoint. 
func (c *Client) setBaseURL(urlStr string) error { - // Make sure the given URL end with a slash + // Make sure the given URL ends with a slash if !strings.HasSuffix(urlStr, "/") { urlStr += "/" } - baseURL, err := url.Parse(urlStr) + // Validate and parse + baseURL, err := validateBaseURL(urlStr) if err != nil { - return err + // Log the validation warning + c.urlWarningLogger.Warn("URL validation warning", "error", err) + + // Don't return the error - just warn and continue + // Try to parse anyway as a fallback + baseURL, err = url.Parse(urlStr) + if err != nil { + // If we really can't parse it, we have to give up + return fmt.Errorf("failed to parse base URL: %w", err) + } } if !strings.HasSuffix(baseURL.Path, apiVersionPath) { @@ -734,8 +970,12 @@ func (c *Client) NewRequestToURL(method string, u *url.URL, opt any, options []R } } - // Set the request specific headers. - maps.Copy(req.Header, reqHeaders) + // Set the request specific headers if they don't yet exist. + for k, v := range reqHeaders { + if _, ok := req.Header[k]; !ok { + req.Header[k] = v + } + } return req, nil } @@ -821,18 +1061,21 @@ type Response struct { *http.Response // Fields used for offset-based pagination. - TotalItems int - TotalPages int - ItemsPerPage int - CurrentPage int - NextPage int - PreviousPage int + TotalItems int64 + TotalPages int64 + ItemsPerPage int64 + CurrentPage int64 + NextPage int64 + PreviousPage int64 // Fields used for keyset-based pagination. PreviousLink string NextLink string FirstLink string LastLink string + + // GraphQL pagination. + PageInfo *PageInfo } // newResponse creates a new Response for the provided http.Response. @@ -863,28 +1106,28 @@ const ( // various pagination link values in the Response. 
func (r *Response) populatePageValues() { if totalItems := r.Header.Get(xTotal); totalItems != "" { - r.TotalItems, _ = strconv.Atoi(totalItems) + r.TotalItems, _ = strconv.ParseInt(totalItems, 10, 64) } if totalPages := r.Header.Get(xTotalPages); totalPages != "" { - r.TotalPages, _ = strconv.Atoi(totalPages) + r.TotalPages, _ = strconv.ParseInt(totalPages, 10, 64) } if itemsPerPage := r.Header.Get(xPerPage); itemsPerPage != "" { - r.ItemsPerPage, _ = strconv.Atoi(itemsPerPage) + r.ItemsPerPage, _ = strconv.ParseInt(itemsPerPage, 10, 64) } if currentPage := r.Header.Get(xPage); currentPage != "" { - r.CurrentPage, _ = strconv.Atoi(currentPage) + r.CurrentPage, _ = strconv.ParseInt(currentPage, 10, 64) } if nextPage := r.Header.Get(xNextPage); nextPage != "" { - r.NextPage, _ = strconv.Atoi(nextPage) + r.NextPage, _ = strconv.ParseInt(nextPage, 10, 64) } if previousPage := r.Header.Get(xPrevPage); previousPage != "" { - r.PreviousPage, _ = strconv.Atoi(previousPage) + r.PreviousPage, _ = strconv.ParseInt(previousPage, 10, 64) } } func (r *Response) populateLinkValues() { if link := r.Header.Get("Link"); link != "" { - for _, link := range strings.Split(link, ",") { + for link := range strings.SplitSeq(link, ",") { parts := strings.Split(link, ";") if len(parts) < 2 { continue @@ -927,14 +1170,17 @@ func (c *Client) Do(req *retryablehttp.Request, v any) (*Response, error) { } authKey, authValue, err := c.authSource.Header(req.Context()) - if err != nil { + switch err { + case nil: + if v := req.Header.Values(authKey); len(v) == 0 { + req.Header.Set(authKey, authValue) + } + case errUnauthenticated: //nolint:errorlint + // we simply skip using an auth header + default: // err != nil return nil, err } - if v := req.Header.Values(authKey); len(v) == 0 { - req.Header.Set(authKey, authValue) - } - client := c.client if cr := checkRetryFromContext(req.Context()); cr != nil { @@ -999,6 +1245,8 @@ func parseID(id any) (string, error) { switch v := id.(type) { case int: 
return strconv.Itoa(v), nil + case int64: + return strconv.FormatInt(v, 10), nil case string: return v, nil default: @@ -1030,9 +1278,8 @@ func (e *ErrorResponse) Error() string { if e.Message == "" { return fmt.Sprintf("%s %s: %d", e.Response.Request.Method, url, e.Response.StatusCode) - } else { - return fmt.Sprintf("%s %s: %d %s", e.Response.Request.Method, url, e.Response.StatusCode, e.Message) } + return fmt.Sprintf("%s %s: %d %s", e.Response.Request.Method, url, e.Response.StatusCode, e.Message) } func (e *ErrorResponse) HasStatusCode(statusCode int) bool { @@ -1217,3 +1464,14 @@ func (as *PasswordCredentialsAuthSource) Init(ctx context.Context, client *Clien return nil } + +// Unauthenticated is an authentication source for unauthenticated clients +type Unauthenticated struct{} + +func (Unauthenticated) Init(context.Context, *Client) error { + return nil +} + +func (u Unauthenticated) Header(context.Context) (string, string, error) { + return "", "", errUnauthenticated +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/graphql.go b/vendor/gitlab.com/gitlab-org/api/client-go/graphql.go index 41c64d0692..251c998276 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/graphql.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/graphql.go @@ -4,6 +4,8 @@ import ( "encoding/json" "fmt" "net/http" + "regexp" + "strconv" "strings" ) @@ -22,7 +24,8 @@ type ( } GraphQLQuery struct { - Query string `json:"query"` + Query string `json:"query"` + Variables map[string]any `json:"variables,omitempty"` } GenericGraphQLErrors struct { @@ -86,7 +89,7 @@ func (g *GraphQL) Do(query GraphQLQuery, response any, options ...RequestOptionF resp, err := g.client.Do(request, response) if err != nil { // return error, details can be read from Response - if errResp, ok := err.(*ErrorResponse); ok { //nolint: errorlint + if errResp, ok := err.(*ErrorResponse); ok { //nolint:errorlint var v GenericGraphQLErrors if json.Unmarshal(errResp.Body, &v) == nil { return resp, 
&GraphQLResponseError{ @@ -99,3 +102,90 @@ func (g *GraphQL) Do(query GraphQLQuery, response any, options ...RequestOptionF } return resp, nil } + +// gidGQL is a global ID. It is used by GraphQL to uniquely identify resources. +type gidGQL struct { + Type string + Int64 int64 +} + +var gidGQLRegex = regexp.MustCompile(`^gid://gitlab/([^/]+)/(\d+)$`) + +func (id *gidGQL) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + + m := gidGQLRegex.FindStringSubmatch(s) + if len(m) != 3 { + return fmt.Errorf("invalid global ID format: %q", s) + } + + i, err := strconv.ParseInt(m[2], 10, 64) + if err != nil { + return fmt.Errorf("failed parsing %q as numeric ID: %w", s, err) + } + + id.Type = m[1] + id.Int64 = i + + return nil +} + +func (id gidGQL) String() string { + return fmt.Sprintf("gid://gitlab/%s/%d", id.Type, id.Int64) +} + +// iidGQL represents an int64 ID that is encoded by GraphQL as a string. +// This type is used unmarshal the string response into an int64 type. +type iidGQL int64 + +func (id *iidGQL) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return fmt.Errorf("failed parsing %q as numeric ID: %w", s, err) + } + + *id = iidGQL(i) + return nil +} + +// PageInfo contains cursor-based pagination metadata for GraphQL connections following the Relay +// cursor pagination specification. Use EndCursor and HasNextPage for forward pagination +// (most common), or StartCursor and HasPreviousPage for backward pagination. +// +// Cursors are opaque strings that should not be parsed or constructed manually - always +// use the cursors returned by the API. +// +// Note: GraphQL cursor pagination differs from GitLab's REST API keyset pagination. +// In REST, the pagination link points to the first item of the next page. 
In GraphQL, +// EndCursor points to the last item of the current page - you pass this to the "after" +// parameter to fetch items after it (essentially an off-by-one difference in semantics). +// +// GitLab API docs: https://docs.gitlab.com/api/graphql/reference/#pageinfo +type PageInfo struct { + EndCursor string `json:"endCursor"` // Cursor of the last item in this page (pass to "after" for next page) + HasNextPage bool `json:"hasNextPage"` // True if more items exist after this page + StartCursor string `json:"startCursor"` // Cursor of the first item in this page (pass to "before" for previous page) + HasPreviousPage bool `json:"hasPreviousPage"` // True if items exist before this page +} + +// connectionGQL represents a paginated GraphQL connection response following the Relay +// cursor pagination specification. It wraps a list of nodes of any type T along with +// pagination metadata. This type is used internally to unmarshal GraphQL responses from +// GitLab's API, which consistently uses this connection pattern for all paginated fields. +// +// The PageInfo field provides cursors and flags for iterating through pages, while Nodes +// contains the actual data items for the current page. 
+// +// GitLab API docs: https://docs.gitlab.com/api/graphql/reference/#connection-fields +type connectionGQL[T any] struct { + PageInfo PageInfo `json:"pageInfo"` + Nodes []T `json:"nodes"` +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_access_tokens.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_access_tokens.go index 08946643da..755609d459 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_access_tokens.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_access_tokens.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" ) @@ -25,11 +24,11 @@ type ( // GroupAccessTokensServiceInterface defines all the API methods for the GroupAccessTokensService GroupAccessTokensServiceInterface interface { ListGroupAccessTokens(gid any, opt *ListGroupAccessTokensOptions, options ...RequestOptionFunc) ([]*GroupAccessToken, *Response, error) - GetGroupAccessToken(gid any, id int, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) + GetGroupAccessToken(gid any, id int64, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) CreateGroupAccessToken(gid any, opt *CreateGroupAccessTokenOptions, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) - RotateGroupAccessToken(gid any, id int, opt *RotateGroupAccessTokenOptions, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) + RotateGroupAccessToken(gid any, id int64, opt *RotateGroupAccessTokenOptions, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) RotateGroupAccessTokenSelf(gid any, opt *RotateGroupAccessTokenOptions, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) - RevokeGroupAccessToken(gid any, id int, options ...RequestOptionFunc) (*Response, error) + RevokeGroupAccessToken(gid any, id int64, options ...RequestOptionFunc) (*Response, error) } // GroupAccessTokensService handles communication with the @@ -68,6 +67,9 @@ type ListGroupAccessTokensOptions struct { Revoked *bool 
`url:"revoked,omitempty" json:"revoked,omitempty"` Search *string `url:"search,omitempty" json:"search,omitempty"` State *AccessTokenState `url:"state,omitempty" json:"state,omitempty"` + ExpiresAfter *ISOTime `url:"expires_after,omitempty" json:"expires_after,omitempty"` + ExpiresBefore *ISOTime `url:"expires_before,omitempty" json:"expires_before,omitempty"` + Sort *AccessTokenSort `url:"sort,omitempty" json:"sort,omitempty"` } // ListGroupAccessTokens gets a list of all group access tokens in a group. @@ -75,49 +77,25 @@ type ListGroupAccessTokensOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_access_tokens/#list-all-group-access-tokens func (s *GroupAccessTokensService) ListGroupAccessTokens(gid any, opt *ListGroupAccessTokensOptions, options ...RequestOptionFunc) ([]*GroupAccessToken, *Response, error) { - groups, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/access_tokens", PathEscape(groups)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gats []*GroupAccessToken - resp, err := s.client.Do(req, &gats) - if err != nil { - return nil, resp, err - } - - return gats, resp, nil + return do[[]*GroupAccessToken](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/access_tokens", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupAccessToken gets a single group access tokens in a group. 
// // GitLab API docs: // https://docs.gitlab.com/api/group_access_tokens/#get-details-on-a-group-access-token -func (s *GroupAccessTokensService) GetGroupAccessToken(gid any, id int, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) { - groups, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/access_tokens/%d", PathEscape(groups), id) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gat := new(GroupAccessToken) - resp, err := s.client.Do(req, &gat) - if err != nil { - return nil, resp, err - } - - return gat, resp, nil +func (s *GroupAccessTokensService) GetGroupAccessToken(gid any, id int64, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) { + return do[*GroupAccessToken](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/access_tokens/%d", GroupID{gid}, id), + withAPIOpts(nil), + withRequestOpts(options...), + ) } // CreateGroupAccessTokenOptions represents the available CreateVariable() @@ -138,24 +116,12 @@ type CreateGroupAccessTokenOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_access_tokens/#create-a-group-access-token func (s *GroupAccessTokensService) CreateGroupAccessToken(gid any, opt *CreateGroupAccessTokenOptions, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) { - groups, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/access_tokens", PathEscape(groups)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pat := new(GroupAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil + return do[*GroupAccessToken](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/access_tokens", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // 
RotateGroupAccessTokenOptions represents the available RotateGroupAccessToken() @@ -172,24 +138,13 @@ type RotateGroupAccessTokenOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/group_access_tokens/#rotate-a-group-access-token -func (s *GroupAccessTokensService) RotateGroupAccessToken(gid any, id int, opt *RotateGroupAccessTokenOptions, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) { - groups, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/access_tokens/%d/rotate", PathEscape(groups), id) - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gat := new(GroupAccessToken) - resp, err := s.client.Do(req, gat) - if err != nil { - return nil, resp, err - } - - return gat, resp, nil +func (s *GroupAccessTokensService) RotateGroupAccessToken(gid any, id int64, opt *RotateGroupAccessTokenOptions, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) { + return do[*GroupAccessToken](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/access_tokens/%d/rotate", GroupID{gid}, id), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RotateGroupAccessTokenSelf revokes the group access token used for the request @@ -198,40 +153,24 @@ func (s *GroupAccessTokensService) RotateGroupAccessToken(gid any, id int, opt * // GitLab API docs: // https://docs.gitlab.com/api/group_access_tokens/#self-rotate func (s *GroupAccessTokensService) RotateGroupAccessTokenSelf(gid any, opt *RotateGroupAccessTokenOptions, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) { - groups, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/access_tokens/self/rotate", PathEscape(groups)) - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gat := new(GroupAccessToken) - resp, err := s.client.Do(req, 
gat) - if err != nil { - return nil, resp, err - } - - return gat, resp, nil + return do[*GroupAccessToken](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/access_tokens/self/rotate", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RevokeGroupAccessToken revokes a group access token. // // GitLab API docs: // https://docs.gitlab.com/api/group_access_tokens/#revoke-a-group-access-token -func (s *GroupAccessTokensService) RevokeGroupAccessToken(gid any, id int, options ...RequestOptionFunc) (*Response, error) { - groups, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/access_tokens/%d", PathEscape(groups), id) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupAccessTokensService) RevokeGroupAccessToken(gid any, id int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/access_tokens/%d", GroupID{gid}, id), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_activity_analytics.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_activity_analytics.go index 9824f4d63b..3c5e9bea9d 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_activity_analytics.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_activity_analytics.go @@ -18,8 +18,21 @@ import "net/http" type ( GroupActivityAnalyticsServiceInterface interface { + // GetRecentlyCreatedIssuesCount gets the count of recently created issues for a group. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/group_activity_analytics/#get-count-of-recently-created-issues-for-group GetRecentlyCreatedIssuesCount(opt *GetRecentlyCreatedIssuesCountOptions, options ...RequestOptionFunc) (*IssuesCount, *Response, error) + // GetRecentlyCreatedMergeRequestsCount gets the count of recently created merge + // requests for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_activity_analytics/#get-count-of-recently-created-merge-requests-for-group GetRecentlyCreatedMergeRequestsCount(opt *GetRecentlyCreatedMergeRequestsCountOptions, options ...RequestOptionFunc) (*MergeRequestsCount, *Response, error) + // GetRecentlyAddedMembersCount gets the count of recently added members to a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_activity_analytics/#get-count-of-members-recently-added-to-group GetRecentlyAddedMembersCount(opt *GetRecentlyAddedMembersCountOptions, options ...RequestOptionFunc) (*NewMembersCount, *Response, error) } @@ -39,7 +52,7 @@ var _ GroupActivityAnalyticsServiceInterface = (*GroupActivityAnalyticsService)( // GitLab API docs: // https://docs.gitlab.com/api/group_activity_analytics/#get-count-of-recently-created-issues-for-group type IssuesCount struct { - IssuesCount int `url:"issues_count" json:"issues_count"` + IssuesCount int64 `url:"issues_count" json:"issues_count"` } // GetRecentlyCreatedIssuesCountOptions represents the available @@ -51,24 +64,13 @@ type GetRecentlyCreatedIssuesCountOptions struct { GroupPath string `url:"group_path" json:"group_path"` } -// GetRecentlyCreatedIssuesCount gets the count of recently created issues for a group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_activity_analytics/#get-count-of-recently-created-issues-for-group func (s *GroupActivityAnalyticsService) GetRecentlyCreatedIssuesCount(opt *GetRecentlyCreatedIssuesCountOptions, options ...RequestOptionFunc) (*IssuesCount, *Response, error) { - u := "analytics/group_activity/issues_count" - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - res := new(IssuesCount) - resp, err := s.client.Do(req, res) - if err != nil { - return nil, resp, err - } - - return res, resp, nil + return do[*IssuesCount](s.client, + withMethod(http.MethodGet), + withPath("analytics/group_activity/issues_count"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // MergeRequestsCount represents the total count of recently created merge requests @@ -77,7 +79,7 @@ func (s *GroupActivityAnalyticsService) GetRecentlyCreatedIssuesCount(opt *GetRe // GitLab API docs: // https://docs.gitlab.com/api/group_activity_analytics/#get-count-of-recently-created-merge-requests-for-group type MergeRequestsCount struct { - MergeRequestsCount int `url:"merge_requests_count" json:"merge_requests_count"` + MergeRequestsCount int64 `url:"merge_requests_count" json:"merge_requests_count"` } // GetRecentlyCreatedMergeRequestsCountOptions represents the available @@ -89,25 +91,13 @@ type GetRecentlyCreatedMergeRequestsCountOptions struct { GroupPath string `url:"group_path" json:"group_path"` } -// GetRecentlyCreatedMergeRequestsCount gets the count of recently created merge -// requests for a group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_activity_analytics/#get-count-of-recently-created-merge-requests-for-group func (s *GroupActivityAnalyticsService) GetRecentlyCreatedMergeRequestsCount(opt *GetRecentlyCreatedMergeRequestsCountOptions, options ...RequestOptionFunc) (*MergeRequestsCount, *Response, error) { - u := "analytics/group_activity/merge_requests_count" - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - res := new(MergeRequestsCount) - resp, err := s.client.Do(req, res) - if err != nil { - return nil, resp, err - } - - return res, resp, nil + return do[*MergeRequestsCount](s.client, + withMethod(http.MethodGet), + withPath("analytics/group_activity/merge_requests_count"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // NewMembersCount represents the total count of recently added members to a group. @@ -115,7 +105,7 @@ func (s *GroupActivityAnalyticsService) GetRecentlyCreatedMergeRequestsCount(opt // GitLab API docs: // https://docs.gitlab.com/api/group_activity_analytics/#get-count-of-members-recently-added-to-group type NewMembersCount struct { - NewMembersCount int `url:"new_members_count" json:"new_members_count"` + NewMembersCount int64 `url:"new_members_count" json:"new_members_count"` } // GetRecentlyAddedMembersCountOptions represents the available @@ -127,22 +117,11 @@ type GetRecentlyAddedMembersCountOptions struct { GroupPath string `url:"group_path" json:"group_path"` } -// GetRecentlyAddedMembersCount gets the count of recently added members to a group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_activity_analytics/#get-count-of-members-recently-added-to-group func (s *GroupActivityAnalyticsService) GetRecentlyAddedMembersCount(opt *GetRecentlyAddedMembersCountOptions, options ...RequestOptionFunc) (*NewMembersCount, *Response, error) { - u := "analytics/group_activity/new_members_count" - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - res := new(NewMembersCount) - resp, err := s.client.Do(req, res) - if err != nil { - return nil, resp, err - } - - return res, resp, nil + return do[*NewMembersCount](s.client, + withMethod(http.MethodGet), + withPath("analytics/group_activity/new_members_count"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_badges.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_badges.go index b56c582648..614fe02494 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_badges.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_badges.go @@ -16,19 +16,16 @@ package gitlab -import ( - "fmt" - "net/http" -) +import "net/http" type ( // GroupBadgesServiceInterface defines all the API methods for the GroupBadgesService GroupBadgesServiceInterface interface { ListGroupBadges(gid any, opt *ListGroupBadgesOptions, options ...RequestOptionFunc) ([]*GroupBadge, *Response, error) - GetGroupBadge(gid any, badge int, options ...RequestOptionFunc) (*GroupBadge, *Response, error) + GetGroupBadge(gid any, badge int64, options ...RequestOptionFunc) (*GroupBadge, *Response, error) AddGroupBadge(gid any, opt *AddGroupBadgeOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) - EditGroupBadge(gid any, badge int, opt *EditGroupBadgeOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) - DeleteGroupBadge(gid any, badge int, options ...RequestOptionFunc) (*Response, error) + EditGroupBadge(gid any, badge int64, 
opt *EditGroupBadgeOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) + DeleteGroupBadge(gid any, badge int64, options ...RequestOptionFunc) (*Response, error) PreviewGroupBadge(gid any, opt *GroupBadgePreviewOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) } @@ -57,7 +54,7 @@ const ( // GitLab API docs: // https://docs.gitlab.com/api/group_badges/ type GroupBadge struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` LinkURL string `json:"link_url"` ImageURL string `json:"image_url"` @@ -80,49 +77,22 @@ type ListGroupBadgesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_badges/#list-all-badges-of-a-group func (s *GroupBadgesService) ListGroupBadges(gid any, opt *ListGroupBadgesOptions, options ...RequestOptionFunc) ([]*GroupBadge, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/badges", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gb []*GroupBadge - resp, err := s.client.Do(req, &gb) - if err != nil { - return nil, resp, err - } - - return gb, resp, nil + return do[[]*GroupBadge](s.client, + withPath("groups/%s/badges", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupBadge gets a group badge. 
// // GitLab API docs: // https://docs.gitlab.com/api/group_badges/#get-a-badge-of-a-group -func (s *GroupBadgesService) GetGroupBadge(gid any, badge int, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/badges/%d", PathEscape(group), badge) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gb := new(GroupBadge) - resp, err := s.client.Do(req, gb) - if err != nil { - return nil, resp, err - } - - return gb, resp, nil +func (s *GroupBadgesService) GetGroupBadge(gid any, badge int64, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { + return do[*GroupBadge](s.client, + withPath("groups/%s/badges/%d", GroupID{gid}, badge), + withRequestOpts(options...), + ) } // AddGroupBadgeOptions represents the available AddGroupBadge() options. @@ -140,24 +110,12 @@ type AddGroupBadgeOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_badges/#add-a-badge-to-a-group func (s *GroupBadgesService) AddGroupBadge(gid any, opt *AddGroupBadgeOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/badges", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gb := new(GroupBadge) - resp, err := s.client.Do(req, gb) - if err != nil { - return nil, resp, err - } - - return gb, resp, nil + return do[*GroupBadge](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/badges", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditGroupBadgeOptions represents the available EditGroupBadge() options. 
@@ -174,44 +132,26 @@ type EditGroupBadgeOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/group_badges/#edit-a-badge-of-a-group -func (s *GroupBadgesService) EditGroupBadge(gid any, badge int, opt *EditGroupBadgeOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/badges/%d", PathEscape(group), badge) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - gb := new(GroupBadge) - resp, err := s.client.Do(req, gb) - if err != nil { - return nil, resp, err - } - - return gb, resp, nil +func (s *GroupBadgesService) EditGroupBadge(gid any, badge int64, opt *EditGroupBadgeOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { + return do[*GroupBadge](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/badges/%d", GroupID{gid}, badge), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteGroupBadge removes a badge from a group. // // GitLab API docs: // https://docs.gitlab.com/api/group_badges/#remove-a-badge-from-a-group -func (s *GroupBadgesService) DeleteGroupBadge(gid any, badge int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/badges/%d", PathEscape(group), badge) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupBadgesService) DeleteGroupBadge(gid any, badge int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/badges/%d", GroupID{gid}, badge), + withRequestOpts(options...), + ) + return resp, err } // GroupBadgePreviewOptions represents the available PreviewGroupBadge() options. 
@@ -230,22 +170,9 @@ type GroupBadgePreviewOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_badges/#preview-a-badge-from-a-group func (s *GroupBadgesService) PreviewGroupBadge(gid any, opt *GroupBadgePreviewOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/badges/render", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - gb := new(GroupBadge) - resp, err := s.client.Do(req, &gb) - if err != nil { - return nil, resp, err - } - - return gb, resp, nil + return do[*GroupBadge](s.client, + withPath("groups/%s/badges/render", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_boards.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_boards.go index c5d7e77d22..63f0986c49 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_boards.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_boards.go @@ -17,22 +17,58 @@ package gitlab import ( - "fmt" "net/http" ) type ( GroupIssueBoardsServiceInterface interface { + // ListGroupIssueBoards gets a list of all issue boards in a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_boards/#list-all-group-issue-boards-in-a-group ListGroupIssueBoards(gid any, opt *ListGroupIssueBoardsOptions, options ...RequestOptionFunc) ([]*GroupIssueBoard, *Response, error) + // CreateGroupIssueBoard creates a new issue board. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/group_boards/#create-a-group-issue-board CreateGroupIssueBoard(gid any, opt *CreateGroupIssueBoardOptions, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) - GetGroupIssueBoard(gid any, board int, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) - UpdateIssueBoard(gid any, board int, opt *UpdateGroupIssueBoardOptions, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) - DeleteIssueBoard(gid any, board int, options ...RequestOptionFunc) (*Response, error) - ListGroupIssueBoardLists(gid any, board int, opt *ListGroupIssueBoardListsOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) - GetGroupIssueBoardList(gid any, board, list int, options ...RequestOptionFunc) (*BoardList, *Response, error) - CreateGroupIssueBoardList(gid any, board int, opt *CreateGroupIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) - UpdateIssueBoardList(gid any, board, list int, opt *UpdateGroupIssueBoardListOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) - DeleteGroupIssueBoardList(gid any, board, list int, options ...RequestOptionFunc) (*Response, error) + // GetGroupIssueBoard gets a single issue board of a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_boards/#single-group-issue-board + GetGroupIssueBoard(gid any, board int64, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) + // UpdateIssueBoard updates a single issue board of a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_boards/#update-a-group-issue-board + UpdateIssueBoard(gid any, board int64, opt *UpdateGroupIssueBoardOptions, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) + // DeleteIssueBoard deletes a single issue board of a group. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/group_boards/#delete-a-group-issue-board + DeleteIssueBoard(gid any, board int64, options ...RequestOptionFunc) (*Response, error) + // ListGroupIssueBoardLists gets a list of the issue board's lists. Does not include + // backlog and closed lists. + // + // GitLab API docs: https://docs.gitlab.com/api/group_boards/#list-group-issue-board-lists + ListGroupIssueBoardLists(gid any, board int64, opt *ListGroupIssueBoardListsOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) + // GetGroupIssueBoardList gets a single issue board list. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_boards/#single-group-issue-board-list + GetGroupIssueBoardList(gid any, board, list int64, options ...RequestOptionFunc) (*BoardList, *Response, error) + // CreateGroupIssueBoardList creates a new issue board list. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_boards/#new-group-issue-board-list + CreateGroupIssueBoardList(gid any, board int64, opt *CreateGroupIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) + // UpdateIssueBoardList updates the position of an existing + // group issue board list. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/group_boards/#edit-group-issue-board-list + UpdateIssueBoardList(gid any, board, list int64, opt *UpdateGroupIssueBoardListOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) + DeleteGroupIssueBoardList(gid any, board, list int64, options ...RequestOptionFunc) (*Response, error) } // GroupIssueBoardsService handles communication with the group issue board @@ -52,7 +88,7 @@ var _ GroupIssueBoardsServiceInterface = (*GroupIssueBoardsService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/group_boards/ type GroupIssueBoard struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Group *Group `json:"group"` Milestone *Milestone `json:"milestone"` @@ -69,31 +105,17 @@ func (b GroupIssueBoard) String() string { // // GitLab API docs: // https://docs.gitlab.com/api/group_boards/#list-all-group-issue-boards-in-a-group -type ListGroupIssueBoardsOptions ListOptions +type ListGroupIssueBoardsOptions struct { + ListOptions +} -// ListGroupIssueBoards gets a list of all issue boards in a group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_boards/#list-all-group-issue-boards-in-a-group func (s *GroupIssueBoardsService) ListGroupIssueBoards(gid any, opt *ListGroupIssueBoardsOptions, options ...RequestOptionFunc) ([]*GroupIssueBoard, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gs []*GroupIssueBoard - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil + return do[[]*GroupIssueBoard](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/boards", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateGroupIssueBoardOptions represents the available @@ -105,54 +127,21 @@ type CreateGroupIssueBoardOptions struct { Name *string `url:"name" json:"name"` } -// CreateGroupIssueBoard creates a new issue board. -// -// GitLab API docs: -// https://docs.gitlab.com/api/group_boards/#create-a-group-issue-board func (s *GroupIssueBoardsService) CreateGroupIssueBoard(gid any, opt *CreateGroupIssueBoardOptions, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gib := new(GroupIssueBoard) - resp, err := s.client.Do(req, gib) - if err != nil { - return nil, resp, err - } - - return gib, resp, nil + return do[*GroupIssueBoard](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/boards", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetGroupIssueBoard gets a single issue board of a group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_boards/#single-group-issue-board -func (s *GroupIssueBoardsService) GetGroupIssueBoard(gid any, board int, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d", PathEscape(group), board) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gib := new(GroupIssueBoard) - resp, err := s.client.Do(req, gib) - if err != nil { - return nil, resp, err - } - - return gib, resp, nil +func (s *GroupIssueBoardsService) GetGroupIssueBoard(gid any, board int64, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) { + return do[*GroupIssueBoard](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/boards/%d", GroupID{gid}, board), + withRequestOpts(options...), + ) } // UpdateGroupIssueBoardOptions represents a group issue board. @@ -161,54 +150,28 @@ func (s *GroupIssueBoardsService) GetGroupIssueBoard(gid any, board int, options // https://docs.gitlab.com/api/group_boards/#update-a-group-issue-board type UpdateGroupIssueBoardOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` + AssigneeID *int64 `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + MilestoneID *int64 `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` Labels *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"` - Weight *int `url:"weight,omitempty" json:"weight,omitempty"` + Weight *int64 `url:"weight,omitempty" json:"weight,omitempty"` } -// UpdateIssueBoard updates a single issue board of a group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_boards/#update-a-group-issue-board -func (s *GroupIssueBoardsService) UpdateIssueBoard(gid any, board int, opt *UpdateGroupIssueBoardOptions, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d", PathEscape(group), board) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - gib := new(GroupIssueBoard) - resp, err := s.client.Do(req, gib) - if err != nil { - return nil, resp, err - } - - return gib, resp, nil +func (s *GroupIssueBoardsService) UpdateIssueBoard(gid any, board int64, opt *UpdateGroupIssueBoardOptions, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) { + return do[*GroupIssueBoard](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/boards/%d", GroupID{gid}, board), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteIssueBoard delete a single issue board of a group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_boards/#delete-a-group-issue-board -func (s *GroupIssueBoardsService) DeleteIssueBoard(gid any, board int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d", PathEscape(group), board) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupIssueBoardsService) DeleteIssueBoard(gid any, board int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/boards/%d", GroupID{gid}, board), + withRequestOpts(options...), + ) + return resp, err } // ListGroupIssueBoardListsOptions represents the available @@ -216,60 +179,25 @@ func (s *GroupIssueBoardsService) DeleteIssueBoard(gid any, board int, options . // // GitLab API docs: // https://docs.gitlab.com/api/group_boards/#list-group-issue-board-lists -type ListGroupIssueBoardListsOptions ListOptions - -// ListGroupIssueBoardLists gets a list of the issue board's lists. Does not include -// backlog and closed lists. 
-// -// GitLab API docs: https://docs.gitlab.com/api/group_boards/#list-group-issue-board-lists -func (s *GroupIssueBoardsService) ListGroupIssueBoardLists(gid any, board int, opt *ListGroupIssueBoardListsOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d/lists", PathEscape(group), board) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gbl []*BoardList - resp, err := s.client.Do(req, &gbl) - if err != nil { - return nil, resp, err - } - - return gbl, resp, nil +type ListGroupIssueBoardListsOptions struct { + ListOptions } -// GetGroupIssueBoardList gets a single issue board list. -// -// GitLab API docs: -// https://docs.gitlab.com/api/group_boards/#single-group-issue-board-list -func (s *GroupIssueBoardsService) GetGroupIssueBoardList(gid any, board, list int, options ...RequestOptionFunc) (*BoardList, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d/lists/%d", - PathEscape(group), - board, - list, +func (s *GroupIssueBoardsService) ListGroupIssueBoardLists(gid any, board int64, opt *ListGroupIssueBoardListsOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) { + return do[[]*BoardList](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/boards/%d/lists", GroupID{gid}, board), + withAPIOpts(opt), + withRequestOpts(options...), ) +} - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gbl := new(BoardList) - resp, err := s.client.Do(req, gbl) - if err != nil { - return nil, resp, err - } - - return gbl, resp, nil +func (s *GroupIssueBoardsService) GetGroupIssueBoardList(gid any, board, list int64, options ...RequestOptionFunc) (*BoardList, *Response, error) { + return 
do[*BoardList](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/boards/%d/lists/%d", GroupID{gid}, board, list), + withRequestOpts(options...), + ) } // CreateGroupIssueBoardListOptions represents the available @@ -278,32 +206,16 @@ func (s *GroupIssueBoardsService) GetGroupIssueBoardList(gid any, board, list in // GitLab API docs: // https://docs.gitlab.com/api/group_boards/#new-group-issue-board-list type CreateGroupIssueBoardListOptions struct { - LabelID *int `url:"label_id" json:"label_id"` + LabelID *int64 `url:"label_id" json:"label_id"` } -// CreateGroupIssueBoardList creates a new issue board list. -// -// GitLab API docs: -// https://docs.gitlab.com/api/group_boards/#new-group-issue-board-list -func (s *GroupIssueBoardsService) CreateGroupIssueBoardList(gid any, board int, opt *CreateGroupIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d/lists", PathEscape(group), board) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gbl := new(BoardList) - resp, err := s.client.Do(req, gbl) - if err != nil { - return nil, resp, err - } - - return gbl, resp, nil +func (s *GroupIssueBoardsService) CreateGroupIssueBoardList(gid any, board int64, opt *CreateGroupIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) { + return do[*BoardList](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/boards/%d/lists", GroupID{gid}, board), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateGroupIssueBoardListOptions represents the available @@ -312,59 +224,23 @@ func (s *GroupIssueBoardsService) CreateGroupIssueBoardList(gid any, board int, // GitLab API docs: // https://docs.gitlab.com/api/group_boards/#edit-group-issue-board-list type UpdateGroupIssueBoardListOptions struct { - Position *int 
`url:"position" json:"position"` + Position *int64 `url:"position" json:"position"` } -// UpdateIssueBoardList updates the position of an existing -// group issue board list. -// -// GitLab API docs: -// https://docs.gitlab.com/api/group_boards/#edit-group-issue-board-list -func (s *GroupIssueBoardsService) UpdateIssueBoardList(gid any, board, list int, opt *UpdateGroupIssueBoardListOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d/lists/%d", - PathEscape(group), - board, - list, +func (s *GroupIssueBoardsService) UpdateIssueBoardList(gid any, board, list int64, opt *UpdateGroupIssueBoardListOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) { + return do[[]*BoardList](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/boards/%d/lists/%d", GroupID{gid}, board, list), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gbl []*BoardList - resp, err := s.client.Do(req, &gbl) - if err != nil { - return nil, resp, err - } - - return gbl, resp, nil } -// DeleteGroupIssueBoardList soft deletes a group issue board list. -// Only for admins and group owners. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_boards/#delete-a-group-issue-board-list -func (s *GroupIssueBoardsService) DeleteGroupIssueBoardList(gid any, board, list int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d/lists/%d", - PathEscape(group), - board, - list, +func (s *GroupIssueBoardsService) DeleteGroupIssueBoardList(gid any, board, list int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/boards/%d/lists/%d", GroupID{gid}, board, list), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_clusters.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_clusters.go index 145f56ea42..7bb5e4f924 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_clusters.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_clusters.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -28,13 +27,13 @@ type ( // Deprecated: in GitLab 14.5, to be removed in 19.0 ListClusters(pid any, options ...RequestOptionFunc) ([]*GroupCluster, *Response, error) // Deprecated: in GitLab 14.5, to be removed in 19.0 - GetCluster(pid any, cluster int, options ...RequestOptionFunc) (*GroupCluster, *Response, error) + GetCluster(pid any, cluster int64, options ...RequestOptionFunc) (*GroupCluster, *Response, error) // Deprecated: in GitLab 14.5, to be removed in 19.0 AddCluster(pid any, opt *AddGroupClusterOptions, options ...RequestOptionFunc) (*GroupCluster, *Response, error) // Deprecated: in GitLab 14.5, to be removed in 19.0 - EditCluster(pid any, cluster int, opt *EditGroupClusterOptions, options ...RequestOptionFunc) 
(*GroupCluster, *Response, error) + EditCluster(pid any, cluster int64, opt *EditGroupClusterOptions, options ...RequestOptionFunc) (*GroupCluster, *Response, error) // Deprecated: in GitLab 14.5, to be removed in 19.0 - DeleteCluster(pid any, cluster int, options ...RequestOptionFunc) (*Response, error) + DeleteCluster(pid any, cluster int64, options ...RequestOptionFunc) (*Response, error) } // GroupClustersService handles communication with the @@ -56,7 +55,7 @@ var _ GroupClustersServiceInterface = (*GroupClustersService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/group_clusters/ type GroupCluster struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Domain string `json:"domain"` CreatedAt *time.Time `json:"created_at"` @@ -83,24 +82,10 @@ func (v GroupCluster) String() string { // GitLab API docs: // https://docs.gitlab.com/api/group_clusters/#list-group-clusters func (s *GroupClustersService) ListClusters(pid any, options ...RequestOptionFunc) ([]*GroupCluster, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/clusters", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var pcs []*GroupCluster - resp, err := s.client.Do(req, &pcs) - if err != nil { - return nil, resp, err - } - - return pcs, resp, nil + return do[[]*GroupCluster](s.client, + withPath("groups/%s/clusters", GroupID{pid}), + withRequestOpts(options...), + ) } // GetCluster gets a cluster. 
@@ -108,25 +93,11 @@ func (s *GroupClustersService) ListClusters(pid any, options ...RequestOptionFun // // GitLab API docs: // https://docs.gitlab.com/api/group_clusters/#get-a-single-group-cluster -func (s *GroupClustersService) GetCluster(pid any, cluster int, options ...RequestOptionFunc) (*GroupCluster, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/clusters/%d", PathEscape(group), cluster) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gc := new(GroupCluster) - resp, err := s.client.Do(req, &gc) - if err != nil { - return nil, resp, err - } - - return gc, resp, nil +func (s *GroupClustersService) GetCluster(pid any, cluster int64, options ...RequestOptionFunc) (*GroupCluster, *Response, error) { + return do[*GroupCluster](s.client, + withPath("groups/%s/clusters/%d", GroupID{pid}, cluster), + withRequestOpts(options...), + ) } // AddGroupClusterOptions represents the available AddCluster() options. 
@@ -160,24 +131,12 @@ type AddGroupPlatformKubernetesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_clusters/#add-existing-cluster-to-group func (s *GroupClustersService) AddCluster(pid any, opt *AddGroupClusterOptions, options ...RequestOptionFunc) (*GroupCluster, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/clusters/user", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gc := new(GroupCluster) - resp, err := s.client.Do(req, gc) - if err != nil { - return nil, resp, err - } - - return gc, resp, nil + return do[*GroupCluster](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/clusters/user", GroupID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditGroupClusterOptions represents the available EditCluster() options. @@ -206,25 +165,13 @@ type EditGroupPlatformKubernetesOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/group_clusters/#edit-group-cluster -func (s *GroupClustersService) EditCluster(pid any, cluster int, opt *EditGroupClusterOptions, options ...RequestOptionFunc) (*GroupCluster, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/clusters/%d", PathEscape(group), cluster) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - gc := new(GroupCluster) - resp, err := s.client.Do(req, gc) - if err != nil { - return nil, resp, err - } - - return gc, resp, nil +func (s *GroupClustersService) EditCluster(pid any, cluster int64, opt *EditGroupClusterOptions, options ...RequestOptionFunc) (*GroupCluster, *Response, error) { + return do[*GroupCluster](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/clusters/%d", GroupID{pid}, cluster), + withAPIOpts(opt), + 
withRequestOpts(options...), + ) } // DeleteCluster deletes an existing group cluster. @@ -232,17 +179,11 @@ func (s *GroupClustersService) EditCluster(pid any, cluster int, opt *EditGroupC // // GitLab API docs: // https://docs.gitlab.com/api/group_clusters/#delete-group-cluster -func (s *GroupClustersService) DeleteCluster(pid any, cluster int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/clusters/%d", PathEscape(group), cluster) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupClustersService) DeleteCluster(pid any, cluster int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/clusters/%d", GroupID{pid}, cluster), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_credentials.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_credentials.go new file mode 100644 index 0000000000..bea0fff68a --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_credentials.go @@ -0,0 +1,140 @@ +package gitlab + +import ( + "net/http" + "time" +) + +type ( + GroupCredentialsServiceInterface interface { + // ListGroupPersonalAccessTokens lists all personal access tokens + // associated with enterprise users in a top-level group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/groups/#list-all-personal-access-tokens-for-a-group + ListGroupPersonalAccessTokens(gid any, opt *ListGroupPersonalAccessTokensOptions, options ...RequestOptionFunc) ([]*GroupPersonalAccessToken, *Response, error) + // ListGroupSSHKeys lists all SSH public keys associated with + // enterprise users in a top-level group. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/groups/#list-all-ssh-keys-for-a-group + ListGroupSSHKeys(gid any, opt *ListGroupSSHKeysOptions, options ...RequestOptionFunc) ([]*GroupSSHKey, *Response, error) + // RevokeGroupPersonalAccessToken revokes a specified personal access token + // for an enterprise user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/groups/#revoke-a-personal-access-token-for-an-enterprise-user + RevokeGroupPersonalAccessToken(gid any, tokenID int64, options ...RequestOptionFunc) (*Response, error) + // DeleteGroupSSHKey deletes a specified SSH public key for an + // enterprise user associated with the top-level group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/groups/#delete-an-ssh-key-for-an-enterprise-user + DeleteGroupSSHKey(gid any, keyID int64, options ...RequestOptionFunc) (*Response, error) + } + + // GroupCredentialsService handles communication with the top-level group + // credentials inventory management endpoints of the GitLab API. + // + // GitLab API docs: + // https://docs.gitlab.com/api/groups/#credentials-inventory-management + GroupCredentialsService struct { + client *Client + } +) + +// GroupPersonalAccessToken represents a group enterprise users personal access token. +// +// GitLab API docs: +// https://docs.gitlab.com/api/groups/#list-all-personal-access-tokens-for-a-group +type GroupPersonalAccessToken struct { + ID int64 `json:"id"` + Name string `json:"name"` + Revoked bool `json:"revoked"` + CreatedAt *time.Time `json:"created_at"` + Description string `json:"description"` + Scopes []string `json:"scopes"` + UserID int64 `json:"user_id"` + LastUsedAt *time.Time `json:"last_used_at,omitempty"` + Active bool `json:"active"` + ExpiresAt *ISOTime `json:"expires_at"` +} + +// ListGroupPersonalAccessTokensOptions represents the available +// ListGroupPersonalAccessTokens() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/groups/#list-all-personal-access-tokens-for-a-group +type ListGroupPersonalAccessTokensOptions struct { + ListOptions + CreatedAfter *ISOTime `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *ISOTime `url:"created_before,omitempty" json:"created_before,omitempty"` + LastUsedAfter *ISOTime `url:"last_used_after,omitempty" json:"last_used_after,omitempty"` + LastUsedBefore *ISOTime `url:"last_used_before,omitempty" json:"last_used_before,omitempty"` + Revoked *bool `url:"revoked,omitempty" json:"revoked,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` +} + +func (g *GroupCredentialsService) ListGroupPersonalAccessTokens(gid any, opt *ListGroupPersonalAccessTokensOptions, options ...RequestOptionFunc) ([]*GroupPersonalAccessToken, *Response, error) { + return do[[]*GroupPersonalAccessToken](g.client, + withMethod(http.MethodGet), + withPath("groups/%s/manage/personal_access_tokens", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +// GroupSSHKey represents a group enterprise users public SSH key. +// +// GitLab API docs: +// https://docs.gitlab.com/api/groups/#list-all-ssh-keys-for-a-group +type GroupSSHKey struct { + ID int64 `json:"id"` + Title string `json:"title"` + CreatedAt *time.Time `json:"created_at"` + ExpiresAt *time.Time `json:"expires_at"` + LastUsedAt *time.Time `json:"last_used_at"` + UsageType string `json:"usage_type"` + UserID int64 `json:"user_id"` +} + +// ListGroupSSHKeysOptions represents the available +// ListGroupSSHKeys() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/groups/#list-all-ssh-keys-for-a-group +type ListGroupSSHKeysOptions struct { + ListOptions + CreatedAfter *ISOTime `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *ISOTime `url:"created_before,omitempty" json:"created_before,omitempty"` + ExpiresBefore *ISOTime `url:"expires_before,omitempty" json:"expires_before,omitempty"` + ExpiresAfter *ISOTime `url:"expires_after,omitempty" json:"expires_after,omitempty"` +} + +func (g *GroupCredentialsService) ListGroupSSHKeys(gid any, opt *ListGroupSSHKeysOptions, options ...RequestOptionFunc) ([]*GroupSSHKey, *Response, error) { + return do[[]*GroupSSHKey](g.client, + withMethod(http.MethodGet), + withPath("groups/%s/manage/ssh_keys", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +func (g *GroupCredentialsService) RevokeGroupPersonalAccessToken(gid any, tokenID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](g.client, + withMethod(http.MethodDelete), + withPath("groups/%s/manage/personal_access_tokens/%d", GroupID{gid}, tokenID), + withRequestOpts(options...), + ) + return resp, err +} + +func (g *GroupCredentialsService) DeleteGroupSSHKey(gid any, keyID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](g.client, + withMethod(http.MethodDelete), + withPath("groups/%s/manage/ssh_keys/%d", GroupID{gid}, keyID), + withRequestOpts(options...), + ) + return resp, err +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_epic_boards.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_epic_boards.go index 23097312e2..d3dacf7dff 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_epic_boards.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_epic_boards.go @@ -16,15 +16,10 @@ package gitlab -import ( - "fmt" - "net/http" -) - type ( GroupEpicBoardsServiceInterface interface { ListGroupEpicBoards(gid any, opt 
*ListGroupEpicBoardsOptions, options ...RequestOptionFunc) ([]*GroupEpicBoard, *Response, error) - GetGroupEpicBoard(gid any, board int, options ...RequestOptionFunc) (*GroupEpicBoard, *Response, error) + GetGroupEpicBoard(gid any, board int64, options ...RequestOptionFunc) (*GroupEpicBoard, *Response, error) } // GroupEpicBoardsService handles communication with the group epic board @@ -44,7 +39,7 @@ var _ GroupEpicBoardsServiceInterface = (*GroupEpicBoardsService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/group_epic_boards/ type GroupEpicBoard struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Group *Group `json:"group"` Labels []*LabelDetails `json:"labels"` @@ -60,54 +55,29 @@ func (b GroupEpicBoard) String() string { // // GitLab API docs: // https://docs.gitlab.com/api/group_epic_boards/#list-all-epic-boards-in-a-group -type ListGroupEpicBoardsOptions ListOptions +type ListGroupEpicBoardsOptions struct { + ListOptions +} // ListGroupEpicBoards gets a list of all epic boards in a group. // // GitLab API docs: // https://docs.gitlab.com/api/group_epic_boards/#list-all-epic-boards-in-a-group func (s *GroupEpicBoardsService) ListGroupEpicBoards(gid any, opt *ListGroupEpicBoardsOptions, options ...RequestOptionFunc) ([]*GroupEpicBoard, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epic_boards", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gs []*GroupEpicBoard - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil + return do[[]*GroupEpicBoard](s.client, + withPath("groups/%s/epic_boards", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupEpicBoard gets a single epic board of a group. 
// // GitLab API docs: // https://docs.gitlab.com/api/group_epic_boards/#single-group-epic-board -func (s *GroupEpicBoardsService) GetGroupEpicBoard(gid any, board int, options ...RequestOptionFunc) (*GroupEpicBoard, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epic_boards/%d", PathEscape(group), board) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gib := new(GroupEpicBoard) - resp, err := s.client.Do(req, gib) - if err != nil { - return nil, resp, err - } - - return gib, resp, nil +func (s *GroupEpicBoardsService) GetGroupEpicBoard(gid any, board int64, options ...RequestOptionFunc) (*GroupEpicBoard, *Response, error) { + return do[*GroupEpicBoard](s.client, + withPath("groups/%s/epic_boards/%d", GroupID{gid}, board), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_hooks.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_hooks.go index cd7117c846..666ddb2921 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_hooks.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_hooks.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -27,7 +26,7 @@ import ( // GitLab API docs: // https://docs.gitlab.com/api/group_webhooks/ type GroupHook struct { - ID int `json:"id"` + ID int64 `json:"id"` URL string `json:"url"` Name string `json:"name"` Description string `json:"description"` @@ -42,7 +41,7 @@ type GroupHook struct { BranchFilterStrategy string `json:"branch_filter_strategy"` CustomWebhookTemplate string `json:"custom_webhook_template"` CustomHeaders []*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` - GroupID int `json:"group_id"` + GroupID int64 `json:"group_id"` IssuesEvents bool `json:"issues_events"` ConfidentialIssuesEvents bool `json:"confidential_issues_events"` NoteEvents bool 
`json:"note_events"` @@ -57,80 +56,53 @@ type GroupHook struct { EmojiEvents bool `json:"emoji_events"` ResourceAccessTokenEvents bool `json:"resource_access_token_events"` MemberEvents bool `json:"member_events"` + ProjectEvents bool `json:"project_events"` + MilestoneEvents bool `json:"milestone_events"` + VulnerabilityEvents bool `json:"vulnerability_events"` } // ListGroupHooksOptions represents the available ListGroupHooks() options. // // GitLab API docs: // https://docs.gitlab.com/api/group_webhooks/#list-group-hooks -type ListGroupHooksOptions ListOptions +type ListGroupHooksOptions struct { + ListOptions +} // ListGroupHooks gets a list of group hooks. // // GitLab API docs: // https://docs.gitlab.com/api/group_webhooks/#list-group-hooks func (s *GroupsService) ListGroupHooks(gid any, opt *ListGroupHooksOptions, options ...RequestOptionFunc) ([]*GroupHook, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/hooks", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - var gh []*GroupHook - resp, err := s.client.Do(req, &gh) - if err != nil { - return nil, resp, err - } - - return gh, resp, nil + return do[[]*GroupHook](s.client, + withPath("groups/%s/hooks", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupHook gets a specific hook for a group. 
// // GitLab API docs: // https://docs.gitlab.com/api/group_webhooks/#get-a-group-hook -func (s *GroupsService) GetGroupHook(gid any, hook int, options ...RequestOptionFunc) (*GroupHook, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gh := new(GroupHook) - resp, err := s.client.Do(req, gh) - if err != nil { - return nil, resp, err - } - - return gh, resp, nil +func (s *GroupsService) GetGroupHook(gid any, hook int64, options ...RequestOptionFunc) (*GroupHook, *Response, error) { + return do[*GroupHook](s.client, + withPath("groups/%s/hooks/%d", GroupID{gid}, hook), + withRequestOpts(options...), + ) } // ResendGroupHookEvent resends a specific hook event. // // GitLab API docs: // https://docs.gitlab.com/api/group_webhooks/#resend-group-hook-event -func (s *GroupsService) ResendGroupHookEvent(gid any, hook int, hookEventID int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d/events/%d/resend", PathEscape(group), hook, hookEventID) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupsService) ResendGroupHookEvent(gid any, hook int64, hookEventID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/hooks/%d/events/%d/resend", GroupID{gid}, hook, hookEventID), + withRequestOpts(options...), + ) + return resp, err } // AddGroupHookOptions represents the available AddGroupHook() options. 
@@ -152,13 +124,16 @@ type AddGroupHookOptions struct { ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + ProjectEvents *bool `url:"project_events,omitempty" json:"project_events,omitempty"` WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` FeatureFlagEvents *bool `url:"feature_flag_events,omitempty" json:"feature_flag_events,omitempty"` ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` + MilestoneEvents *bool `url:"milestone_events,omitempty" json:"milestone_events,omitempty"` SubGroupEvents *bool `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"` EmojiEvents *bool `url:"emoji_events,omitempty" json:"emoji_events,omitempty"` MemberEvents *bool `url:"member_events,omitempty" json:"member_events,omitempty"` + VulnerabilityEvents *bool `url:"vulnerability_events,omitempty" json:"vulnerability_events,omitempty"` EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` Token *string `url:"token,omitempty" json:"token,omitempty"` ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"` @@ -171,24 +146,12 @@ type AddGroupHookOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_webhooks/#add-a-group-hook func (s *GroupsService) AddGroupHook(gid any, opt *AddGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/hooks", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, 
options) - if err != nil { - return nil, nil, err - } - - gh := new(GroupHook) - resp, err := s.client.Do(req, gh) - if err != nil { - return nil, resp, err - } - - return gh, resp, nil + return do[*GroupHook](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/hooks", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditGroupHookOptions represents the available EditGroupHook() options. @@ -210,13 +173,16 @@ type EditGroupHookOptions struct { ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + ProjectEvents *bool `url:"project_events,omitempty" json:"project_events,omitempty"` WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` FeatureFlagEvents *bool `url:"feature_flag_events,omitempty" json:"feature_flag_events,omitempty"` ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` + MilestoneEvents *bool `url:"milestone_events,omitempty" json:"milestone_events,omitempty"` SubGroupEvents *bool `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"` EmojiEvents *bool `url:"emoji_events,omitempty" json:"emoji_events,omitempty"` MemberEvents *bool `url:"member_events,omitempty" json:"member_events,omitempty"` + VulnerabilityEvents *bool `url:"vulnerability_events,omitempty" json:"vulnerability_events,omitempty"` EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` ServiceAccessTokensExpirationEnforced *bool `url:"service_access_tokens_expiration_enforced,omitempty" json:"service_access_tokens_expiration_enforced,omitempty"` Token *string `url:"token,omitempty" json:"token,omitempty"` 
@@ -227,27 +193,15 @@ type EditGroupHookOptions struct { // EditGroupHook edits a hook for a specified group. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/group_webhooks/#edit-group-hook -func (s *GroupsService) EditGroupHook(gid any, hook int, opt *EditGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - gh := new(GroupHook) - resp, err := s.client.Do(req, gh) - if err != nil { - return nil, resp, err - } - - return gh, resp, nil +func (s *GroupsService) EditGroupHook(gid any, hook int64, opt *EditGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) { + return do[*GroupHook](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/hooks/%d", GroupID{gid}, hook), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteGroupHook removes a hook from a group. 
This is an idempotent @@ -255,76 +209,53 @@ func (s *GroupsService) EditGroupHook(gid any, hook int, opt *EditGroupHookOptio // // GitLab API docs: // https://docs.gitlab.com/api/group_webhooks/#delete-a-group-hook -func (s *GroupsService) DeleteGroupHook(gid any, hook int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupsService) DeleteGroupHook(gid any, hook int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/hooks/%d", GroupID{gid}, hook), + withRequestOpts(options...), + ) + return resp, err } // TriggerTestGroupHook triggers a test hook for a specified group. // // GitLab API docs: // https://docs.gitlab.com/api/group_webhooks/#trigger-a-test-group-hook -func (s *GroupsService) TriggerTestGroupHook(pid any, hook int, trigger GroupHookTrigger, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d/test/%s", PathEscape(group), hook, trigger) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupsService) TriggerTestGroupHook(pid any, hook int64, trigger GroupHookTrigger, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/hooks/%d/test/%s", GroupID{pid}, hook, NoEscape{string(trigger)}), + withRequestOpts(options...), + ) + return resp, err } // SetGroupCustomHeader creates or updates a group custom webhook header. 
// // GitLab API docs: // https://docs.gitlab.com/api/group_webhooks/#set-a-custom-header -func (s *GroupsService) SetGroupCustomHeader(gid any, hook int, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d/custom_headers/%s", PathEscape(group), hook, key) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupsService) SetGroupCustomHeader(gid any, hook int64, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/hooks/%d/custom_headers/%s", GroupID{gid}, hook, NoEscape{key}), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // DeleteGroupCustomHeader deletes a group custom webhook header. 
// // GitLab API docs: // https://docs.gitlab.com/api/group_webhooks/#delete-a-custom-header -func (s *GroupsService) DeleteGroupCustomHeader(gid any, hook int, key string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d/custom_headers/%s", PathEscape(group), hook, key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupsService) DeleteGroupCustomHeader(gid any, hook int64, key string, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/hooks/%d/custom_headers/%s", GroupID{gid}, hook, NoEscape{key}), + withRequestOpts(options...), + ) + return resp, err } // SetHookURLVariableOptions represents the available SetGroupHookURLVariable() @@ -340,36 +271,25 @@ type SetHookURLVariableOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/group_webhooks/#set-a-url-variable -func (s *GroupsService) SetGroupHookURLVariable(gid any, hook int, key string, opt *SetHookURLVariableOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d/url_variables/%s", PathEscape(group), hook, key) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupsService) SetGroupHookURLVariable(gid any, hook int64, key string, opt *SetHookURLVariableOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/hooks/%d/url_variables/%s", GroupID{gid}, hook, NoEscape{key}), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // DeleteGroupHookURLVariable sets a 
group hook URL variable. // // GitLab API docs: // https://docs.gitlab.com/api/group_webhooks/#delete-a-url-variable -func (s *GroupsService) DeleteGroupHookURLVariable(gid any, hook int, key string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d/url_variables/%s", PathEscape(group), hook, key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupsService) DeleteGroupHookURLVariable(gid any, hook int64, key string, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/hooks/%d/url_variables/%s", GroupID{gid}, hook, NoEscape{key}), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_import_export.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_import_export.go index 4064800400..5b54c2fcd7 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_import_export.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_import_export.go @@ -18,7 +18,7 @@ package gitlab import ( "bytes" - "fmt" + "errors" "io" "mime/multipart" "net/http" @@ -50,18 +50,13 @@ var _ GroupImportExportServiceInterface = (*GroupImportExportService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/group_import_export/#schedule-new-export func (s *GroupImportExportService) ScheduleExport(gid any, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/export", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/export", 
GroupID{gid}), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } // ExportDownload downloads the finished export. @@ -69,24 +64,14 @@ func (s *GroupImportExportService) ScheduleExport(gid any, options ...RequestOpt // GitLab API docs: // https://docs.gitlab.com/api/group_import_export/#export-download func (s *GroupImportExportService) ExportDownload(gid any, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/export/download", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - exportDownload := new(bytes.Buffer) - resp, err := s.client.Do(req, exportDownload) + buf, resp, err := do[bytes.Buffer](s.client, + withPath("groups/%s/export/download", GroupID{gid}), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return bytes.NewReader(exportDownload.Bytes()), resp, err + return bytes.NewReader(buf.Bytes()), resp, nil } // GroupImportFileOptions represents the available ImportFile() options. @@ -97,7 +82,7 @@ type GroupImportFileOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` Path *string `url:"path,omitempty" json:"path,omitempty"` File *string `url:"file,omitempty" json:"file,omitempty"` - ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` + ParentID *int64 `url:"parent_id,omitempty" json:"parent_id,omitempty"` } // ImportFile imports a file. @@ -107,13 +92,13 @@ type GroupImportFileOptions struct { func (s *GroupImportExportService) ImportFile(opt *GroupImportFileOptions, options ...RequestOptionFunc) (*Response, error) { // First check if we got all required options. 
if opt.Name == nil || *opt.Name == "" { - return nil, fmt.Errorf("missing required option: Name") + return nil, errors.New("missing required option: Name") } if opt.Path == nil || *opt.Path == "" { - return nil, fmt.Errorf("missing required option: Path") + return nil, errors.New("missing required option: Path") } if opt.File == nil || *opt.File == "" { - return nil, fmt.Errorf("missing required option: File") + return nil, errors.New("missing required option: File") } f, err := os.Open(*opt.File) @@ -163,7 +148,7 @@ func (s *GroupImportExportService) ImportFile(opt *GroupImportFileOptions, optio return nil, err } - _, err = fw.Write([]byte(strconv.Itoa(*opt.ParentID))) + _, err = fw.Write([]byte(strconv.FormatInt(*opt.ParentID, 10))) if err != nil { return nil, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_integrations.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_integrations.go new file mode 100644 index 0000000000..bf3f87e692 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_integrations.go @@ -0,0 +1,184 @@ +package gitlab + +import ( + "net/http" + "time" +) + +// GroupMattermostIntegration represents a Mattermost integration for a group. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/group_integrations/#mattermost-notifications +type GroupMattermostIntegration struct { + Integration + NotifyOnlyBrokenPipelines bool `json:"notify_only_broken_pipelines"` + BranchesToBeNotified string `json:"branches_to_be_notified"` + LabelsToBeNotified string `json:"labels_to_be_notified"` + LabelsToBeNotifiedBehavior string `json:"labels_to_be_notified_behavior"` + NotifyOnlyDefaultBranch bool `json:"notify_only_default_branch"` + Properties *GroupMattermostIntegrationProperties `json:"properties"` +} + +type GroupMattermostIntegrationProperties struct { + WebHook string `json:"webhook"` + Username string `json:"username"` + Channel string `json:"channel"` + PushChannel string `json:"push_channel"` + IssueChannel string `json:"issue_channel"` + ConfidentialIssueChannel string `json:"confidential_issue_channel"` + MergeRequestChannel string `json:"merge_request_channel"` + NoteChannel string `json:"note_channel"` + ConfidentialNoteChannel string `json:"confidential_note_channel"` + TagPushChannel string `json:"tag_push_channel"` + PipelineChannel string `json:"pipeline_channel"` + WikiPageChannel string `json:"wiki_page_channel"` + DeploymentChannel string `json:"deployment_channel"` + AlertChannel string `json:"alert_channel"` + VulnerabilityChannel string `json:"vulnerability_channel"` +} + +// GroupMattermostIntegrationOptions represents the available options for +// creating or updating a Mattermost integration for a group. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/group_integrations/#mattermost-notifications +type GroupMattermostIntegrationOptions struct { + WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + Channel *string `url:"channel,omitempty" json:"channel,omitempty"` + NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + AlertEvents *bool `url:"alert_events,omitempty" json:"alert_events,omitempty"` + VulnerabilityEvents *bool `url:"vulnerability_events,omitempty" json:"vulnerability_events,omitempty"` + PushChannel *string `url:"push_channel,omitempty" json:"push_channel,omitempty"` + IssueChannel *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"` + ConfidentialIssueChannel *string `url:"confidential_issue_channel,omitempty" 
json:"confidential_issue_channel,omitempty"` + MergeRequestChannel *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"` + NoteChannel *string `url:"note_channel,omitempty" json:"note_channel,omitempty"` + ConfidentialNoteChannel *string `url:"confidential_note_channel,omitempty" json:"confidential_note_channel,omitempty"` + TagPushChannel *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"` + PipelineChannel *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"` + WikiPageChannel *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"` + DeploymentChannel *string `url:"deployment_channel,omitempty" json:"deployment_channel,omitempty"` + AlertChannel *string `url:"alert_channel,omitempty" json:"alert_channel,omitempty"` + VulnerabilityChannel *string `url:"vulnerability_channel,omitempty" json:"vulnerability_channel,omitempty"` + LabelsToBeNotified *string `url:"labels_to_be_notified,omitempty" json:"labels_to_be_notified,omitempty"` + LabelsToBeNotifiedBehavior *string `url:"labels_to_be_notified_behavior,omitempty" json:"labels_to_be_notified_behavior,omitempty"` + NotifyOnlyDefaultBranch *bool `url:"notify_only_default_branch,omitempty" json:"notify_only_default_branch,omitempty"` + UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"` +} + +// GetGroupMattermostIntegration retrieves the Mattermost integration for a group. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/group_integrations/#mattermost-notifications +func (s *IntegrationsService) GetGroupMattermostIntegration(gid any, options ...RequestOptionFunc) (*GroupMattermostIntegration, *Response, error) { + return do[*GroupMattermostIntegration]( + s.client, + withPath("groups/%s/integrations/mattermost", GroupID{gid}), + withMethod(http.MethodGet), + withRequestOpts(options...), + ) +} + +// SetGroupMattermostIntegration creates or updates the Mattermost integration for a group. +// +// GitLab API docs: +// https://docs.gitlab.com/api/group_integrations/#mattermost-notifications +func (s *IntegrationsService) SetGroupMattermostIntegration(gid any, opt *GroupMattermostIntegrationOptions, options ...RequestOptionFunc) (*GroupMattermostIntegration, *Response, error) { + return do[*GroupMattermostIntegration]( + s.client, + withPath("groups/%s/integrations/mattermost", GroupID{gid}), + withMethod(http.MethodPut), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +// DeleteGroupMattermostIntegration removes the Mattermost integration from a group. +// +// GitLab API docs: +// https://docs.gitlab.com/api/group_integrations/#mattermost-notifications +func (s *IntegrationsService) DeleteGroupMattermostIntegration(gid any, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none]( + s.client, + withPath("groups/%s/integrations/mattermost", GroupID{gid}), + withMethod(http.MethodDelete), + withRequestOpts(options...), + ) + return resp, err +} + +// GroupMattermostSlashCommandsIntegration represents a Mattermost slash commands integration for a group. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/group_integrations/#mattermost-slash-commands +type GroupMattermostSlashCommandsIntegration struct { + ID int `json:"id"` + Title string `json:"title"` + Slug string `json:"slug"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + Token string `json:"token"` +} + +// GroupMattermostSlashCommandsIntegrationOptions represents the available options for +// creating or updating a Mattermost slash commands integration for a group. +// +// GitLab API docs: +// https://docs.gitlab.com/api/group_integrations/#mattermost-slash-commands +type GroupMattermostSlashCommandsIntegrationOptions struct { + Token *string `url:"token,omitempty" json:"token,omitempty"` + UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"` +} + +// GetGroupMattermostSlashCommandsIntegration retrieves the Mattermost slash commands integration for a group. +// +// GitLab API docs: +// https://docs.gitlab.com/api/group_integrations/#mattermost-slash-commands +func (s *IntegrationsService) GetGroupMattermostSlashCommandsIntegration(gid any, options ...RequestOptionFunc) (*GroupMattermostSlashCommandsIntegration, *Response, error) { + return do[*GroupMattermostSlashCommandsIntegration]( + s.client, + withPath("groups/%s/integrations/mattermost-slash-commands", GroupID{gid}), + withMethod(http.MethodGet), + withRequestOpts(options...), + ) +} + +// SetGroupMattermostSlashCommandsIntegration creates or updates the Mattermost slash commands integration for a group. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/group_integrations/#mattermost-slash-commands +func (s *IntegrationsService) SetGroupMattermostSlashCommandsIntegration(gid any, opt *GroupMattermostSlashCommandsIntegrationOptions, options ...RequestOptionFunc) (*GroupMattermostSlashCommandsIntegration, *Response, error) { + return do[*GroupMattermostSlashCommandsIntegration]( + s.client, + withPath("groups/%s/integrations/mattermost-slash-commands", GroupID{gid}), + withMethod(http.MethodPut), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +// DeleteGroupMattermostSlashCommandsIntegration removes the Mattermost slash commands integration from a group. +// +// GitLab API docs: +// https://docs.gitlab.com/api/group_integrations/#mattermost-slash-commands +func (s *IntegrationsService) DeleteGroupMattermostSlashCommandsIntegration(gid any, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none]( + s.client, + withPath("groups/%s/integrations/mattermost-slash-commands", GroupID{gid}), + withMethod(http.MethodDelete), + withRequestOpts(options...), + ) + return resp, err +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_iterations.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_iterations.go index 0a5e235e13..2b14b1bd0c 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_iterations.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_iterations.go @@ -16,11 +16,7 @@ package gitlab -import ( - "fmt" - "net/http" - "time" -) +import "time" type ( GroupIterationsServiceInterface interface { @@ -42,13 +38,13 @@ var _ GroupIterationsServiceInterface = (*GroupIterationsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/group_iterations/ type GroupIteration struct { - ID int `json:"id"` - IID int `json:"iid"` - Sequence int `json:"sequence"` - GroupID int `json:"group_id"` + ID int64 `json:"id"` + IID int64 `json:"iid"` + Sequence int64 `json:"sequence"` + GroupID int64 
`json:"group_id"` Title string `json:"title"` Description string `json:"description"` - State int `json:"state"` + State int64 `json:"state"` CreatedAt *time.Time `json:"created_at"` UpdatedAt *time.Time `json:"updated_at"` DueDate *ISOTime `json:"due_date"` @@ -77,22 +73,9 @@ type ListGroupIterationsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_iterations/#list-group-iterations func (s *GroupIterationsService) ListGroupIterations(gid any, opt *ListGroupIterationsOptions, options ...RequestOptionFunc) ([]*GroupIteration, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/iterations", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gis []*GroupIteration - resp, err := s.client.Do(req, &gis) - if err != nil { - return nil, nil, err - } - - return gis, resp, nil + return do[[]*GroupIteration](s.client, + withPath("groups/%s/iterations", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_labels.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_labels.go index 7bd4d1df8b..838acda6ce 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_labels.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_labels.go @@ -16,10 +16,7 @@ package gitlab -import ( - "fmt" - "net/http" -) +import "net/http" type ( GroupLabelsServiceInterface interface { @@ -57,11 +54,11 @@ func (l GroupLabel) String() string { // GitLab API docs: https://docs.gitlab.com/api/group_labels/#list-group-labels type ListGroupLabelsOptions struct { ListOptions - WithCounts *bool `url:"with_counts,omitempty" json:"with_counts,omitempty"` - IncludeAncestorGroups *bool `url:"include_ancestor_groups,omitempty" json:"include_ancestor_groups,omitempty"` - IncludeDescendantGrouops *bool `url:"include_descendant_groups,omitempty" 
json:"include_descendant_groups,omitempty"` - OnlyGroupLabels *bool `url:"only_group_labels,omitempty" json:"only_group_labels,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` + WithCounts *bool `url:"with_counts,omitempty" json:"with_counts,omitempty"` + IncludeAncestorGroups *bool `url:"include_ancestor_groups,omitempty" json:"include_ancestor_groups,omitempty"` + IncludeDescendantGroups *bool `url:"include_descendant_groups,omitempty" json:"include_descendant_groups,omitempty"` + OnlyGroupLabels *bool `url:"only_group_labels,omitempty" json:"only_group_labels,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` } // ListGroupLabels gets all labels for given group. @@ -69,24 +66,11 @@ type ListGroupLabelsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_labels/#list-group-labels func (s *GroupLabelsService) ListGroupLabels(gid any, opt *ListGroupLabelsOptions, options ...RequestOptionFunc) ([]*GroupLabel, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/labels", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var l []*GroupLabel - resp, err := s.client.Do(req, &l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil + return do[[]*GroupLabel](s.client, + withPath("groups/%s/labels", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupLabel get a single label for a given group. 
@@ -94,28 +78,10 @@ func (s *GroupLabelsService) ListGroupLabels(gid any, opt *ListGroupLabelsOption // GitLab API docs: // https://docs.gitlab.com/api/group_labels/#get-a-single-group-label func (s *GroupLabelsService) GetGroupLabel(gid any, lid any, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - label, err := parseID(lid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/labels/%s", PathEscape(group), PathEscape(label)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var l *GroupLabel - resp, err := s.client.Do(req, &l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil + return do[*GroupLabel](s.client, + withPath("groups/%s/labels/%s", GroupID{gid}, LabelID{lid}), + withRequestOpts(options...), + ) } // CreateGroupLabelOptions represents the available CreateGroupLabel() options. @@ -126,7 +92,7 @@ type CreateGroupLabelOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` Color *string `url:"color,omitempty" json:"color,omitempty"` Description *string `url:"description,omitempty" json:"description,omitempty"` - Priority *int `url:"priority,omitempty" json:"priority,omitempty"` + Priority *int64 `url:"priority,omitempty" json:"priority,omitempty"` } // CreateGroupLabel creates a new label for given group with given name and @@ -135,24 +101,12 @@ type CreateGroupLabelOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_labels/#create-a-new-group-label func (s *GroupLabelsService) CreateGroupLabel(gid any, opt *CreateGroupLabelOptions, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/labels", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - 
return nil, nil, err - } - - l := new(GroupLabel) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil + return do[*GroupLabel](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/labels", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteGroupLabelOptions represents the available DeleteGroupLabel() options. @@ -168,26 +122,21 @@ type DeleteGroupLabelOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_labels/#delete-a-group-label func (s *GroupLabelsService) DeleteGroupLabel(gid any, lid any, opt *DeleteGroupLabelOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/labels", PathEscape(group)) + reqOpts := make([]doOption, 0, 4) + reqOpts = append(reqOpts, + withMethod(http.MethodDelete), + withAPIOpts(opt), + withRequestOpts(options...), + ) if lid != nil { - label, err := parseID(lid) - if err != nil { - return nil, err - } - u = fmt.Sprintf("groups/%s/labels/%s", PathEscape(group), PathEscape(label)) - } - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err + reqOpts = append(reqOpts, withPath("groups/%s/labels/%s", GroupID{gid}, LabelID{lid})) + } else { + reqOpts = append(reqOpts, withPath("groups/%s/labels", GroupID{gid})) } - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, reqOpts...) + return resp, err } // UpdateGroupLabelOptions represents the available UpdateGroupLabel() options. 
@@ -199,7 +148,7 @@ type UpdateGroupLabelOptions struct { NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` Color *string `url:"color,omitempty" json:"color,omitempty"` Description *string `url:"description,omitempty" json:"description,omitempty"` - Priority *int `url:"priority,omitempty" json:"priority,omitempty"` + Priority *int64 `url:"priority,omitempty" json:"priority,omitempty"` } // UpdateGroupLabel updates an existing label with new name or now color. At least @@ -208,32 +157,20 @@ type UpdateGroupLabelOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_labels/#update-a-group-label func (s *GroupLabelsService) UpdateGroupLabel(gid any, lid any, opt *UpdateGroupLabelOptions, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/labels", PathEscape(group)) + reqOpts := make([]doOption, 0, 4) + reqOpts = append(reqOpts, + withMethod(http.MethodPut), + withAPIOpts(opt), + withRequestOpts(options...), + ) if lid != nil { - label, err := parseID(lid) - if err != nil { - return nil, nil, err - } - u = fmt.Sprintf("groups/%s/labels/%s", PathEscape(group), PathEscape(label)) - } - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - l := new(GroupLabel) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err + reqOpts = append(reqOpts, withPath("groups/%s/labels/%s", GroupID{gid}, LabelID{lid})) + } else { + reqOpts = append(reqOpts, withPath("groups/%s/labels", GroupID{gid})) } - return l, resp, nil + return do[*GroupLabel](s.client, reqOpts...) 
} // SubscribeToGroupLabel subscribes the authenticated user to a label to receive @@ -243,28 +180,11 @@ func (s *GroupLabelsService) UpdateGroupLabel(gid any, lid any, opt *UpdateGroup // GitLab API docs: // https://docs.gitlab.com/api/group_labels/#subscribe-to-a-group-label func (s *GroupLabelsService) SubscribeToGroupLabel(gid any, lid any, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - label, err := parseID(lid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/labels/%s/subscribe", PathEscape(group), PathEscape(label)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - l := new(GroupLabel) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil + return do[*GroupLabel](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/labels/%s/subscribe", GroupID{gid}, LabelID{lid}), + withRequestOpts(options...), + ) } // UnsubscribeFromGroupLabel unsubscribes the authenticated user from a label to not @@ -274,20 +194,10 @@ func (s *GroupLabelsService) SubscribeToGroupLabel(gid any, lid any, options ... 
// GitLab API docs: // https://docs.gitlab.com/api/group_labels/#unsubscribe-from-a-group-label func (s *GroupLabelsService) UnsubscribeFromGroupLabel(gid any, lid any, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - label, err := parseID(lid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/labels/%s/unsubscribe", PathEscape(group), PathEscape(label)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/labels/%s/unsubscribe", GroupID{gid}, LabelID{lid}), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_markdown_uploads.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_markdown_uploads.go index 5c0afc8493..6d12350a98 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_markdown_uploads.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_markdown_uploads.go @@ -22,10 +22,32 @@ import ( type ( GroupMarkdownUploadsServiceInterface interface { + // ListGroupMarkdownUploads gets all markdown uploads for a group. + // + // GitLab API Docs: + // https://docs.gitlab.com/api/group_markdown_uploads/#list-uploads ListGroupMarkdownUploads(gid any, opt *ListMarkdownUploadsOptions, options ...RequestOptionFunc) ([]*GroupMarkdownUpload, *Response, error) - DownloadGroupMarkdownUploadByID(gid any, uploadID int, options ...RequestOptionFunc) (io.Reader, *Response, error) + // DownloadGroupMarkdownUploadByID downloads a specific upload by ID. 
+ // + // GitLab API Docs: + // https://docs.gitlab.com/api/group_markdown_uploads/#download-an-uploaded-file-by-id + DownloadGroupMarkdownUploadByID(gid any, uploadID int64, options ...RequestOptionFunc) (io.Reader, *Response, error) + // DownloadGroupMarkdownUploadBySecretAndFilename downloads a specific upload + // by secret and filename. + // + // GitLab API Docs: + // https://docs.gitlab.com/api/group_markdown_uploads/#download-an-uploaded-file-by-secret-and-filename DownloadGroupMarkdownUploadBySecretAndFilename(gid any, secret string, filename string, options ...RequestOptionFunc) (io.Reader, *Response, error) - DeleteGroupMarkdownUploadByID(gid any, uploadID int, options ...RequestOptionFunc) (*Response, error) + // DeleteGroupMarkdownUploadByID deletes an upload by ID. + // + // GitLab API Docs: + // https://docs.gitlab.com/api/group_markdown_uploads/#delete-an-uploaded-file-by-id + DeleteGroupMarkdownUploadByID(gid any, uploadID int64, options ...RequestOptionFunc) (*Response, error) + // DeleteGroupMarkdownUploadBySecretAndFilename deletes an upload + // by secret and filename. + // + // GitLab API Docs: + // https://docs.gitlab.com/api/group_markdown_uploads/#delete-an-uploaded-file-by-secret-and-filename DeleteGroupMarkdownUploadBySecretAndFilename(gid any, secret string, filename string, options ...RequestOptionFunc) (*Response, error) } @@ -46,52 +68,30 @@ type ( GroupMarkdownUpload = MarkdownUpload ) -// ListGroupMarkdownUploads gets all markdown uploads for a group. 
-// -// GitLab API Docs: -// https://docs.gitlab.com/api/group_markdown_uploads/#list-uploads func (s *GroupMarkdownUploadsService) ListGroupMarkdownUploads(gid any, opt *ListMarkdownUploadsOptions, options ...RequestOptionFunc) ([]*GroupMarkdownUpload, *Response, error) { - return listMarkdownUploads[GroupMarkdownUpload](s.client, GroupResource, gid, opt, options) + return listMarkdownUploads[GroupMarkdownUpload](s.client, GroupResource, GroupID{gid}, opt, options) } -// DownloadGroupMarkdownUploadByID downloads a specific upload by ID. -// -// GitLab API Docs: -// https://docs.gitlab.com/api/group_markdown_uploads/#download-an-uploaded-file-by-id -func (s *GroupMarkdownUploadsService) DownloadGroupMarkdownUploadByID(gid any, uploadID int, options ...RequestOptionFunc) (io.Reader, *Response, error) { - buffer, resp, err := downloadMarkdownUploadByID(s.client, GroupResource, gid, uploadID, options) +func (s *GroupMarkdownUploadsService) DownloadGroupMarkdownUploadByID(gid any, uploadID int64, options ...RequestOptionFunc) (io.Reader, *Response, error) { + buffer, resp, err := downloadMarkdownUploadByID(s.client, GroupResource, GroupID{gid}, uploadID, options) if err != nil { return nil, resp, err } return buffer, resp, nil } -// DownloadGroupMarkdownUploadBySecretAndFilename downloads a specific upload -// by secret and filename. 
-// -// GitLab API Docs: -// https://docs.gitlab.com/api/group_markdown_uploads/#download-an-uploaded-file-by-secret-and-filename func (s *GroupMarkdownUploadsService) DownloadGroupMarkdownUploadBySecretAndFilename(gid any, secret string, filename string, options ...RequestOptionFunc) (io.Reader, *Response, error) { - buffer, resp, err := downloadMarkdownUploadBySecretAndFilename(s.client, GroupResource, gid, secret, filename, options) + buffer, resp, err := downloadMarkdownUploadBySecretAndFilename(s.client, GroupResource, GroupID{gid}, secret, filename, options) if err != nil { return nil, resp, err } return buffer, resp, nil } -// DeleteGroupMarkdownUploadByID deletes an upload by ID. -// -// GitLab API Docs: -// https://docs.gitlab.com/api/group_markdown_uploads/#delete-an-uploaded-file-by-id -func (s *GroupMarkdownUploadsService) DeleteGroupMarkdownUploadByID(gid any, uploadID int, options ...RequestOptionFunc) (*Response, error) { - return deleteMarkdownUploadByID(s.client, GroupResource, gid, uploadID, options) +func (s *GroupMarkdownUploadsService) DeleteGroupMarkdownUploadByID(gid any, uploadID int64, options ...RequestOptionFunc) (*Response, error) { + return deleteMarkdownUploadByID(s.client, GroupResource, GroupID{gid}, uploadID, options) } -// DeleteGroupMarkdownUploadBySecretAndFilename deletes an upload -// by secret and filename. 
-// -// GitLab API Docs: -// https://docs.gitlab.com/api/group_markdown_uploads/#delete-an-uploaded-file-by-secret-and-filename func (s *GroupMarkdownUploadsService) DeleteGroupMarkdownUploadBySecretAndFilename(gid any, secret string, filename string, options ...RequestOptionFunc) (*Response, error) { - return deleteMarkdownUploadBySecretAndFilename(s.client, GroupResource, gid, secret, filename, options) + return deleteMarkdownUploadBySecretAndFilename(s.client, GroupResource, GroupID{gid}, secret, filename, options) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_members.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_members.go index 7ba88838cc..c5ab2003f3 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_members.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_members.go @@ -17,20 +17,48 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( GroupMembersServiceInterface interface { - GetGroupMember(gid any, user int, options ...RequestOptionFunc) (*GroupMember, *Response, error) - GetInheritedGroupMember(gid any, user int, options ...RequestOptionFunc) (*GroupMember, *Response, error) + // GetGroupMember gets a member of a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/members/#get-a-member-of-a-group-or-project + GetGroupMember(gid any, user int64, options ...RequestOptionFunc) (*GroupMember, *Response, error) + // GetInheritedGroupMember gets a member of a group or project, including + // inherited and invited members + // + // GitLab API docs: + // https://docs.gitlab.com/api/members/#get-a-member-of-a-group-or-project-including-inherited-and-invited-members + GetInheritedGroupMember(gid any, user int64, options ...RequestOptionFunc) (*GroupMember, *Response, error) + // AddGroupMember adds a user to the list of group members. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/members/#add-a-member-to-a-group-or-project AddGroupMember(gid any, opt *AddGroupMemberOptions, options ...RequestOptionFunc) (*GroupMember, *Response, error) + // ShareWithGroup shares a group with the group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/groups/#create-a-link-to-share-a-group-with-another-group ShareWithGroup(gid any, opt *ShareWithGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) - DeleteShareWithGroup(gid any, groupID int, options ...RequestOptionFunc) (*Response, error) - EditGroupMember(gid any, user int, opt *EditGroupMemberOptions, options ...RequestOptionFunc) (*GroupMember, *Response, error) - RemoveGroupMember(gid any, user int, opt *RemoveGroupMemberOptions, options ...RequestOptionFunc) (*Response, error) + // DeleteShareWithGroup allows to unshare a group from a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/groups/#delete-the-link-that-shares-a-group-with-another-group + DeleteShareWithGroup(gid any, groupID int64, options ...RequestOptionFunc) (*Response, error) + // EditGroupMember updates a member of a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/members/#edit-a-member-of-a-group-or-project + EditGroupMember(gid any, user int64, opt *EditGroupMemberOptions, options ...RequestOptionFunc) (*GroupMember, *Response, error) + // RemoveGroupMember removes user from user team. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/members/#remove-a-member-from-a-group-or-project + RemoveGroupMember(gid any, user int64, opt *RemoveGroupMemberOptions, options ...RequestOptionFunc) (*Response, error) } // GroupMembersService handles communication with the group members @@ -48,19 +76,21 @@ var _ GroupMembersServiceInterface = (*GroupMembersService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/members/ type GroupMember struct { - ID int `json:"id"` + ID int64 `json:"id"` Username string `json:"username"` Name string `json:"name"` State string `json:"state"` AvatarURL string `json:"avatar_url"` WebURL string `json:"web_url"` CreatedAt *time.Time `json:"created_at"` + CreatedBy *MemberCreatedBy `json:"created_by"` ExpiresAt *ISOTime `json:"expires_at"` AccessLevel AccessLevelValue `json:"access_level"` Email string `json:"email,omitempty"` PublicEmail string `json:"public_email,omitempty"` GroupSAMLIdentity *GroupMemberSAMLIdentity `json:"group_saml_identity"` MemberRole *MemberRole `json:"member_role"` + IsUsingSeat bool `json:"is_using_seat,omitempty"` } // GroupMemberSAMLIdentity represents the SAML Identity link for the group member. @@ -70,7 +100,7 @@ type GroupMember struct { type GroupMemberSAMLIdentity struct { ExternUID string `json:"extern_uid"` Provider string `json:"provider"` - SAMLProviderID int `json:"saml_provider_id"` + SAMLProviderID int64 `json:"saml_provider_id"` } // BillableGroupMember represents a GitLab billable group member. 
@@ -78,7 +108,7 @@ type GroupMemberSAMLIdentity struct { // GitLab API docs: // https://docs.gitlab.com/api/members/#list-all-billable-members-of-a-group type BillableGroupMember struct { - ID int `json:"id"` + ID int64 `json:"id"` Username string `json:"username"` Name string `json:"name"` State string `json:"state"` @@ -98,8 +128,8 @@ type BillableGroupMember struct { // GitLab API docs: // https://docs.gitlab.com/api/members/#list-memberships-for-a-billable-member-of-a-group type BillableUserMembership struct { - ID int `json:"id"` - SourceID int `json:"source_id"` + ID int64 `json:"id"` + SourceID int64 `json:"source_id"` SourceFullName string `json:"source_full_name"` SourceMembersURL string `json:"source_members_url"` CreatedAt *time.Time `json:"created_at"` @@ -114,8 +144,9 @@ type BillableUserMembership struct { // https://docs.gitlab.com/api/members/#list-all-members-of-a-group-or-project type ListGroupMembersOptions struct { ListOptions - Query *string `url:"query,omitempty" json:"query,omitempty"` - UserIDs *[]int `url:"user_ids[],omitempty" json:"user_ids,omitempty"` + Query *string `url:"query,omitempty" json:"query,omitempty"` + UserIDs *[]int64 `url:"user_ids[],omitempty" json:"user_ids,omitempty"` + ShowSeatInfo *bool `url:"show_seat_info,omitempty" json:"show_seat_info,omitempty"` } // ListGroupMembers get a list of group members viewable by the authenticated @@ -124,24 +155,11 @@ type ListGroupMembersOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/members/#list-all-members-of-a-group-or-project func (s *GroupsService) ListGroupMembers(gid any, opt *ListGroupMembersOptions, options ...RequestOptionFunc) ([]*GroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/members", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gm []*GroupMember - resp, err := 
s.client.Do(req, &gm) - if err != nil { - return nil, resp, err - } - - return gm, resp, nil + return do[[]*GroupMember](s.client, + withPath("groups/%s/members", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListAllGroupMembers get a list of group members viewable by the authenticated @@ -150,24 +168,11 @@ func (s *GroupsService) ListGroupMembers(gid any, opt *ListGroupMembersOptions, // GitLab API docs: // https://docs.gitlab.com/api/members/#list-all-members-of-a-group-or-project-including-inherited-and-invited-members func (s *GroupsService) ListAllGroupMembers(gid any, opt *ListGroupMembersOptions, options ...RequestOptionFunc) ([]*GroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/members/all", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gm []*GroupMember - resp, err := s.client.Do(req, &gm) - if err != nil { - return nil, resp, err - } - - return gm, resp, nil + return do[[]*GroupMember](s.client, + withPath("groups/%s/members/all", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // AddGroupMemberOptions represents the available AddGroupMember() options. 
@@ -175,62 +180,25 @@ func (s *GroupsService) ListAllGroupMembers(gid any, opt *ListGroupMembersOption // GitLab API docs: // https://docs.gitlab.com/api/members/#add-a-member-to-a-group-or-project type AddGroupMemberOptions struct { - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + UserID *int64 `url:"user_id,omitempty" json:"user_id,omitempty"` Username *string `url:"username,omitempty" json:"username,omitempty"` AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at"` - MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` + MemberRoleID *int64 `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` } -// GetGroupMember gets a member of a group. -// -// GitLab API docs: -// https://docs.gitlab.com/api/members/#get-a-member-of-a-group-or-project -func (s *GroupMembersService) GetGroupMember(gid any, user int, options ...RequestOptionFunc) (*GroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/members/%d", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gm := new(GroupMember) - resp, err := s.client.Do(req, gm) - if err != nil { - return nil, resp, err - } - - return gm, resp, nil +func (s *GroupMembersService) GetGroupMember(gid any, user int64, options ...RequestOptionFunc) (*GroupMember, *Response, error) { + return do[*GroupMember](s.client, + withPath("groups/%s/members/%d", GroupID{gid}, user), + withRequestOpts(options...), + ) } -// GetInheritedGroupMember get a member of a group or project, including -// inherited and invited members -// -// GitLab API docs: -// https://docs.gitlab.com/api/members/#get-a-member-of-a-group-or-project-including-inherited-and-invited-members -func (s *GroupMembersService) 
GetInheritedGroupMember(gid any, user int, options ...RequestOptionFunc) (*GroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/members/all/%d", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gm := new(GroupMember) - resp, err := s.client.Do(req, gm) - if err != nil { - return nil, resp, err - } - - return gm, resp, err +func (s *GroupMembersService) GetInheritedGroupMember(gid any, user int64, options ...RequestOptionFunc) (*GroupMember, *Response, error) { + return do[*GroupMember](s.client, + withPath("groups/%s/members/all/%d", GroupID{gid}, user), + withRequestOpts(options...), + ) } // ListBillableGroupMembersOptions represents the available @@ -250,24 +218,11 @@ type ListBillableGroupMembersOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/members/#list-all-billable-members-of-a-group func (s *GroupsService) ListBillableGroupMembers(gid any, opt *ListBillableGroupMembersOptions, options ...RequestOptionFunc) ([]*BillableGroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/billable_members", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var bgm []*BillableGroupMember - resp, err := s.client.Do(req, &bgm) - if err != nil { - return nil, resp, err - } - - return bgm, resp, nil + return do[[]*BillableGroupMember](s.client, + withPath("groups/%s/billable_members", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListMembershipsForBillableGroupMemberOptions represents the available @@ -275,120 +230,61 @@ func (s *GroupsService) ListBillableGroupMembers(gid any, opt *ListBillableGroup // // GitLab API docs: // 
https://docs.gitlab.com/api/members/#list-memberships-for-a-billable-member-of-a-group -type ListMembershipsForBillableGroupMemberOptions = ListOptions +type ListMembershipsForBillableGroupMemberOptions struct { + ListOptions +} // ListMembershipsForBillableGroupMember gets a list of memberships for a // billable member of a group. // // GitLab API docs: // https://docs.gitlab.com/api/members/#list-memberships-for-a-billable-member-of-a-group -func (s *GroupsService) ListMembershipsForBillableGroupMember(gid any, user int, opt *ListMembershipsForBillableGroupMemberOptions, options ...RequestOptionFunc) ([]*BillableUserMembership, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/billable_members/%d/memberships", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var bum []*BillableUserMembership - resp, err := s.client.Do(req, &bum) - if err != nil { - return nil, resp, err - } - - return bum, resp, nil +func (s *GroupsService) ListMembershipsForBillableGroupMember(gid any, user int64, opt *ListMembershipsForBillableGroupMemberOptions, options ...RequestOptionFunc) ([]*BillableUserMembership, *Response, error) { + return do[[]*BillableUserMembership](s.client, + withPath("groups/%s/billable_members/%d/memberships", GroupID{gid}, user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RemoveBillableGroupMember removes a given group members that count as billable. 
// // GitLab API docs: // https://docs.gitlab.com/api/members/#remove-a-billable-member-from-a-group -func (s *GroupsService) RemoveBillableGroupMember(gid any, user int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/billable_members/%d", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupsService) RemoveBillableGroupMember(gid any, user int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/billable_members/%d", GroupID{gid}, user), + withRequestOpts(options...), + ) + return resp, err } -// AddGroupMember adds a user to the list of group members. -// -// GitLab API docs: -// https://docs.gitlab.com/api/members/#add-a-member-to-a-group-or-project func (s *GroupMembersService) AddGroupMember(gid any, opt *AddGroupMemberOptions, options ...RequestOptionFunc) (*GroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/members", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gm := new(GroupMember) - resp, err := s.client.Do(req, gm) - if err != nil { - return nil, resp, err - } - - return gm, resp, nil + return do[*GroupMember](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/members", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// ShareWithGroup shares a group with the group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/groups/#create-a-link-to-share-a-group-with-another-group func (s *GroupMembersService) ShareWithGroup(gid any, opt *ShareWithGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/share", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil + return do[*Group](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/share", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteShareWithGroup allows to unshare a group from a group. -// -// GitLab API docs: -// https://docs.gitlab.com/api/groups/#delete-the-link-that-shares-a-group-with-another-group -func (s *GroupMembersService) DeleteShareWithGroup(gid any, groupID int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/share/%d", PathEscape(group), groupID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupMembersService) DeleteShareWithGroup(gid any, groupID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/share/%d", GroupID{gid}, groupID), + withRequestOpts(options...), + ) + return resp, err } // EditGroupMemberOptions represents the available EditGroupMember() @@ -399,32 +295,16 @@ func (s *GroupMembersService) DeleteShareWithGroup(gid any, groupID int, options type EditGroupMemberOptions struct { AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` 
ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at,omitempty"` - MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` + MemberRoleID *int64 `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` } -// EditGroupMember updates a member of a group. -// -// GitLab API docs: -// https://docs.gitlab.com/api/members/#edit-a-member-of-a-group-or-project -func (s *GroupMembersService) EditGroupMember(gid any, user int, opt *EditGroupMemberOptions, options ...RequestOptionFunc) (*GroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/members/%d", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - gm := new(GroupMember) - resp, err := s.client.Do(req, gm) - if err != nil { - return nil, resp, err - } - - return gm, resp, nil +func (s *GroupMembersService) EditGroupMember(gid any, user int64, opt *EditGroupMemberOptions, options ...RequestOptionFunc) (*GroupMember, *Response, error) { + return do[*GroupMember](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/members/%d", GroupID{gid}, user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RemoveGroupMemberOptions represents the available options to remove a group member. @@ -436,21 +316,12 @@ type RemoveGroupMemberOptions struct { UnassignIssuables *bool `url:"unassign_issuables,omitempty" json:"unassign_issuables,omitempty"` } -// RemoveGroupMember removes user from user team. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/members/#remove-a-member-from-a-group-or-project -func (s *GroupMembersService) RemoveGroupMember(gid any, user int, opt *RemoveGroupMemberOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/members/%d", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupMembersService) RemoveGroupMember(gid any, user int64, opt *RemoveGroupMemberOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/members/%d", GroupID{gid}, user), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_milestones.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_milestones.go index 8ae487a122..0607e2539c 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_milestones.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_milestones.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -25,13 +24,13 @@ import ( type ( GroupMilestonesServiceInterface interface { ListGroupMilestones(gid any, opt *ListGroupMilestonesOptions, options ...RequestOptionFunc) ([]*GroupMilestone, *Response, error) - GetGroupMilestone(gid any, milestone int, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) + GetGroupMilestone(gid any, milestone int64, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) CreateGroupMilestone(gid any, opt *CreateGroupMilestoneOptions, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) - UpdateGroupMilestone(gid any, milestone int, opt *UpdateGroupMilestoneOptions, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) - 
DeleteGroupMilestone(pid any, milestone int, options ...RequestOptionFunc) (*Response, error) - GetGroupMilestoneIssues(gid any, milestone int, opt *GetGroupMilestoneIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) - GetGroupMilestoneMergeRequests(gid any, milestone int, opt *GetGroupMilestoneMergeRequestsOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) - GetGroupMilestoneBurndownChartEvents(gid any, milestone int, opt *GetGroupMilestoneBurndownChartEventsOptions, options ...RequestOptionFunc) ([]*BurndownChartEvent, *Response, error) + UpdateGroupMilestone(gid any, milestone int64, opt *UpdateGroupMilestoneOptions, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) + DeleteGroupMilestone(pid any, milestone int64, options ...RequestOptionFunc) (*Response, error) + GetGroupMilestoneIssues(gid any, milestone int64, opt *GetGroupMilestoneIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) + GetGroupMilestoneMergeRequests(gid any, milestone int64, opt *GetGroupMilestoneMergeRequestsOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) + GetGroupMilestoneBurndownChartEvents(gid any, milestone int64, opt *GetGroupMilestoneBurndownChartEventsOptions, options ...RequestOptionFunc) ([]*BurndownChartEvent, *Response, error) } // GroupMilestonesService handles communication with the milestone related @@ -49,9 +48,9 @@ var _ GroupMilestonesServiceInterface = (*GroupMilestonesService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/group_milestones/ type GroupMilestone struct { - ID int `json:"id"` - IID int `json:"iid"` - GroupID int `json:"group_id"` + ID int64 `json:"id"` + IID int64 `json:"iid"` + GroupID int64 `json:"group_id"` Title string `json:"title"` Description string `json:"description"` StartDate *ISOTime `json:"start_date"` @@ -73,7 +72,7 @@ func (m GroupMilestone) String() string { // 
https://docs.gitlab.com/api/group_milestones/#list-group-milestones type ListGroupMilestonesOptions struct { ListOptions - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + IIDs *[]int64 `url:"iids[],omitempty" json:"iids,omitempty"` State *string `url:"state,omitempty" json:"state,omitempty"` Title *string `url:"title,omitempty" json:"title,omitempty"` Search *string `url:"search,omitempty" json:"search,omitempty"` @@ -95,49 +94,22 @@ type ListGroupMilestonesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_milestones/#list-group-milestones func (s *GroupMilestonesService) ListGroupMilestones(gid any, opt *ListGroupMilestonesOptions, options ...RequestOptionFunc) ([]*GroupMilestone, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*GroupMilestone - resp, err := s.client.Do(req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil + return do[[]*GroupMilestone](s.client, + withPath("groups/%s/milestones", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupMilestone gets a single group milestone. 
// // GitLab API docs: // https://docs.gitlab.com/api/group_milestones/#get-single-milestone -func (s *GroupMilestonesService) GetGroupMilestone(gid any, milestone int, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones/%d", PathEscape(group), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - m := new(GroupMilestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *GroupMilestonesService) GetGroupMilestone(gid any, milestone int64, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) { + return do[*GroupMilestone](s.client, + withPath("groups/%s/milestones/%d", GroupID{gid}, milestone), + withRequestOpts(options...), + ) } // CreateGroupMilestoneOptions represents the available CreateGroupMilestone() options. @@ -156,24 +128,12 @@ type CreateGroupMilestoneOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_milestones/#create-new-milestone func (s *GroupMilestonesService) CreateGroupMilestone(gid any, opt *CreateGroupMilestoneOptions, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(GroupMilestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil + return do[*GroupMilestone](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/milestones", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateGroupMilestoneOptions represents the available UpdateGroupMilestone() options. 
@@ -192,74 +152,46 @@ type UpdateGroupMilestoneOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/group_milestones/#edit-milestone -func (s *GroupMilestonesService) UpdateGroupMilestone(gid any, milestone int, opt *UpdateGroupMilestoneOptions, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones/%d", PathEscape(group), milestone) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(GroupMilestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *GroupMilestonesService) UpdateGroupMilestone(gid any, milestone int64, opt *UpdateGroupMilestoneOptions, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) { + return do[*GroupMilestone](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/milestones/%d", GroupID{gid}, milestone), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteGroupMilestone deletes a specified group milestone. 
// // GitLab API docs: // https://docs.gitlab.com/api/group_milestones/#delete-group-milestone -func (s *GroupMilestonesService) DeleteGroupMilestone(pid any, milestone int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/milestones/%d", PathEscape(project), milestone) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) +func (s *GroupMilestonesService) DeleteGroupMilestone(pid any, milestone int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/milestones/%d", GroupID{pid}, milestone), + withRequestOpts(options...), + ) + return resp, err } // GetGroupMilestoneIssuesOptions represents the available GetGroupMilestoneIssues() options. // // GitLab API docs: // https://docs.gitlab.com/api/group_milestones/#get-all-issues-assigned-to-a-single-milestone -type GetGroupMilestoneIssuesOptions ListOptions +type GetGroupMilestoneIssuesOptions struct { + ListOptions +} // GetGroupMilestoneIssues gets all issues assigned to a single group milestone. 
// // GitLab API docs: // https://docs.gitlab.com/api/group_milestones/#get-all-issues-assigned-to-a-single-milestone -func (s *GroupMilestonesService) GetGroupMilestoneIssues(gid any, milestone int, opt *GetGroupMilestoneIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones/%d/issues", PathEscape(group), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var i []*Issue - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil +func (s *GroupMilestonesService) GetGroupMilestoneIssues(gid any, milestone int64, opt *GetGroupMilestoneIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + return do[[]*Issue](s.client, + withPath("groups/%s/milestones/%d/issues", GroupID{gid}, milestone), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupMilestoneMergeRequestsOptions represents the available @@ -267,32 +199,21 @@ func (s *GroupMilestonesService) GetGroupMilestoneIssues(gid any, milestone int, // // GitLab API docs: // https://docs.gitlab.com/api/group_milestones/#get-all-merge-requests-assigned-to-a-single-milestone -type GetGroupMilestoneMergeRequestsOptions ListOptions +type GetGroupMilestoneMergeRequestsOptions struct { + ListOptions +} // GetGroupMilestoneMergeRequests gets all merge requests assigned to a // single group milestone. 
// // GitLab API docs: // https://docs.gitlab.com/api/group_milestones/#get-all-merge-requests-assigned-to-a-single-milestone -func (s *GroupMilestonesService) GetGroupMilestoneMergeRequests(gid any, milestone int, opt *GetGroupMilestoneMergeRequestsOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones/%d/merge_requests", PathEscape(group), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var mr []*BasicMergeRequest - resp, err := s.client.Do(req, &mr) - if err != nil { - return nil, resp, err - } - - return mr, resp, nil +func (s *GroupMilestonesService) GetGroupMilestoneMergeRequests(gid any, milestone int64, opt *GetGroupMilestoneMergeRequestsOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) { + return do[[]*BasicMergeRequest](s.client, + withPath("groups/%s/milestones/%d/merge_requests", GroupID{gid}, milestone), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // BurndownChartEvent represents a burnout chart event. @@ -301,7 +222,7 @@ func (s *GroupMilestonesService) GetGroupMilestoneMergeRequests(gid any, milesto // https://docs.gitlab.com/api/group_milestones/#get-all-burndown-chart-events-for-a-single-milestone type BurndownChartEvent struct { CreatedAt *time.Time `json:"created_at"` - Weight *int `json:"weight"` + Weight *int64 `json:"weight"` Action *string `json:"action"` } @@ -310,30 +231,19 @@ type BurndownChartEvent struct { // // GitLab API docs: // https://docs.gitlab.com/api/group_milestones/#get-all-burndown-chart-events-for-a-single-milestone -type GetGroupMilestoneBurndownChartEventsOptions ListOptions +type GetGroupMilestoneBurndownChartEventsOptions struct { + ListOptions +} // GetGroupMilestoneBurndownChartEvents gets all merge requests assigned to a // single group milestone. 
// // GitLab API docs: // https://docs.gitlab.com/api/group_milestones/#get-all-burndown-chart-events-for-a-single-milestone -func (s *GroupMilestonesService) GetGroupMilestoneBurndownChartEvents(gid any, milestone int, opt *GetGroupMilestoneBurndownChartEventsOptions, options ...RequestOptionFunc) ([]*BurndownChartEvent, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones/%d/burndown_events", PathEscape(group), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var be []*BurndownChartEvent - resp, err := s.client.Do(req, &be) - if err != nil { - return nil, resp, err - } - - return be, resp, nil +func (s *GroupMilestonesService) GetGroupMilestoneBurndownChartEvents(gid any, milestone int64, opt *GetGroupMilestoneBurndownChartEventsOptions, options ...RequestOptionFunc) ([]*BurndownChartEvent, *Response, error) { + return do[[]*BurndownChartEvent](s.client, + withPath("groups/%s/milestones/%d/burndown_events", GroupID{gid}, milestone), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_protected_branches.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_protected_branches.go new file mode 100644 index 0000000000..f43c6ed66a --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_protected_branches.go @@ -0,0 +1,177 @@ +package gitlab + +import ( + "net/http" + "net/url" +) + +type ( + GroupProtectedBranchesServiceInterface interface { + // ListProtectedBranches returns a list of protected branches from a group. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/group_protected_branches/#list-protected-branches + ListProtectedBranches(gid any, opt *ListGroupProtectedBranchesOptions, options ...RequestOptionFunc) ([]*GroupProtectedBranch, *Response, error) + + // GetProtectedBranch returns a single group-level protected branch. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_protected_branches/#get-a-single-protected-branch-or-wildcard-protected-branch + GetProtectedBranch(gid any, branch string, options ...RequestOptionFunc) (*GroupProtectedBranch, *Response, error) + + // ProtectRepositoryBranches protects a single group-level branch. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_protected_branches/#protect-repository-branches + ProtectRepositoryBranches(gid any, opt *ProtectGroupRepositoryBranchesOptions, options ...RequestOptionFunc) (*GroupProtectedBranch, *Response, error) + + // UpdateProtectedBranch updates a single group-level protected branch. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_protected_branches/#update-a-protected-branch + UpdateProtectedBranch(gid any, branch string, opt *UpdateGroupProtectedBranchOptions, options ...RequestOptionFunc) (*GroupProtectedBranch, *Response, error) + + // UnprotectRepositoryBranches unprotects the given protected group-level branch. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_protected_branches/#unprotect-repository-branches + UnprotectRepositoryBranches(gid any, branch string, options ...RequestOptionFunc) (*Response, error) + } + + // GroupProtectedBranchesService handles communication with the group-level + // protected branch methods of the GitLab API. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/group_protected_branches/ + GroupProtectedBranchesService struct { + client *Client + } +) + +var _ GroupProtectedBranchesServiceInterface = (*GroupProtectedBranchesService)(nil) + +// GroupProtectedBranch represents a group protected branch. +// +// GitLab API docs: +// https://docs.gitlab.com/api/group_protected_branches/#list-protected-branches +type GroupProtectedBranch struct { + ID int64 `json:"id"` + Name string `json:"name"` + PushAccessLevels []*GroupBranchAccessDescription `json:"push_access_levels"` + MergeAccessLevels []*GroupBranchAccessDescription `json:"merge_access_levels"` + UnprotectAccessLevels []*GroupBranchAccessDescription `json:"unprotect_access_levels"` + AllowForcePush bool `json:"allow_force_push"` + CodeOwnerApprovalRequired bool `json:"code_owner_approval_required"` +} + +// GroupBranchAccessDescription represents the access description for a group protected +// branch. +// +// GitLab API docs: +// https://docs.gitlab.com/api/group_protected_branches/#list-protected-branches +type GroupBranchAccessDescription struct { + ID int64 `json:"id"` + AccessLevel AccessLevelValue `json:"access_level"` + AccessLevelDescription string `json:"access_level_description"` + DeployKeyID int64 `json:"deploy_key_id"` + UserID int64 `json:"user_id"` + GroupID int64 `json:"group_id"` +} + +// ListGroupProtectedBranchesOptions represents the available ListProtectedBranches() +// options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/group_protected_branches/#list-protected-branches +type ListGroupProtectedBranchesOptions struct { + ListOptions + Search *string `url:"search,omitempty" json:"search,omitempty"` +} + +func (s *GroupProtectedBranchesService) ListProtectedBranches(gid any, opt *ListGroupProtectedBranchesOptions, options ...RequestOptionFunc) ([]*GroupProtectedBranch, *Response, error) { + return do[[]*GroupProtectedBranch](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/protected_branches", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +func (s *GroupProtectedBranchesService) GetProtectedBranch(gid any, branch string, options ...RequestOptionFunc) (*GroupProtectedBranch, *Response, error) { + return do[*GroupProtectedBranch](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/protected_branches/%s", GroupID{gid}, url.PathEscape(branch)), + withRequestOpts(options...), + ) +} + +// ProtectGroupRepositoryBranchesOptions represents the available +// ProtectRepositoryBranches() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/group_protected_branches/#protect-repository-branches +type ProtectGroupRepositoryBranchesOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + PushAccessLevel *AccessLevelValue `url:"push_access_level,omitempty" json:"push_access_level,omitempty"` + MergeAccessLevel *AccessLevelValue `url:"merge_access_level,omitempty" json:"merge_access_level,omitempty"` + UnprotectAccessLevel *AccessLevelValue `url:"unprotect_access_level,omitempty" json:"unprotect_access_level,omitempty"` + AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` + AllowedToPush *[]*GroupBranchPermissionOptions `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` + AllowedToMerge *[]*GroupBranchPermissionOptions `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` + AllowedToUnprotect *[]*GroupBranchPermissionOptions `url:"allowed_to_unprotect,omitempty" json:"allowed_to_unprotect,omitempty"` + CodeOwnerApprovalRequired *bool `url:"code_owner_approval_required,omitempty" json:"code_owner_approval_required,omitempty"` +} + +// GroupBranchPermissionOptions represents a branch permission option. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/group_protected_branches/#protect-repository-branches +type GroupBranchPermissionOptions struct { + ID *int64 `url:"id,omitempty" json:"id,omitempty"` + UserID *int64 `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` + DeployKeyID *int64 `url:"deploy_key_id,omitempty" json:"deploy_key_id,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` +} + +func (s *GroupProtectedBranchesService) ProtectRepositoryBranches(gid any, opt *ProtectGroupRepositoryBranchesOptions, options ...RequestOptionFunc) (*GroupProtectedBranch, *Response, error) { + return do[*GroupProtectedBranch](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/protected_branches", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +// UpdateGroupProtectedBranchOptions represents the available +// UpdateProtectedBranch() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/group_protected_branches/#update-a-protected-branch +type UpdateGroupProtectedBranchOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` + CodeOwnerApprovalRequired *bool `url:"code_owner_approval_required,omitempty" json:"code_owner_approval_required,omitempty"` + AllowedToPush *[]*GroupBranchPermissionOptions `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` + AllowedToMerge *[]*GroupBranchPermissionOptions `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` + AllowedToUnprotect *[]*GroupBranchPermissionOptions `url:"allowed_to_unprotect,omitempty" json:"allowed_to_unprotect,omitempty"` +} + +func (s *GroupProtectedBranchesService) UpdateProtectedBranch(gid any, branch string, opt *UpdateGroupProtectedBranchOptions, options ...RequestOptionFunc) (*GroupProtectedBranch, *Response, error) { + return do[*GroupProtectedBranch](s.client, + withMethod(http.MethodPatch), + withPath("groups/%s/protected_branches/%s", GroupID{gid}, url.PathEscape(branch)), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +func (s *GroupProtectedBranchesService) UnprotectRepositoryBranches(gid any, branch string, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/protected_branches/%s", GroupID{gid}, url.PathEscape(branch)), + withRequestOpts(options...), + ) + return resp, err +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_protected_environments.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_protected_environments.go index 2112011640..681d9c9bce 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_protected_environments.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_protected_environments.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" 
"net/http" ) @@ -49,7 +48,7 @@ var _ GroupProtectedEnvironmentsServiceInterface = (*GroupProtectedEnvironmentsS type GroupProtectedEnvironment struct { Name string `json:"name"` DeployAccessLevels []*GroupEnvironmentAccessDescription `json:"deploy_access_levels"` - RequiredApprovalCount int `json:"required_approval_count"` + RequiredApprovalCount int64 `json:"required_approval_count"` ApprovalRules []*GroupEnvironmentApprovalRule `json:"approval_rules"` } @@ -59,12 +58,12 @@ type GroupProtectedEnvironment struct { // GitLab API docs: // https://docs.gitlab.com/api/group_protected_environments/ type GroupEnvironmentAccessDescription struct { - ID int `json:"id"` + ID int64 `json:"id"` AccessLevel AccessLevelValue `json:"access_level"` AccessLevelDescription string `json:"access_level_description"` - UserID int `json:"user_id"` - GroupID int `json:"group_id"` - GroupInheritanceType int `json:"group_inheritance_type"` + UserID int64 `json:"user_id"` + GroupID int64 `json:"group_id"` + GroupInheritanceType int64 `json:"group_inheritance_type"` } // GroupEnvironmentApprovalRule represents the approval rules for a group-level @@ -73,13 +72,13 @@ type GroupEnvironmentAccessDescription struct { // GitLab API docs: // https://docs.gitlab.com/api/group_protected_environments/#protect-a-single-environment type GroupEnvironmentApprovalRule struct { - ID int `json:"id"` - UserID int `json:"user_id"` - GroupID int `json:"group_id"` + ID int64 `json:"id"` + UserID int64 `json:"user_id"` + GroupID int64 `json:"group_id"` AccessLevel AccessLevelValue `json:"access_level"` AccessLevelDescription string `json:"access_level_description"` - RequiredApprovalCount int `json:"required_approvals"` - GroupInheritanceType int `json:"group_inheritance_type"` + RequiredApprovalCount int64 `json:"required_approvals"` + GroupInheritanceType int64 `json:"group_inheritance_type"` } // ListGroupProtectedEnvironmentsOptions represents the available @@ -87,7 +86,9 @@ type GroupEnvironmentApprovalRule 
struct { // // GitLab API docs: // https://docs.gitlab.com/api/group_protected_environments/#list-group-level-protected-environments -type ListGroupProtectedEnvironmentsOptions ListOptions +type ListGroupProtectedEnvironmentsOptions struct { + ListOptions +} // ListGroupProtectedEnvironments returns a list of protected environments from // a group. @@ -95,24 +96,11 @@ type ListGroupProtectedEnvironmentsOptions ListOptions // GitLab API docs: // https://docs.gitlab.com/api/group_protected_environments/#list-group-level-protected-environments func (s *GroupProtectedEnvironmentsService) ListGroupProtectedEnvironments(gid any, opt *ListGroupProtectedEnvironmentsOptions, options ...RequestOptionFunc) ([]*GroupProtectedEnvironment, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/protected_environments", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pes []*GroupProtectedEnvironment - resp, err := s.client.Do(req, &pes) - if err != nil { - return nil, resp, err - } - - return pes, resp, nil + return do[[]*GroupProtectedEnvironment](s.client, + withPath("groups/%s/protected_environments", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupProtectedEnvironment returns a single group-level protected @@ -121,24 +109,10 @@ func (s *GroupProtectedEnvironmentsService) ListGroupProtectedEnvironments(gid a // GitLab API docs: // https://docs.gitlab.com/api/group_protected_environments/#get-a-single-protected-environment func (s *GroupProtectedEnvironmentsService) GetGroupProtectedEnvironment(gid any, environment string, options ...RequestOptionFunc) (*GroupProtectedEnvironment, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/protected_environments/%s", PathEscape(group), environment) - - req, err := 
s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pe := new(GroupProtectedEnvironment) - resp, err := s.client.Do(req, pe) - if err != nil { - return nil, resp, err - } - - return pe, resp, nil + return do[*GroupProtectedEnvironment](s.client, + withPath("groups/%s/protected_environments/%s", GroupID{gid}, environment), + withRequestOpts(options...), + ) } // ProtectGroupEnvironmentOptions represents the available @@ -149,7 +123,7 @@ func (s *GroupProtectedEnvironmentsService) GetGroupProtectedEnvironment(gid any type ProtectGroupEnvironmentOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` DeployAccessLevels *[]*GroupEnvironmentAccessOptions `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"` - RequiredApprovalCount *int `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"` + RequiredApprovalCount *int64 `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"` ApprovalRules *[]*GroupEnvironmentApprovalRuleOptions `url:"approval_rules,omitempty" json:"approval_rules,omitempty"` } @@ -160,9 +134,9 @@ type ProtectGroupEnvironmentOptions struct { // https://docs.gitlab.com/api/group_protected_environments/#protect-a-single-environment type GroupEnvironmentAccessOptions struct { AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` + UserID *int64 `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` + GroupInheritanceType *int64 `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` } // GroupEnvironmentApprovalRuleOptions represents the approval 
rules for a @@ -171,12 +145,12 @@ type GroupEnvironmentAccessOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_protected_environments/#protect-a-single-environment type GroupEnvironmentApprovalRuleOptions struct { - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + UserID *int64 `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` AccessLevelDescription *string `url:"access_level_description,omitempty" json:"access_level_description,omitempty"` - RequiredApprovalCount *int `url:"required_approvals,omitempty" json:"required_approvals,omitempty"` - GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` + RequiredApprovalCount *int64 `url:"required_approvals,omitempty" json:"required_approvals,omitempty"` + GroupInheritanceType *int64 `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` } // ProtectGroupEnvironment protects a single group-level environment. 
@@ -184,24 +158,12 @@ type GroupEnvironmentApprovalRuleOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_protected_environments/#protect-a-single-environment func (s *GroupProtectedEnvironmentsService) ProtectGroupEnvironment(gid any, opt *ProtectGroupEnvironmentOptions, options ...RequestOptionFunc) (*GroupProtectedEnvironment, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/protected_environments", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pe := new(GroupProtectedEnvironment) - resp, err := s.client.Do(req, pe) - if err != nil { - return nil, resp, err - } - - return pe, resp, nil + return do[*GroupProtectedEnvironment](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/protected_environments", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateGroupProtectedEnvironmentOptions represents the available @@ -212,7 +174,7 @@ func (s *GroupProtectedEnvironmentsService) ProtectGroupEnvironment(gid any, opt type UpdateGroupProtectedEnvironmentOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` DeployAccessLevels *[]*UpdateGroupEnvironmentAccessOptions `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"` - RequiredApprovalCount *int `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"` + RequiredApprovalCount *int64 `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"` ApprovalRules *[]*UpdateGroupEnvironmentApprovalRuleOptions `url:"approval_rules,omitempty" json:"approval_rules,omitempty"` } @@ -223,10 +185,10 @@ type UpdateGroupProtectedEnvironmentOptions struct { // https://docs.gitlab.com/api/group_protected_environments/#update-a-protected-environment type UpdateGroupEnvironmentAccessOptions struct { AccessLevel 
*AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ID *int `url:"id,omitempty" json:"id,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` + ID *int64 `url:"id,omitempty" json:"id,omitempty"` + UserID *int64 `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` + GroupInheritanceType *int64 `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` } @@ -236,13 +198,13 @@ type UpdateGroupEnvironmentAccessOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_protected_environments/#update-a-protected-environment type UpdateGroupEnvironmentApprovalRuleOptions struct { - ID *int `url:"id,omitempty" json:"id,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + ID *int64 `url:"id,omitempty" json:"id,omitempty"` + UserID *int64 `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` AccessLevelDescription *string `url:"access_level_description,omitempty" json:"access_level_description,omitempty"` - RequiredApprovalCount *int `url:"required_approvals,omitempty" json:"required_approvals,omitempty"` - GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` + RequiredApprovalCount *int64 `url:"required_approvals,omitempty" json:"required_approvals,omitempty"` + GroupInheritanceType *int64 `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` Destroy 
*bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` } @@ -252,24 +214,12 @@ type UpdateGroupEnvironmentApprovalRuleOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_protected_environments/#update-a-protected-environment func (s *GroupProtectedEnvironmentsService) UpdateGroupProtectedEnvironment(gid any, environment string, opt *UpdateGroupProtectedEnvironmentOptions, options ...RequestOptionFunc) (*GroupProtectedEnvironment, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/protected_environments/%s", PathEscape(group), environment) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pe := new(GroupProtectedEnvironment) - resp, err := s.client.Do(req, pe) - if err != nil { - return nil, resp, err - } - - return pe, resp, nil + return do[*GroupProtectedEnvironment](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/protected_environments/%s", GroupID{gid}, environment), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UnprotectGroupEnvironment unprotects the given protected group-level @@ -278,16 +228,10 @@ func (s *GroupProtectedEnvironmentsService) UpdateGroupProtectedEnvironment(gid // GitLab API docs: // https://docs.gitlab.com/api/group_protected_environments/#unprotect-a-single-environment func (s *GroupProtectedEnvironmentsService) UnprotectGroupEnvironment(gid any, environment string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/protected_environments/%s", PathEscape(group), environment) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/protected_environments/%s", GroupID{gid}, 
environment), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_relations_export.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_relations_export.go new file mode 100644 index 0000000000..4574f90e40 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_relations_export.go @@ -0,0 +1,110 @@ +package gitlab + +import ( + "bytes" + "net/http" + "time" +) + +// GroupRelationsScheduleExportOptions represents the available ScheduleExport() options. +// +// GitLab API docs: +// https://docs.gitlab.com/api/group_relations_export/#schedule-new-export +type GroupRelationsScheduleExportOptions struct { + Batched *bool `url:"batched,omitempty" json:"batched,omitempty"` +} + +// ListGroupRelationsStatusOptions represents the available ListExportStatus() options. +// +// GitLab API docs: +// https://docs.gitlab.com/api/group_relations_export/#export-status +type ListGroupRelationsStatusOptions struct { + ListOptions + + Relation *string `url:"relation,omitempty" json:"relation,omitempty"` +} + +// GroupRelationsDownloadOptions represents the available ExportDownload() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/group_relations_export/#download-exported-relations +type GroupRelationsDownloadOptions struct { + Relation *string `url:"relation,omitempty" json:"relation,omitempty"` + Batched *bool `url:"batched,omitempty" json:"batched,omitempty"` + BatchNumber *int64 `url:"batch_number,omitempty" json:"batch_number,omitempty"` +} + +type GroupRelationStatus struct { + Relation string `json:"relation"` + Status int64 `json:"status"` + Error string `json:"error"` + UpdatedAt time.Time `json:"updated_at"` + Batched bool `json:"batched"` + BatchesCount int64 `json:"batches_count"` + Batches []Batch `json:"batches,omitempty"` +} + +type Batch struct { + Status int64 `json:"status"` + BatchNumber int64 `json:"batch_number"` + ObjectsCount int64 `json:"objects_count"` + Error string `json:"error"` + UpdatedAt time.Time `json:"updated_at"` +} + +type ( + GroupRelationsExportServiceInterface interface { + // ScheduleExport schedules a new export of group relations. + // + // GitLab API docs: https://docs.gitlab.com/api/group_relations_export/#schedule-new-export + ScheduleExport(gid any, opt *GroupRelationsScheduleExportOptions, options ...RequestOptionFunc) (*Response, error) + // ListExportStatus gets the status of group relations export. + // + // GitLab API docs: https://docs.gitlab.com/api/group_relations_export/#export-status + ListExportStatus(gid any, opt *ListGroupRelationsStatusOptions, options ...RequestOptionFunc) ([]*GroupRelationStatus, *Response, error) + // ExportDownload downloads the exported group relations. + // + // GitLab API docs: https://docs.gitlab.com/api/group_relations_export/#download-exported-relations + ExportDownload(gid any, opt *GroupRelationsDownloadOptions, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) + } + + // GroupRelationsExportService handles communication with the group relations export related methods + // of the GitLab API. 
+ // + // GitLab API docs: https://docs.gitlab.com/api/group_relations_export + GroupRelationsExportService struct { + client *Client + } +) + +var _ GroupRelationsExportServiceInterface = (*GroupRelationsExportService)(nil) + +func (s *GroupRelationsExportService) ScheduleExport(gid any, opt *GroupRelationsScheduleExportOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/export_relations", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err +} + +func (s *GroupRelationsExportService) ListExportStatus(gid any, opt *ListGroupRelationsStatusOptions, options ...RequestOptionFunc) ([]*GroupRelationStatus, *Response, error) { + return do[[]*GroupRelationStatus](s.client, + withPath("groups/%s/export_relations/status", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +func (s *GroupRelationsExportService) ExportDownload(gid any, opt *GroupRelationsDownloadOptions, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { + buf, resp, err := do[bytes.Buffer](s.client, + withPath("groups/%s/export_relations/download", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) + if err != nil { + return nil, resp, err + } + return bytes.NewReader(buf.Bytes()), resp, nil +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_releases.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_releases.go index da52ce9e22..ce0126788b 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_releases.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_releases.go @@ -13,11 +13,6 @@ package gitlab -import ( - "fmt" - "net/http" -) - type ( GroupReleasesServiceInterface interface { ListGroupReleases(gid any, opts *ListGroupReleasesOptions, options ...RequestOptionFunc) ([]*Release, *Response, error) @@ -49,21 +44,9 @@ type ListGroupReleasesOptions struct { // GitLab API docs: // 
https://docs.gitlab.com/api/group_releases.html#list-group-releases func (s *GroupReleasesService) ListGroupReleases(gid any, opts *ListGroupReleasesOptions, options ...RequestOptionFunc) ([]*Release, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/releases", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var releases []*Release - resp, err := s.client.Do(req, &releases) - if err != nil { - return nil, resp, err - } - return releases, resp, nil + return do[[]*Release](s.client, + withPath("groups/%s/releases", GroupID{gid}), + withAPIOpts(opts), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_repository_storage_move.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_repository_storage_move.go index 01996cd091..fc5bb52adb 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_repository_storage_move.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_repository_storage_move.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -25,10 +24,10 @@ import ( type ( GroupRepositoryStorageMoveServiceInterface interface { RetrieveAllStorageMoves(opts RetrieveAllGroupStorageMovesOptions, options ...RequestOptionFunc) ([]*GroupRepositoryStorageMove, *Response, error) - RetrieveAllStorageMovesForGroup(group int, opts RetrieveAllGroupStorageMovesOptions, options ...RequestOptionFunc) ([]*GroupRepositoryStorageMove, *Response, error) - GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) - GetStorageMoveForGroup(group int, repositoryStorage int, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) - ScheduleStorageMoveForGroup(group int, opts ScheduleStorageMoveForGroupOptions, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, 
*Response, error) + RetrieveAllStorageMovesForGroup(group int64, opts RetrieveAllGroupStorageMovesOptions, options ...RequestOptionFunc) ([]*GroupRepositoryStorageMove, *Response, error) + GetStorageMove(repositoryStorage int64, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) + GetStorageMoveForGroup(group int64, repositoryStorage int64, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) + ScheduleStorageMoveForGroup(group int64, opts ScheduleStorageMoveForGroupOptions, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) ScheduleAllStorageMoves(opts ScheduleAllGroupStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) } @@ -47,7 +46,7 @@ type ( // GitLab API docs: // https://docs.gitlab.com/api/group_repository_storage_moves/ type GroupRepositoryStorageMove struct { - ID int `json:"id"` + ID int64 `json:"id"` CreatedAt *time.Time `json:"created_at"` State string `json:"state"` SourceStorageName string `json:"source_storage_name"` @@ -56,7 +55,7 @@ type GroupRepositoryStorageMove struct { } type RepositoryGroup struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` WebURL string `json:"web_url"` } @@ -66,7 +65,9 @@ type RepositoryGroup struct { // // GitLab API docs: // https://docs.gitlab.com/api/group_repository_storage_moves/#retrieve-all-group-repository-storage-moves -type RetrieveAllGroupStorageMovesOptions ListOptions +type RetrieveAllGroupStorageMovesOptions struct { + ListOptions +} // RetrieveAllStorageMoves retrieves all group repository storage moves // accessible by the authenticated user. 
@@ -74,18 +75,11 @@ type RetrieveAllGroupStorageMovesOptions ListOptions // GitLab API docs: // https://docs.gitlab.com/api/group_repository_storage_moves/#retrieve-all-group-repository-storage-moves func (g GroupRepositoryStorageMoveService) RetrieveAllStorageMoves(opts RetrieveAllGroupStorageMovesOptions, options ...RequestOptionFunc) ([]*GroupRepositoryStorageMove, *Response, error) { - req, err := g.client.NewRequest(http.MethodGet, "group_repository_storage_moves", opts, options) - if err != nil { - return nil, nil, err - } - - var gsms []*GroupRepositoryStorageMove - resp, err := g.client.Do(req, &gsms) - if err != nil { - return nil, resp, err - } - - return gsms, resp, err + return do[[]*GroupRepositoryStorageMove](g.client, + withPath("group_repository_storage_moves"), + withAPIOpts(opts), + withRequestOpts(options...), + ) } // RetrieveAllStorageMovesForGroup retrieves all repository storage moves for @@ -93,63 +87,34 @@ func (g GroupRepositoryStorageMoveService) RetrieveAllStorageMoves(opts Retrieve // // GitLab API docs: // https://docs.gitlab.com/api/group_repository_storage_moves/#retrieve-all-repository-storage-moves-for-a-single-group -func (g GroupRepositoryStorageMoveService) RetrieveAllStorageMovesForGroup(group int, opts RetrieveAllGroupStorageMovesOptions, options ...RequestOptionFunc) ([]*GroupRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("groups/%d/repository_storage_moves", group) - - req, err := g.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var gsms []*GroupRepositoryStorageMove - resp, err := g.client.Do(req, &gsms) - if err != nil { - return nil, resp, err - } - - return gsms, resp, err +func (g GroupRepositoryStorageMoveService) RetrieveAllStorageMovesForGroup(group int64, opts RetrieveAllGroupStorageMovesOptions, options ...RequestOptionFunc) ([]*GroupRepositoryStorageMove, *Response, error) { + return do[[]*GroupRepositoryStorageMove](g.client, + 
withPath("groups/%d/repository_storage_moves", group), + withAPIOpts(opts), + withRequestOpts(options...), + ) } // GetStorageMove gets a single group repository storage move. // // GitLab API docs: // https://docs.gitlab.com/api/group_repository_storage_moves/#get-a-single-group-repository-storage-move -func (g GroupRepositoryStorageMoveService) GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("group_repository_storage_moves/%d", repositoryStorage) - - req, err := g.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gsm := new(GroupRepositoryStorageMove) - resp, err := g.client.Do(req, gsm) - if err != nil { - return nil, resp, err - } - - return gsm, resp, err +func (g GroupRepositoryStorageMoveService) GetStorageMove(repositoryStorage int64, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) { + return do[*GroupRepositoryStorageMove](g.client, + withPath("group_repository_storage_moves/%d", repositoryStorage), + withRequestOpts(options...), + ) } // GetStorageMoveForGroup gets a single repository storage move for a group. 
// // GitLab API docs: // https://docs.gitlab.com/api/group_repository_storage_moves/#get-a-single-repository-storage-move-for-a-group -func (g GroupRepositoryStorageMoveService) GetStorageMoveForGroup(group int, repositoryStorage int, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("groups/%d/repository_storage_moves/%d", group, repositoryStorage) - - req, err := g.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gsm := new(GroupRepositoryStorageMove) - resp, err := g.client.Do(req, gsm) - if err != nil { - return nil, resp, err - } - - return gsm, resp, err +func (g GroupRepositoryStorageMoveService) GetStorageMoveForGroup(group int64, repositoryStorage int64, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) { + return do[*GroupRepositoryStorageMove](g.client, + withPath("groups/%d/repository_storage_moves/%d", group, repositoryStorage), + withRequestOpts(options...), + ) } // ScheduleStorageMoveForGroupOptions represents the available @@ -165,21 +130,13 @@ type ScheduleStorageMoveForGroupOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/group_repository_storage_moves/#schedule-a-repository-storage-move-for-a-group -func (g GroupRepositoryStorageMoveService) ScheduleStorageMoveForGroup(group int, opts ScheduleStorageMoveForGroupOptions, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("groups/%d/repository_storage_moves", group) - - req, err := g.client.NewRequest(http.MethodPost, u, opts, options) - if err != nil { - return nil, nil, err - } - - gsm := new(GroupRepositoryStorageMove) - resp, err := g.client.Do(req, gsm) - if err != nil { - return nil, resp, err - } - - return gsm, resp, err +func (g GroupRepositoryStorageMoveService) ScheduleStorageMoveForGroup(group int64, opts ScheduleStorageMoveForGroupOptions, options ...RequestOptionFunc) 
(*GroupRepositoryStorageMove, *Response, error) { + return do[*GroupRepositoryStorageMove](g.client, + withMethod(http.MethodPost), + withPath("groups/%d/repository_storage_moves", group), + withAPIOpts(opts), + withRequestOpts(options...), + ) } // ScheduleAllGroupStorageMovesOptions represents the available @@ -197,10 +154,11 @@ type ScheduleAllGroupStorageMovesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_repository_storage_moves/#schedule-repository-storage-moves-for-all-groups-on-a-storage-shard func (g GroupRepositoryStorageMoveService) ScheduleAllStorageMoves(opts ScheduleAllGroupStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) { - req, err := g.client.NewRequest(http.MethodPost, "group_repository_storage_moves", opts, options) - if err != nil { - return nil, err - } - - return g.client.Do(req, nil) + _, resp, err := do[none](g.client, + withMethod(http.MethodPost), + withPath("group_repository_storage_moves"), + withAPIOpts(opts), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_scim.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_scim.go index e99a266e8d..936290e523 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_scim.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_scim.go @@ -14,7 +14,6 @@ package gitlab import ( - "fmt" "net/http" ) @@ -51,23 +50,10 @@ type GroupSCIMIdentity struct { // GitLab API docs: // https://docs.gitlab.com/api/scim/#get-scim-identities-for-a-group func (s *GroupSCIMService) GetSCIMIdentitiesForGroup(gid any, options ...RequestOptionFunc) ([]*GroupSCIMIdentity, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/scim/identities", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var identities []*GroupSCIMIdentity - resp, err := 
s.client.Do(req, &identities) - if err != nil { - return nil, resp, err - } - return identities, resp, nil + return do[[]*GroupSCIMIdentity](s.client, + withPath("groups/%s/scim/identities", GroupID{gid}), + withRequestOpts(options...), + ) } // GetSCIMIdentity gets a SCIM identity for a group. @@ -75,23 +61,10 @@ func (s *GroupSCIMService) GetSCIMIdentitiesForGroup(gid any, options ...Request // GitLab API docs: // https://docs.gitlab.com/api/scim/#get-a-single-scim-identity func (s *GroupSCIMService) GetSCIMIdentity(gid any, uid string, options ...RequestOptionFunc) (*GroupSCIMIdentity, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/scim/%s", PathEscape(group), uid) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - identity := new(GroupSCIMIdentity) - resp, err := s.client.Do(req, identity) - if err != nil { - return nil, resp, err - } - return identity, resp, nil + return do[*GroupSCIMIdentity](s.client, + withPath("groups/%s/scim/%s", GroupID{gid}, uid), + withRequestOpts(options...), + ) } // UpdateSCIMIdentityOptions represent the request options for @@ -108,18 +81,13 @@ type UpdateSCIMIdentityOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/scim/#update-extern_uid-field-for-a-scim-identity func (s *GroupSCIMService) UpdateSCIMIdentity(gid any, uid string, opt *UpdateSCIMIdentityOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/scim/%s", PathEscape(group), uid) - - req, err := s.client.NewRequest(http.MethodPatch, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPatch), + withPath("groups/%s/scim/%s", GroupID{gid}, uid), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, 
err } // DeleteSCIMIdentity deletes a SCIM identity. @@ -127,16 +95,10 @@ func (s *GroupSCIMService) UpdateSCIMIdentity(gid any, uid string, opt *UpdateSC // GitLab API docs: // https://docs.gitlab.com/api/scim/#delete-a-single-scim-identity func (s *GroupSCIMService) DeleteSCIMIdentity(gid any, uid string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/scim/%s", PathEscape(group), uid) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/scim/%s", GroupID{gid}, uid), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_security_settings.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_security_settings.go index cab9c6a1f0..445e757f76 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_security_settings.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_security_settings.go @@ -16,7 +16,6 @@ package gitlab import ( - "fmt" "net/http" ) @@ -28,7 +27,7 @@ type ( // GroupSecuritySettingsService handles communication with the Group Security Settings // related methods of the GitLab API. // - // Gitlab API docs: + // GitLab API docs: // https://docs.gitlab.com/api/group_security_settings/ GroupSecuritySettingsService struct { client *Client @@ -39,7 +38,7 @@ var _ GroupSecuritySettingsServiceInterface = (*GroupSecuritySettingsService)(ni // GroupSecuritySettings represents the group security settings data. 
// -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/group_security_settings/ type GroupSecuritySettings struct { SecretPushProtectionEnabled bool `json:"secret_push_protection_enabled"` @@ -60,8 +59,8 @@ func (s GroupSecuritySettings) String() string { // GitLab API docs: // https://docs.gitlab.com/api/group_security_settings/#update-secret_push_protection_enabled-setting type UpdateGroupSecuritySettingsOptions struct { - SecretPushProtectionEnabled *bool `url:"secret_push_protection_enabled,omitempty" json:"secret_push_protection_enabled,omitempty"` - ProjectsToExclude *[]int `url:"projects_to_exclude,omitempty" json:"projects_to_exclude,omitempty"` + SecretPushProtectionEnabled *bool `url:"secret_push_protection_enabled,omitempty" json:"secret_push_protection_enabled,omitempty"` + ProjectsToExclude *[]int64 `url:"projects_to_exclude,omitempty" json:"projects_to_exclude,omitempty"` } // UpdateSecretPushProtectionEnabledSetting updates the secret_push_protection_enabled @@ -70,21 +69,10 @@ type UpdateGroupSecuritySettingsOptions struct { // GitLab API Docs: // https://docs.gitlab.com/api/group_security_settings/#update-secret_push_protection_enabled-setting func (s *GroupSecuritySettingsService) UpdateSecretPushProtectionEnabledSetting(gid any, opt UpdateGroupSecuritySettingsOptions, options ...RequestOptionFunc) (*GroupSecuritySettings, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/security_settings", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - settings := new(GroupSecuritySettings) - resp, err := s.client.Do(req, &settings) - if err != nil { - return nil, resp, err - } - - return settings, resp, err + return do[*GroupSecuritySettings](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/security_settings", GroupID{gid}), + withAPIOpts(opt), + 
withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_serviceaccounts.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_serviceaccounts.go index 8021b56de6..f8cd5bf37e 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_serviceaccounts.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_serviceaccounts.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -25,9 +24,9 @@ import ( // GroupServiceAccount represents a GitLab service account user. // // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#create-a-service-account-user +// https://docs.gitlab.com/api/service_accounts/#create-a-group-service-account type GroupServiceAccount struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` UserName string `json:"username"` Email string `json:"email"` @@ -36,7 +35,7 @@ type GroupServiceAccount struct { // ListServiceAccountsOptions represents the available ListServiceAccounts() options. // // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#list-all-service-account-users +// https://docs.gitlab.com/api/service_accounts/#list-all-group-service-accounts type ListServiceAccountsOptions struct { ListOptions OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` @@ -46,32 +45,19 @@ type ListServiceAccountsOptions struct { // ListServiceAccounts gets a list of service accounts. 
// // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#list-all-service-account-users +// https://docs.gitlab.com/api/service_accounts/#list-all-group-service-accounts func (s *GroupsService) ListServiceAccounts(gid any, opt *ListServiceAccountsOptions, options ...RequestOptionFunc) ([]*GroupServiceAccount, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var sa []*GroupServiceAccount - resp, err := s.client.Do(req, &sa) - if err != nil { - return nil, resp, err - } - - return sa, resp, nil + return do[[]*GroupServiceAccount](s.client, + withPath("groups/%s/service_accounts", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateServiceAccountOptions represents the available CreateServiceAccount() options. // // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#create-a-service-account-user +// https://docs.gitlab.com/api/service_accounts/#create-a-group-service-account type CreateServiceAccountOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` Username *string `url:"username,omitempty" json:"username,omitempty"` @@ -83,35 +69,24 @@ type CreateServiceAccountOptions struct { // This API endpoint works on top-level groups only. It does not work on subgroups. 
// // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#create-a-service-account-user +// https://docs.gitlab.com/api/service_accounts/#create-a-group-service-account func (s *GroupsService) CreateServiceAccount(gid any, opt *CreateServiceAccountOptions, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - sa := new(GroupServiceAccount) - resp, err := s.client.Do(req, sa) - if err != nil { - return nil, resp, err - } - - return sa, resp, nil + return do[*GroupServiceAccount](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/service_accounts", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateServiceAccountOptions represents the available UpdateServiceAccount() options. // // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#update-a-service-account-user +// https://docs.gitlab.com/api/service_accounts/#update-a-group-service-account type UpdateServiceAccountOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` Username *string `url:"username,omitempty" json:"username,omitempty"` + Email *string `url:"email,omitempty" json:"email,omitempty"` } // UpdateServiceAccount updates a service account user. @@ -119,32 +94,20 @@ type UpdateServiceAccountOptions struct { // This API endpoint works on top-level groups only. It does not work on subgroups. 
// // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#update-a-service-account-user -func (s *GroupsService) UpdateServiceAccount(gid any, serviceAccount int, opt *UpdateServiceAccountOptions, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts/%d", PathEscape(group), serviceAccount) - - req, err := s.client.NewRequest(http.MethodPatch, u, opt, options) - if err != nil { - return nil, nil, err - } - - sa := new(GroupServiceAccount) - resp, err := s.client.Do(req, sa) - if err != nil { - return nil, resp, err - } - - return sa, resp, nil +// https://docs.gitlab.com/api/service_accounts/#update-a-group-service-account +func (s *GroupsService) UpdateServiceAccount(gid any, serviceAccount int64, opt *UpdateServiceAccountOptions, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) { + return do[*GroupServiceAccount](s.client, + withMethod(http.MethodPatch), + withPath("groups/%s/service_accounts/%d", GroupID{gid}, serviceAccount), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteServiceAccountOptions represents the available DeleteServiceAccount() options. // // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#delete-a-service-account-user +// https://docs.gitlab.com/api/service_accounts/#delete-a-group-service-account type DeleteServiceAccountOptions struct { HardDelete *bool `url:"hard_delete,omitempty" json:"hard_delete,omitempty"` } @@ -154,27 +117,22 @@ type DeleteServiceAccountOptions struct { // This API endpoint works on top-level groups only. It does not work on subgroups. 
// // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#delete-a-service-account-user -func (s *GroupsService) DeleteServiceAccount(gid any, serviceAccount int, opt *DeleteServiceAccountOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts/%d", PathEscape(group), serviceAccount) - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +// https://docs.gitlab.com/api/service_accounts/#delete-a-group-service-account +func (s *GroupsService) DeleteServiceAccount(gid any, serviceAccount int64, opt *DeleteServiceAccountOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/service_accounts/%d", GroupID{gid}, serviceAccount), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // ListServiceAccountPersonalAccessTokensOptions represents the available // ListServiceAccountPersonalAccessTokens() options. 
// // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#list-all-personal-access-tokens-for-a-service-account-user +// https://docs.gitlab.com/api/service_accounts/#list-all-personal-access-tokens-for-a-group-service-account type ListServiceAccountPersonalAccessTokensOptions struct { ListOptions CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` @@ -184,7 +142,7 @@ type ListServiceAccountPersonalAccessTokensOptions struct { LastUsedAfter *time.Time `url:"last_used_after,omitempty" json:"last_used_after,omitempty"` LastUsedBefore *time.Time `url:"last_used_before,omitempty" json:"last_used_before,omitempty"` Revoked *bool `url:"revoked,omitempty" json:"revoked,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + UserID *int64 `url:"user_id,omitempty" json:"user_id,omitempty"` Search *string `url:"search,omitempty" json:"search,omitempty"` Sort *string `url:"sort,omitempty" json:"sort,omitempty"` State *string `url:"state,omitempty" json:"state,omitempty"` @@ -194,33 +152,20 @@ type ListServiceAccountPersonalAccessTokensOptions struct { // service account user for a group. 
// // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#list-all-personal-access-tokens-for-a-service-account-user -func (s *GroupsService) ListServiceAccountPersonalAccessTokens(gid any, serviceAccount int, opt *ListServiceAccountPersonalAccessTokensOptions, options ...RequestOptionFunc) ([]*PersonalAccessToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts/%d/personal_access_tokens", PathEscape(group), serviceAccount) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pats []*PersonalAccessToken - resp, err := s.client.Do(req, &pats) - if err != nil { - return nil, resp, err - } - - return pats, resp, nil +// https://docs.gitlab.com/api/service_accounts/#list-all-personal-access-tokens-for-a-group-service-account +func (s *GroupsService) ListServiceAccountPersonalAccessTokens(gid any, serviceAccount int64, opt *ListServiceAccountPersonalAccessTokensOptions, options ...RequestOptionFunc) ([]*PersonalAccessToken, *Response, error) { + return do[[]*PersonalAccessToken](s.client, + withPath("groups/%s/service_accounts/%d/personal_access_tokens", GroupID{gid}, serviceAccount), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateServiceAccountPersonalAccessTokenOptions represents the available // CreateServiceAccountPersonalAccessToken() options. 
// // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#create-a-personal-access-token-for-a-service-account-user +// https://docs.gitlab.com/api/service_accounts/#create-a-personal-access-token-for-a-group-service-account type CreateServiceAccountPersonalAccessTokenOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` Description *string `url:"description,omitempty" json:"description,omitempty"` @@ -232,53 +177,35 @@ type CreateServiceAccountPersonalAccessTokenOptions struct { // service account user for a group. // // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#create-a-personal-access-token-for-a-service-account-user -func (s *GroupsService) CreateServiceAccountPersonalAccessToken(gid any, serviceAccount int, opt *CreateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts/%d/personal_access_tokens", PathEscape(group), serviceAccount) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pat := new(PersonalAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil +// https://docs.gitlab.com/api/service_accounts/#create-a-personal-access-token-for-a-group-service-account +func (s *GroupsService) CreateServiceAccountPersonalAccessToken(gid any, serviceAccount int64, opt *CreateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { + return do[*PersonalAccessToken](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/service_accounts/%d/personal_access_tokens", GroupID{gid}, serviceAccount), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RevokeServiceAccountPersonalAccessToken revokes a personal access 
token for an // existing service account user in a given top-level group. // // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#revoke-a-personal-access-token-for-a-service-account-user -func (s *GroupsService) RevokeServiceAccountPersonalAccessToken(gid any, serviceAccount, token int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts/%d/personal_access_tokens/%d", PathEscape(group), serviceAccount, token) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +// https://docs.gitlab.com/api/service_accounts/#revoke-a-personal-access-token-for-a-group-service-account +func (s *GroupsService) RevokeServiceAccountPersonalAccessToken(gid any, serviceAccount, token int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/service_accounts/%d/personal_access_tokens/%d", GroupID{gid}, serviceAccount, token), + withRequestOpts(options...), + ) + return resp, err } // RotateServiceAccountPersonalAccessTokenOptions represents the available RotateServiceAccountPersonalAccessToken() // options. // // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#rotate-a-personal-access-token-for-a-service-account-user +// https://docs.gitlab.com/api/service_accounts/#rotate-a-personal-access-token-for-a-group-service-account type RotateServiceAccountPersonalAccessTokenOptions struct { ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` } @@ -287,24 +214,12 @@ type RotateServiceAccountPersonalAccessTokenOptions struct { // service account user for a group. 
// // GitLab API docs: -// https://docs.gitlab.com/api/group_service_accounts/#rotate-a-personal-access-token-for-a-service-account-user -func (s *GroupsService) RotateServiceAccountPersonalAccessToken(gid any, serviceAccount, token int, opt *RotateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts/%d/personal_access_tokens/%d/rotate", PathEscape(group), serviceAccount, token) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pat := new(PersonalAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil +// https://docs.gitlab.com/api/service_accounts/#rotate-a-personal-access-token-for-a-group-service-account +func (s *GroupsService) RotateServiceAccountPersonalAccessToken(gid any, serviceAccount, token int64, opt *RotateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { + return do[*PersonalAccessToken](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/service_accounts/%d/personal_access_tokens/%d/rotate", GroupID{gid}, serviceAccount, token), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_ssh_certificates.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_ssh_certificates.go index 62774048d3..1be649ad97 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_ssh_certificates.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_ssh_certificates.go @@ -1,7 +1,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -11,7 +10,7 @@ type ( GroupSSHCertificatesServiceInterface interface { ListGroupSSHCertificates(gid any, options ...RequestOptionFunc) ([]*GroupSSHCertificate, *Response, error) 
CreateGroupSSHCertificate(gid any, opt *CreateGroupSSHCertificateOptions, options ...RequestOptionFunc) (*GroupSSHCertificate, *Response, error) - DeleteGroupSSHCertificate(gid any, cert int, options ...RequestOptionFunc) (*Response, error) + DeleteGroupSSHCertificate(gid any, cert int64, options ...RequestOptionFunc) (*Response, error) } // GroupSSHCertificatesService handles communication with the group @@ -29,7 +28,7 @@ var _ GroupSSHCertificatesServiceInterface = (*GroupSSHCertificatesService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/group_ssh_certificates/ type GroupSSHCertificate struct { - ID int `json:"id"` + ID int64 `json:"id"` Title string `json:"title"` Key string `json:"key"` CreatedAt *time.Time `json:"created_at"` @@ -38,27 +37,13 @@ type GroupSSHCertificate struct { // ListGroupSSHCertificates gets a list of SSH certificates for a specified // group. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/group_ssh_certificates/#get-all-ssh-certificates-for-a-particular-group func (s *GroupSSHCertificatesService) ListGroupSSHCertificates(gid any, options ...RequestOptionFunc) ([]*GroupSSHCertificate, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/ssh_certificates", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var certs []*GroupSSHCertificate - resp, err := s.client.Do(req, &certs) - if err != nil { - return nil, resp, err - } - - return certs, resp, nil + return do[[]*GroupSSHCertificate](s.client, + withPath("groups/%s/ssh_certificates", GroupID{gid}), + withRequestOpts(options...), + ) } // CreateGroupSSHCertificateOptions represents the available @@ -73,44 +58,26 @@ type CreateGroupSSHCertificateOptions struct { // CreateGroupSSHCertificate creates a new SSH certificate in the group. 
// -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/group_ssh_certificates/#create-ssh-certificate func (s *GroupSSHCertificatesService) CreateGroupSSHCertificate(gid any, opt *CreateGroupSSHCertificateOptions, options ...RequestOptionFunc) (*GroupSSHCertificate, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/ssh_certificates", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - cert := new(GroupSSHCertificate) - resp, err := s.client.Do(req, cert) - if err != nil { - return nil, resp, err - } - - return cert, resp, nil + return do[*GroupSSHCertificate](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/ssh_certificates", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteGroupSSHCertificate deletes a SSH certificate from a specified group. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/group_ssh_certificates/#delete-group-ssh-certificate -func (s *GroupSSHCertificatesService) DeleteGroupSSHCertificate(gid any, cert int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/ssh_certificates/%d", PathEscape(group), cert) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupSSHCertificatesService) DeleteGroupSSHCertificate(gid any, cert int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/ssh_certificates/%d", GroupID{gid}, cert), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_variables.go 
b/vendor/gitlab.com/gitlab-org/api/client-go/group_variables.go index ae643a2649..f85db4ef6b 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_variables.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_variables.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "net/url" ) @@ -25,10 +24,30 @@ import ( type ( // GroupVariablesServiceInterface defines methods for the GroupVariablesService. GroupVariablesServiceInterface interface { + // ListVariables gets a list of all variables for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_level_variables/#list-group-variables ListVariables(gid any, opt *ListGroupVariablesOptions, options ...RequestOptionFunc) ([]*GroupVariable, *Response, error) + // GetVariable gets a variable. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_level_variables/#show-variable-details GetVariable(gid any, key string, opt *GetGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) + // CreateVariable creates a new group variable. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_level_variables/#create-variable CreateVariable(gid any, opt *CreateGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) + // UpdateVariable updates an existing group variable. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_level_variables/#update-variable UpdateVariable(gid any, key string, opt *UpdateGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) + // RemoveVariable removes a group's variable. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/group_level_variables/#remove-variable RemoveVariable(gid any, key string, opt *RemoveGroupVariableOptions, options ...RequestOptionFunc) (*Response, error) } @@ -69,31 +88,16 @@ func (v GroupVariable) String() string { // // GitLab API docs: // https://docs.gitlab.com/api/group_level_variables/#list-group-variables -type ListGroupVariablesOptions ListOptions +type ListGroupVariablesOptions struct { + ListOptions +} -// ListVariables gets a list of all variables for a group. -// -// GitLab API docs: -// https://docs.gitlab.com/api/group_level_variables/#list-group-variables func (s *GroupVariablesService) ListVariables(gid any, opt *ListGroupVariablesOptions, options ...RequestOptionFunc) ([]*GroupVariable, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/variables", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var vs []*GroupVariable - resp, err := s.client.Do(req, &vs) - if err != nil { - return nil, resp, err - } - - return vs, resp, nil + return do[[]*GroupVariable](s.client, + withPath("groups/%s/variables", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupVariableOptions represents the available GetVariable() @@ -105,29 +109,12 @@ type GetGroupVariableOptions struct { Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"` } -// GetVariable gets a variable. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_level_variables/#show-variable-details func (s *GroupVariablesService) GetVariable(gid any, key string, opt *GetGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/variables/%s", PathEscape(group), url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(GroupVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil + return do[*GroupVariable](s.client, + withPath("groups/%s/variables/%s", GroupID{gid}, url.PathEscape(key)), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateGroupVariableOptions represents the available CreateVariable() @@ -147,29 +134,13 @@ type CreateGroupVariableOptions struct { VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } -// CreateVariable creates a new group variable. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_level_variables/#create-variable func (s *GroupVariablesService) CreateVariable(gid any, opt *CreateGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/variables", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(GroupVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil + return do[*GroupVariable](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/variables", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateGroupVariableOptions represents the available UpdateVariable() @@ -188,30 +159,13 @@ type UpdateGroupVariableOptions struct { VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } -// UpdateVariable updates the position of an existing -// group issue board list. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_level_variables/#update-variable func (s *GroupVariablesService) UpdateVariable(gid any, key string, opt *UpdateGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/variables/%s", PathEscape(group), url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(GroupVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil + return do[*GroupVariable](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/variables/%s", GroupID{gid}, url.PathEscape(key)), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RemoveGroupVariableOptions represents the available RemoveVariable() @@ -223,21 +177,12 @@ type RemoveGroupVariableOptions struct { Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"` } -// RemoveVariable removes a group's variable. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_level_variables/#remove-variable func (s *GroupVariablesService) RemoveVariable(gid any, key string, opt *RemoveGroupVariableOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/variables/%s", PathEscape(group), url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/variables/%s", GroupID{gid}, url.PathEscape(key)), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_wikis.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_wikis.go index b04fd99836..c78eedbc13 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/group_wikis.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_wikis.go @@ -16,9 +16,7 @@ package gitlab import ( - "fmt" "net/http" - "net/url" ) type ( @@ -32,7 +30,7 @@ type ( } // GroupWikisService handles communication with the group wikis related methods of - // the Gitlab API. + // the GitLab API. 
// // GitLab API docs: https://docs.gitlab.com/api/group_wikis/ GroupWikisService struct { @@ -71,24 +69,11 @@ type ListGroupWikisOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_wikis/#list-wiki-pages func (s *GroupWikisService) ListGroupWikis(gid any, opt *ListGroupWikisOptions, options ...RequestOptionFunc) ([]*GroupWiki, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/wikis", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gws []*GroupWiki - resp, err := s.client.Do(req, &gws) - if err != nil { - return nil, resp, err - } - - return gws, resp, nil + return do[[]*GroupWiki](s.client, + withPath("groups/%s/wikis", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupWikiPageOptions represents options to GetGroupWikiPage @@ -105,24 +90,11 @@ type GetGroupWikiPageOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_wikis/#get-a-wiki-page func (s *GroupWikisService) GetGroupWikiPage(gid any, slug string, opt *GetGroupWikiPageOptions, options ...RequestOptionFunc) (*GroupWiki, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/wikis/%s", PathEscape(group), url.PathEscape(slug)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - gw := new(GroupWiki) - resp, err := s.client.Do(req, gw) - if err != nil { - return nil, resp, err - } - - return gw, resp, nil + return do[*GroupWiki](s.client, + withPath("groups/%s/wikis/%s", GroupID{gid}, slug), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateGroupWikiPageOptions represents options to CreateGroupWikiPage. 
@@ -141,24 +113,12 @@ type CreateGroupWikiPageOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_wikis/#create-a-new-wiki-page func (s *GroupWikisService) CreateGroupWikiPage(gid any, opt *CreateGroupWikiPageOptions, options ...RequestOptionFunc) (*GroupWiki, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/wikis", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - w := new(GroupWiki) - resp, err := s.client.Do(req, w) - if err != nil { - return nil, resp, err - } - - return w, resp, nil + return do[*GroupWiki](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/wikis", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditGroupWikiPageOptions represents options to EditGroupWikiPage. @@ -177,24 +137,12 @@ type EditGroupWikiPageOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_wikis/#edit-an-existing-wiki-page func (s *GroupWikisService) EditGroupWikiPage(gid any, slug string, opt *EditGroupWikiPageOptions, options ...RequestOptionFunc) (*GroupWiki, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/wikis/%s", PathEscape(group), url.PathEscape(slug)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - w := new(GroupWiki) - resp, err := s.client.Do(req, w) - if err != nil { - return nil, resp, err - } - - return w, resp, nil + return do[*GroupWiki](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/wikis/%s", GroupID{gid}, slug), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteGroupWikiPage deletes a wiki page with a given slug. 
@@ -202,16 +150,10 @@ func (s *GroupWikisService) EditGroupWikiPage(gid any, slug string, opt *EditGro // GitLab API docs: // https://docs.gitlab.com/api/group_wikis/#delete-a-wiki-page func (s *GroupWikisService) DeleteGroupWikiPage(gid any, slug string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/wikis/%s", PathEscape(group), url.PathEscape(slug)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/wikis/%s", GroupID{gid}, slug), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/groups.go b/vendor/gitlab.com/gitlab-org/api/client-go/groups.go index a453134283..c288b455c0 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/groups.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/groups.go @@ -19,14 +19,12 @@ package gitlab import ( "bytes" "encoding/json" - "fmt" + "errors" "io" "net/http" "net/url" "strconv" "time" - - retryablehttp "github.com/hashicorp/go-retryablehttp" ) type ( @@ -57,7 +55,7 @@ type ( AddGroupSAMLLink(gid any, opt *AddGroupSAMLLinkOptions, options ...RequestOptionFunc) (*SAMLGroupLink, *Response, error) DeleteGroupSAMLLink(gid any, samlGroupName string, options ...RequestOptionFunc) (*Response, error) ShareGroupWithGroup(gid any, opt *ShareGroupWithGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) - UnshareGroupFromGroup(gid any, groupID int, options ...RequestOptionFunc) (*Response, error) + UnshareGroupFromGroup(gid any, groupID int64, options ...RequestOptionFunc) (*Response, error) GetGroupPushRules(gid any, options ...RequestOptionFunc) (*GroupPushRules, *Response, error) AddGroupPushRule(gid any, opt *AddGroupPushRuleOptions, options ...RequestOptionFunc) 
(*GroupPushRules, *Response, error) EditGroupPushRule(gid any, opt *EditGroupPushRuleOptions, options ...RequestOptionFunc) (*GroupPushRules, *Response, error) @@ -65,33 +63,33 @@ type ( // group_hooks.go ListGroupHooks(gid any, opt *ListGroupHooksOptions, options ...RequestOptionFunc) ([]*GroupHook, *Response, error) - GetGroupHook(gid any, hook int, options ...RequestOptionFunc) (*GroupHook, *Response, error) - ResendGroupHookEvent(gid any, hook int, hookEventID int, options ...RequestOptionFunc) (*Response, error) + GetGroupHook(gid any, hook int64, options ...RequestOptionFunc) (*GroupHook, *Response, error) + ResendGroupHookEvent(gid any, hook int64, hookEventID int64, options ...RequestOptionFunc) (*Response, error) AddGroupHook(gid any, opt *AddGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) - EditGroupHook(gid any, hook int, opt *EditGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) - DeleteGroupHook(gid any, hook int, options ...RequestOptionFunc) (*Response, error) - TriggerTestGroupHook(pid any, hook int, trigger GroupHookTrigger, options ...RequestOptionFunc) (*Response, error) - SetGroupCustomHeader(gid any, hook int, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) - DeleteGroupCustomHeader(gid any, hook int, key string, options ...RequestOptionFunc) (*Response, error) - SetGroupHookURLVariable(gid any, hook int, key string, opt *SetHookURLVariableOptions, options ...RequestOptionFunc) (*Response, error) - DeleteGroupHookURLVariable(gid any, hook int, key string, options ...RequestOptionFunc) (*Response, error) + EditGroupHook(gid any, hook int64, opt *EditGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) + DeleteGroupHook(gid any, hook int64, options ...RequestOptionFunc) (*Response, error) + TriggerTestGroupHook(pid any, hook int64, trigger GroupHookTrigger, options ...RequestOptionFunc) (*Response, error) + 
SetGroupCustomHeader(gid any, hook int64, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) + DeleteGroupCustomHeader(gid any, hook int64, key string, options ...RequestOptionFunc) (*Response, error) + SetGroupHookURLVariable(gid any, hook int64, key string, opt *SetHookURLVariableOptions, options ...RequestOptionFunc) (*Response, error) + DeleteGroupHookURLVariable(gid any, hook int64, key string, options ...RequestOptionFunc) (*Response, error) // group_serviceaccounts.go ListServiceAccounts(gid any, opt *ListServiceAccountsOptions, options ...RequestOptionFunc) ([]*GroupServiceAccount, *Response, error) CreateServiceAccount(gid any, opt *CreateServiceAccountOptions, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) - UpdateServiceAccount(gid any, serviceAccount int, opt *UpdateServiceAccountOptions, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) - DeleteServiceAccount(gid any, serviceAccount int, opt *DeleteServiceAccountOptions, options ...RequestOptionFunc) (*Response, error) - ListServiceAccountPersonalAccessTokens(gid any, serviceAccount int, opt *ListServiceAccountPersonalAccessTokensOptions, options ...RequestOptionFunc) ([]*PersonalAccessToken, *Response, error) - CreateServiceAccountPersonalAccessToken(gid any, serviceAccount int, opt *CreateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) - RevokeServiceAccountPersonalAccessToken(gid any, serviceAccount, token int, options ...RequestOptionFunc) (*Response, error) - RotateServiceAccountPersonalAccessToken(gid any, serviceAccount, token int, opt *RotateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) + UpdateServiceAccount(gid any, serviceAccount int64, opt *UpdateServiceAccountOptions, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) + DeleteServiceAccount(gid any, 
serviceAccount int64, opt *DeleteServiceAccountOptions, options ...RequestOptionFunc) (*Response, error) + ListServiceAccountPersonalAccessTokens(gid any, serviceAccount int64, opt *ListServiceAccountPersonalAccessTokensOptions, options ...RequestOptionFunc) ([]*PersonalAccessToken, *Response, error) + CreateServiceAccountPersonalAccessToken(gid any, serviceAccount int64, opt *CreateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) + RevokeServiceAccountPersonalAccessToken(gid any, serviceAccount, token int64, options ...RequestOptionFunc) (*Response, error) + RotateServiceAccountPersonalAccessToken(gid any, serviceAccount, token int64, opt *RotateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) // group_members.go ListGroupMembers(gid any, opt *ListGroupMembersOptions, options ...RequestOptionFunc) ([]*GroupMember, *Response, error) ListAllGroupMembers(gid any, opt *ListGroupMembersOptions, options ...RequestOptionFunc) ([]*GroupMember, *Response, error) ListBillableGroupMembers(gid any, opt *ListBillableGroupMembersOptions, options ...RequestOptionFunc) ([]*BillableGroupMember, *Response, error) - ListMembershipsForBillableGroupMember(gid any, user int, opt *ListMembershipsForBillableGroupMemberOptions, options ...RequestOptionFunc) ([]*BillableUserMembership, *Response, error) - RemoveBillableGroupMember(gid any, user int, options ...RequestOptionFunc) (*Response, error) + ListMembershipsForBillableGroupMember(gid any, user int64, opt *ListMembershipsForBillableGroupMemberOptions, options ...RequestOptionFunc) ([]*BillableUserMembership, *Response, error) + RemoveBillableGroupMember(gid any, user int64, options ...RequestOptionFunc) (*Response, error) } // GroupsService handles communication with the group related methods of @@ -109,13 +107,14 @@ var _ GroupsServiceInterface = (*GroupsService)(nil) // // GitLab API docs: 
https://docs.gitlab.com/api/groups/ type Group struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Path string `json:"path"` Description string `json:"description"` MembershipLock bool `json:"membership_lock"` Visibility VisibilityValue `json:"visibility"` LFSEnabled bool `json:"lfs_enabled"` + MaxArtifactsSize int64 `json:"max_artifacts_size"` DefaultBranch string `json:"default_branch"` DefaultBranchProtectionDefaults *BranchProtectionDefaults `json:"default_branch_protection_defaults"` AvatarURL string `json:"avatar_url"` @@ -124,13 +123,13 @@ type Group struct { RepositoryStorage string `json:"repository_storage"` FullName string `json:"full_name"` FullPath string `json:"full_path"` - FileTemplateProjectID int `json:"file_template_project_id"` - ParentID int `json:"parent_id"` + FileTemplateProjectID int64 `json:"file_template_project_id"` + ParentID int64 `json:"parent_id"` Statistics *Statistics `json:"statistics"` CustomAttributes []*CustomAttribute `json:"custom_attributes"` ShareWithGroupLock bool `json:"share_with_group_lock"` RequireTwoFactorAuth bool `json:"require_two_factor_authentication"` - TwoFactorGracePeriod int `json:"two_factor_grace_period"` + TwoFactorGracePeriod int64 `json:"two_factor_grace_period"` ProjectCreationLevel ProjectCreationLevelValue `json:"project_creation_level"` AutoDevopsEnabled bool `json:"auto_devops_enabled"` SubGroupCreationLevel SubGroupCreationLevelValue `json:"subgroup_creation_level"` @@ -143,8 +142,8 @@ type Group struct { LDAPAccess AccessLevelValue `json:"ldap_access"` LDAPGroupLinks []*LDAPGroupLink `json:"ldap_group_links"` SAMLGroupLinks []*SAMLGroupLink `json:"saml_group_links"` - SharedRunnersMinutesLimit int `json:"shared_runners_minutes_limit"` - ExtraSharedRunnersMinutesLimit int `json:"extra_shared_runners_minutes_limit"` + SharedRunnersMinutesLimit int64 `json:"shared_runners_minutes_limit"` + ExtraSharedRunnersMinutesLimit int64 `json:"extra_shared_runners_minutes_limit"` 
PreventForkingOutsideGroup bool `json:"prevent_forking_outside_group"` MarkedForDeletionOn *ISOTime `json:"marked_for_deletion_on"` CreatedAt *time.Time `json:"created_at"` @@ -152,6 +151,10 @@ type Group struct { AllowedEmailDomainsList string `json:"allowed_email_domains_list"` WikiAccessLevel AccessControlValue `json:"wiki_access_level"` + OnlyAllowMergeIfPipelineSucceeds bool `json:"only_allow_merge_if_pipeline_succeeds"` + AllowMergeOnSkippedPipeline bool `json:"allow_merge_on_skipped_pipeline"` + OnlyAllowMergeIfAllDiscussionsAreResolved bool `json:"only_allow_merge_if_all_discussions_are_resolved"` + // Deprecated: will be removed in v5 of the API, use ListGroupProjects instead Projects []*Project `json:"projects"` @@ -162,19 +165,19 @@ type Group struct { EmailsDisabled bool `json:"emails_disabled"` // Deprecated: Use DefaultBranchProtectionDefaults instead - DefaultBranchProtection int `json:"default_branch_protection"` + DefaultBranchProtection int64 `json:"default_branch_protection"` } // SharedWithGroup represents a GitLab group shared with a group. // // GitLab API docs: https://docs.gitlab.com/api/groups/ type SharedWithGroup struct { - GroupID int `json:"group_id"` + GroupID int64 `json:"group_id"` GroupName string `json:"group_name"` GroupFullPath string `json:"group_full_path"` - GroupAccessLevel int `json:"group_access_level"` + GroupAccessLevel int64 `json:"group_access_level"` ExpiresAt *ISOTime `json:"expires_at"` - MemberRoleID int `json:"member_role_id"` + MemberRoleID int64 `json:"member_role_id"` } // BranchProtectionDefaults represents default Git protected branch permissions. 
@@ -182,10 +185,11 @@ type SharedWithGroup struct { // GitLab API docs: // https://docs.gitlab.com/api/groups/#options-for-default_branch_protection_defaults type BranchProtectionDefaults struct { - AllowedToPush []*GroupAccessLevel `json:"allowed_to_push,omitempty"` - AllowForcePush bool `json:"allow_force_push,omitempty"` - AllowedToMerge []*GroupAccessLevel `json:"allowed_to_merge,omitempty"` - DeveloperCanInitialPush bool `json:"developer_can_initial_push,omitempty"` + AllowedToPush []*GroupAccessLevel `json:"allowed_to_push,omitempty"` + AllowForcePush bool `json:"allow_force_push,omitempty"` + AllowedToMerge []*GroupAccessLevel `json:"allowed_to_merge,omitempty"` + DeveloperCanInitialPush bool `json:"developer_can_initial_push,omitempty"` + CodeOwnerApprovalRequired bool `json:"code_owner_approval_required,omitempty"` } // GroupAccessLevel represents default branch protection defaults access levels. @@ -231,7 +235,8 @@ type LDAPGroupLink struct { type SAMLGroupLink struct { Name string `json:"name"` AccessLevel AccessLevelValue `json:"access_level"` - MemberRoleID int `json:"member_role_id,omitempty"` + MemberRoleID int64 `json:"member_role_id,omitempty"` + Provider string `json:"provider,omitempty"` } // ListGroupsOptions represents the available ListGroups() options. 
@@ -239,17 +244,21 @@ type SAMLGroupLink struct { // GitLab API docs: https://docs.gitlab.com/api/groups/#list-groups type ListGroupsOptions struct { ListOptions - SkipGroups *[]int `url:"skip_groups,omitempty" del:"," json:"skip_groups,omitempty"` + SkipGroups *[]int64 `url:"skip_groups,omitempty" del:"," json:"skip_groups,omitempty"` AllAvailable *bool `url:"all_available,omitempty" json:"all_available,omitempty"` Search *string `url:"search,omitempty" json:"search,omitempty"` OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` Sort *string `url:"sort,omitempty" json:"sort,omitempty"` Statistics *bool `url:"statistics,omitempty" json:"statistics,omitempty"` + Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` Owned *bool `url:"owned,omitempty" json:"owned,omitempty"` MinAccessLevel *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"` TopLevelOnly *bool `url:"top_level_only,omitempty" json:"top_level_only,omitempty"` RepositoryStorage *string `url:"repository_storage,omitempty" json:"repository_storage,omitempty"` + MarkedForDeletionOn *ISOTime `url:"marked_for_deletion_on,omitempty" json:"marked_for_deletion_on,omitempty"` + Active *bool `url:"active,omitempty" json:"active,omitempty"` + Archived *bool `url:"archived,omitempty" json:"archived,omitempty"` } // ListGroups gets a list of groups (as user: my groups, as admin: all groups). 
@@ -257,18 +266,11 @@ type ListGroupsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/groups/#list-groups func (s *GroupsService) ListGroups(opt *ListGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "groups", opt, options) - if err != nil { - return nil, nil, err - } - - var gs []*Group - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil + return do[[]*Group](s.client, + withPath("groups"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListSubGroupsOptions represents the available ListSubGroups() options. @@ -282,24 +284,11 @@ type ListSubGroupsOptions ListGroupsOptions // GitLab API docs: // https://docs.gitlab.com/api/groups/#list-subgroups func (s *GroupsService) ListSubGroups(gid any, opt *ListSubGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/subgroups", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gs []*Group - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil + return do[[]*Group](s.client, + withPath("groups/%s/subgroups", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListDescendantGroupsOptions represents the available ListDescendantGroups() @@ -314,32 +303,20 @@ type ListDescendantGroupsOptions ListGroupsOptions // GitLab API docs: // https://docs.gitlab.com/api/groups/#list-descendant-groups func (s *GroupsService) ListDescendantGroups(gid any, opt *ListDescendantGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/descendant_groups", PathEscape(group)) - - 
req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gs []*Group - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil + return do[[]*Group](s.client, + withPath("groups/%s/descendant_groups", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// ListGroupProjectsOptions represents the available ListGroup() options. +// ListGroupProjectsOptions represents the available ListGroupProjects() options. // // GitLab API docs: // https://docs.gitlab.com/api/groups/#list-projects type ListGroupProjectsOptions struct { ListOptions + Active *bool `url:"active,omitempty" json:"active,omitempty"` Archived *bool `url:"archived,omitempty" json:"archived,omitempty"` IncludeSubGroups *bool `url:"include_subgroups,omitempty" json:"include_subgroups,omitempty"` MinAccessLevel *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"` @@ -363,24 +340,11 @@ type ListGroupProjectsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/groups/#list-projects func (s *GroupsService) ListGroupProjects(gid any, opt *ListGroupProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/projects", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Project - resp, err := s.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil + return do[[]*Project](s.client, + withPath("groups/%s/projects", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupOptions represents the available GetGroup() options. 
@@ -398,24 +362,11 @@ type GetGroupOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/groups/#get-a-single-group func (s *GroupsService) GetGroup(gid any, opt *GetGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil + return do[*Group](s.client, + withPath("groups/%s", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DownloadAvatar downloads a group avatar. @@ -423,24 +374,14 @@ func (s *GroupsService) GetGroup(gid any, opt *GetGroupOptions, options ...Reque // GitLab API docs: // https://docs.gitlab.com/api/groups/#download-a-group-avatar func (s *GroupsService) DownloadAvatar(gid any, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/avatar", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - avatar := new(bytes.Buffer) - resp, err := s.client.Do(req, avatar) + buf, resp, err := do[bytes.Buffer](s.client, + withPath("groups/%s/avatar", GroupID{gid}), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return bytes.NewReader(avatar.Bytes()), resp, err + return bytes.NewReader(buf.Bytes()), resp, nil } // CreateGroupOptions represents the available CreateGroup() options. 
@@ -456,7 +397,7 @@ type CreateGroupOptions struct { Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` ShareWithGroupLock *bool `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"` RequireTwoFactorAuth *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` - TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` + TwoFactorGracePeriod *int64 `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` ProjectCreationLevel *ProjectCreationLevelValue `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"` AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` SubGroupCreationLevel *SubGroupCreationLevelValue `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"` @@ -465,16 +406,21 @@ type CreateGroupOptions struct { LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` DefaultBranchProtectionDefaults *DefaultBranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"` RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` - ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` - SharedRunnersMinutesLimit *int `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` - ExtraSharedRunnersMinutesLimit *int `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` + ParentID *int64 `url:"parent_id,omitempty" json:"parent_id,omitempty"` + SharedRunnersMinutesLimit *int64 `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` + ExtraSharedRunnersMinutesLimit *int64 
`url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` // Deprecated: Use EmailsEnabled instead EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` // Deprecated: User DefaultBranchProtectionDefaults instead - DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` + DefaultBranchProtection *int64 `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` + + EnabledGitAccessProtocol *EnabledGitAccessProtocolValue `url:"enabled_git_access_protocol,omitempty" json:"enabled_git_access_protocol,omitempty"` + OrganizationID *int64 `url:"organization_id,omitempty" json:"organization_id,omitempty"` + DuoAvailability *DuoAvailabilityValue `url:"duo_availability,omitempty" json:"duo_availability,omitempty"` + ExperimentFeaturesEnabled *bool `url:"experiment_features_enabled,omitempty" json:"experiment_features_enabled,omitempty"` } // DefaultBranchProtectionDefaultsOptions represents the available options for @@ -483,10 +429,11 @@ type CreateGroupOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/groups/#options-for-default_branch_protection_defaults type DefaultBranchProtectionDefaultsOptions struct { - AllowedToPush *[]*GroupAccessLevel `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` - AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` - AllowedToMerge *[]*GroupAccessLevel `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` - DeveloperCanInitialPush *bool `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"` + AllowedToPush *[]*GroupAccessLevel `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` + AllowForcePush *bool `url:"allow_force_push,omitempty" 
json:"allow_force_push,omitempty"` + AllowedToMerge *[]*GroupAccessLevel `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` + DeveloperCanInitialPush *bool `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"` + CodeOwnerApprovalRequired *bool `url:"code_owner_approval_required,omitempty" json:"code_owner_approval_required,omitempty"` } // EncodeValues implements the query.Encoder interface @@ -497,13 +444,16 @@ func (d *DefaultBranchProtectionDefaultsOptions) EncodeValues(key string, v *url if d.DeveloperCanInitialPush != nil { v.Add(key+"[developer_can_initial_push]", strconv.FormatBool(*d.DeveloperCanInitialPush)) } + if d.CodeOwnerApprovalRequired != nil { + v.Add(key+"[code_owner_approval_required]", strconv.FormatBool(*d.CodeOwnerApprovalRequired)) + } // The GitLab API only accepts one value for `allowed_to_merge` even when multiples are // provided on the request. The API will take the highest permission level. For instance, // if 'developer' and 'maintainer' are provided, the API will take 'maintainer'. 
if d.AllowedToMerge != nil { for _, atm := range *d.AllowedToMerge { if atm != nil { - v.Add(key+"[allowed_to_merge][][access_level]", strconv.Itoa((int)(*atm.AccessLevel))) + v.Add(key+"[allowed_to_merge][][access_level]", strconv.FormatInt((int64)(*atm.AccessLevel), 10)) } } } @@ -513,10 +463,11 @@ func (d *DefaultBranchProtectionDefaultsOptions) EncodeValues(key string, v *url if d.AllowedToPush != nil { for _, atp := range *d.AllowedToPush { if atp != nil { - v.Add(key+"[allowed_to_push][][access_level]", strconv.Itoa((int)(*atp.AccessLevel))) + v.Add(key+"[allowed_to_push][][access_level]", strconv.FormatInt((int64)(*atp.AccessLevel), 10)) } } } + return nil } @@ -531,38 +482,19 @@ func (d *DefaultBranchProtectionDefaultsOptions) EncodeValues(key string, v *url // // GitLab API docs: https://docs.gitlab.com/api/groups/#create-a-group func (s *GroupsService) CreateGroup(opt *CreateGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { - var err error - var req *retryablehttp.Request - - if opt.Avatar == nil { - req, err = s.client.NewRequest(http.MethodPost, "groups", opt, options) - } else { - // since the Avatar is provided, check allowed_to_push and - // allowed_to_merge access levels and error if multiples are provided + reqOpts := []doOption{ + withMethod(http.MethodPost), + withPath("groups"), + withAPIOpts(opt), + withRequestOpts(options...), + } + if opt.Avatar != nil { if opt.DefaultBranchProtectionDefaults != nil && (len(*opt.DefaultBranchProtectionDefaults.AllowedToMerge) > 1 || len(*opt.DefaultBranchProtectionDefaults.AllowedToPush) > 1) { - return nil, nil, fmt.Errorf("multiple access levels for allowed_to_merge or allowed_to_push are not permitted when an Avatar is also specified as it will result in unexpected behavior") + return nil, nil, errors.New("multiple access levels for allowed_to_merge or allowed_to_push are not permitted when an Avatar is also specified as it will result in unexpected behavior") } - req, err = 
s.client.UploadRequest( - http.MethodPost, - "groups", - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) - } - if err != nil { - return nil, nil, err + reqOpts = append(reqOpts, withUpload(opt.Avatar.Image, opt.Avatar.Filename, UploadAvatar)) } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil + return do[*Group](s.client, reqOpts...) } // TransferGroup transfers a project to the Group namespace. Available only @@ -571,28 +503,11 @@ func (s *GroupsService) CreateGroup(opt *CreateGroupOptions, options ...RequestO // GitLab API docs: // https://docs.gitlab.com/api/groups/#transfer-a-project-to-a-group func (s *GroupsService) TransferGroup(gid any, pid any, options ...RequestOptionFunc) (*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/projects/%s", PathEscape(group), PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil + return do[*Group](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/projects/%s", GroupID{gid}, ProjectID{pid}), + withRequestOpts(options...), + ) } // TransferSubGroupOptions represents the available TransferSubGroup() options. 
@@ -600,7 +515,7 @@ func (s *GroupsService) TransferGroup(gid any, pid any, options ...RequestOption // GitLab API docs: // https://docs.gitlab.com/api/groups/#transfer-a-group type TransferSubGroupOptions struct { - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` } // TransferSubGroup transfers a group to a new parent group or turn a subgroup @@ -609,24 +524,12 @@ type TransferSubGroupOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/groups/#transfer-a-group func (s *GroupsService) TransferSubGroup(gid any, opt *TransferSubGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/transfer", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil + return do[*Group](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/transfer", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateGroupOptions represents the available UpdateGroup() options. 
@@ -642,18 +545,19 @@ type UpdateGroupOptions struct { Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` ShareWithGroupLock *bool `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"` RequireTwoFactorAuth *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` - TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` + TwoFactorGracePeriod *int64 `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` ProjectCreationLevel *ProjectCreationLevelValue `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"` AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` SubGroupCreationLevel *SubGroupCreationLevelValue `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"` EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"` MentionsDisabled *bool `url:"mentions_disabled,omitempty" json:"mentions_disabled,omitempty"` LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` + MaxArtifactsSize *int64 `url:"max_artifacts_size,omitempty" json:"max_artifacts_size,omitempty"` RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` DefaultBranchProtectionDefaults *DefaultBranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"` - FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` - SharedRunnersMinutesLimit *int `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` - ExtraSharedRunnersMinutesLimit *int `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` + 
FileTemplateProjectID *int64 `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` + SharedRunnersMinutesLimit *int64 `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` + ExtraSharedRunnersMinutesLimit *int64 `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` PreventForkingOutsideGroup *bool `url:"prevent_forking_outside_group,omitempty" json:"prevent_forking_outside_group,omitempty"` SharedRunnersSetting *SharedRunnersSettingValue `url:"shared_runners_setting,omitempty" json:"shared_runners_setting,omitempty"` PreventSharingGroupsOutsideHierarchy *bool `url:"prevent_sharing_groups_outside_hierarchy,omitempty" json:"prevent_sharing_groups_outside_hierarchy,omitempty"` @@ -661,11 +565,33 @@ type UpdateGroupOptions struct { AllowedEmailDomainsList *string `url:"allowed_email_domains_list,omitempty" json:"allowed_email_domains_list,omitempty"` WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` + OnlyAllowMergeIfPipelineSucceeds *bool `url:"only_allow_merge_if_pipeline_succeeds,omitempty" json:"only_allow_merge_if_pipeline_succeeds,omitempty"` + AllowMergeOnSkippedPipeline *bool `url:"allow_merge_on_skipped_pipeline,omitempty" json:"allow_merge_on_skipped_pipeline,omitempty"` + OnlyAllowMergeIfAllDiscussionsAreResolved *bool `url:"only_allow_merge_if_all_discussions_are_resolved,omitempty" json:"only_allow_merge_if_all_discussions_are_resolved,omitempty"` + // Deprecated: Use EmailsEnabled instead EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` // Deprecated: Use DefaultBranchProtectionDefaults instead - DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` + DefaultBranchProtection *int64 `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` + 
EnabledGitAccessProtocol *EnabledGitAccessProtocolValue `url:"enabled_git_access_protocol,omitempty" json:"enabled_git_access_protocol,omitempty"` + StepUpAuthRequiredOAuthProvider *string `url:"step_up_auth_required_oauth_provider,omitempty" json:"step_up_auth_required_oauth_provider,omitempty"` + // The following fields are Premium and Ultimate only. + UniqueProjectDownloadLimit *int64 `url:"unique_project_download_limit,omitempty" json:"unique_project_download_limit,omitempty"` + UniqueProjectDownloadLimitIntervalInSeconds *int64 `url:"unique_project_download_limit_interval_in_seconds,omitempty" json:"unique_project_download_limit_interval_in_seconds,omitempty"` + UniqueProjectDownloadLimitAllowlist *[]string `url:"unique_project_download_limit_allowlist,omitempty" json:"unique_project_download_limit_allowlist,omitempty"` + UniqueProjectDownloadLimitAlertlist *[]int64 `url:"unique_project_download_limit_alertlist,omitempty" json:"unique_project_download_limit_alertlist,omitempty"` + AutoBanUserOnExcessiveProjectsDownload *bool `url:"auto_ban_user_on_excessive_projects_download,omitempty" json:"auto_ban_user_on_excessive_projects_download,omitempty"` + + DuoAvailability *DuoAvailabilityValue `url:"duo_availability,omitempty" json:"duo_availability,omitempty"` + ExperimentFeaturesEnabled *bool `url:"experiment_features_enabled,omitempty" json:"experiment_features_enabled,omitempty"` + MathRenderingLimitsEnabled *bool `url:"math_rendering_limits_enabled,omitempty" json:"math_rendering_limits_enabled,omitempty"` + LockMathRenderingLimitsEnabled *bool `url:"lock_math_rendering_limits_enabled,omitempty" json:"lock_math_rendering_limits_enabled,omitempty"` + DuoFeaturesEnabled *bool `url:"duo_features_enabled,omitempty" json:"duo_features_enabled,omitempty"` + LockDuoFeaturesEnabled *bool `url:"lock_duo_features_enabled,omitempty" json:"lock_duo_features_enabled,omitempty"` + + WebBasedCommitSigningEnabled *bool `url:"web_based_commit_signing_enabled,omitempty" 
json:"web_based_commit_signing_enabled,omitempty"` + AllowPersonalSnippets *bool `url:"allow_personal_snippets,omitempty" json:"allow_personal_snippets,omitempty"` } // UpdateGroup updates an existing group; only available to group owners and @@ -679,43 +605,19 @@ type UpdateGroupOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/groups/#update-group-attributes func (s *GroupsService) UpdateGroup(gid any, opt *UpdateGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err + reqOpts := []doOption{ + withMethod(http.MethodPut), + withPath("groups/%s", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), } - u := fmt.Sprintf("groups/%s", PathEscape(group)) - - var req *retryablehttp.Request - - if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) { - req, err = s.client.NewRequest(http.MethodPut, u, opt, options) - } else { - // since the Avatar is provided, check allowed_to_push and - // allowed_to_merge access levels and error if multiples are provided + if opt.Avatar != nil && (opt.Avatar.Filename != "" || opt.Avatar.Image != nil) { if opt.DefaultBranchProtectionDefaults != nil && (len(*opt.DefaultBranchProtectionDefaults.AllowedToMerge) > 1 || len(*opt.DefaultBranchProtectionDefaults.AllowedToPush) > 1) { - return nil, nil, fmt.Errorf("multiple access levels for allowed_to_merge or allowed_to_push are not permitted when an Avatar is also specified as it will result in unexpected behavior") + return nil, nil, errors.New("multiple access levels for allowed_to_merge or allowed_to_push are not permitted when an Avatar is also specified as it will result in unexpected behavior") } - req, err = s.client.UploadRequest( - http.MethodPut, - u, - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) + reqOpts = append(reqOpts, withUpload(opt.Avatar.Image, opt.Avatar.Filename, UploadAvatar)) } - if err != nil { 
- return nil, nil, err - } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil + return do[*Group](s.client, reqOpts...) } // UploadAvatar uploads a group avatar. @@ -723,32 +625,12 @@ func (s *GroupsService) UpdateGroup(gid any, opt *UpdateGroupOptions, options .. // GitLab API docs: // https://docs.gitlab.com/api/groups/#upload-a-group-avatar func (s *GroupsService) UploadAvatar(gid any, avatar io.Reader, filename string, options ...RequestOptionFunc) (*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s", PathEscape(group)) - - req, err := s.client.UploadRequest( - http.MethodPut, - u, - avatar, - filename, - UploadAvatar, - nil, - options, + return do[*Group](s.client, + withMethod(http.MethodPut), + withPath("groups/%s", GroupID{gid}), + withUpload(avatar, filename, UploadAvatar), + withRequestOpts(options...), ) - if err != nil { - return nil, nil, err - } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil } // DeleteGroupOptions represents the available DeleteGroup() options. 
@@ -763,18 +645,13 @@ type DeleteGroupOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/groups/#delete-a-group func (s *GroupsService) DeleteGroup(gid any, opt *DeleteGroupOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // RestoreGroup restores a previously deleted group @@ -782,24 +659,11 @@ func (s *GroupsService) DeleteGroup(gid any, opt *DeleteGroupOptions, options .. // GitLab API docs: // https://docs.gitlab.com/api/groups/#restore-a-group-marked-for-deletion func (s *GroupsService) RestoreGroup(gid any, options ...RequestOptionFunc) (*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/restore", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil + return do[*Group](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/restore", GroupID{gid}), + withRequestOpts(options...), + ) } // SearchGroup get all groups that match your string in their name or path. 
@@ -811,18 +675,11 @@ func (s *GroupsService) SearchGroup(query string, options ...RequestOptionFunc) } q.Search = query - req, err := s.client.NewRequest(http.MethodGet, "groups", &q, options) - if err != nil { - return nil, nil, err - } - - var gs []*Group - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil + return do[[]*Group](s.client, + withPath("groups"), + withAPIOpts(&q), + withRequestOpts(options...), + ) } // ListProvisionedUsersOptions represents the available ListProvisionedUsers() @@ -845,24 +702,11 @@ type ListProvisionedUsersOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/groups/#list-provisioned-users func (s *GroupsService) ListProvisionedUsers(gid any, opt *ListProvisionedUsersOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/provisioned_users", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var us []*User - resp, err := s.client.Do(req, &us) - if err != nil { - return nil, resp, err - } - - return us, resp, nil + return do[[]*User](s.client, + withPath("groups/%s/provisioned_users", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListGroupLDAPLinks lists the group's LDAP links. 
Available only for users who @@ -871,24 +715,10 @@ func (s *GroupsService) ListProvisionedUsers(gid any, opt *ListProvisionedUsersO // GitLab API docs: // https://docs.gitlab.com/api/group_ldap_links/#list-ldap-group-links func (s *GroupsService) ListGroupLDAPLinks(gid any, options ...RequestOptionFunc) ([]*LDAPGroupLink, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/ldap_group_links", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var gls []*LDAPGroupLink - resp, err := s.client.Do(req, &gls) - if err != nil { - return nil, resp, err - } - - return gls, resp, nil + return do[[]*LDAPGroupLink](s.client, + withPath("groups/%s/ldap_group_links", GroupID{gid}), + withRequestOpts(options...), + ) } // AddGroupLDAPLinkOptions represents the available AddGroupLDAPLink() options. @@ -909,24 +739,12 @@ type AddGroupLDAPLinkOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_ldap_links/#add-an-ldap-group-link-with-cn-or-filter func (s *GroupsService) AddGroupLDAPLink(gid any, opt *AddGroupLDAPLinkOptions, options ...RequestOptionFunc) (*LDAPGroupLink, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/ldap_group_links", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gl := new(LDAPGroupLink) - resp, err := s.client.Do(req, gl) - if err != nil { - return nil, resp, err - } - - return gl, resp, nil + return do[*LDAPGroupLink](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/ldap_group_links", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteGroupLDAPLink deletes a group LDAP link. 
Available only for users who @@ -936,18 +754,12 @@ func (s *GroupsService) AddGroupLDAPLink(gid any, opt *AddGroupLDAPLinkOptions, // GitLab API docs: // https://docs.gitlab.com/api/group_ldap_links/#delete-an-ldap-group-link-deprecated func (s *GroupsService) DeleteGroupLDAPLink(gid any, cn string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/ldap_group_links/%s", PathEscape(group), PathEscape(cn)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/ldap_group_links/%s", GroupID{gid}, cn), + withRequestOpts(options...), + ) + return resp, err } // DeleteGroupLDAPLinkWithCNOrFilterOptions represents the available DeleteGroupLDAPLinkWithCNOrFilter() options. @@ -966,18 +778,13 @@ type DeleteGroupLDAPLinkWithCNOrFilterOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_ldap_links/#delete-an-ldap-group-link-with-cn-or-filter func (s *GroupsService) DeleteGroupLDAPLinkWithCNOrFilter(gid any, opts *DeleteGroupLDAPLinkWithCNOrFilterOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/ldap_group_links", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodDelete, u, opts, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/ldap_group_links", GroupID{gid}), + withAPIOpts(opts), + withRequestOpts(options...), + ) + return resp, err } // DeleteGroupLDAPLinkForProvider deletes a group LDAP link from a specific @@ -986,23 +793,12 @@ func (s *GroupsService) DeleteGroupLDAPLinkWithCNOrFilter(gid any, opts *DeleteG // GitLab API docs: // 
https://docs.gitlab.com/api/group_ldap_links/#delete-an-ldap-group-link-deprecated func (s *GroupsService) DeleteGroupLDAPLinkForProvider(gid any, provider, cn string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf( - "groups/%s/ldap_group_links/%s/%s", - PathEscape(group), - PathEscape(provider), - PathEscape(cn), + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/ldap_group_links/%s/%s", GroupID{gid}, provider, cn), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + return resp, err } // ListGroupSAMLLinks lists the group's SAML links. Available only for users who @@ -1011,24 +807,10 @@ func (s *GroupsService) DeleteGroupLDAPLinkForProvider(gid any, provider, cn str // GitLab API docs: // https://docs.gitlab.com/api/saml/#list-saml-group-links func (s *GroupsService) ListGroupSAMLLinks(gid any, options ...RequestOptionFunc) ([]*SAMLGroupLink, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/saml_group_links", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var gl []*SAMLGroupLink - resp, err := s.client.Do(req, &gl) - if err != nil { - return nil, resp, err - } - - return gl, resp, nil + return do[[]*SAMLGroupLink](s.client, + withPath("groups/%s/saml_group_links", GroupID{gid}), + withRequestOpts(options...), + ) } // ListGroupSharedProjectsOptions represents the available ListGroupSharedProjects() options. 
@@ -1055,24 +837,11 @@ type ListGroupSharedProjectsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/groups/#list-shared-projects func (s *GroupsService) ListGroupSharedProjects(gid any, opt *ListGroupSharedProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/projects/shared", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*Project - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[[]*Project](s.client, + withPath("groups/%s/projects/shared", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupSAMLLink get a specific group SAML link. Available only for users who @@ -1081,24 +850,10 @@ func (s *GroupsService) ListGroupSharedProjects(gid any, opt *ListGroupSharedPro // GitLab API docs: // https://docs.gitlab.com/api/saml/#get-a-saml-group-link func (s *GroupsService) GetGroupSAMLLink(gid any, samlGroupName string, options ...RequestOptionFunc) (*SAMLGroupLink, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/saml_group_links/%s", PathEscape(group), PathEscape(samlGroupName)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gl := new(SAMLGroupLink) - resp, err := s.client.Do(req, &gl) - if err != nil { - return nil, resp, err - } - - return gl, resp, nil + return do[*SAMLGroupLink](s.client, + withPath("groups/%s/saml_group_links/%s", GroupID{gid}, samlGroupName), + withRequestOpts(options...), + ) } // AddGroupSAMLLinkOptions represents the available AddGroupSAMLLink() options. 
@@ -1108,7 +863,8 @@ func (s *GroupsService) GetGroupSAMLLink(gid any, samlGroupName string, options type AddGroupSAMLLinkOptions struct { SAMLGroupName *string `url:"saml_group_name,omitempty" json:"saml_group_name,omitempty"` AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` + MemberRoleID *int64 `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` + Provider *string `url:"provider,omitempty" json:"provider,omitempty"` } // AddGroupSAMLLink creates a new group SAML link. Available only for users who @@ -1117,24 +873,12 @@ type AddGroupSAMLLinkOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/saml/#add-a-saml-group-link func (s *GroupsService) AddGroupSAMLLink(gid any, opt *AddGroupSAMLLinkOptions, options ...RequestOptionFunc) (*SAMLGroupLink, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/saml_group_links", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gl := new(SAMLGroupLink) - resp, err := s.client.Do(req, &gl) - if err != nil { - return nil, resp, err - } - - return gl, resp, nil + return do[*SAMLGroupLink](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/saml_group_links", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteGroupSAMLLink deletes a group SAML link. 
Available only for users who @@ -1143,18 +887,12 @@ func (s *GroupsService) AddGroupSAMLLink(gid any, opt *AddGroupSAMLLinkOptions, // GitLab API docs: // https://docs.gitlab.com/api/saml/#delete-a-saml-group-link func (s *GroupsService) DeleteGroupSAMLLink(gid any, samlGroupName string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/saml_group_links/%s", PathEscape(group), PathEscape(samlGroupName)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/saml_group_links/%s", GroupID{gid}, samlGroupName), + withRequestOpts(options...), + ) + return resp, err } // ShareGroupWithGroupOptions represents the available ShareGroupWithGroup() options. @@ -1162,10 +900,10 @@ func (s *GroupsService) DeleteGroupSAMLLink(gid any, samlGroupName string, optio // GitLab API docs: // https://docs.gitlab.com/api/groups/#share-groups-with-groups type ShareGroupWithGroupOptions struct { - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` GroupAccess *AccessLevelValue `url:"group_access,omitempty" json:"group_access,omitempty"` ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` - MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` + MemberRoleID *int64 `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` } // ShareGroupWithGroup shares a group with another group. 
@@ -1173,43 +911,25 @@ type ShareGroupWithGroupOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/groups/#create-a-link-to-share-a-group-with-another-group func (s *GroupsService) ShareGroupWithGroup(gid any, opt *ShareGroupWithGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/share", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil + return do[*Group](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/share", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UnshareGroupFromGroup unshares a group from another group. // // GitLab API docs: // https://docs.gitlab.com/api/groups/#delete-the-link-that-shares-a-group-with-another-group -func (s *GroupsService) UnshareGroupFromGroup(gid any, groupID int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/share/%d", PathEscape(group), groupID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *GroupsService) UnshareGroupFromGroup(gid any, groupID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/share/%d", GroupID{gid}, groupID), + withRequestOpts(options...), + ) + return resp, err } // GroupPushRules represents a group push rule. 
@@ -1217,7 +937,7 @@ func (s *GroupsService) UnshareGroupFromGroup(gid any, groupID int, options ...R // GitLab API docs: // https://docs.gitlab.com/api/group_push_rules/#get-the-push-rules-of-a-group type GroupPushRules struct { - ID int `json:"id"` + ID int64 `json:"id"` CreatedAt *time.Time `json:"created_at"` CommitMessageRegex string `json:"commit_message_regex"` CommitMessageNegativeRegex string `json:"commit_message_negative_regex"` @@ -1227,7 +947,7 @@ type GroupPushRules struct { PreventSecrets bool `json:"prevent_secrets"` AuthorEmailRegex string `json:"author_email_regex"` FileNameRegex string `json:"file_name_regex"` - MaxFileSize int `json:"max_file_size"` + MaxFileSize int64 `json:"max_file_size"` CommitCommitterCheck bool `json:"commit_committer_check"` CommitCommitterNameCheck bool `json:"commit_committer_name_check"` RejectUnsignedCommits bool `json:"reject_unsigned_commits"` @@ -1239,24 +959,10 @@ type GroupPushRules struct { // GitLab API docs: // https://docs.gitlab.com/api/group_push_rules/#get-the-push-rules-of-a-group func (s *GroupsService) GetGroupPushRules(gid any, options ...RequestOptionFunc) (*GroupPushRules, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/push_rule", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gpr := new(GroupPushRules) - resp, err := s.client.Do(req, gpr) - if err != nil { - return nil, resp, err - } - - return gpr, resp, nil + return do[*GroupPushRules](s.client, + withPath("groups/%s/push_rule", GroupID{gid}), + withRequestOpts(options...), + ) } // AddGroupPushRuleOptions represents the available AddGroupPushRule() @@ -1273,7 +979,7 @@ type AddGroupPushRuleOptions struct { CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" 
json:"deny_delete_tag,omitempty"` FileNameRegex *string `url:"file_name_regex,omitempty" json:"file_name_regex,omitempty"` - MaxFileSize *int `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` + MaxFileSize *int64 `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` @@ -1285,24 +991,12 @@ type AddGroupPushRuleOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_push_rules/#add-push-rules-to-a-group func (s *GroupsService) AddGroupPushRule(gid any, opt *AddGroupPushRuleOptions, options ...RequestOptionFunc) (*GroupPushRules, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/push_rule", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gpr := new(GroupPushRules) - resp, err := s.client.Do(req, gpr) - if err != nil { - return nil, resp, err - } - - return gpr, resp, nil + return do[*GroupPushRules](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/push_rule", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditGroupPushRuleOptions represents the available EditGroupPushRule() @@ -1319,7 +1013,7 @@ type EditGroupPushRuleOptions struct { CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` FileNameRegex *string `url:"file_name_regex,omitempty" json:"file_name_regex,omitempty"` - MaxFileSize *int `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` + MaxFileSize *int64 `url:"max_file_size,omitempty" 
json:"max_file_size,omitempty"` MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` @@ -1331,24 +1025,12 @@ type EditGroupPushRuleOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/group_push_rules/#edit-the-push-rules-of-a-group func (s *GroupsService) EditGroupPushRule(gid any, opt *EditGroupPushRuleOptions, options ...RequestOptionFunc) (*GroupPushRules, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/push_rule", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - gpr := new(GroupPushRules) - resp, err := s.client.Do(req, gpr) - if err != nil { - return nil, resp, err - } - - return gpr, resp, nil + return do[*GroupPushRules](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/push_rule", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteGroupPushRule deletes the push rules of a group. 
@@ -1356,16 +1038,10 @@ func (s *GroupsService) EditGroupPushRule(gid any, opt *EditGroupPushRuleOptions // GitLab API docs: // https://docs.gitlab.com/api/group_push_rules/#delete-the-push-rules-of-a-group func (s *GroupsService) DeleteGroupPushRule(gid any, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/push_rule", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/push_rule", GroupID{gid}), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/import.go b/vendor/gitlab.com/gitlab-org/api/client-go/import.go index 019c4b7260..68424b690c 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/import.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/import.go @@ -46,11 +46,11 @@ var _ ImportServiceInterface = (*ImportService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/import/#import-repository-from-github type GitHubImport struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` FullPath string `json:"full_path"` FullName string `json:"full_name"` - RefsUrl string `json:"refs_url"` + RefsURL string `json:"refs_url"` ImportSource string `json:"import_source"` ImportStatus string `json:"import_status"` HumanImportStatusName string `json:"human_import_status_name"` @@ -69,17 +69,19 @@ func (s GitHubImport) String() string { // GitLab API docs: // https://docs.gitlab.com/api/import/#import-repository-from-github type ImportRepositoryFromGitHubOptions struct { - PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` - RepoID *int `url:"repo_id,omitempty" json:"repo_id,omitempty"` - NewName *string `url:"new_name,omitempty" 
json:"new_name,omitempty"` - TargetNamespace *string `url:"target_namespace,omitempty" json:"target_namespace,omitempty"` - GitHubHostname *string `url:"github_hostname,omitempty" json:"github_hostname,omitempty"` - OptionalStages struct { - SingleEndpointNotesImport *bool `url:"single_endpoint_notes_import,omitempty" json:"single_endpoint_notes_import,omitempty"` - AttachmentsImport *bool `url:"attachments_import,omitempty" json:"attachments_import,omitempty"` - CollaboratorsImport *bool `url:"collaborators_import,omitempty" json:"collaborators_import,omitempty"` - } `url:"optional_stages,omitempty" json:"optional_stages,omitempty"` - TimeoutStrategy *string `url:"timeout_strategy,omitempty" json:"timeout_strategy,omitempty"` + PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` + RepoID *int64 `url:"repo_id,omitempty" json:"repo_id,omitempty"` + NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` + TargetNamespace *string `url:"target_namespace,omitempty" json:"target_namespace,omitempty"` + GitHubHostname *string `url:"github_hostname,omitempty" json:"github_hostname,omitempty"` + OptionalStages ImportRepositoryFromGitHubOptionalStagesOptions `url:"optional_stages,omitempty" json:"optional_stages,omitempty"` + TimeoutStrategy *string `url:"timeout_strategy,omitempty" json:"timeout_strategy,omitempty"` +} + +type ImportRepositoryFromGitHubOptionalStagesOptions struct { + SingleEndpointNotesImport *bool `url:"single_endpoint_notes_import,omitempty" json:"single_endpoint_notes_import,omitempty"` + AttachmentsImport *bool `url:"attachments_import,omitempty" json:"attachments_import,omitempty"` + CollaboratorsImport *bool `url:"collaborators_import,omitempty" json:"collaborators_import,omitempty"` } // ImportRepositoryFromGitHub imports a repository from GitHub. 
@@ -87,18 +89,12 @@ type ImportRepositoryFromGitHubOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/import/#import-repository-from-github func (s *ImportService) ImportRepositoryFromGitHub(opt *ImportRepositoryFromGitHubOptions, options ...RequestOptionFunc) (*GitHubImport, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "import/github", opt, options) - if err != nil { - return nil, nil, err - } - - gi := new(GitHubImport) - resp, err := s.client.Do(req, gi) - if err != nil { - return nil, resp, err - } - - return gi, resp, nil + return do[*GitHubImport](s.client, + withMethod(http.MethodPost), + withPath("import/github"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CancelledGitHubImport represents the response when canceling @@ -107,7 +103,7 @@ func (s *ImportService) ImportRepositoryFromGitHub(opt *ImportRepositoryFromGitH // GitLab API docs: // https://docs.gitlab.com/api/import/#cancel-github-project-import type CancelledGitHubImport struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` FullPath string `json:"full_path"` FullName string `json:"full_name"` @@ -127,7 +123,7 @@ func (s CancelledGitHubImport) String() string { // GitLab API docs: // https://docs.gitlab.com/api/import/#cancel-github-project-import type CancelGitHubProjectImportOptions struct { - ProjectID *int `url:"project_id,omitempty" json:"project_id,omitempty"` + ProjectID *int64 `url:"project_id,omitempty" json:"project_id,omitempty"` } // CancelGitHubProjectImport cancels an import of a repository from GitHub. 
@@ -135,18 +131,12 @@ type CancelGitHubProjectImportOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/import/#cancel-github-project-import func (s *ImportService) CancelGitHubProjectImport(opt *CancelGitHubProjectImportOptions, options ...RequestOptionFunc) (*CancelledGitHubImport, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "import/github/cancel", opt, options) - if err != nil { - return nil, nil, err - } - - cgi := new(CancelledGitHubImport) - resp, err := s.client.Do(req, cgi) - if err != nil { - return nil, resp, err - } - - return cgi, resp, nil + return do[*CancelledGitHubImport](s.client, + withMethod(http.MethodPost), + withPath("import/github/cancel"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ImportGitHubGistsIntoGitLabSnippetsOptions represents the available @@ -163,12 +153,13 @@ type ImportGitHubGistsIntoGitLabSnippetsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/import/#import-github-gists-into-gitlab-snippets func (s *ImportService) ImportGitHubGistsIntoGitLabSnippets(opt *ImportGitHubGistsIntoGitLabSnippetsOptions, options ...RequestOptionFunc) (*Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "import/github/gists", opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("import/github/gists"), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // BitbucketServerImport represents the response from an import from Bitbucket @@ -177,11 +168,11 @@ func (s *ImportService) ImportGitHubGistsIntoGitLabSnippets(opt *ImportGitHubGis // GitLab API docs: // https://docs.gitlab.com/api/import/#import-repository-from-bitbucket-server type BitbucketServerImport struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` FullPath string `json:"full_path"` FullName string `json:"full_name"` - RefsUrl string 
`json:"refs_url"` + RefsURL string `json:"refs_url"` } func (s BitbucketServerImport) String() string { @@ -193,7 +184,7 @@ func (s BitbucketServerImport) String() string { // GitLab API docs: // https://docs.gitlab.com/api/import/#import-repository-from-bitbucket-server type ImportRepositoryFromBitbucketServerOptions struct { - BitbucketServerUrl *string `url:"bitbucket_server_url,omitempty" json:"bitbucket_server_url,omitempty"` + BitbucketServerURL *string `url:"bitbucket_server_url,omitempty" json:"bitbucket_server_url,omitempty"` BitbucketServerUsername *string `url:"bitbucket_server_username,omitempty" json:"bitbucket_server_username,omitempty"` PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` BitbucketServerProject *string `url:"bitbucket_server_project,omitempty" json:"bitbucket_server_project,omitempty"` @@ -208,18 +199,12 @@ type ImportRepositoryFromBitbucketServerOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/import/#import-repository-from-bitbucket-server func (s *ImportService) ImportRepositoryFromBitbucketServer(opt *ImportRepositoryFromBitbucketServerOptions, options ...RequestOptionFunc) (*BitbucketServerImport, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "import/bitbucket_server", opt, options) - if err != nil { - return nil, nil, err - } - - bsi := new(BitbucketServerImport) - resp, err := s.client.Do(req, bsi) - if err != nil { - return nil, resp, err - } - - return bsi, resp, nil + return do[*BitbucketServerImport](s.client, + withMethod(http.MethodPost), + withPath("import/bitbucket_server"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // BitbucketCloudImport represents the response from an import from Bitbucket @@ -228,11 +213,11 @@ func (s *ImportService) ImportRepositoryFromBitbucketServer(opt *ImportRepositor // GitLab API docs: // https://docs.gitlab.com/api/import/#import-repository-from-bitbucket-cloud type 
BitbucketCloudImport struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` FullPath string `json:"full_path"` FullName string `json:"full_name"` - RefsUrl string `json:"refs_url"` + RefsURL string `json:"refs_url"` ImportSource string `json:"import_source"` ImportStatus string `json:"import_status"` HumanImportStatusName string `json:"human_import_status_name"` @@ -263,16 +248,10 @@ type ImportRepositoryFromBitbucketCloudOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/import/#import-repository-from-bitbucket-cloud func (s *ImportService) ImportRepositoryFromBitbucketCloud(opt *ImportRepositoryFromBitbucketCloudOptions, options ...RequestOptionFunc) (*BitbucketCloudImport, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "import/bitbucket", opt, options) - if err != nil { - return nil, nil, err - } - - bci := new(BitbucketCloudImport) - resp, err := s.client.Do(req, bci) - if err != nil { - return nil, resp, err - } - - return bci, resp, nil + return do[*BitbucketCloudImport](s.client, + withMethod(http.MethodPost), + withPath("import/bitbucket"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/instance_clusters.go b/vendor/gitlab.com/gitlab-org/api/client-go/instance_clusters.go index fd4bce6be0..cc06e9dbe8 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/instance_clusters.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/instance_clusters.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -28,13 +27,13 @@ type ( // Deprecated: in GitLab 14.5, to be removed in 19.0 ListClusters(options ...RequestOptionFunc) ([]*InstanceCluster, *Response, error) // Deprecated: in GitLab 14.5, to be removed in 19.0 - GetCluster(cluster int, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) + GetCluster(cluster int64, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) // Deprecated: in 
GitLab 14.5, to be removed in 19.0 AddCluster(opt *AddClusterOptions, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) // Deprecated: in GitLab 14.5, to be removed in 19.0 - EditCluster(cluster int, opt *EditClusterOptions, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) + EditCluster(cluster int64, opt *EditClusterOptions, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) // Deprecated: in GitLab 14.5, to be removed in 19.0 - DeleteCluster(cluster int, options ...RequestOptionFunc) (*Response, error) + DeleteCluster(cluster int64, options ...RequestOptionFunc) (*Response, error) } // InstanceClustersService handles communication with the @@ -56,7 +55,7 @@ var _ InstanceClustersServiceInterface = (*InstanceClustersService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/instance_clusters/ type InstanceCluster struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Domain string `json:"domain"` Managed bool `json:"managed"` @@ -81,20 +80,10 @@ func (v InstanceCluster) String() string { // GitLab API docs: // https://docs.gitlab.com/api/instance_clusters/#list-instance-clusters func (s *InstanceClustersService) ListClusters(options ...RequestOptionFunc) ([]*InstanceCluster, *Response, error) { - u := "admin/clusters" - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var ics []*InstanceCluster - resp, err := s.client.Do(req, &ics) - if err != nil { - return nil, resp, err - } - - return ics, resp, nil + return do[[]*InstanceCluster](s.client, + withPath("admin/clusters"), + withRequestOpts(options...), + ) } // GetCluster gets an instance cluster. 
@@ -102,21 +91,11 @@ func (s *InstanceClustersService) ListClusters(options ...RequestOptionFunc) ([] // // GitLab API docs: // https://docs.gitlab.com/api/instance_clusters/#get-a-single-instance-cluster -func (s *InstanceClustersService) GetCluster(cluster int, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) { - u := fmt.Sprintf("admin/clusters/%d", cluster) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ic := new(InstanceCluster) - resp, err := s.client.Do(req, &ic) - if err != nil { - return nil, resp, err - } - - return ic, resp, nil +func (s *InstanceClustersService) GetCluster(cluster int64, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) { + return do[*InstanceCluster](s.client, + withPath("admin/clusters/%d", cluster), + withRequestOpts(options...), + ) } // AddCluster adds an existing cluster to the instance. @@ -125,20 +104,12 @@ func (s *InstanceClustersService) GetCluster(cluster int, options ...RequestOpti // GitLab API docs: // https://docs.gitlab.com/api/instance_clusters/#add-existing-instance-cluster func (s *InstanceClustersService) AddCluster(opt *AddClusterOptions, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) { - u := "admin/clusters/add" - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - ic := new(InstanceCluster) - resp, err := s.client.Do(req, ic) - if err != nil { - return nil, resp, err - } - - return ic, resp, nil + return do[*InstanceCluster](s.client, + withMethod(http.MethodPost), + withPath("admin/clusters/add"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditCluster updates an existing instance cluster. @@ -146,21 +117,13 @@ func (s *InstanceClustersService) AddCluster(opt *AddClusterOptions, options ... 
// // GitLab API docs: // https://docs.gitlab.com/api/instance_clusters/#edit-instance-cluster -func (s *InstanceClustersService) EditCluster(cluster int, opt *EditClusterOptions, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) { - u := fmt.Sprintf("admin/clusters/%d", cluster) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ic := new(InstanceCluster) - resp, err := s.client.Do(req, ic) - if err != nil { - return nil, resp, err - } - - return ic, resp, nil +func (s *InstanceClustersService) EditCluster(cluster int64, opt *EditClusterOptions, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) { + return do[*InstanceCluster](s.client, + withMethod(http.MethodPut), + withPath("admin/clusters/%d", cluster), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteCluster deletes an existing instance cluster. @@ -168,13 +131,11 @@ func (s *InstanceClustersService) EditCluster(cluster int, opt *EditClusterOptio // // GitLab API docs: // https://docs.gitlab.com/api/instance_clusters/#delete-instance-cluster -func (s *InstanceClustersService) DeleteCluster(cluster int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("admin/clusters/%d", cluster) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *InstanceClustersService) DeleteCluster(cluster int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("admin/clusters/%d", cluster), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/instance_variables.go b/vendor/gitlab.com/gitlab-org/api/client-go/instance_variables.go index 235a333e3c..2a4c05c371 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/instance_variables.go +++ 
b/vendor/gitlab.com/gitlab-org/api/client-go/instance_variables.go @@ -17,17 +17,36 @@ package gitlab import ( - "fmt" "net/http" "net/url" ) type ( InstanceVariablesServiceInterface interface { + // ListVariables gets a list of all variables for an instance. + // + // GitLab API docs: + // https://docs.gitlab.com/api/instance_level_ci_variables/#list-all-instance-variables ListVariables(opt *ListInstanceVariablesOptions, options ...RequestOptionFunc) ([]*InstanceVariable, *Response, error) + // GetVariable gets a variable. + // + // GitLab API docs: + // https://docs.gitlab.com/api/instance_level_ci_variables/#show-instance-variable-details GetVariable(key string, options ...RequestOptionFunc) (*InstanceVariable, *Response, error) + // CreateVariable creates a new instance level CI variable. + // + // GitLab API docs: + // https://docs.gitlab.com/api/instance_level_ci_variables/#create-instance-variable CreateVariable(opt *CreateInstanceVariableOptions, options ...RequestOptionFunc) (*InstanceVariable, *Response, error) + // UpdateVariable updates an existing instance level CI variable. + // + // GitLab API docs: + // https://docs.gitlab.com/api/instance_level_ci_variables/#update-instance-variable UpdateVariable(key string, opt *UpdateInstanceVariableOptions, options ...RequestOptionFunc) (*InstanceVariable, *Response, error) + // RemoveVariable removes an instance level CI variable. + // + // GitLab API docs: + // https://docs.gitlab.com/api/instance_level_ci_variables/#remove-instance-variable RemoveVariable(key string, options ...RequestOptionFunc) (*Response, error) } @@ -66,48 +85,23 @@ func (v InstanceVariable) String() string { // // GitLab API docs: // https://docs.gitlab.com/api/instance_level_ci_variables/#list-all-instance-variables -type ListInstanceVariablesOptions ListOptions +type ListInstanceVariablesOptions struct { + ListOptions +} -// ListVariables gets a list of all variables for an instance. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/instance_level_ci_variables/#list-all-instance-variables func (s *InstanceVariablesService) ListVariables(opt *ListInstanceVariablesOptions, options ...RequestOptionFunc) ([]*InstanceVariable, *Response, error) { - u := "admin/ci/variables" - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var vs []*InstanceVariable - resp, err := s.client.Do(req, &vs) - if err != nil { - return nil, resp, err - } - - return vs, resp, nil + return do[[]*InstanceVariable](s.client, + withPath("admin/ci/variables"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetVariable gets a variable. -// -// GitLab API docs: -// https://docs.gitlab.com/api/instance_level_ci_variables/#show-instance-variable-details func (s *InstanceVariablesService) GetVariable(key string, options ...RequestOptionFunc) (*InstanceVariable, *Response, error) { - u := fmt.Sprintf("admin/ci/variables/%s", url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - v := new(InstanceVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil + return do[*InstanceVariable](s.client, + withPath("admin/ci/variables/%s", url.PathEscape(key)), + withRequestOpts(options...), + ) } // CreateInstanceVariableOptions represents the available CreateVariable() @@ -125,25 +119,13 @@ type CreateInstanceVariableOptions struct { VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } -// CreateVariable creates a new instance level CI variable. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/instance_level_ci_variables/#create-instance-variable func (s *InstanceVariablesService) CreateVariable(opt *CreateInstanceVariableOptions, options ...RequestOptionFunc) (*InstanceVariable, *Response, error) { - u := "admin/ci/variables" - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(InstanceVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil + return do[*InstanceVariable](s.client, + withMethod(http.MethodPost), + withPath("admin/ci/variables"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateInstanceVariableOptions represents the available UpdateVariable() @@ -160,39 +142,20 @@ type UpdateInstanceVariableOptions struct { VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } -// UpdateVariable updates the position of an existing -// instance level CI variable. -// -// GitLab API docs: -// https://docs.gitlab.com/api/instance_level_ci_variables/#update-instance-variable func (s *InstanceVariablesService) UpdateVariable(key string, opt *UpdateInstanceVariableOptions, options ...RequestOptionFunc) (*InstanceVariable, *Response, error) { - u := fmt.Sprintf("admin/ci/variables/%s", url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(InstanceVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil + return do[*InstanceVariable](s.client, + withMethod(http.MethodPut), + withPath("admin/ci/variables/%s", url.PathEscape(key)), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// RemoveVariable removes an instance level CI variable. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/instance_level_ci_variables/#remove-instance-variable func (s *InstanceVariablesService) RemoveVariable(key string, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("admin/ci/variables/%s", url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("admin/ci/variables/%s", url.PathEscape(key)), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/integrations.go b/vendor/gitlab.com/gitlab-org/api/client-go/integrations.go index 2e4357ada8..c6921da98d 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/integrations.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/integrations.go @@ -14,20 +14,198 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( IntegrationsServiceInterface interface { + // ListActiveGroupIntegrations gets a list of all active group integrations. + // The vulnerability_events field is only available for GitLab Enterprise Edition. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#list-all-active-integrations ListActiveGroupIntegrations(gid any, opt *ListActiveIntegrationsOptions, options ...RequestOptionFunc) ([]*Integration, *Response, error) + + // SetUpGroupHarbor sets up the Harbor integration for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#set-up-harbor SetUpGroupHarbor(gid any, opt *SetUpHarborOptions, options ...RequestOptionFunc) (*Integration, *Response, error) + + // DisableGroupHarbor disables the Harbor integration for a group. + // Integration settings are reset. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#disable-harbor DisableGroupHarbor(gid any, options ...RequestOptionFunc) (*Response, error) + + // GetGroupHarborSettings gets the Harbor integration for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#get-harbor-settings GetGroupHarborSettings(gid any, options ...RequestOptionFunc) (*Integration, *Response, error) + + // SetGroupMicrosoftTeamsNotifications sets up Microsoft Teams notifications for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#set-up-microsoft-teams-notifications SetGroupMicrosoftTeamsNotifications(gid any, opt *SetMicrosoftTeamsNotificationsOptions, options ...RequestOptionFunc) (*Integration, *Response, error) + + // DisableGroupMicrosoftTeamsNotifications disables Microsoft Teams notifications + // for a group. Integration settings are reset. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#disable-microsoft-teams-notifications DisableGroupMicrosoftTeamsNotifications(gid any, options ...RequestOptionFunc) (*Response, error) + + // GetGroupMicrosoftTeamsNotifications gets the Microsoft Teams notifications for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#get-microsoft-teams-notifications-settings GetGroupMicrosoftTeamsNotifications(gid any, options ...RequestOptionFunc) (*Integration, *Response, error) + + // SetUpGroupJira sets up the Jira integration for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#set-up-jira + SetUpGroupJira(gid any, opt *SetUpJiraOptions, options ...RequestOptionFunc) (*Integration, *Response, error) + + // DisableGroupJira disables the Jira integration for a group. + // Integration settings are reset. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#disable-jira + DisableGroupJira(gid any, options ...RequestOptionFunc) (*Response, error) + + // GetGroupJiraSettings gets the Jira integration for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#get-jira-settings + GetGroupJiraSettings(gid any, options ...RequestOptionFunc) (*Integration, *Response, error) + + // GetGroupSlackSettings gets the Slack integration for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#get-slack-settings + GetGroupSlackSettings(gid any, options ...RequestOptionFunc) (*SlackIntegration, *Response, error) + + // SetGroupSlackSettings sets up the Slack integration for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#set-up-slack + SetGroupSlackSettings(gid any, opt *SetGroupSlackOptions, options ...RequestOptionFunc) (*SlackIntegration, *Response, error) + + // DisableGroupSlack disables the Slack integration for a group. + // Integration settings are reset. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#disable-slack + DisableGroupSlack(gid any, options ...RequestOptionFunc) (*Response, error) + + // GetGroupDiscordSettings gets the Discord integration settings for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#discord + GetGroupDiscordSettings(gid any, options ...RequestOptionFunc) (*DiscordIntegration, *Response, error) + + // GetGroupTelegramSettings gets the Telegram integration settings for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#telegram + GetGroupTelegramSettings(gid any, options ...RequestOptionFunc) (*TelegramIntegration, *Response, error) + + // GetGroupMattermostSettings gets the Mattermost integration settings for a group. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#mattermost-notifications + GetGroupMattermostSettings(gid any, options ...RequestOptionFunc) (*MattermostIntegration, *Response, error) + + // GetGroupMatrixSettings gets the Matrix integration settings for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#matrix-notifications + GetGroupMatrixSettings(gid any, options ...RequestOptionFunc) (*MatrixIntegration, *Response, error) + + // GetGroupGoogleChatSettings gets the Google Chat integration settings for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#google-chat + GetGroupGoogleChatSettings(gid any, options ...RequestOptionFunc) (*GoogleChatIntegration, *Response, error) + + // SetProjectGoogleChatSettings sets up the Google Chat integration for a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/integrations.html#set-up-google-chat + SetProjectGoogleChatSettings(pid any, opt *SetProjectGoogleChatOptions, options ...RequestOptionFunc) (*GoogleChatIntegration, *Response, error) + + // DisableProjectGoogleChat disables the Google Chat integration for a project. + // Integration settings are reset. + // + // GitLab API docs: + // https://docs.gitlab.com/api/integrations.html#disable-google-chat + DisableProjectGoogleChat(pid any, options ...RequestOptionFunc) (*Response, error) + + // GetProjectGoogleChatSettings gets the Google Chat integration settings for a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/integrations.html#get-google-chat-settings + GetProjectGoogleChatSettings(pid any, options ...RequestOptionFunc) (*GoogleChatIntegration, *Response, error) + + // GetGroupMattermostIntegration retrieves the Mattermost integration for a group. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#mattermost-notifications + GetGroupMattermostIntegration(gid any, options ...RequestOptionFunc) (*GroupMattermostIntegration, *Response, error) + + // SetGroupMattermostIntegration creates or updates the Mattermost integration for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#mattermost-notifications + SetGroupMattermostIntegration(gid any, opt *GroupMattermostIntegrationOptions, options ...RequestOptionFunc) (*GroupMattermostIntegration, *Response, error) + + // DeleteGroupMattermostIntegration removes the Mattermost integration from a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#mattermost-notifications + DeleteGroupMattermostIntegration(gid any, options ...RequestOptionFunc) (*Response, error) + + // GetGroupMattermostSlashCommandsIntegration retrieves the Mattermost slash commands integration for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#mattermost-slash-commands + GetGroupMattermostSlashCommandsIntegration(gid any, options ...RequestOptionFunc) (*GroupMattermostSlashCommandsIntegration, *Response, error) + + // SetGroupMattermostSlashCommandsIntegration creates or updates the Mattermost slash commands integration for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#mattermost-slash-commands + SetGroupMattermostSlashCommandsIntegration(gid any, opt *GroupMattermostSlashCommandsIntegrationOptions, options ...RequestOptionFunc) (*GroupMattermostSlashCommandsIntegration, *Response, error) + + // DeleteGroupMattermostSlashCommandsIntegration removes the Mattermost slash commands integration from a group. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#mattermost-slash-commands + DeleteGroupMattermostSlashCommandsIntegration(gid any, options ...RequestOptionFunc) (*Response, error) + + // GetGroupWebexTeamsSettings gets the Webex Teams integration for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#get-webex-teams-settings + GetGroupWebexTeamsSettings(gid any, options ...RequestOptionFunc) (*WebexTeamsIntegration, *Response, error) + + // SetGroupWebexTeamsSettings sets up the Webex Teams integration for a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#set-up-webex-teams + SetGroupWebexTeamsSettings(gid any, opt *SetGroupWebexTeamsOptions, options ...RequestOptionFunc) (*WebexTeamsIntegration, *Response, error) + + // DisableGroupWebexTeams disables the Webex Teams integration for a group. + // Integration settings are reset. + // + // GitLab API docs: + // https://docs.gitlab.com/api/group_integrations/#disable-webex-teams + DisableGroupWebexTeams(gid any, options ...RequestOptionFunc) (*Response, error) } // IntegrationsService handles communication with the group @@ -47,7 +225,7 @@ var _ IntegrationsServiceInterface = (*IntegrationsService)(nil) // https://docs.gitlab.com/api/group_integrations/ // https://docs.gitlab.com/api/project_integrations/ type Integration struct { - ID int `json:"id"` + ID int64 `json:"id"` Title string `json:"title"` Slug string `json:"slug"` CreatedAt *time.Time `json:"created_at"` @@ -74,6 +252,196 @@ type Integration struct { Inherited bool `json:"inherited"` } +// SlackIntegration represents the Slack integration settings. +// It embeds the generic Integration struct and adds Slack-specific properties. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/group_integrations/#get-slack-settings +type SlackIntegration struct { + Integration + Properties SlackIntegrationProperties `json:"properties"` +} + +// SlackIntegrationProperties represents Slack specific properties +// returned by the GitLab API. +// +// GitLab API docs: +// https://docs.gitlab.com/api/group_integrations/#get-slack-settings +type SlackIntegrationProperties struct { + Username string `json:"username"` + Channel string `json:"channel"` + NotifyOnlyBrokenPipelines bool `json:"notify_only_broken_pipelines"` + BranchesToBeNotified string `json:"branches_to_be_notified"` + LabelsToBeNotified string `json:"labels_to_be_notified"` + LabelsToBeNotifiedBehavior string `json:"labels_to_be_notified_behavior"` + PushChannel string `json:"push_channel"` + IssueChannel string `json:"issue_channel"` + ConfidentialIssueChannel string `json:"confidential_issue_channel"` + MergeRequestChannel string `json:"merge_request_channel"` + NoteChannel string `json:"note_channel"` + ConfidentialNoteChannel string `json:"confidential_note_channel"` + TagPushChannel string `json:"tag_push_channel"` + PipelineChannel string `json:"pipeline_channel"` + WikiPageChannel string `json:"wiki_page_channel"` + DeploymentChannel string `json:"deployment_channel"` + IncidentChannel string `json:"incident_channel"` + AlertChannel string `json:"alert_channel"` + GroupMentionChannel string `json:"group_mention_channel"` + GroupConfidentialMentionChannel string `json:"group_confidential_mention_channel"` +} + +// DiscordIntegration represents the Discord integration settings. +// +// GitLab API docs: +// https://docs.gitlab.com/api/group_integrations/#discord +type DiscordIntegration struct { + Integration + Properties DiscordIntegrationProperties `json:"properties"` +} + +// DiscordIntegrationProperties represents Discord specific properties. 
type DiscordIntegrationProperties struct {
	NotifyOnlyBrokenPipelines bool   `json:"notify_only_broken_pipelines,omitempty"`
	BranchesToBeNotified      string `json:"branches_to_be_notified,omitempty"`
}

// TelegramIntegration represents the Telegram integration settings.
//
// GitLab API docs:
// https://docs.gitlab.com/api/group_integrations/#telegram
type TelegramIntegration struct {
	// Integration carries the generic fields shared by all integrations.
	Integration
	Properties TelegramIntegrationProperties `json:"properties"`
}

// TelegramIntegrationProperties represents Telegram-specific properties.
type TelegramIntegrationProperties struct {
	Hostname                  string `json:"hostname,omitempty"`
	Room                      string `json:"room,omitempty"`
	Thread                    string `json:"thread,omitempty"`
	NotifyOnlyBrokenPipelines bool   `json:"notify_only_broken_pipelines,omitempty"`
	BranchesToBeNotified      string `json:"branches_to_be_notified,omitempty"`
}

// MattermostIntegration represents the Mattermost integration settings.
//
// GitLab API docs:
// https://docs.gitlab.com/api/group_integrations/#mattermost-notifications
type MattermostIntegration struct {
	// Integration carries the generic fields shared by all integrations.
	Integration
	Properties MattermostIntegrationProperties `json:"properties"`
}

// MattermostIntegrationProperties represents Mattermost-specific properties.
type MattermostIntegrationProperties struct {
	// Channel is the default channel; the *Channel fields below are
	// per-event overrides.
	Username                   string `json:"username,omitempty"`
	Channel                    string `json:"channel,omitempty"`
	NotifyOnlyBrokenPipelines  bool   `json:"notify_only_broken_pipelines,omitempty"`
	BranchesToBeNotified       string `json:"branches_to_be_notified,omitempty"`
	LabelsToBeNotified         string `json:"labels_to_be_notified,omitempty"`
	LabelsToBeNotifiedBehavior string `json:"labels_to_be_notified_behavior,omitempty"`
	PushChannel                string `json:"push_channel,omitempty"`
	IssueChannel               string `json:"issue_channel,omitempty"`
	ConfidentialIssueChannel   string `json:"confidential_issue_channel,omitempty"`
	MergeRequestChannel        string `json:"merge_request_channel,omitempty"`
	NoteChannel                string `json:"note_channel,omitempty"`
	ConfidentialNoteChannel    string `json:"confidential_note_channel,omitempty"`
	TagPushChannel             string `json:"tag_push_channel,omitempty"`
	PipelineChannel            string `json:"pipeline_channel,omitempty"`
	WikiPageChannel            string `json:"wiki_page_channel,omitempty"`
	DeploymentChannel          string `json:"deployment_channel,omitempty"`
	IncidentChannel            string `json:"incident_channel,omitempty"`
}

// MatrixIntegration represents the Matrix integration settings.
//
// GitLab API docs:
// https://docs.gitlab.com/api/group_integrations/#matrix-notifications
type MatrixIntegration struct {
	// Integration carries the generic fields shared by all integrations.
	Integration
	Properties MatrixIntegrationProperties `json:"properties"`
}

// MatrixIntegrationProperties represents Matrix-specific properties.
type MatrixIntegrationProperties struct {
	Hostname                  string `json:"hostname,omitempty"`
	Room                      string `json:"room,omitempty"`
	NotifyOnlyBrokenPipelines bool   `json:"notify_only_broken_pipelines,omitempty"`
	BranchesToBeNotified      string `json:"branches_to_be_notified,omitempty"`
}

// GoogleChatIntegration represents the Google Chat integration settings.
//
// GitLab API docs:
// https://docs.gitlab.com/api/group_integrations/#google-chat
type GoogleChatIntegration struct {
	// Integration carries the generic fields shared by all integrations.
	Integration
	Properties GoogleChatIntegrationProperties `json:"properties"`
}

// GoogleChatIntegrationProperties represents Google Chat-specific properties.
type GoogleChatIntegrationProperties struct {
	NotifyOnlyBrokenPipelines           bool   `json:"notify_only_broken_pipelines,omitempty"`
	NotifyOnlyWhenPipelineStatusChanges bool   `json:"notify_only_when_pipeline_status_changes,omitempty"`
	NotifyOnlyDefaultBranch             bool   `json:"notify_only_default_branch,omitempty"`
	BranchesToBeNotified                string `json:"branches_to_be_notified,omitempty"`
	PushEvents                          bool   `json:"push_events,omitempty"`
	IssuesEvents                        bool   `json:"issues_events,omitempty"`
	ConfidentialIssuesEvents            bool   `json:"confidential_issues_events,omitempty"`
	MergeRequestsEvents                 bool   `json:"merge_requests_events,omitempty"`
	TagPushEvents                       bool   `json:"tag_push_events,omitempty"`
	NoteEvents                          bool   `json:"note_events,omitempty"`
	ConfidentialNoteEvents              bool   `json:"confidential_note_events,omitempty"`
	PipelineEvents                      bool   `json:"pipeline_events,omitempty"`
	WikiPageEvents                      bool   `json:"wiki_page_events,omitempty"`
}

// WebexTeamsIntegration represents the Webex Teams integration settings.
//
// GitLab API docs:
// https://docs.gitlab.com/api/group_integrations/#get-webex-teams-settings
type WebexTeamsIntegration struct {
	// Integration carries the generic fields shared by all integrations.
	Integration
	Properties WebexTeamsIntegrationProperties `json:"properties"`
}

// WebexTeamsIntegrationProperties represents Webex Teams-specific properties.
type WebexTeamsIntegrationProperties struct {
	NotifyOnlyBrokenPipelines bool   `json:"notify_only_broken_pipelines,omitempty"`
	BranchesToBeNotified      string `json:"branches_to_be_notified,omitempty"`
}

// SetGroupWebexTeamsOptions represents the available SetGroupWebexTeamsSettings() options.
//
// GitLab API docs:
// https://docs.gitlab.com/api/group_integrations/#set-up-webex-teams
type SetGroupWebexTeamsOptions struct {
	NotifyOnlyBrokenPipelines *bool   `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
	BranchesToBeNotified      *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
}

// SetProjectGoogleChatOptions represents the available SetProjectGoogleChatSettings() options.
//
// GitLab API docs:
// https://docs.gitlab.com/api/project_integrations/#set-up-google-chat
type SetProjectGoogleChatOptions struct {
	Webhook                             *string `url:"webhook,omitempty" json:"webhook,omitempty"`
	NotifyOnlyBrokenPipelines           *bool   `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
	NotifyOnlyWhenPipelineStatusChanges *bool   `url:"notify_only_when_pipeline_status_changes,omitempty" json:"notify_only_when_pipeline_status_changes,omitempty"`
	NotifyOnlyDefaultBranch             *bool   `url:"notify_only_default_branch,omitempty" json:"notify_only_default_branch,omitempty"`
	BranchesToBeNotified                *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
	PushEvents                          *bool   `url:"push_events,omitempty" json:"push_events,omitempty"`
	IssuesEvents                        *bool   `url:"issues_events,omitempty" json:"issues_events,omitempty"`
	ConfidentialIssuesEvents            *bool   `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
	MergeRequestsEvents                 *bool   `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
	TagPushEvents                       *bool   `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
	NoteEvents                          *bool   `url:"note_events,omitempty" json:"note_events,omitempty"`
	ConfidentialNoteEvents              *bool   `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
	PipelineEvents                      *bool   `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
	WikiPageEvents                      *bool
`url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"` +} + // ListActiveIntegrationsOptions represents the available // ListActiveIntegrations() options. // @@ -83,30 +451,14 @@ type ListActiveIntegrationsOptions struct { ListOptions } -// ListActiveGroupIntegrations gets a list of all active group integrations. -// The vulnerability_events field is only available for GitLab Enterprise Edition. -// -// GitLab API docs: -// https://docs.gitlab.com/api/group_integrations/#list-all-active-integrations func (s *IntegrationsService) ListActiveGroupIntegrations(gid any, opt *ListActiveIntegrationsOptions, options ...RequestOptionFunc) ([]*Integration, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/integrations", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var integrations []*Integration - resp, err := s.client.Do(req, &integrations) - if err != nil { - return nil, resp, err - } - - return integrations, resp, nil + return do[[]*Integration]( + s.client, + withPath("groups/%s/integrations", GroupID{gid}), + withMethod(http.MethodGet), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // SetUpHarborOptions represents the available SetUpGroupHarbor() @@ -122,76 +474,33 @@ type SetUpHarborOptions struct { UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"` } -// SetUpGroupHarbor sets up the Harbor integration for a group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_integrations/#set-up-harbor func (s *IntegrationsService) SetUpGroupHarbor(gid any, opt *SetUpHarborOptions, options ...RequestOptionFunc) (*Integration, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/integrations/harbor", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - integration := new(Integration) - resp, err := s.client.Do(req, integration) - if err != nil { - return nil, resp, err - } - return integration, resp, nil + return do[*Integration]( + s.client, + withPath("groups/%s/integrations/harbor", GroupID{gid}), + withMethod(http.MethodPut), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DisableGroupHarbor disables the Harbor integration for a group. -// Integration settings are reset. -// -// GitLab API docs: -// https://docs.gitlab.com/api/group_integrations/#disable-harbor func (s *IntegrationsService) DisableGroupHarbor(gid any, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/integrations/harbor", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(req, nil) - if err != nil { - return nil, err - } - return resp, nil + _, resp, err := do[none]( + s.client, + withPath("groups/%s/integrations/harbor", GroupID{gid}), + withMethod(http.MethodDelete), + withRequestOpts(options...), + ) + return resp, err } -// GetGroupHarborSettings gets the Harbor integration for a group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/group_integrations/#get-harbor-settings func (s *IntegrationsService) GetGroupHarborSettings(gid any, options ...RequestOptionFunc) (*Integration, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/integrations/harbor", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - integration := new(Integration) - resp, err := s.client.Do(req, integration) - if err != nil { - return nil, nil, err - } - return integration, resp, nil + return do[*Integration]( + s.client, + withPath("groups/%s/integrations/harbor", GroupID{gid}), + withMethod(http.MethodGet), + withRequestOpts(options...), + ) } // SetMicrosoftTeamsNotificationsOptions represents the available @@ -200,91 +509,276 @@ func (s *IntegrationsService) GetGroupHarborSettings(gid any, options ...Request // GitLab API docs: // https://docs.gitlab.com/api/group_integrations/#set-up-microsoft-teams-notifications type SetMicrosoftTeamsNotificationsOptions struct { - Targets *string `url:"targets,omitempty"` - Webhook *string `url:"webhook,omitempty"` - NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty"` - NotifyOnlyDefaultBranch *bool `url:"notify_only_default_branch,omitempty"` - BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty"` - PushEvents *bool `url:"push_events,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty"` - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty"` - 
UseInheritedSettings *bool `url:"use_inherited_settings,omitempty"` -} - -// SetGroupMicrosoftTeamsNotifications sets up Microsoft Teams notifications for a group. -// -// GitLab API docs: -// https://docs.gitlab.com/api/group_integrations/#set-up-microsoft-teams-notifications + Targets *string `url:"targets,omitempty" json:"targets,omitempty"` + Webhook *string `url:"webhook,omitempty" json:"webhook,omitempty"` + NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` + NotifyOnlyDefaultBranch *bool `url:"notify_only_default_branch,omitempty" json:"notify_only_default_branch,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"` +} + func (s *IntegrationsService) SetGroupMicrosoftTeamsNotifications(gid any, opt *SetMicrosoftTeamsNotificationsOptions, options ...RequestOptionFunc) (*Integration, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, 
err - } - u := fmt.Sprintf("groups/%s/integrations/microsoft_teams", PathEscape(group)) + return do[*Integration]( + s.client, + withPath("groups/%s/integrations/microsoft-teams", GroupID{gid}), + withMethod(http.MethodPut), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } +func (s *IntegrationsService) DisableGroupMicrosoftTeamsNotifications(gid any, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none]( + s.client, + withPath("groups/%s/integrations/microsoft-teams", GroupID{gid}), + withMethod(http.MethodDelete), + withRequestOpts(options...), + ) + return resp, err +} - integration := new(Integration) - resp, err := s.client.Do(req, integration) - if err != nil { - return nil, resp, err - } - return integration, resp, nil +func (s *IntegrationsService) GetGroupMicrosoftTeamsNotifications(gid any, options ...RequestOptionFunc) (*Integration, *Response, error) { + return do[*Integration]( + s.client, + withPath("groups/%s/integrations/microsoft-teams", GroupID{gid}), + withMethod(http.MethodGet), + withRequestOpts(options...), + ) } -// DisableGroupMicrosoftTeamsNotifications disables Microsoft Teams notifications -// for a group. Integration settings are reset. +// SetUpJiraOptions represents the available SetUpJira() options. 
// // GitLab API docs: -// https://docs.gitlab.com/api/group_integrations/#disable-microsoft-teams-notifications -func (s *IntegrationsService) DisableGroupMicrosoftTeamsNotifications(gid any, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/integrations/microsoft_teams", PathEscape(group)) +// https://docs.gitlab.com/api/group_integrations/#set-up-jira +type SetUpJiraOptions struct { + URL *string `url:"url,omitempty" json:"url,omitempty"` + APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + Password *string `url:"password,omitempty" json:"password,omitempty"` + Active *bool `url:"active,omitempty" json:"active,omitempty"` + JiraAuthType *int64 `url:"jira_auth_type,omitempty" json:"jira_auth_type,omitempty"` + JiraIssuePrefix *string `url:"jira_issue_prefix,omitempty" json:"jira_issue_prefix,omitempty"` + JiraIssueRegex *string `url:"jira_issue_regex,omitempty" json:"jira_issue_regex,omitempty"` + JiraIssueTransitionAutomatic *bool `url:"jira_issue_transition_automatic,omitempty" json:"jira_issue_transition_automatic,omitempty"` + JiraIssueTransitionID *string `url:"jira_issue_transition_id,omitempty" json:"jira_issue_transition_id,omitempty"` + CommitEvents *bool `url:"commit_events,omitempty" json:"commit_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + CommentOnEventEnabled *bool `url:"comment_on_event_enabled,omitempty" json:"comment_on_event_enabled,omitempty"` + IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"` + ProjectKeys *[]string `url:"project_keys,omitempty" json:"project_keys,omitempty"` + UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"` +} - req, err := s.client.NewRequest(http.MethodDelete, u, 
nil, options) - if err != nil { - return nil, err - } +func (s *IntegrationsService) SetUpGroupJira(gid any, opt *SetUpJiraOptions, options ...RequestOptionFunc) (*Integration, *Response, error) { + return do[*Integration]( + s.client, + withPath("groups/%s/integrations/jira", GroupID{gid}), + withMethod(http.MethodPut), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} - resp, err := s.client.Do(req, nil) - if err != nil { - return nil, err - } - return resp, nil +func (s *IntegrationsService) DisableGroupJira(gid any, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none]( + s.client, + withPath("groups/%s/integrations/jira", GroupID{gid}), + withMethod(http.MethodDelete), + withRequestOpts(options...), + ) + return resp, err +} + +func (s *IntegrationsService) GetGroupJiraSettings(gid any, options ...RequestOptionFunc) (*Integration, *Response, error) { + return do[*Integration]( + s.client, + withPath("groups/%s/integrations/jira", GroupID{gid}), + withMethod(http.MethodGet), + withRequestOpts(options...), + ) } -// GetGroupMicrosoftTeamsNotifications gets the Microsoft Teams notifications for a group. +// SetGroupSlackOptions represents the available SetGroupSlackSettings() options. 
// // GitLab API docs: -// https://docs.gitlab.com/api/group_integrations/#get-microsoft-teams-notifications-settings -func (s *IntegrationsService) GetGroupMicrosoftTeamsNotifications(gid any, options ...RequestOptionFunc) (*Integration, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/integrations/microsoft_teams", PathEscape(group)) +// https://docs.gitlab.com/api/group_integrations/#set-up-slack-notifications +type SetGroupSlackOptions struct { + Webhook *string `url:"webhook,omitempty" json:"webhook,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + Channel *string `url:"channel,omitempty" json:"channel,omitempty"` + NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` + NotifyOnlyWhenPipelineStatusChanges *bool `url:"notify_only_when_pipeline_status_changes,omitempty" json:"notify_only_when_pipeline_status_changes,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + LabelsToBeNotified *string `url:"labels_to_be_notified,omitempty" json:"labels_to_be_notified,omitempty"` + LabelsToBeNotifiedBehavior *string `url:"labels_to_be_notified_behavior,omitempty" json:"labels_to_be_notified_behavior,omitempty"` + AlertEvents *bool `url:"alert_events,omitempty" json:"alert_events,omitempty"` + CommitEvents *bool `url:"commit_events,omitempty" json:"commit_events,omitempty"` + PushChannel *string `url:"push_channel,omitempty" json:"push_channel,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + IssueChannel *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` + ConfidentialIssueChannel *string 
`url:"confidential_issue_channel,omitempty" json:"confidential_issue_channel,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + MergeRequestChannel *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + NoteChannel *string `url:"note_channel,omitempty" json:"note_channel,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + ConfidentialNoteChannel *string `url:"confidential_note_channel,omitempty" json:"confidential_note_channel,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + TagPushChannel *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + PipelineChannel *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + WikiPageChannel *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + DeploymentChannel *string `url:"deployment_channel,omitempty" json:"deployment_channel,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + IncidentChannel *string `url:"incident_channel,omitempty" json:"incident_channel,omitempty"` + IncidentEvents *bool `url:"incident_events,omitempty" json:"incident_events,omitempty"` + AlertChannel *string `url:"alert_channel,omitempty" json:"alert_channel,omitempty"` + GroupMentionChannel *string `url:"group_mention_channel,omitempty" json:"group_mention_channel,omitempty"` + GroupConfidentialMentionChannel 
*string `url:"group_confidential_mention_channel,omitempty" json:"group_confidential_mention_channel,omitempty"` + UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"` +} - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } +func (s *IntegrationsService) SetGroupSlackSettings(gid any, opt *SetGroupSlackOptions, options ...RequestOptionFunc) (*SlackIntegration, *Response, error) { + return do[*SlackIntegration]( + s.client, + withPath("groups/%s/integrations/slack", GroupID{gid}), + withMethod(http.MethodPut), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} - integration := new(Integration) - resp, err := s.client.Do(req, integration) - if err != nil { - return nil, nil, err - } - return integration, resp, nil +func (s *IntegrationsService) DisableGroupSlack(gid any, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none]( + s.client, + withPath("groups/%s/integrations/slack", GroupID{gid}), + withMethod(http.MethodDelete), + withRequestOpts(options...), + ) + return resp, err +} + +func (s *IntegrationsService) GetGroupSlackSettings(gid any, options ...RequestOptionFunc) (*SlackIntegration, *Response, error) { + return do[*SlackIntegration]( + s.client, + withPath("groups/%s/integrations/slack", GroupID{gid}), + withMethod(http.MethodGet), + withRequestOpts(options...), + ) +} + +func (s *IntegrationsService) GetGroupDiscordSettings(gid any, options ...RequestOptionFunc) (*DiscordIntegration, *Response, error) { + return do[*DiscordIntegration]( + s.client, + withPath("groups/%s/integrations/discord", GroupID{gid}), + withMethod(http.MethodGet), + withRequestOpts(options...), + ) +} + +func (s *IntegrationsService) GetGroupTelegramSettings(gid any, options ...RequestOptionFunc) (*TelegramIntegration, *Response, error) { + return do[*TelegramIntegration]( + s.client, + withPath("groups/%s/integrations/telegram", 
GroupID{gid}), + withMethod(http.MethodGet), + withRequestOpts(options...), + ) +} + +func (s *IntegrationsService) GetGroupMattermostSettings(gid any, options ...RequestOptionFunc) (*MattermostIntegration, *Response, error) { + return do[*MattermostIntegration]( + s.client, + withPath("groups/%s/integrations/mattermost", GroupID{gid}), + withMethod(http.MethodGet), + withRequestOpts(options...), + ) +} + +func (s *IntegrationsService) GetGroupMatrixSettings(gid any, options ...RequestOptionFunc) (*MatrixIntegration, *Response, error) { + return do[*MatrixIntegration]( + s.client, + withPath("groups/%s/integrations/matrix", GroupID{gid}), + withMethod(http.MethodGet), + withRequestOpts(options...), + ) +} + +func (s *IntegrationsService) GetGroupGoogleChatSettings(gid any, options ...RequestOptionFunc) (*GoogleChatIntegration, *Response, error) { + return do[*GoogleChatIntegration]( + s.client, + withPath("groups/%s/integrations/hangouts-chat", GroupID{gid}), + withMethod(http.MethodGet), + withRequestOpts(options...), + ) +} + +func (s *IntegrationsService) SetProjectGoogleChatSettings(pid any, opt *SetProjectGoogleChatOptions, options ...RequestOptionFunc) (*GoogleChatIntegration, *Response, error) { + return do[*GoogleChatIntegration]( + s.client, + withPath("projects/%s/integrations/hangouts-chat", ProjectID{pid}), + withMethod(http.MethodPut), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +func (s *IntegrationsService) DisableProjectGoogleChat(pid any, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none]( + s.client, + withPath("projects/%s/integrations/hangouts-chat", ProjectID{pid}), + withMethod(http.MethodDelete), + withRequestOpts(options...), + ) + return resp, err +} + +func (s *IntegrationsService) GetProjectGoogleChatSettings(pid any, options ...RequestOptionFunc) (*GoogleChatIntegration, *Response, error) { + return do[*GoogleChatIntegration]( + s.client, + withPath("projects/%s/integrations/hangouts-chat", 
ProjectID{pid}), + withMethod(http.MethodGet), + withRequestOpts(options...), + ) +} + +func (s *IntegrationsService) GetGroupWebexTeamsSettings(gid any, options ...RequestOptionFunc) (*WebexTeamsIntegration, *Response, error) { + return do[*WebexTeamsIntegration]( + s.client, + withPath("groups/%s/integrations/webex-teams", GroupID{gid}), + withMethod(http.MethodGet), + withRequestOpts(options...), + ) +} + +func (s *IntegrationsService) SetGroupWebexTeamsSettings(gid any, opt *SetGroupWebexTeamsOptions, options ...RequestOptionFunc) (*WebexTeamsIntegration, *Response, error) { + return do[*WebexTeamsIntegration]( + s.client, + withPath("groups/%s/integrations/webex-teams", GroupID{gid}), + withMethod(http.MethodPut), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +func (s *IntegrationsService) DisableGroupWebexTeams(gid any, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none]( + s.client, + withPath("groups/%s/integrations/webex-teams", GroupID{gid}), + withMethod(http.MethodDelete), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/invites.go b/vendor/gitlab.com/gitlab-org/api/client-go/invites.go index 71b64e0781..5be8babe42 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/invites.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/invites.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -45,7 +44,7 @@ var _ InvitesServiceInterface = (*InvitesService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/invitations/ type PendingInvite struct { - ID int `json:"id"` + ID int64 `json:"id"` InviteEmail string `json:"invite_email"` CreatedAt *time.Time `json:"created_at"` AccessLevel AccessLevelValue `json:"access_level"` @@ -69,24 +68,11 @@ type ListPendingInvitationsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/invitations/#list-all-invitations-pending-for-a-group-or-project func (s *InvitesService) 
ListPendingGroupInvitations(gid any, opt *ListPendingInvitationsOptions, options ...RequestOptionFunc) ([]*PendingInvite, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/invitations", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pis []*PendingInvite - resp, err := s.client.Do(req, &pis) - if err != nil { - return nil, resp, err - } - - return pis, resp, nil + return do[[]*PendingInvite](s.client, + withPath("groups/%s/invitations", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListPendingProjectInvitations gets a list of invited project members. @@ -94,24 +80,11 @@ func (s *InvitesService) ListPendingGroupInvitations(gid any, opt *ListPendingIn // GitLab API docs: // https://docs.gitlab.com/api/invitations/#list-all-invitations-pending-for-a-group-or-project func (s *InvitesService) ListPendingProjectInvitations(pid any, opt *ListPendingInvitationsOptions, options ...RequestOptionFunc) ([]*PendingInvite, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/invitations", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pis []*PendingInvite - resp, err := s.client.Do(req, &pis) - if err != nil { - return nil, resp, err - } - - return pis, resp, nil + return do[[]*PendingInvite](s.client, + withPath("projects/%s/invitations", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // InvitesOptions represents the available GroupInvites() and ProjectInvites() @@ -141,24 +114,12 @@ type InvitesResult struct { // GitLab API docs: // https://docs.gitlab.com/api/invitations/#add-a-member-to-a-group-or-project func (s *InvitesService) GroupInvites(gid any, opt *InvitesOptions, options 
...RequestOptionFunc) (*InvitesResult, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/invitations", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - ir := new(InvitesResult) - resp, err := s.client.Do(req, ir) - if err != nil { - return nil, resp, err - } - - return ir, resp, nil + return do[*InvitesResult](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/invitations", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ProjectInvites invites new users by email to join a project. @@ -166,22 +127,10 @@ func (s *InvitesService) GroupInvites(gid any, opt *InvitesOptions, options ...R // GitLab API docs: // https://docs.gitlab.com/api/invitations/#add-a-member-to-a-group-or-project func (s *InvitesService) ProjectInvites(pid any, opt *InvitesOptions, options ...RequestOptionFunc) (*InvitesResult, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/invitations", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - ir := new(InvitesResult) - resp, err := s.client.Do(req, ir) - if err != nil { - return nil, resp, err - } - - return ir, resp, nil + return do[*InvitesResult](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/invitations", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/issue_links.go b/vendor/gitlab.com/gitlab-org/api/client-go/issue_links.go index 78b3877ba8..2cf4ce4bfe 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/issue_links.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/issue_links.go @@ -17,17 +17,16 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( 
IssueLinksServiceInterface interface { - ListIssueRelations(pid any, issue int, options ...RequestOptionFunc) ([]*IssueRelation, *Response, error) - GetIssueLink(pid any, issue, issueLink int, options ...RequestOptionFunc) (*IssueLink, *Response, error) - CreateIssueLink(pid any, issue int, opt *CreateIssueLinkOptions, options ...RequestOptionFunc) (*IssueLink, *Response, error) - DeleteIssueLink(pid any, issue, issueLink int, options ...RequestOptionFunc) (*IssueLink, *Response, error) + ListIssueRelations(pid any, issue int64, options ...RequestOptionFunc) ([]*IssueRelation, *Response, error) + GetIssueLink(pid any, issue, issueLink int64, options ...RequestOptionFunc) (*IssueLink, *Response, error) + CreateIssueLink(pid any, issue int64, opt *CreateIssueLinkOptions, options ...RequestOptionFunc) (*IssueLink, *Response, error) + DeleteIssueLink(pid any, issue, issueLink int64, options ...RequestOptionFunc) (*IssueLink, *Response, error) } // IssueLinksService handles communication with the issue relations related methods @@ -45,6 +44,7 @@ var _ IssueLinksServiceInterface = (*IssueLinksService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/issue_links/ type IssueLink struct { + ID int64 `json:"id"` SourceIssue *Issue `json:"source_issue"` TargetIssue *Issue `json:"target_issue"` LinkType string `json:"link_type"` @@ -55,14 +55,14 @@ type IssueLink struct { // GitLab API docs: // https://docs.gitlab.com/api/issue_links/#list-issue-relations type IssueRelation struct { - ID int `json:"id"` - IID int `json:"iid"` + ID int64 `json:"id"` + IID int64 `json:"iid"` State string `json:"state"` Description string `json:"description"` Confidential bool `json:"confidential"` Author *IssueAuthor `json:"author"` Milestone *Milestone `json:"milestone"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` Assignees []*IssueAssignee `json:"assignees"` Assignee *IssueAssignee `json:"assignee"` UpdatedAt *time.Time `json:"updated_at"` @@ -72,9 +72,9 @@ 
type IssueRelation struct { DueDate *ISOTime `json:"due_date"` WebURL string `json:"web_url"` References *IssueReferences `json:"references"` - Weight int `json:"weight"` - UserNotesCount int `json:"user_notes_count"` - IssueLinkID int `json:"issue_link_id"` + Weight int64 `json:"weight"` + UserNotesCount int64 `json:"user_notes_count"` + IssueLinkID int64 `json:"issue_link_id"` LinkType string `json:"link_type"` LinkCreatedAt *time.Time `json:"link_created_at"` LinkUpdatedAt *time.Time `json:"link_updated_at"` @@ -87,50 +87,24 @@ type IssueRelation struct { // // GitLab API docs: // https://docs.gitlab.com/api/issue_links/#list-issue-relations -func (s *IssueLinksService) ListIssueRelations(pid any, issue int, options ...RequestOptionFunc) ([]*IssueRelation, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/links", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var is []*IssueRelation - resp, err := s.client.Do(req, &is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil +func (s *IssueLinksService) ListIssueRelations(pid any, issue int64, options ...RequestOptionFunc) ([]*IssueRelation, *Response, error) { + // Use explicit format string for the path + return do[[]*IssueRelation](s.client, + withPath("projects/%s/issues/%d/links", ProjectID{pid}, issue), + withRequestOpts(options...), + ) } // GetIssueLink gets a specific issue link. 
// // GitLab API docs: // https://docs.gitlab.com/api/issue_links/#get-an-issue-link -func (s *IssueLinksService) GetIssueLink(pid any, issue, issueLink int, options ...RequestOptionFunc) (*IssueLink, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/links/%d", PathEscape(project), issue, issueLink) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - il := new(IssueLink) - resp, err := s.client.Do(req, il) - if err != nil { - return nil, resp, err - } - - return il, resp, nil +func (s *IssueLinksService) GetIssueLink(pid any, issue, issueLink int64, options ...RequestOptionFunc) (*IssueLink, *Response, error) { + // Use explicit format string for the path + return do[*IssueLink](s.client, + withPath("projects/%s/issues/%d/links/%d", ProjectID{pid}, issue, issueLink), + withRequestOpts(options...), + ) } // CreateIssueLinkOptions represents the available CreateIssueLink() options. 
@@ -147,51 +121,23 @@ type CreateIssueLinkOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/issue_links/#create-an-issue-link -func (s *IssueLinksService) CreateIssueLink(pid any, issue int, opt *CreateIssueLinkOptions, options ...RequestOptionFunc) (*IssueLink, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/links", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - i := new(IssueLink) - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil +func (s *IssueLinksService) CreateIssueLink(pid any, issue int64, opt *CreateIssueLinkOptions, options ...RequestOptionFunc) (*IssueLink, *Response, error) { + return do[*IssueLink](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/issues/%d/links", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteIssueLink deletes an issue link, thus removes the two-way relationship. 
// // GitLab API docs: // https://docs.gitlab.com/api/issue_links/#delete-an-issue-link -func (s *IssueLinksService) DeleteIssueLink(pid any, issue, issueLink int, options ...RequestOptionFunc) (*IssueLink, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/links/%d", - PathEscape(project), - issue, - issueLink) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, nil, err - } - - il := new(IssueLink) - resp, err := s.client.Do(req, &il) - if err != nil { - return nil, resp, err - } - - return il, resp, nil +func (s *IssueLinksService) DeleteIssueLink(pid any, issue, issueLink int64, options ...RequestOptionFunc) (*IssueLink, *Response, error) { + return do[*IssueLink](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/issues/%d/links/%d", ProjectID{pid}, issue, issueLink), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/issues.go b/vendor/gitlab.com/gitlab-org/api/client-go/issues.go index cd2564e50e..71aa93bec3 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/issues.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/issues.go @@ -18,7 +18,6 @@ package gitlab import ( "encoding/json" - "fmt" "net/http" "reflect" "strings" @@ -30,24 +29,24 @@ type ( ListIssues(opt *ListIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) ListGroupIssues(pid any, opt *ListGroupIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) ListProjectIssues(pid any, opt *ListProjectIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) - GetIssueByID(issue int, options ...RequestOptionFunc) (*Issue, *Response, error) - GetIssue(pid any, issue int, options ...RequestOptionFunc) (*Issue, *Response, error) + GetIssueByID(issue int64, options ...RequestOptionFunc) (*Issue, *Response, error) + GetIssue(pid any, issue int64, options 
...RequestOptionFunc) (*Issue, *Response, error) CreateIssue(pid any, opt *CreateIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) - UpdateIssue(pid any, issue int, opt *UpdateIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) - DeleteIssue(pid any, issue int, options ...RequestOptionFunc) (*Response, error) - ReorderIssue(pid any, issue int, opt *ReorderIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) - MoveIssue(pid any, issue int, opt *MoveIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) - SubscribeToIssue(pid any, issue int, options ...RequestOptionFunc) (*Issue, *Response, error) - UnsubscribeFromIssue(pid any, issue int, options ...RequestOptionFunc) (*Issue, *Response, error) - CreateTodo(pid any, issue int, options ...RequestOptionFunc) (*Todo, *Response, error) - ListMergeRequestsClosingIssue(pid any, issue int, opt *ListMergeRequestsClosingIssueOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) - ListMergeRequestsRelatedToIssue(pid any, issue int, opt *ListMergeRequestsRelatedToIssueOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) - SetTimeEstimate(pid any, issue int, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) - ResetTimeEstimate(pid any, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) - AddSpentTime(pid any, issue int, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) - ResetSpentTime(pid any, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) - GetTimeSpent(pid any, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) - GetParticipants(pid any, issue int, options ...RequestOptionFunc) ([]*BasicUser, *Response, error) + UpdateIssue(pid any, issue int64, opt *UpdateIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) + DeleteIssue(pid any, issue 
int64, options ...RequestOptionFunc) (*Response, error) + ReorderIssue(pid any, issue int64, opt *ReorderIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) + MoveIssue(pid any, issue int64, opt *MoveIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) + SubscribeToIssue(pid any, issue int64, options ...RequestOptionFunc) (*Issue, *Response, error) + UnsubscribeFromIssue(pid any, issue int64, options ...RequestOptionFunc) (*Issue, *Response, error) + CreateTodo(pid any, issue int64, options ...RequestOptionFunc) (*Todo, *Response, error) + ListMergeRequestsClosingIssue(pid any, issue int64, opt *ListMergeRequestsClosingIssueOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) + ListMergeRequestsRelatedToIssue(pid any, issue int64, opt *ListMergeRequestsRelatedToIssueOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) + SetTimeEstimate(pid any, issue int64, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) + ResetTimeEstimate(pid any, issue int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) + AddSpentTime(pid any, issue int64, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) + ResetSpentTime(pid any, issue int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) + GetTimeSpent(pid any, issue int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) + GetParticipants(pid any, issue int64, options ...RequestOptionFunc) ([]*BasicUser, *Response, error) } // IssuesService handles communication with the issue related methods @@ -64,7 +63,7 @@ var _ IssuesServiceInterface = (*IssuesService)(nil) // IssueAuthor represents a author of the issue. 
type IssueAuthor struct { - ID int `json:"id"` + ID int64 `json:"id"` State string `json:"state"` WebURL string `json:"web_url"` Name string `json:"name"` @@ -74,7 +73,7 @@ type IssueAuthor struct { // IssueAssignee represents a assignee of the issue. type IssueAssignee struct { - ID int `json:"id"` + ID int64 `json:"id"` State string `json:"state"` WebURL string `json:"web_url"` Name string `json:"name"` @@ -91,7 +90,7 @@ type IssueReferences struct { // IssueCloser represents a closer of the issue. type IssueCloser struct { - ID int `json:"id"` + ID int64 `json:"id"` State string `json:"state"` WebURL string `json:"web_url"` Name string `json:"name"` @@ -111,40 +110,40 @@ type IssueLinks struct { // // GitLab API docs: https://docs.gitlab.com/api/issues/ type Issue struct { - ID int `json:"id"` - IID int `json:"iid"` + ID int64 `json:"id"` + IID int64 `json:"iid"` ExternalID string `json:"external_id"` State string `json:"state"` Description string `json:"description"` HealthStatus string `json:"health_status"` Author *IssueAuthor `json:"author"` Milestone *Milestone `json:"milestone"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` Assignees []*IssueAssignee `json:"assignees"` UpdatedAt *time.Time `json:"updated_at"` ClosedAt *time.Time `json:"closed_at"` ClosedBy *IssueCloser `json:"closed_by"` Title string `json:"title"` CreatedAt *time.Time `json:"created_at"` - MovedToID int `json:"moved_to_id"` + MovedToID int64 `json:"moved_to_id"` Labels Labels `json:"labels"` LabelDetails []*LabelDetails `json:"label_details"` - Upvotes int `json:"upvotes"` - Downvotes int `json:"downvotes"` + Upvotes int64 `json:"upvotes"` + Downvotes int64 `json:"downvotes"` DueDate *ISOTime `json:"due_date"` WebURL string `json:"web_url"` References *IssueReferences `json:"references"` TimeStats *TimeStats `json:"time_stats"` Confidential bool `json:"confidential"` - Weight int `json:"weight"` + Weight int64 `json:"weight"` DiscussionLocked bool 
`json:"discussion_locked"` IssueType *string `json:"issue_type,omitempty"` Subscribed bool `json:"subscribed"` - UserNotesCount int `json:"user_notes_count"` + UserNotesCount int64 `json:"user_notes_count"` Links *IssueLinks `json:"_links"` - IssueLinkID int `json:"issue_link_id"` - MergeRequestCount int `json:"merge_requests_count"` - EpicIssueID int `json:"epic_issue_id"` + IssueLinkID int64 `json:"issue_link_id"` + MergeRequestCount int64 `json:"merge_requests_count"` + EpicIssueID int64 `json:"epic_issue_id"` Epic *Epic `json:"epic"` Iteration *GroupIteration `json:"iteration"` TaskCompletionStatus *TasksCompletionStatus `json:"task_completion_status"` @@ -198,7 +197,7 @@ func (i *Issue) UnmarshalJSON(data []byte) error { // LabelDetails represents detailed label information. type LabelDetails struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Color string `json:"color"` Description string `json:"description"` @@ -218,17 +217,17 @@ type ListIssuesOptions struct { Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorID *int64 `url:"author_id,omitempty" json:"author_id,omitempty"` AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` - NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` + NotAuthorID *[]int64 `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` + NotAssigneeID *[]int64 `url:"not[assignee_id],omitempty" 
json:"not[assignee_id],omitempty"` AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` NotAssigneeUsername *string `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"` MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + IIDs *[]int64 `url:"iids[],omitempty" json:"iids,omitempty"` In *string `url:"in,omitempty" json:"in,omitempty"` NotIn *string `url:"not[in],omitempty" json:"not[in],omitempty"` OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` @@ -242,7 +241,7 @@ type ListIssuesOptions struct { UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` - IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` + IterationID *int64 `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` } // ListIssues gets all issues created by authenticated user. This function @@ -250,18 +249,11 @@ type ListIssuesOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/issues/#list-issues func (s *IssuesService) ListIssues(opt *ListIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "issues", opt, options) - if err != nil { - return nil, nil, err - } - - var i []*Issue - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil + return do[[]*Issue](s.client, + withPath("issues"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListGroupIssuesOptions represents the available ListGroupIssues() options. 
@@ -273,20 +265,20 @@ type ListGroupIssuesOptions struct { Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + IIDs *[]int64 `url:"iids[],omitempty" json:"iids,omitempty"` Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - NotAuthorID *int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` + AuthorID *int64 `url:"author_id,omitempty" json:"author_id,omitempty"` + NotAuthorID *int64 `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` - // AssigneeID is defined as an int in the documentation, however, the field + // AssigneeID is defined as an int64 in the documentation, however, the field // must be able to accept Assignee IDs and the words 'None' and 'Any'. Use - // *AssigneeIDValue instead of *int. + // *AssigneeIDValue instead of *int64. 
AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - NotAssigneeID *int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` + NotAssigneeID *int64 `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` NotAssigneeUsername *string `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"` MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` @@ -304,7 +296,7 @@ type ListGroupIssuesOptions struct { UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` - IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` + IterationID *int64 `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` } // ListGroupIssues gets a list of group issues. This function accepts @@ -312,24 +304,11 @@ type ListGroupIssuesOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/issues/#list-group-issues func (s *IssuesService) ListGroupIssues(pid any, opt *ListGroupIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/issues", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var i []*Issue - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil + return do[[]*Issue](s.client, + withPath("groups/%s/issues", GroupID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListProjectIssuesOptions represents the available ListProjectIssues() options. 
@@ -337,37 +316,41 @@ func (s *IssuesService) ListGroupIssues(pid any, opt *ListGroupIssuesOptions, op // GitLab API docs: https://docs.gitlab.com/api/issues/#list-project-issues type ListProjectIssuesOptions struct { ListOptions - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` - WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` - Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` - NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` - NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` - NotAuthorID *int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - NotAssigneeID *int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` - AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` - NotAssigneeUsername *string `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"` - MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` - NotMyReactionEmoji *string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - In *string 
`url:"in,omitempty" json:"in,omitempty"` - NotIn *string `url:"not[in],omitempty" json:"not[in],omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - DueDate *string `url:"due_date,omitempty" json:"due_date,omitempty"` - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` - IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` - IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` + IIDs *[]int64 `url:"iids[],omitempty" json:"iids,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` + Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` + WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` + Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` + NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` + Scope *string `url:"scope,omitempty" json:"scope,omitempty"` + AuthorID *int64 `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` + NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` + NotAuthorID *int64 `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` + + // AssigneeID is defined as an int64 in the documentation, however, the field + // must be able to accept Assignee IDs and the words 'None' and 'Any'. Use + // *AssigneeIDValue instead of *int64. 
+ AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + NotAssigneeID *int64 `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` + AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` + NotAssigneeUsername *string `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"` + MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` + NotMyReactionEmoji *string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + In *string `url:"in,omitempty" json:"in,omitempty"` + NotIn *string `url:"not[in],omitempty" json:"not[in],omitempty"` + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` + DueDate *string `url:"due_date,omitempty" json:"due_date,omitempty"` + UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` + UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` + IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` + IterationID *int64 `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` } // ListProjectIssues gets a list of project issues. 
This function accepts @@ -375,87 +358,53 @@ type ListProjectIssuesOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/issues/#list-project-issues func (s *IssuesService) ListProjectIssues(pid any, opt *ListProjectIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var i []*Issue - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil + return do[[]*Issue](s.client, + withPath("projects/%s/issues", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetIssueByID gets a single issue. // // GitLab API docs: https://docs.gitlab.com/api/issues/#single-issue -func (s *IssuesService) GetIssueByID(issue int, options ...RequestOptionFunc) (*Issue, *Response, error) { - u := fmt.Sprintf("issues/%d", issue) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil +func (s *IssuesService) GetIssueByID(issue int64, options ...RequestOptionFunc) (*Issue, *Response, error) { + return do[*Issue](s.client, + withPath("issues/%d", issue), + withRequestOpts(options...), + ) } // GetIssue gets a single project issue. 
// // GitLab API docs: https://docs.gitlab.com/api/issues/#single-project-issue -func (s *IssuesService) GetIssue(pid any, issue int, options ...RequestOptionFunc) (*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil +func (s *IssuesService) GetIssue(pid any, issue int64, options ...RequestOptionFunc) (*Issue, *Response, error) { + return do[*Issue](s.client, + withPath("projects/%s/issues/%d", ProjectID{pid}, issue), + withRequestOpts(options...), + ) } // CreateIssueOptions represents the available CreateIssue() options. // // GitLab API docs: https://docs.gitlab.com/api/issues/#new-issue type CreateIssueOptions struct { - IID *int `url:"iid,omitempty" json:"iid,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` - AssigneeIDs *[]int `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` + IID *int64 `url:"iid,omitempty" json:"iid,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` + // AssigneeID is a CE-only attribute. For EE, use AssigneeIDs instead. + AssigneeID *int64 `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + // AssigneeIDs is a EE-only attribute. For CE, use AssigneeID instead. 
+ AssigneeIDs *[]int64 `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` + MilestoneID *int64 `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` - EpicID *int `url:"epic_id,omitempty" json:"epic_id,omitempty"` - MergeRequestToResolveDiscussionsOf *int `url:"merge_request_to_resolve_discussions_of,omitempty" json:"merge_request_to_resolve_discussions_of,omitempty"` + EpicID *int64 `url:"epic_id,omitempty" json:"epic_id,omitempty"` + MergeRequestToResolveDiscussionsOf *int64 `url:"merge_request_to_resolve_discussions_of,omitempty" json:"merge_request_to_resolve_discussions_of,omitempty"` DiscussionToResolve *string `url:"discussion_to_resolve,omitempty" json:"discussion_to_resolve,omitempty"` - Weight *int `url:"weight,omitempty" json:"weight,omitempty"` + Weight *int64 `url:"weight,omitempty" json:"weight,omitempty"` IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` } @@ -463,24 +412,12 @@ type CreateIssueOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/issues/#new-issue func (s *IssuesService) CreateIssue(pid any, opt *CreateIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil + return do[*Issue](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/issues", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateIssueOptions represents the available 
UpdateIssue() options. @@ -490,19 +427,22 @@ func (s *IssuesService) CreateIssue(pid any, opt *CreateIssueOptions, options .. // // GitLab API docs: https://docs.gitlab.com/api/issues/#edit-an-issue type UpdateIssueOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` - AssigneeIDs *[]int `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` + // AssigneeID is a CE-only attribute. For EE, use AssigneeIDs instead. + AssigneeID *int64 `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + // AssigneeIDs is a EE-only attribute. For CE, use AssigneeID instead. 
+ AssigneeIDs *[]int64 `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` + MilestoneID *int64 `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` AddLabels *LabelOptions `url:"add_labels,comma,omitempty" json:"add_labels,omitempty"` RemoveLabels *LabelOptions `url:"remove_labels,comma,omitempty" json:"remove_labels,omitempty"` StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"` UpdatedAt *time.Time `url:"updated_at,omitempty" json:"updated_at,omitempty"` DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` - EpicID *int `url:"epic_id,omitempty" json:"epic_id,omitempty"` - Weight *int `url:"weight,omitempty" json:"weight,omitempty"` + EpicID *int64 `url:"epic_id,omitempty" json:"epic_id,omitempty"` + Weight *int64 `url:"weight,omitempty" json:"weight,omitempty"` DiscussionLocked *bool `url:"discussion_locked,omitempty" json:"discussion_locked,omitempty"` IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` @@ -568,107 +508,65 @@ func (o UpdateIssueOptions) MarshalJSON() ([]byte, error) { // to mark an issue as closed. 
// // GitLab API docs: https://docs.gitlab.com/api/issues/#edit-an-issue -func (s *IssuesService) UpdateIssue(pid any, issue int, opt *UpdateIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil +func (s *IssuesService) UpdateIssue(pid any, issue int64, opt *UpdateIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) { + return do[*Issue](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/issues/%d", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteIssue deletes a single project issue. // // GitLab API docs: https://docs.gitlab.com/api/issues/#delete-an-issue -func (s *IssuesService) DeleteIssue(pid any, issue int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *IssuesService) DeleteIssue(pid any, issue int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/issues/%d", ProjectID{pid}, issue), + withRequestOpts(options...), + ) + return resp, err } // ReorderIssueOptions represents the available ReorderIssue() options. 
// // GitLab API docs: https://docs.gitlab.com/api/issues/#reorder-an-issue type ReorderIssueOptions struct { - MoveAfterID *int `url:"move_after_id,omitempty" json:"move_after_id,omitempty"` - MoveBeforeID *int `url:"move_before_id,omitempty" json:"move_before_id,omitempty"` + MoveAfterID *int64 `url:"move_after_id,omitempty" json:"move_after_id,omitempty"` + MoveBeforeID *int64 `url:"move_before_id,omitempty" json:"move_before_id,omitempty"` } // ReorderIssue reorders an issue. // // GitLab API docs: https://docs.gitlab.com/api/issues/#reorder-an-issue -func (s *IssuesService) ReorderIssue(pid any, issue int, opt *ReorderIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/reorder", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil +func (s *IssuesService) ReorderIssue(pid any, issue int64, opt *ReorderIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) { + return do[*Issue](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/issues/%d/reorder", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // MoveIssueOptions represents the available MoveIssue() options. // // GitLab API docs: https://docs.gitlab.com/api/issues/#move-an-issue type MoveIssueOptions struct { - ToProjectID *int `url:"to_project_id,omitempty" json:"to_project_id,omitempty"` + ToProjectID *int64 `url:"to_project_id,omitempty" json:"to_project_id,omitempty"` } // MoveIssue updates an existing project issue. This function is also used // to mark an issue as closed. 
// // GitLab API docs: https://docs.gitlab.com/api/issues/#move-an-issue -func (s *IssuesService) MoveIssue(pid any, issue int, opt *MoveIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/move", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil +func (s *IssuesService) MoveIssue(pid any, issue int64, opt *MoveIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) { + return do[*Issue](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/issues/%d/move", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // SubscribeToIssue subscribes the authenticated user to the given issue to @@ -677,25 +575,12 @@ func (s *IssuesService) MoveIssue(pid any, issue int, opt *MoveIssueOptions, opt // // GitLab API docs: // https://docs.gitlab.com/api/issues/#subscribe-to-an-issue -func (s *IssuesService) SubscribeToIssue(pid any, issue int, options ...RequestOptionFunc) (*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/subscribe", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil +func (s *IssuesService) SubscribeToIssue(pid any, issue int64, options ...RequestOptionFunc) (*Issue, *Response, error) { + return do[*Issue](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/issues/%d/subscribe", ProjectID{pid}, issue), + withRequestOpts(options...), + ) } 
// UnsubscribeFromIssue unsubscribes the authenticated user from the given @@ -704,25 +589,12 @@ func (s *IssuesService) SubscribeToIssue(pid any, issue int, options ...RequestO // // GitLab API docs: // https://docs.gitlab.com/api/issues/#unsubscribe-from-an-issue -func (s *IssuesService) UnsubscribeFromIssue(pid any, issue int, options ...RequestOptionFunc) (*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/unsubscribe", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil +func (s *IssuesService) UnsubscribeFromIssue(pid any, issue int64, options ...RequestOptionFunc) (*Issue, *Response, error) { + return do[*Issue](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/issues/%d/unsubscribe", ProjectID{pid}, issue), + withRequestOpts(options...), + ) } // CreateTodo creates a todo for the current user for an issue. 
@@ -731,25 +603,12 @@ func (s *IssuesService) UnsubscribeFromIssue(pid any, issue int, options ...Requ // // GitLab API docs: // https://docs.gitlab.com/api/issues/#create-a-to-do-item -func (s *IssuesService) CreateTodo(pid any, issue int, options ...RequestOptionFunc) (*Todo, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/todo", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(Todo) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil +func (s *IssuesService) CreateTodo(pid any, issue int64, options ...RequestOptionFunc) (*Todo, *Response, error) { + return do[*Todo](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/issues/%d/todo", ProjectID{pid}, issue), + withRequestOpts(options...), + ) } // ListMergeRequestsClosingIssueOptions represents the available @@ -757,32 +616,21 @@ func (s *IssuesService) CreateTodo(pid any, issue int, options ...RequestOptionF // // GitLab API docs: // https://docs.gitlab.com/api/issues/#list-merge-requests-that-close-a-particular-issue-on-merge -type ListMergeRequestsClosingIssueOptions ListOptions +type ListMergeRequestsClosingIssueOptions struct { + ListOptions +} // ListMergeRequestsClosingIssue gets all the merge requests that will close // issue when merged. 
// // GitLab API docs: // https://docs.gitlab.com/api/issues/#list-merge-requests-that-close-a-particular-issue-on-merge -func (s *IssuesService) ListMergeRequestsClosingIssue(pid any, issue int, opt *ListMergeRequestsClosingIssueOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/closed_by", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*BasicMergeRequest - resp, err := s.client.Do(req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *IssuesService) ListMergeRequestsClosingIssue(pid any, issue int64, opt *ListMergeRequestsClosingIssueOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) { + return do[[]*BasicMergeRequest](s.client, + withPath("projects/%s/issues/%d/closed_by", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListMergeRequestsRelatedToIssueOptions represents the available @@ -790,42 +638,28 @@ func (s *IssuesService) ListMergeRequestsClosingIssue(pid any, issue int, opt *L // // GitLab API docs: // https://docs.gitlab.com/api/issues/#list-merge-requests-related-to-issue -type ListMergeRequestsRelatedToIssueOptions ListOptions +type ListMergeRequestsRelatedToIssueOptions struct { + ListOptions +} // ListMergeRequestsRelatedToIssue gets all the merge requests that are // related to the issue // // GitLab API docs: // https://docs.gitlab.com/api/issues/#list-merge-requests-related-to-issue -func (s *IssuesService) ListMergeRequestsRelatedToIssue(pid any, issue int, opt *ListMergeRequestsRelatedToIssueOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := 
fmt.Sprintf("projects/%s/issues/%d/related_merge_requests", - PathEscape(project), - issue, +func (s *IssuesService) ListMergeRequestsRelatedToIssue(pid any, issue int64, opt *ListMergeRequestsRelatedToIssueOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) { + return do[[]*BasicMergeRequest](s.client, + withPath("projects/%s/issues/%d/related_merge_requests", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*BasicMergeRequest - resp, err := s.client.Do(req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil } // SetTimeEstimate sets the time estimate for a single project issue. // // GitLab API docs: // https://docs.gitlab.com/api/issues/#set-a-time-estimate-for-an-issue -func (s *IssuesService) SetTimeEstimate(pid any, issue int, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { +func (s *IssuesService) SetTimeEstimate(pid any, issue int64, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { return s.timeStats.setTimeEstimate(pid, "issues", issue, opt, options...) } @@ -833,7 +667,7 @@ func (s *IssuesService) SetTimeEstimate(pid any, issue int, opt *SetTimeEstimate // // GitLab API docs: // https://docs.gitlab.com/api/issues/#reset-the-time-estimate-for-an-issue -func (s *IssuesService) ResetTimeEstimate(pid any, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { +func (s *IssuesService) ResetTimeEstimate(pid any, issue int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) { return s.timeStats.resetTimeEstimate(pid, "issues", issue, options...) 
} @@ -841,7 +675,7 @@ func (s *IssuesService) ResetTimeEstimate(pid any, issue int, options ...Request // // GitLab API docs: // https://docs.gitlab.com/api/issues/#add-spent-time-for-an-issue -func (s *IssuesService) AddSpentTime(pid any, issue int, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { +func (s *IssuesService) AddSpentTime(pid any, issue int64, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { return s.timeStats.addSpentTime(pid, "issues", issue, opt, options...) } @@ -849,7 +683,7 @@ func (s *IssuesService) AddSpentTime(pid any, issue int, opt *AddSpentTimeOption // // GitLab API docs: // https://docs.gitlab.com/api/issues/#reset-spent-time-for-an-issue -func (s *IssuesService) ResetSpentTime(pid any, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { +func (s *IssuesService) ResetSpentTime(pid any, issue int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) { return s.timeStats.resetSpentTime(pid, "issues", issue, options...) } @@ -857,7 +691,7 @@ func (s *IssuesService) ResetSpentTime(pid any, issue int, options ...RequestOpt // // GitLab API docs: // https://docs.gitlab.com/api/issues/#get-time-tracking-stats -func (s *IssuesService) GetTimeSpent(pid any, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { +func (s *IssuesService) GetTimeSpent(pid any, issue int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) { return s.timeStats.getTimeSpent(pid, "issues", issue, options...) 
} @@ -865,23 +699,9 @@ func (s *IssuesService) GetTimeSpent(pid any, issue int, options ...RequestOptio // // GitLab API docs: // https://docs.gitlab.com/api/issues/#list-participants-in-an-issue -func (s *IssuesService) GetParticipants(pid any, issue int, options ...RequestOptionFunc) ([]*BasicUser, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/participants", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var bu []*BasicUser - resp, err := s.client.Do(req, &bu) - if err != nil { - return nil, resp, err - } - - return bu, resp, nil +func (s *IssuesService) GetParticipants(pid any, issue int64, options ...RequestOptionFunc) ([]*BasicUser, *Response, error) { + return do[[]*BasicUser](s.client, + withPath("projects/%s/issues/%d/participants", ProjectID{pid}, issue), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/issues_statistics.go b/vendor/gitlab.com/gitlab-org/api/client-go/issues_statistics.go index 23f68169af..b3d13b1830 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/issues_statistics.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/issues_statistics.go @@ -17,15 +17,26 @@ package gitlab import ( - "fmt" - "net/http" "time" ) type ( IssuesStatisticsServiceInterface interface { + // GetIssuesStatistics gets issues statistics on all issues the authenticated + // user has access to. + // + // GitLab API docs: + // https://docs.gitlab.com/api/issues_statistics/#get-issues-statistics GetIssuesStatistics(opt *GetIssuesStatisticsOptions, options ...RequestOptionFunc) (*IssuesStatistics, *Response, error) + // GetGroupIssuesStatistics gets issues count statistics for given group. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/issues_statistics/#get-group-issues-statistics GetGroupIssuesStatistics(gid any, opt *GetGroupIssuesStatisticsOptions, options ...RequestOptionFunc) (*IssuesStatistics, *Response, error) + // GetProjectIssuesStatistics gets issues count statistics for given project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/issues_statistics/#get-project-issues-statistics GetProjectIssuesStatistics(pid any, opt *GetProjectIssuesStatisticsOptions, options ...RequestOptionFunc) (*IssuesStatistics, *Response, error) } @@ -44,19 +55,29 @@ var _ IssuesStatisticsServiceInterface = (*IssuesStatisticsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/issues_statistics/ type IssuesStatistics struct { - Statistics struct { - Counts struct { - All int `json:"all"` - Closed int `json:"closed"` - Opened int `json:"opened"` - } `json:"counts"` - } `json:"statistics"` + Statistics IssuesStatisticsStatistics `json:"statistics"` } func (n IssuesStatistics) String() string { return Stringify(n) } +// IssuesStatisticsStatistics represents a GitLab issues statistic statistics. +// +// GitLab API docs: https://docs.gitlab.com/api/issues_statistics/ +type IssuesStatisticsStatistics struct { + Counts IssuesStatisticsCounts `json:"counts"` +} + +// IssuesStatisticsCounts represents a GitLab issues statistic counts. +// +// GitLab API docs: https://docs.gitlab.com/api/issues_statistics/ +type IssuesStatisticsCounts struct { + All int64 `json:"all"` + Closed int64 `json:"closed"` + Opened int64 `json:"opened"` +} + // GetIssuesStatisticsOptions represents the available GetIssuesStatistics() options. 
// // GitLab API docs: @@ -65,12 +86,12 @@ type GetIssuesStatisticsOptions struct { Labels *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"` Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorID *int64 `url:"author_id,omitempty" json:"author_id,omitempty"` AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + AssigneeID *int64 `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` AssigneeUsername *[]string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + IIDs *[]int64 `url:"iids[],omitempty" json:"iids,omitempty"` Search *string `url:"search,omitempty" json:"search,omitempty"` In *string `url:"in,omitempty" json:"in,omitempty"` CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` @@ -80,24 +101,12 @@ type GetIssuesStatisticsOptions struct { Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` } -// GetIssuesStatistics gets issues statistics on all issues the authenticated -// user has access to. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/issues_statistics/#get-issues-statistics func (s *IssuesStatisticsService) GetIssuesStatistics(opt *GetIssuesStatisticsOptions, options ...RequestOptionFunc) (*IssuesStatistics, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "issues_statistics", opt, options) - if err != nil { - return nil, nil, err - } - - is := new(IssuesStatistics) - resp, err := s.client.Do(req, is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil + return do[*IssuesStatistics](s.client, + withPath("issues_statistics"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupIssuesStatisticsOptions represents the available GetGroupIssuesStatistics() @@ -107,12 +116,12 @@ func (s *IssuesStatisticsService) GetIssuesStatistics(opt *GetIssuesStatisticsOp // https://docs.gitlab.com/api/issues_statistics/#get-group-issues-statistics type GetGroupIssuesStatisticsOptions struct { Labels *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"` - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + IIDs *[]int64 `url:"iids[],omitempty" json:"iids,omitempty"` Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorID *int64 `url:"author_id,omitempty" json:"author_id,omitempty"` AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + AssigneeID *int64 `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` AssigneeUsername *[]string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` Search *string `url:"search,omitempty" json:"search,omitempty"` @@ -123,29 +132,12 @@ type 
GetGroupIssuesStatisticsOptions struct { Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` } -// GetGroupIssuesStatistics gets issues count statistics for given group. -// -// GitLab API docs: -// https://docs.gitlab.com/api/issues_statistics/#get-group-issues-statistics func (s *IssuesStatisticsService) GetGroupIssuesStatistics(gid any, opt *GetGroupIssuesStatisticsOptions, options ...RequestOptionFunc) (*IssuesStatistics, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/issues_statistics", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - is := new(IssuesStatistics) - resp, err := s.client.Do(req, is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil + return do[*IssuesStatistics](s.client, + withPath("groups/%s/issues_statistics", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetProjectIssuesStatisticsOptions represents the available @@ -154,13 +146,13 @@ func (s *IssuesStatisticsService) GetGroupIssuesStatistics(gid any, opt *GetGrou // GitLab API docs: // https://docs.gitlab.com/api/issues_statistics/#get-project-issues-statistics type GetProjectIssuesStatisticsOptions struct { - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + IIDs *[]int64 `url:"iids[],omitempty" json:"iids,omitempty"` Labels *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"` Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorID *int64 `url:"author_id,omitempty" json:"author_id,omitempty"` AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + AssigneeID 
*int64 `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` AssigneeUsername *[]string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` Search *string `url:"search,omitempty" json:"search,omitempty"` @@ -171,27 +163,10 @@ type GetProjectIssuesStatisticsOptions struct { Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` } -// GetProjectIssuesStatistics gets issues count statistics for given project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/issues_statistics/#get-project-issues-statistics func (s *IssuesStatisticsService) GetProjectIssuesStatistics(pid any, opt *GetProjectIssuesStatisticsOptions, options ...RequestOptionFunc) (*IssuesStatistics, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues_statistics", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - is := new(IssuesStatistics) - resp, err := s.client.Do(req, is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil + return do[*IssuesStatistics](s.client, + withPath("projects/%s/issues_statistics", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/job_token_scope.go b/vendor/gitlab.com/gitlab-org/api/client-go/job_token_scope.go index 6c2fa0ca0c..7d418a677d 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/job_token_scope.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/job_token_scope.go @@ -11,12 +11,10 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+ package gitlab -import ( - "fmt" - "net/http" -) +import "net/http" type ( JobTokenScopeServiceInterface interface { @@ -24,10 +22,10 @@ type ( PatchProjectJobTokenAccessSettings(pid any, opt *PatchProjectJobTokenAccessSettingsOptions, options ...RequestOptionFunc) (*Response, error) GetProjectJobTokenInboundAllowList(pid any, opt *GetJobTokenInboundAllowListOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) AddProjectToJobScopeAllowList(pid any, opt *JobTokenInboundAllowOptions, options ...RequestOptionFunc) (*JobTokenInboundAllowItem, *Response, error) - RemoveProjectFromJobScopeAllowList(pid any, targetProject int, options ...RequestOptionFunc) (*Response, error) + RemoveProjectFromJobScopeAllowList(pid any, targetProject int64, options ...RequestOptionFunc) (*Response, error) GetJobTokenAllowlistGroups(pid any, opt *GetJobTokenAllowlistGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) AddGroupToJobTokenAllowlist(pid any, opt *AddGroupToJobTokenAllowlistOptions, options ...RequestOptionFunc) (*JobTokenAllowlistItem, *Response, error) - RemoveGroupFromJobTokenAllowlist(pid any, targetGroup int, options ...RequestOptionFunc) (*Response, error) + RemoveGroupFromJobTokenAllowlist(pid any, targetGroup int64, options ...RequestOptionFunc) (*Response, error) } // JobTokenScopeService handles communication with project CI settings @@ -53,24 +51,10 @@ type JobTokenAccessSettings struct { // GitLab API docs: // https://docs.gitlab.com/api/project_job_token_scopes/#get-a-projects-cicd-job-token-access-settings func (j *JobTokenScopeService) GetProjectJobTokenAccessSettings(pid any, options ...RequestOptionFunc) (*JobTokenAccessSettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf(`projects/%s/job_token_scope`, PathEscape(project)) - - req, err := j.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - jt := 
new(JobTokenAccessSettings) - resp, err := j.client.Do(req, jt) - if err != nil { - return nil, resp, err - } - - return jt, resp, err + return do[*JobTokenAccessSettings](j.client, + withPath("projects/%s/job_token_scope", ProjectID{pid}), + withRequestOpts(options...), + ) } // PatchProjectJobTokenAccessSettingsOptions represents the available @@ -87,26 +71,21 @@ type PatchProjectJobTokenAccessSettingsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_job_token_scopes/#patch-a-projects-cicd-job-token-access-settings func (j *JobTokenScopeService) PatchProjectJobTokenAccessSettings(pid any, opt *PatchProjectJobTokenAccessSettingsOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf(`projects/%s/job_token_scope`, PathEscape(project)) - - req, err := j.client.NewRequest(http.MethodPatch, u, opt, options) - if err != nil { - return nil, err - } - - return j.client.Do(req, nil) + _, resp, err := do[none](j.client, + withMethod(http.MethodPatch), + withPath("projects/%s/job_token_scope", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // JobTokenInboundAllowItem represents a single job token inbound allowlist item. 
// // GitLab API docs: https://docs.gitlab.com/api/project_job_token_scopes/ type JobTokenInboundAllowItem struct { - SourceProjectID int `json:"source_project_id"` - TargetProjectID int `json:"target_project_id"` + SourceProjectID int64 `json:"source_project_id"` + TargetProjectID int64 `json:"target_project_id"` } // GetJobTokenInboundAllowListOptions represents the available @@ -124,24 +103,11 @@ type GetJobTokenInboundAllowListOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_job_token_scopes/#get-a-projects-cicd-job-token-inbound-allowlist func (j *JobTokenScopeService) GetProjectJobTokenInboundAllowList(pid any, opt *GetJobTokenInboundAllowListOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf(`projects/%s/job_token_scope/allowlist`, PathEscape(project)) - - req, err := j.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Project - resp, err := j.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil + return do[[]*Project](j.client, + withPath("projects/%s/job_token_scope/allowlist", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // JobTokenInboundAllowOptions represents the available @@ -150,7 +116,7 @@ func (j *JobTokenScopeService) GetProjectJobTokenInboundAllowList(pid any, opt * // GitLab API docs: // https://docs.gitlab.com/api/project_job_token_scopes/#add-a-project-to-a-cicd-job-token-inbound-allowlist type JobTokenInboundAllowOptions struct { - TargetProjectID *int `url:"target_project_id,omitempty" json:"target_project_id,omitempty"` + TargetProjectID *int64 `url:"target_project_id,omitempty" json:"target_project_id,omitempty"` } // AddProjectToJobScopeAllowList adds a new project to a project's job token @@ -159,24 +125,12 @@ type JobTokenInboundAllowOptions struct { // GitLab API docs: 
// https://docs.gitlab.com/api/project_job_token_scopes/#add-a-project-to-a-cicd-job-token-inbound-allowlist func (j *JobTokenScopeService) AddProjectToJobScopeAllowList(pid any, opt *JobTokenInboundAllowOptions, options ...RequestOptionFunc) (*JobTokenInboundAllowItem, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf(`projects/%s/job_token_scope/allowlist`, PathEscape(project)) - - req, err := j.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - jt := new(JobTokenInboundAllowItem) - resp, err := j.client.Do(req, jt) - if err != nil { - return nil, resp, err - } - - return jt, resp, nil + return do[*JobTokenInboundAllowItem](j.client, + withMethod(http.MethodPost), + withPath("projects/%s/job_token_scope/allowlist", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RemoveProjectFromJobScopeAllowList removes a project from a project's job @@ -184,27 +138,21 @@ func (j *JobTokenScopeService) AddProjectToJobScopeAllowList(pid any, opt *JobTo // // GitLab API docs: // https://docs.gitlab.com/api/project_job_token_scopes/#remove-a-project-from-a-cicd-job-token-inbound-allowlist -func (j *JobTokenScopeService) RemoveProjectFromJobScopeAllowList(pid any, targetProject int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf(`projects/%s/job_token_scope/allowlist/%d`, PathEscape(project), targetProject) - - req, err := j.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return j.client.Do(req, nil) +func (j *JobTokenScopeService) RemoveProjectFromJobScopeAllowList(pid any, targetProject int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](j.client, + withMethod(http.MethodDelete), + withPath("projects/%s/job_token_scope/allowlist/%d", ProjectID{pid}, targetProject), 
+ withRequestOpts(options...), + ) + return resp, err } // JobTokenAllowlistItem represents a single job token allowlist item. // // GitLab API docs: https://docs.gitlab.com/api/project_job_token_scopes/ type JobTokenAllowlistItem struct { - SourceProjectID int `json:"source_project_id"` - TargetGroupID int `json:"target_group_id"` + SourceProjectID int64 `json:"source_project_id"` + TargetGroupID int64 `json:"target_group_id"` } // GetJobTokenAllowlistGroupsOptions represents the available @@ -222,24 +170,11 @@ type GetJobTokenAllowlistGroupsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_job_token_scopes/#get-a-projects-cicd-job-token-allowlist-of-groups func (j *JobTokenScopeService) GetJobTokenAllowlistGroups(pid any, opt *GetJobTokenAllowlistGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf(`projects/%s/job_token_scope/groups_allowlist`, PathEscape(project)) - - req, err := j.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Group - resp, err := j.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil + return do[[]*Group](j.client, + withPath("projects/%s/job_token_scope/groups_allowlist", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // AddGroupToJobTokenAllowlistOptions represents the available @@ -248,7 +183,7 @@ func (j *JobTokenScopeService) GetJobTokenAllowlistGroups(pid any, opt *GetJobTo // GitLab API docs: // https://docs.gitlab.com/api/project_job_token_scopes/#add-a-group-to-a-cicd-job-token-allowlist type AddGroupToJobTokenAllowlistOptions struct { - TargetGroupID *int `url:"target_group_id,omitempty" json:"target_group_id,omitempty"` + TargetGroupID *int64 `url:"target_group_id,omitempty" json:"target_group_id,omitempty"` } // AddGroupToJobTokenAllowlist adds a new group to a 
project's job token @@ -257,24 +192,12 @@ type AddGroupToJobTokenAllowlistOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_job_token_scopes/#add-a-group-to-a-cicd-job-token-allowlist func (j *JobTokenScopeService) AddGroupToJobTokenAllowlist(pid any, opt *AddGroupToJobTokenAllowlistOptions, options ...RequestOptionFunc) (*JobTokenAllowlistItem, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf(`projects/%s/job_token_scope/groups_allowlist`, PathEscape(project)) - - req, err := j.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - jt := new(JobTokenAllowlistItem) - resp, err := j.client.Do(req, jt) - if err != nil { - return nil, resp, err - } - - return jt, resp, nil + return do[*JobTokenAllowlistItem](j.client, + withMethod(http.MethodPost), + withPath("projects/%s/job_token_scope/groups_allowlist", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RemoveGroupFromJobTokenAllowlist removes a group from a project's job @@ -282,17 +205,11 @@ func (j *JobTokenScopeService) AddGroupToJobTokenAllowlist(pid any, opt *AddGrou // // GitLab API docs: // https://docs.gitlab.com/api/project_job_token_scopes/#remove-a-group-from-a-cicd-job-token-allowlist -func (j *JobTokenScopeService) RemoveGroupFromJobTokenAllowlist(pid any, targetGroup int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf(`projects/%s/job_token_scope/groups_allowlist/%d`, PathEscape(project), targetGroup) - - req, err := j.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return j.client.Do(req, nil) +func (j *JobTokenScopeService) RemoveGroupFromJobTokenAllowlist(pid any, targetGroup int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](j.client, + 
withMethod(http.MethodDelete), + withPath("projects/%s/job_token_scope/groups_allowlist/%d", ProjectID{pid}, targetGroup), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/jobs.go b/vendor/gitlab.com/gitlab-org/api/client-go/jobs.go index 94aef9540a..de69815d86 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/jobs.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/jobs.go @@ -18,29 +18,108 @@ package gitlab import ( "bytes" - "fmt" "net/http" "time" ) type ( JobsServiceInterface interface { + // ListProjectJobs gets a list of jobs in a project. + // + // The scope of jobs to show, one or array of: created, pending, running, + // failed, success, canceled, skipped; showing all jobs if none provided + // + // GitLab API docs: + // https://docs.gitlab.com/api/jobs/#list-project-jobs ListProjectJobs(pid any, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) - ListPipelineJobs(pid any, pipelineID int, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) - ListPipelineBridges(pid any, pipelineID int, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Bridge, *Response, error) + // ListPipelineJobs gets a list of jobs for specific pipeline in a + // project. If the pipeline ID is not found, it will respond with 404. + // + // GitLab API docs: + // https://docs.gitlab.com/api/jobs/#list-pipeline-jobs + ListPipelineJobs(pid any, pipelineID int64, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) + // ListPipelineBridges gets a list of bridges for specific pipeline in a + // project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/jobs/#list-pipeline-trigger-jobs + ListPipelineBridges(pid any, pipelineID int64, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Bridge, *Response, error) + // GetJobTokensJob retrieves the job that generated a job token. 
+ // + // GitLab API docs: https://docs.gitlab.com/api/jobs/#get-job-tokens-job GetJobTokensJob(opts *GetJobTokensJobOptions, options ...RequestOptionFunc) (*Job, *Response, error) - GetJob(pid any, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) - GetJobArtifacts(pid any, jobID int, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) + // GetJob gets a single job of a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/jobs/#get-a-single-job + GetJob(pid any, jobID int64, options ...RequestOptionFunc) (*Job, *Response, error) + // GetJobArtifacts gets jobs artifacts of a project + // + // GitLab API docs: + // https://docs.gitlab.com/api/job_artifacts/#get-job-artifacts + GetJobArtifacts(pid any, jobID int64, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) + // DownloadArtifactsFile downloads the artifacts file from the given + // reference name and job provided the job finished successfully. + // + // GitLab API docs: + // https://docs.gitlab.com/api/job_artifacts/#download-the-artifacts-archive DownloadArtifactsFile(pid any, refName string, opt *DownloadArtifactsFileOptions, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) - DownloadSingleArtifactsFile(pid any, jobID int, artifactPath string, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) + // DownloadSingleArtifactsFile downloads a file from the artifacts from the + // given reference name and job provided the job finished successfully. + // Only a single file is going to be extracted from the archive and streamed + // to a client. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/job_artifacts/#download-a-single-artifact-file-by-job-id + DownloadSingleArtifactsFile(pid any, jobID int64, artifactPath string, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) + // DownloadSingleArtifactsFileByTagOrBranch downloads a single file from + // a job's artifacts in the latest successful pipeline using the reference name. + // The file is extracted from the archive and streamed to the client. + // + // GitLab API docs: + // https://docs.gitlab.com/api/job_artifacts/#download-a-single-artifact-file-from-specific-tag-or-branch DownloadSingleArtifactsFileByTagOrBranch(pid any, refName string, artifactPath string, opt *DownloadArtifactsFileOptions, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) - GetTraceFile(pid any, jobID int, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) - CancelJob(pid any, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) - RetryJob(pid any, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) - EraseJob(pid any, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) - KeepArtifacts(pid any, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) - PlayJob(pid any, jobID int, opt *PlayJobOptions, options ...RequestOptionFunc) (*Job, *Response, error) - DeleteArtifacts(pid any, jobID int, options ...RequestOptionFunc) (*Response, error) + // GetTraceFile gets a trace of a specific job of a project + // + // GitLab API docs: + // https://docs.gitlab.com/api/jobs/#get-a-log-file + GetTraceFile(pid any, jobID int64, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) + // CancelJob cancels a single job of a project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/jobs/#cancel-a-job + CancelJob(pid any, jobID int64, options ...RequestOptionFunc) (*Job, *Response, error) + // RetryJob retries a single job of a project + // + // GitLab API docs: + // https://docs.gitlab.com/api/jobs/#retry-a-job + RetryJob(pid any, jobID int64, options ...RequestOptionFunc) (*Job, *Response, error) + // EraseJob erases a single job of a project, removes a job + // artifacts and a job trace. + // + // GitLab API docs: + // https://docs.gitlab.com/api/jobs/#erase-a-job + EraseJob(pid any, jobID int64, options ...RequestOptionFunc) (*Job, *Response, error) + // KeepArtifacts prevents artifacts from being deleted when + // expiration is set. + // + // GitLab API docs: + // https://docs.gitlab.com/api/job_artifacts/#keep-artifacts + KeepArtifacts(pid any, jobID int64, options ...RequestOptionFunc) (*Job, *Response, error) + // PlayJob triggers a manual action to start a job. + // + // GitLab API docs: + // https://docs.gitlab.com/api/jobs/#run-a-job + PlayJob(pid any, jobID int64, opt *PlayJobOptions, options ...RequestOptionFunc) (*Job, *Response, error) + // DeleteArtifacts deletes artifacts of a job + // + // GitLab API docs: + // https://docs.gitlab.com/api/job_artifacts/#delete-job-artifacts + DeleteArtifacts(pid any, jobID int64, options ...RequestOptionFunc) (*Response, error) + // DeleteProjectArtifacts deletes artifacts eligible for deletion in a project + // + // GitLab API docs: + // https://docs.gitlab.com/api/job_artifacts/#delete-job-artifacts DeleteProjectArtifacts(pid any, options ...RequestOptionFunc) (*Response, error) } @@ -59,51 +138,71 @@ var _ JobsServiceInterface = (*JobsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/jobs/ type Job struct { - Commit *Commit `json:"commit"` - Coverage float64 `json:"coverage"` - AllowFailure bool `json:"allow_failure"` - CreatedAt *time.Time `json:"created_at"` - StartedAt *time.Time `json:"started_at"` - FinishedAt 
*time.Time `json:"finished_at"` - ErasedAt *time.Time `json:"erased_at"` - Duration float64 `json:"duration"` - QueuedDuration float64 `json:"queued_duration"` - ArtifactsExpireAt *time.Time `json:"artifacts_expire_at"` - TagList []string `json:"tag_list"` - ID int `json:"id"` - Name string `json:"name"` - Pipeline struct { - ID int `json:"id"` - ProjectID int `json:"project_id"` - Ref string `json:"ref"` - Sha string `json:"sha"` - Status string `json:"status"` - } `json:"pipeline"` + Commit *Commit `json:"commit"` + Coverage float64 `json:"coverage"` + AllowFailure bool `json:"allow_failure"` + CreatedAt *time.Time `json:"created_at"` + StartedAt *time.Time `json:"started_at"` + FinishedAt *time.Time `json:"finished_at"` + ErasedAt *time.Time `json:"erased_at"` + Duration float64 `json:"duration"` + QueuedDuration float64 `json:"queued_duration"` + ArtifactsExpireAt *time.Time `json:"artifacts_expire_at"` + TagList []string `json:"tag_list"` + ID int64 `json:"id"` + Name string `json:"name"` + Pipeline JobPipeline `json:"pipeline"` + Ref string `json:"ref"` + Artifacts []JobArtifact `json:"artifacts"` + ArtifactsFile JobArtifactsFile `json:"artifacts_file"` + Runner JobRunner `json:"runner"` + Stage string `json:"stage"` + Status string `json:"status"` + FailureReason string `json:"failure_reason"` + Tag bool `json:"tag"` + WebURL string `json:"web_url"` + Project *Project `json:"project"` + User *User `json:"user"` +} + +// JobPipeline represents a ci build pipeline. 
+// +// GitLab API docs: https://docs.gitlab.com/api/jobs/ +type JobPipeline struct { + ID int64 `json:"id"` + ProjectID int64 `json:"project_id"` Ref string `json:"ref"` - Artifacts []struct { - FileType string `json:"file_type"` - Filename string `json:"filename"` - Size int `json:"size"` - FileFormat string `json:"file_format"` - } `json:"artifacts"` - ArtifactsFile struct { - Filename string `json:"filename"` - Size int `json:"size"` - } `json:"artifacts_file"` - Runner struct { - ID int `json:"id"` - Description string `json:"description"` - Active bool `json:"active"` - IsShared bool `json:"is_shared"` - Name string `json:"name"` - } `json:"runner"` - Stage string `json:"stage"` - Status string `json:"status"` - FailureReason string `json:"failure_reason"` - Tag bool `json:"tag"` - WebURL string `json:"web_url"` - Project *Project `json:"project"` - User *User `json:"user"` + Sha string `json:"sha"` + Status string `json:"status"` +} + +// JobArtifact represents a ci build artifact. +// +// GitLab API docs: https://docs.gitlab.com/api/jobs/ +type JobArtifact struct { + FileType string `json:"file_type"` + Filename string `json:"filename"` + Size int64 `json:"size"` + FileFormat string `json:"file_format"` +} + +// JobArtifactsFile represents a ci build artifacts file. +// +// GitLab API docs: https://docs.gitlab.com/api/jobs/ +type JobArtifactsFile struct { + Filename string `json:"filename"` + Size int64 `json:"size"` +} + +// JobRunner represents a ci build runner. +// +// GitLab API docs: https://docs.gitlab.com/api/jobs/ +type JobRunner struct { + ID int64 `json:"id"` + Description string `json:"description"` + Active bool `json:"active"` + IsShared bool `json:"is_shared"` + Name string `json:"name"` } // Bridge represents a pipeline bridge. 
@@ -119,7 +218,7 @@ type Bridge struct { ErasedAt *time.Time `json:"erased_at"` Duration float64 `json:"duration"` QueuedDuration float64 `json:"queued_duration"` - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Pipeline PipelineInfo `json:"pipeline"` Ref string `json:"ref"` @@ -142,84 +241,31 @@ type ListJobsOptions struct { IncludeRetried *bool `url:"include_retried,omitempty" json:"include_retried,omitempty"` } -// ListProjectJobs gets a list of jobs in a project. -// -// The scope of jobs to show, one or array of: created, pending, running, -// failed, success, canceled, skipped; showing all jobs if none provided -// -// GitLab API docs: -// https://docs.gitlab.com/api/jobs/#list-project-jobs func (s *JobsService) ListProjectJobs(pid any, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var jobs []*Job - resp, err := s.client.Do(req, &jobs) - if err != nil { - return nil, resp, err - } - - return jobs, resp, nil + return do[[]*Job](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/jobs", ProjectID{pid}), + withAPIOpts(opts), + withRequestOpts(options...), + ) } -// ListPipelineJobs gets a list of jobs for specific pipeline in a -// project. If the pipeline ID is not found, it will respond with 404. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/jobs/#list-pipeline-jobs -func (s *JobsService) ListPipelineJobs(pid any, pipelineID int, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipelines/%d/jobs", PathEscape(project), pipelineID) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var jobs []*Job - resp, err := s.client.Do(req, &jobs) - if err != nil { - return nil, resp, err - } - - return jobs, resp, nil +func (s *JobsService) ListPipelineJobs(pid any, pipelineID int64, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) { + return do[[]*Job](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/pipelines/%d/jobs", ProjectID{pid}, pipelineID), + withAPIOpts(opts), + withRequestOpts(options...), + ) } -// ListPipelineBridges gets a list of bridges for specific pipeline in a -// project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/jobs/#list-pipeline-trigger-jobs -func (s *JobsService) ListPipelineBridges(pid any, pipelineID int, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Bridge, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipelines/%d/bridges", PathEscape(project), pipelineID) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var bridges []*Bridge - resp, err := s.client.Do(req, &bridges) - if err != nil { - return nil, resp, err - } - - return bridges, resp, nil +func (s *JobsService) ListPipelineBridges(pid any, pipelineID int64, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Bridge, *Response, error) { + return do[[]*Bridge](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/pipelines/%d/bridges", ProjectID{pid}, pipelineID), + withAPIOpts(opts), + withRequestOpts(options...), + ) } // GetJobTokensJobOptions represents the available GetJobTokensJob() options. @@ -229,72 +275,31 @@ type GetJobTokensJobOptions struct { JobToken *string `url:"job_token,omitempty" json:"job_token,omitempty"` } -// GetJobTokensJob retrieves the job that generated a job token. -// -// GitLab API docs: https://docs.gitlab.com/api/jobs/#get-job-tokens-job func (s *JobsService) GetJobTokensJob(opts *GetJobTokensJobOptions, options ...RequestOptionFunc) (*Job, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "job", opts, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil + return do[*Job](s.client, + withMethod(http.MethodGet), + withPath("job"), + withAPIOpts(opts), + withRequestOpts(options...), + ) } -// GetJob gets a single job of a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/jobs/#get-a-single-job -func (s *JobsService) GetJob(pid any, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil +func (s *JobsService) GetJob(pid any, jobID int64, options ...RequestOptionFunc) (*Job, *Response, error) { + return do[*Job](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/jobs/%d", ProjectID{pid}, jobID), + withAPIOpts(nil), + withRequestOpts(options...), + ) } -// GetJobArtifacts get jobs artifacts of a project -// -// GitLab API docs: -// https://docs.gitlab.com/api/job_artifacts/#get-job-artifacts -func (s *JobsService) GetJobArtifacts(pid any, jobID int, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/artifacts", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - artifactsBuf := new(bytes.Buffer) - resp, err := s.client.Do(req, artifactsBuf) - if err != nil { - return nil, resp, err - } +func (s *JobsService) GetJobArtifacts(pid any, jobID int64, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { + b, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/jobs/%d/artifacts", ProjectID{pid}, jobID), + withRequestOpts(options...), + ) - return bytes.NewReader(artifactsBuf.Bytes()), resp, err + return bytes.NewReader(b.Bytes()), resp, err } // DownloadArtifactsFileOptions represents the available DownloadArtifactsFile() @@ -306,224 
+311,78 @@ type DownloadArtifactsFileOptions struct { Job *string `url:"job" json:"job"` } -// DownloadArtifactsFile download the artifacts file from the given -// reference name and job provided the job finished successfully. -// -// GitLab API docs: -// https://docs.gitlab.com/api/job_artifacts/#download-the-artifacts-archive func (s *JobsService) DownloadArtifactsFile(pid any, refName string, opt *DownloadArtifactsFileOptions, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/artifacts/%s/download", PathEscape(project), refName) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - artifactsBuf := new(bytes.Buffer) - resp, err := s.client.Do(req, artifactsBuf) - if err != nil { - return nil, resp, err - } + b, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/jobs/artifacts/%s/download", ProjectID{pid}, NoEscape{refName}), + withAPIOpts(opt), + withRequestOpts(options...), + ) - return bytes.NewReader(artifactsBuf.Bytes()), resp, err + return bytes.NewReader(b.Bytes()), resp, err } -// DownloadSingleArtifactsFile download a file from the artifacts from the -// given reference name and job provided the job finished successfully. -// Only a single file is going to be extracted from the archive and streamed -// to a client. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/job_artifacts/#download-a-single-artifact-file-by-job-id -func (s *JobsService) DownloadSingleArtifactsFile(pid any, jobID int, artifactPath string, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - - u := fmt.Sprintf( - "projects/%s/jobs/%d/artifacts/%s", - PathEscape(project), - jobID, - artifactPath, +func (s *JobsService) DownloadSingleArtifactsFile(pid any, jobID int64, artifactPath string, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { + b, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/jobs/%d/artifacts/%s", ProjectID{pid}, jobID, NoEscape{artifactPath}), + withRequestOpts(options...), ) - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - artifactBuf := new(bytes.Buffer) - resp, err := s.client.Do(req, artifactBuf) - if err != nil { - return nil, resp, err - } - - return bytes.NewReader(artifactBuf.Bytes()), resp, err + return bytes.NewReader(b.Bytes()), resp, err } -// DownloadSingleArtifactsFileByTagOrBranch downloads a single file from -// a job’s artifacts in the latest successful pipeline using the reference name. -// The file is extracted from the archive and streamed to the client. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/job_artifacts/#download-a-single-artifact-file-from-specific-tag-or-branch func (s *JobsService) DownloadSingleArtifactsFileByTagOrBranch(pid any, refName string, artifactPath string, opt *DownloadArtifactsFileOptions, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - - u := fmt.Sprintf( - "projects/%s/jobs/artifacts/%s/raw/%s", - PathEscape(project), - PathEscape(refName), - artifactPath, + b, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/jobs/artifacts/%s/raw/%s", ProjectID{pid}, refName, NoEscape{artifactPath}), + withAPIOpts(opt), + withRequestOpts(options...), ) - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - artifactBuf := new(bytes.Buffer) - resp, err := s.client.Do(req, artifactBuf) - if err != nil { - return nil, resp, err - } - - return bytes.NewReader(artifactBuf.Bytes()), resp, err + return bytes.NewReader(b.Bytes()), resp, err } -// GetTraceFile gets a trace of a specific job of a project -// -// GitLab API docs: -// https://docs.gitlab.com/api/jobs/#get-a-log-file -func (s *JobsService) GetTraceFile(pid any, jobID int, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/trace", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - traceBuf := new(bytes.Buffer) - resp, err := s.client.Do(req, traceBuf) - if err != nil { - return nil, resp, err - } +func (s *JobsService) GetTraceFile(pid any, jobID int64, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { + b, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/jobs/%d/trace", ProjectID{pid}, jobID), + 
withRequestOpts(options...), + ) - return bytes.NewReader(traceBuf.Bytes()), resp, err + return bytes.NewReader(b.Bytes()), resp, err } -// CancelJob cancels a single job of a project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/jobs/#cancel-a-job -func (s *JobsService) CancelJob(pid any, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/cancel", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil +func (s *JobsService) CancelJob(pid any, jobID int64, options ...RequestOptionFunc) (*Job, *Response, error) { + return do[*Job](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/jobs/%d/cancel", ProjectID{pid}, jobID), + withAPIOpts(nil), + withRequestOpts(options...), + ) } -// RetryJob retries a single job of a project -// -// GitLab API docs: -// https://docs.gitlab.com/api/jobs/#retry-a-job -func (s *JobsService) RetryJob(pid any, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/retry", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil +func (s *JobsService) RetryJob(pid any, jobID int64, options ...RequestOptionFunc) (*Job, *Response, error) { + return do[*Job](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/jobs/%d/retry", ProjectID{pid}, jobID), + withAPIOpts(nil), + withRequestOpts(options...), + ) } -// EraseJob erases 
a single job of a project, removes a job -// artifacts and a job trace. -// -// GitLab API docs: -// https://docs.gitlab.com/api/jobs/#erase-a-job -func (s *JobsService) EraseJob(pid any, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/erase", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil +func (s *JobsService) EraseJob(pid any, jobID int64, options ...RequestOptionFunc) (*Job, *Response, error) { + return do[*Job](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/jobs/%d/erase", ProjectID{pid}, jobID), + withAPIOpts(nil), + withRequestOpts(options...), + ) } -// KeepArtifacts prevents artifacts from being deleted when -// expiration is set. -// -// GitLab API docs: -// https://docs.gitlab.com/api/job_artifacts/#keep-artifacts -func (s *JobsService) KeepArtifacts(pid any, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/artifacts/keep", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil +func (s *JobsService) KeepArtifacts(pid any, jobID int64, options ...RequestOptionFunc) (*Job, *Response, error) { + return do[*Job](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/jobs/%d/artifacts/keep", ProjectID{pid}, jobID), + withAPIOpts(nil), + withRequestOpts(options...), + ) } // PlayJobOptions represents the available PlayJob() options. 
@@ -544,65 +403,31 @@ type JobVariableOptions struct { VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } -// PlayJob triggers a manual action to start a job. -// -// GitLab API docs: -// https://docs.gitlab.com/api/jobs/#run-a-job -func (s *JobsService) PlayJob(pid any, jobID int, opt *PlayJobOptions, options ...RequestOptionFunc) (*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/play", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil +func (s *JobsService) PlayJob(pid any, jobID int64, opt *PlayJobOptions, options ...RequestOptionFunc) (*Job, *Response, error) { + return do[*Job](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/jobs/%d/play", ProjectID{pid}, jobID), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteArtifacts delete artifacts of a job -// -// GitLab API docs: -// https://docs.gitlab.com/api/job_artifacts/#delete-job-artifacts -func (s *JobsService) DeleteArtifacts(pid any, jobID int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/artifacts", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *JobsService) DeleteArtifacts(pid any, jobID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/jobs/%d/artifacts", ProjectID{pid}, jobID), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } -// 
DeleteProjectArtifacts delete artifacts eligible for deletion in a project -// -// GitLab API docs: -// https://docs.gitlab.com/api/job_artifacts/#delete-job-artifacts func (s *JobsService) DeleteProjectArtifacts(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/artifacts", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/artifacts", ProjectID{pid}), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/keys.go b/vendor/gitlab.com/gitlab-org/api/client-go/keys.go index 3b2187c65f..2fde4860b5 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/keys.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/keys.go @@ -16,15 +16,11 @@ package gitlab -import ( - "fmt" - "net/http" - "time" -) +import "time" type ( KeysServiceInterface interface { - GetKeyWithUser(key int, options ...RequestOptionFunc) (*Key, *Response, error) + GetKeyWithUser(key int64, options ...RequestOptionFunc) (*Key, *Response, error) GetKeyByFingerprint(opt *GetKeyByFingerprintOptions, options ...RequestOptionFunc) (*Key, *Response, error) } @@ -45,7 +41,7 @@ var _ KeysServiceInterface = (*KeysService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/keys/ type Key struct { - ID int `json:"id"` + ID int64 `json:"id"` Title string `json:"title"` Key string `json:"key"` CreatedAt *time.Time `json:"created_at"` @@ -57,21 +53,11 @@ type Key struct { // // GitLab API docs: // https://docs.gitlab.com/api/keys/#get-ssh-key-with-user-by-id-of-an-ssh-key -func (s *KeysService) GetKeyWithUser(key int, options ...RequestOptionFunc) (*Key, *Response, error) { - u := fmt.Sprintf("keys/%d", key) - - req, err 
:= s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(Key) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil +func (s *KeysService) GetKeyWithUser(key int64, options ...RequestOptionFunc) (*Key, *Response, error) { + return do[*Key](s.client, + withPath("keys/%d", key), + withRequestOpts(options...), + ) } // GetKeyByFingerprintOptions represents the available GetKeyByFingerprint() @@ -91,16 +77,9 @@ type GetKeyByFingerprintOptions struct { // https://docs.gitlab.com/api/keys/#get-user-by-fingerprint-of-ssh-key // https://docs.gitlab.com/api/keys/#get-user-by-deploy-key-fingerprint func (s *KeysService) GetKeyByFingerprint(opt *GetKeyByFingerprintOptions, options ...RequestOptionFunc) (*Key, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "keys", opt, options) - if err != nil { - return nil, nil, err - } - - k := new(Key) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil + return do[*Key](s.client, + withPath("keys"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/labels.go b/vendor/gitlab.com/gitlab-org/api/client-go/labels.go index d7eb1e6240..1c6a379d0b 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/labels.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/labels.go @@ -18,7 +18,6 @@ package gitlab import ( "encoding/json" - "fmt" "net/http" ) @@ -49,16 +48,16 @@ var _ LabelsServiceInterface = (*LabelsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/labels/ type Label struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Color string `json:"color"` TextColor string `json:"text_color"` Description string `json:"description"` - OpenIssuesCount int `json:"open_issues_count"` - ClosedIssuesCount int `json:"closed_issues_count"` - OpenMergeRequestsCount int 
`json:"open_merge_requests_count"` + OpenIssuesCount int64 `json:"open_issues_count"` + ClosedIssuesCount int64 `json:"closed_issues_count"` + OpenMergeRequestsCount int64 `json:"open_merge_requests_count"` Subscribed bool `json:"subscribed"` - Priority int `json:"priority"` + Priority int64 `json:"priority"` IsProjectLabel bool `json:"is_project_label"` } @@ -94,68 +93,39 @@ type ListLabelsOptions struct { WithCounts *bool `url:"with_counts,omitempty" json:"with_counts,omitempty"` IncludeAncestorGroups *bool `url:"include_ancestor_groups,omitempty" json:"include_ancestor_groups,omitempty"` Search *string `url:"search,omitempty" json:"search,omitempty"` + Archived *bool `url:"archived,omitempty" json:"archived,omitempty"` } // ListLabels gets all labels for given project. // // GitLab API docs: https://docs.gitlab.com/api/labels/#list-labels func (s *LabelsService) ListLabels(pid any, opt *ListLabelsOptions, options ...RequestOptionFunc) ([]*Label, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/labels", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var l []*Label - resp, err := s.client.Do(req, &l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil + return do[[]*Label](s.client, + withPath("projects/%s/labels", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetLabel get a single label for a given project. 
// // GitLab API docs: https://docs.gitlab.com/api/labels/#get-a-single-project-label func (s *LabelsService) GetLabel(pid any, lid any, options ...RequestOptionFunc) (*Label, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - label, err := parseID(lid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/labels/%s", PathEscape(project), PathEscape(label)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var l *Label - resp, err := s.client.Do(req, &l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil + return do[*Label](s.client, + withPath("projects/%s/labels/%s", ProjectID{pid}, LabelID{lid}), + withRequestOpts(options...), + ) } // CreateLabelOptions represents the available CreateLabel() options. // -// GitLab API docs: https://docs.gitlab.com/api/labels/#create-a-new-label +// GitLab API docs: https://docs.gitlab.com/api/labels/#create-a-project-label type CreateLabelOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` Color *string `url:"color,omitempty" json:"color,omitempty"` Description *string `url:"description,omitempty" json:"description,omitempty"` - Priority *int `url:"priority,omitempty" json:"priority,omitempty"` + Priority *int64 `url:"priority,omitempty" json:"priority,omitempty"` + Archived *bool `url:"archived,omitempty" json:"archived,omitempty"` } // CreateLabel creates a new label for given repository with given name and @@ -163,24 +133,12 @@ type CreateLabelOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/labels/#create-a-new-label func (s *LabelsService) CreateLabel(pid any, opt *CreateLabelOptions, options ...RequestOptionFunc) (*Label, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/labels", PathEscape(project)) - - req, err := 
s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - l := new(Label) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil + return do[*Label](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/labels", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteLabelOptions represents the available DeleteLabel() options. @@ -194,37 +152,33 @@ type DeleteLabelOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/labels/#delete-a-label func (s *LabelsService) DeleteLabel(pid any, lid any, opt *DeleteLabelOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/labels", PathEscape(project)) + reqOpts := make([]doOption, 0, 4) + reqOpts = append(reqOpts, + withMethod(http.MethodDelete), + withAPIOpts(opt), + withRequestOpts(options...), + ) if lid != nil { - label, err := parseID(lid) - if err != nil { - return nil, err - } - u = fmt.Sprintf("projects/%s/labels/%s", PathEscape(project), PathEscape(label)) - } - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err + reqOpts = append(reqOpts, withPath("projects/%s/labels/%s", ProjectID{pid}, LabelID{lid})) + } else { + reqOpts = append(reqOpts, withPath("projects/%s/labels", ProjectID{pid})) } - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, reqOpts...) + return resp, err } // UpdateLabelOptions represents the available UpdateLabel() options. 
// -// GitLab API docs: https://docs.gitlab.com/api/labels/#edit-an-existing-label +// GitLab API docs: https://docs.gitlab.com/api/labels/#update-a-project-label type UpdateLabelOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` Color *string `url:"color,omitempty" json:"color,omitempty"` Description *string `url:"description,omitempty" json:"description,omitempty"` - Priority *int `url:"priority,omitempty" json:"priority,omitempty"` + Priority *int64 `url:"priority,omitempty" json:"priority,omitempty"` + Archived *bool `url:"archived,omitempty" json:"archived,omitempty"` } // UpdateLabel updates an existing label with new name or now color. At least @@ -232,32 +186,20 @@ type UpdateLabelOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/labels/#edit-an-existing-label func (s *LabelsService) UpdateLabel(pid any, lid any, opt *UpdateLabelOptions, options ...RequestOptionFunc) (*Label, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/labels", PathEscape(project)) + reqOpts := make([]doOption, 0, 4) + reqOpts = append(reqOpts, + withMethod(http.MethodPut), + withAPIOpts(opt), + withRequestOpts(options...), + ) if lid != nil { - label, err := parseID(lid) - if err != nil { - return nil, nil, err - } - u = fmt.Sprintf("projects/%s/labels/%s", PathEscape(project), PathEscape(label)) - } - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - l := new(Label) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err + reqOpts = append(reqOpts, withPath("projects/%s/labels/%s", ProjectID{pid}, LabelID{lid})) + } else { + reqOpts = append(reqOpts, withPath("projects/%s/labels", ProjectID{pid})) } - return l, resp, nil + return do[*Label](s.client, reqOpts...) 
} // SubscribeToLabel subscribes the authenticated user to a label to receive @@ -267,28 +209,11 @@ func (s *LabelsService) UpdateLabel(pid any, lid any, opt *UpdateLabelOptions, o // GitLab API docs: // https://docs.gitlab.com/api/labels/#subscribe-to-a-label func (s *LabelsService) SubscribeToLabel(pid any, lid any, options ...RequestOptionFunc) (*Label, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - label, err := parseID(lid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/labels/%s/subscribe", PathEscape(project), PathEscape(label)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - l := new(Label) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil + return do[*Label](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/labels/%s/subscribe", ProjectID{pid}, LabelID{lid}), + withRequestOpts(options...), + ) } // UnsubscribeFromLabel unsubscribes the authenticated user from a label to not @@ -298,22 +223,12 @@ func (s *LabelsService) SubscribeToLabel(pid any, lid any, options ...RequestOpt // GitLab API docs: // https://docs.gitlab.com/api/labels/#unsubscribe-from-a-label func (s *LabelsService) UnsubscribeFromLabel(pid any, lid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - label, err := parseID(lid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/labels/%s/unsubscribe", PathEscape(project), PathEscape(label)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/labels/%s/unsubscribe", ProjectID{pid}, LabelID{lid}), + withRequestOpts(options...), + ) + return resp, err 
} // PromoteLabel Promotes a project label to a group label. @@ -321,20 +236,10 @@ func (s *LabelsService) UnsubscribeFromLabel(pid any, lid any, options ...Reques // GitLab API docs: // https://docs.gitlab.com/api/labels/#promote-a-project-label-to-a-group-label func (s *LabelsService) PromoteLabel(pid any, lid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - label, err := parseID(lid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/labels/%s/promote", PathEscape(project), PathEscape(label)) - - req, err := s.client.NewRequest(http.MethodPut, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/labels/%s/promote", ProjectID{pid}, LabelID{lid}), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/license.go b/vendor/gitlab.com/gitlab-org/api/client-go/license.go index 9e9d2a0681..762454aa23 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/license.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/license.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -26,7 +25,7 @@ type ( LicenseServiceInterface interface { GetLicense(options ...RequestOptionFunc) (*License, *Response, error) AddLicense(opt *AddLicenseOptions, options ...RequestOptionFunc) (*License, *Response, error) - DeleteLicense(licenseID int, options ...RequestOptionFunc) (*Response, error) + DeleteLicense(licenseID int64, options ...RequestOptionFunc) (*Response, error) } // LicenseService handles communication with the license @@ -46,54 +45,66 @@ var _ LicenseServiceInterface = (*LicenseService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/license/ type License struct { - ID int `json:"id"` - Plan string `json:"plan"` - CreatedAt *time.Time `json:"created_at"` - StartsAt 
*ISOTime `json:"starts_at"` - ExpiresAt *ISOTime `json:"expires_at"` - HistoricalMax int `json:"historical_max"` - MaximumUserCount int `json:"maximum_user_count"` - Expired bool `json:"expired"` - Overage int `json:"overage"` - UserLimit int `json:"user_limit"` - ActiveUsers int `json:"active_users"` - Licensee struct { - Name string `json:"Name"` - Company string `json:"Company"` - Email string `json:"Email"` - } `json:"licensee"` + ID int64 `json:"id"` + Plan string `json:"plan"` + CreatedAt *time.Time `json:"created_at"` + StartsAt *ISOTime `json:"starts_at"` + ExpiresAt *ISOTime `json:"expires_at"` + HistoricalMax int64 `json:"historical_max"` + MaximumUserCount int64 `json:"maximum_user_count"` + Expired bool `json:"expired"` + Overage int64 `json:"overage"` + UserLimit int64 `json:"user_limit"` + ActiveUsers int64 `json:"active_users"` + Licensee LicenseLicensee `json:"licensee"` // Add on codes that may occur in legacy licenses that don't have a plan yet. // https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/models/license.rb - AddOns struct { - GitLabAuditorUser int `json:"GitLab_Auditor_User"` - GitLabDeployBoard int `json:"GitLab_DeployBoard"` - GitLabFileLocks int `json:"GitLab_FileLocks"` - GitLabGeo int `json:"GitLab_Geo"` - GitLabServiceDesk int `json:"GitLab_ServiceDesk"` - } `json:"add_ons"` + AddOns LicenseAddOns `json:"add_ons"` } func (l License) String() string { return Stringify(l) } +// LicenseLicensee represents a GitLab license licensee. +// +// GitLab API docs: +// https://docs.gitlab.com/api/license/ +type LicenseLicensee struct { + Name string `json:"Name"` + Company string `json:"Company"` + Email string `json:"Email"` +} + +func (l LicenseLicensee) String() string { + return Stringify(l) +} + +// LicenseAddOns represents a GitLab license add ons. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/license/ +type LicenseAddOns struct { + GitLabAuditorUser int64 `json:"GitLab_Auditor_User"` + GitLabDeployBoard int64 `json:"GitLab_DeployBoard"` + GitLabFileLocks int64 `json:"GitLab_FileLocks"` + GitLabGeo int64 `json:"GitLab_Geo"` + GitLabServiceDesk int64 `json:"GitLab_ServiceDesk"` +} + +func (a LicenseAddOns) String() string { + return Stringify(a) +} + // GetLicense retrieves information about the current license. // // GitLab API docs: // https://docs.gitlab.com/api/license/#retrieve-information-about-the-current-license func (s *LicenseService) GetLicense(options ...RequestOptionFunc) (*License, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "license", nil, options) - if err != nil { - return nil, nil, err - } - - l := new(License) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil + return do[*License](s.client, + withPath("license"), + withRequestOpts(options...), + ) } // AddLicenseOptions represents the available AddLicense() options. @@ -108,31 +119,23 @@ type AddLicenseOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/license/#add-a-new-license func (s *LicenseService) AddLicense(opt *AddLicenseOptions, options ...RequestOptionFunc) (*License, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "license", opt, options) - if err != nil { - return nil, nil, err - } - - l := new(License) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil + return do[*License](s.client, + withMethod(http.MethodPost), + withPath("license"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteLicense deletes an existing license. 
// // GitLab API docs: // https://docs.gitlab.com/api/license/#delete-a-license -func (s *LicenseService) DeleteLicense(licenseID int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("license/%d", licenseID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *LicenseService) DeleteLicense(licenseID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("license/%d", licenseID), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/license_templates.go b/vendor/gitlab.com/gitlab-org/api/client-go/license_templates.go index 8808f56b56..73939b9b13 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/license_templates.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/license_templates.go @@ -16,11 +16,6 @@ package gitlab -import ( - "fmt" - "net/http" -) - // LicenseTemplate represents a license template. 
// // GitLab API docs: @@ -71,18 +66,11 @@ type ListLicenseTemplatesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/templates/licenses/#list-license-templates func (s *LicenseTemplatesService) ListLicenseTemplates(opt *ListLicenseTemplatesOptions, options ...RequestOptionFunc) ([]*LicenseTemplate, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "templates/licenses", opt, options) - if err != nil { - return nil, nil, err - } - - var lts []*LicenseTemplate - resp, err := s.client.Do(req, <s) - if err != nil { - return nil, resp, err - } - - return lts, resp, nil + return do[[]*LicenseTemplate](s.client, + withPath("templates/licenses"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetLicenseTemplateOptions represents the available @@ -101,18 +89,9 @@ type GetLicenseTemplateOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/templates/licenses/#single-license-template func (s *LicenseTemplatesService) GetLicenseTemplate(template string, opt *GetLicenseTemplateOptions, options ...RequestOptionFunc) (*LicenseTemplate, *Response, error) { - u := fmt.Sprintf("templates/licenses/%s", template) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - lt := new(LicenseTemplate) - resp, err := s.client.Do(req, lt) - if err != nil { - return nil, resp, err - } - - return lt, resp, nil + return do[*LicenseTemplate](s.client, + withPath("templates/licenses/%s", NoEscape{template}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/markdown.go b/vendor/gitlab.com/gitlab-org/api/client-go/markdown.go index 8e2e30f655..cf92f1c3a9 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/markdown.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/markdown.go @@ -20,14 +20,14 @@ var _ MarkdownServiceInterface = (*MarkdownService)(nil) // Markdown represents a markdown document. 
// -// Gitlab API docs: https://docs.gitlab.com/api/markdown/ +// GitLab API docs: https://docs.gitlab.com/api/markdown/ type Markdown struct { HTML string `json:"html"` } // RenderOptions represents the available Render() options. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/markdown/#render-an-arbitrary-markdown-document type RenderOptions struct { Text *string `url:"text,omitempty" json:"text,omitempty"` @@ -37,19 +37,13 @@ type RenderOptions struct { // Render an arbitrary markdown document. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/markdown/#render-an-arbitrary-markdown-document func (s *MarkdownService) Render(opt *RenderOptions, options ...RequestOptionFunc) (*Markdown, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "markdown", opt, options) - if err != nil { - return nil, nil, err - } - - md := new(Markdown) - response, err := s.client.Do(req, md) - if err != nil { - return nil, response, err - } - - return md, response, nil + return do[*Markdown](s.client, + withMethod(http.MethodPost), + withPath("markdown"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/markdown_uploads.go b/vendor/gitlab.com/gitlab-org/api/client-go/markdown_uploads.go index 9acfe2377b..1f45039d29 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/markdown_uploads.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/markdown_uploads.go @@ -18,19 +18,18 @@ package gitlab import ( "bytes" - "fmt" "net/http" "time" ) // MarkdownUpload represents a single markdown upload. 
// -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/project_markdown_uploads/ // https://docs.gitlab.com/api/group_markdown_uploads/ type MarkdownUpload struct { - ID int `json:"id"` - Size int `json:"size"` + ID int64 `json:"id"` + Size int64 `json:"size"` Filename string `json:"filename"` CreatedAt *time.Time `json:"created_at"` UploadedBy *User `json:"uploaded_by"` @@ -47,10 +46,10 @@ func (m MarkdownUpload) String() string { // MarkdownUploadedFile represents a single markdown uploaded file. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/project_markdown_uploads/ type MarkdownUploadedFile struct { - ID int `json:"id"` + ID int64 `json:"id"` Alt string `json:"alt"` URL string `json:"url"` FullPath string `json:"full_path"` @@ -70,99 +69,55 @@ type ListMarkdownUploadsOptions struct { } // listMarkdownUploads gets all markdown uploads for a resource -func listMarkdownUploads[T any](client *Client, resourceType ResourceType, id any, opt *ListMarkdownUploadsOptions, options []RequestOptionFunc) ([]*T, *Response, error) { - resourceID, err := parseID(id) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("%s/%s/uploads", resourceType, PathEscape(resourceID)) - - req, err := client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var uploads []*T - resp, err := client.Do(req, &uploads) - if err != nil { - return nil, resp, err - } - - return uploads, resp, err +func listMarkdownUploads[T any](client *Client, resourceType ResourceType, id Pather, opt *ListMarkdownUploadsOptions, options []RequestOptionFunc) ([]*T, *Response, error) { + return do[[]*T](client, + withMethod(http.MethodGet), + withPath("%s/%s/uploads", resourceType, id), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // downloadMarkdownUploadByID downloads a specific upload by ID -func downloadMarkdownUploadByID(client *Client, resourceType ResourceType, id any, uploadID int, options 
[]RequestOptionFunc) (*bytes.Buffer, *Response, error) { - resourceID, err := parseID(id) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("%s/%s/uploads/%d", resourceType, PathEscape(resourceID), uploadID) - - req, err := client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var file bytes.Buffer - resp, err := client.Do(req, &file) - if err != nil { - return nil, resp, err - } +func downloadMarkdownUploadByID(client *Client, resourceType ResourceType, id Pather, uploadID int64, options []RequestOptionFunc) (*bytes.Buffer, *Response, error) { + file, resp, err := do[bytes.Buffer](client, + withMethod(http.MethodGet), + withPath("%s/%s/uploads/%d", resourceType, id, uploadID), + withRequestOpts(options...), + ) return &file, resp, err } // downloadMarkdownUploadBySecretAndFilename downloads a specific upload by secret and filename -func downloadMarkdownUploadBySecretAndFilename(client *Client, resourceType ResourceType, id any, secret string, filename string, options []RequestOptionFunc) (*bytes.Buffer, *Response, error) { - resourceID, err := parseID(id) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("%s/%s/uploads/%s/%s", resourceType, PathEscape(resourceID), PathEscape(secret), PathEscape(filename)) - - req, err := client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var file bytes.Buffer - resp, err := client.Do(req, &file) - if err != nil { - return nil, resp, err - } +func downloadMarkdownUploadBySecretAndFilename(client *Client, resourceType ResourceType, id Pather, secret string, filename string, options []RequestOptionFunc) (*bytes.Buffer, *Response, error) { + file, resp, err := do[bytes.Buffer](client, + withMethod(http.MethodGet), + withPath("%s/%s/uploads/%s/%s", resourceType, id, secret, filename), + withRequestOpts(options...), + ) return &file, resp, err } // deleteMarkdownUploadByID deletes an upload by ID -func 
deleteMarkdownUploadByID(client *Client, resourceType ResourceType, id any, uploadID int, options []RequestOptionFunc) (*Response, error) { - resourceID, err := parseID(id) - if err != nil { - return nil, err - } - u := fmt.Sprintf("%s/%s/uploads/%d", resourceType, PathEscape(resourceID), uploadID) - - req, err := client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return client.Do(req, nil) +func deleteMarkdownUploadByID(client *Client, resourceType ResourceType, id Pather, uploadID int64, options []RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](client, + withMethod(http.MethodDelete), + withPath("%s/%s/uploads/%d", resourceType, id, uploadID), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } // deleteMarkdownUploadBySecretAndFilename deletes an upload by secret and filename -func deleteMarkdownUploadBySecretAndFilename(client *Client, resourceType ResourceType, id any, secret string, filename string, options []RequestOptionFunc) (*Response, error) { - resourceID, err := parseID(id) - if err != nil { - return nil, err - } - u := fmt.Sprintf("%s/%s/uploads/%s/%s", resourceType, PathEscape(resourceID), PathEscape(secret), PathEscape(filename)) - - req, err := client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return client.Do(req, nil) +func deleteMarkdownUploadBySecretAndFilename(client *Client, resourceType ResourceType, id Pather, secret string, filename string, options []RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](client, + withMethod(http.MethodDelete), + withPath("%s/%s/uploads/%s/%s", resourceType, id, secret, filename), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/member_roles.go b/vendor/gitlab.com/gitlab-org/api/client-go/member_roles.go index e9fede2146..094e3dab9d 100644 --- 
a/vendor/gitlab.com/gitlab-org/api/client-go/member_roles.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/member_roles.go @@ -1,7 +1,6 @@ package gitlab import ( - "fmt" "net/http" ) @@ -9,10 +8,10 @@ type ( MemberRolesServiceInterface interface { ListInstanceMemberRoles(options ...RequestOptionFunc) ([]*MemberRole, *Response, error) CreateInstanceMemberRole(opt *CreateMemberRoleOptions, options ...RequestOptionFunc) (*MemberRole, *Response, error) - DeleteInstanceMemberRole(memberRoleID int, options ...RequestOptionFunc) (*Response, error) + DeleteInstanceMemberRole(memberRoleID int64, options ...RequestOptionFunc) (*Response, error) ListMemberRoles(gid any, options ...RequestOptionFunc) ([]*MemberRole, *Response, error) CreateMemberRole(gid any, opt *CreateMemberRoleOptions, options ...RequestOptionFunc) (*MemberRole, *Response, error) - DeleteMemberRole(gid any, memberRole int, options ...RequestOptionFunc) (*Response, error) + DeleteMemberRole(gid any, memberRole int64, options ...RequestOptionFunc) (*Response, error) } // MemberRolesService handles communication with the member roles related @@ -32,10 +31,10 @@ var _ MemberRolesServiceInterface = (*MemberRolesService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/member_roles/#list-all-member-roles-of-a-group type MemberRole struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Description string `json:"description,omitempty"` - GroupID int `json:"group_id"` + GroupID int64 `json:"group_id"` BaseAccessLevel AccessLevelValue `json:"base_access_level"` AdminCICDVariables bool `json:"admin_cicd_variables,omitempty"` AdminComplianceFramework bool `json:"admin_compliance_framework,omitempty"` @@ -62,21 +61,15 @@ type MemberRole struct { // ListInstanceMemberRoles gets all member roles in an instance. // Authentication as Administrator is required. 
// -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/member_roles/#get-all-instance-member-roles func (s *MemberRolesService) ListInstanceMemberRoles(options ...RequestOptionFunc) ([]*MemberRole, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "member_roles", nil, options) - if err != nil { - return nil, nil, err - } - - var mrs []*MemberRole - resp, err := s.client.Do(req, &mrs) - if err != nil { - return nil, resp, err - } - - return mrs, resp, nil + return do[[]*MemberRole](s.client, + withMethod(http.MethodGet), + withPath("member_roles"), + withAPIOpts(nil), + withRequestOpts(options...), + ) } // CreateMemberRoleOptions represents the available CreateInstanceMemberRole() @@ -113,103 +106,67 @@ type CreateMemberRoleOptions struct { // CreateInstanceMemberRole creates an instance-wide member role. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/member_roles/#create-a-instance-member-role func (s *MemberRolesService) CreateInstanceMemberRole(opt *CreateMemberRoleOptions, options ...RequestOptionFunc) (*MemberRole, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "member_roles", opt, options) - if err != nil { - return nil, nil, err - } - - mr := new(MemberRole) - resp, err := s.client.Do(req, mr) - if err != nil { - return nil, resp, err - } - - return mr, resp, nil + return do[*MemberRole](s.client, + withMethod(http.MethodPost), + withPath("member_roles"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteInstanceMemberRole deletes a member role from a specified group. 
// -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/member_roles/#delete-an-instance-member-role -func (s *MemberRolesService) DeleteInstanceMemberRole(memberRoleID int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("member_roles/%d", memberRoleID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *MemberRolesService) DeleteInstanceMemberRole(memberRoleID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("member_roles/%d", memberRoleID), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } // ListMemberRoles gets a list of member roles for a specified group. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/member_roles/#get-all-group-member-roles func (s *MemberRolesService) ListMemberRoles(gid any, options ...RequestOptionFunc) ([]*MemberRole, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/member_roles", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var mrs []*MemberRole - resp, err := s.client.Do(req, &mrs) - if err != nil { - return nil, resp, err - } - - return mrs, resp, nil + return do[[]*MemberRole](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/member_roles", GroupID{gid}), + withAPIOpts(nil), + withRequestOpts(options...), + ) } // CreateMemberRole creates a new member role for a specified group. 
// -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/member_roles/#add-a-member-role-to-a-group func (s *MemberRolesService) CreateMemberRole(gid any, opt *CreateMemberRoleOptions, options ...RequestOptionFunc) (*MemberRole, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/member_roles", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - mr := new(MemberRole) - resp, err := s.client.Do(req, mr) - if err != nil { - return nil, resp, err - } - - return mr, resp, nil + return do[*MemberRole](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/member_roles", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteMemberRole deletes a member role from a specified group. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/member_roles/#remove-member-role-of-a-group -func (s *MemberRolesService) DeleteMemberRole(gid any, memberRole int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/member_roles/%d", PathEscape(group), memberRole) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *MemberRolesService) DeleteMemberRole(gid any, memberRole int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/member_roles/%d", GroupID{gid}, memberRole), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approval_settings.go b/vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approval_settings.go index c1701be22f..4c182063d7 100644 --- 
a/vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approval_settings.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approval_settings.go @@ -15,16 +15,15 @@ package gitlab import ( - "fmt" "net/http" ) type ( MergeRequestApprovalSettingsServiceInterface interface { GetGroupMergeRequestApprovalSettings(gid any, options ...RequestOptionFunc) (*MergeRequestApprovalSettings, *Response, error) - UpdateGroupMergeRequestApprovalSettings(gid any, opt *UpdateMergeRequestApprovalSettingsOptions, options ...RequestOptionFunc) (*MergeRequestApprovalSettings, *Response, error) + UpdateGroupMergeRequestApprovalSettings(gid any, opt *UpdateGroupMergeRequestApprovalSettingsOptions, options ...RequestOptionFunc) (*MergeRequestApprovalSettings, *Response, error) GetProjectMergeRequestApprovalSettings(pid any, options ...RequestOptionFunc) (*MergeRequestApprovalSettings, *Response, error) - UpdateProjectMergeRequestApprovalSettings(pid any, opt *UpdateMergeRequestApprovalSettingsOptions, options ...RequestOptionFunc) (*MergeRequestApprovalSettings, *Response, error) + UpdateProjectMergeRequestApprovalSettings(pid any, opt *UpdateProjectMergeRequestApprovalSettingsOptions, options ...RequestOptionFunc) (*MergeRequestApprovalSettings, *Response, error) } // MergeRequestApprovalSettingsService handles communication with the merge @@ -70,39 +69,38 @@ type MergeRequestApprovalSetting struct { // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approval_settings/#get-group-mr-approval-settings func (s *MergeRequestApprovalSettingsService) GetGroupMergeRequestApprovalSettings(gid any, options ...RequestOptionFunc) (*MergeRequestApprovalSettings, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/merge_request_approval_setting", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - settings := 
new(MergeRequestApprovalSettings) - resp, err := s.client.Do(req, settings) - if err != nil { - return nil, resp, err - } - - return settings, resp, nil + return do[*MergeRequestApprovalSettings](s.client, + withPath("groups/%s/merge_request_approval_setting", GroupID{gid}), + withRequestOpts(options...), + ) } -// UpdateMergeRequestApprovalSettingsOptions represents the available -// UpdateGroupMergeRequestApprovalSettings() and UpdateProjectMergeRequestApprovalSettings() +// UpdateProjectMergeRequestApprovalSettingsOptions represents the available +// UpdateProjectMergeRequestApprovalSettings() // options. // // GitLab API docs: -// https://docs.gitlab.com/api/merge_request_approval_settings/#update-group-mr-approval-settings // https://docs.gitlab.com/api/merge_request_approval_settings/#update-project-mr-approval-settings -type UpdateMergeRequestApprovalSettingsOptions struct { +type UpdateProjectMergeRequestApprovalSettingsOptions struct { AllowAuthorApproval *bool `url:"allow_author_approval,omitempty" json:"allow_author_approval,omitempty"` AllowCommitterApproval *bool `url:"allow_committer_approval,omitempty" json:"allow_committer_approval,omitempty"` AllowOverridesToApproverListPerMergeRequest *bool `url:"allow_overrides_to_approver_list_per_merge_request,omitempty" json:"allow_overrides_to_approver_list_per_merge_request,omitempty"` RetainApprovalsOnPush *bool `url:"retain_approvals_on_push,omitempty" json:"retain_approvals_on_push,omitempty"` + RequireReauthenticationToApprove *bool `url:"require_reauthentication_to_approve,omitempty" json:"require_reauthentication_to_approve,omitempty"` SelectiveCodeOwnerRemovals *bool `url:"selective_code_owner_removals,omitempty" json:"selective_code_owner_removals,omitempty"` +} + +// UpdateGroupMergeRequestApprovalSettingsOptions represents the available +// UpdateGroupRequestApprovalSettings() +// options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/merge_request_approval_settings/#update-group-mr-approval-settings +type UpdateGroupMergeRequestApprovalSettingsOptions struct { + AllowAuthorApproval *bool `url:"allow_author_approval,omitempty" json:"allow_author_approval,omitempty"` + AllowCommitterApproval *bool `url:"allow_committer_approval,omitempty" json:"allow_committer_approval,omitempty"` + AllowOverridesToApproverListPerMergeRequest *bool `url:"allow_overrides_to_approver_list_per_merge_request,omitempty" json:"allow_overrides_to_approver_list_per_merge_request,omitempty"` + RetainApprovalsOnPush *bool `url:"retain_approvals_on_push,omitempty" json:"retain_approvals_on_push,omitempty"` RequireReauthenticationToApprove *bool `url:"require_reauthentication_to_approve,omitempty" json:"require_reauthentication_to_approve,omitempty"` } @@ -111,25 +109,13 @@ type UpdateMergeRequestApprovalSettingsOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approval_settings/#update-group-mr-approval-settings -func (s *MergeRequestApprovalSettingsService) UpdateGroupMergeRequestApprovalSettings(gid any, opt *UpdateMergeRequestApprovalSettingsOptions, options ...RequestOptionFunc) (*MergeRequestApprovalSettings, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/merge_request_approval_setting", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - settings := new(MergeRequestApprovalSettings) - resp, err := s.client.Do(req, settings) - if err != nil { - return nil, resp, err - } - - return settings, resp, nil +func (s *MergeRequestApprovalSettingsService) UpdateGroupMergeRequestApprovalSettings(gid any, opt *UpdateGroupMergeRequestApprovalSettingsOptions, options ...RequestOptionFunc) (*MergeRequestApprovalSettings, *Response, error) { + return 
do[*MergeRequestApprovalSettings](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/merge_request_approval_setting", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetProjectMergeRequestApprovalSettings gets the merge request approval settings @@ -138,24 +124,10 @@ func (s *MergeRequestApprovalSettingsService) UpdateGroupMergeRequestApprovalSet // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approval_settings/#get-project-mr-approval-settings func (s *MergeRequestApprovalSettingsService) GetProjectMergeRequestApprovalSettings(pid any, options ...RequestOptionFunc) (*MergeRequestApprovalSettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_request_approval_setting", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - settings := new(MergeRequestApprovalSettings) - resp, err := s.client.Do(req, settings) - if err != nil { - return nil, resp, err - } - - return settings, resp, nil + return do[*MergeRequestApprovalSettings](s.client, + withPath("projects/%s/merge_request_approval_setting", ProjectID{pid}), + withRequestOpts(options...), + ) } // UpdateProjectMergeRequestApprovalSettings updates the merge request approval @@ -163,23 +135,11 @@ func (s *MergeRequestApprovalSettingsService) GetProjectMergeRequestApprovalSett // // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approval_settings/#update-project-mr-approval-settings -func (s *MergeRequestApprovalSettingsService) UpdateProjectMergeRequestApprovalSettings(pid any, opt *UpdateMergeRequestApprovalSettingsOptions, options ...RequestOptionFunc) (*MergeRequestApprovalSettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_request_approval_setting", PathEscape(project)) - - req, err 
:= s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - settings := new(MergeRequestApprovalSettings) - resp, err := s.client.Do(req, settings) - if err != nil { - return nil, resp, err - } - - return settings, resp, nil +func (s *MergeRequestApprovalSettingsService) UpdateProjectMergeRequestApprovalSettings(pid any, opt *UpdateProjectMergeRequestApprovalSettingsOptions, options ...RequestOptionFunc) (*MergeRequestApprovalSettings, *Response, error) { + return do[*MergeRequestApprovalSettings](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/merge_request_approval_setting", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go b/vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go index 80d16bec68..2cdc06c636 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go @@ -17,23 +17,22 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( MergeRequestApprovalsServiceInterface interface { - ApproveMergeRequest(pid any, mr int, opt *ApproveMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) - UnapproveMergeRequest(pid any, mr int, options ...RequestOptionFunc) (*Response, error) - ResetApprovalsOfMergeRequest(pid any, mr int, options ...RequestOptionFunc) (*Response, error) - GetConfiguration(pid any, mr int, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) - ChangeApprovalConfiguration(pid any, mergeRequest int, opt *ChangeMergeRequestApprovalConfigurationOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) - GetApprovalRules(pid any, mergeRequest int, options ...RequestOptionFunc) ([]*MergeRequestApprovalRule, *Response, error) - GetApprovalState(pid any, mergeRequest int, options 
...RequestOptionFunc) (*MergeRequestApprovalState, *Response, error) - CreateApprovalRule(pid any, mergeRequest int, opt *CreateMergeRequestApprovalRuleOptions, options ...RequestOptionFunc) (*MergeRequestApprovalRule, *Response, error) - UpdateApprovalRule(pid any, mergeRequest int, approvalRule int, opt *UpdateMergeRequestApprovalRuleOptions, options ...RequestOptionFunc) (*MergeRequestApprovalRule, *Response, error) - DeleteApprovalRule(pid any, mergeRequest int, approvalRule int, options ...RequestOptionFunc) (*Response, error) + ApproveMergeRequest(pid any, mr int64, opt *ApproveMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) + UnapproveMergeRequest(pid any, mr int64, options ...RequestOptionFunc) (*Response, error) + ResetApprovalsOfMergeRequest(pid any, mr int64, options ...RequestOptionFunc) (*Response, error) + GetConfiguration(pid any, mr int64, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) + ChangeApprovalConfiguration(pid any, mergeRequest int64, opt *ChangeMergeRequestApprovalConfigurationOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) + GetApprovalRules(pid any, mergeRequest int64, options ...RequestOptionFunc) ([]*MergeRequestApprovalRule, *Response, error) + GetApprovalState(pid any, mergeRequest int64, options ...RequestOptionFunc) (*MergeRequestApprovalState, *Response, error) + CreateApprovalRule(pid any, mergeRequest int64, opt *CreateMergeRequestApprovalRuleOptions, options ...RequestOptionFunc) (*MergeRequestApprovalRule, *Response, error) + UpdateApprovalRule(pid any, mergeRequest int64, approvalRule int64, opt *UpdateMergeRequestApprovalRuleOptions, options ...RequestOptionFunc) (*MergeRequestApprovalRule, *Response, error) + DeleteApprovalRule(pid any, mergeRequest int64, approvalRule int64, options ...RequestOptionFunc) (*Response, error) } // MergeRequestApprovalsService handles communication with the merge request @@ -53,9 +52,9 @@ var _ 
MergeRequestApprovalsServiceInterface = (*MergeRequestApprovalsService)(ni // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#single-merge-request-approval type MergeRequestApprovals struct { - ID int `json:"id"` - IID int `json:"iid"` - ProjectID int `json:"project_id"` + ID int64 `json:"id"` + IID int64 `json:"iid"` + ProjectID int64 `json:"project_id"` Title string `json:"title"` Description string `json:"description"` State string `json:"state"` @@ -63,9 +62,9 @@ type MergeRequestApprovals struct { UpdatedAt *time.Time `json:"updated_at"` MergeStatus string `json:"merge_status"` Approved bool `json:"approved"` - ApprovalsBeforeMerge int `json:"approvals_before_merge"` - ApprovalsRequired int `json:"approvals_required"` - ApprovalsLeft int `json:"approvals_left"` + ApprovalsBeforeMerge int64 `json:"approvals_before_merge"` + ApprovalsRequired int64 `json:"approvals_required"` + ApprovalsLeft int64 `json:"approvals_left"` RequirePasswordToApprove bool `json:"require_password_to_approve"` ApprovedBy []*MergeRequestApproverUser `json:"approved_by"` SuggestedApprovers []*BasicUser `json:"suggested_approvers"` @@ -83,24 +82,26 @@ func (m MergeRequestApprovals) String() string { return Stringify(m) } -// MergeRequestApproverGroup represents GitLab project level merge request approver group. +// MergeRequestApproverGroup represents GitLab project level merge request approver group. 
// // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#project-approval-rules type MergeRequestApproverGroup struct { - Group struct { - ID int `json:"id"` - Name string `json:"name"` - Path string `json:"path"` - Description string `json:"description"` - Visibility string `json:"visibility"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - FullName string `json:"full_name"` - FullPath string `json:"full_path"` - LFSEnabled bool `json:"lfs_enabled"` - RequestAccessEnabled bool `json:"request_access_enabled"` - } + Group MergeRequestApproverNestedGroup `json:"group"` +} + +type MergeRequestApproverNestedGroup struct { + ID int64 `json:"id"` + Name string `json:"name"` + Path string `json:"path"` + Description string `json:"description"` + Visibility string `json:"visibility"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + FullName string `json:"full_name"` + FullPath string `json:"full_path"` + LFSEnabled bool `json:"lfs_enabled"` + RequestAccessEnabled bool `json:"request_access_enabled"` } // MergeRequestApprovalRule represents a GitLab merge request approval rule. 
@@ -108,12 +109,12 @@ type MergeRequestApproverGroup struct { // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#get-merge-request-approval-rules type MergeRequestApprovalRule struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` RuleType string `json:"rule_type"` ReportType string `json:"report_type"` EligibleApprovers []*BasicUser `json:"eligible_approvers"` - ApprovalsRequired int `json:"approvals_required"` + ApprovalsRequired int64 `json:"approvals_required"` SourceRule *ProjectApprovalRule `json:"source_rule"` Users []*BasicUser `json:"users"` Groups []*Group `json:"groups"` @@ -158,44 +159,26 @@ type ApproveMergeRequestOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#approve-merge-request -func (s *MergeRequestApprovalsService) ApproveMergeRequest(pid any, mr int, opt *ApproveMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approve", PathEscape(project), mr) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequestApprovals) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *MergeRequestApprovalsService) ApproveMergeRequest(pid any, mr int64, opt *ApproveMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) { + return do[*MergeRequestApprovals](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/approve", ProjectID{pid}, mr), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UnapproveMergeRequest unapproves a previously approved merge request on GitLab. 
// // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#unapprove-merge-request -func (s *MergeRequestApprovalsService) UnapproveMergeRequest(pid any, mr int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/unapprove", PathEscape(project), mr) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *MergeRequestApprovalsService) UnapproveMergeRequest(pid any, mr int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/unapprove", ProjectID{pid}, mr), + withRequestOpts(options...), + ) + return resp, err } // ResetApprovalsOfMergeRequest clear all approvals of merge request on GitLab. @@ -203,44 +186,24 @@ func (s *MergeRequestApprovalsService) UnapproveMergeRequest(pid any, mr int, op // // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#reset-approvals-of-a-merge-request -func (s *MergeRequestApprovalsService) ResetApprovalsOfMergeRequest(pid any, mr int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/reset_approvals", PathEscape(project), mr) - - req, err := s.client.NewRequest(http.MethodPut, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *MergeRequestApprovalsService) ResetApprovalsOfMergeRequest(pid any, mr int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/merge_requests/%d/reset_approvals", ProjectID{pid}, mr), + withRequestOpts(options...), + ) + return resp, err } // GetConfiguration shows information about single 
merge request approvals // // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#single-merge-request-approval -func (s *MergeRequestApprovalsService) GetConfiguration(pid any, mr int, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mr) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequestApprovals) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *MergeRequestApprovalsService) GetConfiguration(pid any, mr int64, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) { + return do[*MergeRequestApprovals](s.client, + withPath("projects/%s/merge_requests/%d/approvals", ProjectID{pid}, mr), + withRequestOpts(options...), + ) } // ChangeMergeRequestApprovalConfigurationOptions represents the available @@ -248,81 +211,41 @@ func (s *MergeRequestApprovalsService) GetConfiguration(pid any, mr int, options // // Deprecated: in GitLab 16.0 type ChangeMergeRequestApprovalConfigurationOptions struct { - ApprovalsRequired *int `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` + ApprovalsRequired *int64 `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` } // ChangeApprovalConfiguration updates the approval configuration of a merge request. 
// // Deprecated: in GitLab 16.0 -func (s *MergeRequestApprovalsService) ChangeApprovalConfiguration(pid any, mergeRequest int, opt *ChangeMergeRequestApprovalConfigurationOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *MergeRequestApprovalsService) ChangeApprovalConfiguration(pid any, mergeRequest int64, opt *ChangeMergeRequestApprovalConfigurationOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { + return do[*MergeRequest](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/approvals", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetApprovalRules requests information about a merge request’s approval rules +// GetApprovalRules requests information about a merge request's approval rules // // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#get-merge-request-approval-rules -func (s *MergeRequestApprovalsService) GetApprovalRules(pid any, mergeRequest int, options ...RequestOptionFunc) ([]*MergeRequestApprovalRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_rules", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var par []*MergeRequestApprovalRule - resp, err := s.client.Do(req, &par) - if err != nil { - return nil, resp, err - } - - return par, resp, nil +func (s 
*MergeRequestApprovalsService) GetApprovalRules(pid any, mergeRequest int64, options ...RequestOptionFunc) ([]*MergeRequestApprovalRule, *Response, error) { + return do[[]*MergeRequestApprovalRule](s.client, + withPath("projects/%s/merge_requests/%d/approval_rules", ProjectID{pid}, mergeRequest), + withRequestOpts(options...), + ) } -// GetApprovalState requests information about a merge request’s approval state +// GetApprovalState requests information about a merge request's approval state // // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#get-the-approval-state-of-merge-requests -func (s *MergeRequestApprovalsService) GetApprovalState(pid any, mergeRequest int, options ...RequestOptionFunc) (*MergeRequestApprovalState, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_state", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var pas *MergeRequestApprovalState - resp, err := s.client.Do(req, &pas) - if err != nil { - return nil, resp, err - } - - return pas, resp, nil +func (s *MergeRequestApprovalsService) GetApprovalState(pid any, mergeRequest int64, options ...RequestOptionFunc) (*MergeRequestApprovalState, *Response, error) { + return do[*MergeRequestApprovalState](s.client, + withPath("projects/%s/merge_requests/%d/approval_state", ProjectID{pid}, mergeRequest), + withRequestOpts(options...), + ) } // CreateMergeRequestApprovalRuleOptions represents the available CreateApprovalRule() @@ -331,36 +254,24 @@ func (s *MergeRequestApprovalsService) GetApprovalState(pid any, mergeRequest in // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#create-merge-request-rule type CreateMergeRequestApprovalRuleOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ApprovalsRequired *int 
`url:"approvals_required,omitempty" json:"approvals_required,omitempty"` - ApprovalProjectRuleID *int `url:"approval_project_rule_id,omitempty" json:"approval_project_rule_id,omitempty"` - UserIDs *[]int `url:"user_ids,omitempty" json:"user_ids,omitempty"` - GroupIDs *[]int `url:"group_ids,omitempty" json:"group_ids,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + ApprovalsRequired *int64 `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` + ApprovalProjectRuleID *int64 `url:"approval_project_rule_id,omitempty" json:"approval_project_rule_id,omitempty"` + UserIDs *[]int64 `url:"user_ids,omitempty" json:"user_ids,omitempty"` + GroupIDs *[]int64 `url:"group_ids,omitempty" json:"group_ids,omitempty"` } // CreateApprovalRule creates a new MR level approval rule. // // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#create-merge-request-rule -func (s *MergeRequestApprovalsService) CreateApprovalRule(pid any, mergeRequest int, opt *CreateMergeRequestApprovalRuleOptions, options ...RequestOptionFunc) (*MergeRequestApprovalRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_rules", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - par := new(MergeRequestApprovalRule) - resp, err := s.client.Do(req, &par) - if err != nil { - return nil, resp, err - } - - return par, resp, nil +func (s *MergeRequestApprovalsService) CreateApprovalRule(pid any, mergeRequest int64, opt *CreateMergeRequestApprovalRuleOptions, options ...RequestOptionFunc) (*MergeRequestApprovalRule, *Response, error) { + return do[*MergeRequestApprovalRule](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/approval_rules", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + 
withRequestOpts(options...), + ) } // UpdateMergeRequestApprovalRuleOptions represents the available UpdateApprovalRule() @@ -369,52 +280,34 @@ func (s *MergeRequestApprovalsService) CreateApprovalRule(pid any, mergeRequest // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#update-merge-request-rule type UpdateMergeRequestApprovalRuleOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ApprovalsRequired *int `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` - UserIDs *[]int `url:"user_ids,omitempty" json:"user_ids,omitempty"` - GroupIDs *[]int `url:"group_ids,omitempty" json:"group_ids,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + ApprovalsRequired *int64 `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` + UserIDs *[]int64 `url:"user_ids,omitempty" json:"user_ids,omitempty"` + GroupIDs *[]int64 `url:"group_ids,omitempty" json:"group_ids,omitempty"` } // UpdateApprovalRule updates an existing approval rule with new options. 
// // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#update-merge-request-rule -func (s *MergeRequestApprovalsService) UpdateApprovalRule(pid any, mergeRequest int, approvalRule int, opt *UpdateMergeRequestApprovalRuleOptions, options ...RequestOptionFunc) (*MergeRequestApprovalRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_rules/%d", PathEscape(project), mergeRequest, approvalRule) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - par := new(MergeRequestApprovalRule) - resp, err := s.client.Do(req, &par) - if err != nil { - return nil, resp, err - } - - return par, resp, nil +func (s *MergeRequestApprovalsService) UpdateApprovalRule(pid any, mergeRequest int64, approvalRule int64, opt *UpdateMergeRequestApprovalRuleOptions, options ...RequestOptionFunc) (*MergeRequestApprovalRule, *Response, error) { + return do[*MergeRequestApprovalRule](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/merge_requests/%d/approval_rules/%d", ProjectID{pid}, mergeRequest, approvalRule), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteApprovalRule deletes a mr level approval rule. 
// // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#delete-merge-request-rule -func (s *MergeRequestApprovalsService) DeleteApprovalRule(pid any, mergeRequest int, approvalRule int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_rules/%d", PathEscape(project), mergeRequest, approvalRule) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *MergeRequestApprovalsService) DeleteApprovalRule(pid any, mergeRequest int64, approvalRule int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/merge_requests/%d/approval_rules/%d", ProjectID{pid}, mergeRequest, approvalRule), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/merge_request_context_commits.go b/vendor/gitlab.com/gitlab-org/api/client-go/merge_request_context_commits.go new file mode 100644 index 0000000000..f2240f4154 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/merge_request_context_commits.go @@ -0,0 +1,84 @@ +package gitlab + +import ( + "net/http" +) + +type ( + // MergeRequestContextCommitsServiceInterface handles communication with the + // merge request context commits related methods of the GitLab API. + MergeRequestContextCommitsServiceInterface interface { + // ListMergeRequestContextCommits gets a list of merge request context commits. + // + // GitLab API docs: + // https://docs.gitlab.com/api/merge_request_context_commits/#list-mr-context-commits + ListMergeRequestContextCommits(pid any, mergeRequest int64, options ...RequestOptionFunc) ([]*Commit, *Response, error) + // CreateMergeRequestContextCommits creates a list of merge request context + // commits. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/merge_request_context_commits/#create-mr-context-commits + CreateMergeRequestContextCommits(pid any, mergeRequest int64, opt *CreateMergeRequestContextCommitsOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) + // DeleteMergeRequestContextCommits deletes a list of merge request context + // commits. + // + // GitLab API docs: + // https://docs.gitlab.com/api/merge_request_context_commits/#delete-mr-context-commits + DeleteMergeRequestContextCommits(pid any, mergeRequest int64, opt *DeleteMergeRequestContextCommitsOptions, options ...RequestOptionFunc) (*Response, error) + } + + // MergeRequestContextCommitsService handles communication with the merge + // request context commits related methods of the GitLab API. + // + // GitLab API docs: + // https://docs.gitlab.com/api/merge_request_context_commits/ + MergeRequestContextCommitsService struct { + client *Client + } +) + +var _ MergeRequestContextCommitsServiceInterface = (*MergeRequestContextCommitsService)(nil) + +func (s *MergeRequestContextCommitsService) ListMergeRequestContextCommits(pid any, mergeRequest int64, options ...RequestOptionFunc) ([]*Commit, *Response, error) { + return do[[]*Commit](s.client, + withPath("projects/%s/merge_requests/%d/context_commits", ProjectID{pid}, mergeRequest), + withRequestOpts(options...), + ) +} + +// CreateMergeRequestContextCommitsOptions represents the available +// CreateMergeRequestContextCommits() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/merge_request_context_commits/#create-mr-context-commits +type CreateMergeRequestContextCommitsOptions struct { + Commits *[]string `url:"commits,omitempty" json:"commits,omitempty"` +} + +func (s *MergeRequestContextCommitsService) CreateMergeRequestContextCommits(pid any, mergeRequest int64, opt *CreateMergeRequestContextCommitsOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { + return do[[]*Commit](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/context_commits", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +// DeleteMergeRequestContextCommitsOptions represents the available +// DeleteMergeRequestContextCommits() options. +// +// GitLab API docs: +// https://docs.gitlab.com/api/merge_request_context_commits/#delete-mr-context-commits +type DeleteMergeRequestContextCommitsOptions struct { + Commits *[]string `url:"commits,omitempty" json:"commits,omitempty"` +} + +func (s *MergeRequestContextCommitsService) DeleteMergeRequestContextCommits(pid any, mergeRequest int64, opt *DeleteMergeRequestContextCommitsOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/merge_requests/%d/context_commits", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/merge_requests.go b/vendor/gitlab.com/gitlab-org/api/client-go/merge_requests.go index 33048676e0..8bfa7aeffb 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/merge_requests.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/merge_requests.go @@ -19,7 +19,6 @@ package gitlab import ( "bytes" "encoding/json" - "fmt" "net/http" "time" ) @@ -29,37 +28,37 @@ type ( ListMergeRequests(opt *ListMergeRequestsOptions, options ...RequestOptionFunc) 
([]*BasicMergeRequest, *Response, error) ListProjectMergeRequests(pid any, opt *ListProjectMergeRequestsOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) ListGroupMergeRequests(gid any, opt *ListGroupMergeRequestsOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) - GetMergeRequest(pid any, mergeRequest int, opt *GetMergeRequestsOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) - GetMergeRequestApprovals(pid any, mergeRequest int, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) - GetMergeRequestCommits(pid any, mergeRequest int, opt *GetMergeRequestCommitsOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) - GetMergeRequestChanges(pid any, mergeRequest int, opt *GetMergeRequestChangesOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) - ListMergeRequestDiffs(pid any, mergeRequest int, opt *ListMergeRequestDiffsOptions, options ...RequestOptionFunc) ([]*MergeRequestDiff, *Response, error) - ShowMergeRequestRawDiffs(pid any, mergeRequest int, opt *ShowMergeRequestRawDiffsOptions, options ...RequestOptionFunc) ([]byte, *Response, error) - GetMergeRequestParticipants(pid any, mergeRequest int, options ...RequestOptionFunc) ([]*BasicUser, *Response, error) - GetMergeRequestReviewers(pid any, mergeRequest int, options ...RequestOptionFunc) ([]*MergeRequestReviewer, *Response, error) - ListMergeRequestPipelines(pid any, mergeRequest int, options ...RequestOptionFunc) ([]*PipelineInfo, *Response, error) - CreateMergeRequestPipeline(pid any, mergeRequest int, options ...RequestOptionFunc) (*PipelineInfo, *Response, error) - GetIssuesClosedOnMerge(pid any, mergeRequest int, opt *GetIssuesClosedOnMergeOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) - ListRelatedIssues(pid any, mergeRequest int, opt *ListRelatedIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) + GetMergeRequest(pid any, 
mergeRequest int64, opt *GetMergeRequestsOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) + GetMergeRequestApprovals(pid any, mergeRequest int64, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) + GetMergeRequestCommits(pid any, mergeRequest int64, opt *GetMergeRequestCommitsOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) + GetMergeRequestChanges(pid any, mergeRequest int64, opt *GetMergeRequestChangesOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) + ListMergeRequestDiffs(pid any, mergeRequest int64, opt *ListMergeRequestDiffsOptions, options ...RequestOptionFunc) ([]*MergeRequestDiff, *Response, error) + ShowMergeRequestRawDiffs(pid any, mergeRequest int64, opt *ShowMergeRequestRawDiffsOptions, options ...RequestOptionFunc) ([]byte, *Response, error) + GetMergeRequestParticipants(pid any, mergeRequest int64, options ...RequestOptionFunc) ([]*BasicUser, *Response, error) + GetMergeRequestReviewers(pid any, mergeRequest int64, options ...RequestOptionFunc) ([]*MergeRequestReviewer, *Response, error) + ListMergeRequestPipelines(pid any, mergeRequest int64, options ...RequestOptionFunc) ([]*PipelineInfo, *Response, error) + CreateMergeRequestPipeline(pid any, mergeRequest int64, options ...RequestOptionFunc) (*PipelineInfo, *Response, error) + GetIssuesClosedOnMerge(pid any, mergeRequest int64, opt *GetIssuesClosedOnMergeOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) + ListRelatedIssues(pid any, mergeRequest int64, opt *ListRelatedIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) CreateMergeRequest(pid any, opt *CreateMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) - UpdateMergeRequest(pid any, mergeRequest int, opt *UpdateMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) - DeleteMergeRequest(pid any, mergeRequest int, options ...RequestOptionFunc) (*Response, 
error) - AcceptMergeRequest(pid any, mergeRequest int, opt *AcceptMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) - CancelMergeWhenPipelineSucceeds(pid any, mergeRequest int, options ...RequestOptionFunc) (*MergeRequest, *Response, error) - RebaseMergeRequest(pid any, mergeRequest int, opt *RebaseMergeRequestOptions, options ...RequestOptionFunc) (*Response, error) - GetMergeRequestDiffVersions(pid any, mergeRequest int, opt *GetMergeRequestDiffVersionsOptions, options ...RequestOptionFunc) ([]*MergeRequestDiffVersion, *Response, error) - GetSingleMergeRequestDiffVersion(pid any, mergeRequest, version int, opt *GetSingleMergeRequestDiffVersionOptions, options ...RequestOptionFunc) (*MergeRequestDiffVersion, *Response, error) - SubscribeToMergeRequest(pid any, mergeRequest int, options ...RequestOptionFunc) (*MergeRequest, *Response, error) - UnsubscribeFromMergeRequest(pid any, mergeRequest int, options ...RequestOptionFunc) (*MergeRequest, *Response, error) - CreateTodo(pid any, mergeRequest int, options ...RequestOptionFunc) (*Todo, *Response, error) - SetTimeEstimate(pid any, mergeRequest int, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) - ResetTimeEstimate(pid any, mergeRequest int, options ...RequestOptionFunc) (*TimeStats, *Response, error) - AddSpentTime(pid any, mergeRequest int, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) - ResetSpentTime(pid any, mergeRequest int, options ...RequestOptionFunc) (*TimeStats, *Response, error) - GetTimeSpent(pid any, mergeRequest int, options ...RequestOptionFunc) (*TimeStats, *Response, error) - CreateMergeRequestDependency(pid any, mergeRequest int, opts CreateMergeRequestDependencyOptions, options ...RequestOptionFunc) (*MergeRequestDependency, *Response, error) - DeleteMergeRequestDependency(pid any, mergeRequest int, blockingMergeRequest int, options ...RequestOptionFunc) (*Response, error) - 
GetMergeRequestDependencies(pid any, mergeRequest int, options ...RequestOptionFunc) ([]MergeRequestDependency, *Response, error) + UpdateMergeRequest(pid any, mergeRequest int64, opt *UpdateMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) + DeleteMergeRequest(pid any, mergeRequest int64, options ...RequestOptionFunc) (*Response, error) + AcceptMergeRequest(pid any, mergeRequest int64, opt *AcceptMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) + CancelMergeWhenPipelineSucceeds(pid any, mergeRequest int64, options ...RequestOptionFunc) (*MergeRequest, *Response, error) + RebaseMergeRequest(pid any, mergeRequest int64, opt *RebaseMergeRequestOptions, options ...RequestOptionFunc) (*Response, error) + GetMergeRequestDiffVersions(pid any, mergeRequest int64, opt *GetMergeRequestDiffVersionsOptions, options ...RequestOptionFunc) ([]*MergeRequestDiffVersion, *Response, error) + GetSingleMergeRequestDiffVersion(pid any, mergeRequest, version int64, opt *GetSingleMergeRequestDiffVersionOptions, options ...RequestOptionFunc) (*MergeRequestDiffVersion, *Response, error) + SubscribeToMergeRequest(pid any, mergeRequest int64, options ...RequestOptionFunc) (*MergeRequest, *Response, error) + UnsubscribeFromMergeRequest(pid any, mergeRequest int64, options ...RequestOptionFunc) (*MergeRequest, *Response, error) + CreateTodo(pid any, mergeRequest int64, options ...RequestOptionFunc) (*Todo, *Response, error) + SetTimeEstimate(pid any, mergeRequest int64, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) + ResetTimeEstimate(pid any, mergeRequest int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) + AddSpentTime(pid any, mergeRequest int64, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) + ResetSpentTime(pid any, mergeRequest int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) + GetTimeSpent(pid any, 
mergeRequest int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) + CreateMergeRequestDependency(pid any, mergeRequest int64, opts CreateMergeRequestDependencyOptions, options ...RequestOptionFunc) (*MergeRequestDependency, *Response, error) + DeleteMergeRequestDependency(pid any, mergeRequest int64, blockingMergeRequest int64, options ...RequestOptionFunc) (*Response, error) + GetMergeRequestDependencies(pid any, mergeRequest int64, options ...RequestOptionFunc) ([]MergeRequestDependency, *Response, error) } // MergeRequestsService handles communication with the merge requests related @@ -79,25 +78,25 @@ var _ MergeRequestsServiceInterface = (*MergeRequestsService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/ type BasicMergeRequest struct { - ID int `json:"id"` - IID int `json:"iid"` + ID int64 `json:"id"` + IID int64 `json:"iid"` TargetBranch string `json:"target_branch"` SourceBranch string `json:"source_branch"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` Title string `json:"title"` State string `json:"state"` Imported bool `json:"imported"` ImportedFrom string `json:"imported_from"` CreatedAt *time.Time `json:"created_at"` UpdatedAt *time.Time `json:"updated_at"` - Upvotes int `json:"upvotes"` - Downvotes int `json:"downvotes"` + Upvotes int64 `json:"upvotes"` + Downvotes int64 `json:"downvotes"` Author *BasicUser `json:"author"` Assignee *BasicUser `json:"assignee"` Assignees []*BasicUser `json:"assignees"` Reviewers []*BasicUser `json:"reviewers"` - SourceProjectID int `json:"source_project_id"` - TargetProjectID int `json:"target_project_id"` + SourceProjectID int64 `json:"source_project_id"` + TargetProjectID int64 `json:"target_project_id"` Labels Labels `json:"labels"` LabelDetails []*LabelDetails `json:"label_details"` Description string `json:"description"` @@ -114,7 +113,7 @@ type BasicMergeRequest struct { SHA string `json:"sha"` MergeCommitSHA string `json:"merge_commit_sha"` 
SquashCommitSHA string `json:"squash_commit_sha"` - UserNotesCount int `json:"user_notes_count"` + UserNotesCount int64 `json:"user_notes_count"` ShouldRemoveSourceBranch bool `json:"should_remove_source_branch"` ForceRemoveSourceBranch bool `json:"force_remove_source_branch"` AllowCollaboration bool `json:"allow_collaboration"` @@ -142,25 +141,19 @@ func (m BasicMergeRequest) String() string { // GitLab API docs: https://docs.gitlab.com/api/merge_requests/ type MergeRequest struct { BasicMergeRequest - MergeError string `json:"merge_error"` - Subscribed bool `json:"subscribed"` - ChangesCount string `json:"changes_count"` - User struct { - CanMerge bool `json:"can_merge"` - } `json:"user"` - LatestBuildStartedAt *time.Time `json:"latest_build_started_at"` - LatestBuildFinishedAt *time.Time `json:"latest_build_finished_at"` - FirstDeployedToProductionAt *time.Time `json:"first_deployed_to_production_at"` - Pipeline *PipelineInfo `json:"pipeline"` - HeadPipeline *Pipeline `json:"head_pipeline"` - DiffRefs struct { - BaseSha string `json:"base_sha"` - HeadSha string `json:"head_sha"` - StartSha string `json:"start_sha"` - } `json:"diff_refs"` - RebaseInProgress bool `json:"rebase_in_progress"` - DivergedCommitsCount int `json:"diverged_commits_count"` - FirstContribution bool `json:"first_contribution"` + MergeError string `json:"merge_error"` + Subscribed bool `json:"subscribed"` + ChangesCount string `json:"changes_count"` + User MergeRequestUser `json:"user"` + LatestBuildStartedAt *time.Time `json:"latest_build_started_at"` + LatestBuildFinishedAt *time.Time `json:"latest_build_finished_at"` + FirstDeployedToProductionAt *time.Time `json:"first_deployed_to_production_at"` + Pipeline *PipelineInfo `json:"pipeline"` + HeadPipeline *Pipeline `json:"head_pipeline"` + DiffRefs MergeRequestDiffRefs `json:"diff_refs"` + RebaseInProgress bool `json:"rebase_in_progress"` + DivergedCommitsCount int64 `json:"diverged_commits_count"` + FirstContribution bool 
`json:"first_contribution"` // Deprecated: use Draft instead WorkInProgress bool `json:"work_in_progress"` @@ -170,6 +163,30 @@ func (m MergeRequest) String() string { return Stringify(m) } +// MergeRequestUser represents a GitLab merge request user. +// +// GitLab API docs: https://docs.gitlab.com/api/merge_requests/ +type MergeRequestUser struct { + CanMerge bool `json:"can_merge"` +} + +func (u MergeRequestUser) String() string { + return Stringify(u) +} + +// MergeRequestDiffRefs represents a GitLab merge request diff refs. +// +// GitLab API docs: https://docs.gitlab.com/api/merge_requests/ +type MergeRequestDiffRefs struct { + BaseSha string `json:"base_sha"` + HeadSha string `json:"head_sha"` + StartSha string `json:"start_sha"` +} + +func (d MergeRequestDiffRefs) String() string { + return Stringify(d) +} + func (m *MergeRequest) UnmarshalJSON(data []byte) error { type alias MergeRequest @@ -204,9 +221,9 @@ func (m *MergeRequest) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, (*alias)(m)) } -// MergeRequestDiff represents Gitlab merge request diff. +// MergeRequestDiff represents GitLab merge request diff. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#list-merge-request-diffs type MergeRequestDiff struct { OldPath string `json:"old_path"` @@ -218,19 +235,21 @@ type MergeRequestDiff struct { RenamedFile bool `json:"renamed_file"` DeletedFile bool `json:"deleted_file"` GeneratedFile bool `json:"generated_file"` + Collapsed bool `json:"collapsed"` + TooLarge bool `json:"too_large"` } -// MergeRequestDiffVersion represents Gitlab merge request version. +// MergeRequestDiffVersion represents GitLab merge request version. 
// -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#get-merge-request-diff-versions type MergeRequestDiffVersion struct { - ID int `json:"id"` + ID int64 `json:"id"` HeadCommitSHA string `json:"head_commit_sha,omitempty"` BaseCommitSHA string `json:"base_commit_sha,omitempty"` StartCommitSHA string `json:"start_commit_sha,omitempty"` CreatedAt *time.Time `json:"created_at,omitempty"` - MergeRequestID int `json:"merge_request_id,omitempty"` + MergeRequestID int64 `json:"merge_request_id,omitempty"` State string `json:"state,omitempty"` RealSize string `json:"real_size,omitempty"` Commits []*Commit `json:"commits,omitempty"` @@ -241,6 +260,15 @@ func (m MergeRequestDiffVersion) String() string { return Stringify(m) } +// MergeRequestReviewer represents a reviewer entry returned by the reviewers API. +// Matches the JSON shape used in tests: {"user": {...}, "state": "...", "created_at": "..."} +// Placed here because it's used by MergeRequestsService.GetMergeRequestReviewers and tests/mock. +type MergeRequestReviewer struct { + User *BasicUser `json:"user"` + State string `json:"state"` + CreatedAt *time.Time `json:"created_at"` +} + // ListMergeRequestsOptions represents the available ListMergeRequests() // options. 
// @@ -263,7 +291,7 @@ type ListMergeRequestsOptions struct { UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorID *int64 `url:"author_id,omitempty" json:"author_id,omitempty"` AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` @@ -288,18 +316,17 @@ type ListMergeRequestsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#list-merge-requests func (s *MergeRequestsService) ListMergeRequests(opt *ListMergeRequestsOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "merge_requests", opt, options) - if err != nil { - return nil, nil, err - } - - var m []*BasicMergeRequest - resp, err := s.client.Do(req, &m) + mrs, resp, err := do[[]*MergeRequest](s.client, + withMethod(http.MethodGet), + withPath("merge_requests"), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - return m, resp, nil + return toBasic(mrs), resp, nil } // ListProjectMergeRequestsOptions represents the available ListMergeRequests() @@ -309,12 +336,13 @@ func (s *MergeRequestsService) ListMergeRequests(opt *ListMergeRequestsOptions, // https://docs.gitlab.com/api/merge_requests/#list-project-merge-requests type ListProjectMergeRequestsOptions struct { ListOptions - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + IIDs *[]int64 `url:"iids[],omitempty" json:"iids,omitempty"` State *string `url:"state,omitempty" json:"state,omitempty"` OrderBy *string 
`url:"order_by,omitempty" json:"order_by,omitempty"` Sort *string `url:"sort,omitempty" json:"sort,omitempty"` Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` View *string `url:"view,omitempty" json:"view,omitempty"` + Environment *string `url:"environment,omitempty" json:"environment,omitempty"` Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` WithLabelsDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` @@ -323,8 +351,10 @@ type ListProjectMergeRequestsOptions struct { CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` + DeployedBefore *time.Time `url:"deployed_before,omitempty" json:"deployed_before,omitempty"` + DeployedAfter *time.Time `url:"deployed_after,omitempty" json:"deployed_after,omitempty"` Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorID *int64 `url:"author_id,omitempty" json:"author_id,omitempty"` AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` @@ -345,24 +375,17 @@ type ListProjectMergeRequestsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#list-project-merge-requests func (s *MergeRequestsService) ListProjectMergeRequests(pid any, opt *ListProjectMergeRequestsOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, 
err - } - u := fmt.Sprintf("projects/%s/merge_requests", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*BasicMergeRequest - resp, err := s.client.Do(req, &m) + mrs, resp, err := do[[]*MergeRequest](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - return m, resp, nil + return toBasic(mrs), resp, nil } // ListGroupMergeRequestsOptions represents the available ListGroupMergeRequests() @@ -386,7 +409,7 @@ type ListGroupMergeRequestsOptions struct { UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorID *int64 `url:"author_id,omitempty" json:"author_id,omitempty"` AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` @@ -408,24 +431,17 @@ type ListGroupMergeRequestsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#list-group-merge-requests func (s *MergeRequestsService) ListGroupMergeRequests(gid any, opt *ListGroupMergeRequestsOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/merge_requests", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*BasicMergeRequest - resp, err := 
s.client.Do(req, &m) + mrs, resp, err := do[[]*MergeRequest](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/merge_requests", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - return m, resp, nil + return toBasic(mrs), resp, nil } // GetMergeRequestsOptions represents the available GetMergeRequests() @@ -443,50 +459,26 @@ type GetMergeRequestsOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#get-single-mr -func (s *MergeRequestsService) GetMergeRequest(pid any, mergeRequest int, opt *GetMergeRequestsOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *MergeRequestsService) GetMergeRequest(pid any, mergeRequest int64, opt *GetMergeRequestsOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { + return do[*MergeRequest](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests/%d", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetMergeRequestApprovals gets information about a merge requests approvals // // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#single-merge-request-approval -func (s *MergeRequestsService) GetMergeRequestApprovals(pid any, mergeRequest int, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mergeRequest) - - 
req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - a := new(MergeRequestApprovals) - resp, err := s.client.Do(req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil +func (s *MergeRequestsService) GetMergeRequestApprovals(pid any, mergeRequest int64, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) { + return do[*MergeRequestApprovals](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests/%d/approvals", ProjectID{pid}, mergeRequest), + withAPIOpts(nil), + withRequestOpts(options...), + ) } // GetMergeRequestCommitsOptions represents the available GetMergeRequestCommits() @@ -494,31 +486,21 @@ func (s *MergeRequestsService) GetMergeRequestApprovals(pid any, mergeRequest in // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#get-single-merge-request-commits -type GetMergeRequestCommitsOptions ListOptions +type GetMergeRequestCommitsOptions struct { + ListOptions +} // GetMergeRequestCommits gets a list of merge request commits. 
// // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#get-single-merge-request-commits -func (s *MergeRequestsService) GetMergeRequestCommits(pid any, mergeRequest int, opt *GetMergeRequestCommitsOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/commits", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var c []*Commit - resp, err := s.client.Do(req, &c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil +func (s *MergeRequestsService) GetMergeRequestCommits(pid any, mergeRequest int64, opt *GetMergeRequestCommitsOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { + return do[[]*Commit](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests/%d/commits", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetMergeRequestChangesOptions represents the available GetMergeRequestChanges() @@ -541,25 +523,13 @@ type GetMergeRequestChangesOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#get-single-merge-request-changes -func (s *MergeRequestsService) GetMergeRequestChanges(pid any, mergeRequest int, opt *GetMergeRequestChangesOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/changes", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *MergeRequestsService) GetMergeRequestChanges(pid any, 
mergeRequest int64, opt *GetMergeRequestChangesOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { + return do[*MergeRequest](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests/%d/changes", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListMergeRequestDiffsOptions represents the available ListMergeRequestDiffs() @@ -576,25 +546,13 @@ type ListMergeRequestDiffsOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#list-merge-request-diffs -func (s *MergeRequestsService) ListMergeRequestDiffs(pid any, mergeRequest int, opt *ListMergeRequestDiffsOptions, options ...RequestOptionFunc) ([]*MergeRequestDiff, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/diffs", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*MergeRequestDiff - resp, err := s.client.Do(req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *MergeRequestsService) ListMergeRequestDiffs(pid any, mergeRequest int64, opt *ListMergeRequestDiffsOptions, options ...RequestOptionFunc) ([]*MergeRequestDiff, *Response, error) { + return do[[]*MergeRequestDiff](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests/%d/diffs", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ShowMergeRequestRawDiffsOptions represents the available ShowMergeRequestRawDiffs() @@ -608,139 +566,66 @@ type ShowMergeRequestRawDiffsOptions struct{} // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#show-merge-request-raw-diffs -func (s *MergeRequestsService) ShowMergeRequestRawDiffs(pid any, mergeRequest int, opt *ShowMergeRequestRawDiffsOptions, options ...RequestOptionFunc) ([]byte, 
*Response, error) { - project, err := parseID(pid) - if err != nil { - return []byte{}, nil, err - } - u := fmt.Sprintf( - "projects/%s/merge_requests/%d/raw_diffs", - PathEscape(project), - mergeRequest, +func (s *MergeRequestsService) ShowMergeRequestRawDiffs(pid any, mergeRequest int64, opt *ShowMergeRequestRawDiffsOptions, options ...RequestOptionFunc) ([]byte, *Response, error) { + b, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/merge_requests/%d/raw_diffs", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), ) - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return []byte{}, nil, err - } - - var rd bytes.Buffer - resp, err := s.client.Do(req, &rd) - if err != nil { - return []byte{}, resp, err - } - - return rd.Bytes(), resp, nil + return b.Bytes(), resp, err } // GetMergeRequestParticipants gets a list of merge request participants. // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#get-single-merge-request-participants -func (s *MergeRequestsService) GetMergeRequestParticipants(pid any, mergeRequest int, options ...RequestOptionFunc) ([]*BasicUser, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/participants", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var bu []*BasicUser - resp, err := s.client.Do(req, &bu) - if err != nil { - return nil, resp, err - } - - return bu, resp, nil -} - -// MergeRequestReviewer represents a GitLab merge request reviewer. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/merge_requests/#get-single-merge-request-reviewers -type MergeRequestReviewer struct { - User *BasicUser `json:"user"` - State string `json:"state"` - CreatedAt *time.Time `json:"created_at"` +func (s *MergeRequestsService) GetMergeRequestParticipants(pid any, mergeRequest int64, options ...RequestOptionFunc) ([]*BasicUser, *Response, error) { + return do[[]*BasicUser](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests/%d/participants", ProjectID{pid}, mergeRequest), + withAPIOpts(nil), + withRequestOpts(options...), + ) } // GetMergeRequestReviewers gets a list of merge request reviewers. // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#get-single-merge-request-reviewers -func (s *MergeRequestsService) GetMergeRequestReviewers(pid any, mergeRequest int, options ...RequestOptionFunc) ([]*MergeRequestReviewer, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/reviewers", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var mrr []*MergeRequestReviewer - resp, err := s.client.Do(req, &mrr) - if err != nil { - return nil, resp, err - } - - return mrr, resp, nil +func (s *MergeRequestsService) GetMergeRequestReviewers(pid any, mergeRequest int64, options ...RequestOptionFunc) ([]*MergeRequestReviewer, *Response, error) { + return do[[]*MergeRequestReviewer](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests/%d/reviewers", ProjectID{pid}, mergeRequest), + withAPIOpts(nil), + withRequestOpts(options...), + ) } // ListMergeRequestPipelines gets all pipelines for the provided merge request. 
// // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#list-merge-request-pipelines -func (s *MergeRequestsService) ListMergeRequestPipelines(pid any, mergeRequest int, options ...RequestOptionFunc) ([]*PipelineInfo, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/pipelines", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var p []*PipelineInfo - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *MergeRequestsService) ListMergeRequestPipelines(pid any, mergeRequest int64, options ...RequestOptionFunc) ([]*PipelineInfo, *Response, error) { + return do[[]*PipelineInfo](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests/%d/pipelines", ProjectID{pid}, mergeRequest), + withAPIOpts(nil), + withRequestOpts(options...), + ) } // CreateMergeRequestPipeline creates a new pipeline for a merge request. 
// // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#create-merge-request-pipeline -func (s *MergeRequestsService) CreateMergeRequestPipeline(pid any, mergeRequest int, options ...RequestOptionFunc) (*PipelineInfo, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/pipelines", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(PipelineInfo) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *MergeRequestsService) CreateMergeRequestPipeline(pid any, mergeRequest int64, options ...RequestOptionFunc) (*PipelineInfo, *Response, error) { + return do[*PipelineInfo](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/pipelines", ProjectID{pid}, mergeRequest), + withAPIOpts(nil), + withRequestOpts(options...), + ) } // GetIssuesClosedOnMergeOptions represents the available GetIssuesClosedOnMerge() @@ -748,64 +633,43 @@ func (s *MergeRequestsService) CreateMergeRequestPipeline(pid any, mergeRequest // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#list-issues-that-close-on-merge -type GetIssuesClosedOnMergeOptions ListOptions +type GetIssuesClosedOnMergeOptions struct { + ListOptions +} // GetIssuesClosedOnMerge gets all the issues that would be closed by merging the // provided merge request. 
// // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#list-issues-that-close-on-merge -func (s *MergeRequestsService) GetIssuesClosedOnMerge(pid any, mergeRequest int, opt *GetIssuesClosedOnMergeOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/closes_issues", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var i []*Issue - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil +func (s *MergeRequestsService) GetIssuesClosedOnMerge(pid any, mergeRequest int64, opt *GetIssuesClosedOnMergeOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + return do[[]*Issue](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests/%d/closes_issues", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListRelatedIssuesOptions represents the available ListRelatedIssues() options. // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#list-issues-related-to-the-merge-request -type ListRelatedIssuesOptions ListOptions +type ListRelatedIssuesOptions struct { + ListOptions +} // ListRelatedIssues gets all the issues related to provided merge request. 
// // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#list-issues-related-to-the-merge-request -func (s *MergeRequestsService) ListRelatedIssues(pid any, mergeRequest int, opt *ListRelatedIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - - u := fmt.Sprintf("projects/%s/merge_requests/%d/related_issues", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var i []*Issue - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil +func (s *MergeRequestsService) ListRelatedIssues(pid any, mergeRequest int64, opt *ListRelatedIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + return do[[]*Issue](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests/%d/related_issues", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateMergeRequestOptions represents the available CreateMergeRequest() @@ -819,17 +683,17 @@ type CreateMergeRequestOptions struct { SourceBranch *string `url:"source_branch,omitempty" json:"source_branch,omitempty"` TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"` Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - AssigneeIDs *[]int `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` - ReviewerIDs *[]int `url:"reviewer_ids,omitempty" json:"reviewer_ids,omitempty"` - TargetProjectID *int `url:"target_project_id,omitempty" json:"target_project_id,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` + AssigneeID *int64 `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + AssigneeIDs *[]int64 
`url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` + ReviewerIDs *[]int64 `url:"reviewer_ids,omitempty" json:"reviewer_ids,omitempty"` + TargetProjectID *int64 `url:"target_project_id,omitempty" json:"target_project_id,omitempty"` + MilestoneID *int64 `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` RemoveSourceBranch *bool `url:"remove_source_branch,omitempty" json:"remove_source_branch,omitempty"` Squash *bool `url:"squash,omitempty" json:"squash,omitempty"` AllowCollaboration *bool `url:"allow_collaboration,omitempty" json:"allow_collaboration,omitempty"` // Deprecated: will be removed in v5 of the API, use the Merge Request Approvals API instead - ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` + ApprovalsBeforeMerge *int64 `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` } // CreateMergeRequest creates a new merge request. @@ -837,24 +701,12 @@ type CreateMergeRequestOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#create-mr func (s *MergeRequestsService) CreateMergeRequest(pid any, opt *CreateMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil + return do[*MergeRequest](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateMergeRequestOptions represents the available UpdateMergeRequest() @@ -866,13 +718,13 @@ type UpdateMergeRequestOptions struct { Title *string `url:"title,omitempty" 
json:"title,omitempty"` Description *string `url:"description,omitempty" json:"description,omitempty"` TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - AssigneeIDs *[]int `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` - ReviewerIDs *[]int `url:"reviewer_ids,omitempty" json:"reviewer_ids,omitempty"` + AssigneeID *int64 `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + AssigneeIDs *[]int64 `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` + ReviewerIDs *[]int64 `url:"reviewer_ids,omitempty" json:"reviewer_ids,omitempty"` Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` AddLabels *LabelOptions `url:"add_labels,comma,omitempty" json:"add_labels,omitempty"` RemoveLabels *LabelOptions `url:"remove_labels,comma,omitempty" json:"remove_labels,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` + MilestoneID *int64 `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"` RemoveSourceBranch *bool `url:"remove_source_branch,omitempty" json:"remove_source_branch,omitempty"` Squash *bool `url:"squash,omitempty" json:"squash,omitempty"` @@ -884,44 +736,27 @@ type UpdateMergeRequestOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#update-mr -func (s *MergeRequestsService) UpdateMergeRequest(pid any, mergeRequest int, opt *UpdateMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if 
err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *MergeRequestsService) UpdateMergeRequest(pid any, mergeRequest int64, opt *UpdateMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { + return do[*MergeRequest](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/merge_requests/%d", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteMergeRequest deletes a merge request. // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#delete-a-merge-request -func (s *MergeRequestsService) DeleteMergeRequest(pid any, mergeRequest int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *MergeRequestsService) DeleteMergeRequest(pid any, mergeRequest int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/merge_requests/%d", ProjectID{pid}, mergeRequest), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } // AcceptMergeRequestOptions represents the available AcceptMergeRequest() @@ -948,25 +783,13 @@ type AcceptMergeRequestOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#merge-a-merge-request -func (s *MergeRequestsService) AcceptMergeRequest(pid any, mergeRequest int, opt *AcceptMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/merge", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, 
options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *MergeRequestsService) AcceptMergeRequest(pid any, mergeRequest int64, opt *AcceptMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { + return do[*MergeRequest](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/merge_requests/%d/merge", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CancelMergeWhenPipelineSucceeds cancels a merge when pipeline succeeds. If @@ -977,25 +800,13 @@ func (s *MergeRequestsService) AcceptMergeRequest(pid any, mergeRequest int, opt // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#cancel-merge-when-pipeline-succeeds -func (s *MergeRequestsService) CancelMergeWhenPipelineSucceeds(pid any, mergeRequest int, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/cancel_merge_when_pipeline_succeeds", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *MergeRequestsService) CancelMergeWhenPipelineSucceeds(pid any, mergeRequest int64, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { + return do[*MergeRequest](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/cancel_merge_when_pipeline_succeeds", ProjectID{pid}, mergeRequest), + withAPIOpts(nil), + withRequestOpts(options...), + ) } // RebaseMergeRequestOptions represents the available RebaseMergeRequest() @@ -1013,19 +824,14 @@ type RebaseMergeRequestOptions struct { // // 
GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#rebase-a-merge-request -func (s *MergeRequestsService) RebaseMergeRequest(pid any, mergeRequest int, opt *RebaseMergeRequestOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/rebase", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *MergeRequestsService) RebaseMergeRequest(pid any, mergeRequest int64, opt *RebaseMergeRequestOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/merge_requests/%d/rebase", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // GetMergeRequestDiffVersionsOptions represents the available @@ -1033,31 +839,21 @@ func (s *MergeRequestsService) RebaseMergeRequest(pid any, mergeRequest int, opt // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#get-merge-request-diff-versions -type GetMergeRequestDiffVersionsOptions ListOptions +type GetMergeRequestDiffVersionsOptions struct { + ListOptions +} // GetMergeRequestDiffVersions get a list of merge request diff versions. 
// // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#get-merge-request-diff-versions -func (s *MergeRequestsService) GetMergeRequestDiffVersions(pid any, mergeRequest int, opt *GetMergeRequestDiffVersionsOptions, options ...RequestOptionFunc) ([]*MergeRequestDiffVersion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/versions", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var v []*MergeRequestDiffVersion - resp, err := s.client.Do(req, &v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil +func (s *MergeRequestsService) GetMergeRequestDiffVersions(pid any, mergeRequest int64, opt *GetMergeRequestDiffVersionsOptions, options ...RequestOptionFunc) ([]*MergeRequestDiffVersion, *Response, error) { + return do[[]*MergeRequestDiffVersion](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests/%d/versions", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetSingleMergeRequestDiffVersionOptions represents the available @@ -1073,25 +869,13 @@ type GetSingleMergeRequestDiffVersionOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#get-a-single-merge-request-diff-version -func (s *MergeRequestsService) GetSingleMergeRequestDiffVersion(pid any, mergeRequest, version int, opt *GetSingleMergeRequestDiffVersionOptions, options ...RequestOptionFunc) (*MergeRequestDiffVersion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/versions/%d", PathEscape(project), mergeRequest, version) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(MergeRequestDiffVersion) - resp, err := 
s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil +func (s *MergeRequestsService) GetSingleMergeRequestDiffVersion(pid any, mergeRequest, version int64, opt *GetSingleMergeRequestDiffVersionOptions, options ...RequestOptionFunc) (*MergeRequestDiffVersion, *Response, error) { + return do[*MergeRequestDiffVersion](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests/%d/versions/%d", ProjectID{pid}, mergeRequest, version), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // SubscribeToMergeRequest subscribes the authenticated user to the given merge @@ -1100,25 +884,13 @@ func (s *MergeRequestsService) GetSingleMergeRequestDiffVersion(pid any, mergeRe // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#subscribe-to-a-merge-request -func (s *MergeRequestsService) SubscribeToMergeRequest(pid any, mergeRequest int, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/subscribe", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *MergeRequestsService) SubscribeToMergeRequest(pid any, mergeRequest int64, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { + return do[*MergeRequest](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/subscribe", ProjectID{pid}, mergeRequest), + withAPIOpts(nil), + withRequestOpts(options...), + ) } // UnsubscribeFromMergeRequest unsubscribes the authenticated user from the @@ -1128,25 +900,13 @@ func (s *MergeRequestsService) SubscribeToMergeRequest(pid any, mergeRequest int // // GitLab API docs: // 
https://docs.gitlab.com/api/merge_requests/#unsubscribe-from-a-merge-request -func (s *MergeRequestsService) UnsubscribeFromMergeRequest(pid any, mergeRequest int, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/unsubscribe", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *MergeRequestsService) UnsubscribeFromMergeRequest(pid any, mergeRequest int64, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { + return do[*MergeRequest](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/unsubscribe", ProjectID{pid}, mergeRequest), + withAPIOpts(nil), + withRequestOpts(options...), + ) } // CreateTodo manually creates a todo for the current user on a merge request. 
@@ -1155,32 +915,20 @@ func (s *MergeRequestsService) UnsubscribeFromMergeRequest(pid any, mergeRequest // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#create-a-to-do-item -func (s *MergeRequestsService) CreateTodo(pid any, mergeRequest int, options ...RequestOptionFunc) (*Todo, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/todo", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(Todo) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil +func (s *MergeRequestsService) CreateTodo(pid any, mergeRequest int64, options ...RequestOptionFunc) (*Todo, *Response, error) { + return do[*Todo](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/todo", ProjectID{pid}, mergeRequest), + withAPIOpts(nil), + withRequestOpts(options...), + ) } // SetTimeEstimate sets the time estimate for a single project merge request. // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#set-a-time-estimate-for-a-merge-request -func (s *MergeRequestsService) SetTimeEstimate(pid any, mergeRequest int, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { +func (s *MergeRequestsService) SetTimeEstimate(pid any, mergeRequest int64, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { return s.timeStats.setTimeEstimate(pid, "merge_requests", mergeRequest, opt, options...) 
} @@ -1188,7 +936,7 @@ func (s *MergeRequestsService) SetTimeEstimate(pid any, mergeRequest int, opt *S // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#reset-the-time-estimate-for-a-merge-request -func (s *MergeRequestsService) ResetTimeEstimate(pid any, mergeRequest int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { +func (s *MergeRequestsService) ResetTimeEstimate(pid any, mergeRequest int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) { return s.timeStats.resetTimeEstimate(pid, "merge_requests", mergeRequest, options...) } @@ -1196,7 +944,7 @@ func (s *MergeRequestsService) ResetTimeEstimate(pid any, mergeRequest int, opti // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#add-spent-time-for-a-merge-request -func (s *MergeRequestsService) AddSpentTime(pid any, mergeRequest int, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { +func (s *MergeRequestsService) AddSpentTime(pid any, mergeRequest int64, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { return s.timeStats.addSpentTime(pid, "merge_requests", mergeRequest, opt, options...) } @@ -1204,7 +952,7 @@ func (s *MergeRequestsService) AddSpentTime(pid any, mergeRequest int, opt *AddS // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#reset-spent-time-for-a-merge-request -func (s *MergeRequestsService) ResetSpentTime(pid any, mergeRequest int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { +func (s *MergeRequestsService) ResetSpentTime(pid any, mergeRequest int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) { return s.timeStats.resetSpentTime(pid, "merge_requests", mergeRequest, options...) 
} @@ -1212,7 +960,7 @@ func (s *MergeRequestsService) ResetSpentTime(pid any, mergeRequest int, options // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#get-time-tracking-stats -func (s *MergeRequestsService) GetTimeSpent(pid any, mergeRequest int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { +func (s *MergeRequestsService) GetTimeSpent(pid any, mergeRequest int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) { return s.timeStats.getTimeSpent(pid, "merge_requests", mergeRequest, options...) } @@ -1221,9 +969,9 @@ func (s *MergeRequestsService) GetTimeSpent(pid any, mergeRequest int, options . // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#create-a-merge-request-dependency type MergeRequestDependency struct { - ID int `json:"id"` + ID int64 `json:"id"` BlockingMergeRequest BlockingMergeRequest `json:"blocking_merge_request"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` } // BlockingMergeRequest represents a GitLab merge request dependency. 
@@ -1231,23 +979,23 @@ type MergeRequestDependency struct { // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#create-a-merge-request-dependency type BlockingMergeRequest struct { - ID int `json:"id"` - Iid int `json:"iid"` + ID int64 `json:"id"` + Iid int64 `json:"iid"` TargetBranch string `json:"target_branch"` SourceBranch string `json:"source_branch"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` Title string `json:"title"` State string `json:"state"` CreatedAt time.Time `json:"created_at"` UpdatedAt time.Time `json:"updated_at"` - Upvotes int `json:"upvotes"` - Downvotes int `json:"downvotes"` + Upvotes int64 `json:"upvotes"` + Downvotes int64 `json:"downvotes"` Author *BasicUser `json:"author"` Assignee *BasicUser `json:"assignee"` Assignees []*BasicUser `json:"assignees"` Reviewers []*BasicUser `json:"reviewers"` - SourceProjectID int `json:"source_project_id"` - TargetProjectID int `json:"target_project_id"` + SourceProjectID int64 `json:"source_project_id"` + TargetProjectID int64 `json:"target_project_id"` Labels *LabelOptions `json:"labels"` Description string `json:"description"` Draft bool `json:"draft"` @@ -1260,7 +1008,7 @@ type BlockingMergeRequest struct { Sha string `json:"sha"` MergeCommitSha string `json:"merge_commit_sha"` SquashCommitSha string `json:"squash_commit_sha"` - UserNotesCount int `json:"user_notes_count"` + UserNotesCount int64 `json:"user_notes_count"` ShouldRemoveSourceBranch *bool `json:"should_remove_source_branch"` ForceRemoveSourceBranch bool `json:"force_remove_source_branch"` WebURL string `json:"web_url"` @@ -1285,7 +1033,7 @@ type BlockingMergeRequest struct { // Deprecated: will be removed in v5 of the API, use MergeUser instead MergedBy *BasicUser `json:"merged_by"` // Deprecated: will be removed in v5 of the API, use the Merge Request Approvals API instead - ApprovalsBeforeMerge *int `json:"approvals_before_merge"` + ApprovalsBeforeMerge *int64 `json:"approvals_before_merge"` 
// Deprecated: will be removed in v5 of the API, use References instead Reference string `json:"reference"` // Deprecated: in 15.6, use DetailedMergeStatus instead @@ -1302,7 +1050,7 @@ func (m MergeRequestDependency) String() string { // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#create-a-merge-request-dependency type CreateMergeRequestDependencyOptions struct { - BlockingMergeRequestID *int `url:"blocking_merge_request_id,omitempty" json:"blocking_merge_request_id,omitempty"` + BlockingMergeRequestID *int64 `url:"blocking_merge_request_id,omitempty" json:"blocking_merge_request_id,omitempty"` } // CreateMergeRequestDependency creates a new merge request dependency for a given @@ -1310,25 +1058,13 @@ type CreateMergeRequestDependencyOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#create-a-merge-request-dependency -func (s *MergeRequestsService) CreateMergeRequestDependency(pid any, mergeRequest int, opts CreateMergeRequestDependencyOptions, options ...RequestOptionFunc) (*MergeRequestDependency, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/blocks", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, opts, options) - if err != nil { - return nil, nil, err - } - - var mrd MergeRequestDependency - resp, err := s.client.Do(req, &mrd) - if err != nil { - return nil, resp, err - } - - return &mrd, resp, err +func (s *MergeRequestsService) CreateMergeRequestDependency(pid any, mergeRequest int64, opts CreateMergeRequestDependencyOptions, options ...RequestOptionFunc) (*MergeRequestDependency, *Response, error) { + return do[*MergeRequestDependency](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/blocks", ProjectID{pid}, mergeRequest), + withAPIOpts(opts), + withRequestOpts(options...), + ) } // DeleteMergeRequestDependency deletes a merge 
request dependency for a given @@ -1336,42 +1072,35 @@ func (s *MergeRequestsService) CreateMergeRequestDependency(pid any, mergeReques // // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#delete-a-merge-request-dependency -func (s *MergeRequestsService) DeleteMergeRequestDependency(pid any, mergeRequest int, blockingMergeRequest int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/blocks/%d", PathEscape(project), mergeRequest, blockingMergeRequest) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *MergeRequestsService) DeleteMergeRequestDependency(pid any, mergeRequest int64, blockingMergeRequest int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/merge_requests/%d/blocks/%d", ProjectID{pid}, mergeRequest, blockingMergeRequest), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } // GetMergeRequestDependencies gets a list of merge request dependencies. 
// // GitLab API docs: // https://docs.gitlab.com/api/merge_requests/#get-merge-request-dependencies -func (s *MergeRequestsService) GetMergeRequestDependencies(pid any, mergeRequest int, options ...RequestOptionFunc) ([]MergeRequestDependency, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/blocks", PathEscape(project), mergeRequest) +func (s *MergeRequestsService) GetMergeRequestDependencies(pid any, mergeRequest int64, options ...RequestOptionFunc) ([]MergeRequestDependency, *Response, error) { + return do[[]MergeRequestDependency](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_requests/%d/blocks", ProjectID{pid}, mergeRequest), + withAPIOpts(nil), + withRequestOpts(options...), + ) +} - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } +func toBasic(mrs []*MergeRequest) []*BasicMergeRequest { + ret := make([]*BasicMergeRequest, len(mrs)) - var mrd []MergeRequestDependency - resp, err := s.client.Do(req, &mrd) - if err != nil { - return nil, resp, err + for i, mr := range mrs { + ret[i] = &mr.BasicMergeRequest } - return mrd, resp, err + return ret } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/merge_trains.go b/vendor/gitlab.com/gitlab-org/api/client-go/merge_trains.go index 059612e1b3..9d807fc3bf 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/merge_trains.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/merge_trains.go @@ -1,7 +1,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -10,8 +9,8 @@ type ( MergeTrainsServiceInterface interface { ListProjectMergeTrains(pid any, opt *ListMergeTrainsOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) ListMergeRequestInMergeTrain(pid any, targetBranch string, opts *ListMergeTrainsOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) - GetMergeRequestOnAMergeTrain(pid any, 
mergeRequest int, options ...RequestOptionFunc) (*MergeTrain, *Response, error) - AddMergeRequestToMergeTrain(pid any, mergeRequest int, opts *AddMergeRequestToMergeTrainOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) + GetMergeRequestOnAMergeTrain(pid any, mergeRequest int64, options ...RequestOptionFunc) (*MergeTrain, *Response, error) + AddMergeRequestToMergeTrain(pid any, mergeRequest int64, opts *AddMergeRequestToMergeTrainOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) } // MergeTrainsService handles communication with the merge trains related @@ -25,11 +24,11 @@ type ( var _ MergeTrainsServiceInterface = (*MergeTrainsService)(nil) -// MergeTrain represents a Gitlab merge train. +// MergeTrain represents a GitLab merge train. // // GitLab API docs: https://docs.gitlab.com/api/merge_trains/ type MergeTrain struct { - ID int `json:"id"` + ID int64 `json:"id"` MergeRequest *MergeTrainMergeRequest `json:"merge_request"` User *BasicUser `json:"user"` Pipeline *Pipeline `json:"pipeline"` @@ -38,16 +37,16 @@ type MergeTrain struct { TargetBranch string `json:"target_branch"` Status string `json:"status"` MergedAt *time.Time `json:"merged_at"` - Duration int `json:"duration"` + Duration int64 `json:"duration"` } -// MergeTrainMergeRequest represents a Gitlab merge request inside merge train. +// MergeTrainMergeRequest represents a GitLab merge request inside merge train. 
// // GitLab API docs: https://docs.gitlab.com/api/merge_trains/ type MergeTrainMergeRequest struct { - ID int `json:"id"` - IID int `json:"iid"` - ProjectID int `json:"project_id"` + ID int64 `json:"id"` + IID int64 `json:"iid"` + ProjectID int64 `json:"project_id"` Title string `json:"title"` Description string `json:"description"` State string `json:"state"` @@ -71,24 +70,12 @@ type ListMergeTrainsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/merge_trains/#list-merge-trains-for-a-project func (s *MergeTrainsService) ListProjectMergeTrains(pid any, opt *ListMergeTrainsOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_trains", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var mts []*MergeTrain - resp, err := s.client.Do(req, &mts) - if err != nil { - return nil, resp, err - } - - return mts, resp, nil + return do[[]*MergeTrain](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_trains", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListMergeRequestInMergeTrain gets a list of merge requests added to a merge @@ -97,24 +84,12 @@ func (s *MergeTrainsService) ListProjectMergeTrains(pid any, opt *ListMergeTrain // GitLab API docs: // https://docs.gitlab.com/api/merge_trains/#list-merge-requests-in-a-merge-train func (s *MergeTrainsService) ListMergeRequestInMergeTrain(pid any, targetBranch string, opts *ListMergeTrainsOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_trains/%s", PathEscape(project), targetBranch) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var 
mts []*MergeTrain - resp, err := s.client.Do(req, &mts) - if err != nil { - return nil, resp, err - } - - return mts, resp, nil + return do[[]*MergeTrain](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/merge_trains/%s", ProjectID{pid}, targetBranch), + withAPIOpts(opts), + withRequestOpts(options...), + ) } // GetMergeRequestOnAMergeTrain Get merge train information for the requested @@ -122,25 +97,11 @@ func (s *MergeTrainsService) ListMergeRequestInMergeTrain(pid any, targetBranch // // GitLab API docs: // https://docs.gitlab.com/api/merge_trains/#get-the-status-of-a-merge-request-on-a-merge-train -func (s *MergeTrainsService) GetMergeRequestOnAMergeTrain(pid any, mergeRequest int, options ...RequestOptionFunc) (*MergeTrain, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_trains/merge_requests/%d", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - mt := new(MergeTrain) - resp, err := s.client.Do(req, mt) - if err != nil { - return nil, resp, err - } - - return mt, resp, nil +func (s *MergeTrainsService) GetMergeRequestOnAMergeTrain(pid any, mergeRequest int64, options ...RequestOptionFunc) (*MergeTrain, *Response, error) { + return do[*MergeTrain](s.client, + withPath("projects/%s/merge_trains/merge_requests/%d", ProjectID{pid}, mergeRequest), + withRequestOpts(options...), + ) } // AddMergeRequestToMergeTrainOptions represents the available @@ -162,23 +123,11 @@ type AddMergeRequestToMergeTrainOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/merge_trains/#add-a-merge-request-to-a-merge-train -func (s *MergeTrainsService) AddMergeRequestToMergeTrain(pid any, mergeRequest int, opts *AddMergeRequestToMergeTrainOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) { - project, err := parseID(pid) - if err != nil { - return 
nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_trains/merge_requests/%d", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, opts, options) - if err != nil { - return nil, nil, err - } - - var mts []*MergeTrain - resp, err := s.client.Do(req, &mts) - if err != nil { - return nil, resp, err - } - - return mts, resp, nil +func (s *MergeTrainsService) AddMergeRequestToMergeTrain(pid any, mergeRequest int64, opts *AddMergeRequestToMergeTrainOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) { + return do[[]*MergeTrain](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/merge_trains/merge_requests/%d", ProjectID{pid}, mergeRequest), + withAPIOpts(opts), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/metadata.go b/vendor/gitlab.com/gitlab-org/api/client-go/metadata.go index 0a702f8e33..8a79cd3928 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/metadata.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/metadata.go @@ -16,8 +16,6 @@ package gitlab -import "net/http" - type ( MetadataServiceInterface interface { GetMetadata(options ...RequestOptionFunc) (*Metadata, *Response, error) @@ -38,35 +36,36 @@ var _ MetadataServiceInterface = (*MetadataService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/metadata/ type Metadata struct { - Version string `json:"version"` - Revision string `json:"revision"` - KAS struct { - Enabled bool `json:"enabled"` - ExternalURL string `json:"externalUrl"` - ExternalK8SProxyURL string `json:"externalK8sProxyUrl"` - Version string `json:"version"` - } `json:"kas"` - Enterprise bool `json:"enterprise"` + Version string `json:"version"` + Revision string `json:"revision"` + KAS MetadataKAS `json:"kas"` + Enterprise bool `json:"enterprise"` } func (s Metadata) String() string { return Stringify(s) } -// GetMetadata gets a GitLab server instance meteadata. 
+// MetadataKAS represents a GitLab instance version metadata KAS. // // GitLab API docs: https://docs.gitlab.com/api/metadata/ -func (s *MetadataService) GetMetadata(options ...RequestOptionFunc) (*Metadata, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "metadata", nil, options) - if err != nil { - return nil, nil, err - } +type MetadataKAS struct { + Enabled bool `json:"enabled"` + ExternalURL string `json:"externalUrl"` + ExternalK8SProxyURL string `json:"externalK8sProxyUrl"` + Version string `json:"version"` +} - v := new(Metadata) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } +func (k MetadataKAS) String() string { + return Stringify(k) +} - return v, resp, nil +// GetMetadata gets a GitLab server instance meteadata. +// +// GitLab API docs: https://docs.gitlab.com/api/metadata/ +func (s *MetadataService) GetMetadata(options ...RequestOptionFunc) (*Metadata, *Response, error) { + return do[*Metadata](s.client, + withPath("metadata"), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/milestones.go b/vendor/gitlab.com/gitlab-org/api/client-go/milestones.go index 3bcf0657bb..f416c67762 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/milestones.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/milestones.go @@ -17,20 +17,48 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( MilestonesServiceInterface interface { + // ListMilestones returns a list of project milestones. + // + // GitLab API docs: + // https://docs.gitlab.com/api/milestones/#list-project-milestones ListMilestones(pid any, opt *ListMilestonesOptions, options ...RequestOptionFunc) ([]*Milestone, *Response, error) - GetMilestone(pid any, milestone int, options ...RequestOptionFunc) (*Milestone, *Response, error) + // GetMilestone gets a single project milestone. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/milestones/#get-single-milestone + GetMilestone(pid any, milestone int64, options ...RequestOptionFunc) (*Milestone, *Response, error) + // CreateMilestone creates a new project milestone. + // + // GitLab API docs: + // https://docs.gitlab.com/api/milestones/#create-new-milestone CreateMilestone(pid any, opt *CreateMilestoneOptions, options ...RequestOptionFunc) (*Milestone, *Response, error) - UpdateMilestone(pid any, milestone int, opt *UpdateMilestoneOptions, options ...RequestOptionFunc) (*Milestone, *Response, error) - DeleteMilestone(pid any, milestone int, options ...RequestOptionFunc) (*Response, error) - GetMilestoneIssues(pid any, milestone int, opt *GetMilestoneIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) - GetMilestoneMergeRequests(pid any, milestone int, opt *GetMilestoneMergeRequestsOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) + // UpdateMilestone updates an existing project milestone. + // + // GitLab API docs: + // https://docs.gitlab.com/api/milestones/#edit-milestone + UpdateMilestone(pid any, milestone int64, opt *UpdateMilestoneOptions, options ...RequestOptionFunc) (*Milestone, *Response, error) + // DeleteMilestone deletes a specified project milestone. + // + // GitLab API docs: + // https://docs.gitlab.com/api/milestones/#delete-project-milestone + DeleteMilestone(pid any, milestone int64, options ...RequestOptionFunc) (*Response, error) + // GetMilestoneIssues gets all issues assigned to a single project milestone. + // + // GitLab API docs: + // https://docs.gitlab.com/api/milestones/#get-all-issues-assigned-to-a-single-milestone + GetMilestoneIssues(pid any, milestone int64, opt *GetMilestoneIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) + // GetMilestoneMergeRequests gets all merge requests assigned to a single + // project milestone. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/milestones/#get-all-merge-requests-assigned-to-a-single-milestone + GetMilestoneMergeRequests(pid any, milestone int64, opt *GetMilestoneMergeRequestsOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) } // MilestonesService handles communication with the milestone related methods @@ -48,10 +76,10 @@ var _ MilestonesServiceInterface = (*MilestonesService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/milestones/ type Milestone struct { - ID int `json:"id"` - IID int `json:"iid"` - GroupID int `json:"group_id"` - ProjectID int `json:"project_id"` + ID int64 `json:"id"` + IID int64 `json:"iid"` + GroupID int64 `json:"group_id"` + ProjectID int64 `json:"project_id"` Title string `json:"title"` Description string `json:"description"` StartDate *ISOTime `json:"start_date"` @@ -73,64 +101,29 @@ func (m Milestone) String() string { // https://docs.gitlab.com/api/milestones/#list-project-milestones type ListMilestonesOptions struct { ListOptions - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - IncludeAncestors *bool `url:"include_ancestors,omitempty" json:"include_ancestors,omitempty"` + IIDs *[]int64 `url:"iids[],omitempty" json:"iids,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + IncludeAncestors *bool `url:"include_ancestors,omitempty" json:"include_ancestors,omitempty"` // Deprecated: in GitLab 16,7, use IncludeAncestors instead IncludeParentMilestones *bool `url:"include_parent_milestones,omitempty" json:"include_parent_milestones,omitempty"` } -// ListMilestones returns a list of project milestones. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/milestones/#list-project-milestones func (s *MilestonesService) ListMilestones(pid any, opt *ListMilestonesOptions, options ...RequestOptionFunc) ([]*Milestone, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/milestones", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*Milestone - resp, err := s.client.Do(req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil + return do[[]*Milestone](s.client, + withPath("projects/%s/milestones", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetMilestone gets a single project milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/api/milestones/#get-single-milestone -func (s *MilestonesService) GetMilestone(pid any, milestone int, options ...RequestOptionFunc) (*Milestone, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/milestones/%d", PathEscape(project), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - m := new(Milestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *MilestonesService) GetMilestone(pid any, milestone int64, options ...RequestOptionFunc) (*Milestone, *Response, error) { + return do[*Milestone](s.client, + withPath("projects/%s/milestones/%d", ProjectID{pid}, milestone), + withRequestOpts(options...), + ) } // CreateMilestoneOptions represents the available CreateMilestone() options. @@ -144,29 +137,13 @@ type CreateMilestoneOptions struct { DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` } -// CreateMilestone creates a new project milestone. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/milestones/#create-new-milestone func (s *MilestonesService) CreateMilestone(pid any, opt *CreateMilestoneOptions, options ...RequestOptionFunc) (*Milestone, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/milestones", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(Milestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil + return do[*Milestone](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/milestones", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateMilestoneOptions represents the available UpdateMilestone() options. @@ -181,78 +158,38 @@ type UpdateMilestoneOptions struct { StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"` } -// UpdateMilestone updates an existing project milestone. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/milestones/#edit-milestone -func (s *MilestonesService) UpdateMilestone(pid any, milestone int, opt *UpdateMilestoneOptions, options ...RequestOptionFunc) (*Milestone, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/milestones/%d", PathEscape(project), milestone) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(Milestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *MilestonesService) UpdateMilestone(pid any, milestone int64, opt *UpdateMilestoneOptions, options ...RequestOptionFunc) (*Milestone, *Response, error) { + return do[*Milestone](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/milestones/%d", ProjectID{pid}, milestone), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteMilestone deletes a specified project milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/api/milestones/#delete-project-milestone -func (s *MilestonesService) DeleteMilestone(pid any, milestone int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/milestones/%d", PathEscape(project), milestone) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) +func (s *MilestonesService) DeleteMilestone(pid any, milestone int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/milestones/%d", ProjectID{pid}, milestone), + withRequestOpts(options...), + ) + return resp, err } // GetMilestoneIssuesOptions represents the available GetMilestoneIssues() options. 
// // GitLab API docs: // https://docs.gitlab.com/api/milestones/#get-all-issues-assigned-to-a-single-milestone -type GetMilestoneIssuesOptions ListOptions - -// GetMilestoneIssues gets all issues assigned to a single project milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/api/milestones/#get-all-issues-assigned-to-a-single-milestone -func (s *MilestonesService) GetMilestoneIssues(pid any, milestone int, opt *GetMilestoneIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/milestones/%d/issues", PathEscape(project), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var i []*Issue - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } +type GetMilestoneIssuesOptions struct { + ListOptions +} - return i, resp, nil +func (s *MilestonesService) GetMilestoneIssues(pid any, milestone int64, opt *GetMilestoneIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + return do[[]*Issue](s.client, + withPath("projects/%s/milestones/%d/issues", ProjectID{pid}, milestone), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetMilestoneMergeRequestsOptions represents the available @@ -260,30 +197,14 @@ func (s *MilestonesService) GetMilestoneIssues(pid any, milestone int, opt *GetM // // GitLab API docs: // https://docs.gitlab.com/api/milestones/#get-all-merge-requests-assigned-to-a-single-milestone -type GetMilestoneMergeRequestsOptions ListOptions - -// GetMilestoneMergeRequests gets all merge requests assigned to a single -// project milestone. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/milestones/#get-all-merge-requests-assigned-to-a-single-milestone -func (s *MilestonesService) GetMilestoneMergeRequests(pid any, milestone int, opt *GetMilestoneMergeRequestsOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/milestones/%d/merge_requests", PathEscape(project), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var mr []*BasicMergeRequest - resp, err := s.client.Do(req, &mr) - if err != nil { - return nil, resp, err - } +type GetMilestoneMergeRequestsOptions struct { + ListOptions +} - return mr, resp, nil +func (s *MilestonesService) GetMilestoneMergeRequests(pid any, milestone int64, opt *GetMilestoneMergeRequestsOptions, options ...RequestOptionFunc) ([]*BasicMergeRequest, *Response, error) { + return do[[]*BasicMergeRequest](s.client, + withPath("projects/%s/milestones/%d/merge_requests", ProjectID{pid}, milestone), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/model_registry.go b/vendor/gitlab.com/gitlab-org/api/client-go/model_registry.go new file mode 100644 index 0000000000..876512b6e5 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/model_registry.go @@ -0,0 +1,50 @@ +package gitlab + +import ( + "bytes" + "net/url" +) + +type ( + ModelRegistryServiceInterface interface { + DownloadMachineLearningModelPackage(pid, modelVersionID any, path string, filename string, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) + } + + // ModelRegistryService handles communication with the model registry related methods + // of the GitLab API. 
+ // + // GitLab API docs: https://docs.gitlab.com/api/model_registry/ + ModelRegistryService struct { + client *Client + } +) + +var _ ModelRegistryServiceInterface = (*ModelRegistryService)(nil) + +// DownloadMachineLearningModelPackage downloads a machine learning model package file. +// +// GitLab API docs: https://docs.gitlab.com/api/model_registry/#download-a-model-package-file +func (s *ModelRegistryService) DownloadMachineLearningModelPackage(pid, modelVersionID any, path string, filename string, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { + // The modelVersionID can be an int or a string like "candidate:5", + // so we convert it to a string for the URL. + mvid, err := parseID(modelVersionID) + if err != nil { + return nil, nil, err + } + + buf, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/packages/ml_models/%s/files/%s/%s", + ProjectID{pid}, + // the following URI components must not escape `.` which is what withPath does by default + // without NoEscape. 
+ NoEscape{url.PathEscape(mvid)}, + NoEscape{url.PathEscape(path)}, + NoEscape{url.PathEscape(filename)}, + ), + withRequestOpts(options...), + ) + if err != nil { + return nil, resp, err + } + return bytes.NewReader(buf.Bytes()), resp, nil +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/namespaces.go b/vendor/gitlab.com/gitlab-org/api/client-go/namespaces.go index b4768551c8..e9760bc1ea 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/namespaces.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/namespaces.go @@ -16,11 +16,6 @@ package gitlab -import ( - "fmt" - "net/http" -) - type ( NamespacesServiceInterface interface { ListNamespaces(opt *ListNamespacesOptions, options ...RequestOptionFunc) ([]*Namespace, *Response, error) @@ -44,21 +39,21 @@ var _ NamespacesServiceInterface = (*NamespacesService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/namespaces/ type Namespace struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Path string `json:"path"` Kind string `json:"kind"` FullPath string `json:"full_path"` - ParentID int `json:"parent_id"` + ParentID int64 `json:"parent_id"` AvatarURL *string `json:"avatar_url"` WebURL string `json:"web_url"` - MembersCountWithDescendants int `json:"members_count_with_descendants"` - BillableMembersCount int `json:"billable_members_count"` + MembersCountWithDescendants int64 `json:"members_count_with_descendants"` + BillableMembersCount int64 `json:"billable_members_count"` Plan string `json:"plan"` TrialEndsOn *ISOTime `json:"trial_ends_on"` Trial bool `json:"trial"` - MaxSeatsUsed *int `json:"max_seats_used"` - SeatsInUse *int `json:"seats_in_use"` + MaxSeatsUsed *int64 `json:"max_seats_used"` + SeatsInUse *int64 `json:"seats_in_use"` } func (n Namespace) String() string { @@ -79,18 +74,11 @@ type ListNamespacesOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/namespaces/#list-all-namespaces func (s *NamespacesService) ListNamespaces(opt 
*ListNamespacesOptions, options ...RequestOptionFunc) ([]*Namespace, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "namespaces", opt, options) - if err != nil { - return nil, nil, err - } - - var n []*Namespace - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil + return do[[]*Namespace](s.client, + withPath("namespaces"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // SearchNamespace gets all namespaces that match your string in their name @@ -104,18 +92,11 @@ func (s *NamespacesService) SearchNamespace(query string, options ...RequestOpti } q.Search = query - req, err := s.client.NewRequest(http.MethodGet, "namespaces", &q, options) - if err != nil { - return nil, nil, err - } - - var n []*Namespace - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil + return do[[]*Namespace](s.client, + withPath("namespaces"), + withAPIOpts(&q), + withRequestOpts(options...), + ) } // GetNamespace gets a namespace by id. @@ -127,20 +108,10 @@ func (s *NamespacesService) GetNamespace(id any, options ...RequestOptionFunc) ( if err != nil { return nil, nil, err } - u := fmt.Sprintf("namespaces/%s", PathEscape(namespace)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - n := new(Namespace) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil + return do[*Namespace](s.client, + withPath("namespaces/%s", namespace), + withRequestOpts(options...), + ) } // NamespaceExistance represents a namespace exists result. 
@@ -157,7 +128,7 @@ type NamespaceExistance struct { // GitLab API docs: // https://docs.gitlab.com/api/namespaces/#verify-namespace-availability type NamespaceExistsOptions struct { - ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` + ParentID *int64 `url:"parent_id,omitempty" json:"parent_id,omitempty"` } // NamespaceExists checks the existence of a namespace. @@ -169,18 +140,9 @@ func (s *NamespacesService) NamespaceExists(id any, opt *NamespaceExistsOptions, if err != nil { return nil, nil, err } - u := fmt.Sprintf("namespaces/%s/exists", namespace) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(NamespaceExistance) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil + return do[*NamespaceExistance](s.client, + withPath("namespaces/%s/exists", namespace), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/notes.go b/vendor/gitlab.com/gitlab-org/api/client-go/notes.go index b54b950190..e4dc2f0de6 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/notes.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/notes.go @@ -17,33 +17,32 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( NotesServiceInterface interface { - ListIssueNotes(pid any, issue int, opt *ListIssueNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) - GetIssueNote(pid any, issue, note int, options ...RequestOptionFunc) (*Note, *Response, error) - CreateIssueNote(pid any, issue int, opt *CreateIssueNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - UpdateIssueNote(pid any, issue, note int, opt *UpdateIssueNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - DeleteIssueNote(pid any, issue, note int, options ...RequestOptionFunc) (*Response, error) - ListSnippetNotes(pid any, snippet int, opt *ListSnippetNotesOptions, 
options ...RequestOptionFunc) ([]*Note, *Response, error) - GetSnippetNote(pid any, snippet, note int, options ...RequestOptionFunc) (*Note, *Response, error) - CreateSnippetNote(pid any, snippet int, opt *CreateSnippetNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - UpdateSnippetNote(pid any, snippet, note int, opt *UpdateSnippetNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - DeleteSnippetNote(pid any, snippet, note int, options ...RequestOptionFunc) (*Response, error) - ListMergeRequestNotes(pid any, mergeRequest int, opt *ListMergeRequestNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) - GetMergeRequestNote(pid any, mergeRequest, note int, options ...RequestOptionFunc) (*Note, *Response, error) - CreateMergeRequestNote(pid any, mergeRequest int, opt *CreateMergeRequestNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - UpdateMergeRequestNote(pid any, mergeRequest, note int, opt *UpdateMergeRequestNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - DeleteMergeRequestNote(pid any, mergeRequest, note int, options ...RequestOptionFunc) (*Response, error) - ListEpicNotes(gid any, epic int, opt *ListEpicNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) - GetEpicNote(gid any, epic, note int, options ...RequestOptionFunc) (*Note, *Response, error) - CreateEpicNote(gid any, epic int, opt *CreateEpicNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - UpdateEpicNote(gid any, epic, note int, opt *UpdateEpicNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) - DeleteEpicNote(gid any, epic, note int, options ...RequestOptionFunc) (*Response, error) + ListIssueNotes(pid any, issue int64, opt *ListIssueNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) + GetIssueNote(pid any, issue, note int64, options ...RequestOptionFunc) (*Note, *Response, error) + CreateIssueNote(pid any, issue int64, opt 
*CreateIssueNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + UpdateIssueNote(pid any, issue, note int64, opt *UpdateIssueNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + DeleteIssueNote(pid any, issue, note int64, options ...RequestOptionFunc) (*Response, error) + ListSnippetNotes(pid any, snippet int64, opt *ListSnippetNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) + GetSnippetNote(pid any, snippet, note int64, options ...RequestOptionFunc) (*Note, *Response, error) + CreateSnippetNote(pid any, snippet int64, opt *CreateSnippetNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + UpdateSnippetNote(pid any, snippet, note int64, opt *UpdateSnippetNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + DeleteSnippetNote(pid any, snippet, note int64, options ...RequestOptionFunc) (*Response, error) + ListMergeRequestNotes(pid any, mergeRequest int64, opt *ListMergeRequestNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) + GetMergeRequestNote(pid any, mergeRequest, note int64, options ...RequestOptionFunc) (*Note, *Response, error) + CreateMergeRequestNote(pid any, mergeRequest int64, opt *CreateMergeRequestNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + UpdateMergeRequestNote(pid any, mergeRequest, note int64, opt *UpdateMergeRequestNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + DeleteMergeRequestNote(pid any, mergeRequest, note int64, options ...RequestOptionFunc) (*Response, error) + ListEpicNotes(gid any, epic int64, opt *ListEpicNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) + GetEpicNote(gid any, epic, note int64, options ...RequestOptionFunc) (*Note, *Response, error) + CreateEpicNote(gid any, epic int64, opt *CreateEpicNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) + UpdateEpicNote(gid any, epic, note int64, opt *UpdateEpicNoteOptions, options 
...RequestOptionFunc) (*Note, *Response, error) + DeleteEpicNote(gid any, epic, note int64, options ...RequestOptionFunc) (*Response, error) } // NotesService handles communication with the notes related methods @@ -63,44 +62,50 @@ var _ NotesServiceInterface = (*NotesService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/notes/ type Note struct { - ID int `json:"id"` - Type NoteTypeValue `json:"type"` - Body string `json:"body"` - Attachment string `json:"attachment"` - Title string `json:"title"` - FileName string `json:"file_name"` - Author NoteAuthor `json:"author"` - System bool `json:"system"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - ExpiresAt *time.Time `json:"expires_at"` - CommitID string `json:"commit_id"` - Position *NotePosition `json:"position"` - NoteableID int `json:"noteable_id"` - NoteableType string `json:"noteable_type"` - ProjectID int `json:"project_id"` - NoteableIID int `json:"noteable_iid"` - Resolvable bool `json:"resolvable"` - Resolved bool `json:"resolved"` - ResolvedAt *time.Time `json:"resolved_at"` - ResolvedBy struct { - ID int `json:"id"` - Username string `json:"username"` - Email string `json:"email"` - Name string `json:"name"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - } `json:"resolved_by"` - Internal bool `json:"internal"` + ID int64 `json:"id"` + Type NoteTypeValue `json:"type"` + Body string `json:"body"` + Attachment string `json:"attachment"` + Title string `json:"title"` + FileName string `json:"file_name"` + Author NoteAuthor `json:"author"` + System bool `json:"system"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + ExpiresAt *time.Time `json:"expires_at"` + CommitID string `json:"commit_id"` + Position *NotePosition `json:"position"` + NoteableID int64 `json:"noteable_id"` + NoteableType string `json:"noteable_type"` + ProjectID int64 `json:"project_id"` + 
NoteableIID int64 `json:"noteable_iid"` + Resolvable bool `json:"resolvable"` + Resolved bool `json:"resolved"` + ResolvedAt *time.Time `json:"resolved_at"` + ResolvedBy NoteResolvedBy `json:"resolved_by"` + Internal bool `json:"internal"` // Deprecated: use Internal instead Confidential bool `json:"confidential"` } +// NoteResolvedBy represents the resolver of a GitLab note. +// +// GitLab API docs: +// https://docs.gitlab.com/api/notes/ +type NoteResolvedBy struct { + ID int64 `json:"id"` + Username string `json:"username"` + Email string `json:"email"` + Name string `json:"name"` + State string `json:"state"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` +} + // NoteAuthor represents the author of a note. type NoteAuthor struct { - ID int `json:"id"` + ID int64 `json:"id"` Username string `json:"username"` Email string `json:"email"` Name string `json:"name"` @@ -116,9 +121,9 @@ type NotePosition struct { HeadSHA string `json:"head_sha"` PositionType string `json:"position_type"` NewPath string `json:"new_path,omitempty"` - NewLine int `json:"new_line,omitempty"` + NewLine int64 `json:"new_line,omitempty"` OldPath string `json:"old_path,omitempty"` - OldLine int `json:"old_line,omitempty"` + OldLine int64 `json:"old_line,omitempty"` LineRange *LineRange `json:"line_range,omitempty"` } @@ -132,8 +137,8 @@ type LineRange struct { type LinePosition struct { LineCode string `json:"line_code"` Type string `json:"type"` - OldLine int `json:"old_line"` - NewLine int `json:"new_line"` + OldLine int64 `json:"old_line"` + NewLine int64 `json:"new_line"` } func (n Note) String() string { @@ -154,50 +159,23 @@ type ListIssueNotesOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/notes/#list-project-issue-notes -func (s *NotesService) ListIssueNotes(pid any, issue int, opt *ListIssueNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } 
- u := fmt.Sprintf("projects/%s/issues/%d/notes", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var n []*Note - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) ListIssueNotes(pid any, issue int64, opt *ListIssueNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) { + return do[[]*Note](s.client, + withPath("projects/%s/issues/%d/notes", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetIssueNote returns a single note for a specific project issue. // // GitLab API docs: // https://docs.gitlab.com/api/notes/#get-single-issue-note -func (s *NotesService) GetIssueNote(pid any, issue, note int, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/notes/%d", PathEscape(project), issue, note) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) GetIssueNote(pid any, issue, note int64, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withPath("projects/%s/issues/%d/notes/%d", ProjectID{pid}, issue, note), + withRequestOpts(options...), + ) } // CreateIssueNoteOptions represents the available CreateIssueNote() @@ -215,25 +193,13 @@ type CreateIssueNoteOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/notes/#create-new-issue-note -func (s *NotesService) CreateIssueNote(pid any, issue int, opt *CreateIssueNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := 
fmt.Sprintf("projects/%s/issues/%d/notes", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) CreateIssueNote(pid any, issue int64, opt *CreateIssueNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/issues/%d/notes", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateIssueNoteOptions represents the available UpdateIssueNote() @@ -249,44 +215,26 @@ type UpdateIssueNoteOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/notes/#modify-existing-issue-note -func (s *NotesService) UpdateIssueNote(pid any, issue, note int, opt *UpdateIssueNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/notes/%d", PathEscape(project), issue, note) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) UpdateIssueNote(pid any, issue, note int64, opt *UpdateIssueNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/issues/%d/notes/%d", ProjectID{pid}, issue, note), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteIssueNote deletes an existing note of an issue. 
// // GitLab API docs: // https://docs.gitlab.com/api/notes/#delete-an-issue-note -func (s *NotesService) DeleteIssueNote(pid any, issue, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/notes/%d", PathEscape(project), issue, note) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *NotesService) DeleteIssueNote(pid any, issue, note int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/issues/%d/notes/%d", ProjectID{pid}, issue, note), + withRequestOpts(options...), + ) + return resp, err } // ListSnippetNotesOptions represents the available ListSnippetNotes() options. @@ -304,50 +252,23 @@ type ListSnippetNotesOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/notes/#list-all-snippet-notes -func (s *NotesService) ListSnippetNotes(pid any, snippet int, opt *ListSnippetNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/notes", PathEscape(project), snippet) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var n []*Note - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) ListSnippetNotes(pid any, snippet int64, opt *ListSnippetNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) { + return do[[]*Note](s.client, + withPath("projects/%s/snippets/%d/notes", ProjectID{pid}, snippet), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetSnippetNote returns a single note for a given snippet. 
// // GitLab API docs: // https://docs.gitlab.com/api/notes/#get-single-snippet-note -func (s *NotesService) GetSnippetNote(pid any, snippet, note int, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/notes/%d", PathEscape(project), snippet, note) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) GetSnippetNote(pid any, snippet, note int64, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withPath("projects/%s/snippets/%d/notes/%d", ProjectID{pid}, snippet, note), + withRequestOpts(options...), + ) } // CreateSnippetNoteOptions represents the available CreateSnippetNote() @@ -365,25 +286,13 @@ type CreateSnippetNoteOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/notes/#create-new-snippet-note -func (s *NotesService) CreateSnippetNote(pid any, snippet int, opt *CreateSnippetNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/notes", PathEscape(project), snippet) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) CreateSnippetNote(pid any, snippet int64, opt *CreateSnippetNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/snippets/%d/notes", ProjectID{pid}, snippet), + withAPIOpts(opt), + withRequestOpts(options...), + ) } 
// UpdateSnippetNoteOptions represents the available UpdateSnippetNote() @@ -399,44 +308,26 @@ type UpdateSnippetNoteOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/notes/#modify-existing-snippet-note -func (s *NotesService) UpdateSnippetNote(pid any, snippet, note int, opt *UpdateSnippetNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/notes/%d", PathEscape(project), snippet, note) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) UpdateSnippetNote(pid any, snippet, note int64, opt *UpdateSnippetNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/snippets/%d/notes/%d", ProjectID{pid}, snippet, note), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteSnippetNote deletes an existing note of a snippet. 
// // GitLab API docs: // https://docs.gitlab.com/api/notes/#delete-a-snippet-note -func (s *NotesService) DeleteSnippetNote(pid any, snippet, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/notes/%d", PathEscape(project), snippet, note) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *NotesService) DeleteSnippetNote(pid any, snippet, note int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/snippets/%d/notes/%d", ProjectID{pid}, snippet, note), + withRequestOpts(options...), + ) + return resp, err } // ListMergeRequestNotesOptions represents the available ListMergeRequestNotes() @@ -454,50 +345,23 @@ type ListMergeRequestNotesOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/notes/#list-all-merge-request-notes -func (s *NotesService) ListMergeRequestNotes(pid any, mergeRequest int, opt *ListMergeRequestNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/notes", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var n []*Note - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) ListMergeRequestNotes(pid any, mergeRequest int64, opt *ListMergeRequestNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) { + return do[[]*Note](s.client, + withPath("projects/%s/merge_requests/%d/notes", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // 
GetMergeRequestNote returns a single note for a given merge request. // // GitLab API docs: // https://docs.gitlab.com/api/notes/#get-single-merge-request-note -func (s *NotesService) GetMergeRequestNote(pid any, mergeRequest, note int, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/notes/%d", PathEscape(project), mergeRequest, note) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) GetMergeRequestNote(pid any, mergeRequest, note int64, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withPath("projects/%s/merge_requests/%d/notes/%d", ProjectID{pid}, mergeRequest, note), + withRequestOpts(options...), + ) } // CreateMergeRequestNoteOptions represents the available @@ -516,25 +380,13 @@ type CreateMergeRequestNoteOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/notes/#create-new-merge-request-note -func (s *NotesService) CreateMergeRequestNote(pid any, mergeRequest int, opt *CreateMergeRequestNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/notes", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) CreateMergeRequestNote(pid any, mergeRequest int64, opt *CreateMergeRequestNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + 
withMethod(http.MethodPost), + withPath("projects/%s/merge_requests/%d/notes", ProjectID{pid}, mergeRequest), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateMergeRequestNoteOptions represents the available @@ -550,45 +402,26 @@ type UpdateMergeRequestNoteOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/notes/#modify-existing-merge-request-note -func (s *NotesService) UpdateMergeRequestNote(pid any, mergeRequest, note int, opt *UpdateMergeRequestNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/merge_requests/%d/notes/%d", PathEscape(project), mergeRequest, note) - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) UpdateMergeRequestNote(pid any, mergeRequest, note int64, opt *UpdateMergeRequestNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/merge_requests/%d/notes/%d", ProjectID{pid}, mergeRequest, note), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteMergeRequestNote deletes an existing note of a merge request. 
// // GitLab API docs: // https://docs.gitlab.com/api/notes/#delete-a-merge-request-note -func (s *NotesService) DeleteMergeRequestNote(pid any, mergeRequest, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf( - "projects/%s/merge_requests/%d/notes/%d", PathEscape(project), mergeRequest, note) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *NotesService) DeleteMergeRequestNote(pid any, mergeRequest, note int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/merge_requests/%d/notes/%d", ProjectID{pid}, mergeRequest, note), + withRequestOpts(options...), + ) + return resp, err } // ListEpicNotesOptions represents the available ListEpicNotes() options. @@ -607,25 +440,12 @@ type ListEpicNotesOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/notes/#list-all-epic-notes -func (s *NotesService) ListEpicNotes(gid any, epic int, opt *ListEpicNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/notes", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var n []*Note - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) ListEpicNotes(gid any, epic int64, opt *ListEpicNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) { + return do[[]*Note](s.client, + withPath("groups/%s/epics/%d/notes", GroupID{gid}, epic), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetEpicNote returns a single note for an epic. 
@@ -633,25 +453,11 @@ func (s *NotesService) ListEpicNotes(gid any, epic int, opt *ListEpicNotesOption // // GitLab API docs: // https://docs.gitlab.com/api/notes/#get-single-epic-note -func (s *NotesService) GetEpicNote(gid any, epic, note int, options ...RequestOptionFunc) (*Note, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/notes/%d", PathEscape(group), epic, note) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) GetEpicNote(gid any, epic, note int64, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withPath("groups/%s/epics/%d/notes/%d", GroupID{gid}, epic, note), + withRequestOpts(options...), + ) } // CreateEpicNoteOptions represents the available CreateEpicNote() options. 
@@ -668,25 +474,13 @@ type CreateEpicNoteOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/notes/#create-new-epic-note -func (s *NotesService) CreateEpicNote(gid any, epic int, opt *CreateEpicNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/notes", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) CreateEpicNote(gid any, epic int64, opt *CreateEpicNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/epics/%d/notes", GroupID{gid}, epic), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateEpicNoteOptions represents the available UpdateEpicNote() options. 
@@ -702,42 +496,24 @@ type UpdateEpicNoteOptions struct { // Will be removed in v5 of the API, use Work Items API instead // // https://docs.gitlab.com/api/notes/#modify-existing-epic-note -func (s *NotesService) UpdateEpicNote(gid any, epic, note int, opt *UpdateEpicNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/notes/%d", PathEscape(group), epic, note) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil +func (s *NotesService) UpdateEpicNote(gid any, epic, note int64, opt *UpdateEpicNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { + return do[*Note](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/epics/%d/notes/%d", GroupID{gid}, epic, note), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteEpicNote deletes an existing note of a merge request. 
// Will be removed in v5 of the API, use Work Items API instead // // https://docs.gitlab.com/api/notes/#delete-an-epic-note -func (s *NotesService) DeleteEpicNote(gid any, epic, note int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/notes/%d", PathEscape(group), epic, note) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *NotesService) DeleteEpicNote(gid any, epic, note int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("groups/%s/epics/%d/notes/%d", GroupID{gid}, epic, note), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/notifications.go b/vendor/gitlab.com/gitlab-org/api/client-go/notifications.go index 539600f3cc..5f95f6cba2 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/notifications.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/notifications.go @@ -18,17 +18,40 @@ package gitlab import ( "errors" - "fmt" "net/http" ) type ( NotificationSettingsServiceInterface interface { + // GetGlobalSettings returns current notification settings and email address. + // + // GitLab API docs: + // https://docs.gitlab.com/api/notification_settings/#global-notification-settings GetGlobalSettings(options ...RequestOptionFunc) (*NotificationSettings, *Response, error) + // UpdateGlobalSettings updates current notification settings and email address. + // + // GitLab API docs: + // https://docs.gitlab.com/api/notification_settings/#update-global-notification-settings UpdateGlobalSettings(opt *NotificationSettingsOptions, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) + // GetSettingsForGroup returns current group notification settings. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/notification_settings/#group--project-level-notification-settings GetSettingsForGroup(gid any, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) + // GetSettingsForProject returns current project notification settings. + // + // GitLab API docs: + // https://docs.gitlab.com/api/notification_settings/#group--project-level-notification-settings GetSettingsForProject(pid any, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) + // UpdateSettingsForGroup updates current group notification settings. + // + // GitLab API docs: + // https://docs.gitlab.com/api/notification_settings/#update-groupproject-level-notification-settings UpdateSettingsForGroup(gid any, opt *NotificationSettingsOptions, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) + // UpdateSettingsForProject updates current project notification settings. + // + // GitLab API docs: + // https://docs.gitlab.com/api/notification_settings/#update-groupproject-level-notification-settings UpdateSettingsForProject(pid any, opt *NotificationSettingsOptions, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) } @@ -43,7 +66,7 @@ type ( var _ NotificationSettingsServiceInterface = (*NotificationSettingsService)(nil) -// NotificationSettings represents the Gitlab notification setting. +// NotificationSettings represents the GitLab notification setting. // // GitLab API docs: // https://docs.gitlab.com/api/notification_settings/#valid-notification-levels @@ -82,25 +105,11 @@ func (ns NotificationSettings) String() string { return Stringify(ns) } -// GetGlobalSettings returns current notification settings and email address. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/notification_settings/#global-notification-settings func (s *NotificationSettingsService) GetGlobalSettings(options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { - u := "notification_settings" - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ns := new(NotificationSettings) - resp, err := s.client.Do(req, ns) - if err != nil { - return nil, resp, err - } - - return ns, resp, nil + return do[*NotificationSettings](s.client, + withPath("notification_settings"), + withRequestOpts(options...), + ) } // NotificationSettingsOptions represents the available options that can be passed @@ -128,128 +137,48 @@ type NotificationSettingsOptions struct { SuccessPipeline *bool `url:"success_pipeline,omitempty" json:"success_pipeline,omitempty"` } -// UpdateGlobalSettings updates current notification settings and email address. -// -// GitLab API docs: -// https://docs.gitlab.com/api/notification_settings/#update-global-notification-settings func (s *NotificationSettingsService) UpdateGlobalSettings(opt *NotificationSettingsOptions, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { if opt.Level != nil && *opt.Level == GlobalNotificationLevel { return nil, nil, errors.New( "notification level 'global' is not valid for global notification settings") } - u := "notification_settings" - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ns := new(NotificationSettings) - resp, err := s.client.Do(req, ns) - if err != nil { - return nil, resp, err - } - - return ns, resp, nil + return do[*NotificationSettings](s.client, + withMethod(http.MethodPut), + withPath("notification_settings"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetSettingsForGroup returns current group notification settings. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/notification_settings/#group--project-level-notification-settings func (s *NotificationSettingsService) GetSettingsForGroup(gid any, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/notification_settings", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ns := new(NotificationSettings) - resp, err := s.client.Do(req, ns) - if err != nil { - return nil, resp, err - } - - return ns, resp, nil + return do[*NotificationSettings](s.client, + withPath("groups/%s/notification_settings", GroupID{gid}), + withRequestOpts(options...), + ) } -// GetSettingsForProject returns current project notification settings. -// -// GitLab API docs: -// https://docs.gitlab.com/api/notification_settings/#group--project-level-notification-settings func (s *NotificationSettingsService) GetSettingsForProject(pid any, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/notification_settings", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ns := new(NotificationSettings) - resp, err := s.client.Do(req, ns) - if err != nil { - return nil, resp, err - } - - return ns, resp, nil + return do[*NotificationSettings](s.client, + withPath("projects/%s/notification_settings", ProjectID{pid}), + withRequestOpts(options...), + ) } -// UpdateSettingsForGroup updates current group notification settings. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/notification_settings/#update-groupproject-level-notification-settings func (s *NotificationSettingsService) UpdateSettingsForGroup(gid any, opt *NotificationSettingsOptions, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/notification_settings", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ns := new(NotificationSettings) - resp, err := s.client.Do(req, ns) - if err != nil { - return nil, resp, err - } - - return ns, resp, nil + return do[*NotificationSettings](s.client, + withMethod(http.MethodPut), + withPath("groups/%s/notification_settings", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// UpdateSettingsForProject updates current project notification settings. -// -// GitLab API docs: -// https://docs.gitlab.com/api/notification_settings/#update-groupproject-level-notification-settings func (s *NotificationSettingsService) UpdateSettingsForProject(pid any, opt *NotificationSettingsOptions, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/notification_settings", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ns := new(NotificationSettings) - resp, err := s.client.Do(req, ns) - if err != nil { - return nil, resp, err - } - - return ns, resp, nil + return do[*NotificationSettings](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/notification_settings", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/packages.go 
b/vendor/gitlab.com/gitlab-org/api/client-go/packages.go index f9f3b59afc..57e8eee164 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/packages.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/packages.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -26,9 +25,9 @@ type ( PackagesServiceInterface interface { ListProjectPackages(pid any, opt *ListProjectPackagesOptions, options ...RequestOptionFunc) ([]*Package, *Response, error) ListGroupPackages(gid any, opt *ListGroupPackagesOptions, options ...RequestOptionFunc) ([]*GroupPackage, *Response, error) - ListPackageFiles(pid any, pkg int, opt *ListPackageFilesOptions, options ...RequestOptionFunc) ([]*PackageFile, *Response, error) - DeleteProjectPackage(pid any, pkg int, options ...RequestOptionFunc) (*Response, error) - DeletePackageFile(pid any, pkg, file int, options ...RequestOptionFunc) (*Response, error) + ListPackageFiles(pid any, pkg int64, opt *ListPackageFilesOptions, options ...RequestOptionFunc) ([]*PackageFile, *Response, error) + DeleteProjectPackage(pid any, pkg int64, options ...RequestOptionFunc) (*Response, error) + DeletePackageFile(pid any, pkg, file int64, options ...RequestOptionFunc) (*Response, error) } // PackagesService handles communication with the packages related methods @@ -46,7 +45,7 @@ var _ PackagesServiceInterface = (*PackagesService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/packages/ type Package struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Version string `json:"version"` PackageType string `json:"package_type"` @@ -66,7 +65,7 @@ func (s Package) String() string { // GitLab API docs: https://docs.gitlab.com/api/packages/ type GroupPackage struct { Package - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` ProjectPath string `json:"project_path"` } @@ -86,8 +85,8 @@ func (s PackageLinks) String() string { // PackageTag holds label information about the package type 
PackageTag struct { - ID int `json:"id"` - PackageID int `json:"package_id"` + ID int64 `json:"id"` + PackageID int64 `json:"package_id"` Name string `json:"name"` CreatedAt *time.Time `json:"created_at"` UpdatedAt *time.Time `json:"updated_at"` @@ -101,11 +100,11 @@ func (s PackageTag) String() string { // // GitLab API docs: https://docs.gitlab.com/api/packages/ type PackageFile struct { - ID int `json:"id"` - PackageID int `json:"package_id"` + ID int64 `json:"id"` + PackageID int64 `json:"package_id"` CreatedAt *time.Time `json:"created_at"` FileName string `json:"file_name"` - Size int `json:"size"` + Size int64 `json:"size"` FileMD5 string `json:"file_md5"` FileSHA1 string `json:"file_sha1"` FileSHA256 string `json:"file_sha256"` @@ -137,24 +136,11 @@ type ListProjectPackagesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/packages/#for-a-project func (s *PackagesService) ListProjectPackages(pid any, opt *ListProjectPackagesOptions, options ...RequestOptionFunc) ([]*Package, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/packages", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Package - resp, err := s.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil + return do[[]*Package](s.client, + withPath("projects/%s/packages", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListGroupPackagesOptions represents the available ListGroupPackages() @@ -178,24 +164,11 @@ type ListGroupPackagesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/packages/#for-a-group func (s *PackagesService) ListGroupPackages(gid any, opt *ListGroupPackagesOptions, options ...RequestOptionFunc) ([]*GroupPackage, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u 
:= fmt.Sprintf("groups/%s/packages", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*GroupPackage - resp, err := s.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil + return do[[]*GroupPackage](s.client, + withPath("groups/%s/packages", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListPackageFilesOptions represents the available ListPackageFiles() @@ -203,71 +176,44 @@ func (s *PackagesService) ListGroupPackages(gid any, opt *ListGroupPackagesOptio // // GitLab API docs: // https://docs.gitlab.com/api/packages/#list-package-files -type ListPackageFilesOptions ListOptions +type ListPackageFilesOptions struct { + ListOptions +} // ListPackageFiles gets a list of files that are within a package // // GitLab API docs: // https://docs.gitlab.com/api/packages/#list-package-files -func (s *PackagesService) ListPackageFiles(pid any, pkg int, opt *ListPackageFilesOptions, options ...RequestOptionFunc) ([]*PackageFile, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/packages/%d/package_files", - PathEscape(project), - pkg, +func (s *PackagesService) ListPackageFiles(pid any, pkg int64, opt *ListPackageFilesOptions, options ...RequestOptionFunc) ([]*PackageFile, *Response, error) { + return do[[]*PackageFile](s.client, + withPath("projects/%s/packages/%d/package_files", ProjectID{pid}, pkg), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pfs []*PackageFile - resp, err := s.client.Do(req, &pfs) - if err != nil { - return nil, resp, err - } - - return pfs, resp, nil } // DeleteProjectPackage deletes a package in a project. 
// // GitLab API docs: // https://docs.gitlab.com/api/packages/#delete-a-project-package -func (s *PackagesService) DeleteProjectPackage(pid any, pkg int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/packages/%d", PathEscape(project), pkg) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *PackagesService) DeleteProjectPackage(pid any, pkg int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/packages/%d", ProjectID{pid}, pkg), + withRequestOpts(options...), + ) + return resp, err } // DeletePackageFile deletes a file in project package // // GitLab API docs: // https://docs.gitlab.com/api/packages/#delete-a-package-file -func (s *PackagesService) DeletePackageFile(pid any, pkg, file int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/packages/%d/package_files/%d", PathEscape(project), pkg, file) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *PackagesService) DeletePackageFile(pid any, pkg, file int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/packages/%d/package_files/%d", ProjectID{pid}, pkg, file), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/pages.go b/vendor/gitlab.com/gitlab-org/api/client-go/pages.go index baffb25591..3b80328ce2 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/pages.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/pages.go @@ -17,15 +17,28 @@ 
package gitlab import ( - "fmt" "net/http" "time" ) type ( PagesServiceInterface interface { + // UnpublishPages unpublishes pages. The user must have admin privileges. + // + // GitLab API docs: + // https://docs.gitlab.com/api/pages/#unpublish-pages UnpublishPages(gid any, options ...RequestOptionFunc) (*Response, error) + // GetPages lists Pages settings for a project. The user must have at least + // maintainer privileges. + // + // GitLab API Docs: + // https://docs.gitlab.com/api/pages/#get-pages-settings-for-a-project GetPages(gid any, options ...RequestOptionFunc) (*Pages, *Response, error) + // UpdatePages updates Pages settings for a project. The user must have + // administrator privileges. + // + // GitLab API Docs: + // https://docs.gitlab.com/api/pages/#update-pages-settings-for-a-project UpdatePages(pid any, opt UpdatePagesOptions, options ...RequestOptionFunc) (*Pages, *Response, error) } @@ -48,6 +61,7 @@ type Pages struct { IsUniqueDomainEnabled bool `json:"is_unique_domain_enabled"` ForceHTTPS bool `json:"force_https"` Deployments []*PagesDeployment `json:"deployments"` + PrimaryDomain string `json:"primary_domain"` } // PagesDeployment represents a Pages deployment. @@ -60,49 +74,20 @@ type PagesDeployment struct { RootDirectory string `json:"root_directory"` } -// UnpublishPages unpublished pages. The user must have admin privileges. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/pages/#unpublish-pages func (s *PagesService) UnpublishPages(gid any, options ...RequestOptionFunc) (*Response, error) { - page, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/pages", PathEscape(page)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/pages", ProjectID{gid}), + withRequestOpts(options...), + ) + return resp, err } -// GetPages lists Pages settings for a project. The user must have at least -// maintainer privileges. -// -// GitLab API Docs: -// https://docs.gitlab.com/api/pages/#get-pages-settings-for-a-project func (s *PagesService) GetPages(gid any, options ...RequestOptionFunc) (*Pages, *Response, error) { - project, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pages", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(Pages) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*Pages](s.client, + withPath("projects/%s/pages", ProjectID{gid}), + withRequestOpts(options...), + ) } // UpdatePagesOptions represents the available UpdatePages() options. 
@@ -110,32 +95,16 @@ func (s *PagesService) GetPages(gid any, options ...RequestOptionFunc) (*Pages, // GitLab API docs: // https://docs.gitlab.com/api/pages/#update-pages-settings-for-a-project type UpdatePagesOptions struct { - PagesUniqueDomainEnabled *bool `url:"pages_unique_domain_enabled,omitempty" json:"pages_unique_domain_enabled,omitempty"` - PagesHTTPSOnly *bool `url:"pages_https_only,omitempty" json:"pages_https_only,omitempty"` + PagesUniqueDomainEnabled *bool `url:"pages_unique_domain_enabled,omitempty" json:"pages_unique_domain_enabled,omitempty"` + PagesHTTPSOnly *bool `url:"pages_https_only,omitempty" json:"pages_https_only,omitempty"` + PagesPrimaryDomain *string `url:"pages_primary_domain,omitempty" json:"pages_primary_domain,omitempty"` } -// UpdatePages updates Pages settings for a project. The user must have -// administrator privileges. -// -// GitLab API Docs: -// https://docs.gitlab.com/api/pages/#update-pages-settings-for-a-project func (s *PagesService) UpdatePages(pid any, opt UpdatePagesOptions, options ...RequestOptionFunc) (*Pages, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pages", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPatch, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(Pages) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*Pages](s.client, + withMethod(http.MethodPatch), + withPath("projects/%s/pages", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/pages_domains.go b/vendor/gitlab.com/gitlab-org/api/client-go/pages_domains.go index de18e48300..745dd3d002 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/pages_domains.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/pages_domains.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" 
"net/http" "time" ) @@ -47,51 +46,45 @@ var _ PagesDomainsServiceInterface = (*PagesDomainsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/pages_domains/ type PagesDomain struct { - Domain string `json:"domain"` - AutoSslEnabled bool `json:"auto_ssl_enabled"` - URL string `json:"url"` - ProjectID int `json:"project_id"` - Verified bool `json:"verified"` - VerificationCode string `json:"verification_code"` - EnabledUntil *time.Time `json:"enabled_until"` - Certificate struct { - Subject string `json:"subject"` - Expired bool `json:"expired"` - Expiration *time.Time `json:"expiration"` - Certificate string `json:"certificate"` - CertificateText string `json:"certificate_text"` - } `json:"certificate"` + Domain string `json:"domain"` + AutoSslEnabled bool `json:"auto_ssl_enabled"` + URL string `json:"url"` + ProjectID int64 `json:"project_id"` + Verified bool `json:"verified"` + VerificationCode string `json:"verification_code"` + EnabledUntil *time.Time `json:"enabled_until"` + Certificate PagesDomainCertificate `json:"certificate"` +} + +// PagesDomainCertificate represents a pages domain certificate. +// +// GitLab API docs: https://docs.gitlab.com/api/pages_domains/ +type PagesDomainCertificate struct { + Subject string `json:"subject"` + Expired bool `json:"expired"` + Expiration *time.Time `json:"expiration"` + Certificate string `json:"certificate"` + CertificateText string `json:"certificate_text"` } // ListPagesDomainsOptions represents the available ListPagesDomains() options. // // GitLab API docs: // https://docs.gitlab.com/api/pages_domains/#list-pages-domains -type ListPagesDomainsOptions ListOptions +type ListPagesDomainsOptions struct { + ListOptions +} // ListPagesDomains gets a list of project pages domains. 
// // GitLab API docs: // https://docs.gitlab.com/api/pages_domains/#list-pages-domains func (s *PagesDomainsService) ListPagesDomains(pid any, opt *ListPagesDomainsOptions, options ...RequestOptionFunc) ([]*PagesDomain, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pages/domains", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pd []*PagesDomain - resp, err := s.client.Do(req, &pd) - if err != nil { - return nil, resp, err - } - - return pd, resp, nil + return do[[]*PagesDomain](s.client, + withPath("projects/%s/pages/domains", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListAllPagesDomains gets a list of all pages domains. @@ -99,18 +92,10 @@ func (s *PagesDomainsService) ListPagesDomains(pid any, opt *ListPagesDomainsOpt // GitLab API docs: // https://docs.gitlab.com/api/pages_domains/#list-all-pages-domains func (s *PagesDomainsService) ListAllPagesDomains(options ...RequestOptionFunc) ([]*PagesDomain, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "pages/domains", nil, options) - if err != nil { - return nil, nil, err - } - - var pd []*PagesDomain - resp, err := s.client.Do(req, &pd) - if err != nil { - return nil, resp, err - } - - return pd, resp, nil + return do[[]*PagesDomain](s.client, + withPath("pages/domains"), + withRequestOpts(options...), + ) } // GetPagesDomain get a specific pages domain for a project. 
@@ -118,24 +103,10 @@ func (s *PagesDomainsService) ListAllPagesDomains(options ...RequestOptionFunc) // GitLab API docs: // https://docs.gitlab.com/api/pages_domains/#single-pages-domain func (s *PagesDomainsService) GetPagesDomain(pid any, domain string, options ...RequestOptionFunc) (*PagesDomain, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pages/domains/%s", PathEscape(project), domain) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pd := new(PagesDomain) - resp, err := s.client.Do(req, pd) - if err != nil { - return nil, resp, err - } - - return pd, resp, nil + return do[*PagesDomain](s.client, + withPath("projects/%s/pages/domains/%s", ProjectID{pid}, domain), + withRequestOpts(options...), + ) } // CreatePagesDomainOptions represents the available CreatePagesDomain() options. @@ -154,24 +125,12 @@ type CreatePagesDomainOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/pages_domains/#create-new-pages-domain func (s *PagesDomainsService) CreatePagesDomain(pid any, opt *CreatePagesDomainOptions, options ...RequestOptionFunc) (*PagesDomain, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pages/domains", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pd := new(PagesDomain) - resp, err := s.client.Do(req, pd) - if err != nil { - return nil, resp, err - } - - return pd, resp, nil + return do[*PagesDomain](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/pages/domains", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdatePagesDomainOptions represents the available UpdatePagesDomain() options. 
@@ -189,24 +148,12 @@ type UpdatePagesDomainOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/pages_domains/#update-pages-domain func (s *PagesDomainsService) UpdatePagesDomain(pid any, domain string, opt *UpdatePagesDomainOptions, options ...RequestOptionFunc) (*PagesDomain, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pages/domains/%s", PathEscape(project), domain) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pd := new(PagesDomain) - resp, err := s.client.Do(req, pd) - if err != nil { - return nil, resp, err - } - - return pd, resp, nil + return do[*PagesDomain](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/pages/domains/%s", ProjectID{pid}, domain), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeletePagesDomain deletes an existing project pages domain. @@ -214,16 +161,10 @@ func (s *PagesDomainsService) UpdatePagesDomain(pid any, domain string, opt *Upd // GitLab API docs: // https://docs.gitlab.com/api/pages_domains/#delete-pages-domain func (s *PagesDomainsService) DeletePagesDomain(pid any, domain string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/pages/domains/%s", PathEscape(project), domain) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/pages/domains/%s", ProjectID{pid}, domain), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/pagination.go b/vendor/gitlab.com/gitlab-org/api/client-go/pagination.go index 8214e84e3b..47dde6ba48 100644 --- 
a/vendor/gitlab.com/gitlab-org/api/client-go/pagination.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/pagination.go @@ -1,12 +1,8 @@ -//go:build go1.23 -// +build go1.23 - package gitlab import ( "fmt" "iter" - "slices" ) type PaginationOptionFunc = RequestOptionFunc @@ -102,11 +98,11 @@ func Scan[T any](f func(p PaginationOptionFunc) ([]T, *Response, error)) (iter.S // Attention: This API is experimental and may be subject to breaking changes to improve the API in the future. func Scan2[T any](f func(p PaginationOptionFunc) ([]T, *Response, error)) iter.Seq2[T, error] { return func(yield func(T, error) bool) { - var nextOpt PaginationOptionFunc + var page PaginationOptionFunc Pagination: for { - ts, resp, err := f(nextOpt) + ts, resp, err := f(page) if err != nil { var t T yield(t, err) @@ -122,15 +118,13 @@ func Scan2[T any](f func(p PaginationOptionFunc) ([]T, *Response, error)) iter.S // the f request function was either configured for offset- or keyset-based // pagination. We support both here, by checking if the next link is provided (keyset) // or not. If both are provided, keyset-based pagination takes precedence. - switch { - case resp.NextLink != "": - nextOpt = WithKeysetPaginationParameters(resp.NextLink) - case resp.NextPage != 0: - nextOpt = WithOffsetPaginationParameters(resp.NextPage) - default: + next, ok := WithNext(resp) + if !ok { // no more pages break Pagination } + + page = next } } } @@ -159,7 +153,7 @@ func Must[T any](it iter.Seq2[T, error]) iter.Seq[T] { } } -// ScanAndCollect is a convenience function that collects all results and returns them as slice as well as an error if one happens. +// ScanAndCollect is a convenience function that collects all results and returns them as a slice as well as an error if one happens. 
// // opts := &ListProjectsOptions{} // projects, err := ScanAndCollect(func(p PaginationOptionFunc) ([]*Project, *Response, error) { @@ -172,10 +166,38 @@ func Must[T any](it iter.Seq2[T, error]) iter.Seq[T] { // // Attention: This API is experimental and may be subject to breaking changes to improve the API in the future. func ScanAndCollect[T any](f func(p PaginationOptionFunc) ([]T, *Response, error)) ([]T, error) { - it, hasErr := Scan(f) - allItems := slices.Collect(it) - if err := hasErr(); err != nil { - return nil, err + return ScanAndCollectN(f, -1) +} + +// ScanAndCollectN is a convenience function that collects at most n results and +// returns them as a slice as well as an error if one happens. +// +// This is useful when you need a slice, e.g. for marshaling the data +// structures, passing the data to a function expecting a slice, or implementing +// custom sorting logic. If you want to iterate over all items, the iterator +// returned by [Scan2] is a more memory efficient alternative. +// +// n determines the number of items to collect: +// - n > 0: at most n items are returned +// - n == 0: the result is a nil slice (zero items) +// - n < 0: all items are returned (no limit) +// +// Attention: This API is experimental and may be subject to breaking changes to +// improve the API in the future. 
+func ScanAndCollectN[T any](f func(p PaginationOptionFunc) ([]T, *Response, error), n int) ([]T, error) { + var items []T + + for item, err := range Scan2(f) { + if err != nil { + return nil, err + } + + if n >= 0 && len(items) >= n { + break + } + + items = append(items, item) } - return allItems, nil + + return items, nil } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/personal_access_tokens.go b/vendor/gitlab.com/gitlab-org/api/client-go/personal_access_tokens.go index 9dcbe68720..f840773e85 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/personal_access_tokens.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/personal_access_tokens.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -25,13 +24,14 @@ import ( type ( PersonalAccessTokensServiceInterface interface { ListPersonalAccessTokens(opt *ListPersonalAccessTokensOptions, options ...RequestOptionFunc) ([]*PersonalAccessToken, *Response, error) - GetSinglePersonalAccessTokenByID(token int, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) + GetSinglePersonalAccessTokenByID(token int64, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) GetSinglePersonalAccessToken(options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) - RotatePersonalAccessToken(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) - RotatePersonalAccessTokenByID(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) + RotatePersonalAccessToken(token int64, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) + RotatePersonalAccessTokenByID(token int64, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) RotatePersonalAccessTokenSelf(opt *RotatePersonalAccessTokenOptions, options 
...RequestOptionFunc) (*PersonalAccessToken, *Response, error) - RevokePersonalAccessToken(token int, options ...RequestOptionFunc) (*Response, error) - RevokePersonalAccessTokenByID(token int, options ...RequestOptionFunc) (*Response, error) + // Deprecated: to be removed in 2.0; use RevokePersonalAccessTokenByID instead + RevokePersonalAccessToken(token int64, options ...RequestOptionFunc) (*Response, error) + RevokePersonalAccessTokenByID(token int64, options ...RequestOptionFunc) (*Response, error) RevokePersonalAccessTokenSelf(options ...RequestOptionFunc) (*Response, error) } @@ -50,13 +50,13 @@ var _ PersonalAccessTokensServiceInterface = (*PersonalAccessTokensService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/personal_access_tokens/ type PersonalAccessToken struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Revoked bool `json:"revoked"` CreatedAt *time.Time `json:"created_at"` Description string `json:"description"` Scopes []string `json:"scopes"` - UserID int `json:"user_id"` + UserID int64 `json:"user_id"` LastUsedAt *time.Time `json:"last_used_at,omitempty"` Active bool `json:"active"` ExpiresAt *ISOTime `json:"expires_at"` @@ -92,7 +92,7 @@ type ListPersonalAccessTokensOptions struct { Search *string `url:"search,omitempty" json:"search,omitempty"` Sort *string `url:"sort,omitempty" json:"sort,omitempty"` State *string `url:"state,omitempty" json:"state,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + UserID *int64 `url:"user_id,omitempty" json:"user_id,omitempty"` } // ListPersonalAccessTokens gets a list of all personal access tokens. 
@@ -100,38 +100,22 @@ type ListPersonalAccessTokensOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/personal_access_tokens/#list-all-personal-access-tokens func (s *PersonalAccessTokensService) ListPersonalAccessTokens(opt *ListPersonalAccessTokensOptions, options ...RequestOptionFunc) ([]*PersonalAccessToken, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "personal_access_tokens", opt, options) - if err != nil { - return nil, nil, err - } - - var pats []*PersonalAccessToken - resp, err := s.client.Do(req, &pats) - if err != nil { - return nil, resp, err - } - - return pats, resp, nil + return do[[]*PersonalAccessToken](s.client, + withPath("personal_access_tokens"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetSinglePersonalAccessTokenByID get a single personal access token by its ID. // // GitLab API docs: // https://docs.gitlab.com/api/personal_access_tokens/#get-details-on-a-personal-access-token -func (s *PersonalAccessTokensService) GetSinglePersonalAccessTokenByID(token int, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - u := fmt.Sprintf("personal_access_tokens/%d", token) - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pat := new(PersonalAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil +func (s *PersonalAccessTokensService) GetSinglePersonalAccessTokenByID(token int64, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { + return do[*PersonalAccessToken](s.client, + withPath("personal_access_tokens/%d", token), + withRequestOpts(options...), + ) } // GetSinglePersonalAccessToken get a single personal access token by using @@ -140,19 +124,10 @@ func (s *PersonalAccessTokensService) GetSinglePersonalAccessTokenByID(token int // GitLab API docs: // https://docs.gitlab.com/api/personal_access_tokens/#self-inform func 
(s *PersonalAccessTokensService) GetSinglePersonalAccessToken(options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - u := "personal_access_tokens/self" - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pat := new(PersonalAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil + return do[*PersonalAccessToken](s.client, + withPath("personal_access_tokens/self"), + withRequestOpts(options...), + ) } // RotatePersonalAccessTokenOptions represents the available RotatePersonalAccessToken() @@ -165,7 +140,7 @@ type RotatePersonalAccessTokenOptions struct { } // RotatePersonalAccessToken is a backwards-compat shim for RotatePersonalAccessTokenByID. -func (s *PersonalAccessTokensService) RotatePersonalAccessToken(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { +func (s *PersonalAccessTokensService) RotatePersonalAccessToken(token int64, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { return s.RotatePersonalAccessTokenByID(token, opt, options...) 
} @@ -174,21 +149,13 @@ func (s *PersonalAccessTokensService) RotatePersonalAccessToken(token int, opt * // // GitLab API docs: // https://docs.gitlab.com/api/personal_access_tokens/#rotate-a-personal-access-token -func (s *PersonalAccessTokensService) RotatePersonalAccessTokenByID(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - u := fmt.Sprintf("personal_access_tokens/%d/rotate", token) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pat := new(PersonalAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil +func (s *PersonalAccessTokensService) RotatePersonalAccessTokenByID(token int64, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { + return do[*PersonalAccessToken](s.client, + withMethod(http.MethodPost), + withPath("personal_access_tokens/%d/rotate", token), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RotatePersonalAccessTokenSelf revokes the currently authenticated token @@ -197,24 +164,17 @@ func (s *PersonalAccessTokensService) RotatePersonalAccessTokenByID(token int, o // GitLab API docs: // https://docs.gitlab.com/api/personal_access_tokens/#self-rotate func (s *PersonalAccessTokensService) RotatePersonalAccessTokenSelf(opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - u := "personal_access_tokens/self/rotate" - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pat := new(PersonalAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil + return do[*PersonalAccessToken](s.client, + withMethod(http.MethodPost), + withPath("personal_access_tokens/self/rotate"), + 
withAPIOpts(opt), + withRequestOpts(options...), + ) } // RevokePersonalAccessToken is a backwards-compat shim for RevokePersonalAccessTokenByID. -func (s *PersonalAccessTokensService) RevokePersonalAccessToken(token int, options ...RequestOptionFunc) (*Response, error) { +// Deprecated: to be removed in 2.0; use RevokePersonalAccessTokenByID instead +func (s *PersonalAccessTokensService) RevokePersonalAccessToken(token int64, options ...RequestOptionFunc) (*Response, error) { return s.RevokePersonalAccessTokenByID(token, options...) } @@ -222,15 +182,13 @@ func (s *PersonalAccessTokensService) RevokePersonalAccessToken(token int, optio // // GitLab API docs: // https://docs.gitlab.com/api/personal_access_tokens/#revoke-a-personal-access-token -func (s *PersonalAccessTokensService) RevokePersonalAccessTokenByID(token int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("personal_access_tokens/%d", token) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *PersonalAccessTokensService) RevokePersonalAccessTokenByID(token int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("personal_access_tokens/%d", token), + withRequestOpts(options...), + ) + return resp, err } // RevokePersonalAccessTokenSelf revokes the currently authenticated @@ -239,12 +197,10 @@ func (s *PersonalAccessTokensService) RevokePersonalAccessTokenByID(token int, o // GitLab API docs: // https://docs.gitlab.com/api/personal_access_tokens/#self-revoke func (s *PersonalAccessTokensService) RevokePersonalAccessTokenSelf(options ...RequestOptionFunc) (*Response, error) { - u := "personal_access_tokens/self" - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + 
withMethod(http.MethodDelete), + withPath("personal_access_tokens/self"), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/pipeline_schedules.go b/vendor/gitlab.com/gitlab-org/api/client-go/pipeline_schedules.go index 2e2f156ddf..e98e583714 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/pipeline_schedules.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/pipeline_schedules.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -25,16 +24,16 @@ import ( type ( PipelineSchedulesServiceInterface interface { ListPipelineSchedules(pid any, opt *ListPipelineSchedulesOptions, options ...RequestOptionFunc) ([]*PipelineSchedule, *Response, error) - GetPipelineSchedule(pid any, schedule int, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) - ListPipelinesTriggeredBySchedule(pid any, schedule int, opt *ListPipelinesTriggeredByScheduleOptions, options ...RequestOptionFunc) ([]*Pipeline, *Response, error) + GetPipelineSchedule(pid any, schedule int64, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) + ListPipelinesTriggeredBySchedule(pid any, schedule int64, opt *ListPipelinesTriggeredByScheduleOptions, options ...RequestOptionFunc) ([]*Pipeline, *Response, error) CreatePipelineSchedule(pid any, opt *CreatePipelineScheduleOptions, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) - EditPipelineSchedule(pid any, schedule int, opt *EditPipelineScheduleOptions, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) - TakeOwnershipOfPipelineSchedule(pid any, schedule int, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) - DeletePipelineSchedule(pid any, schedule int, options ...RequestOptionFunc) (*Response, error) - RunPipelineSchedule(pid any, schedule int, options ...RequestOptionFunc) (*Response, error) - CreatePipelineScheduleVariable(pid any, schedule int, opt 
*CreatePipelineScheduleVariableOptions, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) - EditPipelineScheduleVariable(pid any, schedule int, key string, opt *EditPipelineScheduleVariableOptions, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) - DeletePipelineScheduleVariable(pid any, schedule int, key string, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) + EditPipelineSchedule(pid any, schedule int64, opt *EditPipelineScheduleOptions, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) + TakeOwnershipOfPipelineSchedule(pid any, schedule int64, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) + DeletePipelineSchedule(pid any, schedule int64, options ...RequestOptionFunc) (*Response, error) + RunPipelineSchedule(pid any, schedule int64, options ...RequestOptionFunc) (*Response, error) + CreatePipelineScheduleVariable(pid any, schedule int64, opt *CreatePipelineScheduleVariableOptions, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) + EditPipelineScheduleVariable(pid any, schedule int64, key string, opt *EditPipelineScheduleVariableOptions, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) + DeletePipelineScheduleVariable(pid any, schedule int64, key string, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) } // PipelineSchedulesService handles communication with the pipeline @@ -53,7 +52,7 @@ var _ PipelineSchedulesServiceInterface = (*PipelineSchedulesService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/pipeline_schedules/ type PipelineSchedule struct { - ID int `json:"id"` + ID int64 `json:"id"` Description string `json:"description"` Ref string `json:"ref"` Cron string `json:"cron"` @@ -71,7 +70,7 @@ type PipelineSchedule struct { // LastPipeline represents the last pipeline ran by schedule // this will be returned only for individual schedule get operation type LastPipeline struct { - ID int 
`json:"id"` + ID int64 `json:"id"` SHA string `json:"sha"` Ref string `json:"ref"` Status string `json:"status"` @@ -92,49 +91,22 @@ type ListPipelineSchedulesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/pipeline_schedules/#get-all-pipeline-schedules func (s *PipelineSchedulesService) ListPipelineSchedules(pid any, opt *ListPipelineSchedulesOptions, options ...RequestOptionFunc) ([]*PipelineSchedule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*PipelineSchedule - resp, err := s.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil + return do[[]*PipelineSchedule](s.client, + withPath("projects/%s/pipeline_schedules", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetPipelineSchedule gets a pipeline schedule. 
// // GitLab API docs: // https://docs.gitlab.com/api/pipeline_schedules/#get-a-single-pipeline-schedule -func (s *PipelineSchedulesService) GetPipelineSchedule(pid any, schedule int, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules/%d", PathEscape(project), schedule) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(PipelineSchedule) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *PipelineSchedulesService) GetPipelineSchedule(pid any, schedule int64, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) { + return do[*PipelineSchedule](s.client, + withPath("projects/%s/pipeline_schedules/%d", ProjectID{pid}, schedule), + withRequestOpts(options...), + ) } // ListPipelinesTriggeredByScheduleOptions represents the available @@ -142,32 +114,21 @@ func (s *PipelineSchedulesService) GetPipelineSchedule(pid any, schedule int, op // // GitLab API docs: // https://docs.gitlab.com/api/pipeline_schedules/#get-all-pipelines-triggered-by-a-pipeline-schedule -type ListPipelinesTriggeredByScheduleOptions ListOptions +type ListPipelinesTriggeredByScheduleOptions struct { + ListOptions +} // ListPipelinesTriggeredBySchedule gets all pipelines triggered by a pipeline // schedule. 
// // GitLab API docs: // https://docs.gitlab.com/api/pipeline_schedules/#get-all-pipelines-triggered-by-a-pipeline-schedule -func (s *PipelineSchedulesService) ListPipelinesTriggeredBySchedule(pid any, schedule int, opt *ListPipelinesTriggeredByScheduleOptions, options ...RequestOptionFunc) ([]*Pipeline, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/pipelines", PathEscape(project), schedule) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*Pipeline - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *PipelineSchedulesService) ListPipelinesTriggeredBySchedule(pid any, schedule int64, opt *ListPipelinesTriggeredByScheduleOptions, options ...RequestOptionFunc) ([]*Pipeline, *Response, error) { + return do[[]*Pipeline](s.client, + withPath("projects/%s/pipeline_schedules/%d/pipelines", ProjectID{pid}, schedule), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreatePipelineScheduleOptions represents the available @@ -189,24 +150,12 @@ type CreatePipelineScheduleOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/pipeline_schedules/#create-a-new-pipeline-schedule func (s *PipelineSchedulesService) CreatePipelineSchedule(pid any, opt *CreatePipelineScheduleOptions, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(PipelineSchedule) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*PipelineSchedule](s.client, + withMethod(http.MethodPost), + 
withPath("projects/%s/pipeline_schedules", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditPipelineScheduleOptions represents the available @@ -227,25 +176,13 @@ type EditPipelineScheduleOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/pipeline_schedules/#edit-a-pipeline-schedule -func (s *PipelineSchedulesService) EditPipelineSchedule(pid any, schedule int, opt *EditPipelineScheduleOptions, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules/%d", PathEscape(project), schedule) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(PipelineSchedule) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *PipelineSchedulesService) EditPipelineSchedule(pid any, schedule int64, opt *EditPipelineScheduleOptions, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) { + return do[*PipelineSchedule](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/pipeline_schedules/%d", ProjectID{pid}, schedule), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // TakeOwnershipOfPipelineSchedule sets the owner of the specified @@ -253,63 +190,38 @@ func (s *PipelineSchedulesService) EditPipelineSchedule(pid any, schedule int, o // // GitLab API docs: // https://docs.gitlab.com/api/pipeline_schedules/#take-ownership-of-a-pipeline-schedule -func (s *PipelineSchedulesService) TakeOwnershipOfPipelineSchedule(pid any, schedule int, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/take_ownership", PathEscape(project), schedule) - - req, err := s.client.NewRequest(http.MethodPost, u, 
nil, options) - if err != nil { - return nil, nil, err - } - - p := new(PipelineSchedule) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *PipelineSchedulesService) TakeOwnershipOfPipelineSchedule(pid any, schedule int64, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) { + return do[*PipelineSchedule](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/pipeline_schedules/%d/take_ownership", ProjectID{pid}, schedule), + withRequestOpts(options...), + ) } // DeletePipelineSchedule deletes a pipeline schedule. // // GitLab API docs: // https://docs.gitlab.com/api/pipeline_schedules/#delete-a-pipeline-schedule -func (s *PipelineSchedulesService) DeletePipelineSchedule(pid any, schedule int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules/%d", PathEscape(project), schedule) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *PipelineSchedulesService) DeletePipelineSchedule(pid any, schedule int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/pipeline_schedules/%d", ProjectID{pid}, schedule), + withRequestOpts(options...), + ) + return resp, err } // RunPipelineSchedule triggers a new scheduled pipeline to run immediately. 
// -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/pipeline_schedules/#run-a-scheduled-pipeline-immediately -func (s *PipelineSchedulesService) RunPipelineSchedule(pid any, schedule int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/play", PathEscape(project), schedule) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *PipelineSchedulesService) RunPipelineSchedule(pid any, schedule int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/pipeline_schedules/%d/play", ProjectID{pid}, schedule), + withRequestOpts(options...), + ) + return resp, err } // CreatePipelineScheduleVariableOptions represents the available @@ -327,25 +239,13 @@ type CreatePipelineScheduleVariableOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/pipeline_schedules/#create-a-new-pipeline-schedule -func (s *PipelineSchedulesService) CreatePipelineScheduleVariable(pid any, schedule int, opt *CreatePipelineScheduleVariableOptions, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/variables", PathEscape(project), schedule) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(PipelineVariable) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *PipelineSchedulesService) CreatePipelineScheduleVariable(pid any, schedule int64, opt *CreatePipelineScheduleVariableOptions, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) { + return 
do[*PipelineVariable](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/pipeline_schedules/%d/variables", ProjectID{pid}, schedule), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditPipelineScheduleVariableOptions represents the available @@ -362,48 +262,23 @@ type EditPipelineScheduleVariableOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/pipeline_schedules/#edit-a-pipeline-schedule-variable -func (s *PipelineSchedulesService) EditPipelineScheduleVariable(pid any, schedule int, key string, opt *EditPipelineScheduleVariableOptions, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/variables/%s", PathEscape(project), schedule, key) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(PipelineVariable) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *PipelineSchedulesService) EditPipelineScheduleVariable(pid any, schedule int64, key string, opt *EditPipelineScheduleVariableOptions, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) { + return do[*PipelineVariable](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/pipeline_schedules/%d/variables/%s", ProjectID{pid}, schedule, NoEscape{key}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeletePipelineScheduleVariable deletes a pipeline schedule variable. 
// // GitLab API docs: // https://docs.gitlab.com/api/pipeline_schedules/#delete-a-pipeline-schedule-variable -func (s *PipelineSchedulesService) DeletePipelineScheduleVariable(pid any, schedule int, key string, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/variables/%s", PathEscape(project), schedule, key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(PipelineVariable) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *PipelineSchedulesService) DeletePipelineScheduleVariable(pid any, schedule int64, key string, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) { + return do[*PipelineVariable](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/pipeline_schedules/%d/variables/%s", ProjectID{pid}, schedule, NoEscape{key}), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/pipeline_triggers.go b/vendor/gitlab.com/gitlab-org/api/client-go/pipeline_triggers.go index deff0c89f3..c540cf3b25 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/pipeline_triggers.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/pipeline_triggers.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -25,10 +24,10 @@ import ( type ( PipelineTriggersServiceInterface interface { ListPipelineTriggers(pid any, opt *ListPipelineTriggersOptions, options ...RequestOptionFunc) ([]*PipelineTrigger, *Response, error) - GetPipelineTrigger(pid any, trigger int, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) + GetPipelineTrigger(pid any, trigger int64, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) AddPipelineTrigger(pid any, opt *AddPipelineTriggerOptions, options 
...RequestOptionFunc) (*PipelineTrigger, *Response, error) - EditPipelineTrigger(pid any, trigger int, opt *EditPipelineTriggerOptions, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) - DeletePipelineTrigger(pid any, trigger int, options ...RequestOptionFunc) (*Response, error) + EditPipelineTrigger(pid any, trigger int64, opt *EditPipelineTriggerOptions, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) + DeletePipelineTrigger(pid any, trigger int64, options ...RequestOptionFunc) (*Response, error) RunPipelineTrigger(pid any, opt *RunPipelineTriggerOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) } @@ -48,7 +47,7 @@ var _ PipelineTriggersServiceInterface = (*PipelineTriggersService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/pipeline_triggers/ type PipelineTrigger struct { - ID int `json:"id"` + ID int64 `json:"id"` Description string `json:"description"` CreatedAt *time.Time `json:"created_at"` DeletedAt *time.Time `json:"deleted_at"` @@ -62,56 +61,31 @@ type PipelineTrigger struct { // // GitLab API docs: // https://docs.gitlab.com/api/pipeline_triggers/#list-project-trigger-tokens -type ListPipelineTriggersOptions ListOptions +type ListPipelineTriggersOptions struct { + ListOptions +} // ListPipelineTriggers gets a list of project triggers. 
// // GitLab API docs: // https://docs.gitlab.com/api/pipeline_triggers/#list-project-trigger-tokens func (s *PipelineTriggersService) ListPipelineTriggers(pid any, opt *ListPipelineTriggersOptions, options ...RequestOptionFunc) ([]*PipelineTrigger, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/triggers", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pt []*PipelineTrigger - resp, err := s.client.Do(req, &pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil + return do[[]*PipelineTrigger](s.client, + withPath("projects/%s/triggers", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetPipelineTrigger gets a specific pipeline trigger for a project. // // GitLab API docs: // https://docs.gitlab.com/api/pipeline_triggers/#get-trigger-token-details -func (s *PipelineTriggersService) GetPipelineTrigger(pid any, trigger int, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/triggers/%d", PathEscape(project), trigger) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pt := new(PipelineTrigger) - resp, err := s.client.Do(req, pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil +func (s *PipelineTriggersService) GetPipelineTrigger(pid any, trigger int64, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) { + return do[*PipelineTrigger](s.client, + withPath("projects/%s/triggers/%d", ProjectID{pid}, trigger), + withRequestOpts(options...), + ) } // AddPipelineTriggerOptions represents the available AddPipelineTrigger() options. 
@@ -127,24 +101,12 @@ type AddPipelineTriggerOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/pipeline_triggers/#create-a-trigger-token func (s *PipelineTriggersService) AddPipelineTrigger(pid any, opt *AddPipelineTriggerOptions, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/triggers", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pt := new(PipelineTrigger) - resp, err := s.client.Do(req, pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil + return do[*PipelineTrigger](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/triggers", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditPipelineTriggerOptions represents the available EditPipelineTrigger() options. @@ -159,44 +121,26 @@ type EditPipelineTriggerOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/pipeline_triggers/#update-a-pipeline-trigger-token -func (s *PipelineTriggersService) EditPipelineTrigger(pid any, trigger int, opt *EditPipelineTriggerOptions, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/triggers/%d", PathEscape(project), trigger) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pt := new(PipelineTrigger) - resp, err := s.client.Do(req, pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil +func (s *PipelineTriggersService) EditPipelineTrigger(pid any, trigger int64, opt *EditPipelineTriggerOptions, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) { + return do[*PipelineTrigger](s.client, + withMethod(http.MethodPut), + 
withPath("projects/%s/triggers/%d", ProjectID{pid}, trigger), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeletePipelineTrigger removes a trigger from a project. // // GitLab API docs: // https://docs.gitlab.com/api/pipeline_triggers/#remove-a-pipeline-trigger-token -func (s *PipelineTriggersService) DeletePipelineTrigger(pid any, trigger int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/triggers/%d", PathEscape(project), trigger) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *PipelineTriggersService) DeletePipelineTrigger(pid any, trigger int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/triggers/%d", ProjectID{pid}, trigger), + withRequestOpts(options...), + ) + return resp, err } // RunPipelineTriggerOptions represents the available RunPipelineTrigger() options. @@ -207,6 +151,10 @@ type RunPipelineTriggerOptions struct { Ref *string `url:"ref" json:"ref"` Token *string `url:"token" json:"token"` Variables map[string]string `url:"variables,omitempty" json:"variables,omitempty"` + + // Inputs contains pipeline input parameters. + // See PipelineInputsOption for supported types and usage. + Inputs PipelineInputsOption `url:"inputs,omitempty" json:"inputs,omitempty"` } // RunPipelineTrigger starts a trigger from a project. 
@@ -214,22 +162,10 @@ type RunPipelineTriggerOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/pipeline_triggers/#trigger-a-pipeline-with-a-token func (s *PipelineTriggersService) RunPipelineTrigger(pid any, opt *RunPipelineTriggerOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/trigger/pipeline", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pt := new(Pipeline) - resp, err := s.client.Do(req, pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil + return do[*Pipeline](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/trigger/pipeline", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/pipelines.go b/vendor/gitlab.com/gitlab-org/api/client-go/pipelines.go index 8385836887..437dcc9e30 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/pipelines.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/pipelines.go @@ -17,7 +17,7 @@ package gitlab import ( - "fmt" + "encoding/json" "net/http" "time" ) @@ -47,15 +47,16 @@ const ( type ( PipelinesServiceInterface interface { ListProjectPipelines(pid any, opt *ListProjectPipelinesOptions, options ...RequestOptionFunc) ([]*PipelineInfo, *Response, error) - GetPipeline(pid any, pipeline int, options ...RequestOptionFunc) (*Pipeline, *Response, error) - GetPipelineVariables(pid any, pipeline int, options ...RequestOptionFunc) ([]*PipelineVariable, *Response, error) - GetPipelineTestReport(pid any, pipeline int, options ...RequestOptionFunc) (*PipelineTestReport, *Response, error) + GetPipeline(pid any, pipeline int64, options ...RequestOptionFunc) (*Pipeline, *Response, error) + GetPipelineVariables(pid any, pipeline int64, options ...RequestOptionFunc) 
([]*PipelineVariable, *Response, error) + GetPipelineTestReport(pid any, pipeline int64, options ...RequestOptionFunc) (*PipelineTestReport, *Response, error) + GetPipelineTestReportSummary(pid any, pipeline int64, options ...RequestOptionFunc) (*PipelineTestReportSummary, *Response, error) GetLatestPipeline(pid any, opt *GetLatestPipelineOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) CreatePipeline(pid any, opt *CreatePipelineOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) - RetryPipelineBuild(pid any, pipeline int, options ...RequestOptionFunc) (*Pipeline, *Response, error) - CancelPipelineBuild(pid any, pipeline int, options ...RequestOptionFunc) (*Pipeline, *Response, error) - DeletePipeline(pid any, pipeline int, options ...RequestOptionFunc) (*Response, error) - UpdatePipelineMetadata(pid any, pipeline int, opt *UpdatePipelineMetadataOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) + RetryPipelineBuild(pid any, pipeline int64, options ...RequestOptionFunc) (*Pipeline, *Response, error) + CancelPipelineBuild(pid any, pipeline int64, options ...RequestOptionFunc) (*Pipeline, *Response, error) + DeletePipeline(pid any, pipeline int64, options ...RequestOptionFunc) (*Response, error) + UpdatePipelineMetadata(pid any, pipeline int64, opt *UpdatePipelineMetadataOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) } // PipelinesService handles communication with the repositories related @@ -82,17 +83,18 @@ type PipelineVariable struct { // // GitLab API docs: https://docs.gitlab.com/api/pipelines/ type PipelineInput struct { - Name string `json:"key"` - Value any `json:"value"` + Name string `json:"name"` + Value any `json:"value"` + Destroy *bool `json:"destroy,omitempty"` } // Pipeline represents a GitLab pipeline. 
// // GitLab API docs: https://docs.gitlab.com/api/pipelines/ type Pipeline struct { - ID int `json:"id"` - IID int `json:"iid"` - ProjectID int `json:"project_id"` + ID int64 `json:"id"` + IID int64 `json:"iid"` + ProjectID int64 `json:"project_id"` Status string `json:"status"` Source PipelineSource `json:"source"` Ref string `json:"ref"` @@ -107,40 +109,51 @@ type Pipeline struct { StartedAt *time.Time `json:"started_at"` FinishedAt *time.Time `json:"finished_at"` CommittedAt *time.Time `json:"committed_at"` - Duration int `json:"duration"` - QueuedDuration int `json:"queued_duration"` + Duration int64 `json:"duration"` + QueuedDuration int64 `json:"queued_duration"` Coverage string `json:"coverage"` WebURL string `json:"web_url"` DetailedStatus *DetailedStatus `json:"detailed_status"` } +func (p Pipeline) String() string { + return Stringify(p) +} + // DetailedStatus contains detailed information about the status of a pipeline. type DetailedStatus struct { - Icon string `json:"icon"` - Text string `json:"text"` - Label string `json:"label"` - Group string `json:"group"` - Tooltip string `json:"tooltip"` - HasDetails bool `json:"has_details"` - DetailsPath string `json:"details_path"` - Illustration struct { - Image string `json:"image"` - } `json:"illustration"` - Favicon string `json:"favicon"` + Icon string `json:"icon"` + Text string `json:"text"` + Label string `json:"label"` + Group string `json:"group"` + Tooltip string `json:"tooltip"` + HasDetails bool `json:"has_details"` + DetailsPath string `json:"details_path"` + Illustration DetailedStatusIllustration `json:"illustration"` + Favicon string `json:"favicon"` } -func (p Pipeline) String() string { - return Stringify(p) +func (s DetailedStatus) String() string { + return Stringify(s) +} + +// DetailedStatusIllustration contains detailed information about the status illustration of a pipeline. 
+type DetailedStatusIllustration struct { + Image string `json:"image"` +} + +func (i DetailedStatusIllustration) String() string { + return Stringify(i) } // PipelineTestReport contains a detailed report of a test run. type PipelineTestReport struct { TotalTime float64 `json:"total_time"` - TotalCount int `json:"total_count"` - SuccessCount int `json:"success_count"` - FailedCount int `json:"failed_count"` - SkippedCount int `json:"skipped_count"` - ErrorCount int `json:"error_count"` + TotalCount int64 `json:"total_count"` + SuccessCount int64 `json:"success_count"` + FailedCount int64 `json:"failed_count"` + SkippedCount int64 `json:"skipped_count"` + ErrorCount int64 `json:"error_count"` TestSuites []*PipelineTestSuites `json:"test_suites"` } @@ -148,11 +161,11 @@ type PipelineTestReport struct { type PipelineTestSuites struct { Name string `json:"name"` TotalTime float64 `json:"total_time"` - TotalCount int `json:"total_count"` - SuccessCount int `json:"success_count"` - FailedCount int `json:"failed_count"` - SkippedCount int `json:"skipped_count"` - ErrorCount int `json:"error_count"` + TotalCount int64 `json:"total_count"` + SuccessCount int64 `json:"success_count"` + FailedCount int64 `json:"failed_count"` + SkippedCount int64 `json:"skipped_count"` + ErrorCount int64 `json:"error_count"` TestCases []*PipelineTestCases `json:"test_cases"` } @@ -169,9 +182,40 @@ type PipelineTestCases struct { RecentFailures *RecentFailures `json:"recent_failures"` } +// PipelineTestReportSummary contains a summary report of a test run +type PipelineTestReportSummary struct { + Total PipelineTotalSummary `json:"total"` + TestSuites []PipelineTestSuiteSummary `json:"test_suites"` +} + +// PipelineTotalSummary contains a total summary of a test run +type PipelineTotalSummary struct { + // Documentation examples only show whole numbers, but the test specs for GitLab show decimals, so `float64` is the better attribute here. 
+ Time float64 `json:"time"` + Count int64 `json:"count"` + Success int64 `json:"success"` + Failed int64 `json:"failed"` + Skipped int64 `json:"skipped"` + Error int64 `json:"error"` + SuiteError *string `json:"suite_error"` +} + +// PipelineTestSuiteSummary contains a test suite summary of a test run +type PipelineTestSuiteSummary struct { + Name string `json:"name"` + TotalTime float64 `json:"total_time"` + TotalCount int64 `json:"total_count"` + SuccessCount int64 `json:"success_count"` + FailedCount int64 `json:"failed_count"` + SkippedCount int64 `json:"skipped_count"` + ErrorCount int64 `json:"error_count"` + BuildIDs []int64 `json:"build_ids"` + SuiteError *string `json:"suite_error"` +} + // RecentFailures contains failures count for the project's default branch. type RecentFailures struct { - Count int `json:"count"` + Count int64 `json:"count"` BaseBranch string `json:"base_branch"` } @@ -182,9 +226,9 @@ func (p PipelineTestReport) String() string { // PipelineInfo shows the basic entities of a pipeline, mostly used as fields // on other assets, like Commit. 
type PipelineInfo struct { - ID int `json:"id"` - IID int `json:"iid"` - ProjectID int `json:"project_id"` + ID int64 `json:"id"` + IID int64 `json:"iid"` + ProjectID int64 `json:"project_id"` Status string `json:"status"` Source string `json:"source"` Ref string `json:"ref"` @@ -227,99 +271,55 @@ type ListProjectPipelinesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/pipelines/#list-project-pipelines func (s *PipelinesService) ListProjectPipelines(pid any, opt *ListProjectPipelinesOptions, options ...RequestOptionFunc) ([]*PipelineInfo, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipelines", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*PipelineInfo - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[[]*PipelineInfo](s.client, + withPath("projects/%s/pipelines", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetPipeline gets a single project pipeline. 
// // GitLab API docs: // https://docs.gitlab.com/api/pipelines/#get-a-single-pipeline -func (s *PipelinesService) GetPipeline(pid any, pipeline int, options ...RequestOptionFunc) (*Pipeline, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipelines/%d", PathEscape(project), pipeline) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(Pipeline) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *PipelinesService) GetPipeline(pid any, pipeline int64, options ...RequestOptionFunc) (*Pipeline, *Response, error) { + return do[*Pipeline](s.client, + withPath("projects/%s/pipelines/%d", ProjectID{pid}, pipeline), + withRequestOpts(options...), + ) } // GetPipelineVariables gets the variables of a single project pipeline. // // GitLab API docs: // https://docs.gitlab.com/api/pipelines/#get-variables-of-a-pipeline -func (s *PipelinesService) GetPipelineVariables(pid any, pipeline int, options ...RequestOptionFunc) ([]*PipelineVariable, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipelines/%d/variables", PathEscape(project), pipeline) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var p []*PipelineVariable - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *PipelinesService) GetPipelineVariables(pid any, pipeline int64, options ...RequestOptionFunc) ([]*PipelineVariable, *Response, error) { + return do[[]*PipelineVariable](s.client, + withPath("projects/%s/pipelines/%d/variables", ProjectID{pid}, pipeline), + withRequestOpts(options...), + ) } // GetPipelineTestReport gets the test report of a single project pipeline. 
// // GitLab API docs: // https://docs.gitlab.com/api/pipelines/#get-a-pipelines-test-report -func (s *PipelinesService) GetPipelineTestReport(pid any, pipeline int, options ...RequestOptionFunc) (*PipelineTestReport, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipelines/%d/test_report", PathEscape(project), pipeline) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(PipelineTestReport) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } +func (s *PipelinesService) GetPipelineTestReport(pid any, pipeline int64, options ...RequestOptionFunc) (*PipelineTestReport, *Response, error) { + return do[*PipelineTestReport](s.client, + withPath("projects/%s/pipelines/%d/test_report", ProjectID{pid}, pipeline), + withRequestOpts(options...), + ) +} - return p, resp, nil +// GetPipelineTestReportSummary gets the test report summary of a single project pipeline. +// +// GitLab API docs: +// https://docs.gitlab.com/api/pipelines/#get-a-test-report-summary-for-a-pipeline +func (s *PipelinesService) GetPipelineTestReportSummary(pid any, pipeline int64, options ...RequestOptionFunc) (*PipelineTestReportSummary, *Response, error) { + return do[*PipelineTestReportSummary](s.client, + withPath("projects/%s/pipelines/%d/test_report_summary", ProjectID{pid}, pipeline), + withRequestOpts(options...), + ) } // GetLatestPipelineOptions represents the available GetLatestPipeline() options. 
@@ -335,24 +335,11 @@ type GetLatestPipelineOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/pipelines/#get-the-latest-pipeline func (s *PipelinesService) GetLatestPipeline(pid any, opt *GetLatestPipelineOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipelines/latest", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(Pipeline) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*Pipeline](s.client, + withPath("projects/%s/pipelines/latest", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreatePipelineOptions represents the available CreatePipeline() options. @@ -362,6 +349,10 @@ func (s *PipelinesService) GetLatestPipeline(pid any, opt *GetLatestPipelineOpti type CreatePipelineOptions struct { Ref *string `url:"ref" json:"ref"` Variables *[]*PipelineVariableOptions `url:"variables,omitempty" json:"variables,omitempty"` + + // Inputs contains pipeline input parameters. + // See PipelineInputsOption for supported types and usage. + Inputs PipelineInputsOption `url:"inputs,omitempty" json:"inputs,omitempty"` } // PipelineVariableOptions represents a pipeline variable option. @@ -373,98 +364,131 @@ type PipelineVariableOptions struct { VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } -// CreatePipeline creates a new project pipeline. +// PipelineInputsOption represents pipeline input parameters with type-safe values. +// Each value must be wrapped using NewPipelineInputValue() to ensure compile-time type safety. 
+// +// Supported value types: +// - string +// - integers (int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64) +// - floats (float32, float64) +// - bool +// - []string +// +// Example: +// +// inputs := PipelineInputsOption{ +// "environment": NewPipelineInputValue("production"), +// "replicas": NewPipelineInputValue(3), +// "debug": NewPipelineInputValue(false), +// "regions": NewPipelineInputValue([]string{"us-east", "eu-west"}), +// } // // GitLab API docs: -// https://docs.gitlab.com/api/pipelines/#create-a-new-pipeline -func (s *PipelinesService) CreatePipeline(pid any, opt *CreatePipelineOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipeline", PathEscape(project)) +// - https://docs.gitlab.com/api/pipelines/#create-a-new-pipeline +// - https://docs.gitlab.com/api/pipeline_triggers/#trigger-a-pipeline-with-a-token +type PipelineInputsOption map[string]PipelineInputValueInterface - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } +// PipelineInputValueInterface is implemented by PipelineInputValue[T] for supported pipeline input types. +// Use NewPipelineInputValue() to create instances - do not implement this interface directly. +// +// See PipelineInputsOption for supported types and usage examples. +type PipelineInputValueInterface interface { + pipelineInputValue() +} + +type constraintSigned interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +type constraintUnsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +type constraintInteger interface { + constraintSigned | constraintUnsigned +} + +type constraintFloat interface { + ~float32 | ~float64 +} + +// PipelineInputValueType is a type constraint for valid pipeline input value types. 
+// This constraint ensures only supported GitLab pipeline input types can be used. +type PipelineInputValueType interface { + ~string | constraintInteger | constraintFloat | ~bool | []string +} + +// PipelineInputValue wraps a pipeline input value with compile-time type safety. +// Use NewPipelineInputValue() to create instances of this type. +type PipelineInputValue[T PipelineInputValueType] struct { + Value T +} - p := new(Pipeline) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err +// MarshalJSON implements the json.Marshaler interface. +func (v PipelineInputValue[T]) MarshalJSON() ([]byte, error) { + return json.Marshal(v.Value) +} + +// pipelineInputValue implements PipelineInputValueInterface. +func (PipelineInputValue[T]) pipelineInputValue() {} + +// NewPipelineInputValue wraps a value for use in pipeline inputs. +// Similar to Ptr(), this ensures type safety at compile time. +// Supported types: string, integers, floats, bool, []string +func NewPipelineInputValue[T PipelineInputValueType](value T) PipelineInputValue[T] { + return PipelineInputValue[T]{ + Value: value, } +} - return p, resp, nil +// CreatePipeline creates a new project pipeline. +// +// GitLab API docs: +// https://docs.gitlab.com/api/pipelines/#create-a-new-pipeline +func (s *PipelinesService) CreatePipeline(pid any, opt *CreatePipelineOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) { + return do[*Pipeline](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/pipeline", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RetryPipelineBuild retries failed builds in a pipeline. 
// // GitLab API docs: // https://docs.gitlab.com/api/pipelines/#retry-jobs-in-a-pipeline -func (s *PipelinesService) RetryPipelineBuild(pid any, pipeline int, options ...RequestOptionFunc) (*Pipeline, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipelines/%d/retry", PathEscape(project), pipeline) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(Pipeline) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *PipelinesService) RetryPipelineBuild(pid any, pipeline int64, options ...RequestOptionFunc) (*Pipeline, *Response, error) { + return do[*Pipeline](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/pipelines/%d/retry", ProjectID{pid}, pipeline), + withRequestOpts(options...), + ) } // CancelPipelineBuild cancels a pipeline builds. // // GitLab API docs: // https://docs.gitlab.com/api/pipelines/#cancel-a-pipelines-jobs -func (s *PipelinesService) CancelPipelineBuild(pid any, pipeline int, options ...RequestOptionFunc) (*Pipeline, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipelines/%d/cancel", PathEscape(project), pipeline) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(Pipeline) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *PipelinesService) CancelPipelineBuild(pid any, pipeline int64, options ...RequestOptionFunc) (*Pipeline, *Response, error) { + return do[*Pipeline](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/pipelines/%d/cancel", ProjectID{pid}, pipeline), + withRequestOpts(options...), + ) } // DeletePipeline deletes an existing pipeline. 
// // GitLab API docs: // https://docs.gitlab.com/api/pipelines/#delete-a-pipeline -func (s *PipelinesService) DeletePipeline(pid any, pipeline int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/pipelines/%d", PathEscape(project), pipeline) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *PipelinesService) DeletePipeline(pid any, pipeline int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/pipelines/%d", ProjectID{pid}, pipeline), + withRequestOpts(options...), + ) + return resp, err } // UpdatePipelineMetadataOptions represents the available UpdatePipelineMetadata() @@ -481,23 +505,11 @@ type UpdatePipelineMetadataOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/pipelines/#update-pipeline-metadata -func (s *PipelinesService) UpdatePipelineMetadata(pid any, pipeline int, opt *UpdatePipelineMetadataOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipelines/%d/metadata", PathEscape(project), pipeline) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(Pipeline) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil +func (s *PipelinesService) UpdatePipelineMetadata(pid any, pipeline int64, opt *UpdatePipelineMetadataOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) { + return do[*Pipeline](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/pipelines/%d/metadata", ProjectID{pid}, pipeline), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff 
--git a/vendor/gitlab.com/gitlab-org/api/client-go/plan_limits.go b/vendor/gitlab.com/gitlab-org/api/client-go/plan_limits.go index 53e9c9fd54..d7247fa58f 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/plan_limits.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/plan_limits.go @@ -39,14 +39,14 @@ var _ PlanLimitsServiceInterface = (*PlanLimitsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/plan_limits/ type PlanLimit struct { - ConanMaxFileSize int `json:"conan_max_file_size,omitempty"` - GenericPackagesMaxFileSize int `json:"generic_packages_max_file_size,omitempty"` - HelmMaxFileSize int `json:"helm_max_file_size,omitempty"` - MavenMaxFileSize int `json:"maven_max_file_size,omitempty"` - NPMMaxFileSize int `json:"npm_max_file_size,omitempty"` - NugetMaxFileSize int `json:"nuget_max_file_size,omitempty"` - PyPiMaxFileSize int `json:"pypi_max_file_size,omitempty"` - TerraformModuleMaxFileSize int `json:"terraform_module_max_file_size,omitempty"` + ConanMaxFileSize int64 `json:"conan_max_file_size,omitempty"` + GenericPackagesMaxFileSize int64 `json:"generic_packages_max_file_size,omitempty"` + HelmMaxFileSize int64 `json:"helm_max_file_size,omitempty"` + MavenMaxFileSize int64 `json:"maven_max_file_size,omitempty"` + NPMMaxFileSize int64 `json:"npm_max_file_size,omitempty"` + NugetMaxFileSize int64 `json:"nuget_max_file_size,omitempty"` + PyPiMaxFileSize int64 `json:"pypi_max_file_size,omitempty"` + TerraformModuleMaxFileSize int64 `json:"terraform_module_max_file_size,omitempty"` } // GetCurrentPlanLimitsOptions represents the available GetCurrentPlanLimits() @@ -63,18 +63,11 @@ type GetCurrentPlanLimitsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/plan_limits/#get-current-plan-limits func (s *PlanLimitsService) GetCurrentPlanLimits(opt *GetCurrentPlanLimitsOptions, options ...RequestOptionFunc) (*PlanLimit, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "application/plan_limits", opt, 
options) - if err != nil { - return nil, nil, err - } - - pl := new(PlanLimit) - resp, err := s.client.Do(req, pl) - if err != nil { - return nil, resp, err - } - - return pl, resp, nil + return do[*PlanLimit](s.client, + withPath("application/plan_limits"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ChangePlanLimitOptions represents the available ChangePlanLimits() options. @@ -83,14 +76,14 @@ func (s *PlanLimitsService) GetCurrentPlanLimits(opt *GetCurrentPlanLimitsOption // https://docs.gitlab.com/api/plan_limits/#change-plan-limits type ChangePlanLimitOptions struct { PlanName *string `url:"plan_name,omitempty" json:"plan_name,omitempty"` - ConanMaxFileSize *int `url:"conan_max_file_size,omitempty" json:"conan_max_file_size,omitempty"` - GenericPackagesMaxFileSize *int `url:"generic_packages_max_file_size,omitempty" json:"generic_packages_max_file_size,omitempty"` - HelmMaxFileSize *int `url:"helm_max_file_size,omitempty" json:"helm_max_file_size,omitempty"` - MavenMaxFileSize *int `url:"maven_max_file_size,omitempty" json:"maven_max_file_size,omitempty"` - NPMMaxFileSize *int `url:"npm_max_file_size,omitempty" json:"npm_max_file_size,omitempty"` - NugetMaxFileSize *int `url:"nuget_max_file_size,omitempty" json:"nuget_max_file_size,omitempty"` - PyPiMaxFileSize *int `url:"pypi_max_file_size,omitempty" json:"pypi_max_file_size,omitempty"` - TerraformModuleMaxFileSize *int `url:"terraform_module_max_file_size,omitempty" json:"terraform_module_max_file_size,omitempty"` + ConanMaxFileSize *int64 `url:"conan_max_file_size,omitempty" json:"conan_max_file_size,omitempty"` + GenericPackagesMaxFileSize *int64 `url:"generic_packages_max_file_size,omitempty" json:"generic_packages_max_file_size,omitempty"` + HelmMaxFileSize *int64 `url:"helm_max_file_size,omitempty" json:"helm_max_file_size,omitempty"` + MavenMaxFileSize *int64 `url:"maven_max_file_size,omitempty" json:"maven_max_file_size,omitempty"` + NPMMaxFileSize *int64 
`url:"npm_max_file_size,omitempty" json:"npm_max_file_size,omitempty"` + NugetMaxFileSize *int64 `url:"nuget_max_file_size,omitempty" json:"nuget_max_file_size,omitempty"` + PyPiMaxFileSize *int64 `url:"pypi_max_file_size,omitempty" json:"pypi_max_file_size,omitempty"` + TerraformModuleMaxFileSize *int64 `url:"terraform_module_max_file_size,omitempty" json:"terraform_module_max_file_size,omitempty"` } // ChangePlanLimits modifies the limits of a plan on the GitLab instance. @@ -98,16 +91,10 @@ type ChangePlanLimitOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/plan_limits/#change-plan-limits func (s *PlanLimitsService) ChangePlanLimits(opt *ChangePlanLimitOptions, options ...RequestOptionFunc) (*PlanLimit, *Response, error) { - req, err := s.client.NewRequest(http.MethodPut, "application/plan_limits", opt, options) - if err != nil { - return nil, nil, err - } - - pl := new(PlanLimit) - resp, err := s.client.Do(req, pl) - if err != nil { - return nil, resp, err - } - - return pl, resp, nil + return do[*PlanLimit](s.client, + withMethod(http.MethodPut), + withPath("application/plan_limits"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_access_tokens.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_access_tokens.go index e98e658550..4fae8ebf50 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_access_tokens.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_access_tokens.go @@ -17,18 +17,17 @@ package gitlab import ( - "fmt" "net/http" ) type ( ProjectAccessTokensServiceInterface interface { ListProjectAccessTokens(pid any, opt *ListProjectAccessTokensOptions, options ...RequestOptionFunc) ([]*ProjectAccessToken, *Response, error) - GetProjectAccessToken(pid any, id int, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) + GetProjectAccessToken(pid any, id int64, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, 
error) CreateProjectAccessToken(pid any, opt *CreateProjectAccessTokenOptions, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) - RotateProjectAccessToken(pid any, id int, opt *RotateProjectAccessTokenOptions, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) + RotateProjectAccessToken(pid any, id int64, opt *RotateProjectAccessTokenOptions, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) RotateProjectAccessTokenSelf(pid any, opt *RotateProjectAccessTokenOptions, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) - RevokeProjectAccessToken(pid any, id int, options ...RequestOptionFunc) (*Response, error) + RevokeProjectAccessToken(pid any, id int64, options ...RequestOptionFunc) (*Response, error) } // ProjectAccessTokensService handles communication with the @@ -69,49 +68,22 @@ type ListProjectAccessTokensOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_access_tokens/#list-all-project-access-tokens func (s *ProjectAccessTokensService) ListProjectAccessTokens(pid any, opt *ListProjectAccessTokensOptions, options ...RequestOptionFunc) ([]*ProjectAccessToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_tokens", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pats []*ProjectAccessToken - resp, err := s.client.Do(req, &pats) - if err != nil { - return nil, resp, err - } - - return pats, resp, nil + return do[[]*ProjectAccessToken](s.client, + withPath("projects/%s/access_tokens", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetProjectAccessToken gets a single project access tokens in a project. 
// // GitLab API docs: // https://docs.gitlab.com/api/project_access_tokens/#get-details-on-a-project-access-token -func (s *ProjectAccessTokensService) GetProjectAccessToken(pid any, id int, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_tokens/%d", PathEscape(project), id) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pat := new(ProjectAccessToken) - resp, err := s.client.Do(req, &pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil +func (s *ProjectAccessTokensService) GetProjectAccessToken(pid any, id int64, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) { + return do[*ProjectAccessToken](s.client, + withPath("projects/%s/access_tokens/%d", ProjectID{pid}, id), + withRequestOpts(options...), + ) } // CreateProjectAccessTokenOptions represents the available CreateVariable() @@ -132,24 +104,12 @@ type CreateProjectAccessTokenOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_access_tokens/#create-a-project-access-token func (s *ProjectAccessTokensService) CreateProjectAccessToken(pid any, opt *CreateProjectAccessTokenOptions, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_tokens", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pat := new(ProjectAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil + return do[*ProjectAccessToken](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/access_tokens", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // 
RotateProjectAccessTokenOptions represents the available RotateProjectAccessToken() @@ -166,24 +126,13 @@ type RotateProjectAccessTokenOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/project_access_tokens/#rotate-a-project-access-token -func (s *ProjectAccessTokensService) RotateProjectAccessToken(pid any, id int, opt *RotateProjectAccessTokenOptions, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) { - projects, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_tokens/%d/rotate", PathEscape(projects), id) - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pat := new(ProjectAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil +func (s *ProjectAccessTokensService) RotateProjectAccessToken(pid any, id int64, opt *RotateProjectAccessTokenOptions, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) { + return do[*ProjectAccessToken](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/access_tokens/%d/rotate", ProjectID{pid}, id), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RotateProjectAccessTokenSelf revokes the project access token used for the request @@ -192,40 +141,23 @@ func (s *ProjectAccessTokensService) RotateProjectAccessToken(pid any, id int, o // GitLab API docs: // https://docs.gitlab.com/api/project_access_tokens/#self-rotate func (s *ProjectAccessTokensService) RotateProjectAccessTokenSelf(pid any, opt *RotateProjectAccessTokenOptions, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) { - projects, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_tokens/self/rotate", PathEscape(projects)) - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - 
pat := new(ProjectAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil + return do[*ProjectAccessToken](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/access_tokens/self/rotate", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RevokeProjectAccessToken revokes a project access token. // // GitLab API docs: // https://docs.gitlab.com/api/project_access_tokens/#revoke-a-project-access-token -func (s *ProjectAccessTokensService) RevokeProjectAccessToken(pid any, id int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/access_tokens/%d", PathEscape(project), id) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ProjectAccessTokensService) RevokeProjectAccessToken(pid any, id int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/access_tokens/%d", ProjectID{pid}, id), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_aliases.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_aliases.go new file mode 100644 index 0000000000..9059abbc36 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_aliases.go @@ -0,0 +1,90 @@ +package gitlab + +import ( + "net/http" +) + +type ( + ProjectAliasesServiceInterface interface { + ListProjectAliases(options ...RequestOptionFunc) ([]*ProjectAlias, *Response, error) + GetProjectAlias(name string, options ...RequestOptionFunc) (*ProjectAlias, *Response, error) + CreateProjectAlias(opt *CreateProjectAliasOptions, options ...RequestOptionFunc) (*ProjectAlias, *Response, error) + DeleteProjectAlias(name string, options 
...RequestOptionFunc) (*Response, error) + } + + // ProjectAliasesService handles communication with the project aliases related methods of the GitLab API. + // + // GitLab API docs: https://docs.gitlab.com/api/project_aliases/ + ProjectAliasesService struct { + client *Client + } +) + +var _ ProjectAliasesServiceInterface = (*ProjectAliasesService)(nil) + +// ProjectAlias represents a GitLab project alias. +// +// GitLab API docs: +// https://docs.gitlab.com/api/project_aliases/ +type ProjectAlias struct { + ID int64 `json:"id"` + ProjectID int64 `json:"project_id"` + Name string `json:"name"` +} + +// CreateProjectAliasOptions represents the options for creating a project alias. +// +// GitLab API docs: +// https://docs.gitlab.com/api/project_aliases/#create-a-project-alias +type CreateProjectAliasOptions struct { + Name *string `json:"name" url:"name,omitempty"` + ProjectID int64 `json:"project_id" url:"project_id,omitempty"` +} + +// ListProjectAliases gets a list of all project aliases. +// +// GitLab API docs: +// https://docs.gitlab.com/api/project_aliases/#list-all-project-aliases +func (s *ProjectAliasesService) ListProjectAliases(options ...RequestOptionFunc) ([]*ProjectAlias, *Response, error) { + return do[[]*ProjectAlias](s.client, + withPath("project_aliases"), + withRequestOpts(options...), + ) +} + +// GetProjectAlias gets details of a project alias. +// +// GitLab API docs: +// https://docs.gitlab.com/api/project_aliases/#get-project-alias-details +func (s *ProjectAliasesService) GetProjectAlias(name string, options ...RequestOptionFunc) (*ProjectAlias, *Response, error) { + return do[*ProjectAlias](s.client, + withPath("project_aliases/%s", name), + withRequestOpts(options...), + ) +} + +// CreateProjectAlias creates a new project alias. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/project_aliases/#create-a-project-alias +func (s *ProjectAliasesService) CreateProjectAlias(opt *CreateProjectAliasOptions, options ...RequestOptionFunc) (*ProjectAlias, *Response, error) { + return do[*ProjectAlias](s.client, + withMethod(http.MethodPost), + withPath("project_aliases"), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +// DeleteProjectAlias deletes a project alias. +// +// GitLab API docs: +// https://docs.gitlab.com/api/project_aliases/#delete-a-project-alias +func (s *ProjectAliasesService) DeleteProjectAlias(name string, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("project_aliases/%s", name), + withRequestOpts(options...), + ) + return resp, err +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_badges.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_badges.go index c65daa3893..e0b14c948b 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_badges.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_badges.go @@ -17,17 +17,16 @@ package gitlab import ( - "fmt" "net/http" ) type ( ProjectBadgesServiceInterface interface { ListProjectBadges(pid any, opt *ListProjectBadgesOptions, options ...RequestOptionFunc) ([]*ProjectBadge, *Response, error) - GetProjectBadge(pid any, badge int, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) + GetProjectBadge(pid any, badge int64, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) AddProjectBadge(pid any, opt *AddProjectBadgeOptions, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) - EditProjectBadge(pid any, badge int, opt *EditProjectBadgeOptions, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) - DeleteProjectBadge(pid any, badge int, options ...RequestOptionFunc) (*Response, error) + EditProjectBadge(pid any, badge int64, opt *EditProjectBadgeOptions, 
options ...RequestOptionFunc) (*ProjectBadge, *Response, error) + DeleteProjectBadge(pid any, badge int64, options ...RequestOptionFunc) (*Response, error) PreviewProjectBadge(pid any, opt *ProjectBadgePreviewOptions, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) } @@ -47,7 +46,7 @@ var _ ProjectBadgesServiceInterface = (*ProjectBadgesService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/project_badges/#list-all-badges-of-a-project type ProjectBadge struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` LinkURL string `json:"link_url"` ImageURL string `json:"image_url"` @@ -72,49 +71,22 @@ type ListProjectBadgesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_badges/#list-all-badges-of-a-project func (s *ProjectBadgesService) ListProjectBadges(pid any, opt *ListProjectBadgesOptions, options ...RequestOptionFunc) ([]*ProjectBadge, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/badges", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pb []*ProjectBadge - resp, err := s.client.Do(req, &pb) - if err != nil { - return nil, resp, err - } - - return pb, resp, nil + return do[[]*ProjectBadge](s.client, + withPath("projects/%s/badges", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetProjectBadge gets a project badge. 
// // GitLab API docs: // https://docs.gitlab.com/api/project_badges/#get-a-badge-of-a-project -func (s *ProjectBadgesService) GetProjectBadge(pid any, badge int, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/badges/%d", PathEscape(project), badge) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pb := new(ProjectBadge) - resp, err := s.client.Do(req, pb) - if err != nil { - return nil, resp, err - } - - return pb, resp, nil +func (s *ProjectBadgesService) GetProjectBadge(pid any, badge int64, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) { + return do[*ProjectBadge](s.client, + withPath("projects/%s/badges/%d", ProjectID{pid}, badge), + withRequestOpts(options...), + ) } // AddProjectBadgeOptions represents the available AddProjectBadge() options. @@ -132,24 +104,12 @@ type AddProjectBadgeOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_badges/#add-a-badge-to-a-project func (s *ProjectBadgesService) AddProjectBadge(pid any, opt *AddProjectBadgeOptions, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/badges", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pb := new(ProjectBadge) - resp, err := s.client.Do(req, pb) - if err != nil { - return nil, resp, err - } - - return pb, resp, nil + return do[*ProjectBadge](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/badges", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditProjectBadgeOptions represents the available EditProjectBadge() options. 
@@ -166,25 +126,13 @@ type EditProjectBadgeOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/project_badges/#edit-a-badge-of-a-project -func (s *ProjectBadgesService) EditProjectBadge(pid any, badge int, opt *EditProjectBadgeOptions, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/badges/%d", PathEscape(project), badge) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pb := new(ProjectBadge) - resp, err := s.client.Do(req, pb) - if err != nil { - return nil, resp, err - } - - return pb, resp, nil +func (s *ProjectBadgesService) EditProjectBadge(pid any, badge int64, opt *EditProjectBadgeOptions, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) { + return do[*ProjectBadge](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/badges/%d", ProjectID{pid}, badge), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteProjectBadge removes a badge from a project. 
Only project's @@ -192,19 +140,13 @@ func (s *ProjectBadgesService) EditProjectBadge(pid any, badge int, opt *EditPro // // GitLab API docs: // https://docs.gitlab.com/api/project_badges/#remove-a-badge-from-a-project -func (s *ProjectBadgesService) DeleteProjectBadge(pid any, badge int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/badges/%d", PathEscape(project), badge) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ProjectBadgesService) DeleteProjectBadge(pid any, badge int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/badges/%d", ProjectID{pid}, badge), + withRequestOpts(options...), + ) + return resp, err } // ProjectBadgePreviewOptions represents the available PreviewProjectBadge() options. 
@@ -222,22 +164,9 @@ type ProjectBadgePreviewOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_badges/#preview-a-badge-from-a-project func (s *ProjectBadgesService) PreviewProjectBadge(pid any, opt *ProjectBadgePreviewOptions, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/badges/render", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - pb := new(ProjectBadge) - resp, err := s.client.Do(req, &pb) - if err != nil { - return nil, resp, err - } - - return pb, resp, nil + return do[*ProjectBadge](s.client, + withPath("projects/%s/badges/render", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_clusters.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_clusters.go index 0572e1b618..e62da7580e 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_clusters.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_clusters.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -28,13 +27,13 @@ type ( // Deprecated: in GitLab 14.5, to be removed in 19.0 ListClusters(pid any, options ...RequestOptionFunc) ([]*ProjectCluster, *Response, error) // Deprecated: in GitLab 14.5, to be removed in 19.0 - GetCluster(pid any, cluster int, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) + GetCluster(pid any, cluster int64, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) // Deprecated: in GitLab 14.5, to be removed in 19.0 AddCluster(pid any, opt *AddClusterOptions, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) // Deprecated: in GitLab 14.5, to be removed in 19.0 - EditCluster(pid any, cluster int, opt *EditClusterOptions, options ...RequestOptionFunc) 
(*ProjectCluster, *Response, error) + EditCluster(pid any, cluster int64, opt *EditClusterOptions, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) // Deprecated: in GitLab 14.5, to be removed in 19.0 - DeleteCluster(pid any, cluster int, options ...RequestOptionFunc) (*Response, error) + DeleteCluster(pid any, cluster int64, options ...RequestOptionFunc) (*Response, error) } // ProjectClustersService handles communication with the @@ -56,7 +55,7 @@ var _ ProjectClustersServiceInterface = (*ProjectClustersService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/project_clusters/ type ProjectCluster struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Domain string `json:"domain"` CreatedAt *time.Time `json:"created_at"` @@ -88,7 +87,7 @@ type PlatformKubernetes struct { // ManagementProject represents a GitLab Project Cluster management_project. // Deprecated: in GitLab 14.5, to be removed in 19.0 type ManagementProject struct { - ID int `json:"id"` + ID int64 `json:"id"` Description string `json:"description"` Name string `json:"name"` NameWithNamespace string `json:"name_with_namespace"` @@ -103,24 +102,10 @@ type ManagementProject struct { // GitLab API docs: // https://docs.gitlab.com/api/project_clusters/#list-project-clusters func (s *ProjectClustersService) ListClusters(pid any, options ...RequestOptionFunc) ([]*ProjectCluster, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/clusters", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var pcs []*ProjectCluster - resp, err := s.client.Do(req, &pcs) - if err != nil { - return nil, resp, err - } - - return pcs, resp, nil + return do[[]*ProjectCluster](s.client, + withPath("projects/%s/clusters", ProjectID{pid}), + withRequestOpts(options...), + ) } // GetCluster gets a cluster. 
@@ -128,25 +113,11 @@ func (s *ProjectClustersService) ListClusters(pid any, options ...RequestOptionF // // GitLab API docs: // https://docs.gitlab.com/api/project_clusters/#get-a-single-project-cluster -func (s *ProjectClustersService) GetCluster(pid any, cluster int, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/clusters/%d", PathEscape(project), cluster) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pc := new(ProjectCluster) - resp, err := s.client.Do(req, &pc) - if err != nil { - return nil, resp, err - } - - return pc, resp, nil +func (s *ProjectClustersService) GetCluster(pid any, cluster int64, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) { + return do[*ProjectCluster](s.client, + withPath("projects/%s/clusters/%d", ProjectID{pid}, cluster), + withRequestOpts(options...), + ) } // AddClusterOptions represents the available AddCluster() options. 
@@ -180,24 +151,12 @@ type AddPlatformKubernetesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_clusters/#add-existing-cluster-to-project func (s *ProjectClustersService) AddCluster(pid any, opt *AddClusterOptions, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/clusters/user", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pc := new(ProjectCluster) - resp, err := s.client.Do(req, pc) - if err != nil { - return nil, resp, err - } - - return pc, resp, nil + return do[*ProjectCluster](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/clusters/user", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditClusterOptions represents the available EditCluster() options. @@ -227,25 +186,13 @@ type EditPlatformKubernetesOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/project_clusters/#edit-project-cluster -func (s *ProjectClustersService) EditCluster(pid any, cluster int, opt *EditClusterOptions, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/clusters/%d", PathEscape(project), cluster) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pc := new(ProjectCluster) - resp, err := s.client.Do(req, pc) - if err != nil { - return nil, resp, err - } - - return pc, resp, nil +func (s *ProjectClustersService) EditCluster(pid any, cluster int64, opt *EditClusterOptions, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) { + return do[*ProjectCluster](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/clusters/%d", ProjectID{pid}, cluster), + 
withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteCluster deletes an existing project cluster. @@ -253,17 +200,11 @@ func (s *ProjectClustersService) EditCluster(pid any, cluster int, opt *EditClus // // GitLab API docs: // https://docs.gitlab.com/api/project_clusters/#delete-project-cluster -func (s *ProjectClustersService) DeleteCluster(pid any, cluster int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/clusters/%d", PathEscape(project), cluster) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ProjectClustersService) DeleteCluster(pid any, cluster int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/clusters/%d", ProjectID{pid}, cluster), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_feature_flags.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_feature_flags.go index 53491bccf3..af9c413690 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_feature_flags.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_feature_flags.go @@ -1,17 +1,36 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( ProjectFeatureFlagServiceInterface interface { + // ListProjectFeatureFlags returns a list with the feature flags of a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/feature_flags/#list-feature-flags-for-a-project ListProjectFeatureFlags(pid any, opt *ListProjectFeatureFlagOptions, options ...RequestOptionFunc) ([]*ProjectFeatureFlag, *Response, error) + // GetProjectFeatureFlag gets a single feature flag for the specified project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/feature_flags/#get-a-single-feature-flag GetProjectFeatureFlag(pid any, name string, options ...RequestOptionFunc) (*ProjectFeatureFlag, *Response, error) + // CreateProjectFeatureFlag creates a feature flag. + // + // GitLab API docs: + // https://docs.gitlab.com/api/feature_flags/#create-a-feature-flag CreateProjectFeatureFlag(pid any, opt *CreateProjectFeatureFlagOptions, options ...RequestOptionFunc) (*ProjectFeatureFlag, *Response, error) + // UpdateProjectFeatureFlag updates a feature flag. + // + // GitLab API docs: + // https://docs.gitlab.com/api/feature_flags/#update-a-feature-flag UpdateProjectFeatureFlag(pid any, name string, opt *UpdateProjectFeatureFlagOptions, options ...RequestOptionFunc) (*ProjectFeatureFlag, *Response, error) + // DeleteProjectFeatureFlag deletes a feature flag. + // + // GitLab API docs: + // https://docs.gitlab.com/api/feature_flags/#delete-a-feature-flag DeleteProjectFeatureFlag(pid any, name string, options ...RequestOptionFunc) (*Response, error) } @@ -44,7 +63,7 @@ type ProjectFeatureFlag struct { // // GitLab API docs: https://docs.gitlab.com/api/feature_flags/ type ProjectFeatureFlagScope struct { - ID int `json:"id"` + ID int64 `json:"id"` EnvironmentScope string `json:"environment_scope"` } @@ -52,7 +71,7 @@ type ProjectFeatureFlagScope struct { // // GitLab API docs: https://docs.gitlab.com/api/feature_flags/ type ProjectFeatureFlagStrategy struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Parameters *ProjectFeatureFlagStrategyParameter `json:"parameters"` Scopes []*ProjectFeatureFlagScope `json:"scopes"` @@ -66,8 +85,8 @@ type ProjectFeatureFlagStrategyParameter struct { UserIDs string `json:"userIds,omitempty"` Percentage string `json:"percentage,omitempty"` - // Following fields aren't documented in Gitlab API docs, - // but are present in Gitlab API since 13.5. 
+ // Following fields aren't documented in GitLab API docs, + // but are present in GitLab API since 13.5. // Docs: https://docs.getunleash.io/reference/activation-strategies#gradual-rollout Rollout string `json:"rollout,omitempty"` Stickiness string `json:"stickiness,omitempty"` @@ -86,60 +105,25 @@ type ListProjectFeatureFlagOptions struct { Scope *string `url:"scope,omitempty" json:"scope,omitempty"` } -// ListProjectFeatureFlags returns a list with the feature flags of a project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/feature_flags/#list-feature-flags-for-a-project func (s *ProjectFeatureFlagService) ListProjectFeatureFlags(pid any, opt *ListProjectFeatureFlagOptions, options ...RequestOptionFunc) ([]*ProjectFeatureFlag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pffs []*ProjectFeatureFlag - resp, err := s.client.Do(req, &pffs) - if err != nil { - return nil, resp, err - } - - return pffs, resp, nil + return do[[]*ProjectFeatureFlag](s.client, + withPath("projects/%s/feature_flags", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetProjectFeatureFlag gets a single feature flag for the specified project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/feature_flags/#get-a-single-feature-flag func (s *ProjectFeatureFlagService) GetProjectFeatureFlag(pid any, name string, options ...RequestOptionFunc) (*ProjectFeatureFlag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags/%s", PathEscape(project), name) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - flag := new(ProjectFeatureFlag) - resp, err := s.client.Do(req, flag) - if err != nil { - return nil, resp, err - } - - return flag, resp, nil + return do[*ProjectFeatureFlag](s.client, + withPath("projects/%s/feature_flags/%s", ProjectID{pid}, name), + withRequestOpts(options...), + ) } // CreateProjectFeatureFlagOptions represents the available // CreateProjectFeatureFlag() options. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/feature_flags/#create-a-feature-flag type CreateProjectFeatureFlagOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` @@ -152,10 +136,10 @@ type CreateProjectFeatureFlagOptions struct { // FeatureFlagStrategyOptions represents the available feature flag strategy // options. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/feature_flags/#create-a-feature-flag type FeatureFlagStrategyOptions struct { - ID *int `url:"id,omitempty" json:"id,omitempty"` + ID *int64 `url:"id,omitempty" json:"id,omitempty"` Name *string `url:"name,omitempty" json:"name,omitempty"` Parameters *ProjectFeatureFlagStrategyParameter `url:"parameters,omitempty" json:"parameters,omitempty"` Scopes *[]*ProjectFeatureFlagScope `url:"scopes,omitempty" json:"scopes,omitempty"` @@ -164,44 +148,26 @@ type FeatureFlagStrategyOptions struct { // ProjectFeatureFlagScopeOptions represents the available feature flag scope // options. 
// -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/feature_flags/#create-a-feature-flag type ProjectFeatureFlagScopeOptions struct { - ID *int `url:"id,omitempty" json:"id,omitempty"` + ID *int64 `url:"id,omitempty" json:"id,omitempty"` EnvironmentScope *string `url:"id,omitempty" json:"environment_scope,omitempty"` } -// CreateProjectFeatureFlag creates a feature flag -// -// Gitlab API docs: -// https://docs.gitlab.com/api/feature_flags/#create-a-feature-flag func (s *ProjectFeatureFlagService) CreateProjectFeatureFlag(pid any, opt *CreateProjectFeatureFlagOptions, options ...RequestOptionFunc) (*ProjectFeatureFlag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags", - PathEscape(project), + return do[*ProjectFeatureFlag](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/feature_flags", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - flag := new(ProjectFeatureFlag) - resp, err := s.client.Do(req, flag) - if err != nil { - return flag, resp, err - } - - return flag, resp, nil } // UpdateProjectFeatureFlagOptions represents the available // UpdateProjectFeatureFlag() options. 
// -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/feature_flags/#update-a-feature-flag type UpdateProjectFeatureFlagOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` @@ -210,49 +176,20 @@ type UpdateProjectFeatureFlagOptions struct { Strategies *[]*FeatureFlagStrategyOptions `url:"strategies,omitempty" json:"strategies,omitempty"` } -// UpdateProjectFeatureFlag updates a feature flag -// -// Gitlab API docs: -// https://docs.gitlab.com/api/feature_flags/#update-a-feature-flag func (s *ProjectFeatureFlagService) UpdateProjectFeatureFlag(pid any, name string, opt *UpdateProjectFeatureFlagOptions, options ...RequestOptionFunc) (*ProjectFeatureFlag, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags/%s", - PathEscape(group), - name, + return do[*ProjectFeatureFlag](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/feature_flags/%s", ProjectID{pid}, name), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - flag := new(ProjectFeatureFlag) - resp, err := s.client.Do(req, flag) - if err != nil { - return flag, resp, err - } - - return flag, resp, nil } -// DeleteProjectFeatureFlag deletes a feature flag -// -// Gitlab API docs: -// https://docs.gitlab.com/api/feature_flags/#delete-a-feature-flag func (s *ProjectFeatureFlagService) DeleteProjectFeatureFlag(pid any, name string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags/%s", PathEscape(project), name) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + 
withPath("projects/%s/feature_flags/%s", ProjectID{pid}, name), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_import_export.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_import_export.go index 105ec930ce..671804a57f 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_import_export.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_import_export.go @@ -18,7 +18,6 @@ package gitlab import ( "bytes" - "fmt" "io" "net/http" "time" @@ -26,10 +25,30 @@ import ( type ( ProjectImportExportServiceInterface interface { + // ScheduleExport schedules a project export. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_import_export/#schedule-an-export ScheduleExport(pid any, opt *ScheduleExportOptions, options ...RequestOptionFunc) (*Response, error) + // ExportStatus gets the status of export. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_import_export/#export-status ExportStatus(pid any, options ...RequestOptionFunc) (*ExportStatus, *Response, error) + // ExportDownload downloads the finished export. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_import_export/#export-download ExportDownload(pid any, options ...RequestOptionFunc) ([]byte, *Response, error) + // ImportFromFile imports a project from an archive file. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_import_export/#import-a-file ImportFromFile(archive io.Reader, opt *ImportFileOptions, options ...RequestOptionFunc) (*ImportStatus, *Response, error) + // ImportStatus gets the status of an import. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/project_import_export/#import-status ImportStatus(pid any, options ...RequestOptionFunc) (*ImportStatus, *Response, error) } @@ -50,7 +69,7 @@ var _ ProjectImportExportServiceInterface = (*ProjectImportExportService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/project_import_export/#import-status type ImportStatus struct { - ID int `json:"id"` + ID int64 `json:"id"` Description string `json:"description"` Name string `json:"name"` NameWithNamespace string `json:"name_with_namespace"` @@ -72,104 +91,75 @@ func (s ImportStatus) String() string { // GitLab API docs: // https://docs.gitlab.com/api/project_import_export/#export-status type ExportStatus struct { - ID int `json:"id"` - Description string `json:"description"` - Name string `json:"name"` - NameWithNamespace string `json:"name_with_namespace"` - Path string `json:"path"` - PathWithNamespace string `json:"path_with_namespace"` - CreatedAt *time.Time `json:"created_at"` - ExportStatus string `json:"export_status"` - Message string `json:"message"` - Links struct { - APIURL string `json:"api_url"` - WebURL string `json:"web_url"` - } `json:"_links"` + ID int64 `json:"id"` + Description string `json:"description"` + Name string `json:"name"` + NameWithNamespace string `json:"name_with_namespace"` + Path string `json:"path"` + PathWithNamespace string `json:"path_with_namespace"` + CreatedAt *time.Time `json:"created_at"` + ExportStatus string `json:"export_status"` + Message string `json:"message"` + Links ExportStatusLinks `json:"_links"` } func (s ExportStatus) String() string { return Stringify(s) } -// ScheduleExportOptions represents the available ScheduleExport() options. +// ExportStatusLinks represents the project export status links. 
// // GitLab API docs: -// https://docs.gitlab.com/api/project_import_export/#schedule-an-export -type ScheduleExportOptions struct { - Description *string `url:"description,omitempty" json:"description,omitempty"` - Upload struct { - URL *string `url:"url,omitempty" json:"url,omitempty"` - HTTPMethod *string `url:"http_method,omitempty" json:"http_method,omitempty"` - } `url:"upload,omitempty" json:"upload,omitempty"` +// https://docs.gitlab.com/api/project_import_export/#export-status +type ExportStatusLinks struct { + APIURL string `json:"api_url"` + WebURL string `json:"web_url"` } -// ScheduleExport schedules a project export. +func (l ExportStatusLinks) String() string { + return Stringify(l) +} + +// ScheduleExportOptions represents the available ScheduleExport() options. // // GitLab API docs: // https://docs.gitlab.com/api/project_import_export/#schedule-an-export -func (s *ProjectImportExportService) ScheduleExport(pid any, opt *ScheduleExportOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/export", PathEscape(project)) +type ScheduleExportOptions struct { + Description *string `url:"description,omitempty" json:"description,omitempty"` + Upload ScheduleExportUploadOptions `url:"upload,omitempty" json:"upload,omitempty"` +} - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, err - } +type ScheduleExportUploadOptions struct { + URL *string `url:"url,omitempty" json:"url,omitempty"` + HTTPMethod *string `url:"http_method,omitempty" json:"http_method,omitempty"` +} - return s.client.Do(req, nil) +func (s *ProjectImportExportService) ScheduleExport(pid any, opt *ScheduleExportOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/export", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), 
+ ) + return resp, err } -// ExportStatus get the status of export. -// -// GitLab API docs: -// https://docs.gitlab.com/api/project_import_export/#export-status func (s *ProjectImportExportService) ExportStatus(pid any, options ...RequestOptionFunc) (*ExportStatus, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/export", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - es := new(ExportStatus) - resp, err := s.client.Do(req, es) - if err != nil { - return nil, resp, err - } - - return es, resp, nil + return do[*ExportStatus](s.client, + withPath("projects/%s/export", ProjectID{pid}), + withRequestOpts(options...), + ) } -// ExportDownload download the finished export. -// -// GitLab API docs: -// https://docs.gitlab.com/api/project_import_export/#export-download func (s *ProjectImportExportService) ExportDownload(pid any, options ...RequestOptionFunc) ([]byte, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/export/download", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) + buf, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/export/download", ProjectID{pid}), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return b.Bytes(), resp, err + return buf.Bytes(), resp, nil } // ImportFileOptions represents the available ImportFile() options. @@ -184,54 +174,19 @@ type ImportFileOptions struct { OverrideParams *CreateProjectOptions `url:"override_params,omitempty" json:"override_params,omitempty"` } -// ImportFromFile imports a project from an archive file. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_import_export/#import-a-file func (s *ProjectImportExportService) ImportFromFile(archive io.Reader, opt *ImportFileOptions, options ...RequestOptionFunc) (*ImportStatus, *Response, error) { - req, err := s.client.UploadRequest( - http.MethodPost, - "projects/import", - archive, - "archive.tar.gz", - UploadFile, - opt, - options, + return do[*ImportStatus](s.client, + withMethod(http.MethodPost), + withPath("projects/import"), + withUpload(archive, "archive.tar.gz", UploadFile), + withAPIOpts(opt), + withRequestOpts(options...), ) - if err != nil { - return nil, nil, err - } - - is := new(ImportStatus) - resp, err := s.client.Do(req, is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil } -// ImportStatus get the status of an import. -// -// GitLab API docs: -// https://docs.gitlab.com/api/project_import_export/#import-status func (s *ProjectImportExportService) ImportStatus(pid any, options ...RequestOptionFunc) (*ImportStatus, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/import", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - is := new(ImportStatus) - resp, err := s.client.Do(req, is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil + return do[*ImportStatus](s.client, + withPath("projects/%s/import", ProjectID{pid}), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_iterations.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_iterations.go index 2495e4cfa8..5f33c6cd14 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_iterations.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_iterations.go @@ -16,11 +16,7 @@ package gitlab -import ( - "fmt" - "net/http" - "time" -) +import "time" type ( 
ProjectIterationsServiceInterface interface { @@ -42,13 +38,13 @@ var _ ProjectIterationsServiceInterface = (*ProjectIterationsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/iterations/ type ProjectIteration struct { - ID int `json:"id"` - IID int `json:"iid"` - Sequence int `json:"sequence"` - GroupID int `json:"group_id"` + ID int64 `json:"id"` + IID int64 `json:"iid"` + Sequence int64 `json:"sequence"` + GroupID int64 `json:"group_id"` Title string `json:"title"` Description string `json:"description"` - State int `json:"state"` + State int64 `json:"state"` CreatedAt *time.Time `json:"created_at"` UpdatedAt *time.Time `json:"updated_at"` DueDate *ISOTime `json:"due_date"` @@ -77,22 +73,9 @@ type ListProjectIterationsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/iterations/#list-project-iterations func (i *ProjectIterationsService) ListProjectIterations(pid any, opt *ListProjectIterationsOptions, options ...RequestOptionFunc) ([]*ProjectIteration, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/iterations", PathEscape(project)) - - req, err := i.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pis []*ProjectIteration - resp, err := i.client.Do(req, &pis) - if err != nil { - return nil, resp, err - } - - return pis, resp, nil + return do[[]*ProjectIteration](i.client, + withPath("projects/%s/iterations", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_markdown_uploads.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_markdown_uploads.go index 88a39320b1..67e23fe9ca 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_markdown_uploads.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_markdown_uploads.go @@ -17,18 +17,43 @@ package gitlab import ( - "fmt" "io" "net/http" ) type ( 
ProjectMarkdownUploadsServiceInterface interface { + // UploadProjectMarkdown uploads a markdown file to a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_markdown_uploads/#upload-a-file UploadProjectMarkdown(pid any, content io.Reader, filename string, options ...RequestOptionFunc) (*ProjectMarkdownUploadedFile, *Response, error) + // ListProjectMarkdownUploads gets all markdown uploads for a project. + // + // GitLab API Docs: + // https://docs.gitlab.com/api/project_markdown_uploads/#list-uploads ListProjectMarkdownUploads(pid any, options ...RequestOptionFunc) ([]*ProjectMarkdownUpload, *Response, error) - DownloadProjectMarkdownUploadByID(pid any, uploadID int, options ...RequestOptionFunc) ([]byte, *Response, error) + // DownloadProjectMarkdownUploadByID downloads a specific upload by ID. + // + // GitLab API Docs: + // https://docs.gitlab.com/api/project_markdown_uploads/#download-an-uploaded-file-by-id + DownloadProjectMarkdownUploadByID(pid any, uploadID int64, options ...RequestOptionFunc) ([]byte, *Response, error) + // DownloadProjectMarkdownUploadBySecretAndFilename downloads a specific upload + // by secret and filename. + // + // GitLab API Docs: + // https://docs.gitlab.com/api/project_markdown_uploads/#download-an-uploaded-file-by-secret-and-filename DownloadProjectMarkdownUploadBySecretAndFilename(pid any, secret string, filename string, options ...RequestOptionFunc) ([]byte, *Response, error) - DeleteProjectMarkdownUploadByID(pid any, uploadID int, options ...RequestOptionFunc) (*Response, error) + // DeleteProjectMarkdownUploadByID deletes an upload by ID. + // + // GitLab API Docs: + // https://docs.gitlab.com/api/project_markdown_uploads/#delete-an-uploaded-file-by-id + DeleteProjectMarkdownUploadByID(pid any, uploadID int64, options ...RequestOptionFunc) (*Response, error) + // DeleteProjectMarkdownUploadBySecretAndFilename deletes an upload + // by secret and filename. 
+ // + // GitLab API Docs: + // https://docs.gitlab.com/api/project_markdown_uploads/#delete-an-uploaded-file-by-secret-and-filename DeleteProjectMarkdownUploadBySecretAndFilename(pid any, secret string, filename string, options ...RequestOptionFunc) (*Response, error) } @@ -50,85 +75,39 @@ type ( ProjectMarkdownUploadedFile = MarkdownUploadedFile ) -// UploadProjectMarkdown uploads a markdown file to a project. -// -// GitLab docs: -// https://docs.gitlab.com/api/project_markdown_uploads/#upload-a-file func (s *ProjectMarkdownUploadsService) UploadProjectMarkdown(pid any, content io.Reader, filename string, options ...RequestOptionFunc) (*ProjectMarkdownUploadedFile, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/uploads", PathEscape(project)) - - req, err := s.client.UploadRequest( - http.MethodPost, - u, - content, - filename, - UploadFile, - nil, - options, + return do[*ProjectMarkdownUploadedFile](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/uploads", ProjectID{pid}), + withUpload(content, filename, UploadFile), + withRequestOpts(options...), ) - if err != nil { - return nil, nil, err - } - - f := new(ProjectMarkdownUploadedFile) - resp, err := s.client.Do(req, f) - if err != nil { - return nil, resp, err - } - - return f, resp, nil } -// ListProjectMarkdownUploads gets all markdown uploads for a project. -// -// GitLab API Docs: -// https://docs.gitlab.com/api/project_markdown_uploads/#list-uploads func (s *ProjectMarkdownUploadsService) ListProjectMarkdownUploads(pid any, options ...RequestOptionFunc) ([]*ProjectMarkdownUpload, *Response, error) { - return listMarkdownUploads[ProjectMarkdownUpload](s.client, ProjectResource, pid, nil, options) + return listMarkdownUploads[ProjectMarkdownUpload](s.client, ProjectResource, ProjectID{pid}, nil, options) } -// DownloadProjectMarkdownUploadByID downloads a specific upload by ID. 
-// -// GitLab API Docs: -// https://docs.gitlab.com/api/project_markdown_uploads/#download-an-uploaded-file-by-id -func (s *ProjectMarkdownUploadsService) DownloadProjectMarkdownUploadByID(pid any, uploadID int, options ...RequestOptionFunc) ([]byte, *Response, error) { - buffer, resp, err := downloadMarkdownUploadByID(s.client, ProjectResource, pid, uploadID, options) +func (s *ProjectMarkdownUploadsService) DownloadProjectMarkdownUploadByID(pid any, uploadID int64, options ...RequestOptionFunc) ([]byte, *Response, error) { + buffer, resp, err := downloadMarkdownUploadByID(s.client, ProjectResource, ProjectID{pid}, uploadID, options) if err != nil { return nil, resp, err } return buffer.Bytes(), resp, nil } -// DownloadProjectMarkdownUploadBySecretAndFilename downloads a specific upload -// by secret and filename. -// -// GitLab API Docs: -// https://docs.gitlab.com/api/project_markdown_uploads/#download-an-uploaded-file-by-secret-and-filename func (s *ProjectMarkdownUploadsService) DownloadProjectMarkdownUploadBySecretAndFilename(pid any, secret string, filename string, options ...RequestOptionFunc) ([]byte, *Response, error) { - buffer, resp, err := downloadMarkdownUploadBySecretAndFilename(s.client, ProjectResource, pid, secret, filename, options) + buffer, resp, err := downloadMarkdownUploadBySecretAndFilename(s.client, ProjectResource, ProjectID{pid}, secret, filename, options) if err != nil { return nil, resp, err } return buffer.Bytes(), resp, nil } -// DeleteProjectMarkdownUploadByID deletes an upload by ID. 
-// -// GitLab API Docs: -// https://docs.gitlab.com/api/project_markdown_uploads/#delete-an-uploaded-file-by-id -func (s *ProjectMarkdownUploadsService) DeleteProjectMarkdownUploadByID(pid any, uploadID int, options ...RequestOptionFunc) (*Response, error) { - return deleteMarkdownUploadByID(s.client, ProjectResource, pid, uploadID, options) +func (s *ProjectMarkdownUploadsService) DeleteProjectMarkdownUploadByID(pid any, uploadID int64, options ...RequestOptionFunc) (*Response, error) { + return deleteMarkdownUploadByID(s.client, ProjectResource, ProjectID{pid}, uploadID, options) } -// DeleteProjectMarkdownUploadBySecretAndFilename deletes an upload -// by secret and filename. -// -// GitLab API Docs: -// https://docs.gitlab.com/api/project_markdown_uploads/#delete-an-uploaded-file-by-secret-and-filename func (s *ProjectMarkdownUploadsService) DeleteProjectMarkdownUploadBySecretAndFilename(pid any, secret string, filename string, options ...RequestOptionFunc) (*Response, error) { - return deleteMarkdownUploadBySecretAndFilename(s.client, ProjectResource, pid, secret, filename, options) + return deleteMarkdownUploadBySecretAndFilename(s.client, ProjectResource, ProjectID{pid}, secret, filename, options) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_members.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_members.go index fa4bbcef7c..1d2b6a552a 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_members.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_members.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -26,11 +25,11 @@ type ( ProjectMembersServiceInterface interface { ListProjectMembers(pid any, opt *ListProjectMembersOptions, options ...RequestOptionFunc) ([]*ProjectMember, *Response, error) ListAllProjectMembers(pid any, opt *ListProjectMembersOptions, options ...RequestOptionFunc) ([]*ProjectMember, *Response, error) - GetProjectMember(pid any, user int, options 
...RequestOptionFunc) (*ProjectMember, *Response, error) - GetInheritedProjectMember(pid any, user int, options ...RequestOptionFunc) (*ProjectMember, *Response, error) + GetProjectMember(pid any, user int64, options ...RequestOptionFunc) (*ProjectMember, *Response, error) + GetInheritedProjectMember(pid any, user int64, options ...RequestOptionFunc) (*ProjectMember, *Response, error) AddProjectMember(pid any, opt *AddProjectMemberOptions, options ...RequestOptionFunc) (*ProjectMember, *Response, error) - EditProjectMember(pid any, user int, opt *EditProjectMemberOptions, options ...RequestOptionFunc) (*ProjectMember, *Response, error) - DeleteProjectMember(pid any, user int, options ...RequestOptionFunc) (*Response, error) + EditProjectMember(pid any, user int64, opt *EditProjectMemberOptions, options ...RequestOptionFunc) (*ProjectMember, *Response, error) + DeleteProjectMember(pid any, user int64, options ...RequestOptionFunc) (*Response, error) } // ProjectMembersService handles communication with the project members @@ -49,17 +48,28 @@ var _ ProjectMembersServiceInterface = (*ProjectMembersService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/members/ type ProjectMember struct { - ID int `json:"id"` + ID int64 `json:"id"` Username string `json:"username"` Email string `json:"email"` Name string `json:"name"` State string `json:"state"` CreatedAt *time.Time `json:"created_at"` + CreatedBy *MemberCreatedBy `json:"created_by"` ExpiresAt *ISOTime `json:"expires_at"` AccessLevel AccessLevelValue `json:"access_level"` WebURL string `json:"web_url"` AvatarURL string `json:"avatar_url"` MemberRole *MemberRole `json:"member_role"` + IsUsingSeat bool `json:"is_using_seat,omitempty"` +} + +type MemberCreatedBy struct { + ID int64 `json:"id"` + Username string `json:"username"` + Name string `json:"name"` + State string `json:"state"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` } // ListProjectMembersOptions represents the available 
ListProjectMembers() and @@ -69,8 +79,9 @@ type ProjectMember struct { // https://docs.gitlab.com/api/members/#list-all-members-of-a-group-or-project type ListProjectMembersOptions struct { ListOptions - Query *string `url:"query,omitempty" json:"query,omitempty"` - UserIDs *[]int `url:"user_ids[],omitempty" json:"user_ids,omitempty"` + Query *string `url:"query,omitempty" json:"query,omitempty"` + UserIDs *[]int64 `url:"user_ids[],omitempty" json:"user_ids,omitempty"` + ShowSeatInfo *bool `url:"show_seat_info,omitempty" json:"show_seat_info,omitempty"` } // ListProjectMembers gets a list of a project's team members viewable by the @@ -80,24 +91,11 @@ type ListProjectMembersOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/members/#list-all-members-of-a-group-or-project func (s *ProjectMembersService) ListProjectMembers(pid any, opt *ListProjectMembersOptions, options ...RequestOptionFunc) ([]*ProjectMember, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/members", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pm []*ProjectMember - resp, err := s.client.Do(req, &pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil + return do[[]*ProjectMember](s.client, + withPath("projects/%s/members", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListAllProjectMembers gets a list of a project's team members viewable by the @@ -107,74 +105,33 @@ func (s *ProjectMembersService) ListProjectMembers(pid any, opt *ListProjectMemb // GitLab API docs: // https://docs.gitlab.com/api/members/#list-all-members-of-a-group-or-project-including-inherited-and-invited-members func (s *ProjectMembersService) ListAllProjectMembers(pid any, opt *ListProjectMembersOptions, options ...RequestOptionFunc) ([]*ProjectMember, *Response, error) { - project, err 
:= parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/members/all", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pm []*ProjectMember - resp, err := s.client.Do(req, &pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil + return do[[]*ProjectMember](s.client, + withPath("projects/%s/members/all", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetProjectMember gets a project team member. // // GitLab API docs: // https://docs.gitlab.com/api/members/#get-a-member-of-a-group-or-project -func (s *ProjectMembersService) GetProjectMember(pid any, user int, options ...RequestOptionFunc) (*ProjectMember, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/members/%d", PathEscape(project), user) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pm := new(ProjectMember) - resp, err := s.client.Do(req, pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil +func (s *ProjectMembersService) GetProjectMember(pid any, user int64, options ...RequestOptionFunc) (*ProjectMember, *Response, error) { + return do[*ProjectMember](s.client, + withPath("projects/%s/members/%d", ProjectID{pid}, user), + withRequestOpts(options...), + ) } // GetInheritedProjectMember gets a project team member, including inherited // // GitLab API docs: // https://docs.gitlab.com/api/members/#get-a-member-of-a-group-or-project-including-inherited-and-invited-members -func (s *ProjectMembersService) GetInheritedProjectMember(pid any, user int, options ...RequestOptionFunc) (*ProjectMember, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/members/all/%d", PathEscape(project), user) - - 
req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pm := new(ProjectMember) - resp, err := s.client.Do(req, pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil +func (s *ProjectMembersService) GetInheritedProjectMember(pid any, user int64, options ...RequestOptionFunc) (*ProjectMember, *Response, error) { + return do[*ProjectMember](s.client, + withPath("projects/%s/members/all/%d", ProjectID{pid}, user), + withRequestOpts(options...), + ) } // AddProjectMemberOptions represents the available AddProjectMember() options. @@ -186,7 +143,7 @@ type AddProjectMemberOptions struct { Username *string `url:"username,omitempty" json:"username,omitempty"` AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at"` - MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` + MemberRoleID *int64 `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` } // AddProjectMember adds a user to a project team. 
This is an idempotent @@ -197,24 +154,12 @@ type AddProjectMemberOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/members/#add-a-member-to-a-group-or-project func (s *ProjectMembersService) AddProjectMember(pid any, opt *AddProjectMemberOptions, options ...RequestOptionFunc) (*ProjectMember, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/members", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pm := new(ProjectMember) - resp, err := s.client.Do(req, pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil + return do[*ProjectMember](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/members", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditProjectMemberOptions represents the available EditProjectMember() options. @@ -224,49 +169,31 @@ func (s *ProjectMembersService) AddProjectMember(pid any, opt *AddProjectMemberO type EditProjectMemberOptions struct { AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at,omitempty"` - MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` + MemberRoleID *int64 `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` } // EditProjectMember updates a project team member to a specified access level.. 
// // GitLab API docs: // https://docs.gitlab.com/api/members/#edit-a-member-of-a-group-or-project -func (s *ProjectMembersService) EditProjectMember(pid any, user int, opt *EditProjectMemberOptions, options ...RequestOptionFunc) (*ProjectMember, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/members/%d", PathEscape(project), user) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pm := new(ProjectMember) - resp, err := s.client.Do(req, pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil +func (s *ProjectMembersService) EditProjectMember(pid any, user int64, opt *EditProjectMemberOptions, options ...RequestOptionFunc) (*ProjectMember, *Response, error) { + return do[*ProjectMember](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/members/%d", ProjectID{pid}, user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteProjectMember removes a user from a project team. 
// // GitLab API docs: // https://docs.gitlab.com/api/members/#remove-a-member-from-a-group-or-project -func (s *ProjectMembersService) DeleteProjectMember(pid any, user int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/members/%d", PathEscape(project), user) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ProjectMembersService) DeleteProjectMember(pid any, user int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/members/%d", ProjectID{pid}, user), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_mirror.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_mirror.go index c8ef609700..51cf3eaa1b 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_mirror.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_mirror.go @@ -17,19 +17,47 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( ProjectMirrorServiceInterface interface { + // ListProjectMirror gets a list of mirrors configured on the project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/remote_mirrors/#list-a-projects-remote-mirrors ListProjectMirror(pid any, opt *ListProjectMirrorOptions, options ...RequestOptionFunc) ([]*ProjectMirror, *Response, error) - GetProjectMirror(pid any, mirror int, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) - GetProjectMirrorPublicKey(pid any, mirror int, options ...RequestOptionFunc) (*ProjectMirrorPublicKey, *Response, error) + // GetProjectMirror gets a single mirror configured on the project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/remote_mirrors/#get-a-single-projects-remote-mirror + GetProjectMirror(pid any, mirror int64, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) + // GetProjectMirrorPublicKey gets the SSH public key for a single mirror configured on the project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/remote_mirrors/#get-a-single-projects-remote-mirror-public-key + GetProjectMirrorPublicKey(pid any, mirror int64, options ...RequestOptionFunc) (*ProjectMirrorPublicKey, *Response, error) + // AddProjectMirror creates a new mirror on the project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/remote_mirrors/#create-a-push-mirror AddProjectMirror(pid any, opt *AddProjectMirrorOptions, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) - EditProjectMirror(pid any, mirror int, opt *EditProjectMirrorOptions, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) - DeleteProjectMirror(pid any, mirror int, options ...RequestOptionFunc) (*Response, error) + // EditProjectMirror updates a remote mirror's attributes. + // + // GitLab API docs: + // https://docs.gitlab.com/api/remote_mirrors/#update-a-remote-mirrors-attributes + EditProjectMirror(pid any, mirror int64, opt *EditProjectMirrorOptions, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) + // DeleteProjectMirror deletes a project mirror. + // + // GitLab API docs: + // https://docs.gitlab.com/api/remote_mirrors/#delete-a-remote-mirror + DeleteProjectMirror(pid any, mirror int64, options ...RequestOptionFunc) (*Response, error) + // ForcePushMirrorUpdate triggers a manual update for a project mirror. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/remote_mirrors/#force-push-mirror-update + ForcePushMirrorUpdate(pid any, mirror int64, options ...RequestOptionFunc) (*Response, error) } // ProjectMirrorService handles communication with the project mirror @@ -48,7 +76,7 @@ var _ ProjectMirrorServiceInterface = (*ProjectMirrorService)(nil) // GitLAb API docs: https://docs.gitlab.com/api/remote_mirrors/ type ProjectMirror struct { Enabled bool `json:"enabled"` - ID int `json:"id"` + ID int64 `json:"id"` LastError string `json:"last_error"` LastSuccessfulUpdateAt *time.Time `json:"last_successful_update_at"` LastUpdateAt *time.Time `json:"last_update_at"` @@ -66,81 +94,30 @@ type ProjectMirrorPublicKey struct { } // ListProjectMirrorOptions represents the available ListProjectMirror() options. -type ListProjectMirrorOptions ListOptions +type ListProjectMirrorOptions struct { + ListOptions +} -// ListProjectMirror gets a list of mirrors configured on the project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/remote_mirrors/#list-a-projects-remote-mirrors func (s *ProjectMirrorService) ListProjectMirror(pid any, opt *ListProjectMirrorOptions, options ...RequestOptionFunc) ([]*ProjectMirror, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/remote_mirrors", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pm []*ProjectMirror - resp, err := s.client.Do(req, &pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil + return do[[]*ProjectMirror](s.client, + withPath("projects/%s/remote_mirrors", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetProjectMirror gets a single mirror configured on the project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/remote_mirrors/#get-a-single-projects-remote-mirror -func (s *ProjectMirrorService) GetProjectMirror(pid any, mirror int, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/remote_mirrors/%d", PathEscape(project), mirror) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pm := new(ProjectMirror) - resp, err := s.client.Do(req, &pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil +func (s *ProjectMirrorService) GetProjectMirror(pid any, mirror int64, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) { + return do[*ProjectMirror](s.client, + withPath("projects/%s/remote_mirrors/%d", ProjectID{pid}, mirror), + withRequestOpts(options...), + ) } -// GetProjectMirrorPublicKey gets the SSH public key for a single mirror configured on the project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/remote_mirrors/#get-a-single-projects-remote-mirror-public-key -func (s *ProjectMirrorService) GetProjectMirrorPublicKey(pid any, mirror int, options ...RequestOptionFunc) (*ProjectMirrorPublicKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/remote_mirrors/%d/public_key", PathEscape(project), mirror) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pmpk := new(ProjectMirrorPublicKey) - resp, err := s.client.Do(req, &pmpk) - if err != nil { - return nil, resp, err - } - - return pmpk, resp, nil +func (s *ProjectMirrorService) GetProjectMirrorPublicKey(pid any, mirror int64, options ...RequestOptionFunc) (*ProjectMirrorPublicKey, *Response, error) { + return do[*ProjectMirrorPublicKey](s.client, + withPath("projects/%s/remote_mirrors/%d/public_key", ProjectID{pid}, mirror), + withRequestOpts(options...), + ) } // AddProjectMirrorOptions contains the properties requires to create @@ -157,29 +134,13 @@ type AddProjectMirrorOptions struct { AuthMethod *string `url:"auth_method,omitempty" json:"auth_method,omitempty"` } -// AddProjectMirror creates a new mirror on the project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/remote_mirrors/#create-a-push-mirror func (s *ProjectMirrorService) AddProjectMirror(pid any, opt *AddProjectMirrorOptions, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/remote_mirrors", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pm := new(ProjectMirror) - resp, err := s.client.Do(req, pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil + return do[*ProjectMirror](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/remote_mirrors", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditProjectMirrorOptions contains the properties requires to edit @@ -195,46 +156,29 @@ type EditProjectMirrorOptions struct { AuthMethod *string `url:"auth_method,omitempty" json:"auth_method,omitempty"` } -// EditProjectMirror updates a project team member to a specified access level.. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/remote_mirrors/#update-a-remote-mirrors-attributes -func (s *ProjectMirrorService) EditProjectMirror(pid any, mirror int, opt *EditProjectMirrorOptions, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/remote_mirrors/%d", PathEscape(project), mirror) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pm := new(ProjectMirror) - resp, err := s.client.Do(req, pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil +func (s *ProjectMirrorService) EditProjectMirror(pid any, mirror int64, opt *EditProjectMirrorOptions, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) { + return do[*ProjectMirror](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/remote_mirrors/%d", ProjectID{pid}, mirror), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteProjectMirror deletes a project mirror. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/remote_mirrors/#delete-a-remote-mirror -func (s *ProjectMirrorService) DeleteProjectMirror(pid any, mirror int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/remote_mirrors/%d", PathEscape(project), mirror) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } +func (s *ProjectMirrorService) DeleteProjectMirror(pid any, mirror int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/remote_mirrors/%d", ProjectID{pid}, mirror), + withRequestOpts(options...), + ) + return resp, err +} - return s.client.Do(req, nil) +func (s *ProjectMirrorService) ForcePushMirrorUpdate(pid any, mirror int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/remote_mirrors/%d/sync", ProjectID{pid}, mirror), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_repository_storage_move.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_repository_storage_move.go index fe23dfbc8c..a3411ed3d0 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_repository_storage_move.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_repository_storage_move.go @@ -17,18 +17,43 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( ProjectRepositoryStorageMoveServiceInterface interface { + // RetrieveAllStorageMoves retrieves all project repository storage moves + // accessible by the authenticated user. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/project_repository_storage_moves/#retrieve-all-project-repository-storage-moves RetrieveAllStorageMoves(opts RetrieveAllProjectStorageMovesOptions, options ...RequestOptionFunc) ([]*ProjectRepositoryStorageMove, *Response, error) - RetrieveAllStorageMovesForProject(project int, opts RetrieveAllProjectStorageMovesOptions, options ...RequestOptionFunc) ([]*ProjectRepositoryStorageMove, *Response, error) - GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) - GetStorageMoveForProject(project int, repositoryStorage int, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) - ScheduleStorageMoveForProject(project int, opts ScheduleStorageMoveForProjectOptions, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) + // RetrieveAllStorageMovesForProject retrieves all repository storage moves for + // a single project accessible by the authenticated user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_repository_storage_moves/#retrieve-all-repository-storage-moves-for-a-project + RetrieveAllStorageMovesForProject(project int64, opts RetrieveAllProjectStorageMovesOptions, options ...RequestOptionFunc) ([]*ProjectRepositoryStorageMove, *Response, error) + // GetStorageMove gets a single project repository storage move. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_repository_storage_moves/#get-a-single-project-repository-storage-move + GetStorageMove(repositoryStorage int64, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) + // GetStorageMoveForProject gets a single repository storage move for a project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/project_repository_storage_moves/#get-a-single-repository-storage-move-for-a-project + GetStorageMoveForProject(project int64, repositoryStorage int64, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) + // ScheduleStorageMoveForProject schedule a repository to be moved for a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_repository_storage_moves/#schedule-a-repository-storage-move-for-a-project + ScheduleStorageMoveForProject(project int64, opts ScheduleStorageMoveForProjectOptions, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) + // ScheduleAllStorageMoves schedules all repositories to be moved. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_repository_storage_moves/#schedule-repository-storage-moves-for-all-projects-on-a-storage-shard ScheduleAllStorageMoves(opts ScheduleAllProjectStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) } @@ -49,7 +74,7 @@ var _ ProjectRepositoryStorageMoveServiceInterface = (*ProjectRepositoryStorageM // GitLab API docs: // https://docs.gitlab.com/api/project_repository_storage_moves/ type ProjectRepositoryStorageMove struct { - ID int `json:"id"` + ID int64 `json:"id"` CreatedAt *time.Time `json:"created_at"` State string `json:"state"` SourceStorageName string `json:"source_storage_name"` @@ -58,7 +83,7 @@ type ProjectRepositoryStorageMove struct { } type RepositoryProject struct { - ID int `json:"id"` + ID int64 `json:"id"` Description string `json:"description"` Name string `json:"name"` NameWithNamespace string `json:"name_with_namespace"` @@ -72,90 +97,38 @@ type RepositoryProject struct { // // GitLab API docs: // https://docs.gitlab.com/api/project_repository_storage_moves/#retrieve-all-project-repository-storage-moves -type RetrieveAllProjectStorageMovesOptions ListOptions +type RetrieveAllProjectStorageMovesOptions struct { + 
ListOptions +} -// RetrieveAllStorageMoves retrieves all project repository storage moves -// accessible by the authenticated user. -// -// GitLab API docs: -// https://docs.gitlab.com/api/project_repository_storage_moves/#retrieve-all-project-repository-storage-moves func (p ProjectRepositoryStorageMoveService) RetrieveAllStorageMoves(opts RetrieveAllProjectStorageMovesOptions, options ...RequestOptionFunc) ([]*ProjectRepositoryStorageMove, *Response, error) { - req, err := p.client.NewRequest(http.MethodGet, "project_repository_storage_moves", opts, options) - if err != nil { - return nil, nil, err - } - - var psms []*ProjectRepositoryStorageMove - resp, err := p.client.Do(req, &psms) - if err != nil { - return nil, resp, err - } - - return psms, resp, err + return do[[]*ProjectRepositoryStorageMove](p.client, + withPath("project_repository_storage_moves"), + withAPIOpts(opts), + withRequestOpts(options...), + ) } -// RetrieveAllStorageMovesForProject retrieves all repository storage moves for -// a single project accessible by the authenticated user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_repository_storage_moves/#retrieve-all-repository-storage-moves-for-a-project -func (p ProjectRepositoryStorageMoveService) RetrieveAllStorageMovesForProject(project int, opts RetrieveAllProjectStorageMovesOptions, options ...RequestOptionFunc) ([]*ProjectRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("projects/%d/repository_storage_moves", project) - - req, err := p.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var psms []*ProjectRepositoryStorageMove - resp, err := p.client.Do(req, &psms) - if err != nil { - return nil, resp, err - } - - return psms, resp, err +func (p ProjectRepositoryStorageMoveService) RetrieveAllStorageMovesForProject(project int64, opts RetrieveAllProjectStorageMovesOptions, options ...RequestOptionFunc) ([]*ProjectRepositoryStorageMove, *Response, error) { + return do[[]*ProjectRepositoryStorageMove](p.client, + withPath("projects/%d/repository_storage_moves", project), + withAPIOpts(opts), + withRequestOpts(options...), + ) } -// GetStorageMove gets a single project repository storage move. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_repository_storage_moves/#get-a-single-project-repository-storage-move -func (p ProjectRepositoryStorageMoveService) GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("project_repository_storage_moves/%d", repositoryStorage) - - req, err := p.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - psm := new(ProjectRepositoryStorageMove) - resp, err := p.client.Do(req, psm) - if err != nil { - return nil, resp, err - } - - return psm, resp, err +func (p ProjectRepositoryStorageMoveService) GetStorageMove(repositoryStorage int64, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) { + return do[*ProjectRepositoryStorageMove](p.client, + withPath("project_repository_storage_moves/%d", repositoryStorage), + withRequestOpts(options...), + ) } -// GetStorageMoveForProject gets a single repository storage move for a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_repository_storage_moves/#get-a-single-repository-storage-move-for-a-project -func (p ProjectRepositoryStorageMoveService) GetStorageMoveForProject(project int, repositoryStorage int, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("projects/%d/repository_storage_moves/%d", project, repositoryStorage) - - req, err := p.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - psm := new(ProjectRepositoryStorageMove) - resp, err := p.client.Do(req, psm) - if err != nil { - return nil, resp, err - } - - return psm, resp, err +func (p ProjectRepositoryStorageMoveService) GetStorageMoveForProject(project int64, repositoryStorage int64, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) { + return do[*ProjectRepositoryStorageMove](p.client, + withPath("projects/%d/repository_storage_moves/%d", project, repositoryStorage), + withRequestOpts(options...), + ) } // ScheduleStorageMoveForProjectOptions represents the available @@ -167,25 +140,13 @@ type ScheduleStorageMoveForProjectOptions struct { DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"` } -// ScheduleStorageMoveForProject schedule a repository to be moved for a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_repository_storage_moves/#schedule-a-repository-storage-move-for-a-project -func (p ProjectRepositoryStorageMoveService) ScheduleStorageMoveForProject(project int, opts ScheduleStorageMoveForProjectOptions, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("projects/%d/repository_storage_moves", project) - - req, err := p.client.NewRequest(http.MethodPost, u, opts, options) - if err != nil { - return nil, nil, err - } - - psm := new(ProjectRepositoryStorageMove) - resp, err := p.client.Do(req, psm) - if err != nil { - return nil, resp, err - } - - return psm, resp, err +func (p ProjectRepositoryStorageMoveService) ScheduleStorageMoveForProject(project int64, opts ScheduleStorageMoveForProjectOptions, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) { + return do[*ProjectRepositoryStorageMove](p.client, + withMethod(http.MethodPost), + withPath("projects/%d/repository_storage_moves", project), + withAPIOpts(opts), + withRequestOpts(options...), + ) } // ScheduleAllProjectStorageMovesOptions represents the available @@ -198,15 +159,12 @@ type ScheduleAllProjectStorageMovesOptions struct { DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"` } -// ScheduleAllStorageMoves schedules all repositories to be moved. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_repository_storage_moves/#schedule-repository-storage-moves-for-all-projects-on-a-storage-shard func (p ProjectRepositoryStorageMoveService) ScheduleAllStorageMoves(opts ScheduleAllProjectStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) { - req, err := p.client.NewRequest(http.MethodPost, "project_repository_storage_moves", opts, options) - if err != nil { - return nil, err - } - - return p.client.Do(req, nil) + _, resp, err := do[none](p.client, + withMethod(http.MethodPost), + withPath("project_repository_storage_moves"), + withAPIOpts(opts), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_security_settings.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_security_settings.go index 64768ca0a2..d200c576ab 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_security_settings.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_security_settings.go @@ -16,21 +16,29 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( ProjectSecuritySettingsServiceInterface interface { + // ListProjectSecuritySettings lists all of a project's security settings. + // + // GitLab API Docs: + // https://docs.gitlab.com/api/project_security_settings/#list-project-security-settings ListProjectSecuritySettings(pid any, options ...RequestOptionFunc) (*ProjectSecuritySettings, *Response, error) + // UpdateSecretPushProtectionEnabledSetting updates the secret_push_protection_enabled + // setting for a project to the provided value. 
+ // + // GitLab API Docs: + // https://docs.gitlab.com/api/project_security_settings/#update-secret_push_protection_enabled-setting UpdateSecretPushProtectionEnabledSetting(pid any, opt UpdateProjectSecuritySettingsOptions, options ...RequestOptionFunc) (*ProjectSecuritySettings, *Response, error) } // ProjectSecuritySettingsService handles communication with the Project Security Settings // related methods of the GitLab API. // - // Gitlab API docs: + // GitLab API docs: // https://docs.gitlab.com/api/project_security_settings/ ProjectSecuritySettingsService struct { client *Client @@ -41,7 +49,7 @@ var _ ProjectSecuritySettingsServiceInterface = (*ProjectSecuritySettingsService // ProjectSecuritySettings represents the project security settings data. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/project_security_settings/ type ProjectSecuritySettings struct { ProjectID int64 `json:"project_id"` @@ -64,28 +72,11 @@ func (s ProjectSecuritySettings) String() string { return Stringify(s) } -// ListProjectSecuritySettings lists all of a project's security settings. 
-// -// GitLab API Docs: -// https://docs.gitlab.com/api/project_security_settings/#list-project-security-settings func (s *ProjectSecuritySettingsService) ListProjectSecuritySettings(pid any, options ...RequestOptionFunc) (*ProjectSecuritySettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/security_settings", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - settings := new(ProjectSecuritySettings) - resp, err := s.client.Do(req, &settings) - if err != nil { - return nil, resp, err - } - - return settings, resp, err + return do[*ProjectSecuritySettings](s.client, + withPath("projects/%s/security_settings", ProjectID{pid}), + withRequestOpts(options...), + ) } // UpdateProjectSecuritySettingsOptions represent the request options for updating @@ -97,27 +88,11 @@ type UpdateProjectSecuritySettingsOptions struct { SecretPushProtectionEnabled *bool `url:"secret_push_protection_enabled,omitempty" json:"secret_push_protection_enabled,omitempty"` } -// UpdateSecretPushProtectionEnabledSetting updates the secret_push_protection_enabled -// setting for the all projects in a project to the provided value. 
-// -// GitLab API Docs: -// https://docs.gitlab.com/api/project_security_settings/#update-secret_push_protection_enabled-setting func (s *ProjectSecuritySettingsService) UpdateSecretPushProtectionEnabledSetting(pid any, opt UpdateProjectSecuritySettingsOptions, options ...RequestOptionFunc) (*ProjectSecuritySettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/security_settings", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - settings := new(ProjectSecuritySettings) - resp, err := s.client.Do(req, &settings) - if err != nil { - return nil, resp, err - } - - return settings, resp, err + return do[*ProjectSecuritySettings](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/security_settings", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_snippets.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_snippets.go index 61c7e59e27..b35e2a7cff 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_snippets.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_snippets.go @@ -18,18 +18,17 @@ package gitlab import ( "bytes" - "fmt" "net/http" ) type ( ProjectSnippetsServiceInterface interface { ListSnippets(pid any, opt *ListProjectSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) - GetSnippet(pid any, snippet int, options ...RequestOptionFunc) (*Snippet, *Response, error) + GetSnippet(pid any, snippet int64, options ...RequestOptionFunc) (*Snippet, *Response, error) CreateSnippet(pid any, opt *CreateProjectSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) - UpdateSnippet(pid any, snippet int, opt *UpdateProjectSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) - DeleteSnippet(pid any, snippet int, options 
...RequestOptionFunc) (*Response, error) - SnippetContent(pid any, snippet int, options ...RequestOptionFunc) ([]byte, *Response, error) + UpdateSnippet(pid any, snippet int64, opt *UpdateProjectSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) + DeleteSnippet(pid any, snippet int64, options ...RequestOptionFunc) (*Response, error) + SnippetContent(pid any, snippet int64, options ...RequestOptionFunc) ([]byte, *Response, error) } // ProjectSnippetsService handles communication with the project snippets @@ -46,55 +45,30 @@ var _ ProjectSnippetsServiceInterface = (*ProjectSnippetsService)(nil) // ListProjectSnippetsOptions represents the available ListSnippets() options. // // GitLab API docs: https://docs.gitlab.com/api/project_snippets/#list-snippets -type ListProjectSnippetsOptions ListOptions +type ListProjectSnippetsOptions struct { + ListOptions +} // ListSnippets gets a list of project snippets. // // GitLab API docs: https://docs.gitlab.com/api/project_snippets/#list-snippets func (s *ProjectSnippetsService) ListSnippets(pid any, opt *ListProjectSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Snippet - resp, err := s.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil + return do[[]*Snippet](s.client, + withPath("projects/%s/snippets", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetSnippet gets a single project snippet // // GitLab API docs: // https://docs.gitlab.com/api/project_snippets/#single-snippet -func (s *ProjectSnippetsService) GetSnippet(pid any, snippet int, options ...RequestOptionFunc) (*Snippet, *Response, error) { - project, err := parseID(pid) - if err 
!= nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d", PathEscape(project), snippet) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ps := new(Snippet) - resp, err := s.client.Do(req, ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil +func (s *ProjectSnippetsService) GetSnippet(pid any, snippet int64, options ...RequestOptionFunc) (*Snippet, *Response, error) { + return do[*Snippet](s.client, + withPath("projects/%s/snippets/%d", ProjectID{pid}, snippet), + withRequestOpts(options...), + ) } // CreateProjectSnippetOptions represents the available CreateSnippet() options. @@ -119,24 +93,12 @@ type CreateProjectSnippetOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_snippets/#create-new-snippet func (s *ProjectSnippetsService) CreateSnippet(pid any, opt *CreateProjectSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - ps := new(Snippet) - resp, err := s.client.Do(req, ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil + return do[*Snippet](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/snippets", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateProjectSnippetOptions represents the available UpdateSnippet() options. 
@@ -160,25 +122,13 @@ type UpdateProjectSnippetOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/project_snippets/#update-snippet -func (s *ProjectSnippetsService) UpdateSnippet(pid any, snippet int, opt *UpdateProjectSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d", PathEscape(project), snippet) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ps := new(Snippet) - resp, err := s.client.Do(req, ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil +func (s *ProjectSnippetsService) UpdateSnippet(pid any, snippet int64, opt *UpdateProjectSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) { + return do[*Snippet](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/snippets/%d", ProjectID{pid}, snippet), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteSnippet deletes an existing project snippet. 
This is an idempotent @@ -187,42 +137,26 @@ func (s *ProjectSnippetsService) UpdateSnippet(pid any, snippet int, opt *Update // // GitLab API docs: // https://docs.gitlab.com/api/project_snippets/#delete-snippet -func (s *ProjectSnippetsService) DeleteSnippet(pid any, snippet int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d", PathEscape(project), snippet) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ProjectSnippetsService) DeleteSnippet(pid any, snippet int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/snippets/%d", ProjectID{pid}, snippet), + withRequestOpts(options...), + ) + return resp, err } // SnippetContent returns the raw project snippet as plain text. 
// // GitLab API docs: // https://docs.gitlab.com/api/project_snippets/#snippet-content -func (s *ProjectSnippetsService) SnippetContent(pid any, snippet int, options ...RequestOptionFunc) ([]byte, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/raw", PathEscape(project), snippet) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) +func (s *ProjectSnippetsService) SnippetContent(pid any, snippet int64, options ...RequestOptionFunc) ([]byte, *Response, error) { + buf, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/snippets/%d/raw", ProjectID{pid}, snippet), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return b.Bytes(), resp, err + return buf.Bytes(), resp, nil } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_statistics.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_statistics.go new file mode 100644 index 0000000000..86dfd022ee --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_statistics.go @@ -0,0 +1,44 @@ +package gitlab + +type ( + ProjectStatisticsServiceInterface interface { + // Last30DaysStatistics gets the project statistics for the last 30 days. + // + // GitLab API docs: https://docs.gitlab.com/api/project_statistics/#get-the-statistics-of-the-last-30-days + Last30DaysStatistics(pid any, options ...RequestOptionFunc) (*ProjectStatistics, *Response, error) + } + + // ProjectStatisticsService handles communication with the project statistics related methods + // of the GitLab API. + // + // GitLab API docs: https://docs.gitlab.com/api/project_statistics + ProjectStatisticsService struct { + client *Client + } +) + +// ProjectStatistics represents the Project Statistics. 
+// +// GitLab API docs: https://docs.gitlab.com/api/project_statistics +type ProjectStatistics struct { + Fetches FetchStats `json:"fetches"` +} + +type FetchStats struct { + Total int64 `json:"total"` + Days []DayStats `json:"days"` +} + +type DayStats struct { + Count int64 `json:"count"` + Date string `json:"date"` +} + +var _ ProjectStatisticsServiceInterface = (*ProjectStatisticsService)(nil) + +func (s *ProjectStatisticsService) Last30DaysStatistics(pid any, options ...RequestOptionFunc) (*ProjectStatistics, *Response, error) { + return do[*ProjectStatistics](s.client, + withPath("projects/%s/statistics", ProjectID{pid}), + withRequestOpts(options...), + ) +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_templates.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_templates.go index 1e7c6489af..0811270847 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_templates.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_templates.go @@ -16,14 +16,16 @@ package gitlab -import ( - "fmt" - "net/http" -) - type ( ProjectTemplatesServiceInterface interface { + // ListTemplates gets a list of project templates. + // + // GitLab API docs: https://docs.gitlab.com/api/project_templates/#get-all-templates-of-a-particular-type ListTemplates(pid any, templateType string, opt *ListProjectTemplatesOptions, options ...RequestOptionFunc) ([]*ProjectTemplate, *Response, error) + // GetProjectTemplate gets a single project template. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/project_templates/#get-one-template-of-a-particular-type GetProjectTemplate(pid any, templateType string, templateName string, options ...RequestOptionFunc) (*ProjectTemplate, *Response, error) } @@ -65,55 +67,21 @@ func (s ProjectTemplate) String() string { // https://docs.gitlab.com/api/project_templates/#get-all-templates-of-a-particular-type type ListProjectTemplatesOptions struct { ListOptions - ID *int `url:"id,omitempty" json:"id,omitempty"` + ID *int64 `url:"id,omitempty" json:"id,omitempty"` Type *string `url:"type,omitempty" json:"type,omitempty"` } -// ListTemplates gets a list of project templates. -// -// GitLab API docs: https://docs.gitlab.com/api/project_templates/#get-all-templates-of-a-particular-type func (s *ProjectTemplatesService) ListTemplates(pid any, templateType string, opt *ListProjectTemplatesOptions, options ...RequestOptionFunc) ([]*ProjectTemplate, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/templates/%s", PathEscape(project), templateType) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pt []*ProjectTemplate - resp, err := s.client.Do(req, &pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil + return do[[]*ProjectTemplate](s.client, + withPath("projects/%s/templates/%s", ProjectID{pid}, NoEscape{templateType}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetProjectTemplate gets a single project template. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_templates/#get-one-template-of-a-particular-type func (s *ProjectTemplatesService) GetProjectTemplate(pid any, templateType string, templateName string, options ...RequestOptionFunc) (*ProjectTemplate, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/templates/%s/%s", PathEscape(project), templateType, templateName) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ptd := new(ProjectTemplate) - resp, err := s.client.Do(req, ptd) - if err != nil { - return nil, resp, err - } - - return ptd, resp, nil + return do[*ProjectTemplate](s.client, + withPath("projects/%s/templates/%s/%s", ProjectID{pid}, NoEscape{templateType}, NoEscape{templateName}), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_variables.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_variables.go index 870837ec68..d2b356e363 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_variables.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_variables.go @@ -17,17 +17,36 @@ package gitlab import ( - "fmt" "net/http" "net/url" ) type ( ProjectVariablesServiceInterface interface { + // ListVariables gets a list of all variables in a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_level_variables/#list-project-variables ListVariables(pid any, opt *ListProjectVariablesOptions, options ...RequestOptionFunc) ([]*ProjectVariable, *Response, error) + // GetVariable gets a variable. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_level_variables/#get-a-single-variable GetVariable(pid any, key string, opt *GetProjectVariableOptions, options ...RequestOptionFunc) (*ProjectVariable, *Response, error) + // CreateVariable creates a new project variable. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/project_level_variables/#create-a-variable CreateVariable(pid any, opt *CreateProjectVariableOptions, options ...RequestOptionFunc) (*ProjectVariable, *Response, error) + // UpdateVariable updates a project's variable. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_level_variables/#update-a-variable UpdateVariable(pid any, key string, opt *UpdateProjectVariableOptions, options ...RequestOptionFunc) (*ProjectVariable, *Response, error) + // RemoveVariable removes a project's variable. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_level_variables/#delete-a-variable RemoveVariable(pid any, key string, opt *RemoveProjectVariableOptions, options ...RequestOptionFunc) (*Response, error) } @@ -65,7 +84,7 @@ func (v ProjectVariable) String() string { // VariableFilter filters available for project variable related functions type VariableFilter struct { - EnvironmentScope string `url:"environment_scope, omitempty" json:"environment_scope,omitempty"` + EnvironmentScope string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` } // ListProjectVariablesOptions represents the available options for listing variables @@ -73,31 +92,16 @@ type VariableFilter struct { // // GitLab API docs: // https://docs.gitlab.com/api/project_level_variables/#list-project-variables -type ListProjectVariablesOptions ListOptions +type ListProjectVariablesOptions struct { + ListOptions +} -// ListVariables gets a list of all variables in a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_level_variables/#list-project-variables func (s *ProjectVariablesService) ListVariables(pid any, opt *ListProjectVariablesOptions, options ...RequestOptionFunc) ([]*ProjectVariable, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/variables", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var vs []*ProjectVariable - resp, err := s.client.Do(req, &vs) - if err != nil { - return nil, resp, err - } - - return vs, resp, nil + return do[[]*ProjectVariable](s.client, + withPath("projects/%s/variables", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetProjectVariableOptions represents the available GetVariable() @@ -109,29 +113,12 @@ type GetProjectVariableOptions struct { Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"` } -// GetVariable gets a variable. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_level_variables/#get-a-single-variable func (s *ProjectVariablesService) GetVariable(pid any, key string, opt *GetProjectVariableOptions, options ...RequestOptionFunc) (*ProjectVariable, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/variables/%s", PathEscape(project), url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(ProjectVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil + return do[*ProjectVariable](s.client, + withPath("projects/%s/variables/%s", ProjectID{pid}, url.PathEscape(key)), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateProjectVariableOptions represents the available CreateVariable() @@ -151,29 +138,13 @@ type CreateProjectVariableOptions struct { VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } -// CreateVariable creates a new project variable. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_level_variables/#create-a-variable func (s *ProjectVariablesService) CreateVariable(pid any, opt *CreateProjectVariableOptions, options ...RequestOptionFunc) (*ProjectVariable, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/variables", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(ProjectVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil + return do[*ProjectVariable](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/variables", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateProjectVariableOptions represents the available UpdateVariable() @@ -192,29 +163,13 @@ type UpdateProjectVariableOptions struct { VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } -// UpdateVariable updates a project's variable. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_level_variables/#update-a-variable func (s *ProjectVariablesService) UpdateVariable(pid any, key string, opt *UpdateProjectVariableOptions, options ...RequestOptionFunc) (*ProjectVariable, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/variables/%s", PathEscape(project), url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(ProjectVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil + return do[*ProjectVariable](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/variables/%s", ProjectID{pid}, url.PathEscape(key)), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // RemoveProjectVariableOptions represents the available RemoveVariable() @@ -226,21 +181,12 @@ type RemoveProjectVariableOptions struct { Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"` } -// RemoveVariable removes a project's variable. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_level_variables/#delete-a-variable func (s *ProjectVariablesService) RemoveVariable(pid any, key string, opt *RemoveProjectVariableOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/variables/%s", PathEscape(project), url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/variables/%s", ProjectID{pid}, url.PathEscape(key)), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_vulnerabilities.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_vulnerabilities.go index aa85f43ba5..783adf6c1c 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/project_vulnerabilities.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_vulnerabilities.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -49,29 +48,29 @@ var _ ProjectVulnerabilitiesServiceInterface = (*ProjectVulnerabilitiesService)( // // GitLab API docs: https://docs.gitlab.com/api/project_vulnerabilities/ type ProjectVulnerability struct { - AuthorID int `json:"author_id"` + AuthorID int64 `json:"author_id"` Confidence string `json:"confidence"` CreatedAt *time.Time `json:"created_at"` Description string `json:"description"` DismissedAt *time.Time `json:"dismissed_at"` - DismissedByID int `json:"dismissed_by_id"` + DismissedByID int64 `json:"dismissed_by_id"` DueDate *time.Time `json:"due_date"` Finding *Finding `json:"finding"` - ID int `json:"id"` + ID int64 `json:"id"` LastEditedAt *time.Time `json:"last_edited_at"` - LastEditedByID int `json:"last_edited_by_id"` + LastEditedByID int64 `json:"last_edited_by_id"` 
Project *Project `json:"project"` ProjectDefaultBranch string `json:"project_default_branch"` ReportType string `json:"report_type"` ResolvedAt *time.Time `json:"resolved_at"` - ResolvedByID int `json:"resolved_by_id"` + ResolvedByID int64 `json:"resolved_by_id"` ResolvedOnDefaultBranch bool `json:"resolved_on_default_branch"` Severity string `json:"severity"` StartDate *time.Time `json:"start_date"` State string `json:"state"` Title string `json:"title"` UpdatedAt *time.Time `json:"updated_at"` - UpdatedByID int `json:"updated_by_id"` + UpdatedByID int64 `json:"updated_by_id"` } // Finding represents a GitLab project vulnerability finding. @@ -81,20 +80,20 @@ type ProjectVulnerability struct { type Finding struct { Confidence string `json:"confidence"` CreatedAt *time.Time `json:"created_at"` - ID int `json:"id"` + ID int64 `json:"id"` LocationFingerprint string `json:"location_fingerprint"` MetadataVersion string `json:"metadata_version"` Name string `json:"name"` - PrimaryIdentifierID int `json:"primary_identifier_id"` + PrimaryIdentifierID int64 `json:"primary_identifier_id"` ProjectFingerprint string `json:"project_fingerprint"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` RawMetadata string `json:"raw_metadata"` ReportType string `json:"report_type"` - ScannerID int `json:"scanner_id"` + ScannerID int64 `json:"scanner_id"` Severity string `json:"severity"` UpdatedAt *time.Time `json:"updated_at"` UUID string `json:"uuid"` - VulnerabilityID int `json:"vulnerability_id"` + VulnerabilityID int64 `json:"vulnerability_id"` } // ListProjectVulnerabilitiesOptions represents the available @@ -113,24 +112,11 @@ type ListProjectVulnerabilitiesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_vulnerabilities/#list-project-vulnerabilities func (s *ProjectVulnerabilitiesService) ListProjectVulnerabilities(pid any, opt *ListProjectVulnerabilitiesOptions, options ...RequestOptionFunc) ([]*ProjectVulnerability, 
*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/vulnerabilities", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*ProjectVulnerability - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[[]*ProjectVulnerability](s.client, + withPath("projects/%s/vulnerabilities", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateVulnerabilityOptions represents the available CreateVulnerability() @@ -140,7 +126,7 @@ func (s *ProjectVulnerabilitiesService) ListProjectVulnerabilities(pid any, opt // GitLab API docs: // https://docs.gitlab.com/api/project_vulnerabilities/#new-vulnerability type CreateVulnerabilityOptions struct { - FindingID *int `url:"finding_id,omitempty" json:"finding_id,omitempty"` + FindingID *int64 `url:"finding_id,omitempty" json:"finding_id,omitempty"` } // CreateVulnerability creates a new vulnerability on the selected project. 
@@ -149,22 +135,10 @@ type CreateVulnerabilityOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_vulnerabilities/#new-vulnerability func (s *ProjectVulnerabilitiesService) CreateVulnerability(pid any, opt *CreateVulnerabilityOptions, options ...RequestOptionFunc) (*ProjectVulnerability, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/vulnerabilities", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(ProjectVulnerability) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*ProjectVulnerability](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/vulnerabilities", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/projects.go b/vendor/gitlab.com/gitlab-org/api/client-go/projects.go index 92423a2f3c..27c5af9aa8 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/projects.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/projects.go @@ -19,12 +19,9 @@ package gitlab import ( "bytes" "encoding/json" - "fmt" "io" "net/http" "time" - - "github.com/hashicorp/go-retryablehttp" ) type ( @@ -33,59 +30,296 @@ type ( // // GitLab API docs: https://docs.gitlab.com/api/projects/ ProjectsServiceInterface interface { + // ListProjects gets a list of projects accessible by the authenticated user. + // + // GitLab API docs: https://docs.gitlab.com/api/projects/#list-all-projects ListProjects(opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) + // ListUserProjects gets a list of projects for the given user. + // + // uid can be either a user ID (int) or a username (string). If a username + // is provided with a leading "@" (e.g., "@johndoe"), it will be trimmed. 
+ // GitLab API docs: + // https://docs.gitlab.com/api/projects/#list-a-users-projects ListUserProjects(uid any, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) + // ListUserContributedProjects gets a list of visible projects a given user + // has contributed to. + // + // uid can be either a user ID (int) or a username (string). If a username + // is provided with a leading "@" (e.g., "@johndoe"), it will be trimmed. + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#list-projects-a-user-has-contributed-to ListUserContributedProjects(uid any, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) + // ListUserStarredProjects gets a list of projects starred by the given user. + // + // uid can be either a user ID (int) or a username (string). If a username + // is provided with a leading "@" (e.g., "@johndoe"), it will be trimmed. + // GitLab API docs: + // https://docs.gitlab.com/api/project_starring/#list-projects-starred-by-a-user ListUserStarredProjects(uid any, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) + // ListProjectsUsers gets a list of users for the given project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#list-users ListProjectsUsers(pid any, opt *ListProjectUserOptions, options ...RequestOptionFunc) ([]*ProjectUser, *Response, error) + // ListProjectsGroups gets a list of groups for the given project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#list-groups ListProjectsGroups(pid any, opt *ListProjectGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) + // GetProjectLanguages gets a list of languages used by the project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#list-programming-languages-used GetProjectLanguages(pid any, options ...RequestOptionFunc) (*ProjectLanguages, *Response, error) + // GetProject gets a specific project, identified by project ID or + // NAMESPACE/PROJECT_NAME, which is owned by the authenticated user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#get-a-single-project GetProject(pid any, opt *GetProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) + // CreateProject creates a new project owned by the authenticated user. + // + // GitLab API docs: https://docs.gitlab.com/api/projects/#create-a-project CreateProject(opt *CreateProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) - CreateProjectForUser(user int, opt *CreateProjectForUserOptions, options ...RequestOptionFunc) (*Project, *Response, error) + // CreateProjectForUser creates a new project owned by the specified user. + // Available only for admins. + // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#create-a-project-for-a-user + CreateProjectForUser(user int64, opt *CreateProjectForUserOptions, options ...RequestOptionFunc) (*Project, *Response, error) + // EditProject updates an existing project. + // + // GitLab API docs: https://docs.gitlab.com/api/projects/#edit-a-project EditProject(pid any, opt *EditProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) + // ForkProject forks a project into the user namespace of the authenticated + // user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_forks/#fork-a-project ForkProject(pid any, opt *ForkProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) + // StarProject stars a given project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/project_starring/#star-a-project StarProject(pid any, options ...RequestOptionFunc) (*Project, *Response, error) - ListProjectsInvitedGroups(pid any, opt *ListProjectInvidedGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) + // ListProjectsInvitedGroups lists invited groups of a project + // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#list-a-projects-invited-groups + ListProjectsInvitedGroups(pid any, opt *ListProjectInvitedGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) + // UnstarProject unstars a given project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_starring/#unstar-a-project UnstarProject(pid any, options ...RequestOptionFunc) (*Project, *Response, error) + // ArchiveProject archives the project if the user is either admin or the + // project owner of this project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#archive-a-project ArchiveProject(pid any, options ...RequestOptionFunc) (*Project, *Response, error) + // UnarchiveProject unarchives the project if the user is either admin or + // the project owner of this project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#unarchive-a-project UnarchiveProject(pid any, options ...RequestOptionFunc) (*Project, *Response, error) + // RestoreProject restores a project that is marked for deletion. + // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#restore-a-project-marked-for-deletion RestoreProject(pid any, options ...RequestOptionFunc) (*Project, *Response, error) + // DeleteProject removes a project including all associated resources + // (issues, merge requests etc.) 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#delete-a-project DeleteProject(pid any, opt *DeleteProjectOptions, options ...RequestOptionFunc) (*Response, error) + // ShareProjectWithGroup allows to share a project with a group. + // + // GitLab API docs: https://docs.gitlab.com/api/projects/#share-a-project-with-a-group ShareProjectWithGroup(pid any, opt *ShareWithGroupOptions, options ...RequestOptionFunc) (*Response, error) - DeleteSharedProjectFromGroup(pid any, groupID int, options ...RequestOptionFunc) (*Response, error) + // DeleteSharedProjectFromGroup allows to unshare a project from a group. + // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#delete-a-shared-project-link-in-a-group + DeleteSharedProjectFromGroup(pid any, groupID int64, options ...RequestOptionFunc) (*Response, error) + // ListProjectHooks gets a list of project hooks. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_webhooks/#list-webhooks-for-a-project ListProjectHooks(pid any, opt *ListProjectHooksOptions, options ...RequestOptionFunc) ([]*ProjectHook, *Response, error) - GetProjectHook(pid any, hook int, options ...RequestOptionFunc) (*ProjectHook, *Response, error) + // GetProjectHook gets a specific hook for a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_webhooks/#get-a-project-webhook + GetProjectHook(pid any, hook int64, options ...RequestOptionFunc) (*ProjectHook, *Response, error) + // AddProjectHook adds a hook to a specified project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/project_webhooks/#add-a-webhook-to-a-project AddProjectHook(pid any, opt *AddProjectHookOptions, options ...RequestOptionFunc) (*ProjectHook, *Response, error) - EditProjectHook(pid any, hook int, opt *EditProjectHookOptions, options ...RequestOptionFunc) (*ProjectHook, *Response, error) - DeleteProjectHook(pid any, hook int, options ...RequestOptionFunc) (*Response, error) - TriggerTestProjectHook(pid any, hook int, event ProjectHookEvent, options ...RequestOptionFunc) (*Response, error) - SetProjectCustomHeader(pid any, hook int, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) - DeleteProjectCustomHeader(pid any, hook int, key string, options ...RequestOptionFunc) (*Response, error) - SetProjectWebhookURLVariable(pid any, hook int, key string, opt *SetProjectWebhookURLVariableOptions, options ...RequestOptionFunc) (*Response, error) - DeleteProjectWebhookURLVariable(pid any, hook int, key string, options ...RequestOptionFunc) (*Response, error) - CreateProjectForkRelation(pid any, fork int, options ...RequestOptionFunc) (*ProjectForkRelation, *Response, error) + // EditProjectHook edits a hook for a specified project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_webhooks/#edit-a-project-webhook + EditProjectHook(pid any, hook int64, opt *EditProjectHookOptions, options ...RequestOptionFunc) (*ProjectHook, *Response, error) + // DeleteProjectHook removes a hook from a project. This is an idempotent + // method and can be called multiple times. Either the hook is available or not. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_webhooks/#delete-project-webhook + DeleteProjectHook(pid any, hook int64, options ...RequestOptionFunc) (*Response, error) + // TriggerTestProjectHook Trigger a test hook for a specified project. + // + // In GitLab 17.0 and later, this endpoint has a special rate limit. 
+ // In GitLab 17.0 the rate was three requests per minute for each project hook. + // In GitLab 17.1 this was changed to five requests per minute for each project + // and authenticated user. + // + // To disable this limit on self-managed GitLab and GitLab Dedicated, + // an administrator can disable the feature flag named web_hook_test_api_endpoint_rate_limit. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_webhooks/#trigger-a-test-project-webhook + TriggerTestProjectHook(pid any, hook int64, event ProjectHookEvent, options ...RequestOptionFunc) (*Response, error) + // SetProjectCustomHeader creates or updates a project custom webhook header. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_webhooks/#set-a-custom-header + SetProjectCustomHeader(pid any, hook int64, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) + // DeleteProjectCustomHeader deletes a project custom webhook header. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_webhooks/#delete-a-custom-header + DeleteProjectCustomHeader(pid any, hook int64, key string, options ...RequestOptionFunc) (*Response, error) + // SetProjectWebhookURLVariable creates or updates a project webhook URL variable. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_webhooks/#set-a-url-variable + SetProjectWebhookURLVariable(pid any, hook int64, key string, opt *SetProjectWebhookURLVariableOptions, options ...RequestOptionFunc) (*Response, error) + // DeleteProjectWebhookURLVariable deletes a project webhook URL variable. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_webhooks/#delete-a-url-variable + DeleteProjectWebhookURLVariable(pid any, hook int64, key string, options ...RequestOptionFunc) (*Response, error) + // CreateProjectForkRelation creates a forked from/to relation between + // existing projects. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/project_forks/#create-a-fork-relationship-between-projects + CreateProjectForkRelation(pid any, fork int64, options ...RequestOptionFunc) (*ProjectForkRelation, *Response, error) + // DeleteProjectForkRelation deletes an existing forked from relationship. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_forks/#delete-a-fork-relationship-between-projects DeleteProjectForkRelation(pid any, options ...RequestOptionFunc) (*Response, error) + // UploadAvatar uploads an avatar. + // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#upload-a-project-avatar UploadAvatar(pid any, avatar io.Reader, filename string, options ...RequestOptionFunc) (*Project, *Response, error) + // DownloadAvatar downloads an avatar. + // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#download-a-project-avatar DownloadAvatar(pid any, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) + // ListProjectForks gets a list of project forks. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_forks/#list-forks-of-a-project ListProjectForks(pid any, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) + // GetProjectPushRules gets the push rules of a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_push_rules/#get-project-push-rules GetProjectPushRules(pid any, options ...RequestOptionFunc) (*ProjectPushRules, *Response, error) + // AddProjectPushRule adds a push rule to a specified project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_push_rules/#add-a-project-push-rule AddProjectPushRule(pid any, opt *AddProjectPushRuleOptions, options ...RequestOptionFunc) (*ProjectPushRules, *Response, error) + // EditProjectPushRule edits a push rule for a specified project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/project_push_rules/#edit-project-push-rule EditProjectPushRule(pid any, opt *EditProjectPushRuleOptions, options ...RequestOptionFunc) (*ProjectPushRules, *Response, error) + // DeleteProjectPushRule removes a push rule from a project. This is an + // idempotent method and can be called multiple times. Either the push rule is + // available or not. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_push_rules/#delete-project-push-rule DeleteProjectPushRule(pid any, options ...RequestOptionFunc) (*Response, error) + // GetApprovalConfiguration get the approval configuration for a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/merge_request_approvals/#project-approval-rules GetApprovalConfiguration(pid any, options ...RequestOptionFunc) (*ProjectApprovals, *Response, error) + // ChangeApprovalConfiguration updates the approval configuration for a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/merge_request_approvals/#change-configuration ChangeApprovalConfiguration(pid any, opt *ChangeApprovalConfigurationOptions, options ...RequestOptionFunc) (*ProjectApprovals, *Response, error) + // GetProjectApprovalRules looks up the list of project level approver rules. + // + // GitLab API docs: + // https://docs.gitlab.com/api/merge_request_approvals/#get-all-approval-rules-for-project GetProjectApprovalRules(pid any, opt *GetProjectApprovalRulesListsOptions, options ...RequestOptionFunc) ([]*ProjectApprovalRule, *Response, error) - GetProjectApprovalRule(pid any, ruleID int, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) + // GetProjectApprovalRule gets the project level approvers. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/merge_request_approvals/#get-single-approval-rule-for-project + GetProjectApprovalRule(pid any, ruleID int64, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) + // CreateProjectApprovalRule creates a new project-level approval rule. + // + // GitLab API docs: + // https://docs.gitlab.com/api/merge_request_approvals/#create-project-approval-rule CreateProjectApprovalRule(pid any, opt *CreateProjectLevelRuleOptions, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) - UpdateProjectApprovalRule(pid any, approvalRule int, opt *UpdateProjectLevelRuleOptions, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) - DeleteProjectApprovalRule(pid any, approvalRule int, options ...RequestOptionFunc) (*Response, error) + // UpdateProjectApprovalRule updates an existing approval rule with new options. + // + // GitLab API docs: + // https://docs.gitlab.com/api/merge_request_approvals/#update-project-approval-rule + UpdateProjectApprovalRule(pid any, approvalRule int64, opt *UpdateProjectLevelRuleOptions, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) + // DeleteProjectApprovalRule deletes a project-level approval rule. + // + // GitLab API docs: + // https://docs.gitlab.com/api/merge_request_approvals/#delete-project-approval-rule + DeleteProjectApprovalRule(pid any, approvalRule int64, options ...RequestOptionFunc) (*Response, error) + // GetProjectPullMirrorDetails returns the pull mirror details. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_pull_mirroring/#get-a-projects-pull-mirror-details GetProjectPullMirrorDetails(pid any, options ...RequestOptionFunc) (*ProjectPullMirrorDetails, *Response, error) + // ConfigureProjectPullMirror configures pull mirroring settings. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/project_pull_mirroring/#configure-pull-mirroring-for-a-project ConfigureProjectPullMirror(pid any, opt *ConfigureProjectPullMirrorOptions, options ...RequestOptionFunc) (*ProjectPullMirrorDetails, *Response, error) + // StartMirroringProject start the pull mirroring process for a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_pull_mirroring/#start-the-pull-mirroring-process-for-a-project StartMirroringProject(pid any, options ...RequestOptionFunc) (*Response, error) + // TransferProject transfer a project into the specified namespace + // + // GitLab API docs: https://docs.gitlab.com/api/projects/#transfer-a-project-to-a-new-namespace TransferProject(pid any, opt *TransferProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) + // StartHousekeepingProject start the Housekeeping task for a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#start-the-housekeeping-task-for-a-project StartHousekeepingProject(pid any, options ...RequestOptionFunc) (*Response, error) - GetRepositoryStorage(pid any, options ...RequestOptionFunc) (*ProjectReposityStorage, *Response, error) + // GetRepositoryStorage Get the path to repository storage. + // + // GitLab API docs: + // https://docs.gitlab.com/api/projects/#get-the-path-to-repository-storage + GetRepositoryStorage(pid any, options ...RequestOptionFunc) (*ProjectRepositoryStorage, *Response, error) + // ListProjectStarrers gets users who starred a project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/project_starring/#list-users-who-starred-a-project + ListProjectStarrers(pid any, opts *ListProjectStarrersOptions, options ...RequestOptionFunc) ([]*ProjectStarrer, *Response, error) } // ProjectsService handles communication with the repositories related methods @@ -103,137 +337,134 @@ var _ ProjectsServiceInterface = (*ProjectsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/projects/ type Project struct { - ID int `json:"id"` - Description string `json:"description"` - DefaultBranch string `json:"default_branch"` - Visibility VisibilityValue `json:"visibility"` - SSHURLToRepo string `json:"ssh_url_to_repo"` - HTTPURLToRepo string `json:"http_url_to_repo"` - WebURL string `json:"web_url"` - ReadmeURL string `json:"readme_url"` - Topics []string `json:"topics"` - Owner *User `json:"owner"` - Name string `json:"name"` - NameWithNamespace string `json:"name_with_namespace"` - Path string `json:"path"` - PathWithNamespace string `json:"path_with_namespace"` - OpenIssuesCount int `json:"open_issues_count"` - ResolveOutdatedDiffDiscussions bool `json:"resolve_outdated_diff_discussions"` - ContainerExpirationPolicy *ContainerExpirationPolicy `json:"container_expiration_policy,omitempty"` - ContainerRegistryAccessLevel AccessControlValue `json:"container_registry_access_level"` - ContainerRegistryImagePrefix string `json:"container_registry_image_prefix,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - LastActivityAt *time.Time `json:"last_activity_at,omitempty"` - CreatorID int `json:"creator_id"` - Namespace *ProjectNamespace `json:"namespace"` - Permissions *Permissions `json:"permissions"` - MarkedForDeletionOn *ISOTime `json:"marked_for_deletion_on"` - EmptyRepo bool `json:"empty_repo"` - Archived bool `json:"archived"` - AvatarURL string `json:"avatar_url"` - LicenseURL string `json:"license_url"` - License 
*ProjectLicense `json:"license"` - SharedRunnersEnabled bool `json:"shared_runners_enabled"` - GroupRunnersEnabled bool `json:"group_runners_enabled"` - RunnerTokenExpirationInterval int `json:"runner_token_expiration_interval"` - ForksCount int `json:"forks_count"` - StarCount int `json:"star_count"` - RunnersToken string `json:"runners_token"` - AllowMergeOnSkippedPipeline bool `json:"allow_merge_on_skipped_pipeline"` - AllowPipelineTriggerApproveDeployment bool `json:"allow_pipeline_trigger_approve_deployment"` - OnlyAllowMergeIfPipelineSucceeds bool `json:"only_allow_merge_if_pipeline_succeeds"` - OnlyAllowMergeIfAllDiscussionsAreResolved bool `json:"only_allow_merge_if_all_discussions_are_resolved"` - RemoveSourceBranchAfterMerge bool `json:"remove_source_branch_after_merge"` - PreventMergeWithoutJiraIssue bool `json:"prevent_merge_without_jira_issue"` - PrintingMergeRequestLinkEnabled bool `json:"printing_merge_request_link_enabled"` - LFSEnabled bool `json:"lfs_enabled"` - RepositoryStorage string `json:"repository_storage"` - RequestAccessEnabled bool `json:"request_access_enabled"` - MergeMethod MergeMethodValue `json:"merge_method"` - CanCreateMergeRequestIn bool `json:"can_create_merge_request_in"` - ForkedFromProject *ForkParent `json:"forked_from_project"` - Mirror bool `json:"mirror"` - MirrorUserID int `json:"mirror_user_id"` - MirrorTriggerBuilds bool `json:"mirror_trigger_builds"` - OnlyMirrorProtectedBranches bool `json:"only_mirror_protected_branches"` - MirrorOverwritesDivergedBranches bool `json:"mirror_overwrites_diverged_branches"` - PackagesEnabled bool `json:"packages_enabled"` - ServiceDeskEnabled bool `json:"service_desk_enabled"` - ServiceDeskAddress string `json:"service_desk_address"` - IssuesAccessLevel AccessControlValue `json:"issues_access_level"` - ReleasesAccessLevel AccessControlValue `json:"releases_access_level,omitempty"` - RepositoryAccessLevel AccessControlValue `json:"repository_access_level"` - MergeRequestsAccessLevel 
AccessControlValue `json:"merge_requests_access_level"` - ForkingAccessLevel AccessControlValue `json:"forking_access_level"` - WikiAccessLevel AccessControlValue `json:"wiki_access_level"` - BuildsAccessLevel AccessControlValue `json:"builds_access_level"` - SnippetsAccessLevel AccessControlValue `json:"snippets_access_level"` - PagesAccessLevel AccessControlValue `json:"pages_access_level"` - OperationsAccessLevel AccessControlValue `json:"operations_access_level"` - AnalyticsAccessLevel AccessControlValue `json:"analytics_access_level"` - EnvironmentsAccessLevel AccessControlValue `json:"environments_access_level"` - FeatureFlagsAccessLevel AccessControlValue `json:"feature_flags_access_level"` - InfrastructureAccessLevel AccessControlValue `json:"infrastructure_access_level"` - MonitorAccessLevel AccessControlValue `json:"monitor_access_level"` - AutocloseReferencedIssues bool `json:"autoclose_referenced_issues"` - SuggestionCommitMessage string `json:"suggestion_commit_message"` - SquashOption SquashOptionValue `json:"squash_option"` - EnforceAuthChecksOnUploads bool `json:"enforce_auth_checks_on_uploads,omitempty"` - SharedWithGroups []struct { - GroupID int `json:"group_id"` - GroupName string `json:"group_name"` - GroupFullPath string `json:"group_full_path"` - GroupAccessLevel int `json:"group_access_level"` - } `json:"shared_with_groups"` - Statistics *Statistics `json:"statistics"` - Links *Links `json:"_links,omitempty"` - ImportURL string `json:"import_url"` - ImportType string `json:"import_type"` - ImportStatus string `json:"import_status"` - ImportError string `json:"import_error"` - CIDefaultGitDepth int `json:"ci_default_git_depth"` - CIDeletePipelinesInSeconds int `json:"ci_delete_pipelines_in_seconds,omitempty"` - CIForwardDeploymentEnabled bool `json:"ci_forward_deployment_enabled"` - CIForwardDeploymentRollbackAllowed bool `json:"ci_forward_deployment_rollback_allowed"` - CIPushRepositoryForJobTokenAllowed bool 
`json:"ci_push_repository_for_job_token_allowed"` - CIIdTokenSubClaimComponents []string `json:"ci_id_token_sub_claim_components"` - CISeperateCache bool `json:"ci_separated_caches"` - CIJobTokenScopeEnabled bool `json:"ci_job_token_scope_enabled"` - CIOptInJWT bool `json:"ci_opt_in_jwt"` - CIAllowForkPipelinesToRunInParentProject bool `json:"ci_allow_fork_pipelines_to_run_in_parent_project"` - CIRestrictPipelineCancellationRole AccessControlValue `json:"ci_restrict_pipeline_cancellation_role"` - PublicJobs bool `json:"public_jobs"` - BuildTimeout int `json:"build_timeout"` - AutoCancelPendingPipelines string `json:"auto_cancel_pending_pipelines"` - CIConfigPath string `json:"ci_config_path"` - CustomAttributes []*CustomAttribute `json:"custom_attributes"` - ComplianceFrameworks []string `json:"compliance_frameworks"` - BuildCoverageRegex string `json:"build_coverage_regex"` - IssuesTemplate string `json:"issues_template"` - MergeRequestsTemplate string `json:"merge_requests_template"` - IssueBranchTemplate string `json:"issue_branch_template"` - KeepLatestArtifact bool `json:"keep_latest_artifact"` - MergePipelinesEnabled bool `json:"merge_pipelines_enabled"` - MergeTrainsEnabled bool `json:"merge_trains_enabled"` - MergeTrainsSkipTrainAllowed bool `json:"merge_trains_skip_train_allowed"` - CIPipelineVariablesMinimumOverrideRole CIPipelineVariablesMinimumOverrideRoleValue `json:"ci_pipeline_variables_minimum_override_role"` - MergeCommitTemplate string `json:"merge_commit_template"` - SquashCommitTemplate string `json:"squash_commit_template"` - AutoDevopsDeployStrategy string `json:"auto_devops_deploy_strategy"` - AutoDevopsEnabled bool `json:"auto_devops_enabled"` - BuildGitStrategy string `json:"build_git_strategy"` - EmailsEnabled bool `json:"emails_enabled"` - ExternalAuthorizationClassificationLabel string `json:"external_authorization_classification_label"` - RequirementsEnabled bool `json:"requirements_enabled"` - RequirementsAccessLevel AccessControlValue 
`json:"requirements_access_level"` - SecurityAndComplianceEnabled bool `json:"security_and_compliance_enabled"` - SecurityAndComplianceAccessLevel AccessControlValue `json:"security_and_compliance_access_level"` - MergeRequestDefaultTargetSelf bool `json:"mr_default_target_self"` - ModelExperimentsAccessLevel AccessControlValue `json:"model_experiments_access_level"` - ModelRegistryAccessLevel AccessControlValue `json:"model_registry_access_level"` - PreReceiveSecretDetectionEnabled bool `json:"pre_receive_secret_detection_enabled"` - AutoDuoCodeReviewEnabled bool `json:"auto_duo_code_review_enabled"` + ID int64 `json:"id"` + Description string `json:"description"` + DefaultBranch string `json:"default_branch"` + Visibility VisibilityValue `json:"visibility"` + SSHURLToRepo string `json:"ssh_url_to_repo"` + HTTPURLToRepo string `json:"http_url_to_repo"` + WebURL string `json:"web_url"` + ReadmeURL string `json:"readme_url"` + Topics []string `json:"topics"` + Owner *User `json:"owner"` + Name string `json:"name"` + NameWithNamespace string `json:"name_with_namespace"` + Path string `json:"path"` + PathWithNamespace string `json:"path_with_namespace"` + OpenIssuesCount int64 `json:"open_issues_count"` + ResolveOutdatedDiffDiscussions bool `json:"resolve_outdated_diff_discussions"` + ContainerExpirationPolicy *ContainerExpirationPolicy `json:"container_expiration_policy,omitempty"` + ContainerRegistryAccessLevel AccessControlValue `json:"container_registry_access_level"` + ContainerRegistryImagePrefix string `json:"container_registry_image_prefix,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` + LastActivityAt *time.Time `json:"last_activity_at,omitempty"` + CreatorID int64 `json:"creator_id"` + Namespace *ProjectNamespace `json:"namespace"` + Permissions *Permissions `json:"permissions"` + MarkedForDeletionOn *ISOTime `json:"marked_for_deletion_on"` + EmptyRepo bool `json:"empty_repo"` + Archived 
bool `json:"archived"` + AvatarURL string `json:"avatar_url"` + LicenseURL string `json:"license_url"` + License *ProjectLicense `json:"license"` + SharedRunnersEnabled bool `json:"shared_runners_enabled"` + GroupRunnersEnabled bool `json:"group_runners_enabled"` + ResourceGroupDefaultProcessMode ResourceGroupProcessMode `json:"resource_group_default_process_mode"` + RunnerTokenExpirationInterval int64 `json:"runner_token_expiration_interval"` + ForksCount int64 `json:"forks_count"` + StarCount int64 `json:"star_count"` + RunnersToken string `json:"runners_token"` + AllowMergeOnSkippedPipeline bool `json:"allow_merge_on_skipped_pipeline"` + AllowPipelineTriggerApproveDeployment bool `json:"allow_pipeline_trigger_approve_deployment"` + OnlyAllowMergeIfPipelineSucceeds bool `json:"only_allow_merge_if_pipeline_succeeds"` + OnlyAllowMergeIfAllDiscussionsAreResolved bool `json:"only_allow_merge_if_all_discussions_are_resolved"` + RemoveSourceBranchAfterMerge bool `json:"remove_source_branch_after_merge"` + PreventMergeWithoutJiraIssue bool `json:"prevent_merge_without_jira_issue"` + PrintingMergeRequestLinkEnabled bool `json:"printing_merge_request_link_enabled"` + LFSEnabled bool `json:"lfs_enabled"` + MaxArtifactsSize int64 `json:"max_artifacts_size"` + RepositoryStorage string `json:"repository_storage"` + RequestAccessEnabled bool `json:"request_access_enabled"` + MergeMethod MergeMethodValue `json:"merge_method"` + CanCreateMergeRequestIn bool `json:"can_create_merge_request_in"` + ForkedFromProject *ForkParent `json:"forked_from_project"` + Mirror bool `json:"mirror"` + MirrorUserID int64 `json:"mirror_user_id"` + MirrorTriggerBuilds bool `json:"mirror_trigger_builds"` + OnlyMirrorProtectedBranches bool `json:"only_mirror_protected_branches"` + MirrorOverwritesDivergedBranches bool `json:"mirror_overwrites_diverged_branches"` + PackagesEnabled bool `json:"packages_enabled"` + ServiceDeskEnabled bool `json:"service_desk_enabled"` + ServiceDeskAddress string 
`json:"service_desk_address"` + IssuesAccessLevel AccessControlValue `json:"issues_access_level"` + ReleasesAccessLevel AccessControlValue `json:"releases_access_level,omitempty"` + RepositoryAccessLevel AccessControlValue `json:"repository_access_level"` + MergeRequestsAccessLevel AccessControlValue `json:"merge_requests_access_level"` + ForkingAccessLevel AccessControlValue `json:"forking_access_level"` + WikiAccessLevel AccessControlValue `json:"wiki_access_level"` + BuildsAccessLevel AccessControlValue `json:"builds_access_level"` + SnippetsAccessLevel AccessControlValue `json:"snippets_access_level"` + PagesAccessLevel AccessControlValue `json:"pages_access_level"` + OperationsAccessLevel AccessControlValue `json:"operations_access_level"` + AnalyticsAccessLevel AccessControlValue `json:"analytics_access_level"` + EnvironmentsAccessLevel AccessControlValue `json:"environments_access_level"` + FeatureFlagsAccessLevel AccessControlValue `json:"feature_flags_access_level"` + InfrastructureAccessLevel AccessControlValue `json:"infrastructure_access_level"` + MonitorAccessLevel AccessControlValue `json:"monitor_access_level"` + AutocloseReferencedIssues bool `json:"autoclose_referenced_issues"` + SuggestionCommitMessage string `json:"suggestion_commit_message"` + SquashOption SquashOptionValue `json:"squash_option"` + EnforceAuthChecksOnUploads bool `json:"enforce_auth_checks_on_uploads,omitempty"` + SharedWithGroups []ProjectSharedWithGroup `json:"shared_with_groups"` + Statistics *Statistics `json:"statistics"` + Links *Links `json:"_links,omitempty"` + ImportURL string `json:"import_url"` + ImportType string `json:"import_type"` + ImportStatus string `json:"import_status"` + ImportError string `json:"import_error"` + CIDefaultGitDepth int64 `json:"ci_default_git_depth"` + CIDeletePipelinesInSeconds int64 `json:"ci_delete_pipelines_in_seconds,omitempty"` + CIForwardDeploymentEnabled bool `json:"ci_forward_deployment_enabled"` + CIForwardDeploymentRollbackAllowed 
bool `json:"ci_forward_deployment_rollback_allowed"` + CIPushRepositoryForJobTokenAllowed bool `json:"ci_push_repository_for_job_token_allowed"` + CIIdTokenSubClaimComponents []string `json:"ci_id_token_sub_claim_components"` + CISeparatedCaches bool `json:"ci_separated_caches"` + CIJobTokenScopeEnabled bool `json:"ci_job_token_scope_enabled"` + CIOptInJWT bool `json:"ci_opt_in_jwt"` + CIAllowForkPipelinesToRunInParentProject bool `json:"ci_allow_fork_pipelines_to_run_in_parent_project"` + CIRestrictPipelineCancellationRole AccessControlValue `json:"ci_restrict_pipeline_cancellation_role"` + PublicJobs bool `json:"public_jobs"` + BuildTimeout int64 `json:"build_timeout"` + AutoCancelPendingPipelines string `json:"auto_cancel_pending_pipelines"` + CIConfigPath string `json:"ci_config_path"` + CustomAttributes []*CustomAttribute `json:"custom_attributes"` + ComplianceFrameworks []string `json:"compliance_frameworks"` + BuildCoverageRegex string `json:"build_coverage_regex"` + IssuesTemplate string `json:"issues_template"` + MergeRequestsTemplate string `json:"merge_requests_template"` + IssueBranchTemplate string `json:"issue_branch_template"` + KeepLatestArtifact bool `json:"keep_latest_artifact"` + MergePipelinesEnabled bool `json:"merge_pipelines_enabled"` + MergeTrainsEnabled bool `json:"merge_trains_enabled"` + MergeTrainsSkipTrainAllowed bool `json:"merge_trains_skip_train_allowed"` + CIPipelineVariablesMinimumOverrideRole CIPipelineVariablesMinimumOverrideRoleValue `json:"ci_pipeline_variables_minimum_override_role"` + MergeCommitTemplate string `json:"merge_commit_template"` + SquashCommitTemplate string `json:"squash_commit_template"` + AutoDevopsDeployStrategy string `json:"auto_devops_deploy_strategy"` + AutoDevopsEnabled bool `json:"auto_devops_enabled"` + BuildGitStrategy string `json:"build_git_strategy"` + EmailsEnabled bool `json:"emails_enabled"` + ExternalAuthorizationClassificationLabel string `json:"external_authorization_classification_label"` + 
RequirementsEnabled bool `json:"requirements_enabled"` + RequirementsAccessLevel AccessControlValue `json:"requirements_access_level"` + SecurityAndComplianceEnabled bool `json:"security_and_compliance_enabled"` + SecurityAndComplianceAccessLevel AccessControlValue `json:"security_and_compliance_access_level"` + MergeRequestDefaultTargetSelf bool `json:"mr_default_target_self"` + ModelExperimentsAccessLevel AccessControlValue `json:"model_experiments_access_level"` + ModelRegistryAccessLevel AccessControlValue `json:"model_registry_access_level"` + PreReceiveSecretDetectionEnabled bool `json:"pre_receive_secret_detection_enabled"` + AutoDuoCodeReviewEnabled bool `json:"auto_duo_code_review_enabled"` // Deprecated: use Topics instead TagList []string `json:"tag_list"` @@ -242,7 +473,7 @@ type Project struct { // Deprecated: use MergeRequestsAccessLevel instead MergeRequestsEnabled bool `json:"merge_requests_enabled"` // Deprecated: use Merge Request Approvals API instead - ApprovalsBeforeMerge int `json:"approvals_before_merge"` + ApprovalsBeforeMerge int64 `json:"approvals_before_merge"` // Deprecated: use BuildsAccessLevel instead JobsEnabled bool `json:"jobs_enabled"` // Deprecated: use WikiAccessLevel instead @@ -261,9 +492,20 @@ type Project struct { PublicBuilds bool `json:"public_builds"` } +// ProjectSharedWithGroup represents a GitLab project shared group. +// +// GitLab API docs: https://docs.gitlab.com/api/projects/ +type ProjectSharedWithGroup struct { + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + GroupFullPath string `json:"group_full_path"` + GroupAccessLevel int64 `json:"group_access_level"` + ExpiresAt *ISOTime `json:"expires_at"` +} + // BasicProject included in other service responses (such as todos). 
type BasicProject struct { - ID int `json:"id"` + ID int64 `json:"id"` Description string `json:"description"` Name string `json:"name"` NameWithNamespace string `json:"name_with_namespace"` @@ -275,7 +517,7 @@ type BasicProject struct { // ContainerExpirationPolicy represents the container expiration policy. type ContainerExpirationPolicy struct { Cadence string `json:"cadence"` - KeepN int `json:"keep_n"` + KeepN int64 `json:"keep_n"` OlderThan string `json:"older_than"` NameRegexDelete string `json:"name_regex_delete"` NameRegexKeep string `json:"name_regex_keep"` @@ -288,7 +530,7 @@ type ContainerExpirationPolicy struct { // ForkParent represents the parent project when this is a fork. type ForkParent struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` NameWithNamespace string `json:"name_with_namespace"` Path string `json:"path"` @@ -340,12 +582,12 @@ type ProjectLicense struct { // ProjectNamespace represents a project namespace. type ProjectNamespace struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Path string `json:"path"` Kind string `json:"kind"` FullPath string `json:"full_path"` - ParentID int `json:"parent_id"` + ParentID int64 `json:"parent_id"` AvatarURL string `json:"avatar_url"` WebURL string `json:"web_url"` } @@ -392,12 +634,12 @@ func (s Project) String() string { // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#get-all-approval-rules-for-project type ProjectApprovalRule struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` RuleType string `json:"rule_type"` ReportType string `json:"report_type"` EligibleApprovers []*BasicUser `json:"eligible_approvers"` - ApprovalsRequired int `json:"approvals_required"` + ApprovalsRequired int64 `json:"approvals_required"` Users []*BasicUser `json:"users"` Groups []*Group `json:"groups"` ContainsHiddenGroups bool `json:"contains_hidden_groups"` @@ -416,8 +658,8 @@ type ListProjectsOptions struct { 
ListOptions Active *bool `url:"active,omitempty" json:"active,omitempty"` Archived *bool `url:"archived,omitempty" json:"archived,omitempty"` - IDAfter *int `url:"id_after,omitempty" json:"id_after,omitempty"` - IDBefore *int `url:"id_before,omitempty" json:"id_before,omitempty"` + IDAfter *int64 `url:"id_after,omitempty" json:"id_after,omitempty"` + IDBefore *int64 `url:"id_before,omitempty" json:"id_before,omitempty"` Imported *bool `url:"imported,omitempty" json:"imported,omitempty"` IncludeHidden *bool `url:"include_hidden,omitempty" json:"include_hidden,omitempty"` IncludePendingDelete *bool `url:"include_pending_delete,omitempty" json:"include_pending_delete,omitempty"` @@ -444,102 +686,49 @@ type ListProjectsOptions struct { WithProgrammingLanguage *string `url:"with_programming_language,omitempty" json:"with_programming_language,omitempty"` } -// ListProjects gets a list of projects accessible by the authenticated user. -// -// GitLab API docs: https://docs.gitlab.com/api/projects/#list-all-projects func (s *ProjectsService) ListProjects(opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "projects", opt, options) - if err != nil { - return nil, nil, err - } - - var p []*Project - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[[]*Project](s.client, + withPath("projects"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// ListUserProjects gets a list of projects for the given user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#list-a-users-projects func (s *ProjectsService) ListUserProjects(uid any, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - user, err := parseID(uid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("users/%s/projects", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*Project - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[[]*Project](s.client, + withPath("users/%s/projects", UserID{uid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// ListUserContributedProjects gets a list of visible projects a given user has contributed to. +// ListUserContributedProjects gets a list of visible projects a given user has +// contributed to. +// +// uid can be either a user ID (int) or a username (string). If a username +// is provided with a leading "@" (e.g., "@johndoe"), it will be trimmed. // // GitLab API docs: // https://docs.gitlab.com/api/projects/#list-projects-a-user-has-contributed-to func (s *ProjectsService) ListUserContributedProjects(uid any, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - user, err := parseID(uid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("users/%s/contributed_projects", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*Project - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[[]*Project](s.client, + withPath("users/%s/contributed_projects", UserID{uid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// ListUserStarredProjects gets a list of projects starred by the given user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_starring/#list-projects-starred-by-a-user func (s *ProjectsService) ListUserStarredProjects(uid any, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - user, err := parseID(uid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("users/%s/starred_projects", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*Project - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[[]*Project](s.client, + withPath("users/%s/starred_projects", UserID{uid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ProjectUser represents a GitLab project user. type ProjectUser struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Username string `json:"username"` State string `json:"state"` @@ -555,35 +744,18 @@ type ListProjectUserOptions struct { Search *string `url:"search,omitempty" json:"search,omitempty"` } -// ListProjectsUsers gets a list of users for the given project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#list-users func (s *ProjectsService) ListProjectsUsers(pid any, opt *ListProjectUserOptions, options ...RequestOptionFunc) ([]*ProjectUser, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/users", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*ProjectUser - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[[]*ProjectUser](s.client, + withPath("projects/%s/users", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ProjectGroup represents a GitLab project group. 
// GitLab API docs: https://docs.gitlab.com/api/projects/#list-groups type ProjectGroup struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` AvatarURL string `json:"avatar_url"` WebURL string `json:"web_url"` @@ -598,65 +770,30 @@ type ListProjectGroupOptions struct { ListOptions Search *string `url:"search,omitempty" json:"search,omitempty"` SharedMinAccessLevel *AccessLevelValue `url:"shared_min_access_level,omitempty" json:"shared_min_access_level,omitempty"` - SharedVisiableOnly *bool `url:"shared_visible_only,omitempty" json:"shared_visible_only,omitempty"` - SkipGroups *[]int `url:"skip_groups,omitempty" json:"skip_groups,omitempty"` + SharedVisibleOnly *bool `url:"shared_visible_only,omitempty" json:"shared_visible_only,omitempty"` + SkipGroups *[]int64 `url:"skip_groups,omitempty" json:"skip_groups,omitempty"` WithShared *bool `url:"with_shared,omitempty" json:"with_shared,omitempty"` } -// ListProjectsGroups gets a list of groups for the given project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#list-groups func (s *ProjectsService) ListProjectsGroups(pid any, opt *ListProjectGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/groups", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*ProjectGroup - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[[]*ProjectGroup](s.client, + withPath("projects/%s/groups", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ProjectLanguages is a map of strings because the response is arbitrary // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/projects/#list-programming-languages-used type ProjectLanguages map[string]float32 -// GetProjectLanguages gets a list of languages used by the project -// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#list-programming-languages-used func (s *ProjectsService) GetProjectLanguages(pid any, options ...RequestOptionFunc) (*ProjectLanguages, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/languages", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(ProjectLanguages) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*ProjectLanguages](s.client, + withPath("projects/%s/languages", ProjectID{pid}), + withRequestOpts(options...), + ) } // GetProjectOptions represents the available GetProject() options. 
@@ -668,30 +805,12 @@ type GetProjectOptions struct { WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` } -// GetProject gets a specific project, identified by project ID or -// NAMESPACE/PROJECT_NAME, which is owned by the authenticated user. -// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#get-a-single-project func (s *ProjectsService) GetProject(pid any, opt *GetProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*Project](s.client, + withPath("projects/%s", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateProjectOptions represents the available CreateProject() options. 
@@ -708,7 +827,7 @@ type CreateProjectOptions struct { Avatar *ProjectAvatar `url:"-" json:"-"` BuildCoverageRegex *string `url:"build_coverage_regex,omitempty" json:"build_coverage_regex,omitempty"` BuildGitStrategy *string `url:"build_git_strategy,omitempty" json:"build_git_strategy,omitempty"` - BuildTimeout *int `url:"build_timeout,omitempty" json:"build_timeout,omitempty"` + BuildTimeout *int64 `url:"build_timeout,omitempty" json:"build_timeout,omitempty"` BuildsAccessLevel *AccessControlValue `url:"builds_access_level,omitempty" json:"builds_access_level,omitempty"` CIConfigPath *string `url:"ci_config_path,omitempty" json:"ci_config_path,omitempty"` ContainerExpirationPolicyAttributes *ContainerExpirationPolicyAttributes `url:"container_expiration_policy_attributes,omitempty" json:"container_expiration_policy_attributes,omitempty"` @@ -719,7 +838,7 @@ type CreateProjectOptions struct { EnforceAuthChecksOnUploads *bool `url:"enforce_auth_checks_on_uploads,omitempty" json:"enforce_auth_checks_on_uploads,omitempty"` ExternalAuthorizationClassificationLabel *string `url:"external_authorization_classification_label,omitempty" json:"external_authorization_classification_label,omitempty"` ForkingAccessLevel *AccessControlValue `url:"forking_access_level,omitempty" json:"forking_access_level,omitempty"` - GroupWithProjectTemplatesID *int `url:"group_with_project_templates_id,omitempty" json:"group_with_project_templates_id,omitempty"` + GroupWithProjectTemplatesID *int64 `url:"group_with_project_templates_id,omitempty" json:"group_with_project_templates_id,omitempty"` ImportURL *string `url:"import_url,omitempty" json:"import_url,omitempty"` InitializeWithReadme *bool `url:"initialize_with_readme,omitempty" json:"initialize_with_readme,omitempty"` IssuesAccessLevel *AccessControlValue `url:"issues_access_level,omitempty" json:"issues_access_level,omitempty"` @@ -736,7 +855,7 @@ type CreateProjectOptions struct { ModelExperimentsAccessLevel *AccessControlValue 
`url:"model_experiments_access_level,omitempty" json:"model_experiments_access_level,omitempty"` ModelRegistryAccessLevel *AccessControlValue `url:"model_registry_access_level,omitempty" json:"model_registry_access_level,omitempty"` Name *string `url:"name,omitempty" json:"name,omitempty"` - NamespaceID *int `url:"namespace_id,omitempty" json:"namespace_id,omitempty"` + NamespaceID *int64 `url:"namespace_id,omitempty" json:"namespace_id,omitempty"` OnlyAllowMergeIfAllDiscussionsAreResolved *bool `url:"only_allow_merge_if_all_discussions_are_resolved,omitempty" json:"only_allow_merge_if_all_discussions_are_resolved,omitempty"` OnlyAllowMergeIfPipelineSucceeds *bool `url:"only_allow_merge_if_pipeline_succeeds,omitempty" json:"only_allow_merge_if_pipeline_succeeds,omitempty"` OperationsAccessLevel *AccessControlValue `url:"operations_access_level,omitempty" json:"operations_access_level,omitempty"` @@ -758,20 +877,21 @@ type CreateProjectOptions struct { SecurityAndComplianceAccessLevel *AccessControlValue `url:"security_and_compliance_access_level,omitempty" json:"security_and_compliance_access_level,omitempty"` SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"` GroupRunnersEnabled *bool `url:"group_runners_enabled,omitempty" json:"group_runners_enabled,omitempty"` + ResourceGroupDefaultProcessMode *ResourceGroupProcessMode `url:"resource_group_default_process_mode,omitempty" json:"resource_group_default_process_mode,omitempty"` ShowDefaultAwardEmojis *bool `url:"show_default_award_emojis,omitempty" json:"show_default_award_emojis,omitempty"` SnippetsAccessLevel *AccessControlValue `url:"snippets_access_level,omitempty" json:"snippets_access_level,omitempty"` SquashCommitTemplate *string `url:"squash_commit_template,omitempty" json:"squash_commit_template,omitempty"` SquashOption *SquashOptionValue `url:"squash_option,omitempty" json:"squash_option,omitempty"` SuggestionCommitMessage *string 
`url:"suggestion_commit_message,omitempty" json:"suggestion_commit_message,omitempty"` TemplateName *string `url:"template_name,omitempty" json:"template_name,omitempty"` - TemplateProjectID *int `url:"template_project_id,omitempty" json:"template_project_id,omitempty"` + TemplateProjectID *int64 `url:"template_project_id,omitempty" json:"template_project_id,omitempty"` Topics *[]string `url:"topics,omitempty" json:"topics,omitempty"` UseCustomTemplate *bool `url:"use_custom_template,omitempty" json:"use_custom_template,omitempty"` Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` // Deprecated: use Merge Request Approvals API instead - ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` + ApprovalsBeforeMerge *int64 `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` // Deprecated: use PublicJobs instead PublicBuilds *bool `url:"public_builds,omitempty" json:"public_builds,omitempty"` // Deprecated: No longer supported in recent versions. @@ -806,7 +926,7 @@ type CreateProjectOptions struct { // GitLab API docs: https://docs.gitlab.com/api/projects/#create-a-project type ContainerExpirationPolicyAttributes struct { Cadence *string `url:"cadence,omitempty" json:"cadence,omitempty"` - KeepN *int `url:"keep_n,omitempty" json:"keep_n,omitempty"` + KeepN *int64 `url:"keep_n,omitempty" json:"keep_n,omitempty"` OlderThan *string `url:"older_than,omitempty" json:"older_than,omitempty"` NameRegexDelete *string `url:"name_regex_delete,omitempty" json:"name_regex_delete,omitempty"` NameRegexKeep *string `url:"name_regex_keep,omitempty" json:"name_regex_keep,omitempty"` @@ -833,9 +953,6 @@ func (a *ProjectAvatar) MarshalJSON() ([]byte, error) { return json.Marshal((*alias)(a)) } -// CreateProject creates a new project owned by the authenticated user. 
-// -// GitLab API docs: https://docs.gitlab.com/api/projects/#create-a-project func (s *ProjectsService) CreateProject(opt *CreateProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) { if opt.ContainerExpirationPolicyAttributes != nil { // This is needed to satisfy the API. Should be deleted @@ -843,33 +960,16 @@ func (s *ProjectsService) CreateProject(opt *CreateProjectOptions, options ...Re opt.ContainerExpirationPolicyAttributes.NameRegex = opt.ContainerExpirationPolicyAttributes.NameRegexDelete } - var err error - var req *retryablehttp.Request - - if opt.Avatar == nil { - req, err = s.client.NewRequest(http.MethodPost, "projects", opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPost, - "projects", - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) - } - if err != nil { - return nil, nil, err + reqOpts := []doOption{ + withMethod(http.MethodPost), + withPath("projects"), + withAPIOpts(opt), + withRequestOpts(options...), } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err + if opt.Avatar != nil { + reqOpts = append(reqOpts, withUpload(opt.Avatar.Image, opt.Avatar.Filename, UploadAvatar)) } - - return p, resp, nil + return do[*Project](s.client, reqOpts...) } // CreateProjectForUserOptions represents the available CreateProjectForUser() @@ -879,46 +979,23 @@ func (s *ProjectsService) CreateProject(opt *CreateProjectOptions, options ...Re // https://docs.gitlab.com/api/projects/#create-a-project-for-a-user type CreateProjectForUserOptions CreateProjectOptions -// CreateProjectForUser creates a new project owned by the specified user. -// Available only for admins. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#create-a-project-for-a-user -func (s *ProjectsService) CreateProjectForUser(user int, opt *CreateProjectForUserOptions, options ...RequestOptionFunc) (*Project, *Response, error) { +func (s *ProjectsService) CreateProjectForUser(user int64, opt *CreateProjectForUserOptions, options ...RequestOptionFunc) (*Project, *Response, error) { if opt.ContainerExpirationPolicyAttributes != nil { // This is needed to satisfy the API. Should be deleted // when NameRegex is removed (it's now deprecated). opt.ContainerExpirationPolicyAttributes.NameRegex = opt.ContainerExpirationPolicyAttributes.NameRegexDelete } - var err error - var req *retryablehttp.Request - u := fmt.Sprintf("projects/user/%d", user) - - if opt.Avatar == nil { - req, err = s.client.NewRequest(http.MethodPost, u, opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPost, - u, - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) - } - if err != nil { - return nil, nil, err + reqOpts := []doOption{ + withMethod(http.MethodPost), + withPath("projects/user/%d", user), + withAPIOpts(opt), + withRequestOpts(options...), } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err + if opt.Avatar != nil { + reqOpts = append(reqOpts, withUpload(opt.Avatar.Image, opt.Avatar.Filename, UploadAvatar)) } - - return p, resp, nil + return do[*Project](s.client, reqOpts...) } // EditProjectOptions represents the available EditProject() options. 
@@ -937,16 +1014,16 @@ type EditProjectOptions struct { Avatar *ProjectAvatar `url:"-" json:"avatar,omitempty"` BuildCoverageRegex *string `url:"build_coverage_regex,omitempty" json:"build_coverage_regex,omitempty"` BuildGitStrategy *string `url:"build_git_strategy,omitempty" json:"build_git_strategy,omitempty"` - BuildTimeout *int `url:"build_timeout,omitempty" json:"build_timeout,omitempty"` + BuildTimeout *int64 `url:"build_timeout,omitempty" json:"build_timeout,omitempty"` BuildsAccessLevel *AccessControlValue `url:"builds_access_level,omitempty" json:"builds_access_level,omitempty"` CIConfigPath *string `url:"ci_config_path,omitempty" json:"ci_config_path,omitempty"` - CIDefaultGitDepth *int `url:"ci_default_git_depth,omitempty" json:"ci_default_git_depth,omitempty"` - CIDeletePipelinesInSeconds *int `url:"ci_delete_pipelines_in_seconds,omitempty" json:"ci_delete_pipelines_in_seconds,omitempty"` + CIDefaultGitDepth *int64 `url:"ci_default_git_depth,omitempty" json:"ci_default_git_depth,omitempty"` + CIDeletePipelinesInSeconds *int64 `url:"ci_delete_pipelines_in_seconds,omitempty" json:"ci_delete_pipelines_in_seconds,omitempty"` CIForwardDeploymentEnabled *bool `url:"ci_forward_deployment_enabled,omitempty" json:"ci_forward_deployment_enabled,omitempty"` CIForwardDeploymentRollbackAllowed *bool `url:"ci_forward_deployment_rollback_allowed,omitempty" json:"ci_forward_deployment_rollback_allowed,omitempty"` CIPushRepositoryForJobTokenAllowed *bool `url:"ci_push_repository_for_job_token_allowed,omitempty" json:"ci_push_repository_for_job_token_allowed,omitempty"` CIIdTokenSubClaimComponents *[]string `url:"ci_id_token_sub_claim_components,omitempty" json:"ci_id_token_sub_claim_components,omitempty"` - CISeperateCache *bool `url:"ci_separated_caches,omitempty" json:"ci_separated_caches,omitempty"` + CISeparatedCaches *bool `url:"ci_separated_caches,omitempty" json:"ci_separated_caches,omitempty"` CIRestrictPipelineCancellationRole *AccessControlValue 
`url:"ci_restrict_pipeline_cancellation_role,omitempty" json:"ci_restrict_pipeline_cancellation_role,omitempty"` CIPipelineVariablesMinimumOverrideRole *CIPipelineVariablesMinimumOverrideRoleValue `url:"ci_pipeline_variables_minimum_override_role,omitempty" json:"ci_pipeline_variables_minimum_override_role,omitempty"` ContainerExpirationPolicyAttributes *ContainerExpirationPolicyAttributes `url:"container_expiration_policy_attributes,omitempty" json:"container_expiration_policy_attributes,omitempty"` @@ -963,6 +1040,7 @@ type EditProjectOptions struct { IssuesTemplate *string `url:"issues_template,omitempty" json:"issues_template,omitempty"` KeepLatestArtifact *bool `url:"keep_latest_artifact,omitempty" json:"keep_latest_artifact,omitempty"` LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` + MaxArtifactsSize *int64 `url:"max_artifacts_size,omitempty" json:"max_artifacts_size,omitempty"` MergeCommitTemplate *string `url:"merge_commit_template,omitempty" json:"merge_commit_template,omitempty"` MergeRequestDefaultTargetSelf *bool `url:"mr_default_target_self,omitempty" json:"mr_default_target_self,omitempty"` MergeMethod *MergeMethodValue `url:"merge_method,omitempty" json:"merge_method,omitempty"` @@ -975,7 +1053,7 @@ type EditProjectOptions struct { MirrorBranchRegex *string `url:"mirror_branch_regex,omitempty" json:"mirror_branch_regex,omitempty"` MirrorOverwritesDivergedBranches *bool `url:"mirror_overwrites_diverged_branches,omitempty" json:"mirror_overwrites_diverged_branches,omitempty"` MirrorTriggerBuilds *bool `url:"mirror_trigger_builds,omitempty" json:"mirror_trigger_builds,omitempty"` - MirrorUserID *int `url:"mirror_user_id,omitempty" json:"mirror_user_id,omitempty"` + MirrorUserID *int64 `url:"mirror_user_id,omitempty" json:"mirror_user_id,omitempty"` ModelExperimentsAccessLevel *AccessControlValue `url:"model_experiments_access_level,omitempty" json:"model_experiments_access_level,omitempty"` ModelRegistryAccessLevel 
*AccessControlValue `url:"model_registry_access_level,omitempty" json:"model_registry_access_level,omitempty"` Name *string `url:"name,omitempty" json:"name,omitempty"` @@ -1004,6 +1082,7 @@ type EditProjectOptions struct { ServiceDeskEnabled *bool `url:"service_desk_enabled,omitempty" json:"service_desk_enabled,omitempty"` SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"` GroupRunnersEnabled *bool `url:"group_runners_enabled,omitempty" json:"group_runners_enabled,omitempty"` + ResourceGroupDefaultProcessMode *ResourceGroupProcessMode `url:"resource_group_default_process_mode,omitempty" json:"resource_group_default_process_mode,omitempty"` ShowDefaultAwardEmojis *bool `url:"show_default_award_emojis,omitempty" json:"show_default_award_emojis,omitempty"` SnippetsAccessLevel *AccessControlValue `url:"snippets_access_level,omitempty" json:"snippets_access_level,omitempty"` SquashCommitTemplate *string `url:"squash_commit_template,omitempty" json:"squash_commit_template,omitempty"` @@ -1014,7 +1093,7 @@ type EditProjectOptions struct { WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` // Deprecated: use Merge Request Approvals API instead - ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` + ApprovalsBeforeMerge *int64 `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` // Deprecated: use PublicJobs instead PublicBuilds *bool `url:"public_builds,omitempty" json:"public_builds,omitempty"` // Deprecated: use CIPipelineVariablesMinimumOverrideRole instead @@ -1037,9 +1116,6 @@ type EditProjectOptions struct { WikiEnabled *bool `url:"wiki_enabled,omitempty" json:"wiki_enabled,omitempty"` } -// EditProject updates an existing project. 
-// -// GitLab API docs: https://docs.gitlab.com/api/projects/#edit-a-project func (s *ProjectsService) EditProject(pid any, opt *EditProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) { if opt.ContainerExpirationPolicyAttributes != nil { // This is needed to satisfy the API. Should be deleted @@ -1047,38 +1123,16 @@ func (s *ProjectsService) EditProject(pid any, opt *EditProjectOptions, options opt.ContainerExpirationPolicyAttributes.NameRegex = opt.ContainerExpirationPolicyAttributes.NameRegexDelete } - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s", PathEscape(project)) - - var req *retryablehttp.Request - - if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) { - req, err = s.client.NewRequest(http.MethodPut, u, opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPut, - u, - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) + reqOpts := []doOption{ + withMethod(http.MethodPut), + withPath("projects/%s", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), } - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err + if opt.Avatar != nil && (opt.Avatar.Filename != "" || opt.Avatar.Image != nil) { + reqOpts = append(reqOpts, withUpload(opt.Avatar.Image, opt.Avatar.Filename, UploadAvatar)) } - - return p, resp, nil + return do[*Project](s.client, reqOpts...) } // ForkProjectOptions represents the available ForkProject() options. 
@@ -1090,7 +1144,7 @@ type ForkProjectOptions struct { Description *string `url:"description,omitempty" json:"description,omitempty"` MergeRequestDefaultTargetSelf *bool `url:"mr_default_target_self,omitempty" json:"mr_default_target_self,omitempty"` Name *string `url:"name,omitempty" json:"name,omitempty"` - NamespaceID *int `url:"namespace_id,omitempty" json:"namespace_id,omitempty"` + NamespaceID *int64 `url:"namespace_id,omitempty" json:"namespace_id,omitempty"` NamespacePath *string `url:"namespace_path,omitempty" json:"namespace_path,omitempty"` Path *string `url:"path,omitempty" json:"path,omitempty"` Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` @@ -1099,55 +1153,21 @@ type ForkProjectOptions struct { Namespace *string `url:"namespace,omitempty" json:"namespace,omitempty"` } -// ForkProject forks a project into the user namespace of the authenticated -// user. -// -// GitLab API docs: -// https://docs.gitlab.com/api/project_forks/#fork-a-project func (s *ProjectsService) ForkProject(pid any, opt *ForkProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/fork", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*Project](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/fork", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// StarProject stars a given the project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_starring/#star-a-project func (s *ProjectsService) StarProject(pid any, options ...RequestOptionFunc) (*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/star", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*Project](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/star", ProjectID{pid}), + withRequestOpts(options...), + ) } // ListProjectInvitedGroupOptions represents the available @@ -1163,136 +1183,44 @@ type ListProjectInvitedGroupOptions struct { WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` } -// ListProjectInvidedGroupOptions is kept for backwards compatibility. -// -// Deprecated: use ListProjectInvitedGroupOptions instead. The ListProjectInvidedGroupOptions type will be removed in the next release. 
-type ListProjectInvidedGroupOptions = ListProjectInvitedGroupOptions - -// ListProjectsInvitedGroups lists invited groups of a project -// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#list-a-projects-invited-groups -func (s *ProjectsService) ListProjectsInvitedGroups(pid any, opt *ListProjectInvidedGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/invited_groups", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pg []*ProjectGroup - resp, err := s.client.Do(req, &pg) - if err != nil { - return nil, resp, err - } - - return pg, resp, nil +func (s *ProjectsService) ListProjectsInvitedGroups(pid any, opt *ListProjectInvitedGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) { + return do[[]*ProjectGroup](s.client, + withPath("projects/%s/invited_groups", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// UnstarProject unstars a given project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_starring/#unstar-a-project func (s *ProjectsService) UnstarProject(pid any, options ...RequestOptionFunc) (*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/unstar", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*Project](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/unstar", ProjectID{pid}), + withRequestOpts(options...), + ) } -// ArchiveProject archives the project if the user is either admin or the -// project owner of this project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#archive-a-project func (s *ProjectsService) ArchiveProject(pid any, options ...RequestOptionFunc) (*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/archive", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*Project](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/archive", ProjectID{pid}), + withRequestOpts(options...), + ) } -// UnarchiveProject unarchives the project if the user is either admin or -// the project owner of this project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#unarchive-a-project func (s *ProjectsService) UnarchiveProject(pid any, options ...RequestOptionFunc) (*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/unarchive", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*Project](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/unarchive", ProjectID{pid}), + withRequestOpts(options...), + ) } -// RestoreProject restores a project that is marked for deletion. -// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#restore-a-project-marked-for-deletion func (s *ProjectsService) RestoreProject(pid any, options ...RequestOptionFunc) (*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/restore", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*Project](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/restore", ProjectID{pid}), + withRequestOpts(options...), + ) } // DeleteProjectOptions represents the available DeleteProject() options. @@ -1304,24 +1232,14 @@ type DeleteProjectOptions struct { PermanentlyRemove *bool `url:"permanently_remove" json:"permanently_remove"` } -// DeleteProject removes a project including all associated resources -// (issues, merge requests etc.) 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#delete-a-project func (s *ProjectsService) DeleteProject(pid any, opt *DeleteProjectOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // ShareWithGroupOptions represents the available SharedWithGroup() options. @@ -1330,53 +1248,35 @@ func (s *ProjectsService) DeleteProject(pid any, opt *DeleteProjectOptions, opti type ShareWithGroupOptions struct { ExpiresAt *string `url:"expires_at" json:"expires_at"` GroupAccess *AccessLevelValue `url:"group_access" json:"group_access"` - GroupID *int `url:"group_id" json:"group_id"` + GroupID *int64 `url:"group_id" json:"group_id"` } -// ShareProjectWithGroup allows to share a project with a group. 
-// -// GitLab API docs: https://docs.gitlab.com/api/projects/#share-a-project-with-a-group func (s *ProjectsService) ShareProjectWithGroup(pid any, opt *ShareWithGroupOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/share", PathEscape(project)) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/share", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err +} - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, err - } +func (s *ProjectsService) DeleteSharedProjectFromGroup(pid any, groupID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/share/%d", ProjectID{pid}, groupID), + withRequestOpts(options...), + ) + return resp, err +} - return s.client.Do(req, nil) -} - -// DeleteSharedProjectFromGroup allows to unshare a project from a group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#delete-a-shared-project-link-in-a-group -func (s *ProjectsService) DeleteSharedProjectFromGroup(pid any, groupID int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/share/%d", PathEscape(project), groupID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// HookCustomHeader represents a project or group hook custom header -// Note: "Key" is returned from the Get operation, but "Value" is not -// The List operation doesn't return any headers at all for Projects, -// but does return headers for Groups -type HookCustomHeader struct { - Key string `json:"key"` - Value string `json:"value"` +// HookCustomHeader represents a project or group hook custom header +// Note: "Key" is returned from the Get operation, but "Value" is not +// The List operation doesn't return any headers at all for Projects, +// but does return headers for Groups +type HookCustomHeader struct { + Key string `json:"key"` + Value string `json:"value"` } // HookURLVariable represents a project or group hook URL variable @@ -1390,11 +1290,11 @@ type HookURLVariable struct { // GitLab API docs: // https://docs.gitlab.com/api/project_webhooks/#list-webhooks-for-a-project type ProjectHook struct { - ID int `json:"id"` + ID int64 `json:"id"` URL string `json:"url"` Name string `json:"name"` Description string `json:"description"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` PushEvents bool `json:"push_events"` PushEventsBranchFilter string `json:"push_events_branch_filter"` IssuesEvents bool `json:"issues_events"` @@ -1410,6 +1310,7 @@ type ProjectHook struct { ReleasesEvents bool `json:"releases_events"` MilestoneEvents bool `json:"milestone_events"` FeatureFlagEvents bool `json:"feature_flag_events"` + 
EmojiEvents bool `json:"emoji_events"` EnableSSLVerification bool `json:"enable_ssl_verification"` RepositoryUpdateEvents bool `json:"repository_update_events"` AlertStatus string `json:"alert_status"` @@ -1419,62 +1320,31 @@ type ProjectHook struct { ResourceAccessTokenEvents bool `json:"resource_access_token_events"` CustomWebhookTemplate string `json:"custom_webhook_template"` CustomHeaders []*HookCustomHeader `json:"custom_headers"` + VulnerabilityEvents bool `json:"vulnerability_events"` + BranchFilterStrategy string `json:"branch_filter_strategy"` } // ListProjectHooksOptions represents the available ListProjectHooks() options. // // GitLab API docs: // https://docs.gitlab.com/api/project_webhooks/#list-webhooks-for-a-project -type ListProjectHooksOptions ListOptions +type ListProjectHooksOptions struct { + ListOptions +} -// ListProjectHooks gets a list of project hooks. -// -// GitLab API docs: -// https://docs.gitlab.com/api/project_webhooks/#list-webhooks-for-a-project func (s *ProjectsService) ListProjectHooks(pid any, opt *ListProjectHooksOptions, options ...RequestOptionFunc) ([]*ProjectHook, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/hooks", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ph []*ProjectHook - resp, err := s.client.Do(req, &ph) - if err != nil { - return nil, resp, err - } - - return ph, resp, nil + return do[[]*ProjectHook](s.client, + withPath("projects/%s/hooks", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetProjectHook gets a specific hook for a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_webhooks/#get-a-project-webhook -func (s *ProjectsService) GetProjectHook(pid any, hook int, options ...RequestOptionFunc) (*ProjectHook, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/hooks/%d", PathEscape(project), hook) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ph := new(ProjectHook) - resp, err := s.client.Do(req, ph) - if err != nil { - return nil, resp, err - } - - return ph, resp, nil +func (s *ProjectsService) GetProjectHook(pid any, hook int64, options ...RequestOptionFunc) (*ProjectHook, *Response, error) { + return do[*ProjectHook](s.client, + withPath("projects/%s/hooks/%d", ProjectID{pid}, hook), + withRequestOpts(options...), + ) } // AddProjectHookOptions represents the available AddProjectHook() options. @@ -1496,6 +1366,7 @@ type AddProjectHookOptions struct { PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` + EmojiEvents *bool `url:"emoji_events,omitempty" json:"emoji_events,omitempty"` TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` Token *string `url:"token,omitempty" json:"token,omitempty"` URL *string `url:"url,omitempty" json:"url,omitempty"` @@ -1503,31 +1374,17 @@ type AddProjectHookOptions struct { ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"` CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` + VulnerabilityEvents 
*bool `url:"vulnerability_events,omitempty" json:"vulnerability_events,omitempty"` + BranchFilterStrategy *string `url:"branch_filter_strategy,omitempty" json:"branch_filter_strategy,omitempty"` } -// AddProjectHook adds a hook to a specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/project_webhooks/#add-a-webhook-to-a-project func (s *ProjectsService) AddProjectHook(pid any, opt *AddProjectHookOptions, options ...RequestOptionFunc) (*ProjectHook, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/hooks", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - ph := new(ProjectHook) - resp, err := s.client.Do(req, ph) - if err != nil { - return nil, resp, err - } - - return ph, resp, nil + return do[*ProjectHook](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/hooks", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditProjectHookOptions represents the available EditProjectHook() options. 
@@ -1549,6 +1406,7 @@ type EditProjectHookOptions struct { PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` + EmojiEvents *bool `url:"emoji_events,omitempty" json:"emoji_events,omitempty"` TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` Token *string `url:"token,omitempty" json:"token,omitempty"` URL *string `url:"url,omitempty" json:"url,omitempty"` @@ -1556,51 +1414,26 @@ type EditProjectHookOptions struct { ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"` CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` + VulnerabilityEvents *bool `url:"vulnerability_events,omitempty" json:"vulnerability_events,omitempty"` + BranchFilterStrategy *string `url:"branch_filter_strategy,omitempty" json:"branch_filter_strategy,omitempty"` } -// EditProjectHook edits a hook for a specified project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_webhooks/#edit-a-project-webhook -func (s *ProjectsService) EditProjectHook(pid any, hook int, opt *EditProjectHookOptions, options ...RequestOptionFunc) (*ProjectHook, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/hooks/%d", PathEscape(project), hook) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ph := new(ProjectHook) - resp, err := s.client.Do(req, ph) - if err != nil { - return nil, resp, err - } - - return ph, resp, nil +func (s *ProjectsService) EditProjectHook(pid any, hook int64, opt *EditProjectHookOptions, options ...RequestOptionFunc) (*ProjectHook, *Response, error) { + return do[*ProjectHook](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/hooks/%d", ProjectID{pid}, hook), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteProjectHook removes a hook from a project. This is an idempotent -// method and can be called multiple times. Either the hook is available or not. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_webhooks/#delete-project-webhook -func (s *ProjectsService) DeleteProjectHook(pid any, hook int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/hooks/%d", PathEscape(project), hook) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ProjectsService) DeleteProjectHook(pid any, hook int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/hooks/%d", ProjectID{pid}, hook), + withRequestOpts(options...), + ) + return resp, err } // TriggerTestProjectHook Trigger a test hook for a specified project. @@ -1615,19 +1448,13 @@ func (s *ProjectsService) DeleteProjectHook(pid any, hook int, options ...Reques // // GitLab API docs: // https://docs.gitlab.com/api/project_webhooks/#trigger-a-test-project-webhook -func (s *ProjectsService) TriggerTestProjectHook(pid any, hook int, event ProjectHookEvent, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/hooks/%d/test/%s", PathEscape(project), hook, string(event)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ProjectsService) TriggerTestProjectHook(pid any, hook int64, event ProjectHookEvent, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/hooks/%d/test/%s", ProjectID{pid}, hook, string(event)), + withRequestOpts(options...), + ) + return resp, err } // SetHookCustomHeaderOptions represents the available SetProjectCustomHeader() @@ -1639,42 +1466,23 @@ type 
SetHookCustomHeaderOptions struct { Value *string `json:"value,omitempty"` } -// SetProjectCustomHeader creates or updates a project custom webhook header. -// -// GitLab API docs: -// https://docs.gitlab.com/api/project_webhooks/#set-a-custom-header -func (s *ProjectsService) SetProjectCustomHeader(pid any, hook int, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/hooks/%d/custom_headers/%s", PathEscape(project), hook, key) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ProjectsService) SetProjectCustomHeader(pid any, hook int64, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/hooks/%d/custom_headers/%s", ProjectID{pid}, hook, key), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } -// DeleteProjectCustomHeader deletes a project custom webhook header. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_webhooks/#delete-a-custom-header -func (s *ProjectsService) DeleteProjectCustomHeader(pid any, hook int, key string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/hooks/%d/custom_headers/%s", PathEscape(project), hook, key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ProjectsService) DeleteProjectCustomHeader(pid any, hook int64, key string, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/hooks/%d/custom_headers/%s", ProjectID{pid}, hook, key), + withRequestOpts(options...), + ) + return resp, err } // SetProjectWebhookURLVariableOptions represents the available @@ -1686,42 +1494,23 @@ type SetProjectWebhookURLVariableOptions struct { Value *string `json:"value,omitempty"` } -// SetProjectWebhookURLVariable creates or updates a project webhook URL variable. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_webhooks/#set-a-url-variable -func (s *ProjectsService) SetProjectWebhookURLVariable(pid any, hook int, key string, opt *SetProjectWebhookURLVariableOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/hooks/%d/url_variables/%s", PathEscape(project), hook, PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ProjectsService) SetProjectWebhookURLVariable(pid any, hook int64, key string, opt *SetProjectWebhookURLVariableOptions, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/hooks/%d/url_variables/%s", ProjectID{pid}, hook, key), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } -// DeleteProjectWebhookURLVariable deletes a project webhook URL variable. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_webhooks/#delete-a-url-variable -func (s *ProjectsService) DeleteProjectWebhookURLVariable(pid any, hook int, key string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/hooks/%d/url_variables/%s", PathEscape(project), hook, PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ProjectsService) DeleteProjectWebhookURLVariable(pid any, hook int64, key string, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/hooks/%d/url_variables/%s", ProjectID{pid}, hook, key), + withRequestOpts(options...), + ) + return resp, err } // ProjectForkRelation represents a project fork relationship. @@ -1729,139 +1518,56 @@ func (s *ProjectsService) DeleteProjectWebhookURLVariable(pid any, hook int, key // GitLab API docs: // https://docs.gitlab.com/api/project_forks/#create-a-fork-relationship-between-projects type ProjectForkRelation struct { - ID int `json:"id"` - ForkedToProjectID int `json:"forked_to_project_id"` - ForkedFromProjectID int `json:"forked_from_project_id"` + ID int64 `json:"id"` + ForkedToProjectID int64 `json:"forked_to_project_id"` + ForkedFromProjectID int64 `json:"forked_from_project_id"` CreatedAt *time.Time `json:"created_at"` UpdatedAt *time.Time `json:"updated_at"` } -// CreateProjectForkRelation creates a forked from/to relation between -// existing projects. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_forks/#create-a-fork-relationship-between-projects -func (s *ProjectsService) CreateProjectForkRelation(pid any, fork int, options ...RequestOptionFunc) (*ProjectForkRelation, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/fork/%d", PathEscape(project), fork) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - pfr := new(ProjectForkRelation) - resp, err := s.client.Do(req, pfr) - if err != nil { - return nil, resp, err - } - - return pfr, resp, nil +func (s *ProjectsService) CreateProjectForkRelation(pid any, fork int64, options ...RequestOptionFunc) (*ProjectForkRelation, *Response, error) { + return do[*ProjectForkRelation](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/fork/%d", ProjectID{pid}, fork), + withRequestOpts(options...), + ) } -// DeleteProjectForkRelation deletes an existing forked from relationship. -// -// GitLab API docs: -// https://docs.gitlab.com/api/project_forks/#delete-a-fork-relationship-between-projects func (s *ProjectsService) DeleteProjectForkRelation(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/fork", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/fork", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } -// UploadAvatar uploads an avatar. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#upload-a-project-avatar func (s *ProjectsService) UploadAvatar(pid any, avatar io.Reader, filename string, options ...RequestOptionFunc) (*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s", PathEscape(project)) - - req, err := s.client.UploadRequest( - http.MethodPut, - u, - avatar, - filename, - UploadAvatar, - nil, - options, + return do[*Project](s.client, + withMethod(http.MethodPut), + withPath("projects/%s", ProjectID{pid}), + withUpload(avatar, filename, UploadAvatar), + withRequestOpts(options...), ) - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil } -// DownloadAvatar downloads an avatar. -// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#download-a-project-avatar func (s *ProjectsService) DownloadAvatar(pid any, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/avatar", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - avatar := new(bytes.Buffer) - resp, err := s.client.Do(req, avatar) + buf, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/avatar", ProjectID{pid}), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return bytes.NewReader(avatar.Bytes()), resp, err + return bytes.NewReader(buf.Bytes()), resp, nil } -// ListProjectForks gets a list of project forks. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_forks/#list-forks-of-a-project func (s *ProjectsService) ListProjectForks(pid any, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/forks", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var forks []*Project - resp, err := s.client.Do(req, &forks) - if err != nil { - return nil, resp, err - } - - return forks, resp, nil + return do[[]*Project](s.client, + withPath("projects/%s/forks", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ProjectPushRules represents a project push rule. @@ -1869,8 +1575,8 @@ func (s *ProjectsService) ListProjectForks(pid any, opt *ListProjectsOptions, op // GitLab API docs: // https://docs.gitlab.com/api/project_push_rules/ type ProjectPushRules struct { - ID int `json:"id"` - ProjectID int `json:"project_id"` + ID int64 `json:"id"` + ProjectID int64 `json:"project_id"` CommitMessageRegex string `json:"commit_message_regex"` CommitMessageNegativeRegex string `json:"commit_message_negative_regex"` BranchNameRegex string `json:"branch_name_regex"` @@ -1880,36 +1586,18 @@ type ProjectPushRules struct { PreventSecrets bool `json:"prevent_secrets"` AuthorEmailRegex string `json:"author_email_regex"` FileNameRegex string `json:"file_name_regex"` - MaxFileSize int `json:"max_file_size"` + MaxFileSize int64 `json:"max_file_size"` CommitCommitterCheck bool `json:"commit_committer_check"` CommitCommitterNameCheck bool `json:"commit_committer_name_check"` RejectUnsignedCommits bool `json:"reject_unsigned_commits"` RejectNonDCOCommits bool `json:"reject_non_dco_commits"` } -// GetProjectPushRules gets the push rules of a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_push_rules/#get-project-push-rules func (s *ProjectsService) GetProjectPushRules(pid any, options ...RequestOptionFunc) (*ProjectPushRules, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/push_rule", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ppr := new(ProjectPushRules) - resp, err := s.client.Do(req, ppr) - if err != nil { - return nil, resp, err - } - - return ppr, resp, nil + return do[*ProjectPushRules](s.client, + withPath("projects/%s/push_rule", ProjectID{pid}), + withRequestOpts(options...), + ) } // AddProjectPushRuleOptions represents the available AddProjectPushRule() @@ -1926,36 +1614,20 @@ type AddProjectPushRuleOptions struct { CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` FileNameRegex *string `url:"file_name_regex,omitempty" json:"file_name_regex,omitempty"` - MaxFileSize *int `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` + MaxFileSize *int64 `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` } -// AddProjectPushRule adds a push rule to a specified project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_push_rules/#add-a-project-push-rule func (s *ProjectsService) AddProjectPushRule(pid any, opt *AddProjectPushRuleOptions, options ...RequestOptionFunc) (*ProjectPushRules, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/push_rule", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - ppr := new(ProjectPushRules) - resp, err := s.client.Do(req, ppr) - if err != nil { - return nil, resp, err - } - - return ppr, resp, nil + return do[*ProjectPushRules](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/push_rule", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditProjectPushRuleOptions represents the available EditProjectPushRule() @@ -1972,57 +1644,29 @@ type EditProjectPushRuleOptions struct { CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` FileNameRegex *string `url:"file_name_regex,omitempty" json:"file_name_regex,omitempty"` - MaxFileSize *int `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` + MaxFileSize *int64 `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` } -// EditProjectPushRule edits a push rule for a specified project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_push_rules/#edit-project-push-rule func (s *ProjectsService) EditProjectPushRule(pid any, opt *EditProjectPushRuleOptions, options ...RequestOptionFunc) (*ProjectPushRules, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/push_rule", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ppr := new(ProjectPushRules) - resp, err := s.client.Do(req, ppr) - if err != nil { - return nil, resp, err - } - - return ppr, resp, nil + return do[*ProjectPushRules](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/push_rule", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteProjectPushRule removes a push rule from a project. This is an -// idempotent method and can be called multiple times. Either the push rule is -// available or not. -// -// GitLab API docs: -// https://docs.gitlab.com/api/project_push_rules/#delete-project-push-rule func (s *ProjectsService) DeleteProjectPushRule(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/push_rule", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/push_rule", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // ProjectApprovals represents GitLab project level merge request approvals. 
@@ -2040,32 +1684,14 @@ type ProjectApprovals struct { SelectiveCodeOwnerRemovals bool `json:"selective_code_owner_removals,omitempty"` // Deprecated: use Merge Request Approvals API instead - ApprovalsBeforeMerge int `json:"approvals_before_merge"` + ApprovalsBeforeMerge int64 `json:"approvals_before_merge"` } -// GetApprovalConfiguration get the approval configuration for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/merge_request_approvals/#project-approval-rules func (s *ProjectsService) GetApprovalConfiguration(pid any, options ...RequestOptionFunc) (*ProjectApprovals, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/approvals", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pa := new(ProjectApprovals) - resp, err := s.client.Do(req, pa) - if err != nil { - return nil, resp, err - } - - return pa, resp, nil + return do[*ProjectApprovals](s.client, + withPath("projects/%s/approvals", ProjectID{pid}), + withRequestOpts(options...), + ) } // ChangeApprovalConfigurationOptions represents the available @@ -2082,32 +1708,16 @@ type ChangeApprovalConfigurationOptions struct { SelectiveCodeOwnerRemovals *bool `url:"selective_code_owner_removals,omitempty" json:"selective_code_owner_removals,omitempty"` // Deprecated: use Merge Request Approvals API instead - ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` + ApprovalsBeforeMerge *int64 `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` } -// ChangeApprovalConfiguration updates the approval configuration for a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/merge_request_approvals/#change-configuration func (s *ProjectsService) ChangeApprovalConfiguration(pid any, opt *ChangeApprovalConfigurationOptions, options ...RequestOptionFunc) (*ProjectApprovals, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/approvals", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pa := new(ProjectApprovals) - resp, err := s.client.Do(req, pa) - if err != nil { - return nil, resp, err - } - - return pa, resp, nil + return do[*ProjectApprovals](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/approvals", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetProjectApprovalRulesListsOptions represents the available @@ -2115,56 +1725,23 @@ func (s *ProjectsService) ChangeApprovalConfiguration(pid any, opt *ChangeApprov // // GitLab API docs: // https://docs.gitlab.com/api/merge_request_approvals/#get-all-approval-rules-for-project -type GetProjectApprovalRulesListsOptions ListOptions +type GetProjectApprovalRulesListsOptions struct { + ListOptions +} -// GetProjectApprovalRules looks up the list of project level approver rules. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/merge_request_approvals/#get-all-approval-rules-for-project func (s *ProjectsService) GetProjectApprovalRules(pid any, opt *GetProjectApprovalRulesListsOptions, options ...RequestOptionFunc) ([]*ProjectApprovalRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/approval_rules", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var par []*ProjectApprovalRule - resp, err := s.client.Do(req, &par) - if err != nil { - return nil, resp, err - } - - return par, resp, nil + return do[[]*ProjectApprovalRule](s.client, + withPath("projects/%s/approval_rules", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetProjectApprovalRule gets the project level approvers. -// -// GitLab API docs: -// https://docs.gitlab.com/api/merge_request_approvals/#get-single-approval-rule-for-project -func (s *ProjectsService) GetProjectApprovalRule(pid any, ruleID int, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/approval_rules/%d", PathEscape(project), ruleID) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - par := new(ProjectApprovalRule) - resp, err := s.client.Do(req, &par) - if err != nil { - return nil, resp, err - } - - return par, resp, nil +func (s *ProjectsService) GetProjectApprovalRule(pid any, ruleID int64, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) { + return do[*ProjectApprovalRule](s.client, + withPath("projects/%s/approval_rules/%d", ProjectID{pid}, ruleID), + withRequestOpts(options...), + ) } // CreateProjectLevelRuleOptions represents the available CreateProjectApprovalRule() @@ -2174,39 
+1751,23 @@ func (s *ProjectsService) GetProjectApprovalRule(pid any, ruleID int, options .. // https://docs.gitlab.com/api/merge_request_approvals/#create-project-approval-rule type CreateProjectLevelRuleOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` - ApprovalsRequired *int `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` + ApprovalsRequired *int64 `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` ReportType *string `url:"report_type,omitempty" json:"report_type,omitempty"` RuleType *string `url:"rule_type,omitempty" json:"rule_type,omitempty"` - UserIDs *[]int `url:"user_ids,omitempty" json:"user_ids,omitempty"` - GroupIDs *[]int `url:"group_ids,omitempty" json:"group_ids,omitempty"` - ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` + UserIDs *[]int64 `url:"user_ids,omitempty" json:"user_ids,omitempty"` + GroupIDs *[]int64 `url:"group_ids,omitempty" json:"group_ids,omitempty"` + ProtectedBranchIDs *[]int64 `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` AppliesToAllProtectedBranches *bool `url:"applies_to_all_protected_branches,omitempty" json:"applies_to_all_protected_branches,omitempty"` Usernames *[]string `url:"usernames,omitempty" json:"usernames,omitempty"` } -// CreateProjectApprovalRule creates a new project-level approval rule. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/merge_request_approvals/#create-project-approval-rule func (s *ProjectsService) CreateProjectApprovalRule(pid any, opt *CreateProjectLevelRuleOptions, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/approval_rules", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - par := new(ProjectApprovalRule) - resp, err := s.client.Do(req, &par) - if err != nil { - return nil, resp, err - } - - return par, resp, nil + return do[*ProjectApprovalRule](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/approval_rules", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateProjectLevelRuleOptions represents the available UpdateProjectApprovalRule() @@ -2216,56 +1777,30 @@ func (s *ProjectsService) CreateProjectApprovalRule(pid any, opt *CreateProjectL // https://docs.gitlab.com/api/merge_request_approvals/#update-project-approval-rule type UpdateProjectLevelRuleOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` - ApprovalsRequired *int `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` - UserIDs *[]int `url:"user_ids,omitempty" json:"user_ids,omitempty"` - GroupIDs *[]int `url:"group_ids,omitempty" json:"group_ids,omitempty"` - ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` + ApprovalsRequired *int64 `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` + UserIDs *[]int64 `url:"user_ids,omitempty" json:"user_ids,omitempty"` + GroupIDs *[]int64 `url:"group_ids,omitempty" json:"group_ids,omitempty"` + ProtectedBranchIDs *[]int64 `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` AppliesToAllProtectedBranches *bool 
`url:"applies_to_all_protected_branches,omitempty" json:"applies_to_all_protected_branches,omitempty"` Usernames *[]string `url:"usernames,omitempty" json:"usernames,omitempty"` } -// UpdateProjectApprovalRule updates an existing approval rule with new options. -// -// GitLab API docs: -// https://docs.gitlab.com/api/merge_request_approvals/#update-project-approval-rule -func (s *ProjectsService) UpdateProjectApprovalRule(pid any, approvalRule int, opt *UpdateProjectLevelRuleOptions, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/approval_rules/%d", PathEscape(project), approvalRule) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - par := new(ProjectApprovalRule) - resp, err := s.client.Do(req, &par) - if err != nil { - return nil, resp, err - } - - return par, resp, nil +func (s *ProjectsService) UpdateProjectApprovalRule(pid any, approvalRule int64, opt *UpdateProjectLevelRuleOptions, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) { + return do[*ProjectApprovalRule](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/approval_rules/%d", ProjectID{pid}, approvalRule), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteProjectApprovalRule deletes a project-level approval rule. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/merge_request_approvals/#delete-project-approval-rule -func (s *ProjectsService) DeleteProjectApprovalRule(pid any, approvalRule int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/approval_rules/%d", PathEscape(project), approvalRule) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *ProjectsService) DeleteProjectApprovalRule(pid any, approvalRule int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/approval_rules/%d", ProjectID{pid}, approvalRule), + withRequestOpts(options...), + ) + return resp, err } // ProjectPullMirrorDetails represent the details of the configuration pull @@ -2274,38 +1809,25 @@ func (s *ProjectsService) DeleteProjectApprovalRule(pid any, approvalRule int, o // GitLab API docs: // https://docs.gitlab.com/api/project_pull_mirroring/ type ProjectPullMirrorDetails struct { - ID int `json:"id"` - LastError string `json:"last_error"` - LastSuccessfulUpdateAt *time.Time `json:"last_successful_update_at"` - LastUpdateAt *time.Time `json:"last_update_at"` - LastUpdateStartedAt *time.Time `json:"last_update_started_at"` - UpdateStatus string `json:"update_status"` - URL string `json:"url"` + ID int64 `json:"id"` + LastError string `json:"last_error"` + LastSuccessfulUpdateAt *time.Time `json:"last_successful_update_at"` + LastUpdateAt *time.Time `json:"last_update_at"` + LastUpdateStartedAt *time.Time `json:"last_update_started_at"` + UpdateStatus string `json:"update_status"` + URL string `json:"url"` + Enabled bool `json:"enabled"` + MirrorTriggerBuilds bool `json:"mirror_trigger_builds"` + OnlyMirrorProtectedBranches bool `json:"only_mirror_protected_branches"` + 
MirrorOverwritesDivergedBranches bool `json:"mirror_overwrites_diverged_branches"` + MirrorBranchRegex string `json:"mirror_branch_regex"` } -// GetProjectPullMirrorDetails returns the pull mirror details. -// -// GitLab API docs: -// https://docs.gitlab.com/api/project_pull_mirroring/#get-a-projects-pull-mirror-details func (s *ProjectsService) GetProjectPullMirrorDetails(pid any, options ...RequestOptionFunc) (*ProjectPullMirrorDetails, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/mirror/pull", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pmd := new(ProjectPullMirrorDetails) - resp, err := s.client.Do(req, pmd) - if err != nil { - return nil, resp, err - } - - return pmd, resp, nil + return do[*ProjectPullMirrorDetails](s.client, + withPath("projects/%s/mirror/pull", ProjectID{pid}), + withRequestOpts(options...), + ) } // ConfigureProjectPullMirrorOptions represents the available ConfigureProjectPullMirror() options. @@ -2323,48 +1845,22 @@ type ConfigureProjectPullMirrorOptions struct { MirrorBranchRegex *string `url:"mirror_branch_regex,omitempty" json:"mirror_branch_regex,omitempty"` } -// ConfigureProjectPullMirror configures pull mirroring settings. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/project_pull_mirroring/#configure-pull-mirroring-for-a-project func (s *ProjectsService) ConfigureProjectPullMirror(pid any, opt *ConfigureProjectPullMirrorOptions, options ...RequestOptionFunc) (*ProjectPullMirrorDetails, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/mirror/pull", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pmd := new(ProjectPullMirrorDetails) - resp, err := s.client.Do(req, pmd) - if err != nil { - return nil, resp, err - } - - return pmd, resp, nil + return do[*ProjectPullMirrorDetails](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/mirror/pull", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// StartMirroringProject start the pull mirroring process for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/project_pull_mirroring/#start-the-pull-mirroring-process-for-a-project func (s *ProjectsService) StartMirroringProject(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/mirror/pull", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/mirror/pull", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // TransferProjectOptions represents the available TransferProject() options. 
@@ -2375,47 +1871,22 @@ type TransferProjectOptions struct { Namespace any `url:"namespace,omitempty" json:"namespace,omitempty"` } -// TransferProject transfer a project into the specified namespace -// -// GitLab API docs: https://docs.gitlab.com/api/projects/#transfer-a-project-to-a-new-namespace func (s *ProjectsService) TransferProject(pid any, opt *TransferProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/transfer", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*Project](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/transfer", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// StartHousekeepingProject start the Housekeeping task for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/projects/#start-the-housekeeping-task-for-a-project func (s *ProjectsService) StartHousekeepingProject(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/housekeeping", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/housekeeping", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // ProjectRepositoryStorage represents the repository storage information for a project. 
@@ -2423,38 +1894,41 @@ func (s *ProjectsService) StartHousekeepingProject(pid any, options ...RequestOp // GitLab API docs: // https://docs.gitlab.com/api/projects/#get-the-path-to-repository-storage type ProjectRepositoryStorage struct { - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` DiskPath string `json:"disk_path"` CreatedAt *time.Time `json:"created_at"` RepositoryStorage string `json:"repository_storage"` } -// ProjectReposityStorage is kept for backwards compatibility. -// -// Deprecated: use ProjectRepositoryStorage instead. The ProjectReposityStorage type will be removed in the next release. -type ProjectReposityStorage = ProjectRepositoryStorage +func (s *ProjectsService) GetRepositoryStorage(pid any, options ...RequestOptionFunc) (*ProjectRepositoryStorage, *Response, error) { + return do[*ProjectRepositoryStorage](s.client, + withPath("projects/%s/storage", ProjectID{pid}), + withRequestOpts(options...), + ) +} -// GetRepositoryStorage Get the path to repository storage. +// ProjectStarrer represents a user who starred a project. // // GitLab API docs: -// https://docs.gitlab.com/api/projects/#get-the-path-to-repository-storage -func (s *ProjectsService) GetRepositoryStorage(pid any, options ...RequestOptionFunc) (*ProjectReposityStorage, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/storage", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } +// https://docs.gitlab.com/api/project_starring/#list-users-who-starred-a-project +type ProjectStarrer struct { + StarredSince time.Time `json:"starred_since"` + User ProjectUser `json:"user"` +} - prs := new(ProjectReposityStorage) - resp, err := s.client.Do(req, prs) - if err != nil { - return nil, resp, err - } +// ListProjectStarrersOptions represents the available ListProjectStarrers() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/project_starring/#list-users-who-starred-a-project +type ListProjectStarrersOptions struct { + ListOptions + Search *string `url:"search,omitempty" json:"search,omitempty"` +} - return prs, resp, nil +func (s *ProjectsService) ListProjectStarrers(pid any, opts *ListProjectStarrersOptions, options ...RequestOptionFunc) ([]*ProjectStarrer, *Response, error) { + return do[[]*ProjectStarrer](s.client, + withPath("projects/%s/starrers", ProjectID{pid}), + withAPIOpts(opts), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/protected_branches.go b/vendor/gitlab.com/gitlab-org/api/client-go/protected_branches.go index d3c1aaa76c..0fd162ffde 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/protected_branches.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/protected_branches.go @@ -16,11 +16,7 @@ package gitlab -import ( - "fmt" - "net/http" - "net/url" -) +import "net/http" type ( ProtectedBranchesServiceInterface interface { @@ -48,7 +44,7 @@ var _ ProtectedBranchesServiceInterface = (*ProtectedBranchesService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/protected_branches/#list-protected-branches type ProtectedBranch struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` PushAccessLevels []*BranchAccessDescription `json:"push_access_levels"` MergeAccessLevels []*BranchAccessDescription `json:"merge_access_levels"` @@ -63,12 +59,12 @@ type ProtectedBranch struct { // GitLab API docs: // https://docs.gitlab.com/api/protected_branches/#list-protected-branches type BranchAccessDescription struct { - ID int `json:"id"` + ID int64 `json:"id"` AccessLevel AccessLevelValue `json:"access_level"` AccessLevelDescription string `json:"access_level_description"` - DeployKeyID int `json:"deploy_key_id"` - UserID int `json:"user_id"` - GroupID int `json:"group_id"` + DeployKeyID int64 `json:"deploy_key_id"` + UserID int64 `json:"user_id"` + 
GroupID int64 `json:"group_id"` } // ListProtectedBranchesOptions represents the available ListProtectedBranches() @@ -86,24 +82,11 @@ type ListProtectedBranchesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/protected_branches/#list-protected-branches func (s *ProtectedBranchesService) ListProtectedBranches(pid any, opt *ListProtectedBranchesOptions, options ...RequestOptionFunc) ([]*ProtectedBranch, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_branches", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*ProtectedBranch - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[[]*ProtectedBranch](s.client, + withPath("projects/%s/protected_branches", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetProtectedBranch gets a single protected branch or wildcard protected branch. 
@@ -111,24 +94,10 @@ func (s *ProtectedBranchesService) ListProtectedBranches(pid any, opt *ListProte // GitLab API docs: // https://docs.gitlab.com/api/protected_branches/#get-a-single-protected-branch-or-wildcard-protected-branch func (s *ProtectedBranchesService) GetProtectedBranch(pid any, branch string, options ...RequestOptionFunc) (*ProtectedBranch, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_branches/%s", PathEscape(project), url.PathEscape(branch)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(ProtectedBranch) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*ProtectedBranch](s.client, + withPath("projects/%s/protected_branches/%s", ProjectID{pid}, branch), + withRequestOpts(options...), + ) } // ProtectRepositoryBranchesOptions represents the available @@ -153,10 +122,10 @@ type ProtectRepositoryBranchesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/protected_branches/#protect-repository-branches type BranchPermissionOptions struct { - ID *int `url:"id,omitempty" json:"id,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - DeployKeyID *int `url:"deploy_key_id,omitempty" json:"deploy_key_id,omitempty"` + ID *int64 `url:"id,omitempty" json:"id,omitempty"` + UserID *int64 `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` + DeployKeyID *int64 `url:"deploy_key_id,omitempty" json:"deploy_key_id,omitempty"` AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` } @@ -167,24 +136,12 @@ type BranchPermissionOptions struct { // GitLab API 
docs: // https://docs.gitlab.com/api/protected_branches/#protect-repository-branches func (s *ProtectedBranchesService) ProtectRepositoryBranches(pid any, opt *ProtectRepositoryBranchesOptions, options ...RequestOptionFunc) (*ProtectedBranch, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_branches", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(ProtectedBranch) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*ProtectedBranch](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/protected_branches", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UnprotectRepositoryBranches unprotects the given protected branch or wildcard @@ -193,18 +150,12 @@ func (s *ProtectedBranchesService) ProtectRepositoryBranches(pid any, opt *Prote // GitLab API docs: // https://docs.gitlab.com/api/protected_branches/#unprotect-repository-branches func (s *ProtectedBranchesService) UnprotectRepositoryBranches(pid any, branch string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/protected_branches/%s", PathEscape(project), url.PathEscape(branch)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/protected_branches/%s", ProjectID{pid}, branch), + withRequestOpts(options...), + ) + return resp, err } // UpdateProtectedBranchOptions represents the available @@ -223,25 +174,13 @@ type UpdateProtectedBranchOptions struct { // UpdateProtectedBranch updates a protected branch. 
// -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/protected_branches/#update-a-protected-branch func (s *ProtectedBranchesService) UpdateProtectedBranch(pid any, branch string, opt *UpdateProtectedBranchOptions, options ...RequestOptionFunc) (*ProtectedBranch, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_branches/%s", PathEscape(project), url.PathEscape(branch)) - - req, err := s.client.NewRequest(http.MethodPatch, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(ProtectedBranch) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*ProtectedBranch](s.client, + withMethod(http.MethodPatch), + withPath("projects/%s/protected_branches/%s", ProjectID{pid}, branch), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/protected_environments.go b/vendor/gitlab.com/gitlab-org/api/client-go/protected_environments.go index 17a440d1ac..57363ef0df 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/protected_environments.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/protected_environments.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" ) @@ -49,7 +48,7 @@ var _ ProtectedEnvironmentsServiceInterface = (*ProtectedEnvironmentsService)(ni type ProtectedEnvironment struct { Name string `json:"name"` DeployAccessLevels []*EnvironmentAccessDescription `json:"deploy_access_levels"` - RequiredApprovalCount int `json:"required_approval_count"` + RequiredApprovalCount int64 `json:"required_approval_count"` ApprovalRules []*EnvironmentApprovalRule `json:"approval_rules"` } @@ -59,12 +58,12 @@ type ProtectedEnvironment struct { // GitLab API docs: // https://docs.gitlab.com/api/protected_environments/ type EnvironmentAccessDescription struct { - ID int `json:"id"` + ID int64 `json:"id"` AccessLevel 
AccessLevelValue `json:"access_level"` AccessLevelDescription string `json:"access_level_description"` - UserID int `json:"user_id"` - GroupID int `json:"group_id"` - GroupInheritanceType int `json:"group_inheritance_type"` + UserID int64 `json:"user_id"` + GroupID int64 `json:"group_id"` + GroupInheritanceType int64 `json:"group_inheritance_type"` } // EnvironmentApprovalRule represents the approval rules for a protected @@ -73,13 +72,13 @@ type EnvironmentAccessDescription struct { // GitLab API docs: // https://docs.gitlab.com/api/protected_environments/#protect-a-single-environment type EnvironmentApprovalRule struct { - ID int `json:"id"` - UserID int `json:"user_id"` - GroupID int `json:"group_id"` + ID int64 `json:"id"` + UserID int64 `json:"user_id"` + GroupID int64 `json:"group_id"` AccessLevel AccessLevelValue `json:"access_level"` AccessLevelDescription string `json:"access_level_description"` - RequiredApprovalCount int `json:"required_approvals"` - GroupInheritanceType int `json:"group_inheritance_type"` + RequiredApprovalCount int64 `json:"required_approvals"` + GroupInheritanceType int64 `json:"group_inheritance_type"` } // ListProtectedEnvironmentsOptions represents the available @@ -87,7 +86,9 @@ type EnvironmentApprovalRule struct { // // GitLab API docs: // https://docs.gitlab.com/api/protected_environments/#list-protected-environments -type ListProtectedEnvironmentsOptions ListOptions +type ListProtectedEnvironmentsOptions struct { + ListOptions +} // ListProtectedEnvironments returns a list of protected environments from a // project. 
@@ -95,24 +96,11 @@ type ListProtectedEnvironmentsOptions ListOptions // GitLab API docs: // https://docs.gitlab.com/api/protected_environments/#list-protected-environments func (s *ProtectedEnvironmentsService) ListProtectedEnvironments(pid any, opt *ListProtectedEnvironmentsOptions, options ...RequestOptionFunc) ([]*ProtectedEnvironment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_environments", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pes []*ProtectedEnvironment - resp, err := s.client.Do(req, &pes) - if err != nil { - return nil, resp, err - } - - return pes, resp, nil + return do[[]*ProtectedEnvironment](s.client, + withPath("projects/%s/protected_environments", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetProtectedEnvironment returns a single protected environment or wildcard @@ -121,24 +109,10 @@ func (s *ProtectedEnvironmentsService) ListProtectedEnvironments(pid any, opt *L // GitLab API docs: // https://docs.gitlab.com/api/protected_environments/#get-a-single-protected-environment func (s *ProtectedEnvironmentsService) GetProtectedEnvironment(pid any, environment string, options ...RequestOptionFunc) (*ProtectedEnvironment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_environments/%s", PathEscape(project), PathEscape(environment)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pe := new(ProtectedEnvironment) - resp, err := s.client.Do(req, pe) - if err != nil { - return nil, resp, err - } - - return pe, resp, nil + return do[*ProtectedEnvironment](s.client, + withPath("projects/%s/protected_environments/%s", ProjectID{pid}, environment), + withRequestOpts(options...), 
+ ) } // ProtectRepositoryEnvironmentsOptions represents the available @@ -149,7 +123,7 @@ func (s *ProtectedEnvironmentsService) GetProtectedEnvironment(pid any, environm type ProtectRepositoryEnvironmentsOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` DeployAccessLevels *[]*EnvironmentAccessOptions `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"` - RequiredApprovalCount *int `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"` + RequiredApprovalCount *int64 `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"` ApprovalRules *[]*EnvironmentApprovalRuleOptions `url:"approval_rules,omitempty" json:"approval_rules,omitempty"` } @@ -160,9 +134,9 @@ type ProtectRepositoryEnvironmentsOptions struct { // https://docs.gitlab.com/api/protected_environments/#protect-a-single-environment type EnvironmentAccessOptions struct { AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` + UserID *int64 `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` + GroupInheritanceType *int64 `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` } // EnvironmentApprovalRuleOptions represents the approval rules for a protected @@ -171,12 +145,12 @@ type EnvironmentAccessOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/protected_environments/#protect-a-single-environment type EnvironmentApprovalRuleOptions struct { - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + UserID *int64 `url:"user_id,omitempty" 
json:"user_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` AccessLevelDescription *string `url:"access_level_description,omitempty" json:"access_level_description,omitempty"` - RequiredApprovalCount *int `url:"required_approvals,omitempty" json:"required_approvals,omitempty"` - GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` + RequiredApprovalCount *int64 `url:"required_approvals,omitempty" json:"required_approvals,omitempty"` + GroupInheritanceType *int64 `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` } // ProtectRepositoryEnvironments protects a single repository environment or @@ -185,24 +159,12 @@ type EnvironmentApprovalRuleOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/protected_environments/#protect-a-single-environment func (s *ProtectedEnvironmentsService) ProtectRepositoryEnvironments(pid any, opt *ProtectRepositoryEnvironmentsOptions, options ...RequestOptionFunc) (*ProtectedEnvironment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_environments", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pe := new(ProtectedEnvironment) - resp, err := s.client.Do(req, pe) - if err != nil { - return nil, resp, err - } - - return pe, resp, nil + return do[*ProtectedEnvironment](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/protected_environments", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateProtectedEnvironmentsOptions represents the available @@ -213,7 +175,7 @@ func (s *ProtectedEnvironmentsService) ProtectRepositoryEnvironments(pid any, op type UpdateProtectedEnvironmentsOptions struct { 
Name *string `url:"name,omitempty" json:"name,omitempty"` DeployAccessLevels *[]*UpdateEnvironmentAccessOptions `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"` - RequiredApprovalCount *int `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"` + RequiredApprovalCount *int64 `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"` ApprovalRules *[]*UpdateEnvironmentApprovalRuleOptions `url:"approval_rules,omitempty" json:"approval_rules,omitempty"` } @@ -224,10 +186,10 @@ type UpdateProtectedEnvironmentsOptions struct { // https://docs.gitlab.com/api/protected_environments/#update-a-protected-environment type UpdateEnvironmentAccessOptions struct { AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ID *int `url:"id,omitempty" json:"id,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` + ID *int64 `url:"id,omitempty" json:"id,omitempty"` + UserID *int64 `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` + GroupInheritanceType *int64 `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` } @@ -237,13 +199,13 @@ type UpdateEnvironmentAccessOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/protected_environments/#update-a-protected-environment type UpdateEnvironmentApprovalRuleOptions struct { - ID *int `url:"id,omitempty" json:"id,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + ID *int64 `url:"id,omitempty" json:"id,omitempty"` + UserID *int64 `url:"user_id,omitempty" 
json:"user_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` AccessLevelDescription *string `url:"access_level_description,omitempty" json:"access_level_description,omitempty"` - RequiredApprovalCount *int `url:"required_approvals,omitempty" json:"required_approvals,omitempty"` - GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` + RequiredApprovalCount *int64 `url:"required_approvals,omitempty" json:"required_approvals,omitempty"` + GroupInheritanceType *int64 `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` } @@ -253,24 +215,12 @@ type UpdateEnvironmentApprovalRuleOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/protected_environments/#update-a-protected-environment func (s *ProtectedEnvironmentsService) UpdateProtectedEnvironments(pid any, environment string, opt *UpdateProtectedEnvironmentsOptions, options ...RequestOptionFunc) (*ProtectedEnvironment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_environments/%s", PathEscape(project), PathEscape(environment)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pe := new(ProtectedEnvironment) - resp, err := s.client.Do(req, pe) - if err != nil { - return nil, resp, err - } - - return pe, resp, nil + return do[*ProtectedEnvironment](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/protected_environments/%s", ProjectID{pid}, environment), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UnprotectEnvironment unprotects the given protected environment or wildcard @@ -279,16 +229,10 @@ func (s *ProtectedEnvironmentsService) 
UpdateProtectedEnvironments(pid any, envi // GitLab API docs: // https://docs.gitlab.com/api/protected_environments/#unprotect-a-single-environment func (s *ProtectedEnvironmentsService) UnprotectEnvironment(pid any, environment string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/protected_environments/%s", PathEscape(project), PathEscape(environment)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/protected_environments/%s", ProjectID{pid}, environment), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/protected_packages.go b/vendor/gitlab.com/gitlab-org/api/client-go/protected_packages.go new file mode 100644 index 0000000000..bca13f1b10 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/protected_packages.go @@ -0,0 +1,120 @@ +package gitlab + +import ( + "net/http" +) + +type ( + ProtectedPackagesServiceInterface interface { + // ListPackageProtectionRules gets a list of project package protection rules. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_packages_protection_rules/#list-package-protection-rules + ListPackageProtectionRules(pid any, opt *ListPackageProtectionRulesOptions, options ...RequestOptionFunc) ([]*PackageProtectionRule, *Response, error) + // CreatePackageProtectionRules creates a new package protection rules. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/project_packages_protection_rules/#create-a-package-protection-rule + CreatePackageProtectionRules(pid any, opt *CreatePackageProtectionRulesOptions, options ...RequestOptionFunc) (*PackageProtectionRule, *Response, error) + // UpdatePackageProtectionRules updates an existing package protection rule. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_packages_protection_rules/#update-a-package-protection-rule + UpdatePackageProtectionRules(pid any, packageProtectionRule int64, opt *UpdatePackageProtectionRulesOptions, options ...RequestOptionFunc) (*PackageProtectionRule, *Response, error) + // DeletePackageProtectionRules deletes an existing package protection rules. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_packages_protection_rules/#delete-a-package-protection-rule + DeletePackageProtectionRules(pid any, packageProtectionRule int64, options ...RequestOptionFunc) (*Response, error) + } + + // ProtectedPackagesService handles communication with the protected packages related methods + // of the GitLab API. + // + // GitLab API docs: + // https://docs.gitlab.com/api/project_packages_protection_rules/ + ProtectedPackagesService struct { + client *Client + } +) + +var _ ProtectedPackagesServiceInterface = (*ProtectedPackagesService)(nil) + +// PackageProtectionRule represents a GitLab package protection rule. +// +// GitLab API docs: +// https://docs.gitlab.com/api/project_packages_protection_rules +type PackageProtectionRule struct { + ID int64 `json:"id"` + ProjectID int64 `json:"project_id"` + PackageNamePattern string `json:"package_name_pattern"` + PackageType string `json:"package_type"` + MinimumAccessLevelForDelete string `json:"minimum_access_level_for_delete"` + MinimumAccessLevelForPush string `json:"minimum_access_level_for_push"` +} + +// ListPackageProtectionRulesOptions represents the available ListPackageProtectionRules() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/project_packages_protection_rules/#list-package-protection-rules +type ListPackageProtectionRulesOptions struct { + ListOptions +} + +// CreatePackageProtectionRulesOptions represents the available CreatePackageProtectionRules() options. +// +// GitLab API docs: +// https://docs.gitlab.com/api/project_packages_protection_rules/#create-a-package-protection-rule +type CreatePackageProtectionRulesOptions struct { + PackageNamePattern *string `url:"package_name_pattern" json:"package_name_pattern"` + PackageType *string `url:"package_type" json:"package_type"` + MinimumAccessLevelForDelete *int64 `url:"minimum_access_level_for_delete" json:"minimum_access_level_for_delete"` + MinimumAccessLevelForPush *int64 `url:"minimum_access_level_for_push" json:"minimum_access_level_for_push"` +} + +// UpdatePackageProtectionRulesOptions represents the available +// UpdatePackageProtectionRules() options. +// +// GitLab API docs: +// https://docs.gitlab.com/api/project_packages_protection_rules/#update-a-package-protection-rule +type UpdatePackageProtectionRulesOptions struct { + PackageNamePattern *string `url:"package_name_pattern" json:"package_name_pattern"` + PackageType *string `url:"package_type" json:"package_type"` + MinimumAccessLevelForDelete *int64 `url:"minimum_access_level_for_delete" json:"minimum_access_level_for_delete"` + MinimumAccessLevelForPush *int64 `url:"minimum_access_level_for_push" json:"minimum_access_level_for_push"` +} + +func (s *ProtectedPackagesService) ListPackageProtectionRules(pid any, opts *ListPackageProtectionRulesOptions, options ...RequestOptionFunc) ([]*PackageProtectionRule, *Response, error) { + return do[[]*PackageProtectionRule](s.client, + withPath("projects/%s/packages/protection/rules", ProjectID{pid}), + withAPIOpts(opts), + withRequestOpts(options...), + ) +} + +func (s *ProtectedPackagesService) CreatePackageProtectionRules(pid any, opt *CreatePackageProtectionRulesOptions, 
options ...RequestOptionFunc) (*PackageProtectionRule, *Response, error) { + return do[*PackageProtectionRule](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/packages/protection/rules", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +func (s *ProtectedPackagesService) DeletePackageProtectionRules(pid any, packageProtectionRule int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/packages/protection/rules/%d", ProjectID{pid}, packageProtectionRule), + withRequestOpts(options...), + ) + return resp, err +} + +func (s *ProtectedPackagesService) UpdatePackageProtectionRules(pid any, packageProtectionRule int64, opt *UpdatePackageProtectionRulesOptions, options ...RequestOptionFunc) (*PackageProtectionRule, *Response, error) { + return do[*PackageProtectionRule](s.client, + withMethod(http.MethodPatch), + withPath("projects/%s/packages/protection/rules/%d", ProjectID{pid}, packageProtectionRule), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/protected_tags.go b/vendor/gitlab.com/gitlab-org/api/client-go/protected_tags.go index 2d776eed9c..78d0be82fb 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/protected_tags.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/protected_tags.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" ) @@ -55,9 +54,10 @@ type ProtectedTag struct { // GitLab API docs: // https://docs.gitlab.com/api/protected_tags/ type TagAccessDescription struct { - ID int `json:"id"` - UserID int `json:"user_id"` - GroupID int `json:"group_id"` + ID int64 `json:"id"` + UserID int64 `json:"user_id"` + GroupID int64 `json:"group_id"` + DeployKeyID int64 `json:"deploy_key_id"` AccessLevel AccessLevelValue `json:"access_level"` AccessLevelDescription string `json:"access_level_description"` } @@ -67,31 +67,20 @@ type 
TagAccessDescription struct { // // GitLab API docs: // https://docs.gitlab.com/api/protected_tags/#list-protected-tags -type ListProtectedTagsOptions ListOptions +type ListProtectedTagsOptions struct { + ListOptions +} // ListProtectedTags returns a list of protected tags from a project. // // GitLab API docs: // https://docs.gitlab.com/api/protected_tags/#list-protected-tags func (s *ProtectedTagsService) ListProtectedTags(pid any, opt *ListProtectedTagsOptions, options ...RequestOptionFunc) ([]*ProtectedTag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_tags", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pts []*ProtectedTag - resp, err := s.client.Do(req, &pts) - if err != nil { - return nil, resp, err - } - - return pts, resp, nil + return do[[]*ProtectedTag](s.client, + withPath("projects/%s/protected_tags", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetProtectedTag returns a single protected tag or wildcard protected tag. 
@@ -99,24 +88,10 @@ func (s *ProtectedTagsService) ListProtectedTags(pid any, opt *ListProtectedTags // GitLab API docs: // https://docs.gitlab.com/api/protected_tags/#get-a-single-protected-tag-or-wildcard-protected-tag func (s *ProtectedTagsService) GetProtectedTag(pid any, tag string, options ...RequestOptionFunc) (*ProtectedTag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_tags/%s", PathEscape(project), PathEscape(tag)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pt := new(ProtectedTag) - resp, err := s.client.Do(req, pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil + return do[*ProtectedTag](s.client, + withPath("projects/%s/protected_tags/%s", ProjectID{pid}, tag), + withRequestOpts(options...), + ) } // ProtectRepositoryTagsOptions represents the available ProtectRepositoryTags() @@ -135,8 +110,9 @@ type ProtectRepositoryTagsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/protected_tags/#protect-repository-tags type TagsPermissionOptions struct { - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + UserID *int64 `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` + DeployKeyID *int64 `url:"deploy_key_id,omitempty" json:"deploy_key_id,omitempty"` AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` } @@ -146,24 +122,12 @@ type TagsPermissionOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/protected_tags/#protect-repository-tags func (s *ProtectedTagsService) ProtectRepositoryTags(pid any, opt *ProtectRepositoryTagsOptions, options ...RequestOptionFunc) (*ProtectedTag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return 
nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_tags", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pt := new(ProtectedTag) - resp, err := s.client.Do(req, pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil + return do[*ProtectedTag](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/protected_tags", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UnprotectRepositoryTags unprotects the given protected tag or wildcard @@ -172,16 +136,10 @@ func (s *ProtectedTagsService) ProtectRepositoryTags(pid any, opt *ProtectReposi // GitLab API docs: // https://docs.gitlab.com/api/protected_tags/#unprotect-repository-tags func (s *ProtectedTagsService) UnprotectRepositoryTags(pid any, tag string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/protected_tags/%s", PathEscape(project), PathEscape(tag)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/protected_tags/%s", ProjectID{pid}, tag), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/releaselinks.go b/vendor/gitlab.com/gitlab-org/api/client-go/releaselinks.go index 804e1ae51a..dce9cd3bef 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/releaselinks.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/releaselinks.go @@ -16,18 +16,15 @@ package gitlab -import ( - "fmt" - "net/http" -) +import "net/http" type ( ReleaseLinksServiceInterface interface { ListReleaseLinks(pid any, tagName string, opt *ListReleaseLinksOptions, options ...RequestOptionFunc) ([]*ReleaseLink, *Response, error) - 
GetReleaseLink(pid any, tagName string, link int, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) + GetReleaseLink(pid any, tagName string, link int64, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) CreateReleaseLink(pid any, tagName string, opt *CreateReleaseLinkOptions, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) - UpdateReleaseLink(pid any, tagName string, link int, opt *UpdateReleaseLinkOptions, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) - DeleteReleaseLink(pid any, tagName string, link int, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) + UpdateReleaseLink(pid any, tagName string, link int64, opt *UpdateReleaseLinkOptions, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) + DeleteReleaseLink(pid any, tagName string, link int64, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) } // ReleaseLinksService handles communication with the release link methods @@ -45,7 +42,7 @@ var _ ReleaseLinksServiceInterface = (*ReleaseLinksService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/releases/links/ type ReleaseLink struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` URL string `json:"url"` DirectAssetURL string `json:"direct_asset_url"` @@ -56,57 +53,29 @@ type ReleaseLink struct { // ListReleaseLinksOptions represents ListReleaseLinks() options. // // GitLab API docs: https://docs.gitlab.com/api/releases/links/#list-links-of-a-release -type ListReleaseLinksOptions ListOptions +type ListReleaseLinksOptions struct { + ListOptions +} // ListReleaseLinks gets assets as links from a Release. 
// // GitLab API docs: https://docs.gitlab.com/api/releases/links/#list-links-of-a-release func (s *ReleaseLinksService) ListReleaseLinks(pid any, tagName string, opt *ListReleaseLinksOptions, options ...RequestOptionFunc) ([]*ReleaseLink, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases/%s/assets/links", PathEscape(project), PathEscape(tagName)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var rls []*ReleaseLink - resp, err := s.client.Do(req, &rls) - if err != nil { - return nil, resp, err - } - - return rls, resp, nil + return do[[]*ReleaseLink](s.client, + withPath("projects/%s/releases/%s/assets/links", ProjectID{pid}, tagName), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetReleaseLink returns a link from release assets. // // GitLab API docs: https://docs.gitlab.com/api/releases/links/#get-a-release-link -func (s *ReleaseLinksService) GetReleaseLink(pid any, tagName string, link int, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases/%s/assets/links/%d", - PathEscape(project), - PathEscape(tagName), - link) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - rl := new(ReleaseLink) - resp, err := s.client.Do(req, rl) - if err != nil { - return nil, resp, err - } - - return rl, resp, nil +func (s *ReleaseLinksService) GetReleaseLink(pid any, tagName string, link int64, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) { + return do[*ReleaseLink](s.client, + withPath("projects/%s/releases/%s/assets/links/%d", ProjectID{pid}, tagName, link), + withRequestOpts(options...), + ) } // CreateReleaseLinkOptions represents CreateReleaseLink() options. 
@@ -124,24 +93,12 @@ type CreateReleaseLinkOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/releases/links/#create-a-release-link func (s *ReleaseLinksService) CreateReleaseLink(pid any, tagName string, opt *CreateReleaseLinkOptions, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases/%s/assets/links", PathEscape(project), PathEscape(tagName)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - rl := new(ReleaseLink) - resp, err := s.client.Do(req, rl) - if err != nil { - return nil, resp, err - } - - return rl, resp, nil + return do[*ReleaseLink](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/releases/%s/assets/links", ProjectID{pid}, tagName), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UpdateReleaseLinkOptions represents UpdateReleaseLink() options. @@ -160,54 +117,22 @@ type UpdateReleaseLinkOptions struct { // UpdateReleaseLink updates an asset link. 
// // GitLab API docs: https://docs.gitlab.com/api/releases/links/#update-a-release-link -func (s *ReleaseLinksService) UpdateReleaseLink(pid any, tagName string, link int, opt *UpdateReleaseLinkOptions, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases/%s/assets/links/%d", - PathEscape(project), - PathEscape(tagName), - link) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - rl := new(ReleaseLink) - resp, err := s.client.Do(req, rl) - if err != nil { - return nil, resp, err - } - - return rl, resp, nil +func (s *ReleaseLinksService) UpdateReleaseLink(pid any, tagName string, link int64, opt *UpdateReleaseLinkOptions, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) { + return do[*ReleaseLink](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/releases/%s/assets/links/%d", ProjectID{pid}, tagName, link), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteReleaseLink deletes a link from release. 
// // GitLab API docs: https://docs.gitlab.com/api/releases/links/#delete-a-release-link -func (s *ReleaseLinksService) DeleteReleaseLink(pid any, tagName string, link int, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases/%s/assets/links/%d", - PathEscape(project), - PathEscape(tagName), - link, +func (s *ReleaseLinksService) DeleteReleaseLink(pid any, tagName string, link int64, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) { + return do[*ReleaseLink](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/releases/%s/assets/links/%d", ProjectID{pid}, tagName, link), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, nil, err - } - - rl := new(ReleaseLink) - resp, err := s.client.Do(req, rl) - if err != nil { - return nil, resp, err - } - - return rl, resp, nil } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/releases.go b/vendor/gitlab.com/gitlab-org/api/client-go/releases.go index 8810690db6..801e890f00 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/releases.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/releases.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -48,44 +47,55 @@ var _ ReleasesServiceInterface = (*ReleasesService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/releases/#list-releases type Release struct { - TagName string `json:"tag_name"` - Name string `json:"name"` - Description string `json:"description"` - DescriptionHTML string `json:"description_html"` - CreatedAt *time.Time `json:"created_at"` - ReleasedAt *time.Time `json:"released_at"` - Author struct { - ID int `json:"id"` - Name string `json:"name"` - Username string `json:"username"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - 
} `json:"author"` + TagName string `json:"tag_name"` + Name string `json:"name"` + Description string `json:"description"` + DescriptionHTML string `json:"description_html"` + CreatedAt *time.Time `json:"created_at"` + ReleasedAt *time.Time `json:"released_at"` + Author BasicUser `json:"author"` Commit Commit `json:"commit"` Milestones []*ReleaseMilestone `json:"milestones"` UpcomingRelease bool `json:"upcoming_release"` CommitPath string `json:"commit_path"` TagPath string `json:"tag_path"` - Assets struct { - Count int `json:"count"` - Sources []struct { - Format string `json:"format"` - URL string `json:"url"` - } `json:"sources"` - Links []*ReleaseLink `json:"links"` - EvidenceFilePath string `json:"evidence_file_path"` - } `json:"assets"` - Evidences []*ReleaseEvidence `json:"evidences"` - Links struct { - ClosedIssueURL string `json:"closed_issues_url"` - ClosedMergeRequest string `json:"closed_merge_requests_url"` - EditURL string `json:"edit_url"` - MergedMergeRequest string `json:"merged_merge_requests_url"` - OpenedIssues string `json:"opened_issues_url"` - OpenedMergeRequest string `json:"opened_merge_requests_url"` - Self string `json:"self"` - } `json:"_links"` + Assets ReleaseAssets `json:"assets"` + Evidences []*ReleaseEvidence `json:"evidences"` + Links ReleaseLinks `json:"_links"` +} + +// ReleaseAssets represents a project release assets. +// +// GitLab API docs: +// https://docs.gitlab.com/api/releases/#list-releases +type ReleaseAssets struct { + Count int64 `json:"count"` + Sources []ReleaseAssetsSource `json:"sources"` + Links []*ReleaseLink `json:"links"` + EvidenceFilePath string `json:"evidence_file_path"` +} + +// ReleaseAssetsSource represents a project release assets source. +// +// GitLab API docs: +// https://docs.gitlab.com/api/releases/#list-releases +type ReleaseAssetsSource struct { + Format string `json:"format"` + URL string `json:"url"` +} + +// ReleaseLinks represents a project release links. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/releases/#list-releases +type ReleaseLinks struct { + ClosedIssueURL string `json:"closed_issues_url"` + ClosedMergeRequest string `json:"closed_merge_requests_url"` + EditURL string `json:"edit_url"` + MergedMergeRequest string `json:"merged_merge_requests_url"` + OpenedIssues string `json:"opened_issues_url"` + OpenedMergeRequest string `json:"opened_merge_requests_url"` + Self string `json:"self"` } // ReleaseMilestone represents a project release milestone. @@ -93,9 +103,9 @@ type Release struct { // GitLab API docs: // https://docs.gitlab.com/api/releases/#list-releases type ReleaseMilestone struct { - ID int `json:"id"` - IID int `json:"iid"` - ProjectID int `json:"project_id"` + ID int64 `json:"id"` + IID int64 `json:"iid"` + ProjectID int64 `json:"project_id"` Title string `json:"title"` Description string `json:"description"` State string `json:"state"` @@ -113,8 +123,8 @@ type ReleaseMilestone struct { // GitLab API docs: // https://docs.gitlab.com/api/releases/#list-releases type ReleaseMilestoneIssueStats struct { - Total int `json:"total"` - Closed int `json:"closed"` + Total int64 `json:"total"` + Closed int64 `json:"closed"` } // ReleaseEvidence represents a project release's evidence. 
@@ -143,24 +153,11 @@ type ListReleasesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/releases/#list-releases func (s *ReleasesService) ListReleases(pid any, opt *ListReleasesOptions, options ...RequestOptionFunc) ([]*Release, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var rs []*Release - resp, err := s.client.Do(req, &rs) - if err != nil { - return nil, resp, err - } - - return rs, resp, nil + return do[[]*Release](s.client, + withPath("projects/%s/releases", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetRelease returns a single release, identified by a tag name. @@ -168,24 +165,10 @@ func (s *ReleasesService) ListReleases(pid any, opt *ListReleasesOptions, option // GitLab API docs: // https://docs.gitlab.com/api/releases/#get-a-release-by-a-tag-name func (s *ReleasesService) GetRelease(pid any, tagName string, options ...RequestOptionFunc) (*Release, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases/%s", PathEscape(project), PathEscape(tagName)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - r := new(Release) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil + return do[*Release](s.client, + withPath("projects/%s/releases/%s", ProjectID{pid}, tagName), + withRequestOpts(options...), + ) } // GetLatestRelease returns the latest release for the project. 
@@ -193,24 +176,10 @@ func (s *ReleasesService) GetRelease(pid any, tagName string, options ...Request // GitLab API docs: // https://docs.gitlab.com/api/releases/#get-the-latest-release func (s *ReleasesService) GetLatestRelease(pid any, options ...RequestOptionFunc) (*Release, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases/permalink/latest", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - r := new(Release) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, err + return do[*Release](s.client, + withPath("projects/%s/releases/permalink/latest", ProjectID{pid}), + withRequestOpts(options...), + ) } // CreateReleaseOptions represents CreateRelease() options. @@ -254,24 +223,12 @@ type ReleaseAssetLinkOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/releases/#create-a-release func (s *ReleasesService) CreateRelease(pid any, opts *CreateReleaseOptions, options ...RequestOptionFunc) (*Release, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opts, options) - if err != nil { - return nil, nil, err - } - - r := new(Release) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil + return do[*Release](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/releases", ProjectID{pid}), + withAPIOpts(opts), + withRequestOpts(options...), + ) } // UpdateReleaseOptions represents UpdateRelease() options. 
@@ -290,24 +247,12 @@ type UpdateReleaseOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/releases/#update-a-release func (s *ReleasesService) UpdateRelease(pid any, tagName string, opts *UpdateReleaseOptions, options ...RequestOptionFunc) (*Release, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases/%s", PathEscape(project), PathEscape(tagName)) - - req, err := s.client.NewRequest(http.MethodPut, u, opts, options) - if err != nil { - return nil, nil, err - } - - r := new(Release) - resp, err := s.client.Do(req, &r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil + return do[*Release](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/releases/%s", ProjectID{pid}, tagName), + withAPIOpts(opts), + withRequestOpts(options...), + ) } // DeleteRelease deletes a release. @@ -315,22 +260,9 @@ func (s *ReleasesService) UpdateRelease(pid any, tagName string, opts *UpdateRel // GitLab API docs: // https://docs.gitlab.com/api/releases/#delete-a-release func (s *ReleasesService) DeleteRelease(pid any, tagName string, options ...RequestOptionFunc) (*Release, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases/%s", PathEscape(project), PathEscape(tagName)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, nil, err - } - - r := new(Release) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil + return do[*Release](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/releases/%s", ProjectID{pid}, tagName), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/repositories.go b/vendor/gitlab.com/gitlab-org/api/client-go/repositories.go index 05e16e6f3c..dafbe57d9c 100644 --- 
a/vendor/gitlab.com/gitlab-org/api/client-go/repositories.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/repositories.go @@ -21,20 +21,62 @@ import ( "fmt" "io" "net/http" - "net/url" ) type ( RepositoriesServiceInterface interface { + // ListTree gets a list of repository files and directories in a project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/repositories/#list-repository-tree ListTree(pid any, opt *ListTreeOptions, options ...RequestOptionFunc) ([]*TreeNode, *Response, error) + // Blob gets information about blob in repository like size and content. Note + // that blob content is Base64 encoded. + // + // GitLab API docs: + // https://docs.gitlab.com/api/repositories/#get-a-blob-from-repository Blob(pid any, sha string, options ...RequestOptionFunc) ([]byte, *Response, error) + // RawBlobContent gets the raw file contents for a blob by blob SHA. + // + // GitLab API docs: + // https://docs.gitlab.com/api/repositories/#raw-blob-content RawBlobContent(pid any, sha string, options ...RequestOptionFunc) ([]byte, *Response, error) + // Archive gets an archive of the repository. + // + // GitLab API docs: + // https://docs.gitlab.com/api/repositories/#get-file-archive Archive(pid any, opt *ArchiveOptions, options ...RequestOptionFunc) ([]byte, *Response, error) + // StreamArchive streams an archive of the repository to the provided + // io.Writer. + // + // GitLab API docs: + // https://docs.gitlab.com/api/repositories/#get-file-archive StreamArchive(pid any, w io.Writer, opt *ArchiveOptions, options ...RequestOptionFunc) (*Response, error) + // Compare compares branches, tags or commits. + // + // GitLab API docs: + // https://docs.gitlab.com/api/repositories/#compare-branches-tags-or-commits Compare(pid any, opt *CompareOptions, options ...RequestOptionFunc) (*Compare, *Response, error) + // Contributors gets the repository contributors list. 
+ // + // GitLab API docs: https://docs.gitlab.com/api/repositories/#contributors Contributors(pid any, opt *ListContributorsOptions, options ...RequestOptionFunc) ([]*Contributor, *Response, error) + // MergeBase gets the common ancestor for 2 refs (commit SHAs, branch + // names or tags). + // + // GitLab API docs: + // https://docs.gitlab.com/api/repositories/#merge-base MergeBase(pid any, opt *MergeBaseOptions, options ...RequestOptionFunc) (*Commit, *Response, error) + // AddChangelog generates changelog data based on commits in a repository. + // + // GitLab API docs: + // https://docs.gitlab.com/api/repositories/#add-changelog-data-to-a-changelog-file AddChangelog(pid any, opt *AddChangelogOptions, options ...RequestOptionFunc) (*Response, error) + // GenerateChangelogData generates changelog data based on commits in a + // repository, without committing them to a changelog file. + // + // GitLab API docs: + // https://docs.gitlab.com/api/repositories/#generate-changelog-data GenerateChangelogData(pid any, opt GenerateChangelogDataOptions, options ...RequestOptionFunc) (*ChangelogData, *Response, error) } @@ -75,80 +117,34 @@ type ListTreeOptions struct { Recursive *bool `url:"recursive,omitempty" json:"recursive,omitempty"` } -// ListTree gets a list of repository files and directories in a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/repositories/#list-repository-tree func (s *RepositoriesService) ListTree(pid any, opt *ListTreeOptions, options ...RequestOptionFunc) ([]*TreeNode, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/tree", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var t []*TreeNode - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil + return do[[]*TreeNode](s.client, + withPath("projects/%s/repository/tree", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// Blob gets information about blob in repository like size and content. Note -// that blob content is Base64 encoded. -// -// GitLab API docs: -// https://docs.gitlab.com/api/repositories/#get-a-blob-from-repository func (s *RepositoriesService) Blob(pid any, sha string, options ...RequestOptionFunc) ([]byte, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/blobs/%s", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) + buf, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/repository/blobs/%s", ProjectID{pid}, sha), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return b.Bytes(), resp, err + return buf.Bytes(), resp, nil } -// RawBlobContent gets the raw file contents for a blob by blob SHA. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/repositories/#raw-blob-content func (s *RepositoriesService) RawBlobContent(pid any, sha string, options ...RequestOptionFunc) ([]byte, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/blobs/%s/raw", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) + buf, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/repository/blobs/%s/raw", ProjectID{pid}, sha), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return b.Bytes(), resp, err + return buf.Bytes(), resp, nil } // ArchiveOptions represents the available Archive() options. @@ -161,41 +157,23 @@ type ArchiveOptions struct { SHA *string `url:"sha,omitempty" json:"sha,omitempty"` } -// Archive gets an archive of the repository. -// -// GitLab API docs: -// https://docs.gitlab.com/api/repositories/#get-file-archive func (s *RepositoriesService) Archive(pid any, opt *ArchiveOptions, options ...RequestOptionFunc) ([]byte, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/archive", PathEscape(project)) - - // Set an optional format for the archive. + suffix := "" if opt != nil && opt.Format != nil { - u = fmt.Sprintf("%s.%s", u, *opt.Format) - } - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err + suffix = "." 
+ *opt.Format } - var b bytes.Buffer - resp, err := s.client.Do(req, &b) + buf, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/repository/archive%s", ProjectID{pid}, NoEscape{suffix}), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return b.Bytes(), resp, err + return buf.Bytes(), resp, nil } -// StreamArchive streams an archive of the repository to the provided -// io.Writer. -// -// GitLab API docs: -// https://docs.gitlab.com/api/repositories/#get-file-archive func (s *RepositoriesService) StreamArchive(pid any, w io.Writer, opt *ArchiveOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -244,29 +222,12 @@ type CompareOptions struct { Unidiff *bool `url:"unidiff,omitempty" json:"unidiff,omitempty"` } -// Compare compares branches, tags or commits. -// -// GitLab API docs: -// https://docs.gitlab.com/api/repositories/#compare-branches-tags-or-commits func (s *RepositoriesService) Compare(pid any, opt *CompareOptions, options ...RequestOptionFunc) (*Compare, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/compare", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - c := new(Compare) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil + return do[*Compare](s.client, + withPath("projects/%s/repository/compare", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // Contributor represents a GitLab contributor. 
@@ -275,9 +236,9 @@ func (s *RepositoriesService) Compare(pid any, opt *CompareOptions, options ...R type Contributor struct { Name string `json:"name"` Email string `json:"email"` - Commits int `json:"commits"` - Additions int `json:"additions"` - Deletions int `json:"deletions"` + Commits int64 `json:"commits"` + Additions int64 `json:"additions"` + Deletions int64 `json:"deletions"` } func (c Contributor) String() string { @@ -293,28 +254,12 @@ type ListContributorsOptions struct { Sort *string `url:"sort,omitempty" json:"sort,omitempty"` } -// Contributors gets the repository contributors list. -// -// GitLab API docs: https://docs.gitlab.com/api/repositories/#contributors func (s *RepositoriesService) Contributors(pid any, opt *ListContributorsOptions, options ...RequestOptionFunc) ([]*Contributor, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/contributors", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var c []*Contributor - resp, err := s.client.Do(req, &c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil + return do[[]*Contributor](s.client, + withPath("projects/%s/repository/contributors", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // MergeBaseOptions represents the available MergeBase() options. @@ -325,30 +270,12 @@ type MergeBaseOptions struct { Ref *[]string `url:"refs[],omitempty" json:"refs,omitempty"` } -// MergeBase gets the common ancestor for 2 refs (commit SHAs, branch -// names or tags). 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/repositories/#merge-base func (s *RepositoriesService) MergeBase(pid any, opt *MergeBaseOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/merge_base", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil + return do[*Commit](s.client, + withPath("projects/%s/repository/merge_base", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // AddChangelogOptions represents the available AddChangelog() options. @@ -367,23 +294,14 @@ type AddChangelogOptions struct { Trailer *string `url:"trailer,omitempty" json:"trailer,omitempty"` } -// AddChangelog generates changelog data based on commits in a repository. -// -// Gitlab API docs: -// https://docs.gitlab.com/api/repositories/#add-changelog-data-to-a-changelog-file func (s *RepositoriesService) AddChangelog(pid any, opt *AddChangelogOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/repository/changelog", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/repository/changelog", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // ChangelogData represents the generated changelog data. 
@@ -412,28 +330,10 @@ type GenerateChangelogDataOptions struct { Trailer *string `url:"trailer,omitempty" json:"trailer,omitempty"` } -// GenerateChangelogData generates changelog data based on commits in a -// repository, without committing them to a changelog file. -// -// Gitlab API docs: -// https://docs.gitlab.com/api/repositories/#generate-changelog-data func (s *RepositoriesService) GenerateChangelogData(pid any, opt GenerateChangelogDataOptions, options ...RequestOptionFunc) (*ChangelogData, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/changelog", project) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - cd := new(ChangelogData) - resp, err := s.client.Do(req, cd) - if err != nil { - return nil, resp, err - } - - return cd, resp, nil + return do[*ChangelogData](s.client, + withPath("projects/%s/repository/changelog", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/repository_files.go b/vendor/gitlab.com/gitlab-org/api/client-go/repository_files.go index 32d1a49881..69a022bd5b 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/repository_files.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/repository_files.go @@ -18,7 +18,6 @@ package gitlab import ( "bytes" - "fmt" "net/http" "strconv" "time" @@ -53,7 +52,7 @@ var _ RepositoryFilesServiceInterface = (*RepositoryFilesService)(nil) type File struct { FileName string `json:"file_name"` FilePath string `json:"file_path"` - Size int `json:"size"` + Size int64 `json:"size"` Encoding string `json:"encoding"` Content string `json:"content"` ExecuteFilemode bool `json:"execute_filemode"` @@ -82,28 +81,11 @@ type GetFileOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/repository_files/#get-file-from-repository func (s *RepositoryFilesService) 
GetFile(pid any, fileName string, opt *GetFileOptions, options ...RequestOptionFunc) (*File, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s", - PathEscape(project), - PathEscape(fileName), + return do[*File](s.client, + withPath("projects/%s/repository/files/%s", ProjectID{pid}, fileName), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - f := new(File) - resp, err := s.client.Do(req, f) - if err != nil { - return nil, resp, err - } - - return f, resp, nil } // GetFileMetaDataOptions represents the available GetFileMetaData() options. @@ -120,22 +102,12 @@ type GetFileMetaDataOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/repository_files/#get-file-from-repository func (s *RepositoryFilesService) GetFileMetaData(pid any, fileName string, opt *GetFileMetaDataOptions, options ...RequestOptionFunc) (*File, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s", - PathEscape(project), - PathEscape(fileName), + _, resp, err := do[none](s.client, + withMethod(http.MethodHead), + withPath("projects/%s/repository/files/%s", ProjectID{pid}, fileName), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodHead, u, opt, options) - if err != nil { - return nil, nil, err - } - - resp, err := s.client.Do(req, nil) if err != nil { return nil, resp, err } @@ -164,7 +136,7 @@ func getMetaDataFileFromHeaders(resp *Response) (*File, error) { } if sizeString := resp.Header.Get("X-Gitlab-Size"); sizeString != "" { - size, err := strconv.Atoi(sizeString) + size, err := strconv.ParseInt(sizeString, 10, 64) if err != nil { return nil, err } @@ -179,32 +151,42 @@ func getMetaDataFileFromHeaders(resp 
*Response) (*File, error) { // GitLab API docs: // https://docs.gitlab.com/api/repository_files/#get-file-blame-from-repository type FileBlameRange struct { - Commit struct { - ID string `json:"id"` - ParentIDs []string `json:"parent_ids"` - Message string `json:"message"` - AuthoredDate *time.Time `json:"authored_date"` - AuthorName string `json:"author_name"` - AuthorEmail string `json:"author_email"` - CommittedDate *time.Time `json:"committed_date"` - CommitterName string `json:"committer_name"` - CommitterEmail string `json:"committer_email"` - } `json:"commit"` - Lines []string `json:"lines"` + Commit FileBlameRangeCommit `json:"commit"` + Lines []string `json:"lines"` } func (b FileBlameRange) String() string { return Stringify(b) } +// FileBlameRangeCommit represents one item of blame information's commit. +// +// GitLab API docs: +// https://docs.gitlab.com/api/repository_files/#get-file-blame-from-repository +type FileBlameRangeCommit struct { + ID string `json:"id"` + ParentIDs []string `json:"parent_ids"` + Message string `json:"message"` + AuthoredDate *time.Time `json:"authored_date"` + AuthorName string `json:"author_name"` + AuthorEmail string `json:"author_email"` + CommittedDate *time.Time `json:"committed_date"` + CommitterName string `json:"committer_name"` + CommitterEmail string `json:"committer_email"` +} + +func (c FileBlameRangeCommit) String() string { + return Stringify(c) +} + // GetFileBlameOptions represents the available GetFileBlame() options. 
// // GitLab API docs: // https://docs.gitlab.com/api/repository_files/#get-file-blame-from-repository type GetFileBlameOptions struct { Ref *string `url:"ref,omitempty" json:"ref,omitempty"` - RangeStart *int `url:"range[start],omitempty" json:"range[start],omitempty"` - RangeEnd *int `url:"range[end],omitempty" json:"range[end],omitempty"` + RangeStart *int64 `url:"range[start],omitempty" json:"range[start],omitempty"` + RangeEnd *int64 `url:"range[end],omitempty" json:"range[end],omitempty"` } // GetFileBlame allows you to receive blame information. Each blame range @@ -213,28 +195,11 @@ type GetFileBlameOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/repository_files/#get-file-blame-from-repository func (s *RepositoryFilesService) GetFileBlame(pid any, file string, opt *GetFileBlameOptions, options ...RequestOptionFunc) ([]*FileBlameRange, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s/blame", - PathEscape(project), - PathEscape(file), + return do[[]*FileBlameRange](s.client, + withPath("projects/%s/repository/files/%s/blame", ProjectID{pid}, file), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var br []*FileBlameRange - resp, err := s.client.Do(req, &br) - if err != nil { - return nil, resp, err - } - - return br, resp, nil } // GetRawFileOptions represents the available GetRawFile() options. 
@@ -251,28 +216,15 @@ type GetRawFileOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/repository_files/#get-raw-file-from-repository func (s *RepositoryFilesService) GetRawFile(pid any, fileName string, opt *GetRawFileOptions, options ...RequestOptionFunc) ([]byte, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s/raw", - PathEscape(project), - PathEscape(fileName), + buf, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/repository/files/%s/raw", ProjectID{pid}, fileName), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var f bytes.Buffer - resp, err := s.client.Do(req, &f) if err != nil { return nil, resp, err } - - return f.Bytes(), resp, err + return buf.Bytes(), resp, nil } // GetRawFileMetaData gets the metadata of a raw file from a repository. 
@@ -280,22 +232,12 @@ func (s *RepositoryFilesService) GetRawFile(pid any, fileName string, opt *GetRa // GitLab API docs: // https://docs.gitlab.com/api/repository_files/#get-raw-file-from-repository func (s *RepositoryFilesService) GetRawFileMetaData(pid any, fileName string, opt *GetRawFileOptions, options ...RequestOptionFunc) (*File, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s/raw", - PathEscape(project), - PathEscape(fileName), + _, resp, err := do[none](s.client, + withMethod(http.MethodHead), + withPath("projects/%s/repository/files/%s/raw", ProjectID{pid}, fileName), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodHead, u, opt, options) - if err != nil { - return nil, nil, err - } - - resp, err := s.client.Do(req, nil) if err != nil { return nil, resp, err } @@ -340,28 +282,12 @@ type CreateFileOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/repository_files/#create-new-file-in-repository func (s *RepositoryFilesService) CreateFile(pid any, fileName string, opt *CreateFileOptions, options ...RequestOptionFunc) (*FileInfo, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s", - PathEscape(project), - PathEscape(fileName), + return do[*FileInfo](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/repository/files/%s", ProjectID{pid}, fileName), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - f := new(FileInfo) - resp, err := s.client.Do(req, f) - if err != nil { - return nil, resp, err - } - - return f, resp, nil } // UpdateFileOptions represents the available UpdateFile() options. 
@@ -385,28 +311,12 @@ type UpdateFileOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/repository_files/#update-existing-file-in-repository func (s *RepositoryFilesService) UpdateFile(pid any, fileName string, opt *UpdateFileOptions, options ...RequestOptionFunc) (*FileInfo, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s", - PathEscape(project), - PathEscape(fileName), + return do[*FileInfo](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/repository/files/%s", ProjectID{pid}, fileName), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - f := new(FileInfo) - resp, err := s.client.Do(req, f) - if err != nil { - return nil, resp, err - } - - return f, resp, nil } // DeleteFileOptions represents the available DeleteFile() options. @@ -427,20 +337,11 @@ type DeleteFileOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/repository_files/#delete-existing-file-in-repository func (s *RepositoryFilesService) DeleteFile(pid any, fileName string, opt *DeleteFileOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s", - PathEscape(project), - PathEscape(fileName), + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/repository/files/%s", ProjectID{pid}, fileName), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/repository_submodules.go b/vendor/gitlab.com/gitlab-org/api/client-go/repository_submodules.go index 
84a0491d42..7f08fb6a05 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/repository_submodules.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/repository_submodules.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -76,26 +75,10 @@ type UpdateSubmoduleOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/repository_submodules/#update-existing-submodule-reference-in-repository func (s *RepositorySubmodulesService) UpdateSubmodule(pid any, submodule string, opt *UpdateSubmoduleOptions, options ...RequestOptionFunc) (*SubmoduleCommit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/submodules/%s", - PathEscape(project), - PathEscape(submodule), + return do[*SubmoduleCommit](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/repository/submodules/%s", ProjectID{pid}, submodule), + withAPIOpts(opt), + withRequestOpts(options...), ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - sc := new(SubmoduleCommit) - resp, err := s.client.Do(req, sc) - if err != nil { - return nil, resp, err - } - - return sc, resp, nil } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/request_handler.go b/vendor/gitlab.com/gitlab-org/api/client-go/request_handler.go new file mode 100644 index 0000000000..7ee3b334fe --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/request_handler.go @@ -0,0 +1,276 @@ +package gitlab + +import ( + "fmt" + "io" + "net/http" + "reflect" + "strings" + + "github.com/hashicorp/go-retryablehttp" +) + +type Pather interface { + forPath() (string, error) +} + +type ProjectID struct { + Value any +} + +func (i ProjectID) forPath() (string, error) { + id, err := parseID(i.Value) + if err != nil { + return "", err + } + + return PathEscape(id), nil +} + +type GroupID struct { + Value any +} + +func (i GroupID) forPath() (string, 
error) { + id, err := parseID(i.Value) + if err != nil { + return "", err + } + + return PathEscape(id), nil +} + +type RunnerID struct { + Value any +} + +func (i RunnerID) forPath() (string, error) { + id, err := parseID(i.Value) + if err != nil { + return "", err + } + + return PathEscape(id), nil +} + +// UserID represents a user identifier for API paths. It accepts either a +// numeric user ID or a username string. If a username is provided with a +// leading "@" character (e.g., "@johndoe"), the "@" will be trimmed. +type UserID struct { + Value any +} + +func (i UserID) forPath() (string, error) { + id, err := parseID(i.Value) + if err != nil { + return "", err + } + + return PathEscape(strings.TrimPrefix(id, "@")), nil +} + +type LabelID struct { + Value any +} + +func (i LabelID) forPath() (string, error) { + id, err := parseID(i.Value) + if err != nil { + return "", err + } + + return PathEscape(id), nil +} + +type NoEscape struct { + Value string +} + +func (n NoEscape) forPath() (string, error) { + return n.Value, nil +} + +type doConfig struct { + method string + path string + apiOpts any + requestOpts []RequestOptionFunc + upload *uploadConfig +} + +type uploadConfig struct { + content io.Reader + filename string + uploadType UploadType +} + +type doOption func(c *doConfig) error + +func withMethod(method string) doOption { + return func(c *doConfig) error { + c.method = method + return nil + } +} + +func withPath(path string, args ...any) doOption { + return func(c *doConfig) error { + as := make([]any, len(args)) + for i, a := range args { + switch v := a.(type) { + case Pather: + path, err := v.forPath() + if err != nil { + return err + } + as[i] = path + case string: + as[i] = PathEscape(v) + default: + as[i] = v + } + } + c.path = fmt.Sprintf(path, as...) 
+ + return nil + } +} + +func withAPIOpts(o any) doOption { + return func(c *doConfig) error { + c.apiOpts = o + return nil + } +} + +func withRequestOpts(o ...RequestOptionFunc) doOption { + return func(c *doConfig) error { + c.requestOpts = o + return nil + } +} + +func withUpload(content io.Reader, filename string, uploadType UploadType) doOption { + return func(c *doConfig) error { + c.upload = &uploadConfig{ + content: content, + filename: filename, + uploadType: uploadType, + } + return nil + } +} + +// none is a sentinel type to signal that a request performed with do does not return a value. +type none struct{} + +// do constructs an API requests, performs it and processes the response. +// +// Use the opts to configure the request. +// If the response body shouldn't be handled, use the none sentinel type +// and ignore the first return argument. +// +// Example: +// +// // Get Request to return single *Agent: +// return do[*Agent](s.client, +// +// withPath("projects/%s/cluster_agents/%d", project, id), +// withRequestOpts(options...), +// +// ) +// +// // Get Request to return multiple []*Agents +// return do[[]*Agent](s.client, +// +// withPath("projects/%s/cluster_agents", project), +// withRequestOpts(options...), +// +// ) +// +// // Post Request to create Agent and return *Agents +// return do[*Agent](s.client, +// +// withMethod(http.MethodPost), +// withPath("projects/%s/cluster_agents", project), +// withAPIOpts(opt), +// withRequestOpts(options...), +// +// ) +// +// // Delete Request that returns nothing: +// _, resp, err := do[none](s.client, +// +// withMethod(http.MethodDelete), +// withPath("projects/%s/cluster_agents/%d", project, id), +// withRequestOpts(options...), +// +// ) +// +// // Upload file Request: +// return do[*WikiAttachment](s.client, +// +// withMethod(http.MethodPost), +// withPath("projects/%s/wikis/attachments", project), +// withUpload(content, filename, UploadFile), +// withAPIOpts(opt), +// withRequestOpts(options...), 
+// +// ) +func do[T any](client *Client, opts ...doOption) (T, *Response, error) { + // default config + config := &doConfig{ + method: http.MethodGet, + apiOpts: nil, + } + + // apply options to config + for _, f := range opts { + err := f(config) + if err != nil { + var z T + return z, nil, err + } + } + + var ( + req *retryablehttp.Request + err error + ) + switch { + case config.upload != nil: + req, err = client.UploadRequest( + config.method, + config.path, + config.upload.content, + config.upload.filename, + config.upload.uploadType, + config.apiOpts, + config.requestOpts, + ) + default: + req, err = client.NewRequest(config.method, config.path, config.apiOpts, config.requestOpts) + } + + if err != nil { + var z T + return z, nil, err + } + + var ( + as T + resp *Response + ) + if reflect.TypeOf(as) == reflect.TypeFor[none]() { + resp, err = client.Do(req, nil) + } else { + resp, err = client.Do(req, &as) + } + + if err != nil { + var z T + return z, resp, err + } + + return as, resp, nil +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/request_options.go b/vendor/gitlab.com/gitlab-org/api/client-go/request_options.go index 166c1d92c8..973477e9bb 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/request_options.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/request_options.go @@ -18,6 +18,8 @@ package gitlab import ( "context" + "encoding/json" + "fmt" "net/url" "strconv" @@ -90,24 +92,92 @@ func WithKeysetPaginationParameters(nextLink string) RequestOptionFunc { // WithOffsetPaginationParameters takes a page number and modifies the request // to use that page for offset-based pagination, overriding any existing page value. 
-func WithOffsetPaginationParameters(page int) RequestOptionFunc { +func WithOffsetPaginationParameters(page int64) RequestOptionFunc { return func(req *retryablehttp.Request) error { q := req.URL.Query() q.Del("page") - q.Add("page", strconv.Itoa(page)) + q.Add("page", strconv.FormatInt(page, 10)) req.URL.RawQuery = q.Encode() return nil } } -// WithSudo takes either a username or user ID and sets the SUDO request header. +// withGraphQLPaginationParamters takes a PageInfo from a GraphQL response and +// modifies the request to use that cursor for GraphQL pagination, overriding +// any existing "after" variable. +// +// GraphQL API docs: +// https://docs.gitlab.com/development/graphql_guide/pagination/ +func withGraphQLPaginationParamters(pi PageInfo) RequestOptionFunc { + if !pi.HasNextPage { + return nil + } + + return func(req *retryablehttp.Request) error { + var q GraphQLQuery + + data, err := req.BodyBytes() + if err != nil { + return fmt.Errorf("reading request body failed: %w", err) + } + + if err := json.Unmarshal(data, &q); err != nil { + return fmt.Errorf("decoding request body failed: %w", err) + } + + if q.Variables == nil { + q.Variables = make(map[string]any) + } + + q.Variables["after"] = pi.EndCursor + + data, err = json.Marshal(q) + if err != nil { + return fmt.Errorf("encoding request body failed: %w", err) + } + + return req.SetBody(data) + } +} + +// WithNext returns a RequestOptionFunc that configures the next page of a paginated +// request based on pagination metadata from a previous response. 
It automatically +// detects and handles all three pagination styles used by GitLab's APIs: +// +// - GraphQL cursor pagination: Uses PageInfo.EndCursor with the "after" variable +// - REST keyset pagination: Extracts parameters from the "next" link header +// - REST offset pagination: Uses the NextPage number with "page" parameter +// +// If multiple pagination styles are present in the response, keyset/cursor pagination +// is preferred over offset pagination for better performance and consistency. +// +// The boolean return value indicates whether more pages are available, similar to +// the comma-ok idiom used for map accesses. When false, the returned +// RequestOptionFunc is nil. +func WithNext(resp *Response) (RequestOptionFunc, bool) { + switch { + case resp.PageInfo != nil: + return withGraphQLPaginationParamters(*resp.PageInfo), resp.PageInfo.HasNextPage + + case resp.NextLink != "": + return WithKeysetPaginationParameters(resp.NextLink), true + + case resp.NextPage != 0: + return WithOffsetPaginationParameters(resp.NextPage), true + + default: + return nil, false + } +} + +// WithSudo takes either a username or user ID and sets the Sudo request header. 
func WithSudo(uid any) RequestOptionFunc { return func(req *retryablehttp.Request) error { user, err := parseID(uid) if err != nil { return err } - req.Header.Set("SUDO", user) + req.Header.Set("Sudo", user) return nil } } @@ -117,11 +187,11 @@ func WithToken(authType AuthType, token string) RequestOptionFunc { return func(req *retryablehttp.Request) error { switch authType { case JobToken: - req.Header.Set("JOB-TOKEN", token) + req.Header.Set("Job-Token", token) case OAuthToken: req.Header.Set("Authorization", "Bearer "+token) case PrivateToken: - req.Header.Set("PRIVATE-TOKEN", token) + req.Header.Set("Private-Token", token) } return nil } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/resource_group.go b/vendor/gitlab.com/gitlab-org/api/client-go/resource_group.go index 6b851143f6..b6c3c979fb 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/resource_group.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/resource_group.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -47,7 +46,7 @@ var _ ResourceGroupServiceInterface = (*ResourceGroupService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/resource_groups/ type ResourceGroup struct { - ID int `json:"id"` + ID int64 `json:"id"` Key string `json:"key"` ProcessMode string `json:"process_mode"` CreatedAt *time.Time `json:"created_at"` @@ -68,24 +67,10 @@ func (rg ResourceGroup) String() string { // GitLab API docs: // https://docs.gitlab.com/api/resource_groups/#get-all-resource-groups-for-a-project func (s *ResourceGroupService) GetAllResourceGroupsForAProject(pid any, options ...RequestOptionFunc) ([]*ResourceGroup, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/resource_groups", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var rgs []*ResourceGroup - resp, err := s.client.Do(req, &rgs) - if 
err != nil { - return nil, resp, err - } - - return rgs, resp, nil + return do[[]*ResourceGroup](s.client, + withPath("projects/%s/resource_groups", ProjectID{pid}), + withRequestOpts(options...), + ) } // GetASpecificResourceGroup allows you to get a specific @@ -94,24 +79,10 @@ func (s *ResourceGroupService) GetAllResourceGroupsForAProject(pid any, options // GitLab API docs: // https://docs.gitlab.com/api/resource_groups/#get-a-specific-resource-group func (s *ResourceGroupService) GetASpecificResourceGroup(pid any, key string, options ...RequestOptionFunc) (*ResourceGroup, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/resource_groups/%s", PathEscape(project), key) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - rg := new(ResourceGroup) - resp, err := s.client.Do(req, rg) - if err != nil { - return nil, resp, err - } - - return rg, resp, nil + return do[*ResourceGroup](s.client, + withPath("projects/%s/resource_groups/%s", ProjectID{pid}, key), + withRequestOpts(options...), + ) } // ListUpcomingJobsForASpecificResourceGroup allows you to get all @@ -120,24 +91,10 @@ func (s *ResourceGroupService) GetASpecificResourceGroup(pid any, key string, op // GitLab API docs: // https://docs.gitlab.com/api/resource_groups/#list-upcoming-jobs-for-a-specific-resource-group func (s *ResourceGroupService) ListUpcomingJobsForASpecificResourceGroup(pid any, key string, options ...RequestOptionFunc) ([]*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/resource_groups/%s/upcoming_jobs", PathEscape(project), key) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var js []*Job - resp, err := s.client.Do(req, &js) - if err != nil { - return nil, resp, err - } - - return js, resp, nil 
+ return do[[]*Job](s.client, + withPath("projects/%s/resource_groups/%s/upcoming_jobs", ProjectID{pid}, key), + withRequestOpts(options...), + ) } // EditAnExistingResourceGroupOptions represents the available @@ -155,22 +112,10 @@ type EditAnExistingResourceGroupOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/resource_groups/#edit-an-existing-resource-group func (s *ResourceGroupService) EditAnExistingResourceGroup(pid any, key string, opts *EditAnExistingResourceGroupOptions, options ...RequestOptionFunc) (*ResourceGroup, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/resource_groups/%s", PathEscape(project), key) - - req, err := s.client.NewRequest(http.MethodPut, u, opts, options) - if err != nil { - return nil, nil, err - } - - rg := new(ResourceGroup) - resp, err := s.client.Do(req, rg) - if err != nil { - return nil, resp, err - } - - return rg, resp, nil + return do[*ResourceGroup](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/resource_groups/%s", ProjectID{pid}, key), + withAPIOpts(opts), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/resource_iteration_events.go b/vendor/gitlab.com/gitlab-org/api/client-go/resource_iteration_events.go index bb52fc0eea..85499efc6f 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/resource_iteration_events.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/resource_iteration_events.go @@ -17,15 +17,13 @@ package gitlab import ( - "fmt" - "net/http" "time" ) type ( ResourceIterationEventsServiceInterface interface { - ListIssueIterationEvents(pid any, issue int, opt *ListIterationEventsOptions, options ...RequestOptionFunc) ([]*IterationEvent, *Response, error) - GetIssueIterationEvent(pid any, issue int, event int, options ...RequestOptionFunc) (*IterationEvent, *Response, error) + ListIssueIterationEvents(pid any, issue int64, opt 
*ListIterationEventsOptions, options ...RequestOptionFunc) ([]*IterationEvent, *Response, error) + GetIssueIterationEvent(pid any, issue int64, event int64, options ...RequestOptionFunc) (*IterationEvent, *Response, error) } // ResourceIterationEventsService handles communication with the event related @@ -43,11 +41,11 @@ var _ ResourceIterationEventsServiceInterface = (*ResourceIterationEventsService // // GitLab API docs: https://docs.gitlab.com/api/resource_iteration_events/ type IterationEvent struct { - ID int `json:"id"` + ID int64 `json:"id"` User *BasicUser `json:"user"` CreatedAt *time.Time `json:"created_at"` ResourceType string `json:"resource_type"` - ResourceID int `json:"resource_id"` + ResourceID int64 `json:"resource_id"` Iteration *Iteration `json:"iteration"` Action string `json:"action"` } @@ -56,13 +54,13 @@ type IterationEvent struct { // // GitLab API docs: https://docs.gitlab.com/api/resource_iteration_events/ type Iteration struct { - ID int `json:"id"` - IID int `json:"iid"` - Sequence int `json:"sequence"` - GroupID int `json:"group_id"` + ID int64 `json:"id"` + IID int64 `json:"iid"` + Sequence int64 `json:"sequence"` + GroupID int64 `json:"group_id"` Title string `json:"title"` Description string `json:"description"` - State int `json:"state"` + State int64 `json:"state"` CreatedAt *time.Time `json:"created_at"` UpdatedAt *time.Time `json:"updated_at"` DueDate *ISOTime `json:"due_date"` @@ -84,48 +82,21 @@ type ListIterationEventsOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/resource_iteration_events/#list-project-issue-iteration-events -func (s *ResourceIterationEventsService) ListIssueIterationEvents(pid any, issue int, opt *ListIterationEventsOptions, options ...RequestOptionFunc) ([]*IterationEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_iteration_events", PathEscape(project), issue) - - req, err := 
s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ies []*IterationEvent - resp, err := s.client.Do(req, &ies) - if err != nil { - return nil, resp, err - } - - return ies, resp, nil +func (s *ResourceIterationEventsService) ListIssueIterationEvents(pid any, issue int64, opt *ListIterationEventsOptions, options ...RequestOptionFunc) ([]*IterationEvent, *Response, error) { + return do[[]*IterationEvent](s.client, + withPath("projects/%s/issues/%d/resource_iteration_events", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetIssueIterationEvent gets a single issue iteration event. // // GitLab API docs: // https://docs.gitlab.com/api/resource_iteration_events/#get-single-issue-iteration-event -func (s *ResourceIterationEventsService) GetIssueIterationEvent(pid any, issue int, event int, options ...RequestOptionFunc) (*IterationEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_iteration_events/%d", PathEscape(project), issue, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ie := new(IterationEvent) - resp, err := s.client.Do(req, ie) - if err != nil { - return nil, resp, err - } - - return ie, resp, nil +func (s *ResourceIterationEventsService) GetIssueIterationEvent(pid any, issue int64, event int64, options ...RequestOptionFunc) (*IterationEvent, *Response, error) { + return do[*IterationEvent](s.client, + withPath("projects/%s/issues/%d/resource_iteration_events/%d", ProjectID{pid}, issue, event), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/resource_label_events.go b/vendor/gitlab.com/gitlab-org/api/client-go/resource_label_events.go index f37296748b..4a36332dd6 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/resource_label_events.go +++ 
b/vendor/gitlab.com/gitlab-org/api/client-go/resource_label_events.go @@ -17,22 +17,20 @@ package gitlab import ( - "fmt" - "net/http" "time" ) type ( ResourceLabelEventsServiceInterface interface { - ListIssueLabelEvents(pid any, issue int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) - GetIssueLabelEvent(pid any, issue int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) - ListMergeRequestsLabelEvents(pid any, request int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) - GetMergeRequestLabelEvent(pid any, request int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) + ListIssueLabelEvents(pid any, issue int64, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) + GetIssueLabelEvent(pid any, issue int64, event int64, options ...RequestOptionFunc) (*LabelEvent, *Response, error) + ListMergeRequestsLabelEvents(pid any, request int64, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) + GetMergeRequestLabelEvent(pid any, request int64, event int64, options ...RequestOptionFunc) (*LabelEvent, *Response, error) // Will be removed in v5, use Work Items API instead - ListGroupEpicLabelEvents(gid any, epic int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) + ListGroupEpicLabelEvents(gid any, epic int64, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) // Will be removed in v5, use Work Items API instead - GetGroupEpicLabelEvent(gid any, epic int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) + GetGroupEpicLabelEvent(gid any, epic int64, event int64, options ...RequestOptionFunc) (*LabelEvent, *Response, error) } // ResourceLabelEventsService handles communication with the event related @@ -51,26 +49,25 @@ var _ 
ResourceLabelEventsServiceInterface = (*ResourceLabelEventsService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/resource_label_events/#get-single-issue-label-event type LabelEvent struct { - ID int `json:"id"` - Action string `json:"action"` - CreatedAt *time.Time `json:"created_at"` - ResourceType string `json:"resource_type"` - ResourceID int `json:"resource_id"` - User struct { - ID int `json:"id"` - Name string `json:"name"` - Username string `json:"username"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - } `json:"user"` - Label struct { - ID int `json:"id"` - Name string `json:"name"` - Color string `json:"color"` - TextColor string `json:"text_color"` - Description string `json:"description"` - } `json:"label"` + ID int64 `json:"id"` + Action string `json:"action"` + CreatedAt *time.Time `json:"created_at"` + ResourceType string `json:"resource_type"` + ResourceID int64 `json:"resource_id"` + User BasicUser `json:"user"` + Label LabelEventLabel `json:"label"` +} + +// LabelEventLabel represents a resource label event label. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/resource_label_events/#get-single-issue-label-event +type LabelEventLabel struct { + ID int64 `json:"id"` + Name string `json:"name"` + Color string `json:"color"` + TextColor string `json:"text_color"` + Description string `json:"description"` } // ListLabelEventsOptions represents the options for all resource label events @@ -87,50 +84,23 @@ type ListLabelEventsOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/resource_label_events/#list-project-issue-label-events -func (s *ResourceLabelEventsService) ListIssueLabelEvents(pid any, issue int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_label_events", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ls []*LabelEvent - resp, err := s.client.Do(req, &ls) - if err != nil { - return nil, resp, err - } - - return ls, resp, nil +func (s *ResourceLabelEventsService) ListIssueLabelEvents(pid any, issue int64, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) { + return do[[]*LabelEvent](s.client, + withPath("projects/%s/issues/%d/resource_label_events", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetIssueLabelEvent gets a single issue-label-event. 
// // GitLab API docs: // https://docs.gitlab.com/api/resource_label_events/#get-single-issue-label-event -func (s *ResourceLabelEventsService) GetIssueLabelEvent(pid any, issue int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_label_events/%d", PathEscape(project), issue, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - l := new(LabelEvent) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil +func (s *ResourceLabelEventsService) GetIssueLabelEvent(pid any, issue int64, event int64, options ...RequestOptionFunc) (*LabelEvent, *Response, error) { + return do[*LabelEvent](s.client, + withPath("projects/%s/issues/%d/resource_label_events/%d", ProjectID{pid}, issue, event), + withRequestOpts(options...), + ) } // ListGroupEpicLabelEvents retrieves resource label events for the specified @@ -139,25 +109,12 @@ func (s *ResourceLabelEventsService) GetIssueLabelEvent(pid any, issue int, even // // GitLab API docs: // https://docs.gitlab.com/api/resource_label_events/#list-group-epic-label-events -func (s *ResourceLabelEventsService) ListGroupEpicLabelEvents(gid any, epic int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/resource_label_events", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ls []*LabelEvent - resp, err := s.client.Do(req, &ls) - if err != nil { - return nil, resp, err - } - - return ls, resp, nil +func (s *ResourceLabelEventsService) ListGroupEpicLabelEvents(gid any, epic int64, opt *ListLabelEventsOptions, options 
...RequestOptionFunc) ([]*LabelEvent, *Response, error) { + return do[[]*LabelEvent](s.client, + withPath("groups/%s/epics/%d/resource_label_events", GroupID{gid}, epic), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetGroupEpicLabelEvent gets a single group epic label event. @@ -165,25 +122,11 @@ func (s *ResourceLabelEventsService) ListGroupEpicLabelEvents(gid any, epic int, // // GitLab API docs: // https://docs.gitlab.com/api/resource_label_events/#get-single-epic-label-event -func (s *ResourceLabelEventsService) GetGroupEpicLabelEvent(gid any, epic int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/resource_label_events/%d", PathEscape(group), epic, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - l := new(LabelEvent) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil +func (s *ResourceLabelEventsService) GetGroupEpicLabelEvent(gid any, epic int64, event int64, options ...RequestOptionFunc) (*LabelEvent, *Response, error) { + return do[*LabelEvent](s.client, + withPath("groups/%s/epics/%d/resource_label_events/%d", GroupID{gid}, epic, event), + withRequestOpts(options...), + ) } // ListMergeRequestsLabelEvents retrieves resource label events for the specified @@ -191,48 +134,21 @@ func (s *ResourceLabelEventsService) GetGroupEpicLabelEvent(gid any, epic int, e // // GitLab API docs: // https://docs.gitlab.com/api/resource_label_events/#list-project-merge-request-label-events -func (s *ResourceLabelEventsService) ListMergeRequestsLabelEvents(pid any, request int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := 
fmt.Sprintf("projects/%s/merge_requests/%d/resource_label_events", PathEscape(project), request) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ls []*LabelEvent - resp, err := s.client.Do(req, &ls) - if err != nil { - return nil, resp, err - } - - return ls, resp, nil +func (s *ResourceLabelEventsService) ListMergeRequestsLabelEvents(pid any, request int64, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) { + return do[[]*LabelEvent](s.client, + withPath("projects/%s/merge_requests/%d/resource_label_events", ProjectID{pid}, request), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetMergeRequestLabelEvent gets a single merge request label event. // // GitLab API docs: // https://docs.gitlab.com/api/resource_label_events/#get-single-merge-request-label-event -func (s *ResourceLabelEventsService) GetMergeRequestLabelEvent(pid any, request int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_label_events/%d", PathEscape(project), request, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - l := new(LabelEvent) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil +func (s *ResourceLabelEventsService) GetMergeRequestLabelEvent(pid any, request int64, event int64, options ...RequestOptionFunc) (*LabelEvent, *Response, error) { + return do[*LabelEvent](s.client, + withPath("projects/%s/merge_requests/%d/resource_label_events/%d", ProjectID{pid}, request, event), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/resource_milestone_events.go b/vendor/gitlab.com/gitlab-org/api/client-go/resource_milestone_events.go index 
76cb105da3..063bbdcf79 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/resource_milestone_events.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/resource_milestone_events.go @@ -17,17 +17,15 @@ package gitlab import ( - "fmt" - "net/http" "time" ) type ( ResourceMilestoneEventsServiceInterface interface { - ListIssueMilestoneEvents(pid any, issue int, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) - GetIssueMilestoneEvent(pid any, issue int, event int, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) - ListMergeMilestoneEvents(pid any, request int, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) - GetMergeRequestMilestoneEvent(pid any, request int, event int, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) + ListIssueMilestoneEvents(pid any, issue int64, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) + GetIssueMilestoneEvent(pid any, issue int64, event int64, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) + ListMergeMilestoneEvents(pid any, request int64, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) + GetMergeRequestMilestoneEvent(pid any, request int64, event int64, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) } // ResourceMilestoneEventsService handles communication with the event related @@ -45,11 +43,11 @@ var _ ResourceMilestoneEventsServiceInterface = (*ResourceMilestoneEventsService // // GitLab API docs: https://docs.gitlab.com/api/resource_milestone_events/ type MilestoneEvent struct { - ID int `json:"id"` + ID int64 `json:"id"` User *BasicUser `json:"user"` CreatedAt *time.Time `json:"created_at"` ResourceType string `json:"resource_type"` - ResourceID int `json:"resource_id"` + ResourceID int64 `json:"resource_id"` Milestone *Milestone `json:"milestone"` 
Action string `json:"action"` } @@ -68,50 +66,23 @@ type ListMilestoneEventsOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/resource_milestone_events/#list-project-issue-milestone-events -func (s *ResourceMilestoneEventsService) ListIssueMilestoneEvents(pid any, issue int, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_milestone_events", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var mes []*MilestoneEvent - resp, err := s.client.Do(req, &mes) - if err != nil { - return nil, resp, err - } - - return mes, resp, nil +func (s *ResourceMilestoneEventsService) ListIssueMilestoneEvents(pid any, issue int64, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) { + return do[[]*MilestoneEvent](s.client, + withPath("projects/%s/issues/%d/resource_milestone_events", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetIssueMilestoneEvent gets a single issue milestone event. 
// // GitLab API docs: // https://docs.gitlab.com/api/resource_milestone_events/#get-single-issue-milestone-event -func (s *ResourceMilestoneEventsService) GetIssueMilestoneEvent(pid any, issue int, event int, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_milestone_events/%d", PathEscape(project), issue, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - me := new(MilestoneEvent) - resp, err := s.client.Do(req, me) - if err != nil { - return nil, resp, err - } - - return me, resp, nil +func (s *ResourceMilestoneEventsService) GetIssueMilestoneEvent(pid any, issue int64, event int64, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) { + return do[*MilestoneEvent](s.client, + withPath("projects/%s/issues/%d/resource_milestone_events/%d", ProjectID{pid}, issue, event), + withRequestOpts(options...), + ) } // ListMergeMilestoneEvents retrieves resource milestone events for the specified @@ -119,48 +90,21 @@ func (s *ResourceMilestoneEventsService) GetIssueMilestoneEvent(pid any, issue i // // GitLab API docs: // https://docs.gitlab.com/api/resource_milestone_events/#list-project-merge-request-milestone-events -func (s *ResourceMilestoneEventsService) ListMergeMilestoneEvents(pid any, request int, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_milestone_events", PathEscape(project), request) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var mes []*MilestoneEvent - resp, err := s.client.Do(req, &mes) - if err != nil { - return nil, resp, err - } - - return mes, resp, nil +func 
(s *ResourceMilestoneEventsService) ListMergeMilestoneEvents(pid any, request int64, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) { + return do[[]*MilestoneEvent](s.client, + withPath("projects/%s/merge_requests/%d/resource_milestone_events", ProjectID{pid}, request), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetMergeRequestMilestoneEvent gets a single merge request milestone event. // // GitLab API docs: // https://docs.gitlab.com/api/resource_milestone_events/#get-single-merge-request-milestone-event -func (s *ResourceMilestoneEventsService) GetMergeRequestMilestoneEvent(pid any, request int, event int, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_milestone_events/%d", PathEscape(project), request, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - me := new(MilestoneEvent) - resp, err := s.client.Do(req, me) - if err != nil { - return nil, resp, err - } - - return me, resp, nil +func (s *ResourceMilestoneEventsService) GetMergeRequestMilestoneEvent(pid any, request int64, event int64, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) { + return do[*MilestoneEvent](s.client, + withPath("projects/%s/merge_requests/%d/resource_milestone_events/%d", ProjectID{pid}, request, event), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/resource_state_events.go b/vendor/gitlab.com/gitlab-org/api/client-go/resource_state_events.go index b1acec9ce6..318c5f15b7 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/resource_state_events.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/resource_state_events.go @@ -17,17 +17,15 @@ package gitlab import ( - "fmt" - "net/http" "time" ) type ( 
ResourceStateEventsServiceInterface interface { - ListIssueStateEvents(pid any, issue int, opt *ListStateEventsOptions, options ...RequestOptionFunc) ([]*StateEvent, *Response, error) - GetIssueStateEvent(pid any, issue int, event int, options ...RequestOptionFunc) (*StateEvent, *Response, error) - ListMergeStateEvents(pid any, request int, opt *ListStateEventsOptions, options ...RequestOptionFunc) ([]*StateEvent, *Response, error) - GetMergeRequestStateEvent(pid any, request int, event int, options ...RequestOptionFunc) (*StateEvent, *Response, error) + ListIssueStateEvents(pid any, issue int64, opt *ListStateEventsOptions, options ...RequestOptionFunc) ([]*StateEvent, *Response, error) + GetIssueStateEvent(pid any, issue int64, event int64, options ...RequestOptionFunc) (*StateEvent, *Response, error) + ListMergeStateEvents(pid any, request int64, opt *ListStateEventsOptions, options ...RequestOptionFunc) ([]*StateEvent, *Response, error) + GetMergeRequestStateEvent(pid any, request int64, event int64, options ...RequestOptionFunc) (*StateEvent, *Response, error) } // ResourceStateEventsService handles communication with the event related @@ -45,11 +43,11 @@ var _ ResourceStateEventsServiceInterface = (*ResourceStateEventsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/resource_state_events/ type StateEvent struct { - ID int `json:"id"` + ID int64 `json:"id"` User *BasicUser `json:"user"` CreatedAt *time.Time `json:"created_at"` ResourceType string `json:"resource_type"` - ResourceID int `json:"resource_id"` + ResourceID int64 `json:"resource_id"` State EventTypeValue `json:"state"` } @@ -67,50 +65,23 @@ type ListStateEventsOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/resource_state_events/#list-project-issue-state-events -func (s *ResourceStateEventsService) ListIssueStateEvents(pid any, issue int, opt *ListStateEventsOptions, options ...RequestOptionFunc) ([]*StateEvent, *Response, error) { - project, err := 
parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_state_events", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ses []*StateEvent - resp, err := s.client.Do(req, &ses) - if err != nil { - return nil, resp, err - } - - return ses, resp, nil +func (s *ResourceStateEventsService) ListIssueStateEvents(pid any, issue int64, opt *ListStateEventsOptions, options ...RequestOptionFunc) ([]*StateEvent, *Response, error) { + return do[[]*StateEvent](s.client, + withPath("projects/%s/issues/%d/resource_state_events", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetIssueStateEvent gets a single issue-state-event. // // GitLab API docs: // https://docs.gitlab.com/api/resource_state_events/#get-single-issue-state-event -func (s *ResourceStateEventsService) GetIssueStateEvent(pid any, issue int, event int, options ...RequestOptionFunc) (*StateEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_state_events/%d", PathEscape(project), issue, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - se := new(StateEvent) - resp, err := s.client.Do(req, se) - if err != nil { - return nil, resp, err - } - - return se, resp, nil +func (s *ResourceStateEventsService) GetIssueStateEvent(pid any, issue int64, event int64, options ...RequestOptionFunc) (*StateEvent, *Response, error) { + return do[*StateEvent](s.client, + withPath("projects/%s/issues/%d/resource_state_events/%d", ProjectID{pid}, issue, event), + withRequestOpts(options...), + ) } // ListMergeStateEvents retrieves resource state events for the specified @@ -118,48 +89,21 @@ func (s *ResourceStateEventsService) GetIssueStateEvent(pid any, issue int, even // // 
GitLab API docs: // https://docs.gitlab.com/api/resource_state_events/#list-project-merge-request-state-events -func (s *ResourceStateEventsService) ListMergeStateEvents(pid any, request int, opt *ListStateEventsOptions, options ...RequestOptionFunc) ([]*StateEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_state_events", PathEscape(project), request) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ses []*StateEvent - resp, err := s.client.Do(req, &ses) - if err != nil { - return nil, resp, err - } - - return ses, resp, nil +func (s *ResourceStateEventsService) ListMergeStateEvents(pid any, request int64, opt *ListStateEventsOptions, options ...RequestOptionFunc) ([]*StateEvent, *Response, error) { + return do[[]*StateEvent](s.client, + withPath("projects/%s/merge_requests/%d/resource_state_events", ProjectID{pid}, request), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetMergeRequestStateEvent gets a single merge request state event. 
// // GitLab API docs: // https://docs.gitlab.com/api/resource_state_events/#get-single-merge-request-state-event -func (s *ResourceStateEventsService) GetMergeRequestStateEvent(pid any, request int, event int, options ...RequestOptionFunc) (*StateEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_state_events/%d", PathEscape(project), request, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - se := new(StateEvent) - resp, err := s.client.Do(req, se) - if err != nil { - return nil, resp, err - } - - return se, resp, nil +func (s *ResourceStateEventsService) GetMergeRequestStateEvent(pid any, request int64, event int64, options ...RequestOptionFunc) (*StateEvent, *Response, error) { + return do[*StateEvent](s.client, + withPath("projects/%s/merge_requests/%d/resource_state_events/%d", ProjectID{pid}, request, event), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/resource_weight_events.go b/vendor/gitlab.com/gitlab-org/api/client-go/resource_weight_events.go index e515f312ab..7d6d8b5328 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/resource_weight_events.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/resource_weight_events.go @@ -17,14 +17,12 @@ package gitlab import ( - "fmt" - "net/http" "time" ) type ( ResourceWeightEventsServiceInterface interface { - ListIssueWeightEvents(pid any, issue int, opt *ListWeightEventsOptions, options ...RequestOptionFunc) ([]*WeightEvent, *Response, error) + ListIssueWeightEvents(pid any, issue int64, opt *ListWeightEventsOptions, options ...RequestOptionFunc) ([]*WeightEvent, *Response, error) } // ResourceWeightEventsService handles communication with the event related @@ -42,14 +40,14 @@ var _ ResourceWeightEventsServiceInterface = (*ResourceWeightEventsService)(nil) // // GitLab API 
docs: https://docs.gitlab.com/api/resource_weight_events/ type WeightEvent struct { - ID int `json:"id"` + ID int64 `json:"id"` User *BasicUser `json:"user"` CreatedAt *time.Time `json:"created_at"` ResourceType string `json:"resource_type"` - ResourceID int `json:"resource_id"` + ResourceID int64 `json:"resource_id"` State EventTypeValue `json:"state"` - IssueID int `json:"issue_id"` - Weight int `json:"weight"` + IssueID int64 `json:"issue_id"` + Weight int64 `json:"weight"` } // ListWeightEventsOptions represents the options for all resource weight events @@ -66,23 +64,10 @@ type ListWeightEventsOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/resource_weight_events/#list-project-issue-weight-events -func (s *ResourceWeightEventsService) ListIssueWeightEvents(pid any, issue int, opt *ListWeightEventsOptions, options ...RequestOptionFunc) ([]*WeightEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_weight_events", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var wes []*WeightEvent - resp, err := s.client.Do(req, &wes) - if err != nil { - return nil, resp, err - } - - return wes, resp, nil +func (s *ResourceWeightEventsService) ListIssueWeightEvents(pid any, issue int64, opt *ListWeightEventsOptions, options ...RequestOptionFunc) ([]*WeightEvent, *Response, error) { + return do[[]*WeightEvent](s.client, + withPath("projects/%s/issues/%d/resource_weight_events", ProjectID{pid}, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/runner_controller_scopes.go b/vendor/gitlab.com/gitlab-org/api/client-go/runner_controller_scopes.go new file mode 100644 index 0000000000..6bf400e356 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/runner_controller_scopes.go @@ -0,0 
+1,131 @@ +package gitlab + +import ( + "net/http" + "time" +) + +type ( + // RunnerControllerScopesServiceInterface handles communication with the + // runner controller scopes related methods of the GitLab API. This is an + // admin-only endpoint. + // + // Note: This API is experimental and may change or be removed in future versions. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controllers/#runner-controller-scopes + RunnerControllerScopesServiceInterface interface { + // ListRunnerControllerScopes lists all scopes for a specific runner + // controller. This is an admin-only endpoint. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runner_controllers/#list-all-scopes-for-a-runner-controller + ListRunnerControllerScopes(rid int64, options ...RequestOptionFunc) (*RunnerControllerScopes, *Response, error) + // AddRunnerControllerInstanceScope adds an instance-level scope to a + // runner controller. This is an admin-only endpoint. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runner_controllers/#add-instance-level-scope + AddRunnerControllerInstanceScope(rid int64, options ...RequestOptionFunc) (*RunnerControllerInstanceLevelScoping, *Response, error) + // RemoveRunnerControllerInstanceScope removes an instance-level scope + // from a runner controller. This is an admin-only endpoint. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runner_controllers/#remove-instance-level-scope + RemoveRunnerControllerInstanceScope(rid int64, options ...RequestOptionFunc) (*Response, error) + // AddRunnerControllerRunnerScope adds a runner scope to a runner + // controller. This is an admin-only endpoint. The runner must be an + // instance-level runner. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/runner_controllers/#add-runner-scope + AddRunnerControllerRunnerScope(rid, runnerID int64, options ...RequestOptionFunc) (*RunnerControllerRunnerLevelScoping, *Response, error) + // RemoveRunnerControllerRunnerScope removes a runner scope from a runner + // controller. This is an admin-only endpoint. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runner_controllers/#remove-runner-scope + RemoveRunnerControllerRunnerScope(rid, runnerID int64, options ...RequestOptionFunc) (*Response, error) + } + + // RunnerControllerScopesService handles communication with the runner + // controller scopes related methods of the GitLab API. This is an admin-only + // endpoint. + // + // Note: This API is experimental and may change or be removed in future versions. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controllers/#runner-controller-scopes + RunnerControllerScopesService struct { + client *Client + } +) + +var _ RunnerControllerScopesServiceInterface = (*RunnerControllerScopesService)(nil) + +// RunnerControllerInstanceLevelScoping represents an instance-level scoping +// for a GitLab runner controller. +// +// GitLab API docs: https://docs.gitlab.com/api/runner_controllers/#runner-controller-scopes +type RunnerControllerInstanceLevelScoping struct { + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` +} + +// RunnerControllerRunnerLevelScoping represents a runner-level scoping for a +// GitLab runner controller. +// +// GitLab API docs: https://docs.gitlab.com/api/runner_controllers/#runner-controller-scopes +type RunnerControllerRunnerLevelScoping struct { + RunnerID int64 `json:"runner_id"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` +} + +// RunnerControllerScopes represents all scopes configured for a GitLab runner +// controller. 
+// +// GitLab API docs: https://docs.gitlab.com/api/runner_controllers/#runner-controller-scopes +type RunnerControllerScopes struct { + InstanceLevelScopings []*RunnerControllerInstanceLevelScoping `json:"instance_level_scopings"` + RunnerLevelScopings []*RunnerControllerRunnerLevelScoping `json:"runner_level_scopings"` +} + +func (s *RunnerControllerScopesService) ListRunnerControllerScopes(rid int64, options ...RequestOptionFunc) (*RunnerControllerScopes, *Response, error) { + return do[*RunnerControllerScopes](s.client, + withPath("runner_controllers/%d/scopes", rid), + withRequestOpts(options...), + ) +} + +func (s *RunnerControllerScopesService) AddRunnerControllerInstanceScope(rid int64, options ...RequestOptionFunc) (*RunnerControllerInstanceLevelScoping, *Response, error) { + return do[*RunnerControllerInstanceLevelScoping](s.client, + withMethod(http.MethodPost), + withPath("runner_controllers/%d/scopes/instance", rid), + withRequestOpts(options...), + ) +} + +func (s *RunnerControllerScopesService) RemoveRunnerControllerInstanceScope(rid int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("runner_controllers/%d/scopes/instance", rid), + withRequestOpts(options...), + ) + return resp, err +} + +func (s *RunnerControllerScopesService) AddRunnerControllerRunnerScope(rid, runnerID int64, options ...RequestOptionFunc) (*RunnerControllerRunnerLevelScoping, *Response, error) { + return do[*RunnerControllerRunnerLevelScoping](s.client, + withMethod(http.MethodPost), + withPath("runner_controllers/%d/scopes/runners/%d", rid, runnerID), + withRequestOpts(options...), + ) +} + +func (s *RunnerControllerScopesService) RemoveRunnerControllerRunnerScope(rid, runnerID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("runner_controllers/%d/scopes/runners/%d", rid, runnerID), + 
withRequestOpts(options...), + ) + return resp, err +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/runner_controller_tokens.go b/vendor/gitlab.com/gitlab-org/api/client-go/runner_controller_tokens.go new file mode 100644 index 0000000000..efbc91f2d3 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/runner_controller_tokens.go @@ -0,0 +1,125 @@ +package gitlab + +import ( + "net/http" + "time" +) + +type ( + // RunnerControllerTokensServiceInterface handles communication with the runner + // controller token related methods of the GitLab API. This is an admin-only + // endpoint. + // + // Note: This API is experimental and may change or be removed in future versions. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controller_tokens/ + RunnerControllerTokensServiceInterface interface { + // ListRunnerControllerTokens lists all runner controller tokens. This is an + // admin-only endpoint. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controller_tokens/#list-all-runner-controller-tokens + ListRunnerControllerTokens(rid int64, opt *ListRunnerControllerTokensOptions, options ...RequestOptionFunc) ([]*RunnerControllerToken, *Response, error) + // GetRunnerControllerToken retrieves a single runner controller token. This + // is an admin-only endpoint. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controller_tokens/#retrieve-a-single-runner-controller-token + GetRunnerControllerToken(rid int64, tokenID int64, options ...RequestOptionFunc) (*RunnerControllerToken, *Response, error) + // CreateRunnerControllerToken creates a new runner controller token. This is + // an admin-only endpoint. 
+ // + // GitLab API docs: https://docs.gitlab.com/api/runner_controller_tokens/#create-a-runner-controller-token + CreateRunnerControllerToken(rid int64, opt *CreateRunnerControllerTokenOptions, options ...RequestOptionFunc) (*RunnerControllerToken, *Response, error) + // RotateRunnerControllerToken rotates an existing runner controller token. + // This is an admin-only endpoint. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controller_tokens/#rotate-a-runner-controller-token + RotateRunnerControllerToken(rid int64, tokenID int64, options ...RequestOptionFunc) (*RunnerControllerToken, *Response, error) + // RevokeRunnerControllerToken revokes a runner controller token. This is an + // admin-only endpoint. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controller_tokens/#revoke-a-runner-controller-token + RevokeRunnerControllerToken(rid int64, tokenID int64, options ...RequestOptionFunc) (*Response, error) + } + + // RunnerControllerTokensService handles communication with the runner + // controller token related methods of the GitLab API. This is an admin-only + // endpoint. + // + // Note: This API is experimental and may change or be removed in future versions. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controller_tokens/ + RunnerControllerTokensService struct { + client *Client + } +) + +var _ RunnerControllerTokensServiceInterface = (*RunnerControllerTokensService)(nil) + +// RunnerControllerToken represents a GitLab runner controller token. +// +// GitLab API docs: https://docs.gitlab.com/api/runner_controller_tokens/ +type RunnerControllerToken struct { + ID int64 `json:"id"` + RunnerControllerID int64 `json:"runner_controller_id"` + Description string `json:"description"` + Token string `json:"token,omitempty"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` +} + +// ListRunnerControllerTokensOptions represents the available +// ListRunnerControllerTokens() options. 
+// +// GitLab API docs: https://docs.gitlab.com/api/runner_controller_tokens/#list-all-runner-controller-tokens +type ListRunnerControllerTokensOptions struct { + ListOptions +} + +func (s *RunnerControllerTokensService) ListRunnerControllerTokens(rid int64, opt *ListRunnerControllerTokensOptions, options ...RequestOptionFunc) ([]*RunnerControllerToken, *Response, error) { + return do[[]*RunnerControllerToken](s.client, + withPath("runner_controllers/%d/tokens", rid), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +func (s *RunnerControllerTokensService) GetRunnerControllerToken(rid int64, tokenID int64, options ...RequestOptionFunc) (*RunnerControllerToken, *Response, error) { + return do[*RunnerControllerToken](s.client, + withPath("runner_controllers/%d/tokens/%d", rid, tokenID), + withRequestOpts(options...), + ) +} + +// CreateRunnerControllerTokenOptions represents the available +// CreateRunnerControllerToken() options. +// +// GitLab API docs: https://docs.gitlab.com/api/runner_controller_tokens/#create-a-runner-controller-token +type CreateRunnerControllerTokenOptions struct { + Description *string `url:"description,omitempty" json:"description,omitempty"` +} + +func (s *RunnerControllerTokensService) CreateRunnerControllerToken(rid int64, opt *CreateRunnerControllerTokenOptions, options ...RequestOptionFunc) (*RunnerControllerToken, *Response, error) { + return do[*RunnerControllerToken](s.client, + withMethod(http.MethodPost), + withPath("runner_controllers/%d/tokens", rid), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +func (s *RunnerControllerTokensService) RotateRunnerControllerToken(rid int64, tokenID int64, options ...RequestOptionFunc) (*RunnerControllerToken, *Response, error) { + return do[*RunnerControllerToken](s.client, + withMethod(http.MethodPost), + withPath("runner_controllers/%d/tokens/%d/rotate", rid, tokenID), + withRequestOpts(options...), + ) +} + +func (s *RunnerControllerTokensService) 
RevokeRunnerControllerToken(rid int64, tokenID int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("runner_controllers/%d/tokens/%d", rid, tokenID), + withRequestOpts(options...), + ) + return resp, err +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/runner_controllers.go b/vendor/gitlab.com/gitlab-org/api/client-go/runner_controllers.go new file mode 100644 index 0000000000..5732857b63 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/runner_controllers.go @@ -0,0 +1,145 @@ +package gitlab + +import ( + "net/http" + "time" +) + +type ( + // RunnerControllersServiceInterface handles communication with the runner + // controller related methods of the GitLab API. This is an admin-only endpoint. + // + // Note: This API is experimental and may change or be removed in future versions. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controllers/ + RunnerControllersServiceInterface interface { + // ListRunnerControllers gets a list of runner controllers. This is an + // admin-only endpoint. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controllers/#list-all-runner-controllers + ListRunnerControllers(opt *ListRunnerControllersOptions, options ...RequestOptionFunc) ([]*RunnerController, *Response, error) + // GetRunnerController retrieves a single runner controller. This is an + // admin-only endpoint. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controllers/#retrieve-a-single-runner-controller + GetRunnerController(rid int64, options ...RequestOptionFunc) (*RunnerController, *Response, error) + // CreateRunnerController registers a new runner controller. This is an + // admin-only endpoint. 
+ // + // GitLab API docs: https://docs.gitlab.com/api/runner_controllers/#register-a-runner-controller + CreateRunnerController(opt *CreateRunnerControllerOptions, options ...RequestOptionFunc) (*RunnerController, *Response, error) + // UpdateRunnerController updates a runner controller. This is an admin-only + // endpoint. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controllers/#update-a-runner-controller + UpdateRunnerController(rid int64, opt *UpdateRunnerControllerOptions, options ...RequestOptionFunc) (*RunnerController, *Response, error) + // DeleteRunnerController deletes a runner controller. This is an admin-only + // endpoint. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controllers/#delete-a-runner-controller + DeleteRunnerController(rid int64, options ...RequestOptionFunc) (*Response, error) + } + + // RunnerControllersService handles communication with the runner controller + // related methods of the GitLab API. This is an admin-only endpoint. + // + // Note: This API is experimental and may change or be removed in future versions. + // + // GitLab API docs: https://docs.gitlab.com/api/runner_controllers/ + RunnerControllersService struct { + client *Client + } +) + +var _ RunnerControllersServiceInterface = (*RunnerControllersService)(nil) + +// RunnerControllerStateValue represents the state of a runner controller. +// +// GitLab API docs: https://docs.gitlab.com/api/runner_controllers/ +type RunnerControllerStateValue string + +// These constants represent all valid runner controller states. +const ( + RunnerControllerStateDisabled RunnerControllerStateValue = "disabled" + RunnerControllerStateEnabled RunnerControllerStateValue = "enabled" + RunnerControllerStateDryRun RunnerControllerStateValue = "dry_run" +) + +// RunnerController represents a GitLab runner controller. 
+// +// GitLab API docs: https://docs.gitlab.com/api/runner_controllers/ +type RunnerController struct { + ID int64 `json:"id"` + Description string `json:"description"` + State RunnerControllerStateValue `json:"state"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` +} + +// ListRunnerControllersOptions represents the available +// ListRunnerControllers() options. +// +// GitLab API docs: https://docs.gitlab.com/api/runner_controllers/#list-all-runner-controllers +type ListRunnerControllersOptions struct { + ListOptions +} + +func (s *RunnerControllersService) ListRunnerControllers(opt *ListRunnerControllersOptions, options ...RequestOptionFunc) ([]*RunnerController, *Response, error) { + return do[[]*RunnerController](s.client, + withPath("runner_controllers"), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +func (s *RunnerControllersService) GetRunnerController(rid int64, options ...RequestOptionFunc) (*RunnerController, *Response, error) { + return do[*RunnerController](s.client, + withPath("runner_controllers/%d", rid), + withRequestOpts(options...), + ) +} + +// CreateRunnerControllerOptions represents the available +// CreateRunnerController() options. +// +// GitLab API docs: https://docs.gitlab.com/api/runner_controllers/#register-a-runner-controller +type CreateRunnerControllerOptions struct { + Description *string `url:"description,omitempty" json:"description,omitempty"` + State *RunnerControllerStateValue `url:"state,omitempty" json:"state,omitempty"` +} + +func (s *RunnerControllersService) CreateRunnerController(opt *CreateRunnerControllerOptions, options ...RequestOptionFunc) (*RunnerController, *Response, error) { + return do[*RunnerController](s.client, + withMethod(http.MethodPost), + withPath("runner_controllers"), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +// UpdateRunnerControllerOptions represents the available +// UpdateRunnerController() options. 
+// +// GitLab API docs: https://docs.gitlab.com/api/runner_controllers/#update-a-runner-controller +type UpdateRunnerControllerOptions struct { + Description *string `url:"description,omitempty" json:"description,omitempty"` + State *RunnerControllerStateValue `url:"state,omitempty" json:"state,omitempty"` +} + +func (s *RunnerControllersService) UpdateRunnerController(rid int64, opt *UpdateRunnerControllerOptions, options ...RequestOptionFunc) (*RunnerController, *Response, error) { + return do[*RunnerController](s.client, + withMethod(http.MethodPut), + withPath("runner_controllers/%d", rid), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +func (s *RunnerControllersService) DeleteRunnerController(rid int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("runner_controllers/%d", rid), + withRequestOpts(options...), + ) + return resp, err +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/runners.go b/vendor/gitlab.com/gitlab-org/api/client-go/runners.go index b5285c8068..0edf46f065 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/runners.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/runners.go @@ -24,21 +24,84 @@ import ( type ( RunnersServiceInterface interface { + // ListRunners gets a list of runners accessible by the authenticated user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#list-owned-runners ListRunners(opt *ListRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) + // ListAllRunners gets a list of all runners in the GitLab instance. Access is + // restricted to users with admin privileges. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#list-all-runners ListAllRunners(opt *ListRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) + // GetRunnerDetails returns details for given runner. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#get-runners-details GetRunnerDetails(rid any, options ...RequestOptionFunc) (*RunnerDetails, *Response, error) + // UpdateRunnerDetails updates details for a given runner. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#update-runners-details UpdateRunnerDetails(rid any, opt *UpdateRunnerDetailsOptions, options ...RequestOptionFunc) (*RunnerDetails, *Response, error) + // RemoveRunner removes a runner. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#delete-a-runner RemoveRunner(rid any, options ...RequestOptionFunc) (*Response, error) + // ListRunnerJobs gets a list of jobs that are being processed or were processed by specified Runner. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#list-jobs-processed-by-a-runner ListRunnerJobs(rid any, opt *ListRunnerJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) + // ListProjectRunners gets a list of runners accessible by the authenticated user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#list-projects-runners ListProjectRunners(pid any, opt *ListProjectRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) + // EnableProjectRunner enables an available specific runner in the project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#assign-a-runner-to-project EnableProjectRunner(pid any, opt *EnableProjectRunnerOptions, options ...RequestOptionFunc) (*Runner, *Response, error) - DisableProjectRunner(pid any, runner int, options ...RequestOptionFunc) (*Response, error) + // DisableProjectRunner disables a specific runner from project. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#unassign-a-runner-from-project + DisableProjectRunner(pid any, runner int64, options ...RequestOptionFunc) (*Response, error) + // ListGroupsRunners lists all runners (specific and shared) available in the + // group as well it's ancestor groups. Shared runners are listed if at least one + // shared runner is defined. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#list-groups-runners ListGroupsRunners(gid any, opt *ListGroupsRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) + // RegisterNewRunner registers a new Runner for the instance. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#create-a-runner RegisterNewRunner(opt *RegisterNewRunnerOptions, options ...RequestOptionFunc) (*Runner, *Response, error) + // DeleteRegisteredRunner deletes a Runner by Token. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#delete-a-runner-by-authentication-token DeleteRegisteredRunner(opt *DeleteRegisteredRunnerOptions, options ...RequestOptionFunc) (*Response, error) - DeleteRegisteredRunnerByID(rid int, options ...RequestOptionFunc) (*Response, error) + // DeleteRegisteredRunnerByID deletes a runner by ID. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#delete-a-runner-by-id + DeleteRegisteredRunnerByID(rid int64, options ...RequestOptionFunc) (*Response, error) + // VerifyRegisteredRunner registers a new runner for the instance. + // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#verify-authentication-for-a-registered-runner VerifyRegisteredRunner(opt *VerifyRegisteredRunnerOptions, options ...RequestOptionFunc) (*Response, error) - ResetRunnerAuthenticationToken(rid int, options ...RequestOptionFunc) (*RunnerAuthenticationToken, *Response, error) + // ResetRunnerAuthenticationToken resets a runner's authentication token. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/runners/#reset-runners-authentication-token-by-using-the-runner-id + ResetRunnerAuthenticationToken(rid int64, options ...RequestOptionFunc) (*RunnerAuthenticationToken, *Response, error) // Deprecated: for removal in GitLab 20.0, see https://docs.gitlab.com/ci/runners/new_creation_workflow/ instead ResetInstanceRunnerRegistrationToken(options ...RequestOptionFunc) (*RunnerRegistrationToken, *Response, error) @@ -65,7 +128,7 @@ var _ RunnersServiceInterface = (*RunnersService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/runners/ type Runner struct { - ID int `json:"id"` + ID int64 `json:"id"` Description string `json:"description"` Paused bool `json:"paused"` IsShared bool `json:"is_shared"` @@ -87,34 +150,24 @@ type Runner struct { // // GitLab API docs: https://docs.gitlab.com/api/runners/ type RunnerDetails struct { - Paused bool `json:"paused"` - Description string `json:"description"` - ID int `json:"id"` - IsShared bool `json:"is_shared"` - RunnerType string `json:"runner_type"` - ContactedAt *time.Time `json:"contacted_at"` - MaintenanceNote string `json:"maintenance_note"` - Name string `json:"name"` - Online bool `json:"online"` - Status string `json:"status"` - Projects []struct { - ID int `json:"id"` - Name string `json:"name"` - NameWithNamespace string `json:"name_with_namespace"` - Path string `json:"path"` - PathWithNamespace string `json:"path_with_namespace"` - } `json:"projects"` - Token string `json:"token"` - TagList []string `json:"tag_list"` - RunUntagged bool `json:"run_untagged"` - Locked bool `json:"locked"` - AccessLevel string `json:"access_level"` - MaximumTimeout int `json:"maximum_timeout"` - Groups []struct { - ID int `json:"id"` - Name string `json:"name"` - WebURL string `json:"web_url"` - } `json:"groups"` + Paused bool `json:"paused"` + Description string `json:"description"` + ID int64 `json:"id"` + IsShared bool `json:"is_shared"` + RunnerType string 
`json:"runner_type"` + ContactedAt *time.Time `json:"contacted_at"` + MaintenanceNote string `json:"maintenance_note"` + Name string `json:"name"` + Online bool `json:"online"` + Status string `json:"status"` + Projects []RunnerDetailsProject `json:"projects"` + Token string `json:"token"` + TagList []string `json:"tag_list"` + RunUntagged bool `json:"run_untagged"` + Locked bool `json:"locked"` + AccessLevel string `json:"access_level"` + MaximumTimeout int64 `json:"maximum_timeout"` + Groups []RunnerDetailsGroup `json:"groups"` // Deprecated: for removal in v5 of the API, see GraphQL resource CiRunnerManager instead Architecture string `json:"architecture"` @@ -135,6 +188,26 @@ type RunnerDetails struct { Active bool `json:"active"` } +// RunnerDetailsProject represents the GitLab CI runner details project. +// +// GitLab API docs: https://docs.gitlab.com/api/runners/ +type RunnerDetailsProject struct { + ID int64 `json:"id"` + Name string `json:"name"` + NameWithNamespace string `json:"name_with_namespace"` + Path string `json:"path"` + PathWithNamespace string `json:"path_with_namespace"` +} + +// RunnerDetailsGroup represents the GitLab CI runner details group. +// +// GitLab API docs: https://docs.gitlab.com/api/runners/ +type RunnerDetailsGroup struct { + ID int64 `json:"id"` + Name string `json:"name"` + WebURL string `json:"web_url"` +} + // ListRunnersOptions represents the available ListRunners() options. // // GitLab API docs: @@ -150,68 +223,43 @@ type ListRunnersOptions struct { Scope *string `url:"scope,omitempty" json:"scope,omitempty"` } -// ListRunners gets a list of runners accessible by the authenticated user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#list-owned-runners func (s *RunnersService) ListRunners(opt *ListRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "runners", opt, options) - if err != nil { - return nil, nil, err - } - - var rs []*Runner - resp, err := s.client.Do(req, &rs) + res, resp, err := do[[]*Runner](s.client, + withMethod(http.MethodGet), + withPath("runners"), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return rs, resp, nil + return res, resp, nil } -// ListAllRunners gets a list of all runners in the GitLab instance. Access is -// restricted to users with admin privileges. -// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#list-all-runners func (s *RunnersService) ListAllRunners(opt *ListRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "runners/all", opt, options) - if err != nil { - return nil, nil, err - } - - var rs []*Runner - resp, err := s.client.Do(req, &rs) + res, resp, err := do[[]*Runner](s.client, + withMethod(http.MethodGet), + withPath("runners/all"), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return rs, resp, nil + return res, resp, nil } -// GetRunnerDetails returns details for given runner. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#get-runners-details func (s *RunnersService) GetRunnerDetails(rid any, options ...RequestOptionFunc) (*RunnerDetails, *Response, error) { - runner, err := parseID(rid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("runners/%s", runner) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - rs := new(RunnerDetails) - resp, err := s.client.Do(req, &rs) + res, resp, err := do[*RunnerDetails](s.client, + withMethod(http.MethodGet), + withPath("runners/%s", RunnerID{rid}), + withAPIOpts(nil), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return rs, resp, nil + return res, resp, nil } // UpdateRunnerDetailsOptions represents the available UpdateRunnerDetails() options. @@ -225,55 +273,34 @@ type UpdateRunnerDetailsOptions struct { RunUntagged *bool `url:"run_untagged,omitempty" json:"run_untagged,omitempty"` Locked *bool `url:"locked,omitempty" json:"locked,omitempty"` AccessLevel *string `url:"access_level,omitempty" json:"access_level,omitempty"` - MaximumTimeout *int `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"` + MaximumTimeout *int64 `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"` MaintenanceNote *string `url:"maintenance_note,omitempty" json:"maintenance_note,omitempty"` // Deprecated: for removal in v5 of the API, use Paused instead Active *bool `url:"active,omitempty" json:"active,omitempty"` } -// UpdateRunnerDetails updates details for a given runner. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#update-runners-details func (s *RunnersService) UpdateRunnerDetails(rid any, opt *UpdateRunnerDetailsOptions, options ...RequestOptionFunc) (*RunnerDetails, *Response, error) { - runner, err := parseID(rid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("runners/%s", runner) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - rs := new(RunnerDetails) - resp, err := s.client.Do(req, &rs) + res, resp, err := do[*RunnerDetails](s.client, + withMethod(http.MethodPut), + withPath("runners/%s", RunnerID{rid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return rs, resp, nil + return res, resp, nil } -// RemoveRunner removes a runner. -// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#delete-a-runner func (s *RunnersService) RemoveRunner(rid any, options ...RequestOptionFunc) (*Response, error) { - runner, err := parseID(rid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("runners/%s", runner) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("runners/%s", RunnerID{rid}), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } // ListRunnerJobsOptions represents the available ListRunnerJobs() @@ -288,29 +315,17 @@ type ListRunnerJobsOptions struct { Sort *string `url:"sort,omitempty" json:"sort,omitempty"` } -// ListRunnerJobs gets a list of jobs that are being processed or were processed by specified Runner. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#list-jobs-processed-by-a-runner func (s *RunnersService) ListRunnerJobs(rid any, opt *ListRunnerJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) { - runner, err := parseID(rid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("runners/%s/jobs", runner) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var rs []*Job - resp, err := s.client.Do(req, &rs) + res, resp, err := do[[]*Job](s.client, + withMethod(http.MethodGet), + withPath("runners/%s/jobs", RunnerID{rid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return rs, resp, nil + return res, resp, nil } // ListProjectRunnersOptions represents the available ListProjectRunners() @@ -320,29 +335,17 @@ func (s *RunnersService) ListRunnerJobs(rid any, opt *ListRunnerJobsOptions, opt // https://docs.gitlab.com/api/runners/#list-projects-runners type ListProjectRunnersOptions ListRunnersOptions -// ListProjectRunners gets a list of runners accessible by the authenticated user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#list-projects-runners func (s *RunnersService) ListProjectRunners(pid any, opt *ListProjectRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/runners", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var rs []*Runner - resp, err := s.client.Do(req, &rs) + res, resp, err := do[[]*Runner](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/runners", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return rs, resp, nil + return res, resp, nil } // EnableProjectRunnerOptions represents the available EnableProjectRunner() @@ -351,51 +354,30 @@ func (s *RunnersService) ListProjectRunners(pid any, opt *ListProjectRunnersOpti // GitLab API docs: // https://docs.gitlab.com/api/runners/#assign-a-runner-to-project type EnableProjectRunnerOptions struct { - RunnerID int `json:"runner_id"` + RunnerID int64 `json:"runner_id"` } -// EnableProjectRunner enables an available specific runner in the project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#assign-a-runner-to-project func (s *RunnersService) EnableProjectRunner(pid any, opt *EnableProjectRunnerOptions, options ...RequestOptionFunc) (*Runner, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/runners", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - r := new(Runner) - resp, err := s.client.Do(req, &r) + res, resp, err := do[*Runner](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/runners", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return r, resp, nil + return res, resp, nil } -// DisableProjectRunner disables a specific runner from project. -// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#unassign-a-runner-from-project -func (s *RunnersService) DisableProjectRunner(pid any, runner int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/runners/%d", PathEscape(project), runner) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *RunnersService) DisableProjectRunner(pid any, runner int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/runners/%d", ProjectID{pid}, runner), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } // ListGroupsRunnersOptions represents the available ListGroupsRunners() options. 
@@ -409,31 +391,17 @@ type ListGroupsRunnersOptions struct { TagList *[]string `url:"tag_list,comma,omitempty" json:"tag_list,omitempty"` } -// ListGroupsRunners lists all runners (specific and shared) available in the -// group as well it’s ancestor groups. Shared runners are listed if at least one -// shared runner is defined. -// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#list-groups-runners func (s *RunnersService) ListGroupsRunners(gid any, opt *ListGroupsRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/runners", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var rs []*Runner - resp, err := s.client.Do(req, &rs) + res, resp, err := do[[]*Runner](s.client, + withMethod(http.MethodGet), + withPath("groups/%s/runners", GroupID{gid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return rs, resp, nil + return res, resp, nil } // RegisterNewRunnerOptions represents the available RegisterNewRunner() @@ -450,7 +418,7 @@ type RegisterNewRunnerOptions struct { RunUntagged *bool `url:"run_untagged,omitempty" json:"run_untagged,omitempty"` TagList *[]string `url:"tag_list[],omitempty" json:"tag_list,omitempty"` AccessLevel *string `url:"access_level,omitempty" json:"access_level,omitempty"` - MaximumTimeout *int `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"` + MaximumTimeout *int64 `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"` MaintenanceNote *string `url:"maintenance_note,omitempty" json:"maintenance_note,omitempty"` // Deprecated: for removal in v5 of the API, use Paused instead @@ -470,23 +438,17 @@ type RegisterNewRunnerInfoOptions struct { Architecture *string `url:"architecture,omitempty" json:"architecture,omitempty"` } -// 
RegisterNewRunner registers a new Runner for the instance. -// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#create-a-runner func (s *RunnersService) RegisterNewRunner(opt *RegisterNewRunnerOptions, options ...RequestOptionFunc) (*Runner, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "runners", opt, options) - if err != nil { - return nil, nil, err - } - - r := new(Runner) - resp, err := s.client.Do(req, &r) + res, resp, err := do[*Runner](s.client, + withMethod(http.MethodPost), + withPath("runners"), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return r, resp, nil + return res, resp, nil } // DeleteRegisteredRunnerOptions represents the available @@ -498,30 +460,24 @@ type DeleteRegisteredRunnerOptions struct { Token *string `url:"token" json:"token"` } -// DeleteRegisteredRunner deletes a Runner by Token. -// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#delete-a-runner-by-authentication-token func (s *RunnersService) DeleteRegisteredRunner(opt *DeleteRegisteredRunnerOptions, options ...RequestOptionFunc) (*Response, error) { - req, err := s.client.NewRequest(http.MethodDelete, "runners", opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("runners"), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } -// DeleteRegisteredRunnerByID deletes a runner by ID. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#delete-a-runner-by-id -func (s *RunnersService) DeleteRegisteredRunnerByID(rid int, options ...RequestOptionFunc) (*Response, error) { - req, err := s.client.NewRequest(http.MethodDelete, fmt.Sprintf("runners/%d", rid), nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *RunnersService) DeleteRegisteredRunnerByID(rid int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath(fmt.Sprintf("runners/%d", rid)), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } // VerifyRegisteredRunnerOptions represents the available @@ -533,17 +489,14 @@ type VerifyRegisteredRunnerOptions struct { Token *string `url:"token" json:"token"` } -// VerifyRegisteredRunner registers a new runner for the instance. -// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#verify-authentication-for-a-registered-runner func (s *RunnersService) VerifyRegisteredRunner(opt *VerifyRegisteredRunnerOptions, options ...RequestOptionFunc) (*Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "runners/verify", opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("runners/verify"), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } type RunnerRegistrationToken struct { @@ -558,18 +511,16 @@ type RunnerRegistrationToken struct { // GitLab API docs: // https://docs.gitlab.com/api/runners/#reset-instances-runner-registration-token func (s *RunnersService) ResetInstanceRunnerRegistrationToken(options ...RequestOptionFunc) (*RunnerRegistrationToken, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "runners/reset_registration_token", nil, options) - if err != nil { - return nil, nil, err - } - - r := 
new(RunnerRegistrationToken) - resp, err := s.client.Do(req, &r) + res, resp, err := do[*RunnerRegistrationToken](s.client, + withMethod(http.MethodPost), + withPath("runners/reset_registration_token"), + withAPIOpts(nil), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return r, resp, nil + return res, resp, nil } // ResetGroupRunnerRegistrationToken resets a group's runner registration token. @@ -578,24 +529,16 @@ func (s *RunnersService) ResetInstanceRunnerRegistrationToken(options ...Request // GitLab API docs: // https://docs.gitlab.com/api/runners/#reset-groups-runner-registration-token func (s *RunnersService) ResetGroupRunnerRegistrationToken(gid any, options ...RequestOptionFunc) (*RunnerRegistrationToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/runners/reset_registration_token", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - r := new(RunnerRegistrationToken) - resp, err := s.client.Do(req, &r) + res, resp, err := do[*RunnerRegistrationToken](s.client, + withMethod(http.MethodPost), + withPath("groups/%s/runners/reset_registration_token", GroupID{gid}), + withAPIOpts(nil), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return r, resp, nil + return res, resp, nil } // ResetProjectRunnerRegistrationToken resets a projects's runner registration token. 
@@ -604,23 +547,16 @@ func (s *RunnersService) ResetGroupRunnerRegistrationToken(gid any, options ...R // GitLab API docs: // https://docs.gitlab.com/api/runners/#reset-projects-runner-registration-token func (s *RunnersService) ResetProjectRunnerRegistrationToken(pid any, options ...RequestOptionFunc) (*RunnerRegistrationToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/runners/reset_registration_token", PathEscape(project)) - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - r := new(RunnerRegistrationToken) - resp, err := s.client.Do(req, &r) + res, resp, err := do[*RunnerRegistrationToken](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/runners/reset_registration_token", ProjectID{pid}), + withAPIOpts(nil), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return r, resp, nil + return res, resp, nil } type RunnerAuthenticationToken struct { @@ -628,22 +564,15 @@ type RunnerAuthenticationToken struct { TokenExpiresAt *time.Time `url:"token_expires_at" json:"token_expires_at"` } -// ResetRunnerAuthenticationToken resets a runner's authentication token. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/runners/#reset-runners-authentication-token-by-using-the-runner-id -func (s *RunnersService) ResetRunnerAuthenticationToken(rid int, options ...RequestOptionFunc) (*RunnerAuthenticationToken, *Response, error) { - u := fmt.Sprintf("runners/%d/reset_authentication_token", rid) - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - r := new(RunnerAuthenticationToken) - resp, err := s.client.Do(req, &r) +func (s *RunnersService) ResetRunnerAuthenticationToken(rid int64, options ...RequestOptionFunc) (*RunnerAuthenticationToken, *Response, error) { + res, resp, err := do[*RunnerAuthenticationToken](s.client, + withMethod(http.MethodPost), + withPath("runners/%d/reset_authentication_token", rid), + withAPIOpts(nil), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return r, resp, nil + return res, resp, nil } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/search.go b/vendor/gitlab.com/gitlab-org/api/client-go/search.go index 580e56ff10..baec55b667 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/search.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/search.go @@ -16,11 +16,6 @@ package gitlab -import ( - "fmt" - "net/http" -) - type ( SearchServiceInterface interface { Projects(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) @@ -79,9 +74,11 @@ type searchOptions struct { // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-projects func (s *SearchService) Projects(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - var ps []*Project - resp, err := s.search("projects", query, &ps, opt, options...) 
- return ps, resp, err + return do[[]*Project](s.client, + withPath("search"), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "projects", Search: query}), + withRequestOpts(options...), + ) } // ProjectsByGroup searches the expression within projects for @@ -89,18 +86,22 @@ func (s *SearchService) Projects(query string, opt *SearchOptions, options ...Re // // GitLab API docs: https://docs.gitlab.com/api/search/#group-search-api func (s *SearchService) ProjectsByGroup(gid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - var ps []*Project - resp, err := s.searchByGroup(gid, "projects", query, &ps, opt, options...) - return ps, resp, err + return do[[]*Project](s.client, + withPath("groups/%s/-/search", GroupID{gid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "projects", Search: query}), + withRequestOpts(options...), + ) } // Issues searches the expression within issues // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-issues func (s *SearchService) Issues(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - var is []*Issue - resp, err := s.search("issues", query, &is, opt, options...) - return is, resp, err + return do[[]*Issue](s.client, + withPath("search"), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "issues", Search: query}), + withRequestOpts(options...), + ) } // IssuesByGroup searches the expression within issues for @@ -108,9 +109,11 @@ func (s *SearchService) Issues(query string, opt *SearchOptions, options ...Requ // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-issues-1 func (s *SearchService) IssuesByGroup(gid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - var is []*Issue - resp, err := s.searchByGroup(gid, "issues", query, &is, opt, options...) 
- return is, resp, err + return do[[]*Issue](s.client, + withPath("groups/%s/-/search", GroupID{gid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "issues", Search: query}), + withRequestOpts(options...), + ) } // IssuesByProject searches the expression within issues for @@ -118,9 +121,11 @@ func (s *SearchService) IssuesByGroup(gid any, query string, opt *SearchOptions, // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-issues-2 func (s *SearchService) IssuesByProject(pid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - var is []*Issue - resp, err := s.searchByProject(pid, "issues", query, &is, opt, options...) - return is, resp, err + return do[[]*Issue](s.client, + withPath("projects/%s/-/search", ProjectID{pid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "issues", Search: query}), + withRequestOpts(options...), + ) } // MergeRequests searches the expression within merge requests @@ -128,9 +133,11 @@ func (s *SearchService) IssuesByProject(pid any, query string, opt *SearchOption // GitLab API docs: // https://docs.gitlab.com/api/search/#scope-merge_requests func (s *SearchService) MergeRequests(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { - var ms []*MergeRequest - resp, err := s.search("merge_requests", query, &ms, opt, options...) 
- return ms, resp, err + return do[[]*MergeRequest](s.client, + withPath("search"), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "merge_requests", Search: query}), + withRequestOpts(options...), + ) } // MergeRequestsByGroup searches the expression within merge requests for @@ -139,9 +146,11 @@ func (s *SearchService) MergeRequests(query string, opt *SearchOptions, options // GitLab API docs: // https://docs.gitlab.com/api/search/#scope-merge_requests-1 func (s *SearchService) MergeRequestsByGroup(gid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { - var ms []*MergeRequest - resp, err := s.searchByGroup(gid, "merge_requests", query, &ms, opt, options...) - return ms, resp, err + return do[[]*MergeRequest](s.client, + withPath("groups/%s/-/search", GroupID{gid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "merge_requests", Search: query}), + withRequestOpts(options...), + ) } // MergeRequestsByProject searches the expression within merge requests for @@ -150,18 +159,22 @@ func (s *SearchService) MergeRequestsByGroup(gid any, query string, opt *SearchO // GitLab API docs: // https://docs.gitlab.com/api/search/#scope-merge_requests-2 func (s *SearchService) MergeRequestsByProject(pid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { - var ms []*MergeRequest - resp, err := s.searchByProject(pid, "merge_requests", query, &ms, opt, options...) 
- return ms, resp, err + return do[[]*MergeRequest](s.client, + withPath("projects/%s/-/search", ProjectID{pid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "merge_requests", Search: query}), + withRequestOpts(options...), + ) } // Milestones searches the expression within milestones // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-milestones func (s *SearchService) Milestones(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Milestone, *Response, error) { - var ms []*Milestone - resp, err := s.search("milestones", query, &ms, opt, options...) - return ms, resp, err + return do[[]*Milestone](s.client, + withPath("search"), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "milestones", Search: query}), + withRequestOpts(options...), + ) } // MilestonesByGroup searches the expression within milestones for @@ -169,9 +182,11 @@ func (s *SearchService) Milestones(query string, opt *SearchOptions, options ... // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-milestones-1 func (s *SearchService) MilestonesByGroup(gid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Milestone, *Response, error) { - var ms []*Milestone - resp, err := s.searchByGroup(gid, "milestones", query, &ms, opt, options...) 
- return ms, resp, err + return do[[]*Milestone](s.client, + withPath("groups/%s/-/search", GroupID{gid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "milestones", Search: query}), + withRequestOpts(options...), + ) } // MilestonesByProject searches the expression within milestones for @@ -179,9 +194,11 @@ func (s *SearchService) MilestonesByGroup(gid any, query string, opt *SearchOpti // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-milestones-2 func (s *SearchService) MilestonesByProject(pid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Milestone, *Response, error) { - var ms []*Milestone - resp, err := s.searchByProject(pid, "milestones", query, &ms, opt, options...) - return ms, resp, err + return do[[]*Milestone](s.client, + withPath("projects/%s/-/search", ProjectID{pid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "milestones", Search: query}), + withRequestOpts(options...), + ) } // SnippetTitles searches the expression within snippet titles @@ -189,9 +206,11 @@ func (s *SearchService) MilestonesByProject(pid any, query string, opt *SearchOp // GitLab API docs: // https://docs.gitlab.com/api/search/#scope-snippet_titles func (s *SearchService) SnippetTitles(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) { - var ss []*Snippet - resp, err := s.search("snippet_titles", query, &ss, opt, options...) 
- return ss, resp, err + return do[[]*Snippet](s.client, + withPath("search"), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "snippet_titles", Search: query}), + withRequestOpts(options...), + ) } // NotesByProject searches the expression within notes for the specified @@ -199,9 +218,11 @@ func (s *SearchService) SnippetTitles(query string, opt *SearchOptions, options // // GitLab API docs: // https://docs.gitlab.com/api/search/#scope-notes func (s *SearchService) NotesByProject(pid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) { - var ns []*Note - resp, err := s.searchByProject(pid, "notes", query, &ns, opt, options...) - return ns, resp, err + return do[[]*Note](s.client, + withPath("projects/%s/-/search", ProjectID{pid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "notes", Search: query}), + withRequestOpts(options...), + ) } // WikiBlobs searches the expression within all wiki blobs @@ -209,9 +230,11 @@ func (s *SearchService) NotesByProject(pid any, query string, opt *SearchOptions // GitLab API docs: // https://docs.gitlab.com/api/search/#scope-wiki_blobs func (s *SearchService) WikiBlobs(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Wiki, *Response, error) { - var ws []*Wiki - resp, err := s.search("wiki_blobs", query, &ws, opt, options...) 
- return ws, resp, err + return do[[]*Wiki](s.client, + withPath("search"), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "wiki_blobs", Search: query}), + withRequestOpts(options...), + ) } // WikiBlobsByGroup searches the expression within wiki blobs for @@ -220,9 +243,11 @@ func (s *SearchService) WikiBlobs(query string, opt *SearchOptions, options ...R // GitLab API docs: // https://docs.gitlab.com/api/search/#scope-wiki_blobs-1 func (s *SearchService) WikiBlobsByGroup(gid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Wiki, *Response, error) { - var ws []*Wiki - resp, err := s.searchByGroup(gid, "wiki_blobs", query, &ws, opt, options...) - return ws, resp, err + return do[[]*Wiki](s.client, + withPath("groups/%s/-/search", GroupID{gid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "wiki_blobs", Search: query}), + withRequestOpts(options...), + ) } // WikiBlobsByProject searches the expression within wiki blobs for @@ -231,18 +256,22 @@ func (s *SearchService) WikiBlobsByGroup(gid any, query string, opt *SearchOptio // GitLab API docs: // https://docs.gitlab.com/api/search/#scope-wiki_blobs-2 func (s *SearchService) WikiBlobsByProject(pid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Wiki, *Response, error) { - var ws []*Wiki - resp, err := s.searchByProject(pid, "wiki_blobs", query, &ws, opt, options...) - return ws, resp, err + return do[[]*Wiki](s.client, + withPath("projects/%s/-/search", ProjectID{pid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "wiki_blobs", Search: query}), + withRequestOpts(options...), + ) } // Commits searches the expression within all commits // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-commits func (s *SearchService) Commits(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { - var cs []*Commit - resp, err := s.search("commits", query, &cs, opt, options...) 
- return cs, resp, err + return do[[]*Commit](s.client, + withPath("search"), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "commits", Search: query}), + withRequestOpts(options...), + ) } // CommitsByGroup searches the expression within commits for the specified @@ -250,9 +279,11 @@ func (s *SearchService) Commits(query string, opt *SearchOptions, options ...Req // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-commits-1 func (s *SearchService) CommitsByGroup(gid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { - var cs []*Commit - resp, err := s.searchByGroup(gid, "commits", query, &cs, opt, options...) - return cs, resp, err + return do[[]*Commit](s.client, + withPath("groups/%s/-/search", GroupID{gid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "commits", Search: query}), + withRequestOpts(options...), + ) } // CommitsByProject searches the expression within commits for the @@ -260,9 +291,11 @@ func (s *SearchService) CommitsByGroup(gid any, query string, opt *SearchOptions // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-commits-2 func (s *SearchService) CommitsByProject(pid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { - var cs []*Commit - resp, err := s.searchByProject(pid, "commits", query, &cs, opt, options...) - return cs, resp, err + return do[[]*Commit](s.client, + withPath("projects/%s/-/search", ProjectID{pid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "commits", Search: query}), + withRequestOpts(options...), + ) } // Blob represents a single blob. 
@@ -273,17 +306,19 @@ type Blob struct { Filename string `json:"filename"` ID string `json:"id"` Ref string `json:"ref"` - Startline int `json:"startline"` - ProjectID int `json:"project_id"` + Startline int64 `json:"startline"` + ProjectID int64 `json:"project_id"` } // Blobs searches the expression within all blobs // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-blobs func (s *SearchService) Blobs(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Blob, *Response, error) { - var bs []*Blob - resp, err := s.search("blobs", query, &bs, opt, options...) - return bs, resp, err + return do[[]*Blob](s.client, + withPath("search"), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "blobs", Search: query}), + withRequestOpts(options...), + ) } // BlobsByGroup searches the expression within blobs for the specified @@ -291,9 +326,11 @@ func (s *SearchService) Blobs(query string, opt *SearchOptions, options ...Reque // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-blobs-1 func (s *SearchService) BlobsByGroup(gid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Blob, *Response, error) { - var bs []*Blob - resp, err := s.searchByGroup(gid, "blobs", query, &bs, opt, options...) - return bs, resp, err + return do[[]*Blob](s.client, + withPath("groups/%s/-/search", GroupID{gid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "blobs", Search: query}), + withRequestOpts(options...), + ) } // BlobsByProject searches the expression within blobs for the specified @@ -301,18 +338,22 @@ func (s *SearchService) BlobsByGroup(gid any, query string, opt *SearchOptions, // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-blobs-2 func (s *SearchService) BlobsByProject(pid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Blob, *Response, error) { - var bs []*Blob - resp, err := s.searchByProject(pid, "blobs", query, &bs, opt, options...) 
- return bs, resp, err + return do[[]*Blob](s.client, + withPath("projects/%s/-/search", ProjectID{pid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "blobs", Search: query}), + withRequestOpts(options...), + ) } // Users searches the expression within all users // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-users func (s *SearchService) Users(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { - var ret []*User - resp, err := s.search("users", query, &ret, opt, options...) - return ret, resp, err + return do[[]*User](s.client, + withPath("search"), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "users", Search: query}), + withRequestOpts(options...), + ) } // UsersByGroup searches the expression within users for the specified @@ -323,9 +364,11 @@ func (s *SearchService) UsersByGroup(gid any, query string, opt *SearchOptions, if opt == nil { opt = &SearchOptions{} } - var ret []*User - resp, err := s.searchByGroup(gid, "users", query, &ret, opt, options...) - return ret, resp, err + return do[[]*User](s.client, + withPath("groups/%s/-/search", GroupID{gid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "users", Search: query}), + withRequestOpts(options...), + ) } // UsersByProject searches the expression within users for the @@ -333,52 +376,9 @@ func (s *SearchService) UsersByGroup(gid any, query string, opt *SearchOptions, // // GitLab API docs: https://docs.gitlab.com/api/search/#scope-users-2 func (s *SearchService) UsersByProject(pid any, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { - var ret []*User - resp, err := s.searchByProject(pid, "users", query, &ret, opt, options...) 
- return ret, resp, err -} - -func (s *SearchService) search(scope, query string, result any, opt *SearchOptions, options ...RequestOptionFunc) (*Response, error) { - opts := &searchOptions{SearchOptions: *opt, Scope: scope, Search: query} - - req, err := s.client.NewRequest(http.MethodGet, "search", opts, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, result) -} - -func (s *SearchService) searchByGroup(gid any, scope, query string, result any, opt *SearchOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/-/search", PathEscape(group)) - - opts := &searchOptions{SearchOptions: *opt, Scope: scope, Search: query} - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, result) -} - -func (s *SearchService) searchByProject(pid any, scope, query string, result any, opt *SearchOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/-/search", PathEscape(project)) - - opts := &searchOptions{SearchOptions: *opt, Scope: scope, Search: query} - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, result) + return do[[]*User](s.client, + withPath("projects/%s/-/search", ProjectID{pid}), + withAPIOpts(&searchOptions{SearchOptions: *opt, Scope: "users", Search: query}), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/secure_files.go b/vendor/gitlab.com/gitlab-org/api/client-go/secure_files.go index 13fc386cc4..1371333c50 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/secure_files.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/secure_files.go @@ -15,7 +15,6 @@ package gitlab import ( "bytes" - "fmt" "io" "net/http" 
"time" @@ -24,10 +23,10 @@ import ( type ( SecureFilesServiceInterface interface { ListProjectSecureFiles(pid any, opt *ListProjectSecureFilesOptions, options ...RequestOptionFunc) ([]*SecureFile, *Response, error) - ShowSecureFileDetails(pid any, id int, options ...RequestOptionFunc) (*SecureFile, *Response, error) + ShowSecureFileDetails(pid any, id int64, options ...RequestOptionFunc) (*SecureFile, *Response, error) CreateSecureFile(pid any, content io.Reader, opt *CreateSecureFileOptions, options ...RequestOptionFunc) (*SecureFile, *Response, error) - DownloadSecureFile(pid any, id int, options ...RequestOptionFunc) (io.Reader, *Response, error) - RemoveSecureFile(pid any, id int, options ...RequestOptionFunc) (*Response, error) + DownloadSecureFile(pid any, id int64, options ...RequestOptionFunc) (io.Reader, *Response, error) + RemoveSecureFile(pid any, id int64, options ...RequestOptionFunc) (*Response, error) } // SecureFilesService handles communication with the secure files related @@ -47,7 +46,7 @@ var _ SecureFilesServiceInterface = (*SecureFilesService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/secure_files/ type SecureFile struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Checksum string `json:"checksum"` ChecksumAlgorithm string `json:"checksum_algorithm"` @@ -103,55 +102,31 @@ func (f SecureFile) String() string { // // GitLab API docs: // https://docs.gitlab.com/api/secure_files/#list-project-secure-files -type ListProjectSecureFilesOptions ListOptions +type ListProjectSecureFilesOptions struct { + ListOptions +} // ListProjectSecureFiles gets a list of secure files in a project. 
// // GitLab API docs: // https://docs.gitlab.com/api/secure_files/#list-project-secure-files func (s SecureFilesService) ListProjectSecureFiles(pid any, opt *ListProjectSecureFilesOptions, options ...RequestOptionFunc) ([]*SecureFile, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/secure_files", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var files []*SecureFile - resp, err := s.client.Do(req, &files) - if err != nil { - return nil, resp, err - } - return files, resp, nil + return do[[]*SecureFile](s.client, + withPath("projects/%s/secure_files", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ShowSecureFileDetails gets the details of a specific secure file in a project. // // GitLab API docs: // https://docs.gitlab.com/api/secure_files/#show-secure-file-details -func (s SecureFilesService) ShowSecureFileDetails(pid any, id int, options ...RequestOptionFunc) (*SecureFile, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/secure_files/%d", PathEscape(project), id) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - file := new(SecureFile) - resp, err := s.client.Do(req, file) - if err != nil { - return nil, resp, err - } - - return file, resp, nil +func (s SecureFilesService) ShowSecureFileDetails(pid any, id int64, options ...RequestOptionFunc) (*SecureFile, *Response, error) { + return do[*SecureFile](s.client, + withPath("projects/%s/secure_files/%d", ProjectID{pid}, id), + withRequestOpts(options...), + ) } // CreateSecureFileOptions represents the available @@ -168,66 +143,39 @@ type CreateSecureFileOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/secure_files/#create-secure-file func (s 
SecureFilesService) CreateSecureFile(pid any, content io.Reader, opt *CreateSecureFileOptions, options ...RequestOptionFunc) (*SecureFile, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/secure_files", PathEscape(project)) - - req, err := s.client.UploadRequest(http.MethodPost, u, content, *opt.Name, UploadFile, opt, options) - if err != nil { - return nil, nil, err - } - - file := new(SecureFile) - resp, err := s.client.Do(req, file) - if err != nil { - return nil, resp, err - } - - return file, resp, nil + return do[*SecureFile](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/secure_files", ProjectID{pid}), + withUpload(content, *opt.Name, UploadFile), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DownloadSecureFile downloads the contents of a project's secure file. // // GitLab API docs: // https://docs.gitlab.com/api/secure_files/#download-secure-file -func (s SecureFilesService) DownloadSecureFile(pid any, id int, options ...RequestOptionFunc) (io.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/secure_files/%d/download", PathEscape(project), id) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var file bytes.Buffer - resp, err := s.client.Do(req, &file) +func (s SecureFilesService) DownloadSecureFile(pid any, id int64, options ...RequestOptionFunc) (io.Reader, *Response, error) { + buf, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/secure_files/%d/download", ProjectID{pid}, id), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return &file, resp, err + return &buf, resp, nil } // RemoveSecureFile removes a project's secure file. 
// // GitLab API docs: // https://docs.gitlab.com/api/secure_files/#remove-secure-file -func (s SecureFilesService) RemoveSecureFile(pid any, id int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/secure_files/%d", PathEscape(project), id) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s SecureFilesService) RemoveSecureFile(pid any, id int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/secure_files/%d", ProjectID{pid}, id), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/services.go b/vendor/gitlab.com/gitlab-org/api/client-go/services.go index 98ad3ed556..c8eeb0d70f 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/services.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/services.go @@ -59,6 +59,9 @@ type ( GetJiraService(pid any, options ...RequestOptionFunc) (*JiraService, *Response, error) SetJiraService(pid any, opt *SetJiraServiceOptions, options ...RequestOptionFunc) (*JiraService, *Response, error) DeleteJiraService(pid any, options ...RequestOptionFunc) (*Response, error) + GetMatrixService(pid any, options ...RequestOptionFunc) (*MatrixService, *Response, error) + SetMatrixService(pid any, opt *SetMatrixServiceOptions, options ...RequestOptionFunc) (*MatrixService, *Response, error) + DeleteMatrixService(pid any, options ...RequestOptionFunc) (*Response, error) GetMattermostService(pid any, options ...RequestOptionFunc) (*MattermostService, *Response, error) SetMattermostService(pid any, opt *SetMattermostServiceOptions, options ...RequestOptionFunc) (*MattermostService, *Response, error) DeleteMattermostService(pid any, options ...RequestOptionFunc) (*Response, error) @@ 
-108,24 +111,10 @@ type Service = Integration // // GitLab API docs: https://docs.gitlab.com/api/project_integrations/#list-all-active-integrations func (s *ServicesService) ListServices(pid any, options ...RequestOptionFunc) ([]*Service, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var svcs []*Service - resp, err := s.client.Do(req, &svcs) - if err != nil { - return nil, resp, err - } - - return svcs, resp, nil + return do[[]*Service](s.client, + withPath("projects/%s/services", ProjectID{pid}), + withRequestOpts(options...), + ) } // CustomIssueTrackerService represents Custom Issue Tracker service settings. @@ -152,24 +141,10 @@ type CustomIssueTrackerServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-custom-issue-tracker-settings func (s *ServicesService) GetCustomIssueTrackerService(pid any, options ...RequestOptionFunc) (*CustomIssueTrackerService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/custom-issue-tracker", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(CustomIssueTrackerService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*CustomIssueTrackerService](s.client, + withPath("projects/%s/services/custom-issue-tracker", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetCustomIssueTrackerServiceOptions represents the available SetCustomIssueTrackerService() @@ -188,24 +163,12 @@ type SetCustomIssueTrackerServiceOptions struct { // GitLab API docs: // 
https://docs.gitlab.com/api/project_integrations/#set-up-a-custom-issue-tracker func (s *ServicesService) SetCustomIssueTrackerService(pid any, opt *SetCustomIssueTrackerServiceOptions, options ...RequestOptionFunc) (*CustomIssueTrackerService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/custom-issue-tracker", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(CustomIssueTrackerService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*CustomIssueTrackerService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/custom-issue-tracker", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteCustomIssueTrackerService deletes Custom Issue Tracker service settings for a project. @@ -213,18 +176,12 @@ func (s *ServicesService) SetCustomIssueTrackerService(pid any, opt *SetCustomIs // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-a-custom-issue-tracker func (s *ServicesService) DeleteCustomIssueTrackerService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/custom-issue-tracker", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/custom-issue-tracker", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // DataDogService represents DataDog service settings. 
@@ -256,24 +213,10 @@ type DataDogServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-datadog-settings func (s *ServicesService) GetDataDogService(pid any, options ...RequestOptionFunc) (*DataDogService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/integrations/datadog", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(DataDogService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*DataDogService](s.client, + withPath("projects/%s/integrations/datadog", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetDataDogServiceOptions represents the available SetDataDogService() @@ -298,24 +241,12 @@ type SetDataDogServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-datadog func (s *ServicesService) SetDataDogService(pid any, opt *SetDataDogServiceOptions, options ...RequestOptionFunc) (*DataDogService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/integrations/datadog", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(DataDogService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*DataDogService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/integrations/datadog", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteDataDogService deletes the DataDog service settings for a project. 
@@ -323,18 +254,12 @@ func (s *ServicesService) SetDataDogService(pid any, opt *SetDataDogServiceOptio // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-datadog func (s *ServicesService) DeleteDataDogService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/integrations/datadog", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/integrations/datadog", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // DiscordService represents Discord service settings. @@ -360,24 +285,10 @@ type DiscordServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-discord-notifications-settings func (s *ServicesService) GetDiscordService(pid any, options ...RequestOptionFunc) (*DiscordService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/discord", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(DiscordService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*DiscordService](s.client, + withPath("projects/%s/services/discord", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetDiscordServiceOptions represents the available SetDiscordService() @@ -420,24 +331,12 @@ type SetDiscordServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-discord-notifications func (s *ServicesService) SetDiscordService(pid any, opt *SetDiscordServiceOptions, options 
...RequestOptionFunc) (*DiscordService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/discord", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(DiscordService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*DiscordService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/discord", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteDiscordService deletes Discord service settings for a project. @@ -445,18 +344,12 @@ func (s *ServicesService) SetDiscordService(pid any, opt *SetDiscordServiceOptio // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-discord-notifications func (s *ServicesService) DeleteDiscordService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/discord", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/discord", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // DroneCIService represents Drone CI service settings. 
@@ -482,24 +375,10 @@ type DroneCIServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-drone-settings func (s *ServicesService) GetDroneCIService(pid any, options ...RequestOptionFunc) (*DroneCIService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/drone-ci", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(DroneCIService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*DroneCIService](s.client, + withPath("projects/%s/services/drone-ci", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetDroneCIServiceOptions represents the available SetDroneCIService() @@ -521,24 +400,12 @@ type SetDroneCIServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-drone func (s *ServicesService) SetDroneCIService(pid any, opt *SetDroneCIServiceOptions, options ...RequestOptionFunc) (*DroneCIService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/drone-ci", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(DroneCIService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*DroneCIService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/drone-ci", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteDroneCIService deletes Drone CI service settings for a project. 
@@ -546,18 +413,12 @@ func (s *ServicesService) SetDroneCIService(pid any, opt *SetDroneCIServiceOptio // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-drone func (s *ServicesService) DeleteDroneCIService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/drone-ci", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/drone-ci", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // EmailsOnPushService represents Emails on Push service settings. @@ -577,9 +438,12 @@ type EmailsOnPushServiceProperties struct { Recipients string `json:"recipients"` DisableDiffs bool `json:"disable_diffs"` SendFromCommitterEmail bool `json:"send_from_committer_email"` - PushEvents bool `json:"push_events"` - TagPushEvents bool `json:"tag_push_events"` BranchesToBeNotified string `json:"branches_to_be_notified"` + + // Deprecated: to be removed in 2.0 - use EmailsOnPushService.PushEvents instead. + PushEvents bool `json:"push_events"` + // Deprecated: to be removed in 2.0 - use EmailsOnPushService.TagPushEvents instead. + TagPushEvents bool `json:"tag_push_events"` } // GetEmailsOnPushService gets Emails on Push service settings for a project. 
@@ -587,24 +451,10 @@ type EmailsOnPushServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-emails-on-push-settings func (s *ServicesService) GetEmailsOnPushService(pid any, options ...RequestOptionFunc) (*EmailsOnPushService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/integrations/emails-on-push", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(EmailsOnPushService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*EmailsOnPushService](s.client, + withPath("projects/%s/integrations/emails-on-push", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetEmailsOnPushServiceOptions represents the available SetEmailsOnPushService() @@ -626,24 +476,12 @@ type SetEmailsOnPushServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-emails-on-push func (s *ServicesService) SetEmailsOnPushService(pid any, opt *SetEmailsOnPushServiceOptions, options ...RequestOptionFunc) (*EmailsOnPushService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/integrations/emails-on-push", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(EmailsOnPushService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*EmailsOnPushService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/integrations/emails-on-push", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteEmailsOnPushService deletes Emails on Push service settings for a project. 
@@ -651,18 +489,12 @@ func (s *ServicesService) SetEmailsOnPushService(pid any, opt *SetEmailsOnPushSe // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-emails-on-push func (s *ServicesService) DeleteEmailsOnPushService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/integrations/emails-on-push", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/integrations/emails-on-push", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // ExternalWikiService represents External Wiki service settings. @@ -687,24 +519,10 @@ type ExternalWikiServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-external-wiki-settings func (s *ServicesService) GetExternalWikiService(pid any, options ...RequestOptionFunc) (*ExternalWikiService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/external-wiki", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(ExternalWikiService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*ExternalWikiService](s.client, + withPath("projects/%s/services/external-wiki", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetExternalWikiServiceOptions represents the available SetExternalWikiService() @@ -721,24 +539,12 @@ type SetExternalWikiServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-an-external-wiki func (s 
*ServicesService) SetExternalWikiService(pid any, opt *SetExternalWikiServiceOptions, options ...RequestOptionFunc) (*ExternalWikiService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/external-wiki", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(ExternalWikiService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*ExternalWikiService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/external-wiki", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteExternalWikiService deletes External Wiki service for project. @@ -746,18 +552,12 @@ func (s *ServicesService) SetExternalWikiService(pid any, opt *SetExternalWikiSe // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-an-external-wiki func (s *ServicesService) DeleteExternalWikiService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/external-wiki", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/external-wiki", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // GithubService represents Github service settings. 
@@ -783,24 +583,10 @@ type GithubServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-github-settings func (s *ServicesService) GetGithubService(pid any, options ...RequestOptionFunc) (*GithubService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/github", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(GithubService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*GithubService](s.client, + withPath("projects/%s/services/github", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetGithubServiceOptions represents the available SetGithubService() @@ -819,24 +605,12 @@ type SetGithubServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-github func (s *ServicesService) SetGithubService(pid any, opt *SetGithubServiceOptions, options ...RequestOptionFunc) (*GithubService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/github", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(GithubService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*GithubService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/github", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteGithubService deletes Github service for a project @@ -844,18 +618,12 @@ func (s *ServicesService) SetGithubService(pid any, opt *SetGithubServiceOptions // GitLab API docs: // 
https://docs.gitlab.com/api/project_integrations/#disable-github func (s *ServicesService) DeleteGithubService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/github", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/github", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // HarborService represents the Harbor service settings. @@ -884,24 +652,10 @@ type HarborServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-harbor-settings func (s *ServicesService) GetHarborService(pid any, options ...RequestOptionFunc) (*HarborService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/integrations/harbor", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(HarborService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*HarborService](s.client, + withPath("projects/%s/integrations/harbor", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetHarborServiceOptions represents the available SetHarborService() @@ -916,24 +670,12 @@ type SetHarborServiceOptions = SetUpHarborOptions // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-harbor func (s *ServicesService) SetHarborService(pid any, opt *SetHarborServiceOptions, options ...RequestOptionFunc) (*HarborService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := 
fmt.Sprintf("projects/%s/integrations/harbor", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(HarborService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*HarborService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/integrations/harbor", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteHarborService deletes Harbor service for a project. @@ -941,18 +683,12 @@ func (s *ServicesService) SetHarborService(pid any, opt *SetHarborServiceOptions // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-harbor func (s *ServicesService) DeleteHarborService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/integrations/harbor", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/integrations/harbor", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // SlackApplication represents GitLab for slack application settings. 
@@ -998,24 +734,10 @@ type SlackApplicationProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-gitlab-for-slack-app-settings func (s *ServicesService) GetSlackApplication(pid any, options ...RequestOptionFunc) (*SlackApplication, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/integrations/gitlab-slack-application", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(SlackApplication) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*SlackApplication](s.client, + withPath("projects/%s/integrations/gitlab-slack-application", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetSlackApplicationOptions represents the available SetSlackApplication() @@ -1066,24 +788,12 @@ type SetSlackApplicationOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-gitlab-for-slack-app func (s *ServicesService) SetSlackApplication(pid any, opt *SetSlackApplicationOptions, options ...RequestOptionFunc) (*SlackApplication, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/integrations/gitlab-slack-application", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(SlackApplication) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*SlackApplication](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/integrations/gitlab-slack-application", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DisableSlackApplication disable the GitLab for Slack app integration for a project. 
@@ -1091,18 +801,12 @@ func (s *ServicesService) SetSlackApplication(pid any, opt *SetSlackApplicationO // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-gitlab-for-slack-app func (s *ServicesService) DisableSlackApplication(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/integrations/gitlab-slack-application", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/integrations/gitlab-slack-application", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // JenkinsCIService represents Jenkins CI service settings. @@ -1130,24 +834,10 @@ type JenkinsCIServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-jenkins-settings func (s *ServicesService) GetJenkinsCIService(pid any, options ...RequestOptionFunc) (*JenkinsCIService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/jenkins", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(JenkinsCIService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*JenkinsCIService](s.client, + withPath("projects/%s/services/jenkins", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetJenkinsCIServiceOptions represents the available SetJenkinsCIService() @@ -1171,24 +861,12 @@ type SetJenkinsCIServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-jenkins func (s *ServicesService) SetJenkinsCIService(pid 
any, opt *SetJenkinsCIServiceOptions, options ...RequestOptionFunc) (*JenkinsCIService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/jenkins", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(JenkinsCIService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*JenkinsCIService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/jenkins", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteJenkinsCIService deletes Jenkins CI service for project. @@ -1196,18 +874,12 @@ func (s *ServicesService) SetJenkinsCIService(pid any, opt *SetJenkinsCIServiceO // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-jenkins func (s *ServicesService) DeleteJenkinsCIService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/jenkins", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/jenkins", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // JiraService represents Jira service settings. 
@@ -1229,7 +901,7 @@ type JiraServiceProperties struct { Username string `json:"username" ` Password string `json:"password" ` Active bool `json:"active"` - JiraAuthType int `json:"jira_auth_type"` + JiraAuthType int64 `json:"jira_auth_type"` JiraIssuePrefix string `json:"jira_issue_prefix"` JiraIssueRegex string `json:"jira_issue_regex"` JiraIssueTransitionAutomatic bool `json:"jira_issue_transition_automatic"` @@ -1264,7 +936,7 @@ func (p *JiraServiceProperties) UnmarshalJSON(b []byte) error { case string: p.JiraIssueTransitionID = id case float64: - p.JiraIssueTransitionID = strconv.Itoa(int(id)) + p.JiraIssueTransitionID = strconv.FormatInt(int64(id), 10) default: return fmt.Errorf("failed to unmarshal JiraTransitionID of type: %T", id) } @@ -1277,24 +949,10 @@ func (p *JiraServiceProperties) UnmarshalJSON(b []byte) error { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-jira-settings func (s *ServicesService) GetJiraService(pid any, options ...RequestOptionFunc) (*JiraService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(JiraService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*JiraService](s.client, + withPath("projects/%s/integrations/jira", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetJiraServiceOptions represents the available SetJiraService() @@ -1308,7 +966,7 @@ type SetJiraServiceOptions struct { Username *string `url:"username,omitempty" json:"username,omitempty" ` Password *string `url:"password,omitempty" json:"password,omitempty" ` Active *bool `url:"active,omitempty" json:"active,omitempty"` - JiraAuthType *int `url:"jira_auth_type,omitempty" json:"jira_auth_type,omitempty"` + 
JiraAuthType *int64 `url:"jira_auth_type,omitempty" json:"jira_auth_type,omitempty"` JiraIssuePrefix *string `url:"jira_issue_prefix,omitempty" json:"jira_issue_prefix,omitempty"` JiraIssueRegex *string `url:"jira_issue_regex,omitempty" json:"jira_issue_regex,omitempty"` JiraIssueTransitionAutomatic *bool `url:"jira_issue_transition_automatic,omitempty" json:"jira_issue_transition_automatic,omitempty"` @@ -1326,24 +984,12 @@ type SetJiraServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-jira-issues func (s *ServicesService) SetJiraService(pid any, opt *SetJiraServiceOptions, options ...RequestOptionFunc) (*JiraService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(JiraService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*JiraService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/integrations/jira", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteJiraService deletes Jira service for project. 
@@ -1351,18 +997,95 @@ func (s *ServicesService) SetJiraService(pid any, opt *SetJiraServiceOptions, op // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-jira func (s *ServicesService) DeleteJiraService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/integrations/jira", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err +} - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } +// MatrixService represents Matrix service settings. +// +// GitLab API docs: +// https://docs.gitlab.com/api/project_integrations/#matrix-notifications +type MatrixService struct { + Service + Properties *MatrixServiceProperties `json:"properties"` +} + +// MatrixServiceProperties represents Matrix specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/api/project_integrations/#matrix-notifications +type MatrixServiceProperties struct { + Hostname string `json:"hostname"` + Token string `json:"token"` + Room string `json:"room"` + NotifyOnlyBrokenPipelines BoolValue `json:"notify_only_broken_pipelines"` + BranchesToBeNotified string `json:"branches_to_be_notified"` + UseInheritedSettings BoolValue `json:"use_inherited_settings"` +} + +// GetMatrixService gets Matrix service settings for a project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/project_integrations/#get-matrix-notifications-settings +func (s *ServicesService) GetMatrixService(pid any, options ...RequestOptionFunc) (*MatrixService, *Response, error) { + return do[*MatrixService](s.client, + withMethod(http.MethodGet), + withPath("projects/%s/integrations/matrix", ProjectID{pid}), + withRequestOpts(options...), + ) +} + +// SetMatrixServiceOptions represents the available SetMatrixService() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/api/project_integrations/#set-up-matrix-notifications +type SetMatrixServiceOptions struct { + Hostname *string `url:"hostname,omitempty" json:"hostname,omitempty"` + Token *string `url:"token,omitempty" json:"token,omitempty"` + Room *string `url:"room,omitempty" json:"room,omitempty"` + NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" 
json:"use_inherited_settings,omitempty"` +} - return s.client.Do(req, nil) +// SetMatrixService sets Matrix service for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/api/project_integrations/#set-up-matrix-notifications +func (s *ServicesService) SetMatrixService(pid any, opt *SetMatrixServiceOptions, options ...RequestOptionFunc) (*MatrixService, *Response, error) { + return do[*MatrixService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/integrations/matrix", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} + +// DeleteMatrixService deletes Matrix service for project. +// +// GitLab API docs: +// https://docs.gitlab.com/api/project_integrations/#disable-matrix-notifications +func (s *ServicesService) DeleteMatrixService(pid any, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/integrations/matrix", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // MattermostService represents Mattermost service settings. 
@@ -1401,24 +1124,10 @@ type MattermostServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-mattermost-notifications-settings func (s *ServicesService) GetMattermostService(pid any, options ...RequestOptionFunc) (*MattermostService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/mattermost", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(MattermostService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*MattermostService](s.client, + withPath("projects/%s/services/mattermost", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetMattermostServiceOptions represents the available SetMattermostService() @@ -1457,24 +1166,12 @@ type SetMattermostServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-mattermost-notifications func (s *ServicesService) SetMattermostService(pid any, opt *SetMattermostServiceOptions, options ...RequestOptionFunc) (*MattermostService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/mattermost", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(MattermostService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*MattermostService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/mattermost", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteMattermostService deletes Mattermost service for project. 
@@ -1482,18 +1179,12 @@ func (s *ServicesService) SetMattermostService(pid any, opt *SetMattermostServic // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-mattermost-notifications func (s *ServicesService) DeleteMattermostService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/mattermost", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/mattermost", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // MattermostSlashCommandsService represents Mattermost slash commands settings. @@ -1519,24 +1210,10 @@ type MattermostSlashCommandsProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-mattermost-slash-commands-settings func (s *ServicesService) GetMattermostSlashCommandsService(pid any, options ...RequestOptionFunc) (*MattermostSlashCommandsService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/mattermost-slash-commands", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(MattermostSlashCommandsService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*MattermostSlashCommandsService](s.client, + withPath("projects/%s/services/mattermost-slash-commands", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetMattermostSlashCommandsServiceOptions represents the available SetSlackSlashCommandsService() @@ -1554,24 +1231,12 @@ type SetMattermostSlashCommandsServiceOptions 
struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-mattermost-slash-commands func (s *ServicesService) SetMattermostSlashCommandsService(pid any, opt *SetMattermostSlashCommandsServiceOptions, options ...RequestOptionFunc) (*MattermostSlashCommandsService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/mattermost-slash-commands", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(MattermostSlashCommandsService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*MattermostSlashCommandsService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/mattermost-slash-commands", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteMattermostSlashCommandsService deletes Mattermost slash commands service for project. @@ -1579,18 +1244,12 @@ func (s *ServicesService) SetMattermostSlashCommandsService(pid any, opt *SetMat // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-mattermost-slash-commands func (s *ServicesService) DeleteMattermostSlashCommandsService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/mattermost-slash-commands", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/mattermost-slash-commands", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // MicrosoftTeamsService represents Microsoft Teams service settings. 
@@ -1625,24 +1284,10 @@ type MicrosoftTeamsServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-microsoft-teams-notifications-settings func (s *ServicesService) GetMicrosoftTeamsService(pid any, options ...RequestOptionFunc) (*MicrosoftTeamsService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/microsoft-teams", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(MicrosoftTeamsService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*MicrosoftTeamsService](s.client, + withPath("projects/%s/services/microsoft-teams", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetMicrosoftTeamsServiceOptions represents the available SetMicrosoftTeamsService() @@ -1670,24 +1315,12 @@ type SetMicrosoftTeamsServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-microsoft-teams-notifications func (s *ServicesService) SetMicrosoftTeamsService(pid any, opt *SetMicrosoftTeamsServiceOptions, options ...RequestOptionFunc) (*MicrosoftTeamsService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/microsoft-teams", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(MicrosoftTeamsService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*MicrosoftTeamsService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/microsoft-teams", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteMicrosoftTeamsService deletes Microsoft 
Teams service for project. @@ -1695,18 +1328,12 @@ func (s *ServicesService) SetMicrosoftTeamsService(pid any, opt *SetMicrosoftTea // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-microsoft-teams-notifications func (s *ServicesService) DeleteMicrosoftTeamsService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/microsoft-teams", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/microsoft-teams", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // PipelinesEmailService represents Pipelines Email service settings. @@ -1734,24 +1361,10 @@ type PipelinesEmailProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-pipeline-status-emails-settings func (s *ServicesService) GetPipelinesEmailService(pid any, options ...RequestOptionFunc) (*PipelinesEmailService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/pipelines-email", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(PipelinesEmailService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*PipelinesEmailService](s.client, + withPath("projects/%s/services/pipelines-email", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetPipelinesEmailServiceOptions represents the available @@ -1773,24 +1386,12 @@ type SetPipelinesEmailServiceOptions struct { // GitLab API docs: // 
https://docs.gitlab.com/api/project_integrations/#set-up-pipeline-status-emails func (s *ServicesService) SetPipelinesEmailService(pid any, opt *SetPipelinesEmailServiceOptions, options ...RequestOptionFunc) (*PipelinesEmailService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/pipelines-email", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(PipelinesEmailService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*PipelinesEmailService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/pipelines-email", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeletePipelinesEmailService deletes Pipelines Email service settings for a project. @@ -1798,18 +1399,12 @@ func (s *ServicesService) SetPipelinesEmailService(pid any, opt *SetPipelinesEma // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-pipeline-status-emails func (s *ServicesService) DeletePipelinesEmailService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/pipelines-email", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/pipelines-email", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // RedmineService represents the Redmine service settings. 
@@ -1837,24 +1432,10 @@ type RedmineServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-redmine-settings func (s *ServicesService) GetRedmineService(pid any, options ...RequestOptionFunc) (*RedmineService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/integrations/redmine", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(RedmineService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*RedmineService](s.client, + withPath("projects/%s/integrations/redmine", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetRedmineServiceOptions represents the available SetRedmineService(). @@ -1874,24 +1455,12 @@ type SetRedmineServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-redmine func (s *ServicesService) SetRedmineService(pid any, opt *SetRedmineServiceOptions, options ...RequestOptionFunc) (*RedmineService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/integrations/redmine", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(RedmineService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*RedmineService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/integrations/redmine", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteRedmineService deletes Redmine service for project. 
@@ -1899,18 +1468,12 @@ func (s *ServicesService) SetRedmineService(pid any, opt *SetRedmineServiceOptio // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-redmine func (s *ServicesService) DeleteRedmineService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/integrations/redmine", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/integrations/redmine", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // SlackService represents Slack service settings. @@ -1952,24 +1515,10 @@ type SlackServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-slack-notifications-settings func (s *ServicesService) GetSlackService(pid any, options ...RequestOptionFunc) (*SlackService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/slack", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(SlackService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*SlackService](s.client, + withPath("projects/%s/services/slack", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetSlackServiceOptions represents the available SetSlackService() @@ -2013,24 +1562,12 @@ type SetSlackServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-slack-notifications func (s *ServicesService) SetSlackService(pid any, opt *SetSlackServiceOptions, options ...RequestOptionFunc) 
(*SlackService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/slack", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(SlackService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*SlackService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/slack", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteSlackService deletes Slack service for project. @@ -2038,18 +1575,12 @@ func (s *ServicesService) SetSlackService(pid any, opt *SetSlackServiceOptions, // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-slack-notifications func (s *ServicesService) DeleteSlackService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/slack", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/slack", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // SlackSlashCommandsService represents Slack slash commands settings. 
@@ -2074,24 +1605,10 @@ type SlackSlashCommandsProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-slack-slash-commands-settings func (s *ServicesService) GetSlackSlashCommandsService(pid any, options ...RequestOptionFunc) (*SlackSlashCommandsService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/slack-slash-commands", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(SlackSlashCommandsService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*SlackSlashCommandsService](s.client, + withPath("projects/%s/services/slack-slash-commands", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetSlackSlashCommandsServiceOptions represents the available SetSlackSlashCommandsService() @@ -2108,24 +1625,12 @@ type SetSlackSlashCommandsServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-slack-slash-commands func (s *ServicesService) SetSlackSlashCommandsService(pid any, opt *SetSlackSlashCommandsServiceOptions, options ...RequestOptionFunc) (*SlackSlashCommandsService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/slack-slash-commands", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(SlackSlashCommandsService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*SlackSlashCommandsService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/slack-slash-commands", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // 
DeleteSlackSlashCommandsService deletes Slack slash commands service for project. @@ -2133,23 +1638,17 @@ func (s *ServicesService) SetSlackSlashCommandsService(pid any, opt *SetSlackSla // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-slack-slash-commands func (s *ServicesService) DeleteSlackSlashCommandsService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/slack-slash-commands", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/slack-slash-commands", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // TelegramService represents Telegram service settings. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#telegram type TelegramService struct { Service @@ -2171,24 +1670,10 @@ type TelegramServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-telegram-settings func (s *ServicesService) GetTelegramService(pid any, options ...RequestOptionFunc) (*TelegramService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/telegram", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(TelegramService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*TelegramService](s.client, + withPath("projects/%s/services/telegram", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetTelegramServiceOptions represents the available 
SetTelegramService() @@ -2217,24 +1702,12 @@ type SetTelegramServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-telegram func (s *ServicesService) SetTelegramService(pid any, opt *SetTelegramServiceOptions, options ...RequestOptionFunc) (*TelegramService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/telegram", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - svc := new(TelegramService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*TelegramService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/telegram", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteTelegramService deletes Telegram service for project. @@ -2242,18 +1715,12 @@ func (s *ServicesService) SetTelegramService(pid any, opt *SetTelegramServiceOpt // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-telegram func (s *ServicesService) DeleteTelegramService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/telegram", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/telegram", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } // YouTrackService represents YouTrack service settings. 
@@ -2281,24 +1748,10 @@ type YouTrackServiceProperties struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#get-youtrack-settings func (s *ServicesService) GetYouTrackService(pid any, options ...RequestOptionFunc) (*YouTrackService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/youtrack", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(YouTrackService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil + return do[*YouTrackService](s.client, + withPath("projects/%s/services/youtrack", ProjectID{pid}), + withRequestOpts(options...), + ) } // SetYouTrackServiceOptions represents the available SetYouTrackService() @@ -2318,24 +1771,12 @@ type SetYouTrackServiceOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#set-up-youtrack func (s *ServicesService) SetYouTrackService(pid any, opt *SetYouTrackServiceOptions, options ...RequestOptionFunc) (*YouTrackService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/youtrack", PathEscape(project)) - - svc := new(YouTrackService) - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, nil, err - } - - return svc, resp, nil + return do[*YouTrackService](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/services/youtrack", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // DeleteYouTrackService deletes YouTrack service settings for a project. 
@@ -2343,16 +1784,10 @@ func (s *ServicesService) SetYouTrackService(pid any, opt *SetYouTrackServiceOpt // GitLab API docs: // https://docs.gitlab.com/api/project_integrations/#disable-youtrack func (s *ServicesService) DeleteYouTrackService(pid any, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/youtrack", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/services/youtrack", ProjectID{pid}), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/settings.go b/vendor/gitlab.com/gitlab-org/api/client-go/settings.go index 9e79816953..03f16d35bc 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/settings.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/settings.go @@ -52,7 +52,7 @@ var _ SettingsServiceInterface = (*SettingsService)(nil) // https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/lib/ee/api/helpers/settings_helpers.rb#L10 // https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/app/helpers/ee/application_settings_helper.rb#L20 type Settings struct { - ID int `json:"id"` + ID int64 `json:"id"` AbuseNotificationEmail string `json:"abuse_notification_email"` AdminMode bool `json:"admin_mode"` AfterSignOutPath string `json:"after_sign_out_path"` @@ -67,73 +67,74 @@ type Settings struct { AllowLocalRequestsFromWebHooksAndServices bool `json:"allow_local_requests_from_web_hooks_and_services"` AllowProjectCreationForGuestAndBelow bool `json:"allow_project_creation_for_guest_and_below"` AllowRunnerRegistrationToken bool `json:"allow_runner_registration_token"` + AnonymousSearchesAllowed bool `json:"anonymous_searches_allowed"` ArchiveBuildsInHumanReadable string 
`json:"archive_builds_in_human_readable"` - ASCIIDocMaxIncludes int `json:"asciidoc_max_includes"` + ASCIIDocMaxIncludes int64 `json:"asciidoc_max_includes"` AssetProxyAllowlist []string `json:"asset_proxy_allowlist"` AssetProxyEnabled bool `json:"asset_proxy_enabled"` AssetProxyURL string `json:"asset_proxy_url"` AssetProxySecretKey string `json:"asset_proxy_secret_key"` AuthorizedKeysEnabled bool `json:"authorized_keys_enabled"` AutoBanUserOnExcessiveProjectsDownload bool `json:"auto_ban_user_on_excessive_projects_download"` - AutocompleteUsers int `json:"autocomplete_users"` - AutocompleteUsersUnauthenticated int `json:"autocomplete_users_unauthenticated"` + AutocompleteUsers int64 `json:"autocomplete_users"` + AutocompleteUsersUnauthenticated int64 `json:"autocomplete_users_unauthenticated"` AutoDevOpsDomain string `json:"auto_devops_domain"` AutoDevOpsEnabled bool `json:"auto_devops_enabled"` AutomaticPurchasedStorageAllocation bool `json:"automatic_purchased_storage_allocation"` - BulkImportConcurrentPipelineBatchLimit int `json:"bulk_import_concurrent_pipeline_batch_limit"` + BulkImportConcurrentPipelineBatchLimit int64 `json:"bulk_import_concurrent_pipeline_batch_limit"` BulkImportEnabled bool `json:"bulk_import_enabled"` - BulkImportMaxDownloadFileSize int `json:"bulk_import_max_download_file_size"` + BulkImportMaxDownloadFileSize int64 `json:"bulk_import_max_download_file_size"` CanCreateGroup bool `json:"can_create_group"` CheckNamespacePlan bool `json:"check_namespace_plan"` CIJobLiveTraceEnabled bool `json:"ci_job_live_trace_enabled"` - CIMaxIncludes int `json:"ci_max_includes"` - CIMaxTotalYAMLSizeBytes int `json:"ci_max_total_yaml_size_bytes"` - CIPartitionsSizeLimit int `json:"ci_partitions_size_limit"` + CIMaxIncludes int64 `json:"ci_max_includes"` + CIMaxTotalYAMLSizeBytes int64 `json:"ci_max_total_yaml_size_bytes"` + CIPartitionsSizeLimit int64 `json:"ci_partitions_size_limit"` CommitEmailHostname string `json:"commit_email_hostname"` - 
ConcurrentBitbucketImportJobsLimit int `json:"concurrent_bitbucket_import_jobs_limit"` - ConcurrentBitbucketServerImportJobsLimit int `json:"concurrent_bitbucket_server_import_jobs_limit"` - ConcurrentGitHubImportJobsLimit int `json:"concurrent_github_import_jobs_limit"` + ConcurrentBitbucketImportJobsLimit int64 `json:"concurrent_bitbucket_import_jobs_limit"` + ConcurrentBitbucketServerImportJobsLimit int64 `json:"concurrent_bitbucket_server_import_jobs_limit"` + ConcurrentGitHubImportJobsLimit int64 `json:"concurrent_github_import_jobs_limit"` ContainerExpirationPoliciesEnableHistoricEntries bool `json:"container_expiration_policies_enable_historic_entries"` - ContainerRegistryCleanupTagsServiceMaxListSize int `json:"container_registry_cleanup_tags_service_max_list_size"` - ContainerRegistryDeleteTagsServiceTimeout int `json:"container_registry_delete_tags_service_timeout"` + ContainerRegistryCleanupTagsServiceMaxListSize int64 `json:"container_registry_cleanup_tags_service_max_list_size"` + ContainerRegistryDeleteTagsServiceTimeout int64 `json:"container_registry_delete_tags_service_timeout"` ContainerRegistryExpirationPoliciesCaching bool `json:"container_registry_expiration_policies_caching"` - ContainerRegistryExpirationPoliciesWorkerCapacity int `json:"container_registry_expiration_policies_worker_capacity"` + ContainerRegistryExpirationPoliciesWorkerCapacity int64 `json:"container_registry_expiration_policies_worker_capacity"` ContainerRegistryImportCreatedBefore *time.Time `json:"container_registry_import_created_before"` - ContainerRegistryImportMaxRetries int `json:"container_registry_import_max_retries"` - ContainerRegistryImportMaxStepDuration int `json:"container_registry_import_max_step_duration"` - ContainerRegistryImportMaxTagsCount int `json:"container_registry_import_max_tags_count"` - ContainerRegistryImportStartMaxRetries int `json:"container_registry_import_start_max_retries"` + ContainerRegistryImportMaxRetries int64 
`json:"container_registry_import_max_retries"` + ContainerRegistryImportMaxStepDuration int64 `json:"container_registry_import_max_step_duration"` + ContainerRegistryImportMaxTagsCount int64 `json:"container_registry_import_max_tags_count"` + ContainerRegistryImportStartMaxRetries int64 `json:"container_registry_import_start_max_retries"` ContainerRegistryImportTargetPlan string `json:"container_registry_import_target_plan"` - ContainerRegistryTokenExpireDelay int `json:"container_registry_token_expire_delay"` + ContainerRegistryTokenExpireDelay int64 `json:"container_registry_token_expire_delay"` CreatedAt *time.Time `json:"created_at"` CustomHTTPCloneURLRoot string `json:"custom_http_clone_url_root"` DNSRebindingProtectionEnabled bool `json:"dns_rebinding_protection_enabled"` - DSAKeyRestriction int `json:"dsa_key_restriction"` + DSAKeyRestriction int64 `json:"dsa_key_restriction"` DeactivateDormantUsers bool `json:"deactivate_dormant_users"` - DeactivateDormantUsersPeriod int `json:"deactivate_dormant_users_period"` - DecompressArchiveFileTimeout int `json:"decompress_archive_file_timeout"` + DeactivateDormantUsersPeriod int64 `json:"deactivate_dormant_users_period"` + DecompressArchiveFileTimeout int64 `json:"decompress_archive_file_timeout"` DefaultArtifactsExpireIn string `json:"default_artifacts_expire_in"` DefaultBranchName string `json:"default_branch_name"` DefaultBranchProtectionDefaults *BranchProtectionDefaults `json:"default_branch_protection_defaults,omitempty"` DefaultCiConfigPath string `json:"default_ci_config_path"` DefaultGroupVisibility VisibilityValue `json:"default_group_visibility"` DefaultPreferredLanguage string `json:"default_preferred_language"` - DefaultProjectCreation int `json:"default_project_creation"` + DefaultProjectCreation int64 `json:"default_project_creation"` DefaultProjectDeletionProtection bool `json:"default_project_deletion_protection"` DefaultProjectVisibility VisibilityValue `json:"default_project_visibility"` - 
DefaultProjectsLimit int `json:"default_projects_limit"` + DefaultProjectsLimit int64 `json:"default_projects_limit"` DefaultSnippetVisibility VisibilityValue `json:"default_snippet_visibility"` - DefaultSyntaxHighlightingTheme int `json:"default_syntax_highlighting_theme"` + DefaultSyntaxHighlightingTheme int64 `json:"default_syntax_highlighting_theme"` DelayedGroupDeletion bool `json:"delayed_group_deletion"` DelayedProjectDeletion bool `json:"delayed_project_deletion"` DeleteInactiveProjects bool `json:"delete_inactive_projects"` DeleteUnconfirmedUsers bool `json:"delete_unconfirmed_users"` - DeletionAdjournedPeriod int `json:"deletion_adjourned_period"` + DeletionAdjournedPeriod int64 `json:"deletion_adjourned_period"` DiagramsnetEnabled bool `json:"diagramsnet_enabled"` DiagramsnetURL string `json:"diagramsnet_url"` - DiffMaxFiles int `json:"diff_max_files"` - DiffMaxLines int `json:"diff_max_lines"` - DiffMaxPatchBytes int `json:"diff_max_patch_bytes"` + DiffMaxFiles int64 `json:"diff_max_files"` + DiffMaxLines int64 `json:"diff_max_lines"` + DiffMaxPatchBytes int64 `json:"diff_max_patch_bytes"` DisableAdminOAuthScopes bool `json:"disable_admin_oauth_scopes"` DisableFeedToken bool `json:"disable_feed_token"` DisableOverridingApproversPerMergeRequest bool `json:"disable_overriding_approvers_per_merge_request"` @@ -142,16 +143,16 @@ type Settings struct { DomainAllowlist []string `json:"domain_allowlist"` DomainDenylist []string `json:"domain_denylist"` DomainDenylistEnabled bool `json:"domain_denylist_enabled"` - DownstreamPipelineTriggerLimitPerProjectUserSHA int `json:"downstream_pipeline_trigger_limit_per_project_user_sha"` + DownstreamPipelineTriggerLimitPerProjectUserSHA int64 `json:"downstream_pipeline_trigger_limit_per_project_user_sha"` DuoFeaturesEnabled bool `json:"duo_features_enabled"` - ECDSAKeyRestriction int `json:"ecdsa_key_restriction"` - ECDSASKKeyRestriction int `json:"ecdsa_sk_key_restriction"` + ECDSAKeyRestriction int64 
`json:"ecdsa_key_restriction"` + ECDSASKKeyRestriction int64 `json:"ecdsa_sk_key_restriction"` EKSAccessKeyID string `json:"eks_access_key_id"` EKSAccountID string `json:"eks_account_id"` EKSIntegrationEnabled bool `json:"eks_integration_enabled"` EKSSecretAccessKey string `json:"eks_secret_access_key"` - Ed25519KeyRestriction int `json:"ed25519_key_restriction"` - Ed25519SKKeyRestriction int `json:"ed25519_sk_key_restriction"` + Ed25519KeyRestriction int64 `json:"ed25519_key_restriction"` + Ed25519SKKeyRestriction int64 `json:"ed25519_sk_key_restriction"` ElasticsearchAWS bool `json:"elasticsearch_aws"` ElasticsearchAWSAccessKey string `json:"elasticsearch_aws_access_key"` ElasticsearchAWSRegion string `json:"elasticsearch_aws_region"` @@ -160,26 +161,26 @@ type Settings struct { ElasticsearchAnalyzersKuromojiSearch bool `json:"elasticsearch_analyzers_kuromoji_search"` ElasticsearchAnalyzersSmartCNEnabled bool `json:"elasticsearch_analyzers_smartcn_enabled"` ElasticsearchAnalyzersSmartCNSearch bool `json:"elasticsearch_analyzers_smartcn_search"` - ElasticsearchClientRequestTimeout int `json:"elasticsearch_client_request_timeout"` - ElasticsearchIndexedFieldLengthLimit int `json:"elasticsearch_indexed_field_length_limit"` - ElasticsearchIndexedFileSizeLimitKB int `json:"elasticsearch_indexed_file_size_limit_kb"` + ElasticsearchClientRequestTimeout int64 `json:"elasticsearch_client_request_timeout"` + ElasticsearchIndexedFieldLengthLimit int64 `json:"elasticsearch_indexed_field_length_limit"` + ElasticsearchIndexedFileSizeLimitKB int64 `json:"elasticsearch_indexed_file_size_limit_kb"` ElasticsearchIndexing bool `json:"elasticsearch_indexing"` ElasticsearchLimitIndexing bool `json:"elasticsearch_limit_indexing"` - ElasticsearchMaxBulkConcurrency int `json:"elasticsearch_max_bulk_concurrency"` - ElasticsearchMaxBulkSizeMB int `json:"elasticsearch_max_bulk_size_mb"` - ElasticsearchMaxCodeIndexingConcurrency int `json:"elasticsearch_max_code_indexing_concurrency"` - 
ElasticsearchNamespaceIDs []int `json:"elasticsearch_namespace_ids"` + ElasticsearchMaxBulkConcurrency int64 `json:"elasticsearch_max_bulk_concurrency"` + ElasticsearchMaxBulkSizeMB int64 `json:"elasticsearch_max_bulk_size_mb"` + ElasticsearchMaxCodeIndexingConcurrency int64 `json:"elasticsearch_max_code_indexing_concurrency"` + ElasticsearchNamespaceIDs []int64 `json:"elasticsearch_namespace_ids"` ElasticsearchPassword string `json:"elasticsearch_password"` ElasticsearchPauseIndexing bool `json:"elasticsearch_pause_indexing"` - ElasticsearchProjectIDs []int `json:"elasticsearch_project_ids"` - ElasticsearchReplicas int `json:"elasticsearch_replicas"` + ElasticsearchProjectIDs []int64 `json:"elasticsearch_project_ids"` + ElasticsearchReplicas int64 `json:"elasticsearch_replicas"` ElasticsearchRequeueWorkers bool `json:"elasticsearch_requeue_workers"` - ElasticsearchRetryOnFailure int `json:"elasticsearch_retry_on_failure"` + ElasticsearchRetryOnFailure int64 `json:"elasticsearch_retry_on_failure"` ElasticsearchSearch bool `json:"elasticsearch_search"` - ElasticsearchShards int `json:"elasticsearch_shards"` + ElasticsearchShards int64 `json:"elasticsearch_shards"` ElasticsearchURL []string `json:"elasticsearch_url"` ElasticsearchUsername string `json:"elasticsearch_username"` - ElasticsearchWorkerNumberOfShards int `json:"elasticsearch_worker_number_of_shards"` + ElasticsearchWorkerNumberOfShards int64 `json:"elasticsearch_worker_number_of_shards"` EmailAdditionalText string `json:"email_additional_text"` EmailAuthorInBody bool `json:"email_author_in_body"` EmailConfirmationSetting string `json:"email_confirmation_setting"` @@ -199,23 +200,23 @@ type Settings struct { ExternalAuthorizationServiceEnabled bool `json:"external_authorization_service_enabled"` ExternalAuthorizationServiceTimeout float64 `json:"external_authorization_service_timeout"` ExternalAuthorizationServiceURL string `json:"external_authorization_service_url"` - 
ExternalPipelineValidationServiceTimeout int `json:"external_pipeline_validation_service_timeout"` + ExternalPipelineValidationServiceTimeout int64 `json:"external_pipeline_validation_service_timeout"` ExternalPipelineValidationServiceToken string `json:"external_pipeline_validation_service_token"` ExternalPipelineValidationServiceURL string `json:"external_pipeline_validation_service_url"` - FailedLoginAttemptsUnlockPeriodInMinutes int `json:"failed_login_attempts_unlock_period_in_minutes"` - FileTemplateProjectID int `json:"file_template_project_id"` - FirstDayOfWeek int `json:"first_day_of_week"` + FailedLoginAttemptsUnlockPeriodInMinutes int64 `json:"failed_login_attempts_unlock_period_in_minutes"` + FileTemplateProjectID int64 `json:"file_template_project_id"` + FirstDayOfWeek int64 `json:"first_day_of_week"` FlocEnabled bool `json:"floc_enabled"` GeoNodeAllowedIPs string `json:"geo_node_allowed_ips"` - GeoStatusTimeout int `json:"geo_status_timeout"` - GitRateLimitUsersAlertlist []int `json:"git_rate_limit_users_alertlist"` - GitTwoFactorSessionExpiry int `json:"git_two_factor_session_expiry"` - GitalyTimeoutDefault int `json:"gitaly_timeout_default"` - GitalyTimeoutFast int `json:"gitaly_timeout_fast"` - GitalyTimeoutMedium int `json:"gitaly_timeout_medium"` + GeoStatusTimeout int64 `json:"geo_status_timeout"` + GitRateLimitUsersAlertlist []int64 `json:"git_rate_limit_users_alertlist"` + GitTwoFactorSessionExpiry int64 `json:"git_two_factor_session_expiry"` + GitalyTimeoutDefault int64 `json:"gitaly_timeout_default"` + GitalyTimeoutFast int64 `json:"gitaly_timeout_fast"` + GitalyTimeoutMedium int64 `json:"gitaly_timeout_medium"` GitlabDedicatedInstance bool `json:"gitlab_dedicated_instance"` GitlabEnvironmentToolkitInstance bool `json:"gitlab_environment_toolkit_instance"` - GitlabShellOperationLimit int `json:"gitlab_shell_operation_limit"` + GitlabShellOperationLimit int64 `json:"gitlab_shell_operation_limit"` GitpodEnabled bool `json:"gitpod_enabled"` 
GitpodURL string `json:"gitpod_url"` GitRateLimitUsersAllowlist []string `json:"git_rate_limit_users_allowlist"` @@ -223,11 +224,11 @@ type Settings struct { GrafanaEnabled bool `json:"grafana_enabled"` GrafanaURL string `json:"grafana_url"` GravatarEnabled bool `json:"gravatar_enabled"` - GroupDownloadExportLimit int `json:"group_download_export_limit"` - GroupExportLimit int `json:"group_export_limit"` - GroupImportLimit int `json:"group_import_limit"` + GroupDownloadExportLimit int64 `json:"group_download_export_limit"` + GroupExportLimit int64 `json:"group_export_limit"` + GroupImportLimit int64 `json:"group_import_limit"` GroupOwnersCanManageDefaultBranchProtection bool `json:"group_owners_can_manage_default_branch_protection"` - GroupRunnerTokenExpirationInterval int `json:"group_runner_token_expiration_interval"` + GroupRunnerTokenExpirationInterval int64 `json:"group_runner_token_expiration_interval"` HTMLEmailsEnabled bool `json:"html_emails_enabled"` HashedStorageEnabled bool `json:"hashed_storage_enabled"` HelpPageDocumentationBaseURL string `json:"help_page_documentation_base_url"` @@ -238,15 +239,16 @@ type Settings struct { HideThirdPartyOffers bool `json:"hide_third_party_offers"` HomePageURL string `json:"home_page_url"` HousekeepingEnabled bool `json:"housekeeping_enabled"` - HousekeepingOptimizeRepositoryPeriod int `json:"housekeeping_optimize_repository_period"` + HousekeepingOptimizeRepositoryPeriod int64 `json:"housekeeping_optimize_repository_period"` ImportSources []string `json:"import_sources"` - InactiveProjectsDeleteAfterMonths int `json:"inactive_projects_delete_after_months"` - InactiveProjectsMinSizeMB int `json:"inactive_projects_min_size_mb"` - InactiveProjectsSendWarningEmailAfterMonths int `json:"inactive_projects_send_warning_email_after_months"` + InactiveProjectsDeleteAfterMonths int64 `json:"inactive_projects_delete_after_months"` + InactiveProjectsMinSizeMB int64 `json:"inactive_projects_min_size_mb"` + 
InactiveProjectsSendWarningEmailAfterMonths int64 `json:"inactive_projects_send_warning_email_after_months"` + InactiveResourceAccessTokensDeleteAfterDays int64 `json:"inactive_resource_access_tokens_delete_after_days"` IncludeOptionalMetricsInServicePing bool `json:"include_optional_metrics_in_service_ping"` InProductMarketingEmailsEnabled bool `json:"in_product_marketing_emails_enabled"` InvisibleCaptchaEnabled bool `json:"invisible_captcha_enabled"` - IssuesCreateLimit int `json:"issues_create_limit"` + IssuesCreateLimit int64 `json:"issues_create_limit"` JiraConnectApplicationKey string `json:"jira_connect_application_key"` JiraConnectPublicKeyStorageEnabled bool `json:"jira_connect_public_key_storage_enabled"` JiraConnectProxyURL string `json:"jira_connect_proxy_url"` @@ -254,7 +256,7 @@ type Settings struct { KrokiEnabled bool `json:"kroki_enabled"` KrokiFormats map[string]bool `json:"kroki_formats"` KrokiURL string `json:"kroki_url"` - LocalMarkdownVersion int `json:"local_markdown_version"` + LocalMarkdownVersion int64 `json:"local_markdown_version"` LockDuoFeaturesEnabled bool `json:"lock_duo_features_enabled"` LockMembershipsToLDAP bool `json:"lock_memberships_to_ldap"` LoginRecaptchaProtectionEnabled bool `json:"login_recaptcha_protection_enabled"` @@ -263,36 +265,36 @@ type Settings struct { MaintenanceMode bool `json:"maintenance_mode"` MaintenanceModeMessage string `json:"maintenance_mode_message"` MavenPackageRequestsForwarding bool `json:"maven_package_requests_forwarding"` - MaxArtifactsSize int `json:"max_artifacts_size"` - MaxAttachmentSize int `json:"max_attachment_size"` - MaxDecompressedArchiveSize int `json:"max_decompressed_archive_size"` - MaxExportSize int `json:"max_export_size"` - MaxImportRemoteFileSize int `json:"max_import_remote_file_size"` - MaxImportSize int `json:"max_import_size"` - MaxLoginAttempts int `json:"max_login_attempts"` - MaxNumberOfRepositoryDownloads int `json:"max_number_of_repository_downloads"` - 
MaxNumberOfRepositoryDownloadsWithinTimePeriod int `json:"max_number_of_repository_downloads_within_time_period"` - MaxPagesSize int `json:"max_pages_size"` - MaxPersonalAccessTokenLifetime int `json:"max_personal_access_token_lifetime"` - MaxSSHKeyLifetime int `json:"max_ssh_key_lifetime"` - MaxTerraformStateSizeBytes int `json:"max_terraform_state_size_bytes"` - MaxYAMLDepth int `json:"max_yaml_depth"` - MaxYAMLSizeBytes int `json:"max_yaml_size_bytes"` - MetricsMethodCallThreshold int `json:"metrics_method_call_threshold"` - MinimumPasswordLength int `json:"minimum_password_length"` + MaxArtifactsSize int64 `json:"max_artifacts_size"` + MaxAttachmentSize int64 `json:"max_attachment_size"` + MaxDecompressedArchiveSize int64 `json:"max_decompressed_archive_size"` + MaxExportSize int64 `json:"max_export_size"` + MaxImportRemoteFileSize int64 `json:"max_import_remote_file_size"` + MaxImportSize int64 `json:"max_import_size"` + MaxLoginAttempts int64 `json:"max_login_attempts"` + MaxNumberOfRepositoryDownloads int64 `json:"max_number_of_repository_downloads"` + MaxNumberOfRepositoryDownloadsWithinTimePeriod int64 `json:"max_number_of_repository_downloads_within_time_period"` + MaxPagesSize int64 `json:"max_pages_size"` + MaxPersonalAccessTokenLifetime int64 `json:"max_personal_access_token_lifetime"` + MaxSSHKeyLifetime int64 `json:"max_ssh_key_lifetime"` + MaxTerraformStateSizeBytes int64 `json:"max_terraform_state_size_bytes"` + MaxYAMLDepth int64 `json:"max_yaml_depth"` + MaxYAMLSizeBytes int64 `json:"max_yaml_size_bytes"` + MetricsMethodCallThreshold int64 `json:"metrics_method_call_threshold"` + MinimumPasswordLength int64 `json:"minimum_password_length"` MirrorAvailable bool `json:"mirror_available"` - MirrorCapacityThreshold int `json:"mirror_capacity_threshold"` - MirrorMaxCapacity int `json:"mirror_max_capacity"` - MirrorMaxDelay int `json:"mirror_max_delay"` + MirrorCapacityThreshold int64 `json:"mirror_capacity_threshold"` + MirrorMaxCapacity int64 
`json:"mirror_max_capacity"` + MirrorMaxDelay int64 `json:"mirror_max_delay"` NPMPackageRequestsForwarding bool `json:"npm_package_requests_forwarding"` - NotesCreateLimit int `json:"notes_create_limit"` + NotesCreateLimit int64 `json:"notes_create_limit"` NotifyOnUnknownSignIn bool `json:"notify_on_unknown_sign_in"` NugetSkipMetadataURLValidation bool `json:"nuget_skip_metadata_url_validation"` OutboundLocalRequestsAllowlistRaw string `json:"outbound_local_requests_allowlist_raw"` OutboundLocalRequestsWhitelist []string `json:"outbound_local_requests_whitelist"` - PackageMetadataPURLTypes []int `json:"package_metadata_purl_types"` + PackageMetadataPURLTypes []int64 `json:"package_metadata_purl_types"` PackageRegistryAllowAnyoneToPullOption bool `json:"package_registry_allow_anyone_to_pull_option"` - PackageRegistryCleanupPoliciesWorkerCapacity int `json:"package_registry_cleanup_policies_worker_capacity"` + PackageRegistryCleanupPoliciesWorkerCapacity int64 `json:"package_registry_cleanup_policies_worker_capacity"` PagesDomainVerificationEnabled bool `json:"pages_domain_verification_enabled"` PasswordAuthenticationEnabledForGit bool `json:"password_authentication_enabled_for_git"` PasswordAuthenticationEnabledForWeb bool `json:"password_authentication_enabled_for_web"` @@ -302,51 +304,51 @@ type Settings struct { PasswordLowercaseRequired bool `json:"password_lowercase_required"` PerformanceBarAllowedGroupPath string `json:"performance_bar_allowed_group_path"` PersonalAccessTokenPrefix string `json:"personal_access_token_prefix"` - PipelineLimitPerProjectUserSha int `json:"pipeline_limit_per_project_user_sha"` + PipelineLimitPerProjectUserSha int64 `json:"pipeline_limit_per_project_user_sha"` PlantumlEnabled bool `json:"plantuml_enabled"` PlantumlURL string `json:"plantuml_url"` PollingIntervalMultiplier float64 `json:"polling_interval_multiplier,string"` PreventMergeRequestsAuthorApproval bool `json:"prevent_merge_request_author_approval"` 
PreventMergeRequestsCommittersApproval bool `json:"prevent_merge_request_committers_approval"` - ProjectDownloadExportLimit int `json:"project_download_export_limit"` + ProjectDownloadExportLimit int64 `json:"project_download_export_limit"` ProjectExportEnabled bool `json:"project_export_enabled"` - ProjectExportLimit int `json:"project_export_limit"` - ProjectImportLimit int `json:"project_import_limit"` - ProjectJobsAPIRateLimit int `json:"project_jobs_api_rate_limit"` - ProjectRunnerTokenExpirationInterval int `json:"project_runner_token_expiration_interval"` - ProjectsAPIRateLimitUnauthenticated int `json:"projects_api_rate_limit_unauthenticated"` + ProjectExportLimit int64 `json:"project_export_limit"` + ProjectImportLimit int64 `json:"project_import_limit"` + ProjectJobsAPIRateLimit int64 `json:"project_jobs_api_rate_limit"` + ProjectRunnerTokenExpirationInterval int64 `json:"project_runner_token_expiration_interval"` + ProjectsAPIRateLimitUnauthenticated int64 `json:"projects_api_rate_limit_unauthenticated"` PrometheusMetricsEnabled bool `json:"prometheus_metrics_enabled"` ProtectedCIVariables bool `json:"protected_ci_variables"` PseudonymizerEnabled bool `json:"pseudonymizer_enabled"` - PushEventActivitiesLimit int `json:"push_event_activities_limit"` - PushEventHooksLimit int `json:"push_event_hooks_limit"` + PushEventActivitiesLimit int64 `json:"push_event_activities_limit"` + PushEventHooksLimit int64 `json:"push_event_hooks_limit"` PyPIPackageRequestsForwarding bool `json:"pypi_package_requests_forwarding"` - RSAKeyRestriction int `json:"rsa_key_restriction"` + RSAKeyRestriction int64 `json:"rsa_key_restriction"` RateLimitingResponseText string `json:"rate_limiting_response_text"` - RawBlobRequestLimit int `json:"raw_blob_request_limit"` + RawBlobRequestLimit int64 `json:"raw_blob_request_limit"` RecaptchaEnabled bool `json:"recaptcha_enabled"` RecaptchaPrivateKey string `json:"recaptcha_private_key"` RecaptchaSiteKey string `json:"recaptcha_site_key"` 
- ReceiveMaxInputSize int `json:"receive_max_input_size"` + ReceiveMaxInputSize int64 `json:"receive_max_input_size"` ReceptiveClusterAgentsEnabled bool `json:"receptive_cluster_agents_enabled"` RememberMeEnabled bool `json:"remember_me_enabled"` RepositoryChecksEnabled bool `json:"repository_checks_enabled"` - RepositorySizeLimit int `json:"repository_size_limit"` + RepositorySizeLimit int64 `json:"repository_size_limit"` RepositoryStorages []string `json:"repository_storages"` - RepositoryStoragesWeighted map[string]int `json:"repository_storages_weighted"` + RepositoryStoragesWeighted map[string]int64 `json:"repository_storages_weighted"` RequireAdminApprovalAfterUserSignup bool `json:"require_admin_approval_after_user_signup"` RequireAdminTwoFactorAuthentication bool `json:"require_admin_two_factor_authentication"` RequirePersonalAccessTokenExpiry bool `json:"require_personal_access_token_expiry"` RequireTwoFactorAuthentication bool `json:"require_two_factor_authentication"` RestrictedVisibilityLevels []VisibilityValue `json:"restricted_visibility_levels"` - RunnerTokenExpirationInterval int `json:"runner_token_expiration_interval"` - SearchRateLimit int `json:"search_rate_limit"` - SearchRateLimitUnauthenticated int `json:"search_rate_limit_unauthenticated"` + RunnerTokenExpirationInterval int64 `json:"runner_token_expiration_interval"` + SearchRateLimit int64 `json:"search_rate_limit"` + SearchRateLimitUnauthenticated int64 `json:"search_rate_limit_unauthenticated"` SecretDetectionRevocationTokenTypesURL string `json:"secret_detection_revocation_token_types_url"` SecretDetectionTokenRevocationEnabled bool `json:"secret_detection_token_revocation_enabled"` SecretDetectionTokenRevocationToken string `json:"secret_detection_token_revocation_token"` SecretDetectionTokenRevocationURL string `json:"secret_detection_token_revocation_url"` - SecurityApprovalPoliciesLimit int `json:"security_approval_policies_limit"` + SecurityApprovalPoliciesLimit int64 
`json:"security_approval_policies_limit"` SecurityPolicyGlobalGroupApproversEnabled bool `json:"security_policy_global_group_approvers_enabled"` SecurityTXTContent string `json:"security_txt_content"` SendUserConfirmationEmail bool `json:"send_user_confirmation_email"` @@ -355,12 +357,12 @@ type Settings struct { SentryEnabled bool `json:"sentry_enabled"` SentryEnvironment string `json:"sentry_environment"` ServiceAccessTokensExpirationEnforced bool `json:"service_access_tokens_expiration_enforced"` - SessionExpireDelay int `json:"session_expire_delay"` + SessionExpireDelay int64 `json:"session_expire_delay"` SharedRunnersEnabled bool `json:"shared_runners_enabled"` - SharedRunnersMinutes int `json:"shared_runners_minutes"` + SharedRunnersMinutes int64 `json:"shared_runners_minutes"` SharedRunnersText string `json:"shared_runners_text"` - SidekiqJobLimiterCompressionThresholdBytes int `json:"sidekiq_job_limiter_compression_threshold_bytes"` - SidekiqJobLimiterLimitBytes int `json:"sidekiq_job_limiter_limit_bytes"` + SidekiqJobLimiterCompressionThresholdBytes int64 `json:"sidekiq_job_limiter_compression_threshold_bytes"` + SidekiqJobLimiterLimitBytes int64 `json:"sidekiq_job_limiter_limit_bytes"` SidekiqJobLimiterMode string `json:"sidekiq_job_limiter_mode"` SignInText string `json:"sign_in_text"` SignupEnabled bool `json:"signup_enabled"` @@ -371,7 +373,7 @@ type Settings struct { SlackAppSecret string `json:"slack_app_secret"` SlackAppSigningSecret string `json:"slack_app_signing_secret"` SlackAppVerificationToken string `json:"slack_app_verification_token"` - SnippetSizeLimit int `json:"snippet_size_limit"` + SnippetSizeLimit int64 `json:"snippet_size_limit"` SnowplowAppID string `json:"snowplow_app_id"` SnowplowCollectorHostname string `json:"snowplow_collector_hostname"` SnowplowCookieDomain string `json:"snowplow_cookie_domain"` @@ -386,56 +388,56 @@ type Settings struct { StaticObjectsExternalStorageAuthToken string 
`json:"static_objects_external_storage_auth_token"` StaticObjectsExternalStorageURL string `json:"static_objects_external_storage_url"` SuggestPipelineEnabled bool `json:"suggest_pipeline_enabled"` - TerminalMaxSessionTime int `json:"terminal_max_session_time"` + TerminalMaxSessionTime int64 `json:"terminal_max_session_time"` Terms string `json:"terms"` ThrottleAuthenticatedAPIEnabled bool `json:"throttle_authenticated_api_enabled"` - ThrottleAuthenticatedAPIPeriodInSeconds int `json:"throttle_authenticated_api_period_in_seconds"` - ThrottleAuthenticatedAPIRequestsPerPeriod int `json:"throttle_authenticated_api_requests_per_period"` + ThrottleAuthenticatedAPIPeriodInSeconds int64 `json:"throttle_authenticated_api_period_in_seconds"` + ThrottleAuthenticatedAPIRequestsPerPeriod int64 `json:"throttle_authenticated_api_requests_per_period"` ThrottleAuthenticatedDeprecatedAPIEnabled bool `json:"throttle_authenticated_deprecated_api_enabled"` - ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_authenticated_deprecated_api_period_in_seconds"` - ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_authenticated_deprecated_api_requests_per_period"` + ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds int64 `json:"throttle_authenticated_deprecated_api_period_in_seconds"` + ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod int64 `json:"throttle_authenticated_deprecated_api_requests_per_period"` ThrottleAuthenticatedFilesAPIEnabled bool `json:"throttle_authenticated_files_api_enabled"` - ThrottleAuthenticatedFilesAPIPeriodInSeconds int `json:"throttle_authenticated_files_api_period_in_seconds"` - ThrottleAuthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_authenticated_files_api_requests_per_period"` + ThrottleAuthenticatedFilesAPIPeriodInSeconds int64 `json:"throttle_authenticated_files_api_period_in_seconds"` + ThrottleAuthenticatedFilesAPIRequestsPerPeriod int64 `json:"throttle_authenticated_files_api_requests_per_period"` 
ThrottleAuthenticatedGitLFSEnabled bool `json:"throttle_authenticated_git_lfs_enabled"` - ThrottleAuthenticatedGitLFSPeriodInSeconds int `json:"throttle_authenticated_git_lfs_period_in_seconds"` - ThrottleAuthenticatedGitLFSRequestsPerPeriod int `json:"throttle_authenticated_git_lfs_requests_per_period"` + ThrottleAuthenticatedGitLFSPeriodInSeconds int64 `json:"throttle_authenticated_git_lfs_period_in_seconds"` + ThrottleAuthenticatedGitLFSRequestsPerPeriod int64 `json:"throttle_authenticated_git_lfs_requests_per_period"` ThrottleAuthenticatedPackagesAPIEnabled bool `json:"throttle_authenticated_packages_api_enabled"` - ThrottleAuthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_authenticated_packages_api_period_in_seconds"` - ThrottleAuthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_authenticated_packages_api_requests_per_period"` + ThrottleAuthenticatedPackagesAPIPeriodInSeconds int64 `json:"throttle_authenticated_packages_api_period_in_seconds"` + ThrottleAuthenticatedPackagesAPIRequestsPerPeriod int64 `json:"throttle_authenticated_packages_api_requests_per_period"` ThrottleAuthenticatedWebEnabled bool `json:"throttle_authenticated_web_enabled"` - ThrottleAuthenticatedWebPeriodInSeconds int `json:"throttle_authenticated_web_period_in_seconds"` - ThrottleAuthenticatedWebRequestsPerPeriod int `json:"throttle_authenticated_web_requests_per_period"` + ThrottleAuthenticatedWebPeriodInSeconds int64 `json:"throttle_authenticated_web_period_in_seconds"` + ThrottleAuthenticatedWebRequestsPerPeriod int64 `json:"throttle_authenticated_web_requests_per_period"` ThrottleIncidentManagementNotificationEnabled bool `json:"throttle_incident_management_notification_enabled"` - ThrottleIncidentManagementNotificationPerPeriod int `json:"throttle_incident_management_notification_per_period"` - ThrottleIncidentManagementNotificationPeriodInSeconds int `json:"throttle_incident_management_notification_period_in_seconds"` + 
ThrottleIncidentManagementNotificationPerPeriod int64 `json:"throttle_incident_management_notification_per_period"` + ThrottleIncidentManagementNotificationPeriodInSeconds int64 `json:"throttle_incident_management_notification_period_in_seconds"` ThrottleProtectedPathsEnabled bool `json:"throttle_protected_paths_enabled"` - ThrottleProtectedPathsPeriodInSeconds int `json:"throttle_protected_paths_period_in_seconds"` - ThrottleProtectedPathsRequestsPerPeriod int `json:"throttle_protected_paths_requests_per_period"` + ThrottleProtectedPathsPeriodInSeconds int64 `json:"throttle_protected_paths_period_in_seconds"` + ThrottleProtectedPathsRequestsPerPeriod int64 `json:"throttle_protected_paths_requests_per_period"` ThrottleUnauthenticatedAPIEnabled bool `json:"throttle_unauthenticated_api_enabled"` - ThrottleUnauthenticatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_api_period_in_seconds"` - ThrottleUnauthenticatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_api_requests_per_period"` + ThrottleUnauthenticatedAPIPeriodInSeconds int64 `json:"throttle_unauthenticated_api_period_in_seconds"` + ThrottleUnauthenticatedAPIRequestsPerPeriod int64 `json:"throttle_unauthenticated_api_requests_per_period"` ThrottleUnauthenticatedDeprecatedAPIEnabled bool `json:"throttle_unauthenticated_deprecated_api_enabled"` - ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_deprecated_api_period_in_seconds"` - ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_deprecated_api_requests_per_period"` + ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds int64 `json:"throttle_unauthenticated_deprecated_api_period_in_seconds"` + ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod int64 `json:"throttle_unauthenticated_deprecated_api_requests_per_period"` ThrottleUnauthenticatedFilesAPIEnabled bool `json:"throttle_unauthenticated_files_api_enabled"` - ThrottleUnauthenticatedFilesAPIPeriodInSeconds int 
`json:"throttle_unauthenticated_files_api_period_in_seconds"` - ThrottleUnauthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_files_api_requests_per_period"` + ThrottleUnauthenticatedFilesAPIPeriodInSeconds int64 `json:"throttle_unauthenticated_files_api_period_in_seconds"` + ThrottleUnauthenticatedFilesAPIRequestsPerPeriod int64 `json:"throttle_unauthenticated_files_api_requests_per_period"` ThrottleUnauthenticatedGitLFSEnabled bool `json:"throttle_unauthenticated_git_lfs_enabled"` - ThrottleUnauthenticatedGitLFSPeriodInSeconds int `json:"throttle_unauthenticated_git_lfs_period_in_seconds"` - ThrottleUnauthenticatedGitLFSRequestsPerPeriod int `json:"throttle_unauthenticated_git_lfs_requests_per_period"` + ThrottleUnauthenticatedGitLFSPeriodInSeconds int64 `json:"throttle_unauthenticated_git_lfs_period_in_seconds"` + ThrottleUnauthenticatedGitLFSRequestsPerPeriod int64 `json:"throttle_unauthenticated_git_lfs_requests_per_period"` ThrottleUnauthenticatedPackagesAPIEnabled bool `json:"throttle_unauthenticated_packages_api_enabled"` - ThrottleUnauthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_unauthenticated_packages_api_period_in_seconds"` - ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_packages_api_requests_per_period"` + ThrottleUnauthenticatedPackagesAPIPeriodInSeconds int64 `json:"throttle_unauthenticated_packages_api_period_in_seconds"` + ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod int64 `json:"throttle_unauthenticated_packages_api_requests_per_period"` ThrottleUnauthenticatedWebEnabled bool `json:"throttle_unauthenticated_web_enabled"` - ThrottleUnauthenticatedWebPeriodInSeconds int `json:"throttle_unauthenticated_web_period_in_seconds"` - ThrottleUnauthenticatedWebRequestsPerPeriod int `json:"throttle_unauthenticated_web_requests_per_period"` + ThrottleUnauthenticatedWebPeriodInSeconds int64 `json:"throttle_unauthenticated_web_period_in_seconds"` + 
ThrottleUnauthenticatedWebRequestsPerPeriod int64 `json:"throttle_unauthenticated_web_requests_per_period"` TimeTrackingLimitToHours bool `json:"time_tracking_limit_to_hours"` - TwoFactorGracePeriod int `json:"two_factor_grace_period"` - UnconfirmedUsersDeleteAfterDays int `json:"unconfirmed_users_delete_after_days"` + TwoFactorGracePeriod int64 `json:"two_factor_grace_period"` + UnconfirmedUsersDeleteAfterDays int64 `json:"unconfirmed_users_delete_after_days"` UniqueIPsLimitEnabled bool `json:"unique_ips_limit_enabled"` - UniqueIPsLimitPerUser int `json:"unique_ips_limit_per_user"` - UniqueIPsLimitTimeWindow int `json:"unique_ips_limit_time_window"` + UniqueIPsLimitPerUser int64 `json:"unique_ips_limit_per_user"` + UniqueIPsLimitTimeWindow int64 `json:"unique_ips_limit_time_window"` UpdateRunnerVersionsEnabled bool `json:"update_runner_versions_enabled"` UpdatedAt *time.Time `json:"updated_at"` UpdatingNameDisabledForUsers bool `json:"updating_name_disabled_for_users"` @@ -448,26 +450,27 @@ type Settings struct { UserDefaultsToPrivateProfile bool `json:"user_defaults_to_private_profile"` UserOauthApplications bool `json:"user_oauth_applications"` UserShowAddSSHKeyMessage bool `json:"user_show_add_ssh_key_message"` - UsersGetByIDLimit int `json:"users_get_by_id_limit"` + UsersGetByIDLimit int64 `json:"users_get_by_id_limit"` UsersGetByIDLimitAllowlistRaw string `json:"users_get_by_id_limit_allowlist_raw"` ValidRunnerRegistrars []string `json:"valid_runner_registrars"` VersionCheckEnabled bool `json:"version_check_enabled"` WebIDEClientsidePreviewEnabled bool `json:"web_ide_clientside_preview_enabled"` WhatsNewVariant string `json:"whats_new_variant"` - WikiPageMaxContentBytes int `json:"wiki_page_max_content_bytes"` + WikiPageMaxContentBytes int64 `json:"wiki_page_max_content_bytes"` + LockMembershipsToSAML bool `json:"lock_memberships_to_saml"` // Deprecated: Use DefaultBranchProtectionDefaults instead. 
- DefaultBranchProtection int `json:"default_branch_protection"` + DefaultBranchProtection int64 `json:"default_branch_protection"` // Deprecated: Cannot be set through the API, always true HousekeepingBitmapsEnabled bool `json:"housekeeping_bitmaps_enabled"` // Deprecated: use HousekeepingOptimizeRepositoryPeriod instead - HousekeepingFullRepackPeriod int `json:"housekeeping_full_repack_period"` + HousekeepingFullRepackPeriod int64 `json:"housekeeping_full_repack_period"` // Deprecated: use HousekeepingOptimizeRepositoryPeriod instead - HousekeepingGcPeriod int `json:"housekeeping_gc_period"` + HousekeepingGcPeriod int64 `json:"housekeeping_gc_period"` // Deprecated: use HousekeepingOptimizeRepositoryPeriod instead - HousekeepingIncrementalRepackPeriod int `json:"housekeeping_incremental_repack_period"` + HousekeepingIncrementalRepackPeriod int64 `json:"housekeeping_incremental_repack_period"` // Deprecated: use PerformanceBarAllowedGroupPath instead - PerformanceBarAllowedGroupID int `json:"performance_bar_allowed_group_id"` + PerformanceBarAllowedGroupID int64 `json:"performance_bar_allowed_group_id"` // Deprecated: use PerformanceBarAllowedGroupPath: nil instead PerformanceBarEnabled bool `json:"performance_bar_enabled"` // Deprecated: Use AbuseNotificationEmail instead. @@ -479,11 +482,11 @@ type Settings struct { // Deprecated: Use ThrottleUnauthenticatedWebEnabled or ThrottleUnauthenticatedAPIEnabled instead. (Deprecated in GitLab 14.3) ThrottleUnauthenticatedEnabled bool `json:"throttle_unauthenticated_enabled"` // Deprecated: Use ThrottleUnauthenticatedWebPeriodInSeconds or ThrottleUnauthenticatedAPIPeriodInSeconds instead. 
(Deprecated in GitLab 14.3) - ThrottleUnauthenticatedPeriodInSeconds int `json:"throttle_unauthenticated_period_in_seconds"` + ThrottleUnauthenticatedPeriodInSeconds int64 `json:"throttle_unauthenticated_period_in_seconds"` // Deprecated: Use ThrottleUnauthenticatedWebRequestsPerPeriod or ThrottleUnauthenticatedAPIRequestsPerPeriod instead. (Deprecated in GitLab 14.3) - ThrottleUnauthenticatedRequestsPerPeriod int `json:"throttle_unauthenticated_requests_per_period"` + ThrottleUnauthenticatedRequestsPerPeriod int64 `json:"throttle_unauthenticated_requests_per_period"` // Deprecated: Replaced by SearchRateLimit in GitLab 14.9 (removed in 15.0). - UserEmailLookupLimit int `json:"user_email_lookup_limit"` + UserEmailLookupLimit int64 `json:"user_email_lookup_limit"` } // UnmarshalJSON implements the json.Unmarshaler interface. @@ -521,18 +524,10 @@ func (s Settings) String() string { // GitLab API docs: // https://docs.gitlab.com/api/settings/#get-details-on-current-application-settings func (s *SettingsService) GetSettings(options ...RequestOptionFunc) (*Settings, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "application/settings", nil, options) - if err != nil { - return nil, nil, err - } - - as := new(Settings) - resp, err := s.client.Do(req, as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil + return do[*Settings](s.client, + withPath("application/settings"), + withRequestOpts(options...), + ) } // UpdateSettingsOptions represents the available UpdateSettings() options. 
@@ -554,73 +549,74 @@ type UpdateSettingsOptions struct { AllowLocalRequestsFromWebHooksAndServices *bool `url:"allow_local_requests_from_web_hooks_and_services,omitempty" json:"allow_local_requests_from_web_hooks_and_services,omitempty"` AllowProjectCreationForGuestAndBelow *bool `url:"allow_project_creation_for_guest_and_below,omitempty" json:"allow_project_creation_for_guest_and_below,omitempty"` AllowRunnerRegistrationToken *bool `url:"allow_runner_registration_token,omitempty" json:"allow_runner_registration_token,omitempty"` + AnonymousSearchesAllowed *bool `url:"anonymous_searches_allowed,omitempty" json:"anonymous_searches_allowed,omitempty"` ArchiveBuildsInHumanReadable *string `url:"archive_builds_in_human_readable,omitempty" json:"archive_builds_in_human_readable,omitempty"` - ASCIIDocMaxIncludes *int `url:"asciidoc_max_includes,omitempty" json:"asciidoc_max_includes,omitempty"` + ASCIIDocMaxIncludes *int64 `url:"asciidoc_max_includes,omitempty" json:"asciidoc_max_includes,omitempty"` AssetProxyAllowlist *[]string `url:"asset_proxy_allowlist,omitempty" json:"asset_proxy_allowlist,omitempty"` AssetProxyEnabled *bool `url:"asset_proxy_enabled,omitempty" json:"asset_proxy_enabled,omitempty"` AssetProxySecretKey *string `url:"asset_proxy_secret_key,omitempty" json:"asset_proxy_secret_key,omitempty"` AssetProxyURL *string `url:"asset_proxy_url,omitempty" json:"asset_proxy_url,omitempty"` AuthorizedKeysEnabled *bool `url:"authorized_keys_enabled,omitempty" json:"authorized_keys_enabled,omitempty"` AutoBanUserOnExcessiveProjectsDownload *bool `url:"auto_ban_user_on_excessive_projects_download,omitempty" json:"auto_ban_user_on_excessive_projects_download,omitempty"` - AutocompleteUsers *int `url:"autocomplete_users,omitempty" json:"autocomplete_users,omitempty"` - AutocompleteUsersUnauthenticated *int `url:"autocomplete_users_unauthenticated,omitempty" json:"autocomplete_users_unauthenticated,omitempty"` + AutocompleteUsers *int64 
`url:"autocomplete_users,omitempty" json:"autocomplete_users,omitempty"` + AutocompleteUsersUnauthenticated *int64 `url:"autocomplete_users_unauthenticated,omitempty" json:"autocomplete_users_unauthenticated,omitempty"` AutoDevOpsDomain *string `url:"auto_devops_domain,omitempty" json:"auto_devops_domain,omitempty"` AutoDevOpsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` AutomaticPurchasedStorageAllocation *bool `url:"automatic_purchased_storage_allocation,omitempty" json:"automatic_purchased_storage_allocation,omitempty"` - BulkImportConcurrentPipelineBatchLimit *int `url:"bulk_import_concurrent_pipeline_batch_limit,omitempty" json:"bulk_import_concurrent_pipeline_batch_limit,omitempty"` + BulkImportConcurrentPipelineBatchLimit *int64 `url:"bulk_import_concurrent_pipeline_batch_limit,omitempty" json:"bulk_import_concurrent_pipeline_batch_limit,omitempty"` BulkImportEnabled *bool `url:"bulk_import_enabled,omitempty" json:"bulk_import_enabled,omitempty"` - BulkImportMaxDownloadFileSize *int `url:"bulk_import_max_download_file_size,omitempty" json:"bulk_import_max_download_file_size,omitempty"` + BulkImportMaxDownloadFileSize *int64 `url:"bulk_import_max_download_file_size,omitempty" json:"bulk_import_max_download_file_size,omitempty"` CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` CanCreateOrganization *bool `url:"can_create_organization,omitempty" json:"can_create_organization,omitempty"` CheckNamespacePlan *bool `url:"check_namespace_plan,omitempty" json:"check_namespace_plan,omitempty"` CIJobLiveTraceEnabled *bool `url:"ci_job_live_trace_enabled,omitempty" json:"ci_job_live_trace_enabled,omitempty"` - CIMaxIncludes *int `url:"ci_max_includes,omitempty" json:"ci_max_includes,omitempty"` - CIMaxTotalYAMLSizeBytes *int `url:"ci_max_total_yaml_size_bytes,omitempty" json:"ci_max_total_yaml_size_bytes,omitempty"` - CIPartitionsSizeLimit *int `url:"ci_partitions_size_limit,omitempty" 
json:"ci_partitions_size_limit,omitempty"` + CIMaxIncludes *int64 `url:"ci_max_includes,omitempty" json:"ci_max_includes,omitempty"` + CIMaxTotalYAMLSizeBytes *int64 `url:"ci_max_total_yaml_size_bytes,omitempty" json:"ci_max_total_yaml_size_bytes,omitempty"` + CIPartitionsSizeLimit *int64 `url:"ci_partitions_size_limit,omitempty" json:"ci_partitions_size_limit,omitempty"` CommitEmailHostname *string `url:"commit_email_hostname,omitempty" json:"commit_email_hostname,omitempty"` - ConcurrentBitbucketImportJobsLimit *int `url:"concurrent_bitbucket_import_jobs_limit,omitempty" json:"concurrent_bitbucket_import_jobs_limit,omitempty"` - ConcurrentBitbucketServerImportJobsLimit *int `url:"concurrent_bitbucket_server_import_jobs_limit,omitempty" json:"concurrent_bitbucket_server_import_jobs_limit,omitempty"` - ConcurrentGitHubImportJobsLimit *int `url:"concurrent_github_import_jobs_limit,omitempty" json:"concurrent_github_import_jobs_limit,omitempty"` + ConcurrentBitbucketImportJobsLimit *int64 `url:"concurrent_bitbucket_import_jobs_limit,omitempty" json:"concurrent_bitbucket_import_jobs_limit,omitempty"` + ConcurrentBitbucketServerImportJobsLimit *int64 `url:"concurrent_bitbucket_server_import_jobs_limit,omitempty" json:"concurrent_bitbucket_server_import_jobs_limit,omitempty"` + ConcurrentGitHubImportJobsLimit *int64 `url:"concurrent_github_import_jobs_limit,omitempty" json:"concurrent_github_import_jobs_limit,omitempty"` ContainerExpirationPoliciesEnableHistoricEntries *bool `url:"container_expiration_policies_enable_historic_entries,omitempty" json:"container_expiration_policies_enable_historic_entries,omitempty"` - ContainerRegistryCleanupTagsServiceMaxListSize *int `url:"container_registry_cleanup_tags_service_max_list_size,omitempty" json:"container_registry_cleanup_tags_service_max_list_size,omitempty"` - ContainerRegistryDeleteTagsServiceTimeout *int `url:"container_registry_delete_tags_service_timeout,omitempty" 
json:"container_registry_delete_tags_service_timeout,omitempty"` + ContainerRegistryCleanupTagsServiceMaxListSize *int64 `url:"container_registry_cleanup_tags_service_max_list_size,omitempty" json:"container_registry_cleanup_tags_service_max_list_size,omitempty"` + ContainerRegistryDeleteTagsServiceTimeout *int64 `url:"container_registry_delete_tags_service_timeout,omitempty" json:"container_registry_delete_tags_service_timeout,omitempty"` ContainerRegistryExpirationPoliciesCaching *bool `url:"container_registry_expiration_policies_caching,omitempty" json:"container_registry_expiration_policies_caching,omitempty"` - ContainerRegistryExpirationPoliciesWorkerCapacity *int `url:"container_registry_expiration_policies_worker_capacity,omitempty" json:"container_registry_expiration_policies_worker_capacity,omitempty"` + ContainerRegistryExpirationPoliciesWorkerCapacity *int64 `url:"container_registry_expiration_policies_worker_capacity,omitempty" json:"container_registry_expiration_policies_worker_capacity,omitempty"` ContainerRegistryImportCreatedBefore *time.Time `url:"container_registry_import_created_before,omitempty" json:"container_registry_import_created_before,omitempty"` - ContainerRegistryImportMaxRetries *int `url:"container_registry_import_max_retries,omitempty" json:"container_registry_import_max_retries,omitempty"` - ContainerRegistryImportMaxStepDuration *int `url:"container_registry_import_max_step_duration,omitempty" json:"container_registry_import_max_step_duration,omitempty"` - ContainerRegistryImportMaxTagsCount *int `url:"container_registry_import_max_tags_count,omitempty" json:"container_registry_import_max_tags_count,omitempty"` - ContainerRegistryImportStartMaxRetries *int `url:"container_registry_import_start_max_retries,omitempty" json:"container_registry_import_start_max_retries,omitempty"` + ContainerRegistryImportMaxRetries *int64 `url:"container_registry_import_max_retries,omitempty" json:"container_registry_import_max_retries,omitempty"` + 
ContainerRegistryImportMaxStepDuration *int64 `url:"container_registry_import_max_step_duration,omitempty" json:"container_registry_import_max_step_duration,omitempty"` + ContainerRegistryImportMaxTagsCount *int64 `url:"container_registry_import_max_tags_count,omitempty" json:"container_registry_import_max_tags_count,omitempty"` + ContainerRegistryImportStartMaxRetries *int64 `url:"container_registry_import_start_max_retries,omitempty" json:"container_registry_import_start_max_retries,omitempty"` ContainerRegistryImportTargetPlan *string `url:"container_registry_import_target_plan,omitempty" json:"container_registry_import_target_plan,omitempty"` - ContainerRegistryTokenExpireDelay *int `url:"container_registry_token_expire_delay,omitempty" json:"container_registry_token_expire_delay,omitempty"` + ContainerRegistryTokenExpireDelay *int64 `url:"container_registry_token_expire_delay,omitempty" json:"container_registry_token_expire_delay,omitempty"` CustomHTTPCloneURLRoot *string `url:"custom_http_clone_url_root,omitempty" json:"custom_http_clone_url_root,omitempty"` DNSRebindingProtectionEnabled *bool `url:"dns_rebinding_protection_enabled,omitempty" json:"dns_rebinding_protection_enabled,omitempty"` - DSAKeyRestriction *int `url:"dsa_key_restriction,omitempty" json:"dsa_key_restriction,omitempty"` + DSAKeyRestriction *int64 `url:"dsa_key_restriction,omitempty" json:"dsa_key_restriction,omitempty"` DeactivateDormantUsers *bool `url:"deactivate_dormant_users,omitempty" json:"deactivate_dormant_users,omitempty"` - DeactivateDormantUsersPeriod *int `url:"deactivate_dormant_users_period,omitempty" json:"deactivate_dormant_users_period,omitempty"` - DecompressArchiveFileTimeout *int `url:"decompress_archive_file_timeout,omitempty" json:"decompress_archive_file_timeout,omitempty"` + DeactivateDormantUsersPeriod *int64 `url:"deactivate_dormant_users_period,omitempty" json:"deactivate_dormant_users_period,omitempty"` + DecompressArchiveFileTimeout *int64 
`url:"decompress_archive_file_timeout,omitempty" json:"decompress_archive_file_timeout,omitempty"` DefaultArtifactsExpireIn *string `url:"default_artifacts_expire_in,omitempty" json:"default_artifacts_expire_in,omitempty"` DefaultBranchName *string `url:"default_branch_name,omitempty" json:"default_branch_name,omitempty"` DefaultBranchProtectionDefaults *DefaultBranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"` DefaultCiConfigPath *string `url:"default_ci_config_path,omitempty" json:"default_ci_config_path,omitempty"` DefaultGroupVisibility *VisibilityValue `url:"default_group_visibility,omitempty" json:"default_group_visibility,omitempty"` DefaultPreferredLanguage *string `url:"default_preferred_language,omitempty" json:"default_preferred_language,omitempty"` - DefaultProjectCreation *int `url:"default_project_creation,omitempty" json:"default_project_creation,omitempty"` + DefaultProjectCreation *int64 `url:"default_project_creation,omitempty" json:"default_project_creation,omitempty"` DefaultProjectDeletionProtection *bool `url:"default_project_deletion_protection,omitempty" json:"default_project_deletion_protection,omitempty"` DefaultProjectVisibility *VisibilityValue `url:"default_project_visibility,omitempty" json:"default_project_visibility,omitempty"` - DefaultProjectsLimit *int `url:"default_projects_limit,omitempty" json:"default_projects_limit,omitempty"` + DefaultProjectsLimit *int64 `url:"default_projects_limit,omitempty" json:"default_projects_limit,omitempty"` DefaultSnippetVisibility *VisibilityValue `url:"default_snippet_visibility,omitempty" json:"default_snippet_visibility,omitempty"` - DefaultSyntaxHighlightingTheme *int `url:"default_syntax_highlighting_theme,omitempty" json:"default_syntax_highlighting_theme,omitempty"` + DefaultSyntaxHighlightingTheme *int64 `url:"default_syntax_highlighting_theme,omitempty" 
json:"default_syntax_highlighting_theme,omitempty"` DelayedGroupDeletion *bool `url:"delayed_group_deletion,omitempty" json:"delayed_group_deletion,omitempty"` DelayedProjectDeletion *bool `url:"delayed_project_deletion,omitempty" json:"delayed_project_deletion,omitempty"` DeleteInactiveProjects *bool `url:"delete_inactive_projects,omitempty" json:"delete_inactive_projects,omitempty"` DeleteUnconfirmedUsers *bool `url:"delete_unconfirmed_users,omitempty" json:"delete_unconfirmed_users,omitempty"` - DeletionAdjournedPeriod *int `url:"deletion_adjourned_period,omitempty" json:"deletion_adjourned_period,omitempty"` + DeletionAdjournedPeriod *int64 `url:"deletion_adjourned_period,omitempty" json:"deletion_adjourned_period,omitempty"` DiagramsnetEnabled *bool `url:"diagramsnet_enabled,omitempty" json:"diagramsnet_enabled,omitempty"` DiagramsnetURL *string `url:"diagramsnet_url,omitempty" json:"diagramsnet_url,omitempty"` - DiffMaxFiles *int `url:"diff_max_files,omitempty" json:"diff_max_files,omitempty"` - DiffMaxLines *int `url:"diff_max_lines,omitempty" json:"diff_max_lines,omitempty"` - DiffMaxPatchBytes *int `url:"diff_max_patch_bytes,omitempty" json:"diff_max_patch_bytes,omitempty"` + DiffMaxFiles *int64 `url:"diff_max_files,omitempty" json:"diff_max_files,omitempty"` + DiffMaxLines *int64 `url:"diff_max_lines,omitempty" json:"diff_max_lines,omitempty"` + DiffMaxPatchBytes *int64 `url:"diff_max_patch_bytes,omitempty" json:"diff_max_patch_bytes,omitempty"` DisableFeedToken *bool `url:"disable_feed_token,omitempty" json:"disable_feed_token,omitempty"` DisableAdminOAuthScopes *bool `url:"disable_admin_oauth_scopes,omitempty" json:"disable_admin_oauth_scopes,omitempty"` DisableOverridingApproversPerMergeRequest *bool `url:"disable_overriding_approvers_per_merge_request,omitempty" json:"disable_overriding_approvers_per_merge_request,omitempty"` @@ -629,44 +625,44 @@ type UpdateSettingsOptions struct { DomainAllowlist *[]string `url:"domain_allowlist,omitempty" 
json:"domain_allowlist,omitempty"` DomainDenylist *[]string `url:"domain_denylist,omitempty" json:"domain_denylist,omitempty"` DomainDenylistEnabled *bool `url:"domain_denylist_enabled,omitempty" json:"domain_denylist_enabled,omitempty"` - DownstreamPipelineTriggerLimitPerProjectUserSHA *int `url:"downstream_pipeline_trigger_limit_per_project_user_sha,omitempty" json:"downstream_pipeline_trigger_limit_per_project_user_sha,omitempty"` + DownstreamPipelineTriggerLimitPerProjectUserSHA *int64 `url:"downstream_pipeline_trigger_limit_per_project_user_sha,omitempty" json:"downstream_pipeline_trigger_limit_per_project_user_sha,omitempty"` DuoFeaturesEnabled *bool `url:"duo_features_enabled,omitempty" json:"duo_features_enabled,omitempty"` - ECDSAKeyRestriction *int `url:"ecdsa_key_restriction,omitempty" json:"ecdsa_key_restriction,omitempty"` - ECDSASKKeyRestriction *int `url:"ecdsa_sk_key_restriction,omitempty" json:"ecdsa_sk_key_restriction,omitempty"` + ECDSAKeyRestriction *int64 `url:"ecdsa_key_restriction,omitempty" json:"ecdsa_key_restriction,omitempty"` + ECDSASKKeyRestriction *int64 `url:"ecdsa_sk_key_restriction,omitempty" json:"ecdsa_sk_key_restriction,omitempty"` EKSAccessKeyID *string `url:"eks_access_key_id,omitempty" json:"eks_access_key_id,omitempty"` EKSAccountID *string `url:"eks_account_id,omitempty" json:"eks_account_id,omitempty"` EKSIntegrationEnabled *bool `url:"eks_integration_enabled,omitempty" json:"eks_integration_enabled,omitempty"` EKSSecretAccessKey *string `url:"eks_secret_access_key,omitempty" json:"eks_secret_access_key,omitempty"` - Ed25519KeyRestriction *int `url:"ed25519_key_restriction,omitempty" json:"ed25519_key_restriction,omitempty"` - Ed25519SKKeyRestriction *int `url:"ed25519_sk_key_restriction,omitempty" json:"ed25519_sk_key_restriction,omitempty"` + Ed25519KeyRestriction *int64 `url:"ed25519_key_restriction,omitempty" json:"ed25519_key_restriction,omitempty"` + Ed25519SKKeyRestriction *int64 
`url:"ed25519_sk_key_restriction,omitempty" json:"ed25519_sk_key_restriction,omitempty"` ElasticsearchAWS *bool `url:"elasticsearch_aws,omitempty" json:"elasticsearch_aws,omitempty"` ElasticsearchAWSAccessKey *string `url:"elasticsearch_aws_access_key,omitempty" json:"elasticsearch_aws_access_key,omitempty"` ElasticsearchAWSRegion *string `url:"elasticsearch_aws_region,omitempty" json:"elasticsearch_aws_region,omitempty"` ElasticsearchAWSSecretAccessKey *string `url:"elasticsearch_aws_secret_access_key,omitempty" json:"elasticsearch_aws_secret_access_key,omitempty"` ElasticsearchAnalyzersKuromojiEnabled *bool `url:"elasticsearch_analyzers_kuromoji_enabled,omitempty" json:"elasticsearch_analyzers_kuromoji_enabled,omitempty"` - ElasticsearchAnalyzersKuromojiSearch *int `url:"elasticsearch_analyzers_kuromoji_search,omitempty" json:"elasticsearch_analyzers_kuromoji_search,omitempty"` + ElasticsearchAnalyzersKuromojiSearch *int64 `url:"elasticsearch_analyzers_kuromoji_search,omitempty" json:"elasticsearch_analyzers_kuromoji_search,omitempty"` ElasticsearchAnalyzersSmartCNEnabled *bool `url:"elasticsearch_analyzers_smartcn_enabled,omitempty" json:"elasticsearch_analyzers_smartcn_enabled,omitempty"` - ElasticsearchAnalyzersSmartCNSearch *int `url:"elasticsearch_analyzers_smartcn_search,omitempty" json:"elasticsearch_analyzers_smartcn_search,omitempty"` - ElasticsearchClientRequestTimeout *int `url:"elasticsearch_client_request_timeout,omitempty" json:"elasticsearch_client_request_timeout,omitempty"` - ElasticsearchIndexedFieldLengthLimit *int `url:"elasticsearch_indexed_field_length_limit,omitempty" json:"elasticsearch_indexed_field_length_limit,omitempty"` - ElasticsearchIndexedFileSizeLimitKB *int `url:"elasticsearch_indexed_file_size_limit_kb,omitempty" json:"elasticsearch_indexed_file_size_limit_kb,omitempty"` + ElasticsearchAnalyzersSmartCNSearch *int64 `url:"elasticsearch_analyzers_smartcn_search,omitempty" json:"elasticsearch_analyzers_smartcn_search,omitempty"` + 
ElasticsearchClientRequestTimeout *int64 `url:"elasticsearch_client_request_timeout,omitempty" json:"elasticsearch_client_request_timeout,omitempty"` + ElasticsearchIndexedFieldLengthLimit *int64 `url:"elasticsearch_indexed_field_length_limit,omitempty" json:"elasticsearch_indexed_field_length_limit,omitempty"` + ElasticsearchIndexedFileSizeLimitKB *int64 `url:"elasticsearch_indexed_file_size_limit_kb,omitempty" json:"elasticsearch_indexed_file_size_limit_kb,omitempty"` ElasticsearchIndexing *bool `url:"elasticsearch_indexing,omitempty" json:"elasticsearch_indexing,omitempty"` ElasticsearchLimitIndexing *bool `url:"elasticsearch_limit_indexing,omitempty" json:"elasticsearch_limit_indexing,omitempty"` - ElasticsearchMaxBulkConcurrency *int `url:"elasticsearch_max_bulk_concurrency,omitempty" json:"elasticsearch_max_bulk_concurrency,omitempty"` - ElasticsearchMaxBulkSizeMB *int `url:"elasticsearch_max_bulk_size_mb,omitempty" json:"elasticsearch_max_bulk_size_mb,omitempty"` - ElasticsearchMaxCodeIndexingConcurrency *int `url:"elasticsearch_max_code_indexing_concurrency,omitempty" json:"elasticsearch_max_code_indexing_concurrency,omitempty"` - ElasticsearchNamespaceIDs *[]int `url:"elasticsearch_namespace_ids,omitempty" json:"elasticsearch_namespace_ids,omitempty"` + ElasticsearchMaxBulkConcurrency *int64 `url:"elasticsearch_max_bulk_concurrency,omitempty" json:"elasticsearch_max_bulk_concurrency,omitempty"` + ElasticsearchMaxBulkSizeMB *int64 `url:"elasticsearch_max_bulk_size_mb,omitempty" json:"elasticsearch_max_bulk_size_mb,omitempty"` + ElasticsearchMaxCodeIndexingConcurrency *int64 `url:"elasticsearch_max_code_indexing_concurrency,omitempty" json:"elasticsearch_max_code_indexing_concurrency,omitempty"` + ElasticsearchNamespaceIDs *[]int64 `url:"elasticsearch_namespace_ids,omitempty" json:"elasticsearch_namespace_ids,omitempty"` ElasticsearchPassword *string `url:"elasticsearch_password,omitempty" json:"elasticsearch_password,omitempty"` ElasticsearchPauseIndexing 
*bool `url:"elasticsearch_pause_indexing,omitempty" json:"elasticsearch_pause_indexing,omitempty"` - ElasticsearchProjectIDs *[]int `url:"elasticsearch_project_ids,omitempty" json:"elasticsearch_project_ids,omitempty"` - ElasticsearchReplicas *int `url:"elasticsearch_replicas,omitempty" json:"elasticsearch_replicas,omitempty"` + ElasticsearchProjectIDs *[]int64 `url:"elasticsearch_project_ids,omitempty" json:"elasticsearch_project_ids,omitempty"` + ElasticsearchReplicas *int64 `url:"elasticsearch_replicas,omitempty" json:"elasticsearch_replicas,omitempty"` ElasticsearchRequeueWorkers *bool `url:"elasticsearch_requeue_workers,omitempty" json:"elasticsearch_requeue_workers,omitempty"` - ElasticsearchRetryOnFailure *int `url:"elasticsearch_retry_on_failure,omitempty" json:"elasticsearch_retry_on_failure,omitempty"` + ElasticsearchRetryOnFailure *int64 `url:"elasticsearch_retry_on_failure,omitempty" json:"elasticsearch_retry_on_failure,omitempty"` ElasticsearchSearch *bool `url:"elasticsearch_search,omitempty" json:"elasticsearch_search,omitempty"` - ElasticsearchShards *int `url:"elasticsearch_shards,omitempty" json:"elasticsearch_shards,omitempty"` + ElasticsearchShards *int64 `url:"elasticsearch_shards,omitempty" json:"elasticsearch_shards,omitempty"` ElasticsearchURL *string `url:"elasticsearch_url,omitempty" json:"elasticsearch_url,omitempty"` ElasticsearchUsername *string `url:"elasticsearch_username,omitempty" json:"elasticsearch_username,omitempty"` - ElasticsearchWorkerNumberOfShards *int `url:"elasticsearch_worker_number_of_shards,omitempty" json:"elasticsearch_worker_number_of_shards,omitempty"` + ElasticsearchWorkerNumberOfShards *int64 `url:"elasticsearch_worker_number_of_shards,omitempty" json:"elasticsearch_worker_number_of_shards,omitempty"` EmailAdditionalText *string `url:"email_additional_text,omitempty" json:"email_additional_text,omitempty"` EmailAuthorInBody *bool `url:"email_author_in_body,omitempty" json:"email_author_in_body,omitempty"` 
EmailConfirmationSetting *string `url:"email_confirmation_setting,omitempty" json:"email_confirmation_setting,omitempty"` @@ -686,23 +682,23 @@ type UpdateSettingsOptions struct { ExternalAuthorizationServiceEnabled *bool `url:"external_authorization_service_enabled,omitempty" json:"external_authorization_service_enabled,omitempty"` ExternalAuthorizationServiceTimeout *float64 `url:"external_authorization_service_timeout,omitempty" json:"external_authorization_service_timeout,omitempty"` ExternalAuthorizationServiceURL *string `url:"external_authorization_service_url,omitempty" json:"external_authorization_service_url,omitempty"` - ExternalPipelineValidationServiceTimeout *int `url:"external_pipeline_validation_service_timeout,omitempty" json:"external_pipeline_validation_service_timeout,omitempty"` + ExternalPipelineValidationServiceTimeout *int64 `url:"external_pipeline_validation_service_timeout,omitempty" json:"external_pipeline_validation_service_timeout,omitempty"` ExternalPipelineValidationServiceToken *string `url:"external_pipeline_validation_service_token,omitempty" json:"external_pipeline_validation_service_token,omitempty"` ExternalPipelineValidationServiceURL *string `url:"external_pipeline_validation_service_url,omitempty" json:"external_pipeline_validation_service_url,omitempty"` - FailedLoginAttemptsUnlockPeriodInMinutes *int `url:"failed_login_attempts_unlock_period_in_minutes,omitempty" json:"failed_login_attempts_unlock_period_in_minutes,omitempty"` - FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` - FirstDayOfWeek *int `url:"first_day_of_week,omitempty" json:"first_day_of_week,omitempty"` + FailedLoginAttemptsUnlockPeriodInMinutes *int64 `url:"failed_login_attempts_unlock_period_in_minutes,omitempty" json:"failed_login_attempts_unlock_period_in_minutes,omitempty"` + FileTemplateProjectID *int64 `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` + 
FirstDayOfWeek *int64 `url:"first_day_of_week,omitempty" json:"first_day_of_week,omitempty"` FlocEnabled *bool `url:"floc_enabled,omitempty" json:"floc_enabled,omitempty"` GeoNodeAllowedIPs *string `url:"geo_node_allowed_ips,omitempty" json:"geo_node_allowed_ips,omitempty"` - GeoStatusTimeout *int `url:"geo_status_timeout,omitempty" json:"geo_status_timeout,omitempty"` - GitRateLimitUsersAlertlist *[]int `url:"git_rate_limit_users_alertlist,omitempty" json:"git_rate_limit_users_alertlist,omitempty"` - GitTwoFactorSessionExpiry *int `url:"git_two_factor_session_expiry,omitempty" json:"git_two_factor_session_expiry,omitempty"` - GitalyTimeoutDefault *int `url:"gitaly_timeout_default,omitempty" json:"gitaly_timeout_default,omitempty"` - GitalyTimeoutFast *int `url:"gitaly_timeout_fast,omitempty" json:"gitaly_timeout_fast,omitempty"` - GitalyTimeoutMedium *int `url:"gitaly_timeout_medium,omitempty" json:"gitaly_timeout_medium,omitempty"` + GeoStatusTimeout *int64 `url:"geo_status_timeout,omitempty" json:"geo_status_timeout,omitempty"` + GitRateLimitUsersAlertlist *[]int64 `url:"git_rate_limit_users_alertlist,omitempty" json:"git_rate_limit_users_alertlist,omitempty"` + GitTwoFactorSessionExpiry *int64 `url:"git_two_factor_session_expiry,omitempty" json:"git_two_factor_session_expiry,omitempty"` + GitalyTimeoutDefault *int64 `url:"gitaly_timeout_default,omitempty" json:"gitaly_timeout_default,omitempty"` + GitalyTimeoutFast *int64 `url:"gitaly_timeout_fast,omitempty" json:"gitaly_timeout_fast,omitempty"` + GitalyTimeoutMedium *int64 `url:"gitaly_timeout_medium,omitempty" json:"gitaly_timeout_medium,omitempty"` GitlabDedicatedInstance *bool `url:"gitlab_dedicated_instance,omitempty" json:"gitlab_dedicated_instance,omitempty"` GitlabEnvironmentToolkitInstance *bool `url:"gitlab_environment_toolkit_instance,omitempty" json:"gitlab_environment_toolkit_instance,omitempty"` - GitlabShellOperationLimit *int `url:"gitlab_shell_operation_limit,omitempty" 
json:"gitlab_shell_operation_limit,omitempty"` + GitlabShellOperationLimit *int64 `url:"gitlab_shell_operation_limit,omitempty" json:"gitlab_shell_operation_limit,omitempty"` GitpodEnabled *bool `url:"gitpod_enabled,omitempty" json:"gitpod_enabled,omitempty"` GitpodURL *string `url:"gitpod_url,omitempty" json:"gitpod_url,omitempty"` GitRateLimitUsersAllowlist *[]string `url:"git_rate_limit_users_allowlist,omitempty" json:"git_rate_limit_users_allowlist,omitempty"` @@ -710,11 +706,11 @@ type UpdateSettingsOptions struct { GrafanaEnabled *bool `url:"grafana_enabled,omitempty" json:"grafana_enabled,omitempty"` GrafanaURL *string `url:"grafana_url,omitempty" json:"grafana_url,omitempty"` GravatarEnabled *bool `url:"gravatar_enabled,omitempty" json:"gravatar_enabled,omitempty"` - GroupDownloadExportLimit *int `url:"group_download_export_limit,omitempty" json:"group_download_export_limit,omitempty"` - GroupExportLimit *int `url:"group_export_limit,omitempty" json:"group_export_limit,omitempty"` - GroupImportLimit *int `url:"group_import_limit,omitempty" json:"group_import_limit,omitempty"` + GroupDownloadExportLimit *int64 `url:"group_download_export_limit,omitempty" json:"group_download_export_limit,omitempty"` + GroupExportLimit *int64 `url:"group_export_limit,omitempty" json:"group_export_limit,omitempty"` + GroupImportLimit *int64 `url:"group_import_limit,omitempty" json:"group_import_limit,omitempty"` GroupOwnersCanManageDefaultBranchProtection *bool `url:"group_owners_can_manage_default_branch_protection,omitempty" json:"group_owners_can_manage_default_branch_protection,omitempty"` - GroupRunnerTokenExpirationInterval *int `url:"group_runner_token_expiration_interval,omitempty" json:"group_runner_token_expiration_interval,omitempty"` + GroupRunnerTokenExpirationInterval *int64 `url:"group_runner_token_expiration_interval,omitempty" json:"group_runner_token_expiration_interval,omitempty"` HTMLEmailsEnabled *bool `url:"html_emails_enabled,omitempty" 
json:"html_emails_enabled,omitempty"` HashedStorageEnabled *bool `url:"hashed_storage_enabled,omitempty" json:"hashed_storage_enabled,omitempty"` HelpPageDocumentationBaseURL *string `url:"help_page_documentation_base_url,omitempty" json:"help_page_documentation_base_url,omitempty"` @@ -725,15 +721,16 @@ type UpdateSettingsOptions struct { HideThirdPartyOffers *bool `url:"hide_third_party_offers,omitempty" json:"hide_third_party_offers,omitempty"` HomePageURL *string `url:"home_page_url,omitempty" json:"home_page_url,omitempty"` HousekeepingEnabled *bool `url:"housekeeping_enabled,omitempty" json:"housekeeping_enabled,omitempty"` - HousekeepingOptimizeRepositoryPeriod *int `url:"housekeeping_optimize_repository_period,omitempty" json:"housekeeping_optimize_repository_period,omitempty"` + HousekeepingOptimizeRepositoryPeriod *int64 `url:"housekeeping_optimize_repository_period,omitempty" json:"housekeeping_optimize_repository_period,omitempty"` ImportSources *[]string `url:"import_sources,omitempty" json:"import_sources,omitempty"` - InactiveProjectsDeleteAfterMonths *int `url:"inactive_projects_delete_after_months,omitempty" json:"inactive_projects_delete_after_months,omitempty"` - InactiveProjectsMinSizeMB *int `url:"inactive_projects_min_size_mb,omitempty" json:"inactive_projects_min_size_mb,omitempty"` - InactiveProjectsSendWarningEmailAfterMonths *int `url:"inactive_projects_send_warning_email_after_months,omitempty" json:"inactive_projects_send_warning_email_after_months,omitempty"` + InactiveProjectsDeleteAfterMonths *int64 `url:"inactive_projects_delete_after_months,omitempty" json:"inactive_projects_delete_after_months,omitempty"` + InactiveProjectsMinSizeMB *int64 `url:"inactive_projects_min_size_mb,omitempty" json:"inactive_projects_min_size_mb,omitempty"` + InactiveProjectsSendWarningEmailAfterMonths *int64 `url:"inactive_projects_send_warning_email_after_months,omitempty" json:"inactive_projects_send_warning_email_after_months,omitempty"` + 
InactiveResourceAccessTokensDeleteAfterDays *int64 `url:"inactive_resource_access_tokens_delete_after_days,omitempty" json:"inactive_resource_access_tokens_delete_after_days,omitempty"` IncludeOptionalMetricsInServicePing *bool `url:"include_optional_metrics_in_service_ping,omitempty" json:"include_optional_metrics_in_service_ping,omitempty"` InProductMarketingEmailsEnabled *bool `url:"in_product_marketing_emails_enabled,omitempty" json:"in_product_marketing_emails_enabled,omitempty"` InvisibleCaptchaEnabled *bool `url:"invisible_captcha_enabled,omitempty" json:"invisible_captcha_enabled,omitempty"` - IssuesCreateLimit *int `url:"issues_create_limit,omitempty" json:"issues_create_limit,omitempty"` + IssuesCreateLimit *int64 `url:"issues_create_limit,omitempty" json:"issues_create_limit,omitempty"` JiraConnectApplicationKey *string `url:"jira_connect_application_key,omitempty" json:"jira_connect_application_key,omitempty"` JiraConnectPublicKeyStorageEnabled *bool `url:"jira_connect_public_key_storage_enabled,omitempty" json:"jira_connect_public_key_storage_enabled,omitempty"` JiraConnectProxyURL *string `url:"jira_connect_proxy_url,omitempty" json:"jira_connect_proxy_url,omitempty"` @@ -741,7 +738,7 @@ type UpdateSettingsOptions struct { KrokiEnabled *bool `url:"kroki_enabled,omitempty" json:"kroki_enabled,omitempty"` KrokiFormats *map[string]bool `url:"kroki_formats,omitempty" json:"kroki_formats,omitempty"` KrokiURL *string `url:"kroki_url,omitempty" json:"kroki_url,omitempty"` - LocalMarkdownVersion *int `url:"local_markdown_version,omitempty" json:"local_markdown_version,omitempty"` + LocalMarkdownVersion *int64 `url:"local_markdown_version,omitempty" json:"local_markdown_version,omitempty"` LockDuoFeaturesEnabled *bool `url:"lock_duo_features_enabled,omitempty" json:"lock_duo_features_enabled,omitempty"` LockMembershipsToLDAP *bool `url:"lock_memberships_to_ldap,omitempty" json:"lock_memberships_to_ldap,omitempty"` LoginRecaptchaProtectionEnabled *bool 
`url:"login_recaptcha_protection_enabled,omitempty" json:"login_recaptcha_protection_enabled,omitempty"` @@ -750,36 +747,36 @@ type UpdateSettingsOptions struct { MaintenanceMode *bool `url:"maintenance_mode,omitempty" json:"maintenance_mode,omitempty"` MaintenanceModeMessage *string `url:"maintenance_mode_message,omitempty" json:"maintenance_mode_message,omitempty"` MavenPackageRequestsForwarding *bool `url:"maven_package_requests_forwarding,omitempty" json:"maven_package_requests_forwarding,omitempty"` - MaxArtifactsSize *int `url:"max_artifacts_size,omitempty" json:"max_artifacts_size,omitempty"` - MaxAttachmentSize *int `url:"max_attachment_size,omitempty" json:"max_attachment_size,omitempty"` - MaxDecompressedArchiveSize *int `url:"max_decompressed_archive_size,omitempty" json:"max_decompressed_archive_size,omitempty"` - MaxExportSize *int `url:"max_export_size,omitempty" json:"max_export_size,omitempty"` - MaxImportRemoteFileSize *int `url:"max_import_remote_file_size,omitempty" json:"max_import_remote_file_size,omitempty"` - MaxImportSize *int `url:"max_import_size,omitempty" json:"max_import_size,omitempty"` - MaxLoginAttempts *int `url:"max_login_attempts,omitempty" json:"max_login_attempts,omitempty"` - MaxNumberOfRepositoryDownloads *int `url:"max_number_of_repository_downloads,omitempty" json:"max_number_of_repository_downloads,omitempty"` - MaxNumberOfRepositoryDownloadsWithinTimePeriod *int `url:"max_number_of_repository_downloads_within_time_period,omitempty" json:"max_number_of_repository_downloads_within_time_period,omitempty"` - MaxPagesSize *int `url:"max_pages_size,omitempty" json:"max_pages_size,omitempty"` - MaxPersonalAccessTokenLifetime *int `url:"max_personal_access_token_lifetime,omitempty" json:"max_personal_access_token_lifetime,omitempty"` - MaxSSHKeyLifetime *int `url:"max_ssh_key_lifetime,omitempty" json:"max_ssh_key_lifetime,omitempty"` - MaxTerraformStateSizeBytes *int `url:"max_terraform_state_size_bytes,omitempty" 
json:"max_terraform_state_size_bytes,omitempty"` - MaxYAMLDepth *int `url:"max_yaml_depth,omitempty" json:"max_yaml_depth,omitempty"` - MaxYAMLSizeBytes *int `url:"max_yaml_size_bytes,omitempty" json:"max_yaml_size_bytes,omitempty"` - MetricsMethodCallThreshold *int `url:"metrics_method_call_threshold,omitempty" json:"metrics_method_call_threshold,omitempty"` - MinimumPasswordLength *int `url:"minimum_password_length,omitempty" json:"minimum_password_length,omitempty"` + MaxArtifactsSize *int64 `url:"max_artifacts_size,omitempty" json:"max_artifacts_size,omitempty"` + MaxAttachmentSize *int64 `url:"max_attachment_size,omitempty" json:"max_attachment_size,omitempty"` + MaxDecompressedArchiveSize *int64 `url:"max_decompressed_archive_size,omitempty" json:"max_decompressed_archive_size,omitempty"` + MaxExportSize *int64 `url:"max_export_size,omitempty" json:"max_export_size,omitempty"` + MaxImportRemoteFileSize *int64 `url:"max_import_remote_file_size,omitempty" json:"max_import_remote_file_size,omitempty"` + MaxImportSize *int64 `url:"max_import_size,omitempty" json:"max_import_size,omitempty"` + MaxLoginAttempts *int64 `url:"max_login_attempts,omitempty" json:"max_login_attempts,omitempty"` + MaxNumberOfRepositoryDownloads *int64 `url:"max_number_of_repository_downloads,omitempty" json:"max_number_of_repository_downloads,omitempty"` + MaxNumberOfRepositoryDownloadsWithinTimePeriod *int64 `url:"max_number_of_repository_downloads_within_time_period,omitempty" json:"max_number_of_repository_downloads_within_time_period,omitempty"` + MaxPagesSize *int64 `url:"max_pages_size,omitempty" json:"max_pages_size,omitempty"` + MaxPersonalAccessTokenLifetime *int64 `url:"max_personal_access_token_lifetime,omitempty" json:"max_personal_access_token_lifetime,omitempty"` + MaxSSHKeyLifetime *int64 `url:"max_ssh_key_lifetime,omitempty" json:"max_ssh_key_lifetime,omitempty"` + MaxTerraformStateSizeBytes *int64 `url:"max_terraform_state_size_bytes,omitempty" 
json:"max_terraform_state_size_bytes,omitempty"` + MaxYAMLDepth *int64 `url:"max_yaml_depth,omitempty" json:"max_yaml_depth,omitempty"` + MaxYAMLSizeBytes *int64 `url:"max_yaml_size_bytes,omitempty" json:"max_yaml_size_bytes,omitempty"` + MetricsMethodCallThreshold *int64 `url:"metrics_method_call_threshold,omitempty" json:"metrics_method_call_threshold,omitempty"` + MinimumPasswordLength *int64 `url:"minimum_password_length,omitempty" json:"minimum_password_length,omitempty"` MirrorAvailable *bool `url:"mirror_available,omitempty" json:"mirror_available,omitempty"` - MirrorCapacityThreshold *int `url:"mirror_capacity_threshold,omitempty" json:"mirror_capacity_threshold,omitempty"` - MirrorMaxCapacity *int `url:"mirror_max_capacity,omitempty" json:"mirror_max_capacity,omitempty"` - MirrorMaxDelay *int `url:"mirror_max_delay,omitempty" json:"mirror_max_delay,omitempty"` + MirrorCapacityThreshold *int64 `url:"mirror_capacity_threshold,omitempty" json:"mirror_capacity_threshold,omitempty"` + MirrorMaxCapacity *int64 `url:"mirror_max_capacity,omitempty" json:"mirror_max_capacity,omitempty"` + MirrorMaxDelay *int64 `url:"mirror_max_delay,omitempty" json:"mirror_max_delay,omitempty"` NPMPackageRequestsForwarding *bool `url:"npm_package_requests_forwarding,omitempty" json:"npm_package_requests_forwarding,omitempty"` - NotesCreateLimit *int `url:"notes_create_limit,omitempty" json:"notes_create_limit,omitempty"` + NotesCreateLimit *int64 `url:"notes_create_limit,omitempty" json:"notes_create_limit,omitempty"` NotifyOnUnknownSignIn *bool `url:"notify_on_unknown_sign_in,omitempty" json:"notify_on_unknown_sign_in,omitempty"` NugetSkipMetadataURLValidation *bool `url:"nuget_skip_metadata_url_validation,omitempty" json:"nuget_skip_metadata_url_validation,omitempty"` OutboundLocalRequestsAllowlistRaw *string `url:"outbound_local_requests_allowlist_raw,omitempty" json:"outbound_local_requests_allowlist_raw,omitempty"` OutboundLocalRequestsWhitelist *[]string 
`url:"outbound_local_requests_whitelist,omitempty" json:"outbound_local_requests_whitelist,omitempty"` - PackageMetadataPURLTypes *[]int `url:"package_metadata_purl_types,omitempty" json:"package_metadata_purl_types,omitempty"` + PackageMetadataPURLTypes *[]int64 `url:"package_metadata_purl_types,omitempty" json:"package_metadata_purl_types,omitempty"` PackageRegistryAllowAnyoneToPullOption *bool `url:"package_registry_allow_anyone_to_pull_option,omitempty" json:"package_registry_allow_anyone_to_pull_option,omitempty"` - PackageRegistryCleanupPoliciesWorkerCapacity *int `url:"package_registry_cleanup_policies_worker_capacity,omitempty" json:"package_registry_cleanup_policies_worker_capacity,omitempty"` + PackageRegistryCleanupPoliciesWorkerCapacity *int64 `url:"package_registry_cleanup_policies_worker_capacity,omitempty" json:"package_registry_cleanup_policies_worker_capacity,omitempty"` PagesDomainVerificationEnabled *bool `url:"pages_domain_verification_enabled,omitempty" json:"pages_domain_verification_enabled,omitempty"` PasswordAuthenticationEnabledForGit *bool `url:"password_authentication_enabled_for_git,omitempty" json:"password_authentication_enabled_for_git,omitempty"` PasswordAuthenticationEnabledForWeb *bool `url:"password_authentication_enabled_for_web,omitempty" json:"password_authentication_enabled_for_web,omitempty"` @@ -791,63 +788,63 @@ type UpdateSettingsOptions struct { PersonalAccessTokenPrefix *string `url:"personal_access_token_prefix,omitempty" json:"personal_access_token_prefix,omitempty"` PlantumlEnabled *bool `url:"plantuml_enabled,omitempty" json:"plantuml_enabled,omitempty"` PlantumlURL *string `url:"plantuml_url,omitempty" json:"plantuml_url,omitempty"` - PipelineLimitPerProjectUserSha *int `url:"pipeline_limit_per_project_user_sha,omitempty" json:"pipeline_limit_per_project_user_sha,omitempty"` + PipelineLimitPerProjectUserSha *int64 `url:"pipeline_limit_per_project_user_sha,omitempty" 
json:"pipeline_limit_per_project_user_sha,omitempty"` PollingIntervalMultiplier *float64 `url:"polling_interval_multiplier,omitempty" json:"polling_interval_multiplier,omitempty"` PreventMergeRequestsAuthorApproval *bool `url:"prevent_merge_requests_author_approval,omitempty" json:"prevent_merge_requests_author_approval,omitempty"` PreventMergeRequestsCommittersApproval *bool `url:"prevent_merge_requests_committers_approval,omitempty" json:"prevent_merge_requests_committers_approval,omitempty"` - ProjectDownloadExportLimit *int `url:"project_download_export_limit,omitempty" json:"project_download_export_limit,omitempty"` + ProjectDownloadExportLimit *int64 `url:"project_download_export_limit,omitempty" json:"project_download_export_limit,omitempty"` ProjectExportEnabled *bool `url:"project_export_enabled,omitempty" json:"project_export_enabled,omitempty"` - ProjectExportLimit *int `url:"project_export_limit,omitempty" json:"project_export_limit,omitempty"` - ProjectImportLimit *int `url:"project_import_limit,omitempty" json:"project_import_limit,omitempty"` - ProjectJobsAPIRateLimit *int `url:"project_jobs_api_rate_limit,omitempty" json:"project_jobs_api_rate_limit,omitempty"` - ProjectRunnerTokenExpirationInterval *int `url:"project_runner_token_expiration_interval,omitempty" json:"project_runner_token_expiration_interval,omitempty"` - ProjectsAPIRateLimitUnauthenticated *int `url:"projects_api_rate_limit_unauthenticated,omitempty" json:"projects_api_rate_limit_unauthenticated,omitempty"` + ProjectExportLimit *int64 `url:"project_export_limit,omitempty" json:"project_export_limit,omitempty"` + ProjectImportLimit *int64 `url:"project_import_limit,omitempty" json:"project_import_limit,omitempty"` + ProjectJobsAPIRateLimit *int64 `url:"project_jobs_api_rate_limit,omitempty" json:"project_jobs_api_rate_limit,omitempty"` + ProjectRunnerTokenExpirationInterval *int64 `url:"project_runner_token_expiration_interval,omitempty" 
json:"project_runner_token_expiration_interval,omitempty"` + ProjectsAPIRateLimitUnauthenticated *int64 `url:"projects_api_rate_limit_unauthenticated,omitempty" json:"projects_api_rate_limit_unauthenticated,omitempty"` PrometheusMetricsEnabled *bool `url:"prometheus_metrics_enabled,omitempty" json:"prometheus_metrics_enabled,omitempty"` ProtectedCIVariables *bool `url:"protected_ci_variables,omitempty" json:"protected_ci_variables,omitempty"` PseudonymizerEnabled *bool `url:"pseudonymizer_enabled,omitempty" json:"pseudonymizer_enabled,omitempty"` - PushEventActivitiesLimit *int `url:"push_event_activities_limit,omitempty" json:"push_event_activities_limit,omitempty"` - PushEventHooksLimit *int `url:"push_event_hooks_limit,omitempty" json:"push_event_hooks_limit,omitempty"` + PushEventActivitiesLimit *int64 `url:"push_event_activities_limit,omitempty" json:"push_event_activities_limit,omitempty"` + PushEventHooksLimit *int64 `url:"push_event_hooks_limit,omitempty" json:"push_event_hooks_limit,omitempty"` PyPIPackageRequestsForwarding *bool `url:"pypi_package_requests_forwarding,omitempty" json:"pypi_package_requests_forwarding,omitempty"` - RSAKeyRestriction *int `url:"rsa_key_restriction,omitempty" json:"rsa_key_restriction,omitempty"` + RSAKeyRestriction *int64 `url:"rsa_key_restriction,omitempty" json:"rsa_key_restriction,omitempty"` RateLimitingResponseText *string `url:"rate_limiting_response_text,omitempty" json:"rate_limiting_response_text,omitempty"` - RawBlobRequestLimit *int `url:"raw_blob_request_limit,omitempty" json:"raw_blob_request_limit,omitempty"` + RawBlobRequestLimit *int64 `url:"raw_blob_request_limit,omitempty" json:"raw_blob_request_limit,omitempty"` RecaptchaEnabled *bool `url:"recaptcha_enabled,omitempty" json:"recaptcha_enabled,omitempty"` RecaptchaPrivateKey *string `url:"recaptcha_private_key,omitempty" json:"recaptcha_private_key,omitempty"` RecaptchaSiteKey *string `url:"recaptcha_site_key,omitempty" json:"recaptcha_site_key,omitempty"` 
- ReceiveMaxInputSize *int `url:"receive_max_input_size,omitempty" json:"receive_max_input_size,omitempty"` + ReceiveMaxInputSize *int64 `url:"receive_max_input_size,omitempty" json:"receive_max_input_size,omitempty"` ReceptiveClusterAgentsEnabled *bool `url:"receptive_cluster_agents_enabled,omitempty" json:"receptive_cluster_agents_enabled,omitempty"` RememberMeEnabled *bool `url:"remember_me_enabled,omitempty" json:"remember_me_enabled,omitempty"` RepositoryChecksEnabled *bool `url:"repository_checks_enabled,omitempty" json:"repository_checks_enabled,omitempty"` - RepositorySizeLimit *int `url:"repository_size_limit,omitempty" json:"repository_size_limit,omitempty"` + RepositorySizeLimit *int64 `url:"repository_size_limit,omitempty" json:"repository_size_limit,omitempty"` RepositoryStorages *[]string `url:"repository_storages,omitempty" json:"repository_storages,omitempty"` - RepositoryStoragesWeighted *map[string]int `url:"repository_storages_weighted,omitempty" json:"repository_storages_weighted,omitempty"` + RepositoryStoragesWeighted *map[string]int64 `url:"repository_storages_weighted,omitempty" json:"repository_storages_weighted,omitempty"` RequireAdminApprovalAfterUserSignup *bool `url:"require_admin_approval_after_user_signup,omitempty" json:"require_admin_approval_after_user_signup,omitempty"` RequireAdminTwoFactorAuthentication *bool `url:"require_admin_two_factor_authentication,omitempty" json:"require_admin_two_factor_authentication,omitempty"` RequirePersonalAccessTokenExpiry *bool `url:"require_personal_access_token_expiry,omitempty" json:"require_personal_access_token_expiry,omitempty"` RequireTwoFactorAuthentication *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` RestrictedVisibilityLevels *[]VisibilityValue `url:"restricted_visibility_levels,omitempty" json:"restricted_visibility_levels,omitempty"` - RunnerTokenExpirationInterval *int `url:"runner_token_expiration_interval,omitempty" 
json:"runner_token_expiration_interval,omitempty"` - SearchRateLimit *int `url:"search_rate_limit,omitempty" json:"search_rate_limit,omitempty"` - SearchRateLimitUnauthenticated *int `url:"search_rate_limit_unauthenticated,omitempty" json:"search_rate_limit_unauthenticated,omitempty"` + RunnerTokenExpirationInterval *int64 `url:"runner_token_expiration_interval,omitempty" json:"runner_token_expiration_interval,omitempty"` + SearchRateLimit *int64 `url:"search_rate_limit,omitempty" json:"search_rate_limit,omitempty"` + SearchRateLimitUnauthenticated *int64 `url:"search_rate_limit_unauthenticated,omitempty" json:"search_rate_limit_unauthenticated,omitempty"` SecretDetectionRevocationTokenTypesURL *string `url:"secret_detection_revocation_token_types_url,omitempty" json:"secret_detection_revocation_token_types_url,omitempty"` SecretDetectionTokenRevocationEnabled *bool `url:"secret_detection_token_revocation_enabled,omitempty" json:"secret_detection_token_revocation_enabled,omitempty"` SecretDetectionTokenRevocationToken *string `url:"secret_detection_token_revocation_token,omitempty" json:"secret_detection_token_revocation_token,omitempty"` SecretDetectionTokenRevocationURL *string `url:"secret_detection_token_revocation_url,omitempty" json:"secret_detection_token_revocation_url,omitempty"` - SecurityApprovalPoliciesLimit *int `url:"security_approval_policies_limit,omitempty" json:"security_approval_policies_limit,omitempty"` + SecurityApprovalPoliciesLimit *int64 `url:"security_approval_policies_limit,omitempty" json:"security_approval_policies_limit,omitempty"` SecurityPolicyGlobalGroupApproversEnabled *bool `url:"security_policy_global_group_approvers_enabled,omitempty" json:"security_policy_global_group_approvers_enabled,omitempty"` SecurityTXTContent *string `url:"security_txt_content,omitempty" json:"security_txt_content,omitempty"` SendUserConfirmationEmail *bool `url:"send_user_confirmation_email,omitempty" json:"send_user_confirmation_email,omitempty"` 
SentryClientsideDSN *string `url:"sentry_clientside_dsn,omitempty" json:"sentry_clientside_dsn,omitempty"` SentryDSN *string `url:"sentry_dsn,omitempty" json:"sentry_dsn,omitempty"` - SentryEnabled *string `url:"sentry_enabled,omitempty" json:"sentry_enabled,omitempty"` + SentryEnabled *bool `url:"sentry_enabled,omitempty" json:"sentry_enabled,omitempty"` SentryEnvironment *string `url:"sentry_environment,omitempty" json:"sentry_environment,omitempty"` ServiceAccessTokensExpirationEnforced *bool `url:"service_access_tokens_expiration_enforced,omitempty" json:"service_access_tokens_expiration_enforced,omitempty"` - SessionExpireDelay *int `url:"session_expire_delay,omitempty" json:"session_expire_delay,omitempty"` + SessionExpireDelay *int64 `url:"session_expire_delay,omitempty" json:"session_expire_delay,omitempty"` SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"` - SharedRunnersMinutes *int `url:"shared_runners_minutes,omitempty" json:"shared_runners_minutes,omitempty"` + SharedRunnersMinutes *int64 `url:"shared_runners_minutes,omitempty" json:"shared_runners_minutes,omitempty"` SharedRunnersText *string `url:"shared_runners_text,omitempty" json:"shared_runners_text,omitempty"` - SidekiqJobLimiterCompressionThresholdBytes *int `url:"sidekiq_job_limiter_compression_threshold_bytes,omitempty" json:"sidekiq_job_limiter_compression_threshold_bytes,omitempty"` - SidekiqJobLimiterLimitBytes *int `url:"sidekiq_job_limiter_limit_bytes,omitempty" json:"sidekiq_job_limiter_limit_bytes,omitempty"` + SidekiqJobLimiterCompressionThresholdBytes *int64 `url:"sidekiq_job_limiter_compression_threshold_bytes,omitempty" json:"sidekiq_job_limiter_compression_threshold_bytes,omitempty"` + SidekiqJobLimiterLimitBytes *int64 `url:"sidekiq_job_limiter_limit_bytes,omitempty" json:"sidekiq_job_limiter_limit_bytes,omitempty"` SidekiqJobLimiterMode *string `url:"sidekiq_job_limiter_mode,omitempty" 
json:"sidekiq_job_limiter_mode,omitempty"` SignInText *string `url:"sign_in_text,omitempty" json:"sign_in_text,omitempty"` SignupEnabled *bool `url:"signup_enabled,omitempty" json:"signup_enabled,omitempty"` @@ -858,7 +855,7 @@ type UpdateSettingsOptions struct { SlackAppSecret *string `url:"slack_app_secret,omitempty" json:"slack_app_secret,omitempty"` SlackAppSigningSecret *string `url:"slack_app_signing_secret,omitempty" json:"slack_app_signing_secret,omitempty"` SlackAppVerificationToken *string `url:"slack_app_verification_token,omitempty" json:"slack_app_verification_token,omitempty"` - SnippetSizeLimit *int `url:"snippet_size_limit,omitempty" json:"snippet_size_limit,omitempty"` + SnippetSizeLimit *int64 `url:"snippet_size_limit,omitempty" json:"snippet_size_limit,omitempty"` SnowplowAppID *string `url:"snowplow_app_id,omitempty" json:"snowplow_app_id,omitempty"` SnowplowCollectorHostname *string `url:"snowplow_collector_hostname,omitempty" json:"snowplow_collector_hostname,omitempty"` SnowplowCookieDomain *string `url:"snowplow_cookie_domain,omitempty" json:"snowplow_cookie_domain,omitempty"` @@ -873,56 +870,56 @@ type UpdateSettingsOptions struct { StaticObjectsExternalStorageAuthToken *string `url:"static_objects_external_storage_auth_token,omitempty" json:"static_objects_external_storage_auth_token,omitempty"` StaticObjectsExternalStorageURL *string `url:"static_objects_external_storage_url,omitempty" json:"static_objects_external_storage_url,omitempty"` SuggestPipelineEnabled *bool `url:"suggest_pipeline_enabled,omitempty" json:"suggest_pipeline_enabled,omitempty"` - TerminalMaxSessionTime *int `url:"terminal_max_session_time,omitempty" json:"terminal_max_session_time,omitempty"` + TerminalMaxSessionTime *int64 `url:"terminal_max_session_time,omitempty" json:"terminal_max_session_time,omitempty"` Terms *string `url:"terms,omitempty" json:"terms,omitempty"` ThrottleAuthenticatedAPIEnabled *bool `url:"throttle_authenticated_api_enabled,omitempty" 
json:"throttle_authenticated_api_enabled,omitempty"` - ThrottleAuthenticatedAPIPeriodInSeconds *int `url:"throttle_authenticated_api_period_in_seconds,omitempty" json:"throttle_authenticated_api_period_in_seconds,omitempty"` - ThrottleAuthenticatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_api_requests_per_period,omitempty" json:"throttle_authenticated_api_requests_per_period,omitempty"` + ThrottleAuthenticatedAPIPeriodInSeconds *int64 `url:"throttle_authenticated_api_period_in_seconds,omitempty" json:"throttle_authenticated_api_period_in_seconds,omitempty"` + ThrottleAuthenticatedAPIRequestsPerPeriod *int64 `url:"throttle_authenticated_api_requests_per_period,omitempty" json:"throttle_authenticated_api_requests_per_period,omitempty"` ThrottleAuthenticatedDeprecatedAPIEnabled *bool `url:"throttle_authenticated_deprecated_api_enabled,omitempty" json:"throttle_authenticated_deprecated_api_enabled,omitempty"` - ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty"` - ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_authenticated_deprecated_api_requests_per_period,omitempty"` + ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds *int64 `url:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty"` + ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod *int64 `url:"throttle_authenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_authenticated_deprecated_api_requests_per_period,omitempty"` ThrottleAuthenticatedFilesAPIEnabled *bool `url:"throttle_authenticated_files_api_enabled,omitempty" json:"throttle_authenticated_files_api_enabled,omitempty"` - ThrottleAuthenticatedFilesAPIPeriodInSeconds *int 
`url:"throttle_authenticated_files_api_period_in_seconds,omitempty" json:"throttle_authenticated_files_api_period_in_seconds,omitempty"` - ThrottleAuthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_authenticated_files_api_requests_per_period,omitempty" json:"throttle_authenticated_files_api_requests_per_period,omitempty"` + ThrottleAuthenticatedFilesAPIPeriodInSeconds *int64 `url:"throttle_authenticated_files_api_period_in_seconds,omitempty" json:"throttle_authenticated_files_api_period_in_seconds,omitempty"` + ThrottleAuthenticatedFilesAPIRequestsPerPeriod *int64 `url:"throttle_authenticated_files_api_requests_per_period,omitempty" json:"throttle_authenticated_files_api_requests_per_period,omitempty"` ThrottleAuthenticatedGitLFSEnabled *bool `url:"throttle_authenticated_git_lfs_enabled,omitempty" json:"throttle_authenticated_git_lfs_enabled,omitempty"` - ThrottleAuthenticatedGitLFSPeriodInSeconds *int `url:"throttle_authenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_authenticated_git_lfs_period_in_seconds,omitempty"` - ThrottleAuthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_authenticated_git_lfs_requests_per_period,omitempty" json:"throttle_authenticated_git_lfs_requests_per_period,omitempty"` + ThrottleAuthenticatedGitLFSPeriodInSeconds *int64 `url:"throttle_authenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_authenticated_git_lfs_period_in_seconds,omitempty"` + ThrottleAuthenticatedGitLFSRequestsPerPeriod *int64 `url:"throttle_authenticated_git_lfs_requests_per_period,omitempty" json:"throttle_authenticated_git_lfs_requests_per_period,omitempty"` ThrottleAuthenticatedPackagesAPIEnabled *bool `url:"throttle_authenticated_packages_api_enabled,omitempty" json:"throttle_authenticated_packages_api_enabled,omitempty"` - ThrottleAuthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_authenticated_packages_api_period_in_seconds,omitempty" json:"throttle_authenticated_packages_api_period_in_seconds,omitempty"` - 
ThrottleAuthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_authenticated_packages_api_requests_per_period,omitempty" json:"throttle_authenticated_packages_api_requests_per_period,omitempty"` + ThrottleAuthenticatedPackagesAPIPeriodInSeconds *int64 `url:"throttle_authenticated_packages_api_period_in_seconds,omitempty" json:"throttle_authenticated_packages_api_period_in_seconds,omitempty"` + ThrottleAuthenticatedPackagesAPIRequestsPerPeriod *int64 `url:"throttle_authenticated_packages_api_requests_per_period,omitempty" json:"throttle_authenticated_packages_api_requests_per_period,omitempty"` ThrottleAuthenticatedWebEnabled *bool `url:"throttle_authenticated_web_enabled,omitempty" json:"throttle_authenticated_web_enabled,omitempty"` - ThrottleAuthenticatedWebPeriodInSeconds *int `url:"throttle_authenticated_web_period_in_seconds,omitempty" json:"throttle_authenticated_web_period_in_seconds,omitempty"` - ThrottleAuthenticatedWebRequestsPerPeriod *int `url:"throttle_authenticated_web_requests_per_period,omitempty" json:"throttle_authenticated_web_requests_per_period,omitempty"` + ThrottleAuthenticatedWebPeriodInSeconds *int64 `url:"throttle_authenticated_web_period_in_seconds,omitempty" json:"throttle_authenticated_web_period_in_seconds,omitempty"` + ThrottleAuthenticatedWebRequestsPerPeriod *int64 `url:"throttle_authenticated_web_requests_per_period,omitempty" json:"throttle_authenticated_web_requests_per_period,omitempty"` ThrottleIncidentManagementNotificationEnabled *bool `url:"throttle_incident_management_notification_enabled,omitempty" json:"throttle_incident_management_notification_enabled,omitempty"` - ThrottleIncidentManagementNotificationPerPeriod *int `url:"throttle_incident_management_notification_per_period,omitempty" json:"throttle_incident_management_notification_per_period,omitempty"` - ThrottleIncidentManagementNotificationPeriodInSeconds *int `url:"throttle_incident_management_notification_period_in_seconds,omitempty" 
json:"throttle_incident_management_notification_period_in_seconds,omitempty"` - ThrottleProtectedPathsEnabled *bool `url:"throttle_protected_paths_enabled_enabled,omitempty" json:"throttle_protected_paths_enabled,omitempty"` - ThrottleProtectedPathsPeriodInSeconds *int `url:"throttle_protected_paths_enabled_period_in_seconds,omitempty" json:"throttle_protected_paths_period_in_seconds,omitempty"` - ThrottleProtectedPathsRequestsPerPeriod *int `url:"throttle_protected_paths_enabled_requests_per_period,omitempty" json:"throttle_protected_paths_per_period,omitempty"` + ThrottleIncidentManagementNotificationPerPeriod *int64 `url:"throttle_incident_management_notification_per_period,omitempty" json:"throttle_incident_management_notification_per_period,omitempty"` + ThrottleIncidentManagementNotificationPeriodInSeconds *int64 `url:"throttle_incident_management_notification_period_in_seconds,omitempty" json:"throttle_incident_management_notification_period_in_seconds,omitempty"` + ThrottleProtectedPathsEnabled *bool `url:"throttle_protected_paths_enabled,omitempty" json:"throttle_protected_paths_enabled,omitempty"` + ThrottleProtectedPathsPeriodInSeconds *int64 `url:"throttle_protected_paths_period_in_seconds,omitempty" json:"throttle_protected_paths_period_in_seconds,omitempty"` + ThrottleProtectedPathsRequestsPerPeriod *int64 `url:"throttle_protected_paths_requests_per_period,omitempty" json:"throttle_protected_paths_requests_per_period,omitempty"` ThrottleUnauthenticatedAPIEnabled *bool `url:"throttle_unauthenticated_api_enabled,omitempty" json:"throttle_unauthenticated_api_enabled,omitempty"` - ThrottleUnauthenticatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_api_period_in_seconds,omitempty"` - ThrottleUnauthenticatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_api_requests_per_period,omitempty"` + 
ThrottleUnauthenticatedAPIPeriodInSeconds *int64 `url:"throttle_unauthenticated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedAPIRequestsPerPeriod *int64 `url:"throttle_unauthenticated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_api_requests_per_period,omitempty"` ThrottleUnauthenticatedDeprecatedAPIEnabled *bool `url:"throttle_unauthenticated_deprecated_api_enabled,omitempty" json:"throttle_unauthenticated_deprecated_api_enabled,omitempty"` - ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty"` - ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds *int64 `url:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod *int64 `url:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty"` ThrottleUnauthenticatedFilesAPIEnabled *bool `url:"throttle_unauthenticated_files_api_enabled,omitempty" json:"throttle_unauthenticated_files_api_enabled,omitempty"` - ThrottleUnauthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_files_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_files_api_period_in_seconds,omitempty"` - ThrottleUnauthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_files_api_requests_per_period,omitempty" json:"throttle_unauthenticated_files_api_requests_per_period,omitempty"` + 
ThrottleUnauthenticatedFilesAPIPeriodInSeconds *int64 `url:"throttle_unauthenticated_files_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_files_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedFilesAPIRequestsPerPeriod *int64 `url:"throttle_unauthenticated_files_api_requests_per_period,omitempty" json:"throttle_unauthenticated_files_api_requests_per_period,omitempty"` ThrottleUnauthenticatedGitLFSEnabled *bool `url:"throttle_unauthenticated_git_lfs_enabled,omitempty" json:"throttle_unauthenticated_git_lfs_enabled,omitempty"` - ThrottleUnauthenticatedGitLFSPeriodInSeconds *int `url:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty"` - ThrottleUnauthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty" json:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty"` + ThrottleUnauthenticatedGitLFSPeriodInSeconds *int64 `url:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty"` + ThrottleUnauthenticatedGitLFSRequestsPerPeriod *int64 `url:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty" json:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty"` ThrottleUnauthenticatedPackagesAPIEnabled *bool `url:"throttle_unauthenticated_packages_api_enabled,omitempty" json:"throttle_unauthenticated_packages_api_enabled,omitempty"` - ThrottleUnauthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty"` - ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_packages_api_requests_per_period,omitempty" json:"throttle_unauthenticated_packages_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedPackagesAPIPeriodInSeconds *int64 
`url:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod *int64 `url:"throttle_unauthenticated_packages_api_requests_per_period,omitempty" json:"throttle_unauthenticated_packages_api_requests_per_period,omitempty"` ThrottleUnauthenticatedWebEnabled *bool `url:"throttle_unauthenticated_web_enabled,omitempty" json:"throttle_unauthenticated_web_enabled,omitempty"` - ThrottleUnauthenticatedWebPeriodInSeconds *int `url:"throttle_unauthenticated_web_period_in_seconds,omitempty" json:"throttle_unauthenticated_web_period_in_seconds,omitempty"` - ThrottleUnauthenticatedWebRequestsPerPeriod *int `url:"throttle_unauthenticated_web_requests_per_period,omitempty" json:"throttle_unauthenticated_web_requests_per_period,omitempty"` + ThrottleUnauthenticatedWebPeriodInSeconds *int64 `url:"throttle_unauthenticated_web_period_in_seconds,omitempty" json:"throttle_unauthenticated_web_period_in_seconds,omitempty"` + ThrottleUnauthenticatedWebRequestsPerPeriod *int64 `url:"throttle_unauthenticated_web_requests_per_period,omitempty" json:"throttle_unauthenticated_web_requests_per_period,omitempty"` TimeTrackingLimitToHours *bool `url:"time_tracking_limit_to_hours,omitempty" json:"time_tracking_limit_to_hours,omitempty"` - TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` - UnconfirmedUsersDeleteAfterDays *int `url:"unconfirmed_users_delete_after_days,omitempty" json:"unconfirmed_users_delete_after_days,omitempty"` + TwoFactorGracePeriod *int64 `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` + UnconfirmedUsersDeleteAfterDays *int64 `url:"unconfirmed_users_delete_after_days,omitempty" json:"unconfirmed_users_delete_after_days,omitempty"` UniqueIPsLimitEnabled *bool `url:"unique_ips_limit_enabled,omitempty" json:"unique_ips_limit_enabled,omitempty"` - 
UniqueIPsLimitPerUser *int `url:"unique_ips_limit_per_user,omitempty" json:"unique_ips_limit_per_user,omitempty"` - UniqueIPsLimitTimeWindow *int `url:"unique_ips_limit_time_window,omitempty" json:"unique_ips_limit_time_window,omitempty"` + UniqueIPsLimitPerUser *int64 `url:"unique_ips_limit_per_user,omitempty" json:"unique_ips_limit_per_user,omitempty"` + UniqueIPsLimitTimeWindow *int64 `url:"unique_ips_limit_time_window,omitempty" json:"unique_ips_limit_time_window,omitempty"` UpdateRunnerVersionsEnabled *bool `url:"update_runner_versions_enabled,omitempty" json:"update_runner_versions_enabled,omitempty"` UpdatingNameDisabledForUsers *bool `url:"updating_name_disabled_for_users,omitempty" json:"updating_name_disabled_for_users,omitempty"` UsagePingEnabled *bool `url:"usage_ping_enabled,omitempty" json:"usage_ping_enabled,omitempty"` @@ -932,16 +929,17 @@ type UpdateSettingsOptions struct { UserDefaultExternal *bool `url:"user_default_external,omitempty" json:"user_default_external,omitempty"` UserDefaultInternalRegex *string `url:"user_default_internal_regex,omitempty" json:"user_default_internal_regex,omitempty"` UserDefaultsToPrivateProfile *bool `url:"user_defaults_to_private_profile,omitempty" json:"user_defaults_to_private_profile,omitempty"` - UserEmailLookupLimit *int `url:"user_email_lookup_limit,omitempty" json:"user_email_lookup_limit,omitempty"` + UserEmailLookupLimit *int64 `url:"user_email_lookup_limit,omitempty" json:"user_email_lookup_limit,omitempty"` UserOauthApplications *bool `url:"user_oauth_applications,omitempty" json:"user_oauth_applications,omitempty"` UserShowAddSSHKeyMessage *bool `url:"user_show_add_ssh_key_message,omitempty" json:"user_show_add_ssh_key_message,omitempty"` - UsersGetByIDLimit *int `url:"users_get_by_id_limit,omitempty" json:"users_get_by_id_limit,omitempty"` + UsersGetByIDLimit *int64 `url:"users_get_by_id_limit,omitempty" json:"users_get_by_id_limit,omitempty"` UsersGetByIDLimitAllowlistRaw *string 
`url:"users_get_by_id_limit_allowlist_raw,omitempty" json:"users_get_by_id_limit_allowlist_raw,omitempty"` ValidRunnerRegistrars *[]string `url:"valid_runner_registrars,omitempty" json:"valid_runner_registrars,omitempty"` VersionCheckEnabled *bool `url:"version_check_enabled,omitempty" json:"version_check_enabled,omitempty"` WebIDEClientsidePreviewEnabled *bool `url:"web_ide_clientside_preview_enabled,omitempty" json:"web_ide_clientside_preview_enabled,omitempty"` WhatsNewVariant *string `url:"whats_new_variant,omitempty" json:"whats_new_variant,omitempty"` - WikiPageMaxContentBytes *int `url:"wiki_page_max_content_bytes,omitempty" json:"wiki_page_max_content_bytes,omitempty"` + WikiPageMaxContentBytes *int64 `url:"wiki_page_max_content_bytes,omitempty" json:"wiki_page_max_content_bytes,omitempty"` + LockMembershipsToSAML *bool `url:"lock_memberships_to_saml,omitempty" json:"lock_memberships_to_saml,omitempty"` // Deprecated: Use AbuseNotificationEmail instead. AdminNotificationEmail *string `url:"admin_notification_email,omitempty" json:"admin_notification_email,omitempty"` @@ -950,25 +948,25 @@ type UpdateSettingsOptions struct { // Deprecated: Use AssetProxyAllowlist instead. AssetProxyWhitelist *[]string `url:"asset_proxy_whitelist,omitempty" json:"asset_proxy_whitelist,omitempty"` // Deprecated: Use DefaultBranchProtectionDefaults instead. 
- DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` + DefaultBranchProtection *int64 `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` // Deprecated: Cannot be set through the API, always true HousekeepingBitmapsEnabled *bool `url:"housekeeping_bitmaps_enabled,omitempty" json:"housekeeping_bitmaps_enabled,omitempty"` // Deprecated: use HousekeepingOptimizeRepositoryPeriod instead - HousekeepingFullRepackPeriod *int `url:"housekeeping_full_repack_period,omitempty" json:"housekeeping_full_repack_period,omitempty"` + HousekeepingFullRepackPeriod *int64 `url:"housekeeping_full_repack_period,omitempty" json:"housekeeping_full_repack_period,omitempty"` // Deprecated: use HousekeepingOptimizeRepositoryPeriod instead - HousekeepingGcPeriod *int `url:"housekeeping_gc_period,omitempty" json:"housekeeping_gc_period,omitempty"` + HousekeepingGcPeriod *int64 `url:"housekeeping_gc_period,omitempty" json:"housekeeping_gc_period,omitempty"` // Deprecated: use HousekeepingOptimizeRepositoryPeriod instead - HousekeepingIncrementalRepackPeriod *int `url:"housekeeping_incremental_repack_period,omitempty" json:"housekeeping_incremental_repack_period,omitempty"` + HousekeepingIncrementalRepackPeriod *int64 `url:"housekeeping_incremental_repack_period,omitempty" json:"housekeeping_incremental_repack_period,omitempty"` // Deprecated: use PerformanceBarAllowedGroupPath instead - PerformanceBarAllowedGroupID *int `url:"performance_bar_allowed_group_id,omitempty" json:"performance_bar_allowed_group_id,omitempty"` + PerformanceBarAllowedGroupID *int64 `url:"performance_bar_allowed_group_id,omitempty" json:"performance_bar_allowed_group_id,omitempty"` // Deprecated: use PerformanceBarAllowedGroupPath: nil instead PerformanceBarEnabled *bool `url:"performance_bar_enabled,omitempty" json:"performance_bar_enabled,omitempty"` // Deprecated: Use ThrottleUnauthenticatedWebEnabled or 
ThrottleUnauthenticatedAPIEnabled instead. (Deprecated in GitLab 14.3) ThrottleUnauthenticatedEnabled *bool `url:"throttle_unauthenticated_enabled,omitempty" json:"throttle_unauthenticated_enabled,omitempty"` // Deprecated: Use ThrottleUnauthenticatedWebPeriodInSeconds or ThrottleUnauthenticatedAPIPeriodInSeconds instead. (Deprecated in GitLab 14.3) - ThrottleUnauthenticatedPeriodInSeconds *int `url:"throttle_unauthenticated_period_in_seconds,omitempty" json:"throttle_unauthenticated_period_in_seconds,omitempty"` + ThrottleUnauthenticatedPeriodInSeconds *int64 `url:"throttle_unauthenticated_period_in_seconds,omitempty" json:"throttle_unauthenticated_period_in_seconds,omitempty"` // Deprecated: Use ThrottleUnauthenticatedWebRequestsPerPeriod or ThrottleUnauthenticatedAPIRequestsPerPeriod instead. (Deprecated in GitLab 14.3) - ThrottleUnauthenticatedRequestsPerPeriod *int `url:"throttle_unauthenticated_requests_per_period,omitempty" json:"throttle_unauthenticated_requests_per_period,omitempty"` + ThrottleUnauthenticatedRequestsPerPeriod *int64 `url:"throttle_unauthenticated_requests_per_period,omitempty" json:"throttle_unauthenticated_requests_per_period,omitempty"` } // BranchProtectionDefaultsOptions represents default Git protected branch permissions options. 
@@ -976,10 +974,10 @@ type UpdateSettingsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/groups/#options-for-default_branch_protection_defaults type BranchProtectionDefaultsOptions struct { - AllowedToPush *[]int `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` - AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` - AllowedToMerge *[]int `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` - DeveloperCanInitialPush *bool `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"` + AllowedToPush *[]int64 `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` + AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` + AllowedToMerge *[]int64 `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` + DeveloperCanInitialPush *bool `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"` } // UpdateSettings updates the application settings. 
@@ -987,16 +985,10 @@ type BranchProtectionDefaultsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/settings/#update-application-settings func (s *SettingsService) UpdateSettings(opt *UpdateSettingsOptions, options ...RequestOptionFunc) (*Settings, *Response, error) { - req, err := s.client.NewRequest(http.MethodPut, "application/settings", opt, options) - if err != nil { - return nil, nil, err - } - - as := new(Settings) - resp, err := s.client.Do(req, as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil + return do[*Settings](s.client, + withMethod(http.MethodPut), + withPath("application/settings"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/sidekiq_metrics.go b/vendor/gitlab.com/gitlab-org/api/client-go/sidekiq_metrics.go index 0a985847fc..0eb6a29ac3 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/sidekiq_metrics.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/sidekiq_metrics.go @@ -17,7 +17,6 @@ package gitlab import ( - "net/http" "time" ) @@ -44,10 +43,16 @@ var _ SidekiqServiceInterface = (*SidekiqService)(nil) // GitLab API docs: // https://docs.gitlab.com/api/sidekiq_metrics/#get-the-current-queue-metrics type QueueMetrics struct { - Queues map[string]struct { - Backlog int `json:"backlog"` - Latency int `json:"latency"` - } `json:"queues"` + Queues map[string]QueueMetricsQueue `json:"queues"` +} + +// QueueMetricsQueue represents the GitLab sidekiq queue metrics queue. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/sidekiq_metrics/#get-the-current-queue-metrics +type QueueMetricsQueue struct { + Backlog int64 `json:"backlog"` + Latency int64 `json:"latency"` } // GetQueueMetrics lists information about all the registered queues, @@ -56,18 +61,10 @@ type QueueMetrics struct { // GitLab API docs: // https://docs.gitlab.com/api/sidekiq_metrics/#get-the-current-queue-metrics func (s *SidekiqService) GetQueueMetrics(options ...RequestOptionFunc) (*QueueMetrics, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "/sidekiq/queue_metrics", nil, options) - if err != nil { - return nil, nil, err - } - - q := new(QueueMetrics) - resp, err := s.client.Do(req, q) - if err != nil { - return nil, resp, err - } - - return q, resp, nil + return do[*QueueMetrics](s.client, + withPath("/sidekiq/queue_metrics"), + withRequestOpts(options...), + ) } // ProcessMetrics represents the GitLab sidekiq process metrics. @@ -75,16 +72,22 @@ func (s *SidekiqService) GetQueueMetrics(options ...RequestOptionFunc) (*QueueMe // GitLab API docs: // https://docs.gitlab.com/api/sidekiq_metrics/#get-the-current-process-metrics type ProcessMetrics struct { - Processes []struct { - Hostname string `json:"hostname"` - Pid int `json:"pid"` - Tag string `json:"tag"` - StartedAt *time.Time `json:"started_at"` - Queues []string `json:"queues"` - Labels []string `json:"labels"` - Concurrency int `json:"concurrency"` - Busy int `json:"busy"` - } `json:"processes"` + Processes []ProcessMetricsProcess `json:"processes"` +} + +// ProcessMetricsProcess represents the GitLab sidekiq process metrics process. 
+// +// GitLab API docs: +// https://docs.gitlab.com/api/sidekiq_metrics/#get-the-current-process-metrics +type ProcessMetricsProcess struct { + Hostname string `json:"hostname"` + Pid int64 `json:"pid"` + Tag string `json:"tag"` + StartedAt *time.Time `json:"started_at"` + Queues []string `json:"queues"` + Labels []string `json:"labels"` + Concurrency int64 `json:"concurrency"` + Busy int64 `json:"busy"` } // GetProcessMetrics lists information about all the Sidekiq workers registered @@ -93,18 +96,10 @@ type ProcessMetrics struct { // GitLab API docs: // https://docs.gitlab.com/api/sidekiq_metrics/#get-the-current-process-metrics func (s *SidekiqService) GetProcessMetrics(options ...RequestOptionFunc) (*ProcessMetrics, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "/sidekiq/process_metrics", nil, options) - if err != nil { - return nil, nil, err - } - - p := new(ProcessMetrics) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil + return do[*ProcessMetrics](s.client, + withPath("/sidekiq/process_metrics"), + withRequestOpts(options...), + ) } // JobStats represents the GitLab sidekiq job stats. @@ -112,11 +107,17 @@ func (s *SidekiqService) GetProcessMetrics(options ...RequestOptionFunc) (*Proce // GitLab API docs: // https://docs.gitlab.com/api/sidekiq_metrics/#get-the-current-job-statistics type JobStats struct { - Jobs struct { - Processed int `json:"processed"` - Failed int `json:"failed"` - Enqueued int `json:"enqueued"` - } `json:"jobs"` + Jobs JobStatsJobs `json:"jobs"` +} + +// JobStatsJobs represents the GitLab sidekiq job stats jobs. +// +// GitLab API docs: +// https://docs.gitlab.com/api/sidekiq_metrics/#get-the-current-job-statistics +type JobStatsJobs struct { + Processed int64 `json:"processed"` + Failed int64 `json:"failed"` + Enqueued int64 `json:"enqueued"` } // GetJobStats list information about the jobs that Sidekiq has performed. 
@@ -124,18 +125,10 @@ type JobStats struct { // GitLab API docs: // https://docs.gitlab.com/api/sidekiq_metrics/#get-the-current-job-statistics func (s *SidekiqService) GetJobStats(options ...RequestOptionFunc) (*JobStats, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "/sidekiq/job_stats", nil, options) - if err != nil { - return nil, nil, err - } - - j := new(JobStats) - resp, err := s.client.Do(req, j) - if err != nil { - return nil, resp, err - } - - return j, resp, nil + return do[*JobStats](s.client, + withPath("/sidekiq/job_stats"), + withRequestOpts(options...), + ) } // CompoundMetrics represents the GitLab sidekiq compounded stats. @@ -154,16 +147,8 @@ type CompoundMetrics struct { // GitLab API docs: // https://docs.gitlab.com/api/sidekiq_metrics/#get-a-compound-response-of-all-the-previously-mentioned-metrics func (s *SidekiqService) GetCompoundMetrics(options ...RequestOptionFunc) (*CompoundMetrics, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "/sidekiq/compound_metrics", nil, options) - if err != nil { - return nil, nil, err - } - - c := new(CompoundMetrics) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil + return do[*CompoundMetrics](s.client, + withPath("/sidekiq/compound_metrics"), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/snippet_repository_storage_move.go b/vendor/gitlab.com/gitlab-org/api/client-go/snippet_repository_storage_move.go index b25c3a4875..ee20002a09 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/snippet_repository_storage_move.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/snippet_repository_storage_move.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" "time" ) @@ -25,10 +24,10 @@ import ( type ( SnippetRepositoryStorageMoveServiceInterface interface { RetrieveAllStorageMoves(opts RetrieveAllSnippetStorageMovesOptions, options ...RequestOptionFunc) 
([]*SnippetRepositoryStorageMove, *Response, error) - RetrieveAllStorageMovesForSnippet(snippet int, opts RetrieveAllSnippetStorageMovesOptions, options ...RequestOptionFunc) ([]*SnippetRepositoryStorageMove, *Response, error) - GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) - GetStorageMoveForSnippet(snippet int, repositoryStorage int, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) - ScheduleStorageMoveForSnippet(snippet int, opts ScheduleStorageMoveForSnippetOptions, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) + RetrieveAllStorageMovesForSnippet(snippet int64, opts RetrieveAllSnippetStorageMovesOptions, options ...RequestOptionFunc) ([]*SnippetRepositoryStorageMove, *Response, error) + GetStorageMove(repositoryStorage int64, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) + GetStorageMoveForSnippet(snippet int64, repositoryStorage int64, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) + ScheduleStorageMoveForSnippet(snippet int64, opts ScheduleStorageMoveForSnippetOptions, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) ScheduleAllStorageMoves(opts ScheduleAllSnippetStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) } @@ -49,7 +48,7 @@ var _ SnippetRepositoryStorageMoveServiceInterface = (*SnippetRepositoryStorageM // GitLab API docs: // https://docs.gitlab.com/api/snippet_repository_storage_moves/ type SnippetRepositoryStorageMove struct { - ID int `json:"id"` + ID int64 `json:"id"` CreatedAt *time.Time `json:"created_at"` State string `json:"state"` SourceStorageName string `json:"source_storage_name"` @@ -58,13 +57,13 @@ type SnippetRepositoryStorageMove struct { } type RepositorySnippet struct { - ID int `json:"id"` + ID int64 `json:"id"` Title string `json:"title"` Description string `json:"description"` 
Visibility VisibilityValue `json:"visibility"` UpdatedAt *time.Time `json:"updated_at"` CreatedAt *time.Time `json:"created_at"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` WebURL string `json:"web_url"` RawURL string `json:"raw_url"` SSHURLToRepo string `json:"ssh_url_to_repo"` @@ -77,7 +76,9 @@ type RepositorySnippet struct { // GitLab API docs: // https://docs.gitlab.com/api/snippet_repository_storage_moves/#retrieve-all-snippet-repository-storage-moves // https://docs.gitlab.com/api/snippet_repository_storage_moves/#retrieve-all-repository-storage-moves-for-a-snippet -type RetrieveAllSnippetStorageMovesOptions ListOptions +type RetrieveAllSnippetStorageMovesOptions struct { + ListOptions +} // RetrieveAllStorageMoves retrieves all snippet repository storage moves // accessible by the authenticated user. @@ -85,18 +86,11 @@ type RetrieveAllSnippetStorageMovesOptions ListOptions // GitLab API docs: // https://docs.gitlab.com/api/snippet_repository_storage_moves/#retrieve-all-snippet-repository-storage-moves func (s SnippetRepositoryStorageMoveService) RetrieveAllStorageMoves(opts RetrieveAllSnippetStorageMovesOptions, options ...RequestOptionFunc) ([]*SnippetRepositoryStorageMove, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "snippet_repository_storage_moves", opts, options) - if err != nil { - return nil, nil, err - } - - var ssms []*SnippetRepositoryStorageMove - resp, err := s.client.Do(req, &ssms) - if err != nil { - return nil, resp, err - } - - return ssms, resp, err + return do[[]*SnippetRepositoryStorageMove](s.client, + withPath("snippet_repository_storage_moves"), + withAPIOpts(opts), + withRequestOpts(options...), + ) } // RetrieveAllStorageMovesForSnippet retrieves all repository storage moves for @@ -104,63 +98,34 @@ func (s SnippetRepositoryStorageMoveService) RetrieveAllStorageMoves(opts Retrie // // GitLab API docs: // 
https://docs.gitlab.com/api/snippet_repository_storage_moves/#retrieve-all-repository-storage-moves-for-a-snippet -func (s SnippetRepositoryStorageMoveService) RetrieveAllStorageMovesForSnippet(snippet int, opts RetrieveAllSnippetStorageMovesOptions, options ...RequestOptionFunc) ([]*SnippetRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("snippets/%d/repository_storage_moves", snippet) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var ssms []*SnippetRepositoryStorageMove - resp, err := s.client.Do(req, &ssms) - if err != nil { - return nil, resp, err - } - - return ssms, resp, err +func (s SnippetRepositoryStorageMoveService) RetrieveAllStorageMovesForSnippet(snippet int64, opts RetrieveAllSnippetStorageMovesOptions, options ...RequestOptionFunc) ([]*SnippetRepositoryStorageMove, *Response, error) { + return do[[]*SnippetRepositoryStorageMove](s.client, + withPath("snippets/%d/repository_storage_moves", snippet), + withAPIOpts(opts), + withRequestOpts(options...), + ) } // GetStorageMove gets a single snippet repository storage move. 
// // GitLab API docs: // https://docs.gitlab.com/api/snippet_repository_storage_moves/#get-a-single-snippet-repository-storage-move -func (s SnippetRepositoryStorageMoveService) GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("snippet_repository_storage_moves/%d", repositoryStorage) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ssm := new(SnippetRepositoryStorageMove) - resp, err := s.client.Do(req, ssm) - if err != nil { - return nil, resp, err - } - - return ssm, resp, err +func (s SnippetRepositoryStorageMoveService) GetStorageMove(repositoryStorage int64, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) { + return do[*SnippetRepositoryStorageMove](s.client, + withPath("snippet_repository_storage_moves/%d", repositoryStorage), + withRequestOpts(options...), + ) } // GetStorageMoveForSnippet gets a single repository storage move for a snippet. 
// // GitLab API docs: // https://docs.gitlab.com/api/snippet_repository_storage_moves/#get-a-single-repository-storage-move-for-a-snippet -func (s SnippetRepositoryStorageMoveService) GetStorageMoveForSnippet(snippet int, repositoryStorage int, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("snippets/%d/repository_storage_moves/%d", snippet, repositoryStorage) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ssm := new(SnippetRepositoryStorageMove) - resp, err := s.client.Do(req, ssm) - if err != nil { - return nil, resp, err - } - - return ssm, resp, err +func (s SnippetRepositoryStorageMoveService) GetStorageMoveForSnippet(snippet int64, repositoryStorage int64, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) { + return do[*SnippetRepositoryStorageMove](s.client, + withPath("snippets/%d/repository_storage_moves/%d", snippet, repositoryStorage), + withRequestOpts(options...), + ) } // ScheduleStorageMoveForSnippetOptions represents the available @@ -176,21 +141,13 @@ type ScheduleStorageMoveForSnippetOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/snippet_repository_storage_moves/#schedule-a-repository-storage-move-for-a-snippet -func (s SnippetRepositoryStorageMoveService) ScheduleStorageMoveForSnippet(snippet int, opts ScheduleStorageMoveForSnippetOptions, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("snippets/%d/repository_storage_moves", snippet) - - req, err := s.client.NewRequest(http.MethodPost, u, opts, options) - if err != nil { - return nil, nil, err - } - - ssm := new(SnippetRepositoryStorageMove) - resp, err := s.client.Do(req, ssm) - if err != nil { - return nil, resp, err - } - - return ssm, resp, err +func (s SnippetRepositoryStorageMoveService) ScheduleStorageMoveForSnippet(snippet int64, opts 
ScheduleStorageMoveForSnippetOptions, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) { + return do[*SnippetRepositoryStorageMove](s.client, + withMethod(http.MethodPost), + withPath("snippets/%d/repository_storage_moves", snippet), + withAPIOpts(opts), + withRequestOpts(options...), + ) } // ScheduleAllSnippetStorageMovesOptions represents the available @@ -208,10 +165,11 @@ type ScheduleAllSnippetStorageMovesOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/snippet_repository_storage_moves/#schedule-repository-storage-moves-for-all-snippets-on-a-storage-shard func (s SnippetRepositoryStorageMoveService) ScheduleAllStorageMoves(opts ScheduleAllSnippetStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "snippet_repository_storage_moves", opts, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("snippet_repository_storage_moves"), + withAPIOpts(opts), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/snippets.go b/vendor/gitlab.com/gitlab-org/api/client-go/snippets.go index d3bdad822e..4c7cd150fb 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/snippets.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/snippets.go @@ -18,7 +18,6 @@ package gitlab import ( "bytes" - "fmt" "net/http" "time" ) @@ -26,12 +25,12 @@ import ( type ( SnippetsServiceInterface interface { ListSnippets(opt *ListSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) - GetSnippet(snippet int, options ...RequestOptionFunc) (*Snippet, *Response, error) - SnippetContent(snippet int, options ...RequestOptionFunc) ([]byte, *Response, error) - SnippetFileContent(snippet int, ref, filename string, options ...RequestOptionFunc) ([]byte, *Response, error) + GetSnippet(snippet int64, 
options ...RequestOptionFunc) (*Snippet, *Response, error) + SnippetContent(snippet int64, options ...RequestOptionFunc) ([]byte, *Response, error) + SnippetFileContent(snippet int64, ref, filename string, options ...RequestOptionFunc) ([]byte, *Response, error) CreateSnippet(opt *CreateSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) - UpdateSnippet(snippet int, opt *UpdateSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) - DeleteSnippet(snippet int, options ...RequestOptionFunc) (*Response, error) + UpdateSnippet(snippet int64, opt *UpdateSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) + DeleteSnippet(snippet int64, options ...RequestOptionFunc) (*Response, error) ExploreSnippets(opt *ExploreSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) ListAllSnippets(opt *ListAllSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) } @@ -51,122 +50,123 @@ var _ SnippetsServiceInterface = (*SnippetsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/snippets/ type Snippet struct { - ID int `json:"id"` - Title string `json:"title"` - FileName string `json:"file_name"` - Description string `json:"description"` - Visibility string `json:"visibility"` - Author struct { - ID int `json:"id"` - Username string `json:"username"` - Email string `json:"email"` - Name string `json:"name"` - State string `json:"state"` - CreatedAt *time.Time `json:"created_at"` - } `json:"author"` - UpdatedAt *time.Time `json:"updated_at"` - CreatedAt *time.Time `json:"created_at"` - ProjectID int `json:"project_id"` - WebURL string `json:"web_url"` - RawURL string `json:"raw_url"` - Files []struct { - Path string `json:"path"` - RawURL string `json:"raw_url"` - } `json:"files"` - RepositoryStorage string `json:"repository_storage"` + ID int64 `json:"id"` + Title string `json:"title"` + FileName string `json:"file_name"` + Description string `json:"description"` 
+ Visibility string `json:"visibility"` + Author SnippetAuthor `json:"author"` + UpdatedAt *time.Time `json:"updated_at"` + CreatedAt *time.Time `json:"created_at"` + ProjectID int64 `json:"project_id"` + WebURL string `json:"web_url"` + RawURL string `json:"raw_url"` + Files []SnippetFile `json:"files"` + RepositoryStorage string `json:"repository_storage"` } func (s Snippet) String() string { return Stringify(s) } +// SnippetAuthor represents a GitLab snippet author. +// +// GitLab API docs: https://docs.gitlab.com/api/snippets/ +type SnippetAuthor struct { + ID int64 `json:"id"` + Username string `json:"username"` + Email string `json:"email"` + Name string `json:"name"` + State string `json:"state"` + CreatedAt *time.Time `json:"created_at"` +} + +func (a SnippetAuthor) String() string { + return Stringify(a) +} + +// SnippetFile represents a GitLab snippet file. +// +// GitLab API docs: https://docs.gitlab.com/api/snippets/ +type SnippetFile struct { + Path string `json:"path"` + RawURL string `json:"raw_url"` +} + +func (f SnippetFile) String() string { + return Stringify(f) +} + // ListSnippetsOptions represents the available ListSnippets() options. // // GitLab API docs: // https://docs.gitlab.com/api/snippets/#list-all-snippets-for-current-user -type ListSnippetsOptions ListOptions +type ListSnippetsOptions struct { + ListOptions +} // ListSnippets gets a list of snippets. 
// // GitLab API docs: // https://docs.gitlab.com/api/snippets/#list-all-snippets-for-current-user func (s *SnippetsService) ListSnippets(opt *ListSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "snippets", opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Snippet - resp, err := s.client.Do(req, &ps) + res, resp, err := do[[]*Snippet](s.client, + withMethod(http.MethodGet), + withPath("snippets"), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return ps, resp, nil + return res, resp, nil } // GetSnippet gets a single snippet // // GitLab API docs: // https://docs.gitlab.com/api/snippets/#get-a-single-snippet -func (s *SnippetsService) GetSnippet(snippet int, options ...RequestOptionFunc) (*Snippet, *Response, error) { - u := fmt.Sprintf("snippets/%d", snippet) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ps := new(Snippet) - resp, err := s.client.Do(req, ps) +func (s *SnippetsService) GetSnippet(snippet int64, options ...RequestOptionFunc) (*Snippet, *Response, error) { + res, resp, err := do[*Snippet](s.client, + withMethod(http.MethodGet), + withPath("snippets/%d", snippet), + withAPIOpts(nil), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return ps, resp, nil + return res, resp, nil } // SnippetContent gets a single snippet’s raw contents. 
// // GitLab API docs: // https://docs.gitlab.com/api/snippets/#single-snippet-contents -func (s *SnippetsService) SnippetContent(snippet int, options ...RequestOptionFunc) ([]byte, *Response, error) { - u := fmt.Sprintf("snippets/%d/raw", snippet) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) +func (s *SnippetsService) SnippetContent(snippet int64, options ...RequestOptionFunc) ([]byte, *Response, error) { + buf, resp, err := do[bytes.Buffer](s.client, + withPath("snippets/%d/raw", snippet), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return b.Bytes(), resp, err + return buf.Bytes(), resp, nil } // SnippetFileContent returns the raw file content as plain text. // // GitLab API docs: // https://docs.gitlab.com/api/snippets/#snippet-repository-file-content -func (s *SnippetsService) SnippetFileContent(snippet int, ref, filename string, options ...RequestOptionFunc) ([]byte, *Response, error) { - filepath := PathEscape(filename) - u := fmt.Sprintf("snippets/%d/files/%s/%s/raw", snippet, ref, filepath) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) +func (s *SnippetsService) SnippetFileContent(snippet int64, ref, filename string, options ...RequestOptionFunc) ([]byte, *Response, error) { + buf, resp, err := do[bytes.Buffer](s.client, + withPath("snippets/%d/files/%s/%s/raw", snippet, ref, filename), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return b.Bytes(), resp, err + return buf.Bytes(), resp, nil } // CreateSnippetFileOptions represents the create snippet file options. 
@@ -197,18 +197,16 @@ type CreateSnippetOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/snippets/#create-new-snippet func (s *SnippetsService) CreateSnippet(opt *CreateSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "snippets", opt, options) - if err != nil { - return nil, nil, err - } - - ps := new(Snippet) - resp, err := s.client.Do(req, ps) + res, resp, err := do[*Snippet](s.client, + withMethod(http.MethodPost), + withPath("snippets"), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return ps, resp, nil + return res, resp, nil } // UpdateSnippetFileOptions represents the update snippet file options. @@ -240,21 +238,17 @@ type UpdateSnippetOptions struct { // // GitLab API docs: // https://docs.gitlab.com/api/snippets/#update-snippet -func (s *SnippetsService) UpdateSnippet(snippet int, opt *UpdateSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) { - u := fmt.Sprintf("snippets/%d", snippet) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ps := new(Snippet) - resp, err := s.client.Do(req, ps) +func (s *SnippetsService) UpdateSnippet(snippet int64, opt *UpdateSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) { + res, resp, err := do[*Snippet](s.client, + withMethod(http.MethodPut), + withPath("snippets/%d", snippet), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return ps, resp, nil + return res, resp, nil } // DeleteSnippet deletes an existing snippet. 
This is an idempotent @@ -263,40 +257,39 @@ func (s *SnippetsService) UpdateSnippet(snippet int, opt *UpdateSnippetOptions, // // GitLab API docs: // https://docs.gitlab.com/api/snippets/#delete-snippet -func (s *SnippetsService) DeleteSnippet(snippet int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("snippets/%d", snippet) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *SnippetsService) DeleteSnippet(snippet int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("snippets/%d", snippet), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } // ExploreSnippetsOptions represents the available ExploreSnippets() options. // // GitLab API docs: // https://docs.gitlab.com/api/snippets/#list-all-public-snippets -type ExploreSnippetsOptions ListOptions +type ExploreSnippetsOptions struct { + ListOptions +} // ExploreSnippets gets the list of public snippets. // // GitLab API docs: // https://docs.gitlab.com/api/snippets/#list-all-public-snippets func (s *SnippetsService) ExploreSnippets(opt *ExploreSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "snippets/public", opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Snippet - resp, err := s.client.Do(req, &ps) + res, resp, err := do[[]*Snippet](s.client, + withMethod(http.MethodGet), + withPath("snippets/public"), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return ps, resp, nil + return res, resp, nil } // ListAllSnippetsOptions represents the available ListAllSnippets() options. 
@@ -315,16 +308,14 @@ type ListAllSnippetsOptions struct { // GitLab API docs: // https://docs.gitlab.com/api/snippets/#list-all-snippets func (s *SnippetsService) ListAllSnippets(opt *ListAllSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "snippets/all", opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Snippet - resp, err := s.client.Do(req, &ps) + res, resp, err := do[[]*Snippet](s.client, + withMethod(http.MethodGet), + withPath("snippets/all"), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return ps, resp, nil + return res, resp, nil } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/strings.go b/vendor/gitlab.com/gitlab-org/api/client-go/strings.go index def37b2552..d8aaa7f52c 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/strings.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/strings.go @@ -23,7 +23,7 @@ import ( ) // Stringify attempts to create a reasonable string representation of types in -// the Gitlab library. It does things like resolve pointers to their values +// the GitLab library. It does things like resolve pointers to their values // and omits struct fields with nil values. func Stringify(message any) string { var buf bytes.Buffer diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/system_hooks.go b/vendor/gitlab.com/gitlab-org/api/client-go/system_hooks.go index 3f778ec0f3..d337ddf919 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/system_hooks.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/system_hooks.go @@ -17,18 +17,39 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( SystemHooksServiceInterface interface { + // ListHooks gets a list of system hooks. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/system_hooks/#list-system-hooks ListHooks(options ...RequestOptionFunc) ([]*Hook, *Response, error) - GetHook(hook int, options ...RequestOptionFunc) (*Hook, *Response, error) + // GetHook gets a single system hook. + // + // GitLab API docs: + // https://docs.gitlab.com/api/system_hooks/#get-system-hook + GetHook(hook int64, options ...RequestOptionFunc) (*Hook, *Response, error) + // AddHook adds a new system hook. + // + // GitLab API docs: + // https://docs.gitlab.com/api/system_hooks/#add-new-system-hook AddHook(opt *AddHookOptions, options ...RequestOptionFunc) (*Hook, *Response, error) - TestHook(hook int, options ...RequestOptionFunc) (*HookEvent, *Response, error) - DeleteHook(hook int, options ...RequestOptionFunc) (*Response, error) + // TestHook tests a system hook. + // + // GitLab API docs: + // https://docs.gitlab.com/api/system_hooks/#test-system-hook + TestHook(hook int64, options ...RequestOptionFunc) (*HookEvent, *Response, error) + // DeleteHook deletes a system hook. This is an idempotent API function and + // returns 200 OK even if the hook is not available. If the hook is deleted it + // is also returned as JSON. + // + // GitLab API docs: + // https://docs.gitlab.com/api/system_hooks/#delete-system-hook + DeleteHook(hook int64, options ...RequestOptionFunc) (*Response, error) } // SystemHooksService handles communication with the system hooks related @@ -46,7 +67,7 @@ var _ SystemHooksServiceInterface = (*SystemHooksService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/system_hooks/ type Hook struct { - ID int `json:"id"` + ID int64 `json:"id"` URL string `json:"url"` CreatedAt *time.Time `json:"created_at"` PushEvents bool `json:"push_events"` @@ -60,44 +81,18 @@ func (h Hook) String() string { return Stringify(h) } -// ListHooks gets a list of system hooks. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/system_hooks/#list-system-hooks func (s *SystemHooksService) ListHooks(options ...RequestOptionFunc) ([]*Hook, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "hooks", nil, options) - if err != nil { - return nil, nil, err - } - - var h []*Hook - resp, err := s.client.Do(req, &h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil + return do[[]*Hook](s.client, + withPath("hooks"), + withRequestOpts(options...), + ) } -// GetHook get a single system hook. -// -// GitLab API docs: -// https://docs.gitlab.com/api/system_hooks/#get-system-hook -func (s *SystemHooksService) GetHook(hook int, options ...RequestOptionFunc) (*Hook, *Response, error) { - u := fmt.Sprintf("hooks/%d", hook) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var h *Hook - resp, err := s.client.Do(req, &h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil +func (s *SystemHooksService) GetHook(hook int64, options ...RequestOptionFunc) (*Hook, *Response, error) { + return do[*Hook](s.client, + withPath("hooks/%d", hook), + withRequestOpts(options...), + ) } // AddHookOptions represents the available AddHook() options. @@ -114,23 +109,13 @@ type AddHookOptions struct { EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` } -// AddHook adds a new system hook hook. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/system_hooks/#add-new-system-hook func (s *SystemHooksService) AddHook(opt *AddHookOptions, options ...RequestOptionFunc) (*Hook, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "hooks", opt, options) - if err != nil { - return nil, nil, err - } - - h := new(Hook) - resp, err := s.client.Do(req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil + return do[*Hook](s.client, + withMethod(http.MethodPost), + withPath("hooks"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // HookEvent represents an event trigger by a GitLab system hook. @@ -140,7 +125,7 @@ type HookEvent struct { EventName string `json:"event_name"` Name string `json:"name"` Path string `json:"path"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` OwnerName string `json:"owner_name"` OwnerEmail string `json:"owner_email"` } @@ -149,40 +134,18 @@ func (h HookEvent) String() string { return Stringify(h) } -// TestHook tests a system hook. -// -// GitLab API docs: -// https://docs.gitlab.com/api/system_hooks/#test-system-hook -func (s *SystemHooksService) TestHook(hook int, options ...RequestOptionFunc) (*HookEvent, *Response, error) { - u := fmt.Sprintf("hooks/%d", hook) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - h := new(HookEvent) - resp, err := s.client.Do(req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil +func (s *SystemHooksService) TestHook(hook int64, options ...RequestOptionFunc) (*HookEvent, *Response, error) { + return do[*HookEvent](s.client, + withPath("hooks/%d", hook), + withRequestOpts(options...), + ) } -// DeleteHook deletes a system hook. This is an idempotent API function and -// returns 200 OK even if the hook is not available. If the hook is deleted it -// is also returned as JSON. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/system_hooks/#delete-system-hook -func (s *SystemHooksService) DeleteHook(hook int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("hooks/%d", hook) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *SystemHooksService) DeleteHook(hook int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("hooks/%d", hook), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/tags.go b/vendor/gitlab.com/gitlab-org/api/client-go/tags.go index aec5f3d6b3..333dbfe44f 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/tags.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/tags.go @@ -17,19 +17,40 @@ package gitlab import ( - "fmt" "math/big" "net/http" - "net/url" "time" ) type ( TagsServiceInterface interface { + // ListTags gets a list of tags from a project, sorted by name in reverse + // alphabetical order. + // + // GitLab API docs: + // https://docs.gitlab.com/api/tags/#list-project-repository-tags ListTags(pid any, opt *ListTagsOptions, options ...RequestOptionFunc) ([]*Tag, *Response, error) + // GetTag a specific repository tag determined by its name. It returns 200 together + // with the tag information if the tag exists. It returns 404 if the tag does not exist. + // + // GitLab API docs: + // https://docs.gitlab.com/api/tags/#get-a-single-repository-tag GetTag(pid any, tag string, options ...RequestOptionFunc) (*Tag, *Response, error) + // GetTagSignature a specific repository tag determined by its name. It returns 200 together + // with the signature if the tag exists. It returns 404 if the tag does not exist. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/tags/#get-x509-signature-of-a-tag GetTagSignature(pid any, tag string, options ...RequestOptionFunc) (*X509Signature, *Response, error) + // CreateTag creates a new tag in the repository that points to the supplied ref. + // + // GitLab API docs: + // https://docs.gitlab.com/api/tags/#create-a-new-tag CreateTag(pid any, opt *CreateTagOptions, options ...RequestOptionFunc) (*Tag, *Response, error) + // DeleteTag deletes a tag of a repository with given name. + // + // GitLab API docs: + // https://docs.gitlab.com/api/tags/#delete-a-tag DeleteTag(pid any, tag string, options ...RequestOptionFunc) (*Response, error) } @@ -67,7 +88,7 @@ type X509Signature struct { } type X509Certificate struct { - ID int `json:"id"` + ID int64 `json:"id"` Subject string `json:"subject"` SubjectKeyIdentifier string `json:"subject_key_identifier"` Email string `json:"email"` @@ -77,10 +98,10 @@ type X509Certificate struct { } type X509Issuer struct { - ID int `json:"id"` + ID int64 `json:"id"` Subject string `json:"subject"` SubjectKeyIdentifier string `json:"subject_key_identifier"` - CrlUrl string `json:"crl_url"` + CrlURL string `json:"crl_url"` } // ReleaseNote represents a GitLab version release. @@ -106,82 +127,26 @@ type ListTagsOptions struct { Sort *string `url:"sort,omitempty" json:"sort,omitempty"` } -// ListTags gets a list of tags from a project, sorted by name in reverse -// alphabetical order. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/tags/#list-project-repository-tags func (s *TagsService) ListTags(pid any, opt *ListTagsOptions, options ...RequestOptionFunc) ([]*Tag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/tags", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var t []*Tag - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil + return do[[]*Tag](s.client, + withPath("projects/%s/repository/tags", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetTag a specific repository tag determined by its name. It returns 200 together -// with the tag information if the tag exists. It returns 404 if the tag does not exist. -// -// GitLab API docs: -// https://docs.gitlab.com/api/tags/#get-a-single-repository-tag func (s *TagsService) GetTag(pid any, tag string, options ...RequestOptionFunc) (*Tag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/tags/%s", PathEscape(project), url.PathEscape(tag)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var t *Tag - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil + return do[*Tag](s.client, + withPath("projects/%s/repository/tags/%s", ProjectID{pid}, tag), + withRequestOpts(options...), + ) } -// GetTagSignature a specific repository tag determined by its name. It returns 200 together -// with the signature if the tag exists. It returns 404 if the tag does not exist. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/tags/#get-x509-signature-of-a-tag func (s *TagsService) GetTagSignature(pid any, tag string, options ...RequestOptionFunc) (*X509Signature, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/tags/%s/signature", PathEscape(project), url.PathEscape(tag)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var sig *X509Signature - resp, err := s.client.Do(req, &sig) - if err != nil { - return nil, resp, err - } - - return sig, resp, nil + return do[*X509Signature](s.client, + withPath("projects/%s/repository/tags/%s/signature", ProjectID{pid}, tag), + withRequestOpts(options...), + ) } // CreateTagOptions represents the available CreateTag() options. @@ -194,46 +159,20 @@ type CreateTagOptions struct { Message *string `url:"message,omitempty" json:"message,omitempty"` } -// CreateTag creates a new tag in the repository that points to the supplied ref. -// -// GitLab API docs: -// https://docs.gitlab.com/api/tags/#create-a-new-tag func (s *TagsService) CreateTag(pid any, opt *CreateTagOptions, options ...RequestOptionFunc) (*Tag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/tags", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(Tag) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil + return do[*Tag](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/repository/tags", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteTag deletes a tag of a repository with given name. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/tags/#delete-a-tag func (s *TagsService) DeleteTag(pid any, tag string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/repository/tags/%s", PathEscape(project), url.PathEscape(tag)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/repository/tags/%s", ProjectID{pid}, tag), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/terraform_states.go b/vendor/gitlab.com/gitlab-org/api/client-go/terraform_states.go index 1de1ba124e..34b30d46bf 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/terraform_states.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/terraform_states.go @@ -30,6 +30,8 @@ type ( var _ TerraformStatesServiceInterface = (*TerraformStatesService)(nil) +// TerraformState represents a Terraform state. +// // GitLab API docs: https://docs.gitlab.com/api/graphql/reference/#terraformstate type TerraformState struct { Name string `json:"name"` @@ -40,6 +42,8 @@ type TerraformState struct { LockedAt time.Time `json:"lockedAt"` } +// TerraformStateVersion represents a Terraform state version. 
+// // GitLab API docs: https://docs.gitlab.com/api/graphql/reference/#terraformstateversion type TerraformStateVersion struct { Serial uint64 `json:"serial"` @@ -137,115 +141,71 @@ func (s *TerraformStatesService) Get(projectFullPath string, name string, option } func (s *TerraformStatesService) DownloadLatest(pid any, name string, options ...RequestOptionFunc) (io.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - uri := fmt.Sprintf("projects/%s/terraform/state/%s", PathEscape(project), PathEscape(name)) - - req, err := s.client.NewRequest(http.MethodGet, uri, nil, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) + buf, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/terraform/state/%s", ProjectID{pid}, name), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return &b, resp, nil + return &buf, resp, nil } func (s *TerraformStatesService) Download(pid any, name string, serial uint64, options ...RequestOptionFunc) (io.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - uri := fmt.Sprintf("projects/%s/terraform/state/%s/versions/%d", PathEscape(project), PathEscape(name), serial) - - req, err := s.client.NewRequest(http.MethodGet, uri, nil, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) + buf, resp, err := do[bytes.Buffer](s.client, + withPath("projects/%s/terraform/state/%s/versions/%d", ProjectID{pid}, name, serial), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return &b, resp, nil + return &buf, resp, nil } // Delete deletes a single Terraform state // // GitLab API docs: https://docs.gitlab.com/user/infrastructure/iac/terraform_state/ func (s *TerraformStatesService) Delete(pid any, name string, options ...RequestOptionFunc) (*Response, error) { - 
project, err := parseID(pid) - if err != nil { - return nil, err - } - uri := fmt.Sprintf("projects/%s/terraform/state/%s", PathEscape(project), PathEscape(name)) - - req, err := s.client.NewRequest(http.MethodDelete, uri, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/terraform/state/%s", ProjectID{pid}, name), + withRequestOpts(options...), + ) + return resp, err } // DeleteVersion deletes a single Terraform state version // // GitLab API docs: https://docs.gitlab.com/user/infrastructure/iac/terraform_state/ func (s *TerraformStatesService) DeleteVersion(pid any, name string, serial uint64, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - uri := fmt.Sprintf("projects/%s/terraform/state/%s/versions/%d", PathEscape(project), PathEscape(name), serial) - - req, err := s.client.NewRequest(http.MethodDelete, uri, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/terraform/state/%s/versions/%d", ProjectID{pid}, name, serial), + withRequestOpts(options...), + ) + return resp, err } // Lock locks a single Terraform state // // GitLab API docs: https://docs.gitlab.com/user/infrastructure/iac/terraform_state/ func (s *TerraformStatesService) Lock(pid any, name string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - uri := fmt.Sprintf("projects/%s/terraform/state/%s/lock", PathEscape(project), PathEscape(name)) - - req, err := s.client.NewRequest(http.MethodPost, uri, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + 
withPath("projects/%s/terraform/state/%s/lock", ProjectID{pid}, name), + withRequestOpts(options...), + ) + return resp, err } // Unlock unlocks a single Terraform state // // GitLab API docs: https://docs.gitlab.com/user/infrastructure/iac/terraform_state/ func (s *TerraformStatesService) Unlock(pid any, name string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - uri := fmt.Sprintf("projects/%s/terraform/state/%s/lock", PathEscape(project), PathEscape(name)) - - req, err := s.client.NewRequest(http.MethodDelete, uri, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/terraform/state/%s/lock", ProjectID{pid}, name), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/time_stats.go b/vendor/gitlab.com/gitlab-org/api/client-go/time_stats.go index 580acf1a67..035a88d190 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/time_stats.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/time_stats.go @@ -17,7 +17,6 @@ package gitlab import ( - "fmt" "net/http" ) @@ -35,8 +34,8 @@ type timeStatsService struct { type TimeStats struct { HumanTimeEstimate string `json:"human_time_estimate"` HumanTotalTimeSpent string `json:"human_total_time_spent"` - TimeEstimate int `json:"time_estimate"` - TotalTimeSpent int `json:"total_time_spent"` + TimeEstimate int64 `json:"time_estimate"` + TotalTimeSpent int64 `json:"total_time_spent"` } func (t TimeStats) String() string { @@ -54,49 +53,24 @@ type SetTimeEstimateOptions struct { // setTimeEstimate sets the time estimate for a single project issue. 
// // GitLab docs: https://docs.gitlab.com/api/issues/#set-a-time-estimate-for-an-issue -func (s *timeStatsService) setTimeEstimate(pid any, entity string, issue int, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/time_estimate", PathEscape(project), entity, issue) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(TimeStats) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil +func (s *timeStatsService) setTimeEstimate(pid any, entity string, issue int64, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { + return do[*TimeStats](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/%s/%d/time_estimate", ProjectID{pid}, entity, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // resetTimeEstimate resets the time estimate for a single project issue. 
// // GitLab docs: https://docs.gitlab.com/api/issues/#reset-the-time-estimate-for-an-issue -func (s *timeStatsService) resetTimeEstimate(pid any, entity string, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/reset_time_estimate", PathEscape(project), entity, issue) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(TimeStats) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil +func (s *timeStatsService) resetTimeEstimate(pid any, entity string, issue int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) { + return do[*TimeStats](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/%s/%d/reset_time_estimate", ProjectID{pid}, entity, issue), + withRequestOpts(options...), + ) } // AddSpentTimeOptions represents the available AddSpentTime() options. @@ -110,71 +84,32 @@ type AddSpentTimeOptions struct { // addSpentTime adds spent time for a single project issue. 
// // GitLab docs: https://docs.gitlab.com/api/issues/#add-spent-time-for-an-issue -func (s *timeStatsService) addSpentTime(pid any, entity string, issue int, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/add_spent_time", PathEscape(project), entity, issue) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(TimeStats) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil +func (s *timeStatsService) addSpentTime(pid any, entity string, issue int64, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { + return do[*TimeStats](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/%s/%d/add_spent_time", ProjectID{pid}, entity, issue), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // resetSpentTime resets the spent time for a single project issue. 
// // GitLab docs: https://docs.gitlab.com/api/issues/#reset-spent-time-for-an-issue -func (s *timeStatsService) resetSpentTime(pid any, entity string, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/reset_spent_time", PathEscape(project), entity, issue) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(TimeStats) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil +func (s *timeStatsService) resetSpentTime(pid any, entity string, issue int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) { + return do[*TimeStats](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/%s/%d/reset_spent_time", ProjectID{pid}, entity, issue), + withRequestOpts(options...), + ) } // getTimeSpent gets the spent time for a single project issue. 
// // GitLab docs: https://docs.gitlab.com/api/issues/#get-time-tracking-stats -func (s *timeStatsService) getTimeSpent(pid any, entity string, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/time_stats", PathEscape(project), entity, issue) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(TimeStats) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil +func (s *timeStatsService) getTimeSpent(pid any, entity string, issue int64, options ...RequestOptionFunc) (*TimeStats, *Response, error) { + return do[*TimeStats](s.client, + withPath("projects/%s/%s/%d/time_stats", ProjectID{pid}, entity, issue), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/todos.go b/vendor/gitlab.com/gitlab-org/api/client-go/todos.go index 7711e72509..ab47c78c85 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/todos.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/todos.go @@ -17,20 +17,30 @@ package gitlab import ( - "fmt" "net/http" "time" ) type ( TodosServiceInterface interface { + // ListTodos lists all todos created by authenticated user. + // When no filter is applied, it returns all pending todos for the current user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/todos/#get-a-list-of-to-do-items ListTodos(opt *ListTodosOptions, options ...RequestOptionFunc) ([]*Todo, *Response, error) - MarkTodoAsDone(id int, options ...RequestOptionFunc) (*Response, error) + // MarkTodoAsDone marks a single pending todo given by its ID for the current user as done. 
+ // + // GitLab API docs: https://docs.gitlab.com/api/todos/#mark-a-to-do-item-as-done + MarkTodoAsDone(id int64, options ...RequestOptionFunc) (*Response, error) + // MarkAllTodosAsDone marks all pending todos for the current user as done. + // + // GitLab API docs: https://docs.gitlab.com/api/todos/#mark-all-to-do-items-as-done MarkAllTodosAsDone(options ...RequestOptionFunc) (*Response, error) } // TodosService handles communication with the todos related methods of - // the Gitlab API. + // the GitLab API. // // GitLab API docs: https://docs.gitlab.com/api/todos/ TodosService struct { @@ -44,7 +54,7 @@ var _ TodosServiceInterface = (*TodosService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/todos/ type Todo struct { - ID int `json:"id"` + ID int64 `json:"id"` Project *BasicProject `json:"project"` Author *BasicUser `json:"author"` ActionName TodoAction `json:"action_name"` @@ -67,19 +77,19 @@ type TodoTarget struct { Author *BasicUser `json:"author"` CreatedAt *time.Time `json:"created_at"` Description string `json:"description"` - Downvotes int `json:"downvotes"` + Downvotes int64 `json:"downvotes"` ID any `json:"id"` - IID int `json:"iid"` + IID int64 `json:"iid"` Labels []string `json:"labels"` Milestone *Milestone `json:"milestone"` - ProjectID int `json:"project_id"` + ProjectID int64 `json:"project_id"` State string `json:"state"` Subscribed bool `json:"subscribed"` TaskCompletionStatus *TasksCompletionStatus `json:"task_completion_status"` Title string `json:"title"` UpdatedAt *time.Time `json:"updated_at"` - Upvotes int `json:"upvotes"` - UserNotesCount int `json:"user_notes_count"` + Upvotes int64 `json:"upvotes"` + UserNotesCount int64 `json:"user_notes_count"` WebURL string `json:"web_url"` // Only available for type Issue @@ -87,13 +97,13 @@ type TodoTarget struct { DueDate string `json:"due_date"` HasTasks bool `json:"has_tasks"` Links *IssueLinks `json:"_links"` - MovedToID int `json:"moved_to_id"` + MovedToID int64 
`json:"moved_to_id"` TimeStats *TimeStats `json:"time_stats"` - Weight int `json:"weight"` + Weight int64 `json:"weight"` // Only available for type MergeRequest MergedAt *time.Time `json:"merged_at"` - ApprovalsBeforeMerge int `json:"approvals_before_merge"` + ApprovalsBeforeMerge int64 `json:"approvals_before_merge"` ForceRemoveSourceBranch bool `json:"force_remove_source_branch"` MergeCommitSHA string `json:"merge_commit_sha"` MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"` @@ -103,10 +113,10 @@ type TodoTarget struct { SHA string `json:"sha"` ShouldRemoveSourceBranch bool `json:"should_remove_source_branch"` SourceBranch string `json:"source_branch"` - SourceProjectID int `json:"source_project_id"` + SourceProjectID int64 `json:"source_project_id"` Squash bool `json:"squash"` TargetBranch string `json:"target_branch"` - TargetProjectID int `json:"target_project_id"` + TargetProjectID int64 `json:"target_project_id"` WorkInProgress bool `json:"work_in_progress"` // Only available for type DesignManagement::Design @@ -120,55 +130,35 @@ type TodoTarget struct { type ListTodosOptions struct { ListOptions Action *TodoAction `url:"action,omitempty" json:"action,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - ProjectID *int `url:"project_id,omitempty" json:"project_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + AuthorID *int64 `url:"author_id,omitempty" json:"author_id,omitempty"` + ProjectID *int64 `url:"project_id,omitempty" json:"project_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` State *string `url:"state,omitempty" json:"state,omitempty"` Type *string `url:"type,omitempty" json:"type,omitempty"` } -// ListTodos lists all todos created by authenticated user. -// When no filter is applied, it returns all pending todos for the current user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/todos/#get-a-list-of-to-do-items func (s *TodosService) ListTodos(opt *ListTodosOptions, options ...RequestOptionFunc) ([]*Todo, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "todos", opt, options) - if err != nil { - return nil, nil, err - } - - var t []*Todo - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil + return do[[]*Todo](s.client, + withPath("todos"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// MarkTodoAsDone marks a single pending todo given by its ID for the current user as done. -// -// GitLab API docs: https://docs.gitlab.com/api/todos/#mark-a-to-do-item-as-done -func (s *TodosService) MarkTodoAsDone(id int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("todos/%d/mark_as_done", id) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *TodosService) MarkTodoAsDone(id int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("todos/%d/mark_as_done", id), + withRequestOpts(options...), + ) + return resp, err } -// MarkAllTodosAsDone marks all pending todos for the current user as done. 
-// -// GitLab API docs: https://docs.gitlab.com/api/todos/#mark-all-to-do-items-as-done func (s *TodosService) MarkAllTodosAsDone(options ...RequestOptionFunc) (*Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "todos/mark_as_done", nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("todos/mark_as_done"), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/topics.go b/vendor/gitlab.com/gitlab-org/api/client-go/topics.go index abd4ff453f..e2cca41eca 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/topics.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/topics.go @@ -18,20 +18,39 @@ package gitlab import ( "encoding/json" - "fmt" "io" "net/http" - - retryablehttp "github.com/hashicorp/go-retryablehttp" ) type ( TopicsServiceInterface interface { + // ListTopics returns a list of project topics in the GitLab instance ordered + // by number of associated projects. + // + // GitLab API docs: https://docs.gitlab.com/api/topics/#list-topics ListTopics(opt *ListTopicsOptions, options ...RequestOptionFunc) ([]*Topic, *Response, error) - GetTopic(topic int, options ...RequestOptionFunc) (*Topic, *Response, error) + // GetTopic gets a project topic by ID. + // + // GitLab API docs: https://docs.gitlab.com/api/topics/#get-a-topic + GetTopic(topic int64, options ...RequestOptionFunc) (*Topic, *Response, error) + // CreateTopic creates a new project topic. + // + // GitLab API docs: + // https://docs.gitlab.com/api/topics/#create-a-project-topic CreateTopic(opt *CreateTopicOptions, options ...RequestOptionFunc) (*Topic, *Response, error) - UpdateTopic(topic int, opt *UpdateTopicOptions, options ...RequestOptionFunc) (*Topic, *Response, error) - DeleteTopic(topic int, options ...RequestOptionFunc) (*Response, error) + // UpdateTopic updates a project topic. 
Only available to administrators. + // + // To remove a topic avatar set the TopicAvatar.Filename to an empty string + // and set TopicAvatar.Image to nil. + // + // GitLab API docs: + // https://docs.gitlab.com/api/topics/#update-a-project-topic + UpdateTopic(topic int64, opt *UpdateTopicOptions, options ...RequestOptionFunc) (*Topic, *Response, error) + // DeleteTopic deletes a project topic. Only available to administrators. + // + // GitLab API docs: + // https://docs.gitlab.com/api/topics/#delete-a-project-topic + DeleteTopic(topic int64, options ...RequestOptionFunc) (*Response, error) } // TopicsService handles communication with the topics related methods @@ -49,7 +68,7 @@ var _ TopicsServiceInterface = (*TopicsService)(nil) // // GitLab API docs: https://docs.gitlab.com/api/topics/ type Topic struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Title string `json:"title"` Description string `json:"description"` @@ -69,43 +88,30 @@ type ListTopicsOptions struct { Search *string `url:"search,omitempty" json:"search,omitempty"` } -// ListTopics returns a list of project topics in the GitLab instance ordered -// by number of associated projects. -// -// GitLab API docs: https://docs.gitlab.com/api/topics/#list-topics func (s *TopicsService) ListTopics(opt *ListTopicsOptions, options ...RequestOptionFunc) ([]*Topic, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "topics", opt, options) - if err != nil { - return nil, nil, err - } - - var t []*Topic - resp, err := s.client.Do(req, &t) + res, resp, err := do[[]*Topic](s.client, + withMethod(http.MethodGet), + withPath("topics"), + withAPIOpts(opt), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return t, resp, nil + return res, resp, nil } -// GetTopic gets a project topic by ID. 
-// -// GitLab API docs: https://docs.gitlab.com/api/topics/#get-a-topic -func (s *TopicsService) GetTopic(topic int, options ...RequestOptionFunc) (*Topic, *Response, error) { - u := fmt.Sprintf("topics/%d", topic) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(Topic) - resp, err := s.client.Do(req, t) +func (s *TopicsService) GetTopic(topic int64, options ...RequestOptionFunc) (*Topic, *Response, error) { + res, resp, err := do[*Topic](s.client, + withMethod(http.MethodGet), + withPath("topics/%d", topic), + withAPIOpts(nil), + withRequestOpts(options...), + ) if err != nil { return nil, resp, err } - - return t, resp, nil + return res, resp, nil } // CreateTopicOptions represents the available CreateTopic() options. @@ -134,38 +140,17 @@ func (a *TopicAvatar) MarshalJSON() ([]byte, error) { return json.Marshal((*alias)(a)) } -// CreateTopic creates a new project topic. -// -// GitLab API docs: -// https://docs.gitlab.com/api/topics/#create-a-project-topic func (s *TopicsService) CreateTopic(opt *CreateTopicOptions, options ...RequestOptionFunc) (*Topic, *Response, error) { - var err error - var req *retryablehttp.Request - - if opt.Avatar == nil { - req, err = s.client.NewRequest(http.MethodPost, "topics", opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPost, - "topics", - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) - } - if err != nil { - return nil, nil, err + reqOpts := []doOption{ + withMethod(http.MethodPost), + withPath("topics"), + withAPIOpts(opt), + withRequestOpts(options...), } - - t := new(Topic) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err + if opt.Avatar != nil { + reqOpts = append(reqOpts, withUpload(opt.Avatar.Image, opt.Avatar.Filename, UploadAvatar)) } - - return t, resp, nil + return do[*Topic](s.client, reqOpts...) 
} // UpdateTopicOptions represents the available UpdateTopic() options. @@ -179,56 +164,25 @@ type UpdateTopicOptions struct { Avatar *TopicAvatar `url:"-" json:"avatar,omitempty"` } -// UpdateTopic updates a project topic. Only available to administrators. -// -// To remove a topic avatar set the TopicAvatar.Filename to an empty string -// and set TopicAvatar.Image to nil. -// -// GitLab API docs: -// https://docs.gitlab.com/api/topics/#update-a-project-topic -func (s *TopicsService) UpdateTopic(topic int, opt *UpdateTopicOptions, options ...RequestOptionFunc) (*Topic, *Response, error) { - u := fmt.Sprintf("topics/%d", topic) - - var err error - var req *retryablehttp.Request - - if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) { - req, err = s.client.NewRequest(http.MethodPut, u, opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPut, - u, - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) +func (s *TopicsService) UpdateTopic(topic int64, opt *UpdateTopicOptions, options ...RequestOptionFunc) (*Topic, *Response, error) { + reqOpts := []doOption{ + withMethod(http.MethodPut), + withPath("topics/%d", topic), + withAPIOpts(opt), + withRequestOpts(options...), } - if err != nil { - return nil, nil, err + if opt.Avatar != nil && (opt.Avatar.Filename != "" || opt.Avatar.Image != nil) { + reqOpts = append(reqOpts, withUpload(opt.Avatar.Image, opt.Avatar.Filename, UploadAvatar)) } - - t := new(Topic) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil + return do[*Topic](s.client, reqOpts...) } -// DeleteTopic deletes a project topic. Only available to administrators. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/topics/#delete-a-project-topic -func (s *TopicsService) DeleteTopic(topic int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("topics/%d", topic) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *TopicsService) DeleteTopic(topic int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("topics/%d", topic), + withAPIOpts(nil), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/types.go b/vendor/gitlab.com/gitlab-org/api/client-go/types.go index 10c853006c..457bfbb138 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/types.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/types.go @@ -86,6 +86,23 @@ const ( AccessTokenStateInactive AccessTokenState = "inactive" ) +// AccessTokenSort represents the available sorting options for access tokens. +// +// GitLab API docs: +// https://docs.gitlab.com/api/group_access_tokens/#list-all-group-access-tokens +type AccessTokenSort string + +const ( + CreatedAsc AccessTokenSort = "created_asc" + CreatedDesc AccessTokenSort = "created_desc" + ExpiresAsc AccessTokenSort = "expires_asc" + ExpiresDesc AccessTokenSort = "expires_desc" + LastUsedAsc AccessTokenSort = "last_used_asc" + LastUsedDesc AccessTokenSort = "last_used_desc" + NameAsc AccessTokenSort = "name_asc" + NameDesc AccessTokenSort = "name_desc" +) + // UserIDValue represents a user ID value within GitLab. type UserIDValue string @@ -103,7 +120,7 @@ type ApproverIDsValue struct { // ApproverIDs is a helper routine that creates a new ApproverIDsValue. 
func ApproverIDs(v any) *ApproverIDsValue { switch v.(type) { - case UserIDValue, []int: + case UserIDValue, []int64: return &ApproverIDsValue{value: v} default: panic("Unsupported value passed as approver ID") @@ -115,11 +132,11 @@ func (a *ApproverIDsValue) EncodeValues(key string, v *url.Values) error { switch value := a.value.(type) { case UserIDValue: v.Set(key, string(value)) - case []int: + case []int64: v.Del(key) v.Del(key + "[]") for _, id := range value { - v.Add(key+"[]", strconv.Itoa(id)) + v.Add(key+"[]", strconv.FormatInt(id, 10)) } } return nil @@ -143,7 +160,7 @@ type AssigneeIDValue struct { // AssigneeID is a helper routine that creates a new AssigneeIDValue. func AssigneeID(v any) *AssigneeIDValue { switch v.(type) { - case UserIDValue, int: + case UserIDValue, int, int64: return &AssigneeIDValue{value: v} default: panic("Unsupported value passed as assignee ID") @@ -157,6 +174,8 @@ func (a *AssigneeIDValue) EncodeValues(key string, v *url.Values) error { v.Set(key, string(value)) case int: v.Set(key, strconv.Itoa(value)) + case int64: + v.Set(key, strconv.FormatInt(value, 10)) } return nil } @@ -179,7 +198,7 @@ type ReviewerIDValue struct { // ReviewerID is a helper routine that creates a new ReviewerIDValue. func ReviewerID(v any) *ReviewerIDValue { switch v.(type) { - case UserIDValue, int: + case UserIDValue, int, int64: return &ReviewerIDValue{value: v} default: panic("Unsupported value passed as reviewer ID") @@ -193,6 +212,8 @@ func (a *ReviewerIDValue) EncodeValues(key string, v *url.Values) error { v.Set(key, string(value)) case int: v.Set(key, strconv.Itoa(value)) + case int64: + v.Set(key, strconv.FormatInt(value, 10)) } return nil } @@ -299,7 +320,7 @@ const ( ProtectionRuleAccessLevelAdmin ProtectionRuleAccessLevel = "admin" ) -// DeploymentApprovalStatus represents a Gitlab deployment approval status. +// DeploymentApprovalStatus represents a GitLab deployment approval status. 
type DeploymentApprovalStatus string // These constants represent all valid deployment approval statuses. @@ -308,7 +329,7 @@ const ( DeploymentApprovalStatusRejected DeploymentApprovalStatus = "rejected" ) -// DeploymentStatusValue represents a Gitlab deployment status. +// DeploymentStatusValue represents a GitLab deployment status. type DeploymentStatusValue string // These constants represent all valid deployment statuses. @@ -477,10 +498,18 @@ func (t *ISOTime) UnmarshalJSON(data []byte) error { return nil } - isotime, err := time.Parse(`"`+iso8601+`"`, string(data)) - *t = ISOTime(isotime) + // Try parsing as datetime first (ISO 8601 with time) + isotime, err := time.Parse(`"`+time.RFC3339+`"`, string(data)) + if err != nil { + // If that fails, try parsing as date-only + isotime, err = time.Parse(`"`+iso8601+`"`, string(data)) + if err != nil { + return err + } + } - return err + *t = ISOTime(isotime) + return nil } // EncodeValues implements the query.Encoder interface. @@ -718,9 +747,10 @@ type ResourceGroupProcessMode string // GitLab API docs: // https://docs.gitlab.com/ci/resource_groups/#process-modes const ( - Unordered ResourceGroupProcessMode = "unordered" - OldestFirst ResourceGroupProcessMode = "oldest_first" - NewestFirst ResourceGroupProcessMode = "newest_first" + Unordered ResourceGroupProcessMode = "unordered" + OldestFirst ResourceGroupProcessMode = "oldest_first" + NewestFirst ResourceGroupProcessMode = "newest_first" + NewestReadyFirst ResourceGroupProcessMode = "newest_ready_first" ) // SharedRunnersSettingValue determines whether shared runners are enabled for a @@ -774,8 +804,8 @@ const ( // TasksCompletionStatus represents tasks of the issue/merge request. type TasksCompletionStatus struct { - Count int `json:"count"` - CompletedCount int `json:"completed_count"` + Count int64 `json:"count"` + CompletedCount int64 `json:"completed_count"` } // TodoAction represents the available actions that can be performed on a todo. 
@@ -864,16 +894,10 @@ type BoolValue bool // https://github.com/gitlabhq/terraform-provider-gitlab/issues/348 func (t *BoolValue) UnmarshalJSON(b []byte) error { switch string(b) { - case `"1"`: + case `"1"`, `"true"`: *t = true return nil - case `"0"`: - *t = false - return nil - case `"true"`: - *t = true - return nil - case `"false"`: + case `"0"`, `"false"`: *t = false return nil default: @@ -899,3 +923,21 @@ const ( CiPipelineVariablesMaintainerRole CIPipelineVariablesMinimumOverrideRoleValue = "maintainer" CIPipelineVariablesDeveloperRole CIPipelineVariablesMinimumOverrideRoleValue = "developer" ) + +// EnabledGitAccessProtocolValue represents a git access protocol value. +type EnabledGitAccessProtocolValue string + +const ( + EnabledGitAccessProtocolSSH EnabledGitAccessProtocolValue = "ssh" + EnabledGitAccessProtocolHTTP EnabledGitAccessProtocolValue = "http" + EnabledGitAccessProtocolAll EnabledGitAccessProtocolValue = "all" +) + +// DuoAvailabilityValue represents a GitLab Duo availability value. +type DuoAvailabilityValue string + +const ( + DuoAvailabilityDefaultOn DuoAvailabilityValue = "default_on" + DuoAvailabilityDefaultOff DuoAvailabilityValue = "default_off" + DuoAvailabilityNeverOn DuoAvailabilityValue = "never_on" // Displayed as "Always Off" in the UI +) diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/usage_data.go b/vendor/gitlab.com/gitlab-org/api/client-go/usage_data.go index 788522a64e..737f72c6a3 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/usage_data.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/usage_data.go @@ -9,11 +9,35 @@ import ( type ( UsageDataServiceInterface interface { + // GetServicePing gets the current service ping data. + // + // GitLab API docs: + // https://docs.gitlab.com/api/usage_data/#export-service-ping-data GetServicePing(options ...RequestOptionFunc) (*ServicePingData, *Response, error) + // GetMetricDefinitionsAsYAML gets all metric definitions as a single YAML file. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/usage_data/#export-metric-definitions-as-a-single-yaml-file GetMetricDefinitionsAsYAML(options ...RequestOptionFunc) (io.Reader, *Response, error) + // GetQueries gets all raw SQL queries used to compute service ping. + // + // GitLab API docs: + // https://docs.gitlab.com/api/usage_data/#export-service-ping-sql-queries GetQueries(options ...RequestOptionFunc) (*ServicePingQueries, *Response, error) - GetNonSQLMetrics(options ...RequestOptionFunc) (*ServicePingNonSqlMetrics, *Response, error) + // GetNonSQLMetrics gets all non-SQL metrics data used in the service ping. + // + // GitLab API docs: + // https://docs.gitlab.com/api/usage_data/#usagedatanonsqlmetrics-api + GetNonSQLMetrics(options ...RequestOptionFunc) (*ServicePingNonSQLMetrics, *Response, error) + // TrackEvent tracks an internal GitLab event. + // + // GitLab API docs: + // https://docs.gitlab.com/api/usage_data/#events-tracking-api TrackEvent(opt *TrackEventOptions, options ...RequestOptionFunc) (*Response, error) + // TrackEvents tracks multiple internal GitLab events. + // + // GitLab API docs: + // https://docs.gitlab.com/api/usage_data/#events-tracking-api TrackEvents(opt *TrackEventsOptions, options ...RequestOptionFunc) (*Response, error) } @@ -30,46 +54,24 @@ type ( type ServicePingData struct { RecordedAt *time.Time `json:"recorded_at"` License map[string]string `json:"license"` - Counts map[string]int `json:"counts"` + Counts map[string]int64 `json:"counts"` } -// GetServicePing gets the current service ping data. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/usage_data/#export-service-ping-data func (s *UsageDataService) GetServicePing(options ...RequestOptionFunc) (*ServicePingData, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "usage_data/service_ping", nil, options) - if err != nil { - return nil, nil, err - } - - sp := new(ServicePingData) - resp, err := s.client.Do(req, sp) - if err != nil { - return nil, resp, err - } - - return sp, resp, nil + return do[*ServicePingData](s.client, + withPath("usage_data/service_ping"), + withRequestOpts(options...), + ) } -// GetMetricDefinitionsAsYAML gets all metric definitions as a single YAML file. -// -// GitLab API docs: -// https://docs.gitlab.com/api/usage_data/#export-metric-definitions-as-a-single-yaml-file func (s *UsageDataService) GetMetricDefinitionsAsYAML(options ...RequestOptionFunc) (io.Reader, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "usage_data/metric_definitions", nil, options) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", "text/yaml") - - var buf bytes.Buffer - resp, err := s.client.Do(req, &buf) + buf, resp, err := do[bytes.Buffer](s.client, + withPath("usage_data/metric_definitions"), + withRequestOpts(append([]RequestOptionFunc{WithHeader("Accept", "text/yaml")}, options...)...), + ) if err != nil { return nil, resp, err } - return &buf, resp, nil } @@ -85,13 +87,13 @@ type ServicePingQueries struct { LicenseMD5 string `json:"license_md5"` LicenseSHA256 string `json:"license_sha256"` LicenseID string `json:"license_id"` - HistoricalMaxUsers int `json:"historical_max_users"` + HistoricalMaxUsers int64 `json:"historical_max_users"` Licensee map[string]string `json:"licensee"` - LicenseUserCount int `json:"license_user_count"` + LicenseUserCount int64 `json:"license_user_count"` LicenseStartsAt string `json:"license_starts_at"` LicenseExpiresAt string `json:"license_expires_at"` LicensePlan string `json:"license_plan"` - 
LicenseAddOns map[string]int `json:"license_add_ons"` + LicenseAddOns map[string]int64 `json:"license_add_ons"` LicenseTrial string `json:"license_trial"` LicenseSubscriptionID string `json:"license_subscription_id"` License map[string]string `json:"license"` @@ -99,89 +101,62 @@ type ServicePingQueries struct { Counts map[string]string `json:"counts"` } -// GetQueries gets all raw SQL queries used to compute service ping. -// -// GitLab API docs: -// https://docs.gitlab.com/api/usage_data/#export-service-ping-sql-queries func (s *UsageDataService) GetQueries(options ...RequestOptionFunc) (*ServicePingQueries, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "usage_data/queries", nil, options) - if err != nil { - return nil, nil, err - } - - sq := new(ServicePingQueries) - resp, err := s.client.Do(req, sq) - if err != nil { - return nil, resp, err - } - - return sq, resp, nil + return do[*ServicePingQueries](s.client, + withPath("usage_data/queries"), + withRequestOpts(options...), + ) } -// ServicePingNonSqlMetrics represents the non-SQL metrics used in service ping. -type ServicePingNonSqlMetrics struct { +// ServicePingNonSQLMetrics represents the non-SQL metrics used in service ping. 
+type ServicePingNonSQLMetrics struct { RecordedAt string `json:"recorded_at"` UUID string `json:"uuid"` Hostname string `json:"hostname"` Version string `json:"version"` InstallationType string `json:"installation_type"` - ActiveUserCount int `json:"active_user_count"` + ActiveUserCount int64 `json:"active_user_count"` Edition string `json:"edition"` LicenseMD5 string `json:"license_md5"` LicenseSHA256 string `json:"license_sha256"` LicenseID string `json:"license_id"` - HistoricalMaxUsers int `json:"historical_max_users"` + HistoricalMaxUsers int64 `json:"historical_max_users"` Licensee map[string]string `json:"licensee"` - LicenseUserCount int `json:"license_user_count"` + LicenseUserCount int64 `json:"license_user_count"` LicenseStartsAt string `json:"license_starts_at"` LicenseExpiresAt string `json:"license_expires_at"` LicensePlan string `json:"license_plan"` - LicenseAddOns map[string]int `json:"license_add_ons"` + LicenseAddOns map[string]int64 `json:"license_add_ons"` LicenseTrial string `json:"license_trial"` LicenseSubscriptionID string `json:"license_subscription_id"` License map[string]string `json:"license"` Settings map[string]string `json:"settings"` } -// GetNonSQLMetrics gets all non-SQL metrics data used in the service ping. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/usage_data/#usagedatanonsqlmetrics-api -func (s *UsageDataService) GetNonSQLMetrics(options ...RequestOptionFunc) (*ServicePingNonSqlMetrics, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "usage_data/non_sql_metrics", nil, options) - if err != nil { - return nil, nil, err - } - - nsm := new(ServicePingNonSqlMetrics) - resp, err := s.client.Do(req, nsm) - if err != nil { - return nil, resp, err - } - - return nsm, resp, nil +func (s *UsageDataService) GetNonSQLMetrics(options ...RequestOptionFunc) (*ServicePingNonSQLMetrics, *Response, error) { + return do[*ServicePingNonSQLMetrics](s.client, + withPath("usage_data/non_sql_metrics"), + withRequestOpts(options...), + ) } // TrackEventOptions represents the available options for tracking events. type TrackEventOptions struct { Event string `json:"event" url:"event"` SendToSnowplow *bool `json:"send_to_snowplow,omitempty" url:"send_to_snowplow,omitempty"` - NamespaceID *int `json:"namespace_id,omitempty" url:"namespace_id,omitempty"` - ProjectID *int `json:"project_id,omitempty" url:"project_id,omitempty"` + NamespaceID *int64 `json:"namespace_id,omitempty" url:"namespace_id,omitempty"` + ProjectID *int64 `json:"project_id,omitempty" url:"project_id,omitempty"` AdditionalProperties map[string]string `json:"additional_properties,omitempty" url:"additional_properties,omitempty"` } -// TrackEvent tracks an internal GitLab event. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/usage_data/#events-tracking-api func (s *UsageDataService) TrackEvent(opt *TrackEventOptions, options ...RequestOptionFunc) (*Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "usage_data/track_event", opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("usage_data/track_event"), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } // TrackEventsOptions represents the available options for tracking multiple events. @@ -189,15 +164,12 @@ type TrackEventsOptions struct { Events []TrackEventOptions `json:"events" url:"events"` } -// TrackEvents tracks multiple internal GitLab events. -// -// GitLab API docs: -// https://docs.gitlab.com/api/usage_data/#events-tracking-api func (s *UsageDataService) TrackEvents(opt *TrackEventsOptions, options ...RequestOptionFunc) (*Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "usage_data/track_events", opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("usage_data/track_events"), + withAPIOpts(opt), + withRequestOpts(options...), + ) + return resp, err } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/users.go b/vendor/gitlab.com/gitlab-org/api/client-go/users.go index 93c72216f2..b97cd4eb6f 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/users.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/users.go @@ -23,69 +23,284 @@ import ( "io" "net" "net/http" - "strings" + "text/template" "time" - - "github.com/hashicorp/go-retryablehttp" ) type ( UsersServiceInterface interface { + // ListUsers gets a list of users. 
+ // + // GitLab API docs: https://docs.gitlab.com/api/users/#list-users ListUsers(opt *ListUsersOptions, options ...RequestOptionFunc) ([]*User, *Response, error) - GetUser(user int, opt GetUsersOptions, options ...RequestOptionFunc) (*User, *Response, error) + // GetUser gets a single user. + // + // GitLab API docs: https://docs.gitlab.com/api/users/#get-a-single-user + GetUser(user int64, opt GetUsersOptions, options ...RequestOptionFunc) (*User, *Response, error) + // CreateUser creates a new user. Note only administrators can create new users. + // + // GitLab API docs: https://docs.gitlab.com/api/users/#create-a-user CreateUser(opt *CreateUserOptions, options ...RequestOptionFunc) (*User, *Response, error) - ModifyUser(user int, opt *ModifyUserOptions, options ...RequestOptionFunc) (*User, *Response, error) - DeleteUser(user int, options ...RequestOptionFunc) (*Response, error) + // ModifyUser modifies an existing user. Only administrators can change attributes + // of a user. + // + // GitLab API docs: https://docs.gitlab.com/api/users/#modify-a-user + ModifyUser(user int64, opt *ModifyUserOptions, options ...RequestOptionFunc) (*User, *Response, error) + // DeleteUser deletes a user. Available only for administrators. This is an + // idempotent function, calling this function for a non-existent user id still + // returns a status code 200 OK. The JSON response differs if the user was + // actually deleted or not. In the former the user is returned and in the + // latter not. + // + // GitLab API docs: https://docs.gitlab.com/api/users/#delete-a-user + DeleteUser(user int64, options ...RequestOptionFunc) (*Response, error) + // CurrentUser gets currently authenticated user. 
+ // + // GitLab API docs: https://docs.gitlab.com/api/users/#get-the-current-user CurrentUser(options ...RequestOptionFunc) (*User, *Response, error) + // CurrentUserStatus retrieves the user status + // + // GitLab API docs: + // https://docs.gitlab.com/api/users/#get-your-user-status CurrentUserStatus(options ...RequestOptionFunc) (*UserStatus, *Response, error) + // GetUserStatus retrieves a user's status. + // + // uid can be either a user ID (int) or a username (string); will trim one "@" character off the username, if present. + // Other types will cause an error to be returned. + // + // GitLab API docs: + // https://docs.gitlab.com/api/users/#get-the-status-of-a-user GetUserStatus(uid any, options ...RequestOptionFunc) (*UserStatus, *Response, error) + // SetUserStatus sets the user's status + // + // GitLab API docs: + // https://docs.gitlab.com/api/users/#set-your-user-status SetUserStatus(opt *UserStatusOptions, options ...RequestOptionFunc) (*UserStatus, *Response, error) - GetUserAssociationsCount(user int, options ...RequestOptionFunc) (*UserAssociationsCount, *Response, error) + // GetUserAssociationsCount gets a list of a specified user's associations. + // + // GitLab API docs: + // https://docs.gitlab.com/api/users/#get-a-count-of-a-users-projects-groups-issues-and-merge-requests + GetUserAssociationsCount(user int64, options ...RequestOptionFunc) (*UserAssociationsCount, *Response, error) + // ListSSHKeys gets a list of currently authenticated user's SSH keys. + // + // GitLab API docs: https://docs.gitlab.com/api/user_keys/#list-all-ssh-keys ListSSHKeys(opt *ListSSHKeysOptions, options ...RequestOptionFunc) ([]*SSHKey, *Response, error) + // ListSSHKeysForUser gets a list of a specified user's SSH keys. + // + // uid can be either a user ID (int) or a username (string). If a username + // is provided with a leading "@" (e.g., "@johndoe"), it will be trimmed. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/user_keys/#list-all-ssh-keys-for-a-user ListSSHKeysForUser(uid any, opt *ListSSHKeysForUserOptions, options ...RequestOptionFunc) ([]*SSHKey, *Response, error) - GetSSHKey(key int, options ...RequestOptionFunc) (*SSHKey, *Response, error) - GetSSHKeyForUser(user int, key int, options ...RequestOptionFunc) (*SSHKey, *Response, error) + // GetSSHKey gets a single key. + // + // GitLab API docs: https://docs.gitlab.com/api/user_keys/#get-an-ssh-key + GetSSHKey(key int64, options ...RequestOptionFunc) (*SSHKey, *Response, error) + // GetSSHKeyForUser gets a single key for a given user. + // + // GitLab API docs: https://docs.gitlab.com/api/user_keys/#get-an-ssh-key-for-a-user + GetSSHKeyForUser(user int64, key int64, options ...RequestOptionFunc) (*SSHKey, *Response, error) + // AddSSHKey creates a new key owned by the currently authenticated user. + // + // GitLab API docs: https://docs.gitlab.com/api/user_keys/#add-an-ssh-key AddSSHKey(opt *AddSSHKeyOptions, options ...RequestOptionFunc) (*SSHKey, *Response, error) - AddSSHKeyForUser(user int, opt *AddSSHKeyOptions, options ...RequestOptionFunc) (*SSHKey, *Response, error) - DeleteSSHKey(key int, options ...RequestOptionFunc) (*Response, error) - DeleteSSHKeyForUser(user, key int, options ...RequestOptionFunc) (*Response, error) + // AddSSHKeyForUser creates new key owned by specified user. Available only for + // admin. + // + // GitLab API docs: https://docs.gitlab.com/api/user_keys/#add-an-ssh-key-for-a-user + AddSSHKeyForUser(user int64, opt *AddSSHKeyOptions, options ...RequestOptionFunc) (*SSHKey, *Response, error) + // DeleteSSHKey deletes key owned by currently authenticated user. This is an + // idempotent function and calling it on a key that is already deleted or not + // available results in 200 OK. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/user_keys/#delete-an-ssh-key + DeleteSSHKey(key int64, options ...RequestOptionFunc) (*Response, error) + // DeleteSSHKeyForUser deletes key owned by a specified user. Available only + // for admin. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_keys/#delete-an-ssh-key-for-a-user + DeleteSSHKeyForUser(user, key int64, options ...RequestOptionFunc) (*Response, error) + // ListGPGKeys gets a list of currently authenticated user’s GPG keys. + // + // GitLab API docs: https://docs.gitlab.com/api/user_keys/#list-all-gpg-keys ListGPGKeys(options ...RequestOptionFunc) ([]*GPGKey, *Response, error) - GetGPGKey(key int, options ...RequestOptionFunc) (*GPGKey, *Response, error) + // GetGPGKey gets a specific GPG key of currently authenticated user. + // + // GitLab API docs: https://docs.gitlab.com/api/user_keys/#get-a-gpg-key + GetGPGKey(key int64, options ...RequestOptionFunc) (*GPGKey, *Response, error) + // AddGPGKey creates a new GPG key owned by the currently authenticated user. + // + // GitLab API docs: https://docs.gitlab.com/api/user_keys/#add-a-gpg-key AddGPGKey(opt *AddGPGKeyOptions, options ...RequestOptionFunc) (*GPGKey, *Response, error) - DeleteGPGKey(key int, options ...RequestOptionFunc) (*Response, error) - ListGPGKeysForUser(user int, options ...RequestOptionFunc) ([]*GPGKey, *Response, error) - GetGPGKeyForUser(user, key int, options ...RequestOptionFunc) (*GPGKey, *Response, error) - AddGPGKeyForUser(user int, opt *AddGPGKeyOptions, options ...RequestOptionFunc) (*GPGKey, *Response, error) - DeleteGPGKeyForUser(user, key int, options ...RequestOptionFunc) (*Response, error) + // DeleteGPGKey deletes a GPG key owned by currently authenticated user. + // + // GitLab API docs: https://docs.gitlab.com/api/user_keys/#delete-a-gpg-key + DeleteGPGKey(key int64, options ...RequestOptionFunc) (*Response, error) + // ListGPGKeysForUser gets a list of a specified user’s GPG keys. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/user_keys/#list-all-gpg-keys-for-a-user + ListGPGKeysForUser(user int64, options ...RequestOptionFunc) ([]*GPGKey, *Response, error) + // GetGPGKeyForUser gets a specific GPG key for a given user. + // + // GitLab API docs: https://docs.gitlab.com/api/user_keys/#get-a-gpg-key-for-a-user + GetGPGKeyForUser(user, key int64, options ...RequestOptionFunc) (*GPGKey, *Response, error) + // AddGPGKeyForUser creates new GPG key owned by the specified user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_keys/#add-a-gpg-key-for-a-user + AddGPGKeyForUser(user int64, opt *AddGPGKeyOptions, options ...RequestOptionFunc) (*GPGKey, *Response, error) + // DeleteGPGKeyForUser deletes a GPG key owned by a specified user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_keys/#delete-a-gpg-key-for-a-user + DeleteGPGKeyForUser(user, key int64, options ...RequestOptionFunc) (*Response, error) + // ListEmails gets a list of currently authenticated user's Emails. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_email_addresses/#list-all-email-addresses ListEmails(options ...RequestOptionFunc) ([]*Email, *Response, error) - ListEmailsForUser(user int, opt *ListEmailsForUserOptions, options ...RequestOptionFunc) ([]*Email, *Response, error) - GetEmail(email int, options ...RequestOptionFunc) (*Email, *Response, error) + // ListEmailsForUser gets a list of a specified user's Emails. Available + // only for admin + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_email_addresses/#list-all-email-addresses-for-a-user + ListEmailsForUser(user int64, opt *ListEmailsForUserOptions, options ...RequestOptionFunc) ([]*Email, *Response, error) + // GetEmail gets a single email. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/user_email_addresses/#get-details-on-an-email-address + GetEmail(email int64, options ...RequestOptionFunc) (*Email, *Response, error) + // AddEmail creates a new email owned by the currently authenticated user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_email_addresses/#add-an-email-address AddEmail(opt *AddEmailOptions, options ...RequestOptionFunc) (*Email, *Response, error) - AddEmailForUser(user int, opt *AddEmailOptions, options ...RequestOptionFunc) (*Email, *Response, error) - DeleteEmail(email int, options ...RequestOptionFunc) (*Response, error) - DeleteEmailForUser(user, email int, options ...RequestOptionFunc) (*Response, error) - BlockUser(user int, options ...RequestOptionFunc) error - UnblockUser(user int, options ...RequestOptionFunc) error - BanUser(user int, options ...RequestOptionFunc) error - UnbanUser(user int, options ...RequestOptionFunc) error - DeactivateUser(user int, options ...RequestOptionFunc) error - ActivateUser(user int, options ...RequestOptionFunc) error - ApproveUser(user int, options ...RequestOptionFunc) error - RejectUser(user int, options ...RequestOptionFunc) error - GetAllImpersonationTokens(user int, opt *GetAllImpersonationTokensOptions, options ...RequestOptionFunc) ([]*ImpersonationToken, *Response, error) - GetImpersonationToken(user, token int, options ...RequestOptionFunc) (*ImpersonationToken, *Response, error) - CreateImpersonationToken(user int, opt *CreateImpersonationTokenOptions, options ...RequestOptionFunc) (*ImpersonationToken, *Response, error) - RevokeImpersonationToken(user, token int, options ...RequestOptionFunc) (*Response, error) - CreatePersonalAccessToken(user int, opt *CreatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) + // AddEmailForUser creates new email owned by specified user. Available only for + // admin. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/user_email_addresses/#add-an-email-address-for-a-user + AddEmailForUser(user int64, opt *AddEmailOptions, options ...RequestOptionFunc) (*Email, *Response, error) + // DeleteEmail deletes email owned by currently authenticated user. This is an + // idempotent function and calling it on a key that is already deleted or not + // available results in 200 OK. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_email_addresses/#delete-an-email-address + DeleteEmail(email int64, options ...RequestOptionFunc) (*Response, error) + // DeleteEmailForUser deletes email owned by a specified user. Available only + // for admin. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_email_addresses/#delete-an-email-address-for-a-user + DeleteEmailForUser(user, email int64, options ...RequestOptionFunc) (*Response, error) + // BlockUser blocks the specified user. Available only for admin. + // + // GitLab API docs: https://docs.gitlab.com/api/user_moderation/#block-access-to-a-user + BlockUser(user int64, options ...RequestOptionFunc) error + // UnblockUser unblocks the specified user. Available only for admin. + // + // GitLab API docs: https://docs.gitlab.com/api/user_moderation/#unblock-access-to-a-user + UnblockUser(user int64, options ...RequestOptionFunc) error + // BanUser bans the specified user. Available only for admin. + // + // GitLab API docs: https://docs.gitlab.com/api/user_moderation/#ban-a-user + BanUser(user int64, options ...RequestOptionFunc) error + // UnbanUser unbans the specified user. Available only for admin. + // + // GitLab API docs: https://docs.gitlab.com/api/user_moderation/#unban-a-user + UnbanUser(user int64, options ...RequestOptionFunc) error + // DeactivateUser deactivate the specified user. Available only for admin. 
+ // + // GitLab API docs: https://docs.gitlab.com/api/user_moderation/#deactivate-a-user + DeactivateUser(user int64, options ...RequestOptionFunc) error + // ActivateUser activate the specified user. Available only for admin. + // + // GitLab API docs: https://docs.gitlab.com/api/user_moderation/#reactivate-a-user + ActivateUser(user int64, options ...RequestOptionFunc) error + // ApproveUser approve the specified user. Available only for admin. + // + // GitLab API docs: https://docs.gitlab.com/api/user_moderation/#approve-access-to-a-user + ApproveUser(user int64, options ...RequestOptionFunc) error + // RejectUser reject the specified user. Available only for admin. + // + // GitLab API docs: https://docs.gitlab.com/api/user_moderation/#reject-access-to-a-user + RejectUser(user int64, options ...RequestOptionFunc) error + // GetAllImpersonationTokens retrieves all impersonation tokens of a user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_tokens/#list-all-impersonation-tokens-for-a-user + GetAllImpersonationTokens(user int64, opt *GetAllImpersonationTokensOptions, options ...RequestOptionFunc) ([]*ImpersonationToken, *Response, error) + // GetImpersonationToken retrieves an impersonation token of a user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_tokens/#get-an-impersonation-token-for-a-user + GetImpersonationToken(user, token int64, options ...RequestOptionFunc) (*ImpersonationToken, *Response, error) + // CreateImpersonationToken creates an impersonation token. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_tokens/#create-an-impersonation-token + CreateImpersonationToken(user int64, opt *CreateImpersonationTokenOptions, options ...RequestOptionFunc) (*ImpersonationToken, *Response, error) + // RevokeImpersonationToken revokes an impersonation token. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/user_tokens/#revoke-an-impersonation-token + RevokeImpersonationToken(user, token int64, options ...RequestOptionFunc) (*Response, error) + // CreatePersonalAccessToken creates a personal access token. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_tokens/#create-a-personal-access-token-for-a-user + CreatePersonalAccessToken(user int64, opt *CreatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) + // CreatePersonalAccessTokenForCurrentUser creates a personal access token with limited scopes for the currently authenticated user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_tokens/#create-a-personal-access-token CreatePersonalAccessTokenForCurrentUser(opt *CreatePersonalAccessTokenForCurrentUserOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) + // GetUserActivities retrieves user activities (admin only) + // + // GitLab API docs: + // https://docs.gitlab.com/api/users/#list-a-users-activity GetUserActivities(opt *GetUserActivitiesOptions, options ...RequestOptionFunc) ([]*UserActivity, *Response, error) - GetUserMemberships(user int, opt *GetUserMembershipOptions, options ...RequestOptionFunc) ([]*UserMembership, *Response, error) - DisableTwoFactor(user int, options ...RequestOptionFunc) error + // GetUserMemberships retrieves a list of the user's memberships. + // + // GitLab API docs: + // https://docs.gitlab.com/api/users/#list-projects-and-groups-that-a-user-is-a-member-of + GetUserMemberships(user int64, opt *GetUserMembershipOptions, options ...RequestOptionFunc) ([]*UserMembership, *Response, error) + // DisableTwoFactor disables two factor authentication for the specified user. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/users/#disable-two-factor-authentication-for-a-user + DisableTwoFactor(user int64, options ...RequestOptionFunc) error + // CreateUserRunner creates a runner linked to the current user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/users/#create-a-runner-linked-to-a-user CreateUserRunner(opts *CreateUserRunnerOptions, options ...RequestOptionFunc) (*UserRunner, *Response, error) + // CreateServiceAccountUser creates a new service account user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_service_accounts/#create-a-service-account-user CreateServiceAccountUser(opts *CreateServiceAccountUserOptions, options ...RequestOptionFunc) (*User, *Response, error) + // ListServiceAccounts lists all service accounts. + // + // GitLab API docs: + // https://docs.gitlab.com/api/user_service_accounts/#list-all-service-account-users ListServiceAccounts(opt *ListServiceAccountsOptions, options ...RequestOptionFunc) ([]*ServiceAccount, *Response, error) + // UploadAvatar uploads an avatar to the current user. + // + // GitLab API docs: + // https://docs.gitlab.com/api/users/#upload-an-avatar-for-yourself UploadAvatar(avatar io.Reader, filename string, options ...RequestOptionFunc) (*User, *Response, error) - DeleteUserIdentity(user int, provider string, options ...RequestOptionFunc) (*Response, error) + // DeleteUserIdentity deletes a user's authentication identity using the provider + // name associated with that identity. Only available for administrators. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/users/#delete-authentication-identity-from-a-user + DeleteUserIdentity(user int64, provider string, options ...RequestOptionFunc) (*Response, error) // events.go ListUserContributionEvents(uid any, opt *ListContributionEventsOptions, options ...RequestOptionFunc) ([]*ContributionEvent, *Response, error) @@ -120,11 +335,24 @@ var ( // BasicUser included in other service responses (such as merge requests, pipelines, etc). type BasicUser struct { - ID int `json:"id"` - Username string `json:"username"` - Name string `json:"name"` - State string `json:"state"` - Locked bool `json:"locked"` + ID int64 `json:"id"` + Username string `json:"username"` + Name string `json:"name"` + + // State represents the administrative status of the user account. + // Common values: "active", "blocked", "deactivated", "banned", + // "ldap_blocked", "blocked_pending_approval". + // + // This is independent from the Locked field: State tracks permanent + // administrative actions, while Locked handles temporary login failures. + State string `json:"state"` + + // Locked indicates whether the user account is temporarily locked due to + // excessive failed login attempts. This is separate from administrative + // blocking (the State field). Locks automatically expire after a configured + // time period (default: 10 minutes). 
+ Locked bool `json:"locked"` + CreatedAt *time.Time `json:"created_at"` AvatarURL string `json:"avatar_url"` WebURL string `json:"web_url"` @@ -135,7 +363,7 @@ type BasicUser struct { // GitLab API docs: // https://docs.gitlab.com/api/user_service_accounts/ type ServiceAccount struct { - ID int `json:"id"` + ID int64 `json:"id"` Username string `json:"username"` Name string `json:"name"` } @@ -144,7 +372,7 @@ type ServiceAccount struct { // // GitLab API docs: https://docs.gitlab.com/api/users/ type User struct { - ID int `json:"id"` + ID int64 `json:"id"` Username string `json:"username"` Email string `json:"email"` Name string `json:"name"` @@ -163,16 +391,16 @@ type User struct { JobTitle string `json:"job_title"` ExternUID string `json:"extern_uid"` Provider string `json:"provider"` - ThemeID int `json:"theme_id"` + ThemeID int64 `json:"theme_id"` LastActivityOn *ISOTime `json:"last_activity_on"` - ColorSchemeID int `json:"color_scheme_id"` + ColorSchemeID int64 `json:"color_scheme_id"` IsAdmin bool `json:"is_admin"` IsAuditor bool `json:"is_auditor"` AvatarURL string `json:"avatar_url"` CanCreateGroup bool `json:"can_create_group"` CanCreateProject bool `json:"can_create_project"` CanCreateOrganization bool `json:"can_create_organization"` - ProjectsLimit int `json:"projects_limit"` + ProjectsLimit int64 `json:"projects_limit"` CurrentSignInAt *time.Time `json:"current_sign_in_at"` CurrentSignInIP *net.IP `json:"current_sign_in_ip"` LastSignInAt *time.Time `json:"last_sign_in_at"` @@ -183,11 +411,11 @@ type User struct { Identities []*UserIdentity `json:"identities"` External bool `json:"external"` PrivateProfile bool `json:"private_profile"` - SharedRunnersMinutesLimit int `json:"shared_runners_minutes_limit"` - ExtraSharedRunnersMinutesLimit int `json:"extra_shared_runners_minutes_limit"` + SharedRunnersMinutesLimit int64 `json:"shared_runners_minutes_limit"` + ExtraSharedRunnersMinutesLimit int64 `json:"extra_shared_runners_minutes_limit"` UsingLicenseSeat 
bool `json:"using_license_seat"` CustomAttributes []*CustomAttribute `json:"custom_attributes"` - NamespaceID int `json:"namespace_id"` + NamespaceID int64 `json:"namespace_id"` Locked bool `json:"locked"` CreatedBy *BasicUser `json:"created_by"` } @@ -246,22 +474,12 @@ type ListUsersOptions struct { WithoutProjectBots *bool `url:"without_project_bots,omitempty" json:"without_project_bots,omitempty"` } -// ListUsers gets a list of users. -// -// GitLab API docs: https://docs.gitlab.com/api/users/#list-users func (s *UsersService) ListUsers(opt *ListUsersOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "users", opt, options) - if err != nil { - return nil, nil, err - } - - var usr []*User - resp, err := s.client.Do(req, &usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil + return do[[]*User](s.client, + withPath("users"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetUsersOptions represents the available GetUser() options. @@ -271,24 +489,12 @@ type GetUsersOptions struct { WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` } -// GetUser gets a single user. 
-// -// GitLab API docs: https://docs.gitlab.com/api/users/#get-a-single-user -func (s *UsersService) GetUser(user int, opt GetUsersOptions, options ...RequestOptionFunc) (*User, *Response, error) { - u := fmt.Sprintf("users/%d", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - usr := new(User) - resp, err := s.client.Do(req, usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil +func (s *UsersService) GetUser(user int64, opt GetUsersOptions, options ...RequestOptionFunc) (*User, *Response, error) { + return do[*User](s.client, + withPath("users/%d", user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateUserOptions represents the available CreateUser() options. @@ -311,150 +517,91 @@ type CreateUserOptions struct { Organization *string `url:"organization,omitempty" json:"organization,omitempty"` Password *string `url:"password,omitempty" json:"password,omitempty"` PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` - ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` + ProjectsLimit *int64 `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` Provider *string `url:"provider,omitempty" json:"provider,omitempty"` ResetPassword *bool `url:"reset_password,omitempty" json:"reset_password,omitempty"` SkipConfirmation *bool `url:"skip_confirmation,omitempty" json:"skip_confirmation,omitempty"` Skype *string `url:"skype,omitempty" json:"skype,omitempty"` - ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` + ThemeID *int64 `url:"theme_id,omitempty" json:"theme_id,omitempty"` Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` Username *string `url:"username,omitempty" json:"username,omitempty"` WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` + ViewDiffsFileByFile *bool `url:"view_diffs_file_by_file,omitempty" 
json:"view_diffs_file_by_file,omitempty"` + PublicEmail *string `url:"public_email,omitempty" json:"public_email,omitempty"` } -// CreateUser creates a new user. Note only administrators can create new users. -// -// GitLab API docs: https://docs.gitlab.com/api/users/#create-a-user func (s *UsersService) CreateUser(opt *CreateUserOptions, options ...RequestOptionFunc) (*User, *Response, error) { - var err error - var req *retryablehttp.Request - - if opt.Avatar == nil { - req, err = s.client.NewRequest(http.MethodPost, "users", opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPost, - "users", - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) + reqOpts := []doOption{ + withMethod(http.MethodPost), + withPath("users"), + withAPIOpts(opt), + withRequestOpts(options...), } - if err != nil { - return nil, nil, err + if opt.Avatar != nil { + reqOpts = append(reqOpts, withUpload(opt.Avatar.Image, opt.Avatar.Filename, UploadAvatar)) } - - usr := new(User) - resp, err := s.client.Do(req, usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil + return do[*User](s.client, reqOpts...) } // ModifyUserOptions represents the available ModifyUser() options. 
// // GitLab API docs: https://docs.gitlab.com/api/users/#modify-a-user type ModifyUserOptions struct { - Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` - Avatar *UserAvatar `url:"-" json:"avatar,omitempty"` - Bio *string `url:"bio,omitempty" json:"bio,omitempty"` - CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` - CommitEmail *string `url:"commit_email,omitempty" json:"commit_email,omitempty"` - Email *string `url:"email,omitempty" json:"email,omitempty"` - External *bool `url:"external,omitempty" json:"external,omitempty"` - ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` - JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` - Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` - Location *string `url:"location,omitempty" json:"location,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - Note *string `url:"note,omitempty" json:"note,omitempty"` - Organization *string `url:"organization,omitempty" json:"organization,omitempty"` - Password *string `url:"password,omitempty" json:"password,omitempty"` - PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` - ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` - Provider *string `url:"provider,omitempty" json:"provider,omitempty"` - PublicEmail *string `url:"public_email,omitempty" json:"public_email,omitempty"` - SkipReconfirmation *bool `url:"skip_reconfirmation,omitempty" json:"skip_reconfirmation,omitempty"` - Skype *string `url:"skype,omitempty" json:"skype,omitempty"` - ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` - Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` - Username *string `url:"username,omitempty" json:"username,omitempty"` - WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` -} - -// ModifyUser modifies an existing user. 
Only administrators can change attributes -// of a user. -// -// GitLab API docs: https://docs.gitlab.com/api/users/#modify-a-user -func (s *UsersService) ModifyUser(user int, opt *ModifyUserOptions, options ...RequestOptionFunc) (*User, *Response, error) { - var err error - var req *retryablehttp.Request - u := fmt.Sprintf("users/%d", user) - - if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) { - req, err = s.client.NewRequest(http.MethodPut, u, opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPut, - u, - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) - } - if err != nil { - return nil, nil, err - } - - usr := new(User) - resp, err := s.client.Do(req, usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil + Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` + Avatar *UserAvatar `url:"-" json:"avatar,omitempty"` + Bio *string `url:"bio,omitempty" json:"bio,omitempty"` + CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` + CommitEmail *string `url:"commit_email,omitempty" json:"commit_email,omitempty"` + Email *string `url:"email,omitempty" json:"email,omitempty"` + External *bool `url:"external,omitempty" json:"external,omitempty"` + ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` + JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` + Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` + Location *string `url:"location,omitempty" json:"location,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Note *string `url:"note,omitempty" json:"note,omitempty"` + Organization *string `url:"organization,omitempty" json:"organization,omitempty"` + Password *string `url:"password,omitempty" json:"password,omitempty"` + PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` + ProjectsLimit *int64 
`url:"projects_limit,omitempty" json:"projects_limit,omitempty"` + Provider *string `url:"provider,omitempty" json:"provider,omitempty"` + PublicEmail *string `url:"public_email,omitempty" json:"public_email,omitempty"` + SkipReconfirmation *bool `url:"skip_reconfirmation,omitempty" json:"skip_reconfirmation,omitempty"` + Skype *string `url:"skype,omitempty" json:"skype,omitempty"` + ThemeID *int64 `url:"theme_id,omitempty" json:"theme_id,omitempty"` + Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` + ViewDiffsFileByFile *bool `url:"view_diffs_file_by_file,omitempty" json:"view_diffs_file_by_file,omitempty"` } -// DeleteUser deletes a user. Available only for administrators. This is an -// idempotent function, calling this function for a non-existent user id still -// returns a status code 200 OK. The JSON response differs if the user was -// actually deleted or not. In the former the user is returned and in the -// latter not. -// -// GitLab API docs: https://docs.gitlab.com/api/users/#delete-a-user -func (s *UsersService) DeleteUser(user int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("users/%d", user) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err +func (s *UsersService) ModifyUser(user int64, opt *ModifyUserOptions, options ...RequestOptionFunc) (*User, *Response, error) { + reqOpts := []doOption{ + withMethod(http.MethodPut), + withPath("users/%d", user), + withAPIOpts(opt), + withRequestOpts(options...), + } + if opt.Avatar != nil && (opt.Avatar.Filename != "" || opt.Avatar.Image != nil) { + reqOpts = append(reqOpts, withUpload(opt.Avatar.Image, opt.Avatar.Filename, UploadAvatar)) } + return do[*User](s.client, reqOpts...) 
+} - return s.client.Do(req, nil) +func (s *UsersService) DeleteUser(user int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("users/%d", user), + withRequestOpts(options...), + ) + return resp, err } -// CurrentUser gets currently authenticated user. -// -// GitLab API docs: https://docs.gitlab.com/api/users/#get-the-current-user func (s *UsersService) CurrentUser(options ...RequestOptionFunc) (*User, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "user", nil, options) - if err != nil { - return nil, nil, err - } - - usr := new(User) - resp, err := s.client.Do(req, usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil + return do[*User](s.client, + withPath("user"), + withRequestOpts(options...), + ) } // UserStatus represents the current status of a user @@ -469,52 +616,25 @@ type UserStatus struct { ClearStatusAt *time.Time `json:"clear_status_at"` } -// CurrentUserStatus retrieves the user status -// -// GitLab API docs: -// https://docs.gitlab.com/api/users/#get-your-user-status func (s *UsersService) CurrentUserStatus(options ...RequestOptionFunc) (*UserStatus, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "user/status", nil, options) - if err != nil { - return nil, nil, err - } - - status := new(UserStatus) - resp, err := s.client.Do(req, status) - if err != nil { - return nil, resp, err - } - - return status, resp, nil + return do[*UserStatus](s.client, + withPath("user/status"), + withRequestOpts(options...), + ) } // GetUserStatus retrieves a user's status. // -// uid can be either a user ID (int) or a username (string); will trim one "@" character off the username, if present. -// Other types will cause an error to be returned. +// uid can be either a user ID (int) or a username (string). If a username +// is provided with a leading "@" (e.g., "@johndoe"), it will be trimmed. 
// // GitLab API docs: // https://docs.gitlab.com/api/users/#get-the-status-of-a-user func (s *UsersService) GetUserStatus(uid any, options ...RequestOptionFunc) (*UserStatus, *Response, error) { - user, err := parseID(uid) - if err != nil { - return nil, nil, err - } - - u := fmt.Sprintf("users/%s/status", strings.TrimPrefix(user, "@")) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - status := new(UserStatus) - resp, err := s.client.Do(req, status) - if err != nil { - return nil, resp, err - } - - return status, resp, nil + return do[*UserStatus](s.client, + withPath("users/%s/status", UserID{uid}), + withRequestOpts(options...), + ) } // UserStatusOptions represents the options required to set the status @@ -528,62 +648,42 @@ type UserStatusOptions struct { ClearStatusAfter *ClearStatusAfterValue `url:"clear_status_after,omitempty" json:"clear_status_after,omitempty"` } -// SetUserStatus sets the user's status -// -// GitLab API docs: -// https://docs.gitlab.com/api/users/#set-your-user-status func (s *UsersService) SetUserStatus(opt *UserStatusOptions, options ...RequestOptionFunc) (*UserStatus, *Response, error) { - req, err := s.client.NewRequest(http.MethodPut, "user/status", opt, options) - if err != nil { - return nil, nil, err - } - - status := new(UserStatus) - resp, err := s.client.Do(req, status) - if err != nil { - return nil, resp, err - } - - return status, resp, nil + return do[*UserStatus](s.client, + withMethod(http.MethodPut), + withPath("user/status"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UserAssociationsCount represents the user associations count. 
// -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/users/#get-a-count-of-a-users-projects-groups-issues-and-merge-requests type UserAssociationsCount struct { - GroupsCount int `json:"groups_count"` - ProjectsCount int `json:"projects_count"` - IssuesCount int `json:"issues_count"` - MergeRequestsCount int `json:"merge_requests_count"` + GroupsCount int64 `json:"groups_count"` + ProjectsCount int64 `json:"projects_count"` + IssuesCount int64 `json:"issues_count"` + MergeRequestsCount int64 `json:"merge_requests_count"` } // GetUserAssociationsCount gets a list of a specified user associations. // -// Gitlab API docs: +// GitLab API docs: // https://docs.gitlab.com/api/users/#get-a-count-of-a-users-projects-groups-issues-and-merge-requests -func (s *UsersService) GetUserAssociationsCount(user int, options ...RequestOptionFunc) (*UserAssociationsCount, *Response, error) { - u := fmt.Sprintf("users/%d/associations_count", user) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - uac := new(UserAssociationsCount) - resp, err := s.client.Do(req, uac) - if err != nil { - return nil, resp, err - } - - return uac, resp, nil +func (s *UsersService) GetUserAssociationsCount(user int64, options ...RequestOptionFunc) (*UserAssociationsCount, *Response, error) { + return do[*UserAssociationsCount](s.client, + withPath("users/%d/associations_count", user), + withRequestOpts(options...), + ) } // SSHKey represents a SSH key. // // GitLab API docs: https://docs.gitlab.com/api/user_keys/#list-all-ssh-keys type SSHKey struct { - ID int `json:"id"` + ID int64 `json:"id"` Title string `json:"title"` Key string `json:"key"` CreatedAt *time.Time `json:"created_at"` @@ -594,95 +694,53 @@ type SSHKey struct { // ListSSHKeysOptions represents the available ListSSHKeys options. 
// // GitLab API docs: https://docs.gitlab.com/api/user_keys/#list-all-ssh-keys -type ListSSHKeysOptions ListOptions +type ListSSHKeysOptions struct { + ListOptions +} -// ListSSHKeys gets a list of currently authenticated user's SSH keys. -// -// GitLab API docs: https://docs.gitlab.com/api/user_keys/#list-all-ssh-keys func (s *UsersService) ListSSHKeys(opt *ListSSHKeysOptions, options ...RequestOptionFunc) ([]*SSHKey, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "user/keys", opt, options) - if err != nil { - return nil, nil, err - } - - var k []*SSHKey - resp, err := s.client.Do(req, &k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil + return do[[]*SSHKey](s.client, + withPath("user/keys"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ListSSHKeysForUserOptions represents the available ListSSHKeysForUser() options. // // GitLab API docs: // https://docs.gitlab.com/api/user_keys/#list-all-ssh-keys-for-a-user -type ListSSHKeysForUserOptions ListOptions +type ListSSHKeysForUserOptions struct { + ListOptions +} // ListSSHKeysForUser gets a list of a specified user's SSH keys. // +// uid can be either a user ID (int) or a username (string). If a username +// is provided with a leading "@" (e.g., "@johndoe"), it will be trimmed. 
+// // GitLab API docs: // https://docs.gitlab.com/api/user_keys/#list-all-ssh-keys-for-a-user func (s *UsersService) ListSSHKeysForUser(uid any, opt *ListSSHKeysForUserOptions, options ...RequestOptionFunc) ([]*SSHKey, *Response, error) { - user, err := parseID(uid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("users/%s/keys", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var k []*SSHKey - resp, err := s.client.Do(req, &k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil + return do[[]*SSHKey](s.client, + withPath("users/%s/keys", UserID{uid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetSSHKey gets a single key. -// -// GitLab API docs: https://docs.gitlab.com/api/user_keys/#get-an-ssh-key -func (s *UsersService) GetSSHKey(key int, options ...RequestOptionFunc) (*SSHKey, *Response, error) { - u := fmt.Sprintf("user/keys/%d", key) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(SSHKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil +func (s *UsersService) GetSSHKey(key int64, options ...RequestOptionFunc) (*SSHKey, *Response, error) { + return do[*SSHKey](s.client, + withPath("user/keys/%d", key), + withRequestOpts(options...), + ) } -// GetSSHKeyForUser gets a single key for a given user. 
-// -// GitLab API docs: https://docs.gitlab.com/api/user_keys/#get-an-ssh-key-for-a-user -func (s *UsersService) GetSSHKeyForUser(user int, key int, options ...RequestOptionFunc) (*SSHKey, *Response, error) { - u := fmt.Sprintf("users/%d/keys/%d", user, key) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(SSHKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil +func (s *UsersService) GetSSHKeyForUser(user int64, key int64, options ...RequestOptionFunc) (*SSHKey, *Response, error) { + return do[*SSHKey](s.client, + withPath("users/%d/keys/%d", user, key), + withRequestOpts(options...), + ) } // AddSSHKeyOptions represents the available AddSSHKey() options. @@ -695,123 +753,63 @@ type AddSSHKeyOptions struct { UsageType *string `url:"usage_type,omitempty" json:"usage_type,omitempty"` } -// AddSSHKey creates a new key owned by the currently authenticated user. -// -// GitLab API docs: https://docs.gitlab.com/api/user_keys/#add-an-ssh-key func (s *UsersService) AddSSHKey(opt *AddSSHKeyOptions, options ...RequestOptionFunc) (*SSHKey, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "user/keys", opt, options) - if err != nil { - return nil, nil, err - } - - k := new(SSHKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil + return do[*SSHKey](s.client, + withMethod(http.MethodPost), + withPath("user/keys"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// AddSSHKeyForUser creates new key owned by specified user. Available only for -// admin. 
-// -// GitLab API docs: https://docs.gitlab.com/api/user_keys/#add-an-ssh-key-for-a-user -func (s *UsersService) AddSSHKeyForUser(user int, opt *AddSSHKeyOptions, options ...RequestOptionFunc) (*SSHKey, *Response, error) { - u := fmt.Sprintf("users/%d/keys", user) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - k := new(SSHKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil +func (s *UsersService) AddSSHKeyForUser(user int64, opt *AddSSHKeyOptions, options ...RequestOptionFunc) (*SSHKey, *Response, error) { + return do[*SSHKey](s.client, + withMethod(http.MethodPost), + withPath("users/%d/keys", user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteSSHKey deletes key owned by currently authenticated user. This is an -// idempotent function and calling it on a key that is already deleted or not -// available results in 200 OK. -// -// GitLab API docs: -// https://docs.gitlab.com/api/user_keys/#delete-an-ssh-key -func (s *UsersService) DeleteSSHKey(key int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("user/keys/%d", key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *UsersService) DeleteSSHKey(key int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("user/keys/%d", key), + withRequestOpts(options...), + ) + return resp, err } -// DeleteSSHKeyForUser deletes key owned by a specified user. Available only -// for admin. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/user_keys/#delete-an-ssh-key-for-a-user -func (s *UsersService) DeleteSSHKeyForUser(user, key int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("users/%d/keys/%d", user, key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *UsersService) DeleteSSHKeyForUser(user, key int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("users/%d/keys/%d", user, key), + withRequestOpts(options...), + ) + return resp, err } // GPGKey represents a GPG key. // // GitLab API docs: https://docs.gitlab.com/api/user_keys/#list-all-gpg-keys type GPGKey struct { - ID int `json:"id"` + ID int64 `json:"id"` Key string `json:"key"` CreatedAt *time.Time `json:"created_at"` } -// ListGPGKeys gets a list of currently authenticated user’s GPG keys. -// -// GitLab API docs: https://docs.gitlab.com/api/user_keys/#list-all-gpg-keys func (s *UsersService) ListGPGKeys(options ...RequestOptionFunc) ([]*GPGKey, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "user/gpg_keys", nil, options) - if err != nil { - return nil, nil, err - } - - var ks []*GPGKey - resp, err := s.client.Do(req, &ks) - if err != nil { - return nil, resp, err - } - - return ks, resp, nil + return do[[]*GPGKey](s.client, + withPath("user/gpg_keys"), + withRequestOpts(options...), + ) } -// GetGPGKey gets a specific GPG key of currently authenticated user. 
-// -// GitLab API docs: https://docs.gitlab.com/api/user_keys/#get-a-gpg-key -func (s *UsersService) GetGPGKey(key int, options ...RequestOptionFunc) (*GPGKey, *Response, error) { - u := fmt.Sprintf("user/gpg_keys/%d", key) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(GPGKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil +func (s *UsersService) GetGPGKey(key int64, options ...RequestOptionFunc) (*GPGKey, *Response, error) { + return do[*GPGKey](s.client, + withPath("user/gpg_keys/%d", key), + withRequestOpts(options...), + ) } // AddGPGKeyOptions represents the available AddGPGKey() options. @@ -821,113 +819,54 @@ type AddGPGKeyOptions struct { Key *string `url:"key,omitempty" json:"key,omitempty"` } -// AddGPGKey creates a new GPG key owned by the currently authenticated user. -// -// GitLab API docs: https://docs.gitlab.com/api/user_keys/#add-a-gpg-key func (s *UsersService) AddGPGKey(opt *AddGPGKeyOptions, options ...RequestOptionFunc) (*GPGKey, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "user/gpg_keys", opt, options) - if err != nil { - return nil, nil, err - } - - k := new(GPGKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil + return do[*GPGKey](s.client, + withMethod(http.MethodPost), + withPath("user/gpg_keys"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteGPGKey deletes a GPG key owned by currently authenticated user. 
-// -// GitLab API docs: https://docs.gitlab.com/api/user_keys/#delete-a-gpg-key -func (s *UsersService) DeleteGPGKey(key int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("user/gpg_keys/%d", key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *UsersService) DeleteGPGKey(key int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("user/gpg_keys/%d", key), + withRequestOpts(options...), + ) + return resp, err } -// ListGPGKeysForUser gets a list of a specified user’s GPG keys. -// -// GitLab API docs: -// https://docs.gitlab.com/api/user_keys/#list-all-gpg-keys-for-a-user -func (s *UsersService) ListGPGKeysForUser(user int, options ...RequestOptionFunc) ([]*GPGKey, *Response, error) { - u := fmt.Sprintf("users/%d/gpg_keys", user) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var ks []*GPGKey - resp, err := s.client.Do(req, &ks) - if err != nil { - return nil, resp, err - } - - return ks, resp, nil +func (s *UsersService) ListGPGKeysForUser(user int64, options ...RequestOptionFunc) ([]*GPGKey, *Response, error) { + return do[[]*GPGKey](s.client, + withPath("users/%d/gpg_keys", user), + withRequestOpts(options...), + ) } -// GetGPGKeyForUser gets a specific GPG key for a given user. 
-// -// GitLab API docs: https://docs.gitlab.com/api/user_keys/#get-a-gpg-key-for-a-user -func (s *UsersService) GetGPGKeyForUser(user, key int, options ...RequestOptionFunc) (*GPGKey, *Response, error) { - u := fmt.Sprintf("users/%d/gpg_keys/%d", user, key) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(GPGKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil +func (s *UsersService) GetGPGKeyForUser(user, key int64, options ...RequestOptionFunc) (*GPGKey, *Response, error) { + return do[*GPGKey](s.client, + withPath("users/%d/gpg_keys/%d", user, key), + withRequestOpts(options...), + ) } -// AddGPGKeyForUser creates new GPG key owned by the specified user. -// -// GitLab API docs: -// https://docs.gitlab.com/api/user_keys/#add-a-gpg-key-for-a-user -func (s *UsersService) AddGPGKeyForUser(user int, opt *AddGPGKeyOptions, options ...RequestOptionFunc) (*GPGKey, *Response, error) { - u := fmt.Sprintf("users/%d/gpg_keys", user) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - k := new(GPGKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil +func (s *UsersService) AddGPGKeyForUser(user int64, opt *AddGPGKeyOptions, options ...RequestOptionFunc) (*GPGKey, *Response, error) { + return do[*GPGKey](s.client, + withMethod(http.MethodPost), + withPath("users/%d/gpg_keys", user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteGPGKeyForUser deletes a GPG key owned by a specified user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/user_keys/#delete-a-gpg-key-for-a-user -func (s *UsersService) DeleteGPGKeyForUser(user, key int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("users/%d/gpg_keys/%d", user, key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *UsersService) DeleteGPGKeyForUser(user, key int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("users/%d/gpg_keys/%d", user, key), + withRequestOpts(options...), + ) + return resp, err } // Email represents an Email. @@ -935,77 +874,39 @@ func (s *UsersService) DeleteGPGKeyForUser(user, key int, options ...RequestOpti // GitLab API docs: // https://docs.gitlab.com/api/user_email_addresses/#list-all-email-addresses type Email struct { - ID int `json:"id"` + ID int64 `json:"id"` Email string `json:"email"` ConfirmedAt *time.Time `json:"confirmed_at"` } -// ListEmails gets a list of currently authenticated user's Emails. -// -// GitLab API docs: -// https://docs.gitlab.com/api/user_email_addresses/#list-all-email-addresses func (s *UsersService) ListEmails(options ...RequestOptionFunc) ([]*Email, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "user/emails", nil, options) - if err != nil { - return nil, nil, err - } - - var e []*Email - resp, err := s.client.Do(req, &e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil + return do[[]*Email](s.client, + withPath("user/emails"), + withRequestOpts(options...), + ) } // ListEmailsForUserOptions represents the available ListEmailsForUser() options. // // GitLab API docs: // https://docs.gitlab.com/api/user_email_addresses/#list-all-email-addresses-for-a-user -type ListEmailsForUserOptions ListOptions - -// ListEmailsForUser gets a list of a specified user's Emails. 
Available -// only for admin -// -// GitLab API docs: -// https://docs.gitlab.com/api/user_email_addresses/#list-all-email-addresses-for-a-user -func (s *UsersService) ListEmailsForUser(user int, opt *ListEmailsForUserOptions, options ...RequestOptionFunc) ([]*Email, *Response, error) { - u := fmt.Sprintf("users/%d/emails", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var e []*Email - resp, err := s.client.Do(req, &e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil +type ListEmailsForUserOptions struct { + ListOptions } -// GetEmail gets a single email. -// -// GitLab API docs: -// https://docs.gitlab.com/api/user_email_addresses/#get-details-on-an-email-address -func (s *UsersService) GetEmail(email int, options ...RequestOptionFunc) (*Email, *Response, error) { - u := fmt.Sprintf("user/emails/%d", email) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - e := new(Email) - resp, err := s.client.Do(req, e) - if err != nil { - return nil, resp, err - } +func (s *UsersService) ListEmailsForUser(user int64, opt *ListEmailsForUserOptions, options ...RequestOptionFunc) ([]*Email, *Response, error) { + return do[[]*Email](s.client, + withPath("users/%d/emails", user), + withAPIOpts(opt), + withRequestOpts(options...), + ) +} - return e, resp, nil +func (s *UsersService) GetEmail(email int64, options ...RequestOptionFunc) (*Email, *Response, error) { + return do[*Email](s.client, + withPath("user/emails/%d", email), + withRequestOpts(options...), + ) } // AddEmailOptions represents the available AddEmail() options. @@ -1017,296 +918,197 @@ type AddEmailOptions struct { SkipConfirmation *bool `url:"skip_confirmation,omitempty" json:"skip_confirmation,omitempty"` } -// AddEmail creates a new email owned by the currently authenticated user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/user_email_addresses/#add-an-email-address func (s *UsersService) AddEmail(opt *AddEmailOptions, options ...RequestOptionFunc) (*Email, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "user/emails", opt, options) - if err != nil { - return nil, nil, err - } - - e := new(Email) - resp, err := s.client.Do(req, e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil + return do[*Email](s.client, + withMethod(http.MethodPost), + withPath("user/emails"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// AddEmailForUser creates new email owned by specified user. Available only for -// admin. -// -// GitLab API docs: -// https://docs.gitlab.com/api/user_email_addresses/#add-an-email-address-for-a-user -func (s *UsersService) AddEmailForUser(user int, opt *AddEmailOptions, options ...RequestOptionFunc) (*Email, *Response, error) { - u := fmt.Sprintf("users/%d/emails", user) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - e := new(Email) - resp, err := s.client.Do(req, e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil +func (s *UsersService) AddEmailForUser(user int64, opt *AddEmailOptions, options ...RequestOptionFunc) (*Email, *Response, error) { + return do[*Email](s.client, + withMethod(http.MethodPost), + withPath("users/%d/emails", user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteEmail deletes email owned by currently authenticated user. This is an -// idempotent function and calling it on a key that is already deleted or not -// available results in 200 OK. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/user_email_addresses/#delete-an-email-address -func (s *UsersService) DeleteEmail(email int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("user/emails/%d", email) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *UsersService) DeleteEmail(email int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("user/emails/%d", email), + withRequestOpts(options...), + ) + return resp, err } -// DeleteEmailForUser deletes email owned by a specified user. Available only -// for admin. -// -// GitLab API docs: -// https://docs.gitlab.com/api/user_email_addresses/#delete-an-email-address-for-a-user -func (s *UsersService) DeleteEmailForUser(user, email int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("users/%d/emails/%d", user, email) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *UsersService) DeleteEmailForUser(user, email int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("users/%d/emails/%d", user, email), + withRequestOpts(options...), + ) + return resp, err } -// BlockUser blocks the specified user. Available only for admin. 
-// -// GitLab API docs: https://docs.gitlab.com/api/user_moderation/#block-access-to-a-user -func (s *UsersService) BlockUser(user int, options ...RequestOptionFunc) error { - u := fmt.Sprintf("users/%d/block", user) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return err - } - - resp, err := s.client.Do(req, nil) - if err != nil && resp == nil { - return err - } - - switch resp.StatusCode { - case 201: - return nil - case 403: - return ErrUserBlockPrevented - case 404: - return ErrUserNotFound - default: - return fmt.Errorf("%w: %d", errUnexpectedResultCode, resp.StatusCode) - } +func (s *UsersService) BlockUser(user int64, options ...RequestOptionFunc) error { + _, _, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("users/%d/block", user), + withRequestOpts(options...), + ) + return err } -// UnblockUser unblocks the specified user. Available only for admin. -// -// GitLab API docs: https://docs.gitlab.com/api/user_moderation/#unblock-access-to-a-user -func (s *UsersService) UnblockUser(user int, options ...RequestOptionFunc) error { - u := fmt.Sprintf("users/%d/unblock", user) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return err - } - - resp, err := s.client.Do(req, nil) +func (s *UsersService) UnblockUser(user int64, options ...RequestOptionFunc) error { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("users/%d/unblock", user), + withRequestOpts(options...), + ) if err != nil && resp == nil { return err } switch resp.StatusCode { - case 201: + case http.StatusCreated: return nil - case 403: + case http.StatusForbidden: return ErrUserUnblockPrevented - case 404: + case http.StatusNotFound: return ErrUserNotFound default: return fmt.Errorf("%w: %d", errUnexpectedResultCode, resp.StatusCode) } } -// BanUser bans the specified user. Available only for admin. 
-// -// GitLab API docs: https://docs.gitlab.com/api/user_moderation/#ban-a-user -func (s *UsersService) BanUser(user int, options ...RequestOptionFunc) error { - u := fmt.Sprintf("users/%d/ban", user) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return err - } - - resp, err := s.client.Do(req, nil) +func (s *UsersService) BanUser(user int64, options ...RequestOptionFunc) error { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("users/%d/ban", user), + withRequestOpts(options...), + ) if err != nil && resp == nil { return err } switch resp.StatusCode { - case 201: + case http.StatusCreated: return nil - case 404: + case http.StatusNotFound: return ErrUserNotFound default: return fmt.Errorf("%w: %d", errUnexpectedResultCode, resp.StatusCode) } } -// UnbanUser unbans the specified user. Available only for admin. -// -// GitLab API docs: https://docs.gitlab.com/api/user_moderation/#unban-a-user -func (s *UsersService) UnbanUser(user int, options ...RequestOptionFunc) error { - u := fmt.Sprintf("users/%d/unban", user) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return err - } - - resp, err := s.client.Do(req, nil) +func (s *UsersService) UnbanUser(user int64, options ...RequestOptionFunc) error { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("users/%d/unban", user), + withRequestOpts(options...), + ) if err != nil && resp == nil { return err } switch resp.StatusCode { - case 201: + case http.StatusCreated: return nil - case 404: + case http.StatusNotFound: return ErrUserNotFound default: return fmt.Errorf("%w: %d", errUnexpectedResultCode, resp.StatusCode) } } -// DeactivateUser deactivate the specified user. Available only for admin. 
-// -// GitLab API docs: https://docs.gitlab.com/api/user_moderation/#deactivate-a-user -func (s *UsersService) DeactivateUser(user int, options ...RequestOptionFunc) error { - u := fmt.Sprintf("users/%d/deactivate", user) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return err - } - - resp, err := s.client.Do(req, nil) +func (s *UsersService) DeactivateUser(user int64, options ...RequestOptionFunc) error { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("users/%d/deactivate", user), + withRequestOpts(options...), + ) if err != nil && resp == nil { return err } switch resp.StatusCode { - case 201: + case http.StatusCreated: return nil - case 403: + case http.StatusForbidden: return ErrUserDeactivatePrevented - case 404: + case http.StatusNotFound: return ErrUserNotFound default: return fmt.Errorf("%w: %d", errUnexpectedResultCode, resp.StatusCode) } } -// ActivateUser activate the specified user. Available only for admin. -// -// GitLab API docs: https://docs.gitlab.com/api/user_moderation/#reactivate-a-user -func (s *UsersService) ActivateUser(user int, options ...RequestOptionFunc) error { - u := fmt.Sprintf("users/%d/activate", user) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return err - } - - resp, err := s.client.Do(req, nil) +func (s *UsersService) ActivateUser(user int64, options ...RequestOptionFunc) error { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("users/%d/activate", user), + withRequestOpts(options...), + ) if err != nil && resp == nil { return err } switch resp.StatusCode { - case 201: + case http.StatusCreated: return nil - case 403: + case http.StatusForbidden: return ErrUserActivatePrevented - case 404: + case http.StatusNotFound: return ErrUserNotFound default: return fmt.Errorf("%w: %d", errUnexpectedResultCode, resp.StatusCode) } } -// ApproveUser approve the specified user. 
Available only for admin. -// -// GitLab API docs: https://docs.gitlab.com/api/user_moderation/#approve-access-to-a-user -func (s *UsersService) ApproveUser(user int, options ...RequestOptionFunc) error { - u := fmt.Sprintf("users/%d/approve", user) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return err - } - - resp, err := s.client.Do(req, nil) +func (s *UsersService) ApproveUser(user int64, options ...RequestOptionFunc) error { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("users/%d/approve", user), + withRequestOpts(options...), + ) if err != nil && resp == nil { return err } switch resp.StatusCode { - case 201: + case http.StatusCreated: return nil - case 403: + case http.StatusForbidden: return ErrUserApprovePrevented - case 404: + case http.StatusNotFound: return ErrUserNotFound default: return fmt.Errorf("%w: %d", errUnexpectedResultCode, resp.StatusCode) } } -// RejectUser reject the specified user. Available only for admin. 
-// -// GitLab API docs: https://docs.gitlab.com/api/user_moderation/#reject-access-to-a-user -func (s *UsersService) RejectUser(user int, options ...RequestOptionFunc) error { - u := fmt.Sprintf("users/%d/reject", user) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return err - } - - resp, err := s.client.Do(req, nil) +func (s *UsersService) RejectUser(user int64, options ...RequestOptionFunc) error { + _, resp, err := do[none](s.client, + withMethod(http.MethodPost), + withPath("users/%d/reject", user), + withRequestOpts(options...), + ) if err != nil && resp == nil { return err } switch resp.StatusCode { - case 200: + case http.StatusOK: return nil - case 403: + case http.StatusForbidden: return ErrUserRejectPrevented - case 404: + case http.StatusNotFound: return ErrUserNotFound - case 409: + case http.StatusConflict: return ErrUserConflict default: return fmt.Errorf("%w: %d", errUnexpectedResultCode, resp.StatusCode) @@ -1318,7 +1120,7 @@ func (s *UsersService) RejectUser(user int, options ...RequestOptionFunc) error // GitLab API docs: // https://docs.gitlab.com/api/user_tokens/#list-all-impersonation-tokens-for-a-user type ImpersonationToken struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Active bool `json:"active"` Token string `json:"token"` @@ -1339,46 +1141,19 @@ type GetAllImpersonationTokensOptions struct { State *string `url:"state,omitempty" json:"state,omitempty"` } -// GetAllImpersonationTokens retrieves all impersonation tokens of a user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/user_tokens/#list-all-impersonation-tokens-for-a-user -func (s *UsersService) GetAllImpersonationTokens(user int, opt *GetAllImpersonationTokensOptions, options ...RequestOptionFunc) ([]*ImpersonationToken, *Response, error) { - u := fmt.Sprintf("users/%d/impersonation_tokens", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ts []*ImpersonationToken - resp, err := s.client.Do(req, &ts) - if err != nil { - return nil, resp, err - } - - return ts, resp, nil +func (s *UsersService) GetAllImpersonationTokens(user int64, opt *GetAllImpersonationTokensOptions, options ...RequestOptionFunc) ([]*ImpersonationToken, *Response, error) { + return do[[]*ImpersonationToken](s.client, + withPath("users/%d/impersonation_tokens", user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// GetImpersonationToken retrieves an impersonation token of a user. -// -// GitLab API docs: -// https://docs.gitlab.com/api/user_tokens/#get-an-impersonation-token-for-a-user -func (s *UsersService) GetImpersonationToken(user, token int, options ...RequestOptionFunc) (*ImpersonationToken, *Response, error) { - u := fmt.Sprintf("users/%d/impersonation_tokens/%d", user, token) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(ImpersonationToken) - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil +func (s *UsersService) GetImpersonationToken(user, token int64, options ...RequestOptionFunc) (*ImpersonationToken, *Response, error) { + return do[*ImpersonationToken](s.client, + withPath("users/%d/impersonation_tokens/%d", user, token), + withRequestOpts(options...), + ) } // CreateImpersonationTokenOptions represents the available @@ -1392,40 +1167,22 @@ type CreateImpersonationTokenOptions struct { ExpiresAt *time.Time 
`url:"expires_at,omitempty" json:"expires_at,omitempty"` } -// CreateImpersonationToken creates an impersonation token. -// -// GitLab API docs: -// https://docs.gitlab.com/api/user_tokens/#create-an-impersonation-token -func (s *UsersService) CreateImpersonationToken(user int, opt *CreateImpersonationTokenOptions, options ...RequestOptionFunc) (*ImpersonationToken, *Response, error) { - u := fmt.Sprintf("users/%d/impersonation_tokens", user) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(ImpersonationToken) - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil +func (s *UsersService) CreateImpersonationToken(user int64, opt *CreateImpersonationTokenOptions, options ...RequestOptionFunc) (*ImpersonationToken, *Response, error) { + return do[*ImpersonationToken](s.client, + withMethod(http.MethodPost), + withPath("users/%d/impersonation_tokens", user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// RevokeImpersonationToken revokes an impersonation token. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/user_tokens/#revoke-an-impersonation-token -func (s *UsersService) RevokeImpersonationToken(user, token int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("users/%d/impersonation_tokens/%d", user, token) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) +func (s *UsersService) RevokeImpersonationToken(user, token int64, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("users/%d/impersonation_tokens/%d", user, token), + withRequestOpts(options...), + ) + return resp, err } // CreatePersonalAccessTokenOptions represents the available @@ -1440,25 +1197,13 @@ type CreatePersonalAccessTokenOptions struct { Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` } -// CreatePersonalAccessToken creates a personal access token. -// -// GitLab API docs: -// https://docs.gitlab.com/api/user_tokens/#create-a-personal-access-token-for-a-user -func (s *UsersService) CreatePersonalAccessToken(user int, opt *CreatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - u := fmt.Sprintf("users/%d/personal_access_tokens", user) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(PersonalAccessToken) - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil +func (s *UsersService) CreatePersonalAccessToken(user int64, opt *CreatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { + return do[*PersonalAccessToken](s.client, + withMethod(http.MethodPost), + withPath("users/%d/personal_access_tokens", user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // 
CreatePersonalAccessTokenForCurrentUserOptions represents the available @@ -1473,25 +1218,13 @@ type CreatePersonalAccessTokenForCurrentUserOptions struct { ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` } -// CreatePersonalAccessTokenForCurrentUser creates a personal access token with limited scopes for the currently authenticated user. -// -// GitLab API docs: -// https://docs.gitlab.com/api/user_tokens/#create-a-personal-access-token func (s *UsersService) CreatePersonalAccessTokenForCurrentUser(opt *CreatePersonalAccessTokenForCurrentUserOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - u := "user/personal_access_tokens" - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(PersonalAccessToken) - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil + return do[*PersonalAccessToken](s.client, + withMethod(http.MethodPost), + withPath("user/personal_access_tokens"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // UserActivity represents an entry in the user/activities response @@ -1512,23 +1245,12 @@ type GetUserActivitiesOptions struct { From *ISOTime `url:"from,omitempty" json:"from,omitempty"` } -// GetUserActivities retrieves user activities (admin only) -// -// GitLab API docs: -// https://docs.gitlab.com/api/users/#list-a-users-activity func (s *UsersService) GetUserActivities(opt *GetUserActivitiesOptions, options ...RequestOptionFunc) ([]*UserActivity, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "user/activities", opt, options) - if err != nil { - return nil, nil, err - } - - var t []*UserActivity - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil + return do[[]*UserActivity](s.client, + withPath("user/activities"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // 
UserMembership represents a membership of the user in a namespace or project. @@ -1536,7 +1258,7 @@ func (s *UsersService) GetUserActivities(opt *GetUserActivitiesOptions, options // GitLab API docs: // https://docs.gitlab.com/api/users/#list-projects-and-groups-that-a-user-is-a-member-of type UserMembership struct { - SourceID int `json:"source_id"` + SourceID int64 `json:"source_id"` SourceName string `json:"source_name"` SourceType string `json:"source_type"` AccessLevel AccessLevelValue `json:"access_level"` @@ -1551,52 +1273,32 @@ type GetUserMembershipOptions struct { Type *string `url:"type,omitempty" json:"type,omitempty"` } -// GetUserMemberships retrieves a list of the user's memberships. -// -// GitLab API docs: -// https://docs.gitlab.com/api/users/#list-projects-and-groups-that-a-user-is-a-member-of -func (s *UsersService) GetUserMemberships(user int, opt *GetUserMembershipOptions, options ...RequestOptionFunc) ([]*UserMembership, *Response, error) { - u := fmt.Sprintf("users/%d/memberships", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*UserMembership - resp, err := s.client.Do(req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil +func (s *UsersService) GetUserMemberships(user int64, opt *GetUserMembershipOptions, options ...RequestOptionFunc) ([]*UserMembership, *Response, error) { + return do[[]*UserMembership](s.client, + withPath("users/%d/memberships", user), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DisableTwoFactor disables two factor authentication for the specified user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/users/#disable-two-factor-authentication-for-a-user -func (s *UsersService) DisableTwoFactor(user int, options ...RequestOptionFunc) error { - u := fmt.Sprintf("users/%d/disable_two_factor", user) - - req, err := s.client.NewRequest(http.MethodPatch, u, nil, options) - if err != nil { - return err - } - - resp, err := s.client.Do(req, nil) +func (s *UsersService) DisableTwoFactor(user int64, options ...RequestOptionFunc) error { + _, resp, err := do[none](s.client, + withMethod(http.MethodPatch), + withPath("users/%d/disable_two_factor", user), + withRequestOpts(options...), + ) if err != nil && resp == nil { return err } switch resp.StatusCode { - case 204: + case http.StatusNoContent: return nil - case 400: + case http.StatusBadRequest: return ErrUserTwoFactorNotEnabled - case 403: + case http.StatusForbidden: return ErrUserDisableTwoFactorPrevented - case 404: + case http.StatusNotFound: return ErrUserNotFound default: return fmt.Errorf("%w: %d", errUnexpectedResultCode, resp.StatusCode) @@ -1608,7 +1310,7 @@ func (s *UsersService) DisableTwoFactor(user int, options ...RequestOptionFunc) // GitLab API docs: // https://docs.gitlab.com/api/users/#create-a-runner-linked-to-a-user type UserRunner struct { - ID int `json:"id"` + ID int64 `json:"id"` Token string `json:"token"` TokenExpiresAt *time.Time `json:"token_expires_at"` } @@ -1619,35 +1321,25 @@ type UserRunner struct { // https://docs.gitlab.com/api/users/#create-a-runner-linked-to-a-user type CreateUserRunnerOptions struct { RunnerType *string `url:"runner_type,omitempty" json:"runner_type,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - ProjectID *int `url:"project_id,omitempty" json:"project_id,omitempty"` + GroupID *int64 `url:"group_id,omitempty" json:"group_id,omitempty"` + ProjectID *int64 `url:"project_id,omitempty" json:"project_id,omitempty"` Description *string `url:"description,omitempty" 
json:"description,omitempty"` Paused *bool `url:"paused,omitempty" json:"paused,omitempty"` Locked *bool `url:"locked,omitempty" json:"locked,omitempty"` RunUntagged *bool `url:"run_untagged,omitempty" json:"run_untagged,omitempty"` TagList *[]string `url:"tag_list,omitempty" json:"tag_list,omitempty"` AccessLevel *string `url:"access_level,omitempty" json:"access_level,omitempty"` - MaximumTimeout *int `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"` + MaximumTimeout *int64 `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"` MaintenanceNote *string `url:"maintenance_note,omitempty" json:"maintenance_note,omitempty"` } -// CreateUserRunner creates a runner linked to the current user. -// -// GitLab API docs: -// https://docs.gitlab.com/api/users/#create-a-runner-linked-to-a-user func (s *UsersService) CreateUserRunner(opts *CreateUserRunnerOptions, options ...RequestOptionFunc) (*UserRunner, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "user/runners", opts, options) - if err != nil { - return nil, nil, err - } - - r := new(UserRunner) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil + return do[*UserRunner](s.client, + withMethod(http.MethodPost), + withPath("user/runners"), + withAPIOpts(opts), + withRequestOpts(options...), + ) } // CreateServiceAccountUserOptions represents the available CreateServiceAccountUser() options. @@ -1660,85 +1352,77 @@ type CreateServiceAccountUserOptions struct { Email *string `url:"email,omitempty" json:"email,omitempty"` } -// CreateServiceAccountUser creates a new service account user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/user_service_accounts/#create-a-service-account-user func (s *UsersService) CreateServiceAccountUser(opts *CreateServiceAccountUserOptions, options ...RequestOptionFunc) (*User, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "service_accounts", opts, options) - if err != nil { - return nil, nil, err - } - - usr := new(User) - resp, err := s.client.Do(req, usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil + return do[*User](s.client, + withMethod(http.MethodPost), + withPath("service_accounts"), + withAPIOpts(opts), + withRequestOpts(options...), + ) } -// ListServiceAccounts lists all service accounts. -// -// GitLab API docs: -// https://docs.gitlab.com/api/user_service_accounts/#list-all-service-account-users func (s *UsersService) ListServiceAccounts(opt *ListServiceAccountsOptions, options ...RequestOptionFunc) ([]*ServiceAccount, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "service_accounts", opt, options) - if err != nil { - return nil, nil, err - } - - var sas []*ServiceAccount - resp, err := s.client.Do(req, &sas) - if err != nil { - return nil, resp, err - } - - return sas, resp, nil + return do[[]*ServiceAccount](s.client, + withPath("service_accounts"), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// UploadAvatar uploads an avatar to the current user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/users/#upload-an-avatar-for-yourself func (s *UsersService) UploadAvatar(avatar io.Reader, filename string, options ...RequestOptionFunc) (*User, *Response, error) { - u := "user/avatar" - - req, err := s.client.UploadRequest( - http.MethodPut, - u, - avatar, - filename, - UploadAvatar, - nil, - options, + return do[*User](s.client, + withMethod(http.MethodPut), + withPath("user/avatar"), + withUpload(avatar, filename, UploadAvatar), + withRequestOpts(options...), ) - if err != nil { - return nil, nil, err - } - - usr := new(User) - resp, err := s.client.Do(req, usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil } -// DeleteUserIdentity deletes a user's authentication identity using the provider -// name associated with that identity. Only available for administrators. -// -// GitLab API docs: -// https://docs.gitlab.com/api/users/#delete-authentication-identity-from-a-user -func (s *UsersService) DeleteUserIdentity(user int, provider string, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("users/%d/identities/%s", user, provider) +func (s *UsersService) DeleteUserIdentity(user int64, provider string, options ...RequestOptionFunc) (*Response, error) { + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("users/%d/identities/%s", user, provider), + withRequestOpts(options...), + ) + return resp, err +} + +// userCoreBasicTemplate defines the common fields for a user in GraphQL queries. +var userCoreBasicTemplate = template.Must(template.New("UserCoreBasic").Parse(` + id + username + name + state + createdAt + avatarUrl + webUrl +`)) + +// userCoreBasicGQL represents the UserCore GraphQL type. It unwraps to a *BasicUser type. 
+type userCoreBasicGQL struct { + ID gidGQL `json:"id"` + Username string `json:"username"` + Name string `json:"name"` + State string `json:"state"` + CreatedAt *time.Time `json:"createdAt"` + AvatarURL string `json:"avatarUrl"` + WebURL string `json:"webUrl"` +} - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err +// unwrap converts the GraphQL data structure to a *BasicUser. +func (u userCoreBasicGQL) unwrap() *BasicUser { + if u.Username == "" { + return nil } - return s.client.Do(req, nil) + return &BasicUser{ + ID: u.ID.Int64, + Username: u.Username, + Name: u.Name, + State: u.State, + Locked: u.State != "active", + CreatedAt: u.CreatedAt, + AvatarURL: u.AvatarURL, + WebURL: u.WebURL, + } } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/validate.go b/vendor/gitlab.com/gitlab-org/api/client-go/validate.go index 9b05ad4ab3..0aa6e58708 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/validate.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/validate.go @@ -16,14 +16,19 @@ package gitlab -import ( - "fmt" - "net/http" -) +import "net/http" type ( ValidateServiceInterface interface { + // ProjectNamespaceLint validates .gitlab-ci.yml content by project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/lint/#validate-sample-cicd-configuration ProjectNamespaceLint(pid any, opt *ProjectNamespaceLintOptions, options ...RequestOptionFunc) (*ProjectLintResult, *Response, error) + // ProjectLint validates .gitlab-ci.yml content by project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/lint/#validate-a-projects-cicd-configuration ProjectLint(pid any, opt *ProjectLintOptions, options ...RequestOptionFunc) (*ProjectLintResult, *Response, error) } @@ -86,29 +91,13 @@ type ProjectNamespaceLintOptions struct { Ref *string `url:"ref,omitempty" json:"ref,omitempty"` } -// ProjectNamespaceLint validates .gitlab-ci.yml content by project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/lint/#validate-sample-cicd-configuration func (s *ValidateService) ProjectNamespaceLint(pid any, opt *ProjectNamespaceLintOptions, options ...RequestOptionFunc) (*ProjectLintResult, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/ci/lint", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, &opt, options) - if err != nil { - return nil, nil, err - } - - l := new(ProjectLintResult) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil + return do[*ProjectLintResult](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/ci/lint", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // ProjectLintOptions represents the available ProjectLint() options. @@ -123,27 +112,10 @@ type ProjectLintOptions struct { Ref *string `url:"ref,omitempty" json:"ref,omitempty"` } -// ProjectLint validates .gitlab-ci.yml content by project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/lint/#validate-a-projects-cicd-configuration func (s *ValidateService) ProjectLint(pid any, opt *ProjectLintOptions, options ...RequestOptionFunc) (*ProjectLintResult, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/ci/lint", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, &opt, options) - if err != nil { - return nil, nil, err - } - - l := new(ProjectLintResult) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil + return do[*ProjectLintResult](s.client, + withPath("projects/%s/ci/lint", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/version.go b/vendor/gitlab.com/gitlab-org/api/client-go/version.go index 90bd2552a1..c548f1c7d1 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/version.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/version.go @@ -16,8 +16,6 @@ package gitlab -import "net/http" - type ( VersionServiceInterface interface { GetVersion(options ...RequestOptionFunc) (*Version, *Response, error) @@ -51,16 +49,8 @@ func (s Version) String() string { // // GitLab API docs: https://docs.gitlab.com/api/version/ func (s *VersionService) GetVersion(options ...RequestOptionFunc) (*Version, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "version", nil, options) - if err != nil { - return nil, nil, err - } - - v := new(Version) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil + return do[*Version](s.client, + withPath("version"), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/wikis.go b/vendor/gitlab.com/gitlab-org/api/client-go/wikis.go index e17b2041ce..65b3d85d94 100644 --- a/vendor/gitlab.com/gitlab-org/api/client-go/wikis.go +++ 
b/vendor/gitlab.com/gitlab-org/api/client-go/wikis.go @@ -16,24 +16,49 @@ package gitlab import ( - "fmt" "io" "net/http" - "net/url" ) type ( WikisServiceInterface interface { + // ListWikis lists all pages of the wiki of the given project id. + // When with_content is set, it also returns the content of the pages. + // + // GitLab API docs: + // https://docs.gitlab.com/api/wikis/#list-all-wiki-pages ListWikis(pid any, opt *ListWikisOptions, options ...RequestOptionFunc) ([]*Wiki, *Response, error) + // GetWikiPage gets a wiki page for a given project. + // + // GitLab API docs: + // https://docs.gitlab.com/api/wikis/#retrieve-a-wiki-page GetWikiPage(pid any, slug string, opt *GetWikiPageOptions, options ...RequestOptionFunc) (*Wiki, *Response, error) + // CreateWikiPage creates a new wiki page for the given repository with + // the given title, slug, and content. + // + // GitLab API docs: + // https://docs.gitlab.com/api/wikis/#create-a-wiki-page CreateWikiPage(pid any, opt *CreateWikiPageOptions, options ...RequestOptionFunc) (*Wiki, *Response, error) + // EditWikiPage Updates an existing wiki page. At least one parameter is + // required to update the wiki page. + // + // GitLab API docs: + // https://docs.gitlab.com/api/wikis/#update-a-wiki-page EditWikiPage(pid any, slug string, opt *EditWikiPageOptions, options ...RequestOptionFunc) (*Wiki, *Response, error) + // DeleteWikiPage deletes a wiki page with a given slug. + // + // GitLab API docs: + // https://docs.gitlab.com/api/wikis/#delete-a-wiki-page DeleteWikiPage(pid any, slug string, options ...RequestOptionFunc) (*Response, error) + // UploadWikiAttachment uploads a file to the attachment folder inside the wiki’s repository. The attachment folder is the uploads folder. 
+ // + // GitLab API docs: + // https://docs.gitlab.com/api/wikis/#upload-an-attachment-to-the-wiki-repository UploadWikiAttachment(pid any, content io.Reader, filename string, opt *UploadWikiAttachmentOptions, options ...RequestOptionFunc) (*WikiAttachment, *Response, error) } // WikisService handles communication with the wikis related methods of - // the Gitlab API. + // the GitLab API. // // GitLab API docs: // https://docs.gitlab.com/api/wikis/ @@ -92,30 +117,12 @@ type ListWikisOptions struct { WithContent *bool `url:"with_content,omitempty" json:"with_content,omitempty"` } -// ListWikis lists all pages of the wiki of the given project id. -// When with_content is set, it also returns the content of the pages. -// -// GitLab API docs: -// https://docs.gitlab.com/api/wikis/#list-wiki-pages func (s *WikisService) ListWikis(pid any, opt *ListWikisOptions, options ...RequestOptionFunc) ([]*Wiki, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/wikis", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ws []*Wiki - resp, err := s.client.Do(req, &ws) - if err != nil { - return nil, resp, err - } - - return ws, resp, nil + return do[[]*Wiki](s.client, + withPath("projects/%s/wikis", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // GetWikiPageOptions represents options to GetWikiPage @@ -127,29 +134,12 @@ type GetWikiPageOptions struct { Version *string `url:"version,omitempty" json:"version,omitempty"` } -// GetWikiPage gets a wiki page for a given project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/api/wikis/#get-a-wiki-page func (s *WikisService) GetWikiPage(pid any, slug string, opt *GetWikiPageOptions, options ...RequestOptionFunc) (*Wiki, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/wikis/%s", PathEscape(project), url.PathEscape(slug)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - w := new(Wiki) - resp, err := s.client.Do(req, w) - if err != nil { - return nil, resp, err - } - - return w, resp, nil + return do[*Wiki](s.client, + withPath("projects/%s/wikis/%s", ProjectID{pid}, slug), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // CreateWikiPageOptions represents options to CreateWikiPage. @@ -162,30 +152,13 @@ type CreateWikiPageOptions struct { Format *WikiFormatValue `url:"format,omitempty" json:"format,omitempty"` } -// CreateWikiPage creates a new wiki page for the given repository with -// the given title, slug, and content. -// -// GitLab API docs: -// https://docs.gitlab.com/api/wikis/#create-a-new-wiki-page func (s *WikisService) CreateWikiPage(pid any, opt *CreateWikiPageOptions, options ...RequestOptionFunc) (*Wiki, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/wikis", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - w := new(Wiki) - resp, err := s.client.Do(req, w) - if err != nil { - return nil, resp, err - } - - return w, resp, nil + return do[*Wiki](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/wikis", ProjectID{pid}), + withAPIOpts(opt), + withRequestOpts(options...), + ) } // EditWikiPageOptions represents options to EditWikiPage. 
@@ -198,49 +171,22 @@ type EditWikiPageOptions struct { Format *WikiFormatValue `url:"format,omitempty" json:"format,omitempty"` } -// EditWikiPage Updates an existing wiki page. At least one parameter is -// required to update the wiki page. -// -// GitLab API docs: -// https://docs.gitlab.com/api/wikis/#edit-an-existing-wiki-page func (s *WikisService) EditWikiPage(pid any, slug string, opt *EditWikiPageOptions, options ...RequestOptionFunc) (*Wiki, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/wikis/%s", PathEscape(project), url.PathEscape(slug)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - w := new(Wiki) - resp, err := s.client.Do(req, w) - if err != nil { - return nil, resp, err - } - - return w, resp, nil + return do[*Wiki](s.client, + withMethod(http.MethodPut), + withPath("projects/%s/wikis/%s", ProjectID{pid}, slug), + withAPIOpts(opt), + withRequestOpts(options...), + ) } -// DeleteWikiPage deletes a wiki page with a given slug. -// -// GitLab API docs: -// https://docs.gitlab.com/api/wikis/#delete-a-wiki-page func (s *WikisService) DeleteWikiPage(pid any, slug string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/wikis/%s", PathEscape(project), url.PathEscape(slug)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) + _, resp, err := do[none](s.client, + withMethod(http.MethodDelete), + withPath("projects/%s/wikis/%s", ProjectID{pid}, slug), + withRequestOpts(options...), + ) + return resp, err } // UploadWikiAttachmentOptions represents options to UploadWikiAttachment. 
@@ -251,27 +197,12 @@ type UploadWikiAttachmentOptions struct { Branch *string `url:"branch,omitempty" json:"branch,omitempty"` } -// UploadWikiAttachment uploads a file to the attachment folder inside the wiki’s repository. The attachment folder is the uploads folder. -// -// GitLab API docs: -// https://docs.gitlab.com/api/wikis/#upload-an-attachment-to-the-wiki-repository func (s *WikisService) UploadWikiAttachment(pid any, content io.Reader, filename string, opt *UploadWikiAttachmentOptions, options ...RequestOptionFunc) (*WikiAttachment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/wikis/attachments", PathEscape(project)) - - req, err := s.client.UploadRequest(http.MethodPost, u, content, filename, UploadFile, opt, options) - if err != nil { - return nil, nil, err - } - - attachment := new(WikiAttachment) - resp, err := s.client.Do(req, attachment) - if err != nil { - return nil, resp, err - } - - return attachment, resp, nil + return do[*WikiAttachment](s.client, + withMethod(http.MethodPost), + withPath("projects/%s/wikis/attachments", ProjectID{pid}), + withUpload(content, filename, UploadFile), + withAPIOpts(opt), + withRequestOpts(options...), + ) } diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/workitems.go b/vendor/gitlab.com/gitlab-org/api/client-go/workitems.go new file mode 100644 index 0000000000..23c959ab3e --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/workitems.go @@ -0,0 +1,455 @@ +package gitlab + +import ( + "errors" + "strconv" + "strings" + "text/template" + "time" +) + +type ( + WorkItemsServiceInterface interface { + GetWorkItem(fullPath string, iid int64, options ...RequestOptionFunc) (*WorkItem, *Response, error) + ListWorkItems(fullPath string, opt *ListWorkItemsOptions, options ...RequestOptionFunc) ([]*WorkItem, *Response, error) + } + + // WorkItemsService handles communication with the work item related methods + // of the 
GitLab API. + // + // GitLab API docs: https://docs.gitlab.com/api/graphql/reference/#workitem + WorkItemsService struct { + client *Client + } +) + +var _ WorkItemsServiceInterface = (*WorkItemsService)(nil) + +// WorkItem represents a GitLab work item. +// +// GitLab API docs: https://docs.gitlab.com/api/graphql/reference/#workitem +type WorkItem struct { + ID int64 + IID int64 + Type string + State string + Status string + Title string + Description string + CreatedAt *time.Time + UpdatedAt *time.Time + ClosedAt *time.Time + WebURL string + Author *BasicUser + Assignees []*BasicUser +} + +func (wi WorkItem) GID() string { + return gidGQL{ + Type: "WorkItem", + Int64: wi.ID, + }.String() +} + +// workItemTemplate defines the common fields for a work item in GraphQL queries. +// It's chained from userCoreBasicTemplate so nested templates work. +var workItemTemplate = template.Must(template.Must(userCoreBasicTemplate.Clone()).New("WorkItem").Parse(` + id + iid + workItemType { + name + } + state + title + description + author { + {{ template "UserCoreBasic" }} + } + createdAt + updatedAt + closedAt + webUrl + features { + assignees { + assignees { + nodes { + {{ template "UserCoreBasic" }} + } + } + } + status { + status { + name + } + } + } +`)) + +// getWorkItemTemplate is chained from workItemTemplate so it has access to both +// UserCoreBasic and WorkItem templates. +var getWorkItemTemplate = template.Must(template.Must(workItemTemplate.Clone()).New("GetWorkItem").Parse(` + query GetWorkItem($fullPath: ID!, $iid: String!) { + namespace(fullPath: $fullPath) { + workItem(iid: $iid) { + {{ template "WorkItem" }} + } + } + } +`)) + +// GetWorkItem gets a single work item. +// +// fullPath is the full path to either a group or project. +// iid is the internal ID of the work item. 
+// +// GitLab API docs: https://docs.gitlab.com/api/graphql/reference/#namespaceworkitem +func (s *WorkItemsService) GetWorkItem(fullPath string, iid int64, options ...RequestOptionFunc) (*WorkItem, *Response, error) { + var queryBuilder strings.Builder + if err := getWorkItemTemplate.Execute(&queryBuilder, nil); err != nil { + return nil, nil, err + } + + q := GraphQLQuery{ + Query: queryBuilder.String(), + Variables: map[string]any{ + "fullPath": fullPath, + "iid": strconv.FormatInt(iid, 10), + }, + } + + var result struct { + Data struct { + Namespace struct { + WorkItem *workItemGQL `json:"workItem"` + } `json:"namespace"` + } + GenericGraphQLErrors + } + + resp, err := s.client.GraphQL.Do(q, &result, options...) + if err != nil { + return nil, resp, err + } + + if len(result.Errors) != 0 { + return nil, resp, &GraphQLResponseError{ + Err: errors.New("GraphQL query failed"), + Errors: result.GenericGraphQLErrors, + } + } + + wiQL := result.Data.Namespace.WorkItem + if wiQL == nil { + return nil, resp, ErrNotFound + } + + return wiQL.unwrap(), resp, nil +} + +// ListWorkItemsOptions represents the available ListWorkItems() options. 
+// +// GitLab API docs: https://docs.gitlab.com/api/graphql/reference/#namespaceworkitems +type ListWorkItemsOptions struct { + AssigneeUsernames []string + AssigneeWildcardID *string + AuthorUsername *string + Confidential *bool + CRMContactID *string + CRMOrganizationID *string + HealthStatusFilter *string + IDs []string + IIDs []string + IncludeAncestors *bool + IncludeDescendants *bool + IterationCadenceID []string + IterationID []string + IterationWildcardID *string + LabelName []string + MilestoneTitle []string + MilestoneWildcardID *string + MyReactionEmoji *string + ParentIDs []string + ReleaseTag []string + ReleaseTagWildcardID *string + State *string + Subscribed *string + Types []string + Weight *string + WeightWildcardID *string + + // Time filters + ClosedAfter *time.Time + ClosedBefore *time.Time + CreatedAfter *time.Time + CreatedBefore *time.Time + DueAfter *time.Time + DueBefore *time.Time + UpdatedAfter *time.Time + UpdatedBefore *time.Time + + // Sorting + Sort *string + + // Search + Search *string + In []string + + // Pagination + After *string + Before *string + First *int64 + Last *int64 +} + +// listWorkItemsTemplate is chained from workItemTemplate so it has access to both +// UserCoreBasic and WorkItem templates. +var listWorkItemsTemplate = template.Must(template.Must(workItemTemplate.Clone()).New("ListWorkItems").Parse(` + query ListWorkItems( + $fullPath: ID! + $assigneeUsernames: [String!] + $assigneeWildcardId: AssigneeWildcardId + $authorUsername: String + $confidential: Boolean + $crmContactId: String + $crmOrganizationId: String + $healthStatusFilter: HealthStatusFilter + $ids: [WorkItemID!] + $iids: [String!] + $includeAncestors: Boolean + $includeDescendants: Boolean + $iterationCadenceId: [IterationsCadenceID!] + $iterationId: [ID] + $iterationWildcardId: IterationWildcardId + $labelName: [String!] + $milestoneTitle: [String!] + $milestoneWildcardId: MilestoneWildcardId + $myReactionEmoji: String + $parentIds: [WorkItemID!] 
+ $releaseTag: [String!] + $releaseTagWildcardId: ReleaseTagWildcardId + $state: IssuableState + $subscribed: SubscriptionStatus + $types: [IssueType!] + $weight: String + $weightWildcardId: WeightWildcardId + $closedAfter: Time + $closedBefore: Time + $createdAfter: Time + $createdBefore: Time + $dueAfter: Time + $dueBefore: Time + $updatedAfter: Time + $updatedBefore: Time + $sort: WorkItemSort + $search: String + $in: [IssuableSearchableField!] + $after: String + $before: String + $first: Int + $last: Int + ) { + namespace(fullPath: $fullPath) { + workItems( + assigneeUsernames: $assigneeUsernames + assigneeWildcardId: $assigneeWildcardId + authorUsername: $authorUsername + confidential: $confidential + crmContactId: $crmContactId + crmOrganizationId: $crmOrganizationId + healthStatusFilter: $healthStatusFilter + ids: $ids + iids: $iids + includeAncestors: $includeAncestors + includeDescendants: $includeDescendants + iterationCadenceId: $iterationCadenceId + iterationId: $iterationId + iterationWildcardId: $iterationWildcardId + labelName: $labelName + milestoneTitle: $milestoneTitle + milestoneWildcardId: $milestoneWildcardId + myReactionEmoji: $myReactionEmoji + parentIds: $parentIds + releaseTag: $releaseTag + releaseTagWildcardId: $releaseTagWildcardId + state: $state + subscribed: $subscribed + types: $types + weight: $weight + weightWildcardId: $weightWildcardId + closedAfter: $closedAfter + closedBefore: $closedBefore + createdAfter: $createdAfter + createdBefore: $createdBefore + dueAfter: $dueAfter + dueBefore: $dueBefore + updatedAfter: $updatedAfter + updatedBefore: $updatedBefore + sort: $sort + search: $search + in: $in + after: $after + before: $before + first: $first + last: $last + ) { + nodes { + {{ template "WorkItem" }} + } + pageInfo { + endCursor + hasNextPage + startCursor + hasPreviousPage + } + } + } + } +`)) + +// ListWorkItems lists workitems in a given namespace (group or project). 
+// +// GitLab API docs: https://docs.gitlab.com/api/graphql/reference/#namespaceworkitems +func (s *WorkItemsService) ListWorkItems(fullPath string, opt *ListWorkItemsOptions, options ...RequestOptionFunc) ([]*WorkItem, *Response, error) { + var queryBuilder strings.Builder + + if err := listWorkItemsTemplate.Execute(&queryBuilder, nil); err != nil { + return nil, nil, err + } + + vars := map[string]any{ + "fullPath": fullPath, + "assigneeUsernames": opt.AssigneeUsernames, + "assigneeWildcardId": opt.AssigneeWildcardID, + "authorUsername": opt.AuthorUsername, + "confidential": opt.Confidential, + "crmContactId": opt.CRMContactID, + "crmOrganizationId": opt.CRMOrganizationID, + "healthStatusFilter": opt.HealthStatusFilter, + "ids": opt.IDs, + "iids": opt.IIDs, + "includeAncestors": opt.IncludeAncestors, + "includeDescendants": opt.IncludeDescendants, + "iterationCadenceId": opt.IterationCadenceID, + "iterationId": opt.IterationID, + "iterationWildcardId": opt.IterationWildcardID, + "labelName": opt.LabelName, + "milestoneTitle": opt.MilestoneTitle, + "milestoneWildcardId": opt.MilestoneWildcardID, + "myReactionEmoji": opt.MyReactionEmoji, + "parentIds": opt.ParentIDs, + "releaseTag": opt.ReleaseTag, + "releaseTagWildcardId": opt.ReleaseTagWildcardID, + "state": opt.State, + "subscribed": opt.Subscribed, + "types": opt.Types, + "weight": opt.Weight, + "weightWildcardId": opt.WeightWildcardID, + "closedAfter": opt.ClosedAfter, + "closedBefore": opt.ClosedBefore, + "createdAfter": opt.CreatedAfter, + "createdBefore": opt.CreatedBefore, + "dueAfter": opt.DueAfter, + "dueBefore": opt.DueBefore, + "updatedAfter": opt.UpdatedAfter, + "updatedBefore": opt.UpdatedBefore, + "sort": opt.Sort, + "search": opt.Search, + "in": opt.In, + "after": opt.After, + "before": opt.Before, + "first": opt.First, + "last": opt.Last, + } + + query := GraphQLQuery{ + Query: queryBuilder.String(), + Variables: vars, + } + + var result struct { + Data struct { + Namespace struct { + WorkItems 
connectionGQL[workItemGQL] `json:"workItems"` + } `json:"namespace"` + } + GenericGraphQLErrors + } + + resp, err := s.client.GraphQL.Do(query, &result, options...) + if err != nil { + return nil, resp, err + } + + if len(result.Errors) != 0 { + return nil, resp, &GraphQLResponseError{ + Err: errors.New("GraphQL query failed"), + Errors: result.GenericGraphQLErrors, + } + } + + var ret []*WorkItem + + for _, wi := range result.Data.Namespace.WorkItems.Nodes { + ret = append(ret, wi.unwrap()) + } + + resp.PageInfo = &result.Data.Namespace.WorkItems.PageInfo + + return ret, resp, nil +} + +// workItemGQL represents the JSON structure returned by the GraphQL query. +// It is used to parse the response and convert it to the more user-friendly WorkItem type. +type workItemGQL struct { + ID gidGQL `json:"id"` + IID iidGQL `json:"iid"` + WorkItemType struct { + Name string `json:"name"` + } `json:"workItemType"` + State string `json:"state"` + Title string `json:"title"` + Description string `json:"description"` + CreatedAt *time.Time `json:"createdAt"` + UpdatedAt *time.Time `json:"updatedAt"` + ClosedAt *time.Time `json:"closedAt"` + Author userCoreBasicGQL `json:"author"` + Features workItemFeaturesGQL `json:"features"` + WebURL string `json:"webUrl"` +} + +func (w workItemGQL) unwrap() *WorkItem { + var assignees []*BasicUser + + for _, a := range w.Features.Assignees.Assignees.Nodes { + assignees = append(assignees, a.unwrap()) + } + + return &WorkItem{ + ID: w.ID.Int64, + IID: int64(w.IID), + Type: w.WorkItemType.Name, + State: w.State, + Status: w.Features.Status.Status.Name, + Title: w.Title, + Description: w.Description, + CreatedAt: w.CreatedAt, + UpdatedAt: w.UpdatedAt, + ClosedAt: w.ClosedAt, + WebURL: w.WebURL, + Author: w.Author.unwrap(), + Assignees: assignees, + } +} + +type workItemFeaturesGQL struct { + Assignees struct { + Assignees struct { + Nodes []userCoreBasicGQL `json:"nodes"` + } `json:"assignees"` + } `json:"assignees"` + Status struct { + 
Status struct { + Name string + } + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go deleted file mode 100644 index a0d8185826..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bson.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer -// See THIRD-PARTY-NOTICES for original license terms. - -package bson // import "go.mongodb.org/mongo-driver/bson" - -import ( - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// Zeroer allows custom struct types to implement a report of zero -// state. All struct types that don't implement Zeroer or where IsZero -// returns false are considered to be not zero. -type Zeroer interface { - IsZero() bool -} - -// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters, -// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead. -// -// A D should not be constructed with duplicate key names, as that can cause undefined server behavior. -// -// Example usage: -// -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} -type D = primitive.D - -// E represents a BSON element for a D. It is usually used inside a D. -type E = primitive.E - -// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not -// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be -// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead. 
-// -// Example usage: -// -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} -type M = primitive.M - -// An A is an ordered representation of a BSON array. -// -// Example usage: -// -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} -type A = primitive.A diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go deleted file mode 100644 index 652aa48b85..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// ArrayCodec is the Codec used for bsoncore.Array values. -// -// Deprecated: ArrayCodec will not be directly accessible in Go Driver 2.0. -type ArrayCodec struct{} - -var defaultArrayCodec = NewArrayCodec() - -// NewArrayCodec returns an ArrayCodec. -// -// Deprecated: NewArrayCodec will not be available in Go Driver 2.0. See -// [ArrayCodec] for more details. -func NewArrayCodec() *ArrayCodec { - return &ArrayCodec{} -} - -// EncodeValue is the ValueEncoder for bsoncore.Array values. -func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCoreArray { - return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val} - } - - arr := val.Interface().(bsoncore.Array) - return bsonrw.Copier{}.CopyArrayFromBytes(vw, arr) -} - -// DecodeValue is the ValueDecoder for bsoncore.Array values. 
-func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCoreArray { - return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val} - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, 0)) - } - - val.SetLen(0) - arr, err := bsonrw.Copier{}.AppendArrayBytes(val.Interface().(bsoncore.Array), vr) - val.Set(reflect.ValueOf(arr)) - return err -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go deleted file mode 100644 index 0693bd432f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec" - -import ( - "fmt" - "reflect" - "strings" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var ( - emptyValue = reflect.Value{} -) - -// Marshaler is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead. -type Marshaler interface { - MarshalBSON() ([]byte, error) -} - -// ValueMarshaler is an interface implemented by types that can marshal -// themselves into a BSON value as bytes. The type must be the valid type for -// the bytes returned. The bytes and byte type together must be valid if the -// error is nil. 
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead. -type ValueMarshaler interface { - MarshalBSONValue() (bsontype.Type, []byte, error) -} - -// Unmarshaler is an interface implemented by types that can unmarshal a BSON -// document representation of themselves. The BSON bytes can be assumed to be -// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data -// after returning. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead. -type Unmarshaler interface { - UnmarshalBSON([]byte) error -} - -// ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead. -type ValueUnmarshaler interface { - UnmarshalBSONValue(bsontype.Type, []byte) error -} - -// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be -// encoded by the ValueEncoder. 
-type ValueEncoderError struct { - Name string - Types []reflect.Type - Kinds []reflect.Kind - Received reflect.Value -} - -func (vee ValueEncoderError) Error() string { - typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds)) - for _, t := range vee.Types { - typeKinds = append(typeKinds, t.String()) - } - for _, k := range vee.Kinds { - if k == reflect.Map { - typeKinds = append(typeKinds, "map[string]*") - continue - } - typeKinds = append(typeKinds, k.String()) - } - received := vee.Received.Kind().String() - if vee.Received.IsValid() { - received = vee.Received.Type().String() - } - return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received) -} - -// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be -// decoded by the ValueDecoder. -type ValueDecoderError struct { - Name string - Types []reflect.Type - Kinds []reflect.Kind - Received reflect.Value -} - -func (vde ValueDecoderError) Error() string { - typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds)) - for _, t := range vde.Types { - typeKinds = append(typeKinds, t.String()) - } - for _, k := range vde.Kinds { - if k == reflect.Map { - typeKinds = append(typeKinds, "map[string]*") - continue - } - typeKinds = append(typeKinds, k.String()) - } - received := vde.Received.Kind().String() - if vde.Received.IsValid() { - received = vde.Received.Type().String() - } - return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received) -} - -// EncodeContext is the contextual information required for a Codec to encode a -// value. -type EncodeContext struct { - *Registry - - // MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, - // uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) - // that can represent the integer value. 
- // - // Deprecated: Use bson.Encoder.IntMinSize instead. - MinSize bool - - errorOnInlineDuplicates bool - stringifyMapKeysWithFmt bool - nilMapAsEmpty bool - nilSliceAsEmpty bool - nilByteSliceAsEmpty bool - omitZeroStruct bool - useJSONStructTags bool -} - -// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in -// the marshaled BSON when the "inline" struct tag option is set. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. -func (ec *EncodeContext) ErrorOnInlineDuplicates() { - ec.errorOnInlineDuplicates = true -} - -// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name -// strings using fmt.Sprintf() instead of the default string conversion logic. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. -func (ec *EncodeContext) StringifyMapKeysWithFmt() { - ec.stringifyMapKeysWithFmt = true -} - -// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON -// null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. -func (ec *EncodeContext) NilMapAsEmpty() { - ec.nilMapAsEmpty = true -} - -// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON -// null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. -func (ec *EncodeContext) NilSliceAsEmpty() { - ec.nilSliceAsEmpty = true -} - -// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values -// instead of BSON null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. -func (ec *EncodeContext) NilByteSliceAsEmpty() { - ec.nilByteSliceAsEmpty = true -} - -// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. 
MyStruct{}) -// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. -// -// Note that the Encoder only examines exported struct fields when determining if a struct is the -// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. -func (ec *EncodeContext) OmitZeroStruct() { - ec.omitZeroStruct = true -} - -// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" -// struct tag is not specified. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead. -func (ec *EncodeContext) UseJSONStructTags() { - ec.useJSONStructTags = true -} - -// DecodeContext is the contextual information required for a Codec to decode a -// value. -type DecodeContext struct { - *Registry - - // Truncate, if true, instructs decoders to to truncate the fractional part of BSON "double" - // values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64, - // uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to - // BSON "decimal128" values. - // - // Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead. - Truncate bool - - // Ancestor is the type of a containing document. This is mainly used to determine what type - // should be used when decoding an embedded document into an empty interface. For example, if - // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface - // will be decoded into a bson.M. - // - // Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead. - Ancestor reflect.Type - - // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the - // usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". 
If DocumentType is - // set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an - // error. DocumentType overrides the Ancestor field. - defaultDocumentType reflect.Type - - binaryAsSlice bool - useJSONStructTags bool - useLocalTimeZone bool - zeroMaps bool - zeroStructs bool -} - -// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or -// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. -func (dc *DecodeContext) BinaryAsSlice() { - dc.binaryAsSlice = true -} - -// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson" -// struct tag is not specified. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. -func (dc *DecodeContext) UseJSONStructTags() { - dc.useJSONStructTags = true -} - -// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead -// of the UTC timezone. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. -func (dc *DecodeContext) UseLocalTimeZone() { - dc.useLocalTimeZone = true -} - -// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value -// passed to Decode before unmarshaling BSON documents into them. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. -func (dc *DecodeContext) ZeroMaps() { - dc.zeroMaps = true -} - -// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination -// value passed to Decode before unmarshaling BSON documents into them. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. 
-func (dc *DecodeContext) ZeroStructs() { - dc.zeroStructs = true -} - -// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This -// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead. -func (dc *DecodeContext) DefaultDocumentM() { - dc.defaultDocumentType = reflect.TypeOf(primitive.M{}) -} - -// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This -// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead. -func (dc *DecodeContext) DefaultDocumentD() { - dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) -} - -// ValueCodec is an interface for encoding and decoding a reflect.Value. -// values. -// -// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead. -type ValueCodec interface { - ValueEncoder - ValueDecoder -} - -// ValueEncoder is the interface implemented by types that can handle the encoding of a value. -type ValueEncoder interface { - EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error -} - -// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be -// used as a ValueEncoder. -type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error - -// EncodeValue implements the ValueEncoder interface. -func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - return fn(ec, vw, val) -} - -// ValueDecoder is the interface implemented by types that can handle the decoding of a value. 
-type ValueDecoder interface { - DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error -} - -// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be -// used as a ValueDecoder. -type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error - -// DecodeValue implements the ValueDecoder interface. -func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - return fn(dc, vr, val) -} - -// typeDecoder is the interface implemented by types that can handle the decoding of a value given its type. -type typeDecoder interface { - decodeType(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error) -} - -// typeDecoderFunc is an adapter function that allows a function with the correct signature to be used as a typeDecoder. -type typeDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error) - -func (fn typeDecoderFunc) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - return fn(dc, vr, t) -} - -// decodeAdapter allows two functions with the correct signatures to be used as both a ValueDecoder and typeDecoder. -type decodeAdapter struct { - ValueDecoderFunc - typeDecoderFunc -} - -var _ ValueDecoder = decodeAdapter{} -var _ typeDecoder = decodeAdapter{} - -// decodeTypeOrValue calls decoder.decodeType is decoder is a typeDecoder. Otherwise, it allocates a new element of type -// t and calls decoder.DecodeValue on it. 
-func decodeTypeOrValue(decoder ValueDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - td, _ := decoder.(typeDecoder) - return decodeTypeOrValueWithInfo(decoder, td, dc, vr, t, true) -} - -func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type, convert bool) (reflect.Value, error) { - if td != nil { - val, err := td.decodeType(dc, vr, t) - if err == nil && convert && val.Type() != t { - // This conversion step is necessary for slices and maps. If a user declares variables like: - // - // type myBool bool - // var m map[string]myBool - // - // and tries to decode BSON bytes into the map, the decoding will fail if this conversion is not present - // because we'll try to assign a value of type bool to one of type myBool. - val = val.Convert(t) - } - return val, err - } - - val := reflect.New(t).Elem() - err := vd.DecodeValue(dc, vr, val) - return val, err -} - -// CodecZeroer is the interface implemented by Codecs that can also determine if -// a value of the type that would be encoded is zero. -// -// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver -// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to -// nil instead. -type CodecZeroer interface { - IsTypeZero(interface{}) bool -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go deleted file mode 100644 index 0134b5a94b..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// ByteSliceCodec is the Codec used for []byte values. -// -// Deprecated: ByteSliceCodec will not be directly configurable in Go Driver -// 2.0. To configure the byte slice encode and decode behavior, use the -// configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the byte slice -// encode and decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. -// -// For example, to configure a mongo.Client to encode nil byte slices as empty -// BSON binary values, use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// NilByteSliceAsEmpty: true, -// }) -// -// See the deprecation notice for each field in ByteSliceCodec for the -// corresponding settings. -type ByteSliceCodec struct { - // EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values - // instead of BSON null. - // - // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty or options.BSONOptions.NilByteSliceAsEmpty - // instead. - EncodeNilAsEmpty bool -} - -var ( - defaultByteSliceCodec = NewByteSliceCodec() - - // Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be - // used by collection type decoders (e.g. map, slice, etc) to set individual values in a - // collection. - _ typeDecoder = defaultByteSliceCodec -) - -// NewByteSliceCodec returns a ByteSliceCodec with options opts. -// -// Deprecated: NewByteSliceCodec will not be available in Go Driver 2.0. See -// [ByteSliceCodec] for more details. 
-func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { - byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) - codec := ByteSliceCodec{} - if byteSliceOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *byteSliceOpt.EncodeNilAsEmpty - } - return &codec -} - -// EncodeValue is the ValueEncoder for []byte. -func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tByteSlice { - return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty { - return vw.WriteNull() - } - return vw.WriteBinary(val.Interface().([]byte)) -} - -func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tByteSlice { - return emptyValue, ValueDecoderError{ - Name: "ByteSliceDecodeValue", - Types: []reflect.Type{tByteSlice}, - Received: reflect.Zero(t), - } - } - - var data []byte - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - str, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - data = []byte(str) - case bsontype.Symbol: - sym, err := vr.ReadSymbol() - if err != nil { - return emptyValue, err - } - data = []byte(sym) - case bsontype.Binary: - var subtype byte - data, subtype, err = vr.ReadBinary() - if err != nil { - return emptyValue, err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "[]byte"} - } - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a []byte", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(data), nil -} - -// DecodeValue is the ValueDecoder for 
[]byte. -func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tByteSlice { - return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - - elem, err := bsc.decodeType(dc, vr, tByteSlice) - if err != nil { - return err - } - - val.Set(elem) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go deleted file mode 100644 index 844b50299f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - "sync" - "sync/atomic" -) - -// Runtime check that the kind encoder and decoder caches can store any valid -// reflect.Kind constant. 
-func init() { - if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" { - panic("The capacity of kindEncoderCache is too small.\n" + - "This is due to a new type being added to reflect.Kind.") - } -} - -// statically assert array size -var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer] -var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer] - -type typeEncoderCache struct { - cache sync.Map // map[reflect.Type]ValueEncoder -} - -func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) { - c.cache.Store(rt, enc) -} - -func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) { - if v, _ := c.cache.Load(rt); v != nil { - return v.(ValueEncoder), true - } - return nil, false -} - -func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder { - if v, loaded := c.cache.LoadOrStore(rt, enc); loaded { - enc = v.(ValueEncoder) - } - return enc -} - -func (c *typeEncoderCache) Clone() *typeEncoderCache { - cc := new(typeEncoderCache) - c.cache.Range(func(k, v interface{}) bool { - if k != nil && v != nil { - cc.cache.Store(k, v) - } - return true - }) - return cc -} - -type typeDecoderCache struct { - cache sync.Map // map[reflect.Type]ValueDecoder -} - -func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) { - c.cache.Store(rt, dec) -} - -func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) { - if v, _ := c.cache.Load(rt); v != nil { - return v.(ValueDecoder), true - } - return nil, false -} - -func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder { - if v, loaded := c.cache.LoadOrStore(rt, dec); loaded { - dec = v.(ValueDecoder) - } - return dec -} - -func (c *typeDecoderCache) Clone() *typeDecoderCache { - cc := new(typeDecoderCache) - c.cache.Range(func(k, v interface{}) bool { - if k != nil && v != nil { - cc.cache.Store(k, v) - } - return true - }) - return cc -} - -// atomic.Value requires that all calls to Store() 
have the same concrete type -// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type -// is always the same (since different concrete types may implement the -// ValueEncoder interface). -type kindEncoderCacheEntry struct { - enc ValueEncoder -} - -type kindEncoderCache struct { - entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry -} - -func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) { - if enc != nil && rt < reflect.Kind(len(c.entries)) { - c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc}) - } -} - -func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) { - if rt < reflect.Kind(len(c.entries)) { - if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok { - return ent.enc, ent.enc != nil - } - } - return nil, false -} - -func (c *kindEncoderCache) Clone() *kindEncoderCache { - cc := new(kindEncoderCache) - for i, v := range c.entries { - if val := v.Load(); val != nil { - cc.entries[i].Store(val) - } - } - return cc -} - -// atomic.Value requires that all calls to Store() have the same concrete type -// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type -// is always the same (since different concrete types may implement the -// ValueDecoder interface). 
-type kindDecoderCacheEntry struct { - dec ValueDecoder -} - -type kindDecoderCache struct { - entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry -} - -func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) { - if rt < reflect.Kind(len(c.entries)) { - c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec}) - } -} - -func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) { - if rt < reflect.Kind(len(c.entries)) { - if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok { - return ent.dec, ent.dec != nil - } - } - return nil, false -} - -func (c *kindDecoderCache) Clone() *kindDecoderCache { - cc := new(kindDecoderCache) - for i, v := range c.entries { - if val := v.Load(); val != nil { - cc.entries[i].Store(val) - } - } - return cc -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go deleted file mode 100644 index cb8180f25c..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" -) - -// condAddrEncoder is the encoder used when a pointer to the encoding value has an encoder. -type condAddrEncoder struct { - canAddrEnc ValueEncoder - elseEnc ValueEncoder -} - -var _ ValueEncoder = (*condAddrEncoder)(nil) - -// newCondAddrEncoder returns an condAddrEncoder. 
-func newCondAddrEncoder(canAddrEnc, elseEnc ValueEncoder) *condAddrEncoder { - encoder := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} - return &encoder -} - -// EncodeValue is the ValueEncoderFunc for a value that may be addressable. -func (cae *condAddrEncoder) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.CanAddr() { - return cae.canAddrEnc.EncodeValue(ec, vw, val) - } - if cae.elseEnc != nil { - return cae.elseEnc.EncodeValue(ec, vw, val) - } - return ErrNoEncoder{Type: val.Type()} -} - -// condAddrDecoder is the decoder used when a pointer to the value has a decoder. -type condAddrDecoder struct { - canAddrDec ValueDecoder - elseDec ValueDecoder -} - -var _ ValueDecoder = (*condAddrDecoder)(nil) - -// newCondAddrDecoder returns an CondAddrDecoder. -func newCondAddrDecoder(canAddrDec, elseDec ValueDecoder) *condAddrDecoder { - decoder := condAddrDecoder{canAddrDec: canAddrDec, elseDec: elseDec} - return &decoder -} - -// DecodeValue is the ValueDecoderFunc for a value that may be addressable. -func (cad *condAddrDecoder) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if val.CanAddr() { - return cad.canAddrDec.DecodeValue(dc, vr, val) - } - if cad.elseDec != nil { - return cad.elseDec.DecodeValue(dc, vr, val) - } - return ErrNoDecoder{Type: val.Type()} -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go deleted file mode 100644 index 8702d6d39e..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ /dev/null @@ -1,1819 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "net/url" - "reflect" - "strconv" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var ( - defaultValueDecoders DefaultValueDecoders - errCannotTruncate = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled") -) - -type decodeBinaryError struct { - subtype byte - typeName string -} - -func (d decodeBinaryError) Error() string { - return fmt.Sprintf("only binary values with subtype 0x00 or 0x02 can be decoded into %s, but got subtype %v", d.typeName, d.subtype) -} - -func newDefaultStructCodec() *StructCodec { - codec, err := NewStructCodec(DefaultStructTagParser) - if err != nil { - // This function is called from the codec registration path, so errors can't be propagated. If there's an error - // constructing the StructCodec, we panic to avoid losing it. - panic(fmt.Errorf("error creating default StructCodec: %w", err)) - } - return codec -} - -// DefaultValueDecoders is a namespace type for the default ValueDecoders used -// when creating a registry. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -type DefaultValueDecoders struct{} - -// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with -// the provided RegistryBuilder. -// -// There is no support for decoding map[string]interface{} because there is no decoder for -// interface{}, so users must either register this decoder themselves or use the -// EmptyInterfaceDecoder available in the bson package. 
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { - if rb == nil { - panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) - } - - intDecoder := decodeAdapter{dvd.IntDecodeValue, dvd.intDecodeType} - floatDecoder := decodeAdapter{dvd.FloatDecodeValue, dvd.floatDecodeType} - - rb. - RegisterTypeDecoder(tD, ValueDecoderFunc(dvd.DDecodeValue)). - RegisterTypeDecoder(tBinary, decodeAdapter{dvd.BinaryDecodeValue, dvd.binaryDecodeType}). - RegisterTypeDecoder(tUndefined, decodeAdapter{dvd.UndefinedDecodeValue, dvd.undefinedDecodeType}). - RegisterTypeDecoder(tDateTime, decodeAdapter{dvd.DateTimeDecodeValue, dvd.dateTimeDecodeType}). - RegisterTypeDecoder(tNull, decodeAdapter{dvd.NullDecodeValue, dvd.nullDecodeType}). - RegisterTypeDecoder(tRegex, decodeAdapter{dvd.RegexDecodeValue, dvd.regexDecodeType}). - RegisterTypeDecoder(tDBPointer, decodeAdapter{dvd.DBPointerDecodeValue, dvd.dBPointerDecodeType}). - RegisterTypeDecoder(tTimestamp, decodeAdapter{dvd.TimestampDecodeValue, dvd.timestampDecodeType}). - RegisterTypeDecoder(tMinKey, decodeAdapter{dvd.MinKeyDecodeValue, dvd.minKeyDecodeType}). - RegisterTypeDecoder(tMaxKey, decodeAdapter{dvd.MaxKeyDecodeValue, dvd.maxKeyDecodeType}). - RegisterTypeDecoder(tJavaScript, decodeAdapter{dvd.JavaScriptDecodeValue, dvd.javaScriptDecodeType}). - RegisterTypeDecoder(tSymbol, decodeAdapter{dvd.SymbolDecodeValue, dvd.symbolDecodeType}). - RegisterTypeDecoder(tByteSlice, defaultByteSliceCodec). - RegisterTypeDecoder(tTime, defaultTimeCodec). - RegisterTypeDecoder(tEmpty, defaultEmptyInterfaceCodec). - RegisterTypeDecoder(tCoreArray, defaultArrayCodec). - RegisterTypeDecoder(tOID, decodeAdapter{dvd.ObjectIDDecodeValue, dvd.objectIDDecodeType}). - RegisterTypeDecoder(tDecimal, decodeAdapter{dvd.Decimal128DecodeValue, dvd.decimal128DecodeType}). 
- RegisterTypeDecoder(tJSONNumber, decodeAdapter{dvd.JSONNumberDecodeValue, dvd.jsonNumberDecodeType}). - RegisterTypeDecoder(tURL, decodeAdapter{dvd.URLDecodeValue, dvd.urlDecodeType}). - RegisterTypeDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)). - RegisterTypeDecoder(tCodeWithScope, decodeAdapter{dvd.CodeWithScopeDecodeValue, dvd.codeWithScopeDecodeType}). - RegisterDefaultDecoder(reflect.Bool, decodeAdapter{dvd.BooleanDecodeValue, dvd.booleanDecodeType}). - RegisterDefaultDecoder(reflect.Int, intDecoder). - RegisterDefaultDecoder(reflect.Int8, intDecoder). - RegisterDefaultDecoder(reflect.Int16, intDecoder). - RegisterDefaultDecoder(reflect.Int32, intDecoder). - RegisterDefaultDecoder(reflect.Int64, intDecoder). - RegisterDefaultDecoder(reflect.Uint, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint8, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint16, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint32, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint64, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Float32, floatDecoder). - RegisterDefaultDecoder(reflect.Float64, floatDecoder). - RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)). - RegisterDefaultDecoder(reflect.Map, defaultMapCodec). - RegisterDefaultDecoder(reflect.Slice, defaultSliceCodec). - RegisterDefaultDecoder(reflect.String, defaultStringCodec). - RegisterDefaultDecoder(reflect.Struct, newDefaultStructCodec()). - RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()). - RegisterTypeMapEntry(bsontype.Double, tFloat64). - RegisterTypeMapEntry(bsontype.String, tString). - RegisterTypeMapEntry(bsontype.Array, tA). - RegisterTypeMapEntry(bsontype.Binary, tBinary). - RegisterTypeMapEntry(bsontype.Undefined, tUndefined). - RegisterTypeMapEntry(bsontype.ObjectID, tOID). - RegisterTypeMapEntry(bsontype.Boolean, tBool). - RegisterTypeMapEntry(bsontype.DateTime, tDateTime). - RegisterTypeMapEntry(bsontype.Regex, tRegex). 
- RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer). - RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript). - RegisterTypeMapEntry(bsontype.Symbol, tSymbol). - RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope). - RegisterTypeMapEntry(bsontype.Int32, tInt32). - RegisterTypeMapEntry(bsontype.Int64, tInt64). - RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp). - RegisterTypeMapEntry(bsontype.Decimal128, tDecimal). - RegisterTypeMapEntry(bsontype.MinKey, tMinKey). - RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey). - RegisterTypeMapEntry(bsontype.Type(0), tD). - RegisterTypeMapEntry(bsontype.EmbeddedDocument, tD). - RegisterHookDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)). - RegisterHookDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue)) -} - -// DDecodeValue is the ValueDecoderFunc for primitive.D instances. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || !val.CanSet() || val.Type() != tD { - return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - dc.Ancestor = tD - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return fmt.Errorf("cannot decode %v into a primitive.D", vrType) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - decoder, err := dc.LookupDecoder(tEmpty) - if err != nil { - return err - } - tEmptyTypeDecoder, _ := decoder.(typeDecoder) - - // Use the elements in the provided value if it's non nil. Otherwise, allocate a new D instance. 
- var elems primitive.D - if !val.IsNil() { - val.SetLen(0) - elems = val.Interface().(primitive.D) - } else { - elems = make(primitive.D, 0) - } - - for { - key, elemVr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } else if err != nil { - return err - } - - // Pass false for convert because we don't need to call reflect.Value.Convert for tEmpty. - elem, err := decodeTypeOrValueWithInfo(decoder, tEmptyTypeDecoder, dc, elemVr, tEmpty, false) - if err != nil { - return err - } - - elems = append(elems, primitive.E{Key: key, Value: elem.Interface()}) - } - - val.Set(reflect.ValueOf(elems)) - return nil -} - -func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t.Kind() != reflect.Bool { - return emptyValue, ValueDecoderError{ - Name: "BooleanDecodeValue", - Kinds: []reflect.Kind{reflect.Bool}, - Received: reflect.Zero(t), - } - } - - var b bool - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - b = (i32 != 0) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - b = (i64 != 0) - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - b = (f64 != 0) - case bsontype.Boolean: - b, err = vr.ReadBoolean() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a boolean", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(b), nil -} - -// BooleanDecodeValue is the ValueDecoderFunc for bool types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { - return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} - } - - elem, err := dvd.booleanDecodeType(dctx, vr, val.Type()) - if err != nil { - return err - } - - val.SetBool(elem.Bool()) - return nil -} - -func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var i64 int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return emptyValue, err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return emptyValue, errCannotTruncate - } - if f64 > float64(math.MaxInt64) { - return emptyValue, fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - i64 = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) - } - - switch t.Kind() { - case reflect.Int8: - if i64 < math.MinInt8 || i64 > math.MaxInt8 { - return emptyValue, fmt.Errorf("%d overflows int8", i64) - } - - return reflect.ValueOf(int8(i64)), nil - case reflect.Int16: - if i64 < math.MinInt16 || i64 > math.MaxInt16 { - return emptyValue, fmt.Errorf("%d overflows int16", i64) - } - - return reflect.ValueOf(int16(i64)), nil - case reflect.Int32: - if i64 < math.MinInt32 || 
i64 > math.MaxInt32 { - return emptyValue, fmt.Errorf("%d overflows int32", i64) - } - - return reflect.ValueOf(int32(i64)), nil - case reflect.Int64: - return reflect.ValueOf(i64), nil - case reflect.Int: - if i64 > math.MaxInt { // Can we fit this inside of an int - return emptyValue, fmt.Errorf("%d overflows int", i64) - } - - return reflect.ValueOf(int(i64)), nil - default: - return emptyValue, ValueDecoderError{ - Name: "IntDecodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: reflect.Zero(t), - } - } -} - -// IntDecodeValue is the ValueDecoderFunc for int types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "IntDecodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: val, - } - } - - elem, err := dvd.intDecodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.SetInt(elem.Int()) - return nil -} - -// UintDecodeValue is the ValueDecoderFunc for uint types. -// -// Deprecated: UintDecodeValue is not registered by default. Use UintCodec.DecodeValue instead. 
-func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - var i64 int64 - var err error - switch vr.Type() { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled") - } - if f64 > float64(math.MaxInt64) { - return fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return err - } - if b { - i64 = 1 - } - default: - return fmt.Errorf("cannot decode %v into an integer type", vr.Type()) - } - - if !val.CanSet() { - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - switch val.Kind() { - case reflect.Uint8: - if i64 < 0 || i64 > math.MaxUint8 { - return fmt.Errorf("%d overflows uint8", i64) - } - case reflect.Uint16: - if i64 < 0 || i64 > math.MaxUint16 { - return fmt.Errorf("%d overflows uint16", i64) - } - case reflect.Uint32: - if i64 < 0 || i64 > math.MaxUint32 { - return fmt.Errorf("%d overflows uint32", i64) - } - case reflect.Uint64: - if i64 < 0 { - return fmt.Errorf("%d overflows uint64", i64) - } - case reflect.Uint: - if i64 < 0 || uint64(i64) > uint64(math.MaxUint) { // Can we fit this inside of an uint - return fmt.Errorf("%d overflows uint", i64) - } - default: - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - val.SetUint(uint64(i64)) - return nil -} - -func (dvd 
DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var f float64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - f = float64(i32) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - f = float64(i64) - case bsontype.Double: - f, err = vr.ReadDouble() - if err != nil { - return emptyValue, err - } - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - f = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType) - } - - switch t.Kind() { - case reflect.Float32: - if !dc.Truncate && float64(float32(f)) != f { - return emptyValue, errCannotTruncate - } - - return reflect.ValueOf(float32(f)), nil - case reflect.Float64: - return reflect.ValueOf(f), nil - default: - return emptyValue, ValueDecoderError{ - Name: "FloatDecodeValue", - Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, - Received: reflect.Zero(t), - } - } -} - -// FloatDecodeValue is the ValueDecoderFunc for float types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "FloatDecodeValue", - Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, - Received: val, - } - } - - elem, err := dvd.floatDecodeType(ec, vr, val.Type()) - if err != nil { - return err - } - - val.SetFloat(elem.Float()) - return nil -} - -// StringDecodeValue is the ValueDecoderFunc for string types. -// -// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - var str string - var err error - switch vr.Type() { - // TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed. - case bsontype.String: - str, err = vr.ReadString() - if err != nil { - return err - } - default: - return fmt.Errorf("cannot decode %v into a string type", vr.Type()) - } - if !val.CanSet() || val.Kind() != reflect.String { - return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} - } - - val.SetString(str) - return nil -} - -func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tJavaScript { - return emptyValue, ValueDecoderError{ - Name: "JavaScriptDecodeValue", - Types: []reflect.Type{tJavaScript}, - Received: reflect.Zero(t), - } - } - - var js string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.JavaScript: - js, err = vr.ReadJavascript() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.JavaScript(js)), nil -} - -// JavaScriptDecodeValue is the 
ValueDecoderFunc for the primitive.JavaScript type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tJavaScript { - return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} - } - - elem, err := dvd.javaScriptDecodeType(dctx, vr, tJavaScript) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} - -func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tSymbol { - return emptyValue, ValueDecoderError{ - Name: "SymbolDecodeValue", - Types: []reflect.Type{tSymbol}, - Received: reflect.Zero(t), - } - } - - var symbol string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - symbol, err = vr.ReadString() - case bsontype.Symbol: - symbol, err = vr.ReadSymbol() - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return emptyValue, err - } - - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "primitive.Symbol"} - } - symbol = string(data) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Symbol(symbol)), nil -} - -// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tSymbol { - return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} - } - - elem, err := dvd.symbolDecodeType(dctx, vr, tSymbol) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} - -func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tBinary { - return emptyValue, ValueDecoderError{ - Name: "BinaryDecodeValue", - Types: []reflect.Type{tBinary}, - Received: reflect.Zero(t), - } - } - - var data []byte - var subtype byte - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Binary: - data, subtype, err = vr.ReadBinary() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Binary", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}), nil -} - -// BinaryDecodeValue is the ValueDecoderFunc for Binary. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tBinary { - return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} - } - - elem, err := dvd.binaryDecodeType(dc, vr, tBinary) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tUndefined { - return emptyValue, ValueDecoderError{ - Name: "UndefinedDecodeValue", - Types: []reflect.Type{tUndefined}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Undefined: - err = vr.ReadUndefined() - case bsontype.Null: - err = vr.ReadNull() - default: - return emptyValue, fmt.Errorf("cannot decode %v into an Undefined", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Undefined{}), nil -} - -// UndefinedDecodeValue is the ValueDecoderFunc for Undefined. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tUndefined { - return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} - } - - elem, err := dvd.undefinedDecodeType(dc, vr, tUndefined) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// Accept both 12-byte string and pretty-printed 24-byte hex string formats. 
-func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tOID { - return emptyValue, ValueDecoderError{ - Name: "ObjectIDDecodeValue", - Types: []reflect.Type{tOID}, - Received: reflect.Zero(t), - } - } - - var oid primitive.ObjectID - var err error - switch vrType := vr.Type(); vrType { - case bsontype.ObjectID: - oid, err = vr.ReadObjectID() - if err != nil { - return emptyValue, err - } - case bsontype.String: - str, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - if oid, err = primitive.ObjectIDFromHex(str); err == nil { - break - } - if len(str) != 12 { - return emptyValue, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %v)", len(str)) - } - byteArr := []byte(str) - copy(oid[:], byteArr) - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an ObjectID", vrType) - } - - return reflect.ValueOf(oid), nil -} - -// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tOID { - return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} - } - - elem, err := dvd.objectIDDecodeType(dc, vr, tOID) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDateTime { - return emptyValue, ValueDecoderError{ - Name: "DateTimeDecodeValue", - Types: []reflect.Type{tDateTime}, - Received: reflect.Zero(t), - } - } - - var dt int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.DateTime: - dt, err = vr.ReadDateTime() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a DateTime", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.DateTime(dt)), nil -} - -// DateTimeDecodeValue is the ValueDecoderFunc for DateTime. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDateTime { - return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} - } - - elem, err := dvd.dateTimeDecodeType(dc, vr, tDateTime) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tNull { - return emptyValue, ValueDecoderError{ - Name: "NullDecodeValue", - Types: []reflect.Type{tNull}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Undefined: - err = vr.ReadUndefined() - case bsontype.Null: - err = vr.ReadNull() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Null", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Null{}), nil -} - -// NullDecodeValue is the ValueDecoderFunc for Null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tNull { - return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} - } - - elem, err := dvd.nullDecodeType(dc, vr, tNull) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tRegex { - return emptyValue, ValueDecoderError{ - Name: "RegexDecodeValue", - Types: []reflect.Type{tRegex}, - Received: reflect.Zero(t), - } - } - - var pattern, options string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Regex: - pattern, options, err = vr.ReadRegex() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Regex", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}), nil -} - -// RegexDecodeValue is the ValueDecoderFunc for Regex. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tRegex { - return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} - } - - elem, err := dvd.regexDecodeType(dc, vr, tRegex) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDBPointer { - return emptyValue, ValueDecoderError{ - Name: "DBPointerDecodeValue", - Types: []reflect.Type{tDBPointer}, - Received: reflect.Zero(t), - } - } - - var ns string - var pointer primitive.ObjectID - var err error - switch vrType := vr.Type(); vrType { - case bsontype.DBPointer: - ns, pointer, err = vr.ReadDBPointer() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a DBPointer", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}), nil -} - -// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDBPointer { - return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} - } - - elem, err := dvd.dBPointerDecodeType(dc, vr, tDBPointer) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { - if reflectType != tTimestamp { - return emptyValue, ValueDecoderError{ - Name: "TimestampDecodeValue", - Types: []reflect.Type{tTimestamp}, - Received: reflect.Zero(reflectType), - } - } - - var t, incr uint32 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Timestamp: - t, incr, err = vr.ReadTimestamp() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Timestamp", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Timestamp{T: t, I: incr}), nil -} - -// TimestampDecodeValue is the ValueDecoderFunc for Timestamp. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tTimestamp { - return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} - } - - elem, err := dvd.timestampDecodeType(dc, vr, tTimestamp) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tMinKey { - return emptyValue, ValueDecoderError{ - Name: "MinKeyDecodeValue", - Types: []reflect.Type{tMinKey}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.MinKey: - err = vr.ReadMinKey() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a MinKey", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.MinKey{}), nil -} - -// MinKeyDecodeValue is the ValueDecoderFunc for MinKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tMinKey { - return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} - } - - elem, err := dvd.minKeyDecodeType(dc, vr, tMinKey) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tMaxKey { - return emptyValue, ValueDecoderError{ - Name: "MaxKeyDecodeValue", - Types: []reflect.Type{tMaxKey}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.MaxKey: - err = vr.ReadMaxKey() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a MaxKey", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.MaxKey{}), nil -} - -// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tMaxKey { - return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} - } - - elem, err := dvd.maxKeyDecodeType(dc, vr, tMaxKey) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDecimal { - return emptyValue, ValueDecoderError{ - Name: "Decimal128DecodeValue", - Types: []reflect.Type{tDecimal}, - Received: reflect.Zero(t), - } - } - - var d128 primitive.Decimal128 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Decimal128: - d128, err = vr.ReadDecimal128() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(d128), nil -} - -// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDecimal { - return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} - } - - elem, err := dvd.decimal128DecodeType(dctx, vr, tDecimal) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tJSONNumber { - return emptyValue, ValueDecoderError{ - Name: "JSONNumberDecodeValue", - Types: []reflect.Type{tJSONNumber}, - Received: reflect.Zero(t), - } - } - - var jsonNum json.Number - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatFloat(f64, 'f', -1, 64)) - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatInt(int64(i32), 10)) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatInt(i64, 10)) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a json.Number", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(jsonNum), nil -} - -// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tJSONNumber { - return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} - } - - elem, err := dvd.jsonNumberDecodeType(dc, vr, tJSONNumber) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tURL { - return emptyValue, ValueDecoderError{ - Name: "URLDecodeValue", - Types: []reflect.Type{tURL}, - Received: reflect.Zero(t), - } - } - - urlPtr := &url.URL{} - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - var str string // Declare str here to avoid shadowing err during the ReadString call. - str, err = vr.ReadString() - if err != nil { - return emptyValue, err - } - - urlPtr, err = url.Parse(str) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a *url.URL", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(urlPtr).Elem(), nil -} - -// URLDecodeValue is the ValueDecoderFunc for url.URL. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tURL { - return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} - } - - elem, err := dvd.urlDecodeType(dc, vr, tURL) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// TimeDecodeValue is the ValueDecoderFunc for time.Time. -// -// Deprecated: TimeDecodeValue is not registered by default. 
Use TimeCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if vr.Type() != bsontype.DateTime { - return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) - } - - dt, err := vr.ReadDateTime() - if err != nil { - return err - } - - if !val.CanSet() || val.Type() != tTime { - return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} - } - - val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC())) - return nil -} - -// ByteSliceDecodeValue is the ValueDecoderFunc for []byte. -// -// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { - return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) - } - - if !val.CanSet() || val.Type() != tByteSlice { - return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - - if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - } - - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != 0x00 { - return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype) - } - - val.Set(reflect.ValueOf(data)) - return nil -} - -// MapDecodeValue is the ValueDecoderFunc for map[string]* types. -// -// Deprecated: MapDecodeValue is not registered by default. Use MapCodec.DecodeValue instead. 
-func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { - return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - switch vr.Type() { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeMap(val.Type())) - } - - eType := val.Type().Elem() - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return err - } - - if eType == tEmpty { - dc.Ancestor = val.Type() - } - - keyType := val.Type().Key() - for { - key, vr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } - if err != nil { - return err - } - - elem := reflect.New(eType).Elem() - - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - - val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem) - } - return nil -} - -// ArrayDecodeValue is the ValueDecoderFunc for array types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Array { - return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Array: - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - case bsontype.Binary: - if val.Type().Elem() != tByte { - return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got %v", vrType) - } - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) - } - - if len(data) > val.Len() { - return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type()) - } - - for idx, elem := range data { - val.Index(idx).Set(reflect.ValueOf(elem)) - } - return nil - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - default: - return fmt.Errorf("cannot decode %v into an array", vrType) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - elemsFunc = dvd.decodeD - default: - elemsFunc = dvd.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if len(elems) > val.Len() { - return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems)) - } - - for idx, elem := range elems { - val.Index(idx).Set(elem) - } - - return nil -} - -// SliceDecodeValue is the ValueDecoderFunc 
for slice types. -// -// Deprecated: SliceDecodeValue is not registered by default. Use SliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Slice { - return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vr.Type() { - case bsontype.Array: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - default: - return fmt.Errorf("cannot decode %v into a slice", vr.Type()) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - dc.Ancestor = val.Type() - elemsFunc = dvd.decodeD - default: - elemsFunc = dvd.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) - } - - val.SetLen(0) - val.Set(reflect.Append(val, elems...)) - - return nil -} - -// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - - // If BSON value is null and the go value is a pointer, then don't call - // UnmarshalBSONValue. 
Even if the Go pointer is already initialized (i.e., - // non-nil), encountering null in BSON will result in the pointer being - // directly set to nil here. Since the pointer is being replaced with nil, - // there is no opportunity (or reason) for the custom UnmarshalBSONValue logic - // to be called. - if vr.Type() == bsontype.Null && val.Kind() == reflect.Ptr { - val.Set(reflect.Zero(val.Type())) - - return vr.ReadNull() - } - - if val.Kind() == reflect.Ptr && val.IsNil() { - if !val.CanSet() { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - val.Set(reflect.New(val.Type().Elem())) - } - - if !val.Type().Implements(tValueUnmarshaler) { - if !val.CanAddr() { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. - } - - t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err - } - - m, ok := val.Interface().(ValueUnmarshaler) - if !ok { - // NB: this error should be unreachable due to the above checks - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - return m.UnmarshalBSONValue(t, src) -} - -// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - - if val.Kind() == reflect.Ptr && val.IsNil() { - if !val.CanSet() { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - val.Set(reflect.New(val.Type().Elem())) - } - - _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err - } - - // If the target Go value is a pointer and the BSON field value is empty, set the value to the - // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to - // change the pointer value from within the function (only the value at the pointer address), - // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON - // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches - // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and - // the JSON field value is "null". - if val.Kind() == reflect.Ptr && len(src) == 0 { - val.Set(reflect.Zero(val.Type())) - return nil - } - - if !val.Type().Implements(tUnmarshaler) { - if !val.CanAddr() { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. 
- } - - m, ok := val.Interface().(Unmarshaler) - if !ok { - // NB: this error should be unreachable due to the above checks - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - return m.UnmarshalBSON(src) -} - -// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. -// -// Deprecated: EmptyInterfaceDecodeValue is not registered by default. Use EmptyInterfaceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tEmpty { - return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - rtype, err := dc.LookupTypeMapEntry(vr.Type()) - if err != nil { - switch vr.Type() { - case bsontype.EmbeddedDocument: - if dc.Ancestor != nil { - rtype = dc.Ancestor - break - } - rtype = tD - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return err - } - } - - decoder, err := dc.LookupDecoder(rtype) - if err != nil { - return err - } - - elem := reflect.New(rtype).Elem() - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCoreDocument { - return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, 0)) - } - - val.SetLen(0) - - cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr) - val.Set(reflect.ValueOf(cdoc)) - return err -} - -func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) { - elems := make([]reflect.Value, 0) - - ar, err := vr.ReadArray() - if err != nil { - return nil, err - } - - eType := val.Type().Elem() - - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return nil, err - } - eTypeDecoder, _ := decoder.(typeDecoder) - - idx := 0 - for { - vr, err := ar.ReadValue() - if errors.Is(err, bsonrw.ErrEOA) { - break - } - if err != nil { - return nil, err - } - - elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) - if err != nil { - return nil, newDecodeError(strconv.Itoa(idx), err) - } - elems = append(elems, elem) - idx++ - } - - return elems, nil -} - -func (dvd DefaultValueDecoders) readCodeWithScope(dc DecodeContext, vr bsonrw.ValueReader) (primitive.CodeWithScope, error) { - var cws primitive.CodeWithScope - - code, dr, err := vr.ReadCodeWithScope() - if err != nil { - return cws, err - } - - scope := reflect.New(tD).Elem() - elems, err := dvd.decodeElemsFromDocumentReader(dc, dr) - if err != nil { - return cws, err - } - - scope.Set(reflect.MakeSlice(tD, 0, len(elems))) - scope.Set(reflect.Append(scope, elems...)) - - cws = primitive.CodeWithScope{ - Code: primitive.JavaScript(code), - Scope: scope.Interface().(primitive.D), - } - return cws, nil -} - -func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr 
bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tCodeWithScope { - return emptyValue, ValueDecoderError{ - Name: "CodeWithScopeDecodeValue", - Types: []reflect.Type{tCodeWithScope}, - Received: reflect.Zero(t), - } - } - - var cws primitive.CodeWithScope - var err error - switch vrType := vr.Type(); vrType { - case bsontype.CodeWithScope: - cws, err = dvd.readCodeWithScope(dc, vr) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(cws), nil -} - -// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCodeWithScope { - return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} - } - - elem, err := dvd.codeWithScopeDecodeType(dc, vr, tCodeWithScope) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) { - switch vr.Type() { - case bsontype.Type(0), bsontype.EmbeddedDocument: - default: - return nil, fmt.Errorf("cannot decode %v into a D", vr.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return nil, err - } - - return dvd.decodeElemsFromDocumentReader(dc, dr) -} - -func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) { - decoder, err := dc.LookupDecoder(tEmpty) - if err != nil { - return nil, err - } - - elems := 
make([]reflect.Value, 0) - for { - key, vr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } - if err != nil { - return nil, err - } - - val := reflect.New(tEmpty).Elem() - err = decoder.DecodeValue(dc, vr, val) - if err != nil { - return nil, newDecodeError(key, err) - } - - elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()})) - } - - return elems, nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go deleted file mode 100644 index 4751ae995e..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ /dev/null @@ -1,856 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "net/url" - "reflect" - "sync" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var defaultValueEncoders DefaultValueEncoders - -var bvwPool = bsonrw.NewBSONValueWriterPool() - -var errInvalidValue = errors.New("cannot encode invalid element") - -var sliceWriterPool = sync.Pool{ - New: func() interface{} { - sw := make(bsonrw.SliceWriter, 0) - return &sw - }, -} - -func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error { - vw, err := dw.WriteDocumentElement(e.Key) - if err != nil { - return err - } - - if e.Value == nil { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value)) - if err != nil { - return err - } - - err = encoder.EncodeValue(ec, vw, 
reflect.ValueOf(e.Value)) - if err != nil { - return err - } - return nil -} - -// DefaultValueEncoders is a namespace type for the default ValueEncoders used -// when creating a registry. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -type DefaultValueEncoders struct{} - -// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with -// the provided RegistryBuilder. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { - if rb == nil { - panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) - } - rb. - RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec). - RegisterTypeEncoder(tTime, defaultTimeCodec). - RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec). - RegisterTypeEncoder(tCoreArray, defaultArrayCodec). - RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)). - RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)). - RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)). - RegisterTypeEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)). - RegisterTypeEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)). - RegisterTypeEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)). - RegisterTypeEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)). - RegisterTypeEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)). - RegisterTypeEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)). - RegisterTypeEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)). - RegisterTypeEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)). - RegisterTypeEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)). 
- RegisterTypeEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)). - RegisterTypeEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)). - RegisterTypeEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)). - RegisterTypeEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)). - RegisterTypeEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)). - RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)). - RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Uint, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint8, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint16, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint32, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint64, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)). - RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)). - RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)). - RegisterDefaultEncoder(reflect.Map, defaultMapCodec). - RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec). - RegisterDefaultEncoder(reflect.String, defaultStringCodec). - RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()). - RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()). - RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)). - RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)). 
- RegisterHookEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)) -} - -// BooleanEncodeValue is the ValueEncoderFunc for bool types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Bool { - return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} - } - return vw.WriteBoolean(val.Bool()) -} - -func fitsIn32Bits(i int64) bool { - return math.MinInt32 <= i && i <= math.MaxInt32 -} - -// IntEncodeValue is the ValueEncoderFunc for int types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32: - return vw.WriteInt32(int32(val.Int())) - case reflect.Int: - i64 := val.Int() - if fitsIn32Bits(i64) { - return vw.WriteInt32(int32(i64)) - } - return vw.WriteInt64(i64) - case reflect.Int64: - i64 := val.Int() - if ec.MinSize && fitsIn32Bits(i64) { - return vw.WriteInt32(int32(i64)) - } - return vw.WriteInt64(i64) - } - - return ValueEncoderError{ - Name: "IntEncodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: val, - } -} - -// UintEncodeValue is the ValueEncoderFunc for uint types. -// -// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead. 
-func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Uint8, reflect.Uint16: - return vw.WriteInt32(int32(val.Uint())) - case reflect.Uint, reflect.Uint32, reflect.Uint64: - u64 := val.Uint() - if ec.MinSize && u64 <= math.MaxInt32 { - return vw.WriteInt32(int32(u64)) - } - if u64 > math.MaxInt64 { - return fmt.Errorf("%d overflows int64", u64) - } - return vw.WriteInt64(int64(u64)) - } - - return ValueEncoderError{ - Name: "UintEncodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } -} - -// FloatEncodeValue is the ValueEncoderFunc for float types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Float32, reflect.Float64: - return vw.WriteDouble(val.Float()) - } - - return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} -} - -// StringEncodeValue is the ValueEncoderFunc for string types. -// -// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. -func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.String { - return ValueEncoderError{ - Name: "StringEncodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: val, - } - } - - return vw.WriteString(val.String()) -} - -// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
-func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tOID { - return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} - } - return vw.WriteObjectID(val.Interface().(primitive.ObjectID)) -} - -// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDecimal { - return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val} - } - return vw.WriteDecimal128(val.Interface().(primitive.Decimal128)) -} - -// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tJSONNumber { - return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} - } - jsnum := val.Interface().(json.Number) - - // Attempt int first, then float64 - if i64, err := jsnum.Int64(); err == nil { - return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64)) - } - - f64, err := jsnum.Float64() - if err != nil { - return err - } - - return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64)) -} - -// URLEncodeValue is the ValueEncoderFunc for url.URL. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
-func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tURL { - return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val} - } - u := val.Interface().(url.URL) - return vw.WriteString(u.String()) -} - -// TimeEncodeValue is the ValueEncoderFunc for time.TIme. -// -// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead. -func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTime { - return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} - } - tt := val.Interface().(time.Time) - dt := primitive.NewDateTimeFromTime(tt) - return vw.WriteDateTime(int64(dt)) -} - -// ByteSliceEncodeValue is the ValueEncoderFunc for []byte. -// -// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tByteSlice { - return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - if val.IsNil() { - return vw.WriteNull() - } - return vw.WriteBinary(val.Interface().([]byte)) -} - -// MapEncodeValue is the ValueEncoderFunc for map[string]* types. -// -// Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead. 
-func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { - return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - if val.IsNil() { - // If we have a nill map but we can't WriteNull, that means we're probably trying to encode - // to a TopLevel document. We can't currently tell if this is what actually happened, but if - // there's a deeper underlying problem, the error will also be returned from WriteDocument, - // so just continue. The operations on a map reflection value are valid, so we can call - // MapKeys within mapEncodeValue without a problem. - err := vw.WriteNull() - if err == nil { - return nil - } - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - return dve.mapEncodeValue(ec, dw, val, nil) -} - -// mapEncodeValue handles encoding of the values of a map. The collisionFn returns -// true if the provided key exists, this is mainly used for inline maps in the -// struct codec. 
-func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - keys := val.MapKeys() - for _, key := range keys { - if collisionFn != nil && collisionFn(key.String()) { - return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) - } - - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { - return lookupErr - } - - vw, err := dw.WriteDocumentElement(key.String()) - if err != nil { - return err - } - - if errors.Is(lookupErr, errInvalidValue) { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} - -// ArrayEncodeValue is the ValueEncoderFunc for array types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Array { - return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. - if val.Type().Elem() == tE { - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - e := val.Index(idx).Interface().(primitive.E) - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - // If we have a []byte we want to treat it as a binary instead of as an array. 
- if val.Type().Elem() == tByte { - var byteSlice []byte - for idx := 0; idx < val.Len(); idx++ { - byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) - } - return vw.WriteBinary(byteSlice) - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if errors.Is(lookupErr, errInvalidValue) { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -// SliceEncodeValue is the ValueEncoderFunc for slice types. -// -// Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Slice { - return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. 
- if val.Type().ConvertibleTo(tD) { - d := val.Convert(tD).Interface().(primitive.D) - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for _, e := range d { - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if errors.Is(lookupErr, errInvalidValue) { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncoder ValueEncoder, currVal reflect.Value) (ValueEncoder, reflect.Value, error) { - if origEncoder != nil || (currVal.Kind() != reflect.Interface) { - return origEncoder, currVal, nil - } - currVal = currVal.Elem() - if !currVal.IsValid() { - return nil, currVal, errInvalidValue - } - currEncoder, err := ec.LookupEncoder(currVal.Type()) - - return currEncoder, currVal, err -} - -// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. -// -// Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead. 
-func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tEmpty { - return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(val.Elem().Type()) - if err != nil { - return err - } - - return encoder.EncodeValue(ec, vw, val.Elem()) -} - -// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement ValueMarshaler - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} - case val.Type().Implements(tValueMarshaler): - // If ValueMarshaler is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tValueMarshaler) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tValueMarshaler) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} - } - - m, ok := val.Interface().(ValueMarshaler) - if !ok { - return vw.WriteNull() - } - t, data, err := m.MarshalBSONValue() - if err != nil { - return err - } - return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) -} - -// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
-func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement Marshaler - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} - case val.Type().Implements(tMarshaler): - // If Marshaler is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tMarshaler) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tMarshaler) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} - } - - m, ok := val.Interface().(Marshaler) - if !ok { - return vw.WriteNull() - } - data, err := m.MarshalBSON() - if err != nil { - return err - } - return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) -} - -// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
-func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement Proxy - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} - case val.Type().Implements(tProxy): - // If Proxy is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tProxy) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tProxy) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} - } - - m, ok := val.Interface().(Proxy) - if !ok { - return vw.WriteNull() - } - v, err := m.ProxyBSON() - if err != nil { - return err - } - if v == nil { - encoder, err := ec.LookupEncoder(nil) - if err != nil { - return err - } - return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil)) - } - vv := reflect.ValueOf(v) - switch vv.Kind() { - case reflect.Ptr, reflect.Interface: - vv = vv.Elem() - } - encoder, err := ec.LookupEncoder(vv.Type()) - if err != nil { - return err - } - return encoder.EncodeValue(ec, vw, vv) -} - -// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tJavaScript { - return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} - } - - return vw.WriteJavascript(val.String()) -} - -// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
-func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tSymbol { - return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} - } - - return vw.WriteSymbol(val.String()) -} - -// BinaryEncodeValue is the ValueEncoderFunc for Binary. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tBinary { - return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} - } - b := val.Interface().(primitive.Binary) - - return vw.WriteBinaryWithSubtype(b.Data, b.Subtype) -} - -// UndefinedEncodeValue is the ValueEncoderFunc for Undefined. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tUndefined { - return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} - } - - return vw.WriteUndefined() -} - -// DateTimeEncodeValue is the ValueEncoderFunc for DateTime. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDateTime { - return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} - } - - return vw.WriteDateTime(val.Int()) -} - -// NullEncodeValue is the ValueEncoderFunc for Null. 
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tNull { - return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} - } - - return vw.WriteNull() -} - -// RegexEncodeValue is the ValueEncoderFunc for Regex. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tRegex { - return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} - } - - regex := val.Interface().(primitive.Regex) - - return vw.WriteRegex(regex.Pattern, regex.Options) -} - -// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDBPointer { - return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} - } - - dbp := val.Interface().(primitive.DBPointer) - - return vw.WriteDBPointer(dbp.DB, dbp.Pointer) -} - -// TimestampEncodeValue is the ValueEncoderFunc for Timestamp. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
-func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTimestamp { - return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} - } - - ts := val.Interface().(primitive.Timestamp) - - return vw.WriteTimestamp(ts.T, ts.I) -} - -// MinKeyEncodeValue is the ValueEncoderFunc for MinKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tMinKey { - return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} - } - - return vw.WriteMinKey() -} - -// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tMaxKey { - return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} - } - - return vw.WriteMaxKey() -} - -// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
-func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCoreDocument { - return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} - } - - cdoc := val.Interface().(bsoncore.Document) - - return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc) -} - -// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCodeWithScope { - return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} - } - - cws := val.Interface().(primitive.CodeWithScope) - - dw, err := vw.WriteCodeWithScope(string(cws.Code)) - if err != nil { - return err - } - - sw := sliceWriterPool.Get().(*bsonrw.SliceWriter) - defer sliceWriterPool.Put(sw) - *sw = (*sw)[:0] - - scopeVW := bvwPool.Get(sw) - defer bvwPool.Put(scopeVW) - - encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope)) - if err != nil { - return err - } - - err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope)) - if err != nil { - return err - } - - err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw) - if err != nil { - return err - } - return dw.WriteDocumentEnd() -} - -// isImplementationNil returns if val is a nil pointer and inter is implemented on a concrete type -func isImplementationNil(val reflect.Value, inter reflect.Type) bool { - vt := val.Type() - for vt.Kind() == reflect.Ptr { - vt = vt.Elem() - } - return vt.Implements(inter) && val.Kind() == reflect.Ptr && val.IsNil() -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go 
b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go deleted file mode 100644 index 4613e5a1ec..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2022-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsoncodec provides a system for encoding values to BSON representations and decoding -// values from BSON representations. This package considers both binary BSON and ExtendedJSON as -// BSON representations. The types in this package enable a flexible system for handling this -// encoding and decoding. -// -// The codec system is composed of two parts: -// -// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON -// representations. -// -// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for -// retrieving them. -// -// # ValueEncoders and ValueDecoders -// -// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON. -// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the -// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc -// is provided to allow use of a function with the correct signature as a ValueEncoder. An -// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and -// to provide configuration information. -// -// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that -// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to -// allow the use of a function with the correct signature as a ValueDecoder. 
A DecodeContext -// instance is provided and serves similar functionality to the EncodeContext. -// -// # Registry -// -// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. See the Registry type -// documentation for examples of registering various custom encoders and decoders. A Registry can -// have three main types of codecs: -// -// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and -// RegisterTypeDecoder methods. The registered codec will be invoked when encoding/decoding a value -// whose type matches the registered type exactly. -// If the registered type is an interface, the codec will be invoked when encoding or decoding -// values whose type is the interface, but not for values with concrete types that implement the -// interface. -// -// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and -// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs -// will be invoked when encoding or decoding values whose types implement the interface. An example -// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method -// for any value whose type implements bson.Marshaler, regardless of the value's concrete type. -// -// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type -// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}. -// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances, -// respectively, when decoding into a bson.D. The following code would change the behavior so these -// values decode as Go int instances instead: -// -// intType := reflect.TypeOf(int(0)) -// registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) -// -// 4. 
Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and -// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding -// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't -// match a registered type or hook encoder/decoder first. These methods should be used to change the -// behavior for all values for a specific kind. -// -// # Registry Lookup Procedure -// -// When looking up an encoder in a Registry, the precedence rules are as follows: -// -// 1. A type encoder registered for the exact type of the value. -// -// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to -// the value. If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and -// bsoncodec.ValueMarshaler), the first one registered will be selected. Note that registries -// constructed using bson.NewRegistry have driver-defined hooks registered for the -// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take -// precedence over any new hooks. -// -// 3. A kind encoder registered for the value's kind. -// -// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The -// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder -// will be returned if no decoder is found. -// -// # DefaultValueEncoders and DefaultValueDecoders -// -// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and -// ValueDecoders for handling a wide range of Go types, including all of the types within the -// primitive package. To make registering these codecs easier, a helper method on each type is -// provided. 
For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for -// the DefaultValueDecoders type the method is called RegisterDefaultDecoders, this method also -// handles registering type map entries for each BSON type. -package bsoncodec diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go deleted file mode 100644 index 098368f071..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// EmptyInterfaceCodec is the Codec used for interface{} values. -// -// Deprecated: EmptyInterfaceCodec will not be directly configurable in Go -// Driver 2.0. To configure the empty interface encode and decode behavior, use -// the configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the empty interface -// encode and decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. -// -// For example, to configure a mongo.Client to unmarshal BSON binary field -// values as a Go byte slice, use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// BinaryAsSlice: true, -// }) -// -// See the deprecation notice for each field in EmptyInterfaceCodec for the -// corresponding settings. 
-type EmptyInterfaceCodec struct { - // DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the - // "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. - // - // Deprecated: Use bson.Decoder.BinaryAsSlice or options.BSONOptions.BinaryAsSlice instead. - DecodeBinaryAsSlice bool -} - -var ( - defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec() - - // Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it - // to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a - // collection. - _ typeDecoder = defaultEmptyInterfaceCodec -) - -// NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts. -// -// Deprecated: NewEmptyInterfaceCodec will not be available in Go Driver 2.0. See -// [EmptyInterfaceCodec] for more details. -func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec { - interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...) - - codec := EmptyInterfaceCodec{} - if interfaceOpt.DecodeBinaryAsSlice != nil { - codec.DecodeBinaryAsSlice = *interfaceOpt.DecodeBinaryAsSlice - } - return &codec -} - -// EncodeValue is the ValueEncoderFunc for interface{}. 
-func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tEmpty { - return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(val.Elem().Type()) - if err != nil { - return err - } - - return encoder.EncodeValue(ec, vw, val.Elem()) -} - -func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) { - isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument - if isDocument { - if dc.defaultDocumentType != nil { - // If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return - // that type. - return dc.defaultDocumentType, nil - } - if dc.Ancestor != nil { - // Using ancestor information rather than looking up the type map entry forces consistent decoding. - // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry - // has been registered. - return dc.Ancestor, nil - } - } - - rtype, err := dc.LookupTypeMapEntry(valueType) - if err == nil { - return rtype, nil - } - - if isDocument { - // For documents, fallback to looking up a type map entry for bsontype.Type(0) or bsontype.EmbeddedDocument, - // depending on the original valueType. 
- var lookupType bsontype.Type - switch valueType { - case bsontype.Type(0): - lookupType = bsontype.EmbeddedDocument - case bsontype.EmbeddedDocument: - lookupType = bsontype.Type(0) - } - - rtype, err = dc.LookupTypeMapEntry(lookupType) - if err == nil { - return rtype, nil - } - } - - return nil, err -} - -func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tEmpty { - return emptyValue, ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.Zero(t)} - } - - rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type()) - if err != nil { - switch vr.Type() { - case bsontype.Null: - return reflect.Zero(t), vr.ReadNull() - default: - return emptyValue, err - } - } - - decoder, err := dc.LookupDecoder(rtype) - if err != nil { - return emptyValue, err - } - - elem, err := decodeTypeOrValue(decoder, dc, vr, rtype) - if err != nil { - return emptyValue, err - } - - if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary { - binElem := elem.Interface().(primitive.Binary) - if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { - elem = reflect.ValueOf(binElem.Data) - } - } - - return elem, nil -} - -// DecodeValue is the ValueDecoderFunc for interface{}. 
-func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tEmpty { - return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - elem, err := eic.decodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.Set(elem) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go deleted file mode 100644 index d7e00ffa8d..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ /dev/null @@ -1,343 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -var defaultMapCodec = NewMapCodec() - -// MapCodec is the Codec used for map values. -// -// Deprecated: MapCodec will not be directly configurable in Go Driver 2.0. To -// configure the map encode and decode behavior, use the configuration methods -// on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the map encode and -// decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. 
-// -// For example, to configure a mongo.Client to marshal nil Go maps as empty BSON -// documents, use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// NilMapAsEmpty: true, -// }) -// -// See the deprecation notice for each field in MapCodec for the corresponding -// settings. -type MapCodec struct { - // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination - // value passed to Decode before unmarshaling BSON documents into them. - // - // Deprecated: Use bson.Decoder.ZeroMaps or options.BSONOptions.ZeroMaps instead. - DecodeZerosMap bool - - // EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of - // BSON null. - // - // Deprecated: Use bson.Encoder.NilMapAsEmpty or options.BSONOptions.NilMapAsEmpty instead. - EncodeNilAsEmpty bool - - // EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name - // strings using fmt.Sprintf() instead of the default string conversion logic. - // - // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt or - // options.BSONOptions.StringifyMapKeysWithFmt instead. - EncodeKeysWithStringer bool -} - -// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key. -// This applies to types used as map keys and is similar to encoding.TextMarshaler. -type KeyMarshaler interface { - MarshalKey() (key string, err error) -} - -// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation -// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler. -// -// UnmarshalKey must be able to decode the form generated by MarshalKey. -// UnmarshalKey must copy the text if it wishes to retain the text -// after returning. -type KeyUnmarshaler interface { - UnmarshalKey(key string) error -} - -// NewMapCodec returns a MapCodec with options opts. 
-// -// Deprecated: NewMapCodec will not be available in Go Driver 2.0. See -// [MapCodec] for more details. -func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { - mapOpt := bsonoptions.MergeMapCodecOptions(opts...) - - codec := MapCodec{} - if mapOpt.DecodeZerosMap != nil { - codec.DecodeZerosMap = *mapOpt.DecodeZerosMap - } - if mapOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty - } - if mapOpt.EncodeKeysWithStringer != nil { - codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer - } - return &codec -} - -// EncodeValue is the ValueEncoder for map[*]* types. -func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Map { - return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty { - // If we have a nil map but we can't WriteNull, that means we're probably trying to encode - // to a TopLevel document. We can't currently tell if this is what actually happened, but if - // there's a deeper underlying problem, the error will also be returned from WriteDocument, - // so just continue. The operations on a map reflection value are valid, so we can call - // MapKeys within mapEncodeValue without a problem. - err := vw.WriteNull() - if err == nil { - return nil - } - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - return mc.mapEncodeValue(ec, dw, val, nil) -} - -// mapEncodeValue handles encoding of the values of a map. The collisionFn returns -// true if the provided key exists, this is mainly used for inline maps in the -// struct codec. 
-func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - keys := val.MapKeys() - for _, key := range keys { - keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt) - if err != nil { - return err - } - - if collisionFn != nil && collisionFn(keyStr) { - return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) - } - - currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { - return lookupErr - } - - vw, err := dw.WriteDocumentElement(keyStr) - if err != nil { - return err - } - - if errors.Is(lookupErr, errInvalidValue) { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} - -// DecodeValue is the ValueDecoder for map[string/decimal]* types. 
-func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if val.Kind() != reflect.Map || (!val.CanSet() && val.IsNil()) { - return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - default: - return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeMap(val.Type())) - } - - if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) { - clearMap(val) - } - - eType := val.Type().Elem() - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return err - } - eTypeDecoder, _ := decoder.(typeDecoder) - - if eType == tEmpty { - dc.Ancestor = val.Type() - } - - keyType := val.Type().Key() - - for { - key, vr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } - if err != nil { - return err - } - - k, err := mc.decodeKey(key, keyType) - if err != nil { - return err - } - - elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) - if err != nil { - return newDecodeError(key, err) - } - - val.SetMapIndex(k, elem) - } - return nil -} - -func clearMap(m reflect.Value) { - var none reflect.Value - for _, k := range m.MapKeys() { - m.SetMapIndex(k, none) - } -} - -func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) { - if mc.EncodeKeysWithStringer || encodeKeysWithStringer { - return fmt.Sprint(val), nil - } - - // keys of any string type are used directly - if val.Kind() == reflect.String { - return val.String(), nil - } - // KeyMarshalers are marshaled - if km, ok := val.Interface().(KeyMarshaler); ok { - if 
val.Kind() == reflect.Ptr && val.IsNil() { - return "", nil - } - buf, err := km.MarshalKey() - if err == nil { - return buf, nil - } - return "", err - } - // keys implement encoding.TextMarshaler are marshaled. - if km, ok := val.Interface().(encoding.TextMarshaler); ok { - if val.Kind() == reflect.Ptr && val.IsNil() { - return "", nil - } - - buf, err := km.MarshalText() - if err != nil { - return "", err - } - - return string(buf), nil - } - - switch val.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(val.Int(), 10), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return strconv.FormatUint(val.Uint(), 10), nil - } - return "", fmt.Errorf("unsupported key type: %v", val.Type()) -} - -var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem() -var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - -func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) { - keyVal := reflect.ValueOf(key) - var err error - switch { - // First, if EncodeKeysWithStringer is not enabled, try to decode withKeyUnmarshaler - case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType): - keyVal = reflect.New(keyType) - v := keyVal.Interface().(KeyUnmarshaler) - err = v.UnmarshalKey(key) - keyVal = keyVal.Elem() - // Try to decode encoding.TextUnmarshalers. 
- case reflect.PtrTo(keyType).Implements(textUnmarshalerType): - keyVal = reflect.New(keyType) - v := keyVal.Interface().(encoding.TextUnmarshaler) - err = v.UnmarshalText([]byte(key)) - keyVal = keyVal.Elem() - // Otherwise, go to type specific behavior - default: - switch keyType.Kind() { - case reflect.String: - keyVal = reflect.ValueOf(key).Convert(keyType) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, parseErr := strconv.ParseInt(key, 10, 64) - if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) { - err = fmt.Errorf("failed to unmarshal number key %v", key) - } - keyVal = reflect.ValueOf(n).Convert(keyType) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, parseErr := strconv.ParseUint(key, 10, 64) - if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) { - err = fmt.Errorf("failed to unmarshal number key %v", key) - break - } - keyVal = reflect.ValueOf(n).Convert(keyType) - case reflect.Float32, reflect.Float64: - if mc.EncodeKeysWithStringer { - parsed, err := strconv.ParseFloat(key, 64) - if err != nil { - return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %w", keyType.Kind(), err) - } - keyVal = reflect.ValueOf(parsed) - break - } - fallthrough - default: - return keyVal, fmt.Errorf("unsupported key type: %v", keyType) - } - } - return keyVal, err -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go deleted file mode 100644 index fbd9f0a9e9..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import "fmt" - -type mode int - -const ( - _ mode = iota - mTopLevel - mDocument - mArray - mValue - mElement - mCodeWithScope - mSpacer -) - -func (m mode) String() string { - var str string - - switch m { - case mTopLevel: - str = "TopLevel" - case mDocument: - str = "DocumentMode" - case mArray: - str = "ArrayMode" - case mValue: - str = "ValueMode" - case mElement: - str = "ElementMode" - case mCodeWithScope: - str = "CodeWithScopeMode" - case mSpacer: - str = "CodeWithScopeSpacerFrame" - default: - str = "UnknownMode" - } - - return str -} - -// TransitionError is an error returned when an invalid progressing a -// ValueReader or ValueWriter state machine occurs. -type TransitionError struct { - parent mode - current mode - destination mode -} - -func (te TransitionError) Error() string { - if te.destination == mode(0) { - return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current) - } - if te.parent == mode(0) { - return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination) - } - return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go deleted file mode 100644 index ddfa4a33e1..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -var _ ValueEncoder = &PointerCodec{} -var _ ValueDecoder = &PointerCodec{} - -// PointerCodec is the Codec used for pointers. -// -// Deprecated: PointerCodec will not be directly accessible in Go Driver 2.0. To -// override the default pointer encode and decode behavior, create a new registry -// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new -// encoder and decoder for pointers. -// -// For example, -// -// reg := bson.NewRegistry() -// reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder) -// reg.RegisterKindDecoder(reflect.Ptr, myPointerDecoder) -type PointerCodec struct { - ecache typeEncoderCache - dcache typeDecoderCache -} - -// NewPointerCodec returns a PointerCodec that has been initialized. -// -// Deprecated: NewPointerCodec will not be available in Go Driver 2.0. See -// [PointerCodec] for more details. -func NewPointerCodec() *PointerCodec { - return &PointerCodec{} -} - -// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil -// or looking up an encoder for the type of value the pointer points to. 
-func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.Ptr { - if !val.IsValid() { - return vw.WriteNull() - } - return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - typ := val.Type() - if v, ok := pc.ecache.Load(typ); ok { - if v == nil { - return ErrNoEncoder{Type: typ} - } - return v.EncodeValue(ec, vw, val.Elem()) - } - // TODO(charlie): handle concurrent requests for the same type - enc, err := ec.LookupEncoder(typ.Elem()) - enc = pc.ecache.LoadOrStore(typ, enc) - if err != nil { - return err - } - return enc.EncodeValue(ec, vw, val.Elem()) -} - -// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and -// using that to decode. If the BSON value is Null, this method will set the pointer to nil. -func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Ptr { - return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} - } - - typ := val.Type() - if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(typ)) - return vr.ReadNull() - } - if vr.Type() == bsontype.Undefined { - val.Set(reflect.Zero(typ)) - return vr.ReadUndefined() - } - - if val.IsNil() { - val.Set(reflect.New(typ.Elem())) - } - - if v, ok := pc.dcache.Load(typ); ok { - if v == nil { - return ErrNoDecoder{Type: typ} - } - return v.DecodeValue(dc, vr, val.Elem()) - } - // TODO(charlie): handle concurrent requests for the same type - dec, err := dc.LookupDecoder(typ.Elem()) - dec = pc.dcache.LoadOrStore(typ, dec) - if err != nil { - return err - } - return dec.DecodeValue(dc, vr, val.Elem()) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go deleted 
file mode 100644 index 4cf2b01ab4..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types -// that implement this interface with have ProxyBSON called during the encoding process and that -// value will be encoded in place for the implementer. -type Proxy interface { - ProxyBSON() (interface{}, error) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go deleted file mode 100644 index 196c491bbb..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ /dev/null @@ -1,524 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "errors" - "fmt" - "reflect" - "sync" - - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder. -// -// Deprecated: ErrNilType will not be supported in Go Driver 2.0. -var ErrNilType = errors.New("cannot perform a decoder lookup on ") - -// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder. -// -// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0. -var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder") - -// ErrNoEncoder is returned when there wasn't an encoder available for a type. 
-// -// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0. -type ErrNoEncoder struct { - Type reflect.Type -} - -func (ene ErrNoEncoder) Error() string { - if ene.Type == nil { - return "no encoder found for " - } - return "no encoder found for " + ene.Type.String() -} - -// ErrNoDecoder is returned when there wasn't a decoder available for a type. -// -// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0. -type ErrNoDecoder struct { - Type reflect.Type -} - -func (end ErrNoDecoder) Error() string { - return "no decoder found for " + end.Type.String() -} - -// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type. -// -// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0. -type ErrNoTypeMapEntry struct { - Type bsontype.Type -} - -func (entme ErrNoTypeMapEntry) Error() string { - return "no type map entry found for " + entme.Type.String() -} - -// ErrNotInterface is returned when the provided type is not an interface. -// -// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0. -var ErrNotInterface = errors.New("The provided type is not an interface") - -// A RegistryBuilder is used to build a Registry. This type is not goroutine -// safe. -// -// Deprecated: Use Registry instead. -type RegistryBuilder struct { - registry *Registry -} - -// NewRegistryBuilder creates a new empty RegistryBuilder. -// -// Deprecated: Use NewRegistry instead. -func NewRegistryBuilder() *RegistryBuilder { - return &RegistryBuilder{ - registry: NewRegistry(), - } -} - -// RegisterCodec will register the provided ValueCodec for the provided type. -// -// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead. 
-func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder { - rb.RegisterTypeEncoder(t, codec) - rb.RegisterTypeDecoder(t, codec) - return rb -} - -// RegisterTypeEncoder will register the provided ValueEncoder for the provided type. -// -// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered -// for a pointer to that type. -// -// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It -// will not be called when marshaling a non-interface type that implements the interface. -// -// Deprecated: Use Registry.RegisterTypeEncoder instead. -func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - rb.registry.RegisterTypeEncoder(t, enc) - return rb -} - -// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when -// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not -// an interface (i.e. t.Kind() != reflect.Interface), this method will panic. -// -// Deprecated: Use Registry.RegisterInterfaceEncoder instead. -func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - rb.registry.RegisterInterfaceEncoder(t, enc) - return rb -} - -// RegisterTypeDecoder will register the provided ValueDecoder for the provided type. -// -// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered -// for a pointer to that type. -// -// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface. -// It will not be called when unmarshaling into a non-interface type that implements the interface. -// -// Deprecated: Use Registry.RegisterTypeDecoder instead. 
-func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - rb.registry.RegisterTypeDecoder(t, dec) - return rb -} - -// RegisterHookDecoder will register an decoder for the provided interface type t. This decoder will be called when -// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not -// an interface (i.e. t.Kind() != reflect.Interface), this method will panic. -// -// Deprecated: Use Registry.RegisterInterfaceDecoder instead. -func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - rb.registry.RegisterInterfaceDecoder(t, dec) - return rb -} - -// RegisterEncoder registers the provided type and encoder pair. -// -// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead. -func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - if t == tEmpty { - rb.registry.RegisterTypeEncoder(t, enc) - return rb - } - switch t.Kind() { - case reflect.Interface: - rb.registry.RegisterInterfaceEncoder(t, enc) - default: - rb.registry.RegisterTypeEncoder(t, enc) - } - return rb -} - -// RegisterDecoder registers the provided type and decoder pair. -// -// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead. -func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - if t == nil { - rb.registry.RegisterTypeDecoder(t, dec) - return rb - } - if t == tEmpty { - rb.registry.RegisterTypeDecoder(t, dec) - return rb - } - switch t.Kind() { - case reflect.Interface: - rb.registry.RegisterInterfaceDecoder(t, dec) - default: - rb.registry.RegisterTypeDecoder(t, dec) - } - return rb -} - -// RegisterDefaultEncoder will register the provided ValueEncoder to the provided -// kind. -// -// Deprecated: Use Registry.RegisterKindEncoder instead. 
-func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder { - rb.registry.RegisterKindEncoder(kind, enc) - return rb -} - -// RegisterDefaultDecoder will register the provided ValueDecoder to the -// provided kind. -// -// Deprecated: Use Registry.RegisterKindDecoder instead. -func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder { - rb.registry.RegisterKindDecoder(kind, dec) - return rb -} - -// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this -// mapping is decoding situations where an empty interface is used and a default type needs to be -// created and decoded into. -// -// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON -// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents -// to decode to bson.Raw, use the following code: -// -// rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) -// -// Deprecated: Use Registry.RegisterTypeMapEntry instead. -func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { - rb.registry.RegisterTypeMapEntry(bt, rt) - return rb -} - -// Build creates a Registry from the current state of this RegistryBuilder. -// -// Deprecated: Use NewRegistry instead. 
-func (rb *RegistryBuilder) Build() *Registry { - r := &Registry{ - interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...), - interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...), - typeEncoders: rb.registry.typeEncoders.Clone(), - typeDecoders: rb.registry.typeDecoders.Clone(), - kindEncoders: rb.registry.kindEncoders.Clone(), - kindDecoders: rb.registry.kindDecoders.Clone(), - } - rb.registry.typeMap.Range(func(k, v interface{}) bool { - if k != nil && v != nil { - r.typeMap.Store(k, v) - } - return true - }) - return r -} - -// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main -// typed passed around and Encoders and Decoders are constructed from it. -type Registry struct { - interfaceEncoders []interfaceValueEncoder - interfaceDecoders []interfaceValueDecoder - typeEncoders *typeEncoderCache - typeDecoders *typeDecoderCache - kindEncoders *kindEncoderCache - kindDecoders *kindDecoderCache - typeMap sync.Map // map[bsontype.Type]reflect.Type -} - -// NewRegistry creates a new empty Registry. -func NewRegistry() *Registry { - return &Registry{ - typeEncoders: new(typeEncoderCache), - typeDecoders: new(typeDecoderCache), - kindEncoders: new(kindEncoderCache), - kindDecoders: new(kindDecoderCache), - } -} - -// RegisterTypeEncoder registers the provided ValueEncoder for the provided type. -// -// The type will be used as provided, so an encoder can be registered for a type and a different -// encoder can be registered for a pointer to that type. -// -// If the given type is an interface, the encoder will be called when marshaling a type that is -// that interface. It will not be called when marshaling a non-interface type that implements the -// interface. To get the latter behavior, call RegisterHookEncoder instead. -// -// RegisterTypeEncoder should not be called concurrently with any other Registry method. 
-func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) { - r.typeEncoders.Store(valueType, enc) -} - -// RegisterTypeDecoder registers the provided ValueDecoder for the provided type. -// -// The type will be used as provided, so a decoder can be registered for a type and a different -// decoder can be registered for a pointer to that type. -// -// If the given type is an interface, the decoder will be called when unmarshaling into a type that -// is that interface. It will not be called when unmarshaling into a non-interface type that -// implements the interface. To get the latter behavior, call RegisterHookDecoder instead. -// -// RegisterTypeDecoder should not be called concurrently with any other Registry method. -func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) { - r.typeDecoders.Store(valueType, dec) -} - -// RegisterKindEncoder registers the provided ValueEncoder for the provided kind. -// -// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For -// example, consider the type MyInt defined as -// -// type MyInt int32 -// -// To define an encoder for MyInt and int32, use RegisterKindEncoder like -// -// reg.RegisterKindEncoder(reflect.Int32, myEncoder) -// -// RegisterKindEncoder should not be called concurrently with any other Registry method. -func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) { - r.kindEncoders.Store(kind, enc) -} - -// RegisterKindDecoder registers the provided ValueDecoder for the provided kind. -// -// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For -// example, consider the type MyInt defined as -// -// type MyInt int32 -// -// To define an decoder for MyInt and int32, use RegisterKindDecoder like -// -// reg.RegisterKindDecoder(reflect.Int32, myDecoder) -// -// RegisterKindDecoder should not be called concurrently with any other Registry method. 
-func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) { - r.kindDecoders.Store(kind, dec) -} - -// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will -// be called when marshaling a type if the type implements iface or a pointer to the type -// implements iface. If the provided type is not an interface -// (i.e. iface.Kind() != reflect.Interface), this method will panic. -// -// RegisterInterfaceEncoder should not be called concurrently with any other Registry method. -func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) { - if iface.Kind() != reflect.Interface { - panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", iface, iface.Kind()) - panic(panicStr) - } - - for idx, encoder := range r.interfaceEncoders { - if encoder.i == iface { - r.interfaceEncoders[idx].ve = enc - return - } - } - - r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc}) -} - -// RegisterInterfaceDecoder registers an decoder for the provided interface type iface. This decoder will -// be called when unmarshaling into a type if the type implements iface or a pointer to the type -// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface), -// this method will panic. -// -// RegisterInterfaceDecoder should not be called concurrently with any other Registry method. 
-func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) { - if iface.Kind() != reflect.Interface { - panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", iface, iface.Kind()) - panic(panicStr) - } - - for idx, decoder := range r.interfaceDecoders { - if decoder.i == iface { - r.interfaceDecoders[idx].vd = dec - return - } - } - - r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec}) -} - -// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this -// mapping is decoding situations where an empty interface is used and a default type needs to be -// created and decoded into. -// -// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON -// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents -// to decode to bson.Raw, use the following code: -// -// reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) -func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { - r.typeMap.Store(bt, rt) -} - -// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup -// order: -// -// 1. An encoder registered for the exact type. If the given type is an interface, an encoder -// registered using RegisterTypeEncoder for that interface will be selected. -// -// 2. An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type -// or by a pointer to the type. -// -// 3. An encoder registered using RegisterKindEncoder for the kind of value. -// -// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for -// concurrent use by multiple goroutines after all codecs and encoders are registered. 
-func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { - if valueType == nil { - return nil, ErrNoEncoder{Type: valueType} - } - enc, found := r.lookupTypeEncoder(valueType) - if found { - if enc == nil { - return nil, ErrNoEncoder{Type: valueType} - } - return enc, nil - } - - enc, found = r.lookupInterfaceEncoder(valueType, true) - if found { - return r.typeEncoders.LoadOrStore(valueType, enc), nil - } - - if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { - return r.storeTypeEncoder(valueType, v), nil - } - return nil, ErrNoEncoder{Type: valueType} -} - -func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder { - return r.typeEncoders.LoadOrStore(rt, enc) -} - -func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) { - return r.typeEncoders.Load(rt) -} - -func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) { - if valueType == nil { - return nil, false - } - for _, ienc := range r.interfaceEncoders { - if valueType.Implements(ienc.i) { - return ienc.ve, true - } - if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) { - // if *t implements an interface, this will catch if t implements an interface further - // ahead in interfaceEncoders - defaultEnc, found := r.lookupInterfaceEncoder(valueType, false) - if !found { - defaultEnc, _ = r.kindEncoders.Load(valueType.Kind()) - } - return newCondAddrEncoder(ienc.ve, defaultEnc), true - } - } - return nil, false -} - -// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup -// order: -// -// 1. A decoder registered for the exact type. If the given type is an interface, a decoder -// registered using RegisterTypeDecoder for that interface will be selected. -// -// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by -// a pointer to the type. -// -// 3. 
A decoder registered using RegisterKindDecoder for the kind of value. -// -// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for -// concurrent use by multiple goroutines after all codecs and decoders are registered. -func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { - if valueType == nil { - return nil, ErrNilType - } - dec, found := r.lookupTypeDecoder(valueType) - if found { - if dec == nil { - return nil, ErrNoDecoder{Type: valueType} - } - return dec, nil - } - - dec, found = r.lookupInterfaceDecoder(valueType, true) - if found { - return r.storeTypeDecoder(valueType, dec), nil - } - - if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { - return r.storeTypeDecoder(valueType, v), nil - } - return nil, ErrNoDecoder{Type: valueType} -} - -func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) { - return r.typeDecoders.Load(valueType) -} - -func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder { - return r.typeDecoders.LoadOrStore(typ, dec) -} - -func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) { - for _, idec := range r.interfaceDecoders { - if valueType.Implements(idec.i) { - return idec.vd, true - } - if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) { - // if *t implements an interface, this will catch if t implements an interface further - // ahead in interfaceDecoders - defaultDec, found := r.lookupInterfaceDecoder(valueType, false) - if !found { - defaultDec, _ = r.kindDecoders.Load(valueType.Kind()) - } - return newCondAddrDecoder(idec.vd, defaultDec), true - } - } - return nil, false -} - -// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON -// type. If no type is found, ErrNoTypeMapEntry is returned. 
-// -// LookupTypeMapEntry should not be called concurrently with any other Registry method. -func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { - v, ok := r.typeMap.Load(bt) - if v == nil || !ok { - return nil, ErrNoTypeMapEntry{Type: bt} - } - return v.(reflect.Type), nil -} - -type interfaceValueEncoder struct { - i reflect.Type - ve ValueEncoder -} - -type interfaceValueDecoder struct { - i reflect.Type - vd ValueDecoder -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go deleted file mode 100644 index 14c9fd2564..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "errors" - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var defaultSliceCodec = NewSliceCodec() - -// SliceCodec is the Codec used for slice values. -// -// Deprecated: SliceCodec will not be directly configurable in Go Driver 2.0. To -// configure the slice encode and decode behavior, use the configuration methods -// on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the slice encode and -// decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. 
-// -// For example, to configure a mongo.Client to marshal nil Go slices as empty -// BSON arrays, use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// NilSliceAsEmpty: true, -// }) -// -// See the deprecation notice for each field in SliceCodec for the corresponding -// settings. -type SliceCodec struct { - // EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of - // BSON null. - // - // Deprecated: Use bson.Encoder.NilSliceAsEmpty instead. - EncodeNilAsEmpty bool -} - -// NewSliceCodec returns a MapCodec with options opts. -// -// Deprecated: NewSliceCodec will not be available in Go Driver 2.0. See -// [SliceCodec] for more details. -func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec { - sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...) - - codec := SliceCodec{} - if sliceOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *sliceOpt.EncodeNilAsEmpty - } - return &codec -} - -// EncodeValue is the ValueEncoder for slice types. -func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Slice { - return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty { - return vw.WriteNull() - } - - // If we have a []byte we want to treat it as a binary instead of as an array. - if val.Type().Elem() == tByte { - byteSlice := make([]byte, val.Len()) - reflect.Copy(reflect.ValueOf(byteSlice), val) - return vw.WriteBinary(byteSlice) - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. 
- if val.Type() == tD || val.Type().ConvertibleTo(tD) { - d := val.Convert(tD).Interface().(primitive.D) - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for _, e := range d { - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if errors.Is(lookupErr, errInvalidValue) { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -// DecodeValue is the ValueDecoder for slice types. 
-func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Slice { - return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Array: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - case bsontype.Binary: - if val.Type().Elem() != tByte { - return fmt.Errorf("SliceDecodeValue can only decode a binary into a byte array, got %v", vrType) - } - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) - } - val.SetLen(0) - val.Set(reflect.AppendSlice(val, reflect.ValueOf(data))) - return nil - case bsontype.String: - if sliceType := val.Type().Elem(); sliceType != tByte { - return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", sliceType) - } - str, err := vr.ReadString() - if err != nil { - return err - } - byteStr := []byte(str) - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) - } - val.SetLen(0) - val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr))) - return nil - default: - return fmt.Errorf("cannot decode %v into a slice", vrType) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - dc.Ancestor = val.Type() - elemsFunc = 
defaultValueDecoders.decodeD - default: - elemsFunc = defaultValueDecoders.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) - } - - val.SetLen(0) - val.Set(reflect.Append(val, elems...)) - - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go deleted file mode 100644 index a8f885a854..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// StringCodec is the Codec used for string values. -// -// Deprecated: StringCodec will not be directly accessible in Go Driver 2.0. To -// override the default string encode and decode behavior, create a new registry -// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new -// encoder and decoder for strings. -// -// For example, -// -// reg := bson.NewRegistry() -// reg.RegisterKindEncoder(reflect.String, myStringEncoder) -// reg.RegisterKindDecoder(reflect.String, myStringDecoder) -type StringCodec struct { - // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. - // If false, a string made from the raw object ID bytes will be used. Defaults to true. - // - // Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. 
- DecodeObjectIDAsHex bool -} - -var ( - defaultStringCodec = NewStringCodec() - - // Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be - // used by collection type decoders (e.g. map, slice, etc) to set individual values in a - // collection. - _ typeDecoder = defaultStringCodec -) - -// NewStringCodec returns a StringCodec with options opts. -// -// Deprecated: NewStringCodec will not be available in Go Driver 2.0. See -// [StringCodec] for more details. -func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { - stringOpt := bsonoptions.MergeStringCodecOptions(opts...) - return &StringCodec{*stringOpt.DecodeObjectIDAsHex} -} - -// EncodeValue is the ValueEncoder for string types. -func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.String { - return ValueEncoderError{ - Name: "StringEncodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: val, - } - } - - return vw.WriteString(val.String()) -} - -func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t.Kind() != reflect.String { - return emptyValue, ValueDecoderError{ - Name: "StringDecodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: reflect.Zero(t), - } - } - - var str string - var err error - switch vr.Type() { - case bsontype.String: - str, err = vr.ReadString() - if err != nil { - return emptyValue, err - } - case bsontype.ObjectID: - oid, err := vr.ReadObjectID() - if err != nil { - return emptyValue, err - } - if sc.DecodeObjectIDAsHex { - str = oid.Hex() - } else { - // TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string. 
- byteArray := [12]byte(oid) - str = string(byteArray[:]) - } - case bsontype.Symbol: - str, err = vr.ReadSymbol() - if err != nil { - return emptyValue, err - } - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return emptyValue, err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "string"} - } - str = string(data) - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a string type", vr.Type()) - } - - return reflect.ValueOf(str), nil -} - -// DecodeValue is the ValueDecoder for string types. -func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.String { - return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} - } - - elem, err := sc.decodeType(dctx, vr, val.Type()) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go deleted file mode 100644 index f8d9690c13..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ /dev/null @@ -1,736 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strings" - "sync" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type. -type DecodeError struct { - keys []string - wrapped error -} - -// Unwrap returns the underlying error -func (de *DecodeError) Unwrap() error { - return de.wrapped -} - -// Error implements the error interface. -func (de *DecodeError) Error() string { - // The keys are stored in reverse order because the de.keys slice is builtup while propagating the error up the - // stack of BSON keys, so we call de.Keys(), which reverses them. - keyPath := strings.Join(de.Keys(), ".") - return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped) -} - -// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down -// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be -// a string, the keys slice will be ["a", "b", "c"]. -func (de *DecodeError) Keys() []string { - reversedKeys := make([]string, 0, len(de.keys)) - for idx := len(de.keys) - 1; idx >= 0; idx-- { - reversedKeys = append(reversedKeys, de.keys[idx]) - } - - return reversedKeys -} - -// Zeroer allows custom struct types to implement a report of zero -// state. All struct types that don't implement Zeroer or where IsZero -// returns false are considered to be not zero. -type Zeroer interface { - IsZero() bool -} - -// StructCodec is the Codec used for struct values. -// -// Deprecated: StructCodec will not be directly configurable in Go Driver 2.0. 
-// To configure the struct encode and decode behavior, use the configuration -// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the struct encode -// and decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. -// -// For example, to configure a mongo.Client to omit zero-value structs when -// using the "omitempty" struct tag, use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// OmitZeroStruct: true, -// }) -// -// See the deprecation notice for each field in StructCodec for the corresponding -// settings. -type StructCodec struct { - cache sync.Map // map[reflect.Type]*structDescription - parser StructTagParser - - // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the - // destination value passed to Decode before unmarshaling BSON documents into them. - // - // Deprecated: Use bson.Decoder.ZeroStructs or options.BSONOptions.ZeroStructs instead. - DecodeZeroStruct bool - - // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the - // destination value passed to Decode before unmarshaling BSON documents into them. - // - // Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. - DecodeDeepZeroInline bool - - // EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g. - // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag - // option is set. - // - // Deprecated: Use bson.Encoder.OmitZeroStruct or options.BSONOptions.OmitZeroStruct instead. - EncodeOmitDefaultStruct bool - - // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields. - // - // Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be - // supported in Go Driver 2.0. 
- AllowUnexportedFields bool - - // OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is - // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The - // default value is true. - // - // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates or - // options.BSONOptions.ErrorOnInlineDuplicates instead. - OverwriteDuplicatedInlinedFields bool -} - -var _ ValueEncoder = &StructCodec{} -var _ ValueDecoder = &StructCodec{} - -// NewStructCodec returns a StructCodec that uses p for struct tag parsing. -// -// Deprecated: NewStructCodec will not be available in Go Driver 2.0. See -// [StructCodec] for more details. -func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { - if p == nil { - return nil, errors.New("a StructTagParser must be provided to NewStructCodec") - } - - structOpt := bsonoptions.MergeStructCodecOptions(opts...) - - codec := &StructCodec{ - parser: p, - } - - if structOpt.DecodeZeroStruct != nil { - codec.DecodeZeroStruct = *structOpt.DecodeZeroStruct - } - if structOpt.DecodeDeepZeroInline != nil { - codec.DecodeDeepZeroInline = *structOpt.DecodeDeepZeroInline - } - if structOpt.EncodeOmitDefaultStruct != nil { - codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct - } - if structOpt.OverwriteDuplicatedInlinedFields != nil { - codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields - } - if structOpt.AllowUnexportedFields != nil { - codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields - } - - return codec, nil -} - -// EncodeValue handles encoding generic struct types. 
-func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Struct { - return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} - } - - sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates) - if err != nil { - return err - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - var rv reflect.Value - for _, desc := range sd.fl { - if desc.inline == nil { - rv = val.Field(desc.idx) - } else { - rv, err = fieldByIndexErr(val, desc.inline) - if err != nil { - continue - } - } - - desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) - - if err != nil && !errors.Is(err, errInvalidValue) { - return err - } - - if errors.Is(err, errInvalidValue) { - if desc.omitEmpty { - continue - } - vw2, err := dw.WriteDocumentElement(desc.name) - if err != nil { - return err - } - err = vw2.WriteNull() - if err != nil { - return err - } - continue - } - - if desc.encoder == nil { - return ErrNoEncoder{Type: rv.Type()} - } - - encoder := desc.encoder - - var empty bool - if cz, ok := encoder.(CodecZeroer); ok { - empty = cz.IsTypeZero(rv.Interface()) - } else if rv.Kind() == reflect.Interface { - // isEmpty will not treat an interface rv as an interface, so we need to check for the - // nil interface separately. 
- empty = rv.IsNil() - } else { - empty = isEmpty(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) - } - if desc.omitEmpty && empty { - continue - } - - vw2, err := dw.WriteDocumentElement(desc.name) - if err != nil { - return err - } - - ectx := EncodeContext{ - Registry: ec.Registry, - MinSize: desc.minSize || ec.MinSize, - errorOnInlineDuplicates: ec.errorOnInlineDuplicates, - stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt, - nilMapAsEmpty: ec.nilMapAsEmpty, - nilSliceAsEmpty: ec.nilSliceAsEmpty, - nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty, - omitZeroStruct: ec.omitZeroStruct, - useJSONStructTags: ec.useJSONStructTags, - } - err = encoder.EncodeValue(ectx, vw2, rv) - if err != nil { - return err - } - } - - if sd.inlineMap >= 0 { - rv := val.Field(sd.inlineMap) - collisionFn := func(key string) bool { - _, exists := sd.fm[key] - return exists - } - - return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn) - } - - return dw.WriteDocumentEnd() -} - -func newDecodeError(key string, original error) error { - var de *DecodeError - if !errors.As(original, &de) { - return &DecodeError{ - keys: []string{key}, - wrapped: original, - } - } - - de.keys = append(de.keys, key) - return de -} - -// DecodeValue implements the Codec interface. -// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. -// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. 
-func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Struct { - return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - if err := vr.ReadNull(); err != nil { - return err - } - - val.Set(reflect.Zero(val.Type())) - return nil - case bsontype.Undefined: - if err := vr.ReadUndefined(); err != nil { - return err - } - - val.Set(reflect.Zero(val.Type())) - return nil - default: - return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) - } - - sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false) - if err != nil { - return err - } - - if sc.DecodeZeroStruct || dc.zeroStructs { - val.Set(reflect.Zero(val.Type())) - } - if sc.DecodeDeepZeroInline && sd.inline { - val.Set(deepZero(val.Type())) - } - - var decoder ValueDecoder - var inlineMap reflect.Value - if sd.inlineMap >= 0 { - inlineMap = val.Field(sd.inlineMap) - decoder, err = dc.LookupDecoder(inlineMap.Type().Elem()) - if err != nil { - return err - } - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - for { - name, vr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } - if err != nil { - return err - } - - fd, exists := sd.fm[name] - if !exists { - // if the original name isn't found in the struct description, try again with the name in lowercase - // this could match if a BSON tag isn't specified because by default, describeStruct lowercases all field - // names - fd, exists = sd.fm[strings.ToLower(name)] - } - - if !exists { - if sd.inlineMap < 0 { - // The encoding/json package requires a flag to return on error for non-existent fields. - // This functionality seems appropriate for the struct codec. 
- err = vr.Skip() - if err != nil { - return err - } - continue - } - - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - - elem := reflect.New(inlineMap.Type().Elem()).Elem() - dc.Ancestor = inlineMap.Type() - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - inlineMap.SetMapIndex(reflect.ValueOf(name), elem) - continue - } - - var field reflect.Value - if fd.inline == nil { - field = val.Field(fd.idx) - } else { - field, err = getInlineField(val, fd.inline) - if err != nil { - return err - } - } - - if !field.CanSet() { // Being settable is a super set of being addressable. - innerErr := fmt.Errorf("field %v is not settable", field) - return newDecodeError(fd.name, innerErr) - } - if field.Kind() == reflect.Ptr && field.IsNil() { - field.Set(reflect.New(field.Type().Elem())) - } - field = field.Addr() - - dctx := DecodeContext{ - Registry: dc.Registry, - Truncate: fd.truncate || dc.Truncate, - defaultDocumentType: dc.defaultDocumentType, - binaryAsSlice: dc.binaryAsSlice, - useJSONStructTags: dc.useJSONStructTags, - useLocalTimeZone: dc.useLocalTimeZone, - zeroMaps: dc.zeroMaps, - zeroStructs: dc.zeroStructs, - } - - if fd.decoder == nil { - return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) - } - - err = fd.decoder.DecodeValue(dctx, vr, field.Elem()) - if err != nil { - return newDecodeError(fd.name, err) - } - } - - return nil -} - -func isEmpty(v reflect.Value, omitZeroStruct bool) bool { - kind := v.Kind() - if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { - return v.Interface().(Zeroer).IsZero() - } - switch kind { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Struct: - if !omitZeroStruct { - return false - } - vt := v.Type() - if vt == tTime { - return v.Interface().(time.Time).IsZero() - } - numField := vt.NumField() - for i := 0; i < numField; i++ { - ff := vt.Field(i) - if ff.PkgPath != "" && 
!ff.Anonymous { - continue // Private field - } - if !isEmpty(v.Field(i), omitZeroStruct) { - return false - } - } - return true - } - return !v.IsValid() || v.IsZero() -} - -type structDescription struct { - fm map[string]fieldDescription - fl []fieldDescription - inlineMap int - inline bool -} - -type fieldDescription struct { - name string // BSON key name - fieldName string // struct field name - idx int - omitEmpty bool - minSize bool - truncate bool - inline []int - encoder ValueEncoder - decoder ValueDecoder -} - -type byIndex []fieldDescription - -func (bi byIndex) Len() int { return len(bi) } - -func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] } - -func (bi byIndex) Less(i, j int) bool { - // If a field is inlined, its index in the top level struct is stored at inline[0] - iIdx, jIdx := bi[i].idx, bi[j].idx - if len(bi[i].inline) > 0 { - iIdx = bi[i].inline[0] - } - if len(bi[j].inline) > 0 { - jIdx = bi[j].inline[0] - } - if iIdx != jIdx { - return iIdx < jIdx - } - for k, biik := range bi[i].inline { - if k >= len(bi[j].inline) { - return false - } - if biik != bi[j].inline[k] { - return biik < bi[j].inline[k] - } - } - return len(bi[i].inline) < len(bi[j].inline) -} - -func (sc *StructCodec) describeStruct( - r *Registry, - t reflect.Type, - useJSONStructTags bool, - errorOnDuplicates bool, -) (*structDescription, error) { - // We need to analyze the struct, including getting the tags, collecting - // information about inlining, and create a map of the field name to the field. - if v, ok := sc.cache.Load(t); ok { - return v.(*structDescription), nil - } - // TODO(charlie): Only describe the struct once when called - // concurrently with the same type. 
- ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates) - if err != nil { - return nil, err - } - if v, loaded := sc.cache.LoadOrStore(t, ds); loaded { - ds = v.(*structDescription) - } - return ds, nil -} - -func (sc *StructCodec) describeStructSlow( - r *Registry, - t reflect.Type, - useJSONStructTags bool, - errorOnDuplicates bool, -) (*structDescription, error) { - numFields := t.NumField() - sd := &structDescription{ - fm: make(map[string]fieldDescription, numFields), - fl: make([]fieldDescription, 0, numFields), - inlineMap: -1, - } - - var fields []fieldDescription - for i := 0; i < numFields; i++ { - sf := t.Field(i) - if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) { - // field is private or unexported fields aren't allowed, ignore - continue - } - - sfType := sf.Type - encoder, err := r.LookupEncoder(sfType) - if err != nil { - encoder = nil - } - decoder, err := r.LookupDecoder(sfType) - if err != nil { - decoder = nil - } - - description := fieldDescription{ - fieldName: sf.Name, - idx: i, - encoder: encoder, - decoder: decoder, - } - - var stags StructTags - // If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser - // instead of the parser defined on the codec. 
- if useJSONStructTags { - stags, err = JSONFallbackStructTagParser.ParseStructTags(sf) - } else { - stags, err = sc.parser.ParseStructTags(sf) - } - if err != nil { - return nil, err - } - if stags.Skip { - continue - } - description.name = stags.Name - description.omitEmpty = stags.OmitEmpty - description.minSize = stags.MinSize - description.truncate = stags.Truncate - - if stags.Inline { - sd.inline = true - switch sfType.Kind() { - case reflect.Map: - if sd.inlineMap >= 0 { - return nil, errors.New("(struct " + t.String() + ") multiple inline maps") - } - if sfType.Key() != tString { - return nil, errors.New("(struct " + t.String() + ") inline map must have a string keys") - } - sd.inlineMap = description.idx - case reflect.Ptr: - sfType = sfType.Elem() - if sfType.Kind() != reflect.Struct { - return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) - } - fallthrough - case reflect.Struct: - inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates) - if err != nil { - return nil, err - } - for _, fd := range inlinesf.fl { - if fd.inline == nil { - fd.inline = []int{i, fd.idx} - } else { - fd.inline = append([]int{i}, fd.inline...) - } - fields = append(fields, fd) - - } - default: - return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) - } - continue - } - fields = append(fields, description) - } - - // Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name - sort.Slice(fields, func(i, j int) bool { - x := fields - // sort field by name, breaking ties with depth, then - // breaking ties with index sequence. 
- if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].inline) != len(x[j].inline) { - return len(x[i].inline) < len(x[j].inline) - } - return byIndex(x).Less(i, j) - }) - - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - sd.fl = append(sd.fl, fi) - sd.fm[name] = fi - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates { - return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) - } - sd.fl = append(sd.fl, dominant) - sd.fm[name] = dominant - } - - sort.Sort(byIndex(sd.fl)) - - return sd, nil -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's inlining rules. If there are multiple top-level -// fields, the boolean will be false: This condition is an error in Go -// and we skip all the fields. -func dominantField(fields []fieldDescription) (fieldDescription, bool) { - // The fields are sorted in increasing index-length order, then by presence of tag. - // That means that the first field is the dominant one. We need only check - // for error cases: two fields at top level. 
- if len(fields) > 1 && - len(fields[0].inline) == len(fields[1].inline) { - return fieldDescription{}, false - } - return fields[0], true -} - -func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) { - defer func() { - if recovered := recover(); recovered != nil { - switch r := recovered.(type) { - case string: - err = fmt.Errorf("%s", r) - case error: - err = r - } - } - }() - - result = v.FieldByIndex(index) - return -} - -func getInlineField(val reflect.Value, index []int) (reflect.Value, error) { - field, err := fieldByIndexErr(val, index) - if err == nil { - return field, nil - } - - // if parent of this element doesn't exist, fix its parent - inlineParent := index[:len(index)-1] - var fParent reflect.Value - if fParent, err = fieldByIndexErr(val, inlineParent); err != nil { - fParent, err = getInlineField(val, inlineParent) - if err != nil { - return fParent, err - } - } - fParent.Set(reflect.New(fParent.Type().Elem())) - - return fieldByIndexErr(val, index) -} - -// DeepZero returns recursive zero object -func deepZero(st reflect.Type) (result reflect.Value) { - if st.Kind() == reflect.Struct { - numField := st.NumField() - for i := 0; i < numField; i++ { - if result == emptyValue { - result = reflect.Indirect(reflect.New(st)) - } - f := result.Field(i) - if f.CanInterface() { - if f.Type().Kind() == reflect.Struct { - result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem()))) - } - } - } - } - return result -} - -// recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside -func recursivePointerTo(v reflect.Value) reflect.Value { - v = reflect.Indirect(v) - result := reflect.New(v.Type()) - if v.Kind() == reflect.Struct { - for i := 0; i < v.NumField(); i++ { - if f := v.Field(i); f.Kind() == reflect.Ptr { - if f.Elem().Kind() == reflect.Struct { - result.Elem().Field(i).Set(recursivePointerTo(f)) - } - } - } - } - - return result -} diff --git 
a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go deleted file mode 100644 index 18d85bfb03..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - "strings" -) - -// StructTagParser returns the struct tags for a given struct field. -// -// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. -type StructTagParser interface { - ParseStructTags(reflect.StructField) (StructTags, error) -} - -// StructTagParserFunc is an adapter that allows a generic function to be used -// as a StructTagParser. -// -// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. -type StructTagParserFunc func(reflect.StructField) (StructTags, error) - -// ParseStructTags implements the StructTagParser interface. -func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) { - return stpf(sf) -} - -// StructTags represents the struct tag fields that the StructCodec uses during -// the encoding and decoding process. -// -// In the case of a struct, the lowercased field name is used as the key for each exported -// field but this behavior may be changed using a struct tag. The tag may also contain flags to -// adjust the marshalling behavior for the field. -// -// The properties are defined below: -// -// OmitEmpty Only include the field if it's not set to the zero value for the type or to -// empty slices or maps. 
-// -// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's -// feasible while preserving the numeric value. -// -// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within -// a float32. -// -// Inline Inline the field, which must be a struct or a map, causing all of its fields -// or keys to be processed as if they were part of the outer struct. For maps, -// keys must not conflict with the bson keys of other struct fields. -// -// Skip This struct field should be skipped. This is usually denoted by parsing a "-" -// for the name. -// -// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. -type StructTags struct { - Name string - OmitEmpty bool - MinSize bool - Truncate bool - Inline bool - Skip bool -} - -// DefaultStructTagParser is the StructTagParser used by the StructCodec by default. -// It will handle the bson struct tag. See the documentation for StructTags to see -// what each of the returned fields means. -// -// If there is no name in the struct tag fields, the struct field name is lowercased. -// The tag formats accepted are: -// -// "[][,[,]]" -// -// `(...) bson:"[][,[,]]" (...)` -// -// An example: -// -// type T struct { -// A bool -// B int "myb" -// C string "myc,omitempty" -// D string `bson:",omitempty" json:"jsonkey"` -// E int64 ",minsize" -// F int64 "myf,omitempty,minsize" -// } -// -// A struct tag either consisting entirely of '-' or with a bson key with a -// value consisting entirely of '-' will return a StructTags with Skip true and -// the remaining fields will be their default values. -// -// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0. 
-var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { - key := strings.ToLower(sf.Name) - tag, ok := sf.Tag.Lookup("bson") - if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { - tag = string(sf.Tag) - } - return parseTags(key, tag) -} - -func parseTags(key string, tag string) (StructTags, error) { - var st StructTags - if tag == "-" { - st.Skip = true - return st, nil - } - - for idx, str := range strings.Split(tag, ",") { - if idx == 0 && str != "" { - key = str - } - switch str { - case "omitempty": - st.OmitEmpty = true - case "minsize": - st.MinSize = true - case "truncate": - st.Truncate = true - case "inline": - st.Inline = true - } - } - - st.Name = key - - return st, nil -} - -// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser -// but will also fallback to parsing the json tag instead on a field where the -// bson tag isn't available. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and -// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. -var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { - key := strings.ToLower(sf.Name) - tag, ok := sf.Tag.Lookup("bson") - if !ok { - tag, ok = sf.Tag.Lookup("json") - } - if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { - tag = string(sf.Tag) - } - - return parseTags(key, tag) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go deleted file mode 100644 index 22fb762c41..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -const ( - timeFormatString = "2006-01-02T15:04:05.999Z07:00" -) - -// TimeCodec is the Codec used for time.Time values. -// -// Deprecated: TimeCodec will not be directly configurable in Go Driver 2.0. -// To configure the time.Time encode and decode behavior, use the configuration -// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the time.Time encode -// and decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. -// -// For example, to configure a mongo.Client to ..., use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// UseLocalTimeZone: true, -// }) -// -// See the deprecation notice for each field in TimeCodec for the corresponding -// settings. -type TimeCodec struct { - // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. - // - // Deprecated: Use bson.Decoder.UseLocalTimeZone or options.BSONOptions.UseLocalTimeZone - // instead. - UseLocalTimeZone bool -} - -var ( - defaultTimeCodec = NewTimeCodec() - - // Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used - // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. - _ typeDecoder = defaultTimeCodec -) - -// NewTimeCodec returns a TimeCodec with options opts. -// -// Deprecated: NewTimeCodec will not be available in Go Driver 2.0. See -// [TimeCodec] for more details. -func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { - timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) 
- - codec := TimeCodec{} - if timeOpt.UseLocalTimeZone != nil { - codec.UseLocalTimeZone = *timeOpt.UseLocalTimeZone - } - return &codec -} - -func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tTime { - return emptyValue, ValueDecoderError{ - Name: "TimeDecodeValue", - Types: []reflect.Type{tTime}, - Received: reflect.Zero(t), - } - } - - var timeVal time.Time - switch vrType := vr.Type(); vrType { - case bsontype.DateTime: - dt, err := vr.ReadDateTime() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(dt/1000, dt%1000*1000000) - case bsontype.String: - // assume strings are in the isoTimeFormat - timeStr, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - timeVal, err = time.Parse(timeFormatString, timeStr) - if err != nil { - return emptyValue, err - } - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(i64/1000, i64%1000*1000000) - case bsontype.Timestamp: - t, _, err := vr.ReadTimestamp() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(int64(t), 0) - case bsontype.Null: - if err := vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err := vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType) - } - - if !tc.UseLocalTimeZone && !dc.useLocalTimeZone { - timeVal = timeVal.UTC() - } - return reflect.ValueOf(timeVal), nil -} - -// DecodeValue is the ValueDecoderFunc for time.Time. 
-func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tTime { - return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} - } - - elem, err := tc.decodeType(dc, vr, tTime) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// EncodeValue is the ValueEncoderFunc for time.TIme. -func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTime { - return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} - } - tt := val.Interface().(time.Time) - dt := primitive.NewDateTimeFromTime(tt) - return vw.WriteDateTime(int64(dt)) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go deleted file mode 100644 index 6ade17b7d3..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "net/url" - "reflect" - "time" - - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var tBool = reflect.TypeOf(false) -var tFloat64 = reflect.TypeOf(float64(0)) -var tInt32 = reflect.TypeOf(int32(0)) -var tInt64 = reflect.TypeOf(int64(0)) -var tString = reflect.TypeOf("") -var tTime = reflect.TypeOf(time.Time{}) - -var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() -var tByteSlice = reflect.TypeOf([]byte(nil)) -var tByte = reflect.TypeOf(byte(0x00)) -var tURL = reflect.TypeOf(url.URL{}) -var tJSONNumber = reflect.TypeOf(json.Number("")) - -var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem() -var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() -var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() -var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() -var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem() - -var tBinary = reflect.TypeOf(primitive.Binary{}) -var tUndefined = reflect.TypeOf(primitive.Undefined{}) -var tOID = reflect.TypeOf(primitive.ObjectID{}) -var tDateTime = reflect.TypeOf(primitive.DateTime(0)) -var tNull = reflect.TypeOf(primitive.Null{}) -var tRegex = reflect.TypeOf(primitive.Regex{}) -var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) -var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) -var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) -var tSymbol = reflect.TypeOf(primitive.Symbol("")) -var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) -var tDecimal = reflect.TypeOf(primitive.Decimal128{}) -var tMinKey = reflect.TypeOf(primitive.MinKey{}) -var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) -var tD = reflect.TypeOf(primitive.D{}) -var tA = reflect.TypeOf(primitive.A{}) -var tE = reflect.TypeOf(primitive.E{}) - -var tCoreDocument 
= reflect.TypeOf(bsoncore.Document{}) -var tCoreArray = reflect.TypeOf(bsoncore.Array{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go deleted file mode 100644 index 39b07135b1..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "math" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// UIntCodec is the Codec used for uint values. -// -// Deprecated: UIntCodec will not be directly configurable in Go Driver 2.0. To -// configure the uint encode and decode behavior, use the configuration methods -// on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the uint encode and -// decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. -// -// For example, to configure a mongo.Client to marshal Go uint values as the -// minimum BSON int size that can represent the value, use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// IntMinSize: true, -// }) -// -// See the deprecation notice for each field in UIntCodec for the corresponding -// settings. -type UIntCodec struct { - // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the - // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. - // - // Deprecated: Use bson.Encoder.IntMinSize or options.BSONOptions.IntMinSize instead. 
- EncodeToMinSize bool -} - -var ( - defaultUIntCodec = NewUIntCodec() - - // Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used - // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. - _ typeDecoder = defaultUIntCodec -) - -// NewUIntCodec returns a UIntCodec with options opts. -// -// Deprecated: NewUIntCodec will not be available in Go Driver 2.0. See -// [UIntCodec] for more details. -func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { - uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) - - codec := UIntCodec{} - if uintOpt.EncodeToMinSize != nil { - codec.EncodeToMinSize = *uintOpt.EncodeToMinSize - } - return &codec -} - -// EncodeValue is the ValueEncoder for uint types. -func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Uint8, reflect.Uint16: - return vw.WriteInt32(int32(val.Uint())) - case reflect.Uint, reflect.Uint32, reflect.Uint64: - u64 := val.Uint() - - // If ec.MinSize or if encodeToMinSize is true for a non-uint64 value we should write val as an int32 - useMinSize := ec.MinSize || (uic.EncodeToMinSize && val.Kind() != reflect.Uint64) - - if u64 <= math.MaxInt32 && useMinSize { - return vw.WriteInt32(int32(u64)) - } - if u64 > math.MaxInt64 { - return fmt.Errorf("%d overflows int64", u64) - } - return vw.WriteInt64(int64(u64)) - } - - return ValueEncoderError{ - Name: "UintEncodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } -} - -func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var i64 int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = 
vr.ReadInt64() - if err != nil { - return emptyValue, err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return emptyValue, errCannotTruncate - } - if f64 > float64(math.MaxInt64) { - return emptyValue, fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - i64 = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) - } - - switch t.Kind() { - case reflect.Uint8: - if i64 < 0 || i64 > math.MaxUint8 { - return emptyValue, fmt.Errorf("%d overflows uint8", i64) - } - - return reflect.ValueOf(uint8(i64)), nil - case reflect.Uint16: - if i64 < 0 || i64 > math.MaxUint16 { - return emptyValue, fmt.Errorf("%d overflows uint16", i64) - } - - return reflect.ValueOf(uint16(i64)), nil - case reflect.Uint32: - if i64 < 0 || i64 > math.MaxUint32 { - return emptyValue, fmt.Errorf("%d overflows uint32", i64) - } - - return reflect.ValueOf(uint32(i64)), nil - case reflect.Uint64: - if i64 < 0 { - return emptyValue, fmt.Errorf("%d overflows uint64", i64) - } - - return reflect.ValueOf(uint64(i64)), nil - case reflect.Uint: - if i64 < 0 { - return emptyValue, fmt.Errorf("%d overflows uint", i64) - } - v := uint64(i64) - if v > math.MaxUint { // Can we fit this inside of an uint - return emptyValue, fmt.Errorf("%d overflows uint", i64) - } - - return reflect.ValueOf(uint(v)), nil - default: - return emptyValue, ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: reflect.Zero(t), - } - } -} - -// DecodeValue is the ValueDecoder 
for uint types. -func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - elem, err := uic.decodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.SetUint(elem.Uint()) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go deleted file mode 100644 index 996bd17127..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type ByteSliceCodecOptions struct { - EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. -} - -// ByteSliceCodec creates a new *ByteSliceCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func ByteSliceCodec() *ByteSliceCodecOptions { - return &ByteSliceCodecOptions{} -} - -// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. 
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. -func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { - bs.EncodeNilAsEmpty = &b - return bs -} - -// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions { - bs := ByteSliceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeNilAsEmpty != nil { - bs.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - } - - return bs -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go deleted file mode 100644 index c40973c8d4..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2022-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsonoptions defines the optional configurations for the BSON codecs. -package bsonoptions diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go deleted file mode 100644 index f522c7e03f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type EmptyInterfaceCodecOptions struct { - DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. -} - -// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions { - return &EmptyInterfaceCodecOptions{} -} - -// SetDecodeBinaryAsSlice specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. -func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions { - e.DecodeBinaryAsSlice = &b - return e -} - -// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. 
-func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { - e := EmptyInterfaceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeBinaryAsSlice != nil { - e.DecodeBinaryAsSlice = opt.DecodeBinaryAsSlice - } - } - - return e -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go deleted file mode 100644 index a7a7c1d980..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// MapCodecOptions represents all possible options for map encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type MapCodecOptions struct { - DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. - EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. - // Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must - // either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a - // string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the - // encoding key type must be a string, an integer type, or a float. If true, the use of Stringer will override - // TextMarshaler/TextUnmarshaler. Defaults to false. 
- EncodeKeysWithStringer *bool -} - -// MapCodec creates a new *MapCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func MapCodec() *MapCodecOptions { - return &MapCodecOptions{} -} - -// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. -func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { - t.DecodeZerosMap = &b - return t -} - -// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. -func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { - t.EncodeNilAsEmpty = &b - return t -} - -// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the -// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key -// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with -// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer -// will override TextMarshaler/TextUnmarshaler. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. -func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { - t.EncodeKeysWithStringer = &b - return t -} - -// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. 
-func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { - s := MapCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeZerosMap != nil { - s.DecodeZerosMap = opt.DecodeZerosMap - } - if opt.EncodeNilAsEmpty != nil { - s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - if opt.EncodeKeysWithStringer != nil { - s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go deleted file mode 100644 index 3c1e4f35ba..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// SliceCodecOptions represents all possible options for slice encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type SliceCodecOptions struct { - EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. -} - -// SliceCodec creates a new *SliceCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func SliceCodec() *SliceCodecOptions { - return &SliceCodecOptions{} -} - -// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. 
-func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { - s.EncodeNilAsEmpty = &b - return s -} - -// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { - s := SliceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeNilAsEmpty != nil { - s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go deleted file mode 100644 index f8b76f996e..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -var defaultDecodeOIDAsHex = true - -// StringCodecOptions represents all possible options for string encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type StringCodecOptions struct { - DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true. -} - -// StringCodec creates a new *StringCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. 
-func StringCodec() *StringCodecOptions { - return &StringCodecOptions{} -} - -// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made -// from the raw object ID bytes will be used. Defaults to true. -// -// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. -func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions { - t.DecodeObjectIDAsHex = &b - return t -} - -// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions { - s := &StringCodecOptions{&defaultDecodeOIDAsHex} - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeObjectIDAsHex != nil { - s.DecodeObjectIDAsHex = opt.DecodeObjectIDAsHex - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go deleted file mode 100644 index 1cbfa32e8b..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -var defaultOverwriteDuplicatedInlinedFields = true - -// StructCodecOptions represents all possible options for struct encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. 
-type StructCodecOptions struct { - DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false. - DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when a inline value is decoded. Defaults to false. - EncodeOmitDefaultStruct *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false. - AllowUnexportedFields *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. - OverwriteDuplicatedInlinedFields *bool // Specifies if fields in inlined structs can be overwritten by higher level struct fields with the same key. Defaults to true. -} - -// StructCodec creates a new *StructCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func StructCodec() *StructCodecOptions { - return &StructCodecOptions{} -} - -// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. -func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions { - t.DecodeZeroStruct = &b - return t -} - -// SetDecodeDeepZeroInline specifies if structs should be zeroed before decoding into them. Defaults to false. -// -// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. -func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions { - t.DecodeDeepZeroInline = &b - return t -} - -// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all -// its values set to their default value. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. 
-func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions { - t.EncodeOmitDefaultStruct = &b - return t -} - -// SetOverwriteDuplicatedInlinedFields specifies if inlined struct fields can be overwritten by higher level struct fields with the -// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when -// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if -// there are duplicate keys after the struct is inlined. Defaults to true. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. -func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions { - t.OverwriteDuplicatedInlinedFields = &b - return t -} - -// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. -// -// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be -// supported in Go Driver 2.0. -func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions { - t.AllowUnexportedFields = &b - return t -} - -// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. 
-func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { - s := &StructCodecOptions{ - OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, - } - for _, opt := range opts { - if opt == nil { - continue - } - - if opt.DecodeZeroStruct != nil { - s.DecodeZeroStruct = opt.DecodeZeroStruct - } - if opt.DecodeDeepZeroInline != nil { - s.DecodeDeepZeroInline = opt.DecodeDeepZeroInline - } - if opt.EncodeOmitDefaultStruct != nil { - s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct - } - if opt.OverwriteDuplicatedInlinedFields != nil { - s.OverwriteDuplicatedInlinedFields = opt.OverwriteDuplicatedInlinedFields - } - if opt.AllowUnexportedFields != nil { - s.AllowUnexportedFields = opt.AllowUnexportedFields - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go deleted file mode 100644 index 3f38433d22..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// TimeCodecOptions represents all possible options for time.Time encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type TimeCodecOptions struct { - UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. -} - -// TimeCodec creates a new *TimeCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. 
-func TimeCodec() *TimeCodecOptions { - return &TimeCodecOptions{} -} - -// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. -func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { - t.UseLocalTimeZone = &b - return t -} - -// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { - t := TimeCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.UseLocalTimeZone != nil { - t.UseLocalTimeZone = opt.UseLocalTimeZone - } - } - - return t -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go deleted file mode 100644 index 5091e4d963..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// UIntCodecOptions represents all possible options for uint encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type UIntCodecOptions struct { - EncodeToMinSize *bool // Specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. 
-} - -// UIntCodec creates a new *UIntCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func UIntCodec() *UIntCodecOptions { - return &UIntCodecOptions{} -} - -// SetEncodeToMinSize specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead. -func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions { - u.EncodeToMinSize = &b - return u -} - -// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions { - u := UIntCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeToMinSize != nil { - u.EncodeToMinSize = opt.EncodeToMinSize - } - } - - return u -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go deleted file mode 100644 index 1e25570b85..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "errors" - "fmt" - "io" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// Copier is a type that allows copying between ValueReaders, ValueWriters, and -// []byte values. 
-// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -type Copier struct{} - -// NewCopier creates a new copier with the given registry. If a nil registry is provided -// a default registry is used. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func NewCopier() Copier { - return Copier{} -} - -// CopyDocument handles copying a document from src to dst. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func CopyDocument(dst ValueWriter, src ValueReader) error { - return Copier{}.CopyDocument(dst, src) -} - -// CopyDocument handles copying one document from the src to the dst. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { - dr, err := src.ReadDocument() - if err != nil { - return err - } - - dw, err := dst.WriteDocument() - if err != nil { - return err - } - - return c.copyDocumentCore(dw, dr) -} - -// CopyArrayFromBytes copies the values from a BSON array represented as a -// []byte to a ValueWriter. -// -// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { - aw, err := dst.WriteArray() - if err != nil { - return err - } - - err = c.CopyBytesToArrayWriter(aw, src) - if err != nil { - return err - } - - return aw.WriteArrayEnd() -} - -// CopyDocumentFromBytes copies the values from a BSON document represented as a -// []byte to a ValueWriter. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. 
-func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { - dw, err := dst.WriteDocument() - if err != nil { - return err - } - - err = c.CopyBytesToDocumentWriter(dw, src) - if err != nil { - return err - } - - return dw.WriteDocumentEnd() -} - -type writeElementFn func(key string) (ValueWriter, error) - -// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an -// ArrayWriter. -// -// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go -// Driver 2.0. -func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { - wef := func(_ string) (ValueWriter, error) { - return dst.WriteArrayElement() - } - - return c.copyBytesToValueWriter(src, wef) -} - -// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a -// DocumentWriter. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { - wef := func(key string) (ValueWriter, error) { - return dst.WriteDocumentElement(key) - } - - return c.copyBytesToValueWriter(src, wef) -} - -func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { - // TODO(skriptble): Create errors types here. Anything that is a tag should be a property. - length, rem, ok := bsoncore.ReadLength(src) - if !ok { - return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src)) - } - if len(src) < int(length) { - return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", len(src), length) - } - rem = rem[:length-4] - - var t bsontype.Type - var key string - var val bsoncore.Value - for { - t, rem, ok = bsoncore.ReadType(rem) - if !ok { - return io.EOF - } - if t == bsontype.Type(0) { - if len(rem) != 0 { - return fmt.Errorf("document end byte found before end of document. 
remaining bytes=%v", rem) - } - break - } - - key, rem, ok = bsoncore.ReadKey(rem) - if !ok { - return fmt.Errorf("invalid key found. remaining bytes=%v", rem) - } - - // write as either array element or document element using writeElementFn - vw, err := wef(key) - if err != nil { - return err - } - - val, rem, ok = bsoncore.ReadValue(rem, t) - if !ok { - return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t) - } - err = c.CopyValueFromBytes(vw, t, val.Data) - if err != nil { - return err - } - } - return nil -} - -// CopyDocumentToBytes copies an entire document from the ValueReader and -// returns it as bytes. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { - return c.AppendDocumentBytes(nil, src) -} - -// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will -// append the result to dst. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { - if br, ok := src.(BytesReader); ok { - _, dst, err := br.ReadValueBytes(dst) - return dst, err - } - - vw := vwPool.Get().(*valueWriter) - defer putValueWriter(vw) - - vw.reset(dst) - - err := c.CopyDocument(vw, src) - dst = vw.buf - return dst, err -} - -// AppendArrayBytes copies an array from the ValueReader to dst. -// -// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. 
-func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { - if br, ok := src.(BytesReader); ok { - _, dst, err := br.ReadValueBytes(dst) - return dst, err - } - - vw := vwPool.Get().(*valueWriter) - defer putValueWriter(vw) - - vw.reset(dst) - - err := c.copyArray(vw, src) - dst = vw.buf - return dst, err -} - -// CopyValueFromBytes will write the value represtend by t and src to dst. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead. -func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { - if wvb, ok := dst.(BytesWriter); ok { - return wvb.WriteValueBytes(t, src) - } - - vr := vrPool.Get().(*valueReader) - defer vrPool.Put(vr) - - vr.reset(src) - vr.pushElement(t) - - return c.CopyValue(dst, vr) -} - -// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a -// []byte. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead. -func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { - return c.AppendValueBytes(nil, src) -} - -// AppendValueBytes functions the same as CopyValueToBytes, but will append the -// result to dst. -// -// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go -// Driver 2.0. -func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { - if br, ok := src.(BytesReader); ok { - return br.ReadValueBytes(dst) - } - - vw := vwPool.Get().(*valueWriter) - defer putValueWriter(vw) - - start := len(dst) - - vw.reset(dst) - vw.push(mElement) - - err := c.CopyValue(vw, src) - if err != nil { - return 0, dst, err - } - - return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil -} - -// CopyValue will copy a single value from src to dst. -// -// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. 
-func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { - var err error - switch src.Type() { - case bsontype.Double: - var f64 float64 - f64, err = src.ReadDouble() - if err != nil { - break - } - err = dst.WriteDouble(f64) - case bsontype.String: - var str string - str, err = src.ReadString() - if err != nil { - return err - } - err = dst.WriteString(str) - case bsontype.EmbeddedDocument: - err = c.CopyDocument(dst, src) - case bsontype.Array: - err = c.copyArray(dst, src) - case bsontype.Binary: - var data []byte - var subtype byte - data, subtype, err = src.ReadBinary() - if err != nil { - break - } - err = dst.WriteBinaryWithSubtype(data, subtype) - case bsontype.Undefined: - err = src.ReadUndefined() - if err != nil { - break - } - err = dst.WriteUndefined() - case bsontype.ObjectID: - var oid primitive.ObjectID - oid, err = src.ReadObjectID() - if err != nil { - break - } - err = dst.WriteObjectID(oid) - case bsontype.Boolean: - var b bool - b, err = src.ReadBoolean() - if err != nil { - break - } - err = dst.WriteBoolean(b) - case bsontype.DateTime: - var dt int64 - dt, err = src.ReadDateTime() - if err != nil { - break - } - err = dst.WriteDateTime(dt) - case bsontype.Null: - err = src.ReadNull() - if err != nil { - break - } - err = dst.WriteNull() - case bsontype.Regex: - var pattern, options string - pattern, options, err = src.ReadRegex() - if err != nil { - break - } - err = dst.WriteRegex(pattern, options) - case bsontype.DBPointer: - var ns string - var pointer primitive.ObjectID - ns, pointer, err = src.ReadDBPointer() - if err != nil { - break - } - err = dst.WriteDBPointer(ns, pointer) - case bsontype.JavaScript: - var js string - js, err = src.ReadJavascript() - if err != nil { - break - } - err = dst.WriteJavascript(js) - case bsontype.Symbol: - var symbol string - symbol, err = src.ReadSymbol() - if err != nil { - break - } - err = dst.WriteSymbol(symbol) - case bsontype.CodeWithScope: - var code string - var srcScope 
DocumentReader - code, srcScope, err = src.ReadCodeWithScope() - if err != nil { - break - } - - var dstScope DocumentWriter - dstScope, err = dst.WriteCodeWithScope(code) - if err != nil { - break - } - err = c.copyDocumentCore(dstScope, srcScope) - case bsontype.Int32: - var i32 int32 - i32, err = src.ReadInt32() - if err != nil { - break - } - err = dst.WriteInt32(i32) - case bsontype.Timestamp: - var t, i uint32 - t, i, err = src.ReadTimestamp() - if err != nil { - break - } - err = dst.WriteTimestamp(t, i) - case bsontype.Int64: - var i64 int64 - i64, err = src.ReadInt64() - if err != nil { - break - } - err = dst.WriteInt64(i64) - case bsontype.Decimal128: - var d128 primitive.Decimal128 - d128, err = src.ReadDecimal128() - if err != nil { - break - } - err = dst.WriteDecimal128(d128) - case bsontype.MinKey: - err = src.ReadMinKey() - if err != nil { - break - } - err = dst.WriteMinKey() - case bsontype.MaxKey: - err = src.ReadMaxKey() - if err != nil { - break - } - err = dst.WriteMaxKey() - default: - err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type()) - } - - return err -} - -func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { - ar, err := src.ReadArray() - if err != nil { - return err - } - - aw, err := dst.WriteArray() - if err != nil { - return err - } - - for { - vr, err := ar.ReadValue() - if errors.Is(err, ErrEOA) { - break - } - if err != nil { - return err - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - err = c.CopyValue(vw, vr) - if err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { - for { - key, vr, err := dr.ReadElement() - if errors.Is(err, ErrEOD) { - break - } - if err != nil { - return err - } - - vw, err := dw.WriteDocumentElement(key) - if err != nil { - return err - } - - err = c.CopyValue(vw, vr) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} diff 
--git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go deleted file mode 100644 index 750b0d2af5..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsonrw contains abstractions for reading and writing -// BSON and BSON like types from sources. -package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw" diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go deleted file mode 100644 index f0702d9d30..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" - - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -const maxNestingDepth = 200 - -// ErrInvalidJSON indicates the JSON input is invalid -var ErrInvalidJSON = errors.New("invalid JSON input") - -type jsonParseState byte - -const ( - jpsStartState jsonParseState = iota - jpsSawBeginObject - jpsSawEndObject - jpsSawBeginArray - jpsSawEndArray - jpsSawColon - jpsSawComma - jpsSawKey - jpsSawValue - jpsDoneState - jpsInvalidState -) - -type jsonParseMode byte - -const ( - jpmInvalidMode jsonParseMode = iota - jpmObjectMode - jpmArrayMode -) - -type extJSONValue struct { - t bsontype.Type - v interface{} -} - -type extJSONObject struct { - keys []string - values []*extJSONValue -} - -type extJSONParser struct { - js *jsonScanner - s jsonParseState - m []jsonParseMode - k string - v *extJSONValue - - err error - canonical bool - depth int - maxDepth int - - emptyObject bool - relaxedUUID bool -} - -// newExtJSONParser returns a new extended JSON parser, ready to to begin -// parsing from the first character of the argued json input. It will not -// perform any read-ahead and will therefore not report any errors about -// malformed JSON at this point. 
-func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser { - return &extJSONParser{ - js: &jsonScanner{r: r}, - s: jpsStartState, - m: []jsonParseMode{}, - canonical: canonical, - maxDepth: maxNestingDepth, - } -} - -// peekType examines the next value and returns its BSON Type -func (ejp *extJSONParser) peekType() (bsontype.Type, error) { - var t bsontype.Type - var err error - initialState := ejp.s - - ejp.advanceState() - switch ejp.s { - case jpsSawValue: - t = ejp.v.t - case jpsSawBeginArray: - t = bsontype.Array - case jpsInvalidState: - err = ejp.err - case jpsSawComma: - // in array mode, seeing a comma means we need to progress again to actually observe a type - if ejp.peekMode() == jpmArrayMode { - return ejp.peekType() - } - case jpsSawEndArray: - // this would only be a valid state if we were in array mode, so return end-of-array error - err = ErrEOA - case jpsSawBeginObject: - // peek key to determine type - ejp.advanceState() - switch ejp.s { - case jpsSawEndObject: // empty embedded document - t = bsontype.EmbeddedDocument - ejp.emptyObject = true - case jpsInvalidState: - err = ejp.err - case jpsSawKey: - if initialState == jpsStartState { - return bsontype.EmbeddedDocument, nil - } - t = wrapperKeyBSONType(ejp.k) - - // if $uuid is encountered, parse as binary subtype 4 - if ejp.k == "$uuid" { - ejp.relaxedUUID = true - t = bsontype.Binary - } - - switch t { - case bsontype.JavaScript: - // just saw $code, need to check for $scope at same level - _, err = ejp.readValue(bsontype.JavaScript) - if err != nil { - break - } - - switch ejp.s { - case jpsSawEndObject: // type is TypeJavaScript - case jpsSawComma: - ejp.advanceState() - - if ejp.s == jpsSawKey && ejp.k == "$scope" { - t = bsontype.CodeWithScope - } else { - err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k) - } - case jpsInvalidState: - err = ejp.err - default: - err = ErrInvalidJSON - } - case bsontype.CodeWithScope: - err = 
errors.New("invalid extended JSON: code with $scope must contain $code before $scope") - } - } - } - - return t, err -} - -// readKey parses the next key and its type and returns them -func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) { - if ejp.emptyObject { - ejp.emptyObject = false - return "", 0, ErrEOD - } - - // advance to key (or return with error) - switch ejp.s { - case jpsStartState: - ejp.advanceState() - if ejp.s == jpsSawBeginObject { - ejp.advanceState() - } - case jpsSawBeginObject: - ejp.advanceState() - case jpsSawValue, jpsSawEndObject, jpsSawEndArray: - ejp.advanceState() - switch ejp.s { - case jpsSawBeginObject, jpsSawComma: - ejp.advanceState() - case jpsSawEndObject: - return "", 0, ErrEOD - case jpsDoneState: - return "", 0, io.EOF - case jpsInvalidState: - return "", 0, ejp.err - default: - return "", 0, ErrInvalidJSON - } - case jpsSawKey: // do nothing (key was peeked before) - default: - return "", 0, invalidRequestError("key") - } - - // read key - var key string - - switch ejp.s { - case jpsSawKey: - key = ejp.k - case jpsSawEndObject: - return "", 0, ErrEOD - case jpsInvalidState: - return "", 0, ejp.err - default: - return "", 0, invalidRequestError("key") - } - - // check for colon - ejp.advanceState() - if err := ensureColon(ejp.s, key); err != nil { - return "", 0, err - } - - // peek at the value to determine type - t, err := ejp.peekType() - if err != nil { - return "", 0, err - } - - return key, t, nil -} - -// readValue returns the value corresponding to the Type returned by peekType -func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { - if ejp.s == jpsInvalidState { - return nil, ejp.err - } - - var v *extJSONValue - - switch t { - case bsontype.Null, bsontype.Boolean, bsontype.String: - if ejp.s != jpsSawValue { - return nil, invalidRequestError(t.String()) - } - v = ejp.v - case bsontype.Int32, bsontype.Int64, bsontype.Double: - // relaxed version allows these to be literal number 
values - if ejp.s == jpsSawValue { - v = ejp.v - break - } - fallthrough - case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined: - switch ejp.s { - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read value - ejp.advanceState() - if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) { - return nil, invalidJSONErrorForType("value", t) - } - - v = ejp.v - - // read end object - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("} after value", t) - } - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer: - if ejp.s != jpsSawKey { - return nil, invalidRequestError(t.String()) - } - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - ejp.advanceState() - if t == bsontype.Binary && ejp.s == jpsSawValue { - // convert relaxed $uuid format - if ejp.relaxedUUID { - defer func() { ejp.relaxedUUID = false }() - uuid, err := ejp.v.parseSymbol() - if err != nil { - return nil, err - } - - // RFC 4122 defines the length of a UUID as 36 and the hyphens in a UUID as appearing - // in the 8th, 13th, 18th, and 23rd characters. 
- // - // See https://tools.ietf.org/html/rfc4122#section-3 - valid := len(uuid) == 36 && - string(uuid[8]) == "-" && - string(uuid[13]) == "-" && - string(uuid[18]) == "-" && - string(uuid[23]) == "-" - if !valid { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") - } - - // remove hyphens - uuidNoHyphens := strings.ReplaceAll(uuid, "-", "") - if len(uuidNoHyphens) != 32 { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") - } - - // convert hex to bytes - bytes, err := hex.DecodeString(uuidNoHyphens) - if err != nil { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %w", err) - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("$uuid and value and then }", bsontype.Binary) - } - - base64 := &extJSONValue{ - t: bsontype.String, - v: base64.StdEncoding.EncodeToString(bytes), - } - subType := &extJSONValue{ - t: bsontype.String, - v: "04", - } - - v = &extJSONValue{ - t: bsontype.EmbeddedDocument, - v: &extJSONObject{ - keys: []string{"base64", "subType"}, - values: []*extJSONValue{base64, subType}, - }, - } - - break - } - - // convert legacy $binary format - base64 := ejp.v - - ejp.advanceState() - if ejp.s != jpsSawComma { - return nil, invalidJSONErrorForType(",", bsontype.Binary) - } - - ejp.advanceState() - key, t, err := ejp.readKey() - if err != nil { - return nil, err - } - if key != "$type" { - return nil, invalidJSONErrorForType("$type", bsontype.Binary) - } - - subType, err := ejp.readValue(t) - if err != nil { - return nil, err - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary) - } - - v = &extJSONValue{ - t: bsontype.EmbeddedDocument, - v: &extJSONObject{ - keys: []string{"base64", "subType"}, - values: []*extJSONValue{base64, subType}, - }, - } - break - } - - // read KV 
pairs - if ejp.s != jpsSawBeginObject { - return nil, invalidJSONErrorForType("{", t) - } - - keys, vals, err := ejp.readObject(2, true) - if err != nil { - return nil, err - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("2 key-value pairs and then }", t) - } - - v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} - - case bsontype.DateTime: - switch ejp.s { - case jpsSawValue: - v = ejp.v - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - ejp.advanceState() - switch ejp.s { - case jpsSawBeginObject: - keys, vals, err := ejp.readObject(1, true) - if err != nil { - return nil, err - } - v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} - case jpsSawValue: - if ejp.canonical { - return nil, invalidJSONError("{") - } - v = ejp.v - default: - if ejp.canonical { - return nil, invalidJSONErrorForType("object", t) - } - return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t) - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("value and then }", t) - } - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.JavaScript: - switch ejp.s { - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read value - ejp.advanceState() - if ejp.s != jpsSawValue { - return nil, invalidJSONErrorForType("value", t) - } - v = ejp.v - - // read end object or comma and just return - ejp.advanceState() - case jpsSawEndObject: - v = ejp.v - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.CodeWithScope: - if ejp.s == jpsSawKey && ejp.k == "$scope" { - v = ejp.v // this is the $code string from earlier - - // read colon - ejp.advanceState() - if err := 
ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read { - ejp.advanceState() - if ejp.s != jpsSawBeginObject { - return nil, invalidJSONError("$scope to be embedded document") - } - } else { - return nil, invalidRequestError(t.String()) - } - case bsontype.EmbeddedDocument, bsontype.Array: - return nil, invalidRequestError(t.String()) - } - - return v, nil -} - -// readObject is a utility method for reading full objects of known (or expected) size -// it is useful for extended JSON types such as binary, datetime, regex, and timestamp -func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) { - keys := make([]string, numKeys) - vals := make([]*extJSONValue, numKeys) - - if !started { - ejp.advanceState() - if ejp.s != jpsSawBeginObject { - return nil, nil, invalidJSONError("{") - } - } - - for i := 0; i < numKeys; i++ { - key, t, err := ejp.readKey() - if err != nil { - return nil, nil, err - } - - switch ejp.s { - case jpsSawKey: - v, err := ejp.readValue(t) - if err != nil { - return nil, nil, err - } - - keys[i] = key - vals[i] = v - case jpsSawValue: - keys[i] = key - vals[i] = ejp.v - default: - return nil, nil, invalidJSONError("value") - } - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, nil, invalidJSONError("}") - } - - return keys, vals, nil -} - -// advanceState reads the next JSON token from the scanner and transitions -// from the current state based on that token's type -func (ejp *extJSONParser) advanceState() { - if ejp.s == jpsDoneState || ejp.s == jpsInvalidState { - return - } - - jt, err := ejp.js.nextToken() - - if err != nil { - ejp.err = err - ejp.s = jpsInvalidState - return - } - - valid := ejp.validateToken(jt.t) - if !valid { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - return - } - - switch jt.t { - case jttBeginObject: - ejp.s = jpsSawBeginObject - ejp.pushMode(jpmObjectMode) - ejp.depth++ - - if ejp.depth > ejp.maxDepth { - ejp.err 
= nestingDepthError(jt.p, ejp.depth) - ejp.s = jpsInvalidState - } - case jttEndObject: - ejp.s = jpsSawEndObject - ejp.depth-- - - if ejp.popMode() != jpmObjectMode { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttBeginArray: - ejp.s = jpsSawBeginArray - ejp.pushMode(jpmArrayMode) - case jttEndArray: - ejp.s = jpsSawEndArray - - if ejp.popMode() != jpmArrayMode { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttColon: - ejp.s = jpsSawColon - case jttComma: - ejp.s = jpsSawComma - case jttEOF: - ejp.s = jpsDoneState - if len(ejp.m) != 0 { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttString: - switch ejp.s { - case jpsSawComma: - if ejp.peekMode() == jpmArrayMode { - ejp.s = jpsSawValue - ejp.v = extendJSONToken(jt) - return - } - fallthrough - case jpsSawBeginObject: - ejp.s = jpsSawKey - ejp.k = jt.v.(string) - return - } - fallthrough - default: - ejp.s = jpsSawValue - ejp.v = extendJSONToken(jt) - } -} - -var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{ - jpsStartState: { - jttBeginObject: true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - jttEOF: true, - }, - jpsSawBeginObject: { - jttEndObject: true, - jttString: true, - }, - jpsSawEndObject: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsSawBeginArray: { - jttBeginObject: true, - jttBeginArray: true, - jttEndArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawEndArray: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsSawColon: { - jttBeginObject: true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawComma: { - jttBeginObject: true, - jttBeginArray: true, 
- jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawKey: { - jttColon: true, - }, - jpsSawValue: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsDoneState: {}, - jpsInvalidState: {}, -} - -func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool { - switch ejp.s { - case jpsSawEndObject: - // if we are at depth zero and the next token is a '{', - // we can consider it valid only if we are not in array mode. - if jtt == jttBeginObject && ejp.depth == 0 { - return ejp.peekMode() != jpmArrayMode - } - case jpsSawComma: - switch ejp.peekMode() { - // the only valid next token after a comma inside a document is a string (a key) - case jpmObjectMode: - return jtt == jttString - case jpmInvalidMode: - return false - } - } - - _, ok := jpsValidTransitionTokens[ejp.s][jtt] - return ok -} - -// ensureExtValueType returns true if the current value has the expected -// value type for single-key extended JSON types. 
For example, -// {"$numberInt": v} v must be TypeString -func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool { - switch t { - case bsontype.MinKey, bsontype.MaxKey: - return ejp.v.t == bsontype.Int32 - case bsontype.Undefined: - return ejp.v.t == bsontype.Boolean - case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID: - return ejp.v.t == bsontype.String - default: - return false - } -} - -func (ejp *extJSONParser) pushMode(m jsonParseMode) { - ejp.m = append(ejp.m, m) -} - -func (ejp *extJSONParser) popMode() jsonParseMode { - l := len(ejp.m) - if l == 0 { - return jpmInvalidMode - } - - m := ejp.m[l-1] - ejp.m = ejp.m[:l-1] - - return m -} - -func (ejp *extJSONParser) peekMode() jsonParseMode { - l := len(ejp.m) - if l == 0 { - return jpmInvalidMode - } - - return ejp.m[l-1] -} - -func extendJSONToken(jt *jsonToken) *extJSONValue { - var t bsontype.Type - - switch jt.t { - case jttInt32: - t = bsontype.Int32 - case jttInt64: - t = bsontype.Int64 - case jttDouble: - t = bsontype.Double - case jttString: - t = bsontype.String - case jttBool: - t = bsontype.Boolean - case jttNull: - t = bsontype.Null - default: - return nil - } - - return &extJSONValue{t: t, v: jt.v} -} - -func ensureColon(s jsonParseState, key string) error { - if s != jpsSawColon { - return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key) - } - - return nil -} - -func invalidRequestError(s string) error { - return fmt.Errorf("invalid request to read %s", s) -} - -func invalidJSONError(expected string) error { - return fmt.Errorf("invalid JSON input; expected %s", expected) -} - -func invalidJSONErrorForType(expected string, t bsontype.Type) error { - return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t) -} - -func unexpectedTokenError(jt *jsonToken) error { - switch jt.t { - case jttInt32, jttInt64, jttDouble: - return fmt.Errorf("invalid JSON input; unexpected number (%v) at position 
%d", jt.v, jt.p) - case jttString: - return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p) - case jttBool: - return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p) - case jttNull: - return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p) - case jttEOF: - return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p) - default: - return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p) - } -} - -func nestingDepthError(p, depth int) error { - return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go deleted file mode 100644 index 59ddfc4485..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go +++ /dev/null @@ -1,653 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "errors" - "fmt" - "io" - "sync" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. -// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -type ExtJSONValueReaderPool struct { - pool sync.Pool -} - -// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. -// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. 
-func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { - return &ExtJSONValueReaderPool{ - pool: sync.Pool{ - New: func() interface{} { - return new(extJSONValueReader) - }, - }, - } -} - -// Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON. -// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { - vr := bvrp.pool.Get().(*extJSONValueReader) - return vr.reset(r, canonical) -} - -// Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing -// is inserted into the pool and ok will be false. -// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { - bvr, ok := vr.(*extJSONValueReader) - if !ok { - return false - } - - bvr, _ = bvr.reset(nil, false) - bvrp.pool.Put(bvr) - return true -} - -type ejvrState struct { - mode mode - vType bsontype.Type - depth int -} - -// extJSONValueReader is for reading extended JSON. 
-type extJSONValueReader struct { - p *extJSONParser - - stack []ejvrState - frame int -} - -// NewExtJSONValueReader creates a new ValueReader from a given io.Reader -// It will interpret the JSON of r as canonical or relaxed according to the -// given canonical flag -func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) { - return newExtJSONValueReader(r, canonical) -} - -func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) { - ejvr := new(extJSONValueReader) - return ejvr.reset(r, canonical) -} - -func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) { - p := newExtJSONParser(r, canonical) - typ, err := p.peekType() - - if err != nil { - return nil, ErrInvalidJSON - } - - var m mode - switch typ { - case bsontype.EmbeddedDocument: - m = mTopLevel - case bsontype.Array: - m = mArray - default: - m = mValue - } - - stack := make([]ejvrState, 1, 5) - stack[0] = ejvrState{ - mode: m, - vType: typ, - } - return &extJSONValueReader{ - p: p, - stack: stack, - }, nil -} - -func (ejvr *extJSONValueReader) advanceFrame() { - if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack - length := len(ejvr.stack) - if length+1 >= cap(ejvr.stack) { - // double it - buf := make([]ejvrState, 2*cap(ejvr.stack)+1) - copy(buf, ejvr.stack) - ejvr.stack = buf - } - ejvr.stack = ejvr.stack[:length+1] - } - ejvr.frame++ - - // Clean the stack - ejvr.stack[ejvr.frame].mode = 0 - ejvr.stack[ejvr.frame].vType = 0 - ejvr.stack[ejvr.frame].depth = 0 -} - -func (ejvr *extJSONValueReader) pushDocument() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mDocument - ejvr.stack[ejvr.frame].depth = ejvr.p.depth -} - -func (ejvr *extJSONValueReader) pushCodeWithScope() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mCodeWithScope -} - -func (ejvr *extJSONValueReader) pushArray() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mArray -} - -func (ejvr 
*extJSONValueReader) push(m mode, t bsontype.Type) { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = m - ejvr.stack[ejvr.frame].vType = t -} - -func (ejvr *extJSONValueReader) pop() { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - ejvr.frame-- - case mDocument, mArray, mCodeWithScope: - ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc... - } -} - -func (ejvr *extJSONValueReader) skipObject() { - // read entire object until depth returns to 0 (last ending } or ] seen) - depth := 1 - for depth > 0 { - ejvr.p.advanceState() - - // If object is empty, raise depth and continue. When emptyObject is true, the - // parser has already read both the opening and closing brackets of an empty - // object ("{}"), so the next valid token will be part of the parent document, - // not part of the nested document. - // - // If there is a comma, there are remaining fields, emptyObject must be set back - // to false, and comma must be skipped with advanceState(). 
- if ejvr.p.emptyObject { - if ejvr.p.s == jpsSawComma { - ejvr.p.emptyObject = false - ejvr.p.advanceState() - } - depth-- - continue - } - - switch ejvr.p.s { - case jpsSawBeginObject, jpsSawBeginArray: - depth++ - case jpsSawEndObject, jpsSawEndArray: - depth-- - } - } -} - -func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error { - te := TransitionError{ - name: name, - current: ejvr.stack[ejvr.frame].mode, - destination: destination, - modes: modes, - action: "read", - } - if ejvr.frame != 0 { - te.parent = ejvr.stack[ejvr.frame-1].mode - } - return te -} - -func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error { - return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t) -} - -func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - if ejvr.stack[ejvr.frame].vType != t { - return ejvr.typeError(t) - } - default: - modes := []mode{mElement, mValue} - if addModes != nil { - modes = append(modes, addModes...) 
- } - return ejvr.invalidTransitionErr(destination, callerName, modes) - } - - return nil -} - -func (ejvr *extJSONValueReader) Type() bsontype.Type { - return ejvr.stack[ejvr.frame].vType -} - -func (ejvr *extJSONValueReader) Skip() error { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - default: - return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue}) - } - - defer ejvr.pop() - - t := ejvr.stack[ejvr.frame].vType - switch t { - case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope: - // read entire array, doc or CodeWithScope - ejvr.skipObject() - default: - _, err := ejvr.p.readValue(t) - if err != nil { - return err - } - } - - return nil -} - -func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel: // allow reading array from top level - case mArray: - return ejvr, nil - default: - if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil { - return nil, err - } - } - - ejvr.pushArray() - - return ejvr, nil -} - -func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) { - if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil { - return nil, 0, err - } - - v, err := ejvr.p.readValue(bsontype.Binary) - if err != nil { - return nil, 0, err - } - - b, btype, err = v.parseBinary() - - ejvr.pop() - return b, btype, err -} - -func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) { - if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil { - return false, err - } - - v, err := ejvr.p.readValue(bsontype.Boolean) - if err != nil { - return false, err - } - - if v.t != bsontype.Boolean { - return false, fmt.Errorf("expected type bool, but got type %s", v.t) - } - - ejvr.pop() - return v.v.(bool), nil -} - -func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case 
mTopLevel: - return ejvr, nil - case mElement, mValue: - if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument { - return nil, ejvr.typeError(bsontype.EmbeddedDocument) - } - - ejvr.pushDocument() - return ejvr, nil - default: - return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue}) - } -} - -func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) { - if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil { - return "", nil, err - } - - v, err := ejvr.p.readValue(bsontype.CodeWithScope) - if err != nil { - return "", nil, err - } - - code, err = v.parseJavascript() - - ejvr.pushCodeWithScope() - return code, ejvr, err -} - -func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) { - if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil { - return "", primitive.NilObjectID, err - } - - v, err := ejvr.p.readValue(bsontype.DBPointer) - if err != nil { - return "", primitive.NilObjectID, err - } - - ns, oid, err = v.parseDBPointer() - - ejvr.pop() - return ns, oid, err -} - -func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) { - if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.DateTime) - if err != nil { - return 0, err - } - - d, err := v.parseDateTime() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) ReadDecimal128() (primitive.Decimal128, error) { - if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil { - return primitive.Decimal128{}, err - } - - v, err := ejvr.p.readValue(bsontype.Decimal128) - if err != nil { - return primitive.Decimal128{}, err - } - - d, err := v.parseDecimal128() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) ReadDouble() (float64, error) { - if err := 
ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.Double) - if err != nil { - return 0, err - } - - d, err := v.parseDouble() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) ReadInt32() (int32, error) { - if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.Int32) - if err != nil { - return 0, err - } - - i, err := v.parseInt32() - - ejvr.pop() - return i, err -} - -func (ejvr *extJSONValueReader) ReadInt64() (int64, error) { - if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.Int64) - if err != nil { - return 0, err - } - - i, err := v.parseInt64() - - ejvr.pop() - return i, err -} - -func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) { - if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil { - return "", err - } - - v, err := ejvr.p.readValue(bsontype.JavaScript) - if err != nil { - return "", err - } - - code, err = v.parseJavascript() - - ejvr.pop() - return code, err -} - -func (ejvr *extJSONValueReader) ReadMaxKey() error { - if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.MaxKey) - if err != nil { - return err - } - - err = v.parseMinMaxKey("max") - - ejvr.pop() - return err -} - -func (ejvr *extJSONValueReader) ReadMinKey() error { - if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.MinKey) - if err != nil { - return err - } - - err = v.parseMinMaxKey("min") - - ejvr.pop() - return err -} - -func (ejvr *extJSONValueReader) ReadNull() error { - if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil { - return err - } - - v, err := 
ejvr.p.readValue(bsontype.Null) - if err != nil { - return err - } - - if v.t != bsontype.Null { - return fmt.Errorf("expected type null but got type %s", v.t) - } - - ejvr.pop() - return nil -} - -func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) { - if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil { - return primitive.ObjectID{}, err - } - - v, err := ejvr.p.readValue(bsontype.ObjectID) - if err != nil { - return primitive.ObjectID{}, err - } - - oid, err := v.parseObjectID() - - ejvr.pop() - return oid, err -} - -func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) { - if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil { - return "", "", err - } - - v, err := ejvr.p.readValue(bsontype.Regex) - if err != nil { - return "", "", err - } - - pattern, options, err = v.parseRegex() - - ejvr.pop() - return pattern, options, err -} - -func (ejvr *extJSONValueReader) ReadString() (string, error) { - if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil { - return "", err - } - - v, err := ejvr.p.readValue(bsontype.String) - if err != nil { - return "", err - } - - if v.t != bsontype.String { - return "", fmt.Errorf("expected type string but got type %s", v.t) - } - - ejvr.pop() - return v.v.(string), nil -} - -func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) { - if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil { - return "", err - } - - v, err := ejvr.p.readValue(bsontype.Symbol) - if err != nil { - return "", err - } - - symbol, err = v.parseSymbol() - - ejvr.pop() - return symbol, err -} - -func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) { - if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil { - return 0, 0, err - } - - v, err := ejvr.p.readValue(bsontype.Timestamp) - if err != nil { - return 0, 0, err - } - 
- t, i, err = v.parseTimestamp() - - ejvr.pop() - return t, i, err -} - -func (ejvr *extJSONValueReader) ReadUndefined() error { - if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.Undefined) - if err != nil { - return err - } - - err = v.parseUndefined() - - ejvr.pop() - return err -} - -func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel, mDocument, mCodeWithScope: - default: - return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope}) - } - - name, t, err := ejvr.p.readKey() - - if err != nil { - if errors.Is(err, ErrEOD) { - if ejvr.stack[ejvr.frame].mode == mCodeWithScope { - _, err := ejvr.p.peekType() - if err != nil { - return "", nil, err - } - } - - ejvr.pop() - } - - return "", nil, err - } - - ejvr.push(mElement, t) - return name, ejvr, nil -} - -func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mArray: - default: - return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray}) - } - - t, err := ejvr.p.peekType() - if err != nil { - if errors.Is(err, ErrEOA) { - ejvr.pop() - } - - return nil, err - } - - ejvr.push(mValue, t) - return ejvr, nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go deleted file mode 100644 index ba39c9601f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on github.com/golang/go by The Go Authors -// See THIRD-PARTY-NOTICES for original license terms. - -package bsonrw - -import "unicode/utf8" - -// safeSet holds the value true if the ASCII character with the given array -// position can be represented inside a JSON string without any further -// escaping. -// -// All values are true except for the ASCII control characters (0-31), the -// double quote ("), and the backslash character ("\"). -var safeSet = [utf8.RuneSelf]bool{ - ' ': true, - '!': true, - '"': false, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '(': true, - ')': true, - '*': true, - '+': true, - ',': true, - '-': true, - '.': true, - '/': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - ':': true, - ';': true, - '<': true, - '=': true, - '>': true, - '?': true, - '@': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'V': true, - 'W': true, - 'X': true, - 'Y': true, - 'Z': true, - '[': true, - '\\': false, - ']': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '{': true, - '|': true, - '}': true, - '~': true, - '\u007f': true, -} - -// htmlSafeSet holds the value true if the ASCII character with the given -// array position can be safely represented inside a JSON string, embedded -// 
inside of HTML